code (stringlengths 3 to 1.05M) | repo_name (stringlengths 5 to 104) | path (stringlengths 4 to 251) | language (stringclasses 1 value) | license (stringclasses 15 values) | size (int64 3 to 1.05M)
---|---|---|---|---|---|
#########################################################################
## This program is part of 'MOOSE', the
## Messaging Object Oriented Simulation Environment.
## Copyright (C) 2013 Upinder S. Bhalla. and NCBS
## It is made available under the terms of the
## GNU Lesser General Public License version 2.1
## See the file COPYING.LIB for the full notice.
#########################################################################
import math
import pylab
import numpy
import moose
def main():
"""
This example builds a dose-response of a bistable model of a chemical
system. It uses the kinetic solver *Ksolve* and the steady-state finder
*SteadyState*.
The model is set up within the script.
The basic approach is to increment the control variable, **a** in this
case, while monitoring **b**.
The algorithm marches through a series of values of the buffered pool
**a** and measures resultant values of pool **b**. At each cycle
the algorithm calls the steady-state finder. Since **a** is incremented
only a small amount on each step, each new steady state is
(usually) quite close to the previous one. The exception is when there
is a state transition.
Here we plot three dose-response curves to illustrate the bistable
nature of the system.
On the upward going curve in blue, **a** starts low. Here,
**b** follows the low arm of the curve
and then jumps up to the high value at roughly *log( [a] ) = -0.55*.
On the downward going curve in green, **b** follows the high arm
of the curve forming a nice hysteretic loop.
Eventually **b** has to fall to the low state at about
*log( [a] ) = -0.83*
Through nasty concentration manipulations, we find the third arm
of the curve, which tracks the unstable fixed point. This is in red.
We find this arm by
setting an initial point close to the unstable fixed point, which
the steady-state finder duly locates. We then follow a dose-response
curve as with the other arms of the curve.
Note that the steady-state solver doesn't always succeed in finding a
good solution, despite moving only in small steps. Nevertheless the
resultant curves are smooth because it gives up pretty close to the
correct value, simply because the successive points are close together.
Overall, the system is pretty robust despite the core root-finder
computations in GSL being temperamental.
In doing a production dose-response series
you may wish to sample concentration space logarithmically rather than
linearly.
"""
compartment = makeModel()
ksolve = moose.Ksolve( '/model/compartment/ksolve' )
stoich = moose.Stoich( '/model/compartment/stoich' )
stoich.compartment = compartment
stoich.ksolve = ksolve
stoich.path = "/model/compartment/##"
state = moose.SteadyState( '/model/compartment/state' )
moose.reinit()
state.stoich = stoich
state.convergenceCriterion = 1e-6
moose.seed( 111 ) # Used when generating the samples in state space
b = moose.element( '/model/compartment/b' )
a = moose.element( '/model/compartment/a' )
c = moose.element( '/model/compartment/c' )
a.concInit = 0.1
deltaA = 0.002
num = 150
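    # Hedged sketch (not used in this run): the docstring above suggests sampling
    # concentration space logarithmically for a production dose-response series.
    # Assuming the same overall range as the linear sweep below, one could write:
    #     conc_values = numpy.logspace( numpy.log10( a.concInit ),
    #                                   numpy.log10( a.concInit + num * deltaA ), num )
    # and then set a.concInit = conc_values[i] inside each loop instead of adding
    # a fixed deltaA at every step.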
avec = []
bvec = []
moose.reinit()
# Now go up.
for i in range( 0, num ):
        moose.start( 1.0 ) # Run the model for 1 second.
state.settle() # This function finds the steady states.
avec.append( a.conc )
bvec.append( b.conc )
a.concInit += deltaA
#print i, a.conc, b.conc
pylab.plot( numpy.log10( avec ), numpy.log10( bvec ), label='b vs a up' )
# Now go down.
avec = []
bvec = []
for i in range( 0, num ):
        moose.start( 1.0 ) # Run the model for 1 second.
state.settle() # This function finds the steady states.
avec.append( a.conc )
bvec.append( b.conc )
a.concInit -= deltaA
#print i, a.conc, b.conc
pylab.plot( numpy.log10( avec ), numpy.log10( bvec ), label='b vs a down' )
# Now aim for the middle. We do this by judiciously choosing a
# start point that should be closer to the unstable fixed point.
avec = []
bvec = []
a.concInit = 0.28
b.conc = 0.15
for i in range( 0, 65 ):
        moose.start( 1.0 ) # Run the model for 1 second.
state.settle() # This function finds the steady states.
avec.append( a.conc )
bvec.append( b.conc )
a.concInit -= deltaA
#print i, a.conc, b.conc
pylab.plot( numpy.log10( avec ), numpy.log10( bvec ), label='b vs a mid' )
pylab.ylim( [-1.7, 1.2] )
pylab.legend()
pylab.show()
quit()
def makeModel():
""" This function creates a bistable reaction system using explicit
MOOSE calls rather than load from a file.
The reaction is::
a ---b---> 2b # b catalyzes a to form more of b.
2b ---c---> a # c catalyzes b to form a.
a <======> 2b # a interconverts to b.
"""
# create container for model
model = moose.Neutral( 'model' )
compartment = moose.CubeMesh( '/model/compartment' )
compartment.volume = 1e-15
# the mesh is created automatically by the compartment
mesh = moose.element( '/model/compartment/mesh' )
# create molecules and reactions
a = moose.BufPool( '/model/compartment/a' )
b = moose.Pool( '/model/compartment/b' )
c = moose.Pool( '/model/compartment/c' )
enz1 = moose.Enz( '/model/compartment/b/enz1' )
enz2 = moose.Enz( '/model/compartment/c/enz2' )
cplx1 = moose.Pool( '/model/compartment/b/enz1/cplx' )
cplx2 = moose.Pool( '/model/compartment/c/enz2/cplx' )
reac = moose.Reac( '/model/compartment/reac' )
# connect them up for reactions
moose.connect( enz1, 'sub', a, 'reac' )
moose.connect( enz1, 'prd', b, 'reac' )
moose.connect( enz1, 'prd', b, 'reac' ) # Note 2 molecules of b.
moose.connect( enz1, 'enz', b, 'reac' )
moose.connect( enz1, 'cplx', cplx1, 'reac' )
moose.connect( enz2, 'sub', b, 'reac' )
moose.connect( enz2, 'sub', b, 'reac' ) # Note 2 molecules of b.
moose.connect( enz2, 'prd', a, 'reac' )
moose.connect( enz2, 'enz', c, 'reac' )
moose.connect( enz2, 'cplx', cplx2, 'reac' )
moose.connect( reac, 'sub', a, 'reac' )
moose.connect( reac, 'prd', b, 'reac' )
moose.connect( reac, 'prd', b, 'reac' ) # Note 2 order in b.
# Assign parameters
a.concInit = 1
b.concInit = 0
c.concInit = 0.01
enz1.kcat = 0.4
enz1.Km = 4
enz2.kcat = 0.6
enz2.Km = 0.01
reac.Kf = 0.001
reac.Kb = 0.01
return compartment
# Run the 'main' if this script is executed standalone.
if __name__ == '__main__':
main()
| dilawar/moose-full | moose-examples/snippets/chemDoseResponse.py | Python | gpl-2.0 | 6,804 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Experimental library that exposes XLA operations directly in TensorFlow.
It is sometimes useful to be able to build HLO programs directly from
TensorFlow. This file provides TensorFlow operators that mirror the semantics of
HLO operators as closely as possible.
Note: There is no promise of backward or forward compatibility for operators
defined in this module. This is primarily because the underlying HLO operators
do not promise backward or forward compatibility.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.compiler.tf2xla.ops import gen_xla_ops
from tensorflow.core.framework import attr_value_pb2
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import bitwise_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import gen_random_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import special_math_ops
# TODO(phawkins): provide wrappers for all XLA operators. Currently the missing
# ops include:
# infeed/outfeed (available via tf.contrib.tpu)
# collectives, e.g., cross-replica-sum (available via tf.contrib.tpu)
# conditional
# gather/scatter
# collapse
# This file reuses builtin names (following XLA's names, so we can call things
# like xla.max), so we capture the builtin versions here.
# pylint: disable=redefined-builtin
_max = max
_min = min
_slice = slice # pylint: disable=invalid-name
constant = constant_op.constant
# Unary operators.
# For most arithmetic operators there is a TensorFlow operator
# that exactly corresponds to each XLA operator. Rather than defining
# XLA-specific variants, we reuse the corresponding TensorFlow operator.
# TODO(phawkins): It would be even better to have TensorFlow operators that 1:1
# wrap every HLO operator, because that would allow us to be confident that the
# semantics match.
def _unary_op(fn):
"""Wrapper that restricts `fn` to have the correct signature."""
def unary_op_wrapper(x, name=None):
return fn(x, name=name)
return unary_op_wrapper
abs = _unary_op(math_ops.abs)
# TODO(phawkins): implement clz.
conj = _unary_op(math_ops.conj)
cos = _unary_op(math_ops.cos)
ceil = _unary_op(math_ops.ceil)
digamma = _unary_op(math_ops.digamma)
erf = _unary_op(math_ops.erf)
erfc = _unary_op(math_ops.erfc)
erfinv = _unary_op(math_ops.erfinv)
ndtri = _unary_op(math_ops.ndtri)
exp = _unary_op(math_ops.exp)
expm1 = _unary_op(math_ops.expm1)
floor = _unary_op(math_ops.floor)
imag = _unary_op(math_ops.imag)
is_finite = _unary_op(math_ops.is_finite)
lgamma = _unary_op(math_ops.lgamma)
log = _unary_op(math_ops.log)
log1p = _unary_op(math_ops.log1p)
logical_not = _unary_op(math_ops.logical_not)
neg = _unary_op(math_ops.neg)
real = _unary_op(math_ops.real)
# TODO(phawkins): unlike xla::Round, this rounds to even instead of away from
# zero for numbers halfway between two integers.
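# For example (illustrative): math_ops.round(2.5) and math_ops.round(1.5) both
# return 2.0 (ties go to the even integer), whereas xla::Round(2.5) would give
# 3.0 (ties rounded away from zero).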
round = _unary_op(math_ops.round)
sin = _unary_op(math_ops.sin)
sign = _unary_op(math_ops.sign)
tanh = _unary_op(math_ops.tanh)
# Bessel
bessel_i0e = _unary_op(special_math_ops.bessel_i0e)
bessel_i1e = _unary_op(special_math_ops.bessel_i1e)
# Binary operators
# The main difference between TensorFlow and XLA binary ops is the broadcasting
# semantics. TensorFlow uses Numpy-style broadcasting semantics, whereas XLA
# requires an explicit specification of which dimensions to broadcast if the
# arguments have different ranks.
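# Illustrative example (shapes are assumptions, not taken from this file): with
# x of shape [2, 3] and y of shape [3], NumPy-style broadcasting aligns trailing
# dimensions automatically, while the wrappers below need broadcast_dims=[1] to
# state that y's single dimension lines up with dimension 1 of x.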
def _broadcasting_binary_op(fn):
"""Wraps a binary Tensorflow operator and performs XLA-style broadcasting."""
def broadcasting_binary_op_wrapper(x, y, broadcast_dims=None, name=None):
"""Inner wrapper function."""
broadcast_dims = broadcast_dims or []
broadcast_dims = ops.convert_to_tensor(broadcast_dims, dtypes.int64)
# Rather than relying on having static shape information in the TensorFlow
# graph, we use an XlaBroadcastHelper op that can compute the correct shapes
# at JIT compilation time.
x, y = gen_xla_ops.xla_broadcast_helper(x, y, broadcast_dims)
return fn(x, y, name=name)
return broadcasting_binary_op_wrapper
# Map from TF signed types to TF unsigned types.
_SIGNED_TO_UNSIGNED_TABLE = {
dtypes.int8: dtypes.uint8,
dtypes.int16: dtypes.uint16,
dtypes.int32: dtypes.uint32,
dtypes.int64: dtypes.uint64,
}
# Map from TF unsigned types to TF signed types.
_UNSIGNED_TO_SIGNED_TABLE = {
dtypes.uint8: dtypes.int8,
dtypes.uint16: dtypes.int16,
dtypes.uint32: dtypes.int32,
dtypes.uint64: dtypes.int64,
}
def _shift_right_logical_helper(x, y, name=None):
"""Performs an integer right logical shift irrespective of input type."""
assert y.dtype == x.dtype
dtype = x.dtype
signed = dtype in _SIGNED_TO_UNSIGNED_TABLE
if signed:
unsigned_dtype = _SIGNED_TO_UNSIGNED_TABLE[dtype]
x = math_ops.cast(x, unsigned_dtype)
y = math_ops.cast(y, unsigned_dtype)
output = bitwise_ops.right_shift(x, y, name=name)
if signed:
output = math_ops.cast(output, dtype)
return output
def _shift_right_arithmetic_helper(x, y, name=None):
"""Performs an integer right arithmetic shift irrespective of input type."""
assert y.dtype == x.dtype
dtype = x.dtype
unsigned = dtype in _UNSIGNED_TO_SIGNED_TABLE
if unsigned:
signed_dtype = _UNSIGNED_TO_SIGNED_TABLE[dtype]
x = math_ops.cast(x, signed_dtype)
y = math_ops.cast(y, signed_dtype)
output = bitwise_ops.right_shift(x, y, name=name)
if unsigned:
output = math_ops.cast(output, dtype)
return output
add = _broadcasting_binary_op(math_ops.add)
sub = _broadcasting_binary_op(math_ops.sub)
mul = _broadcasting_binary_op(math_ops.mul)
div = _broadcasting_binary_op(math_ops.div)
rem = _broadcasting_binary_op(gen_math_ops.mod)
max = _broadcasting_binary_op(math_ops.maximum)
min = _broadcasting_binary_op(math_ops.minimum)
atan2 = _broadcasting_binary_op(math_ops.atan2)
complex = _broadcasting_binary_op(math_ops.complex)
logical_and = _broadcasting_binary_op(math_ops.logical_and)
logical_or = _broadcasting_binary_op(math_ops.logical_or)
logical_xor = _broadcasting_binary_op(math_ops.logical_xor)
eq = _broadcasting_binary_op(math_ops.equal)
ne = _broadcasting_binary_op(math_ops.not_equal)
ge = _broadcasting_binary_op(math_ops.greater_equal)
gt = _broadcasting_binary_op(math_ops.greater)
le = _broadcasting_binary_op(math_ops.less_equal)
lt = _broadcasting_binary_op(math_ops.less)
pow = _broadcasting_binary_op(math_ops.pow)
shift_left = _broadcasting_binary_op(bitwise_ops.left_shift)
shift_right_logical = _broadcasting_binary_op(_shift_right_logical_helper)
shift_right_arithmetic = _broadcasting_binary_op(_shift_right_arithmetic_helper)
igamma = _broadcasting_binary_op(math_ops.igamma)
igamma_grad_a = _broadcasting_binary_op(gen_math_ops.igamma_grad_a)
random_gamma_grad = _broadcasting_binary_op(gen_random_ops.random_gamma_grad)
igammac = _broadcasting_binary_op(math_ops.igammac)
polygamma = _broadcasting_binary_op(math_ops.polygamma)
zeta = _broadcasting_binary_op(math_ops.zeta)
def _binary_op(fn):
"""Wrapper that restricts `fn` to have the correct signature."""
def binary_op_wrapper(x, y, name=None):
return fn(x, y, name=name)
return binary_op_wrapper
transpose = _binary_op(array_ops.transpose)
rev = _binary_op(array_ops.reverse)
bitcast_convert_type = array_ops.bitcast
def broadcast(x, dims, name=None):
x = ops.convert_to_tensor(x)
shape = array_ops.concat([constant_op.constant(dims),
array_ops.shape(x)],
axis=0)
return array_ops.broadcast_to(x, shape, name=name)
def clamp(a, x, b, name=None):
return min(max(a, x, name=name), b, name=name)
concatenate = array_ops.concat
def conv(lhs,
rhs,
window_strides,
padding,
lhs_dilation,
rhs_dilation,
dimension_numbers,
feature_group_count=1,
precision_config=None,
name=None):
"""Wraps the XLA ConvGeneralDilated operator.
ConvGeneralDilated is the most general form of XLA convolution and is
documented at
https://www.tensorflow.org/performance/xla/operation_semantics#conv_convolution
Args:
lhs: the input tensor
rhs: the kernel tensor
window_strides: the inter-window strides
    padding: the padding to apply at the start and end of each input dimension
lhs_dilation: dilation to apply between input elements
rhs_dilation: dilation to apply between kernel elements
dimension_numbers: a `ConvolutionDimensionNumbers` proto.
feature_group_count: number of feature groups for grouped convolution.
precision_config: a `xla.PrecisionConfig` proto.
name: an optional name for the operator
Returns:
A tensor representing the output of the convolution.
"""
precision_config_proto = ""
if precision_config:
precision_config_proto = precision_config.SerializeToString()
return gen_xla_ops.xla_conv(
lhs,
rhs,
window_strides=window_strides,
padding=padding,
lhs_dilation=lhs_dilation,
rhs_dilation=rhs_dilation,
feature_group_count=feature_group_count,
dimension_numbers=dimension_numbers.SerializeToString(),
precision_config=precision_config_proto,
name=name)
convert_element_type = math_ops.cast
def dot(lhs, rhs, name=None):
return math_ops.tensordot(lhs, rhs, axes=1, name=name)
def dot_general(lhs, rhs, dimension_numbers, precision_config=None, name=None):
precision_config_proto = ""
if precision_config:
precision_config_proto = precision_config.SerializeToString()
return gen_xla_ops.xla_dot(
lhs,
rhs,
dimension_numbers=dimension_numbers.SerializeToString(),
precision_config=precision_config_proto,
name=name)
def self_adjoint_eig(a, lower, max_iter, epsilon):
return gen_xla_ops.xla_self_adjoint_eig(a, lower, max_iter, epsilon)
def svd(a, max_iter, epsilon, precision_config=None):
precision_config_proto = ""
if precision_config:
precision_config_proto = precision_config.SerializeToString()
return gen_xla_ops.xla_svd(a, max_iter, epsilon, precision_config_proto)
dynamic_slice = gen_xla_ops.xla_dynamic_slice
dynamic_update_slice = gen_xla_ops.xla_dynamic_update_slice
einsum = gen_xla_ops.xla_einsum
# TODO(phawkins): generalize tf.pad to support interior padding, and then remove
# the XLA-specific pad operator.
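# Illustrative note: "interior" padding inserts the padding value *between*
# existing elements, e.g. padding [1, 2, 3] with interior padding of 1 (and no
# edge padding, padding value 0) yields [1, 0, 2, 0, 3]; tf.pad has no such mode.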
pad = gen_xla_ops.xla_pad
def random_normal(mu, sigma, dims, name=None):
mu = ops.convert_to_tensor(mu)
return random_ops.random_normal(
dims, mean=mu, stddev=sigma, dtype=mu.dtype, name=name)
def random_uniform(minval, maxval, dims, name=None):
minval = ops.convert_to_tensor(minval)
return random_ops.random_uniform(
dims, minval, maxval, dtype=minval.dtype, name=name)
recv = gen_xla_ops.xla_recv
reduce = gen_xla_ops.xla_reduce
variadic_reduce = gen_xla_ops.xla_variadic_reduce
def reduce_window(operand,
init,
reducer,
window_dimensions,
window_strides=None,
base_dilations=None,
window_dilations=None,
padding=None,
name=None):
"""Wraps the XLA ReduceWindow operator.
ReduceWindow is documented at
https://www.tensorflow.org/performance/xla/operation_semantics#reducewindow .
Args:
operand: the input tensor
init: a scalar tensor representing the initial value for the reduction
reducer: a reduction function that combines a pair of scalars.
window_dimensions: shape of the window, as a list of integers
window_strides: inter-window strides, as a list of integers. Optional; if
omitted, defaults to strides of 1.
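    base_dilations: dilations to apply to the input of the reduction, as a list
      of integers. Optional; if omitted, defaults to dilations of 1.
    window_dilations: dilations to apply to the window, as a list of integers.
      Optional; if omitted, defaults to dilations of 1.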
padding: padding to apply to 'operand'. List of (low, high) pairs of
integers that specify the padding to apply before and after each
dimension. Optional; if omitted, defaults to no padding.
name: the operator name, or None.
Returns:
A tensor that represents the output of the reduce_window operator.
"""
window_strides = window_strides or [1] * len(window_dimensions)
base_dilations = base_dilations or [1] * len(window_dimensions)
window_dilations = window_dilations or [1] * len(window_dimensions)
padding = padding or [(0, 0)] * len(window_dimensions)
return gen_xla_ops.xla_reduce_window(
input=operand,
init_value=init,
window_dimensions=window_dimensions,
window_strides=window_strides,
base_dilations=base_dilations,
window_dilations=window_dilations,
padding=padding,
computation=reducer,
name=name)
replica_id = gen_xla_ops.xla_replica_id
# Set a static bound for the given input value as a hint to Xla compiler,
# returns the same value.
# Usage:
# def f(t, p):
# p = xla.set_bound(p, 3) # Tells xla the constraint that p <= 3.
# return t[:p] # xla knows the bound of the slice is 3.
set_bound = gen_xla_ops.xla_set_bound
# Make a static dimension into a xla bounded dynamic dimension. The current
# static dimension size will become the bound and the second operand becomes the
# dynamic size of the dimension.
#
# This should mostly be used for testing.
#
# def f():
# array = tf.convert_to_tensor([[1, 2, 3, 4, 5]])
# # Tells xla the valid size of the array is 3.
# dim = 0
# p = xla_set_dynamic_dimension_size(array, dim, 3)
# assert(reduce_sum(p) == 6) # xla knows only the first 3 elements are valid.
set_dynamic_dimension_size = gen_xla_ops.xla_set_dynamic_dimension_size
def reshape(x, new_sizes, dimensions=None, name=None):
if dimensions is not None:
x = array_ops.transpose(x, dimensions)
x = array_ops.reshape(x, new_sizes, name=name)
return x
def select(condition, x, y, name=None):
return array_ops.where(condition, x, y, name)
select_and_scatter = gen_xla_ops.xla_select_and_scatter
send = gen_xla_ops.xla_send
def slice(x, start_dims, limit_dims, strides):
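  # Illustrative example: slice(x, [1, 0], [3, 4], [1, 2]) builds the spec
  # (slice(1, 3, 1), slice(0, 4, 2)) and returns x[1:3, 0:4:2].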
spec = [
_slice(start, limit, stride)
for (start, limit, stride) in zip(start_dims, limit_dims, strides)
]
return x[tuple(spec)]
sharding = gen_xla_ops.xla_sharding
@ops.RegisterGradient("XlaSharding")
def _sharding_grad(op, grad):
grad_sharding = gen_xla_ops.xla_sharding(grad)
# pylint: disable=protected-access
grad_sharding.op._set_attr(
"_XlaSharding", attr_value_pb2.AttrValue(s=op.get_attr("_XlaSharding")))
return [grad_sharding]
spmd_full_to_shard_shape = gen_xla_ops.xla_spmd_full_to_shard_shape
spmd_shard_to_full_shape = gen_xla_ops.xla_spmd_shard_to_full_shape
@ops.RegisterGradient("XlaSpmdFullToShardShape")
def _spmd_full_to_shard_shape_grad(op, grad):
s2f = gen_xla_ops.xla_spmd_shard_to_full_shape(
grad,
manual_sharding=op.get_attr("manual_sharding"),
full_shape=op.inputs[0].shape.as_list())
return [s2f]
@ops.RegisterGradient("XlaSpmdShardToFullShape")
def _spmd_shard_to_full_shape_grad(op, grad):
f2s = gen_xla_ops.xla_spmd_full_to_shard_shape(
grad, manual_sharding=op.get_attr("manual_sharding"))
return [f2s]
sort = gen_xla_ops.xla_sort
key_value_sort = gen_xla_ops.xla_key_value_sort
while_loop = gen_xla_ops.xla_while
dequantize = gen_xla_ops.xla_dequantize
def gather(operand, start_indices, dimension_numbers, slice_sizes,
indices_are_sorted=False, name=None):
return gen_xla_ops.xla_gather(
operand,
start_indices,
slice_sizes=slice_sizes,
dimension_numbers=dimension_numbers.SerializeToString(),
indices_are_sorted=indices_are_sorted,
name=name)
def scatter(operand, scatter_indices, updates, update_computation,
dimension_numbers, indices_are_sorted=False, name=None):
return gen_xla_ops.xla_scatter(
operand,
scatter_indices,
updates,
update_computation=update_computation,
dimension_numbers=dimension_numbers.SerializeToString(),
indices_are_sorted=indices_are_sorted,
name=name)
| aam-at/tensorflow | tensorflow/compiler/tf2xla/python/xla.py | Python | apache-2.0 | 16,835 |
"""
Support for tracking the moon phases.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.moon/
"""
import asyncio
import logging
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (CONF_NAME)
import homeassistant.util.dt as dt_util
from homeassistant.helpers.entity import Entity
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
DEFAULT_NAME = 'Moon'
ICON = 'mdi:brightness-3'
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
})
@asyncio.coroutine
def async_setup_platform(hass, config, async_add_devices, discovery_info=None):
"""Set up the Moon sensor."""
name = config.get(CONF_NAME)
async_add_devices([MoonSensor(name)], True)
class MoonSensor(Entity):
"""Representation of a Moon sensor."""
def __init__(self, name):
"""Initialize the sensor."""
self._name = name
self._state = None
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def state(self):
"""Return the state of the device."""
if self._state == 0:
return 'New moon'
elif self._state < 7:
return 'Waxing crescent'
elif self._state == 7:
return 'First quarter'
elif self._state < 14:
return 'Waxing gibbous'
elif self._state == 14:
return 'Full moon'
elif self._state < 21:
return 'Waning gibbous'
elif self._state == 21:
return 'Last quarter'
return 'Waning crescent'
@property
def icon(self):
"""Icon to use in the frontend, if any."""
return ICON
@asyncio.coroutine
def async_update(self):
"""Get the time and updates the states."""
from astral import Astral
today = dt_util.as_local(dt_util.utcnow()).date()
self._state = Astral().moon_phase(today)
| MungoRae/home-assistant | homeassistant/components/sensor/moon.py | Python | apache-2.0 | 2,108 |
# coding: utf-8
# ## Capital One Labs Data Scientist Assignment - Part one
# #### Author Information:
#
# Oguz Semerci<br>
# [email protected]<br>
# ### Summary of the investigation
# We have in hand a regression problem with 5000 observations and 254 features, whose names are not known. The number of features is fairly large and we are expected to report on the important features. Therefore it will be sensible to use a model that appropriately regularizes the outcome and automatically performs 'feature selection'. ***Lasso*** is perfect for that as it uses an ***L1 norm penalty*** on the coefficient vector and promotes sparsity.
#
# In addition to Lasso, we performed ***random forest regression***, which outperformed Lasso on the training set. To compute the optimal regularization parameter for Lasso, we employed a ***cross validation*** approach, and we determined the optimal number of trees and number of features for the random forest regressor via a small-scale ***grid search*** powered by cross validation.
#
# Other feature selection methods, such as forward selection where features are added one at a time, could have been used. We will omit more complex feature engineering methods such as considering higher order terms and correlation analysis to add new features to incorporate interactions.
#
# Some comments on the quality of the data set and the data cleaning methods that were employed are given next.
# ### I- Data Preparation
# In[1]:
import matplotlib.pyplot as plt
import numpy as np
# A quick visual investigation of the 'codetest.train.text' file reveals that some of the features are categorical. Also there are missing entries that need to be taken care of.
# Let's do the initial analysis using Pandas.
# In[2]:
#load the given data set
import os
import pandas as pd
train_data_path = os.path.join('data', 'codetest_train.txt')
test_data_path = os.path.join('data', 'codetest_test.txt')
df_train = pd.read_csv(train_data_path, sep = '\t')
df_test = pd.read_csv(test_data_path, sep = '\t')
# Let's list the categorical features and their unique values:
# In[3]:
categorical_keys = []
for key,dt in zip(df_train.keys(), df_train.dtypes):
if dt != 'float64':
categorical_keys.append(key)
print('{}: {}'.format(key,dt))
print(df_train[key].unique())
print(df_train[key].describe())
print('')
# Let's impute missing categorical features with 'top' values, and regular features with 'mean'.
# In[4]:
for key in df_train.keys():
if key in categorical_keys:
df_train[key] = df_train[key].fillna(df_train[key].mode()[0])
else:
df_train[key] = df_train[key].fillna(df_train[key].mean())
for key in df_test.keys():
if key in categorical_keys:
df_test[key] = df_test[key].fillna(df_train[key].mode()[0])
else:
df_test[key] = df_test[key].fillna(df_train[key].mean())
# As a sanity check let's make sure we imputed the 'nan' values from categorical features.
# In[5]:
for key,dt in zip(df_train.keys(), df_train.dtypes):
if dt != 'float64':
categorical_keys.append(key)
print('{}: {}'.format(key,dt))
print(df_train[key].unique())
print(df_train[key].describe())
print('')
# Let's make sure the features have unit standard deviation since we'll be using Lasso.
# In[6]:
for key in df_train.keys():
if key not in categorical_keys and key != 'target':
std = df_train[key].std()
df_train[key] = df_train[key] / std
df_test[key] = df_test[key] / std
# Convert the Pandas data frame to Numpy arrays.
# In[7]:
# I learned this trick of going from a data frame to a python dictionary from the following post:
# http://stackoverflow.com/questions/20024584/vectorizing-a-pandas-dataframe-for-scikit-learn
# note the first index in the df.keys() is the 'target' vector
X_train = df_train[list(df_train.keys())[1:]].T.to_dict().values()
X_test = df_test.T.to_dict().values()
Y_train = df_train['target'].values
# Now we'll use scikit-learn's DictVectorizer method to encode the categorical variables using one-hot encoding.
# In[8]:
from sklearn.feature_extraction import DictVectorizer
vec = DictVectorizer()
vec.fit(X_train)
X_train = vec.transform(X_train).toarray()
X_test = vec.transform(X_test).toarray()
# Now we have the data in the format scikit-learn expects.
# Compute basic statistics of the target vector and plot its boxplot and histogram.
# In[9]:
print('Statistics for the target vector in training data:')
print('--------------------------------')
print('98th percentile : {:.2f}'.format(np.percentile(Y_train, 98)))
print('75th percentile : {:.2f}'.format(np.percentile(Y_train, 75)))
print('5th percentile : {:.2f}'.format(np.percentile(Y_train, 5)))
print('median : {:.2f}'.format(np.median(Y_train)))
print('mean : {:.2f}'.format(np.mean(Y_train)))
print('std : {:.2f}'.format(np.std(Y_train)))
# In[10]:
fig = plt.figure(figsize = (7,3))
ax1 = plt.subplot(121)
ax1.boxplot(Y_train)
ax1.set_ylabel('target value')
ax2 = plt.subplot(122)
_, _,_ = ax2.hist(Y_train,bins = 30)
ax2.set_xlabel('target value')
plt.show()
# One can argue that values outside the interval [-20,20] are outliers. Yet they could contain useful information as well. Note that the number of data points is much larger than the number of features, and we'll be using a regularized fit. So let's keep the larger observations in the data set.
# ### II- Model Selection
# Since we decided to leave feature engineering to Lasso, the only crucial element in the model selection procedure is the tuning of the regularization parameter. We will leave this task to scikit-learn's LassoCV method which automatically finds the best value using cross validation. We chose to use 10-fold cross validation. Specifically we need to pick an optimal $\alpha$ for the following minimization problem:
#
# $\min_{ \beta \in \mathbb{R}^p } \left\{ \frac{1}{2N} \left\| Y - X \beta \right\|_2^2 + \alpha \| \beta \|_1 \right\}$,
#
# where $\beta$ is the unknown parameter vector, $N$ is the number of samples, and $X$ and $Y$ are the training data and target vector, respectively.
# In[11]:
#The code in this cell is partially borrowed from here: http://bit.ly/1JPHF2x
# LassoCV:
from sklearn.linear_model import LassoCV
model = LassoCV(cv=10).fit(X_train, Y_train)
# list of alphas that were tried-out
m_log_alphas = -np.log10(model.alphas_)
plt.figure()
plt.plot(m_log_alphas, model.mse_path_, ':')
plt.plot(m_log_alphas, model.mse_path_.mean(axis=-1), 'k',
label='Average across the folds', linewidth=2)
plt.axvline(-np.log10(model.alpha_), linestyle='--', color='k',
label='optimal alpha')
plt.legend()
plt.xlabel('-log(alpha)')
plt.ylabel('Mean square error')
plt.title('Mean square error on each fold')
plt.axis('tight')
plt.show()
from sklearn.metrics import mean_squared_error
Y_predict = model.predict(X_train)
r2 = model.score(X_train, Y_train)
mse = mean_squared_error(Y_predict, Y_train)
print('Optimal alpha: {:.2f}'.format(model.alpha_))
print('R2 with Lasso: {:.6f}'.format(r2))
print('MSE with Lasso: {:.6f}'.format(mse))
# The above plot nicely demonstrates the MSE as a function of the regularization parameter $\alpha$. The optimal value is picked right where the average (over all cross-validation folds) MSE is minimum (right before it increases again due to over-regularization).
# Let's visualize the given target vector together with our prediction for the first 100 data instances.
# In[12]:
fig = plt.figure(figsize=(15,3))
plt.plot(Y_train[:100], 'o', label = 'data')
plt.plot(Y_predict[:100], '-o',color = 'g', label = 'prediction')
plt.legend()
plt.xlabel('data index')
plt.ylabel('target value')
title = ['Training performance of the Lasso model for the first 100 data points -' +
' Overall R2:' + str(round(r2,2)) + ' - MSE:' + str(round(mse,2))]
plt.title(title[0])
plt.ylim([-15,15])
plt.show()
# Let's find out which parameters are set to zero by the L1 regularizer:
# In[13]:
beta = model.coef_
feature_indexes = []
feature_names = []
feature_coeffs = []
for i,b in enumerate(beta):
if b>0:
feature_indexes.append(i)
feature_names.append(vec.feature_names_[i])
feature_coeffs.append(model.coef_[i])
#print('{:5s}: {:.4}'.format(vec.feature_names_[i],b))
print('{} features with nonzero coefficients:'.format(len(feature_names)))
feature_coeffs, feature_names = zip(*(sorted(zip(feature_coeffs, feature_names),reverse = True)))
# Note that only 37 features were selected as important by Lasso. Next, let's take a look at the coefficient values of the selected features.
# In[14]:
width = 0.6
fig = plt.figure(figsize = (12,3))
ax = plt.subplot(111)
ax.bar(range(len(feature_coeffs)), feature_coeffs, width = width)
ax.set_xticks(np.arange(len(feature_coeffs)) + width/2)
ax.set_xticklabels(feature_names, rotation = 90)
ax.set_title('Features and their fitted coefficients predicted by Lasso')
ax.set_ylabel('coefficient value')
plt.show()
# As observed from the above bar plot, the most effective features are f_175 and f_205 within the framework of our model. More specifically, a one-standard-deviation (computed using the training data) increase in the features shown above will result in an increase in the target vector given by the coefficients shown above. Note that this insight is only valid for our linear model, and one should avoid comments on causality as the correlation, independence or interaction between features are not considered.
#
# We conclude the feature analysis here. The important features selected by the Lasso regression are given above. Their predictive power could further be investigated via a more detailed analysis of the coefficient values and perhaps their p-values.
# Below we try out two more methods:
#
# - Least squares fit to a linear model with features selected by Lasso
# - Random forest regressor
# #### Ordinary Least Squares with Selected Features
# Let's create a new training set with only the important features and see if we'll improve on the R2 performance using an ordinary least squares fit:
# In[15]:
# Manually add the missing categorical features for f_237 and f_61
for key in vec.vocabulary_.keys():
if 'f_237' in key or 'f_61' in key:
feature_indexes.append(vec.vocabulary_[key])
feature_indexes = set(feature_indexes)
# In[16]:
X_train2 = X_train[:,list(feature_indexes)]
X_test2 = X_test[:,list(feature_indexes)]
# In[17]:
from sklearn.linear_model import LinearRegression
olr = LinearRegression()
olr.fit(X_train2,Y_train)
Y_predict2 = olr.predict(X_train2)
mse2 = mean_squared_error(Y_predict2, Y_train)
r22 = olr.score(X_train2, Y_train)
print('R2 with ordinary linear regression with selected features: {:.6f}'.format(r22))
print('MSE with ordinary linear regression with selected features: {:.6f}'.format(mse2))
# #### Random Forest Regressor
# Random forests are good as they control overfitting by averaging the results from lots of trees fitted to the data. Let's devise a quick grid-search random forest regressor and see how we'll do.
# In[18]:
from sklearn.ensemble import RandomForestRegressor
from sklearn import grid_search
parameters = {'max_features': ('sqrt', 'log2'), 'n_estimators': [5,10,15,20]}
rf = RandomForestRegressor()
model_rf = grid_search.GridSearchCV(rf, parameters, cv = 10)
model_rf.fit(X_train, Y_train)
Y_predict = model_rf.predict(X_train)
mse3 = mean_squared_error(Y_predict, Y_train)
r23 = model_rf.score(X_train, Y_train)
print('Random Forest R2: {}'.format(r23))
print('Random Forest MSE: {}'.format(mse3))
# ### III- Conclusion
# We used Lasso regression to automatically select important features; 37 features were identified as important.
#
# The random forest regressor performed significantly better on the training set, where the performance was measured using 10-fold cross validation. Here are the mean square errors (MSE) and $R^2$ scores for the three methods we tried:
#
# | Lasso | OLR with Selected Features | Random Forest
# --- | --- | --- | ---
# MSE | 11.29 | 11.18 | 2.86
# R2 | 0.59 | 0.60 | 0.897
#
#
# As a result we chose to deploy the random forest regression model.
#
# It would have been interesting to see if performance could be enhanced by more detailed feature engineering that incorporates higher order terms and interactions between features. Also, trying an elastic net regularization, which is a mixture of L1 and L2 type penalties, could be interesting.
# Let's perform prediction on the test data and save it to a file. We will use the random forest model as stated above:
# In[19]:
Y_test_rf = model_rf.predict(X_test)
Y_test = model.predict(X_test)
# In[22]:
with open('codetest_part1_out.txt','w') as f:
for y in Y_test_rf:
f.write(str(y)+'\n')
# Sanity check to make sure Lasso and random forest result in similar estimates for the test data set
# In[23]:
fig = plt.figure(figsize=(15,3))
plt.plot(Y_test[:100], '-o', label = 'lasso')
plt.plot(Y_test_rf[:100], '-o',color = 'g', label = 'random forest')
plt.legend()
plt.show()
| osemer01/regression-w-unknown-feature-names | Codetest_Part1.py | Python | cc0-1.0 | 13,223 |
# -*- coding: utf-8 -*-
from odoo.addons.account.tests.common import AccountTestInvoicingCommon
from odoo.tests import tagged
from odoo import fields
@tagged('post_install', '-at_install')
class TestAccountInvoiceReport(AccountTestInvoicingCommon):
@classmethod
def setUpClass(cls, chart_template_ref=None):
super().setUpClass(chart_template_ref=chart_template_ref)
cls.invoices = cls.env['account.move'].create([
{
'move_type': 'out_invoice',
'partner_id': cls.partner_a.id,
'invoice_date': fields.Date.from_string('2016-01-01'),
'currency_id': cls.currency_data['currency'].id,
'invoice_line_ids': [
(0, None, {
'product_id': cls.product_a.id,
'quantity': 3,
'price_unit': 1000,
}),
(0, None, {
'product_id': cls.product_a.id,
'quantity': 1,
'price_unit': 3000,
}),
]
},
{
'move_type': 'out_receipt',
'invoice_date': fields.Date.from_string('2016-01-01'),
'currency_id': cls.currency_data['currency'].id,
'invoice_line_ids': [
(0, None, {
'product_id': cls.product_a.id,
'quantity': 1,
'price_unit': 6000,
}),
]
},
{
'move_type': 'out_refund',
'partner_id': cls.partner_a.id,
'invoice_date': fields.Date.from_string('2017-01-01'),
'currency_id': cls.currency_data['currency'].id,
'invoice_line_ids': [
(0, None, {
'product_id': cls.product_a.id,
'quantity': 1,
'price_unit': 1200,
}),
]
},
{
'move_type': 'in_invoice',
'partner_id': cls.partner_a.id,
'invoice_date': fields.Date.from_string('2016-01-01'),
'currency_id': cls.currency_data['currency'].id,
'invoice_line_ids': [
(0, None, {
'product_id': cls.product_a.id,
'quantity': 1,
'price_unit': 60,
}),
]
},
{
'move_type': 'in_receipt',
'partner_id': cls.partner_a.id,
'invoice_date': fields.Date.from_string('2016-01-01'),
'currency_id': cls.currency_data['currency'].id,
'invoice_line_ids': [
(0, None, {
'product_id': cls.product_a.id,
'quantity': 1,
'price_unit': 60,
}),
]
},
{
'move_type': 'in_refund',
'partner_id': cls.partner_a.id,
'invoice_date': fields.Date.from_string('2017-01-01'),
'currency_id': cls.currency_data['currency'].id,
'invoice_line_ids': [
(0, None, {
'product_id': cls.product_a.id,
'quantity': 1,
'price_unit': 12,
}),
]
},
])
def assertInvoiceReportValues(self, expected_values_list):
reports = self.env['account.invoice.report'].search([('company_id', '=', self.company_data['company'].id)], order='price_subtotal DESC, quantity ASC')
expected_values_dict = [{
'price_average': vals[0],
'price_subtotal': vals[1],
'quantity': vals[2],
} for vals in expected_values_list]
self.assertRecordValues(reports, expected_values_dict)
def test_invoice_report_multiple_types(self):
self.assertInvoiceReportValues([
#price_average price_subtotal quantity
[2000, 2000, 1],
[1000, 1000, 1],
[1000, 1000, 3],
[6, 6, 1],
[-20, -20, -1],
[-20, -20, -1],
[-600, -600, -1],
])
| ygol/odoo | addons/account/tests/test_account_invoice_report.py | Python | agpl-3.0 | 4,611 |
# -*- coding: utf-8 -*-
# flake8: noqa
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='HistoricalSeriesWork',
fields=[
('id', models.IntegerField(verbose_name='ID', db_index=True, auto_created=True, blank=True)),
('series_id', models.IntegerField(db_index=True, null=True, blank=True)),
('title', models.CharField(max_length=100)),
('history_id', models.AutoField(serialize=False, primary_key=True)),
('history_date', models.DateTimeField()),
('history_type', models.CharField(max_length=1, choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')])),
('history_user', models.ForeignKey(to=settings.AUTH_USER_MODEL, null=True)),
],
options={
'ordering': ('-history_date', '-history_id'),
'verbose_name': 'historical series work',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Series',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=100)),
('author', models.CharField(max_length=100)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='SeriesWork',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('title', models.CharField(max_length=100)),
('series', models.ForeignKey(related_name='works', to='sample.Series')),
],
options={
},
bases=(models.Model,),
),
]
| jwhitlock/dsh-orderwrt-bug | sample/migrations/0001_initial.py | Python | mpl-2.0 | 2,117 |
import math
import sys
import re
valuePattern = re.compile('= (.+)$')
def extractValue(line):
match = re.search(valuePattern, line)
if match:
return float.fromhex(match.group(1))
else:
return "ERROR"
intervalPattern = re.compile('= \[(.*?), (.*?)\]')
def extractInterval(line):
match = re.search(intervalPattern, line)
if match:
lower = float.fromhex(match.group(1))
upper = float.fromhex(match.group(2))
return (lower, upper)
else:
return "ERROR"
def isInInterval(value, lower, upper):
return lower<=value and value<=upper
#f1 - values, f2 - ranges
f1 = open(str(sys.argv[1]), 'r')
f2 = open(str(sys.argv[2]), 'r')
wide = 0
total = 0
result = 0
for line1, line2 in zip(f1.readlines(), f2.readlines()):
total+=1
value = extractValue(line1)
lower, upper = extractInterval(line2)
if math.isnan(value):
if math.isfinite(lower) and math.isfinite(upper):
print(line1)
print(line2)
result = 1
continue
if lower!=upper:
wide+=1
if not isInInterval(value, lower, upper):
print(line1)
print(line2)
result = 1
print(total, wide)
f1.close()
f2.close()
sys.exit(result)
| jacekburys/csmith | programs/validate.py | Python | bsd-2-clause | 1,157 |
# -*- coding: utf-8 -*-
##############################################################################
#
# Author: Yannick Buron and Valeureux Copyright Valeureux.org
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Mail for Holacracy',
'version': '1.0',
'category': 'Social Network',
'author': 'Yannick Buron and Valeureux',
'license': 'AGPL-3',
'description': """
Mail for Holacracy
==================
This module improves the mail.group object in order to use it for holacracy
---------------------------------------------------------------------------
* Mail.group is now a recursive model. You can't subscribe to a parent
    group, but each follower of a group is automatically
    subscribed to its parent group
* A group can now be a normal group, a circle or a role
* In a circle, you can define permissions for child groups
* A group can now be linked to a partner,
you can easily create it from the group
http://en.wikipedia.org/wiki/Holacracy
""",
'website': 'http://www.wezer.org',
'depends': [
'base_community',
'base_recursive_model',
'mail',
],
'data': [
'security/mail_holacracy_security.xml',
'security/ir.model.access.csv',
'mail_holacracy_view.xml'
],
'demo': ['data/mail_holacracy_demo.xml'],
'test': ['tests/mail_holacracy.yml'],
'installable': True,
}
| Valeureux/wezer-exchange | __unreviewed__/mail_holacracy/__openerp__.py | Python | agpl-3.0 | 2,149 |
# -*- encoding: utf-8 -*-
# Copyright (C) 2015 Alejandro López Espinosa (kudrom)
import os
import os.path
settings = {
# Current working directory
"lupulo_cwd": os.path.dirname(os.path.abspath(__file__)),
# Avoid the testing port 8081 if you are going to run the tests with
# an instance of the webpage open in a browser
"web_server_port": 8080,
# Log settings
"log_file": "development.log",
"redirect_stdout": True,
# Activate the hot notification of layout and data schema
"activate_inotify": True,
# Settings for mongodb
"activate_mongo": False,
# Sets what listener the backend is using
"listener": "mock",
# Settings for the mock listener
"mock_timeout": 1,
"mock_ids": 2,
# Don't modify this
"template_async_call_delay": 0.00001,
"template_n_steps": 10000,
}
settings["lupulo_templates_dir"] = os.path.join(settings["lupulo_cwd"],
"templates")
settings["data_schema"] = os.path.join(settings["lupulo_cwd"],
"defaults/data_schema.json")
settings["layout"] = os.path.join(settings["lupulo_cwd"],
"defaults/layout.json")
| kudrom/lupulo | lupulo/settings.py | Python | gpl-2.0 | 1,240 |
# -*- coding: utf-8 -*-
import os
import json
from gensim.models import word2vec
from gensim import models
class Rule(object):
"""
Store the concept terms of a rule, and calculate the rule similarity.
"""
def __init__(self, domain, rule_terms, children, response, word2vec_model):
self.id_term = domain
self.terms = rule_terms
self.model = word2vec_model
self.response = response
self.children = children
self.log = open('log.txt','w',encoding='utf-8')
def __str__(self):
res = 'Domain:' + self.id_term
if self.has_child():
res += ' with children: '
for child in self.children:
res += ' ' + str(child)
return res
def serialize(self):
"""
Convert the instance to json format.
"""
ch_list = []
for child in self.children:
ch_list.append(child.id_term)
cp_list = []
for t in self.terms:
cp_list.append(t)
response = []
data = { "domain": str(self.id_term),
"concepts": cp_list,
"children": ch_list,
"response": response
}
return data
def add_child(self,child_rule):
"""
        Add a child rule into the children list, e.g.: Purchase(Parent) -> Drinks(Child).
"""
self.children.append(child_rule)
def has_child(self):
return len(self.children)
def has_response(self):
return len(self.response)
def match(self, sentence, threshold=0):
"""
Calculate the similarity between the input and concept term.
Args:
threshold: a threshold to ignore the low similarity.
sentence : a list of words.
Returns:
a struct : [similarity, domain_name, matchee in the sentence]
"""
max_sim = 0.0
matchee = ""
for word in sentence:
for term in self.terms:
try:
sim = self.model.similarity(term,word)
if sim > max_sim and sim > threshold:
max_sim = sim
matchee = word
except Exception as e:
self.log.write(repr(e)+ ". Try to hard-match.")
if term == word:
max_sim = 1
matchee = word
return [max_sim, self.id_term, matchee]
class RuleBase(object):
"""
    Store rules and load the trained word2vec model.
"""
def __init__(self, domain="general"):
self.rules = {}
self.domain = domain
self.model = None
self.forest_base_roots = []
def __str__(self):
res = "There are " + str(self.rule_amount()) + " rules in the rulebase:"
res+= "\n-------\n"
for key,rulebody in self.rules.items():
res += str(rulebody) + '\n'
return res
def rule_amount(self):
return len(self.rules)
def output_as_json(self, path='rule.json'):
rule_list = []
for rule in self.rules.values():
rule_list.append(rule.serialize())
with open(path,'w',encoding='utf-8') as op:
op.write(json.dumps(rule_list, indent=4))
def load_rules_old_format(self,path):
"""
        Build the rulebase by loading the rule terms from the given file.
        The data format is: child term, parent term (optional).
        Args: the path of the file.
"""
assert self.model is not None, "Please load the model before loading rules."
self.rules.clear()
with open(path, 'r', encoding='utf-8') as input:
for line in input:
rule_terms = line.strip('\n').split(' ')
new_rule = Rule(self.rule_amount(), rule_terms[0].split(','), self.model)
if new_rule.id_term not in self.rules:
self.rules[new_rule.id_term] = new_rule
#else
# self.rules[new_rule.id_term].terms = rule_terms
if len(rule_terms) > 1:
# this rule has parents.
for parent in rule_terms[1:]:
#if parent not in self.rules:
self.rules[parent].children.append(new_rule)
else:
# is the root of classification tree.
self.forest_base_roots.append(new_rule)
def load_rules(self, path, reload=False, is_root=False):
"""
        Build the rulebase by loading the rule terms from the given file.
        Args: the path of the file.
"""
assert self.model is not None, "Please load the model before loading rules."
if reload:
self.rules.clear()
with open(path, 'r', encoding='utf-8') as input:
json_data = json.load(input)
# load rule and build an instance
for data in json_data:
domain = data["domain"]
concepts_list = data["concepts"]
children_list = data["children"]
response = data["response"]
if domain not in self.rules:
rule = Rule(domain, concepts_list, children_list, response, self.model)
self.rules[domain] = rule
if is_root:
self.forest_base_roots.append(rule)
else:
print("[Rules]: Detect a duplicate domain name '%s'." % domain)
def load_rules_from_dic(self,path):
"""
        Load all rule files in the given path.
"""
for file_name in os.listdir(path):
if not file_name.startswith('.'): #escape .DS_Store on OSX.
if file_name == "rule.json": # roots of forest
self.load_rules(path + file_name, is_root=True)
else:
self.load_rules(path + file_name)
def load_model(self,path):
"""
Load a trained word2vec model(binary format only).
Args:
path: the path of the model.
"""
self.model = models.Word2Vec.load_word2vec_format(path,binary=True)
def match(self, sentence, topk=1, threshold=0, root=None):
"""
        Match the sentence with rules, then order by similarity.
Args:
sentence: a list of words
threshold: a threshold to ignore the low similarity.
Return:
            a list holding the top-k rules and the classification tree traversal path.
"""
assert self.model is not None, "Please load the model before any match."
result_list = []
at_leaf_node = False
term_trans = ""
if root is None: # then search from roots of forest.
focused_rule = self.forest_base_roots[:]
else:
focused_rule = [self.rules[root]]
while not at_leaf_node:
at_leaf_node = True
for rule in focused_rule:
result_list.append(rule.match(sentence, threshold))
result_list = sorted(result_list, reverse=True , key=lambda k: k[0])
top_domain = result_list[0][1] # get the best matcher's term.
if self.rules[top_domain].has_child():
result_list = []
term_trans += top_domain+'>'
at_leaf_node = False
# travel to the best node's children.
focused_rule = []
for rule_id in self.rules[top_domain].children:
focused_rule.append(self.rules[rule_id])
return [result_list,term_trans]
| konata39/chatbot-backend | RuleMatcher/rulebase.py | Python | gpl-3.0 | 7,750 |
class Module:
def __init__(self, mainMenu, params=[]):
# metadata info about the module, not modified during runtime
self.info = {
# name for the module that will appear in module menus
'Name': 'dscl Get-Groups',
# list of one or more authors for the module
'Author': ['@424f424f'],
# more verbose multi-line description of the module
'Description': 'This module will use the current user context to query active directory for a list of Groups.',
# True if the module needs to run in the background
'Background' : False,
# File extension to save the file as
'OutputExtension' : "",
# if the module needs administrative privileges
'NeedsAdmin' : False,
# True if the method doesn't touch disk/is reasonably opsec safe
'OpsecSafe' : True,
# the module language
'Language' : 'python',
# the minimum language version needed
'MinLanguageVersion' : '2.6',
# list of any references/other comments
'Comments': ['']
}
# any options needed by the module, settable during runtime
self.options = {
# format:
# value_name : {description, required, default_value}
'Agent' : {
# The 'Agent' option is the only one that MUST be in a module
'Description' : 'Agent to run on.',
'Required' : True,
'Value' : ''
},
'Domain' : {
# The 'Agent' option is the only one that MUST be in a module
'Description' : 'Domain',
'Required' : True,
'Value' : ''
}
}
# save off a copy of the mainMenu object to access external functionality
# like listeners/agent handlers/etc.
self.mainMenu = mainMenu
# During instantiation, any settable option parameters
# are passed as an object set to the module and the
# options dictionary is automatically set. This is mostly
# in case options are passed on the command line
if params:
for param in params:
# parameter format is [Name, Value]
option, value = param
if option in self.options:
self.options[option]['Value'] = value
def generate(self, obfuscate=False, obfuscationCommand=""):
domain = self.options['Domain']['Value']
# the Python script itself, with the command to invoke
# for execution appended to the end. Scripts should output
# everything to the pipeline for proper parsing.
#
# the script should be stripped of comments, with a link to any
# original reference script included in the comments.
script = """
import subprocess
cmd = \"""dscl "/Active Directory/%s/All Domains/" -list /Groups\"""
print subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE).stdout.read()
""" % (domain)
return script
| adaptivethreat/Empire | lib/modules/python/situational_awareness/network/active_directory/dscl_get_groups.py | Python | bsd-3-clause | 3,241 |
#!/usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from pages.base import Page
from pages.generic_feedback_picker import PickerPage
from pages.generic_feedback_thanks import ThanksPage
class GenericFeedbackFormPage(Page):
_page_title = 'Submit Your Feedback :: Firefox Input'
_intro_card_locator = (By.ID, 'intro')
_picker_link_locator = (By.CSS_SELECTOR, '#back-to-picker a')
_back_locator = (By.ID, 'back-button')
_happy_button_locator = (By.ID, 'happy-button')
_sad_button_locator = (By.ID, 'sad-button')
_moreinfo_card_locator = (By.ID, 'moreinfo')
_description_locator = (By.ID, 'description')
_description_character_count_locator = (By.ID, 'description-counter')
_url_locator = (By.ID, 'id_url')
_email_checkbox_locator = (By.ID, 'email-ok')
_email_locator = (By.ID, 'id_email')
_submit_locator = (By.ID, 'form-submit-btn')
_support_page_locator = (By.LINK_TEXT, 'Firefox Support')
def go_to_feedback_page(self, product_name):
self.selenium.get(self.base_url + '/feedback/' + product_name + '/')
self.is_the_current_page
def is_product(self, product_name):
# Make sure we're on the intro card
self.selenium.find_element(*self._intro_card_locator)
# Get the first ask and make sure it has the product name in
# it
first_ask = self.selenium.find_element(By.CSS_SELECTOR, 'div.ask:first-child').text
return product_name in first_ask
def go_to_picker_page(self):
picker_pg = PickerPage(self.testsetup)
self.selenium.find_element(*self._picker_link_locator).click()
WebDriverWait(self.selenium, 10).until(lambda s: picker_pg.is_the_current_page)
return picker_pg
def type_feedback(self, feedback):
self.selenium.find_element(*self._feedback_locator).send_keys(feedback)
def click_support_page(self):
self.selenium.find_element(*self._support_page_locator).click()
def click_happy_feedback(self):
self.selenium.find_element(*self._happy_button_locator).click()
self.wait_for(self._description_locator)
def click_sad_feedback(self):
self.selenium.find_element(*self._sad_button_locator).click()
self.wait_for(self._description_locator)
def click_back(self):
self.selenium.find_element(*self._back_locator).click()
self.wait_for(self._happy_button_locator)
def set_description_execute_script(self, text):
"""Sets the value of the description textarea using execute_script
:arg text: The text to set
We use ``execute_script`` here because sending keys one at a time
takes a crazy amount of time for texts > 200 characters.
"""
text = text.replace("'", "\\'").replace('"', '\\"')
self.selenium.execute_script("$('#description').val('" + text + "')")
def set_description(self, text):
desc = self.selenium.find_element(*self._description_locator)
desc.clear()
desc.send_keys(text)
def update_description(self, text):
desc = self.selenium.find_element(*self._description_locator)
desc.send_keys(text)
def set_url(self, text):
url = self.selenium.find_element(*self._url_locator)
url.clear()
url.send_keys(text)
def check_email_checkbox(self, checked=True):
self.wait_for(self._email_checkbox_locator)
checkbox = self.selenium.find_element(*self._email_checkbox_locator)
if checked != checkbox.is_selected():
checkbox.click()
def set_email(self, text):
if not self.is_element_visible(self._email_locator):
self.wait_for(self._email_locator)
email = self.selenium.find_element(*self._email_locator)
email.clear()
email.send_keys(text)
@property
def is_submit_enabled(self):
# return not 'disabled' in self.selenium.find_element(*self._submit_feedback_locator).get_attribute('class'
return self.selenium.find_element(*self._submit_locator).is_enabled()
def submit(self, expect_success=True):
self.selenium.find_element(*self._submit_locator).click()
if expect_success:
return ThanksPage(self.testsetup)
@property
def support_page_link_address(self):
return self.selenium.find_element(*self._support_page_locator).get_attribute('href')
@property
def is_url_valid(self):
        return 'invalid' not in self.selenium.find_element(*self._url_locator).get_attribute('class')
@property
def is_email_valid(self):
        return 'invalid' not in self.selenium.find_element(*self._email_locator).get_attribute('class')
@property
def remaining_character_count(self):
return int(self.selenium.find_element(*self._description_character_count_locator).text)
| DESHRAJ/fjord | smoketests/pages/generic_feedback_form.py | Python | bsd-3-clause | 5,126 |
# Copyright 2016 Cyril Gaudin (Camptocamp)
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl.html).
from odoo import _, api, fields, models
class AccountPaymentTerm(models.Model):
_inherit = 'account.payment.term'
early_payment_discount = fields.Boolean(
string="Early Payment Discount",
default=False
)
epd_nb_days = fields.Integer(string="Number of day(s)")
epd_discount = fields.Float(string="Discount")
epd_tolerance = fields.Float(string="Tolerance")
company_currency = fields.Many2one(
comodel_name='res.currency',
compute='_compute_company_currency',
required=True
)
_sql_constraints = [(
'early_payment_discount',
'CHECK '
'(NOT early_payment_discount OR'
' (NULLIF(epd_nb_days, 0) IS NOT NULL AND'
' NULLIF(epd_discount, 0) IS NOT NULL)'
')',
_("'Number of day(s)' and 'Discount' fields "
"must be filled if 'Early Payment Discount' is checked")
)]
def _compute_company_currency(self):
self.company_currency = self.env.user.company_id.currency_id
@api.onchange('early_payment_discount')
def _onchange_early_payment_discount(self):
if not self.early_payment_discount:
self.epd_nb_days = False
self.epd_discount = False
self.epd_tolerance = False
| kittiu/account-payment | account_early_payment_discount/models/account_payment_term.py | Python | agpl-3.0 | 1,389 |
from django.db import models
class Section(models.Model):
name = models.CharField(max_length=32)
| libreoss/liberator-api | liberator/models/section.py | Python | gpl-2.0 | 104 |
from helpers import read_config
import importlib
screen = None
def init():
""" This function is called by main.py to read the output configuration, pick the corresponding drivers and initialize a Screen object.
    It also sets the ``screen`` global of the ``output`` module to the created ``Screen`` object."""
global screen
config = read_config("config.json")
output_config = config["output"][0]
driver_name = output_config["driver"]
driver_module = importlib.import_module("output.drivers."+driver_name)
args = output_config["args"] if "args" in output_config else []
kwargs = output_config["kwargs"] if "kwargs" in output_config else {}
screen = driver_module.Screen(*args, **kwargs)
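# Illustrative config.json fragment consumed by init() (the driver name and
# constructor arguments below are placeholders, not part of this project):
#   {"output": [{"driver": "some_driver", "kwargs": {"port": 1, "address": 60}}]}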
| CRImier/pyLCI | output/output.py | Python | apache-2.0 | 717 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Connects all half, float and double tensors to CheckNumericsOp."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.util.tf_export import tf_export
@tf_export("verify_tensor_all_finite")
def verify_tensor_all_finite(t, msg, name=None):
"""Assert that the tensor does not contain any NaN's or Inf's.
Args:
t: Tensor to check.
msg: Message to log on failure.
name: A name for this operation (optional).
Returns:
Same tensor as `t`.
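  Example (illustrative):
    x = tf.constant([1.0, 2.0, 3.0])
    x = tf.verify_tensor_all_finite(x, "x contains NaN or Inf")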
"""
with ops.name_scope(name, "VerifyFinite", [t]) as name:
t = ops.convert_to_tensor(t, name="t")
with ops.colocate_with(t):
verify_input = array_ops.check_numerics(t, message=msg)
out = control_flow_ops.with_dependencies([verify_input], t)
return out
@tf_export("add_check_numerics_ops")
def add_check_numerics_ops():
"""Connect a `check_numerics` to every floating point tensor.
`check_numerics` operations themselves are added for each `half`, `float`,
or `double` tensor in the graph. For all ops in the graph, the
`check_numerics` op for all of its (`half`, `float`, or `double`) inputs
is guaranteed to run before the `check_numerics` op on any of its outputs.
Note: This API is not compatible with the use of `tf.cond` or
`tf.while_loop`, and will raise a `ValueError` if you attempt to call it
in such a graph.
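  Example (illustrative, TF1-style graph execution):
    x = tf.constant(0.0)
    y = tf.log(x)  # yields -inf
    check_op = tf.add_check_numerics_ops()
    with tf.Session() as sess:
      sess.run([y, check_op])  # raises InvalidArgumentError (Inf detected)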
Returns:
A `group` op depending on all `check_numerics` ops added.
Raises:
ValueError: If the graph contains any numeric operations in a control flow
structure.
RuntimeError: If called with eager execution enabled.
@compatibility(eager)
Not compatible with eager execution. To check for `Inf`s and `NaN`s under
eager execution, call tfe.seterr(inf_or_nan='raise') once before executing
the checked operations.
  @end_compatibility
"""
if context.executing_eagerly():
raise RuntimeError(
"add_check_numerics_ops() is not compatible with eager execution. "
"To check for Inf's and NaN's under eager execution, call "
"tfe.seterr(inf_or_nan='raise') once before executing the "
"checked operations.")
check_op = []
# This code relies on the ordering of ops in get_operations().
# The producer of a tensor always comes before that tensor's consumer in
# this list. This is true because get_operations() returns ops in the order
# added, and an op can only be added after its inputs are added.
for op in ops.get_default_graph().get_operations():
for output in op.outputs:
if output.dtype in [dtypes.float16, dtypes.float32, dtypes.float64]:
if op._get_control_flow_context() is not None: # pylint: disable=protected-access
raise ValueError("`tf.add_check_numerics_ops() is not compatible "
"with TensorFlow control flow operations such as "
"`tf.cond()` or `tf.while_loop()`.")
message = op.name + ":" + str(output.value_index)
with ops.control_dependencies(check_op):
check_op = [array_ops.check_numerics(output, message=message)]
return control_flow_ops.group(*check_op)
| kobejean/tensorflow | tensorflow/python/ops/numerics.py | Python | apache-2.0 | 4,096 |
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Tests for checkdeps.
"""
import os
import unittest
import builddeps
import checkdeps
import results
class CheckDepsTest(unittest.TestCase):
def setUp(self):
self.deps_checker = checkdeps.DepsChecker(
being_tested=True,
base_directory=os.path.join(os.path.dirname(__file__), os.path.pardir))
def ImplTestRegularCheckDepsRun(self, ignore_temp_rules, skip_tests):
self.deps_checker._ignore_temp_rules = ignore_temp_rules
self.deps_checker._skip_tests = skip_tests
self.deps_checker.CheckDirectory(
os.path.join(self.deps_checker.base_directory,
'checkdeps/testdata'))
problems = self.deps_checker.results_formatter.GetResults()
if skip_tests:
self.failUnlessEqual(3, len(problems))
else:
self.failUnlessEqual(4, len(problems))
def VerifySubstringsInProblems(key_path, substrings_in_sequence):
"""Finds the problem in |problems| that contains |key_path|,
then verifies that each of |substrings_in_sequence| occurs in
that problem, in the order they appear in
|substrings_in_sequence|.
"""
found = False
key_path = os.path.normpath(key_path)
for problem in problems:
index = problem.find(key_path)
if index != -1:
for substring in substrings_in_sequence:
index = problem.find(substring, index + 1)
self.failUnless(index != -1, '%s in %s' % (substring, problem))
found = True
break
if not found:
self.fail('Found no problem for file %s' % key_path)
if ignore_temp_rules:
VerifySubstringsInProblems('testdata/allowed/test.h',
['-checkdeps/testdata/disallowed',
'temporarily_allowed.h',
'-third_party/explicitly_disallowed',
'Because of no rule applying'])
else:
VerifySubstringsInProblems('testdata/allowed/test.h',
['-checkdeps/testdata/disallowed',
'-third_party/explicitly_disallowed',
'Because of no rule applying'])
VerifySubstringsInProblems('testdata/disallowed/test.h',
['-third_party/explicitly_disallowed',
'Because of no rule applying',
'Because of no rule applying'])
VerifySubstringsInProblems('disallowed/allowed/test.h',
['-third_party/explicitly_disallowed',
'Because of no rule applying',
'Because of no rule applying'])
if not skip_tests:
VerifySubstringsInProblems('allowed/not_a_test.cc',
['-checkdeps/testdata/disallowed'])
def testRegularCheckDepsRun(self):
self.ImplTestRegularCheckDepsRun(False, False)
def testRegularCheckDepsRunIgnoringTempRules(self):
self.ImplTestRegularCheckDepsRun(True, False)
def testRegularCheckDepsRunSkipTests(self):
self.ImplTestRegularCheckDepsRun(False, True)
def testRegularCheckDepsRunIgnoringTempRulesSkipTests(self):
self.ImplTestRegularCheckDepsRun(True, True)
def CountViolations(self, ignore_temp_rules):
self.deps_checker._ignore_temp_rules = ignore_temp_rules
self.deps_checker.results_formatter = results.CountViolationsFormatter()
self.deps_checker.CheckDirectory(
os.path.join(self.deps_checker.base_directory,
'checkdeps/testdata'))
return self.deps_checker.results_formatter.GetResults()
def testCountViolations(self):
self.failUnlessEqual('10', self.CountViolations(False))
def testCountViolationsIgnoringTempRules(self):
self.failUnlessEqual('11', self.CountViolations(True))
def testCountViolationsWithRelativePath(self):
self.deps_checker.results_formatter = results.CountViolationsFormatter()
self.deps_checker.CheckDirectory(
os.path.join('checkdeps', 'testdata', 'allowed'))
self.failUnlessEqual('4', self.deps_checker.results_formatter.GetResults())
def testTempRulesGenerator(self):
self.deps_checker.results_formatter = results.TemporaryRulesFormatter()
self.deps_checker.CheckDirectory(
os.path.join(self.deps_checker.base_directory,
'checkdeps/testdata/allowed'))
temp_rules = self.deps_checker.results_formatter.GetResults()
expected = [u' "!checkdeps/testdata/disallowed/bad.h",',
u' "!checkdeps/testdata/disallowed/teststuff/bad.h",',
u' "!third_party/explicitly_disallowed/bad.h",',
u' "!third_party/no_rule/bad.h",']
self.failUnlessEqual(expected, temp_rules)
def testBadBaseDirectoryNotCheckoutRoot(self):
# This assumes git. It's not a valid test if buildtools is fetched via svn.
with self.assertRaises(builddeps.DepsBuilderError):
checkdeps.DepsChecker(being_tested=True,
base_directory=os.path.dirname(__file__))
def testCheckAddedIncludesAllGood(self):
problems = self.deps_checker.CheckAddedCppIncludes(
[['checkdeps/testdata/allowed/test.cc',
['#include "checkdeps/testdata/allowed/good.h"',
'#include "checkdeps/testdata/disallowed/allowed/good.h"']
]])
self.failIf(problems)
def testCheckAddedIncludesManyGarbageLines(self):
garbage_lines = ["My name is Sam%d\n" % num for num in range(50)]
problems = self.deps_checker.CheckAddedCppIncludes(
[['checkdeps/testdata/allowed/test.cc', garbage_lines]])
self.failIf(problems)
def testCheckAddedIncludesNoRule(self):
problems = self.deps_checker.CheckAddedCppIncludes(
[['checkdeps/testdata/allowed/test.cc',
['#include "no_rule_for_this/nogood.h"']
]])
self.failUnless(problems)
def testCheckAddedIncludesSkippedDirectory(self):
problems = self.deps_checker.CheckAddedCppIncludes(
[['checkdeps/testdata/disallowed/allowed/skipped/test.cc',
['#include "whatever/whocares.h"']
]])
self.failIf(problems)
def testCheckAddedIncludesTempAllowed(self):
problems = self.deps_checker.CheckAddedCppIncludes(
[['checkdeps/testdata/allowed/test.cc',
['#include "checkdeps/testdata/disallowed/temporarily_allowed.h"']
]])
self.failUnless(problems)
def testCopyIsDeep(self):
# Regression test for a bug where we were making shallow copies of
# Rules objects and therefore all Rules objects shared the same
# dictionary for specific rules.
#
# The first pair should bring in a rule from testdata/allowed/DEPS
# into that global dictionary that allows the
# temp_allowed_for_tests.h file to be included in files ending
# with _unittest.cc, and the second pair should completely fail
# once the bug is fixed, but succeed (with a temporary allowance)
# if the bug is in place.
problems = self.deps_checker.CheckAddedCppIncludes(
[['checkdeps/testdata/allowed/test.cc',
['#include "/checkdeps/testdata/disallowed/temporarily_allowed.h"']
],
['checkdeps/testdata/disallowed/foo_unittest.cc',
['#include "checkdeps/testdata/bongo/temp_allowed_for_tests.h"']
]])
# With the bug in place, there would be two problems reported, and
# the second would be for foo_unittest.cc.
self.failUnless(len(problems) == 1)
self.failUnless(problems[0][0].endswith('/test.cc'))
def testTraversalIsOrdered(self):
dirs_traversed = []
for rules, filenames in self.deps_checker.GetAllRulesAndFiles():
self.failUnlessEqual(type(filenames), list)
self.failUnlessEqual(filenames, sorted(filenames))
if filenames:
dir_names = set(os.path.dirname(file) for file in filenames)
self.failUnlessEqual(1, len(dir_names))
dirs_traversed.append(dir_names.pop())
self.failUnlessEqual(dirs_traversed, sorted(dirs_traversed))
if __name__ == '__main__':
unittest.main()
| junhuac/MQUIC | src/buildtools/checkdeps/checkdeps_test.py | Python | mit | 8,256 |
_method_cache = {}
class methodcaller(object):
"""
Return a callable object that calls the given method on its operand.
Unlike the builtin `operator.methodcaller`, instances of this class are
    serializable.
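    Examples
    --------
    A minimal illustrative use on a plain list:
    >>> count = methodcaller('count')
    >>> count([1, 3, 3], 3)
    2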
"""
__slots__ = ('method',)
func = property(lambda self: self.method) # For `funcname` to work
def __new__(cls, method):
if method in _method_cache:
return _method_cache[method]
self = object.__new__(cls)
self.method = method
_method_cache[method] = self
return self
def __call__(self, obj, *args, **kwargs):
return getattr(obj, self.method)(*args, **kwargs)
def __reduce__(self):
return (methodcaller, (self.method,))
def __str__(self):
return "<%s: %s>" % (self.__class__.__name__, self.method)
__repr__ = __str__
class MethodCache(object):
"""Attribute access on this object returns a methodcaller for that
attribute.
Examples
--------
>>> a = [1, 3, 3]
>>> M.count(a, 3) == a.count(3)
True
"""
__getattr__ = staticmethod(methodcaller)
__dir__ = lambda self: list(_method_cache)
M = MethodCache()
| mrocklin/streams | streamz/utils.py | Python | bsd-3-clause | 1,184 |
# Copyright 2020 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, List, Optional, Sequence, TYPE_CHECKING, Union
import numpy as np
from cirq import _compat, value
from cirq.sim.clifford import stabilizer_state_ch_form
from cirq.sim.clifford.act_on_stabilizer_args import ActOnStabilizerArgs
if TYPE_CHECKING:
import cirq
class ActOnStabilizerCHFormArgs(
ActOnStabilizerArgs[stabilizer_state_ch_form.StabilizerStateChForm]
):
"""Wrapper around a stabilizer state in CH form for the act_on protocol."""
@_compat.deprecated_parameter(
deadline='v0.15',
fix='Specify all the arguments with keywords, use initial_state instead of state.',
parameter_desc='positional arguments',
match=lambda args, kwargs: len(args) != 1 or 'state' in kwargs,
)
def __init__(
self,
state: Optional['cirq.StabilizerStateChForm'] = None,
prng: Optional[np.random.RandomState] = None,
log_of_measurement_results: Optional[Dict[str, List[int]]] = None,
qubits: Optional[Sequence['cirq.Qid']] = None,
initial_state: Union[int, 'cirq.StabilizerStateChForm'] = 0,
classical_data: Optional['cirq.ClassicalDataStore'] = None,
):
"""Initializes with the given state and the axes for the operation.
Args:
state: The StabilizerStateChForm to act on. Operations are expected
to perform inplace edits of this object.
qubits: Determines the canonical ordering of the qubits. This
is often used in specifying the initial state, i.e. the
ordering of the computational basis states.
prng: The pseudo random number generator to use for probabilistic
effects.
log_of_measurement_results: A mutable object that measurements are
being recorded into.
initial_state: The initial state for the simulation. This can be a
full CH form passed by reference which will be modified inplace,
or a big-endian int in the computational basis. If the state is
an integer, qubits must be provided in order to determine
array sizes.
classical_data: The shared classical data container for this
simulation.
Raises:
ValueError: If initial state is an integer but qubits are not
provided.
"""
initial_state = state or initial_state
if isinstance(initial_state, int):
if qubits is None:
raise ValueError('Must specify qubits if initial state is integer')
initial_state = stabilizer_state_ch_form.StabilizerStateChForm(
len(qubits), initial_state
)
super().__init__(
state=initial_state,
prng=prng,
qubits=qubits,
log_of_measurement_results=log_of_measurement_results,
classical_data=classical_data,
)
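    # Illustrative construction (keyword-only, matching the deprecation shim
    # above; LineQubit is standard Cirq API):
    #   args = ActOnStabilizerCHFormArgs(qubits=cirq.LineQubit.range(2),
    #                                    initial_state=0)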
def _perform_measurement(self, qubits: Sequence['cirq.Qid']) -> List[int]:
"""Returns the measurement from the stabilizer state form."""
return [self.state._measure(self.qubit_map[q], self.prng) for q in qubits]
def _on_copy(self, target: 'ActOnStabilizerCHFormArgs', deep_copy_buffers: bool = True):
target._state = self.state.copy()
def _on_kronecker_product(
self, other: 'cirq.ActOnStabilizerCHFormArgs', target: 'cirq.ActOnStabilizerCHFormArgs'
):
target._state = self.state.kron(other.state)
def _on_transpose_to_qubit_order(
self, qubits: Sequence['cirq.Qid'], target: 'cirq.ActOnStabilizerCHFormArgs'
):
axes = [self.qubit_map[q] for q in qubits]
target._state = self.state.reindex(axes)
def sample(
self,
qubits: Sequence['cirq.Qid'],
repetitions: int = 1,
seed: 'cirq.RANDOM_STATE_OR_SEED_LIKE' = None,
) -> np.ndarray:
prng = value.parse_random_state(seed)
axes = self.get_axes(qubits)
measurements = []
for _ in range(repetitions):
state = self.state.copy()
measurements.append([state._measure(i, prng) for i in axes])
return np.array(measurements, dtype=bool)
| quantumlib/Cirq | cirq-core/cirq/sim/clifford/act_on_stabilizer_ch_form_args.py | Python | apache-2.0 | 4,827 |
# Copyright 2015, Pinterest, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from pinball_ext.common import utils
from pinball_ext.job.basic_jobs import ClusterJob
__author__ = 'Changshu Liu, Mao Ye, Mohammad Shahangian'
__copyright__ = 'Copyright 2015, Pinterest, Inc.'
__credits__ = [__author__]
__license__ = 'Apache'
__version__ = '2.0'
LOG = utils.get_logger('pinball_ext.job.hive_jobs')
class HiveJobBase(ClusterJob):
"""Base class for jobs that run Hive query."""
# If set to true, upload archive; otherwise don't upload archive
_UPLOAD_ARCHIVE = False
def _get_query_template(self):
"""Get the hive query template as a string.
The returned template may contain some place holder parameters that will
be replaced with self.params.
"""
raise NotImplementedError("No query template available in HiveJobBase")
def _setup(self):
super(HiveJobBase, self)._setup()
self._delay()
def _execute(self):
super(HiveJobBase, self)._execute()
self._job_output, self._job_stderr, self._job_ids =\
self.executor.run_hive_query(
self._get_query_template() % self.params,
upload_archive=self._UPLOAD_ARCHIVE)
class HiveJob(HiveJobBase):
"""A job to run Hive query whose template defined as job attribute.
It retrieves job attribute: _QUERY_TEMPLATE as query template.
"""
_QUERY_TEMPLATE = None
def _get_query_template(self):
if not self._QUERY_TEMPLATE:
raise Exception('_QUERY_TEMPLATE is empty')
return self._QUERY_TEMPLATE
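# Illustrative subclass (hypothetical; the table and parameter names below are
# placeholders, not part of this codebase):
#
# class DailyEventCountJob(HiveJob):
#     _QUERY_TEMPLATE = """
#         SELECT dt, COUNT(1) FROM events WHERE dt = '%(end_date)s' GROUP BY dt;
#     """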
class HiveFileJob(HiveJobBase):
"""A job to run Hive query whose template is stored in a file.
The file path is specified in job attribute: _QUERY_TEMPLATE_FILE. The path
is relative to job's root_dir attributed which is the dir where HiveFileJob
class is defined by default. Derived job class can override _setup() method
to override this attribute.
"""
_QUERY_TEMPLATE_FILE = None
def _setup(self):
super(HiveFileJob, self)._setup()
self.root_dir = os.path.dirname(__file__)
def _get_query_template(self):
if not self._QUERY_TEMPLATE_FILE:
raise NotImplementedError('_QUERY_FILE is empty')
query_file_path = os.path.join(self.root_dir, self._QUERY_TEMPLATE_FILE)
LOG.info('reading hive query template from: %s ...', query_file_path)
with open(query_file_path, 'r') as f:
query_template = f.read()
return query_template
| sungjuly/pinball | pinball_ext/job/hive_jobs.py | Python | apache-2.0 | 3,084 |
# Pipeline that tests the pre-trained weights for different zones
#
# Copyright: (c) Daniel Duma 2018
# Author: Daniel Duma <[email protected]>
# For license information, see LICENSE.TXT
from __future__ import print_function
from __future__ import absolute_import
import json, os, time
from .base_pipeline import BaseTestingPipeline
from retrieval.base_retrieval import BaseRetrieval
from celery.result import ResultSet
import csv
class WeightTestingPipeline(BaseTestingPipeline):
"""
    Pipeline for testing weights. The weights are loaded from tab-separated files and applied to each query.
"""
def __init__(self, retrieval_class=BaseRetrieval, use_celery=False):
super(WeightTestingPipeline, self).__init__(retrieval_class=retrieval_class, use_celery=use_celery)
self.writers = {}
def createWriterInstances(self):
"""
Initializes the writer instances.
"""
# self.writers = createResultStorers(self.exp["name"],
# self.exp.get("random_zoning", False),
# self.options.get("clear_existing_prr_results", False))
def loadWeights(self, weights_filenames):
"""
:return:
"""
weights = {}
for filename in weights_filenames:
reader = csv.reader(open(filename, "r"), delimiter="\t")
columns = []
for index, row in enumerate(reader):
if index == 0:
columns = row
while columns[0] == "":
columns = columns[1:]
continue
qtype = row[0]
weights[qtype] = weights.get(qtype, {})
for row_index in range(1, len(row)):
weights[qtype][columns[row_index - 1]] = float(row[row_index])
return weights
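    # Illustrative weights file layout (tab-separated; the field names and
    # query types below are placeholders):
    #   <tab>title<tab>abstract
    #   AIM<tab>1.0<tab>2.5
    #   OWN<tab>0.5<tab>1.0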
def initializePipeline(self):
"""
        Whatever needs to happen before we start the pipeline: initializing
        connections, VMs, and other resources.
"""
for method in self.exp["doc_methods"]:
if "preset_runtime_weights_files" in self.exp["doc_methods"][method]:
self.exp["doc_methods"][method]["preset_runtime_weights"] = self.loadWeights(
self.exp["doc_methods"][method]["preset_runtime_weights_files"])
def setRuntimeParameters(self, method, precomputed_query):
"""
Sets the runtime parameters to use depending on the method and query
"""
preset_weights = self.main_all_doc_methods[method].get("preset_runtime_weights", [])
if precomputed_query["az"] in preset_weights:
query_type = precomputed_query["az"]
elif precomputed_query["csc_type"] in preset_weights:
query_type = precomputed_query["csc_type"]
else:
print("Query type %s %s not found in preset_weights" % (precomputed_query["az"], precomputed_query["csc_type"]))
raise ValueError
return preset_weights[query_type]
def loadQueriesAndFileList(self):
"""
Loads the precomputed queries and the list of test files to process.
"""
precomputed_queries_file_path = self.exp.get("precomputed_queries_file_path", None)
if not precomputed_queries_file_path:
precomputed_queries_file_path = os.path.join(self.exp["exp_dir"],
self.exp.get("precomputed_queries_filename",
"precomputed_queries.json"))
if "ALL" in self.exp.get("queries_to_process", ["ALL"]):
self.precomputed_queries = json.load(open(precomputed_queries_file_path, "r")) # [:1]
self.precomputed_queries = self.precomputed_queries[self.options.get("run_query_start_at", 0):]
## precomputed_queries=json.load(open(self.exp["exp_dir"]+"precomputed_queries.json","r"))
else:
queries_filename = "queries_by_" + self.exp["queries_classification"] + ".json"
queries_by_zone = json.load(open(self.exp["exp_dir"] + queries_filename, "r"))
self.precomputed_queries = []
for zone in queries_by_zone[self.exp["queries_to_process"]]:
self.precomputed_queries.extend(queries_by_zone[zone])
print("Total precomputed queries: ", len(self.precomputed_queries))
files_dict_filename = os.path.join(self.exp["exp_dir"],
self.exp.get("files_dict_filename", "files_dict.json"))
self.files_dict = json.load(open(files_dict_filename, "r"))
self.files_dict["ALL_FILES"] = {}
assert self.exp["name"] != "", "Experiment needs a name!"
self.createWriterInstances()
def addResult(self, file_guid, precomputed_query, doc_method, retrieved_results):
"""
Overrides BaseTestingPipeline.addResult so that for each retrieval result
we actually run .explain() on each item and we store the precomputed
formula.
"""
# doc_list = [hit[1]["guid"] for hit in retrieved_results]
# for zone_type in ["csc_type", "az"]:
# if precomputed_query.get(zone_type, "") != "":
# if self.writers[zone_type + "_" + precomputed_query[
# zone_type].strip()].getResultCount() < self.max_per_class_results:
# must_process = True
# else:
# must_process = False
# # TODO this is redundant now. Merge this into base_pipeline.py?
# print(u"Too many queries of type {} already".format(precomputed_query[zone_type]))
# ## assert(False)
#
# if not must_process:
# return
result_dict = self.newResultDict(file_guid, precomputed_query, doc_method)
self.logger.measureScoreAndLog(retrieved_results, precomputed_query["citation_multi"], result_dict)
# if result_dict["mrr_score"] < 0.1:
        #     print("FIXME: unexpectedly low MRR score", result_dict["mrr_score"], doc_method)
def saveResultsAndCleanUp(self):
"""
Executes after the retrieval is done.
"""
self.logger.showFinalSummary()
def main():
pass
if __name__ == '__main__':
main()
| danieldmm/minerva | evaluation/weight_testing_pipeline.py | Python | gpl-3.0 | 6,437 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 Nicira, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy.orm import exc
from neutron.openstack.common import log as logging
from neutron.plugins.nicira.common import exceptions as nvp_exc
from neutron.plugins.nicira.dbexts import vcns_models
from neutron.plugins.nicira.vshield.common import (
exceptions as vcns_exc)
LOG = logging.getLogger(__name__)
def add_vcns_router_binding(session, router_id, vse_id, lswitch_id, status):
with session.begin(subtransactions=True):
binding = vcns_models.VcnsRouterBinding(
router_id=router_id,
edge_id=vse_id,
lswitch_id=lswitch_id,
status=status)
session.add(binding)
return binding
def get_vcns_router_binding(session, router_id):
with session.begin(subtransactions=True):
return (session.query(vcns_models.VcnsRouterBinding).
filter_by(router_id=router_id).first())
def update_vcns_router_binding(session, router_id, **kwargs):
with session.begin(subtransactions=True):
binding = (session.query(vcns_models.VcnsRouterBinding).
filter_by(router_id=router_id).one())
for key, value in kwargs.iteritems():
binding[key] = value
def delete_vcns_router_binding(session, router_id):
with session.begin(subtransactions=True):
binding = (session.query(vcns_models.VcnsRouterBinding).
filter_by(router_id=router_id).one())
session.delete(binding)
#
# Edge Firewall binding methods
#
def add_vcns_edge_firewallrule_binding(session, map_info):
with session.begin(subtransactions=True):
binding = vcns_models.VcnsEdgeFirewallRuleBinding(
rule_id=map_info['rule_id'],
rule_vseid=map_info['rule_vseid'],
edge_id=map_info['edge_id'])
session.add(binding)
return binding
def delete_vcns_edge_firewallrule_binding(session, id):
with session.begin(subtransactions=True):
if not (session.query(vcns_models.VcnsEdgeFirewallRuleBinding).
filter_by(rule_id=id).delete()):
msg = _("Rule Resource binding with id:%s not found!") % id
raise nvp_exc.NvpServicePluginException(err_msg=msg)
def get_vcns_edge_firewallrule_binding(session, id, edge_id):
with session.begin(subtransactions=True):
return (session.query(vcns_models.VcnsEdgeFirewallRuleBinding).
filter_by(rule_id=id, edge_id=edge_id).first())
def get_vcns_edge_firewallrule_binding_by_vseid(
session, edge_id, rule_vseid):
with session.begin(subtransactions=True):
try:
return (session.query(vcns_models.VcnsEdgeFirewallRuleBinding).
filter_by(edge_id=edge_id, rule_vseid=rule_vseid).one())
except exc.NoResultFound:
msg = _("Rule Resource binding not found!")
raise nvp_exc.NvpServicePluginException(err_msg=msg)
def cleanup_vcns_edge_firewallrule_binding(session, edge_id):
with session.begin(subtransactions=True):
session.query(
vcns_models.VcnsEdgeFirewallRuleBinding).filter_by(
edge_id=edge_id).delete()
def add_vcns_edge_vip_binding(session, map_info):
with session.begin(subtransactions=True):
binding = vcns_models.VcnsEdgeVipBinding(
vip_id=map_info['vip_id'],
edge_id=map_info['edge_id'],
vip_vseid=map_info['vip_vseid'],
app_profileid=map_info['app_profileid'])
session.add(binding)
return binding
def get_vcns_edge_vip_binding(session, id):
with session.begin(subtransactions=True):
try:
qry = session.query(vcns_models.VcnsEdgeVipBinding)
return qry.filter_by(vip_id=id).one()
except exc.NoResultFound:
msg = _("VIP Resource binding with id:%s not found!") % id
LOG.exception(msg)
raise vcns_exc.VcnsNotFound(
resource='router_service_binding', msg=msg)
def delete_vcns_edge_vip_binding(session, id):
with session.begin(subtransactions=True):
qry = session.query(vcns_models.VcnsEdgeVipBinding)
if not qry.filter_by(vip_id=id).delete():
msg = _("VIP Resource binding with id:%s not found!") % id
LOG.exception(msg)
raise nvp_exc.NvpServicePluginException(err_msg=msg)
def add_vcns_edge_pool_binding(session, map_info):
with session.begin(subtransactions=True):
binding = vcns_models.VcnsEdgePoolBinding(
pool_id=map_info['pool_id'],
edge_id=map_info['edge_id'],
pool_vseid=map_info['pool_vseid'])
session.add(binding)
return binding
def get_vcns_edge_pool_binding(session, id, edge_id):
with session.begin(subtransactions=True):
return (session.query(vcns_models.VcnsEdgePoolBinding).
filter_by(pool_id=id, edge_id=edge_id).first())
def get_vcns_edge_pool_binding_by_vseid(session, edge_id, pool_vseid):
with session.begin(subtransactions=True):
try:
qry = session.query(vcns_models.VcnsEdgePoolBinding)
binding = qry.filter_by(edge_id=edge_id,
pool_vseid=pool_vseid).one()
except exc.NoResultFound:
msg = (_("Pool Resource binding with edge_id:%(edge_id)s "
"pool_vseid:%(pool_vseid)s not found!") %
{'edge_id': edge_id, 'pool_vseid': pool_vseid})
LOG.exception(msg)
raise nvp_exc.NvpServicePluginException(err_msg=msg)
return binding
def delete_vcns_edge_pool_binding(session, id, edge_id):
with session.begin(subtransactions=True):
qry = session.query(vcns_models.VcnsEdgePoolBinding)
if not qry.filter_by(pool_id=id, edge_id=edge_id).delete():
msg = _("Pool Resource binding with id:%s not found!") % id
LOG.exception(msg)
raise nvp_exc.NvpServicePluginException(err_msg=msg)
def add_vcns_edge_monitor_binding(session, map_info):
with session.begin(subtransactions=True):
binding = vcns_models.VcnsEdgeMonitorBinding(
monitor_id=map_info['monitor_id'],
edge_id=map_info['edge_id'],
monitor_vseid=map_info['monitor_vseid'])
session.add(binding)
return binding
def get_vcns_edge_monitor_binding(session, id, edge_id):
with session.begin(subtransactions=True):
return (session.query(vcns_models.VcnsEdgeMonitorBinding).
filter_by(monitor_id=id, edge_id=edge_id).first())
def delete_vcns_edge_monitor_binding(session, id, edge_id):
with session.begin(subtransactions=True):
qry = session.query(vcns_models.VcnsEdgeMonitorBinding)
if not qry.filter_by(monitor_id=id, edge_id=edge_id).delete():
msg = _("Monitor Resource binding with id:%s not found!") % id
LOG.exception(msg)
raise nvp_exc.NvpServicePluginException(err_msg=msg)
| ntt-sic/neutron | neutron/plugins/nicira/dbexts/vcns_db.py | Python | apache-2.0 | 7,637 |
# Copyright 2014 eBay Software Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import base64
from novaclient import exceptions as nova_exceptions
from oslo_log import log as logging
from trove.cluster import models
from trove.cluster.tasks import ClusterTasks
from trove.cluster.views import ClusterView
from trove.common import cfg
from trove.common import exception
from trove.common.i18n import _
from trove.common.notification import DBaaSClusterGrow
from trove.common.notification import StartNotification
from trove.common import remote
from trove.common import server_group as srv_grp
from trove.common.strategies.cluster import base
from trove.common import utils
from trove.datastore import models as datastore_models
from trove.extensions.mgmt.clusters.views import MgmtClusterView
from trove.instance import models as inst_models
from trove.quota.quota import check_quotas
from trove.taskmanager import api as task_api
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
class MongoDbAPIStrategy(base.BaseAPIStrategy):
@property
def cluster_class(self):
return MongoDbCluster
@property
def cluster_view_class(self):
return MongoDbClusterView
@property
def mgmt_cluster_view_class(self):
return MongoDbMgmtClusterView
class MongoDbCluster(models.Cluster):
@classmethod
def create(cls, context, name, datastore, datastore_version,
instances, extended_properties, locality):
# TODO(amcreynolds): consider moving into CONF and even supporting
# TODO(amcreynolds): an array of values, e.g. [3, 5, 7]
# TODO(amcreynolds): or introduce a min/max num_instances and set
# TODO(amcreynolds): both to 3
num_instances = len(instances)
if num_instances != 3:
raise exception.ClusterNumInstancesNotSupported(num_instances=3)
flavor_ids = [instance['flavor_id'] for instance in instances]
if len(set(flavor_ids)) != 1:
raise exception.ClusterFlavorsNotEqual()
flavor_id = flavor_ids[0]
nova_client = remote.create_nova_client(context)
try:
flavor = nova_client.flavors.get(flavor_id)
except nova_exceptions.NotFound:
raise exception.FlavorNotFound(uuid=flavor_id)
mongo_conf = CONF.get(datastore_version.manager)
num_configsvr = (1 if mongo_conf.num_config_servers_per_cluster == 1
else 3)
num_mongos = mongo_conf.num_query_routers_per_cluster
delta_instances = num_instances + num_configsvr + num_mongos
deltas = {'instances': delta_instances}
volume_sizes = [instance['volume_size'] for instance in instances
if instance.get('volume_size', None)]
volume_size = None
if mongo_conf.volume_support:
if len(volume_sizes) != num_instances:
raise exception.ClusterVolumeSizeRequired()
if len(set(volume_sizes)) != 1:
raise exception.ClusterVolumeSizesNotEqual()
volume_size = volume_sizes[0]
models.validate_volume_size(volume_size)
# TODO(amcreynolds): for now, mongos+configsvr same flavor+disk
deltas['volumes'] = volume_size * delta_instances
else:
# TODO(amcreynolds): is ephemeral possible for mongodb clusters?
if len(volume_sizes) > 0:
raise exception.VolumeNotSupported()
ephemeral_support = mongo_conf.device_path
if ephemeral_support and flavor.ephemeral == 0:
raise exception.LocalStorageNotSpecified(flavor=flavor_id)
check_quotas(context.tenant, deltas)
nics = [instance.get('nics', None) for instance in instances]
nic = nics[0]
for n in nics[1:]:
if n != nic:
raise ValueError(_('All cluster nics must be the same. '
'%(nic)s != %(n)s')
% {'nic': nic, 'n': n})
azs = [instance.get('availability_zone', None)
for instance in instances]
regions = [instance.get('region_name', None)
for instance in instances]
db_info = models.DBCluster.create(
name=name, tenant_id=context.tenant,
datastore_version_id=datastore_version.id,
task_status=ClusterTasks.BUILDING_INITIAL)
replica_set_name = "rs1"
member_config = {"id": db_info.id,
"shard_id": utils.generate_uuid(),
"instance_type": "member",
"replica_set_name": replica_set_name}
configsvr_config = {"id": db_info.id,
"instance_type": "config_server"}
mongos_config = {"id": db_info.id,
"instance_type": "query_router"}
if mongo_conf.cluster_secure:
cluster_key = base64.b64encode(utils.generate_random_password())
member_config['key'] = cluster_key
configsvr_config['key'] = cluster_key
mongos_config['key'] = cluster_key
for i in range(num_instances):
instance_name = "%s-%s-%s" % (name, replica_set_name, str(i + 1))
inst_models.Instance.create(context, instance_name,
flavor_id,
datastore_version.image_id,
[], [], datastore,
datastore_version,
volume_size, None,
availability_zone=azs[i],
nics=nic,
configuration_id=None,
cluster_config=member_config,
modules=instances[i].get('modules'),
region_name=regions[i],
locality=locality)
for i in range(num_configsvr):
instance_name = "%s-%s-%s" % (name, "configsvr", str(i + 1))
inst_models.Instance.create(context, instance_name,
flavor_id,
datastore_version.image_id,
[], [], datastore,
datastore_version,
volume_size, None,
availability_zone=azs[i %
num_instances],
nics=nic,
configuration_id=None,
cluster_config=configsvr_config,
region_name=regions[i % num_instances],
locality=locality)
for i in range(num_mongos):
instance_name = "%s-%s-%s" % (name, "mongos", str(i + 1))
inst_models.Instance.create(context, instance_name,
flavor_id,
datastore_version.image_id,
[], [], datastore,
datastore_version,
volume_size, None,
availability_zone=azs[i %
num_instances],
nics=nic,
configuration_id=None,
cluster_config=mongos_config,
region_name=regions[i % num_instances],
locality=locality)
task_api.load(context, datastore_version.manager).create_cluster(
db_info.id)
return MongoDbCluster(context, db_info, datastore, datastore_version)
def _parse_grow_item(self, item):
used_keys = []
def _check_option(key, required=False, valid_values=None):
if required and key not in item:
raise exception.TroveError(
_('An instance with the options %(given)s is missing '
'the MongoDB required option %(expected)s.')
% {'given': item.keys(), 'expected': key}
)
value = item.get(key, None)
if valid_values and value not in valid_values:
raise exception.TroveError(
_('The value %(value)s for key %(key)s is invalid. '
'Allowed values are %(valid)s.')
% {'value': value, 'key': key, 'valid': valid_values}
)
used_keys.append(key)
return value
flavor_id = utils.get_id_from_href(_check_option('flavorRef',
required=True))
volume_size = int(_check_option('volume', required=True)['size'])
instance_type = _check_option('type', required=True,
valid_values=['replica',
'query_router'])
name = _check_option('name')
related_to = _check_option('related_to')
nics = _check_option('nics')
availability_zone = _check_option('availability_zone')
unused_keys = list(set(item.keys()).difference(set(used_keys)))
if unused_keys:
raise exception.TroveError(
_('The arguments %s are not supported by MongoDB.')
% unused_keys
)
instance = {'flavor_id': flavor_id,
'volume_size': volume_size,
'instance_type': instance_type}
if name:
instance['name'] = name
if related_to:
instance['related_to'] = related_to
if nics:
instance['nics'] = nics
if availability_zone:
instance['availability_zone'] = availability_zone
return instance
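    # Illustrative item accepted by _parse_grow_item (all values are
    # placeholders): {"flavorRef": "7", "volume": {"size": 2},
    # "type": "replica", "name": "rs2-1", "related_to": "rs2-1"}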
def action(self, context, req, action, param):
if action == 'grow':
context.notification = DBaaSClusterGrow(context, request=req)
with StartNotification(context, cluster_id=self.id):
return self.grow([self._parse_grow_item(item)
for item in param])
elif action == 'add_shard':
context.notification = DBaaSClusterGrow(context, request=req)
with StartNotification(context, cluster_id=self.id):
return self.add_shard()
else:
super(MongoDbCluster, self).action(context, req, action, param)
def add_shard(self):
if self.db_info.task_status != ClusterTasks.NONE:
current_task = self.db_info.task_status.name
msg = _("This action cannot be performed on the cluster while "
"the current cluster task is '%s'.") % current_task
LOG.error(msg)
raise exception.UnprocessableEntity(msg)
db_insts = inst_models.DBInstance.find_all(cluster_id=self.id,
type='member').all()
num_unique_shards = len(set([db_inst.shard_id for db_inst
in db_insts]))
if num_unique_shards == 0:
msg = _("This action cannot be performed on the cluster as no "
"reference shard exists.")
LOG.error(msg)
raise exception.UnprocessableEntity(msg)
arbitrary_shard_id = db_insts[0].shard_id
members_in_shard = [db_inst for db_inst in db_insts
if db_inst.shard_id == arbitrary_shard_id]
num_members_per_shard = len(members_in_shard)
a_member = inst_models.load_any_instance(self.context,
members_in_shard[0].id)
deltas = {'instances': num_members_per_shard}
volume_size = a_member.volume_size
if volume_size:
deltas['volumes'] = volume_size * num_members_per_shard
check_quotas(self.context.tenant, deltas)
new_replica_set_name = "rs" + str(num_unique_shards + 1)
new_shard_id = utils.generate_uuid()
dsv_manager = (datastore_models.DatastoreVersion.
load_by_uuid(db_insts[0].datastore_version_id).manager)
manager = task_api.load(self.context, dsv_manager)
key = manager.get_key(a_member)
member_config = {"id": self.id,
"shard_id": new_shard_id,
"instance_type": "member",
"replica_set_name": new_replica_set_name,
"key": key}
locality = srv_grp.ServerGroup.convert_to_hint(self.server_group)
for i in range(1, num_members_per_shard + 1):
instance_name = "%s-%s-%s" % (self.name, new_replica_set_name,
str(i))
inst_models.Instance.create(self.context, instance_name,
a_member.flavor_id,
a_member.datastore_version.image_id,
[], [], a_member.datastore,
a_member.datastore_version,
volume_size, None,
availability_zone=None,
nics=None,
configuration_id=None,
cluster_config=member_config,
locality=locality)
self.update_db(task_status=ClusterTasks.ADDING_SHARD)
manager.mongodb_add_shard_cluster(
self.id,
new_shard_id,
new_replica_set_name)
def grow(self, instances):
"""Extend a cluster by adding new instances.
Currently only supports adding a replica set to the cluster.
"""
if not len(instances) > 0:
raise exception.TroveError(
                _('No instances specified for the grow operation.')
)
self._prep_resize()
self._check_quotas(self.context, instances)
query_routers, shards = self._group_instances(instances)
for shard in shards:
self._check_instances(
self.context, shard, self.datastore_version,
allowed_instance_count=[3]
)
if query_routers:
self._check_instances(self.context, query_routers,
self.datastore_version)
# all checks are done before any instances are created
locality = srv_grp.ServerGroup.convert_to_hint(self.server_group)
instance_ids = []
for shard in shards:
instance_ids.extend(self._create_shard_instances(shard, locality))
if query_routers:
instance_ids.extend(
self._create_query_router_instances(query_routers, locality)
)
self.update_db(task_status=ClusterTasks.GROWING_CLUSTER)
self.manager.grow_cluster(self.id, instance_ids)
def shrink(self, instance_ids):
"""Removes instances from a cluster.
Currently only supports removing entire replica sets from the cluster.
"""
if not len(instance_ids) > 0:
raise exception.TroveError(
                _('No instances specified for the shrink operation.')
)
self._prep_resize()
all_member_ids = set([member.id for member in self.members])
all_query_router_ids = set([query_router.id for query_router
in self.query_routers])
target_ids = set(instance_ids)
target_member_ids = target_ids.intersection(all_member_ids)
target_query_router_ids = target_ids.intersection(all_query_router_ids)
target_configsvr_ids = target_ids.difference(
target_member_ids.union(target_query_router_ids)
)
if target_configsvr_ids:
raise exception.ClusterShrinkInstanceInUse(
id=list(target_configsvr_ids),
reason="Cannot remove config servers."
)
remaining_query_router_ids = all_query_router_ids.difference(
target_query_router_ids
)
if len(remaining_query_router_ids) < 1:
raise exception.ClusterShrinkInstanceInUse(
id=list(target_query_router_ids),
reason="Cannot remove all remaining query routers. At least "
"one query router must be available in the cluster."
)
if target_member_ids:
target_members = [member for member in self.members
if member.id in target_member_ids]
target_shards = {}
for member in target_members:
if member.shard_id in target_shards:
target_shards[member.shard_id].append(member.id)
else:
target_shards[member.shard_id] = [member.id]
for target_shard_id in target_shards.keys():
# check the whole shard is being deleted
target_shard_member_ids = [
member.id for member in target_members
if member.shard_id == target_shard_id
]
all_shard_member_ids = [
member.id for member in self.members
if member.shard_id == target_shard_id
]
if set(target_shard_member_ids) != set(all_shard_member_ids):
raise exception.TroveError(
_('MongoDB cluster shrink only supports removing an '
'entire shard. Shard %(shard)s has members: '
'%(instances)s')
% {'shard': target_shard_id,
'instances': all_shard_member_ids}
)
self._check_shard_status(target_shard_member_ids[0])
# all checks are done by now
self.update_db(task_status=ClusterTasks.SHRINKING_CLUSTER)
for instance_id in instance_ids:
instance = inst_models.load_any_instance(self.context, instance_id)
instance.delete()
self.manager.shrink_cluster(self.id, instance_ids)
def _create_instances(self, instances, cluster_config,
default_name_tag, locality, key=None):
"""Loop through the instances and create them in this cluster."""
cluster_config['id'] = self.id
if CONF.get(self.datastore_version.manager).cluster_secure:
if not key:
key = self.get_guest(self.arbitrary_query_router).get_key()
cluster_config['key'] = key
instance_ids = []
for i, instance in enumerate(instances):
name = instance.get('name', '%s-%s-%s' % (
self.name, default_name_tag, i + 1))
new_instance = inst_models.Instance.create(
self.context, name, instance['flavor_id'],
self.datastore_version.image_id, [], [],
self.datastore, self.datastore_version,
instance['volume_size'], None,
availability_zone=instance.get('availability_zone', None),
nics=instance.get('nics', None),
cluster_config=cluster_config,
locality=locality
)
instance_ids.append(new_instance.id)
return instance_ids
def _create_shard_instances(self, instances, locality,
replica_set_name=None, key=None):
"""Create the instances for a new shard in the cluster."""
shard_id = utils.generate_uuid()
if not replica_set_name:
replica_set_name = self._gen_replica_set_name()
cluster_config = {'shard_id': shard_id,
'instance_type': 'member',
'replica_set_name': replica_set_name}
return self._create_instances(instances, cluster_config,
replica_set_name, locality, key=key)
def _create_query_router_instances(self, instances, locality, key=None):
"""Create the instances for the new query router."""
cluster_config = {'instance_type': 'query_router'}
return self._create_instances(instances, cluster_config,
'mongos', locality, key=key)
def _prep_resize(self):
"""Get information about the cluster's current state."""
if self.db_info.task_status != ClusterTasks.NONE:
current_task = self.db_info.task_status.name
msg = _("This action cannot be performed on the cluster while "
"the current cluster task is '%s'.") % current_task
LOG.error(msg)
raise exception.UnprocessableEntity(msg)
def _instances_of_type(instance_type):
return [db_inst for db_inst in self.db_instances
if db_inst.type == instance_type]
self.config_svrs = _instances_of_type('config_server')
self.query_routers = _instances_of_type('query_router')
self.members = _instances_of_type('member')
self.shard_ids = set([member.shard_id for member in self.members])
self.arbitrary_query_router = inst_models.load_any_instance(
self.context, self.query_routers[0].id
)
self.manager = task_api.load(self.context,
self.datastore_version.manager)
def _group_instances(self, instances):
"""Group the instances into logical sets (type, shard, etc)."""
replicas = []
query_routers = []
for item in instances:
if item['instance_type'] == 'replica':
replica_requirements = ['related_to', 'name']
if not all(key in item for key in replica_requirements):
raise exception.TroveError(
_('Replica instance does not have required field(s) '
'%s.') % replica_requirements
)
replicas.append(item)
elif item['instance_type'] == 'query_router':
query_routers.append(item)
else:
raise exception.TroveError(
_('Instance type %s not supported for MongoDB cluster '
'grow.') % item['instance_type']
)
return query_routers, self._group_shard_instances(replicas)
def _group_shard_instances(self, instances):
"""Group the replica instances into shards."""
# Create the sets. Dictionary keys correspond to instance names.
# Dictionary values are the same if related.
sets = {}
specified_names = []
for instance in instances:
name = instance['name']
specified_names.append(name)
if name in sets:
sets[name].append(instance)
else:
sets[name] = [instance]
if 'related_to' in instance:
if instance['related_to'] == instance['name']:
continue
relative = instance['related_to']
if relative in sets:
if sets[relative] is not sets[name]:
sets[relative].extend(sets[name])
sets[name] = sets[relative]
else:
sets[relative] = sets[name]
specified_names_set = set(specified_names)
if len(specified_names) != len(specified_names_set):
raise exception.TroveError(
_('Duplicate member names not allowed.')
)
unknown_relations = set(sets.keys()).difference((specified_names_set))
if unknown_relations:
raise exception.TroveError(
_('related_to target(s) %(targets)s do not match any '
'specified names.')
% {'targets': list(unknown_relations)}
)
# reduce the set to unique values
shards = []
for key in sets.keys():
exists = False
for item in shards:
if item is sets[key]:
exists = True
break
if exists:
continue
shards.append(sets[key])
for shard in shards:
flavor = None
size = None
for member in shard:
if ((flavor and member['flavor_id'] != flavor) or (
size and member['volume_size'] != size)):
raise exception.TroveError(
_('Members of the same shard have mismatching '
'flavorRef and/or volume values.')
)
flavor = member['flavor_id']
size = member['volume_size']
return shards
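    # Illustrative grouping (names are placeholders): replicas "a", "b", "c"
    # where "b" and "c" are related_to "a" end up in one shard [a, b, c],
    # while an unrelated replica "d" forms its own single-member shard.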
def _gen_replica_set_name(self):
"""Check the replica set names of all shards in the cluster to
determine the next available name.
Names are in the form 'rsX' where X is an integer.
"""
used_names = []
for shard_id in self.shard_ids:
# query the guest for the replica name on one member of each shard
members = [mem for mem in self.members
if mem.shard_id == shard_id]
member = inst_models.load_any_instance(self.context, members[0].id)
used_names.append(self.get_guest(member).get_replica_set_name())
# find the first unused name
i = 0
while True:
i += 1
name = 'rs%s' % i
if name not in used_names:
return name
def _check_shard_status(self, member_id):
member = inst_models.load_any_instance(self.context, member_id)
guest = self.get_guest(member)
rs_name = guest.get_replica_set_name()
if self.get_guest(
self.arbitrary_query_router).is_shard_active(rs_name):
raise exception.TroveError(
_('Shard with instance %s is still active. Please remove the '
'shard from the MongoDB cluster before shrinking.')
% member_id
)
@staticmethod
def _check_quotas(context, instances):
deltas = {'instances': len(instances),
'volumes': sum([instance['volume_size']
for instance in instances])}
check_quotas(context.tenant, deltas)
@staticmethod
def _check_instances(context, instances, datastore_version,
allowed_instance_count=None):
instance_count = len(instances)
if allowed_instance_count:
if instance_count not in allowed_instance_count:
raise exception.ClusterNumInstancesNotSupported(
num_instances=allowed_instance_count
)
flavor_ids = [instance['flavor_id'] for instance in instances]
if len(set(flavor_ids)) != 1:
raise exception.ClusterFlavorsNotEqual()
flavor_id = flavor_ids[0]
nova_client = remote.create_nova_client(context)
try:
flavor = nova_client.flavors.get(flavor_id)
except nova_exceptions.NotFound:
raise exception.FlavorNotFound(uuid=flavor_id)
mongo_conf = CONF.get(datastore_version.manager)
volume_sizes = [instance['volume_size'] for instance in instances
if instance.get('volume_size', None)]
if mongo_conf.volume_support:
if len(volume_sizes) != instance_count:
raise exception.ClusterVolumeSizeRequired()
if len(set(volume_sizes)) != 1:
raise exception.ClusterVolumeSizesNotEqual()
volume_size = volume_sizes[0]
models.validate_volume_size(volume_size)
else:
# TODO(amcreynolds): is ephemeral possible for mongodb clusters?
if len(volume_sizes) > 0:
raise exception.VolumeNotSupported()
ephemeral_support = mongo_conf.device_path
if ephemeral_support and flavor.ephemeral == 0:
raise exception.LocalStorageNotSpecified(flavor=flavor_id)
class MongoDbClusterView(ClusterView):
def build_instances(self):
return self._build_instances(['query_router'], ['member'])
class MongoDbMgmtClusterView(MgmtClusterView):
def build_instances(self):
return self._build_instances(['query_router'],
['config_server',
'member',
'query_router'])
| Tesora-Release/tesora-trove | trove/common/strategies/cluster/mongodb/api.py | Python | apache-2.0 | 29,811 |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import gapic_v1
from google.api_core import grpc_helpers_async
from google.api_core import operations_v1
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from grpc.experimental import aio # type: ignore
from google.cloud.dialogflow_v2beta1.types import document
from google.cloud.dialogflow_v2beta1.types import document as gcd_document
from google.longrunning import operations_pb2 # type: ignore
from .base import DocumentsTransport, DEFAULT_CLIENT_INFO
from .grpc import DocumentsGrpcTransport
class DocumentsGrpcAsyncIOTransport(DocumentsTransport):
"""gRPC AsyncIO backend transport for Documents.
Service for managing knowledge
[Documents][google.cloud.dialogflow.v2beta1.Document].
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_grpc_channel: aio.Channel
_stubs: Dict[str, Callable] = {}
@classmethod
def create_channel(
cls,
host: str = "dialogflow.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs,
) -> aio.Channel:
"""Create and return a gRPC AsyncIO channel object.
Args:
host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
aio.Channel: A gRPC AsyncIO channel object.
"""
return grpc_helpers_async.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
quota_project_id=quota_project_id,
default_scopes=cls.AUTH_SCOPES,
scopes=scopes,
default_host=cls.DEFAULT_HOST,
**kwargs,
)
def __init__(
self,
*,
host: str = "dialogflow.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
channel: aio.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id=None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
channel (Optional[aio.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for the grpc channel. It is ignored if ``channel`` is provided.
client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
A callback to provide client certificate bytes and private key bytes,
both in PEM format. It is used to configure a mutual TLS channel. It is
ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
            always_use_jwt_access (Optional[bool]): Whether a self-signed JWT should
                be used for service account credentials.
Raises:
google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
creation failed for any reason.
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
self._stubs: Dict[str, Callable] = {}
self._operations_client: Optional[operations_v1.OperationsAsyncClient] = None
if api_mtls_endpoint:
warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
if client_cert_source:
warnings.warn("client_cert_source is deprecated", DeprecationWarning)
if channel:
# Ignore credentials if a channel was passed.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
else:
if api_mtls_endpoint:
host = api_mtls_endpoint
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
self._ssl_channel_credentials = SslCredentials().ssl_credentials
else:
if client_cert_source_for_mtls and not ssl_channel_credentials:
cert, key = client_cert_source_for_mtls()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
# The base transport sets the host, credentials and scopes
super().__init__(
host=host,
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes,
quota_project_id=quota_project_id,
client_info=client_info,
always_use_jwt_access=always_use_jwt_access,
)
if not self._grpc_channel:
self._grpc_channel = type(self).create_channel(
self._host,
# use the credentials which are saved
credentials=self._credentials,
# Set ``credentials_file`` to ``None`` here as
# the credentials that we saved earlier should be used.
credentials_file=None,
scopes=self._scopes,
ssl_credentials=self._ssl_channel_credentials,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Wrap messages. This must be done after self._grpc_channel exists
self._prep_wrapped_messages(client_info)
@property
def grpc_channel(self) -> aio.Channel:
"""Create the channel designed to connect to this service.
This property caches on the instance; repeated calls return
the same channel.
"""
# Return the channel from cache.
return self._grpc_channel
@property
def operations_client(self) -> operations_v1.OperationsAsyncClient:
"""Create the client designed to process long-running operations.
This property caches on the instance; repeated calls return the same
client.
"""
# Quick check: Only create a new client if we do not already have one.
if self._operations_client is None:
self._operations_client = operations_v1.OperationsAsyncClient(
self.grpc_channel
)
# Return the client from cache.
return self._operations_client
@property
def list_documents(
self,
) -> Callable[
[document.ListDocumentsRequest], Awaitable[document.ListDocumentsResponse]
]:
r"""Return a callable for the list documents method over gRPC.
Returns the list of all documents of the knowledge base.
Note: The ``projects.agent.knowledgeBases.documents`` resource
is deprecated; only use ``projects.knowledgeBases.documents``.
Returns:
Callable[[~.ListDocumentsRequest],
Awaitable[~.ListDocumentsResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_documents" not in self._stubs:
self._stubs["list_documents"] = self.grpc_channel.unary_unary(
"/google.cloud.dialogflow.v2beta1.Documents/ListDocuments",
request_serializer=document.ListDocumentsRequest.serialize,
response_deserializer=document.ListDocumentsResponse.deserialize,
)
return self._stubs["list_documents"]
@property
def get_document(
self,
) -> Callable[[document.GetDocumentRequest], Awaitable[document.Document]]:
r"""Return a callable for the get document method over gRPC.
Retrieves the specified document.
Note: The ``projects.agent.knowledgeBases.documents`` resource
is deprecated; only use ``projects.knowledgeBases.documents``.
Returns:
Callable[[~.GetDocumentRequest],
Awaitable[~.Document]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_document" not in self._stubs:
self._stubs["get_document"] = self.grpc_channel.unary_unary(
"/google.cloud.dialogflow.v2beta1.Documents/GetDocument",
request_serializer=document.GetDocumentRequest.serialize,
response_deserializer=document.Document.deserialize,
)
return self._stubs["get_document"]
@property
def create_document(
self,
) -> Callable[
[gcd_document.CreateDocumentRequest], Awaitable[operations_pb2.Operation]
]:
r"""Return a callable for the create document method over gRPC.
Creates a new document.
This method is a `long-running
operation <https://cloud.google.com/dialogflow/cx/docs/how/long-running-operation>`__.
The returned ``Operation`` type has the following
method-specific fields:
- ``metadata``:
[KnowledgeOperationMetadata][google.cloud.dialogflow.v2beta1.KnowledgeOperationMetadata]
- ``response``:
[Document][google.cloud.dialogflow.v2beta1.Document]
Note: The ``projects.agent.knowledgeBases.documents`` resource
is deprecated; only use ``projects.knowledgeBases.documents``.
Returns:
Callable[[~.CreateDocumentRequest],
Awaitable[~.Operation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "create_document" not in self._stubs:
self._stubs["create_document"] = self.grpc_channel.unary_unary(
"/google.cloud.dialogflow.v2beta1.Documents/CreateDocument",
request_serializer=gcd_document.CreateDocumentRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["create_document"]
@property
def import_documents(
self,
) -> Callable[
[document.ImportDocumentsRequest], Awaitable[operations_pb2.Operation]
]:
r"""Return a callable for the import documents method over gRPC.
Create documents by importing data from external sources.
Dialogflow supports up to 350 documents in each request. If you
try to import more, Dialogflow will return an error.
This method is a `long-running
operation <https://cloud.google.com/dialogflow/cx/docs/how/long-running-operation>`__.
The returned ``Operation`` type has the following
method-specific fields:
- ``metadata``:
[KnowledgeOperationMetadata][google.cloud.dialogflow.v2beta1.KnowledgeOperationMetadata]
- ``response``:
[ImportDocumentsResponse][google.cloud.dialogflow.v2beta1.ImportDocumentsResponse]
Returns:
Callable[[~.ImportDocumentsRequest],
Awaitable[~.Operation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "import_documents" not in self._stubs:
self._stubs["import_documents"] = self.grpc_channel.unary_unary(
"/google.cloud.dialogflow.v2beta1.Documents/ImportDocuments",
request_serializer=document.ImportDocumentsRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["import_documents"]
@property
def delete_document(
self,
) -> Callable[
[document.DeleteDocumentRequest], Awaitable[operations_pb2.Operation]
]:
r"""Return a callable for the delete document method over gRPC.
Deletes the specified document.
This method is a `long-running
operation <https://cloud.google.com/dialogflow/cx/docs/how/long-running-operation>`__.
The returned ``Operation`` type has the following
method-specific fields:
- ``metadata``:
[KnowledgeOperationMetadata][google.cloud.dialogflow.v2beta1.KnowledgeOperationMetadata]
- ``response``: An `Empty
message <https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#empty>`__
Note: The ``projects.agent.knowledgeBases.documents`` resource
is deprecated; only use ``projects.knowledgeBases.documents``.
Returns:
Callable[[~.DeleteDocumentRequest],
Awaitable[~.Operation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "delete_document" not in self._stubs:
self._stubs["delete_document"] = self.grpc_channel.unary_unary(
"/google.cloud.dialogflow.v2beta1.Documents/DeleteDocument",
request_serializer=document.DeleteDocumentRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["delete_document"]
@property
def update_document(
self,
) -> Callable[
[gcd_document.UpdateDocumentRequest], Awaitable[operations_pb2.Operation]
]:
r"""Return a callable for the update document method over gRPC.
Updates the specified document.
This method is a `long-running
operation <https://cloud.google.com/dialogflow/cx/docs/how/long-running-operation>`__.
The returned ``Operation`` type has the following
method-specific fields:
- ``metadata``:
[KnowledgeOperationMetadata][google.cloud.dialogflow.v2beta1.KnowledgeOperationMetadata]
- ``response``:
[Document][google.cloud.dialogflow.v2beta1.Document]
Note: The ``projects.agent.knowledgeBases.documents`` resource
is deprecated; only use ``projects.knowledgeBases.documents``.
Returns:
Callable[[~.UpdateDocumentRequest],
Awaitable[~.Operation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "update_document" not in self._stubs:
self._stubs["update_document"] = self.grpc_channel.unary_unary(
"/google.cloud.dialogflow.v2beta1.Documents/UpdateDocument",
request_serializer=gcd_document.UpdateDocumentRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["update_document"]
@property
def reload_document(
self,
) -> Callable[
[document.ReloadDocumentRequest], Awaitable[operations_pb2.Operation]
]:
r"""Return a callable for the reload document method over gRPC.
Reloads the specified document from its specified source,
content_uri or content. The previously loaded content of the
document will be deleted. Note: Even when the content of the
document has not changed, there still may be side effects
because of internal implementation changes. Note: If the
document source is Google Cloud Storage URI, its metadata will
be replaced with the custom metadata from Google Cloud Storage
if the ``import_gcs_custom_metadata`` field is set to true in
the request.
This method is a `long-running
operation <https://cloud.google.com/dialogflow/cx/docs/how/long-running-operation>`__.
The returned ``Operation`` type has the following
method-specific fields:
- ``metadata``:
[KnowledgeOperationMetadata][google.cloud.dialogflow.v2beta1.KnowledgeOperationMetadata]
- ``response``:
[Document][google.cloud.dialogflow.v2beta1.Document]
Note: The ``projects.agent.knowledgeBases.documents`` resource
is deprecated; only use ``projects.knowledgeBases.documents``.
Returns:
Callable[[~.ReloadDocumentRequest],
Awaitable[~.Operation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "reload_document" not in self._stubs:
self._stubs["reload_document"] = self.grpc_channel.unary_unary(
"/google.cloud.dialogflow.v2beta1.Documents/ReloadDocument",
request_serializer=document.ReloadDocumentRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["reload_document"]
def close(self):
return self.grpc_channel.close()
__all__ = ("DocumentsGrpcAsyncIOTransport",)
| googleapis/python-dialogflow | google/cloud/dialogflow_v2beta1/services/documents/transports/grpc_asyncio.py | Python | apache-2.0 | 23,262 |
from pymongo import MongoClient
from wordcloud import WordCloud
import matplotlib.pyplot as plt
keys = ['school', 'location', 'major', 'position', 'employment']
def get_userData():
client = MongoClient()
db = client['Zhihu']
col = db['UserInfo']
return col.find()
def get_word(userdata, keyword='school'):
useful = {}
print keyword
if keyword in keys:
for user in userdata:
if user[keyword]:
if user[keyword] in useful.keys():
useful[user[keyword]] += 1
else:
useful[user[keyword]] = 1
frequency = []
for k in useful.keys():
frequency.append((unicode(k), useful[k]))
return frequency
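# Hedged example of get_word's output (data is hypothetical): if three users have
# 'school' set to 'MIT', 'MIT' and 'Tsinghua', the returned frequency list would be
# [(u'MIT', 2), (u'Tsinghua', 1)] (order depends on dict iteration); users with an
# empty 'school' field are skipped.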
def show_pic(frequency):
if frequency:
print "pic making"
wordcloud = WordCloud(font_path='./msyh.ttf').fit_words(frequency)
plt.figure()
plt.imshow(wordcloud)
plt.axis("off")
plt.show()
data = get_userData()
freq = get_word(data, 'school')
show_pic(freq)
| wangmengcn/LearningFlask | app/main/ZHData/wordCloud.py | Python | mit | 1,039 |
import json
import time
import logging
from collections import UserDict
import argparse
import datetime
import re
from pajbot.tbutil import find
from pajbot.models.db import DBManager, Base
from pajbot.models.action import ActionParser, RawFuncAction, FuncAction
from pajbot.managers.redis import RedisManager
from sqlalchemy import orm
from sqlalchemy.orm import relationship, joinedload
from sqlalchemy import Column, Integer, Boolean, DateTime, ForeignKey, String
from sqlalchemy.dialects.mysql import TEXT
log = logging.getLogger('pajbot')
class Banphrase(Base):
__tablename__ = 'tb_banphrase'
id = Column(Integer, primary_key=True)
name = Column(String(256), nullable=False, default='')
phrase = Column(String(256), nullable=False)
length = Column(Integer, nullable=False, default=300)
permanent = Column(Boolean, nullable=False, default=False)
warning = Column(Boolean, nullable=False, default=True)
notify = Column(Boolean, nullable=False, default=True)
case_sensitive = Column(Boolean, nullable=False, default=False)
enabled = Column(Boolean, nullable=False, default=True)
data = relationship('BanphraseData',
uselist=False,
cascade='',
lazy='joined')
DEFAULT_TIMEOUT_LENGTH = 300
DEFAULT_NOTIFY = True
def __init__(self, **options):
self.id = None
        self.name = 'No name'
        self.phrase = ''
self.length = self.DEFAULT_TIMEOUT_LENGTH
self.permanent = False
self.warning = True
self.notify = self.DEFAULT_NOTIFY
self.case_sensitive = False
self.enabled = True
self.set(**options)
def set(self, **options):
self.name = options.get('name', self.name)
self.phrase = options.get('phrase', self.phrase)
self.length = options.get('length', self.length)
self.permanent = options.get('permanent', self.permanent)
self.warning = options.get('warning', self.warning)
self.notify = options.get('notify', self.notify)
self.case_sensitive = options.get('case_sensitive', self.case_sensitive)
self.enabled = options.get('enabled', self.enabled)
def match(self, message):
"""
        Returns True if the message matches our banphrase, otherwise False.
        Respects the case-sensitivity option.
"""
if self.case_sensitive:
return self.phrase in message
else:
return self.phrase.lower() in message.lower()
def exact_match(self, message):
"""
        Returns True if the message exactly matches our banphrase, otherwise False.
        Respects the case-sensitivity option.
"""
if self.case_sensitive:
return self.phrase == message
else:
return self.phrase.lower() == message.lower()
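    # Hedged example (values are hypothetical): with phrase='forsen' and
    # case_sensitive=False, match('FORSEN is live') is True because the phrase is
    # contained in the lowered message, while exact_match('FORSEN is live') is
    # False because the whole message must equal the phrase.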
class BanphraseData(Base):
__tablename__ = 'tb_banphrase_data'
banphrase_id = Column(Integer,
ForeignKey('tb_banphrase.id'),
primary_key=True,
autoincrement=False)
num_uses = Column(Integer, nullable=False, default=0)
added_by = Column(Integer, nullable=True)
user = relationship('User',
primaryjoin='User.id==BanphraseData.added_by',
foreign_keys='User.id',
uselist=False,
cascade='',
lazy='noload')
def __init__(self, banphrase_id, **options):
self.banphrase_id = banphrase_id
self.num_uses = 0
self.added_by = None
self.set(**options)
def set(self, **options):
self.num_uses = options.get('num_uses', self.num_uses)
self.added_by = options.get('added_by', self.added_by)
class BanphraseManager:
def __init__(self, bot):
self.bot = bot
self.banphrases = []
self.enabled_banphrases = []
self.db_session = DBManager.create_session(expire_on_commit=False)
if self.bot:
self.bot.socket_manager.add_handler('banphrase.update', self.on_banphrase_update)
self.bot.socket_manager.add_handler('banphrase.remove', self.on_banphrase_remove)
def on_banphrase_update(self, data, conn):
try:
banphrase_id = int(data['banphrase_id'])
except (KeyError, ValueError):
log.warn('No banphrase ID found in on_banphrase_update')
return False
updated_banphrase = find(lambda banphrase: banphrase.id == banphrase_id, self.banphrases)
if updated_banphrase:
with DBManager.create_session_scope(expire_on_commit=False) as db_session:
db_session.add(updated_banphrase)
db_session.refresh(updated_banphrase)
db_session.expunge(updated_banphrase)
else:
with DBManager.create_session_scope(expire_on_commit=False) as db_session:
updated_banphrase = db_session.query(Banphrase).filter_by(id=banphrase_id).one_or_none()
db_session.expunge_all()
if updated_banphrase is not None:
self.db_session.add(updated_banphrase.data)
if updated_banphrase:
if updated_banphrase not in self.banphrases:
self.banphrases.append(updated_banphrase)
if updated_banphrase.enabled is True and updated_banphrase not in self.enabled_banphrases:
self.enabled_banphrases.append(updated_banphrase)
for banphrase in self.enabled_banphrases:
if banphrase.enabled is False:
self.enabled_banphrases.remove(banphrase)
def on_banphrase_remove(self, data, conn):
try:
banphrase_id = int(data['banphrase_id'])
except (KeyError, ValueError):
log.warn('No banphrase ID found in on_banphrase_remove')
return False
removed_banphrase = find(lambda banphrase: banphrase.id == banphrase_id, self.banphrases)
if removed_banphrase:
if removed_banphrase in self.enabled_banphrases:
self.enabled_banphrases.remove(removed_banphrase)
if removed_banphrase in self.banphrases:
self.banphrases.remove(removed_banphrase)
def load(self):
self.banphrases = self.db_session.query(Banphrase).all()
for banphrase in self.banphrases:
self.db_session.expunge(banphrase)
self.enabled_banphrases = [banphrase for banphrase in self.banphrases if banphrase.enabled is True]
return self
def commit(self):
self.db_session.commit()
def create_banphrase(self, phrase, **options):
for banphrase in self.banphrases:
if banphrase.phrase == phrase:
return banphrase, False
banphrase = Banphrase(phrase=phrase, **options)
banphrase.data = BanphraseData(banphrase.id, added_by=options.get('added_by', None))
self.db_session.add(banphrase)
self.db_session.add(banphrase.data)
self.commit()
self.db_session.expunge(banphrase)
self.banphrases.append(banphrase)
self.enabled_banphrases.append(banphrase)
return banphrase, True
def remove_banphrase(self, banphrase):
self.banphrases.remove(banphrase)
if banphrase in self.enabled_banphrases:
self.enabled_banphrases.remove(banphrase)
self.db_session.expunge(banphrase.data)
self.db_session.delete(banphrase)
self.db_session.delete(banphrase.data)
self.commit()
def punish(self, user, banphrase):
"""
This method is responsible for calculating
what sort of punishment a user deserves.
The `permanent` flag takes precedence over the `warning` flag.
This means if a banphrase is marked with the `permanent` flag,
the user will be permanently banned even if this is his first strike.
"""
if banphrase.permanent is True:
# Permanently ban user
punishment = 'permanently banned'
self.bot.ban(user.username)
else:
# Timeout user
timeout_length, punishment = user.timeout(banphrase.length, self.bot, use_warnings=banphrase.warning)
""" Finally, time out the user for whatever timeout length was required. """
self.bot.timeout(user.username, timeout_length)
if banphrase.notify is True:
""" Last but not least, notify the user why he has been timed out
if the banphrase wishes it. """
notification_msg = 'You have been {punishment} because your message matched the "{banphrase.name}" banphrase.'.format(punishment=punishment, banphrase=banphrase)
self.bot.whisper(user.username, notification_msg)
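    # Hedged example of the punishment flow above (values are hypothetical): for a
    # banphrase with permanent=False, length=600, warning=True and notify=True, the
    # actual timeout length comes from user.timeout(600, ...), which may shorten it
    # to a warning for a first offence, and the user is then whispered the
    # notification message.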
def check_message(self, message):
match = find(lambda banphrase: banphrase.match(message), self.enabled_banphrases)
return match or False
def find_match(self, message, id=None):
match = None
if id is not None:
match = find(lambda banphrase: banphrase.id == id, self.banphrases)
if match is None:
match = find(lambda banphrase: banphrase.exact_match(message), self.banphrases)
return match
def parse_banphrase_arguments(self, message):
parser = argparse.ArgumentParser()
parser.add_argument('--length', dest='length', type=int)
parser.add_argument('--time', dest='length', type=int)
parser.add_argument('--duration', dest='length', type=int)
parser.add_argument('--notify', dest='notify', action='store_true')
parser.add_argument('--no-notify', dest='notify', action='store_false')
parser.add_argument('--perma', dest='permanent', action='store_true')
parser.add_argument('--no-perma', dest='permanent', action='store_false')
parser.add_argument('--permanent', dest='permanent', action='store_true')
parser.add_argument('--no-permanent', dest='permanent', action='store_false')
parser.add_argument('--casesensitive', dest='case_sensitive', action='store_true')
parser.add_argument('--no-casesensitive', dest='case_sensitive', action='store_false')
parser.add_argument('--warning', dest='warning', action='store_true')
parser.add_argument('--no-warning', dest='warning', action='store_false')
parser.add_argument('--name', nargs='+', dest='name')
parser.set_defaults(length=None,
notify=None,
permanent=None,
case_sensitive=None,
warning=None)
try:
args, unknown = parser.parse_known_args(message.split())
except SystemExit:
return False, False
        except Exception:
log.exception('Unhandled exception in add_command')
return False, False
# Strip options of any values that are set as None
options = {k: v for k, v in vars(args).items() if v is not None}
response = ' '.join(unknown)
if 'name' in options:
options['name'] = ' '.join(options['name'])
log.info(options)
return options, response
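    # Hedged example of the argument parsing above (input is hypothetical): parsing
    # 'bad word --length 600 --perma --name my phrase' yields
    # options == {'length': 600, 'permanent': True, 'name': 'my phrase'} and
    # response == 'bad word'.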
| gigglearrows/anniesbot | pajbot/models/banphrase.py | Python | mit | 11,138 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# MIT License
#
# Copyright (c) 2018 Miha Purg <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
#
#
"""
This module contains the QStruct class for handling Q structure files (PDB, mol2).
Additionally, it implements methods for finding and replacing atom
placeholders (e.g. $1.N$)
"""
from __future__ import absolute_import, unicode_literals, division
from six.moves import map
import re
import logging
from collections import namedtuple
from Qpyl.common import raise_or_log
logger = logging.getLogger(__name__)
PosVector = namedtuple("PosVector", ["x", "y", "z"])
_PLACEHOLDER_RE = re.compile(r"\$\S+\.\S+\$")
_COMMENTS_RE = re.compile(r"[#\!].*")
def find_placeholders(inputstring):
"""Find atom placeholders of the form $514.C3$
It ignores comments (characters following # or !)
See also QStruct.convert_placeholders
"""
tmp = re.sub(_COMMENTS_RE, "", inputstring)
return _PLACEHOLDER_RE.findall(tmp)
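# Hedged example (input is hypothetical):
#   find_placeholders("make_bond $1.CA$ $2.N$ 1.0 ! a comment")
# returns ['$1.CA$', '$2.N$'], since text after '#' or '!' is stripped first.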
class QStructError(Exception):
pass
class QStruct(object):
"""
Class for processing the structure (coordinates)
Args:
filename (path to the structure file)
filetype (type of structure file: 'pdb' or 'mol2')
ignore_errors (boolean): Optional, default is False.\
If set to True, some non-vital\
exceptions are logged instead.
In contrast to QLib and QPrm, the 'read' methods in this
class should not be called since the object should
contain data from only one structure file.
The structure data is stored in three lists:
atoms, residues, molecules
which contain _StructAtom, _StructResidue and _StructMolecule
objects.
"""
def __init__(self, filename, filetype, ignore_errors=False):
self.ignore_errors = ignore_errors
FILE_TYPES = {'pdb': self._read_pdb,
'mol2': self._read_mol2}
self.filetype = filetype.lower()
if self.filetype not in FILE_TYPES:
raise QStructError("Filetype {} not supported. Use {}"
.format(filetype,
" or ".join(FILE_TYPES)))
self.atoms = []
self.residues = []
self.molecules = []
self.filename = filename
# TODO: some sort of lookup hashes if needed
# run the parser function
FILE_TYPES[self.filetype](filename)
# check if we actually got something
for t in ["atoms", "residues", "molecules"]:
if len(self.__dict__[t]) == 0:
raise QStructError("No {} found, check file '{}' and"
" filetype '{}'".format(t,
self.filename,
self.filetype))
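    # Hedged usage sketch (the file name is hypothetical):
    #   qstruct = QStruct("complex.pdb", "pdb")
    #   print(len(qstruct.atoms), len(qstruct.residues), len(qstruct.molecules))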
def _read_mol2(self, mol2_file):
"""
Read and parse a mol2 file for coordinates.
Args:
mol2_file (string): name/path of file
"""
molecule = None
residue = None
aindex, old_aindex = None, None
section = None
for line in open(mol2_file, 'r').readlines():
if line.startswith("@<TRIPOS>"):
section = line.replace("@<TRIPOS>", "").strip()
if section == "MOLECULE":
if molecule != None:
self.molecules.append(molecule)
molecule = _StructMolecule(self)
continue
if section == "ATOM":
if aindex != None:
old_aindex = aindex
lf = line.split()
aindex, aname = int(lf[0]), lf[1]
x, y, z = map(float, lf[2:5])
rindex = int(lf[6])
rname = lf[7][0:3].upper()
if old_aindex != None and aindex - old_aindex != 1:
raise_or_log("Bad Mol2 format - atom "
"index {} followed by {}"
.format(old_aindex, aindex),
QStructError, logger, self.ignore_errors)
if not residue or residue.index_struct != rindex:
if residue and rindex - residue.index_struct != 1:
raise_or_log("Bad Mol2 format - residue "
"index {} followed by {}"
.format(residue.index_struct, rindex),
QStructError, logger, self.ignore_errors)
residue = _StructResidue(rindex, rname, molecule, self)
self.residues.append(residue)
molecule.add_residue(residue)
atom = _StructAtom(aindex, aname, x, y, z, residue, self)
self.atoms.append(atom)
residue.add_atom(atom)
# append last one after parsing
if molecule != None and len(molecule.residues) > 0:
self.molecules.append(molecule)
def _read_pdb(self, pdb_file):
"""
Read and parse a PDB file for coordinates.
Args:
pdb_file (string): name/path of file
"""
# make a new _StructMolecule object
molecule = _StructMolecule(self)
# parse the PDB file
residue = None
aindex, old_aindex = None, None
for line in open(pdb_file, 'r').readlines():
if line.startswith("ATOM") or line.startswith("HETATM"):
if aindex != None:
old_aindex = aindex
aindex = int(line[6:12])
if old_aindex != None and aindex - old_aindex != 1:
raise_or_log("Bad PDB format - atom "
"index {} followed by {}"
.format(old_aindex, aindex),
QStructError, logger, self.ignore_errors)
aname = line[12:17].strip()
rname = line[17:20].strip().upper()
rindex = int(line[22:26])
x, y, z = map(float, (line[30:38], line[38:46], line[46:54]))
if not residue or residue.index_struct != rindex:
if residue and rindex - residue.index_struct != 1:
raise_or_log("Bad PDB format - residue "
"index {} followed by {}"
.format(residue.index_struct, rindex),
QStructError, logger, self.ignore_errors)
residue = _StructResidue(rindex, rname, molecule, self)
self.residues.append(residue)
molecule.add_residue(residue)
if aname in [a.name for a in residue.atoms]:
raise_or_log("Bad PDB format - two atoms with same name "
"({}) in residue {}.{}"
"".format(aname, rname, rindex),
QStructError, logger, self.ignore_errors)
atom = _StructAtom(aindex, aname, x, y, z, residue, self)
self.atoms.append(atom)
residue.add_atom(atom)
elif line.startswith("TER") or line.startswith("GAP"):
self.molecules.append(molecule)
residue = None
molecule = _StructMolecule(self)
        # append the last molecule if the file didn't end with a TER/GAP record
if molecule != None and len(molecule.residues) > 0:
self.molecules.append(molecule)
def convert_placeholders(self, inputstring):
"""Convert atom placeholders ($514.C3$) to indexes.
Placeholders are a combination of the residue id and
atom name, encapsulated in $$ - $RESID.ATOM_NAME$
        In addition, there are some special values:
$LAST.ID$ - id of last atom in the system
Arguments:
inputstring (string): string with placeholders (input file contents)
Returns:
outputstring (string): converted string
"""
id_map = {"{}.{}".format(a.residue.index, a.name): str(a.index)
for a in self.atoms}
last_id = "{}.{}".format(self.atoms[-1].residue.index,
self.atoms[-1].name)
outputstring = ""
for line in inputstring.split("\n"):
comment = ""
if "#" in line:
i = line.index("#")
line, comment = line[:i], line[i:]
c = find_placeholders(line)
for pid in c:
pid = pid.strip("$")
pid2 = pid.replace("LAST.ID", last_id)
try:
padding = (len(pid2)+2 - len(id_map[pid2])) * " "
except KeyError:
raise QStructError("Atom '${}$' does not exist in the pdb "
"structure.".format(pid2))
line = re.sub("\$" + pid + "\$", id_map[pid2] + padding, line)
outputstring += line + comment + "\n"
return outputstring
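    # Hedged example (the structure is hypothetical): if atom 'CA' of the first
    # residue gets topology index 5, convert_placeholders("group $1.CA$") returns
    # roughly "group 5     \n" (the index plus right-padding so the line keeps its
    # width), and "$LAST.ID$" resolves to the index of the last atom in the system.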
class _StructAtom(object):
"""Contains structural information for an atom.
Arguments:
index_struct (int): index as written in pdb or mol2
name (string): atom name
x,y,z (float): coordinates
residue (_StructResidue): parent residue object
structure (_QStruct): parent structure object
Property 'index' (int) is the actual 1-based index of the atom
in the atom list (as opposed to index_struct which was read from
the file). It should correspond to the index in the generated topology.
"""
def __init__(self, index_struct, name, x, y, z, residue, structure):
self.index_struct = int(index_struct)
self.name = name
self.coordinates = PosVector(float(x), float(y), float(z))
self.residue = residue
self.structure = structure
@property
def index(self):
return self.structure.atoms.index(self) + 1
def __repr__(self):
res = self.residue
mol = res.molecule
return "_StructAtom: {}.{}.{}".format(mol.index,
res.index,
self.index)
class _StructResidue(object):
"""Contains structural information for a residue.
Arguments:
index_struct (int): index as written in pdb or mol2
name (string): residue name
molecule (_StructMolecule): parent molecule object
structure (_QStruct): parent structure object
Property 'index' (int) is the actual 1-based index of the residue
in the residue list (as opposed to index_struct which was read from
the file). It should correspond to the index in the generated topology.
"""
def __init__(self, index_struct, name, molecule, structure):
self.atoms = []
self.index_struct = int(index_struct)
self.name = name
self.molecule = molecule
self.structure = structure
@property
def index(self):
return self.structure.residues.index(self) + 1
def add_atom(self, atom):
self.atoms.append(atom)
def __repr__(self):
mol = self.molecule
return "_StructResidue: {}.{}{}".format(mol.index,
self.name,
self.index)
class _StructMolecule(object):
"""Contains structural information for a molecule.
Arguments:
structure (_QStruct): parent structure object
    Special property is 'index' (int). It is the actual
    1-based index of the molecule in the molecule list (as it was appended).
    This should correspond to the index in the generated topology.
"""
def __init__(self, structure):
self.residues = []
self.structure = structure
@property
def index(self):
return self.structure.molecules.index(self) + 1
def add_residue(self, residue):
self.residues.append(residue)
def __repr__(self):
return "_StructMolecule: {}".format(self.index)
| mpurg/qtools | packages/Qpyl/core/qstructure.py | Python | mit | 13,331 |
def get_binary_labeled_data():
labeled_data = []
x = [
[-0.155, 0.128, -0.688, 0.028, -0.555, -1.112, 0.427, -0.199, 0.807, -1.705],
[-0.847, 2.342, 1.156, 0.767, 0.171, 0.814, -0.657, 0.631, -0.346, 0.682],
[-0.886, 1.638, -0.818, -0.838, 1.137, 2.086, -0.662, 1.396, -1.613, -0.268],
[-0.002, -0.569, -1.079, -0.667, 0.928, 0.548, 0.75, 0.427, -1.079, 0.676],
[-0.772, -2.468, 1.688, 0.009, -1.934, -1.657, -0.987, -0.162, 2.354, 0.337],
[-0.077, 0.2, 0.244, 0.583, -0.458, -2.076, 1.646, 1.081, 0.905, -0.213],
[0.676, 0.298, 0.222, 1.152, -0.911, -0.959, -0.266, 2.107, 1.146, -1.337],
[0.02, 0.749, -1.476, -0.093, 0.143, -0.084, 0.303, 0.53, -0.131, -1.536],
[0.336, -1.074, -2.36, -0.445, 0.222, 1.248, -1.067, 0.595, -0.489, -1.463],
[-1.145, -0.653, 0.207, 1.499, 3.571, 2.755, 2.161, 0.935, -4.284, -0.501],
[-0.328, -0.903, -0.243, -0.55, -0.033, 0.74, -0.874, 0.945, -0.118, -0.651],
[0.914, -0.485, 1.077, -0.525, -0.495, -0.129, -0.582, -0.089, 0.542, 0.965],
[0.38, 1.024, 0.351, 0.11, 0.506, 1.275, -0.681, -0.129, -0.789, -0.011],
[0.429, -0.502, -0.601, 0.026, 0.169, 0.673, -0.504, -1.534, -0.314, 0.147],
[-0.891, 1.432, -0.523, 0.906, 2.416, 1.923, 1.397, -0.033, -2.911, 0.369],
[-0.539, 0.694, 1.398, 0.791, 2.225, 1.792, 1.263, 2.338, -2.685, -0.125],
[0.608, 0.182, 1.06, 0.361, 2.031, 0.612, 2.296, 1.488, -2.239, 0.997],
[-0.905, -0.842, -0.365, 0.443, -2.235, -1.46, -1.648, 2.599, 2.626, 0.466],
[0.918, -0.203, -1.272, -1.751, 0.733, 0.491, 0.527, 2.606, -0.864, 0.038],
[0.305, 0.417, -1.299, 2.556, 0.791, 0.525, 0.574, 1.107, -0.931, 0.785],
[-0.715, 0.499, -0.663, -0.145, -0.359, -2.104, 1.821, 1.394, 0.808, -0.074],
[1.444, 0.863, 1.106, 1.037, -3.81, -3.141, -2.081, -0.112, 4.612, 0.458],
[0.096, 1.174, 0.332, -0.205, 1.645, 1.924, 0.264, -0.302, -2.108, -0.34],
[0.087, -0.307, 0.169, 0.908, 0.013, 0.118, -0.113, -2.355, -0.038, -0.815],
[1.778, -1.174, 0.187, 1.413, 0.86, 0.457, 0.752, 2.357, -0.989, -0.069],
[0.615, -0.899, -1.668, -0.412, -2.519, -2.281, -1.147, 1.365, 3.091, 0.266],
[0.187, -1.107, 1.076, -0.821, 0.237, 0.819, -0.566, 1.707, -0.416, 1.224],
[-0.623, -0.78, 0.93, 1.717, 1.294, 0.872, 0.924, 0.723, -1.526, -0.134],
[0.547, -0.759, -0.056, -0.96, -0.757, -0.974, -0.024, 1.359, 0.989, -0.579],
[1.526, -0.566, -1.294, 0.304, 2.391, 1.91, 1.374, 0.88, -2.882, 0.843],
[0.99, -1.163, 1.838, 2.009, -1.477, -0.983, -1.069, 1.438, 1.739, 2.695],
[0.259, 1.319, -1.202, 1.143, -0.586, 0.457, -1.37, 1.26, 0.515, -0.124],
[-0.652, -2.016, 1.123, -0.41, -0.876, -0.521, -0.703, 0.05, 1.019, 0.476],
[0.482, 0.986, 0.928, -0.976, -0.655, -1.643, 0.873, 0.966, 1.021, 2.519],
[1.905, 0.119, 0.527, -0.333, 4.131, 3.402, 2.262, -0.386, -5.0, 0.432],
[0.168, 0.64, 0.492, 1.673, 0.817, -0.734, 2.017, 0.229, -0.698, 0.697],
[-0.206, -0.653, 0.784, 0.04, 2.463, 1.989, 1.393, -0.536, -2.973, -0.461],
[-1.084, 0.681, -1.377, -1.338, 0.453, 1.236, -0.716, -1.948, -0.726, -0.501],
[0.882, -2.013, 1.037, -0.133, 0.792, 1.467, -0.476, -1.637, -1.127, -0.045],
[-0.833, 2.264, -0.509, 0.213, -0.343, 0.163, -0.685, -1.036, 0.323, -1.17],
[1.233, 2.281, -2.832, 0.348, -0.643, -0.616, -0.255, -2.365, 0.796, -0.979],
[0.48, 0.48, 2.151, -0.025, -0.041, 0.912, -1.078, 0.338, -0.145, 1.146],
[-1.217, -0.075, 1.407, 0.097, -0.53, 0.067, -0.852, -0.134, 0.537, 0.354],
[1.225, 0.748, 0.64, 1.665, 0.56, 1.822, -1.212, 1.652, -0.959, -0.11],
[-0.605, -1.807, -0.787, 0.103, -1.448, -0.455, -1.616, 0.872, 1.601, -2.656],
[0.599, -0.337, 0.846, -0.245, -0.709, 0.179, -1.239, -2.055, 0.701, 0.244],
[2.425, 1.856, -0.801, 0.909, -0.206, -0.704, 0.484, -0.77, 0.359, -0.755],
[-0.305, -1.108, 0.678, -1.157, -0.694, -0.363, -0.613, -0.865, 0.797, 0.013],
[-0.677, -0.6, 0.299, 0.759, -0.754, -0.873, -0.131, -0.043, 0.965, 0.213],
[-0.46, 1.3, 0.071, 0.057, 0.835, 0.44, 0.735, 0.344, -0.96, 2.074],
[1.477, -1.096, 1.185, 0.157, 0.067, -1.351, 1.605, 0.354, 0.21, -0.494],
[0.686, -0.897, 1.354, 0.068, -2.752, -1.716, -2.121, -0.386, 3.217, 1.67],
[-1.223, -1.098, 0.151, 0.921, 2.474, 1.957, 1.444, -1.737, -2.978, -3.081],
[0.032, -0.826, -0.407, -0.206, 2.068, 1.392, 1.479, 0.384, -2.438, -0.581],
[1.089, 0.862, -3.557, 0.4, -0.083, -1.598, 1.662, 0.064, 0.416, -0.762],
[-1.72, 0.213, -1.292, -0.796, -1.166, -0.744, -0.88, -2.927, 1.366, 0.997],
[0.208, -1.308, -0.453, 0.229, 2.416, 1.798, 1.536, 0.364, -2.884, 0.593],
[0.778, 0.088, -0.372, 0.841, 2.304, 1.824, 1.342, 0.739, -2.773, -1.642],
[-0.618, -0.112, -0.001, -1.319, 2.11, 1.534, 1.383, -0.73, -2.512, 1.284],
[0.282, 1.513, -0.806, 1.486, -0.067, 0.864, -1.063, -0.529, -0.109, 0.578],
[0.025, 0.846, 0.145, 1.118, 3.226, 2.494, 1.948, -0.484, -3.871, -0.404],
[0.073, 1.199, 1.08, 0.151, 1.161, 0.109, 1.581, -1.265, -1.23, 1.069],
[0.493, -0.777, -0.979, 0.488, 0.456, 2.217, -1.806, 1.192, -0.932, 1.577],
[-0.227, 0.431, 0.739, -0.286, -0.379, -0.643, 0.162, -0.499, 0.527, -1.899],
[-0.074, 0.419, 1.523, -1.294, -0.195, -0.913, 0.733, -0.627, 0.391, 0.654],
[-0.638, -0.526, -0.505, 3.381, -1.62, -0.805, -1.476, -0.651, 1.851, 1.139],
[-0.02, 0.72, -0.712, -0.34, 1.873, 1.345, 1.246, 0.966, -2.226, -0.741],
[1.427, -0.991, -1.295, 0.829, -0.363, -1.303, 0.922, 0.301, 0.647, -1.808],
[1.192, -0.089, -0.33, 0.173, -1.097, -0.816, -0.698, -1.426, 1.31, -1.074],
[-0.07, -2.022, -0.998, 0.697, 0.276, 1.19, -0.923, 1.061, -0.533, -0.507],
[2.173, 0.477, 0.392, 1.075, -1.228, -0.501, -1.243, 0.373, 1.381, -0.117],
[-0.919, -2.287, -1.167, -0.676, -0.878, -0.055, -1.227, -1.088, 0.925, -1.436],
[0.064, 0.695, 0.981, -2.103, -0.026, 0.562, -0.666, 0.917, -0.089, 0.959],
[0.157, -0.064, 1.642, -0.791, -0.085, 0.684, -0.889, 0.059, -0.052, -1.156],
[1.123, 2.923, 1.073, 0.211, -0.183, 0.927, -1.302, -1.607, -0.001, 1.292],
[0.292, 0.495, -1.278, 1.736, -0.0, 1.084, -1.211, -0.249, -0.223, 0.673],
[-0.73, 1.998, -0.376, 2.397, -0.158, -0.291, 0.092, 1.078, 0.225, -0.564],
[0.431, -0.865, 1.168, 0.7, 0.016, 1.198, -1.314, 0.518, -0.264, -0.355],
[1.537, 1.249, -0.82, 0.244, -0.85, -0.287, -0.926, 0.348, 0.944, 0.331],
[0.889, 0.172, -0.004, 0.005, -1.45, -1.093, -0.906, -1.71, 1.734, 0.75],
[0.223, -0.535, -0.945, 0.316, -0.117, -0.334, 0.201, -0.438, 0.19, -0.398],
[-0.454, -0.16, -0.665, 0.708, -0.556, -0.6, -0.147, -1.476, 0.703, -2.428],
[0.658, -1.318, 1.47, -1.013, 2.123, 1.512, 1.427, 1.704, -2.521, -0.397],
[-0.78, 0.503, 0.09, 1.68, 0.055, 0.134, -0.069, 0.292, -0.085, -0.247],
[0.666, -1.186, -0.331, -1.419, -1.131, -1.357, -0.144, -0.527, 1.457, 1.875],
[-1.14, -0.108, -0.267, -0.248, 0.559, 1.292, -0.623, 2.1, -0.848, -0.788],
[-0.778, -0.107, 2.524, 0.695, 2.038, 1.612, 1.19, 0.66, -2.453, -1.055],
[1.531, 0.014, 1.64, 0.591, 0.291, 1.326, -1.054, -0.161, -0.576, -1.252],
[0.202, -1.0, -1.115, -0.408, 0.104, -1.493, 1.819, -1.136, 0.2, -0.149],
[1.588, -4.399, 0.023, 0.704, 0.184, -0.638, 0.982, 1.83, -0.06, 0.351],
[0.209, -0.617, -0.38, 0.314, -0.356, -1.659, 1.33, -1.175, 0.712, -0.357],
[0.988, -1.087, 0.207, 2.328, -0.561, -2.589, 2.067, 0.852, 1.118, 0.488],
[-0.025, -0.087, 1.763, -0.12, 2.581, 2.167, 1.366, 0.192, -3.132, 0.493],
[-0.552, -0.789, -0.245, -1.097, 0.233, 0.96, -0.729, -1.921, -0.441, -0.367],
[-0.221, 0.928, -0.015, 0.342, 0.138, 1.087, -1.011, 0.211, -0.368, -0.755],
[-0.245, -0.147, -1.008, 0.66, -3.046, -2.052, -2.177, -0.613, 3.593, 1.26],
[-0.584, 0.212, 1.198, 1.516, 0.479, 1.081, -0.504, -0.944, -0.721, 0.158],
[0.544, -1.369, 0.3, 0.04, -1.953, -1.411, -1.289, 0.41, 2.323, 0.056],
[0.455, 1.246, -0.568, -1.183, -1.124, -0.608, -0.97, 0.281, 1.295, -1.347],
[-0.979, 0.715, 0.128, -0.771, -0.162, -1.105, 0.996, -0.684, 0.397, 0.276]
]
y = [
1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1,
1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0,
0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1
]
feature_names = ['feature%d' % i for i in range(10)]
for i, label in enumerate(y):
features = x[i]
data = {feature_names[i]: value for i, value in enumerate(features)}
if label:
label = 'True'
else:
label = 'False'
labeled_data.append({'features': data, 'label': label})
labeled_data = {'training_data': labeled_data}
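    # Hedged note on the returned shape (first entry shown, taken from the rows
    # above): {'training_data': [{'features': {'feature0': -0.155, ...},
    # 'label': 'True'}, ...]}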
    return labeled_data
| shawnhermans/soothsayer | soothsayer/tests/common.py | Python | bsd-3-clause | 9,213
from django import forms
from django.contrib import admin
from django.contrib.admin import BooleanFieldListFilter, SimpleListFilter
from django.contrib.admin.options import VERTICAL, ModelAdmin, TabularInline
from django.contrib.admin.sites import AdminSite
from django.core.checks import Error
from django.db.models import F, Field, Model
from django.db.models.functions import Upper
from django.forms.models import BaseModelFormSet
from django.test import SimpleTestCase
from .models import (
Band, Song, User, ValidationTestInlineModel, ValidationTestModel,
)
class CheckTestCase(SimpleTestCase):
def assertIsInvalid(self, model_admin, model, msg, id=None, hint=None, invalid_obj=None, admin_site=None):
if admin_site is None:
admin_site = AdminSite()
invalid_obj = invalid_obj or model_admin
admin_obj = model_admin(model, admin_site)
self.assertEqual(admin_obj.check(), [Error(msg, hint=hint, obj=invalid_obj, id=id)])
def assertIsInvalidRegexp(self, model_admin, model, msg, id=None, hint=None, invalid_obj=None):
"""
Same as assertIsInvalid but treats the given msg as a regexp.
"""
invalid_obj = invalid_obj or model_admin
admin_obj = model_admin(model, AdminSite())
errors = admin_obj.check()
self.assertEqual(len(errors), 1)
error = errors[0]
self.assertEqual(error.hint, hint)
self.assertEqual(error.obj, invalid_obj)
self.assertEqual(error.id, id)
self.assertRegex(error.msg, msg)
def assertIsValid(self, model_admin, model, admin_site=None):
if admin_site is None:
admin_site = AdminSite()
admin_obj = model_admin(model, admin_site)
self.assertEqual(admin_obj.check(), [])
class RawIdCheckTests(CheckTestCase):
def test_not_iterable(self):
class TestModelAdmin(ModelAdmin):
raw_id_fields = 10
self.assertIsInvalid(
TestModelAdmin, ValidationTestModel,
"The value of 'raw_id_fields' must be a list or tuple.",
'admin.E001'
)
def test_missing_field(self):
class TestModelAdmin(ModelAdmin):
raw_id_fields = ('non_existent_field',)
self.assertIsInvalid(
TestModelAdmin, ValidationTestModel,
"The value of 'raw_id_fields[0]' refers to 'non_existent_field', "
"which is not a field of 'modeladmin.ValidationTestModel'.",
'admin.E002'
)
def test_invalid_field_type(self):
class TestModelAdmin(ModelAdmin):
raw_id_fields = ('name',)
self.assertIsInvalid(
TestModelAdmin, ValidationTestModel,
"The value of 'raw_id_fields[0]' must be a foreign key or a "
"many-to-many field.",
'admin.E003'
)
def test_valid_case(self):
class TestModelAdmin(ModelAdmin):
raw_id_fields = ('users',)
self.assertIsValid(TestModelAdmin, ValidationTestModel)
def test_field_attname(self):
class TestModelAdmin(ModelAdmin):
raw_id_fields = ['band_id']
self.assertIsInvalid(
TestModelAdmin,
ValidationTestModel,
"The value of 'raw_id_fields[0]' refers to 'band_id', which is "
"not a field of 'modeladmin.ValidationTestModel'.",
'admin.E002',
)
class FieldsetsCheckTests(CheckTestCase):
def test_valid_case(self):
class TestModelAdmin(ModelAdmin):
fieldsets = (('General', {'fields': ('name',)}),)
self.assertIsValid(TestModelAdmin, ValidationTestModel)
def test_not_iterable(self):
class TestModelAdmin(ModelAdmin):
fieldsets = 10
self.assertIsInvalid(
TestModelAdmin, ValidationTestModel,
"The value of 'fieldsets' must be a list or tuple.",
'admin.E007'
)
def test_non_iterable_item(self):
class TestModelAdmin(ModelAdmin):
fieldsets = ({},)
self.assertIsInvalid(
TestModelAdmin, ValidationTestModel,
"The value of 'fieldsets[0]' must be a list or tuple.",
'admin.E008'
)
def test_item_not_a_pair(self):
class TestModelAdmin(ModelAdmin):
fieldsets = ((),)
self.assertIsInvalid(
TestModelAdmin, ValidationTestModel,
"The value of 'fieldsets[0]' must be of length 2.",
'admin.E009'
)
def test_second_element_of_item_not_a_dict(self):
class TestModelAdmin(ModelAdmin):
fieldsets = (('General', ()),)
self.assertIsInvalid(
TestModelAdmin, ValidationTestModel,
"The value of 'fieldsets[0][1]' must be a dictionary.",
'admin.E010'
)
def test_missing_fields_key(self):
class TestModelAdmin(ModelAdmin):
fieldsets = (('General', {}),)
self.assertIsInvalid(
TestModelAdmin, ValidationTestModel,
"The value of 'fieldsets[0][1]' must contain the key 'fields'.",
'admin.E011'
)
class TestModelAdmin(ModelAdmin):
fieldsets = (('General', {'fields': ('name',)}),)
self.assertIsValid(TestModelAdmin, ValidationTestModel)
def test_specified_both_fields_and_fieldsets(self):
class TestModelAdmin(ModelAdmin):
fieldsets = (('General', {'fields': ('name',)}),)
fields = ['name']
self.assertIsInvalid(
TestModelAdmin, ValidationTestModel,
"Both 'fieldsets' and 'fields' are specified.",
'admin.E005'
)
def test_duplicate_fields(self):
class TestModelAdmin(ModelAdmin):
fieldsets = [(None, {'fields': ['name', 'name']})]
self.assertIsInvalid(
TestModelAdmin, ValidationTestModel,
"There are duplicate field(s) in 'fieldsets[0][1]'.",
'admin.E012'
)
def test_duplicate_fields_in_fieldsets(self):
class TestModelAdmin(ModelAdmin):
fieldsets = [
(None, {'fields': ['name']}),
(None, {'fields': ['name']}),
]
self.assertIsInvalid(
TestModelAdmin, ValidationTestModel,
"There are duplicate field(s) in 'fieldsets[1][1]'.",
'admin.E012'
)
def test_fieldsets_with_custom_form_validation(self):
class BandAdmin(ModelAdmin):
fieldsets = (('Band', {'fields': ('name',)}),)
self.assertIsValid(BandAdmin, Band)
class FieldsCheckTests(CheckTestCase):
def test_duplicate_fields_in_fields(self):
class TestModelAdmin(ModelAdmin):
fields = ['name', 'name']
self.assertIsInvalid(
TestModelAdmin, ValidationTestModel,
"The value of 'fields' contains duplicate field(s).",
'admin.E006'
)
def test_inline(self):
class ValidationTestInline(TabularInline):
model = ValidationTestInlineModel
fields = 10
class TestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
self.assertIsInvalid(
TestModelAdmin, ValidationTestModel,
"The value of 'fields' must be a list or tuple.",
'admin.E004',
invalid_obj=ValidationTestInline
)
class FormCheckTests(CheckTestCase):
def test_invalid_type(self):
class FakeForm:
pass
class TestModelAdmin(ModelAdmin):
form = FakeForm
class TestModelAdminWithNoForm(ModelAdmin):
form = 'not a form'
for model_admin in (TestModelAdmin, TestModelAdminWithNoForm):
with self.subTest(model_admin):
self.assertIsInvalid(
model_admin, ValidationTestModel,
"The value of 'form' must inherit from 'BaseModelForm'.",
'admin.E016'
)
def test_fieldsets_with_custom_form_validation(self):
class BandAdmin(ModelAdmin):
fieldsets = (('Band', {'fields': ('name',)}),)
self.assertIsValid(BandAdmin, Band)
def test_valid_case(self):
class AdminBandForm(forms.ModelForm):
delete = forms.BooleanField()
class BandAdmin(ModelAdmin):
form = AdminBandForm
fieldsets = (
('Band', {
'fields': ('name', 'bio', 'sign_date', 'delete')
}),
)
self.assertIsValid(BandAdmin, Band)
class FilterVerticalCheckTests(CheckTestCase):
def test_not_iterable(self):
class TestModelAdmin(ModelAdmin):
filter_vertical = 10
self.assertIsInvalid(
TestModelAdmin, ValidationTestModel,
"The value of 'filter_vertical' must be a list or tuple.",
'admin.E017'
)
def test_missing_field(self):
class TestModelAdmin(ModelAdmin):
filter_vertical = ('non_existent_field',)
self.assertIsInvalid(
TestModelAdmin, ValidationTestModel,
"The value of 'filter_vertical[0]' refers to 'non_existent_field', "
"which is not a field of 'modeladmin.ValidationTestModel'.",
'admin.E019'
)
def test_invalid_field_type(self):
class TestModelAdmin(ModelAdmin):
filter_vertical = ('name',)
self.assertIsInvalid(
TestModelAdmin, ValidationTestModel,
"The value of 'filter_vertical[0]' must be a many-to-many field.",
'admin.E020'
)
def test_valid_case(self):
class TestModelAdmin(ModelAdmin):
filter_vertical = ('users',)
self.assertIsValid(TestModelAdmin, ValidationTestModel)
class FilterHorizontalCheckTests(CheckTestCase):
def test_not_iterable(self):
class TestModelAdmin(ModelAdmin):
filter_horizontal = 10
self.assertIsInvalid(
TestModelAdmin, ValidationTestModel,
"The value of 'filter_horizontal' must be a list or tuple.",
'admin.E018'
)
def test_missing_field(self):
class TestModelAdmin(ModelAdmin):
filter_horizontal = ('non_existent_field',)
self.assertIsInvalid(
TestModelAdmin, ValidationTestModel,
"The value of 'filter_horizontal[0]' refers to 'non_existent_field', "
"which is not a field of 'modeladmin.ValidationTestModel'.",
'admin.E019'
)
def test_invalid_field_type(self):
class TestModelAdmin(ModelAdmin):
filter_horizontal = ('name',)
self.assertIsInvalid(
TestModelAdmin, ValidationTestModel,
"The value of 'filter_horizontal[0]' must be a many-to-many field.",
'admin.E020'
)
def test_valid_case(self):
class TestModelAdmin(ModelAdmin):
filter_horizontal = ('users',)
self.assertIsValid(TestModelAdmin, ValidationTestModel)
class RadioFieldsCheckTests(CheckTestCase):
def test_not_dictionary(self):
class TestModelAdmin(ModelAdmin):
radio_fields = ()
self.assertIsInvalid(
TestModelAdmin, ValidationTestModel,
"The value of 'radio_fields' must be a dictionary.",
'admin.E021'
)
def test_missing_field(self):
class TestModelAdmin(ModelAdmin):
radio_fields = {'non_existent_field': VERTICAL}
self.assertIsInvalid(
TestModelAdmin, ValidationTestModel,
"The value of 'radio_fields' refers to 'non_existent_field', "
"which is not a field of 'modeladmin.ValidationTestModel'.",
'admin.E022'
)
def test_invalid_field_type(self):
class TestModelAdmin(ModelAdmin):
radio_fields = {'name': VERTICAL}
self.assertIsInvalid(
TestModelAdmin, ValidationTestModel,
"The value of 'radio_fields' refers to 'name', which is not an instance "
"of ForeignKey, and does not have a 'choices' definition.",
'admin.E023'
)
def test_invalid_value(self):
class TestModelAdmin(ModelAdmin):
radio_fields = {'state': None}
self.assertIsInvalid(
TestModelAdmin, ValidationTestModel,
"The value of 'radio_fields[\"state\"]' must be either admin.HORIZONTAL or admin.VERTICAL.",
'admin.E024'
)
def test_valid_case(self):
class TestModelAdmin(ModelAdmin):
radio_fields = {'state': VERTICAL}
self.assertIsValid(TestModelAdmin, ValidationTestModel)
class PrepopulatedFieldsCheckTests(CheckTestCase):
def test_not_list_or_tuple(self):
class TestModelAdmin(ModelAdmin):
prepopulated_fields = {'slug': 'test'}
self.assertIsInvalid(
TestModelAdmin, ValidationTestModel,
'The value of \'prepopulated_fields["slug"]\' must be a list '
'or tuple.',
'admin.E029'
)
def test_not_dictionary(self):
class TestModelAdmin(ModelAdmin):
prepopulated_fields = ()
self.assertIsInvalid(
TestModelAdmin, ValidationTestModel,
"The value of 'prepopulated_fields' must be a dictionary.",
'admin.E026'
)
def test_missing_field(self):
class TestModelAdmin(ModelAdmin):
prepopulated_fields = {'non_existent_field': ('slug',)}
self.assertIsInvalid(
TestModelAdmin, ValidationTestModel,
"The value of 'prepopulated_fields' refers to 'non_existent_field', "
"which is not a field of 'modeladmin.ValidationTestModel'.",
'admin.E027'
)
def test_missing_field_again(self):
class TestModelAdmin(ModelAdmin):
prepopulated_fields = {'slug': ('non_existent_field',)}
self.assertIsInvalid(
TestModelAdmin, ValidationTestModel,
"The value of 'prepopulated_fields[\"slug\"][0]' refers to 'non_existent_field', "
"which is not a field of 'modeladmin.ValidationTestModel'.",
'admin.E030'
)
def test_invalid_field_type(self):
class TestModelAdmin(ModelAdmin):
prepopulated_fields = {'users': ('name',)}
self.assertIsInvalid(
TestModelAdmin, ValidationTestModel,
"The value of 'prepopulated_fields' refers to 'users', which must not be "
"a DateTimeField, a ForeignKey, a OneToOneField, or a ManyToManyField.",
'admin.E028'
)
def test_valid_case(self):
class TestModelAdmin(ModelAdmin):
prepopulated_fields = {'slug': ('name',)}
self.assertIsValid(TestModelAdmin, ValidationTestModel)
def test_one_to_one_field(self):
class TestModelAdmin(ModelAdmin):
prepopulated_fields = {'best_friend': ('name',)}
self.assertIsInvalid(
TestModelAdmin, ValidationTestModel,
"The value of 'prepopulated_fields' refers to 'best_friend', which must not be "
"a DateTimeField, a ForeignKey, a OneToOneField, or a ManyToManyField.",
'admin.E028'
)
class ListDisplayTests(CheckTestCase):
def test_not_iterable(self):
class TestModelAdmin(ModelAdmin):
list_display = 10
self.assertIsInvalid(
TestModelAdmin, ValidationTestModel,
"The value of 'list_display' must be a list or tuple.",
'admin.E107'
)
def test_missing_field(self):
class TestModelAdmin(ModelAdmin):
list_display = ('non_existent_field',)
self.assertIsInvalid(
TestModelAdmin, ValidationTestModel,
"The value of 'list_display[0]' refers to 'non_existent_field', "
"which is not a callable, an attribute of 'TestModelAdmin', "
"or an attribute or method on 'modeladmin.ValidationTestModel'.",
'admin.E108'
)
def test_invalid_field_type(self):
class TestModelAdmin(ModelAdmin):
list_display = ('users',)
self.assertIsInvalid(
TestModelAdmin, ValidationTestModel,
"The value of 'list_display[0]' must not be a ManyToManyField.",
'admin.E109'
)
def test_valid_case(self):
@admin.display
def a_callable(obj):
pass
class TestModelAdmin(ModelAdmin):
@admin.display
def a_method(self, obj):
pass
list_display = ('name', 'decade_published_in', 'a_method', a_callable)
self.assertIsValid(TestModelAdmin, ValidationTestModel)
def test_valid_field_accessible_via_instance(self):
class PositionField(Field):
"""Custom field accessible only via instance."""
def contribute_to_class(self, cls, name):
super().contribute_to_class(cls, name)
setattr(cls, self.name, self)
def __get__(self, instance, owner):
if instance is None:
raise AttributeError()
class TestModel(Model):
field = PositionField()
class TestModelAdmin(ModelAdmin):
list_display = ('field',)
self.assertIsValid(TestModelAdmin, TestModel)
class ListDisplayLinksCheckTests(CheckTestCase):
def test_not_iterable(self):
class TestModelAdmin(ModelAdmin):
list_display_links = 10
self.assertIsInvalid(
TestModelAdmin, ValidationTestModel,
"The value of 'list_display_links' must be a list, a tuple, or None.",
'admin.E110'
)
def test_missing_field(self):
class TestModelAdmin(ModelAdmin):
list_display_links = ('non_existent_field',)
self.assertIsInvalid(
TestModelAdmin, ValidationTestModel, (
"The value of 'list_display_links[0]' refers to "
"'non_existent_field', which is not defined in 'list_display'."
), 'admin.E111'
)
def test_missing_in_list_display(self):
class TestModelAdmin(ModelAdmin):
list_display_links = ('name',)
self.assertIsInvalid(
TestModelAdmin, ValidationTestModel,
"The value of 'list_display_links[0]' refers to 'name', which is not defined in 'list_display'.",
'admin.E111'
)
def test_valid_case(self):
@admin.display
def a_callable(obj):
pass
class TestModelAdmin(ModelAdmin):
@admin.display
def a_method(self, obj):
pass
list_display = ('name', 'decade_published_in', 'a_method', a_callable)
list_display_links = ('name', 'decade_published_in', 'a_method', a_callable)
self.assertIsValid(TestModelAdmin, ValidationTestModel)
def test_None_is_valid_case(self):
class TestModelAdmin(ModelAdmin):
list_display_links = None
self.assertIsValid(TestModelAdmin, ValidationTestModel)
def test_list_display_links_check_skipped_if_get_list_display_overridden(self):
"""
list_display_links check is skipped if get_list_display() is overridden.
"""
class TestModelAdmin(ModelAdmin):
list_display_links = ['name', 'subtitle']
def get_list_display(self, request):
pass
self.assertIsValid(TestModelAdmin, ValidationTestModel)
def test_list_display_link_checked_for_list_tuple_if_get_list_display_overridden(self):
"""
list_display_links is checked for list/tuple/None even if
get_list_display() is overridden.
"""
class TestModelAdmin(ModelAdmin):
list_display_links = 'non-list/tuple'
def get_list_display(self, request):
pass
self.assertIsInvalid(
TestModelAdmin, ValidationTestModel,
"The value of 'list_display_links' must be a list, a tuple, or None.",
'admin.E110'
)
class ListFilterTests(CheckTestCase):
def test_list_filter_validation(self):
class TestModelAdmin(ModelAdmin):
list_filter = 10
self.assertIsInvalid(
TestModelAdmin, ValidationTestModel,
"The value of 'list_filter' must be a list or tuple.",
'admin.E112'
)
def test_not_list_filter_class(self):
class TestModelAdmin(ModelAdmin):
list_filter = ['RandomClass']
self.assertIsInvalid(
TestModelAdmin, ValidationTestModel,
"The value of 'list_filter[0]' refers to 'RandomClass', which "
"does not refer to a Field.",
'admin.E116'
)
def test_callable(self):
def random_callable():
pass
class TestModelAdmin(ModelAdmin):
list_filter = [random_callable]
self.assertIsInvalid(
TestModelAdmin, ValidationTestModel,
"The value of 'list_filter[0]' must inherit from 'ListFilter'.",
'admin.E113'
)
def test_not_callable(self):
class TestModelAdmin(ModelAdmin):
list_filter = [[42, 42]]
self.assertIsInvalid(
TestModelAdmin, ValidationTestModel,
"The value of 'list_filter[0][1]' must inherit from 'FieldListFilter'.",
'admin.E115'
)
def test_missing_field(self):
class TestModelAdmin(ModelAdmin):
list_filter = ('non_existent_field',)
self.assertIsInvalid(
TestModelAdmin, ValidationTestModel,
"The value of 'list_filter[0]' refers to 'non_existent_field', "
"which does not refer to a Field.",
'admin.E116'
)
def test_not_filter(self):
class RandomClass:
pass
class TestModelAdmin(ModelAdmin):
list_filter = (RandomClass,)
self.assertIsInvalid(
TestModelAdmin, ValidationTestModel,
"The value of 'list_filter[0]' must inherit from 'ListFilter'.",
'admin.E113'
)
def test_not_filter_again(self):
class RandomClass:
pass
class TestModelAdmin(ModelAdmin):
list_filter = (('is_active', RandomClass),)
self.assertIsInvalid(
TestModelAdmin, ValidationTestModel,
"The value of 'list_filter[0][1]' must inherit from 'FieldListFilter'.",
'admin.E115'
)
def test_not_filter_again_again(self):
class AwesomeFilter(SimpleListFilter):
def get_title(self):
return 'awesomeness'
def get_choices(self, request):
return (('bit', 'A bit awesome'), ('very', 'Very awesome'))
def get_queryset(self, cl, qs):
return qs
class TestModelAdmin(ModelAdmin):
list_filter = (('is_active', AwesomeFilter),)
self.assertIsInvalid(
TestModelAdmin, ValidationTestModel,
"The value of 'list_filter[0][1]' must inherit from 'FieldListFilter'.",
'admin.E115'
)
def test_list_filter_is_func(self):
def get_filter():
pass
class TestModelAdmin(ModelAdmin):
list_filter = [get_filter]
self.assertIsInvalid(
TestModelAdmin, ValidationTestModel,
"The value of 'list_filter[0]' must inherit from 'ListFilter'.",
'admin.E113'
)
def test_not_associated_with_field_name(self):
class TestModelAdmin(ModelAdmin):
list_filter = (BooleanFieldListFilter,)
self.assertIsInvalid(
TestModelAdmin, ValidationTestModel,
"The value of 'list_filter[0]' must not inherit from 'FieldListFilter'.",
'admin.E114'
)
def test_valid_case(self):
class AwesomeFilter(SimpleListFilter):
def get_title(self):
return 'awesomeness'
def get_choices(self, request):
return (('bit', 'A bit awesome'), ('very', 'Very awesome'))
def get_queryset(self, cl, qs):
return qs
class TestModelAdmin(ModelAdmin):
list_filter = ('is_active', AwesomeFilter, ('is_active', BooleanFieldListFilter), 'no')
self.assertIsValid(TestModelAdmin, ValidationTestModel)
class ListPerPageCheckTests(CheckTestCase):
def test_not_integer(self):
class TestModelAdmin(ModelAdmin):
list_per_page = 'hello'
self.assertIsInvalid(
TestModelAdmin, ValidationTestModel,
"The value of 'list_per_page' must be an integer.",
'admin.E118'
)
def test_valid_case(self):
class TestModelAdmin(ModelAdmin):
list_per_page = 100
self.assertIsValid(TestModelAdmin, ValidationTestModel)
class ListMaxShowAllCheckTests(CheckTestCase):
def test_not_integer(self):
class TestModelAdmin(ModelAdmin):
list_max_show_all = 'hello'
self.assertIsInvalid(
TestModelAdmin, ValidationTestModel,
"The value of 'list_max_show_all' must be an integer.",
'admin.E119'
)
def test_valid_case(self):
class TestModelAdmin(ModelAdmin):
list_max_show_all = 200
self.assertIsValid(TestModelAdmin, ValidationTestModel)
class SearchFieldsCheckTests(CheckTestCase):
def test_not_iterable(self):
class TestModelAdmin(ModelAdmin):
search_fields = 10
self.assertIsInvalid(
TestModelAdmin, ValidationTestModel,
"The value of 'search_fields' must be a list or tuple.",
'admin.E126'
)
class DateHierarchyCheckTests(CheckTestCase):
def test_missing_field(self):
class TestModelAdmin(ModelAdmin):
date_hierarchy = 'non_existent_field'
self.assertIsInvalid(
TestModelAdmin, ValidationTestModel,
"The value of 'date_hierarchy' refers to 'non_existent_field', "
"which does not refer to a Field.",
'admin.E127'
)
def test_invalid_field_type(self):
class TestModelAdmin(ModelAdmin):
date_hierarchy = 'name'
self.assertIsInvalid(
TestModelAdmin, ValidationTestModel,
"The value of 'date_hierarchy' must be a DateField or DateTimeField.",
'admin.E128'
)
def test_valid_case(self):
class TestModelAdmin(ModelAdmin):
date_hierarchy = 'pub_date'
self.assertIsValid(TestModelAdmin, ValidationTestModel)
def test_related_valid_case(self):
class TestModelAdmin(ModelAdmin):
date_hierarchy = 'band__sign_date'
self.assertIsValid(TestModelAdmin, ValidationTestModel)
def test_related_invalid_field_type(self):
class TestModelAdmin(ModelAdmin):
date_hierarchy = 'band__name'
self.assertIsInvalid(
TestModelAdmin, ValidationTestModel,
"The value of 'date_hierarchy' must be a DateField or DateTimeField.",
'admin.E128'
)
class OrderingCheckTests(CheckTestCase):
def test_not_iterable(self):
class TestModelAdmin(ModelAdmin):
ordering = 10
self.assertIsInvalid(
TestModelAdmin, ValidationTestModel,
"The value of 'ordering' must be a list or tuple.",
'admin.E031'
)
class TestModelAdmin(ModelAdmin):
ordering = ('non_existent_field',)
self.assertIsInvalid(
TestModelAdmin, ValidationTestModel,
"The value of 'ordering[0]' refers to 'non_existent_field', "
"which is not a field of 'modeladmin.ValidationTestModel'.",
'admin.E033'
)
def test_random_marker_not_alone(self):
class TestModelAdmin(ModelAdmin):
ordering = ('?', 'name')
self.assertIsInvalid(
TestModelAdmin, ValidationTestModel,
"The value of 'ordering' has the random ordering marker '?', but contains "
"other fields as well.",
'admin.E032',
hint='Either remove the "?", or remove the other fields.'
)
def test_valid_random_marker_case(self):
class TestModelAdmin(ModelAdmin):
ordering = ('?',)
self.assertIsValid(TestModelAdmin, ValidationTestModel)
def test_valid_complex_case(self):
class TestModelAdmin(ModelAdmin):
ordering = ('band__name',)
self.assertIsValid(TestModelAdmin, ValidationTestModel)
def test_valid_case(self):
class TestModelAdmin(ModelAdmin):
ordering = ('name', 'pk')
self.assertIsValid(TestModelAdmin, ValidationTestModel)
def test_invalid_expression(self):
class TestModelAdmin(ModelAdmin):
ordering = (F('nonexistent'), )
self.assertIsInvalid(
TestModelAdmin, ValidationTestModel,
"The value of 'ordering[0]' refers to 'nonexistent', which is not "
"a field of 'modeladmin.ValidationTestModel'.",
'admin.E033'
)
def test_valid_expression(self):
class TestModelAdmin(ModelAdmin):
ordering = (Upper('name'), Upper('band__name').desc())
self.assertIsValid(TestModelAdmin, ValidationTestModel)
class ListSelectRelatedCheckTests(CheckTestCase):
def test_invalid_type(self):
class TestModelAdmin(ModelAdmin):
list_select_related = 1
self.assertIsInvalid(
TestModelAdmin, ValidationTestModel,
"The value of 'list_select_related' must be a boolean, tuple or list.",
'admin.E117'
)
def test_valid_case(self):
class TestModelAdmin(ModelAdmin):
list_select_related = False
self.assertIsValid(TestModelAdmin, ValidationTestModel)
class SaveAsCheckTests(CheckTestCase):
def test_not_boolean(self):
class TestModelAdmin(ModelAdmin):
save_as = 1
self.assertIsInvalid(
TestModelAdmin, ValidationTestModel,
"The value of 'save_as' must be a boolean.",
'admin.E101'
)
def test_valid_case(self):
class TestModelAdmin(ModelAdmin):
save_as = True
self.assertIsValid(TestModelAdmin, ValidationTestModel)
class SaveOnTopCheckTests(CheckTestCase):
def test_not_boolean(self):
class TestModelAdmin(ModelAdmin):
save_on_top = 1
self.assertIsInvalid(
TestModelAdmin, ValidationTestModel,
"The value of 'save_on_top' must be a boolean.",
'admin.E102'
)
def test_valid_case(self):
class TestModelAdmin(ModelAdmin):
save_on_top = True
self.assertIsValid(TestModelAdmin, ValidationTestModel)
class InlinesCheckTests(CheckTestCase):
def test_not_iterable(self):
class TestModelAdmin(ModelAdmin):
inlines = 10
self.assertIsInvalid(
TestModelAdmin, ValidationTestModel,
"The value of 'inlines' must be a list or tuple.",
'admin.E103'
)
def test_not_correct_inline_field(self):
class TestModelAdmin(ModelAdmin):
inlines = [42]
self.assertIsInvalidRegexp(
TestModelAdmin, ValidationTestModel,
r"'.*\.TestModelAdmin' must inherit from 'InlineModelAdmin'\.",
'admin.E104'
)
def test_not_model_admin(self):
class ValidationTestInline:
pass
class TestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
self.assertIsInvalidRegexp(
TestModelAdmin, ValidationTestModel,
r"'.*\.ValidationTestInline' must inherit from 'InlineModelAdmin'\.",
'admin.E104'
)
def test_missing_model_field(self):
class ValidationTestInline(TabularInline):
pass
class TestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
self.assertIsInvalidRegexp(
TestModelAdmin, ValidationTestModel,
r"'.*\.ValidationTestInline' must have a 'model' attribute\.",
'admin.E105'
)
def test_invalid_model_type(self):
class SomethingBad:
pass
class ValidationTestInline(TabularInline):
model = SomethingBad
class TestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
self.assertIsInvalidRegexp(
TestModelAdmin, ValidationTestModel,
r"The value of '.*\.ValidationTestInline.model' must be a Model\.",
'admin.E106'
)
def test_invalid_model(self):
class ValidationTestInline(TabularInline):
model = 'Not a class'
class TestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
self.assertIsInvalidRegexp(
TestModelAdmin, ValidationTestModel,
r"The value of '.*\.ValidationTestInline.model' must be a Model\.",
'admin.E106'
)
def test_invalid_callable(self):
def random_obj():
pass
class TestModelAdmin(ModelAdmin):
inlines = [random_obj]
self.assertIsInvalidRegexp(
TestModelAdmin, ValidationTestModel,
r"'.*\.random_obj' must inherit from 'InlineModelAdmin'\.",
'admin.E104'
)
def test_valid_case(self):
class ValidationTestInline(TabularInline):
model = ValidationTestInlineModel
class TestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
self.assertIsValid(TestModelAdmin, ValidationTestModel)
class FkNameCheckTests(CheckTestCase):
def test_missing_field(self):
class ValidationTestInline(TabularInline):
model = ValidationTestInlineModel
fk_name = 'non_existent_field'
class TestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
self.assertIsInvalid(
TestModelAdmin, ValidationTestModel,
"'modeladmin.ValidationTestInlineModel' has no field named 'non_existent_field'.",
'admin.E202',
invalid_obj=ValidationTestInline
)
def test_valid_case(self):
class ValidationTestInline(TabularInline):
model = ValidationTestInlineModel
fk_name = 'parent'
class TestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
self.assertIsValid(TestModelAdmin, ValidationTestModel)
class ExtraCheckTests(CheckTestCase):
def test_not_integer(self):
class ValidationTestInline(TabularInline):
model = ValidationTestInlineModel
extra = 'hello'
class TestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
self.assertIsInvalid(
TestModelAdmin, ValidationTestModel,
"The value of 'extra' must be an integer.",
'admin.E203',
invalid_obj=ValidationTestInline
)
def test_valid_case(self):
class ValidationTestInline(TabularInline):
model = ValidationTestInlineModel
extra = 2
class TestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
self.assertIsValid(TestModelAdmin, ValidationTestModel)
class MaxNumCheckTests(CheckTestCase):
def test_not_integer(self):
class ValidationTestInline(TabularInline):
model = ValidationTestInlineModel
max_num = 'hello'
class TestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
self.assertIsInvalid(
TestModelAdmin, ValidationTestModel,
"The value of 'max_num' must be an integer.",
'admin.E204',
invalid_obj=ValidationTestInline
)
def test_valid_case(self):
class ValidationTestInline(TabularInline):
model = ValidationTestInlineModel
max_num = 2
class TestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
self.assertIsValid(TestModelAdmin, ValidationTestModel)
class MinNumCheckTests(CheckTestCase):
def test_not_integer(self):
class ValidationTestInline(TabularInline):
model = ValidationTestInlineModel
min_num = 'hello'
class TestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
self.assertIsInvalid(
TestModelAdmin, ValidationTestModel,
"The value of 'min_num' must be an integer.",
'admin.E205',
invalid_obj=ValidationTestInline
)
def test_valid_case(self):
class ValidationTestInline(TabularInline):
model = ValidationTestInlineModel
min_num = 2
class TestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
self.assertIsValid(TestModelAdmin, ValidationTestModel)
class FormsetCheckTests(CheckTestCase):
def test_invalid_type(self):
class FakeFormSet:
pass
class ValidationTestInline(TabularInline):
model = ValidationTestInlineModel
formset = FakeFormSet
class TestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
self.assertIsInvalid(
TestModelAdmin, ValidationTestModel,
"The value of 'formset' must inherit from 'BaseModelFormSet'.",
'admin.E206',
invalid_obj=ValidationTestInline
)
def test_inline_without_formset_class(self):
class ValidationTestInlineWithoutFormsetClass(TabularInline):
model = ValidationTestInlineModel
formset = 'Not a FormSet Class'
class TestModelAdminWithoutFormsetClass(ModelAdmin):
inlines = [ValidationTestInlineWithoutFormsetClass]
self.assertIsInvalid(
TestModelAdminWithoutFormsetClass, ValidationTestModel,
"The value of 'formset' must inherit from 'BaseModelFormSet'.",
'admin.E206',
invalid_obj=ValidationTestInlineWithoutFormsetClass
)
def test_valid_case(self):
class RealModelFormSet(BaseModelFormSet):
pass
class ValidationTestInline(TabularInline):
model = ValidationTestInlineModel
formset = RealModelFormSet
class TestModelAdmin(ModelAdmin):
inlines = [ValidationTestInline]
self.assertIsValid(TestModelAdmin, ValidationTestModel)
class ListDisplayEditableTests(CheckTestCase):
def test_list_display_links_is_none(self):
"""
list_display and list_editable can contain the same values
when list_display_links is None
"""
class ProductAdmin(ModelAdmin):
list_display = ['name', 'slug', 'pub_date']
list_editable = list_display
list_display_links = None
self.assertIsValid(ProductAdmin, ValidationTestModel)
def test_list_display_first_item_same_as_list_editable_first_item(self):
"""
The first item in list_display can be the same as the first in
list_editable.
"""
class ProductAdmin(ModelAdmin):
list_display = ['name', 'slug', 'pub_date']
list_editable = ['name', 'slug']
list_display_links = ['pub_date']
self.assertIsValid(ProductAdmin, ValidationTestModel)
def test_list_display_first_item_in_list_editable(self):
"""
The first item in list_display can be in list_editable as long as
list_display_links is defined.
"""
class ProductAdmin(ModelAdmin):
list_display = ['name', 'slug', 'pub_date']
list_editable = ['slug', 'name']
list_display_links = ['pub_date']
self.assertIsValid(ProductAdmin, ValidationTestModel)
def test_list_display_first_item_same_as_list_editable_no_list_display_links(self):
"""
The first item in list_display cannot be the same as the first item
in list_editable if list_display_links is not defined.
"""
class ProductAdmin(ModelAdmin):
list_display = ['name']
list_editable = ['name']
self.assertIsInvalid(
ProductAdmin, ValidationTestModel,
"The value of 'list_editable[0]' refers to the first field "
"in 'list_display' ('name'), which cannot be used unless "
"'list_display_links' is set.",
id='admin.E124',
)
def test_list_display_first_item_in_list_editable_no_list_display_links(self):
"""
The first item in list_display cannot be in list_editable if
list_display_links isn't defined.
"""
class ProductAdmin(ModelAdmin):
list_display = ['name', 'slug', 'pub_date']
list_editable = ['slug', 'name']
self.assertIsInvalid(
ProductAdmin, ValidationTestModel,
"The value of 'list_editable[1]' refers to the first field "
"in 'list_display' ('name'), which cannot be used unless "
"'list_display_links' is set.",
id='admin.E124',
)
def test_both_list_editable_and_list_display_links(self):
class ProductAdmin(ModelAdmin):
list_editable = ('name',)
list_display = ('name',)
list_display_links = ('name',)
self.assertIsInvalid(
ProductAdmin, ValidationTestModel,
"The value of 'name' cannot be in both 'list_editable' and "
"'list_display_links'.",
id='admin.E123',
)
class AutocompleteFieldsTests(CheckTestCase):
def test_autocomplete_e036(self):
class Admin(ModelAdmin):
autocomplete_fields = 'name'
self.assertIsInvalid(
Admin, Band,
msg="The value of 'autocomplete_fields' must be a list or tuple.",
id='admin.E036',
invalid_obj=Admin,
)
def test_autocomplete_e037(self):
class Admin(ModelAdmin):
autocomplete_fields = ('nonexistent',)
self.assertIsInvalid(
Admin, ValidationTestModel,
msg=(
"The value of 'autocomplete_fields[0]' refers to 'nonexistent', "
"which is not a field of 'modeladmin.ValidationTestModel'."
),
id='admin.E037',
invalid_obj=Admin,
)
def test_autocomplete_e38(self):
class Admin(ModelAdmin):
autocomplete_fields = ('name',)
self.assertIsInvalid(
Admin, ValidationTestModel,
msg=(
"The value of 'autocomplete_fields[0]' must be a foreign "
"key or a many-to-many field."
),
id='admin.E038',
invalid_obj=Admin,
)
def test_autocomplete_e039(self):
class Admin(ModelAdmin):
autocomplete_fields = ('band',)
self.assertIsInvalid(
Admin, Song,
msg=(
'An admin for model "Band" has to be registered '
'to be referenced by Admin.autocomplete_fields.'
),
id='admin.E039',
invalid_obj=Admin,
)
def test_autocomplete_e040(self):
class NoSearchFieldsAdmin(ModelAdmin):
pass
class AutocompleteAdmin(ModelAdmin):
autocomplete_fields = ('featuring',)
site = AdminSite()
site.register(Band, NoSearchFieldsAdmin)
self.assertIsInvalid(
AutocompleteAdmin, Song,
msg=(
'NoSearchFieldsAdmin must define "search_fields", because '
'it\'s referenced by AutocompleteAdmin.autocomplete_fields.'
),
id='admin.E040',
invalid_obj=AutocompleteAdmin,
admin_site=site,
)
def test_autocomplete_is_valid(self):
class SearchFieldsAdmin(ModelAdmin):
search_fields = 'name'
class AutocompleteAdmin(ModelAdmin):
autocomplete_fields = ('featuring',)
site = AdminSite()
site.register(Band, SearchFieldsAdmin)
self.assertIsValid(AutocompleteAdmin, Song, admin_site=site)
def test_autocomplete_is_onetoone(self):
class UserAdmin(ModelAdmin):
search_fields = ('name',)
class Admin(ModelAdmin):
autocomplete_fields = ('best_friend',)
site = AdminSite()
site.register(User, UserAdmin)
self.assertIsValid(Admin, ValidationTestModel, admin_site=site)
class ActionsCheckTests(CheckTestCase):
def test_custom_permissions_require_matching_has_method(self):
@admin.action(permissions=['custom'])
def custom_permission_action(modeladmin, request, queryset):
pass
class BandAdmin(ModelAdmin):
actions = (custom_permission_action,)
self.assertIsInvalid(
BandAdmin, Band,
'BandAdmin must define a has_custom_permission() method for the '
'custom_permission_action action.',
id='admin.E129',
)
def test_actions_not_unique(self):
@admin.action
def action(modeladmin, request, queryset):
pass
class BandAdmin(ModelAdmin):
actions = (action, action)
self.assertIsInvalid(
BandAdmin, Band,
"__name__ attributes of actions defined in BandAdmin must be "
"unique. Name 'action' is not unique.",
id='admin.E130',
)
def test_actions_unique(self):
@admin.action
def action1(modeladmin, request, queryset):
pass
@admin.action
def action2(modeladmin, request, queryset):
pass
class BandAdmin(ModelAdmin):
actions = (action1, action2)
self.assertIsValid(BandAdmin, Band)
| atul-bhouraskar/django | tests/modeladmin/test_checks.py | Python | bsd-3-clause | 46,499 |
import logging
from collections import Counter
from udata.commands import cli, header, success
log = logging.getLogger(__name__)
@cli.group('images')
def grp():
'''Images related operations'''
pass
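# Helper shared by the render command below: returns 1 when the image was
# successfully re-rendered and saved, 0 when the object had to be skipped,
# so callers can simply sum the results into the per-type counters.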
def render_or_skip(obj, attr):
try:
getattr(obj, attr).rerender()
obj.save()
return 1
except Exception as e:
log.warning('Skipped "%s": %s(%s)', obj, e.__class__.__name__, e)
return 0
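# Registers the `render` subcommand on the images group; with udata's click
# CLI this is presumably invoked as `udata images render` (assumption based
# on the @cli.group('images') declaration above).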
@grp.command()
def render():
'''Force (re)rendering stored images'''
from udata.core.organization.models import Organization
from udata.core.post.models import Post
from udata.core.reuse.models import Reuse
from udata.core.user.models import User
header('Rendering images')
count = Counter()
total = Counter()
organizations = Organization.objects(logo__exists=True)
total['orgs'] = organizations.count()
log.info('Processing {0} organizations logos'.format(total['orgs']))
for org in organizations:
count['orgs'] += render_or_skip(org, 'logo')
users = User.objects(avatar__exists=True)
total['users'] = users.count()
log.info('Processing {0} user avatars'.format(total['users']))
for user in users:
count['users'] += render_or_skip(user, 'avatar')
posts = Post.objects(image__exists=True)
total['posts'] = posts.count()
log.info('Processing {0} post images'.format(total['posts']))
for post in posts:
count['posts'] += render_or_skip(post, 'image')
reuses = Reuse.objects(image__exists=True)
total['reuses'] = reuses.count()
log.info('Processing {0} reuse images'.format(total['reuses']))
for reuse in reuses:
count['reuses'] += render_or_skip(reuse, 'image')
log.info('''Summary:
Organization logos: {count[orgs]}/{total[orgs]}
User avatars: {count[users]}/{total[users]}
Post images: {count[posts]}/{total[posts]}
Reuse images: {count[reuses]}/{total[reuses]}
'''.format(count=count, total=total))
success('Images rendered')
| opendatateam/udata | udata/commands/images.py | Python | agpl-3.0 | 2,037 |
#! /usr/bin/env python
# Triggers the webcam after getting a signal from the publisher pi.
import sys
from subprocess import call
from time import time, sleep
import datetime
# Global variables
GIT_BASE_DIRECTORY = "./snapshots/"
# Function that triggers the webcam, takes a picture
# and names the file with the current time and date
def snapshot():
global GIT_BASE_DIRECTORY
# Getting the year, month, day, hour, minute, and second
currentTime = datetime.datetime.now()
    # Creating a filename of the form visitor_<hour>:<minute>:<second>_<month>:<day>:<year>.jpg
snapshotFile = "visitor_%d:%d:%d_%d:%d:%d.jpg" % \
(currentTime.hour, currentTime.minute,\
currentTime.second, currentTime.month,\
currentTime.day, currentTime.year)
#now = datetime.datetime.now()
#snapshotFile = "visitor-%d:%d:%d-%d:%d:%d.jpg" % (now.year, now.month, now.day, now.hour, now.minute, now.second)
print "**************Taking picture**************"
# Creating the terminal command and executing it
snapshotCMDCommand = "fswebcam " + GIT_BASE_DIRECTORY + snapshotFile
snapshotReturnCode = call(snapshotCMDCommand, shell = True)
return snapshotFile
# Debugging
#print '\nTaking snapshot\n', snapshotCMDCommand
#print '\n', snapshotReturnCode
# Uploading the snapshot to the Git account
def uploadFileToGit():
global GIT_BASE_DIRECTORY
gitInfo = "git add " + GIT_BASE_DIRECTORY + \
"; git commit -m " + " \"Visitor image\" " \
+ GIT_BASE_DIRECTORY + " ; git push"
gitReturnCode = call(gitInfo, shell=True)
print "Git stuff return code is ", gitReturnCode
# Testing
#s = snapshot()
#print s
#uploadFileToGit()
| raiarun/HomeGuard | webcam_pi.py | Python | lgpl-2.1 | 1,703 |
import _plotly_utils.basevalidators
class MinexponentValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(
self,
plotly_name="minexponent",
parent_name="histogram.marker.colorbar",
**kwargs
):
super(MinexponentValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "colorbars"),
min=kwargs.pop("min", 0),
**kwargs
)
| plotly/plotly.py | packages/python/plotly/plotly/validators/histogram/marker/colorbar/_minexponent.py | Python | mit | 507 |
import pytest
from diofant import (Dummy, E, Float, GoldenRatio, I, Integer, Mod, Mul, Pow,
Rational, Symbol, Wild, acos, asin, cbrt, exp, false, log,
nan, oo, pi, simplify, sin, sqrt, zoo)
from diofant.abc import x, y
from diofant.core.facts import InconsistentAssumptions
__all__ = ()
def test_symbol_unset():
x = Symbol('x', extended_real=True, integer=True)
assert x.is_extended_real is True
assert x.is_integer is True
assert x.is_imaginary is None
assert x.is_noninteger is False
assert x.is_number is False
def test_zero():
z = Integer(0)
assert z.is_commutative is True
assert z.is_integer is True
assert z.is_rational is True
assert z.is_algebraic is True
assert z.is_transcendental is False
assert z.is_extended_real is True
assert z.is_complex is True
assert z.is_noninteger is False
assert z.is_irrational is False
assert z.is_imaginary is True
assert z.is_positive is False
assert z.is_negative is False
assert z.is_nonpositive is True
assert z.is_nonnegative is True
assert z.is_even is True
assert z.is_odd is False
assert z.is_finite is True
assert z.is_infinite is False
assert z.is_comparable is True
assert z.is_prime is False
assert z.is_composite is False
assert z.is_number is True
def test_one():
z = Integer(1)
assert z.is_commutative is True
assert z.is_integer is True
assert z.is_rational is True
assert z.is_algebraic is True
assert z.is_transcendental is False
assert z.is_extended_real is True
assert z.is_complex is True
assert z.is_noninteger is False
assert z.is_irrational is False
assert z.is_imaginary is False
assert z.is_positive is True
assert z.is_negative is False
assert z.is_nonpositive is False
assert z.is_nonnegative is True
assert z.is_even is False
assert z.is_odd is True
assert z.is_finite is True
assert z.is_infinite is False
assert z.is_comparable is True
assert z.is_prime is False
assert z.is_number is True
assert z.is_composite is False # issue sympy/sympy#8807
def test_negativeone():
z = Integer(-1)
assert z.is_commutative is True
assert z.is_integer is True
assert z.is_rational is True
assert z.is_algebraic is True
assert z.is_transcendental is False
assert z.is_extended_real is True
assert z.is_complex is True
assert z.is_noninteger is False
assert z.is_irrational is False
assert z.is_imaginary is False
assert z.is_positive is False
assert z.is_negative is True
assert z.is_nonpositive is True
assert z.is_nonnegative is False
assert z.is_even is False
assert z.is_odd is True
assert z.is_finite is True
assert z.is_infinite is False
assert z.is_comparable is True
assert z.is_prime is False
assert z.is_composite is False
assert z.is_number is True
def test_infinity():
assert oo.is_commutative is True
assert oo.is_integer is False
assert oo.is_rational is False
assert oo.is_algebraic is False
assert oo.is_transcendental is False
assert oo.is_extended_real is True
assert oo.is_complex is False
assert oo.is_noninteger is False
assert oo.is_irrational is False
assert oo.is_imaginary is False
assert oo.is_positive is True
assert oo.is_negative is False
assert oo.is_nonpositive is False
assert oo.is_nonnegative is True
assert oo.is_even is False
assert oo.is_odd is False
assert oo.is_finite is False
assert oo.is_infinite is True
assert oo.is_comparable is True
assert oo.is_prime is False
assert oo.is_composite is False
assert oo.is_number is True
assert oo.is_zero is False
assert oo.is_nonzero is True # issue sympy/sympy#21107
def test_neg_infinity():
mm = -oo
assert mm.is_commutative is True
assert mm.is_integer is False
assert mm.is_rational is False
assert mm.is_algebraic is False
assert mm.is_transcendental is False
assert mm.is_extended_real is True
assert mm.is_complex is False
assert mm.is_noninteger is False
assert mm.is_irrational is False
assert mm.is_imaginary is False
assert mm.is_positive is False
assert mm.is_negative is True
assert mm.is_nonpositive is True
assert mm.is_nonnegative is False
assert mm.is_even is False
assert mm.is_odd is False
assert mm.is_finite is False
assert mm.is_infinite is True
assert mm.is_comparable is True
assert mm.is_prime is False
assert mm.is_composite is False
assert mm.is_number is True
def test_zoo():
assert zoo.is_complex is False
assert zoo.is_real is False
assert zoo.is_prime is False
assert zoo.is_infinite
def test_nan():
assert nan.is_commutative is True
assert nan.is_integer is False
assert nan.is_rational is False
assert nan.is_algebraic is False
assert nan.is_transcendental is False
assert nan.is_extended_real is None
assert nan.is_complex is False
assert nan.is_noninteger is False
assert nan.is_irrational is False
assert nan.is_imaginary is False
assert nan.is_positive is None
assert nan.is_negative is None
assert nan.is_nonpositive is None
assert nan.is_nonnegative is None
assert nan.is_even is False
assert nan.is_odd is False
assert nan.is_finite is False
assert nan.is_infinite is None
assert nan.is_comparable is False
assert nan.is_prime is False
assert nan.is_composite is False
assert nan.is_number is True
def test_pos_rational():
r = Rational(3, 4)
assert r.is_commutative is True
assert r.is_integer is False
assert r.is_rational is True
assert r.is_algebraic is True
assert r.is_transcendental is False
assert r.is_extended_real is True
assert r.is_complex is True
assert r.is_noninteger is True
assert r.is_irrational is False
assert r.is_imaginary is False
assert r.is_positive is True
assert r.is_negative is False
assert r.is_nonpositive is False
assert r.is_nonnegative is True
assert r.is_even is False
assert r.is_odd is False
assert r.is_finite is True
assert r.is_infinite is False
assert r.is_comparable is True
assert r.is_prime is False
assert r.is_composite is False
r = Rational(1, 4)
assert r.is_nonpositive is False
assert r.is_positive is True
assert r.is_negative is False
assert r.is_nonnegative is True
r = Rational(5, 4)
assert r.is_negative is False
assert r.is_positive is True
assert r.is_nonpositive is False
assert r.is_nonnegative is True
r = Rational(5, 3)
assert r.is_nonnegative is True
assert r.is_positive is True
assert r.is_negative is False
assert r.is_nonpositive is False
def test_neg_rational():
r = Rational(-3, 4)
assert r.is_positive is False
assert r.is_nonpositive is True
assert r.is_negative is True
assert r.is_nonnegative is False
r = Rational(-1, 4)
assert r.is_nonpositive is True
assert r.is_positive is False
assert r.is_negative is True
assert r.is_nonnegative is False
r = Rational(-5, 4)
assert r.is_negative is True
assert r.is_positive is False
assert r.is_nonpositive is True
assert r.is_nonnegative is False
r = Rational(-5, 3)
assert r.is_nonnegative is False
assert r.is_positive is False
assert r.is_negative is True
assert r.is_nonpositive is True
def test_pi():
z = pi
assert z.is_commutative is True
assert z.is_integer is False
assert z.is_rational is False
assert z.is_algebraic is False
assert z.is_transcendental is True
assert z.is_extended_real is True
assert z.is_complex is True
assert z.is_noninteger is True
assert z.is_irrational is True
assert z.is_imaginary is False
assert z.is_positive is True
assert z.is_negative is False
assert z.is_nonpositive is False
assert z.is_nonnegative is True
assert z.is_even is False
assert z.is_odd is False
assert z.is_finite is True
assert z.is_infinite is False
assert z.is_comparable is True
assert z.is_prime is False
assert z.is_composite is False
def test_E():
z = E
assert z.is_commutative is True
assert z.is_integer is False
assert z.is_rational is False
assert z.is_algebraic is False
assert z.is_transcendental is True
assert z.is_extended_real is True
assert z.is_complex is True
assert z.is_noninteger is True
assert z.is_irrational is True
assert z.is_imaginary is False
assert z.is_positive is True
assert z.is_negative is False
assert z.is_nonpositive is False
assert z.is_nonnegative is True
assert z.is_even is False
assert z.is_odd is False
assert z.is_finite is True
assert z.is_infinite is False
assert z.is_comparable is True
assert z.is_prime is False
assert z.is_composite is False
def test_I():
z = I
assert z.is_commutative is True
assert z.is_integer is False
assert z.is_rational is False
assert z.is_algebraic is True
assert z.is_transcendental is False
assert z.is_extended_real is False
assert z.is_complex is True
assert z.is_noninteger is False
assert z.is_irrational is False
assert z.is_imaginary is True
assert z.is_positive is False
assert z.is_negative is False
assert z.is_nonpositive is False
assert z.is_nonnegative is False
assert z.is_even is False
assert z.is_odd is False
assert z.is_finite is True
assert z.is_infinite is False
assert z.is_comparable is False
assert z.is_prime is False
assert z.is_composite is False
def test_symbol_real():
# issue sympy/sympy#3848
a = Symbol('a', extended_real=False)
assert a.is_extended_real is False
assert a.is_integer is False
assert a.is_negative is False
assert a.is_positive is False
assert a.is_nonnegative is False
assert a.is_nonpositive is False
assert a.is_nonzero is True
def test_symbol_zero():
x = Symbol('x', zero=True)
assert x.is_positive is False
assert x.is_nonpositive
assert x.is_negative is False
assert x.is_nonnegative
assert x.is_zero is True
assert x.is_nonzero is False
assert x.is_finite is True
# issue sympy/sympy#9165
f = Symbol('f', finite=False)
assert 0/x == nan
assert 0*(1/x) == nan
assert 0*f == nan
def test_symbol_positive():
x = Symbol('x', positive=True)
assert x.is_positive is True
assert x.is_nonpositive is False
assert x.is_negative is False
assert x.is_nonnegative is True
assert x.is_zero is False
assert x.is_nonzero is True
def test_neg_symbol_positive():
x = -Symbol('x', positive=True)
assert x.is_positive is False
assert x.is_nonpositive is True
assert x.is_negative is True
assert x.is_nonnegative is False
assert x.is_zero is False
assert x.is_nonzero is True
def test_symbol_nonpositive():
x = Symbol('x', nonpositive=True)
assert x.is_positive is False
assert x.is_nonpositive is True
assert x.is_negative is None
assert x.is_nonnegative is None
assert x.is_zero is None
assert x.is_nonzero is None
def test_neg_symbol_nonpositive():
x = -Symbol('x', nonpositive=True)
assert x.is_positive is None
assert x.is_nonpositive is None
assert x.is_negative is False
assert x.is_nonnegative is True
assert x.is_zero is None
assert x.is_nonzero is None
def test_symbol_falsepositive():
x = Symbol('x', positive=False)
assert x.is_positive is False
assert x.is_nonpositive is None
assert x.is_negative is None
assert x.is_nonnegative is None
assert x.is_zero is None
assert x.is_nonzero is None
def test_neg_symbol_falsepositive():
x = -Symbol('x', positive=False)
assert x.is_positive is None
assert x.is_nonpositive is None
assert x.is_negative is False
assert x.is_nonnegative is None
assert x.is_zero is None
assert x.is_nonzero is None
def test_symbol_falsepositive_real():
x = Symbol('x', positive=False, extended_real=True)
assert x.is_positive is False
assert x.is_nonpositive is True
assert x.is_negative is None
assert x.is_nonnegative is None
assert x.is_zero is None
assert x.is_nonzero is None
def test_neg_symbol_falsepositive_real():
x = -Symbol('x', positive=False, extended_real=True)
assert x.is_positive is None
assert x.is_nonpositive is None
assert x.is_negative is False
assert x.is_nonnegative is True
assert x.is_zero is None
assert x.is_nonzero is None
def test_symbol_falsenonnegative():
x = Symbol('x', nonnegative=False)
assert x.is_positive is False
assert x.is_nonpositive is None
assert x.is_negative is None
assert x.is_nonnegative is False
assert x.is_zero is False
assert x.is_nonzero is True
def test_neg_symbol_falsenonnegative():
x = -Symbol('x', nonnegative=False)
assert x.is_positive is None
assert x.is_negative is False
assert x.is_nonnegative is None
assert x.is_zero is False
assert x.is_nonzero is True
@pytest.mark.xfail
def test_neg_symbol_falsenonnegative_xfail():
x = -Symbol('x', nonnegative=False)
assert x.is_nonpositive is False
def test_symbol_falsenonnegative_real():
x = Symbol('x', nonnegative=False, extended_real=True)
assert x.is_positive is False
assert x.is_nonpositive is True
assert x.is_negative is True
assert x.is_nonnegative is False
assert x.is_zero is False
assert x.is_nonzero is True
def test_neg_symbol_falsenonnegative_real():
x = -Symbol('x', nonnegative=False, extended_real=True)
assert x.is_positive is True
assert x.is_nonpositive is False
assert x.is_negative is False
assert x.is_nonnegative is True
assert x.is_zero is False
assert x.is_nonzero is True
def test_prime():
assert Integer(-1).is_prime is False
assert Integer(-2).is_prime is False
assert Integer(-4).is_prime is False
assert Integer(0).is_prime is False
assert Integer(1).is_prime is False
assert Integer(2).is_prime is True
assert Integer(17).is_prime is True
assert Integer(4).is_prime is False
def test_composite():
assert Integer(-1).is_composite is False
assert Integer(-2).is_composite is False
assert Integer(-4).is_composite is False
assert Integer(0).is_composite is False
assert Integer(2).is_composite is False
assert Integer(17).is_composite is False
assert Integer(4).is_composite is True
def test_prime_symbol():
x = Symbol('x', prime=True)
assert x.is_prime is True
assert x.is_integer is True
assert x.is_positive is True
assert x.is_negative is False
assert x.is_nonpositive is False
assert x.is_nonnegative is True
x = Symbol('x', prime=False)
assert x.is_prime is False
assert x.is_integer is None
assert x.is_positive is None
assert x.is_negative is None
assert x.is_nonpositive is None
assert x.is_nonnegative is None
def test_symbol_noncommutative():
x = Symbol('x', commutative=True)
assert x.is_complex is None
x = Symbol('x', commutative=False)
assert x.is_integer is False
assert x.is_rational is False
assert x.is_algebraic is False
assert x.is_irrational is False
assert x.is_extended_real is False
assert x.is_complex is False
def test_other_symbol():
x = Symbol('x', integer=True)
assert x.is_integer is True
assert x.is_extended_real is True
assert x.is_finite is True
x = Symbol('x', integer=True, nonnegative=True)
assert x.is_integer is True
assert x.is_nonnegative is True
assert x.is_negative is False
assert x.is_positive is None
x = Symbol('x', integer=True, nonpositive=True)
assert x.is_integer is True
assert x.is_nonpositive is True
assert x.is_positive is False
assert x.is_negative is None
x = Symbol('x', odd=True)
assert x.is_odd is True
assert x.is_even is False
assert x.is_integer is True
x = Symbol('x', odd=False)
assert x.is_odd is False
assert x.is_even is None
assert x.is_integer is None
x = Symbol('x', even=True)
assert x.is_even is True
assert x.is_odd is False
assert x.is_integer is True
assert x.is_finite is True
x = Symbol('x', even=False)
assert x.is_even is False
assert x.is_odd is None
assert x.is_integer is None
assert x.is_finite is None # issue sympy/sympy#16432
x = Symbol('x', integer=True, nonnegative=True)
assert x.is_integer is True
assert x.is_nonnegative is True
x = Symbol('x', integer=True, nonpositive=True)
assert x.is_integer is True
assert x.is_nonpositive is True
with pytest.raises(AttributeError):
x.is_extended_real = False
x = Symbol('x', algebraic=True)
assert x.is_transcendental is False
x = Symbol('x', transcendental=True)
assert x.is_algebraic is False
assert x.is_rational is False
assert x.is_integer is False
x = Symbol('x', zero=False)
assert x.is_nonzero is True # issue sympy/sympy#16431
def test_sympyissue_3825():
"""catch: hash instability"""
a1 = x + y
a2 = y + x
assert a2.is_comparable is False
h1 = hash(a1)
h2 = hash(a2)
assert h1 == h2
def test_sympyissue_4822():
z = cbrt(-1)*(1 - I*sqrt(3))
assert z.is_extended_real in [True, None]
def test_hash_vs_typeinfo():
"""Seemingly different typeinfo, but in fact equal."""
# the following two are semantically equal
x1 = Symbol('x', even=True)
x2 = Symbol('x', integer=True, odd=False)
assert hash(x1) == hash(x2)
assert x1 == x2
def test_hash_vs_typeinfo_2():
"""Different typeinfo should mean !eq"""
# the following two are semantically different
x1 = Symbol('x', even=True)
assert x != x1
assert hash(x) != hash(x1) # This might fail with very low probability
def test_hash_vs_eq():
"""catch: different hash for equal objects"""
a = 1 + pi # important: do not fold it into a Number instance
ha = hash(a) # it should be Add/Mul/... to trigger the bug
# this uses .evalf() and deduces it is positive
assert a.is_positive is True
# be sure that hash stayed the same
assert ha == hash(a)
# now b should be the same expression
b = a.expand(trig=True)
hb = hash(b)
assert a == b
assert ha == hb
def test_Add_is_pos_neg():
# these cover lines not covered by the rest of tests in core
n = Symbol('n', negative=True, infinite=True)
nn = Symbol('n', nonnegative=True, infinite=True)
np = Symbol('n', nonpositive=True, infinite=True)
p = Symbol('p', positive=True, infinite=True)
r = Dummy(extended_real=True, finite=False)
xf = Symbol('xb', finite=True, real=True)
assert (n + p).is_positive is None
assert (n + x).is_positive is None
assert (p + x).is_positive is None
assert (n + p).is_negative is None
assert (n + x).is_negative is None
assert (p + x).is_negative is None
assert (n + xf).is_positive is False
assert (p + xf).is_positive is True
assert (n + xf).is_negative is True
assert (p + xf).is_negative is False
assert (x - oo).is_negative is None # issue sympy/sympy#7798
# issue sympy/sympy#8046, 16.2
assert (p + nn).is_positive
assert (n + np).is_negative
assert (p + r).is_positive is None
def test_Add_is_imaginary():
nn = Dummy(nonnegative=True, finite=True)
assert (I*nn + I).is_imaginary # issue sympy/sympy#8046, 17
# issue sympy/sympy#4149
assert (3 + I).is_complex
assert (3 + I).is_imaginary is False
assert (3*I + pi*I).is_imaginary
y = Symbol('y', real=True)
assert (3*I + pi*I + y*I).is_imaginary is True
p = Symbol('p', positive=True, finite=True)
assert (3*I + pi*I + p*I).is_imaginary
n = Symbol('n', negative=True, finite=True)
assert (-3*I - pi*I + n*I).is_imaginary
# tests from the PR sympy/sympy#7887:
e = -sqrt(3)*I/2 + Float(0.866025403784439)*I
assert e.is_extended_real is False
assert e.is_imaginary
def test_Pow_is_imaginary():
# issue sympy/sympy#4149
i = Symbol('i', imaginary=True)
assert ([(i**a).is_imaginary for a in range(4)] ==
[False, True, False, True])
def test_Add_is_algebraic():
a = Symbol('a', algebraic=True)
b = Symbol('a', algebraic=True)
na = Symbol('na', algebraic=False)
nb = Symbol('nb', algebraic=False)
assert (a + b).is_algebraic
assert (na + nb).is_algebraic is None
assert (a + na).is_algebraic is False
assert (a + x).is_algebraic is None
assert (na + x).is_algebraic is None
def test_Mul_is_algebraic():
a = Symbol('a', algebraic=True)
b = Symbol('b', algebraic=True)
na = Symbol('na', algebraic=False)
an = Symbol('an', algebraic=True, nonzero=True)
nb = Symbol('nb', algebraic=False)
assert (a*b).is_algebraic
assert (na*nb).is_algebraic is None
assert (a*na).is_algebraic is None
assert (an*na).is_algebraic is False
assert (a*x).is_algebraic is None
assert (na*x).is_algebraic is None
def test_Pow_is_algebraic():
e = Symbol('e', algebraic=True)
assert Pow(1, e, evaluate=False).is_algebraic
assert Pow(0, e, evaluate=False).is_algebraic
a = Symbol('a', algebraic=True)
an = Symbol('an', algebraic=True, nonzero=True)
na = Symbol('na', algebraic=False)
ia = Symbol('ia', algebraic=True, irrational=True)
ib = Symbol('ib', algebraic=True, irrational=True)
r = Symbol('r', rational=True, nonzero=True)
assert (an**r).is_algebraic
assert (a**r**2).is_algebraic
assert (a**x).is_algebraic is None
assert (na**r).is_algebraic is False
assert (ia**r).is_algebraic
assert (ia**ib).is_algebraic is False
assert (a**e).is_algebraic is None
# Gelfond-Schneider constant:
assert Pow(2, sqrt(2), evaluate=False).is_algebraic is False
assert Pow(GoldenRatio, sqrt(3), evaluate=False).is_algebraic is False
# sympy/sympy#8649
t = Symbol('t', real=True, transcendental=True)
n = Symbol('n', integer=True)
assert (t**n).is_algebraic is None
assert (t**n).is_integer is None
assert exp(t).is_algebraic is None
assert exp(n).is_algebraic is None
i = Symbol('i', integer=True)
p = 1/(i - 1)
assert p.is_algebraic is None
assert p.is_finite is None # issue sympy/sympy#17453
# issue sympy/sympy#20617
assert exp(I*2*pi/3).is_algebraic is True
def test_Mul_is_infinite():
f = Symbol('f', finite=True)
i = Symbol('i', infinite=True)
z = Dummy(zero=True)
nzf = Dummy(finite=True, zero=False)
assert (x*f).is_finite is None
assert (x*i).is_finite is None
assert (f*i).is_finite is False
assert (x*f*i).is_finite is None
assert (z*i).is_finite is False
assert (nzf*i).is_finite is False
assert (z*f).is_finite is True
assert Mul(0, f, evaluate=False).is_finite is True
assert Mul(0, i, evaluate=False).is_finite is False
assert (x*f).is_infinite is None
assert (x*i).is_infinite is None
assert (f*i).is_infinite is None
assert (x*f*i).is_infinite is None
assert (z*i).is_infinite is nan.is_infinite
assert (nzf*i).is_infinite is True
assert (z*f).is_infinite is False
assert Mul(0, f, evaluate=False).is_infinite is False
assert Mul(0, i, evaluate=False).is_infinite is nan.is_infinite
def test_special_is_rational():
i = Symbol('i', integer=True)
i2 = Symbol('i2', integer=True)
ni = Symbol('ni', integer=True, nonzero=True)
r = Symbol('r', rational=True)
rn = Symbol('r', rational=True, nonzero=True)
nr = Symbol('nr', irrational=True)
assert sqrt(3).is_rational is False
assert (3 + sqrt(3)).is_rational is False
assert (3*sqrt(3)).is_rational is False
z = Symbol('z', zero=True)
assert exp(z).is_rational
assert exp(0, evaluate=False).is_rational
assert exp(3).is_rational is False
assert exp(ni).is_rational is False
assert exp(rn).is_rational is False
assert exp(x).is_rational is None
assert exp(log(3), evaluate=False).is_rational is True
assert log(exp(3), evaluate=False).is_rational is True
assert log(3).is_rational is False
assert log(ni + 1).is_rational is False
assert log(rn + 1).is_rational is False
assert log(x).is_rational is None
assert (sqrt(3) + sqrt(5)).is_rational is None
assert (sqrt(3) + pi).is_rational is False
assert (x**i).is_rational is None
assert (i**i).is_rational is True
assert (i**i2).is_rational is None
assert (r**i).is_rational is None
assert (r**r).is_rational is None
assert (r**x).is_rational is None
assert (nr**i).is_rational is None # issue sympy/sympy#8598
assert (nr**Symbol('z', zero=True)).is_rational
assert sin(1).is_rational is False
assert sin(ni).is_rational is False
assert sin(rn).is_rational is False
assert sin(x).is_rational is None
assert asin(rn).is_rational is False
assert sin(asin(3), evaluate=False).is_rational is True
def test_sanitize_assumptions():
# issue sympy/sympy#6666
for cls in (Symbol, Dummy, Wild):
x = cls('x', extended_real=1, positive=0)
assert x.is_extended_real is True
assert x.is_positive is False
assert cls('', extended_real=True, positive=None).is_positive is None
pytest.raises(ValueError, lambda: cls('', commutative=None))
pytest.raises(ValueError, lambda: Symbol._sanitize({'commutative': None}))
def test_special_assumptions():
e = -3 - sqrt(5) + (-sqrt(10)/2 - sqrt(2)/2)**2
assert simplify(e < 0) is false
assert simplify(e > 0) is false
assert (e == 0) is False # it's not a literal 0
assert e.equals(0) is True
def test_inconsistent():
# cf. issues sympy/sympy#5795 and sympy/sympy#5545
pytest.raises(InconsistentAssumptions, lambda: Symbol('x', extended_real=True,
commutative=False))
def test_sympyissue_2730():
assert (1/(1 + I)).is_extended_real is False
def test_sympyissue_2920():
n = Symbol('n', real=True, negative=True)
assert sqrt(n).is_imaginary
def test_sympyissue_7899():
x = Symbol('x', extended_real=True)
assert (I*x).is_extended_real is None
assert ((x - I)*(x - 1)).is_zero is None
assert ((x - I)*(x - 1)).is_extended_real is None
@pytest.mark.xfail
def test_sympyissue_7993():
x = Symbol('x', integer=True)
y = Symbol('y', noninteger=True)
assert (x - y).is_nonzero is True
def test_sympyissue_8075():
pytest.raises(InconsistentAssumptions, lambda: Dummy(zero=True, finite=False))
pytest.raises(InconsistentAssumptions, lambda: Dummy(zero=True, infinite=True))
def test_sympyissue_8642():
x = Symbol('x', extended_real=True, integer=False)
assert (x*2).is_integer is None
def test_sympyissue_10024():
x = Dummy('x')
assert Mod(x, 2*pi).is_zero is None
def test_sympyissue_16530():
e = 1/abs(x)
assert e.is_real is None
assert e.is_extended_real is None
def test_sympyissue_17555():
x = Symbol('x', infinite=True, extended_real=True)
assert x.is_positive is None
assert (-x).is_positive is None
def test_sympyissue_17556():
z = I*oo
assert z.is_imaginary is False
assert z.is_finite is False
def test_sympyissue_23086():
e = 180*acos(Rational(7823207, 7823209))/pi
assert e.is_zero is False
assert e.simplify()
| diofant/diofant | diofant/tests/core/test_assumptions.py | Python | bsd-3-clause | 27,771 |
'''
Support for reading LIT files.
'''
from __future__ import with_statement
__license__ = 'GPL v3'
__copyright__ = '2008, Kovid Goyal <kovid at kovidgoyal.net> ' \
'and Marshall T. Vandegrift <[email protected]>'
import struct, os, functools, re
from urlparse import urldefrag
from cStringIO import StringIO
from urllib import unquote as urlunquote
from lxml import etree
from calibre.ebooks.lit import LitError
from calibre.ebooks.lit.maps import OPF_MAP, HTML_MAP
import calibre.ebooks.lit.mssha1 as mssha1
from calibre.ebooks.oeb.base import urlnormalize, xpath
from calibre.ebooks.oeb.reader import OEBReader
from calibre.ebooks import DRMError
from calibre import plugins
lzx, lxzerror = plugins['lzx']
msdes, msdeserror = plugins['msdes']
__all__ = ["LitReader"]
XML_DECL = """<?xml version="1.0" encoding="UTF-8" ?>
"""
OPF_DECL = """<?xml version="1.0" encoding="UTF-8" ?>
<!DOCTYPE package
PUBLIC "+//ISBN 0-9673008-1-9//DTD OEB 1.0.1 Package//EN"
"http://openebook.org/dtds/oeb-1.0.1/oebpkg101.dtd">
"""
HTML_DECL = """<?xml version="1.0" encoding="UTF-8" ?>
<!DOCTYPE html PUBLIC
"+//ISBN 0-9673008-1-9//DTD OEB 1.0.1 Document//EN"
"http://openebook.org/dtds/oeb-1.0.1/oebdoc101.dtd">
"""
DESENCRYPT_GUID = "{67F6E4A2-60BF-11D3-8540-00C04F58C3CF}"
LZXCOMPRESS_GUID = "{0A9007C6-4076-11D3-8789-0000F8105754}"
CONTROL_TAG = 4
CONTROL_WINDOW_SIZE = 12
RESET_NENTRIES = 4
RESET_HDRLEN = 12
RESET_UCLENGTH = 16
RESET_INTERVAL = 32
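# Bit flags carried by each tag record in the binary tagged-text stream;
# UnBinary.binary_to_text_inner() below checks FLAG_OPENING/FLAG_CLOSING to
# open or close elements and FLAG_ATOM to look tags up in the atom tables.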
FLAG_OPENING = (1 << 0)
FLAG_CLOSING = (1 << 1)
FLAG_BLOCK = (1 << 2)
FLAG_HEAD = (1 << 3)
FLAG_ATOM = (1 << 4)
def u32(bytes):
return struct.unpack('<L', bytes[:4])[0]
def u16(bytes):
return struct.unpack('<H', bytes[:2])[0]
def int32(bytes):
return struct.unpack('<l', bytes[:4])[0]
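# Decode a variable-length integer: 7 value bits per byte, most significant
# group first, with the high bit of each byte set while more bytes follow.
# Returns (value, unconsumed bytes, remaining byte budget).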
def encint(bytes, remaining):
pos, val = 0, 0
while remaining > 0:
b = ord(bytes[pos])
pos += 1
remaining -= 1
val <<= 7
val |= (b & 0x7f)
if b & 0x80 == 0: break
return val, bytes[pos:], remaining
def msguid(bytes):
values = struct.unpack("<LHHBBBBBBBB", bytes[:16])
return "{%08lX-%04X-%04X-%02X%02X-%02X%02X%02X%02X%02X%02X}" % values
def read_utf8_char(bytes, pos):
c = ord(bytes[pos])
mask = 0x80
if (c & mask):
elsize = 0
while c & mask:
mask >>= 1
elsize += 1
if (mask <= 1) or (mask == 0x40):
raise LitError('Invalid UTF8 character: %s' % repr(bytes[pos]))
else:
elsize = 1
if elsize > 1:
if elsize + pos > len(bytes):
raise LitError('Invalid UTF8 character: %s' % repr(bytes[pos]))
c &= (mask - 1)
for i in xrange(1, elsize):
b = ord(bytes[pos+i])
if (b & 0xC0) != 0x80:
raise LitError(
'Invalid UTF8 character: %s' % repr(bytes[pos:pos+i]))
c = (c << 6) | (b & 0x3F)
return unichr(c), pos+elsize
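# Read a length-prefixed UTF-8 string (the length itself is stored as a
# single UTF-8 encoded character); returns the string and the leftover bytes.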
def consume_sized_utf8_string(bytes, zpad=False):
result = []
slen, pos = read_utf8_char(bytes, 0)
for i in xrange(ord(slen)):
char, pos = read_utf8_char(bytes, pos)
result.append(char)
if zpad and bytes[pos] == '\000':
pos += 1
return u''.join(result), bytes[pos:]
def encode(string):
return unicode(string).encode('ascii', 'xmlcharrefreplace')
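# UnBinary converts the binary DOM stream of a LIT content file back into
# textual markup, using the OPF/HTML tag and attribute maps plus the
# per-file atom tables to recover element and attribute names.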
class UnBinary(object):
AMPERSAND_RE = re.compile(
r'&(?!(?:#[0-9]+|#x[0-9a-fA-F]+|[a-zA-Z_:][a-zA-Z0-9.-_:]+);)')
OPEN_ANGLE_RE = re.compile(r'<<(?![!]--)')
CLOSE_ANGLE_RE = re.compile(r'(?<!--)>>(?=>>|[^>])')
DOUBLE_ANGLE_RE = re.compile(r'([<>])\1')
EMPTY_ATOMS = ({},{})
def __init__(self, bin, path, manifest={}, map=HTML_MAP, atoms=EMPTY_ATOMS):
self.manifest = manifest
self.tag_map, self.attr_map, self.tag_to_attr_map = map
self.is_html = map is HTML_MAP
self.tag_atoms, self.attr_atoms = atoms
self.dir = os.path.dirname(path)
buf = StringIO()
self.binary_to_text(bin, buf)
self.raw = buf.getvalue().lstrip()
self.escape_reserved()
self._tree = None
def escape_reserved(self):
raw = self.raw
raw = self.AMPERSAND_RE.sub(r'&', raw)
raw = self.OPEN_ANGLE_RE.sub(r'<', raw)
raw = self.CLOSE_ANGLE_RE.sub(r'>', raw)
raw = self.DOUBLE_ANGLE_RE.sub(r'\1', raw)
self.raw = raw
def item_path(self, internal_id):
try:
target = self.manifest[internal_id].path
except KeyError:
return internal_id
if not self.dir:
return target
target = target.split('/')
base = self.dir.split('/')
for index in xrange(min(len(base), len(target))):
if base[index] != target[index]: break
else:
index += 1
relpath = (['..'] * (len(base) - index)) + target[index:]
return '/'.join(relpath)
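    # Added note (illustrative, not original code): with self.dir == 'content'
    # and a manifest entry whose path is 'images/cover.jpg', item_path() strips
    # the common prefix and returns '../images/cover.jpg'; ids that are not in
    # the manifest are returned unchanged.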
def __unicode__(self):
return self.raw.decode('utf-8')
def __str__(self):
return self.raw
def binary_to_text(self, bin, buf):
stack = [(0, None, None, 0, 0, False, False, 'text', 0)]
self.cpos = 0
while stack:
self.binary_to_text_inner(bin, buf, stack)
del self.cpos
def binary_to_text_inner(self, bin, buf, stack):
(depth, tag_name, current_map, dynamic_tag, errors,
in_censorship, is_goingdown, state, flags) = stack.pop()
if state == 'close tag':
if not tag_name:
raise LitError('Tag ends before it begins.')
buf.write(encode(u''.join(('</', tag_name, '>'))))
dynamic_tag = 0
tag_name = None
state = 'text'
while self.cpos < len(bin):
c, self.cpos = read_utf8_char(bin, self.cpos)
oc = ord(c)
if state == 'text':
if oc == 0:
state = 'get flags'
continue
elif c == '\v':
c = '\n'
elif c == '>':
c = '>>'
elif c == '<':
c = '<<'
buf.write(encode(c))
elif state == 'get flags':
if oc == 0:
state = 'text'
continue
flags = oc
state = 'get tag'
elif state == 'get tag':
state = 'text' if oc == 0 else 'get attr'
if flags & FLAG_OPENING:
tag = oc
buf.write('<')
if not (flags & FLAG_CLOSING):
is_goingdown = True
if tag == 0x8000:
state = 'get custom length'
continue
if flags & FLAG_ATOM:
if not self.tag_atoms or tag not in self.tag_atoms:
raise LitError(
"atom tag %d not in atom tag list" % tag)
tag_name = self.tag_atoms[tag]
current_map = self.attr_atoms
elif tag < len(self.tag_map):
tag_name = self.tag_map[tag]
current_map = self.tag_to_attr_map[tag]
else:
dynamic_tag += 1
errors += 1
tag_name = '?'+unichr(tag)+'?'
current_map = self.tag_to_attr_map[tag]
print 'WARNING: tag %s unknown' % unichr(tag)
buf.write(encode(tag_name))
elif flags & FLAG_CLOSING:
if depth == 0:
raise LitError('Extra closing tag %s at %d'%(tag_name,
self.cpos))
break
elif state == 'get attr':
in_censorship = False
if oc == 0:
state = 'text'
if not is_goingdown:
tag_name = None
dynamic_tag = 0
buf.write(' />')
else:
buf.write('>')
frame = (depth, tag_name, current_map,
dynamic_tag, errors, in_censorship, False,
'close tag', flags)
stack.append(frame)
frame = (depth+1, None, None, 0, 0,
False, False, 'text', 0)
stack.append(frame)
break
else:
if oc == 0x8000:
state = 'get attr length'
continue
attr = None
if current_map and oc in current_map and current_map[oc]:
attr = current_map[oc]
elif oc in self.attr_map:
attr = self.attr_map[oc]
if not attr or not isinstance(attr, basestring):
raise LitError(
'Unknown attribute %d in tag %s' % (oc, tag_name))
if attr.startswith('%'):
in_censorship = True
state = 'get value length'
continue
buf.write(' ' + encode(attr) + '=')
if attr in ['href', 'src']:
state = 'get href length'
else:
state = 'get value length'
elif state == 'get value length':
if not in_censorship:
buf.write('"')
count = oc - 1
if count == 0:
if not in_censorship:
buf.write('"')
in_censorship = False
state = 'get attr'
continue
state = 'get value'
if oc == 0xffff:
continue
if count < 0 or count > (len(bin) - self.cpos):
raise LitError('Invalid character count %d' % count)
elif state == 'get value':
if count == 0xfffe:
if not in_censorship:
buf.write('%s"' % (oc - 1))
in_censorship = False
state = 'get attr'
elif count > 0:
if not in_censorship:
if c == '"':
c = '"'
elif c == '<':
c = '<'
buf.write(c.encode('ascii', 'xmlcharrefreplace'))
count -= 1
if count == 0:
if not in_censorship:
buf.write('"')
in_censorship = False
state = 'get attr'
elif state == 'get custom length':
count = oc - 1
if count <= 0 or count > len(bin)-self.cpos:
raise LitError('Invalid character count %d' % count)
dynamic_tag += 1
state = 'get custom'
tag_name = ''
elif state == 'get custom':
tag_name += c
count -= 1
if count == 0:
buf.write(encode(tag_name))
state = 'get attr'
elif state == 'get attr length':
count = oc - 1
if count <= 0 or count > (len(bin) - self.cpos):
raise LitError('Invalid character count %d' % count)
buf.write(' ')
state = 'get custom attr'
elif state == 'get custom attr':
buf.write(encode(c))
count -= 1
if count == 0:
buf.write('=')
state = 'get value length'
elif state == 'get href length':
count = oc - 1
if count <= 0 or count > (len(bin) - self.cpos):
raise LitError('Invalid character count %d' % count)
href = ''
state = 'get href'
elif state == 'get href':
href += c
count -= 1
if count == 0:
doc, frag = urldefrag(href[1:])
path = self.item_path(doc)
if frag:
path = '#'.join((path, frag))
path = urlnormalize(path)
buf.write(encode(u'"%s"' % path))
state = 'get attr'
class DirectoryEntry(object):
def __init__(self, name, section, offset, size):
self.name = name
self.section = section
self.offset = offset
self.size = size
def __repr__(self):
return "DirectoryEntry(name=%s, section=%d, offset=%d, size=%d)" \
% (repr(self.name), self.section, self.offset, self.size)
def __str__(self):
return repr(self)
class ManifestItem(object):
def __init__(self, original, internal, mime_type, offset, root, state):
self.original = original
self.internal = internal
self.mime_type = mime_type.lower() if hasattr(mime_type, 'lower') else mime_type
self.offset = offset
self.root = root
self.state = state
# Some LIT files have Windows-style paths
path = original.replace('\\', '/')
if path[1:3] == ':/': path = path[2:]
# Some paths in Fictionwise "multiformat" LIT files contain '..' (!?)
path = os.path.normpath(path).replace('\\', '/')
while path.startswith('../'): path = path[3:]
self.path = path
def __eq__(self, other):
if hasattr(other, 'internal'):
return self.internal == other.internal
return self.internal == other
def __repr__(self):
return "ManifestItem(internal=%r, path=%r, mime_type=%r, " \
"offset=%d, root=%r, state=%r)" \
% (self.internal, self.path, self.mime_type, self.offset,
self.root, self.state)
def preserve(function):
def wrapper(self, *args, **kwargs):
opos = self.stream.tell()
try:
return function(self, *args, **kwargs)
finally:
self.stream.seek(opos)
functools.update_wrapper(wrapper, function)
return wrapper
class LitFile(object):
PIECE_SIZE = 16
def __init__(self, filename_or_stream, log):
self._warn = log.warn
if hasattr(filename_or_stream, 'read'):
self.stream = filename_or_stream
else:
self.stream = open(filename_or_stream, 'rb')
try:
self.opf_path = os.path.splitext(
os.path.basename(self.stream.name))[0] + '.opf'
except AttributeError:
self.opf_path = 'content.opf'
if self.magic != 'ITOLITLS':
raise LitError('Not a valid LIT file')
if self.version != 1:
raise LitError('Unknown LIT version %d' % (self.version,))
self.read_secondary_header()
self.read_header_pieces()
self.read_section_names()
self.read_manifest()
self.read_drm()
def warn(self, msg):
self._warn(msg)
def magic():
@preserve
def fget(self):
self.stream.seek(0)
return self.stream.read(8)
return property(fget=fget)
magic = magic()
def version():
def fget(self):
self.stream.seek(8)
return u32(self.stream.read(4))
return property(fget=fget)
version = version()
def hdr_len():
@preserve
def fget(self):
self.stream.seek(12)
return int32(self.stream.read(4))
return property(fget=fget)
hdr_len = hdr_len()
def num_pieces():
@preserve
def fget(self):
self.stream.seek(16)
return int32(self.stream.read(4))
return property(fget=fget)
num_pieces = num_pieces()
def sec_hdr_len():
@preserve
def fget(self):
self.stream.seek(20)
return int32(self.stream.read(4))
return property(fget=fget)
sec_hdr_len = sec_hdr_len()
def guid():
@preserve
def fget(self):
self.stream.seek(24)
return self.stream.read(16)
return property(fget=fget)
guid = guid()
def header():
@preserve
def fget(self):
size = self.hdr_len \
+ (self.num_pieces * self.PIECE_SIZE) \
+ self.sec_hdr_len
self.stream.seek(0)
return self.stream.read(size)
return property(fget=fget)
header = header()
@preserve
def __len__(self):
self.stream.seek(0, 2)
return self.stream.tell()
@preserve
def read_raw(self, offset, size):
self.stream.seek(offset)
return self.stream.read(size)
def read_content(self, offset, size):
return self.read_raw(self.content_offset + offset, size)
def read_secondary_header(self):
offset = self.hdr_len + (self.num_pieces * self.PIECE_SIZE)
bytes = self.read_raw(offset, self.sec_hdr_len)
offset = int32(bytes[4:])
while offset < len(bytes):
blocktype = bytes[offset:offset+4]
blockver = u32(bytes[offset+4:])
if blocktype == 'CAOL':
if blockver != 2:
raise LitError(
'Unknown CAOL block format %d' % blockver)
self.creator_id = u32(bytes[offset+12:])
self.entry_chunklen = u32(bytes[offset+20:])
self.count_chunklen = u32(bytes[offset+24:])
self.entry_unknown = u32(bytes[offset+28:])
self.count_unknown = u32(bytes[offset+32:])
offset += 48
elif blocktype == 'ITSF':
if blockver != 4:
raise LitError(
'Unknown ITSF block format %d' % blockver)
if u32(bytes[offset+4+16:]):
raise LitError('This file has a 64bit content offset')
self.content_offset = u32(bytes[offset+16:])
self.timestamp = u32(bytes[offset+24:])
self.language_id = u32(bytes[offset+28:])
offset += 48
if not hasattr(self, 'content_offset'):
raise LitError('Could not figure out the content offset')
def read_header_pieces(self):
src = self.header[self.hdr_len:]
for i in xrange(self.num_pieces):
piece = src[i * self.PIECE_SIZE:(i + 1) * self.PIECE_SIZE]
if u32(piece[4:]) != 0 or u32(piece[12:]) != 0:
raise LitError('Piece %s has 64bit value' % repr(piece))
offset, size = u32(piece), int32(piece[8:])
piece = self.read_raw(offset, size)
if i == 0:
continue # Dont need this piece
elif i == 1:
if u32(piece[8:]) != self.entry_chunklen or \
u32(piece[12:]) != self.entry_unknown:
raise LitError('Secondary header does not match piece')
self.read_directory(piece)
elif i == 2:
if u32(piece[8:]) != self.count_chunklen or \
u32(piece[12:]) != self.count_unknown:
raise LitError('Secondary header does not match piece')
continue # No data needed from this piece
elif i == 3:
self.piece3_guid = piece
elif i == 4:
self.piece4_guid = piece
def read_directory(self, piece):
if not piece.startswith('IFCM'):
raise LitError('Header piece #1 is not main directory.')
chunk_size, num_chunks = int32(piece[8:12]), int32(piece[24:28])
if (32 + (num_chunks * chunk_size)) != len(piece):
raise LitError('IFCM header has incorrect length')
self.entries = {}
for i in xrange(num_chunks):
offset = 32 + (i * chunk_size)
chunk = piece[offset:offset + chunk_size]
tag, chunk = chunk[:4], chunk[4:]
if tag != 'AOLL': continue
remaining, chunk = int32(chunk[:4]), chunk[4:]
if remaining >= chunk_size:
raise LitError('AOLL remaining count is negative')
remaining = chunk_size - (remaining + 48)
entries = u16(chunk[-2:])
if entries == 0:
# Hopefully will work even without a correct entries count
entries = (2 ** 16) - 1
chunk = chunk[40:]
for j in xrange(entries):
if remaining <= 0: break
namelen, chunk, remaining = encint(chunk, remaining)
if namelen != (namelen & 0x7fffffff):
raise LitError('Directory entry had 64bit name length.')
if namelen > remaining - 3:
raise LitError('Read past end of directory chunk')
try:
name = chunk[:namelen].decode('utf-8')
chunk = chunk[namelen:]
remaining -= namelen
except UnicodeDecodeError:
break
section, chunk, remaining = encint(chunk, remaining)
offset, chunk, remaining = encint(chunk, remaining)
size, chunk, remaining = encint(chunk, remaining)
entry = DirectoryEntry(name, section, offset, size)
self.entries[name] = entry
def read_section_names(self):
if '::DataSpace/NameList' not in self.entries:
raise LitError('Lit file does not have a valid NameList')
raw = self.get_file('::DataSpace/NameList')
if len(raw) < 4:
raise LitError('Invalid Namelist section')
pos = 4
num_sections = u16(raw[2:pos])
self.section_names = [""] * num_sections
self.section_data = [None] * num_sections
for section in xrange(num_sections):
size = u16(raw[pos:pos+2])
pos += 2
size = size*2 + 2
if pos + size > len(raw):
raise LitError('Invalid Namelist section')
self.section_names[section] = \
raw[pos:pos+size].decode('utf-16-le').rstrip('\000')
pos += size
def read_manifest(self):
if '/manifest' not in self.entries:
raise LitError('Lit file does not have a valid manifest')
raw = self.get_file('/manifest')
self.manifest = {}
self.paths = {self.opf_path: None}
while raw:
slen, raw = ord(raw[0]), raw[1:]
if slen == 0: break
root, raw = raw[:slen].decode('utf8'), raw[slen:]
if not raw:
raise LitError('Truncated manifest')
for state in ['spine', 'not spine', 'css', 'images']:
num_files, raw = int32(raw), raw[4:]
if num_files == 0: continue
for i in xrange(num_files):
if len(raw) < 5:
raise LitError('Truncated manifest')
offset, raw = u32(raw), raw[4:]
internal, raw = consume_sized_utf8_string(raw)
original, raw = consume_sized_utf8_string(raw)
# The path should be stored unquoted, but not always
original = urlunquote(original)
# Is this last one UTF-8 or ASCIIZ?
mime_type, raw = consume_sized_utf8_string(raw, zpad=True)
self.manifest[internal] = ManifestItem(
original, internal, mime_type, offset, root, state)
mlist = self.manifest.values()
# Remove any common path elements
if len(mlist) > 1:
shared = mlist[0].path
for item in mlist[1:]:
path = item.path
while shared and not path.startswith(shared):
try: shared = shared[:shared.rindex("/", 0, -2) + 1]
except ValueError: shared = None
if not shared:
break
if shared:
slen = len(shared)
for item in mlist:
item.path = item.path[slen:]
# Fix any straggling absolute paths
for item in mlist:
if item.path[0] == '/':
item.path = os.path.basename(item.path)
self.paths[item.path] = item
def read_drm(self):
self.drmlevel = 0
if '/DRMStorage/Licenses/EUL' in self.entries:
self.drmlevel = 5
elif '/DRMStorage/DRMBookplate' in self.entries:
self.drmlevel = 3
elif '/DRMStorage/DRMSealed' in self.entries:
self.drmlevel = 1
else:
return
if self.drmlevel < 5:
msdes.deskey(self.calculate_deskey(), msdes.DE1)
bookkey = msdes.des(self.get_file('/DRMStorage/DRMSealed'))
if bookkey[0] != '\000':
raise LitError('Unable to decrypt title key!')
self.bookkey = bookkey[1:9]
else:
raise DRMError("Cannot access DRM-protected book")
def calculate_deskey(self):
hashfiles = ['/meta', '/DRMStorage/DRMSource']
if self.drmlevel == 3:
hashfiles.append('/DRMStorage/DRMBookplate')
prepad = 2
hash = mssha1.new()
for name in hashfiles:
data = self.get_file(name)
if prepad > 0:
data = ("\000" * prepad) + data
prepad = 0
postpad = 64 - (len(data) % 64)
if postpad < 64:
data = data + ("\000" * postpad)
hash.update(data)
digest = hash.digest()
key = [0] * 8
for i in xrange(0, len(digest)):
key[i % 8] ^= ord(digest[i])
return ''.join(chr(x) for x in key)
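    # Added note: the DES book key is derived by feeding '/meta',
    # '/DRMStorage/DRMSource' (plus the bookplate at DRM level 3) to the
    # modified SHA-1 above -- the first stream prefixed with two zero bytes,
    # each stream zero-padded to a 64-byte multiple -- and then XOR-folding
    # the 20-byte digest into the 8 bytes msdes expects.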
def get_file(self, name):
entry = self.entries[name]
if entry.section == 0:
return self.read_content(entry.offset, entry.size)
section = self.get_section(entry.section)
return section[entry.offset:entry.offset+entry.size]
def get_section(self, section):
data = self.section_data[section]
if not data:
data = self.get_section_uncached(section)
self.section_data[section] = data
return data
def get_section_uncached(self, section):
name = self.section_names[section]
path = '::DataSpace/Storage/' + name
transform = self.get_file(path + '/Transform/List')
content = self.get_file(path + '/Content')
control = self.get_file(path + '/ControlData')
while len(transform) >= 16:
csize = (int32(control) + 1) * 4
if csize > len(control) or csize <= 0:
raise LitError("ControlData is too short")
guid = msguid(transform)
if guid == DESENCRYPT_GUID:
content = self.decrypt(content)
control = control[csize:]
elif guid == LZXCOMPRESS_GUID:
reset_table = self.get_file(
'/'.join(('::DataSpace/Storage', name, 'Transform',
LZXCOMPRESS_GUID, 'InstanceData/ResetTable')))
content = self.decompress(content, control, reset_table)
control = control[csize:]
else:
raise LitError("Unrecognized transform: %s." % repr(guid))
transform = transform[16:]
return content
def decrypt(self, content):
length = len(content)
extra = length & 0x7
if extra > 0:
self.warn("content length not a multiple of block size")
content += "\0" * (8 - extra)
msdes.deskey(self.bookkey, msdes.DE1)
return msdes.des(content)
def decompress(self, content, control, reset_table):
if len(control) < 32 or control[CONTROL_TAG:CONTROL_TAG+4] != "LZXC":
raise LitError("Invalid ControlData tag value")
if len(reset_table) < (RESET_INTERVAL + 8):
raise LitError("Reset table is too short")
if u32(reset_table[RESET_UCLENGTH + 4:]) != 0:
raise LitError("Reset table has 64bit value for UCLENGTH")
result = []
window_size = 14
u = u32(control[CONTROL_WINDOW_SIZE:])
while u > 0:
u >>= 1
window_size += 1
if window_size < 15 or window_size > 21:
raise LitError("Invalid window in ControlData")
lzx.init(window_size)
ofs_entry = int32(reset_table[RESET_HDRLEN:]) + 8
uclength = int32(reset_table[RESET_UCLENGTH:])
accum = int32(reset_table[RESET_INTERVAL:])
bytes_remaining = uclength
window_bytes = (1 << window_size)
base = 0
while ofs_entry < len(reset_table):
if accum >= window_bytes:
accum = 0
size = int32(reset_table[ofs_entry:])
u = int32(reset_table[ofs_entry + 4:])
if u != 0:
raise LitError("Reset table entry greater than 32 bits")
if size >= len(content):
self._warn("LZX reset table entry out of bounds")
if bytes_remaining >= window_bytes:
lzx.reset()
try:
result.append(
lzx.decompress(content[base:size], window_bytes))
except lzx.LZXError:
self.warn("LZX decompression error; skipping chunk")
bytes_remaining -= window_bytes
base = size
accum += int32(reset_table[RESET_INTERVAL:])
ofs_entry += 8
if bytes_remaining < window_bytes and bytes_remaining > 0:
lzx.reset()
try:
result.append(lzx.decompress(content[base:], bytes_remaining))
except lzx.LZXError:
self.warn("LZX decompression error; skipping chunk")
bytes_remaining = 0
if bytes_remaining > 0:
raise LitError("Failed to completely decompress section")
return ''.join(result)
def get_atoms(self, entry):
name = '/'.join(('/data', entry.internal, 'atom'))
if name not in self.entries:
return ({}, {})
data = self.get_file(name)
nentries, data = u32(data), data[4:]
tags = {}
for i in xrange(1, nentries + 1):
if len(data) <= 1:
break
size, data = ord(data[0]), data[1:]
if size == 0 or len(data) < size:
break
tags[i], data = data[:size], data[size:]
if len(tags) != nentries:
self._warn("damaged or invalid atoms tag table")
if len(data) < 4:
return (tags, {})
attrs = {}
nentries, data = u32(data), data[4:]
for i in xrange(1, nentries + 1):
if len(data) <= 4:
break
size, data = u32(data), data[4:]
if size == 0 or len(data) < size:
break
attrs[i], data = data[:size], data[size:]
if len(attrs) != nentries:
self._warn("damaged or invalid atoms attributes table")
return (tags, attrs)
class LitContainer(object):
"""Simple Container-interface, read-only accessor for LIT files."""
def __init__(self, filename_or_stream, log):
self._litfile = LitFile(filename_or_stream, log)
self.log = log
def namelist(self):
return self._litfile.paths.keys()
def exists(self, name):
return urlunquote(name) in self._litfile.paths
def read(self, name):
entry = self._litfile.paths[urlunquote(name)] if name else None
if entry is None:
content = OPF_DECL + self._read_meta()
elif 'spine' in entry.state:
internal = '/'.join(('/data', entry.internal, 'content'))
raw = self._litfile.get_file(internal)
manifest = self._litfile.manifest
atoms = self._litfile.get_atoms(entry)
unbin = UnBinary(raw, name, manifest, HTML_MAP, atoms)
content = HTML_DECL + str(unbin)
tags = ('personname', 'place', 'city', 'country-region')
pat = r'(?i)</{0,1}st1:(%s)>'%('|'.join(tags))
content = re.sub(pat, '', content)
content = re.sub(r'<(/{0,1})form>', r'<\1div>', content)
else:
internal = '/'.join(('/data', entry.internal))
content = self._litfile.get_file(internal)
return content
def _read_meta(self):
path = 'content.opf'
raw = self._litfile.get_file('/meta')
try:
unbin = UnBinary(raw, path, self._litfile.manifest, OPF_MAP)
except LitError:
if 'PENGUIN group' not in raw: raise
print "WARNING: attempting PENGUIN malformed OPF fix"
raw = raw.replace(
'PENGUIN group', '\x00\x01\x18\x00PENGUIN group', 1)
unbin = UnBinary(raw, path, self._litfile.manifest, OPF_MAP)
return str(unbin)
def get_metadata(self):
return self._read_meta()
class LitReader(OEBReader):
Container = LitContainer
DEFAULT_PROFILE = 'MSReader'
def _spine_from_opf(self, opf):
manifest = self.oeb.manifest
for elem in xpath(opf, '/o2:package/o2:spine/o2:itemref'):
idref = elem.get('idref')
if idref not in manifest.ids:
continue
item = manifest.ids[idref]
if (item.media_type.lower() == 'application/xml' and
hasattr(item.data, 'xpath') and item.data.xpath('/html')):
item.media_type = 'application/xhtml+xml'
item.data = item._parse_xhtml(etree.tostring(item.data))
super(LitReader, self)._spine_from_opf(opf)
| hazrpg/calibre | src/calibre/ebooks/lit/reader.py | Python | gpl-3.0 | 34,423 |
# -*- coding: utf-8 -*-
##############################################################################
#
# Author: Guewen Baconnier, Jacques-Etienne Baudoux
# Copyright 2013 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import orm, fields, osv
from openerp import netsvc
from openerp.tools.translate import _
class purchase_order(orm.Model):
_inherit = 'purchase.order'
def validate_service_product_procurement(self, cr, uid, ids, context=None):
""" As action_picking_create only take care of non-service product
by looping on the moves, we need then to pass through all line with
product of type service and confirm them.
This way all procurements will reach the done state once the picking
related to the PO will be done and in the mean while the SO will be
then marked as delivered.
"""
wf_service = netsvc.LocalService("workflow")
proc_obj = self.pool.get('procurement.order')
# Proc product of type service should be confirm at this
# stage, otherwise, when picking of related PO is created
# then done, it stay blocked at running stage
proc_ids = proc_obj.search(cr, uid, [('purchase_id', 'in', ids)],
context=context)
for proc in proc_obj.browse(cr, uid, proc_ids, context=context):
if proc.product_id.type == 'service':
wf_service.trg_validate(uid, 'procurement.order',
proc.id, 'button_confirm', cr)
wf_service.trg_validate(uid, 'procurement.order',
proc.id, 'button_check', cr)
return True
# TODO In version 8.0, we should replace such a feature by
# giving each SO Line a route_id "drop shipping"
def action_picking_create(self, cr, uid, ids, context=None):
""" When the picking is created, we'll:
Only for the sales order lines mto + drop shipping:
Link the moves with the procurement of the sale order lines
which generated the purchase and confirm the procurement.
"""
assert len(ids) == 1, "Expected only 1 ID, got %r" % ids
picking_id = super(purchase_order, self).action_picking_create(
cr, uid, ids, context=context)
if not picking_id:
return picking_id
wf_service = netsvc.LocalService("workflow")
picking_obj = self.pool.get('stock.picking')
picking = picking_obj.browse(cr, uid, picking_id, context=context)
for move in picking.move_lines:
purchase_line = move.purchase_line_id
if not purchase_line:
continue
sale_line = purchase_line.sale_order_line_id
if not sale_line:
continue
if not (sale_line.type == 'make_to_order'
and sale_line.sale_flow == 'direct_delivery'):
continue
procurement = sale_line.procurement_id
if procurement and not procurement.move_id:
# the procurement for the sales and purchase is the same!
# So when the move will be done, the sales order and the
# purchase order will be shipped at the same time
procurement.write({'move_id': move.id})
wf_service.trg_validate(uid, 'procurement.order',
procurement.id, 'button_confirm', cr)
if purchase_line is not None:
wf_service.trg_validate(uid, 'procurement.order',
procurement.id, 'button_check', cr)
self.validate_service_product_procurement(cr, uid, ids, context)
return picking_id
class purchase_order_line(orm.Model):
_inherit = 'purchase.order.line'
_columns = {
'lr_source_line_id': fields.many2one( # one2one relation with selected_bid_line_id
'logistic.requisition.source',
'Logistic Requisition Source',
readonly=True,
ondelete='restrict'),
'from_bid_line_id': fields.many2one(
'purchase.order.line',
'Generated from bid',
readonly=True),
'po_line_from_bid_ids': fields.one2many(
'purchase.order.line',
'from_bid_line_id',
'Lines generated by the bid',
readonly=True),
}
def _prepare_lrs_update_from_po_line(self, cr, uid, vals,
po_line, context=None):
""" Take the vals dict from po line and return a vals dict for LRS
:param dict vals: value of to be written in new po line
:param browse_record po_line: purchase.order.line
:returns dict : vals to be written on logistic.requisition.source
"""
lrs_vals = {}
if vals.get('product_qty'):
lrs_vals['proposed_qty'] = vals.get('product_qty')
if vals.get('product_id'):
lrs_vals['proposed_product_id'] = vals.get('product_id')
if vals.get('product_uom'):
lrs_vals['proposed_uom_id'] = vals.get('product_uom')
if vals.get('price_unit'):
currency_obj = self.pool['res.currency']
to_curr = po_line.lr_source_line_id.requisition_id.currency_id.id
from_curr = po_line.order_id.pricelist_id.currency_id.id
price = currency_obj.compute(cr, uid, from_curr, to_curr,
vals.get('price_unit'), False)
lrs_vals['unit_cost'] = price
if vals.get('date_planned'):
if po_line.lr_source_line_id.transport_applicable:
                if po_line.order_id.transport == 'included':
lrs_vals['date_etd'] = False
lrs_vals['date_eta'] = vals.get('date_planned')
else:
lrs_vals['date_etd'] = vals.get('date_planned')
lrs_vals['date_eta'] = False
else:
lrs_vals['date_etd'] = vals.get('date_planned')
lrs_vals['date_eta'] = vals.get('date_planned')
return lrs_vals
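    # Hypothetical illustration (added, not part of the original module):
    # writing {'product_qty': 5, 'price_unit': 12.0} on a linked PO line would
    # produce roughly {'proposed_qty': 5, 'unit_cost': <12.0 converted to the
    # requisition currency>} for the logistic.requisition.source record.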
def write(self, cr, uid, ids, vals, context=None):
""" Here we implement something to allow the update of LRS when some
information are changed in PO line. It should be possible to do it when :
PO is still in draft
LRL is not marked as sourced
Once done, nobody should be able to change the PO line infos
"""
if context is None:
context = {}
if not ids:
return True
#We have to enforce list as it is called by function_inv
if not isinstance(ids, list):
ids = [ids]
if (vals.get('product_qty') or vals.get('product_id')
or vals.get('product_uom')
or vals.get('price_unit')
or vals.get('date_planned')):
lrs_obj = self.pool.get('logistic.requisition.source')
for line in self.browse(cr, uid, ids, context=context):
if line.lr_source_line_id:
                    if (line.lr_source_line_id.requisition_line_id.state in
                            ('sourced', 'quoted')):
                        raise osv.except_osv(
                            _('UserError'),
                            _("You cannot change this information because "
                              "this PO line is already linked to the "
                              "Logistic Requisition Line %s, marked as "
                              "sourced or quoted.")
                            % line.lr_source_line_id.name
                        )
else:
lrs_vals = self._prepare_lrs_update_from_po_line(cr,
uid, vals, line, context=context)
lrs_obj.write(cr, uid, [line.lr_source_line_id.id],
lrs_vals, context=context)
return super(purchase_order_line, self).write(cr, uid, ids, vals,
context=context)
def unlink(self, cr, uid, ids, context=None):
for line in self.browse(cr, uid, ids, context=context):
if line.lr_source_line_id:
                if (line.lr_source_line_id.requisition_line_id.state in
                        ('sourced', 'quoted')):
                    raise osv.except_osv(
                        _('UserError'),
                        _("You cannot delete this PO line because it is "
                          "already linked to the Logistic Requisition Line "
                          "%s, marked as sourced or quoted.")
                        % line.lr_source_line_id.name
                    )
return super(purchase_order_line, self).unlink(cr, uid, ids, context=context)
| lepistone/vertical-ngo | __unported__/logistic_requisition/model/purchase.py | Python | agpl-3.0 | 9,709 |
# generated from genmsg/cmake/pkg-genmsg.context.in
messages_str = ""
services_str = "/home/karl/catkin_ws/src/navigation/robot_pose_ekf/srv/GetStatus.srv"
pkg_name = "robot_pose_ekf"
dependencies_str = "std_msgs"
langs = "gencpp;geneus;genlisp;gennodejs;genpy"
dep_include_paths_str = "std_msgs;/opt/ros/lunar/share/std_msgs/cmake/../msg"
PYTHON_EXECUTABLE = "/usr/bin/python"
package_has_static_sources = '' == 'TRUE'
genmsg_check_deps_script = "/opt/ros/lunar/share/genmsg/cmake/../../../lib/genmsg/genmsg_check_deps.py"
| kschultz1986/robots_for_all | build/navigation/robot_pose_ekf/cmake/robot_pose_ekf-genmsg-context.py | Python | gpl-3.0 | 525 |
import numpy as np
from numpy import array, float32
from numpy.linalg import norm
import networkx as nx
from collections import namedtuple
from catmaid.models import Treenode, TreenodeConnector, ClassInstance, Relation
try:
from scipy.sparse.csgraph import dijkstra
except:
pass
def synapse_clustering( skeleton_id, h_list ):
Gwud = createSpatialGraphFromSkeletonID( skeleton_id )
synNodes, connector_ids, relations = synapseNodesFromSkeletonID( skeleton_id )
return tree_max_density(Gwud, synNodes, connector_ids, relations, h_list)
def tree_max_density(Gwud, synNodes, connector_ids, relations, h_list):
""" Gwud: networkx graph were the edges are weighted by length, and undirected.
synNodes: list of node IDs where there is a synapse.
connector_ids: list of connector IDs.
relations: list of the type of synapse, 'presynaptic_to' or 'postsynaptic_to'.
The three lists are synchronized by index.
"""
D, id2index = distanceMatrix( Gwud, synNodes )
SynapseGroup = namedtuple("SynapseGroup", ['node_ids', 'connector_ids', 'relations', 'local_max'])
synapseGroups = {}
for h in h_list:
expDh = np.exp(-1 * np.multiply(D, D) / (h * h) )
targLoc = {} # targLocs hosts the final destination nodes of the hill climbing
densityField = {} # densityField stores the height of the hill to be climbed
for startNode in synNodes:
if startNode not in targLoc:
currNode = startNode
allOnPath = []
if startNode not in densityField:
densityField[startNode] = 0
densityField[startNode] = np.sum(expDh[:,id2index[startNode]])
while True:
allOnPath.append(currNode)
#Make sure I have densityField of all neighbors for comparison
if currNode in targLoc:
currNode = targLoc[ currNode ] # Jump right to the end already.
break
for nn in Gwud.neighbors( currNode ):
if nn not in densityField:
densityField[nn] = 0
densityField[nn] = np.sum(expDh[:,id2index[nn]])
prevNode = currNode
for nn in Gwud.neighbors( currNode ):
if densityField[nn] > densityField[currNode]:
currNode = nn
if currNode == prevNode:
break
for node in allOnPath:
targLoc[node] = currNode
uniqueTargs = set(targLoc[node] for node in synNodes)
loc2group = {}
synapseGroups[h] = {}
for ind, val in enumerate(uniqueTargs):
loc2group[val] = ind
synapseGroups[h][ind] = SynapseGroup([], [], [], val)
for ind, node in enumerate(synNodes):
gi = loc2group[targLoc[node]]
synapseGroups[h][ gi ].node_ids.append( node )
synapseGroups[h][ gi ].connector_ids.append( connector_ids[ind] )
synapseGroups[h][ gi ].relations.append( relations[ind] )
return synapseGroups
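# Added usage sketch (names below are illustrative, not from this file):
#   groups = tree_max_density(G, syn_nodes, conn_ids, rels, [5000.0])
# returns {5000.0: {0: SynapseGroup(node_ids=[...], connector_ids=[...],
# relations=[...], local_max=<treenode id>), 1: ...}}, i.e. one SynapseGroup
# per density peak reached by the hill climbing above.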
def distanceMatrix( G, synNodes ):
""" Given a nx graph, produce an all to all distance dict via scipy sparse matrix black magic.
Also, you get in 'id2index' the the mapping from a node id to the index in matrix scaledDistance. """
dmat = {}
nodeList = tuple(G.nodes())
synNodes = set(synNodes)
synIndices = tuple(i for i,node in enumerate(nodeList) if node in synNodes)
dmat = dijkstra(nx.to_scipy_sparse_matrix(G, nodeList),
directed=False, indices=synIndices)
return dmat, {node: i for i,node in enumerate(nodeList)}
def countTargets( skeleton_id ):
nTargets = {}
synNodes, connector_ids, relations = synapseNodesFromSkeletonID( skeleton_id )
    pid = ClassInstance.objects.get(pk=skeleton_id).project_id
    PRE = Relation.objects.get(project=pid, relation_name='presynaptic_to').id
for i, cid in enumerate(connector_ids):
if relations[i] == PRE:
nTargets[cid] = TreenodeConnector.objects.filter(connector_id=cid,relation_id=PRE).count()
return nTargets
def createSpatialGraphFromSkeletonID(sid):
# retrieve all nodes of the skeleton
treenode_qs = Treenode.objects.filter(skeleton_id=sid).values_list(
'id', 'parent_id', 'location_x', 'location_y', 'location_z')
# build the networkx graph from it
G = nx.Graph()
locations = {}
for tnid, parent_id, location_x, location_y, location_z in treenode_qs:
if parent_id:
G.add_edge(parent_id, tnid)
locations[tnid] = array((location_x, location_y, location_z), dtype=float32)
for iFrom, iTo in G.edges(data=False):
G[iFrom][iTo]['weight'] = norm(locations[iFrom] - locations[iTo])
return G
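# Added note: the graph built above is undirected, one node per treenode,
# with each edge weighted by the Euclidean parent-child distance in project
# units (assumed to be nanometres in a typical CATMAID project).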
def synapseNodesFromSkeletonID(sid):
sk = ClassInstance.objects.get(pk=sid)
pid = sk.project_id
qs_tc = TreenodeConnector.objects.filter(
project=pid,
skeleton=sid
).select_related('connector')
synapse_nodes = []
connector_ids = []
synapse_relations = []
for tc in qs_tc:
synapse_nodes.append(tc.treenode_id)
connector_ids.append(tc.connector_id)
synapse_relations.append(tc.relation_id)
return synapse_nodes, connector_ids, synapse_relations
def segregationIndex( synapseGroups, skeleton_id, weightOutputs=True ):
nout = np.zeros(len(synapseGroups))
ngrp = np.zeros(len(synapseGroups))
    pid = ClassInstance.objects.get(pk=skeleton_id).project_id
    PRE = Relation.objects.get(project=pid, relation_name='presynaptic_to').id
    if weightOutputs:
        nTargets = countTargets( skeleton_id )
        for gi, group in enumerate(synapseGroups.values()):
            for i, synDirection in enumerate(group.relations):
                if synDirection == PRE:
                    nout[gi] += nTargets[ group.connector_ids[i] ]
                    ngrp[gi] += nTargets[ group.connector_ids[i] ]
                else:
                    ngrp[gi] += 1
    else:
        for gi, group in enumerate(synapseGroups.values()):
            for synDirection in group.relations:
                if synDirection == PRE:
                    nout[gi] += 1
            ngrp[gi] = len(group.relations)
frac = np.divide(nout,ngrp)
np.seterr(all='ignore')
h_partial = ngrp * (frac * np.log( frac ) + (1-frac) * np.log( 1 - frac ))
h_partial[np.isnan(h_partial)] = 0
frac_unseg = sum(nout)/sum(ngrp)
h_unseg = sum( ngrp) * ( frac_unseg*np.log(frac_unseg) + (1-frac_unseg)*np.log(1-frac_unseg) )
return 1 - sum(h_partial)/h_unseg
| htem/CATMAID | django/applications/catmaid/control/synapseclustering.py | Python | agpl-3.0 | 6,882 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.test import TestCase
from .factories import ProxyGrantingTicketFactory
from .factories import ProxyTicketFactory
from .factories import ServiceTicketFactory
from .factories import ConsumedServiceTicketFactory
from .utils import parse
from mama_cas.exceptions import InvalidTicket
from mama_cas.response import ValidationResponse
from mama_cas.response import ProxyResponse
from mama_cas.response import SamlValidationResponse
class ValidationResponseTests(TestCase):
def setUp(self):
self.st = ServiceTicketFactory()
self.pgt = ProxyGrantingTicketFactory()
def test_validation_response_content_type(self):
"""
A ``ValidationResponse`` should be set to the provided
content type.
"""
resp = ValidationResponse(context={'ticket': self.st, 'error': None},
content_type='text/xml')
self.assertEqual(resp.get('Content-Type'), 'text/xml')
def test_validation_response_ticket(self):
"""
When given a ticket, a ``ValidationResponse`` should return
an authentication success with the authenticated user.
"""
resp = ValidationResponse(context={'ticket': self.st, 'error': None},
content_type='text/xml')
user = parse(resp.content).find('./authenticationSuccess/user')
self.assertIsNotNone(user)
self.assertEqual(user.text, 'ellen')
def test_validation_response_error(self):
"""
When given an error, a ``ValidationResponse`` should return
an authentication failure with the error code and text.
"""
error = InvalidTicket('Testing Error')
resp = ValidationResponse(context={'ticket': None, 'error': error},
content_type='text/xml')
failure = parse(resp.content).find('./authenticationFailure')
self.assertIsNotNone(failure)
self.assertEqual(failure.get('code'), 'INVALID_TICKET')
self.assertEqual(failure.text, 'Testing Error')
def test_validation_response_pgt(self):
"""
When given a ``ProxyGrantingTicket``, a ``ValidationResponse``
should include a proxy-granting ticket.
"""
resp = ValidationResponse(context={'ticket': self.st, 'error': None,
'pgt': self.pgt},
content_type='text/xml')
pgt = parse(resp.content).find('./authenticationSuccess/proxyGrantingTicket')
self.assertIsNotNone(pgt)
self.assertEqual(pgt.text, self.pgt.iou)
def test_validation_response_proxies(self):
"""
When given a list of proxies, a ``ValidationResponse`` should
include the list with ordering retained.
"""
proxy_list = ['https://proxy2/pgtUrl', 'https://proxy1/pgtUrl']
resp = ValidationResponse(context={'ticket': self.st, 'error': None,
'proxies': proxy_list},
content_type='text/xml')
proxies = parse(resp.content).find('./authenticationSuccess/proxies')
self.assertIsNotNone(proxies)
self.assertEqual(len(proxies.findall('proxy')), len(proxy_list))
self.assertEqual(proxies[0].text, proxy_list[0])
self.assertEqual(proxies[1].text, proxy_list[1])
def test_validation_response_attributes(self):
"""
When given custom user attributes, a ``ValidationResponse``
should include the attributes in the response.
"""
attrs = {'givenName': 'Ellen', 'sn': 'Cohen', 'email': '[email protected]'}
resp = ValidationResponse(context={'ticket': self.st, 'error': None,
'attributes': attrs},
content_type='text/xml')
attributes = parse(resp.content).find('./authenticationSuccess/attributes')
self.assertIsNotNone(attributes)
self.assertEqual(len(attributes), len(attrs))
for child in attributes:
self.assertTrue(child.tag in attrs)
self.assertEqual(child.text, attrs[child.tag])
# Ordering is not guaranteed, so remove attributes from
# the dict as they are validated. When done, check if the
# dict is empty to see if all attributes were matched.
del attrs[child.tag]
self.assertEqual(len(attrs), 0)
def test_validation_response_nonstring_attributes(self):
"""
When given non-string attributes, the values should be
converted to strings in the response.
"""
attrs = {'boolean': True}
resp = ValidationResponse(context={'ticket': self.st, 'error': None,
'attributes': attrs},
content_type='text/xml')
attributes = parse(resp.content).find('./authenticationSuccess/attributes')
self.assertIsNotNone(attributes)
self.assertEqual(attributes[0].tag, 'boolean')
self.assertEqual(attributes[0].text, 'True')
def test_validation_response_unicode_attributes(self):
"""
When given Unicode attributes, the values should be
handled correctly in the response.
"""
attrs = {'unicode': u'тнє мαмαѕ & тнє ραραѕ'}
resp = ValidationResponse(context={'ticket': self.st, 'error': None,
'attributes': attrs},
content_type='text/xml')
attributes = parse(resp.content).find('./authenticationSuccess/attributes')
self.assertIsNotNone(attributes)
self.assertEqual(attributes[0].tag, 'unicode')
self.assertEqual(attributes[0].text, 'тнє мαмαѕ & тнє ραραѕ')
class ProxyResponseTests(TestCase):
def setUp(self):
self.st = ServiceTicketFactory()
self.pgt = ProxyGrantingTicketFactory()
self.pt = ProxyTicketFactory()
def test_proxy_response_content_type(self):
"""
A ``ProxyResponse`` should be set to the provided
content type.
"""
resp = ProxyResponse(context={'ticket': self.pt, 'error': None},
content_type='text/xml')
self.assertEqual(resp.get('Content-Type'), 'text/xml')
def test_proxy_response_ticket(self):
"""
When given a ticket, a ``ProxyResponse`` should return a
proxy request success with the proxy ticket.
"""
resp = ProxyResponse(context={'ticket': self.pt, 'error': None},
content_type='text/xml')
pt = parse(resp.content).find('./proxySuccess/proxyTicket')
self.assertIsNotNone(pt)
self.assertEqual(pt.text, self.pt.ticket)
def test_proxy_response_error(self):
"""
When given an error, a ``ProxyResponse`` should return a
proxy request failure with the error code and text.
"""
error = InvalidTicket('Testing Error')
resp = ProxyResponse(context={'ticket': None, 'error': error},
content_type='text/xml')
failure = parse(resp.content).find('./proxyFailure')
self.assertIsNotNone(failure)
self.assertEqual(failure.get('code'), 'INVALID_TICKET')
self.assertEqual(failure.text, 'Testing Error')
class SamlValidationResponseTests(TestCase):
def setUp(self):
self.st = ConsumedServiceTicketFactory()
def test_saml_validation_response_ticket(self):
"""
When given a ticket, a ``SamlValidationResponse`` should return
an authentication success.
"""
resp = SamlValidationResponse(context={'ticket': self.st, 'error': None},
content_type='text/xml')
code = parse(resp.content).find('./Body/Response/Status/StatusCode')
self.assertIsNotNone(code)
self.assertEqual(code.get('Value'), 'samlp:Success')
def test_saml_validation_response_error(self):
"""
When given an error, a ``SamlValidationResponse`` should return
an authentication failure with the error text.
"""
error = InvalidTicket('Testing Error')
resp = SamlValidationResponse(context={'ticket': None, 'error': error},
content_type='text/xml')
code = parse(resp.content).find('./Body/Response/Status/StatusCode')
self.assertIsNotNone(code)
self.assertEqual(code.get('Value'), 'samlp:RequestDenied')
message = parse(resp.content).find('./Body/Response/Status/StatusMessage')
self.assertIsNotNone(message)
self.assertEqual(message.text, 'Testing Error')
def test_saml_validation_response_attributes(self):
"""
When given custom user attributes, a ``SamlValidationResponse``
authentication success should include the attributes in the
response.
"""
attrs = {'givenName': 'Ellen', 'sn': 'Cohen', 'email': '[email protected]'}
resp = SamlValidationResponse(context={'ticket': self.st, 'error': None,
'attributes': attrs},
content_type='text/xml')
attribute_statement = parse(resp.content).find('./Body/Response/Assertion/AttributeStatement')
self.assertIsNotNone(attribute_statement)
for attr in attribute_statement.findall('Attribute'):
attr_name = attr.get('AttributeName')
attr_value = attr.find('AttributeValue')
self.assertTrue(attr_name in attrs)
self.assertEqual(attr_value.text, attrs[attr_name])
# Ordering is not guaranteed, so remove attributes from
# the dict as they are validated. When done, check if the
# dict is empty to see if all attributes were matched.
del attrs[attr_name]
self.assertEqual(len(attrs), 0)
| forcityplatform/django-mama-cas | mama_cas/tests/test_response.py | Python | bsd-3-clause | 10,106 |
import time
import os
import enigma
from Components.config import config
from Components import Harddisk
from twisted.internet import threads
def getTrashFolder(path):
# Returns trash folder without symlinks. Path may be file or directory or whatever.
mountpoint = Harddisk.findMountPoint(os.path.realpath(path))
movie = os.path.join(mountpoint, 'movie')
if os.path.isdir(movie):
mountpoint = movie
return os.path.join(mountpoint, ".Trash")
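# Added example (assuming a typical receiver layout): for a recording at
# /media/hdd/movie/show.ts the mount point resolves to /media/hdd and the
# 'movie' subdirectory exists, so getTrashFolder() returns
# '/media/hdd/movie/.Trash'.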
def createTrashFolder(path):
# Create and return trash folder for given file or dir
trash = getTrashFolder(path)
if not os.path.isdir(trash):
print "[Trashcan] create:", trash
os.mkdir(trash)
return trash
def enumTrashFolders():
# Walk through all Trash folders. This may access network
# drives and similar, so might block for minutes.
for mount in Harddisk.getProcMounts():
if mount[1].startswith('/media/'):
mountpoint = mount[1]
movie = os.path.join(mountpoint, 'movie')
if os.path.isdir(movie):
mountpoint = movie
result = os.path.join(mountpoint, ".Trash")
if os.path.isdir(result):
yield result
class Trashcan:
def __init__(self):
self.isCleaning = False
self.session = None
self.dirty = set()
def init(self, session):
self.session = session
session.nav.record_event.append(self.gotRecordEvent)
def markDirty(self, path):
# Marks a path for purging, for when a recording on that
# device starts or ends.
if not path:
return
trash = getTrashFolder(path)
self.dirty.add(trash)
def gotRecordEvent(self, service, event):
if (event == enigma.iRecordableService.evEnd):
self.cleanIfIdle()
def destroy(self):
if self.session is not None:
self.session.nav.record_event.remove(self.gotRecordEvent)
self.session = None
def __del__(self):
self.destroy()
def cleanIfIdle(self, path=None):
# RecordTimer calls this when preparing a recording. That is a
# nice moment to clean up. It also mentions the path, so mark
# it as dirty.
self.markDirty(path)
if not self.dirty:
return
if self.isCleaning:
print "[Trashcan] Cleanup already running"
return
if (self.session is not None) and self.session.nav.getRecordings():
return
self.isCleaning = True
ctimeLimit = time.time() - (config.usage.movielist_trashcan_days.value * 3600 * 24)
reserveBytes = 1024*1024*1024 * int(config.usage.movielist_trashcan_reserve.value)
cleanset = self.dirty
self.dirty = set()
threads.deferToThread(purge, cleanset, ctimeLimit, reserveBytes).addCallbacks(self.cleanReady, self.cleanFail)
def cleanReady(self, result=None):
self.isCleaning = False
# schedule another clean loop if needed (so we clean up all devices, not just one)
self.cleanIfIdle()
def cleanFail(self, failure):
print "[Trashcan] ERROR in clean:", failure
self.isCleaning = False
def purge(cleanset, ctimeLimit, reserveBytes):
# Remove expired items from trash, and attempt to have
# reserveBytes of free disk space.
for trash in cleanset:
if not os.path.isdir(trash):
print "[Trashcan] No trash.", trash
return 0
diskstat = os.statvfs(trash)
free = diskstat.f_bfree * diskstat.f_bsize
bytesToRemove = reserveBytes - free
candidates = []
print "[Trashcan] bytesToRemove", bytesToRemove, trash
size = 0
for root, dirs, files in os.walk(trash, topdown=False):
for name in files:
try:
fn = os.path.join(root, name)
st = os.stat(fn)
if st.st_ctime < ctimeLimit:
print "[Trashcan] Too old:", name, st.st_ctime
enigma.eBackgroundFileEraser.getInstance().erase(fn)
bytesToRemove -= st.st_size
else:
candidates.append((st.st_ctime, fn, st.st_size))
size += st.st_size
except Exception, e:
print "[Trashcan] Failed to stat %s:"% name, e
# Remove empty directories if possible
for name in dirs:
try:
os.rmdir(os.path.join(root, name))
except:
pass
candidates.sort()
# Now we have a list of ctime, candidates, size. Sorted by ctime (=deletion time)
print "[Trashcan] Bytes to remove remaining:", bytesToRemove, trash
for st_ctime, fn, st_size in candidates:
if bytesToRemove < 0:
break
enigma.eBackgroundFileEraser.getInstance().erase(fn)
bytesToRemove -= st_size
size -= st_size
print "[Trashcan] Size after purging:", size, trash
def cleanAll(trash):
if not os.path.isdir(trash):
print "[Trashcan] No trash.", trash
return 0
for root, dirs, files in os.walk(trash, topdown=False):
for name in files:
fn = os.path.join(root, name)
try:
enigma.eBackgroundFileEraser.getInstance().erase(fn)
except Exception, e:
print "[Trashcan] Failed to erase %s:"% name, e
# Remove empty directories if possible
for name in dirs:
try:
os.rmdir(os.path.join(root, name))
except:
pass
def init(session):
global instance
instance.init(session)
instance = Trashcan()
| vit2/vit-e2 | lib/python/Tools/Trashcan.py | Python | gpl-2.0 | 4,858 |
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2016-2018 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Test example app."""
import json
import os
import signal
import subprocess
import time
from os.path import abspath, dirname, join
import pytest
@pytest.yield_fixture
def example_app():
"""Example app fixture."""
current_dir = os.getcwd()
# Go to example directory
project_dir = dirname(dirname(abspath(__file__)))
exampleapp_dir = join(project_dir, 'examples')
os.chdir(exampleapp_dir)
# Setup application
assert subprocess.call('./app-setup.sh', shell=True) == 0
# Setup fixtures
assert subprocess.call('./app-fixtures.sh', shell=True) == 0
# Start example app
webapp = subprocess.Popen(
'FLASK_DEBUG=1 FLASK_APP=app.py flask run -p 5000',
stdout=subprocess.PIPE, preexec_fn=os.setsid, shell=True)
time.sleep(10)
yield webapp
# Stop server
os.killpg(webapp.pid, signal.SIGTERM)
# Tear down example app
subprocess.call('./app-teardown.sh', shell=True)
# Return to the original directory
os.chdir(current_dir)
def test_example_app(example_app):
"""Test example app."""
# Open page
cmd = 'curl http://localhost:5000/?q=body:test'
output = json.loads(
subprocess.check_output(cmd, shell=True).decode('utf-8'))
assert len(output['hits']['hits']) == 1
| tiborsimko/invenio-search | tests/test_examples_app.py | Python | mit | 1,522 |
import pytest
from charfinder import UnicodeNameIndex, tokenize, sample_chars, query_type
from unicodedata import name
@pytest.fixture
def sample_index():
return UnicodeNameIndex(sample_chars)
@pytest.fixture(scope="module")
def full_index():
return UnicodeNameIndex()
def test_query_type():
assert query_type('blue') == 'NAME'
def test_tokenize():
assert list(tokenize('')) == []
assert list(tokenize('a b')) == ['A', 'B']
assert list(tokenize('a-b')) == ['A', 'B']
assert list(tokenize('abc')) == ['ABC']
assert list(tokenize('café')) == ['CAFÉ']
def test_index():
sample_index = UnicodeNameIndex(sample_chars)
assert len(sample_index.index) == 9
def test_find_word_no_match(sample_index):
res = sample_index.find_chars('qwertyuiop')
assert len(res.items) == 0
def test_find_word_1_match(sample_index):
res = [(ord(char), name(char))
for char in sample_index.find_chars('currency').items]
assert res == [(8352, 'EURO-CURRENCY SIGN')]
def test_find_word_1_match_character_result(sample_index):
res = [name(char) for char in
sample_index.find_chars('currency').items]
assert res == ['EURO-CURRENCY SIGN']
def test_find_word_2_matches(sample_index):
res = [(ord(char), name(char))
for char in sample_index.find_chars('Euro').items]
assert res == [(8352, 'EURO-CURRENCY SIGN'),
(8364, 'EURO SIGN')]
def test_find_2_words_no_matches(sample_index):
res = sample_index.find_chars('Euro letter')
assert res.count == 0
def test_find_2_words_no_matches_because_one_not_found(sample_index):
res = sample_index.find_chars('letter qwertyuiop')
assert res.count == 0
def test_find_2_words_1_match(sample_index):
res = sample_index.find_chars('sign dollar')
assert res.count == 1
def test_find_2_words_2_matches(sample_index):
res = sample_index.find_chars('latin letter')
assert res.count == 2
def test_find_chars_many_matches_full(full_index):
res = full_index.find_chars('letter')
assert res.count > 7000
def test_find_1_word_1_match_full(full_index):
res = [(ord(char), name(char))
for char in full_index.find_chars('registered').items]
assert res == [(174, 'REGISTERED SIGN')]
def test_find_1_word_2_matches_full(full_index):
res = full_index.find_chars('rook')
assert res.count == 2
def test_find_3_words_no_matches_full(full_index):
res = full_index.find_chars('no such character')
assert res.count == 0
def test_find_with_start(sample_index):
res = [(ord(char), name(char))
for char in sample_index.find_chars('sign', 1).items]
assert res == [(8352, 'EURO-CURRENCY SIGN'), (8364, 'EURO SIGN')]
def test_find_with_stop(sample_index):
res = [(ord(char), name(char))
for char in sample_index.find_chars('sign', 0, 2).items]
assert res == [(36, 'DOLLAR SIGN'), (8352, 'EURO-CURRENCY SIGN')]
def test_find_with_start_stop(sample_index):
res = [(ord(char), name(char))
for char in sample_index.find_chars('sign', 1, 2).items]
assert res == [(8352, 'EURO-CURRENCY SIGN')]
| fluentpython/unicode-solutions | flupy-ch18/test_charfinder.py | Python | cc0-1.0 | 3,160 |
import subprocess
import os
from logging import info
import argparse
from servi.command import Command
import servi.config as c
from servi.utils import timeit
from servi.template_mgr import TemplateManager
from servi.commands.buildbox import get_all_boxes
from servi.exceptions import ServiError, ForceError
class UseboxCommand(Command):
def register_command_line(self, sub_parsers):
parser = sub_parsers.add_parser(
'usebox', help="Use a vagrant base box that you already "
"created with 'servi buildbox'.",
description=
"Use a vagrant base box that you already built with "
"'servi buildbox'.\n"
"See 'servi buildbox --help' for more information'"
)
parser.add_argument('-f', '--force', action='store_true')
# for testing
parser.add_argument('--mock', action='store_true',
help=argparse.SUPPRESS)
parser.set_defaults(command_func=self.run)
def run(self, args, extra_args):
all_boxes = get_all_boxes()
        if len(all_boxes) == 0:
raise ServiError('No saved boxes found. Run "servi buildbox".')
ver = all_boxes[0][1]
tmgr = TemplateManager()
if not args.force and ver < tmgr.m_template.template_version:
raise ForceError('Existing saved servi_box has a version less '
'than existing template version.\n'
'Either run "servi buildbox" or run '
'"servi usebox --force."\n'
'Template version: {0}\n'
'servi_box: {1}'
.format(tmgr.m_template.template_version,
all_boxes[0][0]))
env = os.environ.copy()
env['servi_box'] = os.path.join(c.BOX_DIR, all_boxes[0][0])
os.chdir(c.MASTER_DIR)
if not args.mock:
with timeit():
subprocess.check_call('vagrant up', env=env, shell=True)
else:
info('mocking vagrant up with base box: {0}'
.format(env['servi_box']))
return True
command = UseboxCommand()
| rr326/servi | servi/commands/usebox.py | Python | mit | 2,241 |
"""
@author: dhoomakethu
"""
from __future__ import absolute_import, unicode_literals
from abc import ABCMeta, abstractmethod
class App(object):
__metaclass__ = ABCMeta
_driver = None
_service_store = None
_emulator = None
def __init__(self, network):
# self._network = getattr(Provider, network)
# self.compute_engine = get_driver(self._network)
self._network = network
@property
def driver(self):
return self._driver
@property
def store(self):
return self._service_store
@property
def network(self):
return self._network
@property
def emulator(self):
return self._emulator
@abstractmethod
def choice(self):
"""
choose a random vm
"""
# @abstractmethod
# def connect(self):
# """
# creates an connection to compute engine
# """
@abstractmethod
def stop_services(self, **kwargs):
"""
stops a cloud instance
"""
@abstractmethod
def terminate_services(self, **kwargs):
"""
terminates a cloud instance
"""
@abstractmethod
def reboot_services(self, **kwargs):
"""
reboots the instance
"""
@abstractmethod
def kill_process(self, **kwargs):
"""
kills a process from the instance
"""
@abstractmethod
def remote_kill_process(self, **kwargs):
"""
kills a process runnning in a remote instance
"""
@abstractmethod
def stop_upstart_job(self, **kwargs):
"""
stops an upstart job
"""
@abstractmethod
def stop_initd_job(self, **kwargs):
"""
stops an initd job
"""
@abstractmethod
def burn_cpu(self, **kwargs):
"""
Loads CPU core to desired load percentage
Args:
instance_ids:
cpuload:
duration:
cpucore:
Returns:
"""
@abstractmethod
def burn_ram(self, **kwargs):
"""
Stress RAM with desired load
Args:
instance_ids:
ramload:
duration:
Returns:
"""
@abstractmethod
def burn_io(self, **kwargs):
"""
Stress IO
Args:
instance_ids:
Returns:
"""
@abstractmethod
def burn_disk(self, **kwargs):
"""
Stress DISK
Args:
instance_ids:
size:
path:
duration:
Returns:
"""
@abstractmethod
def network_blackout(self, **kwargs):
"""
Simulates Network blackout on a container
Returns:
"""
@abstractmethod
def network_corrupt(self, **kwargs):
"""
Corrupts random network packets for the given service
Returns:
"""
@abstractmethod
def network_loss(self, **kwargs):
"""
drop random network packets
Returns:
"""
@abstractmethod
def network_duplicate(self, **kwargs):
"""
Duplicates network packets
Returns:
"""
@abstractmethod
def network_delay(self, **kwargs):
"""
Simulates delay with network transactions
Returns:
"""
@abstractmethod
def network_reorder(self, **kwargs):
"""
Reorders a given percent of network packets
Returns:
"""
| dhoomakethu/apocalypse | apocalypse/app/__init__.py | Python | mit | 3,520 |
# Copyright 2015 Kevin Murray <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import jinja2
from jinja2 import Environment, PackageLoader
import mpld3.urls
def pothole2title(string):
words = string.split('_')
return " ".join([w.title() for w in words])
def nice_params(param_dict):
nice = {}
for k, v in param_dict.items():
nice[pothole2title(k)] = v
return nice
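# Worked example (key name illustrative): pothole2title('gc_content') returns
# 'Gc Content', so nice_params({'gc_content': 41.2}) == {'Gc Content': 41.2}.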
def include_file(name):
return QCPP_LOADER.get_source(QCPP_ENV, name)[0]
QCPP_LOADER = PackageLoader('qcpp', 'templates')
QCPP_ENV = Environment(loader=QCPP_LOADER)
with open(mpld3.urls.D3_LOCAL) as fh:
D3 = fh.read()
with open(mpld3.urls.MPLD3_LOCAL) as fh:
MPLD3 = fh.read()
QCPP_ENV.globals['include_file'] = include_file
| kdmurray91/libqcpp | ui/qcpp/util.py | Python | mpl-2.0 | 1,347 |
# Copyright (C) 2005, Giovanni Bajo
# Based on previous work under copyright (c) 1999, 2002 McMillan Enterprises, Inc.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
#import pythoncom
pycomCLSCTX_INPROC = 3
pycomCLSCTX_LOCAL_SERVER = 4
import os
import sys
d = {}
class NextID:
_reg_clsid_ = '{25E06E61-2D18-11D5-945F-00609736B700}'
_reg_desc_ = 'Text COM server'
_reg_progid_ = 'MEInc.NextID'
_reg_clsctx_ = pycomCLSCTX_INPROC | pycomCLSCTX_LOCAL_SERVER
_public_methods_ = [
'getNextID'
]
def __init__(self):
import win32api
win32api.MessageBox(0, "NextID.__init__ started", "NextID.py")
global d
if sys.frozen:
for entry in sys.path:
if entry.find('?') > -1:
here = os.path.dirname(entry.split('?')[0])
break
else:
here = os.getcwd()
else:
here = os.path.dirname(__file__)
self.fnm = os.path.join(here, 'id.cfg')
try:
d = eval(open(self.fnm, 'rU').read()+'\n')
except:
d = {
'systemID': 0xaaaab,
'highID': 0
}
win32api.MessageBox(0, "NextID.__init__ complete", "NextID.py")
def getNextID(self):
global d
d['highID'] = d['highID'] + 1
open(self.fnm, 'w').write(repr(d))
return '%(systemID)-0.5x%(highID)-0.7x' % d
def RegisterNextID():
from win32com.server import register
register.UseCommandLine(NextID)
def UnRegisterNextID():
from win32com.server import register
register.UnregisterServer(NextID._reg_clsid_, NextID._reg_progid_)
if __name__ == '__main__':
import sys
if "/unreg" in sys.argv:
UnRegisterNextID()
elif "/register" in sys.argv:
RegisterNextID()
else:
print "running as server"
import win32com.server.localserver
win32com.server.localserver.main()
raw_input("Press any key...")
| supercheetah/diceroller | pyinstaller/e2etests/win32/NextID.py | Python | artistic-2.0 | 2,653 |
import re
from ros_homebot_python import constants as c
from ros_homebot_python import utils
def camelcase_to_underscores(name):
s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()
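# Worked example of the two regex passes above:
# camelcase_to_underscores('GetValue') -> 'Get_Value' -> 'get_value'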
def hash_str(s):
# print 'hash str:', repr(s)
s = str(s)
return sum(ord(letter) for letter in s)
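# Worked example of the additive hash above: hash_str('ab') == 97 + 98 == 195.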
class Packet(object):
def __init__(self, id, data='', check_id=True): # pylint: disable=W0622
if isinstance(id, int):
id = chr(id)
self.id = str(id)
if check_id:
assert self.id in c.ALL_IDS, 'Invalid ID: %s' % self.id
self.data = str(data)
def __hash__(self):
return self.hash
def __cmp__(self, other):
if not isinstance(other, Packet):
            return NotImplemented
return cmp(hash(self), hash(other))
def __eq__(self, other):
return hash(self) == hash(other)
@property
def hash(self):
return hash_str(self.id + self.data)
@classmethod
def from_ros_message(cls, msg):
return cls(chr(msg.id), msg.data)
@property
def parameters(self):
return self.data.split(' ')
@property
def id_name(self):
if self.id not in c.ALL_IDS:
return
return re.sub(r'[^a-z]+', '_', c.ALL_IDS[self.id].lower().strip())
@property
def non_get_id(self):
"""
Returns the ID from the data if the ID is "get_value".
"""
if self.id == c.ID_GET_VALUE and self.data:
return self.data[0]
return self.id
@property
def length(self):
return len(self.id.strip()) + len(self.data.strip())
# @classmethod
# def fromServiceRequest(cls, req, packet_id):
# req_name = type(req).__name__
#packet_name = camelcase_to_underscores(req_name)
@classmethod
def from_string(cls, s):
_id = c.ID_NULL
_data = ''
if s:
parts = s.split(' ')
if parts:
_id = parts[0]
_data = ' '.join(parts[1:])
return cls(_id, _data, check_id=False)
def __unicode__(self):
s = u'%s %s' % (self.id_name, self.data)
s = s.strip()
s = s.encode('utf-8')
return s
def __repr__(self):
return u'<%s: %s>' % (type(self).__name__, unicode(self))
class KVPacket(Packet):
def __init__(self, id, *args, **kwargs): # pylint: disable=W0622
data = ' '.join(map(str, args))
super(KVPacket, self).__init__(id, data, **kwargs)
class BooleanPacket(Packet):
"""
A packet with a single parameter that is either true or false.
"""
def __init__(self, id, data='', **kwargs): # pylint: disable=W0622
data = utils.to_10(data)
super(BooleanPacket, self).__init__(id, **kwargs)
self.data = data
class LEDPacket(BooleanPacket):
def __init__(self, data=''):
super(LEDPacket, self).__init__(c.ID_LED, data)
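# Illustrative usage (the id characters below are placeholders; real ids come
# from ros_homebot_python.constants):
#   Packet.from_string('a 90')  -> Packet with id 'a' and data '90' (no id check)
#   LEDPacket(True)             -> data normalised through utils.to_10()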
| chrisspen/homebot | src/ros/src/ros_homebot_python/src/ros_homebot_python/packet.py | Python | mit | 2,993 |
"""Unittest for idlelib.WidgetRedirector
100% coverage
"""
from test.test_support import requires
import unittest
from idlelib.idle_test.mock_idle import Func
from Tkinter import Tk, Text, TclError
from idlelib.WidgetRedirector import WidgetRedirector
class InitCloseTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
requires('gui')
cls.tk = Tk()
cls.text = Text(cls.tk)
@classmethod
def tearDownClass(cls):
cls.text.destroy()
cls.tk.destroy()
del cls.text, cls.tk
def test_init(self):
redir = WidgetRedirector(self.text)
self.assertEqual(redir.widget, self.text)
self.assertEqual(redir.tk, self.text.tk)
self.assertRaises(TclError, WidgetRedirector, self.text)
redir.close() # restore self.tk, self.text
def test_close(self):
redir = WidgetRedirector(self.text)
redir.register('insert', Func)
redir.close()
self.assertEqual(redir._operations, {})
self.assertFalse(hasattr(self.text, 'widget'))
class WidgetRedirectorTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
requires('gui')
cls.tk = Tk()
cls.text = Text(cls.tk)
@classmethod
def tearDownClass(cls):
cls.text.destroy()
cls.tk.destroy()
del cls.text, cls.tk
def setUp(self):
self.redir = WidgetRedirector(self.text)
self.func = Func()
self.orig_insert = self.redir.register('insert', self.func)
self.text.insert('insert', 'asdf') # leaves self.text empty
def tearDown(self):
self.text.delete('1.0', 'end')
self.redir.close()
def test_repr(self): # partly for 100% coverage
self.assertIn('Redirector', repr(self.redir))
self.assertIn('Original', repr(self.orig_insert))
def test_register(self):
self.assertEqual(self.text.get('1.0', 'end'), '\n')
self.assertEqual(self.func.args, ('insert', 'asdf'))
self.assertIn('insert', self.redir._operations)
self.assertIn('insert', self.text.__dict__)
self.assertEqual(self.text.insert, self.func)
def test_original_command(self):
self.assertEqual(self.orig_insert.operation, 'insert')
self.assertEqual(self.orig_insert.tk_call, self.text.tk.call)
self.orig_insert('insert', 'asdf')
self.assertEqual(self.text.get('1.0', 'end'), 'asdf\n')
def test_unregister(self):
self.assertIsNone(self.redir.unregister('invalid operation name'))
self.assertEqual(self.redir.unregister('insert'), self.func)
self.assertNotIn('insert', self.redir._operations)
self.assertNotIn('insert', self.text.__dict__)
def test_unregister_no_attribute(self):
del self.text.insert
self.assertEqual(self.redir.unregister('insert'), self.func)
def test_dispatch_intercept(self):
self.func.__init__(True)
self.assertTrue(self.redir.dispatch('insert', False))
self.assertFalse(self.func.args[0])
def test_dispatch_bypass(self):
self.orig_insert('insert', 'asdf')
# tk.call returns '' where Python would return None
self.assertEqual(self.redir.dispatch('delete', '1.0', 'end'), '')
self.assertEqual(self.text.get('1.0', 'end'), '\n')
def test_dispatch_error(self):
self.func.__init__(TclError())
self.assertEqual(self.redir.dispatch('insert', False), '')
self.assertEqual(self.redir.dispatch('invalid'), '')
def test_command_dispatch(self):
# Test that .__init__ causes redirection of tk calls
# through redir.dispatch
self.tk.call(self.text._w, 'insert', 'hello')
self.assertEqual(self.func.args, ('hello',))
self.assertEqual(self.text.get('1.0', 'end'), '\n')
# Ensure that called through redir .dispatch and not through
# self.text.insert by having mock raise TclError.
self.func.__init__(TclError())
self.assertEqual(self.tk.call(self.text._w, 'insert', 'boo'), '')
if __name__ == '__main__':
unittest.main(verbosity=2)
| svanschalkwyk/datafari | windows/python/Lib/idlelib/idle_test/test_widgetredir.py | Python | apache-2.0 | 4,133 |
import pytest
from flask import Flask
from fittrackee.emails.email import EmailTemplate
from .template_results.password_reset_request import (
expected_en_html_body,
expected_en_text_body,
expected_fr_html_body,
expected_fr_text_body,
)
class TestEmailTemplateForPasswordRequest:
@pytest.mark.parametrize(
'lang, expected_subject',
[
('en', 'FitTrackee - Password reset request'),
('fr', 'FitTrackee - Réinitialiser votre mot de passe'),
],
)
def test_it_gets_subject(
self, app: Flask, lang: str, expected_subject: str
) -> None:
email_template = EmailTemplate(app.config['TEMPLATES_FOLDER'])
subject = email_template.get_content(
'password_reset_request', lang, 'subject.txt', {}
)
assert subject == expected_subject
@pytest.mark.parametrize(
'lang, expected_text_body',
[('en', expected_en_text_body), ('fr', expected_fr_text_body)],
)
def test_it_gets_text_body(
self, app: Flask, lang: str, expected_text_body: str
) -> None:
email_template = EmailTemplate(app.config['TEMPLATES_FOLDER'])
email_data = {
'expiration_delay': '3 seconds' if lang == 'en' else '3 secondes',
'username': 'test',
'password_reset_url': 'http://localhost/password-reset?token=xxx',
'operating_system': 'Linux',
'browser_name': 'Firefox',
}
text_body = email_template.get_content(
'password_reset_request', lang, 'body.txt', email_data
)
assert text_body == expected_text_body
def test_it_gets_en_html_body(self, app: Flask) -> None:
email_template = EmailTemplate(app.config['TEMPLATES_FOLDER'])
email_data = {
'expiration_delay': '3 seconds',
'username': 'test',
'password_reset_url': 'http://localhost/password-reset?token=xxx',
'operating_system': 'Linux',
'browser_name': 'Firefox',
}
text_body = email_template.get_content(
'password_reset_request', 'en', 'body.html', email_data
)
assert expected_en_html_body in text_body
def test_it_gets_fr_html_body(self, app: Flask) -> None:
email_template = EmailTemplate(app.config['TEMPLATES_FOLDER'])
email_data = {
'expiration_delay': '3 secondes',
'username': 'test',
'password_reset_url': 'http://localhost/password-reset?token=xxx',
'operating_system': 'Linux',
'browser_name': 'Firefox',
}
text_body = email_template.get_content(
'password_reset_request', 'fr', 'body.html', email_data
)
assert expected_fr_html_body in text_body
| SamR1/FitTrackee | fittrackee/tests/emails/test_email_template_password_request.py | Python | agpl-3.0 | 2,820 |
from __future__ import absolute_import
from django.http import HttpRequest, HttpResponse
from typing import List, Text
from zerver.decorator import authenticated_json_post_view,\
has_request_variables, REQ, JsonableError
from zerver.lib.actions import check_send_typing_notification, \
extract_recipients
from zerver.lib.response import json_success
from zerver.models import UserProfile
@has_request_variables
def send_notification_backend(request, user_profile, operator=REQ('op'),
notification_to = REQ('to', converter=extract_recipients, default=[])):
# type: (HttpRequest, UserProfile, Text, List[Text]) -> HttpResponse
check_send_typing_notification(user_profile, notification_to, operator)
return json_success()
| sonali0901/zulip | zerver/views/typing.py | Python | apache-2.0 | 766 |
# -*- coding: utf-8 -*-
from django.db.models import Q
from django.views.generic import ListView
from django_get_forms.views import ProcessGetFormMixin
from .forms import SearchForm
from .models import Article
class SearchView(ProcessGetFormMixin, ListView):
template_name = 'demo/index.html'
form_class = SearchForm
def get_queryset(self):
if self.form.is_valid() and self.form.cleaned_data['query']:
query = self.form.cleaned_data['query']
return Article.objects.filter(
Q(summary__icontains=query) |
Q(title__icontains=query) |
Q(keywords__icontains=query)
)
return []
search = SearchView.as_view()
| estebistec/django-get-forms | examples/demo/demo/views.py | Python | bsd-3-clause | 722 |
def foo(*, a, b):
print(a)
print(b)
def bar():
print(1)
print(2)
| siosio/intellij-community | python/testData/refactoring/inlineFunction/keywordOnlyArgs/main.after.py | Python | apache-2.0 | 83 |
# Copyright (c) 2013 Calin Crisan
# This file is part of motionEye.
#
# motionEye is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import collections
import datetime
import errno
import glob
import logging
import math
import os.path
import re
import shlex
import subprocess
import urlparse
from tornado.ioloop import IOLoop
import diskctl
import motionctl
import powerctl
import settings
import tasks
import uploadservices
import utils
import v4l2ctl
_CAMERA_CONFIG_FILE_NAME = 'thread-%(id)s.conf'
_MAIN_CONFIG_FILE_NAME = 'motion.conf'
_ACTIONS = ['lock', 'unlock', 'light_on', 'light_off', 'alarm_on', 'alarm_off', 'up', 'right', 'down', 'left', 'zoom_in', 'zoom_out',
'preset1', 'preset2', 'preset3', 'preset4', 'preset5', 'preset6', 'preset7', 'preset8', 'preset9']
_main_config_cache = None
_camera_config_cache = {}
_camera_ids_cache = None
_additional_section_funcs = []
_additional_config_funcs = []
_additional_structure_cache = {}
_monitor_command_cache = {}
# when using the following video codecs, the ffmpeg_variable_bitrate parameter appears to have an exponential effect
_EXPONENTIAL_QUALITY_CODECS = ['mpeg4', 'msmpeg4', 'swf', 'flv', 'mov', 'ogg', 'mkv']
_EXPONENTIAL_QUALITY_FACTOR = 100000 # voodoo
_EXPONENTIAL_DEF_QUALITY = 511 # about 75%
_MAX_FFMPEG_VARIABLE_BITRATE = 32767
_KNOWN_MOTION_OPTIONS = set([
'auto_brightness',
'brightness',
'contrast',
'despeckle_filter',
'emulate_motion',
'event_gap',
'ffmpeg_bps',
'ffmpeg_output_debug_movies',
'ffmpeg_output_movies',
'ffmpeg_variable_bitrate',
'ffmpeg_video_codec',
'framerate',
'height',
'hue',
'lightswitch',
'locate_motion_mode',
'locate_motion_style',
'mask_file',
'max_movie_time',
'minimum_motion_frames',
'movie_filename',
'netcam_keepalive',
'netcam_tolerant_check',
'netcam_url',
'netcam_userpass',
'noise_level',
'noise_tune',
'on_event_end',
'on_event_start',
'on_movie_end',
'on_picture_save',
'output_debug_pictures',
'output_motion',
'output_pictures',
'picture_filename',
'post_capture',
'pre_capture',
'quality',
'rotate',
'rtsp_uses_tcp',
'saturation',
'smart_mask_speed',
'snapshot_filename',
'snapshot_interval',
'stream_authentication',
'stream_auth_method',
'stream_localhost',
'stream_maxrate',
'stream_motion',
'stream_port',
'stream_quality',
'target_dir',
'text_changes',
'text_double',
'text_left',
'text_right',
'threshold',
'videodevice',
'width',
])
def additional_section(func):
_additional_section_funcs.append(func)
def additional_config(func):
_additional_config_funcs.append(func)
import wifictl # @UnusedImport
import tzctl # @UnusedImport
def get_main(as_lines=False):
global _main_config_cache
if not as_lines and _main_config_cache is not None:
return _main_config_cache
config_file_path = os.path.join(settings.CONF_PATH, _MAIN_CONFIG_FILE_NAME)
logging.debug('reading main config from file %(path)s...' % {'path': config_file_path})
lines = None
try:
file = open(config_file_path, 'r')
except IOError as e:
if e.errno == errno.ENOENT: # file does not exist
logging.info('main config file %(path)s does not exist, using default values' % {'path': config_file_path})
lines = []
else:
logging.error('could not open main config file %(path)s: %(msg)s' % {
'path': config_file_path, 'msg': unicode(e)})
raise
if lines is None:
try:
lines = [l[:-1] for l in file.readlines()]
except Exception as e:
logging.error('could not read main config file %(path)s: %(msg)s' % {
'path': config_file_path, 'msg': unicode(e)})
raise
finally:
file.close()
if as_lines:
return lines
main_config = _conf_to_dict(lines,
list_names=['thread'],
no_convert=['@admin_username', '@admin_password', '@normal_username', '@normal_password'])
_get_additional_config(main_config)
_set_default_motion(main_config, old_config_format=motionctl.has_old_config_format())
_main_config_cache = main_config
return main_config
def set_main(main_config):
global _main_config_cache
main_config = dict(main_config)
for n, v in _main_config_cache.iteritems():
main_config.setdefault(n, v)
_main_config_cache = main_config
main_config = dict(main_config)
_set_additional_config(main_config)
config_file_path = os.path.join(settings.CONF_PATH, _MAIN_CONFIG_FILE_NAME)
# read the actual configuration from file
lines = get_main(as_lines=True)
# write the configuration to file
logging.debug('writing main config to %(path)s...' % {'path': config_file_path})
try:
file = open(config_file_path, 'w')
except Exception as e:
logging.error('could not open main config file %(path)s for writing: %(msg)s' % {
'path': config_file_path, 'msg': unicode(e)})
raise
lines = _dict_to_conf(lines, main_config, list_names=['thread'])
try:
file.writelines([utils.make_str(l) + '\n' for l in lines])
except Exception as e:
logging.error('could not write main config file %(path)s: %(msg)s' % {
'path': config_file_path, 'msg': unicode(e)})
raise
finally:
file.close()
def get_camera_ids(filter_valid=True):
global _camera_ids_cache
if _camera_ids_cache is not None:
return _camera_ids_cache
config_path = settings.CONF_PATH
logging.debug('listing config dir %(path)s...' % {'path': config_path})
try:
ls = os.listdir(config_path)
except Exception as e:
logging.error('failed to list config dir %(path)s: %(msg)s', {
'path': config_path, 'msg': unicode(e)})
raise
camera_ids = []
pattern = '^' + _CAMERA_CONFIG_FILE_NAME.replace('%(id)s', '(\d+)') + '$'
for name in ls:
match = re.match(pattern, name)
if match:
camera_id = int(match.groups()[0])
logging.debug('found camera with id %(id)s' % {
'id': camera_id})
camera_ids.append(camera_id)
camera_ids.sort()
if not filter_valid:
return camera_ids
filtered_camera_ids = []
for camera_id in camera_ids:
if get_camera(camera_id):
filtered_camera_ids.append(camera_id)
_camera_ids_cache = filtered_camera_ids
return filtered_camera_ids
def get_enabled_local_motion_cameras():
if not get_main().get('@enabled'):
return []
camera_ids = get_camera_ids()
cameras = [get_camera(camera_id) for camera_id in camera_ids]
return [c for c in cameras if c.get('@enabled') and utils.is_local_motion_camera(c)]
def get_network_shares():
if not get_main().get('@enabled'):
return []
camera_ids = get_camera_ids()
cameras = [get_camera(camera_id) for camera_id in camera_ids]
mounts = []
for camera in cameras:
if camera.get('@storage_device') != 'network-share':
continue
mounts.append({
'server': camera['@network_server'],
'share': camera['@network_share_name'],
'username': camera['@network_username'],
'password': camera['@network_password'],
})
return mounts
def get_camera(camera_id, as_lines=False):
if not as_lines and camera_id in _camera_config_cache:
return _camera_config_cache[camera_id]
camera_config_path = os.path.join(settings.CONF_PATH, _CAMERA_CONFIG_FILE_NAME) % {'id': camera_id}
logging.debug('reading camera config from %(path)s...' % {'path': camera_config_path})
try:
file = open(camera_config_path, 'r')
except Exception as e:
logging.error('could not open camera config file: %(msg)s' % {'msg': unicode(e)})
raise
try:
lines = [l.strip() for l in file.readlines()]
except Exception as e:
logging.error('could not read camera config file %(path)s: %(msg)s' % {
'path': camera_config_path, 'msg': unicode(e)})
raise
finally:
file.close()
if as_lines:
return lines
camera_config = _conf_to_dict(lines,
no_convert=['@name', '@network_share_name', '@network_server',
'@network_username', '@network_password', '@storage_device',
'@upload_server', '@upload_username', '@upload_password'])
if utils.is_local_motion_camera(camera_config):
# determine the enabled status
main_config = get_main()
threads = main_config.get('thread', [])
camera_config['@enabled'] = _CAMERA_CONFIG_FILE_NAME % {'id': camera_id} in threads
camera_config['@id'] = camera_id
old_config_format = motionctl.has_old_config_format()
# adapt directives from old configuration, if needed
if old_config_format:
logging.debug('using old motion config directives')
if 'output_normal' in camera_config:
camera_config['output_pictures'] = camera_config.pop('output_normal')
if 'output_all' in camera_config:
camera_config['emulate_motion'] = camera_config.pop('output_all')
if 'ffmpeg_cap_new' in camera_config:
camera_config['ffmpeg_output_movies'] = camera_config.pop('ffmpeg_cap_new')
if 'ffmpeg_cap_motion' in camera_config:
camera_config['ffmpeg_output_debug_movies'] = camera_config.pop('ffmpeg_cap_motion')
if 'locate' in camera_config:
camera_config['locate_motion_mode'] = camera_config.pop('locate')
if 'jpeg_filename' in camera_config:
camera_config['picture_filename'] = camera_config.pop('jpeg_filename')
if 'max_mpeg_time' in camera_config:
camera_config['max_movie_time'] = camera_config.pop('max_mpeg_time')
if 'webcam_port' in camera_config:
camera_config['stream_port'] = camera_config.pop('webcam_port')
if 'webcam_quality' in camera_config:
camera_config['stream_quality'] = camera_config.pop('webcam_quality')
if 'webcam_motion' in camera_config:
camera_config['stream_motion'] = camera_config.pop('webcam_motion')
if 'webcam_maxrate' in camera_config:
camera_config['stream_maxrate'] = camera_config.pop('webcam_maxrate')
if 'webcam_localhost' in camera_config:
camera_config['stream_localhost'] = camera_config.pop('webcam_localhost')
if 'gap' in camera_config:
camera_config['event_gap'] = camera_config.pop('gap')
if 'netcam_http' in camera_config:
camera_config['netcam_keepalive'] = camera_config.pop('netcam_http') in ['1.1', 'keepalive']
if 'despeckle' in camera_config:
camera_config['despeckle_filter'] = camera_config.pop('despeckle')
_get_additional_config(camera_config, camera_id=camera_id)
_set_default_motion_camera(camera_id, camera_config)
elif utils.is_remote_camera(camera_config):
pass
elif utils.is_simple_mjpeg_camera(camera_config):
_get_additional_config(camera_config, camera_id=camera_id)
_set_default_simple_mjpeg_camera(camera_id, camera_config)
else: # incomplete configuration
logging.warn('camera config file at %s is incomplete, ignoring' % camera_config_path)
return None
_camera_config_cache[camera_id] = dict(camera_config)
return camera_config
def set_camera(camera_id, camera_config):
camera_config['@id'] = camera_id
_camera_config_cache[camera_id] = camera_config
camera_config = dict(camera_config)
if utils.is_local_motion_camera(camera_config):
old_config_format = motionctl.has_old_config_format()
# adapt directives to old configuration, if needed
if old_config_format:
logging.debug('using old motion config directives')
if 'output_pictures' in camera_config:
camera_config['output_normal'] = camera_config.pop('output_pictures')
if 'emulate_motion' in camera_config:
camera_config['output_all'] = camera_config.pop('emulate_motion')
if 'ffmpeg_output_movies' in camera_config:
camera_config['ffmpeg_cap_new'] = camera_config.pop('ffmpeg_output_movies')
if 'ffmpeg_output_debug_movies' in camera_config:
camera_config['ffmpeg_cap_motion'] = camera_config.pop('ffmpeg_output_debug_movies')
if 'locate_motion_mode' in camera_config:
camera_config['locate'] = camera_config.pop('locate_motion_mode')
if 'picture_filename' in camera_config:
camera_config['jpeg_filename'] = camera_config.pop('picture_filename')
if 'max_movie_time' in camera_config:
camera_config['max_mpeg_time'] = camera_config.pop('max_movie_time')
if 'stream_port' in camera_config:
camera_config['webcam_port'] = camera_config.pop('stream_port')
if 'stream_quality' in camera_config:
camera_config['webcam_quality'] = camera_config.pop('stream_quality')
if 'stream_motion' in camera_config:
camera_config['webcam_motion'] = camera_config.pop('stream_motion')
if 'stream_maxrate' in camera_config:
camera_config['webcam_maxrate'] = camera_config.pop('stream_maxrate')
if 'stream_localhost' in camera_config:
camera_config['webcam_localhost'] = camera_config.pop('stream_localhost')
if 'stream_auth_method' in camera_config:
camera_config.pop('stream_auth_method')
if 'stream_authentication' in camera_config:
camera_config.pop('stream_authentication')
if 'event_gap' in camera_config:
camera_config['gap'] = camera_config.pop('event_gap')
if 'netcam_keepalive' in camera_config:
camera_config['netcam_http'] = '1.1' if camera_config.pop('netcam_keepalive') else '1.0'
if 'despeckle_filter' in camera_config:
camera_config['despeckle'] = camera_config.pop('despeckle_filter')
# set the enabled status in main config
main_config = get_main()
threads = main_config.setdefault('thread', [])
config_file_name = _CAMERA_CONFIG_FILE_NAME % {'id': camera_id}
if camera_config['@enabled'] and config_file_name not in threads:
threads.append(config_file_name)
elif not camera_config['@enabled']:
threads = [t for t in threads if t != config_file_name]
main_config['thread'] = threads
set_main(main_config)
_set_additional_config(camera_config, camera_id=camera_id)
elif utils.is_remote_camera(camera_config):
pass
elif utils.is_simple_mjpeg_camera(camera_config):
_set_additional_config(camera_config, camera_id=camera_id)
# read the actual configuration from file
config_file_path = os.path.join(settings.CONF_PATH, _CAMERA_CONFIG_FILE_NAME) % {'id': camera_id}
if os.path.isfile(config_file_path):
lines = get_camera(camera_id, as_lines=True)
else:
lines = []
# write the configuration to file
camera_config_path = os.path.join(settings.CONF_PATH, _CAMERA_CONFIG_FILE_NAME) % {'id': camera_id}
logging.debug('writing camera config to %(path)s...' % {'path': camera_config_path})
try:
file = open(camera_config_path, 'w')
except Exception as e:
logging.error('could not open camera config file %(path)s for writing: %(msg)s' % {
'path': camera_config_path, 'msg': unicode(e)})
raise
lines = _dict_to_conf(lines, camera_config)
try:
file.writelines([utils.make_str(l) + '\n' for l in lines])
except Exception as e:
logging.error('could not write camera config file %(path)s: %(msg)s' % {
'path': camera_config_path, 'msg': unicode(e)})
raise
finally:
file.close()
def add_camera(device_details):
global _camera_ids_cache
proto = device_details['proto']
if proto in ['netcam', 'mjpeg']:
host = device_details['host']
if device_details['port']:
host += ':' + str(device_details['port'])
if device_details['username'] and proto == 'mjpeg':
if device_details['password']:
host = device_details['username'] + ':' + device_details['password'] + '@' + host
else:
host = device_details['username'] + '@' + host
device_details['url'] = urlparse.urlunparse((device_details['scheme'], host, device_details['path'], '', '', ''))
# determine the last camera id
camera_ids = get_camera_ids()
camera_id = 1
while camera_id in camera_ids:
camera_id += 1
logging.info('adding new camera with id %(id)s...' % {'id': camera_id})
# prepare a default camera config
camera_config = {'@enabled': True}
if proto == 'v4l2':
# find a suitable resolution
for (w, h) in v4l2ctl.list_resolutions(device_details['path']):
if w > 300:
camera_config['width'] = w
camera_config['height'] = h
break
camera_config['videodevice'] = device_details['path']
elif proto == 'motioneye':
camera_config['@proto'] = 'motioneye'
camera_config['@scheme'] = device_details['scheme']
camera_config['@host'] = device_details['host']
camera_config['@port'] = device_details['port']
camera_config['@path'] = device_details['path']
camera_config['@username'] = device_details['username']
camera_config['@password'] = device_details['password']
camera_config['@remote_camera_id'] = device_details['remote_camera_id']
elif proto == 'netcam':
camera_config['netcam_url'] = device_details['url']
camera_config['text_double'] = True
if device_details['username']:
camera_config['netcam_userpass'] = device_details['username'] + ':' + device_details['password']
camera_config['netcam_keepalive'] = device_details.get('keep_alive', False)
camera_config['netcam_tolerant_check'] = True
if device_details.get('camera_index') == 'udp':
camera_config['rtsp_uses_tcp'] = False
if camera_config['netcam_url'].startswith('rtsp'):
camera_config['width'] = 640
camera_config['height'] = 480
else: # assuming mjpeg
camera_config['@proto'] = 'mjpeg'
camera_config['@url'] = device_details['url']
if utils.is_local_motion_camera(camera_config):
_set_default_motion_camera(camera_id, camera_config)
# go through the config conversion functions back and forth once
camera_config = motion_camera_ui_to_dict(motion_camera_dict_to_ui(camera_config), camera_config)
elif utils.is_simple_mjpeg_camera(camera_config):
_set_default_simple_mjpeg_camera(camera_id, camera_config)
# go through the config conversion functions back and forth once
camera_config = simple_mjpeg_camera_ui_to_dict(simple_mjpeg_camera_dict_to_ui(camera_config), camera_config)
# write the configuration to file
set_camera(camera_id, camera_config)
_camera_ids_cache = None
_camera_config_cache.clear()
camera_config = get_camera(camera_id)
return camera_config
def rem_camera(camera_id):
global _camera_ids_cache
camera_config_name = _CAMERA_CONFIG_FILE_NAME % {'id': camera_id}
camera_config_path = os.path.join(settings.CONF_PATH, _CAMERA_CONFIG_FILE_NAME) % {'id': camera_id}
# remove the camera from the main config
main_config = get_main()
threads = main_config.setdefault('thread', [])
threads = [t for t in threads if t != camera_config_name]
main_config['thread'] = threads
set_main(main_config)
logging.info('removing camera config file %(path)s...' % {'path': camera_config_path})
_camera_ids_cache = None
_camera_config_cache.clear()
try:
os.remove(camera_config_path)
except Exception as e:
logging.error('could not remove camera config file %(path)s: %(msg)s' % {
'path': camera_config_path, 'msg': unicode(e)})
raise
def main_ui_to_dict(ui):
data = {
'@show_advanced': ui['show_advanced'],
'@admin_username': ui['admin_username'],
'@admin_password': ui['admin_password'],
'@normal_username': ui['normal_username'],
'@normal_password': ui['normal_password']
}
# additional configs
for name, value in ui.iteritems():
if not name.startswith('_'):
continue
data['@' + name] = value
return data
def main_dict_to_ui(data):
ui = {
'show_advanced': data['@show_advanced'],
'admin_username': data['@admin_username'],
'admin_password': data['@admin_password'],
'normal_username': data['@normal_username'],
'normal_password': data['@normal_password']
}
# additional configs
for name, value in data.iteritems():
if not name.startswith('@_'):
continue
ui[name[1:]] = value
return ui
def motion_camera_ui_to_dict(ui, old_config=None):
import meyectl
import smbctl
old_config = dict(old_config or {})
main_config = get_main() # needed for surveillance password
data = {
# device
'@name': ui['name'],
'@enabled': ui['enabled'],
'auto_brightness': ui['auto_brightness'],
'framerate': int(ui['framerate']),
'rotate': int(ui['rotation']),
# file storage
'@storage_device': ui['storage_device'],
'@network_server': ui['network_server'],
'@network_share_name': ui['network_share_name'],
'@network_username': ui['network_username'],
'@network_password': ui['network_password'],
'@upload_enabled': ui['upload_enabled'],
'@upload_movie': ui['upload_movie'],
'@upload_picture': ui['upload_picture'],
'@upload_service': ui['upload_service'],
'@upload_server': ui['upload_server'],
'@upload_port': ui['upload_port'],
'@upload_method': ui['upload_method'],
'@upload_location': ui['upload_location'],
'@upload_subfolders': ui['upload_subfolders'],
'@upload_username': ui['upload_username'],
'@upload_password': ui['upload_password'],
# text overlay
'text_left': '',
'text_right': '',
'text_double': False,
# streaming
'stream_localhost': not ui['video_streaming'],
'stream_port': int(ui['streaming_port']),
'stream_maxrate': int(ui['streaming_framerate']),
'stream_quality': max(1, int(ui['streaming_quality'])),
'@webcam_resolution': max(1, int(ui['streaming_resolution'])),
'@webcam_server_resize': ui['streaming_server_resize'],
'stream_motion': ui['streaming_motion'],
'stream_auth_method': {'disabled': 0, 'basic': 1, 'digest': 2}.get(ui['streaming_auth_mode'], 0),
'stream_authentication': main_config['@normal_username'] + ':' + main_config['@normal_password'],
# still images
'output_pictures': False,
'snapshot_interval': 0,
'picture_filename': '',
'snapshot_filename': '',
'quality': max(1, int(ui['image_quality'])),
'@preserve_pictures': int(ui['preserve_pictures']),
# movies
'ffmpeg_output_movies': False,
'movie_filename': ui['movie_file_name'],
'max_movie_time': ui['max_movie_length'],
'@preserve_movies': int(ui['preserve_movies']),
# motion detection
'@motion_detection': ui['motion_detection'],
'emulate_motion': False,
'text_changes': ui['show_frame_changes'],
'locate_motion_mode': ui['show_frame_changes'],
'noise_tune': ui['auto_noise_detect'],
'noise_level': max(1, int(round(int(ui['noise_level']) * 2.55))),
'lightswitch': ui['light_switch_detect'],
'event_gap': int(ui['event_gap']),
'pre_capture': int(ui['pre_capture']),
'post_capture': int(ui['post_capture']),
'minimum_motion_frames': int(ui['minimum_motion_frames']),
'smart_mask_speed': 0,
'mask_file': '',
'output_debug_pictures': ui['create_debug_media'],
'ffmpeg_output_debug_movies': ui['create_debug_media'],
# working schedule
'@working_schedule': '',
# events
'on_event_start': '',
'on_event_end': '',
'on_movie_end': '',
'on_picture_save': ''
}
if utils.is_v4l2_camera(old_config):
proto = 'v4l2'
else:
proto = 'netcam'
if proto == 'v4l2':
# leave videodevice unchanged
# resolution
if not ui['resolution']:
ui['resolution'] = '320x240'
width = int(ui['resolution'].split('x')[0])
height = int(ui['resolution'].split('x')[1])
data['width'] = width
data['height'] = height
threshold = int(float(ui['frame_change_threshold']) * width * height / 100)
if 'brightness' in ui:
if int(ui['brightness']) == 50:
data['brightness'] = 0
else:
data['brightness'] = max(1, int(round(int(ui['brightness']) * 2.55)))
if 'contrast' in ui:
if int(ui['contrast']) == 50:
data['contrast'] = 0
else:
data['contrast'] = max(1, int(round(int(ui['contrast']) * 2.55)))
if 'saturation' in ui:
if int(ui['saturation']) == 50:
data['saturation'] = 0
else:
data['saturation'] = max(1, int(round(int(ui['saturation']) * 2.55)))
if 'hue' in ui:
if int(ui['hue']) == 50:
data['hue'] = 0
else:
data['hue'] = max(1, int(round(int(ui['hue']) * 2.55)))
else: # assuming netcam
if data.get('netcam_url', old_config.get('netcam_url', '')).startswith('rtsp'):
# motion uses the configured width and height for RTSP cameras
width = int(ui['resolution'].split('x')[0])
height = int(ui['resolution'].split('x')[1])
data['width'] = width
data['height'] = height
threshold = int(float(ui['frame_change_threshold']) * width * height / 100)
else: # width & height are not available for other netcams
threshold = int(float(ui['frame_change_threshold']) * 640 * 480 / 100)
data['threshold'] = threshold
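    # Worked example: with frame_change_threshold == '1' (percent) on a
    # 640x480 frame, threshold == int(1.0 * 640 * 480 / 100) == 3072 pixels.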
if (ui['storage_device'] == 'network-share') and settings.SMB_SHARES:
mount_point = smbctl.make_mount_point(ui['network_server'], ui['network_share_name'], ui['network_username'])
if ui['root_directory'].startswith('/'):
ui['root_directory'] = ui['root_directory'][1:]
data['target_dir'] = os.path.normpath(os.path.join(mount_point, ui['root_directory']))
elif ui['storage_device'].startswith('local-disk'):
target_dev = ui['storage_device'][10:].replace('-', '/')
mounted_partitions = diskctl.list_mounted_partitions()
partition = mounted_partitions[target_dev]
mount_point = partition['mount_point']
if ui['root_directory'].startswith('/'):
ui['root_directory'] = ui['root_directory'][1:]
data['target_dir'] = os.path.normpath(os.path.join(mount_point, ui['root_directory']))
else:
data['target_dir'] = ui['root_directory']
# try to create the target dir
try:
os.makedirs(data['target_dir'])
logging.debug('created root directory %s for camera %s' % (data['target_dir'], data['@name']))
except Exception as e:
if isinstance(e, OSError) and e.errno == errno.EEXIST:
pass # already exists, things should be just fine
else:
logging.error('failed to create root directory "%s": %s' % (data['target_dir'], e), exc_info=True)
if ui['upload_enabled'] and '@id' in old_config:
upload_settings = {k[7:]: ui[k] for k in ui.iterkeys() if k.startswith('upload_')}
tasks.add(0, uploadservices.update, tag='uploadservices.update(%s)' % ui['upload_service'],
camera_id=old_config['@id'], service_name=ui['upload_service'], settings=upload_settings)
if ui['text_overlay']:
left_text = ui['left_text']
if left_text == 'camera-name':
data['text_left'] = ui['name']
elif left_text == 'timestamp':
data['text_left'] = '%Y-%m-%d\\n%T'
elif left_text == 'disabled':
data['text_left'] = ''
else:
data['text_left'] = ui['custom_left_text']
right_text = ui['right_text']
if right_text == 'camera-name':
data['text_right'] = ui['name']
elif right_text == 'timestamp':
data['text_right'] = '%Y-%m-%d\\n%T'
elif right_text == 'disabled':
data['text_right'] = ''
else:
data['text_right'] = ui['custom_right_text']
if proto == 'netcam' or data['width'] > 320:
data['text_double'] = True
if ui['still_images']:
capture_mode = ui['capture_mode']
if capture_mode == 'motion-triggered':
data['output_pictures'] = True
data['picture_filename'] = ui['image_file_name']
elif capture_mode == 'interval-snapshots':
data['snapshot_interval'] = int(ui['snapshot_interval'])
data['snapshot_filename'] = ui['image_file_name']
elif capture_mode == 'all-frames':
data['output_pictures'] = True
data['emulate_motion'] = True
data['picture_filename'] = ui['image_file_name']
if ui['movies']:
data['ffmpeg_output_movies'] = True
recording_mode = ui['recording_mode']
if recording_mode == 'motion-triggered':
data['emulate_motion'] = False
elif recording_mode == 'continuous':
data['emulate_motion'] = True
data['ffmpeg_video_codec'] = ui['movie_format']
q = int(ui['movie_quality'])
if motionctl.needs_ffvb_quirks():
if data['ffmpeg_video_codec'] in _EXPONENTIAL_QUALITY_CODECS:
vbr = max(1, _MAX_FFMPEG_VARIABLE_BITRATE * (1 - math.log(max(1, q * _EXPONENTIAL_QUALITY_FACTOR), _EXPONENTIAL_QUALITY_FACTOR * 100)))
else:
vbr = 1 + (_MAX_FFMPEG_VARIABLE_BITRATE - 1) / 100.0 * (100 - q)
else:
vbr = max(1, q)
data['ffmpeg_variable_bitrate'] = int(vbr)
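        # Worked examples of the quality -> ffmpeg_variable_bitrate mapping
        # above: q == 100 yields vbr == 1 in both branches (motion's best
        # quality); q == 1 yields roughly 9362 for the exponential codecs and
        # roughly 32439 for the linear branch.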
# motion detection
if ui['despeckle_filter']:
data['despeckle_filter'] = old_config['despeckle_filter'] or 'EedDl'
else:
data['despeckle_filter'] = ''
if ui['mask']:
if ui['mask_type'] == 'smart':
data['smart_mask_speed'] = 10 - int(ui['smart_mask_sluggishness'])
elif ui['mask_type'] == 'editable':
capture_width, capture_height = data.get('width'), data.get('height')
if data.get('rotate') in [90, 270]:
capture_width, capture_height = capture_height, capture_width
data['mask_file'] = utils.build_editable_mask_file(old_config['@id'], ui['mask_lines'], capture_width, capture_height)
# working schedule
if ui['working_schedule']:
data['@working_schedule'] = (
ui['monday_from'] + '-' + ui['monday_to'] + '|' +
ui['tuesday_from'] + '-' + ui['tuesday_to'] + '|' +
ui['wednesday_from'] + '-' + ui['wednesday_to'] + '|' +
ui['thursday_from'] + '-' + ui['thursday_to'] + '|' +
ui['friday_from'] + '-' + ui['friday_to'] + '|' +
ui['saturday_from'] + '-' + ui['saturday_to'] + '|' +
ui['sunday_from'] + '-' + ui['sunday_to'])
data['@working_schedule_type'] = ui['working_schedule_type']
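        # The resulting '@working_schedule' value is seven '|'-separated
        # 'from-to' ranges in Monday..Sunday order, e.g. (times illustrative):
        # '09:00-17:00|09:00-17:00|09:00-17:00|09:00-17:00|09:00-17:00|09:00-13:00|09:00-13:00'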
# event start
on_event_start = ['%(script)s start %%t' % {'script': meyectl.find_command('relayevent')}]
if ui['email_notifications_enabled']:
emails = re.sub('\\s', '', ui['email_notifications_addresses'])
on_event_start.append("%(script)s '%(server)s' '%(port)s' '%(account)s' '%(password)s' '%(tls)s' '%(from)s' '%(to)s' 'motion_start' '%%t' '%%Y-%%m-%%dT%%H:%%M:%%S' '%(timespan)s'" % {
'script': meyectl.find_command('sendmail'),
'server': ui['email_notifications_smtp_server'],
'port': ui['email_notifications_smtp_port'],
'account': ui['email_notifications_smtp_account'],
'password': ui['email_notifications_smtp_password'].replace(';', '\\;').replace('%', '%%'),
'tls': ui['email_notifications_smtp_tls'],
'from': ui['email_notifications_from'],
'to': emails,
'timespan': ui['email_notifications_picture_time_span']})
if ui['web_hook_notifications_enabled']:
url = re.sub('\\s', '+', ui['web_hook_notifications_url'])
on_event_start.append("%(script)s '%(method)s' '%(url)s'" % {
'script': meyectl.find_command('webhook'),
'method': ui['web_hook_notifications_http_method'],
'url': url})
if ui['command_notifications_enabled']:
on_event_start += utils.split_semicolon(ui['command_notifications_exec'])
data['on_event_start'] = '; '.join(on_event_start)
# event end
on_event_end = ['%(script)s stop %%t' % {'script': meyectl.find_command('relayevent')}]
data['on_event_end'] = '; '.join(on_event_end)
# movie end
on_movie_end = ['%(script)s movie_end %%t %%f' % {'script': meyectl.find_command('relayevent')}]
if ui['web_hook_storage_enabled']:
url = re.sub('\\s', '+', ui['web_hook_storage_url'])
on_movie_end.append("%(script)s '%(method)s' '%(url)s'" % {
'script': meyectl.find_command('webhook'),
'method': ui['web_hook_storage_http_method'],
'url': url})
if ui['command_storage_enabled']:
on_movie_end += utils.split_semicolon(ui['command_storage_exec'])
data['on_movie_end'] = '; '.join(on_movie_end)
# picture save
on_picture_save = ['%(script)s picture_save %%t %%f' % {'script': meyectl.find_command('relayevent')}]
if ui['web_hook_storage_enabled']:
url = re.sub('\\s', '+', ui['web_hook_storage_url'])
on_picture_save.append("%(script)s '%(method)s' '%(url)s'" % {
'script': meyectl.find_command('webhook'),
'method': ui['web_hook_storage_http_method'],
'url': url})
if ui['command_storage_enabled']:
on_picture_save += utils.split_semicolon(ui['command_storage_exec'])
data['on_picture_save'] = '; '.join(on_picture_save)
# additional configs
for name, value in ui.iteritems():
if not name.startswith('_'):
continue
data['@' + name] = value
# extra motion options
for name in old_config.keys():
if name not in _KNOWN_MOTION_OPTIONS and not name.startswith('@'):
old_config.pop(name)
extra_options = ui.get('extra_options', [])
for name, value in extra_options:
data[name] = value or ''
old_config.update(data)
return old_config
def motion_camera_dict_to_ui(data):
import smbctl
ui = {
# device
'name': data['@name'],
'enabled': data['@enabled'],
'id': data['@id'],
'auto_brightness': data['auto_brightness'],
'framerate': int(data['framerate']),
'rotation': int(data['rotate']),
# file storage
'smb_shares': settings.SMB_SHARES,
'storage_device': data['@storage_device'],
'network_server': data['@network_server'],
'network_share_name': data['@network_share_name'],
'network_username': data['@network_username'],
'network_password': data['@network_password'],
'disk_used': 0,
'disk_total': 0,
'available_disks': diskctl.list_mounted_disks(),
'upload_enabled': data['@upload_enabled'],
'upload_picture': data['@upload_picture'],
'upload_movie': data['@upload_movie'],
'upload_service': data['@upload_service'],
'upload_server': data['@upload_server'],
'upload_port': data['@upload_port'],
'upload_method': data['@upload_method'],
'upload_location': data['@upload_location'],
'upload_subfolders': data['@upload_subfolders'],
'upload_username': data['@upload_username'],
'upload_password': data['@upload_password'],
'upload_authorization_key': '', # needed, otherwise the field is hidden
'web_hook_storage_enabled': False,
'command_storage_enabled': False,
# text overlay
'text_overlay': False,
'left_text': 'camera-name',
'right_text': 'timestamp',
'custom_left_text': '',
'custom_right_text': '',
# streaming
'video_streaming': not data['stream_localhost'],
'streaming_framerate': int(data['stream_maxrate']),
'streaming_quality': int(data['stream_quality']),
'streaming_resolution': int(data['@webcam_resolution']),
'streaming_server_resize': data['@webcam_server_resize'],
'streaming_port': int(data['stream_port']),
'streaming_auth_mode': {0: 'disabled', 1: 'basic', 2: 'digest'}.get(data.get('stream_auth_method'), 'disabled'),
'streaming_motion': int(data['stream_motion']),
# still images
'still_images': False,
'capture_mode': 'motion-triggered',
'image_file_name': '%Y-%m-%d/%H-%M-%S',
'image_quality': data['quality'],
'snapshot_interval': 0,
'preserve_pictures': data['@preserve_pictures'],
# movies
'movies': False,
'recording_mode': 'motion-triggered',
'movie_file_name': data['movie_filename'],
'max_movie_length': data['max_movie_time'],
'preserve_movies': data['@preserve_movies'],
# motion detection
'motion_detection': data['@motion_detection'],
'show_frame_changes': data['text_changes'] or data['locate_motion_mode'],
'auto_noise_detect': data['noise_tune'],
'noise_level': int(int(data['noise_level']) / 2.55),
'light_switch_detect': data['lightswitch'],
'despeckle_filter': data['despeckle_filter'],
'event_gap': int(data['event_gap']),
'pre_capture': int(data['pre_capture']),
'post_capture': int(data['post_capture']),
'minimum_motion_frames': int(data['minimum_motion_frames']),
'mask': False,
'mask_type': 'smart',
'smart_mask_sluggishness': 5,
'mask_lines': [],
'create_debug_media': data['ffmpeg_output_debug_movies'] or data['output_debug_pictures'],
# motion notifications
'email_notifications_enabled': False,
'web_hook_notifications_enabled': False,
'command_notifications_enabled': False,
# working schedule
'working_schedule': False,
'working_schedule_type': 'during',
'monday_from': '', 'monday_to': '',
'tuesday_from': '', 'tuesday_to': '',
'wednesday_from': '', 'wednesday_to': '',
'thursday_from': '', 'thursday_to': '',
'friday_from': '', 'friday_to': '',
'saturday_from': '', 'saturday_to': '',
'sunday_from': '', 'sunday_to': ''
}
if utils.is_net_camera(data):
ui['device_url'] = data['netcam_url']
ui['proto'] = 'netcam'
# resolutions
if data['netcam_url'].startswith('rtsp'):
# motion uses the configured width and height for RTSP cameras
resolutions = utils.COMMON_RESOLUTIONS
resolutions = [r for r in resolutions if motionctl.resolution_is_valid(*r)]
ui['available_resolutions'] = [(str(w) + 'x' + str(h)) for (w, h) in resolutions]
ui['resolution'] = str(data['width']) + 'x' + str(data['height'])
threshold = data['threshold'] * 100.0 / (data['width'] * data['height'])
else: # width & height are not available for other netcams
            # we have no other choice but to use something like 640x480 as a reference
threshold = data['threshold'] * 100.0 / (640 * 480)
else: # assuming v4l2
ui['device_url'] = data['videodevice']
ui['proto'] = 'v4l2'
# resolutions
resolutions = v4l2ctl.list_resolutions(data['videodevice'])
ui['available_resolutions'] = [(str(w) + 'x' + str(h)) for (w, h) in resolutions]
ui['resolution'] = str(data['width']) + 'x' + str(data['height'])
# the brightness & co. keys in the ui dictionary
# indicate the presence of these controls
# we must call v4l2ctl functions to determine the available controls
brightness = v4l2ctl.get_brightness(data['videodevice'])
if brightness is not None: # has brightness control
if data.get('brightness', 0) != 0:
ui['brightness'] = brightness
else:
ui['brightness'] = 50
contrast = v4l2ctl.get_contrast(data['videodevice'])
if contrast is not None: # has contrast control
if data.get('contrast', 0) != 0:
ui['contrast'] = contrast
else:
ui['contrast'] = 50
saturation = v4l2ctl.get_saturation(data['videodevice'])
if saturation is not None: # has saturation control
if data.get('saturation', 0) != 0:
ui['saturation'] = saturation
else:
ui['saturation'] = 50
hue = v4l2ctl.get_hue(data['videodevice'])
if hue is not None: # has hue control
if data.get('hue', 0) != 0:
ui['hue'] = hue
else:
ui['hue'] = 50
threshold = data['threshold'] * 100.0 / (data['width'] * data['height'])
ui['frame_change_threshold'] = threshold
if (data['@storage_device'] == 'network-share') and settings.SMB_SHARES:
mount_point = smbctl.make_mount_point(data['@network_server'], data['@network_share_name'], data['@network_username'])
ui['root_directory'] = data['target_dir'][len(mount_point):] or '/'
elif data['@storage_device'].startswith('local-disk'):
target_dev = data['@storage_device'][10:].replace('-', '/')
mounted_partitions = diskctl.list_mounted_partitions()
for partition in mounted_partitions.values():
if partition['target'] == target_dev and data['target_dir'].startswith(partition['mount_point']):
ui['root_directory'] = data['target_dir'][len(partition['mount_point']):] or '/'
break
else: # not found for some reason
logging.error('could not find mounted partition for device "%s" and target dir "%s"' % (target_dev, data['target_dir']))
ui['root_directory'] = data['target_dir']
else:
ui['root_directory'] = data['target_dir']
# disk usage
usage = None
if os.path.exists(data['target_dir']):
usage = utils.get_disk_usage(data['target_dir'])
if usage:
ui['disk_used'], ui['disk_total'] = usage
text_left = data['text_left']
text_right = data['text_right']
if text_left or text_right:
ui['text_overlay'] = True
if text_left == data['@name']:
ui['left_text'] = 'camera-name'
elif text_left == '%Y-%m-%d\\n%T':
ui['left_text'] = 'timestamp'
elif text_left == '':
ui['left_text'] = 'disabled'
else:
ui['left_text'] = 'custom-text'
ui['custom_left_text'] = text_left
if text_right == data['@name']:
ui['right_text'] = 'camera-name'
elif text_right == '%Y-%m-%d\\n%T':
ui['right_text'] = 'timestamp'
elif text_right == '':
ui['right_text'] = 'disabled'
else:
ui['right_text'] = 'custom-text'
ui['custom_right_text'] = text_right
emulate_motion = data['emulate_motion']
output_pictures = data['output_pictures']
picture_filename = data['picture_filename']
snapshot_interval = data['snapshot_interval']
snapshot_filename = data['snapshot_filename']
ui['still_images'] = (((emulate_motion or output_pictures) and picture_filename) or
(snapshot_interval and snapshot_filename))
if emulate_motion:
ui['capture_mode'] = 'all-frames'
if picture_filename:
ui['image_file_name'] = picture_filename
elif snapshot_interval:
ui['capture_mode'] = 'interval-snapshots'
ui['snapshot_interval'] = snapshot_interval
if snapshot_filename:
ui['image_file_name'] = snapshot_filename
elif output_pictures:
ui['capture_mode'] = 'motion-triggered'
if picture_filename:
ui['image_file_name'] = picture_filename
if data['ffmpeg_output_movies']:
ui['movies'] = True
if emulate_motion:
ui['recording_mode'] = 'continuous'
else:
ui['recording_mode'] = 'motion-triggered'
ui['movie_format'] = data['ffmpeg_video_codec']
bitrate = data['ffmpeg_variable_bitrate']
if motionctl.needs_ffvb_quirks():
if data['ffmpeg_video_codec'] in _EXPONENTIAL_QUALITY_CODECS:
q = (100 * _EXPONENTIAL_QUALITY_FACTOR) ** ((1 - float(bitrate) / _MAX_FFMPEG_VARIABLE_BITRATE)) / _EXPONENTIAL_QUALITY_FACTOR
else:
q = 100 - (bitrate - 1) * 100.0 / (_MAX_FFMPEG_VARIABLE_BITRATE - 1)
ui['movie_quality'] = int(q)
else:
ui['movie_quality'] = bitrate
# mask
if data['mask_file']:
ui['mask'] = True
ui['mask_type'] = 'editable'
capture_width, capture_height = data.get('width'), data.get('height')
if int(data.get('rotate')) in [90, 270]:
capture_width, capture_height = capture_height, capture_width
ui['mask_lines'] = utils.parse_editable_mask_file(data['@id'], capture_width, capture_height)
elif data['smart_mask_speed']:
ui['mask'] = True
ui['mask_type'] = 'smart'
ui['smart_mask_sluggishness'] = 10 - data['smart_mask_speed']
# working schedule
working_schedule = data['@working_schedule']
if working_schedule:
days = working_schedule.split('|')
ui['working_schedule'] = True
ui['monday_from'], ui['monday_to'] = days[0].split('-')
ui['tuesday_from'], ui['tuesday_to'] = days[1].split('-')
ui['wednesday_from'], ui['wednesday_to'] = days[2].split('-')
ui['thursday_from'], ui['thursday_to'] = days[3].split('-')
ui['friday_from'], ui['friday_to'] = days[4].split('-')
ui['saturday_from'], ui['saturday_to'] = days[5].split('-')
ui['sunday_from'], ui['sunday_to'] = days[6].split('-')
ui['working_schedule_type'] = data['@working_schedule_type']
# event start
on_event_start = data.get('on_event_start') or []
if on_event_start:
on_event_start = utils.split_semicolon(on_event_start)
ui['email_notifications_picture_time_span'] = 0
command_notifications = []
for e in on_event_start:
if e.count('sendmail'):
e = shlex.split(e)
if len(e) < 10:
continue
if len(e) < 16:
# backwards compatibility with older configs lacking "from" field
e.insert(-5, '')
ui['email_notifications_enabled'] = True
ui['email_notifications_smtp_server'] = e[-11]
ui['email_notifications_smtp_port'] = e[-10]
ui['email_notifications_smtp_account'] = e[-9]
ui['email_notifications_smtp_password'] = e[-8].replace('\\;', ';').replace('%%', '%')
ui['email_notifications_smtp_tls'] = e[-7].lower() == 'true'
ui['email_notifications_from'] = e[-6]
ui['email_notifications_addresses'] = e[-5]
try:
ui['email_notifications_picture_time_span'] = int(e[-1])
except:
ui['email_notifications_picture_time_span'] = 0
elif e.count('webhook'):
e = shlex.split(e)
if len(e) < 3:
continue
ui['web_hook_notifications_enabled'] = True
ui['web_hook_notifications_http_method'] = e[-2]
ui['web_hook_notifications_url'] = e[-1]
elif e.count('relayevent') or e.count('eventrelay.py'):
continue # ignore internal relay script
else: # custom command
command_notifications.append(e)
if command_notifications:
ui['command_notifications_enabled'] = True
ui['command_notifications_exec'] = '; '.join(command_notifications)
# movie end
on_movie_end = data.get('on_movie_end') or []
if on_movie_end:
on_movie_end = utils.split_semicolon(on_movie_end)
command_storage = []
for e in on_movie_end:
if e.count('webhook'):
e = shlex.split(e)
if len(e) < 3:
continue
ui['web_hook_storage_enabled'] = True
ui['web_hook_storage_http_method'] = e[-2]
ui['web_hook_storage_url'] = e[-1]
elif e.count('relayevent') or e.count('eventrelay.py'):
continue # ignore internal relay script
else: # custom command
command_storage.append(e)
if command_storage:
ui['command_storage_enabled'] = True
ui['command_storage_exec'] = '; '.join(command_storage)
# additional configs
for name, value in data.iteritems():
if not name.startswith('@_'):
continue
ui[name[1:]] = value
# extra motion options
extra_options = []
for name, value in data.iteritems():
if name not in _KNOWN_MOTION_OPTIONS and not name.startswith('@'):
if isinstance(value, bool):
value = ['off', 'on'][value] # boolean values should be transferred as on/off
extra_options.append((name, value))
ui['extra_options'] = extra_options
# action commands
action_commands = get_action_commands(data['@id'])
ui['actions'] = action_commands.keys()
return ui
def simple_mjpeg_camera_ui_to_dict(ui, old_config=None):
old_config = dict(old_config or {})
data = {
# device
'@name': ui['name'],
'@enabled': ui['enabled'],
}
# additional configs
for name, value in ui.iteritems():
if not name.startswith('_'):
continue
data['@' + name] = value
old_config.update(data)
return old_config
def simple_mjpeg_camera_dict_to_ui(data):
ui = {
'name': data['@name'],
'enabled': data['@enabled'],
'id': data['@id'],
'proto': 'mjpeg',
'url': data['@url']
}
# additional configs
for name, value in data.iteritems():
if not name.startswith('@_'):
continue
ui[name[1:]] = value
# action commands
action_commands = get_action_commands(data['@id'])
ui['actions'] = action_commands.keys()
return ui
def get_action_commands(camera_id):
action_commands = {}
for action in _ACTIONS:
path = os.path.join(settings.CONF_PATH, '%s_%s' % (action, camera_id))
if os.access(path, os.X_OK):
action_commands[action] = path
return action_commands
def get_monitor_command(camera_id):
if camera_id not in _monitor_command_cache:
path = os.path.join(settings.CONF_PATH, 'monitor_%s' % camera_id)
if os.access(path, os.X_OK):
_monitor_command_cache[camera_id] = path
else:
_monitor_command_cache[camera_id] = None
return _monitor_command_cache[camera_id]
def invalidate_monitor_commands():
_monitor_command_cache.clear()
def backup():
logging.debug('generating config backup file')
if len(os.listdir(settings.CONF_PATH)) > 100:
logging.debug('config path "%s" appears to be a system-wide config directory, performing a selective backup' % settings.CONF_PATH)
cmd = ['tar', 'zc', 'motion.conf']
cmd += map(os.path.basename, glob.glob(os.path.join(settings.CONF_PATH, 'thread-*.conf')))
try:
content = subprocess.check_output(cmd, cwd=settings.CONF_PATH)
logging.debug('backup file created (%s bytes)' % len(content))
return content
except Exception as e:
logging.error('backup failed: %s' % e, exc_info=True)
return None
else:
logging.debug('config path "%s" appears to be a motion-specific config directory, performing a full backup' % settings.CONF_PATH)
try:
content = subprocess.check_output(['tar', 'zc', '.'], cwd=settings.CONF_PATH)
logging.debug('backup file created (%s bytes)' % len(content))
return content
except Exception as e:
logging.error('backup failed: %s' % e, exc_info=True)
return None
def restore(content):
global _main_config_cache
global _camera_config_cache
global _camera_ids_cache
global _additional_structure_cache
logging.info('restoring config from backup file')
cmd = ['tar', 'zxC', settings.CONF_PATH]
try:
p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
msg = p.communicate(content)[0]
if msg:
logging.error('failed to restore configuration: %s' % msg)
return False
logging.debug('configuration restored successfully')
if settings.ENABLE_REBOOT:
def later():
powerctl.reboot()
io_loop = IOLoop.instance()
io_loop.add_timeout(datetime.timedelta(seconds=2), later)
else:
invalidate()
return {'reboot': settings.ENABLE_REBOOT}
except Exception as e:
logging.error('failed to restore configuration: %s' % e, exc_info=True)
return None
def invalidate():
global _main_config_cache
global _camera_config_cache
global _camera_ids_cache
global _additional_structure_cache
logging.debug('invalidating config cache')
_main_config_cache = None
_camera_config_cache = {}
_camera_ids_cache = None
_additional_structure_cache = {}
def _value_to_python(value):
value_lower = value.lower()
if value_lower == 'off':
return False
elif value_lower == 'on':
return True
try:
return int(value)
except ValueError:
try:
return float(value)
except ValueError:
return value
def _python_to_value(value):
if value is True:
return 'on'
elif value is False:
return 'off'
elif isinstance(value, (int, float)):
return str(value)
else:
return value
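# A quick sketch of the round trip performed by the two helpers above
# (illustrative values only):
#   _value_to_python('on')    -> True
#   _value_to_python('off')   -> False
#   _value_to_python('2')     -> 2
#   _value_to_python('2.5')   -> 2.5
#   _value_to_python('%Y-%m') -> '%Y-%m'   (left as a plain string)
#   _python_to_value(True)    -> 'on'
#   _python_to_value(5)       -> '5'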
def _conf_to_dict(lines, list_names=[], no_convert=[]):
data = utils.OrderedDict()
for line in lines:
line = line.strip()
if len(line) == 0: # empty line
continue
match = re.match('^\#\s*(\@\w+)\s*(.*)', line)
if match:
name, value = match.groups()[:2]
elif line.startswith('#') or line.startswith(';'): # comment line
continue
else:
parts = line.split(None, 1)
if len(parts) == 1: # empty value
parts.append('')
(name, value) = parts
value = value.strip()
if name not in no_convert:
value = _value_to_python(value)
if name in list_names:
data.setdefault(name, []).append(value)
else:
data[name] = value
return data
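# Minimal illustration of _conf_to_dict (hypothetical config lines):
#   lines = ['framerate 2', '# @motion_detection on', 'text_left Camera1']
#   _conf_to_dict(lines)
#   -> {'framerate': 2, '@motion_detection': True, 'text_left': 'Camera1'}
# (returned as an OrderedDict; values are converted via _value_to_python)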
def _dict_to_conf(lines, data, list_names=[]):
conf_lines = []
remaining = utils.OrderedDict(data)
processed = set()
# parse existing lines and replace the values
for line in lines:
line = line.strip()
if len(line) == 0: # empty line
conf_lines.append(line)
continue
match = re.match('^\#\s*(\@\w+)\s*(.*)', line)
if match: # @line
(name, value) = match.groups()[:2]
elif line.startswith('#') or line.startswith(';'): # simple comment line
conf_lines.append(line)
continue
else:
parts = line.split(None, 1)
if len(parts) == 2:
(name, value) = parts
else:
(name, value) = parts[0], ''
if name in processed:
continue # name already processed
processed.add(name)
if name in list_names:
new_value = data.get(name)
if new_value is not None:
for v in new_value:
if v is None:
continue
line = name + ' ' + _python_to_value(v)
conf_lines.append(line)
else:
line = name + ' ' + value
conf_lines.append(line)
else:
new_value = data.get(name)
if new_value is not None:
value = _python_to_value(new_value)
line = name + ' ' + value
conf_lines.append(line)
remaining.pop(name, None)
# add the remaining config values not covered by existing lines
if len(remaining) and len(lines):
conf_lines.append('') # add a blank line
for (name, value) in remaining.iteritems():
if name.startswith('@_'):
continue # ignore additional configs
if name in list_names:
for v in value:
if v is None:
continue
line = name + ' ' + _python_to_value(v)
conf_lines.append(line)
else:
line = name + ' ' + _python_to_value(value)
conf_lines.append(line)
# build the final config lines
conf_lines.sort(key=lambda l: not l.startswith('@'))
lines = []
for i, line in enumerate(conf_lines):
# squeeze successive blank lines
if i > 0 and len(line.strip()) == 0 and len(conf_lines[i - 1].strip()) == 0:
continue
if line.startswith('@'):
line = '# ' + line
elif i > 0 and conf_lines[i - 1].startswith('@'):
lines.append('') # add a blank line between @lines and the rest
lines.append(line)
return lines
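# Sketch of the inverse direction (hypothetical data): given the existing
# line 'framerate 2' and data {'framerate': 5, '@enabled': True}, the
# 'framerate' line is kept but rewritten with the new value, while the
# remaining '@enabled' setting is emitted as a commented '# @enabled on'
# line sorted before the plain motion options.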
def _set_default_motion(data, old_config_format):
data.setdefault('@enabled', True)
data.setdefault('@show_advanced', False)
data.setdefault('@admin_username', 'admin')
data.setdefault('@admin_password', '')
data.setdefault('@normal_username', 'user')
data.setdefault('@normal_password', '')
data.setdefault('setup_mode', False)
if old_config_format:
data.setdefault('control_port', settings.MOTION_CONTROL_PORT)
data.setdefault('control_html_output', True)
data.setdefault('control_localhost', settings.MOTION_CONTROL_LOCALHOST)
else:
data.setdefault('webcontrol_port', settings.MOTION_CONTROL_PORT)
data.setdefault('webcontrol_html_output', True)
data.setdefault('webcontrol_localhost', settings.MOTION_CONTROL_LOCALHOST)
def _set_default_motion_camera(camera_id, data):
data.setdefault('@name', 'Camera' + str(camera_id))
data.setdefault('@id', camera_id)
if not utils.is_net_camera(data):
data.setdefault('videodevice', '/dev/video0')
data.setdefault('brightness', 0)
data.setdefault('contrast', 0)
data.setdefault('saturation', 0)
data.setdefault('hue', 0)
data.setdefault('width', 352)
data.setdefault('height', 288)
data.setdefault('auto_brightness', False)
data.setdefault('framerate', 2)
data.setdefault('rotate', 0)
data.setdefault('@storage_device', 'custom-path')
data.setdefault('@network_server', '')
data.setdefault('@network_share_name', '')
data.setdefault('@network_username', '')
data.setdefault('@network_password', '')
data.setdefault('target_dir', os.path.join(settings.MEDIA_PATH, data['@name']))
data.setdefault('@upload_enabled', False)
data.setdefault('@upload_picture', True)
data.setdefault('@upload_movie', True)
data.setdefault('@upload_service', 'ftp')
data.setdefault('@upload_server', '')
data.setdefault('@upload_port', '')
data.setdefault('@upload_method', 'POST')
data.setdefault('@upload_location', '')
data.setdefault('@upload_subfolders', True)
data.setdefault('@upload_username', '')
data.setdefault('@upload_password', '')
data.setdefault('stream_localhost', False)
data.setdefault('stream_port', 8080 + camera_id)
data.setdefault('stream_maxrate', 5)
data.setdefault('stream_quality', 85)
data.setdefault('stream_motion', False)
data.setdefault('stream_auth_method', 0)
data.setdefault('@webcam_resolution', 100)
data.setdefault('@webcam_server_resize', False)
data.setdefault('text_left', data['@name'])
data.setdefault('text_right', '%Y-%m-%d\\n%T')
data.setdefault('text_double', False)
data.setdefault('@motion_detection', True)
data.setdefault('text_changes', False)
data.setdefault('locate_motion_mode', False)
data.setdefault('locate_motion_style', 'redbox')
data.setdefault('threshold', 2000)
data.setdefault('noise_tune', True)
data.setdefault('noise_level', 32)
data.setdefault('lightswitch', 0)
data.setdefault('despeckle_filter', '')
data.setdefault('minimum_motion_frames', 20)
data.setdefault('smart_mask_speed', 0)
data.setdefault('mask_file', '')
data.setdefault('ffmpeg_output_debug_movies', False)
data.setdefault('output_debug_pictures', False)
data.setdefault('pre_capture', 1)
data.setdefault('post_capture', 1)
data.setdefault('output_pictures', False)
data.setdefault('picture_filename', '')
data.setdefault('emulate_motion', False)
data.setdefault('event_gap', 30)
data.setdefault('snapshot_interval', 0)
data.setdefault('snapshot_filename', '')
data.setdefault('quality', 85)
data.setdefault('@preserve_pictures', 0)
data.setdefault('movie_filename', '%Y-%m-%d/%H-%M-%S')
data.setdefault('max_movie_time', 0)
data.setdefault('ffmpeg_output_movies', False)
if motionctl.has_new_movie_format_support():
data.setdefault('ffmpeg_video_codec', 'mp4') # will use h264 codec
if motionctl.needs_ffvb_quirks():
data.setdefault('ffmpeg_variable_bitrate', _MAX_FFMPEG_VARIABLE_BITRATE / 4) # 75%
else:
data.setdefault('ffmpeg_variable_bitrate', 75) # 75%
else:
data.setdefault('ffmpeg_video_codec', 'msmpeg4')
data.setdefault('ffmpeg_variable_bitrate', _EXPONENTIAL_DEF_QUALITY)
data.setdefault('@preserve_movies', 0)
data.setdefault('@working_schedule', '')
data.setdefault('@working_schedule_type', 'outside')
data.setdefault('on_event_start', '')
data.setdefault('on_event_end', '')
data.setdefault('on_movie_end', '')
data.setdefault('on_picture_save', '')
def _set_default_simple_mjpeg_camera(camera_id, data):
data.setdefault('@name', 'Camera' + str(camera_id))
data.setdefault('@id', camera_id)
def get_additional_structure(camera, separators=False):
if _additional_structure_cache.get((camera, separators)) is None:
logging.debug('loading additional config structure for %s, %s separators' % (
'camera' if camera else 'main',
'with' if separators else 'without'))
# gather sections
sections = utils.OrderedDict()
for func in _additional_section_funcs:
result = func()
if not result:
continue
if result.get('reboot') and not settings.ENABLE_REBOOT:
continue
if bool(result.get('camera')) != bool(camera):
continue
result['name'] = func.func_name
sections[func.func_name] = result
logging.debug('additional config section: %s' % result['name'])
configs = utils.OrderedDict()
for func in _additional_config_funcs:
result = func()
if not result:
continue
if result.get('reboot') and not settings.ENABLE_REBOOT:
continue
if bool(result.get('camera')) != bool(camera):
continue
if result['type'] == 'separator' and not separators:
continue
result['name'] = func.func_name
configs[func.func_name] = result
section = sections.setdefault(result.get('section'), {})
section.setdefault('configs', []).append(result)
logging.debug('additional config item: %s' % result['name'])
_additional_structure_cache[(camera, separators)] = sections, configs
return _additional_structure_cache[(camera, separators)]
def _get_additional_config(data, camera_id=None):
args = [camera_id] if camera_id else []
(sections, configs) = get_additional_structure(camera=bool(camera_id))
get_funcs = set([c.get('get') for c in configs.itervalues() if c.get('get')])
get_func_values = collections.OrderedDict((f, f(*args)) for f in get_funcs)
for name, section in sections.iteritems():
if not section.get('get'):
continue
if section.get('get_set_dict'):
data['@_' + name] = get_func_values.get(section['get'], {}).get(name)
else:
data['@_' + name] = get_func_values.get(section['get'])
for name, config in configs.iteritems():
if not config.get('get'):
continue
if config.get('get_set_dict'):
data['@_' + name] = get_func_values.get(config['get'], {}).get(name)
else:
data['@_' + name] = get_func_values.get(config['get'])
def _set_additional_config(data, camera_id=None):
args = [camera_id] if camera_id else []
(sections, configs) = get_additional_structure(camera=bool(camera_id))
set_func_values = collections.OrderedDict()
for name, section in sections.iteritems():
if not section.get('set'):
continue
if ('@_' + name) not in data:
continue
if section.get('get_set_dict'):
set_func_values.setdefault(section['set'], {})[name] = data['@_' + name]
else:
set_func_values[section['set']] = data['@_' + name]
for name, config in configs.iteritems():
if not config.get('set'):
continue
if ('@_' + name) not in data:
continue
if config.get('get_set_dict'):
set_func_values.setdefault(config['set'], {})[name] = data['@_' + name]
else:
set_func_values[config['set']] = data['@_' + name]
for func, value in set_func_values.iteritems():
func(*(args + [value]))
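# Note on the two helpers above: every additional UI setting travels inside
# the config dict under an '@_<name>' key. Items or sections flagged with
# 'get_set_dict' share one getter/setter that exchanges a dict keyed by item
# name, whereas plain items pass a single value to/from their callback.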
| AffzPedro/motioneye | motioneye/config.py | Python | gpl-3.0 | 70,399 |
# (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from jinja2.runtime import Context
from ansible.compat.tests import unittest
from ansible.compat.tests.mock import patch
from ansible import constants as C
from ansible.errors import AnsibleError, AnsibleUndefinedVariable
from ansible.module_utils.six import string_types
from ansible.template import Templar, AnsibleContext, AnsibleEnvironment
from ansible.utils.unsafe_proxy import AnsibleUnsafe, wrap_var
from units.mock.loader import DictDataLoader
class BaseTemplar(object):
def setUp(self):
self.test_vars = dict(
foo="bar",
bam="{{foo}}",
num=1,
var_true=True,
var_false=False,
var_dict=dict(a="b"),
bad_dict="{a='b'",
var_list=[1],
recursive="{{recursive}}",
some_var="blip",
some_static_var="static_blip",
some_keyword="{{ foo }}",
some_unsafe_var=wrap_var("unsafe_blip"),
some_static_unsafe_var=wrap_var("static_unsafe_blip"),
some_unsafe_keyword=wrap_var("{{ foo }}"),
)
self.fake_loader = DictDataLoader({
"/path/to/my_file.txt": "foo\n",
})
self.templar = Templar(loader=self.fake_loader, variables=self.test_vars)
def is_unsafe(self, obj):
if obj is None:
return False
if hasattr(obj, '__UNSAFE__'):
return True
if isinstance(obj, AnsibleUnsafe):
return True
if isinstance(obj, dict):
for key in obj.keys():
if self.is_unsafe(key) or self.is_unsafe(obj[key]):
return True
if isinstance(obj, list):
for item in obj:
if self.is_unsafe(item):
return True
if isinstance(obj, string_types) and hasattr(obj, '__UNSAFE__'):
return True
return False
# class used for testing arbitrary objects passed to template
class SomeClass(object):
foo = 'bar'
def __init__(self):
self.blip = 'blip'
class SomeUnsafeClass(AnsibleUnsafe):
def __init__(self):
super(SomeUnsafeClass, self).__init__()
self.blip = 'unsafe blip'
class TestTemplarTemplate(BaseTemplar, unittest.TestCase):
def test_lookup_jinja_dict_key_in_static_vars(self):
res = self.templar.template("{'some_static_var': '{{ some_var }}'}",
static_vars=['some_static_var'])
# self.assertEqual(res['{{ a_keyword }}'], "blip")
print(res)
def test_templatable(self):
res = self.templar.templatable('foo')
self.assertTrue(res)
def test_templatable_none(self):
res = self.templar.templatable(None)
self.assertTrue(res)
@patch('ansible.template.Templar.template', side_effect=AnsibleError)
def test_templatable_exception(self, mock_template):
res = self.templar.templatable('foo')
self.assertFalse(res)
def test_template_convert_bare_string(self):
res = self.templar.template('foo', convert_bare=True)
self.assertEqual(res, 'bar')
def test_template_convert_bare_nested(self):
res = self.templar.template('bam', convert_bare=True)
self.assertEqual(res, 'bar')
def test_template_convert_bare_unsafe(self):
res = self.templar.template('some_unsafe_var', convert_bare=True)
self.assertEqual(res, 'unsafe_blip')
# self.assertIsInstance(res, AnsibleUnsafe)
self.assertTrue(self.is_unsafe(res), 'returned value from template.template (%s) is not marked unsafe' % res)
def test_template_convert_bare_filter(self):
res = self.templar.template('bam|capitalize', convert_bare=True)
self.assertEqual(res, 'Bar')
def test_template_convert_bare_filter_unsafe(self):
res = self.templar.template('some_unsafe_var|capitalize', convert_bare=True)
self.assertEqual(res, 'Unsafe_blip')
# self.assertIsInstance(res, AnsibleUnsafe)
self.assertTrue(self.is_unsafe(res), 'returned value from template.template (%s) is not marked unsafe' % res)
def test_template_convert_data(self):
res = self.templar.template('{{foo}}', convert_data=True)
self.assertTrue(res)
self.assertEqual(res, 'bar')
@patch('ansible.template.safe_eval', side_effect=AnsibleError)
def test_template_convert_data_template_in_data(self, mock_safe_eval):
res = self.templar.template('{{bam}}', convert_data=True)
self.assertTrue(res)
self.assertEqual(res, 'bar')
def test_template_convert_data_bare(self):
res = self.templar.template('bam', convert_data=True)
self.assertTrue(res)
self.assertEqual(res, 'bam')
def test_template_convert_data_to_json(self):
res = self.templar.template('{{bam|to_json}}', convert_data=True)
self.assertTrue(res)
self.assertEqual(res, '"bar"')
def test_template_convert_data_convert_bare_data_bare(self):
res = self.templar.template('bam', convert_data=True, convert_bare=True)
self.assertTrue(res)
self.assertEqual(res, 'bar')
def test_template_unsafe_non_string(self):
unsafe_obj = AnsibleUnsafe()
res = self.templar.template(unsafe_obj)
self.assertTrue(self.is_unsafe(res), 'returned value from template.template (%s) is not marked unsafe' % res)
def test_template_unsafe_non_string_subclass(self):
unsafe_obj = SomeUnsafeClass()
res = self.templar.template(unsafe_obj)
self.assertTrue(self.is_unsafe(res), 'returned value from template.template (%s) is not marked unsafe' % res)
    # TODO: not sure what template is supposed to do with it, but it currently raises AttributeError
@patch('ansible.template.Templar._clean_data')
def test_template_unsafe_non_string_clean_data_exception(self, mock_clean_data):
msg = 'Error raised from _clean_data by test_template_unsafe_non_string_clean_data_exception'
mock_clean_data.side_effect = AnsibleError(msg)
unsafe_obj = AnsibleUnsafe()
res = self.templar.template(unsafe_obj)
self.assertTrue(self.is_unsafe(res), 'returned value from template.template (%s) is not marked unsafe' % res)
    # TODO: not sure what template is supposed to do with it, but it currently raises AttributeError
@patch('ansible.template.Templar._clean_data', side_effect=AnsibleError)
def test_template_unsafe_non_string_subclass_clean_data_exception(self, mock_clean_data):
unsafe_obj = SomeUnsafeClass()
self.assertTrue(self.is_unsafe(unsafe_obj))
res = self.templar.template(unsafe_obj)
self.assertTrue(self.is_unsafe(res), 'returned value from template.template (%s) is not marked unsafe' % res)
def test_weird(self):
data = u'''1 2 #}huh{# %}ddfg{% }}dfdfg{{ {%what%} {{#foo#}} {%{bar}%} {#%blip%#} {{asdfsd%} 3 4 {{foo}} 5 6 7'''
self.assertRaisesRegexp(AnsibleError,
'template error while templating string',
self.templar.template,
data)
class TestTemplarCleanData(BaseTemplar, unittest.TestCase):
def test_clean_data(self):
res = self.templar._clean_data(u'some string')
self.assertEqual(res, u'some string')
def test_clean_data_not_stringtype(self):
res = self.templar._clean_data(None)
# None vs NoneType
self.assertEqual(res, None)
def test_clean_data_jinja(self):
res = self.templar._clean_data(u'1 2 {what} 3 4 {{foo}} 5 6 7')
self.assertEqual(res, u'1 2 {what} 3 4 {#foo#} 5 6 7')
def test_clean_data_block(self):
res = self.templar._clean_data(u'1 2 {%what%} 3 4 {{foo}} 5 6 7')
self.assertEqual(res, u'1 2 {#what#} 3 4 {#foo#} 5 6 7')
# def test_clean_data_weird(self):
# res = self.templar._clean_data(u'1 2 #}huh{# %}ddfg{% }}dfdfg{{ {%what%} {{#foo#}} {%{bar}%} {#%blip%#} {{asdfsd%} 3 4 {{foo}} 5 6 7')
# print(res)
    #     self.assertEqual(res, u'1 2 {#what#} 3 4 {#foo#} 5 6 7')
def test_clean_data_object(self):
obj = {u'foo': [1, 2, 3, u'bdasdf', u'{what}', u'{{foo}}', 5]}
clean_obj = {u'foo': [1, 2, 3, u'bdasdf', u'{what}', u'{#foo#}', 5]}
res = self.templar._clean_data(obj)
self.assertNotEqual(res, obj)
self.assertEqual(res, clean_obj)
def test_clean_data_bad_dict(self):
res = self.templar._clean_data(u'{{bad_dict}}')
self.assertEqual(res, u'{#bad_dict#}')
def test_clean_data_unsafe_obj(self):
some_obj = SomeClass()
unsafe_obj = wrap_var(some_obj)
res = self.templar._clean_data(unsafe_obj)
self.assertIsInstance(res, SomeClass)
class TestTemplarMisc(BaseTemplar, unittest.TestCase):
def test_templar_simple(self):
templar = self.templar
# test some basic templating
self.assertEqual(templar.template("{{foo}}"), "bar")
self.assertEqual(templar.template("{{foo}}\n"), "bar\n")
self.assertEqual(templar.template("{{foo}}\n", preserve_trailing_newlines=True), "bar\n")
self.assertEqual(templar.template("{{foo}}\n", preserve_trailing_newlines=False), "bar")
self.assertEqual(templar.template("{{bam}}"), "bar")
self.assertEqual(templar.template("{{num}}"), 1)
self.assertEqual(templar.template("{{var_true}}"), True)
self.assertEqual(templar.template("{{var_false}}"), False)
self.assertEqual(templar.template("{{var_dict}}"), dict(a="b"))
self.assertEqual(templar.template("{{bad_dict}}"), "{a='b'")
self.assertEqual(templar.template("{{var_list}}"), [1])
self.assertEqual(templar.template(1, convert_bare=True), 1)
# force errors
self.assertRaises(AnsibleUndefinedVariable, templar.template, "{{bad_var}}")
self.assertRaises(AnsibleUndefinedVariable, templar.template, "{{lookup('file', bad_var)}}")
self.assertRaises(AnsibleError, templar.template, "{{lookup('bad_lookup')}}")
self.assertRaises(AnsibleError, templar.template, "{{recursive}}")
self.assertRaises(AnsibleUndefinedVariable, templar.template, "{{foo-bar}}")
# test with fail_on_undefined=False
self.assertEqual(templar.template("{{bad_var}}", fail_on_undefined=False), "{{bad_var}}")
# test set_available_variables()
templar.set_available_variables(variables=dict(foo="bam"))
self.assertEqual(templar.template("{{foo}}"), "bam")
# variables must be a dict() for set_available_variables()
self.assertRaises(AssertionError, templar.set_available_variables, "foo=bam")
def test_templar_escape_backslashes(self):
# Rule of thumb: If escape backslashes is True you should end up with
# the same number of backslashes as when you started.
self.assertEqual(self.templar.template("\t{{foo}}", escape_backslashes=True), "\tbar")
self.assertEqual(self.templar.template("\t{{foo}}", escape_backslashes=False), "\tbar")
self.assertEqual(self.templar.template("\\{{foo}}", escape_backslashes=True), "\\bar")
self.assertEqual(self.templar.template("\\{{foo}}", escape_backslashes=False), "\\bar")
self.assertEqual(self.templar.template("\\{{foo + '\t' }}", escape_backslashes=True), "\\bar\t")
self.assertEqual(self.templar.template("\\{{foo + '\t' }}", escape_backslashes=False), "\\bar\t")
self.assertEqual(self.templar.template("\\{{foo + '\\t' }}", escape_backslashes=True), "\\bar\\t")
self.assertEqual(self.templar.template("\\{{foo + '\\t' }}", escape_backslashes=False), "\\bar\t")
self.assertEqual(self.templar.template("\\{{foo + '\\\\t' }}", escape_backslashes=True), "\\bar\\\\t")
self.assertEqual(self.templar.template("\\{{foo + '\\\\t' }}", escape_backslashes=False), "\\bar\\t")
def test_template_jinja2_extensions(self):
fake_loader = DictDataLoader({})
templar = Templar(loader=fake_loader)
old_exts = C.DEFAULT_JINJA2_EXTENSIONS
try:
C.DEFAULT_JINJA2_EXTENSIONS = "foo,bar"
self.assertEqual(templar._get_extensions(), ['foo', 'bar'])
finally:
C.DEFAULT_JINJA2_EXTENSIONS = old_exts
class TestTemplarLookup(BaseTemplar, unittest.TestCase):
def test_lookup_missing_plugin(self):
self.assertRaisesRegexp(AnsibleError,
r'lookup plugin \(not_a_real_lookup_plugin\) not found',
self.templar._lookup,
'not_a_real_lookup_plugin',
'an_arg', a_keyword_arg='a_keyword_arg_value')
def test_lookup_list(self):
res = self.templar._lookup('list', 'an_arg', 'another_arg')
self.assertEqual(res, 'an_arg,another_arg')
def test_lookup_jinja_undefined(self):
self.assertRaisesRegexp(AnsibleUndefinedVariable,
"'an_undefined_jinja_var' is undefined",
self.templar._lookup,
'list', '{{ an_undefined_jinja_var }}')
def test_lookup_jinja_defined(self):
res = self.templar._lookup('list', '{{ some_var }}')
self.assertTrue(self.is_unsafe(res))
# self.assertIsInstance(res, AnsibleUnsafe)
def test_lookup_jinja_dict_string_passed(self):
self.assertRaisesRegexp(AnsibleError,
"with_dict expects a dict",
self.templar._lookup,
'dict',
'{{ some_var }}')
def test_lookup_jinja_dict_list_passed(self):
self.assertRaisesRegexp(AnsibleError,
"with_dict expects a dict",
self.templar._lookup,
'dict',
['foo', 'bar'])
def test_lookup_jinja_kwargs(self):
res = self.templar._lookup('list', 'blip', random_keyword='12345')
self.assertTrue(self.is_unsafe(res))
# self.assertIsInstance(res, AnsibleUnsafe)
def test_lookup_jinja_list_wantlist(self):
res = self.templar._lookup('list', '{{ some_var }}', wantlist=True)
self.assertEqual(res, ["blip"])
def test_lookup_jinja_list_wantlist_undefined(self):
self.assertRaisesRegexp(AnsibleUndefinedVariable,
"'some_undefined_var' is undefined",
self.templar._lookup,
'list',
'{{ some_undefined_var }}',
wantlist=True)
def test_lookup_jinja_list_wantlist_unsafe(self):
res = self.templar._lookup('list', '{{ some_unsafe_var }}', wantlist=True)
for lookup_result in res:
self.assertTrue(self.is_unsafe(lookup_result))
# self.assertIsInstance(lookup_result, AnsibleUnsafe)
# Should this be an AnsibleUnsafe
# self.assertIsInstance(res, AnsibleUnsafe)
def test_lookup_jinja_dict(self):
res = self.templar._lookup('list', {'{{ a_keyword }}': '{{ some_var }}'})
self.assertEqual(res['{{ a_keyword }}'], "blip")
# TODO: Should this be an AnsibleUnsafe
# self.assertIsInstance(res['{{ a_keyword }}'], AnsibleUnsafe)
# self.assertIsInstance(res, AnsibleUnsafe)
def test_lookup_jinja_dict_unsafe(self):
res = self.templar._lookup('list', {'{{ some_unsafe_key }}': '{{ some_unsafe_var }}'})
self.assertTrue(self.is_unsafe(res['{{ some_unsafe_key }}']))
# self.assertIsInstance(res['{{ some_unsafe_key }}'], AnsibleUnsafe)
# TODO: Should this be an AnsibleUnsafe
# self.assertIsInstance(res, AnsibleUnsafe)
def test_lookup_jinja_dict_unsafe_value(self):
res = self.templar._lookup('list', {'{{ a_keyword }}': '{{ some_unsafe_var }}'})
self.assertTrue(self.is_unsafe(res['{{ a_keyword }}']))
# self.assertIsInstance(res['{{ a_keyword }}'], AnsibleUnsafe)
# TODO: Should this be an AnsibleUnsafe
# self.assertIsInstance(res, AnsibleUnsafe)
def test_lookup_jinja_none(self):
res = self.templar._lookup('list', None)
self.assertIsNone(res)
class TestAnsibleContext(BaseTemplar, unittest.TestCase):
def _context(self, variables=None):
variables = variables or {}
env = AnsibleEnvironment()
context = AnsibleContext(env, parent={}, name='some_context',
blocks={})
for key, value in variables.items():
context.vars[key] = value
return context
def test(self):
context = self._context()
self.assertIsInstance(context, AnsibleContext)
self.assertIsInstance(context, Context)
def test_resolve_unsafe(self):
context = self._context(variables={'some_unsafe_key': wrap_var('some_unsafe_string')})
res = context.resolve('some_unsafe_key')
# self.assertIsInstance(res, AnsibleUnsafe)
self.assertTrue(self.is_unsafe(res),
'return of AnsibleContext.resolve (%s) was expected to be marked unsafe but was not' % res)
def test_resolve_unsafe_list(self):
context = self._context(variables={'some_unsafe_key': [wrap_var('some unsafe string 1')]})
res = context.resolve('some_unsafe_key')
# self.assertIsInstance(res[0], AnsibleUnsafe)
self.assertTrue(self.is_unsafe(res),
'return of AnsibleContext.resolve (%s) was expected to be marked unsafe but was not' % res)
def test_resolve_unsafe_dict(self):
context = self._context(variables={'some_unsafe_key':
{'an_unsafe_dict': wrap_var('some unsafe string 1')}
})
res = context.resolve('some_unsafe_key')
self.assertTrue(self.is_unsafe(res['an_unsafe_dict']),
'return of AnsibleContext.resolve (%s) was expected to be marked unsafe but was not' % res['an_unsafe_dict'])
def test_resolve(self):
context = self._context(variables={'some_key': 'some_string'})
res = context.resolve('some_key')
self.assertEqual(res, 'some_string')
# self.assertNotIsInstance(res, AnsibleUnsafe)
self.assertFalse(self.is_unsafe(res),
'return of AnsibleContext.resolve (%s) was not expected to be marked unsafe but was' % res)
def test_resolve_none(self):
context = self._context(variables={'some_key': None})
res = context.resolve('some_key')
self.assertEqual(res, None)
# self.assertNotIsInstance(res, AnsibleUnsafe)
self.assertFalse(self.is_unsafe(res),
'return of AnsibleContext.resolve (%s) was not expected to be marked unsafe but was' % res)
| skg-net/ansible | test/units/template/test_templar.py | Python | gpl-3.0 | 19,870 |
# GUI Application automation and testing library
# Copyright (C) 2006-2018 Mark Mc Mahon and Contributors
# https://github.com/pywinauto/pywinauto/graphs/contributors
# http://pywinauto.readthedocs.io/en/latest/credits.html
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of pywinauto nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Python package for automating GUI manipulation on Windows"""
__version__ = "0.6.8"
import sys # noqa: E402
import warnings # noqa: E402
def deprecated(method, deprecated_name=None):
"""Decorator for deprecated methods"""
if deprecated_name is None:
deprecated_name = ''.join([subname.capitalize() for subname in method.__name__.split('_')])
def wrap(*args, **kwargs):
warnings.simplefilter("default", DeprecationWarning)
warnings.warn("Method .{}() is deprecated, use .{}() instead." \
"".format(deprecated_name, method.__name__), DeprecationWarning, stacklevel=2)
return method(*args, **kwargs)
return wrap
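# Short usage sketch for the decorator above (hypothetical method name):
#
#   class Window(object):
#       def click_input(self):
#           ...
#       # camelCase alias kept for old callers
#       ClickInput = deprecated(click_input)
#
# Calling ClickInput() then emits a DeprecationWarning that points the
# caller at click_input().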
if sys.platform == 'win32':
# Importing only pythoncom can fail with the errors like:
# ImportError: No system module 'pywintypes' (pywintypes27.dll)
# So try to facilitate pywintypes*.dll loading with implicit import of win32api
import win32api # noqa: E402
import pythoncom # noqa: E402
def _get_com_threading_mode(module_sys):
"""Set up COM threading model
The ultimate goal is MTA, but the mode is adjusted
if it was already defined prior to pywinauto import.
"""
com_init_mode = 0 # COINIT_MULTITHREADED = 0x0
if hasattr(module_sys, "coinit_flags"):
warnings.warn("Apply externally defined coinit_flags: {0}"
.format(module_sys.coinit_flags), UserWarning)
com_init_mode = module_sys.coinit_flags
try:
# Probe the selected COM threading mode
pythoncom.CoInitializeEx(com_init_mode)
pythoncom.CoUninitialize()
except pythoncom.com_error:
warnings.warn("Revert to STA COM threading mode", UserWarning)
com_init_mode = 2 # revert back to STA
return com_init_mode
sys.coinit_flags = _get_com_threading_mode(sys)
#=========================================================================
class WindowNotFoundError(Exception):
"""No window could be found"""
pass
from . import findwindows
WindowAmbiguousError = findwindows.WindowAmbiguousError
ElementNotFoundError = findwindows.ElementNotFoundError
ElementAmbiguousError = findwindows.ElementAmbiguousError
from . import findbestmatch
from . import backend as backends
MatchError = findbestmatch.MatchError
from pywinauto.application import Application, WindowSpecification
class Desktop(object):
"""Simple class to call something like ``Desktop().WindowName.ControlName.method()``"""
def __init__(self, backend=None, allow_magic_lookup=True):
"""Create desktop element description"""
if not backend:
backend = backends.registry.name
if backend not in backends.registry.backends:
raise ValueError('Backend "{0}" is not registered!'.format(backend))
self.backend = backends.registry.backends[backend]
self.allow_magic_lookup = allow_magic_lookup
def window(self, **kwargs):
"""Create WindowSpecification object for top-level window"""
if 'top_level_only' not in kwargs:
kwargs['top_level_only'] = True
if 'backend' in kwargs:
raise ValueError('Using another backend than set in Desktop constructor is not allowed!')
kwargs['backend'] = self.backend.name
return WindowSpecification(kwargs, allow_magic_lookup=self.allow_magic_lookup)
def windows(self, **kwargs):
"""Return a list of wrapped top level windows"""
if 'top_level_only' not in kwargs:
kwargs['top_level_only'] = True
if 'backend' in kwargs:
raise ValueError('Using another backend than set in Desktop constructor is not allowed!!')
kwargs['backend'] = self.backend.name
windows = findwindows.find_elements(**kwargs)
return [self.backend.generic_wrapper_class(win) for win in windows]
def __getitem__(self, key):
"""Allow describe top-level window as Desktop()['Window Caption']"""
return self.window(best_match=key)
def __getattribute__(self, attr_name):
"""Attribute access for this class"""
allow_magic_lookup = object.__getattribute__(self, "allow_magic_lookup") # Beware of recursions here!
try:
return object.__getattribute__(self, attr_name)
except AttributeError:
if not allow_magic_lookup:
raise
return self[attr_name] # delegate it to __get_item__
def from_point(self, x, y):
"""Get wrapper object for element at specified screen coordinates (x, y)"""
element_info = self.backend.element_info_class.from_point(x, y)
return self.backend.generic_wrapper_class(element_info)
def top_from_point(self, x, y):
"""Get wrapper object for top level element at specified screen coordinates (x, y)"""
top_element_info = self.backend.element_info_class.top_from_point(x, y)
return self.backend.generic_wrapper_class(top_element_info)
def get_active(self):
"""Get wrapper object for active element"""
element_info = self.backend.element_info_class.get_active()
return self.backend.generic_wrapper_class(element_info)
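# Minimal usage sketch (assumes the "win32" backend is registered and a
# Notepad window titled "Untitled - Notepad" is open):
#
#   from pywinauto import Desktop
#   dlg = Desktop(backend="win32").window(title="Untitled - Notepad")
#   dlg.wait("visible")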
| pywinauto/pywinauto | pywinauto/__init__.py | Python | bsd-3-clause | 7,043 |
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
from datetime import datetime, timedelta
from django.contrib.auth.models import User
from django.core.management.base import BaseCommand
from django.utils.translation import ugettext as _
from desktop.lib.paths import get_apps_root
from useradmin.models import install_sample_user
from hbased.ttypes import AlreadyExists
from hbase.api import HbaseApi
LOG = logging.getLogger(__name__)
class Command(BaseCommand):
help = 'Create and fill some demo tables in the first configured cluster.'
args = '<username>'
def handle(self, *args, **options):
if args:
user = args[0]
else:
user = install_sample_user()
api = HbaseApi(user=user)
cluster_name = api.getClusters()[0]['name'] # Currently pick first configured cluster
# Check connectivity
api.connectCluster(cluster_name)
self.create_analytics_table(api, cluster_name)
self.load_analytics_table(api, cluster_name)
self.create_binary_table(api, cluster_name)
self.load_binary_table(api, cluster_name)
def create_analytics_table(self, api, cluster_name):
try:
api.createTable(cluster_name, 'analytics_demo', [{'properties': {'name': 'hour'}}, {'properties': {'name': 'day'}}, {'properties': {'name': 'total'}}])
except AlreadyExists:
pass
def load_analytics_table(self, api, cluster_name):
table_data = os.path.join(get_apps_root(), 'hbase', 'example', 'analytics', 'hbase-analytics.tsv')
api.bulkUpload(cluster_name, 'analytics_demo', open(table_data))
def create_binary_table(self, api, cluster_name):
try:
api.createTable(cluster_name, 'document_demo', [{'properties': {'name': 'doc'}}])
except AlreadyExists:
pass
def load_binary_table(self, api, cluster_name):
today = datetime.now().strftime('%Y%m%d')
tomorrow = (datetime.now() + timedelta(days=1)).strftime('%Y%m%d')
api.putRow(cluster_name, 'document_demo', today, {'doc:txt': 'Hue is awesome!'})
api.putRow(cluster_name, 'document_demo', today, {'doc:json': '{"user": "hue", "coolness": "extra"}'})
api.putRow(cluster_name, 'document_demo', tomorrow, {'doc:version': '<xml>I like HBase</xml>'})
api.putRow(cluster_name, 'document_demo', tomorrow, {'doc:version': '<xml>I LOVE HBase</xml>'})
root = os.path.join(get_apps_root(), 'hbase', 'example', 'documents')
api.putRow(cluster_name, 'document_demo', today, {'doc:img': open(root + '/hue-logo.png', "rb").read()})
api.putRow(cluster_name, 'document_demo', today, {'doc:html': open(root + '/gethue.com.html', "rb").read()})
api.putRow(cluster_name, 'document_demo', today, {'doc:pdf': open(root + '/gethue.pdf', "rb").read()})
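# Typical invocation (a sketch; the exact entry point depends on the Hue
# installation layout):
#   build/env/bin/hue hbase_setup [<username>]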
| vmanoria/bluemix-hue-filebrowser | hue-3.8.1-bluemix/apps/hbase/src/hbase/management/commands/hbase_setup.py | Python | gpl-2.0 | 3,477 |
import sys, re, textwrap
class ParseError(Exception):
# args[1] is the line number that caused the problem
def __init__(self, why, lineno):
self.why = why
self.lineno = lineno
def __str__(self):
return ("ParseError: the JS API docs were unparseable on line %d: %s" %
(self.lineno, self.why))
class Accumulator:
def __init__(self, holder, firstline):
self.holder = holder
self.firstline = firstline
self.otherlines = []
def addline(self, line):
self.otherlines.append(line)
def finish(self):
# take a list of strings like:
# "initial stuff" (this is in firstline)
# " more stuff" (this is in lines[0])
# " yet more stuff"
# " indented block"
# " indented block"
# " nonindented stuff" (lines[-1])
#
# calculate the indentation level by looking at all but the first
# line, and removing the whitespace they all have in common. Then
# join the results with newlines and return a single string.
pieces = []
if self.firstline:
pieces.append(self.firstline)
if self.otherlines:
pieces.append(textwrap.dedent("\n".join(self.otherlines)))
self.holder["description"] = "\n".join(pieces)
class APIParser:
def parse(self, lines, lineno):
api = {"line_number": lineno}
titleLine = lines.pop(0)
if "name" not in titleLine:
raise ParseError("Opening <api> tag must have a name attribute.",
lineno)
m = re.search("name=['\"]{0,1}([-\w\.]*?)['\"]", titleLine)
if not m:
raise ParseError("No value for name attribute found in "
"opening <api> tag.", lineno)
lineno += 1
api["name"] = m.group(1)
finalLine = lines.pop()
if not "</api>" in finalLine:
raise ParseError("Closing </api> not found.", lineno+len(lines))
props = []
currentPropHolder = None
params = []
tag, info, firstline = self._parseTypeLine(lines[0], lineno)
api["type"] = tag
if tag == 'property':
if not 'type' in info:
raise ParseError("No type found for @property.", lineno)
api['property_type'] = info['type']
# info is ignored
currentAccumulator = Accumulator(api, firstline)
for line in lines[1:]:
lineno += 1 # note that we count from lines[1:]
if not line.lstrip().startswith("@"):
currentAccumulator.addline(line)
continue
# we're starting a new section
currentAccumulator.finish()
tag, info, firstline = self._parseTypeLine(line, lineno)
if tag == "prop":
if "type" not in info:
raise ParseError("@prop lines must include {type}: '%s'" %
line, lineno)
if "name" not in info:
raise ParseError("@prop lines must provide a name: '%s'" %
line, lineno)
props.append(info) # build up props[]
currentAccumulator = Accumulator(info, firstline)
continue
# close off the @prop list
if props and currentPropHolder:
currentPropHolder["props"] = props
props = []
if tag == "returns":
api["returns"] = info
# the Accumulator will add ["description"] when done
currentAccumulator = Accumulator(info, firstline)
# @prop tags get attached to api["returns"]
currentPropHolder = info
continue
if tag == "param":
if info.get("required", False) and "default" in info:
raise ParseError("required parameters should not have defaults: '%s'"
% line, lineno)
params.append(info)
currentAccumulator = Accumulator(info, firstline)
# @prop tags get attached to this param
currentPropHolder = info
continue
raise ParseError("unknown '@' section header %s in '%s'" %
(tag, line), lineno)
currentAccumulator.finish()
if props and currentPropHolder:
currentPropHolder["props"] = props
if params:
api["params"] = params
return api
def _parseTypeLine(self, line, lineno):
# handle these things:
# @method
# @returns description
# @returns {string} description
# @param NAME {type} description
# @param NAME
# @prop NAME {type} description
# @prop NAME
info = {"line_number": lineno}
pieces = line.split()
if not pieces:
raise ParseError("line is too short: '%s'" % line, lineno)
if not pieces[0].startswith("@"):
raise ParseError("type line should start with @: '%s'" % line,
lineno)
tag = pieces[0][1:]
skip = 1
expect_name = tag in ("param", "prop")
if len(pieces) == 1:
description = ""
else:
if pieces[1].startswith("{"):
# NAME is missing, pieces[1] is TYPE
pass
else:
if expect_name:
info["required"] = not pieces[1].startswith("[")
name = pieces[1].strip("[ ]")
if "=" in name:
name, info["default"] = name.split("=")
info["name"] = name
skip += 1
if len(pieces) > skip and pieces[skip].startswith("{"):
info["type"] = pieces[skip].strip("{ }")
skip += 1
# we've got the metadata, now extract the description
pieces = line.split(None, skip)
if len(pieces) > skip:
description = pieces[skip]
else:
description = ""
return tag, info, description
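# For instance (a sketch of the tuple returned by _parseTypeLine):
#   APIParser()._parseTypeLine("@param name {string} the entry name", 12)
#   -> ("param",
#       {"line_number": 12, "required": True, "name": "name",
#        "type": "string"},
#       "the entry name")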
def parse_hunks(text):
# return a list of tuples. Each is one of:
# ("raw", string) : non-API blocks
# ("api-json", dict) : API blocks
processed = 0 # we've handled all bytes up-to-but-not-including this offset
line_number = 1
for m in re.finditer("<api[\w\W]*?</api>", text, re.M):
start = m.start()
if start > processed+1:
hunk = text[processed:start]
yield ("markdown", hunk)
processed = start
line_number += hunk.count("\n")
api_text = m.group(0)
api_lines = api_text.splitlines()
d = APIParser().parse(api_lines, line_number)
yield ("api-json", d)
processed = m.end()
line_number += api_text.count("\n")
if processed < len(text):
yield ("markdown", text[processed:])
class TestRenderer:
# render docs for test purposes
def getm(self, d, key):
return d.get(key, "<MISSING>")
def join_lines(self, text):
return " ".join([line.strip() for line in text.split("\n")])
def render_prop(self, p):
s = "props[%s]: " % self.getm(p, "name")
pieces = []
for k in ("type", "description", "required", "default"):
if k in p:
pieces.append("%s=%s" % (k, self.join_lines(str(p[k]))))
return s + ", ".join(pieces)
def render_param(self, p):
pieces = []
for k in ("name", "type", "description", "required", "default"):
if k in p:
pieces.append("%s=%s" % (k, self.join_lines(str(p[k]))))
yield ", ".join(pieces)
for prop in p.get("props", []):
yield " " + self.render_prop(prop)
def format_api(self, api):
yield "name= %s" % self.getm(api, "name")
yield "type= %s" % self.getm(api, "type")
yield "description= %s" % self.getm(api, "description")
params = api.get("params", [])
if params:
yield "parameters:"
for p in params:
for pline in self.render_param(p):
yield " " + pline
r = api.get("returns", None)
if r:
yield "returns:"
if "type" in r:
yield " type= %s" % r["type"]
if "description" in r:
yield " description= %s" % self.join_lines(r["description"])
props = r.get("props", [])
for p in props:
yield " " + self.render_prop(p)
def render_docs(self, docs_json, outf=sys.stdout):
for (t,data) in docs_json:
if t == "api-json":
#import pprint
#for line in str(pprint.pformat(data)).split("\n"):
# outf.write("JSN: " + line + "\n")
for line in self.format_api(data):
outf.write("API: " + line + "\n")
else:
for line in str(data).split("\n"):
outf.write("MD :" + line + "\n")
def hunks_to_dict(docs_json):
exports = {}
for (t,data) in docs_json:
if t != "api-json":
continue
if data["name"]:
exports[data["name"]] = data
return exports
if __name__ == "__main__":
json = False
if sys.argv[1] == "--json":
json = True
del sys.argv[1]
docs_text = open(sys.argv[1]).read()
docs_parsed = list(parse_hunks(docs_text))
if json:
import simplejson
print simplejson.dumps(docs_parsed, indent=2)
else:
TestRenderer().render_docs(docs_parsed)
| mozilla/FlightDeck | cuddlefish/apiparser.py | Python | bsd-3-clause | 9,926 |
import gdsCAD as cad
from junctions import JJunctions
import utilities
import collections
class Singlejunction_transmon():
"""
This class returns a single junction Yale Transmon
"""
def __init__(self, name, dict_pads, dict_junctions, short=False,
junctiontest=False):
self.name = name
self.dict_pads = dict_pads
self.dict_junctions = dict_junctions
self.short = short
self.junctiontest = junctiontest
self.overl_junc_lead = self.dict_junctions['overl_junc_lead']
self.position_offs_junc = self.dict_pads['height'] + self.dict_pads['lead_height'] +\
- self.overl_junc_lead
self.pad_spacing = 2 * self.position_offs_junc + 2 * (self.dict_junctions['bjunction_height'] +
self.dict_junctions['junction_height']) + dict_junctions['w_dolan_bridge'] +\
self.dict_junctions['appr_overlap']
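        # pad_spacing is the vertical offset at which the mirrored upper pad
        # is placed: both junction stacks, the Dolan-bridge gap and the
        # expected overlap must fit between the two pad leads.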
def gen_pattern(self):
self.cell = cad.core.Cell(self.name)
if self.short:
self.dict_pads['fork_depth'] = 0
self.pad_spacing = 2 * \
(self.dict_pads['height'] + self.dict_pads['lead_height'])
self.dict_pads['rounded_edges'] = False
else:
self.draw_junctions()
self.draw_pads()
# self.add_vortex_holes()
def draw_pads(self):
width = self.dict_pads.get('width', 250)
height = self.dict_pads.get('height', 600)
lead_width = self.dict_pads.get('lead_width', 10)
lead_height = height + self.dict_pads.get('lead_height', 20)
fork_depth = self.dict_pads.get('fork_depth', 1)
rounded_edges = self.dict_pads.get('rounded_edges', False)
layer = self.dict_pads['layer']
# Now make 2 cells for the upper pad and lower pad
pads = cad.core.Cell("PADS")
top_width = self.dict_junctions['bjunction_width'] + 6
if top_width > lead_width:
raise ValueError(" topwidth should be smaller than leadwidth")
lower_pad_points = [(-0.5 * width, 0),
(0.5 * width, 0),
(0.5 * width, height),
(0.5 * lead_width, height),
(0.5 * top_width, lead_height),
(-0.5 * top_width, lead_height),
(-0.5 * lead_width, height),
(-0.5 * width, height)]
lower_pad = cad.core.Boundary(lower_pad_points,
layer=layer)
if rounded_edges:
corners = collections.OrderedDict()
corners['BL0'] = 0
corners['BR1'] = 1
corners['TR2'] = 2
corners['TL7'] = 7
upper_pad = cad.utils.translate(cad.utils.reflect(
lower_pad, 'x'), (0, self.pad_spacing))
pad_list = cad.core.Elements([lower_pad, upper_pad])
pads.add(pad_list)
self.cell.add(pads)
def add_vortex_holes(self):
holes = cad.core.Cell("HOLES")
layer_holes = 22
height = self.dict_pads['height']
width = self.dict_pads['width']
holes_dim = 20.
mesh_wire = 5
period = holes_dim + mesh_wire
nr_holes_x = int((width - 5) / period)
nr_holes_y = int((height - 5) / period)
dict_corners = {}
dict_corners['BL'] = 0
dict_corners['TL'] = 1
dict_corners['TR'] = 2
dict_corners['BR'] = 3
radius = 0.2
start_posx = -width / 2. + mesh_wire
start_posy = mesh_wire
# make element of all the holes
for i in range(0, nr_holes_y):
for j in range(0, nr_holes_x):
posx = start_posx + j * period
posy = start_posy + i * period
hole = cad.shapes.Rectangle((posx, posy),
(posx + holes_dim, posy + holes_dim), layer=layer_holes)
hole_rounded = utilities.make_rounded_edges(
hole, radius, dict_corners)
holes.add(hole_rounded)
# holes.show()
self.cell.add(holes)
def draw_junctions(self):
junctions = JJunctions('junctions', self.dict_junctions)
self.cell.add(junctions.draw_junctions(),
origin=(0, self.position_offs_junc))
class Squidjunction_transmon():
"""
This class returns a squid junction Yale Transmon
"""
def __init__(self, name, dict_pads, dict_squidloop, dict_junctions):
self.name = name
self.dict_pads = dict_pads
self.dict_squidloop = dict_squidloop
self.dict_junctions = dict_junctions
self.overl_junc_lead = self.dict_junctions['overl_junc_lead']
self.position_offs_junc_y = self.dict_pads['height'] + self.dict_pads['lead_height'] +\
self.dict_squidloop['squid_height'] + self.dict_squidloop['squid_thickness'] +\
- self.overl_junc_lead
self.position_offs_junc_x = 0.5 * \
self.dict_squidloop['squid_width'] + 0.5 * \
self.dict_squidloop['squid_thickness']
self.pad_spacing = 2 * self.position_offs_junc_y + 2 * (self.dict_junctions['bjunction_height'] +
self.dict_junctions['junction_height']) + dict_junctions['w_dolan_bridge'] +\
self.dict_junctions['appr_overlap']
def gen_pattern(self):
self.cell = cad.core.Cell(self.name)
self.draw_pads()
self.draw_junctions()
# self.add_vortex_holes()
def draw_pads(self):
width = self.dict_pads.get('width', 250)
height = self.dict_pads.get('height', 600)
lead_width = self.dict_pads.get('lead_width', 10)
lead_height = height + self.dict_pads.get('lead_height', 20)
fork_depth = self.dict_pads.get('fork_depth', 0)
rounded_edges = self.dict_pads.get('rounded_edges', False)
layer = self.dict_pads['layer']
squid_thickness = 2 * self.dict_squidloop.get('squid_thickness', 3)
squid_width = self.dict_squidloop.get('squid_width', 10)
squid_height = self.dict_squidloop.get('squid_height', 10)
squid_width_wrap = squid_thickness + squid_width
squid_height_wrap = 0.5 * squid_thickness + lead_height + squid_height
# Now make 2 cells for the upper pad and lower pad
        # We also divide thickness by 2, that's the reason for 6 in the
# denominator
pads = cad.core.Cell("PADS")
lower_pad_points = [(-0.5 * width, 0),
(0.5 * width, 0),
(0.5 * width, height),
(0.5 * lead_width, height),
(0.5 * lead_width, lead_height),
(0.5 * squid_width_wrap, lead_height),
(0.5 * squid_width_wrap, squid_height_wrap),
(0.5 * squid_width_wrap - (1 / 6.) *
squid_thickness, squid_height_wrap),
(0.5 * squid_width_wrap - (1 / 6.) *
squid_thickness, squid_height_wrap - fork_depth),
(0.5 * squid_width_wrap - (2 / 6.) *
squid_thickness, squid_height_wrap - fork_depth),
(0.5 * squid_width_wrap - (2 / 6.) *
squid_thickness, squid_height_wrap),
(0.5 * squid_width, squid_height_wrap),
(0.5 * squid_width, squid_height_wrap - squid_height),
(-0.5 * squid_width, squid_height_wrap - squid_height),
(-0.5 * squid_width, squid_height_wrap),
(-0.5 * squid_width - (1 / 6.) *
squid_thickness, squid_height_wrap),
(-0.5 * squid_width - (1 / 6.) * squid_thickness,
squid_height_wrap - fork_depth),
(-0.5 * squid_width - (2 / 6.) * squid_thickness,
squid_height_wrap - fork_depth),
(-0.5 * squid_width - (2 / 6.) *
squid_thickness, squid_height_wrap),
(-0.5 * squid_width_wrap, squid_height_wrap),
(-0.5 * squid_width_wrap, lead_height),
(-0.5 * lead_width, lead_height),
(-0.5 * lead_width, height),
(-0.5 * width, height)]
lower_pad = cad.core.Boundary(lower_pad_points,
layer=layer)
if rounded_edges:
corners = collections.OrderedDict()
corners['BL0'] = 0
corners['BR1'] = 1
corners['TR2'] = 2
corners['BL3O'] = 3
corners['TL4O'] = 4
corners['BR5'] = 5
corners['TR6'] = 6
# corners['TL7'] = 7
# corners['BR8O'] = 8
# corners['BL9O'] = 9
# corners['TR10'] = 10
corners['TL11'] = 11
corners['BR12O'] = 12
corners['BL13O'] = 13
corners['TR14'] = 14
# corners['TL15'] = 15
# corners['BR16O'] = 16
# corners['BL17O'] = 17
# corners['TR18'] = 18
corners['TL19'] = 19
corners['BL20'] = 20
corners['TR21O'] = 21
corners['BR22O'] = 22
corners['TL23'] = 23
rad_corner = 0.3
lower_pad = utilities.make_rounded_edges(lower_pad,
rad_corner,
corners)
upper_pad = cad.utils.translate(cad.utils.reflect(
lower_pad, 'x'), (0, self.pad_spacing))
pad_list = cad.core.Elements([lower_pad, upper_pad])
pads.add(pad_list)
self.cell.add(pads)
def draw_junctions(self):
junctions = JJunctions('junctions_L', self.dict_junctions)
self.cell.add(junctions.draw_junctions(), origin=(
self.position_offs_junc_x, self.position_offs_junc_y))
junctions.name = 'junctions_R'
self.cell.add(junctions.draw_junctions(
), origin=(-self.position_offs_junc_x, self.position_offs_junc_y))
| srpeiter/ChipDesignCad | source_dev/transmon.py | Python | gpl-3.0 | 10,556 |
# -*- coding: utf-8 -*-
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("stories", "0010_auto_20140922_1514")]
operations = [
migrations.AddField(
model_name="story",
name="audio_link",
field=models.URLField(
help_text="A link to an mp3 file to publish on this story", max_length=255, null=True, blank=True
),
)
]
| rapidpro/dash | dash/stories/migrations/0011_story_audio_link.py | Python | bsd-3-clause | 461 |
import json
import threading
import webbrowser
from wsgiref.simple_server import make_server
import dataset
PORT = 8080
def showleaderboard(environ, response):
res = open("leaderboard.html").read()
return res
def getleaderboard(environ, response):
return getleaderboardJSON()
def getleaderboardJSON():
db = dataset.connect('sqlite:///gadakeco.db')
leaderboard = db["leaderboard"]
items = []
for k in leaderboard:
# print(k)
items.append({h:v for h, v in k.items()})
return json.dumps(items)
def postentry(environ, response):
if environ['REQUEST_METHOD'] == 'POST':
try:
data_size = int(environ['CONTENT_LENGTH'])
print(environ['wsgi.input'])
data = environ['wsgi.input'].read(data_size).decode("utf-8")
data = data.replace("\'", "\"")
print(data)
myj = json.loads(data)
db = dataset.connect('sqlite:///gadakeco.db')
leaderboard = db["leaderboard"]
leaderboard.insert(myj)
print(myj)
print(*[k for k in leaderboard], sep="\n")
result = "successfull"
except Exception as e:
print("ERROR:" + str(e))
result = "Error while parsing."
return result
else:
return "No matching pattern :'("
def test_postentry(environ, response):
result = open(FILE).read()
return result
def getFile(environ, response):
try:
result = open(environ["PATH_INFO"][1:]).read()
except:
result = "error404 File not Found"
return result
def request(environ, response):
# print(environ['PATH_INFO'])
urls = {
"/leaderboard":showleaderboard,
"/postentry": postentry,
"/testpostentry":test_postentry,
"/getleaderboard":getleaderboard,
"/mytable.js":getFile
}
path = environ['PATH_INFO'].lower()
print(path)
result = "No matching pattern :("
if path in urls:
result = urls[path](environ, response)
result = result.encode('utf_8')
headers = [('Content-type', 'text/html; charset=utf-8'),
('Content-Length', str(len(result)))]
response('200 OK', headers)
# response('200 OK',[('Content-type', 'text/plain')])
return [result]
def open_browser():
def _open_browser():
webbrowser.open('http://localhost:%s/%s' % (PORT, "leaderboard"))
thread = threading.Timer(2, _open_browser)
thread.start()
def start_server():
"""Start the server."""
httpd = make_server("", PORT, request)
httpd.serve_forever()
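# A small client-side sketch (not part of the original server): it posts one entry
# to a locally running instance via /postentry. The field names "name" and "points"
# are assumptions; postentry() simply inserts whatever JSON object it receives into
# the leaderboard table.
def example_post_entry(name="tester", points=100):
    from urllib.request import Request, urlopen
    payload = json.dumps({"name": name, "points": points}).encode("utf-8")
    req = Request("http://localhost:%s/postentry" % PORT, data=payload)
    with urlopen(req) as response:
        return response.read().decode("utf-8")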
if __name__ == "__main__":
open_browser()
start_server()
| Daarknes/Gadakeco | server/startserver.py | Python | gpl-3.0 | 2,800 |
## Copyright 2003-2006 Luc Saffre.
## This file is part of the Lino project.
## Lino is free software; you can redistribute it and/or modify it
## under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
## Lino is distributed in the hope that it will be useful, but WITHOUT
## ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
## or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
## License for more details.
## You should have received a copy of the GNU General Public License
## along with Lino; if not, write to the Free Software Foundation,
## Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
This is an alternative for reportlab/lib/styles.py
The original version of styles.py is copyright ReportLab Inc. 2000
v 1.15 2002/07/24 19:56:37 andy_robinson Exp $
Changes made by Luc Saffre:
- I rewrote PropertySet because I wanted true inheritance.
- Besides this I thought it useful that one can also access the Styles
in a StyleSheet using "attribute" syntax. For example one can now
write::
stylesheet.Normal.spaceAfter = 6
which is equivalent to the classic syntax::
stylesheet["Normal"].spaceAfter = 6
- keepWithNext was missing in defaults attribute list
- many more changes in 2006
"""
from reportlab.lib import colors
from reportlab.lib import pagesizes
from reportlab.lib.units import inch,mm
from reportlab.lib.enums import TA_LEFT, TA_CENTER, TA_RIGHT
VA_MIDDLE="MIDDLE"
VA_CENTER="MIDDLE"
VA_TOP="TOP"
VA_BOTTOM="BOTTOM"
from lino.misc.pset import PropertySet, StyleSheet
# from sdoc.lists import ListStyle, NumberedListStyle
# from lino.sdoc.tables import TableModel
class FlowStyle(PropertySet):
defaults = dict(
leftIndent=0,
rightIndent=0,
spaceBefore=0,
spaceAfter=0,
backColor=None,
keepWithNext=False,
pageBreakBefore=False,
pageBreakAfter=False,
wordWrap=None, # added for reportlab 2.x
)
class CharacterStyle(PropertySet):
defaults = dict(
fontName='Times-Roman',
fontSize=10,
textColor=colors.black,
rise=False,
underline=False,
)
class ParagraphStyle(FlowStyle):
defaults = dict(dict(
leading=12,
firstLineIndent=0,
textStyle=None,
alignment=TA_LEFT,
allowSplitting = True,
bulletFontName='Times-Roman',
bulletFontSize=10,
bulletIndent=0,
wrap=True,
**FlowStyle.defaults),**CharacterStyle.defaults)
class LineStyle(PropertySet):
defaults = {
'width':1,
'color': colors.black
}
def prepareCanvas(self, canvas):
"""You can ask a LineStyle to set up the canvas for drawing
the lines."""
canvas.setLineWidth(1)
#etc. etc.
class ListStyle(FlowStyle):
defaults = dict(
bulletWidth=12,
bulletText= '-',
**FlowStyle.defaults)
def getBulletText(self,listInstance):
return self.bulletText
class NumberedListStyle(ListStyle):
defaults = dict(
showParent=False,
**ListStyle.defaults)
def getBulletText(self,listInstance):
text = str(listInstance.itemCount)+'.'
if self.showParent:
parent = listInstance.getParent()
if parent is not None:
text = parent.getBulletText() + text
return text
class TableStyle(FlowStyle):
defaults = dict(
flowStyle=None,
paraStyle=None,
dataCellFormats=[],
headerCellFormats= [],
showHeaders=False,
isgrowing=True,
**FlowStyle.defaults)
def formatTable(self,cmdName,*params):
# self.dataCellFormats = list(self.dataCellFormats)
addCellFormats(self.dataCellFormats,
cmdName,
(0,0),
(-1,-1),
*params)
def formatHeader(self,cmdName,*params):
# self.headerCellFormats = list(self.headerCellFormats)
addCellFormats(self.headerCellFormats,
cmdName,
(0,0),
(-1,0),
*params)
class DocumentStyle(PropertySet):
defaults = dict(
pagesize=pagesizes.A4,
showBoundary=0,
leftMargin=inch,
rightMargin=inch,
topMargin=inch,
bottomMargin=inch,
header=None,
footer=None,
innerMargin=None,
outerMargin=None,
)
class FrameStyle(PropertySet):
defaults = dict(
halign=TA_LEFT,
valign=VA_TOP,
borderStyle=LineStyle(),
borderStyleTop=None,
borderStyleBottom=None,
borderStyleRight=None,
borderStyleLeft=None,
padding=0,
paddingTop=None,
paddingBottom=None,
paddingRight=None,
paddingLeft=None,
)
## class DocumentTool:
## def __init__(self,doc):
## self.doc = doc
## def TitlePageHeader(self):
## self.doc.beginTable(self.doc.styles.EmptyTable)
## self.doc.formatParagraph(fontSize=8)
## self.doc.formatTable("LINEBELOW",0.1,colors.black)
## self.doc.p(self.doc.getTitle())
## self.doc.endCell()
## self.doc.formatParagraph(alignment=TA_RIGHT)
## self.doc.p("Page %d" % self.doc.getPageNumber())
## self.doc.endTable()
#
#
#
def getDefaultStyleSheet():
sheet = StyleSheet()
sheet.define("BODY",DocumentStyle())
sheet.define("Header",FrameStyle(valign=VA_BOTTOM))
sheet.define("Footer",FrameStyle(valign=VA_TOP))
sheet.define("P",ParagraphStyle(
fontName='Times-Roman',
fontSize=10,
spaceBefore=3,
spaceAfter=3,
leading=12
))
sheet.define("TH",sheet.P.child(alignment=TA_CENTER))
sheet.define("TD",sheet.P.child())
sheet.define("TR",sheet.P.child())
sheet.define("Verses",sheet.P.child(wrap=False))
sheet.define("Right",sheet.P.child(alignment=TA_RIGHT))
sheet.define("Center",sheet.P.child(alignment=TA_CENTER))
sheet.define("H1",sheet.P.child(
fontName = 'Times-Bold',
keepWithNext=True,
fontSize=18,
leading=22,
spaceAfter=6))
sheet.define("H2",sheet.H1.child(
fontSize=14,
leading=18,
spaceBefore=12,
spaceAfter=6))
sheet.define("H3",sheet.H2.child(
fontSize=12,
leading=14,
spaceBefore=12,
spaceAfter=6))
sheet.define("PRE",sheet.P.child(
fontName='Courier',
wrap=False,
fontSize=8,
leading=8.8,
firstLineIndent=0,
leftIndent=36))
#sheet.define("Wrapped",sheet.P.child(wrap=False,
# alignment=TA_LEFT))
sheet.define('UL', ListStyle(bulletWidth=12))
sheet.define('OL', NumberedListStyle(bulletWidth=12))
sheet.define("LI",sheet.P.child(
spaceBefore=1,
spaceAfter=1,
leftIndent=30,
firstLineIndent=0,
bulletText="\xe2\x80\xa2",
bulletIndent=0))
sheet.define("TABLE", TableStyle(
leftIndent=20,
rightIndent=50,
dataCellFormats=[
('ALIGN',(0,0),(-1,-1),'LEFT'),
('VALIGN',(0,0),(-1,-1),'TOP'),
('INNERGRID', (0,0), (-1,-1), 0.25, colors.black),
('BOX', (0,0), (-1,-1), 0.25, colors.black),
]))
sheet.define("EmptyTable", TableStyle( dataCellFormats=[
('ALIGN',(0,0),(-1,-1),'LEFT'),
('VALIGN',(0,0),(-1,-1),'TOP'),
]))
sheet.define("DataTable",TableStyle(dataCellFormats=[
('ALIGN',(0,0),(-1,-1),'LEFT'),
('VALIGN',(0,0),(-1,-1),'TOP'),
('LINEBELOW', (0,0), (-1,-1), 0.25, colors.black),
('BOX', (0,0), (-1,-1), 0.25, colors.black),
# ('BACKGROUND', (0,0), (-1,-1), colors.grey),
]))
return sheet
#tool = DocumentTool(doc)
#s.define('TitlePageHeader',tool.TitlePageHeader)
#return s
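# A minimal usage sketch (not part of the original module). It relies only on the
# behaviour documented above: attribute access and item access on a StyleSheet
# address the same style object, and child() derives a new style from an existing one.
# The "Quote" style defined here is purely illustrative.
def _stylesheet_demo():
    sheet = getDefaultStyleSheet()
    sheet.P.spaceAfter = 6               # same object as sheet["P"]
    assert sheet["P"].spaceAfter == 6
    # derive a hypothetical block-quote style from the basic paragraph style
    sheet.define("Quote", sheet.P.child(leftIndent=20, rightIndent=20))
    return sheet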
| lsaffre/timtools | timtools/gendoc/styles.py | Python | bsd-2-clause | 7,898 |
import cherrypy
# This is the definition of the MAN class
'''
# import the submodule in the application
import programs.cdag30.man as cdag30_man
# add man.py from the cdag30 module so that the submodule man maps to its MAN() class
root.cdag30.man = cdag30_man.MAN()
# once this is set up, you can use
/cdag30/man/assembly
# to call the assembly method of the MAN class in man.py
'''
class MAN(object):
    # each group uses index to direct the program execution that follows
@cherrypy.expose
def index(self, *args, **kwargs):
outstring = '''
This is the MAN class of the cdag30 module under the 2014CDA collaborative project.<br /><br />
<!-- a relative link is used here rather than an absolute URL (this line is an html comment) -->
<a href="assembly">run the assembly method of the MAN class</a><br /><br />
Make sure the parts listed below are in the V:/home/lego/man directory and that an empty Creo assembly file is open.<br />
<a href="/static/lego_man.7z">lego_man.7z</a> (right-click to save the .7z file)<br />
'''
return outstring
@cherrypy.expose
def assembly(self, *args, **kwargs):
outstring = '''
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="content-type" content="text/html;charset=utf-8">
<script type="text/javascript" src="/static/weblink/wl_header.js"></script>
<script type="text/javascript" src="/static/weblink/pfcUtils.js"></script>
</head>
<body>
</script><script language="JavaScript">
/* man2.py performs the whole assembly purely through function calls */
/* define a part-assembly function */
// featID is the id of the first component placed in the assembly
// inc is the assembly-order offset relative to part1; the first component added is featID+0
// part2 is the file name of the part being added
////////////////////////////////////////////////
// axis_plane_assembly assembly function
////////////////////////////////////////////////
function axis_plane_assembly(session, assembly, transf, featID, inc, part2, axis1, plane1, axis2, plane2){
var descr = pfcCreate("pfcModelDescriptor").CreateFromFileName ("v:/home/lego/man/"+part2);
var componentModel = session.GetModelFromDescr(descr);
var componentModel = session.RetrieveModel(descr);
if (componentModel != void null)
{
var asmcomp = assembly.AssembleComponent (componentModel, transf);
}
var ids = pfcCreate("intseq");
ids.Append(featID+inc);
var subPath = pfcCreate("MpfcAssembly").CreateComponentPath(assembly, ids);
subassembly = subPath.Leaf;
var asmDatums = new Array(axis1, plane1);
var compDatums = new Array(axis2, plane2);
var relation = new Array (pfcCreate("pfcComponentConstraintType").ASM_CONSTRAINT_ALIGN, pfcCreate("pfcComponentConstraintType").ASM_CONSTRAINT_MATE);
var relationItem = new Array(pfcCreate("pfcModelItemType").ITEM_AXIS, pfcCreate("pfcModelItemType").ITEM_SURFACE);
var constrs = pfcCreate("pfcComponentConstraints");
for (var i = 0; i < 2; i++)
{
var asmItem = subassembly.GetItemByName (relationItem[i], asmDatums [i]);
if (asmItem == void null)
{
interactFlag = true;
continue;
}
var compItem = componentModel.GetItemByName (relationItem[i], compDatums [i]);
if (compItem == void null)
{
interactFlag = true;
continue;
}
var MpfcSelect = pfcCreate ("MpfcSelect");
var asmSel = MpfcSelect.CreateModelItemSelection (asmItem, subPath);
var compSel = MpfcSelect.CreateModelItemSelection (compItem, void null);
var constr = pfcCreate("pfcComponentConstraint").Create (relation[i]);
constr.AssemblyReference = asmSel;
constr.ComponentReference = compSel;
constr.Attributes = pfcCreate("pfcConstraintAttributes").Create (true, false);
constrs.Append(constr);
}
asmcomp.SetConstraints(constrs, void null);
}
// end of the axis_plane_assembly() function
///////////////////////////////////////////////////////////////////////////////////////////////////////////
// three_plane_assembly uses ALIGN constraints; a featID of 0 means the assembly file is empty
///////////////////////////////////////////////////////////////////////////////////////////////////////////
function three_plane_assembly(session, assembly, transf, featID, inc, part2, plane1, plane2, plane3, plane4, plane5, plane6){
var descr = pfcCreate("pfcModelDescriptor").CreateFromFileName ("v:/home/lego/man/"+part2);
var componentModel = session.GetModelFromDescr(descr);
var componentModel = session.RetrieveModel(descr);
if (componentModel != void null)
{
var asmcomp = assembly.AssembleComponent (componentModel, transf);
}
var ids = pfcCreate("intseq");
// a featID of 0 means the assembly file is empty
if (featID != 0){
ids.Append(featID+inc);
var subPath = pfcCreate("MpfcAssembly").CreateComponentPath(assembly, ids);
subassembly = subPath.Leaf;
}else{
var subPath = pfcCreate("MpfcAssembly").CreateComponentPath(assembly, ids);
subassembly = assembly;
// obtain first_featID, the id of the first component in the assembly
// list the component ids under the assembly; since there is only one part, take the featID at index 0
var components = assembly.ListFeaturesByType(true, pfcCreate ("pfcFeatureType").FEATTYPE_COMPONENT);
// this featID is the id of the first part in the assembly, i.e. the LEGO figure's body
var first_featID = components.Item(0).Id;
}
var constrs = pfcCreate("pfcComponentConstraints");
var asmDatums = new Array(plane1, plane2, plane3);
var compDatums = new Array(plane4, plane5, plane6);
var MpfcSelect = pfcCreate("MpfcSelect");
for (var i = 0; i < 3; i++)
{
var asmItem = subassembly.GetItemByName(pfcCreate("pfcModelItemType").ITEM_SURFACE, asmDatums[i]);
if (asmItem == void null)
{
interactFlag = true;
continue;
}
var compItem = componentModel.GetItemByName(pfcCreate("pfcModelItemType").ITEM_SURFACE, compDatums[i]);
if (compItem == void null)
{
interactFlag = true;
continue;
}
var asmSel = MpfcSelect.CreateModelItemSelection(asmItem, subPath);
var compSel = MpfcSelect.CreateModelItemSelection(compItem, void null);
var constr = pfcCreate("pfcComponentConstraint").Create(pfcCreate("pfcComponentConstraintType").ASM_CONSTRAINT_ALIGN);
constr.AssemblyReference = asmSel;
constr.ComponentReference = compSel;
constr.Attributes = pfcCreate("pfcConstraintAttributes").Create (false, false);
constrs.Append(constr);
}
asmcomp.SetConstraints(constrs, void null);
// if featID is 0, return first_featID
if (featID == 0)
return first_featID;
}
// end of the three_plane_assembly() function
///////////////////////////////////////////////////////////////////////////////////////////////////////////
// three_plane_assembly2 uses MATE constraints; a featID of 0 means the assembly file is empty
///////////////////////////////////////////////////////////////////////////////////////////////////////////
function three_plane_assembly2(session, assembly, transf, featID, inc, part2, plane1, plane2, plane3, plane4, plane5, plane6){
var descr = pfcCreate("pfcModelDescriptor").CreateFromFileName ("v:/home/lego/man/"+part2);
var componentModel = session.GetModelFromDescr(descr);
var componentModel = session.RetrieveModel(descr);
if (componentModel != void null)
{
var asmcomp = assembly.AssembleComponent (componentModel, transf);
}
var ids = pfcCreate("intseq");
// a featID of 0 means the assembly file is empty
if (featID != 0){
ids.Append(featID+inc);
var subPath = pfcCreate("MpfcAssembly").CreateComponentPath(assembly, ids);
subassembly = subPath.Leaf;
}else{
var subPath = pfcCreate("MpfcAssembly").CreateComponentPath(assembly, ids);
subassembly = assembly;
// obtain first_featID, the id of the first component in the assembly
// list the component ids under the assembly; since there is only one part, take the featID at index 0
var components = assembly.ListFeaturesByType(true, pfcCreate ("pfcFeatureType").FEATTYPE_COMPONENT);
// this featID is the id of the first part in the assembly, i.e. the LEGO figure's body
var first_featID = components.Item(0).Id;
}
var constrs = pfcCreate("pfcComponentConstraints");
var asmDatums = new Array(plane1, plane2, plane3);
var compDatums = new Array(plane4, plane5, plane6);
var MpfcSelect = pfcCreate("MpfcSelect");
for (var i = 0; i < 3; i++)
{
var asmItem = subassembly.GetItemByName(pfcCreate("pfcModelItemType").ITEM_SURFACE, asmDatums[i]);
if (asmItem == void null)
{
interactFlag = true;
continue;
}
var compItem = componentModel.GetItemByName(pfcCreate("pfcModelItemType").ITEM_SURFACE, compDatums[i]);
if (compItem == void null)
{
interactFlag = true;
continue;
}
var asmSel = MpfcSelect.CreateModelItemSelection(asmItem, subPath);
var compSel = MpfcSelect.CreateModelItemSelection(compItem, void null);
var constr = pfcCreate("pfcComponentConstraint").Create(pfcCreate("pfcComponentConstraintType").ASM_CONSTRAINT_MATE);
constr.AssemblyReference = asmSel;
constr.ComponentReference = compSel;
constr.Attributes = pfcCreate("pfcConstraintAttributes").Create (false, false);
constrs.Append(constr);
}
asmcomp.SetConstraints(constrs, void null);
// if featID is 0, return first_featID
if (featID == 0)
return first_featID;
}
// end of the three_plane_assembly2() function, which mainly uses three-plane MATE constraints
//
// if the operating system running Creo is not Windows
if (!pfcIsWindows())
// enable the corresponding UniversalXPConnect privilege (the equivalent of ActiveX on Windows)
netscape.security.PrivilegeManager.enablePrivilege("UniversalXPConnect");
// pfcGetProESession() is a function in pfcUtils.js; it confirms this JavaScript is running in the embedded browser
var session = pfcGetProESession();
// set the config option so the built-in placement assumptions are not used during component assembly
session.SetConfigOption("comp_placement_assumptions","no");
// build the placement matrix for the part; Pro/Web.Link variables cannot be created directly and must come from pfcCreate()
var identityMatrix = pfcCreate("pfcMatrix3D");
// build the identity placement matrix
for (var x = 0; x < 4; x++)
for (var y = 0; y < 4; y++)
{
if (x == y)
identityMatrix.Set(x, y, 1.0);
else
identityMatrix.Set(x, y, 0.0);
}
// use identityMatrix to build the transf coordinate transform
var transf = pfcCreate("pfcTransform3D").Create(identityMatrix);
// get the current working directory
var currentDir = session.getCurrentDirectory();
// use the currently opened empty assembly file as the model
var model = session.CurrentModel;
// check that a model exists and that it is an assembly; otherwise throw an error
if (model == void null || model.Type != pfcCreate("pfcModelType").MDL_ASSEMBLY)
throw new Error (0, "Current model is not an assembly.");
// treat this model as the assembly object
var assembly = model;
/////////////////////////////////////////////////////////////////
// start the assembly; every part is placed through function calls
/////////////////////////////////////////////////////////////////
// the Body is constrained to the empty assembly file with three plane constraints
// assembly planes: ASM_TOP, ASM_FRONT, ASM_RIGHT
// Body planes: TOP, FRONT, RIGHT
// a featID of 0 marks an empty assembly file, and the function then returns the featID of the first assembled component
var featID = three_plane_assembly(session, assembly, transf, 0, 0, "LEGO_BODY.prt", "ASM_TOP", "ASM_FRONT", "ASM_RIGHT", "TOP", "FRONT", "RIGHT");
// assemble the right ARM via the helper function, assembly increment 1
axis_plane_assembly(session, assembly, transf, featID, 0,
"LEGO_ARM_RT.prt", "A_13", "DTM1", "A_4", "DTM1");
// assemble the left ARM via the helper function, assembly increment 2
axis_plane_assembly(session, assembly, transf, featID, 0,
"LEGO_ARM_LT.prt", "A_9", "DTM2", "A_4", "DTM1");
// assemble the right HAND via the helper function, assembly increment 3
axis_plane_assembly(session, assembly, transf, featID, 1,
"LEGO_HAND.prt", "A_2", "DTM2", "A_1", "DTM3");
// assemble the left HAND via the helper function, assembly increment 4
axis_plane_assembly(session, assembly, transf, featID, 2,
"LEGO_HAND.prt", "A_2", "DTM2", "A_1", "DTM3");
// assemble the figure's HEAD via the helper function, assembly increment 5
// the BODY id is featID+0, constrained with A_2 and DTM3
// the HEAD is referenced directly by file name, constrained with A_2 and DTM2
axis_plane_assembly(session, assembly, transf, featID, 0,
"LEGO_HEAD.prt", "A_2", "DTM3", "A_2", "DTM2");
// the Body and the WAIST are assembled with three plane constraints
// Body planes: DTM4, DTM5, DTM6
// WAIST planes: DTM1, DTM2, DTM3, assembly increment 6, mated to the body on three planes
three_plane_assembly2(session, assembly, transf, featID, 0, "LEGO_WAIST.prt", "DTM4", "DTM5", "DTM6", "DTM1", "DTM2", "DTM3");
// right leg
axis_plane_assembly(session, assembly, transf, featID, 6,
"LEGO_LEG_RT.prt", "A_8", "DTM4", "A_10", "DTM1");
// left leg
axis_plane_assembly(session, assembly, transf, featID, 6,
"LEGO_LEG_LT.prt", "A_8", "DTM5", "A_10", "DTM1");
// red hat
axis_plane_assembly(session, assembly, transf, featID, 5,
"LEGO_HAT.prt", "A_2", "TOP", "A_2", "FRONT");
// regenerate and repaint the assembly file
assembly.Regenerate (void null);
session.GetModelWindow (assembly).Repaint();
</script>
</body>
</html>
'''
return outstring
| 40123248/2015cd_midterm2 | man2.py | Python | gpl-3.0 | 13,353 |
import sys
from services.spawn import MobileTemplate
from services.spawn import WeaponTemplate
from resources.datatables import WeaponType
from resources.datatables import Difficulty
from resources.datatables import Options
from java.util import Vector
def addTemplate(core):
mobileTemplate = MobileTemplate()
mobileTemplate.setCustomName('Bib Fortuna')
mobileTemplate.setLevel(1)
mobileTemplate.setDifficulty(Difficulty.NORMAL)
mobileTemplate.setSocialGroup("township")
mobileTemplate.setOptionsBitmask(Options.INVULNERABLE)
templates = Vector()
templates.add('object/mobile/shared_bib_fortuna.iff')
mobileTemplate.setTemplates(templates)
weaponTemplates = Vector()
weapontemplate = WeaponTemplate('object/weapon/melee/unarmed/shared_unarmed_default.iff', WeaponType.UNARMED, 1.0, 6, 'kinetic')
weaponTemplates.add(weapontemplate)
mobileTemplate.setWeaponTemplateVector(weaponTemplates)
attacks = Vector()
mobileTemplate.setDefaultAttack('creatureMeleeAttack')
mobileTemplate.setAttacks(attacks)
core.spawnService.addMobileTemplate('bibfortunaeisley', mobileTemplate)
return | ProjectSWGCore/NGECore2 | scripts/mobiles/generic/static/tatooine/bibfortuna.py | Python | lgpl-3.0 | 1,107 |
"""A library for integrating pyOpenSSL with CherryPy.
The OpenSSL module must be importable for SSL functionality.
You can obtain it from `here <https://launchpad.net/pyopenssl>`_.
To use this module, set CherryPyWSGIServer.ssl_adapter to an instance of
SSLAdapter. There are two ways to use SSL:
Method One
----------
* ``ssl_adapter.context``: an instance of SSL.Context.
If this is not None, it is assumed to be an SSL.Context instance,
and will be passed to SSL.Connection on bind(). The developer is
responsible for forming a valid Context object. This approach is
to be preferred for more flexibility, e.g. if the cert and key are
streams instead of files, or need decryption, or SSL.SSLv3_METHOD
is desired instead of the default SSL.SSLv23_METHOD, etc. Consult
the pyOpenSSL documentation for complete options.
Method Two (shortcut)
---------------------
* ``ssl_adapter.certificate``: the filename of the server SSL certificate.
* ``ssl_adapter.private_key``: the filename of the server's private key file.
Both are None by default. If ssl_adapter.context is None, but .private_key
and .certificate are both given and valid, they will be read, and the
context will be automatically created from them.
"""
import socket
import threading
import time
from cherrypy import wsgiserver
try:
from OpenSSL import SSL
from OpenSSL import crypto
if hasattr(SSL, 'Connection'):
SSLConnectionType = SSL.Connection
else:
SSLConnectionType = SSL.ConnectionType
except ImportError:
SSL = None
class SSL_fileobject(wsgiserver.CP_fileobject):
"""SSL file object attached to a socket object."""
ssl_timeout = 3
ssl_retry = .01
def _safe_call(self, is_reader, call, *args, **kwargs):
"""Wrap the given call with SSL error-trapping.
is_reader: if False EOF errors will be raised. If True, EOF errors
will return "" (to emulate normal sockets).
"""
start = time.time()
while True:
try:
return call(*args, **kwargs)
except SSL.WantReadError:
# Sleep and try again. This is dangerous, because it means
# the rest of the stack has no way of differentiating
# between a "new handshake" error and "client dropped".
# Note this isn't an endless loop: there's a timeout below.
time.sleep(self.ssl_retry)
except SSL.WantWriteError:
time.sleep(self.ssl_retry)
except SSL.SysCallError as e:
if is_reader and e.args == (-1, 'Unexpected EOF'):
return ""
errnum = e.args[0]
if is_reader and errnum in wsgiserver.socket_errors_to_ignore:
return ""
raise socket.error(errnum)
except SSL.Error as e:
if is_reader and e.args == (-1, 'Unexpected EOF'):
return ""
thirdarg = None
try:
thirdarg = e.args[0][0][2]
except IndexError:
pass
if thirdarg == 'http request':
# The client is talking HTTP to an HTTPS server.
raise wsgiserver.NoSSLError()
raise wsgiserver.FatalSSLAlert(*e.args)
except:
raise
if time.time() - start > self.ssl_timeout:
raise socket.timeout("timed out")
def recv(self, size):
return self._safe_call(True, super(SSL_fileobject, self).recv, size)
def sendall(self, *args, **kwargs):
return self._safe_call(False, super(SSL_fileobject, self).sendall,
*args, **kwargs)
def send(self, *args, **kwargs):
return self._safe_call(False, super(SSL_fileobject, self).send,
*args, **kwargs)
class SSLConnection:
"""A thread-safe wrapper for an SSL.Connection.
``*args``: the arguments to create the wrapped ``SSL.Connection(*args)``.
"""
def __init__(self, *args):
self._ssl_conn = SSL.Connection(*args)
self._lock = threading.RLock()
for f in ('get_context', 'pending', 'send', 'write', 'recv', 'read',
'renegotiate', 'bind', 'listen', 'connect', 'accept',
'setblocking', 'fileno', 'close', 'get_cipher_list',
'getpeername', 'getsockname', 'getsockopt', 'setsockopt',
'makefile', 'get_app_data', 'set_app_data', 'state_string',
'sock_shutdown', 'get_peer_certificate', 'want_read',
'want_write', 'set_connect_state', 'set_accept_state',
'connect_ex', 'sendall', 'settimeout', 'gettimeout'):
exec("""def %s(self, *args):
self._lock.acquire()
try:
return self._ssl_conn.%s(*args)
finally:
self._lock.release()
""" % (f, f))
def shutdown(self, *args):
self._lock.acquire()
try:
# pyOpenSSL.socket.shutdown takes no args
return self._ssl_conn.shutdown()
finally:
self._lock.release()
class pyOpenSSLAdapter(wsgiserver.SSLAdapter):
"""A wrapper for integrating pyOpenSSL with CherryPy."""
context = None
"""An instance of SSL.Context."""
certificate = None
"""The filename of the server SSL certificate."""
private_key = None
"""The filename of the server's private key file."""
certificate_chain = None
"""Optional. The filename of CA's intermediate certificate bundle.
This is needed for cheaper "chained root" SSL certificates, and should be
left as None if not required."""
def __init__(self, certificate, private_key, certificate_chain=None):
if SSL is None:
raise ImportError("You must install pyOpenSSL to use HTTPS.")
self.context = None
self.certificate = certificate
self.private_key = private_key
self.certificate_chain = certificate_chain
self._environ = None
def bind(self, sock):
"""Wrap and return the given socket."""
if self.context is None:
self.context = self.get_context()
conn = SSLConnection(self.context, sock)
self._environ = self.get_environ()
return conn
def wrap(self, sock):
"""Wrap and return the given socket, plus WSGI environ entries."""
return sock, self._environ.copy()
def get_context(self):
"""Return an SSL.Context from self attributes."""
# See http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/442473
c = SSL.Context(SSL.SSLv23_METHOD)
c.use_privatekey_file(self.private_key)
if self.certificate_chain:
c.load_verify_locations(self.certificate_chain)
c.use_certificate_file(self.certificate)
return c
def get_environ(self):
"""Return WSGI environ entries to be merged into each request."""
ssl_environ = {
"HTTPS": "on",
# pyOpenSSL doesn't provide access to any of these AFAICT
# 'SSL_PROTOCOL': 'SSLv2',
# SSL_CIPHER string The cipher specification name
# SSL_VERSION_INTERFACE string The mod_ssl program version
# SSL_VERSION_LIBRARY string The OpenSSL program version
}
if self.certificate:
# Server certificate attributes
cert = open(self.certificate, 'rb').read()
cert = crypto.load_certificate(crypto.FILETYPE_PEM, cert)
ssl_environ.update({
'SSL_SERVER_M_VERSION': cert.get_version(),
'SSL_SERVER_M_SERIAL': cert.get_serial_number(),
# 'SSL_SERVER_V_START':
# Validity of server's certificate (start time),
# 'SSL_SERVER_V_END':
# Validity of server's certificate (end time),
})
for prefix, dn in [("I", cert.get_issuer()),
("S", cert.get_subject())]:
# X509Name objects don't seem to have a way to get the
# complete DN string. Use str() and slice it instead,
# because str(dn) == "<X509Name object '/C=US/ST=...'>"
dnstr = str(dn)[18:-2]
wsgikey = 'SSL_SERVER_%s_DN' % prefix
ssl_environ[wsgikey] = dnstr
# The DN should be of the form: /k1=v1/k2=v2, but we must allow
# for any value to contain slashes itself (in a URL).
while dnstr:
pos = dnstr.rfind("=")
dnstr, value = dnstr[:pos], dnstr[pos + 1:]
pos = dnstr.rfind("/")
dnstr, key = dnstr[:pos], dnstr[pos + 1:]
if key and value:
wsgikey = 'SSL_SERVER_%s_DN_%s' % (prefix, key)
ssl_environ[wsgikey] = value
return ssl_environ
def makefile(self, sock, mode='r', bufsize=-1):
if SSL and isinstance(sock, SSLConnectionType):
timeout = sock.gettimeout()
f = SSL_fileobject(sock, mode, bufsize)
f.ssl_timeout = timeout
return f
else:
return wsgiserver.CP_fileobject(sock, mode, bufsize)
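# A minimal wiring sketch (not part of the upstream module), following "Method Two"
# from the module docstring. The certificate and key paths are placeholders, and the
# function is illustrative only; nothing in this module calls it.
def _example_https_server(wsgi_app, certfile, keyfile, host='0.0.0.0', port=8443):
    server = wsgiserver.CherryPyWSGIServer((host, port), wsgi_app)
    server.ssl_adapter = pyOpenSSLAdapter(certfile, keyfile)
    try:
        server.start()
    except KeyboardInterrupt:
        server.stop()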
| drzoidberg33/plexpy | lib/cherrypy/wsgiserver/ssl_pyopenssl.py | Python | gpl-3.0 | 9,357 |
# coding=utf-8
from __future__ import unicode_literals
import time
from collections import namedtuple
class ExpiringList(list):
"""Smart custom list, with a cache expiration."""
CachedResult = namedtuple('CachedResult', 'time value')
def __init__(self, items=None, cache_timeout=3600, implicit_clean=False):
"""Initialize the MissingPosterList.
:param items: Provide the initial list.
:param cache_timeout: Timeout after which the item expires.
:param implicit_clean: If enabled, run the clean() method, to check for expired items. Else you'll have to run
this periodically.
"""
list.__init__(self, items or [])
self.cache_timeout = cache_timeout
self.implicit_clean = implicit_clean
def append(self, item):
"""Add new items to the list."""
if self.implicit_clean:
self.clean()
super(ExpiringList, self).append((int(time.time()), item))
def clean(self):
"""Use the cache_timeout to remove expired items."""
new_list = [_ for _ in self if _[0] + self.cache_timeout > int(time.time())]
self.__init__(new_list, self.cache_timeout, self.implicit_clean)
def has(self, value):
"""Check if the value is in the list.
We need a smarter method to check if an item is already in the list. This will return a list with items that
match the value.
:param value: The value to check for.
:return: A list of tuples with matches. For example: (141234234, '12342').
"""
if self.implicit_clean:
self.clean()
return [
ExpiringList.CachedResult(time=match[0], value=match[1])
for match in self if match[1] == value
]
def get(self, value):
"""Check if the value is in the list.
We need a smarter method to check if an item is already in the list. This will return a list with items that
match the value.
:param value: The value to check for.
:return: A single item, if it matches. For example: <CachedResult()>.
"""
if self.implicit_clean:
self.clean()
matches = [_ for _ in self if _[1] == value]
if not matches:
return None
if len(matches) > 1:
            # If we detect more than one match, remove them all.
for match in matches:
self.remove(match)
return None
if len(matches):
return ExpiringList.CachedResult(time=matches[0][0], value=matches[0][1])
class ExpiringKeyValue(list):
"""Smart custom list (that acts like a dictionary, with a cache expiration."""
CachedResult = namedtuple('CachedResult', 'time key value')
def __init__(self, items=None, cache_timeout=3600, implicit_clean=False):
"""Initialize the MissingPosterList.
:param items: Provide the initial list.
:param cache_timeout: Timeout after which the item expires.
:param implicit_clean: If enabled, run the clean() method, to check for expired items. Else you'll have to run
this periodically.
"""
list.__init__(self, items or [])
self.cache_timeout = cache_timeout
self.implicit_clean = implicit_clean
def append(self, key, value):
"""Add new items to the list."""
if self.implicit_clean:
self.clean()
super(ExpiringKeyValue, self).append((int(time.time()), key, value))
def clean(self):
"""Use the cache_timeout to remove expired items."""
new_list = [_ for _ in self if _[0] + self.cache_timeout > int(time.time())]
self.__init__(new_list, self.cache_timeout, self.implicit_clean)
def has(self, value):
"""Check if the value is in the list.
We need a smarter method to check if an item is already in the list. This will return a list with items that
match the value.
        :param value: The value to check for.
:return: A list of tuples with matches. For example: [<CachedResult()>, <CachedResult()>].
"""
if self.implicit_clean:
self.clean()
return [
ExpiringKeyValue.CachedResult(time=match[0], key=match[1], value=match[2])
for match in self if match[2] == value
]
def get(self, key):
"""Check if the key is in the list.
We need a smarter method to check if an item is already in the list. This will return a list with items that
match the value.
        :param key: The key to check for.
:return: A single item, if it matches. For example: <CachedResult()>.
"""
if self.implicit_clean:
self.clean()
matches = [_ for _ in self if _[1] == key]
if not matches:
return None
if len(matches) > 1:
            # If we detect more than one match, remove them all.
for match in matches:
self.remove(match)
return None
if len(matches):
return ExpiringKeyValue.CachedResult(time=matches[0][0], key=matches[0][1], value=matches[0][2])
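# A brief usage sketch (not part of the original module), illustrating the expiry
# behaviour described in the docstrings above. The id used here is arbitrary.
def _expiring_list_demo():
    recently_missing = ExpiringList(cache_timeout=3600, implicit_clean=True)
    recently_missing.append('tt0944947')             # stored internally as (timestamp, value)
    if recently_missing.has('tt0944947'):            # list of CachedResult matches
        cached = recently_missing.get('tt0944947')   # single CachedResult or None
        return cached.value
    return None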
| pymedusa/Medusa | medusa/show/recommendations/__init__.py | Python | gpl-3.0 | 5,174 |
"""Utility functions for copying and archiving files and directory trees.
XXX The functions here don't copy the resource fork or other metadata on Mac.
"""
import os
import sys
import stat
from os.path import abspath
import fnmatch
import collections
import errno
try:
from pwd import getpwnam
except ImportError:
getpwnam = None
try:
from grp import getgrnam
except ImportError:
getgrnam = None
__all__ = ["copyfileobj", "copyfile", "copymode", "copystat", "copy", "copy2",
"copytree", "move", "rmtree", "Error", "SpecialFileError",
"ExecError", "make_archive", "get_archive_formats",
"register_archive_format", "unregister_archive_format"]
class Error(EnvironmentError):
pass
class SpecialFileError(EnvironmentError):
"""Raised when trying to do a kind of operation (e.g. copying) which is
not supported on a special file (e.g. a named pipe)"""
class ExecError(EnvironmentError):
"""Raised when a command could not be executed"""
try:
WindowsError
except NameError:
WindowsError = None
def copyfileobj(fsrc, fdst, length=16*1024):
"""copy data from file-like object fsrc to file-like object fdst"""
while 1:
buf = fsrc.read(length)
if not buf:
break
fdst.write(buf)
def _samefile(src, dst):
# Macintosh, Unix.
if hasattr(os.path, 'samefile'):
try:
return os.path.samefile(src, dst)
except OSError:
return False
# All other platforms: check for same pathname.
return (os.path.normcase(os.path.abspath(src)) ==
os.path.normcase(os.path.abspath(dst)))
def copyfile(src, dst):
"""Copy data from src to dst"""
if _samefile(src, dst):
raise Error("`%s` and `%s` are the same file" % (src, dst))
for fn in [src, dst]:
try:
st = os.stat(fn)
except OSError:
# File most likely does not exist
pass
else:
# XXX What about other special files? (sockets, devices...)
if stat.S_ISFIFO(st.st_mode):
raise SpecialFileError("`%s` is a named pipe" % fn)
with open(src, 'rb') as fsrc:
with open(dst, 'wb') as fdst:
copyfileobj(fsrc, fdst)
def copymode(src, dst):
"""Copy mode bits from src to dst"""
if hasattr(os, 'chmod'):
st = os.stat(src)
mode = stat.S_IMODE(st.st_mode)
os.chmod(dst, mode)
def copystat(src, dst):
"""Copy all stat info (mode bits, atime, mtime, flags) from src to dst"""
st = os.stat(src)
mode = stat.S_IMODE(st.st_mode)
if hasattr(os, 'utime'):
os.utime(dst, (st.st_atime, st.st_mtime))
if hasattr(os, 'chmod'):
os.chmod(dst, mode)
if hasattr(os, 'chflags') and hasattr(st, 'st_flags'):
try:
os.chflags(dst, st.st_flags)
except OSError, why:
if (not hasattr(errno, 'EOPNOTSUPP') or
why.errno != errno.EOPNOTSUPP):
raise
def copy(src, dst):
"""Copy data and mode bits ("cp src dst").
The destination may be a directory.
"""
if os.path.isdir(dst):
dst = os.path.join(dst, os.path.basename(src))
copyfile(src, dst)
copymode(src, dst)
def copy2(src, dst):
"""Copy data and all stat info ("cp -p src dst").
The destination may be a directory.
"""
if os.path.isdir(dst):
dst = os.path.join(dst, os.path.basename(src))
copyfile(src, dst)
copystat(src, dst)
def ignore_patterns(*patterns):
"""Function that can be used as copytree() ignore parameter.
Patterns is a sequence of glob-style patterns
that are used to exclude files"""
def _ignore_patterns(path, names):
ignored_names = []
for pattern in patterns:
ignored_names.extend(fnmatch.filter(names, pattern))
return set(ignored_names)
return _ignore_patterns
def copytree(src, dst, symlinks=False, ignore=None):
"""Recursively copy a directory tree using copy2().
The destination directory must not already exist.
If exception(s) occur, an Error is raised with a list of reasons.
If the optional symlinks flag is true, symbolic links in the
source tree result in symbolic links in the destination tree; if
it is false, the contents of the files pointed to by symbolic
links are copied.
The optional ignore argument is a callable. If given, it
is called with the `src` parameter, which is the directory
being visited by copytree(), and `names` which is the list of
`src` contents, as returned by os.listdir():
callable(src, names) -> ignored_names
Since copytree() is called recursively, the callable will be
called once for each directory that is copied. It returns a
list of names relative to the `src` directory that should
not be copied.
XXX Consider this example code rather than the ultimate tool.
"""
names = os.listdir(src)
if ignore is not None:
ignored_names = ignore(src, names)
else:
ignored_names = set()
os.makedirs(dst)
errors = []
for name in names:
if name in ignored_names:
continue
srcname = os.path.join(src, name)
dstname = os.path.join(dst, name)
try:
if symlinks and os.path.islink(srcname):
linkto = os.readlink(srcname)
os.symlink(linkto, dstname)
elif os.path.isdir(srcname):
copytree(srcname, dstname, symlinks, ignore)
else:
# Will raise a SpecialFileError for unsupported file types
copy2(srcname, dstname)
# catch the Error from the recursive copytree so that we can
# continue with other files
except Error, err:
errors.extend(err.args[0])
except EnvironmentError, why:
errors.append((srcname, dstname, str(why)))
try:
copystat(src, dst)
except OSError, why:
if WindowsError is not None and isinstance(why, WindowsError):
# Copying file access times may fail on Windows
pass
else:
            errors.append((src, dst, str(why)))
if errors:
raise Error, errors
def rmtree(path, ignore_errors=False, onerror=None):
"""Recursively delete a directory tree.
If ignore_errors is set, errors are ignored; otherwise, if onerror
is set, it is called to handle the error with arguments (func,
path, exc_info) where func is os.listdir, os.remove, or os.rmdir;
path is the argument to that function that caused it to fail; and
exc_info is a tuple returned by sys.exc_info(). If ignore_errors
is false and onerror is None, an exception is raised.
"""
if ignore_errors:
def onerror(*args):
pass
elif onerror is None:
def onerror(*args):
raise
try:
if os.path.islink(path):
# symlinks to directories are forbidden, see bug #1669
raise OSError("Cannot call rmtree on a symbolic link")
except OSError:
onerror(os.path.islink, path, sys.exc_info())
# can't continue even if onerror hook returns
return
names = []
try:
names = os.listdir(path)
except os.error, err:
onerror(os.listdir, path, sys.exc_info())
for name in names:
fullname = os.path.join(path, name)
try:
mode = os.lstat(fullname).st_mode
except os.error:
mode = 0
if stat.S_ISDIR(mode):
rmtree(fullname, ignore_errors, onerror)
else:
try:
os.remove(fullname)
except os.error, err:
onerror(os.remove, fullname, sys.exc_info())
try:
os.rmdir(path)
except os.error:
onerror(os.rmdir, path, sys.exc_info())
def _basename(path):
# A basename() variant which first strips the trailing slash, if present.
# Thus we always get the last component of the path, even for directories.
return os.path.basename(path.rstrip(os.path.sep))
def move(src, dst):
"""Recursively move a file or directory to another location. This is
similar to the Unix "mv" command.
If the destination is a directory or a symlink to a directory, the source
is moved inside the directory. The destination path must not already
exist.
If the destination already exists but is not a directory, it may be
overwritten depending on os.rename() semantics.
If the destination is on our current filesystem, then rename() is used.
Otherwise, src is copied to the destination and then removed.
A lot more could be done here... A look at a mv.c shows a lot of
the issues this implementation glosses over.
"""
real_dst = dst
if os.path.isdir(dst):
if _samefile(src, dst):
# We might be on a case insensitive filesystem,
# perform the rename anyway.
os.rename(src, dst)
return
real_dst = os.path.join(dst, _basename(src))
if os.path.exists(real_dst):
raise Error, "Destination path '%s' already exists" % real_dst
try:
os.rename(src, real_dst)
except OSError:
if os.path.isdir(src):
if _destinsrc(src, dst):
raise Error, "Cannot move a directory '%s' into itself '%s'." % (src, dst)
copytree(src, real_dst, symlinks=True)
rmtree(src)
else:
copy2(src, real_dst)
os.unlink(src)
def _destinsrc(src, dst):
src = abspath(src)
dst = abspath(dst)
if not src.endswith(os.path.sep):
src += os.path.sep
if not dst.endswith(os.path.sep):
dst += os.path.sep
return dst.startswith(src)
def _get_gid(name):
"""Returns a gid, given a group name."""
if getgrnam is None or name is None:
return None
try:
result = getgrnam(name)
except KeyError:
result = None
if result is not None:
return result[2]
return None
def _get_uid(name):
"""Returns an uid, given a user name."""
if getpwnam is None or name is None:
return None
try:
result = getpwnam(name)
except KeyError:
result = None
if result is not None:
return result[2]
return None
def _make_tarball(base_name, base_dir, compress="gzip", verbose=0, dry_run=0,
owner=None, group=None, logger=None):
"""Create a (possibly compressed) tar file from all the files under
'base_dir'.
'compress' must be "gzip" (the default), "bzip2", or None.
'owner' and 'group' can be used to define an owner and a group for the
archive that is being built. If not provided, the current owner and group
will be used.
The output tar file will be named 'base_name' + ".tar", possibly plus
the appropriate compression extension (".gz", or ".bz2").
Returns the output filename.
"""
tar_compression = {'gzip': 'gz', 'bzip2': 'bz2', None: ''}
compress_ext = {'gzip': '.gz', 'bzip2': '.bz2'}
# flags for compression program, each element of list will be an argument
if compress is not None and compress not in compress_ext.keys():
raise ValueError, \
("bad value for 'compress': must be None, 'gzip' or 'bzip2'")
archive_name = base_name + '.tar' + compress_ext.get(compress, '')
archive_dir = os.path.dirname(archive_name)
if not os.path.exists(archive_dir):
logger.info("creating %s" % archive_dir)
if not dry_run:
os.makedirs(archive_dir)
# creating the tarball
import tarfile # late import so Python build itself doesn't break
if logger is not None:
logger.info('Creating tar archive')
uid = _get_uid(owner)
gid = _get_gid(group)
def _set_uid_gid(tarinfo):
if gid is not None:
tarinfo.gid = gid
tarinfo.gname = group
if uid is not None:
tarinfo.uid = uid
tarinfo.uname = owner
return tarinfo
if not dry_run:
tar = tarfile.open(archive_name, 'w|%s' % tar_compression[compress])
try:
tar.add(base_dir, filter=_set_uid_gid)
finally:
tar.close()
return archive_name
def _call_external_zip(base_dir, zip_filename, verbose=False, dry_run=False):
# XXX see if we want to keep an external call here
if verbose:
zipoptions = "-r"
else:
zipoptions = "-rq"
from distutils.errors import DistutilsExecError
from distutils.spawn import spawn
try:
spawn(["zip", zipoptions, zip_filename, base_dir], dry_run=dry_run)
except DistutilsExecError:
# XXX really should distinguish between "couldn't find
# external 'zip' command" and "zip failed".
raise ExecError, \
("unable to create zip file '%s': "
"could neither import the 'zipfile' module nor "
"find a standalone zip utility") % zip_filename
def _make_zipfile(base_name, base_dir, verbose=0, dry_run=0, logger=None):
"""Create a zip file from all the files under 'base_dir'.
The output zip file will be named 'base_name' + ".zip". Uses either the
"zipfile" Python module (if available) or the InfoZIP "zip" utility
(if installed and found on the default search path). If neither tool is
available, raises ExecError. Returns the name of the output zip
file.
"""
zip_filename = base_name + ".zip"
archive_dir = os.path.dirname(base_name)
if not os.path.exists(archive_dir):
if logger is not None:
logger.info("creating %s", archive_dir)
if not dry_run:
os.makedirs(archive_dir)
# If zipfile module is not available, try spawning an external 'zip'
# command.
try:
import zipfile
except ImportError:
zipfile = None
if zipfile is None:
_call_external_zip(base_dir, zip_filename, verbose, dry_run)
else:
if logger is not None:
logger.info("creating '%s' and adding '%s' to it",
zip_filename, base_dir)
if not dry_run:
zip = zipfile.ZipFile(zip_filename, "w",
compression=zipfile.ZIP_DEFLATED)
for dirpath, dirnames, filenames in os.walk(base_dir):
for name in filenames:
path = os.path.normpath(os.path.join(dirpath, name))
if os.path.isfile(path):
zip.write(path, path)
if logger is not None:
logger.info("adding '%s'", path)
zip.close()
return zip_filename
_ARCHIVE_FORMATS = {
'gztar': (_make_tarball, [('compress', 'gzip')], "gzip'ed tar-file"),
'bztar': (_make_tarball, [('compress', 'bzip2')], "bzip2'ed tar-file"),
'tar': (_make_tarball, [('compress', None)], "uncompressed tar file"),
'zip': (_make_zipfile, [],"ZIP file")
}
def get_archive_formats():
"""Returns a list of supported formats for archiving and unarchiving.
Each element of the returned sequence is a tuple (name, description)
"""
formats = [(name, registry[2]) for name, registry in
_ARCHIVE_FORMATS.items()]
formats.sort()
return formats
def register_archive_format(name, function, extra_args=None, description=''):
"""Registers an archive format.
name is the name of the format. function is the callable that will be
used to create archives. If provided, extra_args is a sequence of
(name, value) tuples that will be passed as arguments to the callable.
description can be provided to describe the format, and will be returned
by the get_archive_formats() function.
"""
if extra_args is None:
extra_args = []
if not isinstance(function, collections.Callable):
raise TypeError('The %s object is not callable' % function)
if not isinstance(extra_args, (tuple, list)):
raise TypeError('extra_args needs to be a sequence')
for element in extra_args:
if not isinstance(element, (tuple, list)) or len(element) !=2 :
raise TypeError('extra_args elements are : (arg_name, value)')
_ARCHIVE_FORMATS[name] = (function, extra_args, description)
def unregister_archive_format(name):
del _ARCHIVE_FORMATS[name]
def make_archive(base_name, format, root_dir=None, base_dir=None, verbose=0,
dry_run=0, owner=None, group=None, logger=None):
"""Create an archive file (eg. zip or tar).
'base_name' is the name of the file to create, minus any format-specific
extension; 'format' is the archive format: one of "zip", "tar", "bztar"
or "gztar".
'root_dir' is a directory that will be the root directory of the
archive; ie. we typically chdir into 'root_dir' before creating the
archive. 'base_dir' is the directory where we start archiving from;
ie. 'base_dir' will be the common prefix of all files and
directories in the archive. 'root_dir' and 'base_dir' both default
to the current directory. Returns the name of the archive file.
'owner' and 'group' are used when creating a tar archive. By default,
uses the current owner and group.
"""
save_cwd = os.getcwd()
if root_dir is not None:
if logger is not None:
logger.debug("changing into '%s'", root_dir)
base_name = os.path.abspath(base_name)
if not dry_run:
os.chdir(root_dir)
if base_dir is None:
base_dir = os.curdir
kwargs = {'dry_run': dry_run, 'logger': logger}
try:
format_info = _ARCHIVE_FORMATS[format]
except KeyError:
raise ValueError, "unknown archive format '%s'" % format
func = format_info[0]
for arg, val in format_info[1]:
kwargs[arg] = val
if format != 'zip':
kwargs['owner'] = owner
kwargs['group'] = group
try:
filename = func(base_name, base_dir, **kwargs)
finally:
if root_dir is not None:
if logger is not None:
logger.debug("changing back to '%s'", save_cwd)
os.chdir(save_cwd)
return filename
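# A short usage sketch (not part of the stdlib module). The paths are placeholders;
# it produces backup.tar.gz containing everything under root_dir, archived relative
# to that directory.
def _make_archive_demo():
    import logging
    logging.basicConfig(level=logging.INFO)
    return make_archive('backup', 'gztar', root_dir='/var/www',
                        logger=logging.getLogger('archive'))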
| Symmetry-Innovations-Pty-Ltd/Python-2.7-for-QNX6.5.0-x86 | usr/pkg/lib/python2.7/shutil.py | Python | mit | 18,302 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
"""
Commands for:
- building and publishing virtual environments
- sync'ing application code
- managing (e.g. start, stop, etc.) the any service
on hosts.
Here's an example of updating all running hosts attached to a load-balancer:
.. code:: bash
fab hosts:lb=ipypi-test-i code_sync
Here's one where just echo which hosts haven been selected by our criteria:
.. code:: bash
fab hosts:env=test who
Here's one where we say exactly which host(s) to target and disable their
service(s):
.. code:: bash
    fab -H i-2b6d1a58.internalpypi.io,ipypi-test-10-1-1-01.internalpypi.io,10.1.1.56 svc_disable
"""
__version__ = '0.1.0'
import os
import time
import boto.ec2.elb
from fabric import api
# statics
class Context(dict):
def __getattr__(self, item):
return self[item]
ctx = Context(
app_name=None,
STARTUP_DELAY_SECS=5,
WAIT_DEFAULT_TIMEOUT_SECS=60,
WAIT_POLL_FREQ_SECS=5,
HEALTH_FILE='/var/lib/app/health',
DOMAIN='example.com',
WORKERS=[],
AWS_ENVIRONMENT_TAG='ChefEnvironment',
AWS_DISABLED_TAG='Disabled',
AWS_API_SUBNET_IDS=[],
AWS_WORKER_SUBNET_IDS=[],
AWS_VPC_ID='vpc-asd',
AWS_GROUP_ID=None,
AWS_ACCESS_KEY_ID=os.environ.get('AWS_ACCESS_KEY_ID', None),
AWS_SECRET_ACCESS_KEY=os.environ.get('AWS_SECRET_ACCESS_KEY', None),
S3_BUCKET='company.debs',
S3_ENDPOINT='s3-us-west-1.amazonaws.com',
)
# environment
api.env.user = os.environ.get('USER')
api.env.region_name = 'us-east-1'
api.env.instances = None
api.env.lbs = None
# common tasks
@api.task
def hosts(env=None, lb=None, subenets=None):
"""
Selects hosts to target.
:param env: The environment from which hosts should be *included*. All by
default. Should be one of 'prod', 'test', 'stage', 'dev'.
:param lb: The load-balancer whose attached hosts should be *included*.
"""
populate_lbs()
if lb:
lb = resolve_lb(lb)
tags = {}
if env:
tags['tag:' + ctx.AWS_ENVIRONMENT_TAG] = env
populate_instances(tags=tags, lb=lb, subenets=subenets)
# HACK: dns resolution does not seem to be working for all instances
#api.env.hosts = [i.id + '.' + ctx.DOMAIN for i in api.env.instances]
api.env.hosts = [
inst.interfaces[0].private_ip_address for inst in api.env.instances
]
for instance in api.env.instances:
print instance, instance.tags
@api.task
def who():
"""
Echos hosts that will be targeted by commands.
"""
pass
@api.task
def code_sync(branch='release', commit='HEAD', clear_cached='t'):
clear_cached = parse_flag(clear_cached)
with api.cd('~/' + ctx.app_name):
api.run('git fetch')
api.run('git checkout ' + branch)
        if commit != 'HEAD':
swallow = api.run('git pull')
result = api.run(
'git branch --contains {} | grep {} | wc -l'.format(
commit, branch,
)
)
if int(result.strip()) == 0:
raise ValueError(
'Commit "{}" is not a part of "{}" branch!'.format(
commit, branch
)
)
api.run('git checkout ' + commit)
if clear_cached:
with api.settings(shell='bash -i -c'):
api.run("find -type f -regex '.+\.pyc' -exec rm -rf {} \;")
@api.task
def code_stat():
with api.cd('~/{name}'.format(name=ctx.app_name)):
api.run('echo `git rev-parse --abbrev-ref HEAD`:`git rev-parse '
'--verify HEAD`')
@api.task
@api.parallel
def shells():
"""
Ghetto detects whether any shell(s) are running.
"""
with api.settings(shell='bash -i -c'):
api.run('[ -z `pgrep -f "^python.*shell$" -u deploy` ]')
@api.task
def migrate_db():
with api.cd('~/' + ctx.app_name):
with api.settings(shell='bash -i -c'):
api.run('./scripts/migrate-db upgrade')
# service tasks
@api.task
def svc_hosts(env=None, lb=None):
hosts(env=env, lb=lb, subenets=ctx.AWS_API_SUBNET_IDS)
@api.task
def svc_start(skip_enable='f', wait='t'):
"""
Starts the service.
:param skip_enable: Flag indicating whether to skip enabling the host.
:param wait: Flag indicating whether to wait for host to roll into its lbs.
"""
api.run('service {} start; sleep {}'.format(
ctx.app_name, ctx.STARTUP_DELAY_SECS
))
api.run('service {} start'.format(ctx.app_name))
skip_enable = parse_flag(skip_enable)
if not skip_enable:
svc_enable()
wait_in_lbs(parse_wait(wait))
@api.task
def svc_stop(skip_disable='f', wait='t'):
"""
Stops the service.
:param skip_disable: Flag indicating whether to skip disabling the host.
:param wait: Flag indicating whether to wait for host to fall out of its
load-balancers.
"""
skip_disable = parse_flag(skip_disable)
if not skip_disable:
svc_disable()
wait_out_lbs(parse_wait(wait))
@api.task
def svc_reload():
"""
Reloads the service.
"""
api.run('service {} reload'.format(ctx.app_name))
@api.task
def svc_restart():
"""
Hard restarts the service.
"""
svc_disable()
api.run('service {} restart; sleep {}'.format(
ctx.app_name, ctx.STARTUP_DELAY_SECS
))
svc_enable()
@api.task
def svc_up(branch='release', commit='HEAD', restart='f'):
"""
Checks out code and reload or restarts the service.
:param branch: Branch to checkout. Defaults to "release".
:param commit: Commit hash within the branch to sync to, defaults to "HEAD".
:param restart: Flag indicating whether the service should be restarted or
just reloaded (the default).
"""
restart = parse_flag(restart)
code_sync(branch, commit)
# TODO: enable this
#migrate_db()
if restart:
svc_restart()
else:
svc_reload()
svc_stat()
@api.task
def svc_stat():
"""
Prints service status.
"""
code_stat()
api.run('service {} status'.format(ctx.app_name))
api.run('curl 127.0.01:5000/health')
@api.task
def svc_enable(wait='t'):
"""
Enabled service for traffic.
:param wait: Flag indicating whether to wait for host to roll into its
load-balancers.
"""
api.run('echo -n "finding your center" > {0}'.format(ctx.HEALTH_FILE))
wait_in_lbs(parse_wait(wait))
@api.task
def svc_disable(wait='t'):
"""
Disables service from serving traffic.
:param wait: Flag indicating whether to wait for host to fall out of its
load-balancers.
"""
wait = parse_wait(wait)
api.run('[ ! -f {0} ] || rm {0}'.format(ctx.HEALTH_FILE))
wait_out_lbs(wait)
# worker helpers
@api.task
def wrk_hosts(env=None, lb=None):
hosts(env=env, lb=lb, subenets=ctx.AWS_WORKER_SUBNET_IDS)
@api.task
def wrk_up(branch='release', commit='HEAD'):
"""
Checks out code and restarts all workers.
:param branch: Branch to checkout. Defaults to "release".
:param commit: Commit hash within the branch to sync to, defaults to "HEAD".
"""
code_sync(branch, commit)
wrk_restart()
wrk_stat()
@api.task
def wrk_stat(*workers):
"""
Prints status about the requested workers, or all if none are specified.
"""
code_stat()
for name in workers or ctx.WORKERS:
api.run('supervisorctl status {}; sleep 1'.format(name))
@api.task
def wrk_start(*workers):
"""
Starts the requested workers, or all if none are specified.
"""
for name in workers or ctx.WORKERS:
api.run('supervisorctl start {}; sleep 1'.format(name))
@api.task
def wrk_stop(*workers):
"""
Stops the requested workers, or all if none are specified.
"""
for name in workers or ctx.WORKERS:
api.run('supervisorctl stop {}; sleep 1'.format(name))
@api.task
def wrk_restart(*workers):
"""
Restarts the requested workers, or all if none are specified.
"""
for name in workers or ctx.WORKERS:
api.run('supervisorctl stop {}; sleep 1'.format(name))
# package tasks
@api.task
def pkg_build(version, branch='release', commit='HEAD', publish=False):
"""
Builds and downloads a deb of app_name (w/o the virtualenv).
:param version: Release version (e.g. 1.0.0).
:param branch: git branch from which to package. Defaults to 'release'.
:param commit: git commit commit from which to package. Defaults to 'HEAD'.
"""
code_sync(branch=branch, commit=commit)
if commit == 'HEAD':
with api.cd('~/' + ctx.app_name):
commit = api.run('git rev-parse HEAD')
with api.cd('~'):
api.run(
'[ ! -f {app_name}_1.{version}_all.deb ] || '
'rm -f {app_name}_1.{version}_all.deb'
.format(app_name=ctx.app_name, version=version)
)
rv = api.run(
'fpm -s dir -t deb -n {package_name} -v {version} '
'-a all -x "*.git" -x "*.pyc" '
'--description "{app_name} @ {branch}:{commit}" '
'--deb-user={user} '
'--deb-group={user} '
'~/{package_name}'
.format(
app_name=ctx.app_name,
package_name=ctx.app_name,
version=version,
user=api.env.user,
branch=branch,
commit=commit,
)
)
file_name = rv.split('"')[-2]
if publish:
pkg_publish(file_name)
@api.task
def pkg_build_venv(version, branch='release', commit='HEAD', publish=False):
"""
Builds and downloads a deb of app_name virtualenv (w/o the lib).
:param version: Release version (e.g. 1.0.0).
:param branch: git branch from which to package. Defaults to 'release'.
:param commit: git commit commit from which to package. Defaults to 'HEAD'.
"""
code_sync(commit=commit, branch=branch)
if commit == 'HEAD':
with api.cd('~/' + ctx.app_name):
commit = api.run('git rev-parse HEAD')
with api.cd('~'):
api.run(
'[ ! -f {app_name}-venv_{version}_amd64.deb ] || '
'rm -f {app_name}-venv_{version}_amd64.deb'
.format(app_name=ctx.app_name, version=version)
)
rv = api.run(
'fpm -s python -t deb -n {app} -v {version} '
'--description "{app_name} virtual environment @ {branch}:{commit}" '
'--deb-user={user} '
'--deb-group={user} '
'-s dir ~/.virtualenvs/{venv} '
.format(
app_name=ctx.app_name,
app=ctx.app_name + '_venv',
venv=ctx.app_name,
version=version,
user=api.env.user,
branch=branch,
commit=commit,
)
)
file_name = rv.split('"')[-2]
if publish:
pkg_publish(file_name)
@api.task
def pkg_publish(file_name):
"""
Uploads a deb package to the s3 bucket backing our apt repo. Note that:
- AWS_ACCESS_KEY_ID
- AWS_SECRET_ACCESS_KEY
must both be set in your environment *and* have write permissions to the
s3 bucket.
:param file_name: Name of built deb file to publish.
"""
if ctx.AWS_ACCESS_KEY_ID is None:
raise Exception('Your environment is missing AWS_ACCESS_KEY_ID')
if ctx.AWS_SECRET_ACCESS_KEY is None:
raise Exception('Your environment is missing AWS_SECRET_ACCESS_KEY')
with api.cd('~'):
api.run(
'deb-s3 publish {file_name} '
'--bucket={s3_bucket} '
'--access-key-id={s3_access_key} '
'--secret-access-key={s3_secret_key} '
'--endpoint={s3_endpoint} '
'--visibility=private '
'--arch={arch}'
.format(
file_name=file_name,
s3_bucket=ctx.S3_BUCKET,
s3_access_key=ctx.AWS_ACCESS_KEY_ID,
s3_secret_key=ctx.AWS_SECRET_ACCESS_KEY,
s3_endpoint=ctx.S3_ENDPOINT,
arch='amd64',
)
)
# generic helpers
def parse_flag(flag):
if flag.lower() in (True, 1, '1', 't', 'true'):
return True
if flag.lower() in (False, 0, '0', 'f', 'false'):
return False
raise ValueError('Invalid flag value "{}"'.format(flag))
def parse_wait(raw):
try:
return int(raw)
except (ValueError, TypeError):
flag = parse_flag(raw)
if flag:
return ctx.WAIT_DEFAULT_TIMEOUT_SECS
return 0
# aws helpers
def populate_instances(
tags=None,
lb=None,
exclude_disabled=True,
subenets=None,
):
def local_filter(instance):
if subenets and instance.subnet_id not in subenets:
return False
if instance.tags.get(ctx.AWS_DISABLED_TAG, None) is not None:
return False
if lb:
return any(instance.id == i.id for i in lb.instances)
return True
if api.env.instances:
return api.env.instances
remote_filter = {
'vpc-id': ctx.AWS_VPC_ID,
'instance-state-name': 'running',
}
if ctx.AWS_GROUP_ID:
remote_filter['instance.group-id'] = ctx.AWS_GROUP_ID
if tags:
remote_filter.update(tags)
cxn = boto.ec2.connect_to_region(api.env.region_name)
instances = [
instance.instances[0]
for instance in cxn.get_all_instances(filters=remote_filter)
if local_filter(instance.instances[0])
]
api.env.instances = instances
return api.env.instances
def populate_lbs():
if api.env.lbs is not None:
return api.env.lbs
cxn = boto.ec2.elb.connect_to_region(api.env.region_name)
api.env.lbs = [
lb for lb in cxn.get_all_load_balancers()
if lb.instances is not None
]
return api.env.lbs
def resolve_lb(hint):
return resolve_lbs(hint)[0]
def resolve_lbs(*hints):
mapping = dict((lb.name, lb) for lb in api.env.lbs)
lbs = []
for hint in hints:
if hint in mapping:
lbs.append(mapping[hint])
continue
raise ValueError('Unknown load balancer "{}"'.format(hint))
return lbs
def instance_lbs(instance):
return [
lb for lb in api.env.lbs
if any(instance.id == i.id for i in lb.instances)
]
def current_instance():
populate_instances()
populate_lbs()
host_string = api.env.host_string
for i in api.env.instances:
if 'Name' in i.tags and i.tags['Name'].startswith(host_string):
break
if i.private_ip_address.startswith(host_string):
break
if i.private_ip_address.replace('.', '-') in host_string:
break
else:
i = None
return i
def wait_in_lbs(timeout):
def in_service(states):
return (
            bool(states) and
states[0].state == 'InService'
)
wait_xx_lbs(timeout, in_service)
def wait_out_lbs(timeout):
def out_of_service(states):
return (
not states or
states[0].state == 'OutOfService'
)
wait_xx_lbs(timeout, out_of_service)
def wait_xx_lbs(timeout, health):
instance = current_instance()
if instance is None:
return
lbs = instance_lbs(instance)
et = time.time() + timeout
while True:
lbs = [
lb for lb in lbs
if not health(lb.get_instance_health([instance.id]))
]
if not lbs:
break
if time.time() > et:
raise Exception(
'Timed out after {} sec(s) waiting on host "{}" '
'health for lb(s) {}'.format(
timeout,
api.env.host_string,
', '.join((lb.name for lb in lbs))
)
)
print '[%s] local: waiting %s sec(s) for lb(s) %s' % (
api.env.host_string, ctx.WAIT_POLL_FREQ_SECS, ', '.join(
(lb.name for lb in lbs)
),
)
time.sleep(ctx.WAIT_POLL_FREQ_SECS)
| verygood-ops/fab-ops | fabfile.py | Python | apache-2.0 | 16,236 |
from cs_plone3_theme import Plone3Theme
class BootstrapTheme(Plone3Theme):
_template_dir = 'templates/bootstrap_theme'
summary = 'A Theme for Plone 3/4 based on Twitter Bootstrap'
skinbase = 'Bootstrap Theme'
use_local_commands = True
def post(self, command, output_dir, vars):
print "-----------------------------------------------------------"
print "Generation finished"
print "Remember to pin plone.app.jquery = 1.7.1.1"
print "in your buildout"
print
print "See README.txt for details"
print "-----------------------------------------------------------"
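# Hypothetical invocation sketch (the actual console script depends on how
# CodeSkel/templer is installed in your environment):
#
#   zopeskel bootstrap_theme my.bootstraptheme
#
# After generation, the post() hook above prints the reminder to pin
# plone.app.jquery = 1.7.1.1 in the buildout.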
| codesyntax/CodeSkel | codeskel/bootstrap_theme.py | Python | mit | 637 |
# oppia/context_processors.py
from django.conf import settings
import oppia
from oppia.models import Points, Award
def get_points(request):
if not request.user.is_authenticated():
return {'points': 0, 'badges':0 }
else:
points = Points.get_userscore(request.user)
if points is None:
points = 0
badges = Award.get_userawards(request.user)
if badges is None:
badges = 0
return {'points': points, 'badges':badges }
def get_version(request):
version = "v" + str(oppia.VERSION[0]) + "." + str(oppia.VERSION[1]) + "." + str(oppia.VERSION[2])
return {'version': version }
def get_settings(request):
return { 'OPPIA_ALLOW_SELF_REGISTRATION': settings.OPPIA_ALLOW_SELF_REGISTRATION,
'OPPIA_GOOGLE_ANALYTICS_ENABLED': settings.OPPIA_GOOGLE_ANALYTICS_ENABLED,
'OPPIA_GOOGLE_ANALYTICS_CODE': settings.OPPIA_GOOGLE_ANALYTICS_CODE,
'OPPIA_GOOGLE_ANALYTICS_DOMAIN': settings.OPPIA_GOOGLE_ANALYTICS_DOMAIN,
'OPPIA_SHOW_GRAVATARS': settings.OPPIA_SHOW_GRAVATARS,}
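# Rough wiring sketch (the dotted paths assume this module lives at
# oppia/context_processors.py, per the comment at the top of the file, and that
# the project uses the classic TEMPLATE_CONTEXT_PROCESSORS setting):
#
# TEMPLATE_CONTEXT_PROCESSORS += (
#     'oppia.context_processors.get_points',
#     'oppia.context_processors.get_version',
#     'oppia.context_processors.get_settings',
# )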
| DigitalCampus/django-maf-oppia | oppia/context_processors.py | Python | gpl-3.0 | 1,090 |
"""This module corresponds to functionality documented
at https://blockchain.info/api/blockchain_wallet_api
"""
import json
from . import util
from .exceptions import *
import logging
class Wallet:
"""The :class:`Wallet` class mirrors operations listed on the wallet API page.
It needs to be initialized on a per-wallet basis and will cache the wallet
    identifier, password, second password and API key (if provided).
"""
def __init__(self, identifier, password, second_password=None, api_code=None):
"""Initializes a wallet object.
:param str identifier: wallet identifier (GUID)
:param str password : decryption password
:param str second_password: second password (optional)
:param str api_code: Blockchain.info API code
"""
self.identifier = identifier
self.password = password
self.second_password = second_password
self.api_code = api_code
def send(self, to, amount, from_address=None, fee=None, note=None):
"""Send bitcoin from your wallet to a single address.
:param str to: recipient bitcoin address
:param int amount: amount to send (in satoshi)
:param str from_address: specific address to send from (optional)
:param int fee: transaction fee in satoshi. Must be greater than the default
fee (optional).
:param str note: public note to include with the transaction (optional)
:return: an instance of :class:`PaymentResponse` class
"""
recipient = {to: amount}
return self.send_many(recipient, from_address, fee, note)
def send_many(self, recipients, from_address=None, fee=None, note=None):
"""Send bitcoin from your wallet to multiple addresses.
:param dictionary recipients: dictionary with the structure of 'address':amount
:param str from_address: specific address to send from (optional)
:param int fee: transaction fee in satoshi. Must be greater than the default
fee (optional).
:param str note: public note to include with the transaction (optional)
:return: an instance of :class:`PaymentResponse` class
"""
params = self.build_basic_request()
method = ''
if len(recipients) == 1:
to_address, amount = recipients.popitem()
params['to'] = to_address
params['amount'] = amount
method = 'payment'
else:
params['recipients'] = json.dumps(recipients)
method = 'sendmany'
if from_address is not None:
params['from'] = from_address
if fee is not None:
params['fee'] = fee
if note is not None:
params['note'] = note
response = util.call_api("merchant/{0}/{1}".format(self.identifier, method), params)
json_response = json.loads(response)
self.parse_error(json_response)
payment_response = PaymentResponse(json_response['message'], json_response['tx_hash'], json_response.get('notice'))
return payment_response
def get_balance(self):
"""Fetch the wallet balance. Includes unconfirmed transactions
and possibly double spends.
:return: wallet balance in satoshi
"""
response = util.call_api("merchant/{0}/balance".format(self.identifier), self.build_basic_request())
json_response = json.loads(response)
self.parse_error(json_response)
return json_response.get('balance')
def list_addresses(self, confirmations=0):
"""List all active addresses in the wallet.
:param int confirmations: minimum number of confirmations transactions
must have before being included in balance of
addresses (optional)
:return: an array of :class:`Address` objects
"""
params = self.build_basic_request()
params['confirmations'] = confirmations
response = util.call_api("merchant/{0}/list".format(self.identifier), params)
json_response = json.loads(response)
self.parse_error(json_response)
addresses = []
for a in json_response['addresses']:
address = Address(a['balance'], a['address'], a['label'], a['total_received'])
addresses.append(address)
return addresses
def get_address(self, address, confirmations=0):
"""Retrieve an address from the wallet.
:param str address: address in the wallet to look up
:param int confirmations: minimum number of confirmations transactions
must have before being included in the balance
(optional)
:return: an instance of :class:`Address` class
"""
params = self.build_basic_request()
params['address'] = address
params['confirmations'] = confirmations
response = util.call_api("merchant/{0}/address_balance".format(self.identifier), params)
json_response = json.loads(response)
self.parse_error(json_response)
return Address(json_response['balance'],
json_response['address'],
None,
json_response['total_received'])
def new_address(self, label=None):
"""Generate a new address and add it to the wallet.
:param str label: label to attach to this address (optional)
:return: an instance of :class:`Address` class
"""
params = self.build_basic_request()
if label is not None:
params['label'] = label
response = util.call_api("merchant/{0}/new_address".format(self.identifier), params)
json_response = json.loads(response)
self.parse_error(json_response)
return Address(0,
json_response['address'],
'none',
0)
def archive_address(self, address):
"""Archive an address.
:param str address: address to archive
:return: string representation of the archived address
"""
params = self.build_basic_request()
params['address'] = address
response = util.call_api("merchant/{0}/archive_address".format(self.identifier), params)
json_response = json.loads(response)
self.parse_error(json_response)
return json_response['archived']
def unarchive_address(self, address):
"""Unarchive an address.
:param str address: address to unarchive
:return: string representation of the unarchived address
"""
params = self.build_basic_request()
params['address'] = address
response = util.call_api("merchant/{0}/unarchive_address".format(self.identifier), params)
json_response = json.loads(response)
self.parse_error(json_response)
return json_response['active']
def consolidate(self, days):
"""Consolidate the wallet addresses.
:param int days: addresses which have not received any
transactions in at least this many days will be consolidated.
:return: a string array of consolidated addresses
"""
params = self.build_basic_request()
params['days'] = days
response = util.call_api("merchant/{0}/auto_consolidate".format(self.identifier), params)
json_response = json.loads(response)
self.parse_error(json_response)
return json_response['consolidated']
def build_basic_request(self):
params = {'password': self.password}
if self.second_password is not None:
params['second_password'] = self.second_password
if self.api_code is not None:
params['api_code'] = self.api_code
return params
def parse_error(self, json_response):
error = json_response.get('error')
if error is not None:
raise APIException(error, 0)
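# Illustrative usage sketch (wallet GUID, password, API code, address and amount
# are all made-up placeholders, not real credentials):
#
#   wallet = Wallet('my-wallet-guid', 'my-password', api_code='my-api-code')
#   print(wallet.get_balance())
#   resp = wallet.send('1A1zP1eP5QGefi2DMPTfTL5SLmv7DivfNa', 100000, note='rent')
#   print(resp.tx_hash)
#
# Every call goes through util.call_api() and raises APIException whenever the
# JSON response carries an 'error' field (see parse_error above).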
class PaymentResponse:
def __init__(self, message, tx_hash, notice):
self.message = message
self.tx_hash = tx_hash
self.notice = notice
class Address:
def __init__(self, balance, address, label, total_received):
self.balance = balance
self.address = address
self.label = label
self.total_received = total_received
| mofax/evendice | blockchain/wallet.py | Python | mit | 8,480 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri May 10 14:46:37 2019
Shape functions for the 3-node Euler-Bernoulli beam
Complete!
@author: markinho
"""
import sympy as sp
import numpy as np
import matplotlib.pyplot as plt
# for the beam
L = sp.Symbol('L')
x1 = -L/2
x2 = 0
x3 = L/2
u1 = sp.Symbol('u1')
u2 = sp.Symbol('u2')
u3 = sp.Symbol('u3')
u4 = sp.Symbol('u4')
u5 = sp.Symbol('u5')
u6 = sp.Symbol('u6')
#Mat_Coef = sp.Matrix([[1, -L/2, L**2/4, -L**3/8, L**4/16, -L**5/32],
# [0, 1, -L, 3*L**2/4, -L**3/2, 5*L**4/16],
# [1, 0, 0, 0, 0, 0],
# [0, 1, 0, 0, 0, 0],
# [1, L/2, L**2/4, L**3/8, L**4/16, L**5/32],
# [0, 1, L, 3*L**2/4, L**3/2, 5*L**4/16]])
Mat_Coef = sp.Matrix([[1, x1, x1**2, x1**3, x1**4, x1**5],
[0, 1, 2*x1, 3*x1**2, 4*x1**3, 5*x1**4],
[1, x2, x2**2, x2**3, x2**4, x2**5],
[0, 1, 2*x2, 3*x2**2, 4*x2**3, 5*x2**4],
[1, x3, x3**2, x3**3, x3**4, x3**5],
[0, 1, 2*x3, 3*x3**2, 4*x3**3, 5*x3**4]])
U = sp.Matrix([u1, u2, u3, u4, u5, u6])
Coefs = Mat_Coef.inv() * U
A = Coefs[0]
B = Coefs[1]
C = Coefs[2]
D = Coefs[3]
E = Coefs[4]
F = Coefs[5]
x = sp.Symbol('x')
Ns = sp.expand(A + B*x + C*x**2 + D*x**3 + E*x**4 + F*x**5)
N1 = sp.Add(*[argi for argi in Ns.args if argi.has(u1)]).subs(u1, 1)
N2 = sp.Add(*[argi for argi in Ns.args if argi.has(u2)]).subs(u2, 1)
N3 = sp.Add(*[argi for argi in Ns.args if argi.has(u3)]).subs(u3, 1)
N4 = sp.Add(*[argi for argi in Ns.args if argi.has(u4)]).subs(u4, 1)
N5 = sp.Add(*[argi for argi in Ns.args if argi.has(u5)]).subs(u5, 1)
N6 = sp.Add(*[argi for argi in Ns.args if argi.has(u6)]).subs(u6, 1)
Nn = sp.Matrix([N1, N2, N3, N4, N5, N6])
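## quick sanity check (sketch, left commented out like the other verification
## blocks in this file): each shape function must be 1 at its own DOF and 0 at
## the others, e.g. N1(-L/2) = 1, N1(0) = N1(L/2) = 0 and dN2/dx(-L/2) = 1.
#for Ni in (N1, N3, N5):
#    print([sp.simplify(Ni.subs(x, xi)) for xi in (x1, x2, x3)])
#for Ni in (N2, N4, N6):
#    print([sp.simplify(sp.diff(Ni, x).subs(x, xi)) for xi in (x1, x2, x3)])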
##plot generation ---------------------------------------------------------------------
##converting to python functions
#nN1 = sp.utilities.lambdify([x, L], N1, "numpy")
#nN2 = sp.utilities.lambdify([x, L], N2, "numpy")
#
#nN3 = sp.utilities.lambdify([x, L], N3, "numpy")
#nN4 = sp.utilities.lambdify([x, L], N4, "numpy")
#
#nN5 = sp.utilities.lambdify([x, L], N5, "numpy")
#nN6 = sp.utilities.lambdify([x, L], N6, "numpy")
#
#L = 2.
#x = np.linspace(-L/2., L/2, 100)
#
#plt.plot(x, nN1(x, L), label="N1")
#plt.plot(x, nN3(x, L), label="N3")
#plt.plot(x, nN5(x, L), label="N5")
#plt.title('Deslocamentos')
#plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
#plt.show()
#
#plt.plot(x, nN2(x, L), label="N2")
#plt.plot(x, nN4(x, L), label="N4")
#plt.plot(x, nN6(x, L), label="N6")
#plt.title('Rotações')
#plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
#plt.show()
# first derivative
dN1 = sp.diff(N1, x)
dN2 = sp.diff(N2, x)
dN3 = sp.diff(N3, x)
dN4 = sp.diff(N4, x)
dN5 = sp.diff(N5, x)
dN6 = sp.diff(N6, x)
# second derivative
ddN1 = sp.diff(dN1, x)
ddN2 = sp.diff(dN2, x)
ddN3 = sp.diff(dN3, x)
ddN4 = sp.diff(dN4, x)
ddN5 = sp.diff(dN5, x)
ddN6 = sp.diff(dN6, x)
# third derivative
dddN1 = sp.diff(ddN1, x)
dddN2 = sp.diff(ddN2, x)
dddN3 = sp.diff(ddN3, x)
dddN4 = sp.diff(ddN4, x)
dddN5 = sp.diff(ddN5, x)
dddN6 = sp.diff(ddN6, x)
# fourth derivative
ddddN1 = sp.diff(dddN1, x)
ddddN2 = sp.diff(dddN2, x)
ddddN3 = sp.diff(dddN3, x)
ddddN4 = sp.diff(dddN4, x)
ddddN5 = sp.diff(dddN5, x)
ddddN6 = sp.diff(dddN6, x)
# stiffness matrix computation
B = sp.Matrix([ddN1, ddN2, ddN3, ddN4, ddN5, ddN6])
BB = B * B.T
E = sp.Symbol('E')
I = sp.Symbol('I')
Ke = E*I*sp.integrate( BB, (x, x1, x3) )
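## symmetry check sketch: the element stiffness matrix must be symmetric, so
## the line below should print a zero 6x6 matrix if uncommented.
#print(sp.simplify(Ke - Ke.T))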
# cantilever beam with 1000 kN at the free end, E = 20000 kN/cm2, nu=0.3, b=2cm, h=10cm and L = 6 m ---------------------
b = 2
h = 10
F = 1000
Kvb = Ke[2:,2:]
Kvb = np.array(Kvb.subs({L:6, I:b*h**3/12, E:20000})).astype(np.float64)
Fvb = np.array([0, 0, -F, 0])
UvbEB = np.linalg.solve(Kvb,Fvb)
UgvbEB = np.array([0, 0, UvbEB[0], UvbEB[1], UvbEB[2], UvbEB[3]])
# simply supported beam with 1000 kN at midspan, E = 20000 kN/cm2, nu=0.3, b=2cm, h=10cm and L = 6 m ---------------------
b = 2
h = 10
F = 1000
Kvba = Ke[1:5,1:5]
Kvba = np.array(Kvba.subs({L:6, I:b*h**3/12, E:20000})).astype(np.float64)
Fvba = np.array([0, -F, 0, 0])
UvbaEB = np.linalg.solve(Kvba,Fvba)
UgvbaEB = np.array([0, UvbaEB[0], UvbaEB[1], UvbaEB[2], UvbaEB[3], 0])
# 160 elements from the website ------------------------------------------------------------------------------------------------ 160!!!
t_w = 3
h = 139
b_f = 50
t_f = 3
I_z = (t_w*h**3)/(12) + 2 * (b_f * t_f**3)/(12) + 2 * b_f * t_f * ( t_f/2 + h/2 )**2
Ke2 = np.array(Ke.subs({L:200, I:I_z, E:20000})).astype(np.float64)
Ke7 = np.array(Ke.subs({L:700, I:I_z, E:20000})).astype(np.float64)
# computation of the equivalent nodal force vector
g = sp.Symbol('g')
q = sp.Symbol('q')
Feg = -g * sp.integrate( Nn, (x, x1, x3) )
Fegq = -(g+q) * sp.integrate( Nn, (x, x1, x3) )
Fe1 = np.array(Feg.subs({L:200, g:0.528})).astype(np.float64)
Fe2 = np.array(Fegq.subs({L:700, g:0.528, q:2.11})).astype(np.float64)
Fe3 = np.array(Feg.subs({L:700, g:0.528})).astype(np.float64)
# correspondence (local-to-global DOF mapping)
ID1 = np.array([12, 0, 1, 2, 3, 4])
ID2 = np.array([3, 4, 5, 6, 7, 8])
ID3 = np.array([7, 8, 9, 10, 13, 11])
# global stiffness matrix
K = np.zeros((14,14))
for i in range(0, 6):
for j in range(0,6):
K[ ID1[i], ID1[j] ] += Ke2[i,j]
K[ ID2[i], ID2[j] ] += Ke7[i,j]
K[ ID3[i], ID3[j] ] += Ke7[i,j]
# global force vector
F = np.zeros(14)
for i in range(0, 6):
F[ ID1[i] ] += Fe1[i]
F[ ID2[i] ] += Fe2[i]
F[ ID3[i] ] += Fe3[i]
Ku = K[:-2, :-2]
Fu = F[:-2]
Kr = K[-2:, :-2]
# using numpy
U_np = np.linalg.solve(Ku, Fu)
U = np.zeros(14)
U[:-2] = U_np
# computation of the support reactions
Frapo = np.zeros(2)
Frapo = F[-2:]
Rapo = np.dot(Kr, U_np) - Frapo
# nodal displacements in the local system
u1 = np.zeros(6)
u1 = U[ ID1 ]
u2 = np.zeros(6)
u2 = U[ ID2 ]
u3 = np.zeros(6)
u3 = U[ ID3 ]
# displacements within the element ---------------------------------------------------------------------------------
N_EL = sp.Matrix([[N1],
[N2],
[N3],
[N4],
[N5],
[N6]])
# for computing the rotations
dN_ES = sp.Matrix([[dN1],
[dN2],
[dN3],
[dN4],
[dN5],
[dN6]])
# for computing the bending moment
dN_M = sp.Matrix([[ddN1],
[ddN2],
[ddN3],
[ddN4],
[ddN5],
[ddN6]])
# for computing the shear force
dN_C = sp.Matrix([[dddN1],
[dddN2],
[dddN3],
[dddN4],
[dddN5],
[dddN6]])
##generic nodal displacement vector
#ug1, ug2, ug3, ug4, ug5, ug6 = sp.symbols('ug1 ug2 ug3 ug4 ug5 ug6')
#Ug = sp.Matrix([ug1, ug2, ug3, ug4, ug5, ug6])
#Ug = sp.Matrix(UgvbEB)
#Ug = sp.Matrix(UgvbaEB)
Ug1 = sp.Matrix(u1)
Ug2 = sp.Matrix(u2)
Ug3 = sp.Matrix(u3)
# analytical solution --------------------------------------------------------------------------------------------------------
Ra = 2725/8*g + 3675/8*q
Rb = 4475/8*g + 1925/8*q
Ms1 = Ra*x - g*x**2/2
Ms2 = Ra*(200 + x) - g*200*(100 + x) - q*x**2/2
Ms3 = Rb*x - g*x**2/2
Vs1 = sp.diff(Ms1, x)
Vs2 = sp.diff(Ms2, x)
Vs3 = -sp.diff(Ms3, x)
# for the cantilever beam with 1 element --------------------------------------------------------------------------
#deslocamentos = (N_EL.transpose() * Ug)[0]
#rotacoes = (dN_ES.transpose() * Ug)[0]
#momento = - (20000 * b*h**3/12) * (dN_M.transpose() * Ug)[0]
#cortante = (20000 * b*h**3/12) * (dN_C.transpose() * Ug)[0]
#
#
#deslocamentos_f = sp.utilities.lambdify([x, L], deslocamentos, "numpy")
#rotacoes_f = sp.utilities.lambdify([x, L], rotacoes, "numpy")
#momento_f = sp.utilities.lambdify([x, L], momento, "numpy")
#cortante_f = sp.utilities.lambdify([x, L], cortante, "numpy")
#
#L = 6.
#x = np.linspace(-L/2, L/2, 100)
#
#plt.plot(x, deslocamentos_f(x, L), label="deslocamentos")
#plt.plot(x, np.zeros(100), label="zero")
#plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
#plt.show()
#
#plt.plot(x, rotacoes_f(x, L), label="rotacoes")
#plt.plot(x, np.zeros(100), label="zero")
#plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
#plt.show()
#
#plt.plot(x, momento_f(x, L), label="momento")
#plt.plot(x, np.zeros(100), label="zero")
#plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
#plt.show()
#
#plt.plot(x, cortante_f(x, L), label="cortante")
#plt.plot(x, np.zeros(100), label="zero")
#plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
#plt.show()
# for the beam from the course material (train loading) with 3 elements --------------------------------------------------------------------
deslocamentos1 = (N_EL.transpose() * Ug1)[0]
deslocamentos2 = (N_EL.transpose() * Ug2)[0]
deslocamentos3 = (N_EL.transpose() * Ug3)[0]
rotacoes1 = (dN_ES.transpose() * Ug1)[0]
rotacoes2 = (dN_ES.transpose() * Ug2)[0]
rotacoes3 = (dN_ES.transpose() * Ug3)[0]
momento1 = - (20000 * I_z) * (dN_M.transpose() * Ug1)[0]
momento2 = - (20000 * I_z) * (dN_M.transpose() * Ug2)[0]
momento3 = - (20000 * I_z) * (dN_M.transpose() * Ug3)[0]
cortante1 = -(20000 * I_z) * (dN_C.transpose() * Ug1)[0]
cortante2 = -(20000 * I_z) * (dN_C.transpose() * Ug2)[0]
cortante3 = -(20000 * I_z) * (dN_C.transpose() * Ug3)[0]
Ms1f = sp.utilities.lambdify([x, g, q], -Ms1, "numpy")
Ms2f = sp.utilities.lambdify([x, g, q], -Ms2, "numpy")
Ms3f = sp.utilities.lambdify([x, g, q], -Ms3, "numpy")
Vs1f = sp.utilities.lambdify([x, g, q], Vs1, "numpy")
Vs2f = sp.utilities.lambdify([x, g, q], Vs2, "numpy")
Vs3f = sp.utilities.lambdify([x, g, q], Vs3, "numpy")
deslocamentos_f1 = sp.utilities.lambdify([x, L], deslocamentos1, "numpy")
deslocamentos_f2 = sp.utilities.lambdify([x, L], deslocamentos2, "numpy")
deslocamentos_f3 = sp.utilities.lambdify([x, L], deslocamentos3, "numpy")
rotacoes_f1 = sp.utilities.lambdify([x, L], rotacoes1, "numpy")
rotacoes_f2 = sp.utilities.lambdify([x, L], rotacoes2, "numpy")
rotacoes_f3 = sp.utilities.lambdify([x, L], rotacoes3, "numpy")
momento_f1 = sp.utilities.lambdify([x, L], momento1, "numpy")
momento_f2 = sp.utilities.lambdify([x, L], momento2, "numpy")
momento_f3 = sp.utilities.lambdify([x, L], momento3, "numpy")
cortante_f1 = sp.utilities.lambdify([x, L], cortante1, "numpy")
cortante_f2 = sp.utilities.lambdify([x, L], cortante2, "numpy")
cortante_f3 = sp.utilities.lambdify([x, L], cortante3, "numpy")
x200 = np.linspace(-200/2, 200/2, 100)
x700 = np.linspace(-700/2, 700/2, 100)
x1 = np.linspace(0, 200, 100)
x2 = np.linspace(200, 900, 100)
x3 = np.linspace(900, 1600, 100)
x_20 = np.linspace(0, 700, 100)
x_3i = np.linspace(700, 0, 100)
x = np.linspace(0, 1600, 300)
#plt.plot(x1, deslocamentos_f1(x200, 200), label="deslocamentos")
#plt.plot(x2, deslocamentos_f2(x700, 700))
#plt.plot(x3, deslocamentos_f3(x700, 700))
#plt.plot(x, np.zeros(300), label="zero", color="black")
#plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
#plt.show()
#
#plt.plot(x1, rotacoes_f1(x200, 200), label="rotacoes")
#plt.plot(x2, rotacoes_f2(x700, 700))
#plt.plot(x3, rotacoes_f3(x700, 700))
#plt.plot(x, np.zeros(300), label="zero", color="black")
#plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
#plt.show()
plt.plot(x1, momento_f1(x200, 200), label="momento")
plt.plot(x2, momento_f2(x700, 700))
plt.plot(x3, momento_f3(x700, 700))
plt.plot(x1, Ms1f(x1, 0.528, 0.528+2.11), "--", color="red", label="Momento analítico")
plt.plot(x2, Ms2f(x_20, 0.528, 0.528+2.11), "--", color="red")
plt.plot(x3, Ms3f(x_3i, 0.528, 0.528+2.11), "--", color="red")
plt.plot(x, np.zeros(300), label="zero", color="black")
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.show()
plt.plot(x1, cortante_f1(x200, 200), label="cortante")
plt.plot(x2, cortante_f2(x700, 700))
plt.plot(x3, cortante_f3(x700, 700))
plt.plot(x1, -Vs1f(x1, 0.528, 0.528+2.11), "--", color="red", label="Cortante analítico")
plt.plot(x2, -Vs2f(x_20, 0.528, 0.528+2.11), "--", color="red")
plt.plot(x3, -Vs3f(x_3i, 0.528, 0.528+2.11), "--", color="red")
plt.plot(x, np.zeros(300), label="zero", color="black")
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.show() | argenta-web/argenta-web.github.io | MEFaplicado-html/vigas/codigos/Derivando-FuncoesFormaVigaEulerBernouilli3nos.py | Python | mit | 12,228 |
"""
Tor Browser Launcher
https://github.com/micahflee/torbrowser-launcher/
Copyright (c) 2013-2014 Micah Lee <[email protected]>
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
import os, sys, argparse
from common import Common, SHARE
from settings import Settings
from launcher import Launcher
def main():
# parse arguments
parser = argparse.ArgumentParser()
parser.add_argument('--settings', action='store_true', dest='settings', help='Open Tor Browser Launcher settings')
parser.add_argument('url', nargs='*', help='URL to load')
args = parser.parse_args()
settings = bool(args.settings)
url_list = args.url
# load the version and print the banner
with open(os.path.join(SHARE, 'version')) as buf:
tor_browser_launcher_version = buf.read().strip()
print _('Tor Browser Launcher')
print _('By Micah Lee, licensed under MIT')
print _('version {0}').format(tor_browser_launcher_version)
print 'https://github.com/micahflee/torbrowser-launcher'
common = Common(tor_browser_launcher_version)
# is torbrowser-launcher already running?
tbl_pid = common.get_pid(common.paths['tbl_bin'], True)
if tbl_pid:
print _('Tor Browser Launcher is already running (pid {0}), bringing to front').format(tbl_pid)
common.bring_window_to_front(tbl_pid)
sys.exit()
if settings:
# settings mode
app = Settings(common)
else:
# launcher mode
app = Launcher(common, url_list)
if __name__ == "__main__":
main()
| kytvi2p/torbrowser-launcher | torbrowser_launcher/__init__.py | Python | mit | 2,525 |
# Copyright 2017 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import fakeredis
import logging
import os
from c7n_mailer.ldap_lookup import LdapLookup, Redis
from ldap3 import Server, Connection, MOCK_SYNC
from ldap3.strategy import mockBase
logger = logging.getLogger('custodian.mailer')
PETER = (
'uid=peter,cn=users,dc=initech,dc=com',
{
'uid': ['peter'],
'manager': 'uid=bill_lumbergh,cn=users,dc=initech,dc=com',
'mail': '[email protected]',
'displayName': 'Peter',
'objectClass': 'person'
}
)
BILL = (
'uid=bill_lumbergh,cn=users,dc=initech,dc=com',
{
'uid': ['bill_lumbergh'],
'mail': '[email protected]',
'displayName': 'Bill Lumberg',
'objectClass': 'person'
}
)
MAILER_CONFIG = {
'smtp_port': 25,
'from_address': '[email protected]',
'contact_tags': ['OwnerEmail', 'SupportEmail'],
'queue_url': 'https://sqs.us-east-1.amazonaws.com/xxxx/cloudcustodian-mailer',
'region': 'us-east-1',
'ses_region': 'us-east-1',
'ldap_uri': 'ldap.initech.com',
'smtp_server': 'smtp.inittech.com',
'cache_engine': 'sqlite',
'role': 'arn:aws:iam::xxxx:role/cloudcustodian-mailer',
'ldap_uid_tags': ['CreatorName', 'Owner'],
'templates_folders': [os.path.abspath(os.path.dirname(__file__)),
os.path.abspath('/')],
}
MAILER_CONFIG_AZURE = {
'queue_url': 'asq://storageaccount.queue.core.windows.net/queuename',
'from_address': '[email protected]',
'sendgrid_api_key': 'SENDGRID_API_KEY',
'templates_folders': [os.path.abspath(os.path.dirname(__file__)),
os.path.abspath('/')],
}
RESOURCE_1 = {
'AvailabilityZone': 'us-east-1a',
'Attachments': [],
'Tags': [
{
'Value': '[email protected]',
'Key': 'SupportEmail'
},
{
'Value': 'peter',
'Key': 'CreatorName'
}
],
'VolumeId': 'vol-01a0e6ea6b89f0099'
}
RESOURCE_2 = {
'AvailabilityZone': 'us-east-1c',
'Attachments': [],
'Tags': [
{
'Value': '[email protected]',
'Key': 'SupportEmail'
},
{
'Value': 'peter',
'Key': 'CreatorName'
}
],
'VolumeId': 'vol-21a0e7ea9b19f0043',
'Size': 8
}
RESOURCE_3 = {
'AvailabilityZone': 'us-east-1c',
"CreateTime": "2019-05-07T19:09:46.148Z",
'Attachments': [
{
"AttachTime": "2019-05-07T19:09:46.000Z",
"Device": "/dev/xvda",
"InstanceId": "i-00000000000000000",
"State": "attached",
"VolumeId": "vol-00000000000000000",
"DeleteOnTermination": 'true'
}
],
'Tags': [
{
'Value': '[email protected]',
'Key': 'SupportEmail'
},
{
'Value': 'peter',
'Key': 'CreatorName'
}
],
'VolumeId': 'vol-21a0e7ea9b19f0043',
'Size': 8,
'State': "in-use"
}
SQS_MESSAGE_1 = {
'account': 'core-services-dev',
'account_id': '000000000000',
'region': 'us-east-1',
'action': {
'to': ['resource-owner', 'ldap_uid_tags'],
'email_ldap_username_manager': True,
'template': '',
'priority_header': '1',
'type': 'notify',
'transport': {'queue': 'xxx', 'type': 'sqs'},
'subject': '{{ account }} AWS EBS Volumes will be DELETED in 15 DAYS!'
},
'policy': {
'filters': [{'Attachments': []}, {'tag:maid_status': 'absent'}],
'resource': 'ebs',
'actions': [
{
'type': 'mark-for-op',
'days': 15,
'op': 'delete'
},
{
'to': ['resource-owner', 'ldap_uid_tags'],
'email_ldap_username_manager': True,
'template': '',
'priority_header': '1',
'type': 'notify',
'subject': 'EBS Volumes will be DELETED in 15 DAYS!'
}
],
'comments': 'We are deleting your EBS volumes.',
'name': 'ebs-mark-unattached-deletion'
},
'event': None,
'resources': [RESOURCE_1]
}
SQS_MESSAGE_2 = {
'account': 'core-services-dev',
'account_id': '000000000000',
'region': 'us-east-1',
'action': {
'type': 'notify',
'to': ['datadog://?metric_name=EBS_volume.available.size']
},
'policy': {
'filters': [{'Attachments': []}, {'tag:maid_status': 'absent'}],
'resource': 'ebs',
'actions': [
{
'type': 'mark-for-op',
'days': 15,
'op': 'delete'
},
{
'type': 'notify',
'to': ['datadog://?metric_name=EBS_volume.available.size']
}
],
'comments': 'We are deleting your EBS volumes.',
'name': 'ebs-mark-unattached-deletion'
},
'event': None,
'resources': [RESOURCE_1, RESOURCE_2]
}
SQS_MESSAGE_3 = {
'account': 'core-services-dev',
'account_id': '000000000000',
'region': 'us-east-1',
'action': {
'type': 'notify',
'to': ['datadog://?metric_name=EBS_volume.available.size&metric_value_tag=Size']
},
'policy': {
'filters': [{'Attachments': []}, {'tag:maid_status': 'absent'}],
'resource': 'ebs',
'actions': [
{
'type': 'mark-for-op',
'days': 15,
'op': 'delete'
},
{
'type': 'notify',
'to': ['datadog://?metric_name=EBS_volume.available.size&metric_value_tag=Size']
}
],
'comments': 'We are deleting your EBS volumes.',
'name': 'ebs-mark-unattached-deletion'
},
'event': None,
'resources': [RESOURCE_2]
}
SQS_MESSAGE_4 = {
'account': 'core-services-dev',
'account_id': '000000000000',
'region': 'us-east-1',
'action': {
'to': ['resource-owner', 'ldap_uid_tags'],
'cc': ['[email protected]', '[email protected]'],
'email_ldap_username_manager': True,
'template': 'default.html',
'priority_header': '1',
'type': 'notify',
'transport': {'queue': 'xxx', 'type': 'sqs'},
'subject': '{{ account }} AWS EBS Volumes will be DELETED in 15 DAYS!'
},
'policy': {
'filters': [{'Attachments': []}, {'tag:maid_status': 'absent'}],
'resource': 'ebs',
'actions': [
{
'type': 'mark-for-op',
'days': 15,
'op': 'delete'
},
{
'to': ['resource-owner', 'ldap_uid_tags'],
'cc': ['[email protected]', '[email protected]'],
'email_ldap_username_manager': True,
'template': 'default.html.j2',
'priority_header': '1',
'type': 'notify',
'subject': 'EBS Volumes will be DELETED in 15 DAYS!'
}
],
'comments': 'We are deleting your EBS volumes.',
'name': 'ebs-mark-unattached-deletion'
},
'event': None,
'resources': [RESOURCE_1]
}
SQS_MESSAGE_5 = {
'account': 'core-services-dev',
'account_id': '000000000000',
'region': 'us-east-1',
'action': {
'to': ['slack://#test-channel'],
'template': 'default.html',
'type': 'notify',
'transport': {'queue': 'xxx', 'type': 'sqs'},
'subject': '{{ account }} AWS EBS Volumes will be DELETED in 15 DAYS!'
},
'policy': {
'filters': [{'Attachments': []}, {'tag:maid_status': 'absent'}],
'resource': 'ebs',
'actions': [
{
'type': 'mark-for-op',
'days': 15,
'op': 'delete'
},
{
'to': ['slack://tag/SlackChannel'],
'template': 'slack_default.j2',
'type': 'notify',
'subject': 'EBS Volumes will be DELETED in 15 DAYS!'
}
],
'comments': 'We are deleting your EBS volumes.',
'name': 'ebs-mark-unattached-deletion'
},
'event': None,
'resources': [RESOURCE_3]
}
ASQ_MESSAGE = '''{
"account":"subscription",
"account_id":"ee98974b-5d2a-4d98-a78a-382f3715d07e",
"region":"all",
"action":{
"to":[
"[email protected]"
],
"template":"default",
"priority_header":"2",
"type":"notify",
"transport":{
"queue":"https://test.queue.core.windows.net/testcc",
"type":"asq"
},
"subject":"testing notify action"
},
"policy":{
"resource":"azure.keyvault",
"name":"test-notify-for-keyvault",
"actions":[
{
"to":[
"[email protected]"
],
"template":"default",
"priority_header":"2",
"type":"notify",
"transport":{
"queue":"https://test.queue.core.windows.net/testcc",
"type":"asq"
},
"subject":"testing notify action"
}
]
},
"event":null,
"resources":[
{
"name":"cckeyvault1",
"tags":{
},
"resourceGroup":"test_keyvault",
"location":"southcentralus",
"type":"Microsoft.KeyVault/vaults",
"id":"/subscriptions/ee98974b-5d2a-4d98-a78a-382f3715d07e/resourceGroups/test_keyvault/providers/Microsoft.KeyVault/vaults/cckeyvault1"
}
]
}'''
ASQ_MESSAGE_TAG = '''{
"account":"subscription",
"account_id":"ee98974b-5d2a-4d98-a78a-382f3715d07e",
"region":"all",
"action":{
"to":[
"tag:owner"
],
"template":"default",
"priority_header":"2",
"type":"notify",
"transport":{
"queue":"https://test.queue.core.windows.net/testcc",
"type":"asq"
},
"subject":"testing notify action"
},
"policy":{
"resource":"azure.keyvault",
"name":"test-notify-for-keyvault",
"actions":[
{
"to":[
"tag:owner"
],
"template":"default",
"priority_header":"2",
"type":"notify",
"transport":{
"queue":"https://test.queue.core.windows.net/testcc",
"type":"asq"
},
"subject":"testing notify action"
}
]
},
"event":null,
"resources":[
{
"name":"cckeyvault1",
"tags":{
"owner":"[email protected]"
},
"resourceGroup":"test_keyvault",
"location":"southcentralus",
"type":"Microsoft.KeyVault/vaults",
"id":"/subscriptions/ee98974b-5d2a-4d98-a78a-382f3715d07e/resourceGroups/test_keyvault/providers/Microsoft.KeyVault/vaults/cckeyvault1"
}
]
}'''
ASQ_MESSAGE_SLACK = '''{
"account":"subscription",
"account_id":"ee98974b-5d2a-4d98-a78a-382f3715d07e",
"region":"all",
"action":{
"to":[
"slack://#test-channel"
],
"template":"default",
"priority_header":"2",
"type":"notify",
"transport":{
"queue":"https://test.queue.core.windows.net/testcc",
"type":"asq"
},
"subject":"testing notify action"
},
"policy":{
"resource":"azure.keyvault",
"name":"test-notify-for-keyvault",
"actions":[
{
"to":[
"slack://#test-channel"
],
"template":"default",
"priority_header":"2",
"type":"notify",
"transport":{
"queue":"https://test.queue.core.windows.net/testcc",
"type":"asq"
},
"subject":"testing notify action"
}
]
},
"event":null,
"resources":[
{
"name":"cckeyvault1",
"tags":{
},
"resourceGroup":"test_keyvault",
"location":"southcentralus",
"type":"Microsoft.KeyVault/vaults",
"id":"/subscriptions/ee98974b-5d2a-4d98-a78a-382f3715d07e/resourceGroups/test_keyvault/providers/Microsoft.KeyVault/vaults/cckeyvault1"
}
]
}'''
ASQ_MESSAGE_DATADOG = '''{
"account":"subscription",
"account_id":"ee98974b-5d2a-4d98-a78a-382f3715d07e",
"region":"all",
"action":{
"to":[
"datadog://?metric_name=EBS_volume.available.size"
],
"template":"default",
"priority_header":"2",
"type":"notify",
"transport":{
"queue":"https://test.queue.core.windows.net/testcc",
"type":"asq"
},
"subject":"testing notify action"
},
"policy":{
"resource":"azure.keyvault",
"name":"test-notify-for-keyvault",
"actions":[
{
"to":[
"datadog://?metric_name=EBS_volume.available.size"
],
"template":"default",
"priority_header":"2",
"type":"notify",
"transport":{
"queue":"https://test.queue.core.windows.net/testcc",
"type":"asq"
},
"subject":"testing notify action"
}
]
},
"event":null,
"resources":[
{
"name":"cckeyvault1",
"tags":{
},
"resourceGroup":"test_keyvault",
"location":"southcentralus",
"type":"Microsoft.KeyVault/vaults",
"id":"/subscriptions/ee98974b-5d2a-4d98-a78a-382f3715d07e/resourceGroups/test_keyvault/providers/Microsoft.KeyVault/vaults/cckeyvault1"
}
]
}'''
# Monkey-patch ldap3 to work around a bytes/text handling bug.
_safe_rdn = mockBase.safe_rdn
def safe_rdn(*a, **kw):
return [(k, mockBase.to_raw(v)) for k, v in _safe_rdn(*a, **kw)]
mockBase.safe_rdn = safe_rdn
def get_fake_ldap_connection():
server = Server('my_fake_server')
connection = Connection(
server,
client_strategy=MOCK_SYNC
)
connection.bind()
connection.strategy.add_entry(PETER[0], PETER[1])
connection.strategy.add_entry(BILL[0], BILL[1])
return connection
def get_ldap_lookup(cache_engine=None, uid_regex=None):
if cache_engine == 'sqlite':
config = {
'cache_engine': 'sqlite',
'ldap_cache_file': ':memory:'
}
elif cache_engine == 'redis':
config = {
'cache_engine': 'redis',
'redis_host': 'localhost'
}
if uid_regex:
config['ldap_uid_regex'] = uid_regex
ldap_lookup = MockLdapLookup(config, logger)
michael_bolton = {
'dn': 'CN=Michael Bolton,cn=users,dc=initech,dc=com',
'mail': '[email protected]',
'manager': 'CN=Milton,cn=users,dc=initech,dc=com',
'displayName': 'Michael Bolton'
}
milton = {
'uid': '123456',
'dn': 'CN=Milton,cn=users,dc=initech,dc=com',
'mail': '[email protected]',
'manager': 'CN=cthulhu,cn=users,dc=initech,dc=com',
'displayName': 'Milton'
}
bob_porter = {
'dn': 'CN=Bob Porter,cn=users,dc=initech,dc=com',
'mail': '[email protected]',
'manager': 'CN=Bob Slydell,cn=users,dc=initech,dc=com',
'displayName': 'Bob Porter'
}
ldap_lookup.base_dn = 'cn=users,dc=initech,dc=com'
ldap_lookup.uid_key = 'uid'
ldap_lookup.attributes.append('uid')
ldap_lookup.caching.set('michael_bolton', michael_bolton)
ldap_lookup.caching.set(bob_porter['dn'], bob_porter)
ldap_lookup.caching.set('123456', milton)
ldap_lookup.caching.set(milton['dn'], milton)
return ldap_lookup
class MockLdapLookup(LdapLookup):
# allows us to instantiate this object and not need a redis daemon
def get_redis_connection(self, redis_host, redis_port):
return MockRedisLookup()
    # allows us to instantiate this object and not have ldap3 try to connect
# to anything or raise exception in unit tests, we replace connection with a mock
def get_connection(self, ignore, these, params):
return get_fake_ldap_connection()
class MockRedisLookup(Redis):
def __init__(self):
self.connection = fakeredis.FakeStrictRedis()
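# Sketch of how a test can use these helpers (values come from the fixtures
# defined above, nothing external):
#
#   ldap_lookup = get_ldap_lookup(cache_engine='sqlite')
#   michael = ldap_lookup.caching.get('michael_bolton')
#   # -> {'mail': '[email protected]', 'manager': 'CN=Milton,...', ...}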
| FireballDWF/cloud-custodian | tools/c7n_mailer/tests/common.py | Python | apache-2.0 | 16,794 |
"""
FormES
--------
"""
from corehq.pillows.mappings.const import NULL_VALUE
from . import filters
from .es_query import HQESQuery
class FormES(HQESQuery):
index = 'forms'
default_filters = {
'is_xform_instance': filters.term("doc_type", "xforminstance"),
'has_xmlns': filters.exists("xmlns"),
'has_user': filters.exists("form.meta.userID"),
'has_domain': filters.exists("domain"),
}
@property
def builtin_filters(self):
return [
form_ids,
xmlns,
app,
submitted,
completed,
user_id,
user_type,
user_ids_handle_unknown,
j2me_submissions,
updating_cases,
] + super(FormES, self).builtin_filters
def user_aggregation(self):
return self.terms_aggregation('form.meta.userID', 'user')
def completed_histogram(self, timezone=None):
return self.date_histogram('date_histogram', 'form.meta.timeEnd', 'day', timezone=timezone)
def submitted_histogram(self, timezone=None):
return self.date_histogram('date_histogram', 'received_on', 'day', timezone=timezone)
def domain_aggregation(self):
return self.terms_aggregation('domain.exact', 'domain')
def only_archived(self):
"""Include only archived forms, which are normally excluded"""
return (self.remove_default_filter('is_xform_instance')
.filter(filters.doc_type('xformarchived')))
def form_ids(form_ids):
return filters.term('_id', form_ids)
def xmlns(xmlnss):
return filters.term('xmlns.exact', xmlnss)
def app(app_ids):
return filters.term('app_id', app_ids)
def submitted(gt=None, gte=None, lt=None, lte=None):
return filters.date_range('received_on', gt, gte, lt, lte)
def completed(gt=None, gte=None, lt=None, lte=None):
return filters.date_range('form.meta.timeEnd', gt, gte, lt, lte)
def user_id(user_ids):
if not isinstance(user_ids, (list, set, tuple)):
user_ids = [user_ids]
return filters.term(
'form.meta.userID',
[x if x is not None else NULL_VALUE for x in user_ids]
)
def user_type(user_types):
return filters.term("user_type", user_types)
def user_ids_handle_unknown(user_ids):
missing_users = None in user_ids
user_ids = [_f for _f in user_ids if _f]
if not missing_users:
user_filter = user_id(user_ids)
elif user_ids and missing_users:
user_filter = filters.OR(
user_id(user_ids),
filters.missing('form.meta.userID'),
)
else:
user_filter = filters.missing('form.meta.userID')
return user_filter
def j2me_submissions(gt=None, gte=None, lt=None, lte=None):
return filters.AND(
filters.regexp("form.meta.appVersion", "v2+.[0-9]+.*"),
submitted(gt, gte, lt, lte)
)
def updating_cases(case_ids):
"""return only those forms that have case blocks that touch the cases listed in `case_ids`
"""
return filters.term("__retrieved_case_ids", case_ids)
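# Rough usage sketch (domain, dates and user id are placeholders; .filter() and
# .run() come from the HQESQuery base class and are assumed to work as usual):
#
#   query = (FormES()
#            .filter(filters.term('domain.exact', 'my-project'))
#            .filter(submitted(gte='2020-01-01', lt='2020-02-01'))
#            .filter(user_id(['abc123'])))
#   hits = query.run().hits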
| dimagi/commcare-hq | corehq/apps/es/forms.py | Python | bsd-3-clause | 3,080 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" BVT tests for Templates ISO
"""
#Import Local Modules
import marvin
from marvin.cloudstackTestCase import *
from marvin.cloudstackAPI import *
from marvin.remoteSSHClient import remoteSSHClient
from integration.lib.utils import *
from integration.lib.base import *
from integration.lib.common import *
import urllib
from random import random
#Import System modules
import datetime
class Services:
"""Test Templates Services
"""
def __init__(self):
self.services = {
"account": {
"email": "[email protected]",
"firstname": "Test",
"lastname": "User",
"username": "test",
# Random characters are appended for unique
# username
"password": "fr3sca",
},
"service_offering": {
"name": "Tiny Instance",
"displaytext": "Tiny Instance",
"cpunumber": 1,
"cpuspeed": 100, # in MHz
"memory": 64, # In MBs
},
"disk_offering": {
"displaytext": "Small",
"name": "Small",
"disksize": 1
},
"virtual_machine": {
"displayname": "testVM",
"hypervisor": 'XenServer',
"protocol": 'TCP',
"ssh_port": 22,
"username": "root",
"password": "password",
"privateport": 22,
"publicport": 22,
},
"volume": {
"diskname": "Test Volume",
},
"template_1": {
"displaytext": "Cent OS Template",
"name": "Cent OS Template",
"ostypeid": '946b031b-0e10-4f4a-a3fc-d212ae2ea07f',
},
"template_2": {
"displaytext": "Public Template",
"name": "Public template",
"ostypeid": '946b031b-0e10-4f4a-a3fc-d212ae2ea07f',
"isfeatured": True,
"ispublic": True,
"isextractable": True,
"mode": "HTTP_DOWNLOAD",
},
"templatefilter": 'self',
"destzoneid": 5,
# For Copy template (Destination zone)
"isfeatured": True,
"ispublic": True,
"isextractable": False,
"bootable": True,
"passwordenabled": True,
"ostypeid": '946b031b-0e10-4f4a-a3fc-d212ae2ea07f',
"mode": 'advanced',
# Networking mode: Advanced, basic
"sleep": 30,
"timeout": 10,
}
class TestCreateTemplate(cloudstackTestCase):
def setUp(self):
self.apiclient = self.testClient.getApiClient()
self.dbclient = self.testClient.getDbConnection()
self.cleanup = []
return
def tearDown(self):
try:
self.dbclient.close()
#Clean up, terminate the created templates
cleanup_resources(self.apiclient, self.cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
@classmethod
def setUpClass(cls):
cls.services = Services().services
cls.api_client = super(TestCreateTemplate, cls).getClsTestClient().getApiClient()
# Get Zone, Domain and templates
cls.domain = get_domain(cls.api_client, cls.services)
cls.zone = get_zone(cls.api_client, cls.services)
cls.disk_offering = DiskOffering.create(
cls.api_client,
cls.services["disk_offering"]
)
template = get_template(
cls.api_client,
cls.zone.id,
cls.services["ostypeid"]
)
cls.services["virtual_machine"]["zoneid"] = cls.zone.id
cls.services["volume"]["diskoffering"] = cls.disk_offering.id
cls.services["volume"]["zoneid"] = cls.zone.id
cls.services["sourcezoneid"] = cls.zone.id
cls.account = Account.create(
cls.api_client,
cls.services["account"],
domainid=cls.domain.id
)
cls.services["account"] = cls.account.account.name
cls.service_offering = ServiceOffering.create(
cls.api_client,
cls.services["service_offering"]
)
#create virtual machine
cls.virtual_machine = VirtualMachine.create(
cls.api_client,
cls.services["virtual_machine"],
templateid=template.id,
accountid=cls.account.account.name,
domainid=cls.account.account.domainid,
serviceofferingid=cls.service_offering.id,
mode=cls.services["mode"]
)
#Stop virtual machine
cls.virtual_machine.stop(cls.api_client)
# Poll listVM to ensure VM is stopped properly
timeout = cls.services["timeout"]
while True:
time.sleep(cls.services["sleep"])
# Ensure that VM is in stopped state
list_vm_response = list_virtual_machines(
cls.api_client,
id=cls.virtual_machine.id
)
if isinstance(list_vm_response, list):
vm = list_vm_response[0]
if vm.state == 'Stopped':
break
if timeout == 0:
raise Exception(
"Failed to stop VM (ID: %s) in change service offering" %
vm.id)
timeout = timeout - 1
list_volume = list_volumes(
cls.api_client,
virtualmachineid=cls.virtual_machine.id,
type='ROOT',
listall=True
)
cls.volume = list_volume[0]
cls._cleanup = [
cls.account,
cls.service_offering,
cls.disk_offering,
]
return
@classmethod
def tearDownClass(cls):
try:
cls.api_client = super(TestCreateTemplate, cls).getClsTestClient().getApiClient()
#Cleanup resources used
cleanup_resources(cls.api_client, cls._cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
def test_01_create_template(self):
"""Test create public & private template
"""
# Validate the following:
# 1. database (vm_template table) should be updated
# with newly created template
# 2. UI should show the newly added template
# 3. ListTemplates API should show the newly added template
#Create template from Virtual machine and Volume ID
template = Template.create(
self.apiclient,
self.services["template_1"],
self.volume.id,
account=self.account.account.name,
domainid=self.account.account.domainid
)
self.cleanup.append(template)
self.debug("Created template with ID: %s" % template.id)
list_template_response = list_templates(
self.apiclient,
templatefilter=\
self.services["templatefilter"],
id=template.id
)
self.assertEqual(
isinstance(list_template_response, list),
True,
"Check list response returns a valid list"
)
#Verify template response to check whether template added successfully
self.assertNotEqual(
len(list_template_response),
0,
"Check template available in List Templates"
)
template_response = list_template_response[0]
self.assertEqual(
template_response.displaytext,
self.services["template_1"]["displaytext"],
"Check display text of newly created template"
)
name = template_response.name
self.assertEqual(
name.count(self.services["template_1"]["name"]),
1,
"Check name of newly created template"
)
self.assertEqual(
template_response.ostypeid,
self.services["template_1"]["ostypeid"],
"Check osTypeID of newly created template"
)
return
class TestTemplates(cloudstackTestCase):
@classmethod
def setUpClass(cls):
cls.services = Services().services
cls.api_client = super(TestTemplates, cls).getClsTestClient().getApiClient()
# Get Zone, Domain and templates
cls.domain = get_domain(cls.api_client, cls.services)
cls.zone = get_zone(cls.api_client, cls.services)
cls.disk_offering = DiskOffering.create(
cls.api_client,
cls.services["disk_offering"]
)
template = get_template(
cls.api_client,
cls.zone.id,
cls.services["ostypeid"]
)
cls.services["virtual_machine"]["zoneid"] = cls.zone.id
cls.services["volume"]["diskoffering"] = cls.disk_offering.id
cls.services["volume"]["zoneid"] = cls.zone.id
cls.services["template_2"]["zoneid"] = cls.zone.id
cls.services["sourcezoneid"] = cls.zone.id
cls.account = Account.create(
cls.api_client,
cls.services["account"],
admin=True,
domainid=cls.domain.id
)
cls.user = Account.create(
cls.api_client,
cls.services["account"],
domainid=cls.domain.id
)
cls.services["account"] = cls.account.account.name
cls.service_offering = ServiceOffering.create(
cls.api_client,
cls.services["service_offering"]
)
#create virtual machine
cls.virtual_machine = VirtualMachine.create(
cls.api_client,
cls.services["virtual_machine"],
templateid=template.id,
accountid=cls.account.account.name,
domainid=cls.account.account.domainid,
serviceofferingid=cls.service_offering.id,
mode=cls.services["mode"]
)
#Stop virtual machine
cls.virtual_machine.stop(cls.api_client)
# Poll listVM to ensure VM is stopped properly
timeout = cls.services["timeout"]
while True:
time.sleep(cls.services["sleep"])
# Ensure that VM is in stopped state
list_vm_response = list_virtual_machines(
cls.api_client,
id=cls.virtual_machine.id
)
if isinstance(list_vm_response, list):
vm = list_vm_response[0]
if vm.state == 'Stopped':
break
if timeout == 0:
raise Exception(
"Failed to stop VM (ID: %s) in change service offering" %
vm.id)
timeout = timeout - 1
list_volume = list_volumes(
cls.api_client,
virtualmachineid=cls.virtual_machine.id,
type='ROOT',
listall=True
)
try:
cls.volume = list_volume[0]
except Exception as e:
            raise Exception(
                "Exception: Unable to find root volume for VM: %s" %
                cls.virtual_machine.id)
#Create templates for Edit, Delete & update permissions testcases
cls.template_1 = Template.create(
cls.api_client,
cls.services["template_1"],
cls.volume.id,
account=cls.account.account.name,
domainid=cls.account.account.domainid
)
cls.template_2 = Template.create(
cls.api_client,
cls.services["template_2"],
cls.volume.id,
account=cls.account.account.name,
domainid=cls.account.account.domainid
)
cls._cleanup = [
cls.service_offering,
cls.disk_offering,
cls.account,
cls.user
]
@classmethod
def tearDownClass(cls):
try:
cls.api_client = super(TestTemplates, cls).getClsTestClient().getApiClient()
#Cleanup created resources such as templates and VMs
cleanup_resources(cls.api_client, cls._cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
def setUp(self):
self.apiclient = self.testClient.getApiClient()
self.dbclient = self.testClient.getDbConnection()
self.cleanup = []
return
def tearDown(self):
try:
self.dbclient.close()
#Clean up, terminate the created templates
cleanup_resources(self.apiclient, self.cleanup)
except Exception as e:
raise Exception("Warning: Exception during cleanup : %s" % e)
return
def test_02_edit_template(self):
"""Test Edit template
"""
# Validate the following:
# 1. UI should show the edited values for template
# 2. database (vm_template table) should have updated values
new_displayText = random_gen()
new_name = random_gen()
cmd = updateTemplate.updateTemplateCmd()
# Update template attributes
cmd.id = self.template_1.id
cmd.displaytext = new_displayText
cmd.name = new_name
cmd.bootable = self.services["bootable"]
cmd.passwordenabled = self.services["passwordenabled"]
self.apiclient.updateTemplate(cmd)
self.debug("Edited template with new name: %s" % new_name)
# Sleep to ensure update reflected across all the calls
time.sleep(self.services["sleep"])
timeout = self.services["timeout"]
while True:
# Verify template response for updated attributes
list_template_response = list_templates(
self.apiclient,
templatefilter=\
self.services["templatefilter"],
id=self.template_1.id,
account=self.account.account.name,
domainid=self.account.account.domainid
)
if isinstance(list_template_response, list):
break
elif timeout == 0:
raise Exception("List Template failed!")
time.sleep(10)
timeout = timeout -1
self.assertEqual(
isinstance(list_template_response, list),
True,
"Check list response returns a valid list"
)
self.assertNotEqual(
len(list_template_response),
0,
"Check template available in List Templates"
)
template_response = list_template_response[0]
self.debug("New Name: %s" % new_displayText)
self.debug("Name in Template response: %s"
% template_response.displaytext)
self.assertEqual(
template_response.displaytext,
new_displayText,
"Check display text of updated template"
)
self.assertEqual(
template_response.name,
new_name,
"Check name of updated template"
)
self.assertEqual(
str(template_response.passwordenabled).lower(),
str(self.services["passwordenabled"]).lower(),
"Check passwordenabled field of updated template"
)
self.assertEqual(
template_response.ostypeid,
self.services["ostypeid"],
"Check OSTypeID of updated template"
)
return
def test_03_delete_template(self):
"""Test delete template
"""
# Validate the following:
# 1. UI should not show the deleted template
# 2. database (vm_template table) should not contain deleted template
self.debug("Deleting Template ID: %s" % self.template_1.id)
self.template_1.delete(self.apiclient)
list_template_response = list_templates(
self.apiclient,
templatefilter=\
self.services["templatefilter"],
id=self.template_1.id,
account=self.account.account.name,
domainid=self.account.account.domainid
)
# Verify template is deleted properly using ListTemplates
self.assertEqual(
list_template_response,
None,
"Check if template exists in List Templates"
)
return
def test_04_extract_template(self):
"Test for extract template"
# Validate the following
# 1. Admin should able extract and download the templates
# 2. ListTemplates should display all the public templates
# for all kind of users
# 3 .ListTemplates should not display the system templates
self.debug("Extracting template with ID: %s" % self.template_2.id)
cmd = extractTemplate.extractTemplateCmd()
cmd.id = self.template_2.id
cmd.mode = self.services["template_2"]["mode"]
cmd.zoneid = self.zone.id
list_extract_response = self.apiclient.extractTemplate(cmd)
try:
# Format URL to ASCII to retrieve response code
formatted_url = urllib.unquote_plus(list_extract_response.url)
url_response = urllib.urlopen(formatted_url)
response_code = url_response.getcode()
except Exception:
self.fail(
"Extract Template Failed with invalid URL %s (template id: %s)" \
% (formatted_url, self.template_2.id)
)
self.assertEqual(
list_extract_response.id,
self.template_2.id,
"Check ID of the extracted template"
)
self.assertEqual(
list_extract_response.extractMode,
self.services["template_2"]["mode"],
"Check mode of extraction"
)
self.assertEqual(
list_extract_response.zoneid,
self.services["template_2"]["zoneid"],
"Check zone ID of extraction"
)
self.assertEqual(
response_code,
200,
"Check for a valid response download URL"
)
return
def test_05_template_permissions(self):
"""Update & Test for template permissions"""
# Validate the following
# 1. listTemplatePermissions returns valid
# permissions set for template
# 2. permission changes should be reflected in vm_template
# table in database
self.debug("Updating Template permissions ID:%s" % self.template_2.id)
cmd = updateTemplatePermissions.updateTemplatePermissionsCmd()
# Update template permissions
cmd.id = self.template_2.id
cmd.isfeatured = self.services["isfeatured"]
cmd.ispublic = self.services["ispublic"]
cmd.isextractable = self.services["isextractable"]
self.apiclient.updateTemplatePermissions(cmd)
list_template_response = list_templates(
self.apiclient,
templatefilter='featured',
id=self.template_2.id,
account=self.account.account.name,
domainid=self.account.account.domainid
)
self.assertEqual(
isinstance(list_template_response, list),
True,
"Check list response returns a valid list"
)
# Verify template response for updated permissions for normal user
template_response = list_template_response[0]
self.assertEqual(
template_response.id,
self.template_2.id,
"Check template ID"
)
self.assertEqual(
template_response.ispublic,
int(True),
"Check ispublic permission of template"
)
self.assertNotEqual(
template_response.templatetype,
'SYSTEM',
"ListTemplates should not list any system templates"
)
return
def test_06_copy_template(self):
"""Test for copy template from one zone to another"""
# Validate the following
# 1. copy template should be successful and
# secondary storage should contain new copied template.
self.debug("Copy template from Zone: %s to %s" % (
self.services["sourcezoneid"],
self.services["destzoneid"]
))
cmd = copyTemplate.copyTemplateCmd()
cmd.id = self.template_2.id
cmd.destzoneid = self.services["destzoneid"]
cmd.sourcezoneid = self.services["sourcezoneid"]
self.apiclient.copyTemplate(cmd)
# Verify template is copied to another zone using ListTemplates
list_template_response = list_templates(
self.apiclient,
templatefilter=\
self.services["templatefilter"],
id=self.template_2.id,
zoneid=self.services["destzoneid"]
)
self.assertEqual(
isinstance(list_template_response, list),
True,
"Check list response returns a valid list"
)
self.assertNotEqual(
len(list_template_response),
0,
"Check template extracted in List Templates"
)
template_response = list_template_response[0]
self.assertEqual(
template_response.id,
self.template_2.id,
"Check ID of the downloaded template"
)
self.assertEqual(
template_response.zoneid,
self.services["destzoneid"],
"Check zone ID of the copied template"
)
# Cleanup- Delete the copied template
cmd = deleteTemplate.deleteTemplateCmd()
cmd.id = template_response.id
cmd.zoneid = self.services["destzoneid"]
self.apiclient.deleteTemplate(cmd)
return
def test_07_list_public_templates(self):
"""Test only public templates are visible to normal user"""
# Validate the following
# 1. ListTemplates should show only 'public' templates for normal user
list_template_response = list_templates(
self.apiclient,
templatefilter='featured',
account=self.user.account.name,
domainid=self.user.account.domainid
)
self.assertEqual(
isinstance(list_template_response, list),
True,
"Check list response returns a valid list"
)
self.assertNotEqual(
len(list_template_response),
0,
"Check template available in List Templates"
)
#Template response should list all 'public' templates
for template in list_template_response:
self.assertEqual(
template.ispublic,
True,
"ListTemplates should list only public templates"
)
return
def test_08_list_system_templates(self):
"""Test System templates are not visible to normal user"""
# Validate the following
# 1. ListTemplates should not show 'SYSTEM' templates for normal user
list_template_response = list_templates(
self.apiclient,
templatefilter='featured',
account=self.user.account.name,
domainid=self.user.account.domainid
)
self.assertEqual(
isinstance(list_template_response, list),
True,
"Check list response returns a valid list"
)
self.assertNotEqual(
len(list_template_response),
0,
"Check template available in List Templates"
)
for template in list_template_response:
self.assertNotEqual(
template.templatetype,
'SYSTEM',
"ListTemplates should not list any system templates"
)
return
| argv0/cloudstack | test/integration/smoke/test_templates.py | Python | apache-2.0 | 31,136 |
def inicializar():
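    # build an empty 3x3 board; every cell starts with the placeholder string "XX"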
tab = []
for i in range(3):
linha = []
for j in range(3):
linha.append("XX")
tab.append(linha)
return tab
def main():
    jogo = inicializar()
    print (jogo)
if __name__ == "__main__":
    main()
| dehssousa/devops-aula05 | docs/docs/docs/src/src/jogovelha.py | Python | apache-2.0 | 266 |
#!/usr/bin/env python3
import sys,re
from beautifier import Email, Url
import ftfy
name1="Tschöp"
email1=" [email protected]"
email2=" [email protected]. "
email3=" jim at gmail.com"
#cleanre = r"[^a-zA-Z0-9@]"
#cleanmail = re.sub(cleanre,"",email2)
clean = email2.strip(' ').strip(".")
print(f">{clean}<")
clean=email1.strip('.com')
print(f"{clean}")
em1 = Email(email2)
#print(em1.domain)
#print(em1.username)
#print(f"{em1.username}@{em1.domain}")
#print(ftfy.fix_text(name1)) | jdurbin/sandbox | python/panda/emailfix.py | Python | mit | 485 |
#
# Example file for working with filesystem shell methods
# (For Python 3.x, be sure to use the ExampleSnippets3.txt file)
import os
import shutil
from os import path
from shutil import make_archive
from zipfile import ZipFile
def main():
if path.exists("textfile.txt"):
src = path.realpath("textfile.txt")
head, tail = path.split(src)
print(head)
print(tail)
print(src)
dst = src + ".bak"
shutil.copy(src, dst)
shutil.copystat(src, dst)
os.rename("textfile.txt.bak", "newfile.txt")
root_dir, tail = path.split(src)
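        # zip up everything in the source file's directory into archive.zip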
shutil.make_archive("archive", "zip", root_dir)
with ZipFile("testzip.zip", "w") as newzip:
newzip.write("newfile.txt")
newzip.write("textfile.txt")
if __name__ == "__main__":
main()
| thatguyandy27/python-sandbox | learning-python/Ch4/shell_start.py | Python | mit | 846 |
"""
The :mod:`costcla.probcal` module includes methods for probability calibration
"""
from .probcal import ROCConvexHull
__all__ = ['ROCConvexHull',]
| albahnsen/CostSensitiveClassification | costcla/probcal/__init__.py | Python | bsd-3-clause | 153 |
__author__ = 'manabchetia'
from pyneural import pyneural
from os import listdir
from os.path import join, isfile
import pandas as pd
import numpy as np
from PIL import Image
import leargist as gist
from sklearn.cross_validation import train_test_split
from sknn.mlp import Classifier, Layer
from pandas.io.pickle import read_pickle
from sklearn.externals import joblib
import cPickle
# https://github.com/fchollet/keras/blob/master/examples/mnist_nn.py
# from nolearn.dbn import DBN
# img_dir = '../data/uni/'
img_dir = '../data/final'
n_classes = 50
# n_files_per_class = 4
n_files_per_class = 240
clf_cache = 'pyneural_model_5000' # 240 images per class
# clf_cache = 'pyneural_model_4' # 4 images per class
def get_img_files(img_dir):
imgs = filter(lambda x: ".JPG" in x, listdir(img_dir))
df = pd.DataFrame(index=imgs, columns={'CLASS', 'GIST_DESC', 'TYPE'})
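    # label the images 0..n_classes-1, repeating each class label once per image of that class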
df["CLASS"] = np.repeat(np.linspace(0, n_classes - 1, num=n_classes), n_files_per_class)
return df
def extract_GIST(df):
gist_desc = []
# Loop over each image
for img in list(df.index):
img = Image.open(join(img_dir, img))
desc = gist.color_gist(img)
gist_desc.append(desc.astype(np.float32))
df['GIST_DESC'] = gist_desc
return df
def get_accuracy(predictions, truth):
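    # element-wise comparison of predicted vs. true labels; the result is an
    # integer percentage (Python 2 floor division)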
mask = predictions==truth
correct = np.count_nonzero(mask)
return correct * 100 / len(predictions)
def get_df(df_cache):
if isfile(df_cache):
print('DataFrame found. \nLoading DataFrame in memory')
df = read_pickle(df_cache)
else:
print('Reading image files ...')
df = get_img_files(img_dir)
print('Separating Training and Test files ...')
# Version 2
X_train_file, X_test_file, y_train_file, y_test_file = train_test_split(list(df.index), list(df['CLASS']),
test_size=0.25, random_state=15)
df.loc[X_test_file, 'TYPE'] = 'TEST'
df.loc[X_train_file, 'TYPE'] = 'TRAIN'
print('Extracting GIST features ...')
df = extract_GIST(df)
print('Writing DataFrame to disk')
df.to_pickle(df_cache)
return df
def get_classifier(clf_cache, df, n_iter):
# global clf
    if isfile(clf_cache):
        print('Model found. \nLoading Model from disk')
        with open(clf_cache, 'rb') as fid:
            clf = cPickle.load(fid)
else:
print('Getting X,Y for training ...')
df_train = df[df['TYPE'] == 'TRAIN']
features_train = np.asarray(list(df_train['GIST_DESC']))
labels_train = np.asarray(list(df_train['CLASS']), dtype=np.int8)
n_rows, n_features = features_train.shape # 150, 960
# n_labels = 50
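        # train() is fed one-hot targets here: one row per sample with a 1 in the true class column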
labels_expanded = np.zeros((n_rows, n_classes), dtype=np.int8)
for i in xrange(n_rows):
labels_expanded[i][labels_train[i]] = 1
print('Training ...')
clf = pyneural.NeuralNet([n_features, n_iter, n_classes])
clf.train(features_train, labels_expanded, 10, 40, 0.005, 0.0,
1.0) # features, labels, iterations, batch size, learning rate, L2 penalty, decay multiplier
# with open(clf_cache, 'wb') as fid:
# cPickle.dump(clf, fid)
return clf
if __name__ == '__main__':
df_cache = 'df.pickle.big'
df = get_df(df_cache)
# PyNeural
# Get X, Y
# if isfile(clf_cache):
clf = get_classifier(clf_cache, df, n_iter=3000)
joblib.dump(clf, 'filename.pkl')
print('Testing ...')
df_test = df[df['TYPE'] == 'TEST']
features_test = np.asarray(list(df_test['GIST_DESC']))
labels_test = np.asarray(list(df_test['CLASS']))
predictions = np.asarray(clf.predict_label(features_test), dtype=np.int8)
print(predictions)
print(" ")
print(labels_test)
print('Accuracy: {} %'.format(get_accuracy(predictions, labels_test)))
## Scikit Neural Network
# Get X, Y
# print('Getting X,Y for training ...')
# df_train = df[df['TYPE'] == 'TRAIN']
#
# features_train = np.asarray(list(df_train['GIST_DESC']))
# labels_train = np.asarray(list(df_train['CLASS']), dtype=np.int8)
#
# # Training
# print("Training ...")
# nn = Classifier(layers=[Layer("Sigmoid", units=400), Layer("Softmax")], learning_rate=0.001, n_iter=2000)
# nn.fit(features_train, labels_train)
#
# # Testing
# df_test = df[df['TYPE'] == 'TEST']
# features_test = np.asarray(list(df_test['GIST_DESC']))
# labels_test = np.asarray(list(df_test['CLASS']))
#
# print('Accuracy: {}%'.format(nn.score(features_test, labels_test)*100))
## NoLEARN DBN
# # Get X, Y
# print('Getting X,Y for training ...')
# df_train = df[df['TYPE'] == 'TRAIN']
#
# features_train = np.asarray(list(df_train['GIST_DESC']))
# labels_train = list(df_train['CLASS'])
# nn = DBN([features_train.shape[1], 400, 10], learn_rates=0.3, learn_rate_decays=0.9, epochs=10, verbose=1,)
#
# # print(features_train.shape, labels_train.)
# nn.fit(features_train, labels_train)
# #
# # # Testing
# df_test = df[df['TYPE'] == 'TEST']
# features_test = np.asarray(list(df_test['GIST_DESC']))
# labels_test = list(df_test['CLASS'])
# # print('Accuracy: {}%'.format(nn.score(features_test, labels_test)*100)) | liboyin/horc | src/classifier_gist_neural.py | Python | gpl-2.0 | 5,380 |
#!/usr/bin/env python
#
# getSeqFlankBlatHit.py
#
# author: Joseph Tran <[email protected]>
# date: 25-03-2015
#
import argparse
import logging
## arguments ##
parser = argparse.ArgumentParser(description="Extract genomic sequences flanking blat hits")
parser.add_argument("genome", help="genome in fasta format")
parser.add_argument("modblat", help="modified blat alignment file in psl-like format (one header line, cf. QT)")
parser.add_argument("-U", "--target_upstream_fragment_size", dest="upstream_frag_sz", type=int, default=800, help="the target upstream fragment size to extract [default: %(default)s]")
parser.add_argument("-D", "--target_downstream_fragment_size", dest="downstream_frag_sz", type=int, default=4000, help="the target downstream fragment size to extract [default: %(default)s]")
args = parser.parse_args()
## logging ##
# create logger
logger = logging.getLogger('getSeqFlankBlatHit')
logger.setLevel(logging.DEBUG)
# create console handler and set level to debug
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
# create formatter
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
# add formatter to ch
ch.setFormatter(formatter)
# add ch to logger
logger.addHandler(ch)
## functions ##
def main():
try:
### steps ###
## load genome
## load mod blat
## iterate over blat hits and extract genomic sequences regarding the given fragment sizes (transcript and LTR) at the reference position
### 2 cases: strand + or -
## export bed with sequence coordinates to extract
## export fasta output using pybedtools
### seq_id: Qname ; Tname ; LTR size; transcript size ; total size; RC if strand "-"
### seq
## load genome
logger.info("Loading fasta genome ...")
if stat(args.genome).st_size == 0:
logger.error("genome file is empty: " + args.genome )
sys.exit(1)
else:
fasta = Fasta(args.genome)
logger.info("genome file: " + args.genome)
logger.info("number of reference sequences: " + str(len(sorted(fasta.keys()))))
## load mod blat
logger.info("Loading modblat ...")
if stat(args.modblat).st_size == 0:
logger.error("modblat file is empty: " + args.modblat )
sys.exit(1)
else:
logger.info("mod blat file: " + args.modblat)
mb = ModBlat(args.modblat)
logger.info("number of blat hits: " + str(len(mb.hits)))
for hit in mb.hits:
logger.log(0, "qname/tname pair: " + str(hit.qname) + "/" + str(hit.tname))
## compute genomic coordinates
logger.info("Compute genomic bed items coordinates ...")
bedItems= []
for hit in mb.hits:
bi = hit.computeGenomicSequenceBedItem(args.upstream_frag_sz, args.downstream_frag_sz)
bedItems.append(bi.totuple())
logger.info("number of bed items: " + str(len(bedItems)))
## export bed items to bed file
logger.info("Export to bed file ...")
bed = pybedtools.BedTool(bedItems)
outfile = path.basename(path.splitext(args.modblat)[0]) + '_seqFlankBlatHit.bed'
bed.saveas(outfile, trackline="track name='genomic sequence extraction flanking blat hit' color=128,0,0")
num_lines = sum(1 for line in open(outfile))
logger.info("number of lines in bed file: " + str(num_lines))
## get fasta sequence from bed
logger.info("Get fasta sequences from bed ...")
fasta_out = path.basename(path.splitext(args.modblat)[0]) + '_seqFlankBlatHit.fasta'
bed = bed.sequence(fi=args.genome, s=True, name=True)
bedout = bed.save_seqs(fasta_out)
assert open(bedout.seqfn).read() == open(bed.seqfn).read()
fout = Fasta(fasta_out)
logger.info("flanking blat hits sequences file: " + fasta_out)
logger.info("number of flanking sequences: " + str(len(sorted(fout.keys()))))
except KeyboardInterrupt:
print "Shutdown requested...exiting"
except Exception:
traceback.print_exc(file=sys.stdout)
### MAIN ###
if __name__ == '__main__':
import sys, traceback
from os import path, stat
from pyfasta import Fasta
from datetime import datetime
import pybedtools
sys.path.append( path.dirname( path.dirname( path.abspath(__file__) ) ) )
from getSeqFlankBlatHitLib import ModBlatHit, ModBlat, BedItem
startTime = datetime.now()
main()
logger.info("Execution time: " + str(datetime.now() - startTime))
sys.exit(0)
| jos4uke/getSeqFlankBlatHit | getSeqFlankBlatHit.py | Python | gpl-2.0 | 4,651 |
from flask_restful import Resource
from flask import request, Response
from emuvim.api.openstack.openstack_dummies.base_openstack_dummy import BaseOpenstackDummy
from datetime import datetime
import logging
import json
import uuid
import copy
class NeutronDummyApi(BaseOpenstackDummy):
def __init__(self, ip, port, compute):
super(NeutronDummyApi, self).__init__(ip, port)
self.compute = compute
self.api.add_resource(NeutronListAPIVersions, "/")
self.api.add_resource(Shutdown, "/shutdown")
self.api.add_resource(NeutronShowAPIv2Details, "/v2.0")
self.api.add_resource(NeutronListNetworks, "/v2.0/networks.json", "/v2.0/networks",
resource_class_kwargs={'api': self})
self.api.add_resource(NeutronShowNetwork, "/v2.0/networks/<network_id>.json", "/v2.0/networks/<network_id>",
resource_class_kwargs={'api': self})
self.api.add_resource(NeutronCreateNetwork, "/v2.0/networks.json", "/v2.0/networks",
resource_class_kwargs={'api': self})
self.api.add_resource(NeutronUpdateNetwork, "/v2.0/networks/<network_id>.json", "/v2.0/networks/<network_id>",
resource_class_kwargs={'api': self})
self.api.add_resource(NeutronDeleteNetwork, "/v2.0/networks/<network_id>.json", "/v2.0/networks/<network_id>",
resource_class_kwargs={'api': self})
self.api.add_resource(NeutronListSubnets, "/v2.0/subnets.json", "/v2.0/subnets",
resource_class_kwargs={'api': self})
self.api.add_resource(NeutronShowSubnet, "/v2.0/subnets/<subnet_id>.json", "/v2.0/subnets/<subnet_id>",
resource_class_kwargs={'api': self})
self.api.add_resource(NeutronCreateSubnet, "/v2.0/subnets.json", "/v2.0/subnets",
resource_class_kwargs={'api': self})
self.api.add_resource(NeutronUpdateSubnet, "/v2.0/subnets/<subnet_id>.json", "/v2.0/subnets/<subnet_id>",
resource_class_kwargs={'api': self})
self.api.add_resource(NeutronDeleteSubnet, "/v2.0/subnets/<subnet_id>.json", "/v2.0/subnets/<subnet_id>",
resource_class_kwargs={'api': self})
self.api.add_resource(NeutronListPorts, "/v2.0/ports.json", "/v2.0/ports",
resource_class_kwargs={'api': self})
self.api.add_resource(NeutronShowPort, "/v2.0/ports/<port_id>.json", "/v2.0/ports/<port_id>",
resource_class_kwargs={'api': self})
self.api.add_resource(NeutronCreatePort, "/v2.0/ports.json", "/v2.0/ports",
resource_class_kwargs={'api': self})
self.api.add_resource(NeutronUpdatePort, "/v2.0/ports/<port_id>.json", "/v2.0/ports/<port_id>",
resource_class_kwargs={'api': self})
self.api.add_resource(NeutronDeletePort, "/v2.0/ports/<port_id>.json", "/v2.0/ports/<port_id>",
resource_class_kwargs={'api': self})
self.api.add_resource(NeutronAddFloatingIp, "/v2.0/floatingips.json", "/v2.0/floatingips",
resource_class_kwargs={'api': self})
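        # every endpoint above receives a reference to this API object via resource_class_kwargs,
        # so the request handlers can reach the shared compute state through self.api.compute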
def _start_flask(self):
logging.info("Starting %s endpoint @ http://%s:%d" % (__name__, self.ip, self.port))
if self.app is not None:
self.app.before_request(self.dump_playbook)
self.app.run(self.ip, self.port, debug=True, use_reloader=False)
class Shutdown(Resource):
def get(self):
logging.debug(("%s is beeing shut down") % (__name__))
func = request.environ.get('werkzeug.server.shutdown')
if func is None:
raise RuntimeError('Not running with the Werkzeug Server')
func()
class NeutronListAPIVersions(Resource):
def get(self):
"""
Lists API versions.
:return: Returns a json with API versions.
:rtype: :class:`flask.response`
"""
logging.debug("API CALL: Neutron - List API Versions")
resp = dict()
resp['versions'] = dict()
versions = [{
"status": "CURRENT",
"id": "v2.0",
"links": [
{
"href": request.url_root + '/v2.0',
"rel": "self"
}
]
}]
resp['versions'] = versions
return Response(json.dumps(resp), status=200, mimetype='application/json')
class NeutronShowAPIv2Details(Resource):
def get(self):
"""
Returns API details.
:return: Returns a json with API details.
:rtype: :class:`flask.response`
"""
logging.debug("API CALL: %s GET" % str(self.__class__.__name__))
resp = dict()
resp['resources'] = dict()
resp['resources'] = [{
"links": [
{
"href": request.url_root + 'v2.0/subnets',
"rel": "self"
}
],
"name": "subnet",
"collection": "subnets"
},
{
"links": [
{
"href": request.url_root + 'v2.0/networks',
"rel": "self"
}
],
"name": "network",
"collection": "networks"
},
{
"links": [
{
"href": request.url_root + 'v2.0/ports',
"rel": "self"
}
],
"name": "ports",
"collection": "ports"
}
]
return Response(json.dumps(resp), status=200, mimetype='application/json')
class NeutronListNetworks(Resource):
def __init__(self, api):
self.api = api
def get(self):
"""
Lists all networks, used in son-emu. If a 'name' or one or more 'id's are specified, it will only list the
network with the name, or the networks specified via id.
:return: Returns a json response, starting with 'networks' as root node.
:rtype: :class:`flask.response`
"""
logging.debug("API CALL: %s GET" % str(self.__class__.__name__))
try:
if request.args.get('name'):
tmp_network = NeutronShowNetwork(self.api)
return tmp_network.get_network(request.args.get('name'), True)
id_list = request.args.getlist('id')
if len(id_list) == 1:
tmp_network = NeutronShowNetwork(self.api)
return tmp_network.get_network(request.args.get('id'), True)
network_list = list()
network_dict = dict()
if len(id_list) == 0:
for net in self.api.compute.nets.values():
tmp_network_dict = net.create_network_dict()
if tmp_network_dict not in network_list:
network_list.append(tmp_network_dict)
else:
for net in self.api.compute.nets.values():
if net.id in id_list:
tmp_network_dict = net.create_network_dict()
if tmp_network_dict not in network_list:
network_list.append(tmp_network_dict)
network_dict["networks"] = network_list
return Response(json.dumps(network_dict), status=200, mimetype='application/json')
except Exception as ex:
logging.exception("Neutron: List networks exception.")
return Response(ex.message, status=500, mimetype='application/json')
class NeutronShowNetwork(Resource):
def __init__(self, api):
self.api = api
def get(self, network_id):
"""
Returns the network, specified via 'network_id'.
:param network_id: The unique ID string of the network.
:type network_id: ``str``
:return: Returns a json response, starting with 'network' as root node and one network description.
:rtype: :class:`flask.response`
"""
logging.debug("API CALL: %s GET" % str(self.__class__.__name__))
return self.get_network(network_id, False)
def get_network(self, network_name_or_id, as_list):
"""
Returns one network description of the network, specified via 'network_name_or_id'.
:param network_name_or_id: The indicator string, which specifies the requested network.
:type network_name_or_id: ``str``
:param as_list: Determines if the network description should start with the root node 'network' or 'networks'.
:type as_list: ``bool``
:return: Returns a json response, with one network description.
:rtype: :class:`flask.response`
"""
try:
net = self.api.compute.find_network_by_name_or_id(network_name_or_id)
if net is None:
return Response(u'Network not found.\n', status=404, mimetype='application/json')
tmp_network_dict = net.create_network_dict()
tmp_dict = dict()
if as_list:
tmp_dict["networks"] = [tmp_network_dict]
else:
tmp_dict["network"] = tmp_network_dict
return Response(json.dumps(tmp_dict), status=200, mimetype='application/json')
except Exception as ex:
logging.exception("Neutron: Show network exception.")
return Response(ex.message, status=500, mimetype='application/json')
class NeutronCreateNetwork(Resource):
def __init__(self, api):
self.api = api
def post(self):
"""
Creates a network with the name, specified within the request under ['network']['name'].
:return: * 400, if the network already exists.
* 500, if any exception occurred while creation.
* 201, if everything worked out.
:rtype: :class:`flask.response`
"""
logging.debug("API CALL: %s POST" % str(self.__class__.__name__))
try:
network_dict = json.loads(request.data)
name = network_dict['network']['name']
net = self.api.compute.find_network_by_name_or_id(name)
if net is not None:
return Response('Network already exists.\n', status=400, mimetype='application/json')
net = self.api.compute.create_network(name)
return Response(json.dumps({"network": net.create_network_dict()}), status=201, mimetype='application/json')
except Exception as ex:
logging.exception("Neutron: Create network excepiton.")
return Response(ex.message, status=500, mimetype='application/json')
class NeutronUpdateNetwork(Resource):
def __init__(self, api):
self.api = api
def put(self, network_id): # TODO currently only the name will be changed
"""
Updates the existing network with the given parameters.
:param network_id: The indicator string, which specifies the requested network.
:type network_id: ``str``
:return: * 404, if the network could not be found.
* 500, if any exception occurred while updating the network.
* 200, if everything worked out.
:rtype: :class:`flask.response`
"""
logging.debug("API CALL: %s PUT" % str(self.__class__.__name__))
try:
if network_id in self.api.compute.nets:
net = self.api.compute.nets[network_id]
network_dict = json.loads(request.data)
old_net = copy.copy(net)
if "status" in network_dict["network"]:
net.status = network_dict["network"]["status"]
if "subnets" in network_dict["network"]:
pass # tmp_network_dict["subnets"] = None
if "name" in network_dict["network"] and net.name != network_dict["network"]["name"]:
net.name = network_dict["network"]["name"]
if "admin_state_up" in network_dict["network"]:
pass # tmp_network_dict["admin_state_up"] = True
if "tenant_id" in network_dict["network"]:
pass # tmp_network_dict["tenant_id"] = "c1210485b2424d48804aad5d39c61b8f"
if "shared" in network_dict["network"]:
pass # tmp_network_dict["shared"] = False
return Response(json.dumps(network_dict), status=200, mimetype='application/json')
return Response('Network not found.\n', status=404, mimetype='application/json')
except Exception as ex:
logging.exception("Neutron: Show networks exception.")
return Response(ex.message, status=500, mimetype='application/json')
class NeutronDeleteNetwork(Resource):
def __init__(self, api):
self.api = api
def delete(self, network_id):
"""
Deletes the specified network and all its subnets.
:param network_id: The indicator string, which specifies the requested network.
:type network_id: ``str``
:return: * 404, if the network or the subnet could not be removed.
* 500, if any exception occurred while deletion.
* 204, if everything worked out.
:rtype: :class:`flask.response`
"""
logging.debug("API CALL: %s DELETE" % str(self.__class__.__name__))
try:
if network_id not in self.api.compute.nets:
return Response('Could not find network. (' + network_id + ')\n',
status=404, mimetype='application/json')
net = self.api.compute.nets[network_id]
delete_subnet = NeutronDeleteSubnet(self.api)
resp = delete_subnet.delete(net.subnet_id)
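            # a 404 from the subnet delete is tolerated below: the network may not have a subnet attached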
if not '204' in resp.status and not '404' in resp.status:
return resp
self.api.compute.delete_network(network_id)
return Response('Network ' + str(network_id) + ' deleted.\n', status=204, mimetype='application/json')
except Exception as ex:
logging.exception("Neutron: Delete network exception.")
return Response(ex.message, status=500, mimetype='application/json')
class NeutronListSubnets(Resource):
def __init__(self, api):
self.api = api
def get(self):
"""
Lists all subnets, used in son-emu. If a 'name' or one or more 'id's are specified, it will only list the
subnet with the name, or the subnets specified via id.
:return: Returns a json response, starting with 'subnets' as root node.
:rtype: :class:`flask.response`
"""
logging.debug("API CALL: %s GET" % str(self.__class__.__name__))
try:
if request.args.get('name'):
show_subnet = NeutronShowSubnet(self.api)
return show_subnet.get_subnet(request.args.get('name'), True)
id_list = request.args.getlist('id')
if len(id_list) == 1:
show_subnet = NeutronShowSubnet(self.api)
return show_subnet.get_subnet(id_list[0], True)
subnet_list = list()
subnet_dict = dict()
if len(id_list) == 0:
for net in self.api.compute.nets.values():
if net.subnet_id is not None:
tmp_subnet_dict = net.create_subnet_dict()
subnet_list.append(tmp_subnet_dict)
else:
for net in self.api.compute.nets.values():
if net.subnet_id in id_list:
tmp_subnet_dict = net.create_subnet_dict()
subnet_list.append(tmp_subnet_dict)
subnet_dict["subnets"] = subnet_list
return Response(json.dumps(subnet_dict), status=200, mimetype='application/json')
except Exception as ex:
logging.exception("Neutron: List subnets exception.")
return Response(ex.message, status=500, mimetype='application/json')
class NeutronShowSubnet(Resource):
def __init__(self, api):
self.api = api
def get(self, subnet_id):
"""
Returns the subnet, specified via 'subnet_id'.
:param subnet_id: The unique ID string of the subnet.
:type subnet_id: ``str``
:return: Returns a json response, starting with 'subnet' as root node and one subnet description.
:rtype: :class:`flask.response`
"""
logging.debug("API CALL: %s GET" % str(self.__class__.__name__))
return self.get_subnet(subnet_id, False)
def get_subnet(self, subnet_name_or_id, as_list):
"""
Returns one subnet description of the subnet, specified via 'subnet_name_or_id'.
:param subnet_name_or_id: The indicator string, which specifies the requested subnet.
:type subnet_name_or_id: ``str``
:param as_list: Determines if the subnet description should start with the root node 'subnet' or 'subnets'.
:type as_list: ``bool``
:return: Returns a json response, with one subnet description.
:rtype: :class:`flask.response`
"""
try:
for net in self.api.compute.nets.values():
if net.subnet_id == subnet_name_or_id or net.subnet_name == subnet_name_or_id:
tmp_subnet_dict = net.create_subnet_dict()
tmp_dict = dict()
if as_list:
tmp_dict["subnets"] = [tmp_subnet_dict]
else:
tmp_dict["subnet"] = tmp_subnet_dict
return Response(json.dumps(tmp_dict), status=200, mimetype='application/json')
return Response('Subnet not found. (' + subnet_name_or_id + ')\n', status=404, mimetype='application/json')
except Exception as ex:
logging.exception("Neutron: Show subnet exception.")
return Response(ex.message, status=500, mimetype='application/json')
class NeutronCreateSubnet(Resource):
def __init__(self, api):
self.api = api
def post(self):
"""
Creates a subnet with the name, specified within the request under ['subnet']['name'].
:return: * 400, if the 'CIDR' format is wrong or it does not exist.
* 404, if the network was not found.
* 409, if the corresponding network already has one subnet.
* 500, if any exception occurred while creation and
* 201, if everything worked out.
:rtype: :class:`flask.response`
"""
logging.debug("API CALL: %s POST" % str(self.__class__.__name__))
try:
subnet_dict = json.loads(request.data)
net = self.api.compute.find_network_by_name_or_id(subnet_dict['subnet']['network_id'])
if net is None:
return Response('Could not find network.\n', status=404, mimetype='application/json')
net.subnet_name = subnet_dict["subnet"].get('name', str(net.name) + '-sub')
if net.subnet_id is not None:
return Response('Only one subnet per network is supported\n', status=409, mimetype='application/json')
if "id" in subnet_dict["subnet"]:
net.subnet_id = subnet_dict["subnet"]["id"]
else:
net.subnet_id = str(uuid.uuid4())
import emuvim.api.openstack.ip_handler as IP
net.set_cidr(IP.get_new_cidr(net.subnet_id))
if "tenant_id" in subnet_dict["subnet"]:
pass
if "allocation_pools" in subnet_dict["subnet"]:
pass
if "gateway_ip" in subnet_dict["subnet"]:
net.gateway_ip = subnet_dict["subnet"]["gateway_ip"]
if "ip_version" in subnet_dict["subnet"]:
pass
if "enable_dhcp" in subnet_dict["subnet"]:
pass
return Response(json.dumps({'subnet': net.create_subnet_dict()}), status=201, mimetype='application/json')
except Exception as ex:
logging.exception("Neutron: Create network excepiton.")
return Response(ex.message, status=500, mimetype='application/json')
class NeutronUpdateSubnet(Resource):
def __init__(self, api):
self.api = api
def put(self, subnet_id):
"""
Updates the existing subnet with the given parameters.
:param subnet_id: The indicator string, which specifies the requested subnet.
:type subnet_id: ``str``
:return: * 404, if the network could not be found.
* 500, if any exception occurred while updating the network.
* 200, if everything worked out.
:rtype: :class:`flask.response`
"""
logging.debug("API CALL: %s PUT" % str(self.__class__.__name__))
try:
for net in self.api.compute.nets.values():
if net.subnet_id == subnet_id:
subnet_dict = json.loads(request.data)
if "name" in subnet_dict["subnet"]:
net.subnet_name = subnet_dict["subnet"]["name"]
if "network_id" in subnet_dict["subnet"]:
net.id = subnet_dict["subnet"]["network_id"]
if "tenant_id" in subnet_dict["subnet"]:
pass
if "allocation_pools" in subnet_dict["subnet"]:
pass
if "gateway_ip" in subnet_dict["subnet"]:
net.gateway_ip = subnet_dict["subnet"]["gateway_ip"]
if "ip_version" in subnet_dict["subnet"]:
pass
if "cidr" in subnet_dict["subnet"]:
net.set_cidr(subnet_dict["subnet"]["cidr"])
if "id" in subnet_dict["subnet"]:
net.subnet_id = subnet_dict["subnet"]["id"]
if "enable_dhcp" in subnet_dict["subnet"]:
pass
net.subnet_update_time = str(datetime.now())
tmp_dict = {'subnet': net.create_subnet_dict()}
return Response(json.dumps(tmp_dict), status=200, mimetype='application/json')
return Response('Network not found.\n', status=404, mimetype='application/json')
except Exception as ex:
logging.exception("Neutron: Show networks exception.")
return Response(ex.message, status=500, mimetype='application/json')
class NeutronDeleteSubnet(Resource):
def __init__(self, api):
self.api = api
def delete(self, subnet_id):
"""
Deletes the specified subnet.
:param subnet_id: The indicator string, which specifies the requested subnet.
:type subnet_id: ``str``
:return: * 404, if the subnet could not be removed.
* 500, if any exception occurred while deletion.
* 204, if everything worked out.
:rtype: :class:`flask.response`
"""
logging.debug("API CALL: %s DELETE" % str(self.__class__.__name__))
try:
for net in self.api.compute.nets.values():
if net.subnet_id == subnet_id:
for server in self.api.compute.computeUnits.values():
for port_name in server.port_names:
port = self.api.compute.find_port_by_name_or_id(port_name)
if port.net_name == net.name:
port.ip_address = None
self.api.compute.dc.net.removeLink(
link=None,
node1=self.api.compute.dc.containers[server.name],
node2=self.api.compute.dc.switch)
port.net_name = None
net.delete_subnet()
return Response('Subnet ' + str(subnet_id) + ' deleted.\n',
status=204, mimetype='application/json')
return Response('Could not find subnet.', status=404, mimetype='application/json')
except Exception as ex:
logging.exception("Neutron: Delete subnet exception.")
return Response(ex.message, status=500, mimetype='application/json')
class NeutronListPorts(Resource):
def __init__(self, api):
self.api = api
def get(self):
"""
Lists all ports, used in son-emu. If a 'name' or one or more 'id's are specified, it will only list the
port with the name, or the ports specified via id.
:return: Returns a json response, starting with 'ports' as root node.
:rtype: :class:`flask.response`
"""
logging.debug("API CALL: %s GET" % str(self.__class__.__name__))
try:
if request.args.get('name'):
show_port = NeutronShowPort(self.api)
return show_port.get_port(request.args.get('name'), True)
id_list = request.args.getlist('id')
if len(id_list) == 1:
show_port = NeutronShowPort(self.api)
return show_port.get_port(request.args.get('id'), True)
port_list = list()
port_dict = dict()
if len(id_list) == 0:
for port in self.api.compute.ports.values():
tmp_port_dict = port.create_port_dict(self.api.compute)
port_list.append(tmp_port_dict)
else:
for port in self.api.compute.ports.values():
if port.id in id_list:
tmp_port_dict = port.create_port_dict(self.api.compute)
port_list.append(tmp_port_dict)
port_dict["ports"] = port_list
return Response(json.dumps(port_dict), status=200, mimetype='application/json')
except Exception as ex:
logging.exception("Neutron: List ports exception.")
return Response(ex.message, status=500, mimetype='application/json')
class NeutronShowPort(Resource):
def __init__(self, api):
self.api = api
def get(self, port_id):
"""
Returns the port, specified via 'port_id'.
:param port_id: The unique ID string of the network.
:type port_id: ``str``
:return: Returns a json response, starting with 'port' as root node and one network description.
:rtype: :class:`flask.response`
"""
logging.debug("API CALL: %s GET" % str(self.__class__.__name__))
return self.get_port(port_id, False)
def get_port(self, port_name_or_id, as_list):
"""
        Returns one port description of the port, specified via 'port_name_or_id'.
:param port_name_or_id: The indicator string, which specifies the requested port.
:type port_name_or_id: ``str``
:param as_list: Determines if the port description should start with the root node 'port' or 'ports'.
:type as_list: ``bool``
:return: Returns a json response, with one port description.
:rtype: :class:`flask.response`
"""
try:
port = self.api.compute.find_port_by_name_or_id(port_name_or_id)
if port is None:
return Response('Port not found. (' + port_name_or_id + ')\n', status=404, mimetype='application/json')
tmp_port_dict = port.create_port_dict(self.api.compute)
tmp_dict = dict()
if as_list:
tmp_dict["ports"] = [tmp_port_dict]
else:
tmp_dict["port"] = tmp_port_dict
return Response(json.dumps(tmp_dict), status=200, mimetype='application/json')
except Exception as ex:
logging.exception("Neutron: Show port exception.")
return Response(ex.message, status=500, mimetype='application/json')
class NeutronCreatePort(Resource):
def __init__(self, api):
self.api = api
def post(self):
"""
Creates a port with the name, specified within the request under ['port']['name'].
:return: * 404, if the network could not be found.
* 500, if any exception occurred while creation and
* 201, if everything worked out.
:rtype: :class:`flask.response`
"""
logging.debug("API CALL: %s POST" % str(self.__class__.__name__))
try:
port_dict = json.loads(request.data)
net_id = port_dict['port']['network_id']
if net_id not in self.api.compute.nets:
return Response('Could not find network.\n', status=404, mimetype='application/json')
net = self.api.compute.nets[net_id]
if 'name' in port_dict['port']:
name = port_dict['port']['name']
else:
num_ports = len(self.api.compute.ports)
name = "port:cp%s:man:%s" % (num_ports, str(uuid.uuid4()))
if self.api.compute.find_port_by_name_or_id(name):
return Response("Port with name %s already exists.\n" % name, status=500, mimetype='application/json')
port = self.api.compute.create_port(name)
port.net_name = net.name
port.ip_address = net.get_new_ip_address(name)
if "admin_state_up" in port_dict["port"]:
pass
if "device_id" in port_dict["port"]:
pass
if "device_owner" in port_dict["port"]:
pass
if "fixed_ips" in port_dict["port"]:
pass
if "mac_address" in port_dict["port"]:
port.mac_address = port_dict["port"]["mac_address"]
if "status" in port_dict["port"]:
pass
if "tenant_id" in port_dict["port"]:
pass
# add the port to a stack if the specified network is a stack network
for stack in self.api.compute.stacks.values():
for net in stack.nets.values():
if net.id == net_id:
stack.ports[name] = port
return Response(json.dumps({'port': port.create_port_dict(self.api.compute)}), status=201,
mimetype='application/json')
except Exception as ex:
logging.exception("Neutron: Show port exception.")
return Response(ex.message, status=500, mimetype='application/json')
class NeutronUpdatePort(Resource):
def __init__(self, api):
self.api = api
def put(self, port_id):
"""
Updates the existing port with the given parameters.
        :param port_id: The indicator string, which specifies the requested port.
        :type port_id: ``str``
:return: * 404, if the network could not be found.
* 500, if any exception occurred while updating the network.
* 200, if everything worked out.
:rtype: :class:`flask.response`
"""
logging.debug("API CALL: %s PUT" % str(self.__class__.__name__))
try:
port_dict = json.loads(request.data)
port = self.api.compute.find_port_by_name_or_id(port_id)
if port is None:
return Response("Port with id %s does not exists.\n" % port_id, status=404, mimetype='application/json')
old_port = copy.copy(port)
stack = None
for s in self.api.compute.stacks.values():
                for p in s.ports.values():
                    if p.id == port_id:
                        stack = s
if "admin_state_up" in port_dict["port"]:
pass
if "device_id" in port_dict["port"]:
pass
if "device_owner" in port_dict["port"]:
pass
if "fixed_ips" in port_dict["port"]:
pass
if "id" in port_dict["port"]:
port.id = port_dict["port"]["id"]
if "mac_address" in port_dict["port"]:
port.mac_address = port_dict["port"]["mac_address"]
if "name" in port_dict["port"] and port_dict["port"]["name"] != port.name:
port.set_name(port_dict["port"]["name"])
if stack is not None:
if port.net_name in stack.nets:
stack.nets[port.net_name].update_port_name_for_ip_address(port.ip_address, port.name)
stack.ports[port.name] = stack.ports[old_port.name]
del stack.ports[old_port.name]
if "network_id" in port_dict["port"]:
pass
if "status" in port_dict["port"]:
pass
if "tenant_id" in port_dict["port"]:
pass
return Response(json.dumps({'port': port.create_port_dict(self.api.compute)}), status=200,
mimetype='application/json')
except Exception as ex:
logging.exception("Neutron: Update port exception.")
return Response(ex.message, status=500, mimetype='application/json')
class NeutronDeletePort(Resource):
def __init__(self, api):
self.api = api
def delete(self, port_id):
"""
Deletes the specified port.
:param port_id: The indicator string, which specifies the requested port.
:type port_id: ``str``
:return: * 404, if the port could not be found.
* 500, if any exception occurred while deletion.
* 204, if everything worked out.
:rtype: :class:`flask.response`
"""
logging.debug("API CALL: %s DELETE" % str(self.__class__.__name__))
try:
port = self.api.compute.find_port_by_name_or_id(port_id)
if port is None:
return Response("Port with id %s does not exists.\n" % port_id, status=404)
stack = None
for s in self.api.compute.stacks.values():
for p in s.ports.values():
if p.id == port_id:
stack = s
if stack is not None:
if port.net_name in stack.nets:
stack.nets[port.net_name].withdraw_ip_address(port.ip_address)
for server in stack.servers.values():
try:
server.port_names.remove(port.name)
except ValueError:
pass
# delete the port
self.api.compute.delete_port(port.id)
return Response('Port ' + port_id + ' deleted.\n', status=204, mimetype='application/json')
except Exception as ex:
logging.exception("Neutron: Delete port exception.")
return Response(ex.message, status=500, mimetype='application/json')
class NeutronAddFloatingIp(Resource):
def __init__(self, api):
self.api = api
def get(self):
"""
Added a quick and dirty fake for the OSM integration. Returns a list of
floating IPs. Has nothing to do with the setup inside the emulator.
But its enough to make the OSM driver happy.
@PG Sandman: Feel free to improve this and let it do something meaningful.
"""
resp = dict()
resp["floatingips"] = list()
        # create a list of floating IP definitions and return it
for i in range(100, 110):
ip=dict()
ip["router_id"] = "router_id"
ip["description"] = "hardcoded in api"
ip["created_at"] = "router_id"
ip["updated_at"] = "router_id"
ip["revision_number"] = 1
ip["tenant_id"] = "tenant_id"
ip["project_id"] = "project_id"
ip["floating_network_id"] = str(i)
ip["status"] = "ACTIVE"
ip["id"] = str(i)
ip["port_id"] = "port_id"
ip["floating_ip_address"] = "172.0.0.%d" % i
ip["fixed_ip_address"] = "10.0.0.%d" % i
resp["floatingips"].append(ip)
return Response(json.dumps(resp), status=200, mimetype='application/json')
def post(self):
"""
Adds a floating IP to neutron.
:return: Returns a floating network description.
:rtype: :class:`flask.response`
"""
logging.debug("API CALL: %s POST" % str(self.__class__.__name__))
try:
# Fiddle with floating_network !
req = json.loads(request.data)
network_id = req["floatingip"]["floating_network_id"]
net = self.api.compute.find_network_by_name_or_id(network_id)
if net != self.api.manage.floating_network:
return Response("You have to specify the existing floating network\n",
status=400, mimetype='application/json')
port_id = req["floatingip"].get("port_id", None)
port = self.api.compute.find_port_by_name_or_id(port_id)
if port is not None:
if port.net_name != self.api.manage.floating_network.name:
return Response("You have to specify a port in the floating network\n",
status=400, mimetype='application/json')
if port.floating_ip is not None:
return Response("We allow only one floating ip per port\n", status=400, mimetype='application/json')
else:
num_ports = len(self.api.compute.ports)
name = "port:cp%s:fl:%s" % (num_ports, str(uuid.uuid4()))
port = self.api.compute.create_port(name)
port.net_name = net.name
port.ip_address = net.get_new_ip_address(name)
port.floating_ip = port.ip_address
response = dict()
resp = response["floatingip"] = dict()
resp["floating_network_id"] = net.id
resp["status"] = "ACTIVE"
resp["id"] = net.id
resp["port_id"] = port.id
resp["floating_ip_address"] = port.floating_ip
resp["fixed_ip_address"] = port.floating_ip
return Response(json.dumps(response), status=200, mimetype='application/json')
except Exception as ex:
logging.exception("Neutron: Create FloatingIP exception %s.", ex)
return Response(ex.message, status=500, mimetype='application/json')
| knodir/son-emu | src/emuvim/api/openstack/openstack_dummies/neutron_dummy_api.py | Python | apache-2.0 | 38,680 |
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pathlib
import random
from google.cloud import bigquery
import pytest
from . import append_rows_pending
DIR = pathlib.Path(__file__).parent
regions = ["US", "non-US"]
@pytest.fixture(params=regions)
def sample_data_table(
request: pytest.FixtureRequest,
bigquery_client: bigquery.Client,
project_id: str,
dataset_id: str,
dataset_id_non_us: str,
) -> str:
dataset = dataset_id
if request.param != "US":
dataset = dataset_id_non_us
schema = bigquery_client.schema_from_json(str(DIR / "customer_record_schema.json"))
table_id = f"append_rows_proto2_{random.randrange(10000)}"
full_table_id = f"{project_id}.{dataset}.{table_id}"
table = bigquery.Table(full_table_id, schema=schema)
table = bigquery_client.create_table(table, exists_ok=True)
yield full_table_id
bigquery_client.delete_table(table, not_found_ok=True)
def test_append_rows_pending(
capsys: pytest.CaptureFixture,
bigquery_client: bigquery.Client,
sample_data_table: str,
):
project_id, dataset_id, table_id = sample_data_table.split(".")
append_rows_pending.append_rows_pending(
project_id=project_id, dataset_id=dataset_id, table_id=table_id
)
out, _ = capsys.readouterr()
assert "have been committed" in out
rows = bigquery_client.query(
f"SELECT * FROM `{project_id}.{dataset_id}.{table_id}`"
).result()
row_items = [
# Convert to sorted tuple of items to more easily search for expected rows.
tuple(sorted(row.items()))
for row in rows
]
assert (("customer_name", "Alice"), ("row_num", 1)) in row_items
assert (("customer_name", "Bob"), ("row_num", 2)) in row_items
assert (("customer_name", "Charles"), ("row_num", 3)) in row_items
| googleapis/python-bigquery-storage | samples/snippets/append_rows_pending_test.py | Python | apache-2.0 | 2,359 |
# coding=utf-8
# Author: Nic Wolfe <[email protected]>
# URL: https://sickrage.github.io
# Git: https://github.com/SickRage/SickRage.git
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
import adba
import time
import datetime
import requests
import threading
import sickbeard
from sickbeard import db, helpers, logger
from sickbeard.indexers.indexer_config import INDEXER_TVDB
exception_dict = {}
anidb_exception_dict = {}
xem_exception_dict = {}
exceptionsCache = {}
exceptionsSeasonCache = {}
exceptionLock = threading.Lock()
def shouldRefresh(exList):
"""
Check if we should refresh cache for items in exList
:param exList: exception list to check if needs a refresh
:return: True if refresh is needed
"""
MAX_REFRESH_AGE_SECS = 86400 # 1 day
cache_db_con = db.DBConnection('cache.db')
rows = cache_db_con.select("SELECT last_refreshed FROM scene_exceptions_refresh WHERE list = ?", [exList])
if rows:
lastRefresh = int(rows[0]['last_refreshed'])
return int(time.mktime(datetime.datetime.today().timetuple())) > lastRefresh + MAX_REFRESH_AGE_SECS
else:
return True
def setLastRefresh(exList):
"""
Update last cache update time for shows in list
:param exList: exception list to set refresh time
"""
cache_db_con = db.DBConnection('cache.db')
cache_db_con.upsert(
"scene_exceptions_refresh",
{'last_refreshed': int(time.mktime(datetime.datetime.today().timetuple()))},
{'list': exList}
)
def get_scene_exceptions(indexer_id, season=-1):
"""
    Given an indexer_id, return a list of all the scene exceptions.
"""
exceptionsList = []
if indexer_id not in exceptionsCache or season not in exceptionsCache[indexer_id]:
cache_db_con = db.DBConnection('cache.db')
exceptions = cache_db_con.select("SELECT show_name FROM scene_exceptions WHERE indexer_id = ? and season = ?",
[indexer_id, season])
if exceptions:
exceptionsList = list({cur_exception["show_name"] for cur_exception in exceptions})
if indexer_id not in exceptionsCache:
exceptionsCache[indexer_id] = {}
exceptionsCache[indexer_id][season] = exceptionsList
else:
exceptionsList = exceptionsCache[indexer_id][season]
# Add generic exceptions regardless of the season if there is no exception for season
if season != -1 and not exceptionsList:
exceptionsList += get_scene_exceptions(indexer_id, season=-1)
return list({exception for exception in exceptionsList})
def get_all_scene_exceptions(indexer_id):
"""
Get all scene exceptions for a show ID
:param indexer_id: ID to check
:return: dict of exceptions
"""
exceptionsDict = {}
cache_db_con = db.DBConnection('cache.db')
exceptions = cache_db_con.select("SELECT show_name,season FROM scene_exceptions WHERE indexer_id = ?", [indexer_id])
if exceptions:
for cur_exception in exceptions:
if not cur_exception["season"] in exceptionsDict:
exceptionsDict[cur_exception["season"]] = []
exceptionsDict[cur_exception["season"]].append(cur_exception["show_name"])
return exceptionsDict
def get_scene_seasons(indexer_id):
"""
return a list of season numbers that have scene exceptions
"""
exceptionsSeasonList = []
if indexer_id not in exceptionsSeasonCache:
cache_db_con = db.DBConnection('cache.db')
sql_results = cache_db_con.select("SELECT DISTINCT(season) as season FROM scene_exceptions WHERE indexer_id = ?",
[indexer_id])
if sql_results:
exceptionsSeasonList = list({int(x["season"]) for x in sql_results})
if indexer_id not in exceptionsSeasonCache:
exceptionsSeasonCache[indexer_id] = {}
exceptionsSeasonCache[indexer_id] = exceptionsSeasonList
else:
exceptionsSeasonList = exceptionsSeasonCache[indexer_id]
return exceptionsSeasonList
def get_scene_exception_by_name(show_name):
return get_scene_exception_by_name_multiple(show_name)[0]
def get_scene_exception_by_name_multiple(show_name):
"""
Given a show name, return the indexerid of the exception, None if no exception
is present.
"""
# try the obvious case first
cache_db_con = db.DBConnection('cache.db')
exception_result = cache_db_con.select(
"SELECT indexer_id, season FROM scene_exceptions WHERE LOWER(show_name) = ? ORDER BY season ASC",
[show_name.lower()])
if exception_result:
return [(int(x["indexer_id"]), int(x["season"])) for x in exception_result]
out = []
all_exception_results = cache_db_con.select("SELECT show_name, indexer_id, season FROM scene_exceptions")
for cur_exception in all_exception_results:
cur_exception_name = cur_exception["show_name"]
cur_indexer_id = int(cur_exception["indexer_id"])
if show_name.lower() in (
cur_exception_name.lower(),
sickbeard.helpers.sanitizeSceneName(cur_exception_name).lower().replace('.', ' ')):
logger.log(u"Scene exception lookup got indexer id {}, using that".format
(cur_indexer_id), logger.DEBUG)
out.append((cur_indexer_id, int(cur_exception["season"])))
if out:
return out
return [(None, None)]
def retrieve_exceptions(): # pylint:disable=too-many-locals, too-many-branches
"""
Looks up the exceptions on github, parses them into a dict, and inserts them into the
scene_exceptions table in cache.db. Also clears the scene name cache.
"""
do_refresh = False
for indexer in sickbeard.indexerApi().indexers:
if shouldRefresh(sickbeard.indexerApi(indexer).name):
do_refresh = True
if do_refresh:
loc = sickbeard.indexerApi(INDEXER_TVDB).config['scene_loc']
logger.log(u"Checking for scene exception updates from {}".format(loc))
try:
jdata = helpers.getURL(loc, session=sickbeard.indexerApi(INDEXER_TVDB).session, json=True)
except Exception:
jdata = None
if not jdata:
# When jdata is None, trouble connecting to github, or reading file failed
logger.log(u"Check scene exceptions update failed. Unable to update from {}".format(loc), logger.DEBUG)
else:
for indexer in sickbeard.indexerApi().indexers:
try:
setLastRefresh(sickbeard.indexerApi(indexer).name)
for indexer_id in jdata[sickbeard.indexerApi(indexer).config['xem_origin']]:
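                        # build a list of {exception_name: scene_season} entries for this show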
alias_list = [
{scene_exception: int(scene_season)}
for scene_season in jdata[sickbeard.indexerApi(indexer).config['xem_origin']][indexer_id]
for scene_exception in jdata[sickbeard.indexerApi(indexer).config['xem_origin']][indexer_id][scene_season]
]
exception_dict[indexer_id] = alias_list
except Exception:
continue
# XEM scene exceptions
_xem_exceptions_fetcher()
for xem_ex in xem_exception_dict:
if xem_ex in exception_dict:
            exception_dict[xem_ex] += xem_exception_dict[xem_ex]
else:
exception_dict[xem_ex] = xem_exception_dict[xem_ex]
# AniDB scene exceptions
_anidb_exceptions_fetcher()
for anidb_ex in anidb_exception_dict:
if anidb_ex in exception_dict:
exception_dict[anidb_ex] += anidb_exception_dict[anidb_ex]
else:
exception_dict[anidb_ex] = anidb_exception_dict[anidb_ex]
queries = []
cache_db_con = db.DBConnection('cache.db')
for cur_indexer_id in exception_dict:
sql_ex = cache_db_con.select("SELECT show_name FROM scene_exceptions WHERE indexer_id = ?;", [cur_indexer_id])
existing_exceptions = [x["show_name"] for x in sql_ex]
if cur_indexer_id not in exception_dict:
continue
for cur_exception_dict in exception_dict[cur_indexer_id]:
for ex in cur_exception_dict.iteritems():
cur_exception, curSeason = ex
if cur_exception not in existing_exceptions:
queries.append(
["INSERT OR IGNORE INTO scene_exceptions (indexer_id, show_name, season) VALUES (?,?,?);",
[cur_indexer_id, cur_exception, curSeason]])
if queries:
cache_db_con.mass_action(queries)
logger.log(u"Updated scene exceptions", logger.DEBUG)
# cleanup
exception_dict.clear()
anidb_exception_dict.clear()
xem_exception_dict.clear()
def update_scene_exceptions(indexer_id, scene_exceptions, season=-1):
"""
    Given an indexer_id, and a list of all show scene exceptions, update the db.
"""
cache_db_con = db.DBConnection('cache.db')
cache_db_con.action('DELETE FROM scene_exceptions WHERE indexer_id=? and season=?', [indexer_id, season])
logger.log(u"Updating scene exceptions", logger.INFO)
# A change has been made to the scene exception list. Let's clear the cache, to make this visible
if indexer_id in exceptionsCache:
exceptionsCache[indexer_id] = {}
exceptionsCache[indexer_id][season] = scene_exceptions
for cur_exception in scene_exceptions:
cache_db_con.action("INSERT INTO scene_exceptions (indexer_id, show_name, season) VALUES (?,?,?)",
[indexer_id, cur_exception, season])
def _anidb_exceptions_fetcher():
if shouldRefresh('anidb'):
logger.log(u"Checking for scene exception updates for AniDB")
for show in sickbeard.showList:
if show.is_anime and show.indexer == 1:
try:
anime = adba.Anime(None, name=show.name, tvdbid=show.indexerid, autoCorrectName=True)
except Exception:
continue
else:
if anime.name and anime.name != show.name:
anidb_exception_dict[show.indexerid] = [{anime.name: -1}]
setLastRefresh('anidb')
return anidb_exception_dict
xem_session = requests.Session()
def _xem_exceptions_fetcher():
if shouldRefresh('xem'):
for indexer in sickbeard.indexerApi().indexers:
logger.log(u"Checking for XEM scene exception updates for {}".format
(sickbeard.indexerApi(indexer).name))
url = "http://thexem.de/map/allNames?origin={}&seasonNumbers=1".format(sickbeard.indexerApi(indexer).config['xem_origin'])
parsedJSON = helpers.getURL(url, session=xem_session, timeout=90, json=True)
if not parsedJSON:
logger.log(u"Check scene exceptions update failed for {}, Unable to get URL: {}".format
(sickbeard.indexerApi(indexer).name, url), logger.DEBUG)
continue
if parsedJSON['result'] == 'failure':
continue
for indexerid, names in parsedJSON['data'].iteritems():
try:
xem_exception_dict[int(indexerid)] = names
except Exception as error:
logger.log(u"XEM: Rejected entry: indexerid:{}; names:{}".format(indexerid, names), logger.WARNING)
logger.log(u"XEM: Rejected entry error message:{}".format(error), logger.DEBUG)
setLastRefresh('xem')
return xem_exception_dict
def getSceneSeasons(indexer_id):
"""get a list of season numbers that have scene exceptions"""
cache_db_con = db.DBConnection('cache.db')
seasons = cache_db_con.select("SELECT DISTINCT season FROM scene_exceptions WHERE indexer_id = ?", [indexer_id])
return [cur_exception["season"] for cur_exception in seasons]
| pedro2d10/SickRage-FR | sickbeard/scene_exceptions.py | Python | gpl-3.0 | 12,610 |
import datetime
import os
from django import forms
from django.db.models.fields import Field
from django.core import checks
from django.core.files.base import File
from django.core.files.storage import default_storage
from django.core.files.images import ImageFile
from django.db.models import signals
from django.utils.encoding import force_str, force_text
from django.utils import six
from django.utils.translation import ugettext_lazy as _
class FieldFile(File):
def __init__(self, instance, field, name):
super(FieldFile, self).__init__(None, name)
self.instance = instance
self.field = field
self.storage = field.storage
self._committed = True
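        # _committed tracks whether the underlying file has already been saved to storage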
def __eq__(self, other):
        # Older code may be expecting FileField values to be simple strings.
        # By overriding the == operator, it can remain backwards compatible.
if hasattr(other, 'name'):
return self.name == other.name
return self.name == other
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return hash(self.name)
# The standard File contains most of the necessary properties, but
# FieldFiles can be instantiated without a name, so that needs to
# be checked for here.
def _require_file(self):
if not self:
raise ValueError("The '%s' attribute has no file associated with it." % self.field.name)
def _get_file(self):
self._require_file()
if not hasattr(self, '_file') or self._file is None:
self._file = self.storage.open(self.name, 'rb')
return self._file
def _set_file(self, file):
self._file = file
def _del_file(self):
del self._file
file = property(_get_file, _set_file, _del_file)
def _get_path(self):
self._require_file()
return self.storage.path(self.name)
path = property(_get_path)
def _get_url(self):
self._require_file()
return self.storage.url(self.name)
url = property(_get_url)
def _get_size(self):
self._require_file()
if not self._committed:
return self.file.size
return self.storage.size(self.name)
size = property(_get_size)
def open(self, mode='rb'):
self._require_file()
self.file.open(mode)
# open() doesn't alter the file's contents, but it does reset the pointer
open.alters_data = True
# In addition to the standard File API, FieldFiles have extra methods
# to further manipulate the underlying file, as well as update the
# associated model instance.
def save(self, name, content, save=True):
name = self.field.generate_filename(self.instance, name)
self.name = self.storage.save(name, content)
setattr(self.instance, self.field.name, self.name)
# Update the filesize cache
self._size = content.size
self._committed = True
# Save the object because it has changed, unless save is False
if save:
self.instance.save()
save.alters_data = True
def delete(self, save=True):
if not self:
return
# Only close the file if it's already open, which we know by the
# presence of self._file
if hasattr(self, '_file'):
self.close()
del self.file
self.storage.delete(self.name)
self.name = None
setattr(self.instance, self.field.name, self.name)
# Delete the filesize cache
if hasattr(self, '_size'):
del self._size
self._committed = False
if save:
self.instance.save()
delete.alters_data = True
def _get_closed(self):
file = getattr(self, '_file', None)
return file is None or file.closed
closed = property(_get_closed)
def close(self):
file = getattr(self, '_file', None)
if file is not None:
file.close()
def __getstate__(self):
# FieldFile needs access to its associated model field and an instance
# it's attached to in order to work properly, but the only necessary
# data to be pickled is the file's name itself. Everything else will
# be restored later, by FileDescriptor below.
return {'name': self.name, 'closed': False, '_committed': True, '_file': None}
class FileDescriptor(object):
"""
The descriptor for the file attribute on the model instance. Returns a
FieldFile when accessed so you can do stuff like::
>>> from myapp.models import MyModel
>>> instance = MyModel.objects.get(pk=1)
>>> instance.file.size
Assigns a file object on assignment so you can do::
>>> with open('/tmp/hello.world', 'r') as f:
... instance.file = File(f)
"""
def __init__(self, field):
self.field = field
def __get__(self, instance=None, owner=None):
if instance is None:
raise AttributeError(
"The '%s' attribute can only be accessed from %s instances."
% (self.field.name, owner.__name__))
# This is slightly complicated, so worth an explanation.
        # `instance.file` needs to ultimately return some instance of `File`,
# probably a subclass. Additionally, this returned object needs to have
# the FieldFile API so that users can easily do things like
# instance.file.path and have that delegated to the file storage engine.
# Easy enough if we're strict about assignment in __set__, but if you
# peek below you can see that we're not. So depending on the current
# value of the field we have to dynamically construct some sort of
# "thing" to return.
# The instance dict contains whatever was originally assigned
# in __set__.
file = instance.__dict__[self.field.name]
# If this value is a string (instance.file = "path/to/file") or None
# then we simply wrap it with the appropriate attribute class according
# to the file field. [This is FieldFile for FileFields and
# ImageFieldFile for ImageFields; it's also conceivable that user
# subclasses might also want to subclass the attribute class]. This
# object understands how to convert a path to a file, and also how to
# handle None.
if isinstance(file, six.string_types) or file is None:
attr = self.field.attr_class(instance, self.field, file)
instance.__dict__[self.field.name] = attr
# Other types of files may be assigned as well, but they need to have
        # the FieldFile interface added to them. Thus, we wrap any other type of
# File inside a FieldFile (well, the field's attr_class, which is
# usually FieldFile).
elif isinstance(file, File) and not isinstance(file, FieldFile):
file_copy = self.field.attr_class(instance, self.field, file.name)
file_copy.file = file
file_copy._committed = False
instance.__dict__[self.field.name] = file_copy
# Finally, because of the (some would say boneheaded) way pickle works,
# the underlying FieldFile might not actually itself have an associated
# file. So we need to reset the details of the FieldFile in those cases.
elif isinstance(file, FieldFile) and not hasattr(file, 'field'):
file.instance = instance
file.field = self.field
file.storage = self.field.storage
# That was fun, wasn't it?
return instance.__dict__[self.field.name]
def __set__(self, instance, value):
instance.__dict__[self.field.name] = value
class FileField(Field):
# The class to wrap instance attributes in. Accessing the file object off
# the instance will always return an instance of attr_class.
attr_class = FieldFile
# The descriptor to use for accessing the attribute off of the class.
descriptor_class = FileDescriptor
description = _("File")
def __init__(self, verbose_name=None, name=None, upload_to='', storage=None, **kwargs):
self._primary_key_set_explicitly = 'primary_key' in kwargs
self._unique_set_explicitly = 'unique' in kwargs
self.storage = storage or default_storage
self.upload_to = upload_to
if callable(upload_to):
self.generate_filename = upload_to
kwargs['max_length'] = kwargs.get('max_length', 100)
super(FileField, self).__init__(verbose_name, name, **kwargs)
def check(self, **kwargs):
errors = super(FileField, self).check(**kwargs)
errors.extend(self._check_unique())
errors.extend(self._check_primary_key())
return errors
def _check_unique(self):
if self._unique_set_explicitly:
return [
checks.Error(
"'unique' is not a valid argument for a %s." % self.__class__.__name__,
hint=None,
obj=self,
id='fields.E200',
)
]
else:
return []
def _check_primary_key(self):
if self._primary_key_set_explicitly:
return [
checks.Error(
"'primary_key' is not a valid argument for a %s." % self.__class__.__name__,
hint=None,
obj=self,
id='fields.E201',
)
]
else:
return []
def deconstruct(self):
name, path, args, kwargs = super(FileField, self).deconstruct()
if kwargs.get("max_length", None) == 100:
del kwargs["max_length"]
kwargs['upload_to'] = self.upload_to
if self.storage is not default_storage:
kwargs['storage'] = self.storage
return name, path, args, kwargs
def get_internal_type(self):
return "FileField"
def get_prep_lookup(self, lookup_type, value):
if hasattr(value, 'name'):
value = value.name
return super(FileField, self).get_prep_lookup(lookup_type, value)
def get_prep_value(self, value):
"Returns field's value prepared for saving into a database."
value = super(FileField, self).get_prep_value(value)
# Need to convert File objects provided via a form to unicode for database insertion
if value is None:
return None
return six.text_type(value)
def pre_save(self, model_instance, add):
"Returns field's value just before saving."
file = super(FileField, self).pre_save(model_instance, add)
if file and not file._committed:
# Commit the file to storage prior to saving the model
file.save(file.name, file, save=False)
return file
def contribute_to_class(self, cls, name, **kwargs):
super(FileField, self).contribute_to_class(cls, name, **kwargs)
setattr(cls, self.name, self.descriptor_class(self))
def get_directory_name(self):
return os.path.normpath(force_text(datetime.datetime.now().strftime(force_str(self.upload_to))))
def get_filename(self, filename):
return os.path.normpath(self.storage.get_valid_name(os.path.basename(filename)))
def generate_filename(self, instance, filename):
return os.path.join(self.get_directory_name(), self.get_filename(filename))
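    # Illustrative note (not part of the original source): when upload_to is a
    # strftime-style pattern such as 'uploads/%Y/%m/%d', get_directory_name()
    # expands it against the current date, so generate_filename() yields paths
    # like 'uploads/2015/03/14/<cleaned-filename>'.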
def save_form_data(self, instance, data):
# Important: None means "no change", other false value means "clear"
# This subtle distinction (rather than a more explicit marker) is
# needed because we need to consume values that are also sane for a
# regular (non Model-) Form to find in its cleaned_data dictionary.
if data is not None:
# This value will be converted to unicode and stored in the
# database, so leaving False as-is is not acceptable.
if not data:
data = ''
setattr(instance, self.name, data)
def formfield(self, **kwargs):
defaults = {'form_class': forms.FileField, 'max_length': self.max_length}
# If a file has been provided previously, then the form doesn't require
# that a new file is provided this time.
# The code to mark the form field as not required is used by
# form_for_instance, but can probably be removed once form_for_instance
# is gone. ModelForm uses a different method to check for an existing file.
if 'initial' in kwargs:
defaults['required'] = False
defaults.update(kwargs)
return super(FileField, self).formfield(**defaults)
class ImageFileDescriptor(FileDescriptor):
"""
Just like the FileDescriptor, but for ImageFields. The only difference is
assigning the width/height to the width_field/height_field, if appropriate.
"""
def __set__(self, instance, value):
previous_file = instance.__dict__.get(self.field.name)
super(ImageFileDescriptor, self).__set__(instance, value)
# To prevent recalculating image dimensions when we are instantiating
# an object from the database (bug #11084), only update dimensions if
# the field had a value before this assignment. Since the default
# value for FileField subclasses is an instance of field.attr_class,
# previous_file will only be None when we are called from
# Model.__init__(). The ImageField.update_dimension_fields method
# hooked up to the post_init signal handles the Model.__init__() cases.
# Assignment happening outside of Model.__init__() will trigger the
# update right here.
if previous_file is not None:
self.field.update_dimension_fields(instance, force=True)
class ImageFieldFile(ImageFile, FieldFile):
def delete(self, save=True):
# Clear the image dimensions cache
if hasattr(self, '_dimensions_cache'):
del self._dimensions_cache
super(ImageFieldFile, self).delete(save)
class ImageField(FileField):
attr_class = ImageFieldFile
descriptor_class = ImageFileDescriptor
description = _("Image")
def __init__(self, verbose_name=None, name=None, width_field=None,
height_field=None, **kwargs):
self.width_field, self.height_field = width_field, height_field
super(ImageField, self).__init__(verbose_name, name, **kwargs)
def check(self, **kwargs):
errors = super(ImageField, self).check(**kwargs)
errors.extend(self._check_image_library_installed())
return errors
def _check_image_library_installed(self):
try:
from PIL import Image # NOQA
except ImportError:
return [
checks.Error(
'Cannot use ImageField because Pillow is not installed.',
hint=('Get Pillow at https://pypi.python.org/pypi/Pillow '
'or run command "pip install pillow".'),
obj=self,
id='fields.E210',
)
]
else:
return []
def deconstruct(self):
name, path, args, kwargs = super(ImageField, self).deconstruct()
if self.width_field:
kwargs['width_field'] = self.width_field
if self.height_field:
kwargs['height_field'] = self.height_field
return name, path, args, kwargs
def contribute_to_class(self, cls, name, **kwargs):
super(ImageField, self).contribute_to_class(cls, name, **kwargs)
# Attach update_dimension_fields so that dimension fields declared
# after their corresponding image field don't stay cleared by
# Model.__init__, see bug #11196.
# Only run post-initialization dimension update on non-abstract models
if not cls._meta.abstract:
signals.post_init.connect(self.update_dimension_fields, sender=cls)
def update_dimension_fields(self, instance, force=False, *args, **kwargs):
"""
Updates field's width and height fields, if defined.
This method is hooked up to model's post_init signal to update
dimensions after instantiating a model instance. However, dimensions
won't be updated if the dimensions fields are already populated. This
avoids unnecessary recalculation when loading an object from the
database.
Dimensions can be forced to update with force=True, which is how
ImageFileDescriptor.__set__ calls this method.
"""
# Nothing to update if the field doesn't have dimension fields.
has_dimension_fields = self.width_field or self.height_field
if not has_dimension_fields:
return
# getattr will call the ImageFileDescriptor's __get__ method, which
# coerces the assigned value into an instance of self.attr_class
# (ImageFieldFile in this case).
file = getattr(instance, self.attname)
# Nothing to update if we have no file and not being forced to update.
if not file and not force:
return
dimension_fields_filled = not(
(self.width_field and not getattr(instance, self.width_field))
or (self.height_field and not getattr(instance, self.height_field))
)
# When both dimension fields have values, we are most likely loading
# data from the database or updating an image field that already had
# an image stored. In the first case, we don't want to update the
# dimension fields because we are already getting their values from the
# database. In the second case, we do want to update the dimensions
# fields and will skip this return because force will be True since we
# were called from ImageFileDescriptor.__set__.
if dimension_fields_filled and not force:
return
# file should be an instance of ImageFieldFile or should be None.
if file:
width = file.width
height = file.height
else:
# No file, so clear dimensions fields.
width = None
height = None
# Update the width and height fields.
if self.width_field:
setattr(instance, self.width_field, width)
if self.height_field:
setattr(instance, self.height_field, height)
def formfield(self, **kwargs):
defaults = {'form_class': forms.ImageField}
defaults.update(kwargs)
return super(ImageField, self).formfield(**defaults)
| simbha/mAngE-Gin | lib/django/db/models/fields/files.py | Python | mit | 18,693 |
# Copyright 2022 The Flax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
from absl.testing import absltest
from flax.core import Scope, Array, init, unfreeze, nn
import jax
from jax import random, numpy as jnp
default_norm = partial(nn.batch_norm)
def residual_block(scope: Scope, x: Array, conv, norm, act, features: int, strides=(1, 1)):
residual = x
x = scope.child(conv, 'conv_1')(x, features, (1, 1))
x = scope.child(norm, 'bn_1')(x)
x = act(x)
x = scope.child(conv, 'conv_2')(x, 4 * features, (3, 3), strides=strides)
x = scope.child(norm, 'bn_2')(x)
x = act(x)
x = scope.child(conv, 'conv_3')(x, 4 * features, (1, 1))
x = scope.child(norm, 'bn_3')(x)
if x.shape != residual.shape:
residual = scope.child(conv, 'proj_conv')(residual, 4 * features, (1, 1), strides=strides)
residual = scope.child(norm, 'proj_bn')(residual)
return act(residual + x)
def resnet(scope: Scope, x,
block_sizes=(3, 4, 6, 3),
features=16, num_classes=1000,
dtype=jnp.float32,
norm=default_norm,
act=nn.relu,
):
conv = partial(nn.conv, bias=False, dtype=dtype)
norm = partial(norm, dtype=dtype)
x = scope.child(conv, 'init_conv')(x, 16, (7, 7), padding=((3, 3), (3, 3)))
x = scope.child(norm, 'init_bn')(x)
x = act(x)
x = nn.max_pool(x, (2, 2), (2, 2), 'SAME')
for i, size in enumerate(block_sizes):
for j in range(size):
strides = (1, 1)
if i > 0 and j == 0:
strides = (2, 2)
block_features = features * 2 ** i
block_scope = scope.push(f'block_{i}_{j}')
x = residual_block(block_scope, x, conv, norm, act, block_features, strides)
# we can access parameters of the sub module by operating on the scope
# Example:
# block_scope.get_kind('params')['conv_1']['kernel']
x = jnp.mean(x, (1, 2))
x = scope.child(nn.dense, 'out')(x, num_classes)
return x
class ResNetTest(absltest.TestCase):
def test_resnet(self):
block_sizes = (2, 2)
x = random.normal(random.PRNGKey(0), (1, 64, 64, 3))
y, variables = init(resnet)(random.PRNGKey(1), x, block_sizes=block_sizes, features=16)
param_shapes = unfreeze(
jax.tree_map(jnp.shape, variables['params']))
self.assertEqual(y.shape, (1, 1000))
self.assertEqual(param_shapes, {
'init_conv': {'kernel': (7, 7, 3, 16)},
'init_bn': {'bias': (16,), 'scale': (16,)},
'out': {'kernel': (128, 1000), 'bias': (1000,)},
'block_0_0': {
'conv_1': {'kernel': (1, 1, 16, 16)},
'conv_2': {'kernel': (3, 3, 16, 64)},
'conv_3': {'kernel': (1, 1, 64, 64)},
'bn_1': {'bias': (16,), 'scale': (16,)},
'bn_2': {'bias': (64,), 'scale': (64,)},
'bn_3': {'bias': (64,), 'scale': (64,)},
'proj_conv': {'kernel': (1, 1, 16, 64)},
'proj_bn': {'bias': (64,), 'scale': (64,)},
},
'block_0_1': {
'conv_1': {'kernel': (1, 1, 64, 16)},
'conv_2': {'kernel': (3, 3, 16, 64)},
'conv_3': {'kernel': (1, 1, 64, 64)},
'bn_1': {'bias': (16,), 'scale': (16,)},
'bn_2': {'bias': (64,), 'scale': (64,)},
'bn_3': {'bias': (64,), 'scale': (64,)},
},
'block_1_0': {
'conv_1': {'kernel': (1, 1, 64, 32)},
'conv_2': {'kernel': (3, 3, 32, 128)},
'conv_3': {'kernel': (1, 1, 128, 128)},
'bn_1': {'bias': (32,), 'scale': (32,)},
'bn_2': {'bias': (128,), 'scale': (128,)},
'bn_3': {'bias': (128,), 'scale': (128,)},
'proj_conv': {'kernel': (1, 1, 64, 128)},
'proj_bn': {'bias': (128,), 'scale': (128,)},
},
'block_1_1': {
'conv_1': {'kernel': (1, 1, 128, 32)},
'conv_2': {'kernel': (3, 3, 32, 128)},
'conv_3': {'kernel': (1, 1, 128, 128)},
'bn_1': {'bias': (32,), 'scale': (32,)},
'bn_2': {'bias': (128,), 'scale': (128,)},
'bn_3': {'bias': (128,), 'scale': (128,)},
},
})
if __name__ == '__main__':
absltest.main()
| google/flax | tests/core/design/core_resnet_test.py | Python | apache-2.0 | 4,684 |
from __future__ import unicode_literals
import warnings
from django.contrib import admin
from django.contrib.auth import logout
from django.contrib.messages import error
from django.contrib.redirects.models import Redirect
from django.core.exceptions import MiddlewareNotUsed
from django.urls import reverse, resolve
from django.http import (HttpResponse, HttpResponseRedirect,
HttpResponsePermanentRedirect, HttpResponseGone)
from django.middleware.csrf import CsrfViewMiddleware, get_token
from django.template import Template, RequestContext
from django.utils.cache import get_max_age
from django.utils.lru_cache import lru_cache
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext as _
from mezzanine.conf import settings
from mezzanine.core.models import SitePermission
from mezzanine.core.management.commands.createdb import (DEFAULT_USERNAME,
DEFAULT_PASSWORD)
from mezzanine.utils.cache import (cache_key_prefix, nevercache_token,
cache_get, cache_set, cache_installed)
from mezzanine.utils.conf import middlewares_or_subclasses_installed
from mezzanine.utils.deprecation import (MiddlewareMixin, is_authenticated)
from mezzanine.utils.sites import current_site_id
from mezzanine.utils.urls import next_url
class AdminLoginInterfaceSelectorMiddleware(MiddlewareMixin):
"""
Checks for a POST from the admin login view and if authentication is
successful and the "site" interface is selected, redirect to the site.
"""
def process_view(self, request, view_func, view_args, view_kwargs):
login_type = request.POST.get("mezzanine_login_interface")
if login_type and not is_authenticated(request.user):
response = view_func(request, *view_args, **view_kwargs)
if is_authenticated(request.user):
if login_type == "admin":
next = next_url(request) or request.get_full_path()
username = request.user.get_username()
if (username == DEFAULT_USERNAME and
request.user.check_password(DEFAULT_PASSWORD)):
error(request, mark_safe(_(
"Your account is using the default password, "
"please <a href='%s'>change it</a> immediately.")
% reverse("user_change_password",
args=(request.user.id,))))
else:
next = "/"
return HttpResponseRedirect(next)
else:
return response
return None
class SitePermissionMiddleware(MiddlewareMixin):
"""
Marks the current user with a ``has_site_permission`` which is
used in place of ``user.is_staff`` to achieve per-site staff
access.
"""
def process_view(self, request, view_func, view_args, view_kwargs):
has_site_permission = False
if request.user.is_superuser:
has_site_permission = True
elif request.user.is_staff:
lookup = {"user": request.user, "sites": current_site_id()}
try:
SitePermission.objects.get(**lookup)
except SitePermission.DoesNotExist:
admin_index = reverse("admin:index")
if request.path.startswith(admin_index):
logout(request)
view_func = admin.site.login
extra_context = {"no_site_permission": True}
return view_func(request, extra_context=extra_context)
else:
has_site_permission = True
request.user.has_site_permission = has_site_permission
class TemplateForDeviceMiddleware(MiddlewareMixin):
"""
DEPRECATED: Device detection has been removed from Mezzanine.
Inserts device-specific templates to the template list.
"""
def __init__(self, *args, **kwargs):
super(TemplateForDeviceMiddleware, self).__init__(*args, **kwargs)
warnings.warn(
"`TemplateForDeviceMiddleware` is deprecated. "
"Please remove it from your middleware settings.",
FutureWarning, stacklevel=2
)
class TemplateForHostMiddleware(MiddlewareMixin):
"""
Inserts host-specific templates to the template list.
"""
def __init__(self, *args, **kwargs):
super(TemplateForHostMiddleware, self).__init__(*args, **kwargs)
warnings.warn(
"`TemplateForHostMiddleware` is deprecated. Please upgrade "
"to the template loader. See: https://goo.gl/SzHPR4",
FutureWarning, stacklevel=2
)
class UpdateCacheMiddleware(MiddlewareMixin):
"""
Response phase for Mezzanine's cache middleware. Handles caching
the response, and then performing the second phase of rendering,
for content enclosed by the ``nevercache`` tag.
"""
def process_response(self, request, response):
# Caching is only applicable for text-based, non-streaming
# responses. We also skip it for non-200 statuses during
# development, so that stack traces are correctly rendered.
is_text = response.get("content-type", "").startswith("text")
valid_status = response.status_code == 200
streaming = getattr(response, "streaming", False)
if not is_text or streaming or (settings.DEBUG and not valid_status):
return response
# Cache the response if all the required conditions are met.
# Response must be marked for updating by the
# ``FetchFromCacheMiddleware`` having a cache get miss, the
# user must not be authenticated, the HTTP status must be OK
# and the response mustn't include an expiry age, indicating it
# shouldn't be cached.
marked_for_update = getattr(request, "_update_cache", False)
anon = hasattr(request, "user") and not is_authenticated(request.user)
timeout = get_max_age(response)
if timeout is None:
timeout = settings.CACHE_MIDDLEWARE_SECONDS
if anon and valid_status and marked_for_update and timeout:
cache_key = cache_key_prefix(request) + request.get_full_path()
_cache_set = lambda r: cache_set(cache_key, r.content, timeout)
if callable(getattr(response, "render", None)):
response.add_post_render_callback(_cache_set)
else:
_cache_set(response)
# Second phase rendering for non-cached template code and
# content. Split on the delimiter the ``nevercache`` tag
# wrapped its contents in, and render only the content
# enclosed by it, to avoid possible template code injection.
token = nevercache_token()
try:
token = token.encode('utf-8')
except AttributeError:
pass
parts = response.content.split(token)
# Restore csrf token from cookie - check the response
# first as it may be being set for the first time.
csrf_token = None
try:
csrf_token = response.cookies[settings.CSRF_COOKIE_NAME].value
except KeyError:
try:
csrf_token = request.COOKIES[settings.CSRF_COOKIE_NAME]
except KeyError:
pass
if csrf_token:
request.META["CSRF_COOKIE"] = csrf_token
context = RequestContext(request)
for i, part in enumerate(parts):
if i % 2:
part = Template(part).render(context).encode("utf-8")
parts[i] = part
response.content = b"".join(parts)
response["Content-Length"] = len(response.content)
if hasattr(request, '_messages'):
# Required to clear out user messages.
request._messages.update(response)
        # Response needs to be run through the CSRF middleware again so
        # that if there was a {% csrf_token %} inside of the nevercache
        # the cookie will be correctly set for the response
if csrf_middleware_installed():
response.csrf_processing_done = False
csrf_mw = CsrfViewMiddleware()
csrf_mw.process_response(request, response)
return response
@lru_cache(maxsize=None)
def csrf_middleware_installed():
csrf_mw_name = "django.middleware.csrf.CsrfViewMiddleware"
return middlewares_or_subclasses_installed([csrf_mw_name])
class FetchFromCacheMiddleware(MiddlewareMixin):
"""
Request phase for Mezzanine cache middleware. Return a response
    from cache if found, otherwise mark the request for updating
the cache in ``UpdateCacheMiddleware``.
"""
def process_request(self, request):
if (cache_installed() and request.method == "GET" and
not is_authenticated(request.user)):
cache_key = cache_key_prefix(request) + request.get_full_path()
response = cache_get(cache_key)
# We need to force a csrf token here, as new sessions
            # won't receive one on their first request, with cache
# middleware running.
if csrf_middleware_installed():
csrf_mw = CsrfViewMiddleware()
csrf_mw.process_view(request, lambda x: None, None, None)
get_token(request)
if response is None:
request._update_cache = True
else:
return HttpResponse(response)
class SSLRedirectMiddleware(MiddlewareMixin):
"""
Handles redirections required for SSL when ``SSL_ENABLED`` is ``True``.
If ``SSL_FORCE_HOST`` is ``True``, and is not the current host,
redirect to it.
Also ensure URLs defined by ``SSL_FORCE_URL_PREFIXES`` are redirect
to HTTPS, and redirect all other URLs to HTTP if on HTTPS.
"""
def __init__(self, *args):
warnings.warn(
"SSLRedirectMiddleware is deprecated. See "
"https://docs.djangoproject.com/en/stable/ref/middleware/"
"#module-django.middleware.security for alternative solutions.",
DeprecationWarning)
super(SSLRedirectMiddleware, self).__init__(*args)
def languages(self):
if not hasattr(self, "_languages"):
self._languages = dict(settings.LANGUAGES).keys()
return self._languages
def process_request(self, request):
force_host = settings.SSL_FORCE_HOST
response = None
if force_host and request.get_host().split(":")[0] != force_host:
url = "http://%s%s" % (force_host, request.get_full_path())
response = HttpResponsePermanentRedirect(url)
elif settings.SSL_ENABLED and not settings.DEV_SERVER:
url = "%s%s" % (request.get_host(), request.get_full_path())
path = request.path
if settings.USE_I18N and path[1:3] in self.languages():
path = path[3:]
if path.startswith(settings.SSL_FORCE_URL_PREFIXES):
if not request.is_secure():
response = HttpResponseRedirect("https://%s" % url)
elif request.is_secure() and settings.SSL_FORCED_PREFIXES_ONLY:
response = HttpResponseRedirect("http://%s" % url)
if response and request.method == "POST":
if resolve(request.get_full_path()).url_name == "fb_do_upload":
# The handler for the flash file uploader in filebrowser
# doesn't have access to the http headers Django will use
# to determine whether the request is secure or not, so
# in this case we don't attempt a redirect - note that
# when /admin is restricted to SSL using Mezzanine's SSL
# setup, the flash uploader will post over SSL, so
                # someone would need to explicitly go out of their way to
# trigger this.
return
# Tell the client they need to re-POST.
response.status_code = 307
return response
class RedirectFallbackMiddleware(MiddlewareMixin):
"""
Port of Django's ``RedirectFallbackMiddleware`` that uses
Mezzanine's approach for determining the current site.
"""
def __init__(self, *args, **kwargs):
super(RedirectFallbackMiddleware, self).__init__(*args, **kwargs)
if "django.contrib.redirects" not in settings.INSTALLED_APPS:
raise MiddlewareNotUsed
def process_response(self, request, response):
if response.status_code == 404:
lookup = {
"site_id": current_site_id(),
"old_path": request.get_full_path(),
}
try:
redirect = Redirect.objects.get(**lookup)
except Redirect.DoesNotExist:
pass
else:
if not redirect.new_path:
response = HttpResponseGone()
else:
response = HttpResponsePermanentRedirect(redirect.new_path)
return response
| readevalprint/mezzanine | mezzanine/core/middleware.py | Python | bsd-2-clause | 13,207 |
import enum
import os
import subprocess
from typing import Optional, Dict, Tuple
from ray_release.exception import ReleaseTestConfigError
from ray_release.logger import logger
from ray_release.wheels import DEFAULT_BRANCH
class Frequency(enum.Enum):
DISABLED = enum.auto()
ANY = enum.auto()
MULTI = enum.auto()
NIGHTLY = enum.auto()
WEEKLY = enum.auto()
frequency_str_to_enum = {
"disabled": Frequency.DISABLED,
"any": Frequency.ANY,
"multi": Frequency.MULTI,
"nightly": Frequency.NIGHTLY,
"weekly": Frequency.WEEKLY,
}
class Priority(enum.Enum):
DEFAULT = 0
MANUAL = 10
HIGH = 50
HIGHEST = 100
priority_str_to_enum = {
"default": Priority.DEFAULT,
"manual": Priority.MANUAL,
"high": Priority.HIGH,
"highest": Priority.HIGHEST,
}
def get_frequency(frequency_str: str) -> Frequency:
frequency_str = frequency_str.lower()
if frequency_str not in frequency_str_to_enum:
raise ReleaseTestConfigError(
f"Frequency not found: {frequency_str}. Must be one of "
f"{list(frequency_str_to_enum.keys())}."
)
return frequency_str_to_enum[frequency_str]
def get_priority(priority_str: str) -> Priority:
priority_str = priority_str.lower()
if priority_str not in priority_str_to_enum:
raise ReleaseTestConfigError(
f"Priority not found: {priority_str}. Must be one of "
f"{list(priority_str_to_enum.keys())}."
)
return priority_str_to_enum[priority_str]
def split_ray_repo_str(repo_str: str) -> Tuple[str, str]:
if "https://" in repo_str:
if "/tree/" in repo_str:
url, branch = repo_str.split("/tree/", maxsplit=2)
return f"{url}.git", branch.rstrip("/")
return repo_str, DEFAULT_BRANCH # Default branch
if ":" in repo_str:
owner_or_url, commit_or_branch = repo_str.split(":")
else:
owner_or_url = repo_str
commit_or_branch = DEFAULT_BRANCH
# Else, construct URL
url = f"https://github.com/{owner_or_url}/ray.git"
return url, commit_or_branch
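# Illustrative examples (assumed inputs, not taken from this file) of how
# split_ray_repo_str resolves the supported formats:
#   split_ray_repo_str("https://github.com/ray-project/ray/tree/my-branch")
#       -> ("https://github.com/ray-project/ray.git", "my-branch")
#   split_ray_repo_str("some-owner:some-branch")
#       -> ("https://github.com/some-owner/ray.git", "some-branch")
#   split_ray_repo_str("some-owner")
#       -> ("https://github.com/some-owner/ray.git", DEFAULT_BRANCH)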
def get_buildkite_prompt_value(key: str) -> Optional[str]:
try:
value = subprocess.check_output(
["buildkite-agent", "meta-data", "get", key], text=True
)
except Exception as e:
logger.warning(f"Could not fetch metadata for {key}: {e}")
return None
logger.debug(f"Got Buildkite prompt value for {key}: {value}")
return value
def get_pipeline_settings() -> Dict:
"""Get pipeline settings.
Retrieves settings from the buildkite agent, environment variables,
and default values (in that order of preference)."""
settings = get_default_settings()
settings = update_settings_from_environment(settings)
settings = update_settings_from_buildkite(settings)
return settings
def get_default_settings() -> Dict:
settings = {
"frequency": Frequency.ANY,
"test_name_filter": None,
"ray_wheels": None,
"ray_test_repo": None,
"ray_test_branch": None,
"priority": Priority.DEFAULT,
"no_concurrency_limit": False,
}
return settings
def update_settings_from_environment(settings: Dict) -> Dict:
if "RELEASE_FREQUENCY" in os.environ:
settings["frequency"] = get_frequency(os.environ["RELEASE_FREQUENCY"])
if "RAY_TEST_REPO" in os.environ:
settings["ray_test_repo"] = os.environ["RAY_TEST_REPO"]
settings["ray_test_branch"] = os.environ.get("RAY_TEST_BRANCH", DEFAULT_BRANCH)
elif "BUILDKITE_BRANCH" in os.environ:
settings["ray_test_repo"] = os.environ["BUILDKITE_REPO"]
settings["ray_test_branch"] = os.environ["BUILDKITE_BRANCH"]
if "RAY_WHEELS" in os.environ:
settings["ray_wheels"] = os.environ["RAY_WHEELS"]
if "TEST_NAME" in os.environ:
settings["test_name_filter"] = os.environ["TEST_NAME"]
if "RELEASE_PRIORITY" in os.environ:
settings["priority"] = get_priority(os.environ["RELEASE_PRIORITY"])
if "NO_CONCURRENCY_LIMIT" in os.environ:
settings["no_concurrency_limit"] = bool(int(os.environ["NO_CONCURRENCY_LIMIT"]))
return settings
def update_settings_from_buildkite(settings: Dict):
release_frequency = get_buildkite_prompt_value("release-frequency")
if release_frequency:
settings["frequency"] = get_frequency(release_frequency)
ray_test_repo_branch = get_buildkite_prompt_value("release-ray-test-repo-branch")
if ray_test_repo_branch:
repo, branch = split_ray_repo_str(ray_test_repo_branch)
settings["ray_test_repo"] = repo
settings["ray_test_branch"] = branch
ray_wheels = get_buildkite_prompt_value("release-ray-wheels")
if ray_wheels:
settings["ray_wheels"] = ray_wheels
test_name_filter = get_buildkite_prompt_value("release-test-name")
    if test_name_filter:
settings["test_name_filter"] = test_name_filter
test_priority = get_buildkite_prompt_value("release-priority")
if test_priority:
settings["priority"] = get_priority(test_priority)
no_concurrency_limit = get_buildkite_prompt_value("release-no-concurrency-limit")
if no_concurrency_limit == "yes":
settings["no_concurrency_limit"] = True
return settings
| ray-project/ray | release/ray_release/buildkite/settings.py | Python | apache-2.0 | 5,311 |
"""
This file contains a minimal set of tests for compliance with the extension
array interface test suite, and should contain no other tests.
The test suite for the full functionality of the array is located in
`pandas/tests/arrays/`.
The tests in this file are inherited from the BaseExtensionTests, and only
minimal tweaks should be applied to get the tests passing (by overwriting a
parent method).
Additional tests should either be added to one of the BaseExtensionTests
classes (if they are relevant for the extension interface for all dtypes), or
be added to the array-specific tests in `pandas/tests/arrays/`.
"""
import numpy as np
import pytest
from pandas.core.dtypes.dtypes import DatetimeTZDtype
import pandas as pd
from pandas.core.arrays import DatetimeArray
from pandas.tests.extension import base
@pytest.fixture(params=["US/Central"])
def dtype(request):
return DatetimeTZDtype(unit="ns", tz=request.param)
@pytest.fixture
def data(dtype):
data = DatetimeArray(pd.date_range("2000", periods=100, tz=dtype.tz), dtype=dtype)
return data
@pytest.fixture
def data_missing(dtype):
return DatetimeArray(
np.array(["NaT", "2000-01-01"], dtype="datetime64[ns]"), dtype=dtype
)
@pytest.fixture
def data_for_sorting(dtype):
a = pd.Timestamp("2000-01-01")
b = pd.Timestamp("2000-01-02")
c = pd.Timestamp("2000-01-03")
return DatetimeArray(np.array([b, c, a], dtype="datetime64[ns]"), dtype=dtype)
@pytest.fixture
def data_missing_for_sorting(dtype):
a = pd.Timestamp("2000-01-01")
b = pd.Timestamp("2000-01-02")
return DatetimeArray(np.array([b, "NaT", a], dtype="datetime64[ns]"), dtype=dtype)
@pytest.fixture
def data_for_grouping(dtype):
"""
Expected to be like [B, B, NA, NA, A, A, B, C]
Where A < B < C and NA is missing
"""
a = pd.Timestamp("2000-01-01")
b = pd.Timestamp("2000-01-02")
c = pd.Timestamp("2000-01-03")
na = "NaT"
return DatetimeArray(
np.array([b, b, na, na, a, a, b, c], dtype="datetime64[ns]"), dtype=dtype
)
@pytest.fixture
def na_cmp():
def cmp(a, b):
return a is pd.NaT and a is b
return cmp
@pytest.fixture
def na_value():
return pd.NaT
# ----------------------------------------------------------------------------
class BaseDatetimeTests:
pass
# ----------------------------------------------------------------------------
# Tests
class TestDatetimeDtype(BaseDatetimeTests, base.BaseDtypeTests):
pass
class TestConstructors(BaseDatetimeTests, base.BaseConstructorsTests):
def test_series_constructor(self, data):
# Series construction drops any .freq attr
data = data._with_freq(None)
super().test_series_constructor(data)
class TestGetitem(BaseDatetimeTests, base.BaseGetitemTests):
pass
class TestMethods(BaseDatetimeTests, base.BaseMethodsTests):
@pytest.mark.skip(reason="Incorrect expected")
def test_value_counts(self, all_data, dropna):
pass
def test_combine_add(self, data_repeated):
# Timestamp.__add__(Timestamp) not defined
pass
class TestInterface(BaseDatetimeTests, base.BaseInterfaceTests):
def test_array_interface(self, data):
if data.tz:
# np.asarray(DTA) is currently always tz-naive.
pytest.skip("GH-23569")
else:
super().test_array_interface(data)
class TestArithmeticOps(BaseDatetimeTests, base.BaseArithmeticOpsTests):
implements = {"__sub__", "__rsub__"}
def test_arith_frame_with_scalar(self, data, all_arithmetic_operators):
# frame & scalar
if all_arithmetic_operators in self.implements:
df = pd.DataFrame({"A": data})
self.check_opname(df, all_arithmetic_operators, data[0], exc=None)
else:
# ... but not the rest.
super().test_arith_frame_with_scalar(data, all_arithmetic_operators)
def test_arith_series_with_scalar(self, data, all_arithmetic_operators):
if all_arithmetic_operators in self.implements:
s = pd.Series(data)
self.check_opname(s, all_arithmetic_operators, s.iloc[0], exc=None)
else:
# ... but not the rest.
super().test_arith_series_with_scalar(data, all_arithmetic_operators)
def test_add_series_with_extension_array(self, data):
# Datetime + Datetime not implemented
s = pd.Series(data)
msg = "cannot add DatetimeArray and DatetimeArray"
with pytest.raises(TypeError, match=msg):
s + data
def test_arith_series_with_array(self, data, all_arithmetic_operators):
if all_arithmetic_operators in self.implements:
s = pd.Series(data)
self.check_opname(s, all_arithmetic_operators, s.iloc[0], exc=None)
else:
# ... but not the rest.
super().test_arith_series_with_scalar(data, all_arithmetic_operators)
def test_divmod_series_array(self):
# GH 23287
# skipping because it is not implemented
pass
class TestCasting(BaseDatetimeTests, base.BaseCastingTests):
pass
class TestComparisonOps(BaseDatetimeTests, base.BaseComparisonOpsTests):
def _compare_other(self, s, data, op_name, other):
# the base test is not appropriate for us. We raise on comparison
# with (some) integers, depending on the value.
pass
class TestMissing(BaseDatetimeTests, base.BaseMissingTests):
pass
class TestReshaping(BaseDatetimeTests, base.BaseReshapingTests):
@pytest.mark.skip(reason="We have DatetimeTZBlock")
def test_concat(self, data, in_frame):
pass
def test_concat_mixed_dtypes(self, data):
# concat(Series[datetimetz], Series[category]) uses a
# plain np.array(values) on the DatetimeArray, which
# drops the tz.
super().test_concat_mixed_dtypes(data)
@pytest.mark.parametrize("obj", ["series", "frame"])
def test_unstack(self, obj):
# GH-13287: can't use base test, since building the expected fails.
dtype = DatetimeTZDtype(tz="US/Central")
data = DatetimeArray._from_sequence(
["2000", "2001", "2002", "2003"],
dtype=dtype,
)
index = pd.MultiIndex.from_product(([["A", "B"], ["a", "b"]]), names=["a", "b"])
if obj == "series":
ser = pd.Series(data, index=index)
expected = pd.DataFrame(
{"A": data.take([0, 1]), "B": data.take([2, 3])},
index=pd.Index(["a", "b"], name="b"),
)
expected.columns.name = "a"
else:
ser = pd.DataFrame({"A": data, "B": data}, index=index)
expected = pd.DataFrame(
{
("A", "A"): data.take([0, 1]),
("A", "B"): data.take([2, 3]),
("B", "A"): data.take([0, 1]),
("B", "B"): data.take([2, 3]),
},
index=pd.Index(["a", "b"], name="b"),
)
expected.columns.names = [None, "a"]
result = ser.unstack(0)
self.assert_equal(result, expected)
class TestSetitem(BaseDatetimeTests, base.BaseSetitemTests):
pass
class TestGroupby(BaseDatetimeTests, base.BaseGroupbyTests):
pass
class TestPrinting(BaseDatetimeTests, base.BasePrintingTests):
pass
class Test2DCompat(BaseDatetimeTests, base.Dim2CompatTests):
pass
| datapythonista/pandas | pandas/tests/extension/test_datetime.py | Python | bsd-3-clause | 7,463 |
# Core.py - Python extension for perf script, core functions
#
# Copyright (C) 2010 by Tom Zanussi <[email protected]>
#
# This software may be distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
from collections import defaultdict
def autodict():
return defaultdict(autodict)
flag_fields = autodict()
symbolic_fields = autodict()
def define_flag_field(event_name, field_name, delim):
flag_fields[event_name][field_name]['delim'] = delim
def define_flag_value(event_name, field_name, value, field_str):
flag_fields[event_name][field_name]['values'][value] = field_str
def define_symbolic_field(event_name, field_name):
# nothing to do, really
pass
def define_symbolic_value(event_name, field_name, value, field_str):
symbolic_fields[event_name][field_name]['values'][value] = field_str
def flag_str(event_name, field_name, value):
string = ""
if flag_fields[event_name][field_name]:
print_delim = 0
keys = flag_fields[event_name][field_name]['values'].keys()
keys.sort()
for idx in keys:
if not value and not idx:
string += flag_fields[event_name][field_name]['values'][idx]
break
if idx and (value & idx) == idx:
if print_delim and flag_fields[event_name][field_name]['delim']:
string += " " + flag_fields[event_name][field_name]['delim'] + " "
string += flag_fields[event_name][field_name]['values'][idx]
print_delim = 1
value &= ~idx
return string
def symbol_str(event_name, field_name, value):
string = ""
if symbolic_fields[event_name][field_name]:
keys = symbolic_fields[event_name][field_name]['values'].keys()
keys.sort()
for idx in keys:
if not value and not idx:
string = symbolic_fields[event_name][field_name]['values'][idx]
break
if (value == idx):
string = symbolic_fields[event_name][field_name]['values'][idx]
break
return string
trace_flags = { 0x00: "NONE", \
0x01: "IRQS_OFF", \
0x02: "IRQS_NOSUPPORT", \
0x04: "NEED_RESCHED", \
0x08: "HARDIRQ", \
0x10: "SOFTIRQ" }
def trace_flag_str(value):
string = ""
print_delim = 0
keys = trace_flags.keys()
for idx in keys:
if not value and not idx:
string += "NONE"
break
if idx and (value & idx) == idx:
if print_delim:
string += " | ";
string += trace_flags[idx]
print_delim = 1
value &= ~idx
return string
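# Illustrative examples (not from the original file): with the flag table above,
# trace_flag_str(0) returns "NONE", trace_flag_str(0x01) returns "IRQS_OFF", and
# a combined value such as 0x01 | 0x08 yields the matching names joined by " | ".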
def taskState(state):
states = {
0 : "R",
1 : "S",
2 : "D",
64: "DEAD"
}
if state not in states:
return "Unknown"
return states[state]
class EventHeaders:
def __init__(self, common_cpu, common_secs, common_nsecs,
common_pid, common_comm):
self.cpu = common_cpu
self.secs = common_secs
self.nsecs = common_nsecs
self.pid = common_pid
self.comm = common_comm
def ts(self):
return (self.secs * (10 ** 9)) + self.nsecs
def ts_format(self):
return "%d.%d" % (self.secs, int(self.nsecs / 1000))
| andi34/kernel_samsung_espresso-cm | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Core.py | Python | gpl-2.0 | 3,244 |
import sys
import os.path
import pprint
import xmlrpclib
class SafeTransportWithCert(xmlrpclib.SafeTransport):
"""Helper class to force the right certificate for the transport class."""
def __init__(self, key_path, cert_path):
xmlrpclib.SafeTransport.__init__(self) # no super, because old style class
self._key_path = key_path
self._cert_path = cert_path
def make_connection(self, host):
"""This method will automatically be called by the ServerProxy class when a transport channel is needed."""
host_with_cert = (host, {'key_file' : self._key_path, 'cert_file' : self._cert_path})
return xmlrpclib.SafeTransport.make_connection(self, host_with_cert) # no super, because old style class
def ssl_call(method_name, params, endpoint, key_path='alice-key.pem', cert_path='alice-cert.pem', host='127.0.0.1', port=8008):
creds_path = os.path.normpath(os.path.join(os.path.dirname(__file__), '../..', 'creds'))
if not os.path.isabs(key_path):
key_path = os.path.join(creds_path, key_path)
if not os.path.isabs(cert_path):
cert_path = os.path.join(creds_path, cert_path)
key_path = os.path.abspath(os.path.expanduser(key_path))
cert_path = os.path.abspath(os.path.expanduser(cert_path))
if not os.path.isfile(key_path) or not os.path.isfile(cert_path):
raise RuntimeError("Key or cert file not found (%s, %s)" % (key_path, cert_path))
transport = SafeTransportWithCert(key_path, cert_path)
proxy = xmlrpclib.ServerProxy("https://%s:%s/%s" % (host, str(port), endpoint), transport=transport)
# return proxy.get_version()
method = getattr(proxy, method_name)
return method(*params)
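# Illustrative usage (the endpoint path and method name are assumptions, not
# defined in this file): invoke an XML-RPC method over the client-certificate
# authenticated channel, using the default credentials under creds/.
#   version_info = ssl_call('get_version', [], 'xmlrpc/am', host='127.0.0.1', port=8008)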
def get_creds_file_contents(filename):
creds_path = os.path.normpath(os.path.join(os.path.dirname(__file__), '../..', 'creds'))
if not os.path.isabs(filename):
filename = os.path.join(creds_path, filename)
filename = os.path.abspath(os.path.expanduser(filename))
contents = None
with open(filename, 'r') as f:
contents = f.read()
return contents
COLORS={"reset":"\x1b[00m",
"blue": "\x1b[01;34m",
"cyan": "\x1b[01;36m",
"green": "\x1b[01;32m",
"yellow": "\x1b[01;33m",
"red": "\x1b[01;05;37;41m"}
def print_call(method_name, params, res):
# output stuff
print COLORS["blue"],
print "--> %s(%s)" % (method_name, params)
print COLORS["cyan"],
pprint.pprint(res, indent=4, width=20)
print COLORS["reset"]
WARNINGS = []
def warn(msg):
global WARNINGS
WARNINGS.append(msg)
def print_warnings():
global WARNINGS
if len(WARNINGS) > 0:
print COLORS["yellow"],
print
print "WARNINGS:"
for msg in WARNINGS:
print msg
print COLORS["reset"]
| EICT/C-BAS | test/unit/v1/testtools.py | Python | bsd-3-clause | 2,817 |
from django.apps import apps
from django.conf import settings
from django.utils.module_loading import import_string
from collections import namedtuple
import io
import inspect
from uuid import uuid1
from ..template import template_inheritance
from ..util import qualified_name, b58enc
# __init__() below creates a list of templates, each of which has a list of providers
# this named tuple adds a small amount of extra clarity to it.
# I could use a dict or OrderedDict, but I need order AND fast indexing
TemplateProviderList = namedtuple("TemplateProviderList", [ 'template', 'providers' ])
# ProviderRun.initialize_providers() creates a list of these to hold provider options from settings.py
# I can't keep the options inside the provider class itself because a given class can be listed
# more than once in settings.py (with different options).
ProviderEntry = namedtuple("ProviderEntry", [ 'cls', 'options' ])
####################################################
### Main runner for providers
class ProviderRun(object):
'''A run through the providers for tself and its ancestors'''
SETTINGS_KEY = 'CONTENT_PROVIDERS'
CONTENT_PROVIDERS = []
@classmethod
def initialize_providers(cls):
'''Initializes the providers (called from dmp app ready())'''
dmp = apps.get_app_config('django_mako_plus')
# regular content providers
cls.CONTENT_PROVIDERS = []
for provider_settings in dmp.options[cls.SETTINGS_KEY]:
# import the class for this provider
assert 'provider' in provider_settings, "Invalid entry in settings.py: CONTENT_PROVIDERS item must have 'provider' key"
provider_cls = import_string(provider_settings['provider'])
# combine options from all of its bases, then from settings.py
options = {}
for base in reversed(inspect.getmro(provider_cls)):
options.update(getattr(base, 'DEFAULT_OPTIONS', {}))
options.update(provider_settings)
# add to the list
if options['enabled']:
pe = ProviderEntry(provider_cls, options)
pe.options['template_cache_key'] = '_dmp_provider_{}_'.format(id(pe))
cls.CONTENT_PROVIDERS.append(pe)
def __init__(self, tself, group=None):
'''
tself: `self` object from a Mako template (available during rendering).
group: provider group to include (defaults to all groups if None)
'''
# a unique context id for this run
self.uid = b58enc(uuid1().int)
self.tself = tself
self.request = tself.context.get('request')
self.context = tself.context
self.buffer = io.StringIO()
# get the ProviderClassInfo objects that are used in this group
group_pes = [ pe for pe in self.CONTENT_PROVIDERS if group is None or pe.options['group'] == group ]
# Create a map of template -> providers for this run
# {
# base.htm: [ JsLinkProvider(), CssLinkProvider(), ... ]
# app_base.htm: [ JsLinkProvider(), CssLinkProvider(), ... ]
# index.html: [ JsLinkProvider(), CssLinkProvider(), ... ]
# }
self.templates = []
for tmpl in self._get_template_inheritance():
tpl = TemplateProviderList(tmpl, [])
for index, pe in enumerate(group_pes):
tpl.providers.append(pe.cls(self, tmpl, index, pe.options))
self.templates.append(tpl)
def _get_template_inheritance(self):
'''Returns a list of the template inheritance of tself, starting with the oldest ancestor'''
return reversed(list(template_inheritance(self.tself)))
def run(self):
'''Performs the run through the templates and their providers'''
for tpl in self.templates:
for provider in tpl.providers:
provider.provide()
def write(self, content):
'''Provider instances use this to write to the buffer'''
self.buffer.write(content)
if settings.DEBUG:
self.buffer.write('\n')
def getvalue(self):
'''Returns the buffer string'''
return self.buffer.getvalue()
| doconix/django-mako-plus | django_mako_plus/provider/runner.py | Python | apache-2.0 | 4,260 |
# Generated by Django 2.0.8 on 2019-04-10 11:56
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('structure', '0011_structurecomplexprotein'),
]
operations = [
migrations.CreateModel(
name='StructureVectors',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('center_axis', models.CharField(max_length=100)),
('tm1_axis', models.CharField(max_length=100)),
('structure', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='structure.Structure')),
],
options={
'db_table': 'structure_vectors',
},
),
]
| cmunk/protwis | structure/migrations/0012_structurevectors.py | Python | apache-2.0 | 842 |
from django import forms
from django.db.models.functions import Lower
from django.contrib.auth.models import User
from core import models
class Search(forms.Form):
name = forms.CharField(required=False)
address = forms.CharField(required=False)
group = forms.ModelChoiceField(queryset=models.HostGroup.objects.order_by(Lower('name')), required=False,
widget=forms.Select(attrs={'class': 'need-select2'}))
class Edit(forms.ModelForm):
groups = forms.ModelMultipleChoiceField(required=False, queryset=models.HostGroup.objects.order_by(Lower('name')),
widget=forms.SelectMultiple(attrs={'class': 'need-select2'}))
users = forms.ModelMultipleChoiceField(required=False, queryset=User.objects.all(),
widget=forms.SelectMultiple(attrs={'class': 'need-select2'}))
class Meta:
model = models.Host
fields = ('name', 'users', 'address', 'groups')
| telminov/ansible-manager | core/forms/host.py | Python | mit | 1,005 |
import collections
from supriya import CalculationRate
from supriya.ugens.BEQSuite import BEQSuite
class BHiCut(BEQSuite):
"""
A high-cut filter.
::
>>> source = supriya.ugens.In.ar(0)
>>> bhi_cut = supriya.ugens.BHiCut.ar(
... frequency=1200,
... max_order=5,
... order=2,
... source=source,
... )
>>> bhi_cut
BHiCut.ar()
"""
### CLASS VARIABLES ###
_ordered_input_names = collections.OrderedDict(
[("source", None), ("frequency", 1200), ("order", 2), ("max_order", 5)]
)
_valid_calculation_rates = (CalculationRate.AUDIO,)
| Pulgama/supriya | supriya/ugens/BHiCut.py | Python | mit | 666 |
"""
Settings for the blog are namespaced in the BLOG setting.
For example your project's `settings.py` file might look like this:
BLOG = {
'BLOG_TITLE': 'My Blog',
}
"""
from django.conf import settings as project_settings
DEFAULTS = {
'BLOG_TITLE': 'My Blog',
'BLOG_SITE_URL': 'http://localhost',
}
# Check if a setting is applied in the Django project settings.py,
# if not use the default.
SETTINGS = {}
for setting_name, setting_default in DEFAULTS.items():
try:
SETTINGS[setting_name] = project_settings.BLOG[setting_name]
except (AttributeError, KeyError):
SETTINGS[setting_name] = DEFAULTS[setting_name]
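# Illustrative usage from application code (assumed, not part of this file);
# SETTINGS['BLOG_TITLE'] falls back to the DEFAULTS value when the project
# settings omit the key:
#   from kilonull.settings import SETTINGS
#   title = SETTINGS['BLOG_TITLE']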
| vacuus/kilonull | kilonull/settings.py | Python | lgpl-3.0 | 654 |
import random
import string
def random_string(n):
    # Return n cryptographically random characters drawn from uppercase
    # ASCII letters and digits.
    result = ''
    for _ in range(n):
        result += random.SystemRandom().choice(
            string.ascii_uppercase + string.digits)
    return result
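# Illustrative usage (output is random by construction):
#   token = random_string(8)  # e.g. 'K3TZ0Q7B'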
| adrianp/cartz | server/utils.py | Python | mit | 210 |
import pytest
from thefuck.rules.python_module_error import get_new_command, match
from thefuck.types import Command
@pytest.fixture
def module_error_output(filename, module_name):
return """Traceback (most recent call last):
File "{0}", line 1, in <module>
import {1}
ModuleNotFoundError: No module named '{1}'""".format(
filename, module_name
)
@pytest.mark.parametrize(
"test",
[
Command("python hello_world.py", "Hello World"),
Command(
"./hello_world.py",
"""Traceback (most recent call last):
File "hello_world.py", line 1, in <module>
pritn("Hello World")
NameError: name 'pritn' is not defined""",
),
],
)
def test_not_match(test):
assert not match(test)
positive_tests = [
(
"python some_script.py",
"some_script.py",
"more_itertools",
"pip install more_itertools && python some_script.py",
),
(
"./some_other_script.py",
"some_other_script.py",
"a_module",
"pip install a_module && ./some_other_script.py",
),
]
@pytest.mark.parametrize(
"script, filename, module_name, corrected_script", positive_tests
)
def test_match(script, filename, module_name, corrected_script, module_error_output):
assert match(Command(script, module_error_output))
@pytest.mark.parametrize(
"script, filename, module_name, corrected_script", positive_tests
)
def test_get_new_command(
script, filename, module_name, corrected_script, module_error_output
):
assert get_new_command(Command(script, module_error_output)) == corrected_script
| nvbn/thefuck | tests/rules/test_python_module_error.py | Python | mit | 1,629 |
"Test posix functions"
from test import support
# Skip these tests if there is no posix module.
posix = support.import_module('posix')
import errno
import sys
import time
import os
import fcntl
import platform
import pwd
import shutil
import stat
import tempfile
import unittest
import warnings
_DUMMY_SYMLINK = os.path.join(tempfile.gettempdir(),
support.TESTFN + '-dummy-symlink')
class PosixTester(unittest.TestCase):
def setUp(self):
# create empty file
fp = open(support.TESTFN, 'w+')
fp.close()
self.teardown_files = [ support.TESTFN ]
self._warnings_manager = support.check_warnings()
self._warnings_manager.__enter__()
warnings.filterwarnings('ignore', '.* potential security risk .*',
RuntimeWarning)
def tearDown(self):
for teardown_file in self.teardown_files:
support.unlink(teardown_file)
self._warnings_manager.__exit__(None, None, None)
def testNoArgFunctions(self):
# test posix functions which take no arguments and have
# no side-effects which we need to cleanup (e.g., fork, wait, abort)
NO_ARG_FUNCTIONS = [ "ctermid", "getcwd", "getcwdb", "uname",
"times", "getloadavg",
"getegid", "geteuid", "getgid", "getgroups",
"getpid", "getpgrp", "getppid", "getuid", "sync",
]
for name in NO_ARG_FUNCTIONS:
posix_func = getattr(posix, name, None)
if posix_func is not None:
posix_func()
self.assertRaises(TypeError, posix_func, 1)
@unittest.skipUnless(hasattr(posix, 'getresuid'),
'test needs posix.getresuid()')
def test_getresuid(self):
user_ids = posix.getresuid()
self.assertEqual(len(user_ids), 3)
for val in user_ids:
self.assertGreaterEqual(val, 0)
@unittest.skipUnless(hasattr(posix, 'getresgid'),
'test needs posix.getresgid()')
def test_getresgid(self):
group_ids = posix.getresgid()
self.assertEqual(len(group_ids), 3)
for val in group_ids:
self.assertGreaterEqual(val, 0)
@unittest.skipUnless(hasattr(posix, 'setresuid'),
'test needs posix.setresuid()')
def test_setresuid(self):
current_user_ids = posix.getresuid()
self.assertIsNone(posix.setresuid(*current_user_ids))
# -1 means don't change that value.
self.assertIsNone(posix.setresuid(-1, -1, -1))
@unittest.skipUnless(hasattr(posix, 'setresuid'),
'test needs posix.setresuid()')
def test_setresuid_exception(self):
# Don't do this test if someone is silly enough to run us as root.
current_user_ids = posix.getresuid()
if 0 not in current_user_ids:
new_user_ids = (current_user_ids[0]+1, -1, -1)
self.assertRaises(OSError, posix.setresuid, *new_user_ids)
@unittest.skipUnless(hasattr(posix, 'setresgid'),
'test needs posix.setresgid()')
def test_setresgid(self):
current_group_ids = posix.getresgid()
self.assertIsNone(posix.setresgid(*current_group_ids))
# -1 means don't change that value.
self.assertIsNone(posix.setresgid(-1, -1, -1))
@unittest.skipUnless(hasattr(posix, 'setresgid'),
'test needs posix.setresgid()')
def test_setresgid_exception(self):
# Don't do this test if someone is silly enough to run us as root.
current_group_ids = posix.getresgid()
if 0 not in current_group_ids:
new_group_ids = (current_group_ids[0]+1, -1, -1)
self.assertRaises(OSError, posix.setresgid, *new_group_ids)
@unittest.skipUnless(hasattr(posix, 'initgroups'),
"test needs os.initgroups()")
def test_initgroups(self):
# It takes a string and an integer; check that it raises a TypeError
# for other argument lists.
self.assertRaises(TypeError, posix.initgroups)
self.assertRaises(TypeError, posix.initgroups, None)
self.assertRaises(TypeError, posix.initgroups, 3, "foo")
self.assertRaises(TypeError, posix.initgroups, "foo", 3, object())
# If a non-privileged user invokes it, it should fail with OSError
# EPERM.
if os.getuid() != 0:
try:
name = pwd.getpwuid(posix.getuid()).pw_name
except KeyError:
# the current UID may not have a pwd entry
raise unittest.SkipTest("need a pwd entry")
try:
posix.initgroups(name, 13)
except OSError as e:
self.assertEqual(e.errno, errno.EPERM)
else:
self.fail("Expected OSError to be raised by initgroups")
@unittest.skipUnless(hasattr(posix, 'statvfs'),
'test needs posix.statvfs()')
def test_statvfs(self):
self.assertTrue(posix.statvfs(os.curdir))
@unittest.skipUnless(hasattr(posix, 'fstatvfs'),
'test needs posix.fstatvfs()')
def test_fstatvfs(self):
fp = open(support.TESTFN)
try:
self.assertTrue(posix.fstatvfs(fp.fileno()))
self.assertTrue(posix.statvfs(fp.fileno()))
finally:
fp.close()
@unittest.skipUnless(hasattr(posix, 'ftruncate'),
'test needs posix.ftruncate()')
def test_ftruncate(self):
fp = open(support.TESTFN, 'w+')
try:
# we need to have some data to truncate
fp.write('test')
fp.flush()
posix.ftruncate(fp.fileno(), 0)
finally:
fp.close()
@unittest.skipUnless(hasattr(posix, 'truncate'), "test needs posix.truncate()")
def test_truncate(self):
with open(support.TESTFN, 'w') as fp:
fp.write('test')
fp.flush()
posix.truncate(support.TESTFN, 0)
@unittest.skipUnless(getattr(os, 'execve', None) in os.supports_fd, "test needs execve() to support the fd parameter")
@unittest.skipUnless(hasattr(os, 'fork'), "test needs os.fork()")
@unittest.skipUnless(hasattr(os, 'waitpid'), "test needs os.waitpid()")
def test_fexecve(self):
fp = os.open(sys.executable, os.O_RDONLY)
try:
pid = os.fork()
if pid == 0:
os.chdir(os.path.split(sys.executable)[0])
posix.execve(fp, [sys.executable, '-c', 'pass'], os.environ)
else:
self.assertEqual(os.waitpid(pid, 0), (pid, 0))
finally:
os.close(fp)
@unittest.skipUnless(hasattr(posix, 'waitid'), "test needs posix.waitid()")
@unittest.skipUnless(hasattr(os, 'fork'), "test needs os.fork()")
def test_waitid(self):
pid = os.fork()
if pid == 0:
os.chdir(os.path.split(sys.executable)[0])
posix.execve(sys.executable, [sys.executable, '-c', 'pass'], os.environ)
else:
res = posix.waitid(posix.P_PID, pid, posix.WEXITED)
self.assertEqual(pid, res.si_pid)
@unittest.skipUnless(hasattr(posix, 'lockf'), "test needs posix.lockf()")
def test_lockf(self):
fd = os.open(support.TESTFN, os.O_WRONLY | os.O_CREAT)
try:
os.write(fd, b'test')
os.lseek(fd, 0, os.SEEK_SET)
posix.lockf(fd, posix.F_LOCK, 4)
# section is locked
posix.lockf(fd, posix.F_ULOCK, 4)
finally:
os.close(fd)
@unittest.skipUnless(hasattr(posix, 'pread'), "test needs posix.pread()")
def test_pread(self):
fd = os.open(support.TESTFN, os.O_RDWR | os.O_CREAT)
try:
os.write(fd, b'test')
os.lseek(fd, 0, os.SEEK_SET)
self.assertEqual(b'es', posix.pread(fd, 2, 1))
# the first pread() shouldn't disturb the file offset
self.assertEqual(b'te', posix.read(fd, 2))
finally:
os.close(fd)
@unittest.skipUnless(hasattr(posix, 'pwrite'), "test needs posix.pwrite()")
def test_pwrite(self):
fd = os.open(support.TESTFN, os.O_RDWR | os.O_CREAT)
try:
os.write(fd, b'test')
os.lseek(fd, 0, os.SEEK_SET)
posix.pwrite(fd, b'xx', 1)
self.assertEqual(b'txxt', posix.read(fd, 4))
finally:
os.close(fd)
@unittest.skipUnless(hasattr(posix, 'posix_fallocate'),
"test needs posix.posix_fallocate()")
def test_posix_fallocate(self):
fd = os.open(support.TESTFN, os.O_WRONLY | os.O_CREAT)
try:
posix.posix_fallocate(fd, 0, 10)
except OSError as inst:
# issue10812, ZFS doesn't appear to support posix_fallocate,
# so skip Solaris-based since they are likely to have ZFS.
if inst.errno != errno.EINVAL or not sys.platform.startswith("sunos"):
raise
finally:
os.close(fd)
@unittest.skipUnless(hasattr(posix, 'posix_fadvise'),
"test needs posix.posix_fadvise()")
def test_posix_fadvise(self):
fd = os.open(support.TESTFN, os.O_RDONLY)
try:
posix.posix_fadvise(fd, 0, 0, posix.POSIX_FADV_WILLNEED)
finally:
os.close(fd)
@unittest.skipUnless(os.utime in os.supports_fd, "test needs fd support in os.utime")
def test_utime_with_fd(self):
now = time.time()
fd = os.open(support.TESTFN, os.O_RDONLY)
try:
posix.utime(fd)
posix.utime(fd, None)
self.assertRaises(TypeError, posix.utime, fd, (None, None))
self.assertRaises(TypeError, posix.utime, fd, (now, None))
self.assertRaises(TypeError, posix.utime, fd, (None, now))
posix.utime(fd, (int(now), int(now)))
posix.utime(fd, (now, now))
self.assertRaises(ValueError, posix.utime, fd, (now, now), ns=(now, now))
self.assertRaises(ValueError, posix.utime, fd, (now, 0), ns=(None, None))
self.assertRaises(ValueError, posix.utime, fd, (None, None), ns=(now, 0))
posix.utime(fd, (int(now), int((now - int(now)) * 1e9)))
posix.utime(fd, ns=(int(now), int((now - int(now)) * 1e9)))
finally:
os.close(fd)
@unittest.skipUnless(os.utime in os.supports_follow_symlinks, "test needs follow_symlinks support in os.utime")
def test_utime_nofollow_symlinks(self):
now = time.time()
posix.utime(support.TESTFN, None, follow_symlinks=False)
self.assertRaises(TypeError, posix.utime, support.TESTFN, (None, None), follow_symlinks=False)
self.assertRaises(TypeError, posix.utime, support.TESTFN, (now, None), follow_symlinks=False)
self.assertRaises(TypeError, posix.utime, support.TESTFN, (None, now), follow_symlinks=False)
posix.utime(support.TESTFN, (int(now), int(now)), follow_symlinks=False)
posix.utime(support.TESTFN, (now, now), follow_symlinks=False)
posix.utime(support.TESTFN, follow_symlinks=False)
@unittest.skipUnless(hasattr(posix, 'writev'), "test needs posix.writev()")
def test_writev(self):
fd = os.open(support.TESTFN, os.O_RDWR | os.O_CREAT)
try:
n = os.writev(fd, (b'test1', b'tt2', b't3'))
self.assertEqual(n, 10)
os.lseek(fd, 0, os.SEEK_SET)
self.assertEqual(b'test1tt2t3', posix.read(fd, 10))
# Issue #20113: empty list of buffers should not crash
try:
size = posix.writev(fd, [])
except OSError:
# writev(fd, []) raises OSError(22, "Invalid argument")
# on OpenIndiana
pass
else:
self.assertEqual(size, 0)
finally:
os.close(fd)
@unittest.skipUnless(hasattr(posix, 'readv'), "test needs posix.readv()")
def test_readv(self):
fd = os.open(support.TESTFN, os.O_RDWR | os.O_CREAT)
try:
os.write(fd, b'test1tt2t3')
os.lseek(fd, 0, os.SEEK_SET)
buf = [bytearray(i) for i in [5, 3, 2]]
self.assertEqual(posix.readv(fd, buf), 10)
self.assertEqual([b'test1', b'tt2', b't3'], [bytes(i) for i in buf])
# Issue #20113: empty list of buffers should not crash
try:
size = posix.readv(fd, [])
except OSError:
# readv(fd, []) raises OSError(22, "Invalid argument")
# on OpenIndiana
pass
else:
self.assertEqual(size, 0)
finally:
os.close(fd)
@unittest.skipUnless(hasattr(posix, 'dup'),
'test needs posix.dup()')
def test_dup(self):
fp = open(support.TESTFN)
try:
fd = posix.dup(fp.fileno())
self.assertIsInstance(fd, int)
os.close(fd)
finally:
fp.close()
@unittest.skipUnless(hasattr(posix, 'confstr'),
'test needs posix.confstr()')
def test_confstr(self):
self.assertRaises(ValueError, posix.confstr, "CS_garbage")
self.assertEqual(len(posix.confstr("CS_PATH")) > 0, True)
@unittest.skipUnless(hasattr(posix, 'dup2'),
'test needs posix.dup2()')
def test_dup2(self):
fp1 = open(support.TESTFN)
fp2 = open(support.TESTFN)
try:
posix.dup2(fp1.fileno(), fp2.fileno())
finally:
fp1.close()
fp2.close()
@unittest.skipUnless(hasattr(os, 'O_CLOEXEC'), "needs os.O_CLOEXEC")
@support.requires_linux_version(2, 6, 23)
def test_oscloexec(self):
fd = os.open(support.TESTFN, os.O_RDONLY|os.O_CLOEXEC)
self.addCleanup(os.close, fd)
self.assertTrue(fcntl.fcntl(fd, fcntl.F_GETFD) & fcntl.FD_CLOEXEC)
@unittest.skipUnless(hasattr(posix, 'O_EXLOCK'),
'test needs posix.O_EXLOCK')
def test_osexlock(self):
fd = os.open(support.TESTFN,
os.O_WRONLY|os.O_EXLOCK|os.O_CREAT)
self.assertRaises(OSError, os.open, support.TESTFN,
os.O_WRONLY|os.O_EXLOCK|os.O_NONBLOCK)
os.close(fd)
if hasattr(posix, "O_SHLOCK"):
fd = os.open(support.TESTFN,
os.O_WRONLY|os.O_SHLOCK|os.O_CREAT)
self.assertRaises(OSError, os.open, support.TESTFN,
os.O_WRONLY|os.O_EXLOCK|os.O_NONBLOCK)
os.close(fd)
@unittest.skipUnless(hasattr(posix, 'O_SHLOCK'),
'test needs posix.O_SHLOCK')
def test_osshlock(self):
fd1 = os.open(support.TESTFN,
os.O_WRONLY|os.O_SHLOCK|os.O_CREAT)
fd2 = os.open(support.TESTFN,
os.O_WRONLY|os.O_SHLOCK|os.O_CREAT)
os.close(fd2)
os.close(fd1)
if hasattr(posix, "O_EXLOCK"):
fd = os.open(support.TESTFN,
os.O_WRONLY|os.O_SHLOCK|os.O_CREAT)
self.assertRaises(OSError, os.open, support.TESTFN,
os.O_RDONLY|os.O_EXLOCK|os.O_NONBLOCK)
os.close(fd)
@unittest.skipUnless(hasattr(posix, 'fstat'),
'test needs posix.fstat()')
def test_fstat(self):
fp = open(support.TESTFN)
try:
self.assertTrue(posix.fstat(fp.fileno()))
self.assertTrue(posix.stat(fp.fileno()))
self.assertRaisesRegex(TypeError,
'should be string, bytes or integer, not',
posix.stat, float(fp.fileno()))
finally:
fp.close()
@unittest.skipUnless(hasattr(posix, 'stat'),
'test needs posix.stat()')
def test_stat(self):
self.assertTrue(posix.stat(support.TESTFN))
self.assertTrue(posix.stat(os.fsencode(support.TESTFN)))
self.assertTrue(posix.stat(bytearray(os.fsencode(support.TESTFN))))
self.assertRaisesRegex(TypeError,
'can\'t specify None for path argument',
posix.stat, None)
self.assertRaisesRegex(TypeError,
'should be string, bytes or integer, not',
posix.stat, list(support.TESTFN))
self.assertRaisesRegex(TypeError,
'should be string, bytes or integer, not',
posix.stat, list(os.fsencode(support.TESTFN)))
@unittest.skipUnless(hasattr(posix, 'mkfifo'), "don't have mkfifo()")
def test_mkfifo(self):
support.unlink(support.TESTFN)
posix.mkfifo(support.TESTFN, stat.S_IRUSR | stat.S_IWUSR)
self.assertTrue(stat.S_ISFIFO(posix.stat(support.TESTFN).st_mode))
@unittest.skipUnless(hasattr(posix, 'mknod') and hasattr(stat, 'S_IFIFO'),
"don't have mknod()/S_IFIFO")
def test_mknod(self):
# Test using mknod() to create a FIFO (the only use specified
# by POSIX).
support.unlink(support.TESTFN)
mode = stat.S_IFIFO | stat.S_IRUSR | stat.S_IWUSR
try:
posix.mknod(support.TESTFN, mode, 0)
except OSError as e:
# Some old systems don't allow unprivileged users to use
# mknod(), or only support creating device nodes.
self.assertIn(e.errno, (errno.EPERM, errno.EINVAL))
else:
self.assertTrue(stat.S_ISFIFO(posix.stat(support.TESTFN).st_mode))
def _test_all_chown_common(self, chown_func, first_param, stat_func):
"""Common code for chown, fchown and lchown tests."""
def check_stat(uid, gid):
if stat_func is not None:
stat = stat_func(first_param)
self.assertEqual(stat.st_uid, uid)
self.assertEqual(stat.st_gid, gid)
uid = os.getuid()
gid = os.getgid()
# test a successful chown call
chown_func(first_param, uid, gid)
check_stat(uid, gid)
chown_func(first_param, -1, gid)
check_stat(uid, gid)
chown_func(first_param, uid, -1)
check_stat(uid, gid)
if uid == 0:
# Try an amusingly large uid/gid to make sure we handle
# large unsigned values. (chown lets you use any
# uid/gid you like, even if they aren't defined.)
#
# This problem keeps coming up:
# http://bugs.python.org/issue1747858
# http://bugs.python.org/issue4591
# http://bugs.python.org/issue15301
# Hopefully the fix in 4591 fixes it for good!
#
# This part of the test only runs when run as root.
# Only scary people run their tests as root.
big_value = 2**31
chown_func(first_param, big_value, big_value)
check_stat(big_value, big_value)
chown_func(first_param, -1, -1)
check_stat(big_value, big_value)
chown_func(first_param, uid, gid)
check_stat(uid, gid)
elif platform.system() in ('HP-UX', 'SunOS'):
# HP-UX and Solaris can allow a non-root user to chown() to root
# (issue #5113)
raise unittest.SkipTest("Skipping because of non-standard chown() "
"behavior")
else:
# non-root cannot chown to root, raises OSError
self.assertRaises(OSError, chown_func, first_param, 0, 0)
check_stat(uid, gid)
self.assertRaises(OSError, chown_func, first_param, 0, -1)
check_stat(uid, gid)
if 0 not in os.getgroups():
self.assertRaises(OSError, chown_func, first_param, -1, 0)
check_stat(uid, gid)
# test illegal types
for t in str, float:
self.assertRaises(TypeError, chown_func, first_param, t(uid), gid)
check_stat(uid, gid)
self.assertRaises(TypeError, chown_func, first_param, uid, t(gid))
check_stat(uid, gid)
@unittest.skipUnless(hasattr(posix, 'chown'), "test needs os.chown()")
def test_chown(self):
# raise an OSError if the file does not exist
os.unlink(support.TESTFN)
self.assertRaises(OSError, posix.chown, support.TESTFN, -1, -1)
# re-create the file
support.create_empty_file(support.TESTFN)
self._test_all_chown_common(posix.chown, support.TESTFN,
getattr(posix, 'stat', None))
@unittest.skipUnless(hasattr(posix, 'fchown'), "test needs os.fchown()")
def test_fchown(self):
os.unlink(support.TESTFN)
# re-create the file
test_file = open(support.TESTFN, 'w')
try:
fd = test_file.fileno()
self._test_all_chown_common(posix.fchown, fd,
getattr(posix, 'fstat', None))
finally:
test_file.close()
@unittest.skipUnless(hasattr(posix, 'lchown'), "test needs os.lchown()")
def test_lchown(self):
os.unlink(support.TESTFN)
# create a symlink
os.symlink(_DUMMY_SYMLINK, support.TESTFN)
self._test_all_chown_common(posix.lchown, support.TESTFN,
getattr(posix, 'lstat', None))
@unittest.skipUnless(hasattr(posix, 'chdir'), 'test needs posix.chdir()')
def test_chdir(self):
posix.chdir(os.curdir)
self.assertRaises(OSError, posix.chdir, support.TESTFN)
def test_listdir(self):
self.assertTrue(support.TESTFN in posix.listdir(os.curdir))
def test_listdir_default(self):
# When listdir is called without argument,
# it's the same as listdir(os.curdir).
self.assertTrue(support.TESTFN in posix.listdir())
def test_listdir_bytes(self):
# When listdir is called with a bytes object,
# the returned strings are of type bytes.
self.assertTrue(os.fsencode(support.TESTFN) in posix.listdir(b'.'))
@unittest.skipUnless(posix.listdir in os.supports_fd,
"test needs fd support for posix.listdir()")
def test_listdir_fd(self):
f = posix.open(posix.getcwd(), posix.O_RDONLY)
self.addCleanup(posix.close, f)
self.assertEqual(
sorted(posix.listdir('.')),
sorted(posix.listdir(f))
)
# Check that the fd offset was reset (issue #13739)
self.assertEqual(
sorted(posix.listdir('.')),
sorted(posix.listdir(f))
)
@unittest.skipUnless(hasattr(posix, 'access'), 'test needs posix.access()')
def test_access(self):
self.assertTrue(posix.access(support.TESTFN, os.R_OK))
@unittest.skipUnless(hasattr(posix, 'umask'), 'test needs posix.umask()')
def test_umask(self):
old_mask = posix.umask(0)
self.assertIsInstance(old_mask, int)
posix.umask(old_mask)
@unittest.skipUnless(hasattr(posix, 'strerror'),
'test needs posix.strerror()')
def test_strerror(self):
self.assertTrue(posix.strerror(0))
@unittest.skipUnless(hasattr(posix, 'pipe'), 'test needs posix.pipe()')
def test_pipe(self):
reader, writer = posix.pipe()
os.close(reader)
os.close(writer)
@unittest.skipUnless(hasattr(os, 'pipe2'), "test needs os.pipe2()")
@support.requires_linux_version(2, 6, 27)
def test_pipe2(self):
self.assertRaises(TypeError, os.pipe2, 'DEADBEEF')
self.assertRaises(TypeError, os.pipe2, 0, 0)
# try calling with flags = 0, like os.pipe()
r, w = os.pipe2(0)
os.close(r)
os.close(w)
# test flags
r, w = os.pipe2(os.O_CLOEXEC|os.O_NONBLOCK)
self.addCleanup(os.close, r)
self.addCleanup(os.close, w)
self.assertFalse(os.get_inheritable(r))
self.assertFalse(os.get_inheritable(w))
self.assertTrue(fcntl.fcntl(r, fcntl.F_GETFL) & os.O_NONBLOCK)
self.assertTrue(fcntl.fcntl(w, fcntl.F_GETFL) & os.O_NONBLOCK)
# try reading from an empty pipe: this should fail, not block
self.assertRaises(OSError, os.read, r, 1)
# try a write big enough to fill-up the pipe: this should either
# fail or perform a partial write, not block
try:
os.write(w, b'x' * support.PIPE_MAX_SIZE)
except OSError:
pass
@support.cpython_only
@unittest.skipUnless(hasattr(os, 'pipe2'), "test needs os.pipe2()")
@support.requires_linux_version(2, 6, 27)
def test_pipe2_c_limits(self):
# Issue 15989
import _testcapi
self.assertRaises(OverflowError, os.pipe2, _testcapi.INT_MAX + 1)
self.assertRaises(OverflowError, os.pipe2, _testcapi.UINT_MAX + 1)
@unittest.skipUnless(hasattr(posix, 'utime'), 'test needs posix.utime()')
def test_utime(self):
now = time.time()
posix.utime(support.TESTFN, None)
self.assertRaises(TypeError, posix.utime, support.TESTFN, (None, None))
self.assertRaises(TypeError, posix.utime, support.TESTFN, (now, None))
self.assertRaises(TypeError, posix.utime, support.TESTFN, (None, now))
posix.utime(support.TESTFN, (int(now), int(now)))
posix.utime(support.TESTFN, (now, now))
def _test_chflags_regular_file(self, chflags_func, target_file, **kwargs):
st = os.stat(target_file)
self.assertTrue(hasattr(st, 'st_flags'))
# ZFS returns EOPNOTSUPP when attempting to set flag UF_IMMUTABLE.
flags = st.st_flags | stat.UF_IMMUTABLE
try:
chflags_func(target_file, flags, **kwargs)
except OSError as err:
if err.errno != errno.EOPNOTSUPP:
raise
msg = 'chflag UF_IMMUTABLE not supported by underlying fs'
self.skipTest(msg)
try:
new_st = os.stat(target_file)
self.assertEqual(st.st_flags | stat.UF_IMMUTABLE, new_st.st_flags)
try:
fd = open(target_file, 'w+')
except OSError as e:
self.assertEqual(e.errno, errno.EPERM)
finally:
posix.chflags(target_file, st.st_flags)
@unittest.skipUnless(hasattr(posix, 'chflags'), 'test needs os.chflags()')
def test_chflags(self):
self._test_chflags_regular_file(posix.chflags, support.TESTFN)
@unittest.skipUnless(hasattr(posix, 'lchflags'), 'test needs os.lchflags()')
def test_lchflags_regular_file(self):
self._test_chflags_regular_file(posix.lchflags, support.TESTFN)
self._test_chflags_regular_file(posix.chflags, support.TESTFN, follow_symlinks=False)
@unittest.skipUnless(hasattr(posix, 'lchflags'), 'test needs os.lchflags()')
def test_lchflags_symlink(self):
testfn_st = os.stat(support.TESTFN)
self.assertTrue(hasattr(testfn_st, 'st_flags'))
os.symlink(support.TESTFN, _DUMMY_SYMLINK)
self.teardown_files.append(_DUMMY_SYMLINK)
dummy_symlink_st = os.lstat(_DUMMY_SYMLINK)
def chflags_nofollow(path, flags):
return posix.chflags(path, flags, follow_symlinks=False)
for fn in (posix.lchflags, chflags_nofollow):
# ZFS returns EOPNOTSUPP when attempting to set flag UF_IMMUTABLE.
flags = dummy_symlink_st.st_flags | stat.UF_IMMUTABLE
try:
fn(_DUMMY_SYMLINK, flags)
except OSError as err:
if err.errno != errno.EOPNOTSUPP:
raise
msg = 'chflag UF_IMMUTABLE not supported by underlying fs'
self.skipTest(msg)
try:
new_testfn_st = os.stat(support.TESTFN)
new_dummy_symlink_st = os.lstat(_DUMMY_SYMLINK)
self.assertEqual(testfn_st.st_flags, new_testfn_st.st_flags)
self.assertEqual(dummy_symlink_st.st_flags | stat.UF_IMMUTABLE,
new_dummy_symlink_st.st_flags)
finally:
fn(_DUMMY_SYMLINK, dummy_symlink_st.st_flags)
def test_environ(self):
if os.name == "nt":
item_type = str
else:
item_type = bytes
for k, v in posix.environ.items():
self.assertEqual(type(k), item_type)
self.assertEqual(type(v), item_type)
@unittest.skipUnless(hasattr(posix, 'getcwd'), 'test needs posix.getcwd()')
def test_getcwd_long_pathnames(self):
dirname = 'getcwd-test-directory-0123456789abcdef-01234567890abcdef'
curdir = os.getcwd()
base_path = os.path.abspath(support.TESTFN) + '.getcwd'
try:
os.mkdir(base_path)
os.chdir(base_path)
except:
# Just returning nothing instead of the SkipTest exception, because
# the test results in Error in that case. Is that ok?
# raise unittest.SkipTest("cannot create directory for testing")
return
def _create_and_do_getcwd(dirname, current_path_length = 0):
try:
os.mkdir(dirname)
except:
raise unittest.SkipTest("mkdir cannot create directory sufficiently deep for getcwd test")
os.chdir(dirname)
try:
os.getcwd()
if current_path_length < 1027:
_create_and_do_getcwd(dirname, current_path_length + len(dirname) + 1)
finally:
os.chdir('..')
os.rmdir(dirname)
_create_and_do_getcwd(dirname)
finally:
os.chdir(curdir)
support.rmtree(base_path)
@unittest.skipUnless(hasattr(posix, 'getgrouplist'), "test needs posix.getgrouplist()")
@unittest.skipUnless(hasattr(pwd, 'getpwuid'), "test needs pwd.getpwuid()")
@unittest.skipUnless(hasattr(os, 'getuid'), "test needs os.getuid()")
def test_getgrouplist(self):
user = pwd.getpwuid(os.getuid())[0]
group = pwd.getpwuid(os.getuid())[3]
self.assertIn(group, posix.getgrouplist(user, group))
@unittest.skipUnless(hasattr(os, 'getegid'), "test needs os.getegid()")
def test_getgroups(self):
with os.popen('id -G 2>/dev/null') as idg:
groups = idg.read().strip()
ret = idg.close()
if ret is not None or not groups:
raise unittest.SkipTest("need working 'id -G'")
# Issues 16698: OS X ABIs prior to 10.6 have limits on getgroups()
if sys.platform == 'darwin':
import sysconfig
dt = sysconfig.get_config_var('MACOSX_DEPLOYMENT_TARGET') or '10.0'
if tuple(int(n) for n in dt.split('.')[0:2]) < (10, 6):
raise unittest.SkipTest("getgroups(2) is broken prior to 10.6")
# 'id -G' and 'os.getgroups()' should return the same
# groups, ignoring order and duplicates.
# #10822 - it is implementation defined whether posix.getgroups()
# includes the effective gid so we include it anyway, since id -G does
self.assertEqual(
set([int(x) for x in groups.split()]),
set(posix.getgroups() + [posix.getegid()]))
# tests for the posix *at functions follow
@unittest.skipUnless(os.access in os.supports_dir_fd, "test needs dir_fd support for os.access()")
def test_access_dir_fd(self):
f = posix.open(posix.getcwd(), posix.O_RDONLY)
try:
self.assertTrue(posix.access(support.TESTFN, os.R_OK, dir_fd=f))
finally:
posix.close(f)
@unittest.skipUnless(os.chmod in os.supports_dir_fd, "test needs dir_fd support in os.chmod()")
def test_chmod_dir_fd(self):
os.chmod(support.TESTFN, stat.S_IRUSR)
f = posix.open(posix.getcwd(), posix.O_RDONLY)
try:
posix.chmod(support.TESTFN, stat.S_IRUSR | stat.S_IWUSR, dir_fd=f)
s = posix.stat(support.TESTFN)
self.assertEqual(s[0] & stat.S_IRWXU, stat.S_IRUSR | stat.S_IWUSR)
finally:
posix.close(f)
@unittest.skipUnless(os.chown in os.supports_dir_fd, "test needs dir_fd support in os.chown()")
def test_chown_dir_fd(self):
support.unlink(support.TESTFN)
support.create_empty_file(support.TESTFN)
f = posix.open(posix.getcwd(), posix.O_RDONLY)
try:
posix.chown(support.TESTFN, os.getuid(), os.getgid(), dir_fd=f)
finally:
posix.close(f)
@unittest.skipUnless(os.stat in os.supports_dir_fd, "test needs dir_fd support in os.stat()")
def test_stat_dir_fd(self):
support.unlink(support.TESTFN)
with open(support.TESTFN, 'w') as outfile:
outfile.write("testline\n")
f = posix.open(posix.getcwd(), posix.O_RDONLY)
try:
s1 = posix.stat(support.TESTFN)
s2 = posix.stat(support.TESTFN, dir_fd=f)
self.assertEqual(s1, s2)
s2 = posix.stat(support.TESTFN, dir_fd=None)
self.assertEqual(s1, s2)
self.assertRaisesRegex(TypeError, 'should be integer, not',
posix.stat, support.TESTFN, dir_fd=posix.getcwd())
self.assertRaisesRegex(TypeError, 'should be integer, not',
posix.stat, support.TESTFN, dir_fd=float(f))
self.assertRaises(OverflowError,
posix.stat, support.TESTFN, dir_fd=10**20)
finally:
posix.close(f)
@unittest.skipUnless(os.utime in os.supports_dir_fd, "test needs dir_fd support in os.utime()")
def test_utime_dir_fd(self):
f = posix.open(posix.getcwd(), posix.O_RDONLY)
try:
now = time.time()
posix.utime(support.TESTFN, None, dir_fd=f)
posix.utime(support.TESTFN, dir_fd=f)
self.assertRaises(TypeError, posix.utime, support.TESTFN, now, dir_fd=f)
self.assertRaises(TypeError, posix.utime, support.TESTFN, (None, None), dir_fd=f)
self.assertRaises(TypeError, posix.utime, support.TESTFN, (now, None), dir_fd=f)
self.assertRaises(TypeError, posix.utime, support.TESTFN, (None, now), dir_fd=f)
self.assertRaises(TypeError, posix.utime, support.TESTFN, (now, "x"), dir_fd=f)
posix.utime(support.TESTFN, (int(now), int(now)), dir_fd=f)
posix.utime(support.TESTFN, (now, now), dir_fd=f)
posix.utime(support.TESTFN,
(int(now), int((now - int(now)) * 1e9)), dir_fd=f)
posix.utime(support.TESTFN, dir_fd=f,
times=(int(now), int((now - int(now)) * 1e9)))
# try dir_fd and follow_symlinks together
if os.utime in os.supports_follow_symlinks:
try:
posix.utime(support.TESTFN, follow_symlinks=False, dir_fd=f)
except ValueError:
# whoops! using both together not supported on this platform.
pass
finally:
posix.close(f)
@unittest.skipUnless(os.link in os.supports_dir_fd, "test needs dir_fd support in os.link()")
def test_link_dir_fd(self):
f = posix.open(posix.getcwd(), posix.O_RDONLY)
try:
posix.link(support.TESTFN, support.TESTFN + 'link', src_dir_fd=f, dst_dir_fd=f)
# should have same inodes
self.assertEqual(posix.stat(support.TESTFN)[1],
posix.stat(support.TESTFN + 'link')[1])
finally:
posix.close(f)
support.unlink(support.TESTFN + 'link')
@unittest.skipUnless(os.mkdir in os.supports_dir_fd, "test needs dir_fd support in os.mkdir()")
def test_mkdir_dir_fd(self):
f = posix.open(posix.getcwd(), posix.O_RDONLY)
try:
posix.mkdir(support.TESTFN + 'dir', dir_fd=f)
posix.stat(support.TESTFN + 'dir') # should not raise exception
finally:
posix.close(f)
support.rmtree(support.TESTFN + 'dir')
@unittest.skipUnless((os.mknod in os.supports_dir_fd) and hasattr(stat, 'S_IFIFO'),
"test requires both stat.S_IFIFO and dir_fd support for os.mknod()")
def test_mknod_dir_fd(self):
# Test using mknodat() to create a FIFO (the only use specified
# by POSIX).
support.unlink(support.TESTFN)
mode = stat.S_IFIFO | stat.S_IRUSR | stat.S_IWUSR
f = posix.open(posix.getcwd(), posix.O_RDONLY)
try:
posix.mknod(support.TESTFN, mode, 0, dir_fd=f)
except OSError as e:
# Some old systems don't allow unprivileged users to use
# mknod(), or only support creating device nodes.
self.assertIn(e.errno, (errno.EPERM, errno.EINVAL))
else:
self.assertTrue(stat.S_ISFIFO(posix.stat(support.TESTFN).st_mode))
finally:
posix.close(f)
@unittest.skipUnless(os.open in os.supports_dir_fd, "test needs dir_fd support in os.open()")
def test_open_dir_fd(self):
support.unlink(support.TESTFN)
with open(support.TESTFN, 'w') as outfile:
outfile.write("testline\n")
a = posix.open(posix.getcwd(), posix.O_RDONLY)
b = posix.open(support.TESTFN, posix.O_RDONLY, dir_fd=a)
try:
res = posix.read(b, 9).decode(encoding="utf-8")
self.assertEqual("testline\n", res)
finally:
posix.close(a)
posix.close(b)
@unittest.skipUnless(os.readlink in os.supports_dir_fd, "test needs dir_fd support in os.readlink()")
def test_readlink_dir_fd(self):
os.symlink(support.TESTFN, support.TESTFN + 'link')
f = posix.open(posix.getcwd(), posix.O_RDONLY)
try:
self.assertEqual(posix.readlink(support.TESTFN + 'link'),
posix.readlink(support.TESTFN + 'link', dir_fd=f))
finally:
support.unlink(support.TESTFN + 'link')
posix.close(f)
@unittest.skipUnless(os.rename in os.supports_dir_fd, "test needs dir_fd support in os.rename()")
def test_rename_dir_fd(self):
support.unlink(support.TESTFN)
support.create_empty_file(support.TESTFN + 'ren')
f = posix.open(posix.getcwd(), posix.O_RDONLY)
try:
posix.rename(support.TESTFN + 'ren', support.TESTFN, src_dir_fd=f, dst_dir_fd=f)
except:
posix.rename(support.TESTFN + 'ren', support.TESTFN)
raise
else:
posix.stat(support.TESTFN) # should not raise exception
finally:
posix.close(f)
@unittest.skipUnless(os.symlink in os.supports_dir_fd, "test needs dir_fd support in os.symlink()")
def test_symlink_dir_fd(self):
f = posix.open(posix.getcwd(), posix.O_RDONLY)
try:
posix.symlink(support.TESTFN, support.TESTFN + 'link', dir_fd=f)
self.assertEqual(posix.readlink(support.TESTFN + 'link'), support.TESTFN)
finally:
posix.close(f)
support.unlink(support.TESTFN + 'link')
@unittest.skipUnless(os.unlink in os.supports_dir_fd, "test needs dir_fd support in os.unlink()")
def test_unlink_dir_fd(self):
f = posix.open(posix.getcwd(), posix.O_RDONLY)
support.create_empty_file(support.TESTFN + 'del')
posix.stat(support.TESTFN + 'del') # should not raise exception
try:
posix.unlink(support.TESTFN + 'del', dir_fd=f)
except:
support.unlink(support.TESTFN + 'del')
raise
        else:
            # the unlinked file must no longer exist
            self.assertRaises(OSError, posix.stat, support.TESTFN + 'del')
finally:
posix.close(f)
@unittest.skipUnless(os.mkfifo in os.supports_dir_fd, "test needs dir_fd support in os.mkfifo()")
def test_mkfifo_dir_fd(self):
support.unlink(support.TESTFN)
f = posix.open(posix.getcwd(), posix.O_RDONLY)
try:
posix.mkfifo(support.TESTFN, stat.S_IRUSR | stat.S_IWUSR, dir_fd=f)
self.assertTrue(stat.S_ISFIFO(posix.stat(support.TESTFN).st_mode))
finally:
posix.close(f)
requires_sched_h = unittest.skipUnless(hasattr(posix, 'sched_yield'),
"don't have scheduling support")
requires_sched_affinity = unittest.skipUnless(hasattr(posix, 'sched_setaffinity'),
"don't have sched affinity support")
@requires_sched_h
def test_sched_yield(self):
# This has no error conditions (at least on Linux).
posix.sched_yield()
@requires_sched_h
@unittest.skipUnless(hasattr(posix, 'sched_get_priority_max'),
"requires sched_get_priority_max()")
def test_sched_priority(self):
# Round-robin usually has interesting priorities.
pol = posix.SCHED_RR
lo = posix.sched_get_priority_min(pol)
hi = posix.sched_get_priority_max(pol)
self.assertIsInstance(lo, int)
self.assertIsInstance(hi, int)
self.assertGreaterEqual(hi, lo)
# OSX evidently just returns 15 without checking the argument.
if sys.platform != "darwin":
self.assertRaises(OSError, posix.sched_get_priority_min, -23)
self.assertRaises(OSError, posix.sched_get_priority_max, -23)
@unittest.skipUnless(hasattr(posix, 'sched_setscheduler'), "can't change scheduler")
def test_get_and_set_scheduler_and_param(self):
possible_schedulers = [sched for name, sched in posix.__dict__.items()
if name.startswith("SCHED_")]
mine = posix.sched_getscheduler(0)
self.assertIn(mine, possible_schedulers)
try:
parent = posix.sched_getscheduler(os.getppid())
except OSError as e:
if e.errno != errno.EPERM:
raise
else:
self.assertIn(parent, possible_schedulers)
self.assertRaises(OSError, posix.sched_getscheduler, -1)
self.assertRaises(OSError, posix.sched_getparam, -1)
param = posix.sched_getparam(0)
self.assertIsInstance(param.sched_priority, int)
# POSIX states that calling sched_setparam() or sched_setscheduler() on
# a process with a scheduling policy other than SCHED_FIFO or SCHED_RR
# is implementation-defined: NetBSD and FreeBSD can return EINVAL.
if not sys.platform.startswith(('freebsd', 'netbsd')):
try:
posix.sched_setscheduler(0, mine, param)
posix.sched_setparam(0, param)
except OSError as e:
if e.errno != errno.EPERM:
raise
self.assertRaises(OSError, posix.sched_setparam, -1, param)
self.assertRaises(OSError, posix.sched_setscheduler, -1, mine, param)
self.assertRaises(TypeError, posix.sched_setscheduler, 0, mine, None)
self.assertRaises(TypeError, posix.sched_setparam, 0, 43)
param = posix.sched_param(None)
self.assertRaises(TypeError, posix.sched_setparam, 0, param)
large = 214748364700
param = posix.sched_param(large)
self.assertRaises(OverflowError, posix.sched_setparam, 0, param)
param = posix.sched_param(sched_priority=-large)
self.assertRaises(OverflowError, posix.sched_setparam, 0, param)
@unittest.skipUnless(hasattr(posix, "sched_rr_get_interval"), "no function")
def test_sched_rr_get_interval(self):
try:
interval = posix.sched_rr_get_interval(0)
except OSError as e:
# This likely means that sched_rr_get_interval is only valid for
# processes with the SCHED_RR scheduler in effect.
if e.errno != errno.EINVAL:
raise
self.skipTest("only works on SCHED_RR processes")
self.assertIsInstance(interval, float)
# Reasonable constraints, I think.
self.assertGreaterEqual(interval, 0.)
self.assertLess(interval, 1.)
@requires_sched_affinity
def test_sched_getaffinity(self):
mask = posix.sched_getaffinity(0)
self.assertIsInstance(mask, set)
self.assertGreaterEqual(len(mask), 1)
self.assertRaises(OSError, posix.sched_getaffinity, -1)
for cpu in mask:
self.assertIsInstance(cpu, int)
self.assertGreaterEqual(cpu, 0)
self.assertLess(cpu, 1 << 32)
@requires_sched_affinity
def test_sched_setaffinity(self):
mask = posix.sched_getaffinity(0)
if len(mask) > 1:
# Empty masks are forbidden
mask.pop()
posix.sched_setaffinity(0, mask)
self.assertEqual(posix.sched_getaffinity(0), mask)
self.assertRaises(OSError, posix.sched_setaffinity, 0, [])
self.assertRaises(ValueError, posix.sched_setaffinity, 0, [-10])
self.assertRaises(OverflowError, posix.sched_setaffinity, 0, [1<<128])
self.assertRaises(OSError, posix.sched_setaffinity, -1, mask)
def test_rtld_constants(self):
# check presence of major RTLD_* constants
posix.RTLD_LAZY
posix.RTLD_NOW
posix.RTLD_GLOBAL
posix.RTLD_LOCAL
@unittest.skipUnless(hasattr(os, 'SEEK_HOLE'),
"test needs an OS that reports file holes")
def test_fs_holes(self):
# Even if the filesystem doesn't report holes,
# if the OS supports it the SEEK_* constants
# will be defined and will have a consistent
# behaviour:
# os.SEEK_DATA = current position
# os.SEEK_HOLE = end of file position
with open(support.TESTFN, 'r+b') as fp:
fp.write(b"hello")
fp.flush()
size = fp.tell()
fno = fp.fileno()
try :
for i in range(size):
self.assertEqual(i, os.lseek(fno, i, os.SEEK_DATA))
self.assertLessEqual(size, os.lseek(fno, i, os.SEEK_HOLE))
self.assertRaises(OSError, os.lseek, fno, size, os.SEEK_DATA)
self.assertRaises(OSError, os.lseek, fno, size, os.SEEK_HOLE)
except OSError :
# Some OSs claim to support SEEK_HOLE/SEEK_DATA
# but it is not true.
# For instance:
# http://lists.freebsd.org/pipermail/freebsd-amd64/2012-January/014332.html
raise unittest.SkipTest("OSError raised!")
def test_path_error2(self):
"""
Test functions that call path_error2(), providing two filenames in their exceptions.
"""
for name in ("rename", "replace", "link"):
function = getattr(os, name, None)
if function is None:
continue
for dst in ("noodly2", support.TESTFN):
try:
function('doesnotexistfilename', dst)
except OSError as e:
self.assertIn("'doesnotexistfilename' -> '{}'".format(dst), str(e))
break
else:
self.fail("No valid path_error2() test for os." + name)
class PosixGroupsTester(unittest.TestCase):
def setUp(self):
if posix.getuid() != 0:
raise unittest.SkipTest("not enough privileges")
if not hasattr(posix, 'getgroups'):
raise unittest.SkipTest("need posix.getgroups")
if sys.platform == 'darwin':
raise unittest.SkipTest("getgroups(2) is broken on OSX")
self.saved_groups = posix.getgroups()
def tearDown(self):
if hasattr(posix, 'setgroups'):
posix.setgroups(self.saved_groups)
elif hasattr(posix, 'initgroups'):
name = pwd.getpwuid(posix.getuid()).pw_name
posix.initgroups(name, self.saved_groups[0])
@unittest.skipUnless(hasattr(posix, 'initgroups'),
"test needs posix.initgroups()")
def test_initgroups(self):
# find missing group
g = max(self.saved_groups or [0]) + 1
name = pwd.getpwuid(posix.getuid()).pw_name
posix.initgroups(name, g)
self.assertIn(g, posix.getgroups())
@unittest.skipUnless(hasattr(posix, 'setgroups'),
"test needs posix.setgroups()")
def test_setgroups(self):
for groups in [[0], list(range(16))]:
posix.setgroups(groups)
self.assertListEqual(groups, posix.getgroups())
def test_main():
try:
support.run_unittest(PosixTester, PosixGroupsTester)
finally:
support.reap_children()
if __name__ == '__main__':
test_main()
=======
"Test posix functions"
from test import support
# Skip these tests if there is no posix module.
posix = support.import_module('posix')
import errno
import sys
import time
import os
import fcntl
import platform
import pwd
import shutil
import stat
import tempfile
import unittest
import warnings
_DUMMY_SYMLINK = os.path.join(tempfile.gettempdir(),
support.TESTFN + '-dummy-symlink')
class PosixTester(unittest.TestCase):
def setUp(self):
# create empty file
fp = open(support.TESTFN, 'w+')
fp.close()
self.teardown_files = [ support.TESTFN ]
self._warnings_manager = support.check_warnings()
self._warnings_manager.__enter__()
warnings.filterwarnings('ignore', '.* potential security risk .*',
RuntimeWarning)
def tearDown(self):
for teardown_file in self.teardown_files:
support.unlink(teardown_file)
self._warnings_manager.__exit__(None, None, None)
def testNoArgFunctions(self):
# test posix functions which take no arguments and have
# no side-effects which we need to cleanup (e.g., fork, wait, abort)
NO_ARG_FUNCTIONS = [ "ctermid", "getcwd", "getcwdb", "uname",
"times", "getloadavg",
"getegid", "geteuid", "getgid", "getgroups",
"getpid", "getpgrp", "getppid", "getuid", "sync",
]
for name in NO_ARG_FUNCTIONS:
posix_func = getattr(posix, name, None)
if posix_func is not None:
posix_func()
self.assertRaises(TypeError, posix_func, 1)
@unittest.skipUnless(hasattr(posix, 'getresuid'),
'test needs posix.getresuid()')
def test_getresuid(self):
user_ids = posix.getresuid()
self.assertEqual(len(user_ids), 3)
for val in user_ids:
self.assertGreaterEqual(val, 0)
@unittest.skipUnless(hasattr(posix, 'getresgid'),
'test needs posix.getresgid()')
def test_getresgid(self):
group_ids = posix.getresgid()
self.assertEqual(len(group_ids), 3)
for val in group_ids:
self.assertGreaterEqual(val, 0)
@unittest.skipUnless(hasattr(posix, 'setresuid'),
'test needs posix.setresuid()')
def test_setresuid(self):
current_user_ids = posix.getresuid()
self.assertIsNone(posix.setresuid(*current_user_ids))
# -1 means don't change that value.
self.assertIsNone(posix.setresuid(-1, -1, -1))
@unittest.skipUnless(hasattr(posix, 'setresuid'),
'test needs posix.setresuid()')
def test_setresuid_exception(self):
# Don't do this test if someone is silly enough to run us as root.
current_user_ids = posix.getresuid()
if 0 not in current_user_ids:
new_user_ids = (current_user_ids[0]+1, -1, -1)
self.assertRaises(OSError, posix.setresuid, *new_user_ids)
@unittest.skipUnless(hasattr(posix, 'setresgid'),
'test needs posix.setresgid()')
def test_setresgid(self):
current_group_ids = posix.getresgid()
self.assertIsNone(posix.setresgid(*current_group_ids))
# -1 means don't change that value.
self.assertIsNone(posix.setresgid(-1, -1, -1))
@unittest.skipUnless(hasattr(posix, 'setresgid'),
'test needs posix.setresgid()')
def test_setresgid_exception(self):
# Don't do this test if someone is silly enough to run us as root.
current_group_ids = posix.getresgid()
if 0 not in current_group_ids:
new_group_ids = (current_group_ids[0]+1, -1, -1)
self.assertRaises(OSError, posix.setresgid, *new_group_ids)
@unittest.skipUnless(hasattr(posix, 'initgroups'),
"test needs os.initgroups()")
def test_initgroups(self):
# It takes a string and an integer; check that it raises a TypeError
# for other argument lists.
self.assertRaises(TypeError, posix.initgroups)
self.assertRaises(TypeError, posix.initgroups, None)
self.assertRaises(TypeError, posix.initgroups, 3, "foo")
self.assertRaises(TypeError, posix.initgroups, "foo", 3, object())
# If a non-privileged user invokes it, it should fail with OSError
# EPERM.
if os.getuid() != 0:
try:
name = pwd.getpwuid(posix.getuid()).pw_name
except KeyError:
# the current UID may not have a pwd entry
raise unittest.SkipTest("need a pwd entry")
try:
posix.initgroups(name, 13)
except OSError as e:
self.assertEqual(e.errno, errno.EPERM)
else:
self.fail("Expected OSError to be raised by initgroups")
@unittest.skipUnless(hasattr(posix, 'statvfs'),
'test needs posix.statvfs()')
def test_statvfs(self):
self.assertTrue(posix.statvfs(os.curdir))
@unittest.skipUnless(hasattr(posix, 'fstatvfs'),
'test needs posix.fstatvfs()')
def test_fstatvfs(self):
fp = open(support.TESTFN)
try:
self.assertTrue(posix.fstatvfs(fp.fileno()))
self.assertTrue(posix.statvfs(fp.fileno()))
finally:
fp.close()
@unittest.skipUnless(hasattr(posix, 'ftruncate'),
'test needs posix.ftruncate()')
def test_ftruncate(self):
fp = open(support.TESTFN, 'w+')
try:
# we need to have some data to truncate
fp.write('test')
fp.flush()
posix.ftruncate(fp.fileno(), 0)
finally:
fp.close()
@unittest.skipUnless(hasattr(posix, 'truncate'), "test needs posix.truncate()")
def test_truncate(self):
with open(support.TESTFN, 'w') as fp:
fp.write('test')
fp.flush()
posix.truncate(support.TESTFN, 0)
@unittest.skipUnless(getattr(os, 'execve', None) in os.supports_fd, "test needs execve() to support the fd parameter")
@unittest.skipUnless(hasattr(os, 'fork'), "test needs os.fork()")
@unittest.skipUnless(hasattr(os, 'waitpid'), "test needs os.waitpid()")
def test_fexecve(self):
fp = os.open(sys.executable, os.O_RDONLY)
try:
pid = os.fork()
if pid == 0:
os.chdir(os.path.split(sys.executable)[0])
posix.execve(fp, [sys.executable, '-c', 'pass'], os.environ)
else:
self.assertEqual(os.waitpid(pid, 0), (pid, 0))
finally:
os.close(fp)
@unittest.skipUnless(hasattr(posix, 'waitid'), "test needs posix.waitid()")
@unittest.skipUnless(hasattr(os, 'fork'), "test needs os.fork()")
def test_waitid(self):
pid = os.fork()
if pid == 0:
os.chdir(os.path.split(sys.executable)[0])
posix.execve(sys.executable, [sys.executable, '-c', 'pass'], os.environ)
else:
res = posix.waitid(posix.P_PID, pid, posix.WEXITED)
self.assertEqual(pid, res.si_pid)
@unittest.skipUnless(hasattr(posix, 'lockf'), "test needs posix.lockf()")
def test_lockf(self):
fd = os.open(support.TESTFN, os.O_WRONLY | os.O_CREAT)
try:
os.write(fd, b'test')
os.lseek(fd, 0, os.SEEK_SET)
posix.lockf(fd, posix.F_LOCK, 4)
# section is locked
posix.lockf(fd, posix.F_ULOCK, 4)
finally:
os.close(fd)
@unittest.skipUnless(hasattr(posix, 'pread'), "test needs posix.pread()")
def test_pread(self):
fd = os.open(support.TESTFN, os.O_RDWR | os.O_CREAT)
try:
os.write(fd, b'test')
os.lseek(fd, 0, os.SEEK_SET)
self.assertEqual(b'es', posix.pread(fd, 2, 1))
# the first pread() shouldn't disturb the file offset
self.assertEqual(b'te', posix.read(fd, 2))
finally:
os.close(fd)
@unittest.skipUnless(hasattr(posix, 'pwrite'), "test needs posix.pwrite()")
def test_pwrite(self):
fd = os.open(support.TESTFN, os.O_RDWR | os.O_CREAT)
try:
os.write(fd, b'test')
os.lseek(fd, 0, os.SEEK_SET)
posix.pwrite(fd, b'xx', 1)
self.assertEqual(b'txxt', posix.read(fd, 4))
finally:
os.close(fd)
@unittest.skipUnless(hasattr(posix, 'posix_fallocate'),
"test needs posix.posix_fallocate()")
def test_posix_fallocate(self):
fd = os.open(support.TESTFN, os.O_WRONLY | os.O_CREAT)
try:
posix.posix_fallocate(fd, 0, 10)
except OSError as inst:
# issue10812, ZFS doesn't appear to support posix_fallocate,
# so skip Solaris-based since they are likely to have ZFS.
if inst.errno != errno.EINVAL or not sys.platform.startswith("sunos"):
raise
finally:
os.close(fd)
@unittest.skipUnless(hasattr(posix, 'posix_fadvise'),
"test needs posix.posix_fadvise()")
def test_posix_fadvise(self):
fd = os.open(support.TESTFN, os.O_RDONLY)
try:
posix.posix_fadvise(fd, 0, 0, posix.POSIX_FADV_WILLNEED)
finally:
os.close(fd)
@unittest.skipUnless(os.utime in os.supports_fd, "test needs fd support in os.utime")
def test_utime_with_fd(self):
now = time.time()
fd = os.open(support.TESTFN, os.O_RDONLY)
try:
posix.utime(fd)
posix.utime(fd, None)
self.assertRaises(TypeError, posix.utime, fd, (None, None))
self.assertRaises(TypeError, posix.utime, fd, (now, None))
self.assertRaises(TypeError, posix.utime, fd, (None, now))
posix.utime(fd, (int(now), int(now)))
posix.utime(fd, (now, now))
self.assertRaises(ValueError, posix.utime, fd, (now, now), ns=(now, now))
self.assertRaises(ValueError, posix.utime, fd, (now, 0), ns=(None, None))
self.assertRaises(ValueError, posix.utime, fd, (None, None), ns=(now, 0))
posix.utime(fd, (int(now), int((now - int(now)) * 1e9)))
posix.utime(fd, ns=(int(now), int((now - int(now)) * 1e9)))
finally:
os.close(fd)
@unittest.skipUnless(os.utime in os.supports_follow_symlinks, "test needs follow_symlinks support in os.utime")
def test_utime_nofollow_symlinks(self):
now = time.time()
posix.utime(support.TESTFN, None, follow_symlinks=False)
self.assertRaises(TypeError, posix.utime, support.TESTFN, (None, None), follow_symlinks=False)
self.assertRaises(TypeError, posix.utime, support.TESTFN, (now, None), follow_symlinks=False)
self.assertRaises(TypeError, posix.utime, support.TESTFN, (None, now), follow_symlinks=False)
posix.utime(support.TESTFN, (int(now), int(now)), follow_symlinks=False)
posix.utime(support.TESTFN, (now, now), follow_symlinks=False)
posix.utime(support.TESTFN, follow_symlinks=False)
@unittest.skipUnless(hasattr(posix, 'writev'), "test needs posix.writev()")
def test_writev(self):
fd = os.open(support.TESTFN, os.O_RDWR | os.O_CREAT)
try:
n = os.writev(fd, (b'test1', b'tt2', b't3'))
self.assertEqual(n, 10)
os.lseek(fd, 0, os.SEEK_SET)
self.assertEqual(b'test1tt2t3', posix.read(fd, 10))
# Issue #20113: empty list of buffers should not crash
try:
size = posix.writev(fd, [])
except OSError:
# writev(fd, []) raises OSError(22, "Invalid argument")
# on OpenIndiana
pass
else:
self.assertEqual(size, 0)
finally:
os.close(fd)
@unittest.skipUnless(hasattr(posix, 'readv'), "test needs posix.readv()")
def test_readv(self):
fd = os.open(support.TESTFN, os.O_RDWR | os.O_CREAT)
try:
os.write(fd, b'test1tt2t3')
os.lseek(fd, 0, os.SEEK_SET)
buf = [bytearray(i) for i in [5, 3, 2]]
self.assertEqual(posix.readv(fd, buf), 10)
self.assertEqual([b'test1', b'tt2', b't3'], [bytes(i) for i in buf])
# Issue #20113: empty list of buffers should not crash
try:
size = posix.readv(fd, [])
except OSError:
# readv(fd, []) raises OSError(22, "Invalid argument")
# on OpenIndiana
pass
else:
self.assertEqual(size, 0)
finally:
os.close(fd)
@unittest.skipUnless(hasattr(posix, 'dup'),
'test needs posix.dup()')
def test_dup(self):
fp = open(support.TESTFN)
try:
fd = posix.dup(fp.fileno())
self.assertIsInstance(fd, int)
os.close(fd)
finally:
fp.close()
@unittest.skipUnless(hasattr(posix, 'confstr'),
'test needs posix.confstr()')
def test_confstr(self):
self.assertRaises(ValueError, posix.confstr, "CS_garbage")
self.assertEqual(len(posix.confstr("CS_PATH")) > 0, True)
@unittest.skipUnless(hasattr(posix, 'dup2'),
'test needs posix.dup2()')
def test_dup2(self):
fp1 = open(support.TESTFN)
fp2 = open(support.TESTFN)
try:
posix.dup2(fp1.fileno(), fp2.fileno())
finally:
fp1.close()
fp2.close()
@unittest.skipUnless(hasattr(os, 'O_CLOEXEC'), "needs os.O_CLOEXEC")
@support.requires_linux_version(2, 6, 23)
def test_oscloexec(self):
fd = os.open(support.TESTFN, os.O_RDONLY|os.O_CLOEXEC)
self.addCleanup(os.close, fd)
self.assertTrue(fcntl.fcntl(fd, fcntl.F_GETFD) & fcntl.FD_CLOEXEC)
@unittest.skipUnless(hasattr(posix, 'O_EXLOCK'),
'test needs posix.O_EXLOCK')
def test_osexlock(self):
fd = os.open(support.TESTFN,
os.O_WRONLY|os.O_EXLOCK|os.O_CREAT)
self.assertRaises(OSError, os.open, support.TESTFN,
os.O_WRONLY|os.O_EXLOCK|os.O_NONBLOCK)
os.close(fd)
if hasattr(posix, "O_SHLOCK"):
fd = os.open(support.TESTFN,
os.O_WRONLY|os.O_SHLOCK|os.O_CREAT)
self.assertRaises(OSError, os.open, support.TESTFN,
os.O_WRONLY|os.O_EXLOCK|os.O_NONBLOCK)
os.close(fd)
@unittest.skipUnless(hasattr(posix, 'O_SHLOCK'),
'test needs posix.O_SHLOCK')
def test_osshlock(self):
fd1 = os.open(support.TESTFN,
os.O_WRONLY|os.O_SHLOCK|os.O_CREAT)
fd2 = os.open(support.TESTFN,
os.O_WRONLY|os.O_SHLOCK|os.O_CREAT)
os.close(fd2)
os.close(fd1)
if hasattr(posix, "O_EXLOCK"):
fd = os.open(support.TESTFN,
os.O_WRONLY|os.O_SHLOCK|os.O_CREAT)
self.assertRaises(OSError, os.open, support.TESTFN,
os.O_RDONLY|os.O_EXLOCK|os.O_NONBLOCK)
os.close(fd)
@unittest.skipUnless(hasattr(posix, 'fstat'),
'test needs posix.fstat()')
def test_fstat(self):
fp = open(support.TESTFN)
try:
self.assertTrue(posix.fstat(fp.fileno()))
self.assertTrue(posix.stat(fp.fileno()))
self.assertRaisesRegex(TypeError,
'should be string, bytes or integer, not',
posix.stat, float(fp.fileno()))
finally:
fp.close()
@unittest.skipUnless(hasattr(posix, 'stat'),
'test needs posix.stat()')
def test_stat(self):
self.assertTrue(posix.stat(support.TESTFN))
self.assertTrue(posix.stat(os.fsencode(support.TESTFN)))
self.assertTrue(posix.stat(bytearray(os.fsencode(support.TESTFN))))
self.assertRaisesRegex(TypeError,
'can\'t specify None for path argument',
posix.stat, None)
self.assertRaisesRegex(TypeError,
'should be string, bytes or integer, not',
posix.stat, list(support.TESTFN))
self.assertRaisesRegex(TypeError,
'should be string, bytes or integer, not',
posix.stat, list(os.fsencode(support.TESTFN)))
@unittest.skipUnless(hasattr(posix, 'mkfifo'), "don't have mkfifo()")
def test_mkfifo(self):
support.unlink(support.TESTFN)
posix.mkfifo(support.TESTFN, stat.S_IRUSR | stat.S_IWUSR)
self.assertTrue(stat.S_ISFIFO(posix.stat(support.TESTFN).st_mode))
@unittest.skipUnless(hasattr(posix, 'mknod') and hasattr(stat, 'S_IFIFO'),
"don't have mknod()/S_IFIFO")
def test_mknod(self):
# Test using mknod() to create a FIFO (the only use specified
# by POSIX).
support.unlink(support.TESTFN)
mode = stat.S_IFIFO | stat.S_IRUSR | stat.S_IWUSR
try:
posix.mknod(support.TESTFN, mode, 0)
except OSError as e:
# Some old systems don't allow unprivileged users to use
# mknod(), or only support creating device nodes.
self.assertIn(e.errno, (errno.EPERM, errno.EINVAL))
else:
self.assertTrue(stat.S_ISFIFO(posix.stat(support.TESTFN).st_mode))
def _test_all_chown_common(self, chown_func, first_param, stat_func):
"""Common code for chown, fchown and lchown tests."""
def check_stat(uid, gid):
if stat_func is not None:
stat = stat_func(first_param)
self.assertEqual(stat.st_uid, uid)
self.assertEqual(stat.st_gid, gid)
uid = os.getuid()
gid = os.getgid()
# test a successful chown call
chown_func(first_param, uid, gid)
check_stat(uid, gid)
chown_func(first_param, -1, gid)
check_stat(uid, gid)
chown_func(first_param, uid, -1)
check_stat(uid, gid)
if uid == 0:
# Try an amusingly large uid/gid to make sure we handle
# large unsigned values. (chown lets you use any
# uid/gid you like, even if they aren't defined.)
#
# This problem keeps coming up:
# http://bugs.python.org/issue1747858
# http://bugs.python.org/issue4591
# http://bugs.python.org/issue15301
# Hopefully the fix in 4591 fixes it for good!
#
# This part of the test only runs when run as root.
# Only scary people run their tests as root.
big_value = 2**31
chown_func(first_param, big_value, big_value)
check_stat(big_value, big_value)
chown_func(first_param, -1, -1)
check_stat(big_value, big_value)
chown_func(first_param, uid, gid)
check_stat(uid, gid)
elif platform.system() in ('HP-UX', 'SunOS'):
# HP-UX and Solaris can allow a non-root user to chown() to root
# (issue #5113)
raise unittest.SkipTest("Skipping because of non-standard chown() "
"behavior")
else:
# non-root cannot chown to root, raises OSError
self.assertRaises(OSError, chown_func, first_param, 0, 0)
check_stat(uid, gid)
self.assertRaises(OSError, chown_func, first_param, 0, -1)
check_stat(uid, gid)
if 0 not in os.getgroups():
self.assertRaises(OSError, chown_func, first_param, -1, 0)
check_stat(uid, gid)
# test illegal types
for t in str, float:
self.assertRaises(TypeError, chown_func, first_param, t(uid), gid)
check_stat(uid, gid)
self.assertRaises(TypeError, chown_func, first_param, uid, t(gid))
check_stat(uid, gid)
@unittest.skipUnless(hasattr(posix, 'chown'), "test needs os.chown()")
def test_chown(self):
# raise an OSError if the file does not exist
os.unlink(support.TESTFN)
self.assertRaises(OSError, posix.chown, support.TESTFN, -1, -1)
# re-create the file
support.create_empty_file(support.TESTFN)
self._test_all_chown_common(posix.chown, support.TESTFN,
getattr(posix, 'stat', None))
@unittest.skipUnless(hasattr(posix, 'fchown'), "test needs os.fchown()")
def test_fchown(self):
os.unlink(support.TESTFN)
# re-create the file
test_file = open(support.TESTFN, 'w')
try:
fd = test_file.fileno()
self._test_all_chown_common(posix.fchown, fd,
getattr(posix, 'fstat', None))
finally:
test_file.close()
@unittest.skipUnless(hasattr(posix, 'lchown'), "test needs os.lchown()")
def test_lchown(self):
os.unlink(support.TESTFN)
# create a symlink
os.symlink(_DUMMY_SYMLINK, support.TESTFN)
self._test_all_chown_common(posix.lchown, support.TESTFN,
getattr(posix, 'lstat', None))
@unittest.skipUnless(hasattr(posix, 'chdir'), 'test needs posix.chdir()')
def test_chdir(self):
posix.chdir(os.curdir)
self.assertRaises(OSError, posix.chdir, support.TESTFN)
def test_listdir(self):
self.assertTrue(support.TESTFN in posix.listdir(os.curdir))
def test_listdir_default(self):
# When listdir is called without argument,
# it's the same as listdir(os.curdir).
self.assertTrue(support.TESTFN in posix.listdir())
def test_listdir_bytes(self):
# When listdir is called with a bytes object,
# the returned strings are of type bytes.
self.assertTrue(os.fsencode(support.TESTFN) in posix.listdir(b'.'))
@unittest.skipUnless(posix.listdir in os.supports_fd,
"test needs fd support for posix.listdir()")
def test_listdir_fd(self):
f = posix.open(posix.getcwd(), posix.O_RDONLY)
self.addCleanup(posix.close, f)
self.assertEqual(
sorted(posix.listdir('.')),
sorted(posix.listdir(f))
)
# Check that the fd offset was reset (issue #13739)
self.assertEqual(
sorted(posix.listdir('.')),
sorted(posix.listdir(f))
)
@unittest.skipUnless(hasattr(posix, 'access'), 'test needs posix.access()')
def test_access(self):
self.assertTrue(posix.access(support.TESTFN, os.R_OK))
@unittest.skipUnless(hasattr(posix, 'umask'), 'test needs posix.umask()')
def test_umask(self):
old_mask = posix.umask(0)
self.assertIsInstance(old_mask, int)
posix.umask(old_mask)
@unittest.skipUnless(hasattr(posix, 'strerror'),
'test needs posix.strerror()')
def test_strerror(self):
self.assertTrue(posix.strerror(0))
@unittest.skipUnless(hasattr(posix, 'pipe'), 'test needs posix.pipe()')
def test_pipe(self):
reader, writer = posix.pipe()
os.close(reader)
os.close(writer)
@unittest.skipUnless(hasattr(os, 'pipe2'), "test needs os.pipe2()")
@support.requires_linux_version(2, 6, 27)
def test_pipe2(self):
self.assertRaises(TypeError, os.pipe2, 'DEADBEEF')
self.assertRaises(TypeError, os.pipe2, 0, 0)
# try calling with flags = 0, like os.pipe()
r, w = os.pipe2(0)
os.close(r)
os.close(w)
# test flags
r, w = os.pipe2(os.O_CLOEXEC|os.O_NONBLOCK)
self.addCleanup(os.close, r)
self.addCleanup(os.close, w)
self.assertFalse(os.get_inheritable(r))
self.assertFalse(os.get_inheritable(w))
self.assertTrue(fcntl.fcntl(r, fcntl.F_GETFL) & os.O_NONBLOCK)
self.assertTrue(fcntl.fcntl(w, fcntl.F_GETFL) & os.O_NONBLOCK)
# try reading from an empty pipe: this should fail, not block
self.assertRaises(OSError, os.read, r, 1)
# try a write big enough to fill-up the pipe: this should either
# fail or perform a partial write, not block
try:
os.write(w, b'x' * support.PIPE_MAX_SIZE)
except OSError:
pass
@support.cpython_only
@unittest.skipUnless(hasattr(os, 'pipe2'), "test needs os.pipe2()")
@support.requires_linux_version(2, 6, 27)
def test_pipe2_c_limits(self):
# Issue 15989
import _testcapi
self.assertRaises(OverflowError, os.pipe2, _testcapi.INT_MAX + 1)
self.assertRaises(OverflowError, os.pipe2, _testcapi.UINT_MAX + 1)
@unittest.skipUnless(hasattr(posix, 'utime'), 'test needs posix.utime()')
def test_utime(self):
now = time.time()
posix.utime(support.TESTFN, None)
self.assertRaises(TypeError, posix.utime, support.TESTFN, (None, None))
self.assertRaises(TypeError, posix.utime, support.TESTFN, (now, None))
self.assertRaises(TypeError, posix.utime, support.TESTFN, (None, now))
posix.utime(support.TESTFN, (int(now), int(now)))
posix.utime(support.TESTFN, (now, now))
def _test_chflags_regular_file(self, chflags_func, target_file, **kwargs):
st = os.stat(target_file)
self.assertTrue(hasattr(st, 'st_flags'))
# ZFS returns EOPNOTSUPP when attempting to set flag UF_IMMUTABLE.
flags = st.st_flags | stat.UF_IMMUTABLE
try:
chflags_func(target_file, flags, **kwargs)
except OSError as err:
if err.errno != errno.EOPNOTSUPP:
raise
msg = 'chflag UF_IMMUTABLE not supported by underlying fs'
self.skipTest(msg)
try:
new_st = os.stat(target_file)
self.assertEqual(st.st_flags | stat.UF_IMMUTABLE, new_st.st_flags)
try:
fd = open(target_file, 'w+')
except OSError as e:
self.assertEqual(e.errno, errno.EPERM)
finally:
posix.chflags(target_file, st.st_flags)
@unittest.skipUnless(hasattr(posix, 'chflags'), 'test needs os.chflags()')
def test_chflags(self):
self._test_chflags_regular_file(posix.chflags, support.TESTFN)
@unittest.skipUnless(hasattr(posix, 'lchflags'), 'test needs os.lchflags()')
def test_lchflags_regular_file(self):
self._test_chflags_regular_file(posix.lchflags, support.TESTFN)
self._test_chflags_regular_file(posix.chflags, support.TESTFN, follow_symlinks=False)
@unittest.skipUnless(hasattr(posix, 'lchflags'), 'test needs os.lchflags()')
def test_lchflags_symlink(self):
testfn_st = os.stat(support.TESTFN)
self.assertTrue(hasattr(testfn_st, 'st_flags'))
os.symlink(support.TESTFN, _DUMMY_SYMLINK)
self.teardown_files.append(_DUMMY_SYMLINK)
dummy_symlink_st = os.lstat(_DUMMY_SYMLINK)
def chflags_nofollow(path, flags):
return posix.chflags(path, flags, follow_symlinks=False)
for fn in (posix.lchflags, chflags_nofollow):
# ZFS returns EOPNOTSUPP when attempting to set flag UF_IMMUTABLE.
flags = dummy_symlink_st.st_flags | stat.UF_IMMUTABLE
try:
fn(_DUMMY_SYMLINK, flags)
except OSError as err:
if err.errno != errno.EOPNOTSUPP:
raise
msg = 'chflag UF_IMMUTABLE not supported by underlying fs'
self.skipTest(msg)
try:
new_testfn_st = os.stat(support.TESTFN)
new_dummy_symlink_st = os.lstat(_DUMMY_SYMLINK)
self.assertEqual(testfn_st.st_flags, new_testfn_st.st_flags)
self.assertEqual(dummy_symlink_st.st_flags | stat.UF_IMMUTABLE,
new_dummy_symlink_st.st_flags)
finally:
fn(_DUMMY_SYMLINK, dummy_symlink_st.st_flags)
def test_environ(self):
if os.name == "nt":
item_type = str
else:
item_type = bytes
for k, v in posix.environ.items():
self.assertEqual(type(k), item_type)
self.assertEqual(type(v), item_type)
@unittest.skipUnless(hasattr(posix, 'getcwd'), 'test needs posix.getcwd()')
def test_getcwd_long_pathnames(self):
dirname = 'getcwd-test-directory-0123456789abcdef-01234567890abcdef'
curdir = os.getcwd()
base_path = os.path.abspath(support.TESTFN) + '.getcwd'
try:
os.mkdir(base_path)
os.chdir(base_path)
except:
            # Just return instead of raising SkipTest: raising here was observed
            # to make the test report an error rather than a skip.
# raise unittest.SkipTest("cannot create directory for testing")
return
def _create_and_do_getcwd(dirname, current_path_length = 0):
try:
os.mkdir(dirname)
except:
raise unittest.SkipTest("mkdir cannot create directory sufficiently deep for getcwd test")
os.chdir(dirname)
try:
os.getcwd()
if current_path_length < 1027:
_create_and_do_getcwd(dirname, current_path_length + len(dirname) + 1)
finally:
os.chdir('..')
os.rmdir(dirname)
_create_and_do_getcwd(dirname)
finally:
os.chdir(curdir)
support.rmtree(base_path)
@unittest.skipUnless(hasattr(posix, 'getgrouplist'), "test needs posix.getgrouplist()")
@unittest.skipUnless(hasattr(pwd, 'getpwuid'), "test needs pwd.getpwuid()")
@unittest.skipUnless(hasattr(os, 'getuid'), "test needs os.getuid()")
def test_getgrouplist(self):
user = pwd.getpwuid(os.getuid())[0]
group = pwd.getpwuid(os.getuid())[3]
self.assertIn(group, posix.getgrouplist(user, group))
@unittest.skipUnless(hasattr(os, 'getegid'), "test needs os.getegid()")
def test_getgroups(self):
with os.popen('id -G 2>/dev/null') as idg:
groups = idg.read().strip()
ret = idg.close()
if ret is not None or not groups:
raise unittest.SkipTest("need working 'id -G'")
# Issues 16698: OS X ABIs prior to 10.6 have limits on getgroups()
if sys.platform == 'darwin':
import sysconfig
dt = sysconfig.get_config_var('MACOSX_DEPLOYMENT_TARGET') or '10.0'
if tuple(int(n) for n in dt.split('.')[0:2]) < (10, 6):
raise unittest.SkipTest("getgroups(2) is broken prior to 10.6")
# 'id -G' and 'os.getgroups()' should return the same
# groups, ignoring order and duplicates.
# #10822 - it is implementation defined whether posix.getgroups()
# includes the effective gid so we include it anyway, since id -G does
self.assertEqual(
set([int(x) for x in groups.split()]),
set(posix.getgroups() + [posix.getegid()]))
# tests for the posix *at functions follow
@unittest.skipUnless(os.access in os.supports_dir_fd, "test needs dir_fd support for os.access()")
def test_access_dir_fd(self):
f = posix.open(posix.getcwd(), posix.O_RDONLY)
try:
self.assertTrue(posix.access(support.TESTFN, os.R_OK, dir_fd=f))
finally:
posix.close(f)
@unittest.skipUnless(os.chmod in os.supports_dir_fd, "test needs dir_fd support in os.chmod()")
def test_chmod_dir_fd(self):
os.chmod(support.TESTFN, stat.S_IRUSR)
f = posix.open(posix.getcwd(), posix.O_RDONLY)
try:
posix.chmod(support.TESTFN, stat.S_IRUSR | stat.S_IWUSR, dir_fd=f)
s = posix.stat(support.TESTFN)
self.assertEqual(s[0] & stat.S_IRWXU, stat.S_IRUSR | stat.S_IWUSR)
finally:
posix.close(f)
@unittest.skipUnless(os.chown in os.supports_dir_fd, "test needs dir_fd support in os.chown()")
def test_chown_dir_fd(self):
support.unlink(support.TESTFN)
support.create_empty_file(support.TESTFN)
f = posix.open(posix.getcwd(), posix.O_RDONLY)
try:
posix.chown(support.TESTFN, os.getuid(), os.getgid(), dir_fd=f)
finally:
posix.close(f)
@unittest.skipUnless(os.stat in os.supports_dir_fd, "test needs dir_fd support in os.stat()")
def test_stat_dir_fd(self):
support.unlink(support.TESTFN)
with open(support.TESTFN, 'w') as outfile:
outfile.write("testline\n")
f = posix.open(posix.getcwd(), posix.O_RDONLY)
try:
s1 = posix.stat(support.TESTFN)
s2 = posix.stat(support.TESTFN, dir_fd=f)
self.assertEqual(s1, s2)
s2 = posix.stat(support.TESTFN, dir_fd=None)
self.assertEqual(s1, s2)
self.assertRaisesRegex(TypeError, 'should be integer, not',
posix.stat, support.TESTFN, dir_fd=posix.getcwd())
self.assertRaisesRegex(TypeError, 'should be integer, not',
posix.stat, support.TESTFN, dir_fd=float(f))
self.assertRaises(OverflowError,
posix.stat, support.TESTFN, dir_fd=10**20)
finally:
posix.close(f)
@unittest.skipUnless(os.utime in os.supports_dir_fd, "test needs dir_fd support in os.utime()")
def test_utime_dir_fd(self):
f = posix.open(posix.getcwd(), posix.O_RDONLY)
try:
now = time.time()
posix.utime(support.TESTFN, None, dir_fd=f)
posix.utime(support.TESTFN, dir_fd=f)
self.assertRaises(TypeError, posix.utime, support.TESTFN, now, dir_fd=f)
self.assertRaises(TypeError, posix.utime, support.TESTFN, (None, None), dir_fd=f)
self.assertRaises(TypeError, posix.utime, support.TESTFN, (now, None), dir_fd=f)
self.assertRaises(TypeError, posix.utime, support.TESTFN, (None, now), dir_fd=f)
self.assertRaises(TypeError, posix.utime, support.TESTFN, (now, "x"), dir_fd=f)
posix.utime(support.TESTFN, (int(now), int(now)), dir_fd=f)
posix.utime(support.TESTFN, (now, now), dir_fd=f)
posix.utime(support.TESTFN,
(int(now), int((now - int(now)) * 1e9)), dir_fd=f)
posix.utime(support.TESTFN, dir_fd=f,
times=(int(now), int((now - int(now)) * 1e9)))
# try dir_fd and follow_symlinks together
if os.utime in os.supports_follow_symlinks:
try:
posix.utime(support.TESTFN, follow_symlinks=False, dir_fd=f)
except ValueError:
# whoops! using both together not supported on this platform.
pass
finally:
posix.close(f)
@unittest.skipUnless(os.link in os.supports_dir_fd, "test needs dir_fd support in os.link()")
def test_link_dir_fd(self):
f = posix.open(posix.getcwd(), posix.O_RDONLY)
try:
posix.link(support.TESTFN, support.TESTFN + 'link', src_dir_fd=f, dst_dir_fd=f)
# should have same inodes
self.assertEqual(posix.stat(support.TESTFN)[1],
posix.stat(support.TESTFN + 'link')[1])
finally:
posix.close(f)
support.unlink(support.TESTFN + 'link')
@unittest.skipUnless(os.mkdir in os.supports_dir_fd, "test needs dir_fd support in os.mkdir()")
def test_mkdir_dir_fd(self):
f = posix.open(posix.getcwd(), posix.O_RDONLY)
try:
posix.mkdir(support.TESTFN + 'dir', dir_fd=f)
posix.stat(support.TESTFN + 'dir') # should not raise exception
finally:
posix.close(f)
support.rmtree(support.TESTFN + 'dir')
@unittest.skipUnless((os.mknod in os.supports_dir_fd) and hasattr(stat, 'S_IFIFO'),
"test requires both stat.S_IFIFO and dir_fd support for os.mknod()")
def test_mknod_dir_fd(self):
# Test using mknodat() to create a FIFO (the only use specified
# by POSIX).
support.unlink(support.TESTFN)
mode = stat.S_IFIFO | stat.S_IRUSR | stat.S_IWUSR
f = posix.open(posix.getcwd(), posix.O_RDONLY)
try:
posix.mknod(support.TESTFN, mode, 0, dir_fd=f)
except OSError as e:
# Some old systems don't allow unprivileged users to use
# mknod(), or only support creating device nodes.
self.assertIn(e.errno, (errno.EPERM, errno.EINVAL))
else:
self.assertTrue(stat.S_ISFIFO(posix.stat(support.TESTFN).st_mode))
finally:
posix.close(f)
@unittest.skipUnless(os.open in os.supports_dir_fd, "test needs dir_fd support in os.open()")
def test_open_dir_fd(self):
support.unlink(support.TESTFN)
with open(support.TESTFN, 'w') as outfile:
outfile.write("testline\n")
a = posix.open(posix.getcwd(), posix.O_RDONLY)
b = posix.open(support.TESTFN, posix.O_RDONLY, dir_fd=a)
try:
res = posix.read(b, 9).decode(encoding="utf-8")
self.assertEqual("testline\n", res)
finally:
posix.close(a)
posix.close(b)
@unittest.skipUnless(os.readlink in os.supports_dir_fd, "test needs dir_fd support in os.readlink()")
def test_readlink_dir_fd(self):
os.symlink(support.TESTFN, support.TESTFN + 'link')
f = posix.open(posix.getcwd(), posix.O_RDONLY)
try:
self.assertEqual(posix.readlink(support.TESTFN + 'link'),
posix.readlink(support.TESTFN + 'link', dir_fd=f))
finally:
support.unlink(support.TESTFN + 'link')
posix.close(f)
@unittest.skipUnless(os.rename in os.supports_dir_fd, "test needs dir_fd support in os.rename()")
def test_rename_dir_fd(self):
support.unlink(support.TESTFN)
support.create_empty_file(support.TESTFN + 'ren')
f = posix.open(posix.getcwd(), posix.O_RDONLY)
try:
posix.rename(support.TESTFN + 'ren', support.TESTFN, src_dir_fd=f, dst_dir_fd=f)
except:
posix.rename(support.TESTFN + 'ren', support.TESTFN)
raise
else:
posix.stat(support.TESTFN) # should not raise exception
finally:
posix.close(f)
@unittest.skipUnless(os.symlink in os.supports_dir_fd, "test needs dir_fd support in os.symlink()")
def test_symlink_dir_fd(self):
f = posix.open(posix.getcwd(), posix.O_RDONLY)
try:
posix.symlink(support.TESTFN, support.TESTFN + 'link', dir_fd=f)
self.assertEqual(posix.readlink(support.TESTFN + 'link'), support.TESTFN)
finally:
posix.close(f)
support.unlink(support.TESTFN + 'link')
@unittest.skipUnless(os.unlink in os.supports_dir_fd, "test needs dir_fd support in os.unlink()")
def test_unlink_dir_fd(self):
f = posix.open(posix.getcwd(), posix.O_RDONLY)
support.create_empty_file(support.TESTFN + 'del')
posix.stat(support.TESTFN + 'del') # should not raise exception
try:
posix.unlink(support.TESTFN + 'del', dir_fd=f)
except:
support.unlink(support.TESTFN + 'del')
raise
else:
            self.assertRaises(OSError, posix.stat, support.TESTFN + 'del')
finally:
posix.close(f)
@unittest.skipUnless(os.mkfifo in os.supports_dir_fd, "test needs dir_fd support in os.mkfifo()")
def test_mkfifo_dir_fd(self):
support.unlink(support.TESTFN)
f = posix.open(posix.getcwd(), posix.O_RDONLY)
try:
posix.mkfifo(support.TESTFN, stat.S_IRUSR | stat.S_IWUSR, dir_fd=f)
self.assertTrue(stat.S_ISFIFO(posix.stat(support.TESTFN).st_mode))
finally:
posix.close(f)
requires_sched_h = unittest.skipUnless(hasattr(posix, 'sched_yield'),
"don't have scheduling support")
requires_sched_affinity = unittest.skipUnless(hasattr(posix, 'sched_setaffinity'),
"don't have sched affinity support")
@requires_sched_h
def test_sched_yield(self):
# This has no error conditions (at least on Linux).
posix.sched_yield()
@requires_sched_h
@unittest.skipUnless(hasattr(posix, 'sched_get_priority_max'),
"requires sched_get_priority_max()")
def test_sched_priority(self):
# Round-robin usually has interesting priorities.
pol = posix.SCHED_RR
lo = posix.sched_get_priority_min(pol)
hi = posix.sched_get_priority_max(pol)
self.assertIsInstance(lo, int)
self.assertIsInstance(hi, int)
self.assertGreaterEqual(hi, lo)
# OSX evidently just returns 15 without checking the argument.
if sys.platform != "darwin":
self.assertRaises(OSError, posix.sched_get_priority_min, -23)
self.assertRaises(OSError, posix.sched_get_priority_max, -23)
@unittest.skipUnless(hasattr(posix, 'sched_setscheduler'), "can't change scheduler")
def test_get_and_set_scheduler_and_param(self):
possible_schedulers = [sched for name, sched in posix.__dict__.items()
if name.startswith("SCHED_")]
mine = posix.sched_getscheduler(0)
self.assertIn(mine, possible_schedulers)
try:
parent = posix.sched_getscheduler(os.getppid())
except OSError as e:
if e.errno != errno.EPERM:
raise
else:
self.assertIn(parent, possible_schedulers)
self.assertRaises(OSError, posix.sched_getscheduler, -1)
self.assertRaises(OSError, posix.sched_getparam, -1)
param = posix.sched_getparam(0)
self.assertIsInstance(param.sched_priority, int)
# POSIX states that calling sched_setparam() or sched_setscheduler() on
# a process with a scheduling policy other than SCHED_FIFO or SCHED_RR
# is implementation-defined: NetBSD and FreeBSD can return EINVAL.
if not sys.platform.startswith(('freebsd', 'netbsd')):
try:
posix.sched_setscheduler(0, mine, param)
posix.sched_setparam(0, param)
except OSError as e:
if e.errno != errno.EPERM:
raise
self.assertRaises(OSError, posix.sched_setparam, -1, param)
self.assertRaises(OSError, posix.sched_setscheduler, -1, mine, param)
self.assertRaises(TypeError, posix.sched_setscheduler, 0, mine, None)
self.assertRaises(TypeError, posix.sched_setparam, 0, 43)
param = posix.sched_param(None)
self.assertRaises(TypeError, posix.sched_setparam, 0, param)
large = 214748364700
param = posix.sched_param(large)
self.assertRaises(OverflowError, posix.sched_setparam, 0, param)
param = posix.sched_param(sched_priority=-large)
self.assertRaises(OverflowError, posix.sched_setparam, 0, param)
@unittest.skipUnless(hasattr(posix, "sched_rr_get_interval"), "no function")
def test_sched_rr_get_interval(self):
try:
interval = posix.sched_rr_get_interval(0)
except OSError as e:
# This likely means that sched_rr_get_interval is only valid for
# processes with the SCHED_RR scheduler in effect.
if e.errno != errno.EINVAL:
raise
self.skipTest("only works on SCHED_RR processes")
self.assertIsInstance(interval, float)
# Reasonable constraints, I think.
self.assertGreaterEqual(interval, 0.)
self.assertLess(interval, 1.)
@requires_sched_affinity
def test_sched_getaffinity(self):
mask = posix.sched_getaffinity(0)
self.assertIsInstance(mask, set)
self.assertGreaterEqual(len(mask), 1)
self.assertRaises(OSError, posix.sched_getaffinity, -1)
for cpu in mask:
self.assertIsInstance(cpu, int)
self.assertGreaterEqual(cpu, 0)
self.assertLess(cpu, 1 << 32)
@requires_sched_affinity
def test_sched_setaffinity(self):
mask = posix.sched_getaffinity(0)
if len(mask) > 1:
# Empty masks are forbidden
mask.pop()
posix.sched_setaffinity(0, mask)
self.assertEqual(posix.sched_getaffinity(0), mask)
self.assertRaises(OSError, posix.sched_setaffinity, 0, [])
self.assertRaises(ValueError, posix.sched_setaffinity, 0, [-10])
self.assertRaises(OverflowError, posix.sched_setaffinity, 0, [1<<128])
self.assertRaises(OSError, posix.sched_setaffinity, -1, mask)
def test_rtld_constants(self):
# check presence of major RTLD_* constants
posix.RTLD_LAZY
posix.RTLD_NOW
posix.RTLD_GLOBAL
posix.RTLD_LOCAL
@unittest.skipUnless(hasattr(os, 'SEEK_HOLE'),
"test needs an OS that reports file holes")
def test_fs_holes(self):
# Even if the filesystem doesn't report holes,
# if the OS supports it the SEEK_* constants
# will be defined and will have a consistent
# behaviour:
# os.SEEK_DATA = current position
# os.SEEK_HOLE = end of file position
with open(support.TESTFN, 'r+b') as fp:
fp.write(b"hello")
fp.flush()
size = fp.tell()
fno = fp.fileno()
            try:
for i in range(size):
self.assertEqual(i, os.lseek(fno, i, os.SEEK_DATA))
self.assertLessEqual(size, os.lseek(fno, i, os.SEEK_HOLE))
self.assertRaises(OSError, os.lseek, fno, size, os.SEEK_DATA)
self.assertRaises(OSError, os.lseek, fno, size, os.SEEK_HOLE)
            except OSError:
                # Some OSs claim to support SEEK_HOLE/SEEK_DATA but do not
                # actually implement them correctly.
# For instance:
# http://lists.freebsd.org/pipermail/freebsd-amd64/2012-January/014332.html
raise unittest.SkipTest("OSError raised!")
def test_path_error2(self):
"""
Test functions that call path_error2(), providing two filenames in their exceptions.
"""
for name in ("rename", "replace", "link"):
function = getattr(os, name, None)
if function is None:
continue
for dst in ("noodly2", support.TESTFN):
try:
function('doesnotexistfilename', dst)
except OSError as e:
self.assertIn("'doesnotexistfilename' -> '{}'".format(dst), str(e))
break
else:
self.fail("No valid path_error2() test for os." + name)
class PosixGroupsTester(unittest.TestCase):
def setUp(self):
if posix.getuid() != 0:
raise unittest.SkipTest("not enough privileges")
if not hasattr(posix, 'getgroups'):
raise unittest.SkipTest("need posix.getgroups")
if sys.platform == 'darwin':
raise unittest.SkipTest("getgroups(2) is broken on OSX")
self.saved_groups = posix.getgroups()
def tearDown(self):
if hasattr(posix, 'setgroups'):
posix.setgroups(self.saved_groups)
elif hasattr(posix, 'initgroups'):
name = pwd.getpwuid(posix.getuid()).pw_name
posix.initgroups(name, self.saved_groups[0])
@unittest.skipUnless(hasattr(posix, 'initgroups'),
"test needs posix.initgroups()")
def test_initgroups(self):
# find missing group
g = max(self.saved_groups or [0]) + 1
name = pwd.getpwuid(posix.getuid()).pw_name
posix.initgroups(name, g)
self.assertIn(g, posix.getgroups())
@unittest.skipUnless(hasattr(posix, 'setgroups'),
"test needs posix.setgroups()")
def test_setgroups(self):
for groups in [[0], list(range(16))]:
posix.setgroups(groups)
self.assertListEqual(groups, posix.getgroups())
def test_main():
try:
support.run_unittest(PosixTester, PosixGroupsTester)
finally:
support.reap_children()
if __name__ == '__main__':
test_main()
"Test posix functions"
from test import support
# Skip these tests if there is no posix module.
posix = support.import_module('posix')
import errno
import sys
import time
import os
import fcntl
import platform
import pwd
import shutil
import stat
import tempfile
import unittest
import warnings
_DUMMY_SYMLINK = os.path.join(tempfile.gettempdir(),
support.TESTFN + '-dummy-symlink')
class PosixTester(unittest.TestCase):
def setUp(self):
# create empty file
fp = open(support.TESTFN, 'w+')
fp.close()
self.teardown_files = [ support.TESTFN ]
self._warnings_manager = support.check_warnings()
self._warnings_manager.__enter__()
warnings.filterwarnings('ignore', '.* potential security risk .*',
RuntimeWarning)
def tearDown(self):
for teardown_file in self.teardown_files:
support.unlink(teardown_file)
self._warnings_manager.__exit__(None, None, None)
def testNoArgFunctions(self):
# test posix functions which take no arguments and have
# no side-effects which we need to cleanup (e.g., fork, wait, abort)
NO_ARG_FUNCTIONS = [ "ctermid", "getcwd", "getcwdb", "uname",
"times", "getloadavg",
"getegid", "geteuid", "getgid", "getgroups",
"getpid", "getpgrp", "getppid", "getuid", "sync",
]
for name in NO_ARG_FUNCTIONS:
posix_func = getattr(posix, name, None)
if posix_func is not None:
posix_func()
self.assertRaises(TypeError, posix_func, 1)
@unittest.skipUnless(hasattr(posix, 'getresuid'),
'test needs posix.getresuid()')
def test_getresuid(self):
user_ids = posix.getresuid()
self.assertEqual(len(user_ids), 3)
for val in user_ids:
self.assertGreaterEqual(val, 0)
@unittest.skipUnless(hasattr(posix, 'getresgid'),
'test needs posix.getresgid()')
def test_getresgid(self):
group_ids = posix.getresgid()
self.assertEqual(len(group_ids), 3)
for val in group_ids:
self.assertGreaterEqual(val, 0)
@unittest.skipUnless(hasattr(posix, 'setresuid'),
'test needs posix.setresuid()')
def test_setresuid(self):
current_user_ids = posix.getresuid()
self.assertIsNone(posix.setresuid(*current_user_ids))
# -1 means don't change that value.
self.assertIsNone(posix.setresuid(-1, -1, -1))
@unittest.skipUnless(hasattr(posix, 'setresuid'),
'test needs posix.setresuid()')
def test_setresuid_exception(self):
# Don't do this test if someone is silly enough to run us as root.
current_user_ids = posix.getresuid()
if 0 not in current_user_ids:
new_user_ids = (current_user_ids[0]+1, -1, -1)
self.assertRaises(OSError, posix.setresuid, *new_user_ids)
@unittest.skipUnless(hasattr(posix, 'setresgid'),
'test needs posix.setresgid()')
def test_setresgid(self):
current_group_ids = posix.getresgid()
self.assertIsNone(posix.setresgid(*current_group_ids))
# -1 means don't change that value.
self.assertIsNone(posix.setresgid(-1, -1, -1))
@unittest.skipUnless(hasattr(posix, 'setresgid'),
'test needs posix.setresgid()')
def test_setresgid_exception(self):
# Don't do this test if someone is silly enough to run us as root.
current_group_ids = posix.getresgid()
if 0 not in current_group_ids:
new_group_ids = (current_group_ids[0]+1, -1, -1)
self.assertRaises(OSError, posix.setresgid, *new_group_ids)
@unittest.skipUnless(hasattr(posix, 'initgroups'),
"test needs os.initgroups()")
def test_initgroups(self):
# It takes a string and an integer; check that it raises a TypeError
# for other argument lists.
self.assertRaises(TypeError, posix.initgroups)
self.assertRaises(TypeError, posix.initgroups, None)
self.assertRaises(TypeError, posix.initgroups, 3, "foo")
self.assertRaises(TypeError, posix.initgroups, "foo", 3, object())
# If a non-privileged user invokes it, it should fail with OSError
# EPERM.
if os.getuid() != 0:
try:
name = pwd.getpwuid(posix.getuid()).pw_name
except KeyError:
# the current UID may not have a pwd entry
raise unittest.SkipTest("need a pwd entry")
try:
posix.initgroups(name, 13)
except OSError as e:
self.assertEqual(e.errno, errno.EPERM)
else:
self.fail("Expected OSError to be raised by initgroups")
@unittest.skipUnless(hasattr(posix, 'statvfs'),
'test needs posix.statvfs()')
def test_statvfs(self):
self.assertTrue(posix.statvfs(os.curdir))
@unittest.skipUnless(hasattr(posix, 'fstatvfs'),
'test needs posix.fstatvfs()')
def test_fstatvfs(self):
fp = open(support.TESTFN)
try:
self.assertTrue(posix.fstatvfs(fp.fileno()))
self.assertTrue(posix.statvfs(fp.fileno()))
finally:
fp.close()
@unittest.skipUnless(hasattr(posix, 'ftruncate'),
'test needs posix.ftruncate()')
def test_ftruncate(self):
fp = open(support.TESTFN, 'w+')
try:
# we need to have some data to truncate
fp.write('test')
fp.flush()
posix.ftruncate(fp.fileno(), 0)
finally:
fp.close()
@unittest.skipUnless(hasattr(posix, 'truncate'), "test needs posix.truncate()")
def test_truncate(self):
with open(support.TESTFN, 'w') as fp:
fp.write('test')
fp.flush()
posix.truncate(support.TESTFN, 0)
@unittest.skipUnless(getattr(os, 'execve', None) in os.supports_fd, "test needs execve() to support the fd parameter")
@unittest.skipUnless(hasattr(os, 'fork'), "test needs os.fork()")
@unittest.skipUnless(hasattr(os, 'waitpid'), "test needs os.waitpid()")
def test_fexecve(self):
fp = os.open(sys.executable, os.O_RDONLY)
try:
pid = os.fork()
if pid == 0:
os.chdir(os.path.split(sys.executable)[0])
posix.execve(fp, [sys.executable, '-c', 'pass'], os.environ)
else:
self.assertEqual(os.waitpid(pid, 0), (pid, 0))
finally:
os.close(fp)
@unittest.skipUnless(hasattr(posix, 'waitid'), "test needs posix.waitid()")
@unittest.skipUnless(hasattr(os, 'fork'), "test needs os.fork()")
def test_waitid(self):
pid = os.fork()
if pid == 0:
os.chdir(os.path.split(sys.executable)[0])
posix.execve(sys.executable, [sys.executable, '-c', 'pass'], os.environ)
else:
res = posix.waitid(posix.P_PID, pid, posix.WEXITED)
self.assertEqual(pid, res.si_pid)
@unittest.skipUnless(hasattr(posix, 'lockf'), "test needs posix.lockf()")
def test_lockf(self):
fd = os.open(support.TESTFN, os.O_WRONLY | os.O_CREAT)
try:
os.write(fd, b'test')
os.lseek(fd, 0, os.SEEK_SET)
posix.lockf(fd, posix.F_LOCK, 4)
# section is locked
posix.lockf(fd, posix.F_ULOCK, 4)
finally:
os.close(fd)
@unittest.skipUnless(hasattr(posix, 'pread'), "test needs posix.pread()")
def test_pread(self):
fd = os.open(support.TESTFN, os.O_RDWR | os.O_CREAT)
try:
os.write(fd, b'test')
os.lseek(fd, 0, os.SEEK_SET)
self.assertEqual(b'es', posix.pread(fd, 2, 1))
# the first pread() shouldn't disturb the file offset
self.assertEqual(b'te', posix.read(fd, 2))
finally:
os.close(fd)
@unittest.skipUnless(hasattr(posix, 'pwrite'), "test needs posix.pwrite()")
def test_pwrite(self):
fd = os.open(support.TESTFN, os.O_RDWR | os.O_CREAT)
try:
os.write(fd, b'test')
os.lseek(fd, 0, os.SEEK_SET)
posix.pwrite(fd, b'xx', 1)
self.assertEqual(b'txxt', posix.read(fd, 4))
finally:
os.close(fd)
@unittest.skipUnless(hasattr(posix, 'posix_fallocate'),
"test needs posix.posix_fallocate()")
def test_posix_fallocate(self):
fd = os.open(support.TESTFN, os.O_WRONLY | os.O_CREAT)
try:
posix.posix_fallocate(fd, 0, 10)
except OSError as inst:
# issue10812, ZFS doesn't appear to support posix_fallocate,
# so skip Solaris-based since they are likely to have ZFS.
if inst.errno != errno.EINVAL or not sys.platform.startswith("sunos"):
raise
finally:
os.close(fd)
@unittest.skipUnless(hasattr(posix, 'posix_fadvise'),
"test needs posix.posix_fadvise()")
def test_posix_fadvise(self):
fd = os.open(support.TESTFN, os.O_RDONLY)
try:
posix.posix_fadvise(fd, 0, 0, posix.POSIX_FADV_WILLNEED)
finally:
os.close(fd)
@unittest.skipUnless(os.utime in os.supports_fd, "test needs fd support in os.utime")
def test_utime_with_fd(self):
now = time.time()
fd = os.open(support.TESTFN, os.O_RDONLY)
try:
posix.utime(fd)
posix.utime(fd, None)
self.assertRaises(TypeError, posix.utime, fd, (None, None))
self.assertRaises(TypeError, posix.utime, fd, (now, None))
self.assertRaises(TypeError, posix.utime, fd, (None, now))
posix.utime(fd, (int(now), int(now)))
posix.utime(fd, (now, now))
self.assertRaises(ValueError, posix.utime, fd, (now, now), ns=(now, now))
self.assertRaises(ValueError, posix.utime, fd, (now, 0), ns=(None, None))
self.assertRaises(ValueError, posix.utime, fd, (None, None), ns=(now, 0))
posix.utime(fd, (int(now), int((now - int(now)) * 1e9)))
posix.utime(fd, ns=(int(now), int((now - int(now)) * 1e9)))
finally:
os.close(fd)
@unittest.skipUnless(os.utime in os.supports_follow_symlinks, "test needs follow_symlinks support in os.utime")
def test_utime_nofollow_symlinks(self):
now = time.time()
posix.utime(support.TESTFN, None, follow_symlinks=False)
self.assertRaises(TypeError, posix.utime, support.TESTFN, (None, None), follow_symlinks=False)
self.assertRaises(TypeError, posix.utime, support.TESTFN, (now, None), follow_symlinks=False)
self.assertRaises(TypeError, posix.utime, support.TESTFN, (None, now), follow_symlinks=False)
posix.utime(support.TESTFN, (int(now), int(now)), follow_symlinks=False)
posix.utime(support.TESTFN, (now, now), follow_symlinks=False)
posix.utime(support.TESTFN, follow_symlinks=False)
@unittest.skipUnless(hasattr(posix, 'writev'), "test needs posix.writev()")
def test_writev(self):
fd = os.open(support.TESTFN, os.O_RDWR | os.O_CREAT)
try:
n = os.writev(fd, (b'test1', b'tt2', b't3'))
self.assertEqual(n, 10)
os.lseek(fd, 0, os.SEEK_SET)
self.assertEqual(b'test1tt2t3', posix.read(fd, 10))
# Issue #20113: empty list of buffers should not crash
try:
size = posix.writev(fd, [])
except OSError:
# writev(fd, []) raises OSError(22, "Invalid argument")
# on OpenIndiana
pass
else:
self.assertEqual(size, 0)
finally:
os.close(fd)
@unittest.skipUnless(hasattr(posix, 'readv'), "test needs posix.readv()")
def test_readv(self):
fd = os.open(support.TESTFN, os.O_RDWR | os.O_CREAT)
try:
os.write(fd, b'test1tt2t3')
os.lseek(fd, 0, os.SEEK_SET)
buf = [bytearray(i) for i in [5, 3, 2]]
self.assertEqual(posix.readv(fd, buf), 10)
self.assertEqual([b'test1', b'tt2', b't3'], [bytes(i) for i in buf])
# Issue #20113: empty list of buffers should not crash
try:
size = posix.readv(fd, [])
except OSError:
# readv(fd, []) raises OSError(22, "Invalid argument")
# on OpenIndiana
pass
else:
self.assertEqual(size, 0)
finally:
os.close(fd)
@unittest.skipUnless(hasattr(posix, 'dup'),
'test needs posix.dup()')
def test_dup(self):
fp = open(support.TESTFN)
try:
fd = posix.dup(fp.fileno())
self.assertIsInstance(fd, int)
os.close(fd)
finally:
fp.close()
@unittest.skipUnless(hasattr(posix, 'confstr'),
'test needs posix.confstr()')
def test_confstr(self):
self.assertRaises(ValueError, posix.confstr, "CS_garbage")
self.assertEqual(len(posix.confstr("CS_PATH")) > 0, True)
@unittest.skipUnless(hasattr(posix, 'dup2'),
'test needs posix.dup2()')
def test_dup2(self):
fp1 = open(support.TESTFN)
fp2 = open(support.TESTFN)
try:
posix.dup2(fp1.fileno(), fp2.fileno())
finally:
fp1.close()
fp2.close()
@unittest.skipUnless(hasattr(os, 'O_CLOEXEC'), "needs os.O_CLOEXEC")
@support.requires_linux_version(2, 6, 23)
def test_oscloexec(self):
fd = os.open(support.TESTFN, os.O_RDONLY|os.O_CLOEXEC)
self.addCleanup(os.close, fd)
self.assertTrue(fcntl.fcntl(fd, fcntl.F_GETFD) & fcntl.FD_CLOEXEC)
@unittest.skipUnless(hasattr(posix, 'O_EXLOCK'),
'test needs posix.O_EXLOCK')
def test_osexlock(self):
fd = os.open(support.TESTFN,
os.O_WRONLY|os.O_EXLOCK|os.O_CREAT)
self.assertRaises(OSError, os.open, support.TESTFN,
os.O_WRONLY|os.O_EXLOCK|os.O_NONBLOCK)
os.close(fd)
if hasattr(posix, "O_SHLOCK"):
fd = os.open(support.TESTFN,
os.O_WRONLY|os.O_SHLOCK|os.O_CREAT)
self.assertRaises(OSError, os.open, support.TESTFN,
os.O_WRONLY|os.O_EXLOCK|os.O_NONBLOCK)
os.close(fd)
@unittest.skipUnless(hasattr(posix, 'O_SHLOCK'),
'test needs posix.O_SHLOCK')
def test_osshlock(self):
fd1 = os.open(support.TESTFN,
os.O_WRONLY|os.O_SHLOCK|os.O_CREAT)
fd2 = os.open(support.TESTFN,
os.O_WRONLY|os.O_SHLOCK|os.O_CREAT)
os.close(fd2)
os.close(fd1)
if hasattr(posix, "O_EXLOCK"):
fd = os.open(support.TESTFN,
os.O_WRONLY|os.O_SHLOCK|os.O_CREAT)
self.assertRaises(OSError, os.open, support.TESTFN,
os.O_RDONLY|os.O_EXLOCK|os.O_NONBLOCK)
os.close(fd)
@unittest.skipUnless(hasattr(posix, 'fstat'),
'test needs posix.fstat()')
def test_fstat(self):
fp = open(support.TESTFN)
try:
self.assertTrue(posix.fstat(fp.fileno()))
self.assertTrue(posix.stat(fp.fileno()))
self.assertRaisesRegex(TypeError,
'should be string, bytes or integer, not',
posix.stat, float(fp.fileno()))
finally:
fp.close()
@unittest.skipUnless(hasattr(posix, 'stat'),
'test needs posix.stat()')
def test_stat(self):
self.assertTrue(posix.stat(support.TESTFN))
self.assertTrue(posix.stat(os.fsencode(support.TESTFN)))
self.assertTrue(posix.stat(bytearray(os.fsencode(support.TESTFN))))
self.assertRaisesRegex(TypeError,
'can\'t specify None for path argument',
posix.stat, None)
self.assertRaisesRegex(TypeError,
'should be string, bytes or integer, not',
posix.stat, list(support.TESTFN))
self.assertRaisesRegex(TypeError,
'should be string, bytes or integer, not',
posix.stat, list(os.fsencode(support.TESTFN)))
@unittest.skipUnless(hasattr(posix, 'mkfifo'), "don't have mkfifo()")
def test_mkfifo(self):
support.unlink(support.TESTFN)
posix.mkfifo(support.TESTFN, stat.S_IRUSR | stat.S_IWUSR)
self.assertTrue(stat.S_ISFIFO(posix.stat(support.TESTFN).st_mode))
@unittest.skipUnless(hasattr(posix, 'mknod') and hasattr(stat, 'S_IFIFO'),
"don't have mknod()/S_IFIFO")
def test_mknod(self):
# Test using mknod() to create a FIFO (the only use specified
# by POSIX).
support.unlink(support.TESTFN)
mode = stat.S_IFIFO | stat.S_IRUSR | stat.S_IWUSR
try:
posix.mknod(support.TESTFN, mode, 0)
except OSError as e:
# Some old systems don't allow unprivileged users to use
# mknod(), or only support creating device nodes.
self.assertIn(e.errno, (errno.EPERM, errno.EINVAL))
else:
self.assertTrue(stat.S_ISFIFO(posix.stat(support.TESTFN).st_mode))
def _test_all_chown_common(self, chown_func, first_param, stat_func):
"""Common code for chown, fchown and lchown tests."""
def check_stat(uid, gid):
if stat_func is not None:
stat = stat_func(first_param)
self.assertEqual(stat.st_uid, uid)
self.assertEqual(stat.st_gid, gid)
uid = os.getuid()
gid = os.getgid()
# test a successful chown call
chown_func(first_param, uid, gid)
check_stat(uid, gid)
chown_func(first_param, -1, gid)
check_stat(uid, gid)
chown_func(first_param, uid, -1)
check_stat(uid, gid)
if uid == 0:
# Try an amusingly large uid/gid to make sure we handle
# large unsigned values. (chown lets you use any
# uid/gid you like, even if they aren't defined.)
#
# This problem keeps coming up:
# http://bugs.python.org/issue1747858
# http://bugs.python.org/issue4591
# http://bugs.python.org/issue15301
# Hopefully the fix in 4591 fixes it for good!
#
# This part of the test only runs when run as root.
# Only scary people run their tests as root.
big_value = 2**31
chown_func(first_param, big_value, big_value)
check_stat(big_value, big_value)
chown_func(first_param, -1, -1)
check_stat(big_value, big_value)
chown_func(first_param, uid, gid)
check_stat(uid, gid)
elif platform.system() in ('HP-UX', 'SunOS'):
# HP-UX and Solaris can allow a non-root user to chown() to root
# (issue #5113)
raise unittest.SkipTest("Skipping because of non-standard chown() "
"behavior")
else:
# non-root cannot chown to root, raises OSError
self.assertRaises(OSError, chown_func, first_param, 0, 0)
check_stat(uid, gid)
self.assertRaises(OSError, chown_func, first_param, 0, -1)
check_stat(uid, gid)
if 0 not in os.getgroups():
self.assertRaises(OSError, chown_func, first_param, -1, 0)
check_stat(uid, gid)
# test illegal types
for t in str, float:
self.assertRaises(TypeError, chown_func, first_param, t(uid), gid)
check_stat(uid, gid)
self.assertRaises(TypeError, chown_func, first_param, uid, t(gid))
check_stat(uid, gid)
@unittest.skipUnless(hasattr(posix, 'chown'), "test needs os.chown()")
def test_chown(self):
# raise an OSError if the file does not exist
os.unlink(support.TESTFN)
self.assertRaises(OSError, posix.chown, support.TESTFN, -1, -1)
# re-create the file
support.create_empty_file(support.TESTFN)
self._test_all_chown_common(posix.chown, support.TESTFN,
getattr(posix, 'stat', None))
@unittest.skipUnless(hasattr(posix, 'fchown'), "test needs os.fchown()")
def test_fchown(self):
os.unlink(support.TESTFN)
# re-create the file
test_file = open(support.TESTFN, 'w')
try:
fd = test_file.fileno()
self._test_all_chown_common(posix.fchown, fd,
getattr(posix, 'fstat', None))
finally:
test_file.close()
@unittest.skipUnless(hasattr(posix, 'lchown'), "test needs os.lchown()")
def test_lchown(self):
os.unlink(support.TESTFN)
# create a symlink
os.symlink(_DUMMY_SYMLINK, support.TESTFN)
self._test_all_chown_common(posix.lchown, support.TESTFN,
getattr(posix, 'lstat', None))
@unittest.skipUnless(hasattr(posix, 'chdir'), 'test needs posix.chdir()')
def test_chdir(self):
posix.chdir(os.curdir)
self.assertRaises(OSError, posix.chdir, support.TESTFN)
def test_listdir(self):
self.assertTrue(support.TESTFN in posix.listdir(os.curdir))
def test_listdir_default(self):
# When listdir is called without argument,
# it's the same as listdir(os.curdir).
self.assertTrue(support.TESTFN in posix.listdir())
def test_listdir_bytes(self):
# When listdir is called with a bytes object,
# the returned strings are of type bytes.
self.assertTrue(os.fsencode(support.TESTFN) in posix.listdir(b'.'))
@unittest.skipUnless(posix.listdir in os.supports_fd,
"test needs fd support for posix.listdir()")
def test_listdir_fd(self):
f = posix.open(posix.getcwd(), posix.O_RDONLY)
self.addCleanup(posix.close, f)
self.assertEqual(
sorted(posix.listdir('.')),
sorted(posix.listdir(f))
)
# Check that the fd offset was reset (issue #13739)
self.assertEqual(
sorted(posix.listdir('.')),
sorted(posix.listdir(f))
)
@unittest.skipUnless(hasattr(posix, 'access'), 'test needs posix.access()')
def test_access(self):
self.assertTrue(posix.access(support.TESTFN, os.R_OK))
@unittest.skipUnless(hasattr(posix, 'umask'), 'test needs posix.umask()')
def test_umask(self):
old_mask = posix.umask(0)
self.assertIsInstance(old_mask, int)
posix.umask(old_mask)
@unittest.skipUnless(hasattr(posix, 'strerror'),
'test needs posix.strerror()')
def test_strerror(self):
self.assertTrue(posix.strerror(0))
@unittest.skipUnless(hasattr(posix, 'pipe'), 'test needs posix.pipe()')
def test_pipe(self):
reader, writer = posix.pipe()
os.close(reader)
os.close(writer)
@unittest.skipUnless(hasattr(os, 'pipe2'), "test needs os.pipe2()")
@support.requires_linux_version(2, 6, 27)
def test_pipe2(self):
self.assertRaises(TypeError, os.pipe2, 'DEADBEEF')
self.assertRaises(TypeError, os.pipe2, 0, 0)
# try calling with flags = 0, like os.pipe()
r, w = os.pipe2(0)
os.close(r)
os.close(w)
# test flags
r, w = os.pipe2(os.O_CLOEXEC|os.O_NONBLOCK)
self.addCleanup(os.close, r)
self.addCleanup(os.close, w)
self.assertFalse(os.get_inheritable(r))
self.assertFalse(os.get_inheritable(w))
self.assertTrue(fcntl.fcntl(r, fcntl.F_GETFL) & os.O_NONBLOCK)
self.assertTrue(fcntl.fcntl(w, fcntl.F_GETFL) & os.O_NONBLOCK)
# try reading from an empty pipe: this should fail, not block
self.assertRaises(OSError, os.read, r, 1)
# try a write big enough to fill-up the pipe: this should either
# fail or perform a partial write, not block
try:
os.write(w, b'x' * support.PIPE_MAX_SIZE)
except OSError:
pass
@support.cpython_only
@unittest.skipUnless(hasattr(os, 'pipe2'), "test needs os.pipe2()")
@support.requires_linux_version(2, 6, 27)
def test_pipe2_c_limits(self):
# Issue 15989
import _testcapi
self.assertRaises(OverflowError, os.pipe2, _testcapi.INT_MAX + 1)
self.assertRaises(OverflowError, os.pipe2, _testcapi.UINT_MAX + 1)
@unittest.skipUnless(hasattr(posix, 'utime'), 'test needs posix.utime()')
def test_utime(self):
now = time.time()
posix.utime(support.TESTFN, None)
self.assertRaises(TypeError, posix.utime, support.TESTFN, (None, None))
self.assertRaises(TypeError, posix.utime, support.TESTFN, (now, None))
self.assertRaises(TypeError, posix.utime, support.TESTFN, (None, now))
posix.utime(support.TESTFN, (int(now), int(now)))
posix.utime(support.TESTFN, (now, now))
def _test_chflags_regular_file(self, chflags_func, target_file, **kwargs):
st = os.stat(target_file)
self.assertTrue(hasattr(st, 'st_flags'))
# ZFS returns EOPNOTSUPP when attempting to set flag UF_IMMUTABLE.
flags = st.st_flags | stat.UF_IMMUTABLE
try:
chflags_func(target_file, flags, **kwargs)
except OSError as err:
if err.errno != errno.EOPNOTSUPP:
raise
msg = 'chflag UF_IMMUTABLE not supported by underlying fs'
self.skipTest(msg)
try:
new_st = os.stat(target_file)
self.assertEqual(st.st_flags | stat.UF_IMMUTABLE, new_st.st_flags)
try:
fd = open(target_file, 'w+')
except OSError as e:
self.assertEqual(e.errno, errno.EPERM)
finally:
posix.chflags(target_file, st.st_flags)
@unittest.skipUnless(hasattr(posix, 'chflags'), 'test needs os.chflags()')
def test_chflags(self):
self._test_chflags_regular_file(posix.chflags, support.TESTFN)
@unittest.skipUnless(hasattr(posix, 'lchflags'), 'test needs os.lchflags()')
def test_lchflags_regular_file(self):
self._test_chflags_regular_file(posix.lchflags, support.TESTFN)
self._test_chflags_regular_file(posix.chflags, support.TESTFN, follow_symlinks=False)
@unittest.skipUnless(hasattr(posix, 'lchflags'), 'test needs os.lchflags()')
def test_lchflags_symlink(self):
testfn_st = os.stat(support.TESTFN)
self.assertTrue(hasattr(testfn_st, 'st_flags'))
os.symlink(support.TESTFN, _DUMMY_SYMLINK)
self.teardown_files.append(_DUMMY_SYMLINK)
dummy_symlink_st = os.lstat(_DUMMY_SYMLINK)
def chflags_nofollow(path, flags):
return posix.chflags(path, flags, follow_symlinks=False)
for fn in (posix.lchflags, chflags_nofollow):
# ZFS returns EOPNOTSUPP when attempting to set flag UF_IMMUTABLE.
flags = dummy_symlink_st.st_flags | stat.UF_IMMUTABLE
try:
fn(_DUMMY_SYMLINK, flags)
except OSError as err:
if err.errno != errno.EOPNOTSUPP:
raise
msg = 'chflag UF_IMMUTABLE not supported by underlying fs'
self.skipTest(msg)
try:
new_testfn_st = os.stat(support.TESTFN)
new_dummy_symlink_st = os.lstat(_DUMMY_SYMLINK)
self.assertEqual(testfn_st.st_flags, new_testfn_st.st_flags)
self.assertEqual(dummy_symlink_st.st_flags | stat.UF_IMMUTABLE,
new_dummy_symlink_st.st_flags)
finally:
fn(_DUMMY_SYMLINK, dummy_symlink_st.st_flags)
def test_environ(self):
if os.name == "nt":
item_type = str
else:
item_type = bytes
for k, v in posix.environ.items():
self.assertEqual(type(k), item_type)
self.assertEqual(type(v), item_type)
@unittest.skipUnless(hasattr(posix, 'getcwd'), 'test needs posix.getcwd()')
def test_getcwd_long_pathnames(self):
dirname = 'getcwd-test-directory-0123456789abcdef-01234567890abcdef'
curdir = os.getcwd()
base_path = os.path.abspath(support.TESTFN) + '.getcwd'
try:
os.mkdir(base_path)
os.chdir(base_path)
except:
# Just returning nothing instead of the SkipTest exception, because
# the test results in Error in that case. Is that ok?
# raise unittest.SkipTest("cannot create directory for testing")
return
def _create_and_do_getcwd(dirname, current_path_length = 0):
try:
os.mkdir(dirname)
except:
raise unittest.SkipTest("mkdir cannot create directory sufficiently deep for getcwd test")
os.chdir(dirname)
try:
os.getcwd()
if current_path_length < 1027:
_create_and_do_getcwd(dirname, current_path_length + len(dirname) + 1)
finally:
os.chdir('..')
os.rmdir(dirname)
_create_and_do_getcwd(dirname)
finally:
os.chdir(curdir)
support.rmtree(base_path)
@unittest.skipUnless(hasattr(posix, 'getgrouplist'), "test needs posix.getgrouplist()")
@unittest.skipUnless(hasattr(pwd, 'getpwuid'), "test needs pwd.getpwuid()")
@unittest.skipUnless(hasattr(os, 'getuid'), "test needs os.getuid()")
def test_getgrouplist(self):
user = pwd.getpwuid(os.getuid())[0]
group = pwd.getpwuid(os.getuid())[3]
self.assertIn(group, posix.getgrouplist(user, group))
@unittest.skipUnless(hasattr(os, 'getegid'), "test needs os.getegid()")
def test_getgroups(self):
with os.popen('id -G 2>/dev/null') as idg:
groups = idg.read().strip()
ret = idg.close()
if ret is not None or not groups:
raise unittest.SkipTest("need working 'id -G'")
# Issues 16698: OS X ABIs prior to 10.6 have limits on getgroups()
if sys.platform == 'darwin':
import sysconfig
dt = sysconfig.get_config_var('MACOSX_DEPLOYMENT_TARGET') or '10.0'
if tuple(int(n) for n in dt.split('.')[0:2]) < (10, 6):
raise unittest.SkipTest("getgroups(2) is broken prior to 10.6")
# 'id -G' and 'os.getgroups()' should return the same
# groups, ignoring order and duplicates.
# #10822 - it is implementation defined whether posix.getgroups()
# includes the effective gid so we include it anyway, since id -G does
self.assertEqual(
set([int(x) for x in groups.split()]),
set(posix.getgroups() + [posix.getegid()]))
# tests for the posix *at functions follow
@unittest.skipUnless(os.access in os.supports_dir_fd, "test needs dir_fd support for os.access()")
def test_access_dir_fd(self):
f = posix.open(posix.getcwd(), posix.O_RDONLY)
try:
self.assertTrue(posix.access(support.TESTFN, os.R_OK, dir_fd=f))
finally:
posix.close(f)
@unittest.skipUnless(os.chmod in os.supports_dir_fd, "test needs dir_fd support in os.chmod()")
def test_chmod_dir_fd(self):
os.chmod(support.TESTFN, stat.S_IRUSR)
f = posix.open(posix.getcwd(), posix.O_RDONLY)
try:
posix.chmod(support.TESTFN, stat.S_IRUSR | stat.S_IWUSR, dir_fd=f)
s = posix.stat(support.TESTFN)
self.assertEqual(s[0] & stat.S_IRWXU, stat.S_IRUSR | stat.S_IWUSR)
finally:
posix.close(f)
@unittest.skipUnless(os.chown in os.supports_dir_fd, "test needs dir_fd support in os.chown()")
def test_chown_dir_fd(self):
support.unlink(support.TESTFN)
support.create_empty_file(support.TESTFN)
f = posix.open(posix.getcwd(), posix.O_RDONLY)
try:
posix.chown(support.TESTFN, os.getuid(), os.getgid(), dir_fd=f)
finally:
posix.close(f)
@unittest.skipUnless(os.stat in os.supports_dir_fd, "test needs dir_fd support in os.stat()")
def test_stat_dir_fd(self):
support.unlink(support.TESTFN)
with open(support.TESTFN, 'w') as outfile:
outfile.write("testline\n")
f = posix.open(posix.getcwd(), posix.O_RDONLY)
try:
s1 = posix.stat(support.TESTFN)
s2 = posix.stat(support.TESTFN, dir_fd=f)
self.assertEqual(s1, s2)
s2 = posix.stat(support.TESTFN, dir_fd=None)
self.assertEqual(s1, s2)
self.assertRaisesRegex(TypeError, 'should be integer, not',
posix.stat, support.TESTFN, dir_fd=posix.getcwd())
self.assertRaisesRegex(TypeError, 'should be integer, not',
posix.stat, support.TESTFN, dir_fd=float(f))
self.assertRaises(OverflowError,
posix.stat, support.TESTFN, dir_fd=10**20)
finally:
posix.close(f)
@unittest.skipUnless(os.utime in os.supports_dir_fd, "test needs dir_fd support in os.utime()")
def test_utime_dir_fd(self):
f = posix.open(posix.getcwd(), posix.O_RDONLY)
try:
now = time.time()
posix.utime(support.TESTFN, None, dir_fd=f)
posix.utime(support.TESTFN, dir_fd=f)
self.assertRaises(TypeError, posix.utime, support.TESTFN, now, dir_fd=f)
self.assertRaises(TypeError, posix.utime, support.TESTFN, (None, None), dir_fd=f)
self.assertRaises(TypeError, posix.utime, support.TESTFN, (now, None), dir_fd=f)
self.assertRaises(TypeError, posix.utime, support.TESTFN, (None, now), dir_fd=f)
self.assertRaises(TypeError, posix.utime, support.TESTFN, (now, "x"), dir_fd=f)
posix.utime(support.TESTFN, (int(now), int(now)), dir_fd=f)
posix.utime(support.TESTFN, (now, now), dir_fd=f)
posix.utime(support.TESTFN,
(int(now), int((now - int(now)) * 1e9)), dir_fd=f)
posix.utime(support.TESTFN, dir_fd=f,
times=(int(now), int((now - int(now)) * 1e9)))
# try dir_fd and follow_symlinks together
if os.utime in os.supports_follow_symlinks:
try:
posix.utime(support.TESTFN, follow_symlinks=False, dir_fd=f)
except ValueError:
# whoops! using both together not supported on this platform.
pass
finally:
posix.close(f)
@unittest.skipUnless(os.link in os.supports_dir_fd, "test needs dir_fd support in os.link()")
def test_link_dir_fd(self):
f = posix.open(posix.getcwd(), posix.O_RDONLY)
try:
posix.link(support.TESTFN, support.TESTFN + 'link', src_dir_fd=f, dst_dir_fd=f)
# should have same inodes
self.assertEqual(posix.stat(support.TESTFN)[1],
posix.stat(support.TESTFN + 'link')[1])
finally:
posix.close(f)
support.unlink(support.TESTFN + 'link')
@unittest.skipUnless(os.mkdir in os.supports_dir_fd, "test needs dir_fd support in os.mkdir()")
def test_mkdir_dir_fd(self):
f = posix.open(posix.getcwd(), posix.O_RDONLY)
try:
posix.mkdir(support.TESTFN + 'dir', dir_fd=f)
posix.stat(support.TESTFN + 'dir') # should not raise exception
finally:
posix.close(f)
support.rmtree(support.TESTFN + 'dir')
@unittest.skipUnless((os.mknod in os.supports_dir_fd) and hasattr(stat, 'S_IFIFO'),
"test requires both stat.S_IFIFO and dir_fd support for os.mknod()")
def test_mknod_dir_fd(self):
# Test using mknodat() to create a FIFO (the only use specified
# by POSIX).
support.unlink(support.TESTFN)
mode = stat.S_IFIFO | stat.S_IRUSR | stat.S_IWUSR
f = posix.open(posix.getcwd(), posix.O_RDONLY)
try:
posix.mknod(support.TESTFN, mode, 0, dir_fd=f)
except OSError as e:
# Some old systems don't allow unprivileged users to use
# mknod(), or only support creating device nodes.
self.assertIn(e.errno, (errno.EPERM, errno.EINVAL))
else:
self.assertTrue(stat.S_ISFIFO(posix.stat(support.TESTFN).st_mode))
finally:
posix.close(f)
@unittest.skipUnless(os.open in os.supports_dir_fd, "test needs dir_fd support in os.open()")
def test_open_dir_fd(self):
support.unlink(support.TESTFN)
with open(support.TESTFN, 'w') as outfile:
outfile.write("testline\n")
a = posix.open(posix.getcwd(), posix.O_RDONLY)
b = posix.open(support.TESTFN, posix.O_RDONLY, dir_fd=a)
try:
res = posix.read(b, 9).decode(encoding="utf-8")
self.assertEqual("testline\n", res)
finally:
posix.close(a)
posix.close(b)
@unittest.skipUnless(os.readlink in os.supports_dir_fd, "test needs dir_fd support in os.readlink()")
def test_readlink_dir_fd(self):
os.symlink(support.TESTFN, support.TESTFN + 'link')
f = posix.open(posix.getcwd(), posix.O_RDONLY)
try:
self.assertEqual(posix.readlink(support.TESTFN + 'link'),
posix.readlink(support.TESTFN + 'link', dir_fd=f))
finally:
support.unlink(support.TESTFN + 'link')
posix.close(f)
@unittest.skipUnless(os.rename in os.supports_dir_fd, "test needs dir_fd support in os.rename()")
def test_rename_dir_fd(self):
support.unlink(support.TESTFN)
support.create_empty_file(support.TESTFN + 'ren')
f = posix.open(posix.getcwd(), posix.O_RDONLY)
try:
posix.rename(support.TESTFN + 'ren', support.TESTFN, src_dir_fd=f, dst_dir_fd=f)
except:
posix.rename(support.TESTFN + 'ren', support.TESTFN)
raise
else:
posix.stat(support.TESTFN) # should not raise exception
finally:
posix.close(f)
@unittest.skipUnless(os.symlink in os.supports_dir_fd, "test needs dir_fd support in os.symlink()")
def test_symlink_dir_fd(self):
f = posix.open(posix.getcwd(), posix.O_RDONLY)
try:
posix.symlink(support.TESTFN, support.TESTFN + 'link', dir_fd=f)
self.assertEqual(posix.readlink(support.TESTFN + 'link'), support.TESTFN)
finally:
posix.close(f)
support.unlink(support.TESTFN + 'link')
@unittest.skipUnless(os.unlink in os.supports_dir_fd, "test needs dir_fd support in os.unlink()")
def test_unlink_dir_fd(self):
f = posix.open(posix.getcwd(), posix.O_RDONLY)
support.create_empty_file(support.TESTFN + 'del')
posix.stat(support.TESTFN + 'del') # should not raise exception
try:
posix.unlink(support.TESTFN + 'del', dir_fd=f)
except:
support.unlink(support.TESTFN + 'del')
raise
else:
self.assertRaises(OSError, posix.stat, support.TESTFN + 'link')
finally:
posix.close(f)
@unittest.skipUnless(os.mkfifo in os.supports_dir_fd, "test needs dir_fd support in os.mkfifo()")
def test_mkfifo_dir_fd(self):
support.unlink(support.TESTFN)
f = posix.open(posix.getcwd(), posix.O_RDONLY)
try:
posix.mkfifo(support.TESTFN, stat.S_IRUSR | stat.S_IWUSR, dir_fd=f)
self.assertTrue(stat.S_ISFIFO(posix.stat(support.TESTFN).st_mode))
finally:
posix.close(f)
requires_sched_h = unittest.skipUnless(hasattr(posix, 'sched_yield'),
"don't have scheduling support")
requires_sched_affinity = unittest.skipUnless(hasattr(posix, 'sched_setaffinity'),
"don't have sched affinity support")
@requires_sched_h
def test_sched_yield(self):
# This has no error conditions (at least on Linux).
posix.sched_yield()
@requires_sched_h
@unittest.skipUnless(hasattr(posix, 'sched_get_priority_max'),
"requires sched_get_priority_max()")
def test_sched_priority(self):
# Round-robin usually has interesting priorities.
pol = posix.SCHED_RR
lo = posix.sched_get_priority_min(pol)
hi = posix.sched_get_priority_max(pol)
self.assertIsInstance(lo, int)
self.assertIsInstance(hi, int)
self.assertGreaterEqual(hi, lo)
# OSX evidently just returns 15 without checking the argument.
if sys.platform != "darwin":
self.assertRaises(OSError, posix.sched_get_priority_min, -23)
self.assertRaises(OSError, posix.sched_get_priority_max, -23)
@unittest.skipUnless(hasattr(posix, 'sched_setscheduler'), "can't change scheduler")
def test_get_and_set_scheduler_and_param(self):
possible_schedulers = [sched for name, sched in posix.__dict__.items()
if name.startswith("SCHED_")]
mine = posix.sched_getscheduler(0)
self.assertIn(mine, possible_schedulers)
try:
parent = posix.sched_getscheduler(os.getppid())
except OSError as e:
if e.errno != errno.EPERM:
raise
else:
self.assertIn(parent, possible_schedulers)
self.assertRaises(OSError, posix.sched_getscheduler, -1)
self.assertRaises(OSError, posix.sched_getparam, -1)
param = posix.sched_getparam(0)
self.assertIsInstance(param.sched_priority, int)
# POSIX states that calling sched_setparam() or sched_setscheduler() on
# a process with a scheduling policy other than SCHED_FIFO or SCHED_RR
# is implementation-defined: NetBSD and FreeBSD can return EINVAL.
if not sys.platform.startswith(('freebsd', 'netbsd')):
try:
posix.sched_setscheduler(0, mine, param)
posix.sched_setparam(0, param)
except OSError as e:
if e.errno != errno.EPERM:
raise
self.assertRaises(OSError, posix.sched_setparam, -1, param)
self.assertRaises(OSError, posix.sched_setscheduler, -1, mine, param)
self.assertRaises(TypeError, posix.sched_setscheduler, 0, mine, None)
self.assertRaises(TypeError, posix.sched_setparam, 0, 43)
param = posix.sched_param(None)
self.assertRaises(TypeError, posix.sched_setparam, 0, param)
large = 214748364700
param = posix.sched_param(large)
self.assertRaises(OverflowError, posix.sched_setparam, 0, param)
param = posix.sched_param(sched_priority=-large)
self.assertRaises(OverflowError, posix.sched_setparam, 0, param)
@unittest.skipUnless(hasattr(posix, "sched_rr_get_interval"), "no function")
def test_sched_rr_get_interval(self):
try:
interval = posix.sched_rr_get_interval(0)
except OSError as e:
# This likely means that sched_rr_get_interval is only valid for
# processes with the SCHED_RR scheduler in effect.
if e.errno != errno.EINVAL:
raise
self.skipTest("only works on SCHED_RR processes")
self.assertIsInstance(interval, float)
# Reasonable constraints, I think.
self.assertGreaterEqual(interval, 0.)
self.assertLess(interval, 1.)
@requires_sched_affinity
def test_sched_getaffinity(self):
mask = posix.sched_getaffinity(0)
self.assertIsInstance(mask, set)
self.assertGreaterEqual(len(mask), 1)
self.assertRaises(OSError, posix.sched_getaffinity, -1)
for cpu in mask:
self.assertIsInstance(cpu, int)
self.assertGreaterEqual(cpu, 0)
self.assertLess(cpu, 1 << 32)
@requires_sched_affinity
def test_sched_setaffinity(self):
mask = posix.sched_getaffinity(0)
if len(mask) > 1:
# Empty masks are forbidden
mask.pop()
posix.sched_setaffinity(0, mask)
self.assertEqual(posix.sched_getaffinity(0), mask)
self.assertRaises(OSError, posix.sched_setaffinity, 0, [])
self.assertRaises(ValueError, posix.sched_setaffinity, 0, [-10])
self.assertRaises(OverflowError, posix.sched_setaffinity, 0, [1<<128])
self.assertRaises(OSError, posix.sched_setaffinity, -1, mask)
def test_rtld_constants(self):
# check presence of major RTLD_* constants
posix.RTLD_LAZY
posix.RTLD_NOW
posix.RTLD_GLOBAL
posix.RTLD_LOCAL
@unittest.skipUnless(hasattr(os, 'SEEK_HOLE'),
"test needs an OS that reports file holes")
def test_fs_holes(self):
# Even if the filesystem doesn't report holes,
# if the OS supports it the SEEK_* constants
# will be defined and will have a consistent
# behaviour:
# os.SEEK_DATA = current position
# os.SEEK_HOLE = end of file position
with open(support.TESTFN, 'r+b') as fp:
fp.write(b"hello")
fp.flush()
size = fp.tell()
fno = fp.fileno()
try :
for i in range(size):
self.assertEqual(i, os.lseek(fno, i, os.SEEK_DATA))
self.assertLessEqual(size, os.lseek(fno, i, os.SEEK_HOLE))
self.assertRaises(OSError, os.lseek, fno, size, os.SEEK_DATA)
self.assertRaises(OSError, os.lseek, fno, size, os.SEEK_HOLE)
except OSError :
# Some OSs claim to support SEEK_HOLE/SEEK_DATA
# but it is not true.
# For instance:
# http://lists.freebsd.org/pipermail/freebsd-amd64/2012-January/014332.html
raise unittest.SkipTest("OSError raised!")
def test_path_error2(self):
"""
Test functions that call path_error2(), providing two filenames in their exceptions.
"""
for name in ("rename", "replace", "link"):
function = getattr(os, name, None)
if function is None:
continue
for dst in ("noodly2", support.TESTFN):
try:
function('doesnotexistfilename', dst)
except OSError as e:
self.assertIn("'doesnotexistfilename' -> '{}'".format(dst), str(e))
break
else:
self.fail("No valid path_error2() test for os." + name)
class PosixGroupsTester(unittest.TestCase):
def setUp(self):
if posix.getuid() != 0:
raise unittest.SkipTest("not enough privileges")
if not hasattr(posix, 'getgroups'):
raise unittest.SkipTest("need posix.getgroups")
if sys.platform == 'darwin':
raise unittest.SkipTest("getgroups(2) is broken on OSX")
self.saved_groups = posix.getgroups()
def tearDown(self):
if hasattr(posix, 'setgroups'):
posix.setgroups(self.saved_groups)
elif hasattr(posix, 'initgroups'):
name = pwd.getpwuid(posix.getuid()).pw_name
posix.initgroups(name, self.saved_groups[0])
@unittest.skipUnless(hasattr(posix, 'initgroups'),
"test needs posix.initgroups()")
def test_initgroups(self):
# find missing group
g = max(self.saved_groups or [0]) + 1
name = pwd.getpwuid(posix.getuid()).pw_name
posix.initgroups(name, g)
self.assertIn(g, posix.getgroups())
@unittest.skipUnless(hasattr(posix, 'setgroups'),
"test needs posix.setgroups()")
def test_setgroups(self):
for groups in [[0], list(range(16))]:
posix.setgroups(groups)
self.assertListEqual(groups, posix.getgroups())
def test_main():
try:
support.run_unittest(PosixTester, PosixGroupsTester)
finally:
support.reap_children()
if __name__ == '__main__':
test_main()
>>>>>>> b875702c9c06ab5012e52ff4337439b03918f453
| ArcherSys/ArcherSys | Lib/test/test_posix.py | Python | mit | 147,179 |
"""
WSGI config for pykrd project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "pykrd.settings")
application = get_wsgi_application()
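# Illustrative deployment sketch (not part of the original file): any WSGI
# server can serve the ``application`` object defined above. The server
# choice, bind address, and worker count below are assumptions, not project
# configuration.
#
#   gunicorn pykrd.wsgi:application --bind 0.0.0.0:8000 --workers 3
#
# For a quick sanity check without a server, the callable can be imported
# directly (requires Django and the project settings to be importable):
#
#   python -c "from pykrd.wsgi import application; print(callable(application))"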
| python-krasnodar/python-krasnodar.ru | src/pykrd/wsgi.py | Python | mit | 388 |
import os
import cv2
import numpy as np
import pandas as pd
from scipy import stats
from plotnine import ggplot, aes, geom_line, scale_x_continuous, scale_color_manual, labs
from plantcv.plantcv import fatal_error
from plantcv.plantcv import deprecation_warning
from plantcv.plantcv import params
from plantcv.plantcv._debug import _debug
from plantcv.plantcv import outputs
from plantcv.plantcv.visualize import histogram
def analyze_color(rgb_img, mask, hist_plot_type=None, colorspaces="all", label="default"):
"""Analyze the color properties of an image object
Inputs:
rgb_img = RGB image data
mask = Binary mask made from selected contours
    hist_plot_type = None, 'all', 'rgb', 'lab' or 'hsv' (to be deprecated)
colorspaces = 'all', 'rgb', 'lab', or 'hsv'
label = optional label parameter, modifies the variable name of observations recorded
Returns:
analysis_image = histogram output
:param rgb_img: numpy.ndarray
:param mask: numpy.ndarray
:param colorspaces: str
:param hist_plot_type: str
:param label: str
:return analysis_images: list
"""
# Save user debug setting
debug = params.debug
if hist_plot_type is not None:
deprecation_warning("'hist_plot_type' will be deprecated in a future version of PlantCV. "
"Please use 'colorspaces' instead.")
colorspaces = hist_plot_type
if len(np.shape(rgb_img)) < 3:
fatal_error("rgb_img must be an RGB image")
# Mask the input image
masked = cv2.bitwise_and(rgb_img, rgb_img, mask=mask)
# Extract the blue, green, and red channels
b, g, r = cv2.split(masked)
# Convert the BGR image to LAB
lab = cv2.cvtColor(masked, cv2.COLOR_BGR2LAB)
# Extract the lightness, green-magenta, and blue-yellow channels
l, m, y = cv2.split(lab)
# Convert the BGR image to HSV
hsv = cv2.cvtColor(masked, cv2.COLOR_BGR2HSV)
# Extract the hue, saturation, and value channels
h, s, v = cv2.split(hsv)
# Color channel dictionary
channels = {"b": b, "g": g, "r": r, "l": l, "m": m, "y": y, "h": h, "s": s, "v": v}
# Histogram plot types
hist_types = {"all": ("b", "g", "r", "l", "m", "y", "h", "s", "v"),
"rgb": ("b", "g", "r"),
"lab": ("l", "m", "y"),
"hsv": ("h", "s", "v")}
if colorspaces.lower() not in hist_types:
fatal_error(f"Colorspace '{colorspaces}' is not supported, must be be one of the following: "
f"{', '.join(map(str, hist_types.keys()))}")
# Calculate histogram
params.debug = None
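    # Each channel entry below keeps only the "proportion of pixels (%)" column of the data frame
    # returned by visualize.histogram(..., hist_data=True) (index [1] of its (figure, data frame) return value).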
histograms = {
"b": {"label": "blue", "graph_color": "blue",
"hist": histogram(channels["b"], mask, 256, 0, 255,
hist_data=True)[1]['proportion of pixels (%)'].tolist()},
"g": {"label": "green", "graph_color": "forestgreen",
"hist": histogram(channels["g"], mask, 256, 0, 255,
hist_data=True)[1]['proportion of pixels (%)'].tolist()},
"r": {"label": "red", "graph_color": "red",
"hist": histogram(channels["r"], mask, 256, 0, 255,
hist_data=True)[1]['proportion of pixels (%)'].tolist()},
"l": {"label": "lightness", "graph_color": "dimgray",
"hist": histogram(channels["l"], mask, 256, 0, 255,
hist_data=True)[1]['proportion of pixels (%)'].tolist()},
"m": {"label": "green-magenta", "graph_color": "magenta",
"hist": histogram(channels["m"], mask, 256, 0, 255,
hist_data=True)[1]['proportion of pixels (%)'].tolist()},
"y": {"label": "blue-yellow", "graph_color": "yellow",
"hist": histogram(channels["y"], mask, 256, 0, 255,
hist_data=True)[1]['proportion of pixels (%)'].tolist()},
"h": {"label": "hue", "graph_color": "blueviolet",
"hist": histogram(channels["h"], mask, 256, 0, 255,
hist_data=True)[1]['proportion of pixels (%)'].tolist()},
"s": {"label": "saturation", "graph_color": "cyan",
"hist": histogram(channels["s"], mask, 256, 0, 255,
hist_data=True)[1]['proportion of pixels (%)'].tolist()},
"v": {"label": "value", "graph_color": "orange",
"hist": histogram(channels["v"], mask, 256, 0, 255,
hist_data=True)[1]['proportion of pixels (%)'].tolist()}
}
# Restore user debug setting
params.debug = debug
# Create list of bin labels for 8-bit data
binval = np.arange(0, 256)
# Create a dataframe of bin labels and histogram data
dataset = pd.DataFrame({'bins': binval, 'blue': histograms["b"]["hist"],
'green': histograms["g"]["hist"], 'red': histograms["r"]["hist"],
'lightness': histograms["l"]["hist"], 'green-magenta': histograms["m"]["hist"],
'blue-yellow': histograms["y"]["hist"], 'hue': histograms["h"]["hist"],
'saturation': histograms["s"]["hist"], 'value': histograms["v"]["hist"]})
# Make the histogram figure using plotnine
if colorspaces.upper() == 'RGB':
df_rgb = pd.melt(dataset, id_vars=['bins'], value_vars=['blue', 'green', 'red'],
var_name='color Channel', value_name='proportion of pixels (%)')
hist_fig = (ggplot(df_rgb, aes(x='bins', y='proportion of pixels (%)', color='color Channel'))
+ geom_line()
+ scale_x_continuous(breaks=list(range(0, 256, 25)))
+ scale_color_manual(['blue', 'green', 'red'])
)
elif colorspaces.upper() == 'LAB':
df_lab = pd.melt(dataset, id_vars=['bins'],
value_vars=['lightness', 'green-magenta', 'blue-yellow'],
var_name='color Channel', value_name='proportion of pixels (%)')
hist_fig = (ggplot(df_lab, aes(x='bins', y='proportion of pixels (%)', color='color Channel'))
+ geom_line()
+ scale_x_continuous(breaks=list(range(0, 256, 25)))
+ scale_color_manual(['yellow', 'magenta', 'dimgray'])
)
elif colorspaces.upper() == 'HSV':
df_hsv = pd.melt(dataset, id_vars=['bins'],
value_vars=['hue', 'saturation', 'value'],
var_name='color Channel', value_name='proportion of pixels (%)')
hist_fig = (ggplot(df_hsv, aes(x='bins', y='proportion of pixels (%)', color='color Channel'))
+ geom_line()
+ scale_x_continuous(breaks=list(range(0, 256, 25)))
+ scale_color_manual(['blueviolet', 'cyan', 'orange'])
)
elif colorspaces.upper() == 'ALL':
s = pd.Series(['blue', 'green', 'red', 'lightness', 'green-magenta',
'blue-yellow', 'hue', 'saturation', 'value'], dtype="category")
color_channels = ['blue', 'yellow', 'green', 'magenta', 'blueviolet',
'dimgray', 'red', 'cyan', 'orange']
df_all = pd.melt(dataset, id_vars=['bins'], value_vars=s, var_name='color Channel',
value_name='proportion of pixels (%)')
hist_fig = (ggplot(df_all, aes(x='bins', y='proportion of pixels (%)', color='color Channel'))
+ geom_line()
+ scale_x_continuous(breaks=list(range(0, 256, 25)))
+ scale_color_manual(color_channels)
)
hist_fig = hist_fig + labs(x="Pixel intensity", y="Proportion of pixels (%)")
    # Hue values of zero are red, but zero is also the value assigned to pixels whose hue is undefined. Hue is
    # undefined for achromatic pixels (saturation of zero, i.e. R = G = B), which includes the black, masked-out
    # background. Therefore, hue values of 0 are excluded from the calculations below.
# Calculate the median hue value (median is rescaled from the encoded 0-179 range to the 0-359 degree range)
hue_median = np.median(h[np.where(h > 0)]) * 2
# Calculate the circular mean and standard deviation of the encoded hue values
# The mean and standard-deviation are rescaled from the encoded 0-179 range to the 0-359 degree range
hue_circular_mean = stats.circmean(h[np.where(h > 0)], high=179, low=0) * 2
hue_circular_std = stats.circstd(h[np.where(h > 0)], high=179, low=0) * 2
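    # Illustrative example (not from the original source): OpenCV encodes hue on a 0-179 scale, so an
    # encoded hue of 90 corresponds to 90 * 2 = 180 degrees after the rescaling above.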
# Plot or print the histogram
analysis_image = hist_fig
_debug(visual=hist_fig, filename=os.path.join(params.debug_outdir, str(params.device) + '_analyze_color_hist.png'))
# Store into global measurements
# RGB signal values are in an unsigned 8-bit scale of 0-255
rgb_values = [i for i in range(0, 256)]
# Hue values are in a 0-359 degree scale, every 2 degrees at the midpoint of the interval
hue_values = [i * 2 + 1 for i in range(0, 180)]
# Percentage values on a 0-100 scale (lightness, saturation, and value)
percent_values = [round((i / 255) * 100, 2) for i in range(0, 256)]
# Diverging values on a -128 to 127 scale (green-magenta and blue-yellow)
diverging_values = [i for i in range(-128, 128)]
if colorspaces.upper() == 'RGB' or colorspaces.upper() == 'ALL':
outputs.add_observation(sample=label, variable='blue_frequencies', trait='blue frequencies',
method='plantcv.plantcv.analyze_color', scale='frequency', datatype=list,
value=histograms["b"]["hist"], label=rgb_values)
outputs.add_observation(sample=label, variable='green_frequencies', trait='green frequencies',
method='plantcv.plantcv.analyze_color', scale='frequency', datatype=list,
value=histograms["g"]["hist"], label=rgb_values)
outputs.add_observation(sample=label, variable='red_frequencies', trait='red frequencies',
method='plantcv.plantcv.analyze_color', scale='frequency', datatype=list,
value=histograms["r"]["hist"], label=rgb_values)
if colorspaces.upper() == 'LAB' or colorspaces.upper() == 'ALL':
outputs.add_observation(sample=label, variable='lightness_frequencies', trait='lightness frequencies',
method='plantcv.plantcv.analyze_color', scale='frequency', datatype=list,
value=histograms["l"]["hist"], label=percent_values)
outputs.add_observation(sample=label, variable='green-magenta_frequencies',
trait='green-magenta frequencies',
method='plantcv.plantcv.analyze_color', scale='frequency', datatype=list,
value=histograms["m"]["hist"], label=diverging_values)
outputs.add_observation(sample=label, variable='blue-yellow_frequencies', trait='blue-yellow frequencies',
method='plantcv.plantcv.analyze_color', scale='frequency', datatype=list,
value=histograms["y"]["hist"], label=diverging_values)
if colorspaces.upper() == 'HSV' or colorspaces.upper() == 'ALL':
outputs.add_observation(sample=label, variable='hue_frequencies', trait='hue frequencies',
method='plantcv.plantcv.analyze_color', scale='frequency', datatype=list,
value=histograms["h"]["hist"][0:180], label=hue_values)
outputs.add_observation(sample=label, variable='saturation_frequencies', trait='saturation frequencies',
method='plantcv.plantcv.analyze_color', scale='frequency', datatype=list,
value=histograms["s"]["hist"], label=percent_values)
outputs.add_observation(sample=label, variable='value_frequencies', trait='value frequencies',
method='plantcv.plantcv.analyze_color', scale='frequency', datatype=list,
value=histograms["v"]["hist"], label=percent_values)
# Always save hue stats
outputs.add_observation(sample=label, variable='hue_circular_mean', trait='hue circular mean',
method='plantcv.plantcv.analyze_color', scale='degrees', datatype=float,
value=hue_circular_mean, label='degrees')
outputs.add_observation(sample=label, variable='hue_circular_std', trait='hue circular standard deviation',
method='plantcv.plantcv.analyze_color', scale='degrees', datatype=float,
value=hue_circular_std, label='degrees')
outputs.add_observation(sample=label, variable='hue_median', trait='hue median',
method='plantcv.plantcv.analyze_color', scale='degrees', datatype=float,
value=hue_median, label='degrees')
# Store images
outputs.images.append(analysis_image)
return analysis_image
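# A minimal usage sketch (not part of the original module); the file name and label are
# assumptions for illustration only:
#
#     from plantcv import plantcv as pcv
#     img, path, fname = pcv.readimage("plant.png")   # assumed RGB input image
#     bin_mask = ...                                   # binary mask produced earlier in the workflow
#     hist = pcv.analyze_color(rgb_img=img, mask=bin_mask, colorspaces="hsv", label="plant1")
#     # Summary statistics (e.g. hue_circular_mean) are then available via pcv.outputs.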
| stiphyMT/plantcv | plantcv/plantcv/analyze_color.py | Python | mit | 13,218 |