text (stringlengths 4 to 1.02M) | meta (dict)
---|---|
from __future__ import print_function, unicode_literals
__author__ = 'Miroslav Shubernetskiy'
__email__ = '[email protected]'
__version__ = '0.1.5'
| {
"content_hash": "c50db6c217c1f387c8188f90b9519049",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 55,
"avg_line_length": 25.5,
"alnum_prop": 0.6797385620915033,
"repo_name": "pombredanne/django-rest-framework-braces",
"id": "0dac0a8a28105d71fd2de1c66d283e73bcbdd4ec",
"size": "177",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "drf_braces/__init__.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1881"
},
{
"name": "Python",
"bytes": "85403"
}
],
"symlink_target": ""
} |
from covata.delta import Client, FileSystemKeyStore
key_store = FileSystemKeyStore("~/keystore/", "passPhrase")
client = Client(key_store)
# option 1: via identity object
identity = client.get_identity("8e91cb8c-1ea5-4b69-bedf-9a14940cce44")
secret_1 = identity.create_secret("here is my secret")
# option 2: via client object
secret_2 = client.create_secret("8e91cb8c-1ea5-4b69-bedf-9a14940cce44",
"here is my secret")
| {
"content_hash": "ad0b9615921379368c7300a7c86a4bde",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 71,
"avg_line_length": 37.916666666666664,
"alnum_prop": 0.7120879120879121,
"repo_name": "Covata/delta-sdk-python",
"id": "181b23fbf728414d380162f6e35a1d0e38cd7e4d",
"size": "1072",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/snippets/creating_a_base_secret.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "133712"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import sys
import os
import tempfile
import time
import multiprocessing as mp
import unittest
import random
import mxnet as mx
import numpy as np
import math
from nose.tools import assert_raises
from mxnet.test_utils import check_consistency, set_default_context, assert_almost_equal
from mxnet.base import MXNetError
from mxnet import autograd
from numpy.testing import assert_allclose
from mxnet.cuda_utils import get_device_count
from mxnet.test_utils import rand_ndarray
curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
sys.path.insert(0, os.path.join(curr_path, '../unittest'))
from common import setup_module, with_seed, teardown, assert_raises_cudnn_not_satisfied
from test_gluon import *
from test_loss import *
from test_gluon_rnn import *
set_default_context(mx.gpu(0))
def check_rnn_layer(layer):
layer.collect_params().initialize(ctx=[mx.cpu(0), mx.gpu(0)])
with mx.gpu(0):
x = mx.nd.ones((10, 16, 30))
states = layer.begin_state(16)
go, gs = layer(x, states)
with mx.cpu(0):
x = mx.nd.ones((10, 16, 30))
states = layer.begin_state(16)
co, cs = layer(x, states)
# atol of 1e-6 required, as exposed by seed 2124685726
assert_almost_equal(go.asnumpy(), co.asnumpy(), rtol=1e-2, atol=1e-6)
for g, c in zip(gs, cs):
assert_almost_equal(g.asnumpy(), c.asnumpy(), rtol=1e-2, atol=1e-6)
@with_seed()
def check_rnn_layer_w_rand_inputs(layer):
layer.collect_params().initialize(ctx=[mx.cpu(0), mx.gpu(0)])
x = mx.nd.uniform(shape=(10, 16, 30))
with mx.gpu(0):
x = x.copyto(mx.gpu(0))
states = layer.begin_state(16)
go, gs = layer(x, states)
with mx.cpu(0):
x = x.copyto(mx.cpu(0))
states = layer.begin_state(16)
co, cs = layer(x, states)
assert_almost_equal(go.asnumpy(), co.asnumpy(), rtol=1e-2, atol=1e-6)
for g, c in zip(gs, cs):
assert_almost_equal(g.asnumpy(), c.asnumpy(), rtol=1e-2, atol=1e-6)
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='7.2.1')
def test_lstmp():
hidden_size, projection_size = 3, 2
rtol, atol = 1e-2, 1e-2
batch_size, seq_len = 7, 11
input_size = 5
lstm_input = mx.nd.uniform(shape=(seq_len, batch_size, input_size), ctx=mx.gpu(0))
shapes = {'i2h_weight': (hidden_size*4, input_size),
'h2h_weight': (hidden_size*4, projection_size),
'i2h_bias': (hidden_size*4,),
'h2h_bias': (hidden_size*4,),
'h2r_weight': (projection_size, hidden_size)}
weights = {k: rand_ndarray(v) for k, v in shapes.items()}
lstm_layer = gluon.rnn.LSTM(hidden_size, projection_size=projection_size,
input_size=input_size, prefix='lstm0_')
lstm_cell = gluon.contrib.rnn.LSTMPCell(hidden_size=hidden_size,
projection_size=projection_size,
input_size=input_size,
prefix='lstm0_l0_')
lstm_layer.initialize(ctx=mx.gpu(0))
lstm_cell.initialize(ctx=mx.gpu(0))
layer_params = lstm_layer.collect_params()
cell_params = lstm_cell.collect_params()
for k, v in weights.items():
layer_params['lstm0_l0_'+k].set_data(v.copy())
cell_params['lstm0_l0_'+k].set_data(v.copy())
with autograd.record():
layer_output = lstm_layer(lstm_input.copy())
cell_output = lstm_cell.unroll(seq_len, lstm_input.copy(), layout='TNC',
merge_outputs=True)[0]
assert_almost_equal(layer_output.asnumpy(), cell_output.asnumpy(), rtol=rtol, atol=atol)
layer_output.backward()
cell_output.backward()
for k, v in weights.items():
layer_grad = layer_params['lstm0_l0_'+k].grad()
cell_grad = cell_params['lstm0_l0_'+k].grad()
print('checking gradient for {}'.format('lstm0_l0_'+k))
assert_almost_equal(layer_grad.asnumpy(), cell_grad.asnumpy(),
rtol=rtol, atol=atol)
check_rnn_layer_forward(gluon.rnn.LSTM(10, 2, projection_size=5), mx.nd.ones((8, 3, 20)))
check_rnn_layer_forward(gluon.rnn.LSTM(10, 2, projection_size=5, bidirectional=True), mx.nd.ones((8, 3, 20)), [mx.nd.ones((4, 3, 5)), mx.nd.ones((4, 3, 10))])
check_rnn_layer_forward(gluon.rnn.LSTM(10, 2, dropout=0.5, projection_size=5), mx.nd.ones((8, 3, 20)),
run_only=True)
check_rnn_layer_forward(gluon.rnn.LSTM(10, 2, bidirectional=True, dropout=0.5, projection_size=5),
mx.nd.ones((8, 3, 20)),
[mx.nd.ones((4, 3, 5)), mx.nd.ones((4, 3, 10))], run_only=True)
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='7.2.1')
def test_lstm_clip():
class OnesDiag(mx.init.Initializer):
"""Initializes the diagonal of a potentially rectangular 2D array to 1's with the rest 0's
"""
def __init__(self):
super(OnesDiag, self).__init__()
def _init_weight(self, _, arr):
arr[:] = 0
min_size = min(arr.shape[0], arr.shape[1])
for i in range(min_size):
arr[i,i] = 1
# CuDNN clips the hidden state before the recurrent projection, so the output states may
# exceed the clipped range. However, a projection matrix with 1's along the diagonal will
# pass the first 'projection_size' elements intact, and thus give evidence that the
# clipping is being performed. Sadly the projection_size must be strictly less than
# the hidden_size, so full hidden state visibility is not possible with projection enabled.
hidden_size, projection_size = 2049, 2048
batch_size, seq_len = 32, 80
input_size = 50
clip_min, clip_max, clip_nan = -5, 5, True
lstm_input = mx.nd.uniform(shape=(seq_len, batch_size, input_size), ctx=mx.gpu(0))
lstm_states = [mx.nd.uniform(shape=(2, batch_size, projection_size), ctx=mx.gpu(0)),
mx.nd.uniform(shape=(2, batch_size, hidden_size), ctx=mx.gpu(0))]
lstm_layer = gluon.rnn.LSTM(hidden_size, projection_size=projection_size,
input_size=input_size, prefix='lstm0_',
h2r_weight_initializer=OnesDiag(),
bidirectional=True,
state_clip_min=clip_min,
state_clip_max=clip_max,
state_clip_nan=clip_nan)
lstm_layer.initialize(ctx=mx.gpu(0))
with autograd.record():
_, layer_output_states = lstm_layer(lstm_input, lstm_states)
cell_states = layer_output_states[0].asnumpy()
assert (cell_states >= clip_min).all() and (cell_states <= clip_max).all()
assert not np.isnan(cell_states).any()
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
def test_rnn_layer():
check_rnn_layer(gluon.rnn.RNN(100, num_layers=3))
check_rnn_layer(gluon.rnn.RNN(100, activation='tanh', num_layers=3))
check_rnn_layer(gluon.rnn.LSTM(100, num_layers=3))
check_rnn_layer(gluon.rnn.GRU(100, num_layers=3))
check_rnn_layer(gluon.rnn.LSTM(100, num_layers=3, bidirectional=True))
check_rnn_layer_w_rand_inputs(gluon.rnn.LSTM(100, num_layers=3, bidirectional=True))
def check_layer_bidirectional(size, in_size, proj_size):
class RefBiLSTM(gluon.Block):
def __init__(self, size, proj_size, **kwargs):
super(RefBiLSTM, self).__init__(**kwargs)
with self.name_scope():
self._lstm_fwd = gluon.rnn.LSTM(size, projection_size=proj_size, bidirectional=False, prefix='l0')
self._lstm_bwd = gluon.rnn.LSTM(size, projection_size=proj_size, bidirectional=False, prefix='r0')
def forward(self, inpt):
fwd = self._lstm_fwd(inpt)
bwd_inpt = nd.flip(inpt, 0)
bwd = self._lstm_bwd(bwd_inpt)
bwd = nd.flip(bwd, 0)
return nd.concat(fwd, bwd, dim=2)
weights = {}
for d in ['l', 'r']:
weights['lstm_{}0_i2h_weight'.format(d)] = mx.random.uniform(shape=(size*4, in_size))
if proj_size:
weights['lstm_{}0_h2h_weight'.format(d)] = mx.random.uniform(shape=(size*4, proj_size))
weights['lstm_{}0_h2r_weight'.format(d)] = mx.random.uniform(shape=(proj_size, size))
else:
weights['lstm_{}0_h2h_weight'.format(d)] = mx.random.uniform(shape=(size*4, size))
weights['lstm_{}0_i2h_bias'.format(d)] = mx.random.uniform(shape=(size*4,))
weights['lstm_{}0_h2h_bias'.format(d)] = mx.random.uniform(shape=(size*4,))
net = gluon.rnn.LSTM(size, projection_size=proj_size, bidirectional=True, prefix='lstm_')
ref_net = RefBiLSTM(size, proj_size, prefix='lstm_')
net.initialize()
ref_net.initialize()
net_params = net.collect_params()
ref_net_params = ref_net.collect_params()
for k in weights:
net_params[k].set_data(weights[k])
ref_net_params[k.replace('l0', 'l0l0').replace('r0', 'r0l0')].set_data(weights[k])
data = mx.random.uniform(shape=(11, 10, in_size))
assert_allclose(net(data).asnumpy(), ref_net(data).asnumpy(), rtol=1e-7, atol=1e-7)
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
def test_layer_bidirectional():
check_layer_bidirectional(7, 5, 0)
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='7.2.1')
def test_layer_bidirectional_proj():
check_layer_bidirectional(7, 5, 3)
@with_seed()
@assert_raises_cudnn_not_satisfied(min_version='5.1.10')
def test_rnn_layer_begin_state_type():
fake_data = nd.random.uniform(shape=(3, 5, 7), dtype='float16')
modeling_layer = gluon.rnn.LSTM(hidden_size=11, num_layers=2, dropout=0.2, bidirectional=True)
modeling_layer.cast('float16')
modeling_layer.initialize()
modeling_layer(fake_data)
def test_gluon_ctc_consistency():
loss = mx.gluon.loss.CTCLoss()
data = mx.nd.arange(0, 4, repeat=40, ctx=mx.gpu(0)).reshape((2,20,4)).flip(axis=0)
cpu_label = mx.nd.array([[2,1,-1,-1],[3,2,2,-1]], ctx=mx.cpu(0))
gpu_label = mx.nd.array([[2,1,-1,-1],[3,2,2,-1]], ctx=mx.gpu(0))
cpu_data = data.copy().as_in_context(mx.cpu(0))
cpu_data.attach_grad()
with mx.autograd.record():
l_cpu = loss(cpu_data, cpu_label)
l_cpu.backward()
gpu_data = data.copyto(mx.gpu(0))
gpu_data.attach_grad()
with mx.autograd.record():
l_gpu = loss(gpu_data, gpu_label)
l_gpu.backward()
assert_almost_equal(cpu_data.grad.asnumpy(), gpu_data.grad.asnumpy(), atol=1e-3, rtol=1e-3)
@with_seed()
def test_global_norm_clip_multi_device():
for check_isfinite in [True, False]:
x1 = mx.nd.ones((3,3), ctx=mx.gpu(0))
x2 = mx.nd.ones((4,4), ctx=mx.cpu(0))
norm = gluon.utils.clip_global_norm([x1, x2], 1.0, check_isfinite=check_isfinite)
if check_isfinite:
assert norm == 5.0
else:
assert norm.asscalar() == 5.0
assert_almost_equal(x1.asnumpy(), np.ones((3, 3)) / 5)
assert_almost_equal(x2.asnumpy(), np.ones((4, 4)) / 5)
def _check_batchnorm_result(input, num_devices=1, cuda=False):
from mxnet.gluon.utils import split_and_load
def _find_bn(module):
if isinstance(module, (mx.gluon.nn.BatchNorm, mx.gluon.contrib.nn.SyncBatchNorm)):
return module
elif isinstance(module.module, (mx.gluon.nn.BatchNorm, mx.gluon.contrib.nn.SyncBatchNorm)):
return module.module
raise RuntimeError('BN not found')
def _syncParameters(bn1, bn2, ctx):
ctx = input.context
bn2.gamma.set_data(bn1.gamma.data(ctx))
bn2.beta.set_data(bn1.beta.data(ctx))
bn2.running_mean.set_data(bn1.running_mean.data(ctx))
bn2.running_var.set_data(bn1.running_var.data(ctx))
input1 = input.copy()
input2 = input.copy()
if cuda:
input1 = input.as_in_context(mx.gpu(0))
ctx_list = [mx.gpu(i) for i in range(num_devices)]
else:
ctx_list = [mx.cpu(0) for _ in range(num_devices)]
nch = input.shape[1]
bn1 = mx.gluon.nn.BatchNorm(in_channels=nch)
bn2 = mx.gluon.contrib.nn.SyncBatchNorm(in_channels=nch, num_devices=num_devices)
bn1.initialize(ctx=ctx_list[0])
bn2.initialize(ctx=ctx_list)
# using the same values for gamma and beta
#_syncParameters(_find_bn(bn1), _find_bn(bn2), ctx_list[0])
input1.attach_grad()
inputs2 = split_and_load(input2, ctx_list, batch_axis=0)
for xi in inputs2:
xi.attach_grad()
with mx.autograd.record():
output1 = bn1(input1)
output2 = [bn2(xi) for xi in inputs2]
loss1 = (output1 ** 2).sum()
loss2 = [(output ** 2).sum() for output in output2]
mx.autograd.backward(loss1)
mx.autograd.backward(loss2)
output2 = mx.nd.concat(*[output.as_in_context(input.context) for output in output2], dim=0)
# assert forwarding
assert_almost_equal(input1.asnumpy(), input2.asnumpy(), atol=1e-3, rtol=1e-3)
assert_almost_equal(output1.asnumpy(), output2.asnumpy(), atol=1e-3, rtol=1e-3)
assert_almost_equal(_find_bn(bn1).running_mean.data(ctx_list[0]).asnumpy(),
_find_bn(bn2).running_mean.data(ctx_list[0]).asnumpy(),
atol=1e-3, rtol=1e-3)
assert_almost_equal(_find_bn(bn1).running_var.data(ctx_list[0]).asnumpy(),
_find_bn(bn2).running_var.data(ctx_list[0]).asnumpy(),
atol=1e-3, rtol=1e-3)
input2grad = mx.nd.concat(*[output.grad.as_in_context(input.context) for output in inputs2], dim=0)
assert_almost_equal(input1.grad.asnumpy(), input2grad.asnumpy(), atol=1e-3, rtol=1e-3)
@with_seed()
def test_sync_batchnorm():
num_devices = get_device_count()
# no need to use SyncBN with 1 gpu
if num_devices < 2:
sys.stderr.write('bypassing test: needs 2 or more gpus, found {} ...'.format(num_devices))
return
ndev = 2
# check with unsync version
for i in range(10):
_check_batchnorm_result(mx.nd.random.uniform(shape=(4, 1, 4, 4)),
num_devices=ndev, cuda=True)
@with_seed()
def test_symbol_block_fp16():
# Test case to verify initializing a SymbolBlock from a model whose params
# have a dtype other than fp32 (here, fp16).
# 1. Load a resnet model, cast it to fp16 and export
tmp = tempfile.mkdtemp()
tmpfile = os.path.join(tmp, 'resnet34_fp16')
ctx = mx.gpu(0)
net_fp32 = mx.gluon.model_zoo.vision.resnet34_v2(pretrained=True, ctx=ctx, root=tmp)
net_fp32.cast('float16')
net_fp32.hybridize()
data = mx.nd.zeros((1,3,224,224), dtype='float16', ctx=ctx)
net_fp32.forward(data)
net_fp32.export(tmpfile, 0)
# 2. Load the saved model and verify that all the params are loaded correctly,
# and choose one of the params to verify that its dtype is fp16.
sm = mx.sym.load(tmpfile + '-symbol.json')
inputs = mx.sym.var('data', dtype='float16')
net_fp16 = mx.gluon.SymbolBlock(sm, inputs)
net_fp16.collect_params().load(tmpfile + '-0000.params', ctx=ctx)
# 3. Get a conv layer's weight parameter name. The conv layer's weight param is
# expected to be of the casted dtype, fp16.
for param_name in net_fp16.params.keys():
if 'conv' in param_name and 'weight' in param_name:
break
assert np.dtype(net_fp16.params[param_name].dtype) == np.dtype(np.float16)
@with_seed()
def test_large_models():
ctx = default_context()
# Create model
net = gluon.nn.HybridSequential()
largest_num_features = 256
with net.name_scope():
net.add(nn.Conv2D(largest_num_features, 3))
net.hybridize()
net.initialize(mx.init.Normal(sigma=0.01), ctx=ctx)
# Compute the height (=width) of the square tensor of the given size in bytes
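# (Illustrative arithmetic, assuming for example 16 GB of total GPU memory: 20% of
# that is 3.2e9 bytes, and sqrt(3.2e9 / 256 / 4) ~= 1768, which rounds down to 1700.)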
def tensor_size(big_tensor_bytes):
bytes_per_float = 4
sz = int(math.sqrt(big_tensor_bytes / largest_num_features / bytes_per_float))
return (sz // 100) * 100
# The idea is to create models with large tensors of (say) 20% of the total memory.
# This in the past has given cudnnFind() trouble when it needed to allocate similar I/O's
# from the area carved out by the MXNET_GPU_MEM_POOL_RESERVE setting (by default 5%).
(free_mem_bytes, total_mem_bytes) = mx.context.gpu_memory_info(ctx.device_id)
start_size = tensor_size(0.20 * total_mem_bytes)
num_trials = 10
sys.stderr.write(' testing global memory of size {} ... '.format(total_mem_bytes))
sys.stderr.flush()
for i in range(num_trials):
sz = start_size - 10 * i
(height, width) = (sz,sz)
sys.stderr.write(" {}x{} ".format(height,width))
sys.stderr.flush()
data_in = nd.random_uniform(low=0, high=255, shape=(1, 3, height, width),
ctx=ctx, dtype="float32")
# Evaluate model
net(data_in).asnumpy()
if __name__ == '__main__':
import nose
nose.runmodule()
| {
"content_hash": "1eddac55a742af2615c5335ece33687b",
"timestamp": "",
"source": "github",
"line_count": 414,
"max_line_length": 162,
"avg_line_length": 41.40338164251208,
"alnum_prop": 0.6184003267020594,
"repo_name": "mlperf/training_results_v0.6",
"id": "febadf8a071a64c84a5cdf8ab908cf0e9b99ff03",
"size": "17927",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Fujitsu/benchmarks/resnet/implementations/mxnet/tests/python/gpu/test_gluon_gpu.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "1731"
},
{
"name": "Batchfile",
"bytes": "13941"
},
{
"name": "C",
"bytes": "208630"
},
{
"name": "C++",
"bytes": "10999411"
},
{
"name": "CMake",
"bytes": "129712"
},
{
"name": "CSS",
"bytes": "64767"
},
{
"name": "Clojure",
"bytes": "396764"
},
{
"name": "Cuda",
"bytes": "2272433"
},
{
"name": "Dockerfile",
"bytes": "67820"
},
{
"name": "Groovy",
"bytes": "62557"
},
{
"name": "HTML",
"bytes": "19753082"
},
{
"name": "Java",
"bytes": "166294"
},
{
"name": "JavaScript",
"bytes": "71846"
},
{
"name": "Julia",
"bytes": "408765"
},
{
"name": "Jupyter Notebook",
"bytes": "2713169"
},
{
"name": "Lua",
"bytes": "4430"
},
{
"name": "MATLAB",
"bytes": "34903"
},
{
"name": "Makefile",
"bytes": "115694"
},
{
"name": "Perl",
"bytes": "1535873"
},
{
"name": "Perl 6",
"bytes": "7280"
},
{
"name": "PowerShell",
"bytes": "6150"
},
{
"name": "Python",
"bytes": "24905683"
},
{
"name": "R",
"bytes": "351865"
},
{
"name": "Roff",
"bytes": "293052"
},
{
"name": "Scala",
"bytes": "1189019"
},
{
"name": "Shell",
"bytes": "794096"
},
{
"name": "Smalltalk",
"bytes": "3497"
},
{
"name": "TypeScript",
"bytes": "361164"
}
],
"symlink_target": ""
} |
"""A Folder Resource."""
from google.cloud.security.common.gcp_type import resource
# TODO: The next editor must remove this disable and correct issues.
# pylint: disable=missing-type-doc
class FolderLifecycleState(resource.LifecycleState):
"""Represents the Folder's LifecycleState."""
pass
class Folder(resource.Resource):
"""Folder Resource."""
RESOURCE_NAME_FMT = 'folders/%s'
def __init__(
self,
folder_id,
name=None,
display_name=None,
parent=None,
lifecycle_state=FolderLifecycleState.UNSPECIFIED):
"""Initialize.
Args:
folder_id: The string folder id.
name: The folder unique GCP name, i.e. "folders/{id}".
display_name: The folder display name.
parent: The parent Resource.
lifecycle_state: The folder's lifecycle state.
"""
super(Folder, self).__init__(
resource_id=folder_id,
resource_type=resource.ResourceType.FOLDER,
name=name,
display_name=display_name,
parent=parent,
lifecycle_state=lifecycle_state)
| {
"content_hash": "9454529658a84072e810f0ef0eb56d17",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 68,
"avg_line_length": 28.166666666666668,
"alnum_prop": 0.5959425190194421,
"repo_name": "cschnei3/forseti-security",
"id": "3b8a240ad57e921ce3d4fb585d1b31be025dc47b",
"size": "1758",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "google/cloud/security/common/gcp_type/folder.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "5851"
},
{
"name": "Protocol Buffer",
"bytes": "10441"
},
{
"name": "Python",
"bytes": "2038262"
},
{
"name": "Shell",
"bytes": "2737"
}
],
"symlink_target": ""
} |
"""
Tests the LDAP-specific principal search adapter functionality.
"""
__version__ = "$Revision-Id:$"
| {
"content_hash": "17f384a1b78c34f4787fec23e1665451",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 63,
"avg_line_length": 14.625,
"alnum_prop": 0.6153846153846154,
"repo_name": "DLR-SC/DataFinder",
"id": "9fb457fd479a0f957f0fe2f90966c40fb0a5ea28",
"size": "1811",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/unittest/datafinder_test/persistence/adapters/ldap_/principal_search/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "NSIS",
"bytes": "7649"
},
{
"name": "Python",
"bytes": "7056802"
},
{
"name": "QMake",
"bytes": "1975"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.utils.translation import ugettext as _
from django.utils.html import mark_safe
from ..core.tags.registry import register
from .poll.utils.render import render_polls
from .forms import CommentForm
from .models import MOVED, CLOSED, UNCLOSED, PINNED, UNPINNED
@register.inclusion_tag('spirit/comment/_form.html', takes_context=True)
def render_comments_form(context, topic, next=None):
request = context['request']
form = CommentForm(user=request.user)
return {'form': form, 'topic_id': topic.pk, 'next': next}
@register.simple_tag()
def get_comment_action_text(action):
if action == MOVED:
return _("This topic has been moved")
elif action == CLOSED:
return _("This topic has been closed")
elif action == UNCLOSED:
return _("This topic has been unclosed")
elif action == PINNED:
return _("This topic has been pinned")
elif action == UNPINNED:
return _("This topic has been unpinned")
else:
return _("Unknown topic moderation action")
@register.simple_tag(takes_context=True)
def post_render_comment(context, comment):
request = context['request']
csrf_token = context['csrf_token']
return mark_safe(render_polls(comment, request, csrf_token))
| {
"content_hash": "bb08320e5962138d19e0c629d84a74b5",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 72,
"avg_line_length": 33.282051282051285,
"alnum_prop": 0.697226502311248,
"repo_name": "alesdotio/Spirit",
"id": "7cf53ed360e6fa4e185f7d051008592a0a7584cc",
"size": "1323",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "spirit/comment/tags.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "255435"
},
{
"name": "CoffeeScript",
"bytes": "128350"
},
{
"name": "HTML",
"bytes": "203306"
},
{
"name": "JavaScript",
"bytes": "28458"
},
{
"name": "Makefile",
"bytes": "187"
},
{
"name": "Python",
"bytes": "773246"
}
],
"symlink_target": ""
} |
import grpc
from grpc.framework.common import cardinality
from grpc.framework.interfaces.face import utilities as face_utilities
import simple_test_pb2 as simple__test__pb2
class SimpleTestStub(object):
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.GetOne = channel.unary_unary(
'/simpletest.SimpleTest/GetOne',
request_serializer=simple__test__pb2.RequestOne.SerializeToString,
response_deserializer=simple__test__pb2.ResponseOne.FromString,
)
self.GetList = channel.unary_stream(
'/simpletest.SimpleTest/GetList',
request_serializer=simple__test__pb2.RequestOne.SerializeToString,
response_deserializer=simple__test__pb2.ResponseList.FromString,
)
self.PostList = channel.stream_unary(
'/simpletest.SimpleTest/PostList',
request_serializer=simple__test__pb2.RequestList.SerializeToString,
response_deserializer=simple__test__pb2.ResponseOne.FromString,
)
self.PostAndPost = channel.stream_stream(
'/simpletest.SimpleTest/PostAndPost',
request_serializer=simple__test__pb2.RequestList.SerializeToString,
response_deserializer=simple__test__pb2.ResponseList.FromString,
)
class SimpleTestServicer(object):
def GetOne(self, request, context):
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetList(self, request, context):
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def PostList(self, request_iterator, context):
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def PostAndPost(self, request_iterator, context):
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_SimpleTestServicer_to_server(servicer, server):
rpc_method_handlers = {
'GetOne': grpc.unary_unary_rpc_method_handler(
servicer.GetOne,
request_deserializer=simple__test__pb2.RequestOne.FromString,
response_serializer=simple__test__pb2.ResponseOne.SerializeToString,
),
'GetList': grpc.unary_stream_rpc_method_handler(
servicer.GetList,
request_deserializer=simple__test__pb2.RequestOne.FromString,
response_serializer=simple__test__pb2.ResponseList.SerializeToString,
),
'PostList': grpc.stream_unary_rpc_method_handler(
servicer.PostList,
request_deserializer=simple__test__pb2.RequestList.FromString,
response_serializer=simple__test__pb2.ResponseOne.SerializeToString,
),
'PostAndPost': grpc.stream_stream_rpc_method_handler(
servicer.PostAndPost,
request_deserializer=simple__test__pb2.RequestList.FromString,
response_serializer=simple__test__pb2.ResponseList.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'simpletest.SimpleTest', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
| {
"content_hash": "83872bafb3f73a5a4b393f1c2b46ed9a",
"timestamp": "",
"source": "github",
"line_count": 86,
"max_line_length": 79,
"avg_line_length": 39.23255813953488,
"alnum_prop": 0.7131001778304683,
"repo_name": "ada-wang/grpc-py-helloworld",
"id": "f62b5138daafa7dda5dac3d9a2f59afb95cd3e97",
"size": "3444",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "simple_test_pb2_grpc.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Protocol Buffer",
"bytes": "502"
},
{
"name": "Python",
"bytes": "6129"
}
],
"symlink_target": ""
} |
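As a usage illustration for the generated SimpleTest stubs above, here is a minimal sketch that is not part of the original file; it assumes the generated modules are importable as simple_test_pb2 and simple_test_pb2_grpc, that RequestOne and ResponseOne can be constructed without arguments, and that port 50051 is free.
from concurrent import futures
import grpc
import simple_test_pb2
import simple_test_pb2_grpc

class EchoServicer(simple_test_pb2_grpc.SimpleTestServicer):
    # Override only the unary-unary method; the other three inherit the
    # UNIMPLEMENTED behaviour from the generated base class.
    def GetOne(self, request, context):
        return simple_test_pb2.ResponseOne()

server = grpc.server(futures.ThreadPoolExecutor(max_workers=4))
simple_test_pb2_grpc.add_SimpleTestServicer_to_server(EchoServicer(), server)
server.add_insecure_port('[::]:50051')
server.start()
channel = grpc.insecure_channel('localhost:50051')
stub = simple_test_pb2_grpc.SimpleTestStub(channel)
response = stub.GetOne(simple_test_pb2.RequestOne())  # blocking unary-unary call
server.stop(0)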
"""Unit tests for local command-line-interface debug wrapper session."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import shutil
import tempfile
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.debug.cli import cli_shared
from tensorflow.python.debug.cli import debugger_cli_common
from tensorflow.python.debug.wrappers import local_cli_wrapper
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
# Import resource_variable_ops for the variables-to-tensor implicit conversion.
from tensorflow.python.ops import resource_variable_ops # pylint: disable=unused-import
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
from tensorflow.python.training import monitored_session
class LocalCLIDebuggerWrapperSessionForTest(
local_cli_wrapper.LocalCLIDebugWrapperSession):
"""Subclasses the wrapper class for testing.
Overrides its CLI-related methods for headless testing environments.
Inserts observer variables for assertions.
"""
def __init__(self,
command_sequence,
sess,
dump_root=None):
"""Constructor of the for-test subclass.
Args:
command_sequence: (list of list of str) A list of command arguments,
including the command prefix, each element of the list is such as:
["run", "-n"],
["print_feed", "input:0"].
sess: See the doc string of LocalCLIDebugWrapperSession.__init__.
dump_root: See the doc string of LocalCLIDebugWrapperSession.__init__.
"""
local_cli_wrapper.LocalCLIDebugWrapperSession.__init__(
self, sess, dump_root=dump_root, log_usage=False)
self._command_sequence = command_sequence
self._command_pointer = 0
# Observer variables.
self.observers = {
"debug_dumps": [],
"tf_errors": [],
"run_start_cli_run_numbers": [],
"run_end_cli_run_numbers": [],
"print_feed_responses": [],
"profiler_py_graphs": [],
"profiler_run_metadata": [],
}
def _prep_cli_for_run_start(self):
pass
def _prep_debug_cli_for_run_end(self, debug_dump, tf_error, passed_filter):
self.observers["debug_dumps"].append(debug_dump)
self.observers["tf_errors"].append(tf_error)
def _prep_profile_cli_for_run_end(self, py_graph, run_metadata):
self.observers["profiler_py_graphs"].append(py_graph)
self.observers["profiler_run_metadata"].append(run_metadata)
def _launch_cli(self):
if self._is_run_start:
self.observers["run_start_cli_run_numbers"].append(self._run_call_count)
else:
self.observers["run_end_cli_run_numbers"].append(self._run_call_count)
while True:
command = self._command_sequence[self._command_pointer]
self._command_pointer += 1
try:
if command[0] == "run":
self._run_handler(command[1:])
elif command[0] == "print_feed":
self.observers["print_feed_responses"].append(
self._print_feed_handler(command[1:]))
else:
raise ValueError("Unrecognized command prefix: %s" % command[0])
except debugger_cli_common.CommandLineExit as e:
return e.exit_token
class LocalCLIDebugWrapperSessionTest(test_util.TensorFlowTestCase):
def setUp(self):
self._tmp_dir = tempfile.mktemp()
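# Note: mktemp() only returns a candidate path and creates nothing on disk, which
# is why tearDown() checks os.path.isdir() before removing it.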
self.v = variables.Variable(10.0, name="v")
self.w = variables.Variable(21.0, name="w")
self.delta = constant_op.constant(1.0, name="delta")
self.inc_v = state_ops.assign_add(self.v, self.delta, name="inc_v")
self.w_int = control_flow_ops.with_dependencies(
[self.inc_v],
math_ops.cast(self.w, dtypes.int32, name="w_int_inner"),
name="w_int_outer")
self.ph = array_ops.placeholder(dtypes.float32, name="ph")
self.xph = array_ops.transpose(self.ph, name="xph")
self.m = constant_op.constant(
[[0.0, 1.0, 2.0], [-4.0, -1.0, 0.0]], dtype=dtypes.float32, name="m")
self.y = math_ops.matmul(self.m, self.xph, name="y")
self.sparse_ph = array_ops.sparse_placeholder(
dtypes.float32, shape=([5, 5]), name="sparse_placeholder")
self.sparse_add = sparse_ops.sparse_add(self.sparse_ph, self.sparse_ph)
self.sess = session.Session()
# Initialize variable.
self.sess.run(variables.global_variables_initializer())
def tearDown(self):
ops.reset_default_graph()
if os.path.isdir(self._tmp_dir):
shutil.rmtree(self._tmp_dir)
def testConstructWrapper(self):
local_cli_wrapper.LocalCLIDebugWrapperSession(
session.Session(), log_usage=False)
def testConstructWrapperWithExistingEmptyDumpRoot(self):
os.mkdir(self._tmp_dir)
self.assertTrue(os.path.isdir(self._tmp_dir))
local_cli_wrapper.LocalCLIDebugWrapperSession(
session.Session(), dump_root=self._tmp_dir, log_usage=False)
def testConstructWrapperWithExistingNonEmptyDumpRoot(self):
os.mkdir(self._tmp_dir)
dir_path = os.path.join(self._tmp_dir, "foo")
os.mkdir(dir_path)
self.assertTrue(os.path.isdir(dir_path))
with self.assertRaisesRegexp(
ValueError, "dump_root path points to a non-empty directory"):
local_cli_wrapper.LocalCLIDebugWrapperSession(
session.Session(), dump_root=self._tmp_dir, log_usage=False)
def testConstructWrapperWithExistingFileDumpRoot(self):
os.mkdir(self._tmp_dir)
file_path = os.path.join(self._tmp_dir, "foo")
open(file_path, "a").close() # Create the file
self.assertTrue(os.path.isfile(file_path))
with self.assertRaisesRegexp(ValueError, "dump_root path points to a file"):
local_cli_wrapper.LocalCLIDebugWrapperSession(
session.Session(), dump_root=file_path, log_usage=False)
def testRunsUnderDebugMode(self):
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[["run"], ["run"], ["run"]], self.sess, dump_root=self._tmp_dir)
# run under debug mode twice.
wrapped_sess.run(self.inc_v)
wrapped_sess.run(self.inc_v)
# Verify that the assign_add op did take effect.
self.assertAllClose(12.0, self.sess.run(self.v))
# Assert correct run call numbers for which the CLI has been launched at
# run-start and run-end.
self.assertEqual([1], wrapped_sess.observers["run_start_cli_run_numbers"])
self.assertEqual([1, 2], wrapped_sess.observers["run_end_cli_run_numbers"])
# Verify that the dumps have been generated and picked up during run-end.
self.assertEqual(2, len(wrapped_sess.observers["debug_dumps"]))
# Verify that the TensorFlow runtime errors are picked up and in this case,
# they should be both None.
self.assertEqual([None, None], wrapped_sess.observers["tf_errors"])
def testRunsWithEmptyStringDumpRootWorks(self):
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[["run"], ["run"]], self.sess, dump_root="")
# run under debug mode.
wrapped_sess.run(self.inc_v)
self.assertAllClose(11.0, self.sess.run(self.v))
def testRunInfoOutputAtRunEndIsCorrect(self):
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[["run"], ["run"], ["run"]], self.sess, dump_root=self._tmp_dir)
wrapped_sess.run(self.inc_v)
run_info_output = wrapped_sess._run_info_handler([])
tfdbg_logo = cli_shared.get_tfdbg_logo()
# The run_info output in the first run() call should contain the tfdbg logo.
self.assertEqual(tfdbg_logo.lines,
run_info_output.lines[:len(tfdbg_logo.lines)])
menu = run_info_output.annotations[debugger_cli_common.MAIN_MENU_KEY]
self.assertIn("list_tensors", menu.captions())
wrapped_sess.run(self.inc_v)
run_info_output = wrapped_sess._run_info_handler([])
# The run_info output in the second run() call should NOT contain the logo.
self.assertNotEqual(tfdbg_logo.lines,
run_info_output.lines[:len(tfdbg_logo.lines)])
menu = run_info_output.annotations[debugger_cli_common.MAIN_MENU_KEY]
self.assertIn("list_tensors", menu.captions())
def testRunsUnderNonDebugMode(self):
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[["run", "-n"], ["run", "-n"], ["run", "-n"]],
self.sess, dump_root=self._tmp_dir)
# run three times.
wrapped_sess.run(self.inc_v)
wrapped_sess.run(self.inc_v)
wrapped_sess.run(self.inc_v)
self.assertAllClose(13.0, self.sess.run(self.v))
self.assertEqual([1, 2, 3],
wrapped_sess.observers["run_start_cli_run_numbers"])
self.assertEqual([], wrapped_sess.observers["run_end_cli_run_numbers"])
def testRunningWithSparsePlaceholderFeedWorks(self):
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[["run"], ["run"]], self.sess, dump_root=self._tmp_dir)
sparse_feed = ([[0, 1], [0, 2]], [10.0, 20.0])
sparse_result = wrapped_sess.run(
self.sparse_add, feed_dict={self.sparse_ph: sparse_feed})
self.assertAllEqual([[0, 1], [0, 2]], sparse_result.indices)
self.assertAllClose([20.0, 40.0], sparse_result.values)
def testRunsUnderNonDebugThenDebugMode(self):
# Do two NON_DEBUG_RUNs, followed by DEBUG_RUNs.
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[["run", "-n"], ["run", "-n"], ["run"], ["run"]],
self.sess, dump_root=self._tmp_dir)
# run three times.
wrapped_sess.run(self.inc_v)
wrapped_sess.run(self.inc_v)
wrapped_sess.run(self.inc_v)
self.assertAllClose(13.0, self.sess.run(self.v))
self.assertEqual([1, 2, 3],
wrapped_sess.observers["run_start_cli_run_numbers"])
# Here, the CLI should have been launched only under the third run,
# because the first and second runs are NON_DEBUG.
self.assertEqual([3], wrapped_sess.observers["run_end_cli_run_numbers"])
self.assertEqual(1, len(wrapped_sess.observers["debug_dumps"]))
self.assertEqual([None], wrapped_sess.observers["tf_errors"])
def testRunMultipleTimesWithinLimit(self):
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[["run", "-t", "3"], ["run"]],
self.sess, dump_root=self._tmp_dir)
# run three times.
wrapped_sess.run(self.inc_v)
wrapped_sess.run(self.inc_v)
wrapped_sess.run(self.inc_v)
self.assertAllClose(13.0, self.sess.run(self.v))
self.assertEqual([1], wrapped_sess.observers["run_start_cli_run_numbers"])
self.assertEqual([3], wrapped_sess.observers["run_end_cli_run_numbers"])
self.assertEqual(1, len(wrapped_sess.observers["debug_dumps"]))
self.assertEqual([None], wrapped_sess.observers["tf_errors"])
def testRunMultipleTimesOverLimit(self):
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[["run", "-t", "3"]], self.sess, dump_root=self._tmp_dir)
# run twice, which is less than the number of times specified by the
# command.
wrapped_sess.run(self.inc_v)
wrapped_sess.run(self.inc_v)
self.assertAllClose(12.0, self.sess.run(self.v))
self.assertEqual([1], wrapped_sess.observers["run_start_cli_run_numbers"])
self.assertEqual([], wrapped_sess.observers["run_end_cli_run_numbers"])
self.assertEqual(0, len(wrapped_sess.observers["debug_dumps"]))
self.assertEqual([], wrapped_sess.observers["tf_errors"])
def testRunMixingDebugModeAndMultpleTimes(self):
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[["run", "-n"], ["run", "-t", "2"], ["run"], ["run"]],
self.sess, dump_root=self._tmp_dir)
# run four times.
wrapped_sess.run(self.inc_v)
wrapped_sess.run(self.inc_v)
wrapped_sess.run(self.inc_v)
wrapped_sess.run(self.inc_v)
self.assertAllClose(14.0, self.sess.run(self.v))
self.assertEqual([1, 2],
wrapped_sess.observers["run_start_cli_run_numbers"])
self.assertEqual([3, 4], wrapped_sess.observers["run_end_cli_run_numbers"])
self.assertEqual(2, len(wrapped_sess.observers["debug_dumps"]))
self.assertEqual([None, None], wrapped_sess.observers["tf_errors"])
def testDebuggingMakeCallableTensorRunnerWorks(self):
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[["run"], ["run"]], self.sess, dump_root=self._tmp_dir)
v = variables.Variable(42)
tensor_runner = wrapped_sess.make_callable(v)
self.sess.run(v.initializer)
self.assertAllClose(42, tensor_runner())
self.assertEqual(1, len(wrapped_sess.observers["debug_dumps"]))
def testDebuggingMakeCallableTensorRunnerWithCustomRunOptionsWorks(self):
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[["run"], ["run"]], self.sess, dump_root=self._tmp_dir)
a = constant_op.constant(42)
tensor_runner = wrapped_sess.make_callable(a)
run_options = config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE)
run_metadata = config_pb2.RunMetadata()
self.assertAllClose(
42, tensor_runner(options=run_options, run_metadata=run_metadata))
self.assertEqual(1, len(wrapped_sess.observers["debug_dumps"]))
self.assertGreater(len(run_metadata.step_stats.dev_stats), 0)
def testDebuggingMakeCallableOperationRunnerWorks(self):
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[["run"], ["run"]], self.sess, dump_root=self._tmp_dir)
v = variables.Variable(10.0)
inc_v = state_ops.assign_add(v, 1.0)
op_runner = wrapped_sess.make_callable(inc_v.op)
self.sess.run(v.initializer)
op_runner()
self.assertEqual(1, len(wrapped_sess.observers["debug_dumps"]))
self.assertEqual(11.0, self.sess.run(v))
def testDebuggingMakeCallableRunnerWithFeedListWorks(self):
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[["run"], ["run"]], self.sess, dump_root=self._tmp_dir)
ph1 = array_ops.placeholder(dtypes.float32)
ph2 = array_ops.placeholder(dtypes.float32)
a = math_ops.add(ph1, ph2)
tensor_runner = wrapped_sess.make_callable(a, feed_list=[ph1, ph2])
self.assertAllClose(42.0, tensor_runner(41.0, 1.0))
self.assertEqual(1, len(wrapped_sess.observers["debug_dumps"]))
def testRuntimeErrorShouldBeCaught(self):
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[["run"], ["run"]], self.sess, dump_root=self._tmp_dir)
# Do a run that should lead to a TensorFlow runtime error.
wrapped_sess.run(self.y, feed_dict={self.ph: [[0.0], [1.0], [2.0]]})
self.assertEqual([1], wrapped_sess.observers["run_start_cli_run_numbers"])
self.assertEqual([1], wrapped_sess.observers["run_end_cli_run_numbers"])
self.assertEqual(1, len(wrapped_sess.observers["debug_dumps"]))
# Verify that the runtime error is caught by the wrapped session properly.
self.assertEqual(1, len(wrapped_sess.observers["tf_errors"]))
tf_error = wrapped_sess.observers["tf_errors"][0]
self.assertEqual("y", tf_error.op.name)
def testRuntimeErrorBeforeGraphExecutionIsRaised(self):
# Use an impossible device name to cause an error before graph execution.
with ops.device("/device:GPU:1337"):
w = variables.Variable([1.0] * 10, name="w")
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[["run"]], self.sess, dump_root=self._tmp_dir)
with self.assertRaisesRegexp(errors.OpError, r".*[Dd]evice.*1337.*"):
wrapped_sess.run(w)
def testRunTillFilterPassesShouldLaunchCLIAtCorrectRun(self):
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[["run", "-f", "v_greater_than_twelve"],
["run", "-f", "v_greater_than_twelve"],
["run"]],
self.sess,
dump_root=self._tmp_dir)
def v_greater_than_twelve(datum, tensor):
return datum.node_name == "v" and tensor > 12.0
wrapped_sess.add_tensor_filter("v_greater_than_twelve",
v_greater_than_twelve)
# run five times.
wrapped_sess.run(self.inc_v)
wrapped_sess.run(self.inc_v)
wrapped_sess.run(self.inc_v)
wrapped_sess.run(self.inc_v)
wrapped_sess.run(self.inc_v)
self.assertAllClose(15.0, self.sess.run(self.v))
self.assertEqual([1], wrapped_sess.observers["run_start_cli_run_numbers"])
# run-end CLI should NOT have been launched for run #2 and #3, because only
# starting from run #4 v becomes greater than 12.0.
self.assertEqual([4, 5], wrapped_sess.observers["run_end_cli_run_numbers"])
self.assertEqual(2, len(wrapped_sess.observers["debug_dumps"]))
self.assertEqual([None, None], wrapped_sess.observers["tf_errors"])
def testRunsUnderDebugModeWithWatchFnFilteringNodeNames(self):
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[["run", "--node_name_filter", "inc.*"],
["run", "--node_name_filter", "delta"],
["run"]],
self.sess, dump_root=self._tmp_dir)
# run under debug mode twice.
wrapped_sess.run(self.inc_v)
wrapped_sess.run(self.inc_v)
# Verify that the assign_add op did take effect.
self.assertAllClose(12.0, self.sess.run(self.v))
# Verify that the dumps have been generated and picked up during run-end.
self.assertEqual(2, len(wrapped_sess.observers["debug_dumps"]))
dumps = wrapped_sess.observers["debug_dumps"][0]
self.assertEqual(1, dumps.size)
self.assertEqual("inc_v", dumps.dumped_tensor_data[0].node_name)
dumps = wrapped_sess.observers["debug_dumps"][1]
self.assertEqual(1, dumps.size)
self.assertEqual("delta", dumps.dumped_tensor_data[0].node_name)
def testRunsUnderDebugModeWithWatchFnFilteringOpTypes(self):
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[["run", "--node_name_filter", "delta"],
["run", "--op_type_filter", "AssignAdd"],
["run"]],
self.sess, dump_root=self._tmp_dir)
# run under debug mode twice.
wrapped_sess.run(self.inc_v)
wrapped_sess.run(self.inc_v)
# Verify that the assign_add op did take effect.
self.assertAllClose(12.0, self.sess.run(self.v))
# Verify that the dumps have been generated and picked up during run-end.
self.assertEqual(2, len(wrapped_sess.observers["debug_dumps"]))
dumps = wrapped_sess.observers["debug_dumps"][0]
self.assertEqual(1, dumps.size)
self.assertEqual("delta", dumps.dumped_tensor_data[0].node_name)
dumps = wrapped_sess.observers["debug_dumps"][1]
self.assertEqual(1, dumps.size)
self.assertEqual("inc_v", dumps.dumped_tensor_data[0].node_name)
def testRunsUnderDebugModeWithWatchFnFilteringTensorDTypes(self):
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[["run", "--op_type_filter", "Variable.*"],
["run", "--tensor_dtype_filter", "int32"],
["run"]],
self.sess, dump_root=self._tmp_dir)
# run under debug mode twice.
wrapped_sess.run(self.w_int)
wrapped_sess.run(self.w_int)
# Verify that the dumps have been generated and picked up during run-end.
self.assertEqual(2, len(wrapped_sess.observers["debug_dumps"]))
dumps = wrapped_sess.observers["debug_dumps"][0]
self.assertEqual(2, dumps.size)
self.assertItemsEqual(
["v", "w"], [dumps.dumped_tensor_data[i].node_name for i in [0, 1]])
dumps = wrapped_sess.observers["debug_dumps"][1]
self.assertEqual(2, dumps.size)
self.assertEqual(
["w_int_inner", "w_int_outer"],
[dumps.dumped_tensor_data[i].node_name for i in [0, 1]])
def testRunsUnderDebugModeWithWatchFnFilteringOpTypesAndTensorDTypes(self):
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[["run", "--op_type_filter", "Cast", "--tensor_dtype_filter", "int32"],
["run"]],
self.sess, dump_root=self._tmp_dir)
# run under debug mode twice.
wrapped_sess.run(self.w_int)
# Verify that the dumps have been generated and picked up during run-end.
self.assertEqual(1, len(wrapped_sess.observers["debug_dumps"]))
dumps = wrapped_sess.observers["debug_dumps"][0]
self.assertEqual(1, dumps.size)
self.assertEqual("w_int_inner", dumps.dumped_tensor_data[0].node_name)
def testPrintFeedPrintsFeedValueForTensorFeedKey(self):
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[["print_feed", "ph:0"], ["run"], ["run"]], self.sess)
self.assertAllClose(
[[5.0], [-1.0]],
wrapped_sess.run(self.y, feed_dict={self.ph: [[0.0, 1.0, 2.0]]}))
print_feed_responses = wrapped_sess.observers["print_feed_responses"]
self.assertEqual(1, len(print_feed_responses))
self.assertEqual(
["Tensor \"ph:0 (feed)\":", "", "[[0.0, 1.0, 2.0]]"],
print_feed_responses[0].lines)
def testPrintFeedPrintsFeedValueForTensorNameFeedKey(self):
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[["print_feed", "ph:0"], ["run"], ["run"]], self.sess)
self.assertAllClose(
[[5.0], [-1.0]],
wrapped_sess.run(self.y, feed_dict={"ph:0": [[0.0, 1.0, 2.0]]}))
print_feed_responses = wrapped_sess.observers["print_feed_responses"]
self.assertEqual(1, len(print_feed_responses))
self.assertEqual(
["Tensor \"ph:0 (feed)\":", "", "[[0.0, 1.0, 2.0]]"],
print_feed_responses[0].lines)
def testPrintFeedPrintsErrorForInvalidFeedKey(self):
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[["print_feed", "spam"], ["run"], ["run"]], self.sess)
self.assertAllClose(
[[5.0], [-1.0]],
wrapped_sess.run(self.y, feed_dict={"ph:0": [[0.0, 1.0, 2.0]]}))
print_feed_responses = wrapped_sess.observers["print_feed_responses"]
self.assertEqual(1, len(print_feed_responses))
self.assertEqual(
["ERROR: The feed_dict of the current run does not contain the key "
"spam"], print_feed_responses[0].lines)
def testPrintFeedPrintsErrorWhenFeedDictIsNone(self):
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[["print_feed", "spam"], ["run"], ["run"]], self.sess)
wrapped_sess.run(self.w_int)
print_feed_responses = wrapped_sess.observers["print_feed_responses"]
self.assertEqual(1, len(print_feed_responses))
self.assertEqual(
["ERROR: The feed_dict of the current run is None or empty."],
print_feed_responses[0].lines)
def testRunUnderProfilerModeWorks(self):
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[["run", "-p"], ["run"]], self.sess)
wrapped_sess.run(self.w_int)
self.assertEqual(1, len(wrapped_sess.observers["profiler_run_metadata"]))
self.assertTrue(
wrapped_sess.observers["profiler_run_metadata"][0].step_stats)
self.assertEqual(1, len(wrapped_sess.observers["profiler_py_graphs"]))
self.assertIsInstance(
wrapped_sess.observers["profiler_py_graphs"][0], ops.Graph)
def testCallingHookDelBeforeAnyRun(self):
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[["run"], ["run"]], self.sess)
del wrapped_sess
def testCallingShouldStopMethodOnNonWrappedNonMonitoredSessionErrors(self):
wrapped_sess = LocalCLIDebuggerWrapperSessionForTest(
[["run"], ["run"]], self.sess)
with self.assertRaisesRegexp(
ValueError,
r"The wrapped session .* does not have a method .*should_stop.*"):
wrapped_sess.should_stop()
def testLocalCLIDebugWrapperSessionWorksOnMonitoredSession(self):
monitored_sess = monitored_session.MonitoredSession()
wrapped_monitored_sess = LocalCLIDebuggerWrapperSessionForTest(
[["run"], ["run"]], monitored_sess)
self.assertFalse(wrapped_monitored_sess.should_stop())
if __name__ == "__main__":
googletest.main()
| {
"content_hash": "2237668cb3baf0bcf8077446e33359dd",
"timestamp": "",
"source": "github",
"line_count": 607,
"max_line_length": 88,
"avg_line_length": 39.56177924217463,
"alnum_prop": 0.6774798034479886,
"repo_name": "mavenlin/tensorflow",
"id": "f65ffc95d756e59ea35bb6a6c5745ac6abde7a3d",
"size": "24703",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/python/debug/wrappers/local_cli_wrapper_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "7666"
},
{
"name": "C",
"bytes": "193501"
},
{
"name": "C++",
"bytes": "28519915"
},
{
"name": "CMake",
"bytes": "636307"
},
{
"name": "Go",
"bytes": "946452"
},
{
"name": "Java",
"bytes": "403360"
},
{
"name": "Jupyter Notebook",
"bytes": "1833674"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "38060"
},
{
"name": "Objective-C",
"bytes": "7056"
},
{
"name": "Objective-C++",
"bytes": "63210"
},
{
"name": "Perl",
"bytes": "6715"
},
{
"name": "Protocol Buffer",
"bytes": "261095"
},
{
"name": "PureBasic",
"bytes": "24932"
},
{
"name": "Python",
"bytes": "25109562"
},
{
"name": "Ruby",
"bytes": "327"
},
{
"name": "Shell",
"bytes": "371205"
}
],
"symlink_target": ""
} |
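For context on the class exercised by the tests above: outside of a test harness the wrapper is applied by wrapping an existing Session, roughly as sketched below (assuming a TensorFlow 1.x build in which the tfdbg CLI wrapper is available; this snippet is illustrative and not taken from the file).
import tensorflow as tf
from tensorflow.python import debug as tf_debug

sess = tf.Session()
sess = tf_debug.LocalCLIDebugWrapperSession(sess)  # run() now drops into the tfdbg CLI
v = tf.Variable(10.0, name="v")
inc_v = tf.assign_add(v, 1.0)
sess.run(v.initializer)
sess.run(inc_v)  # the CLI launches at run-start; typing "run" executes and shows the dumps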
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
import difflib
import warnings
from copy import deepcopy
from ansible import constants as C
from ansible.module_utils._text import to_text
from ansible.utils.color import stringc
from ansible.vars import strip_internal_keys
try:
from __main__ import display as global_display
except ImportError:
from ansible.utils.display import Display
global_display = Display()
try:
from __main__ import cli
except ImportError:
# using API w/o cli
cli = False
__all__ = ["CallbackBase"]
class CallbackBase:
'''
This is a base ansible callback class that does nothing. New callbacks should
use this class as a base and override any callback methods for which they wish
to execute custom actions.
'''
def __init__(self, display=None):
if display:
self._display = display
else:
self._display = global_display
if cli:
self._options = cli.options
else:
self._options = None
if self._display.verbosity >= 4:
name = getattr(self, 'CALLBACK_NAME', 'unnamed')
ctype = getattr(self, 'CALLBACK_TYPE', 'old')
version = getattr(self, 'CALLBACK_VERSION', '1.0')
self._display.vvvv('Loading callback plugin %s of type %s, v%s from %s' % (name, ctype, version, __file__))
''' helper for callbacks, so they don't all have to include deepcopy '''
_copy_result = deepcopy
def _copy_result_exclude(self, result, exclude):
values = []
for e in exclude:
values.append(getattr(result, e))
setattr(result, e, None)
result_copy = deepcopy(result)
for i,e in enumerate(exclude):
setattr(result, e, values[i])
return result_copy
def _dump_results(self, result, indent=None, sort_keys=True, keep_invocation=False):
if result.get('_ansible_no_log', False):
return json.dumps(dict(censored="the output has been hidden due to the fact that 'no_log: true' was specified for this result"))
if not indent and (result.get('_ansible_verbose_always') or self._display.verbosity > 2):
indent = 4
# All result keys starting with _ansible_ are internal, so remove them from the result before we output anything.
abridged_result = strip_internal_keys(result)
# remove invocation unless specifically wanting it
if not keep_invocation and self._display.verbosity < 3 and 'invocation' in result:
del abridged_result['invocation']
# remove diff information from screen output
if self._display.verbosity < 3 and 'diff' in result:
del abridged_result['diff']
# remove exception from screen output
if 'exception' in abridged_result:
del abridged_result['exception']
return json.dumps(abridged_result, indent=indent, ensure_ascii=False, sort_keys=sort_keys)
def _handle_warnings(self, res):
''' display warnings, if enabled and any exist in the result '''
if C.COMMAND_WARNINGS and 'warnings' in res and res['warnings']:
for warning in res['warnings']:
self._display.warning(warning)
def _get_diff(self, difflist):
if not isinstance(difflist, list):
difflist = [difflist]
ret = []
for diff in difflist:
try:
with warnings.catch_warnings():
warnings.simplefilter('ignore')
if 'dst_binary' in diff:
ret.append("diff skipped: destination file appears to be binary\n")
if 'src_binary' in diff:
ret.append("diff skipped: source file appears to be binary\n")
if 'dst_larger' in diff:
ret.append("diff skipped: destination file size is greater than %d\n" % diff['dst_larger'])
if 'src_larger' in diff:
ret.append("diff skipped: source file size is greater than %d\n" % diff['src_larger'])
if 'before' in diff and 'after' in diff:
# format complex structures into 'files'
for x in ['before', 'after']:
if isinstance(diff[x], dict):
diff[x] = json.dumps(diff[x], sort_keys=True, indent=4)
if 'before_header' in diff:
before_header = "before: %s" % diff['before_header']
else:
before_header = 'before'
if 'after_header' in diff:
after_header = "after: %s" % diff['after_header']
else:
after_header = 'after'
differ = difflib.unified_diff(to_text(diff['before']).splitlines(True),
to_text(diff['after']).splitlines(True),
fromfile=before_header,
tofile=after_header,
fromfiledate='',
tofiledate='',
n=C.DIFF_CONTEXT)
has_diff = False
for line in differ:
has_diff = True
if line.startswith('+'):
line = stringc(line, C.COLOR_DIFF_ADD)
elif line.startswith('-'):
line = stringc(line, C.COLOR_DIFF_REMOVE)
elif line.startswith('@@'):
line = stringc(line, C.COLOR_DIFF_LINES)
ret.append(line)
if has_diff:
ret.append('\n')
if 'prepared' in diff:
ret.append(to_text(diff['prepared']))
except UnicodeDecodeError:
ret.append(">> the files are different, but the diff library cannot compare unicode strings\n\n")
return u''.join(ret)
def _get_item(self, result):
if result.get('_ansible_no_log', False):
item = "(censored due to no_log)"
elif result.get('_ansible_item_label', False):
item = result.get('_ansible_item_label')
else:
item = result.get('item', None)
return item
def _process_items(self, result):
# just remove them, as they are now handled by individual callbacks
del result._result['results']
def _clean_results(self, result, task_name):
if 'changed' in result and task_name in ['debug']:
del result['changed']
if 'invocation' in result and task_name in ['debug']:
del result['invocation']
def set_play_context(self, play_context):
pass
def on_any(self, *args, **kwargs):
pass
def runner_on_failed(self, host, res, ignore_errors=False):
pass
def runner_on_ok(self, host, res):
pass
def runner_on_skipped(self, host, item=None):
pass
def runner_on_unreachable(self, host, res):
pass
def runner_on_no_hosts(self):
pass
def runner_on_async_poll(self, host, res, jid, clock):
pass
def runner_on_async_ok(self, host, res, jid):
pass
def runner_on_async_failed(self, host, res, jid):
pass
def playbook_on_start(self):
pass
def playbook_on_notify(self, host, handler):
pass
def playbook_on_no_hosts_matched(self):
pass
def playbook_on_no_hosts_remaining(self):
pass
def playbook_on_task_start(self, name, is_conditional):
pass
def playbook_on_vars_prompt(self, varname, private=True, prompt=None, encrypt=None, confirm=False, salt_size=None, salt=None, default=None):
pass
def playbook_on_setup(self):
pass
def playbook_on_import_for_host(self, host, imported_file):
pass
def playbook_on_not_import_for_host(self, host, missing_file):
pass
def playbook_on_play_start(self, name):
pass
def playbook_on_stats(self, stats):
pass
def on_file_diff(self, host, diff):
pass
####### V2 METHODS, by default they call v1 counterparts if possible ######
def v2_on_any(self, *args, **kwargs):
self.on_any(args, kwargs)
def v2_runner_on_failed(self, result, ignore_errors=False):
host = result._host.get_name()
self.runner_on_failed(host, result._result, ignore_errors)
def v2_runner_on_ok(self, result):
host = result._host.get_name()
self.runner_on_ok(host, result._result)
def v2_runner_on_skipped(self, result):
if C.DISPLAY_SKIPPED_HOSTS:
host = result._host.get_name()
self.runner_on_skipped(host, self._get_item(getattr(result._result,'results',{})))
def v2_runner_on_unreachable(self, result):
host = result._host.get_name()
self.runner_on_unreachable(host, result._result)
def v2_runner_on_no_hosts(self, task):
self.runner_on_no_hosts()
def v2_runner_on_async_poll(self, result):
host = result._host.get_name()
jid = result._result.get('ansible_job_id')
#FIXME, get real clock
clock = 0
self.runner_on_async_poll(host, result._result, jid, clock)
def v2_runner_on_async_ok(self, result):
host = result._host.get_name()
jid = result._result.get('ansible_job_id')
self.runner_on_async_ok(host, result._result, jid)
def v2_runner_on_async_failed(self, result):
host = result._host.get_name()
jid = result._result.get('ansible_job_id')
self.runner_on_async_failed(host, result._result, jid)
def v2_runner_on_file_diff(self, result, diff):
pass  # no v1 correspondence
def v2_playbook_on_start(self, playbook):
self.playbook_on_start()
def v2_playbook_on_notify(self, result, handler):
host = result._host.get_name()
self.playbook_on_notify(host, handler)
def v2_playbook_on_no_hosts_matched(self):
self.playbook_on_no_hosts_matched()
def v2_playbook_on_no_hosts_remaining(self):
self.playbook_on_no_hosts_remaining()
def v2_playbook_on_task_start(self, task, is_conditional):
self.playbook_on_task_start(task.name, is_conditional)
def v2_playbook_on_cleanup_task_start(self, task):
        pass  # no v1 correspondence
def v2_playbook_on_handler_task_start(self, task):
        pass  # no v1 correspondence
def v2_playbook_on_vars_prompt(self, varname, private=True, prompt=None, encrypt=None, confirm=False, salt_size=None, salt=None, default=None):
self.playbook_on_vars_prompt(varname, private, prompt, encrypt, confirm, salt_size, salt, default)
def v2_playbook_on_setup(self):
self.playbook_on_setup()
def v2_playbook_on_import_for_host(self, result, imported_file):
host = result._host.get_name()
self.playbook_on_import_for_host(host, imported_file)
def v2_playbook_on_not_import_for_host(self, result, missing_file):
host = result._host.get_name()
self.playbook_on_not_import_for_host(host, missing_file)
def v2_playbook_on_play_start(self, play):
self.playbook_on_play_start(play.name)
def v2_playbook_on_stats(self, stats):
self.playbook_on_stats(stats)
def v2_on_file_diff(self, result):
if 'diff' in result._result:
host = result._host.get_name()
self.on_file_diff(host, result._result['diff'])
def v2_playbook_on_include(self, included_file):
        pass  # no v1 correspondence
def v2_runner_item_on_ok(self, result):
pass
def v2_runner_item_on_failed(self, result):
pass
def v2_runner_item_on_skipped(self, result):
pass
def v2_runner_retry(self, result):
pass
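# --- Editor's illustrative sketch; not part of the original module. ---
# A concrete callback plugin would subclass the base class above (named
# CallbackBase in ansible.plugins.callback -- treat the exact class name,
# the class constants, and the self._display helper as assumptions here)
# and override only the v2_* hooks it needs, for example:
#
#   from ansible.plugins.callback import CallbackBase
#
#   class CallbackModule(CallbackBase):
#       CALLBACK_VERSION = 2.0
#       CALLBACK_TYPE = 'notification'
#       CALLBACK_NAME = 'log_ok_hosts'
#
#       def v2_runner_on_ok(self, result):
#           self._display.display('ok: %s' % result._host.get_name())
#
# Unoverridden v2_* hooks fall back to the no-op v1 methods defined above.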
| {
"content_hash": "0f1f4c65c722e42eb41267cbee32ab95",
"timestamp": "",
"source": "github",
"line_count": 345,
"max_line_length": 147,
"avg_line_length": 35.6,
"alnum_prop": 0.5711610486891385,
"repo_name": "abtreece/ansible",
"id": "d02e5ac3bebe56498176b6ab5269a0747a31424c",
"size": "13027",
"binary": false,
"copies": "6",
"ref": "refs/heads/stable-2.2",
"path": "lib/ansible/plugins/callback/__init__.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
} |
"""Tests for emql.cm_sketch."""
from language.emql import cm_sketch
import numpy as np
import tensorflow.compat.v1 as tf
class CmSketchTest(tf.test.TestCase):
def setUp(self):
super(CmSketchTest, self).setUp()
self.xs = np.arange(100)
self.cm_context = cm_sketch.CountMinContext(width=1000, depth=17)
self.sketch = self.cm_context.get_sketch(self.xs)
self.cm_context.add_set(self.sketch, self.xs)
self.new_xs = np.arange(50, 200)
self.new_sketch = self.cm_context.get_sketch(self.new_xs)
def test_contain(self):
self.assertTrue(self.cm_context.contain(self.sketch, 50))
self.assertFalse(self.cm_context.contain(self.sketch, 200))
def test_intersection(self):
intersection_sketch = self.cm_context.intersection(
self.sketch, self.new_sketch)
self.assertTrue(self.cm_context.contain(intersection_sketch, 80))
self.assertFalse(self.cm_context.contain(intersection_sketch, 10))
self.assertFalse(self.cm_context.contain(intersection_sketch, 150))
def test_union(self):
union_sketch = self.cm_context.union(self.sketch, self.new_sketch)
self.assertTrue(self.cm_context.contain(union_sketch, 80))
self.assertTrue(self.cm_context.contain(union_sketch, 10))
self.assertTrue(self.cm_context.contain(union_sketch, 150))
self.assertFalse(self.cm_context.contain(union_sketch, 300))
if __name__ == '__main__':
tf.test.main()
| {
"content_hash": "dbddd3a9c9670f7572ad185ba1ba0b36",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 71,
"avg_line_length": 36.17948717948718,
"alnum_prop": 0.7193479801559178,
"repo_name": "google-research/language",
"id": "48c90ff32bbc2b39f3bb9b6d71b5b35e714af11e",
"size": "2026",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "language/emql/cm_sketch_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "9834"
},
{
"name": "CSS",
"bytes": "602"
},
{
"name": "HTML",
"bytes": "25162"
},
{
"name": "JavaScript",
"bytes": "8857"
},
{
"name": "Jupyter Notebook",
"bytes": "1505066"
},
{
"name": "Python",
"bytes": "7139472"
},
{
"name": "Shell",
"bytes": "183709"
}
],
"symlink_target": ""
} |
from model.group import Group
def test_group_list(app,db):
ui_list = app.group.get_group_list()
def clean(group):
return Group(id=group.id, name=group.name.strip())
db_list = map(clean, db.get_group_list())
assert sorted(ui_list, key=Group.id_or_max) == sorted(db_list, key=Group.id_or_max) | {
"content_hash": "b4c3a9ef17a8aded906e725a067c3d6e",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 87,
"avg_line_length": 35.111111111111114,
"alnum_prop": 0.6677215189873418,
"repo_name": "olga121/python_training",
"id": "899184b09c8d480ad0e75fb1aeed3f49d12daf98",
"size": "316",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_db_matches_ui.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "34769"
}
],
"symlink_target": ""
} |
"""
This module is the interface to the bayesian_structural_analysis (bsa) module
It handles the images provided as input and produces result images.
"""
import numpy as np
import os.path as op
from nipy.io.imageformats import load, save, Nifti1Image
from nipy.neurospin.utils.mask import intersect_masks
import nipy.neurospin.spatial_models.bayesian_structural_analysis as bsa
import nipy.neurospin.graph.field as ff
def make_bsa_image(mask_images, betas, theta=3., dmax= 5., ths=0, thq=0.5,
smin=0, swd="/tmp/", method='simple', subj_id=None,
nbeta='default', densPath=None, crPath=None, verbose=0):
"""
main function for performing bsa on a set of images.
It creates the some output images in the given directory
Parameters
------------
mask_images: A list of image paths that yield binary images,
one for each subject
                 the number of subjects, nsubj, is taken as len(mask_images)
betas: A list of image paths that yields the activation images,
one for each subject
    theta=3., threshold used to ignore all the image data that is below
dmax=5., prior width of the spatial model;
corresponds to multi-subject uncertainty
ths=0: threshold on the representativity measure of the obtained
regions
thq=0.5: p-value of the representativity test:
test = p(representativity>ths)>thq
smin=0: minimal size (in voxels) of the extracted blobs
smaller blobs are merged into larger ones
swd='/tmp': writedir
    method='simple': applied region detection method; to be chosen among
'simple', 'dev','ipmi'
subj_id=None: list of strings, identifiers of the subjects.
by default it is range(nsubj)
nbeta='default', string, identifier of the contrast
densPath=None, string, path of the output density image
if False, no image is written
if None, the path is computed from swd, nbeta
crPath=None, string, path of the (4D) output label image
           if False, no image is written
if None, many images are written,
with paths computed from swd, subj_id and nbeta
Returns
-------
AF: an nipy.neurospin.spatial_models.structural_bfls.landmark_regions
instance that describes the structures found at the group level
None is returned if nothing has been found significant
at the group level
BF : a list of nipy.neurospin.spatial_models.hroi.Nroi instances
         (one per subject) that describe the individual counterpart of AF
if method=='loo', the output is different:
mll, float, the average likelihood of the data under H1 after cross validation
    ll0, float, the log-likelihood of the data under the global null
fixme: unique mask should be allowed
"""
# Sanity check
if len(mask_images)!=len(betas):
raise ValueError,"the number of masks and activation images\
should be the same"
nsubj = len(mask_images)
if subj_id==None:
        subj_id = [str(i) for i in range(nsubj)]
# Read the referential information
nim = load(mask_images[0])
ref_dim = nim.get_shape()
affine = nim.get_affine()
# Read the masks and compute the "intersection"
mask = intersect_masks(mask_images)
xyz = np.array(np.where(mask)).T
nvox = xyz.shape[0]
    # create the field structure that encodes image topology
Fbeta = ff.Field(nvox)
Fbeta.from_3d_grid(xyz.astype(np.int),18)
# Get coordinates in mm
xyz = np.hstack((xyz,np.ones((nvox,1))))
coord = np.dot(xyz,affine.T)[:,:3]
xyz = xyz.astype(np.int)
# read the functional images
lbeta = []
for s in range(nsubj):
rbeta = load(betas[s])
beta = rbeta.get_data()
beta = beta[mask]
lbeta.append(beta)
lbeta = np.array(lbeta).T
# launch the method
g0 = 1.0/(np.absolute(np.linalg.det(affine))*nvox)
bdensity = 1
crmap = np.zeros(nvox)
p = np.zeros(nvox)
AF = None
BF = [None for s in range(nsubj)]
if method=='ipmi':
crmap,AF,BF,p = bsa.compute_BSA_ipmi(Fbeta, lbeta, coord, dmax,
xyz[:,:3], affine, ref_dim, thq, smin, ths,
theta, g0, bdensity, verbose=verbose)
if method=='dev':
crmap,AF,BF,p = bsa.compute_BSA_dev (Fbeta, lbeta, coord,
dmax, xyz[:,:3], affine, ref_dim,
thq, smin,ths, theta, g0, bdensity, verbose=verbose)
if method=='simple':
crmap,AF,BF,p = bsa.compute_BSA_simple (Fbeta, lbeta, coord, dmax,
xyz[:,:3], affine, ref_dim,
thq, smin, ths, theta, g0, verbose=verbose)
if method=='simple_quick':
crmap,AF,BF,co_clust = bsa.compute_BSA_simple_quick(Fbeta, lbeta, coord, dmax,
xyz[:,:3], affine, ref_dim,
thq, smin, ths, theta, g0, verbose=verbose)
density = np.zeros(nvox)
crmap = AF.map_label(coord,0.95,dmax)
if method=='loo':
mll, ll0 = bsa.compute_BSA_loo (Fbeta, lbeta, coord, dmax,
xyz[:,:3], affine, ref_dim,
thq, smin,ths, theta, g0, verbose=verbose)
return mll, ll0
# Write the results as images
# the spatial density image
if densPath != False:
density = np.zeros(ref_dim)
density[mask] = p
wim = Nifti1Image (density, affine)
wim.get_header()['descrip'] = 'group-level spatial density of active regions'
if densPath==None:
densPath = op.join(swd,"density_%s.nii"%nbeta)
save(wim, densPath)
if crPath==False:
return AF, BF
if AF==None:
default_idx = 0
else:
default_idx = AF.k+2
if crPath==None:
# write a 3D image for group-level labels
crPath = op.join(swd,"CR_%s.nii"%nbeta)
Label = -2*np.ones(ref_dim,'int16')
Label[mask] = crmap
wim = Nifti1Image (Label, affine)
wim.get_header()['descrip'] = 'group Level labels from bsa procedure'
save(wim, crPath)
#write 3d images for the subjects
for s in range(nsubj):
LabelImage = op.join(swd,"AR_s%s_%s.nii"%(subj_id[s],nbeta))
Label = -2*np.ones(ref_dim,'int16')
Label[mask]=-1
if BF[s]!=None:
nls = BF[s].get_roi_feature('label')
nls[nls==-1] = default_idx
for k in range(BF[s].k):
xyzk = BF[s].xyz[k].T
Label[xyzk[0],xyzk[1],xyzk[2]] = nls[k]
wim = Nifti1Image (Label, affine)
wim.get_header()['descrip'] = 'Individual label image from bsa procedure'
save(wim, LabelImage)
else:
# write everything in a single 4D image
wdim = (ref_dim[0], ref_dim[1], ref_dim[2], nsubj+1)
Label = -2*np.ones(wdim,'int16')
Label[mask,0] = crmap
for s in range(nsubj):
Label[mask,s+1]=-1
if BF[s]!=None:
nls = BF[s].get_roi_feature('label')
nls[nls==-1] = default_idx
for k in range(BF[s].k):
xyzk = BF[s].xyz[k].T
Label[xyzk[0],xyzk[1],xyzk[2],s+1] = nls[k]
wim = Nifti1Image (Label, affine)
wim.get_header()['descrip'] = 'group Level and individual labels\
from bsa procedure'
save(wim, crPath)
return AF,BF
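# --- Editor's hedged usage sketch; not part of the original module. ---
# A typical group-level call, assuming per-subject mask and contrast images
# already exist on disk (the paths below are placeholders):
#
#   mask_images = ['/data/sub01_mask.nii', '/data/sub02_mask.nii']
#   betas = ['/data/sub01_con.nii', '/data/sub02_con.nii']
#   AF, BF = make_bsa_image(mask_images, betas, theta=3., dmax=5., ths=0,
#                           thq=0.5, smin=5, swd='/tmp/', method='simple',
#                           nbeta='contrast1')
#
# AF holds the group-level landmark regions (None if nothing reaches
# significance) and BF the matching per-subject ROIs; density and label
# images are written under swd.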
def make_bsa_image_with_output_paths(mask_images, betas, denspath, crpath,
theta=3., dmax= 5., ths=0, thq=0.5, smin=0,
method='simple'):
"""
Deprecated : will be removed soon
    idem make_bsa_image but paths of the output are set explicitly.
Moreover the segmented regions are written in one single image
"""
# Sanity check
if len(mask_images)!=len(betas):
print len(mask_images),len(betas)
raise ValueError,"the number of masks and activation images\
should be the same"
nsubj = len(mask_images)
# Read the referential information
nim = load(mask_images[0])
ref_dim = nim.get_shape()
affine = nim.get_affine()
# Read the masks and compute the "intersection"
mask = intersect_masks(mask_images)
xyz = np.array(np.where(mask)).T
nvox = xyz.shape[0]
    # create the field structure that encodes image topology
Fbeta = ff.Field(nvox)
Fbeta.from_3d_grid(xyz.astype(np.int),18)
# Get coordinates in mm
xyz = np.hstack((xyz,np.ones((nvox,1))))
coord = np.dot(xyz,affine.T)[:,:3]
xyz = xyz.astype(np.int)
# read the functional images
lbeta = []
for s in range(nsubj):
rbeta = load(betas[s])
beta = rbeta.get_data()
beta = beta[mask]
lbeta.append(beta)
lbeta = np.array(lbeta).T
lbeta = np.reshape(lbeta,(nvox,nsubj))
# launch the method
g0 = 1.0/(np.absolute(np.linalg.det(affine))*nvox)
bdensity = 1
crmap = np.zeros(nvox)
p = np.zeros(nvox)
AF = None
BF = [None for s in range(nsubj)]
if method=='ipmi':
crmap,AF,BF,p = bsa.compute_BSA_ipmi(Fbeta, lbeta, coord, dmax,
xyz[:,:3], affine, ref_dim, thq, smin, ths,
theta, g0, bdensity)
if method=='dev':
crmap,AF,BF,p = bsa.compute_BSA_dev (Fbeta, lbeta, coord,
dmax, xyz[:,:3], affine, ref_dim,
thq, smin,ths, theta, g0, bdensity,verbose=1)
if method=='simple':
crmap,AF,BF,p = bsa.compute_BSA_simple (Fbeta, lbeta, coord, dmax,
xyz[:,:3], affine, ref_dim,
thq, smin, ths, theta, g0, verbose=0)
if method=='simple_quick':
crmap,AF,BF,co_clust = bsa.compute_BSA_simple_quick (Fbeta, lbeta, coord, dmax,
xyz[:,:3], affine, ref_dim,
thq, smin, ths, theta, g0, verbose=0)
density = np.zeros(nvox)
crmap = AF.map_label(coord,0.95,dmax)
if method=='loo':
crmap,AF,BF,p = bsa.compute_BSA_loo (Fbeta, lbeta, coord, dmax,
xyz[:,:3], affine, ref_dim,
thq, smin,ths, theta, g0, verbose=0)
# Write the results
Label = -2*np.ones(ref_dim,'int16')
Label[mask] = crmap.astype('i')
wim = Nifti1Image (Label, affine)
wim.get_header()['descrip'] = 'group Level labels from bsa procedure'
save(wim, crpath)
density = np.zeros(ref_dim)
density[mask] = p
wim = Nifti1Image (density, affine)
wim.get_header()['descrip'] = 'group-level spatial density of active regions'
save(wim, denspath)
if AF==None:
default_idx = 0
else:
default_idx = AF.k+2
# write everything in one image
wdim = (ref_dim[0], ref_dim[1], ref_dim[2], nsubj+1)
Label = -2*np.ones(wdim,'int16')
Label[mask,0] = crmap.astype(np.int)
for s in range(nsubj):
Label[mask,s+1]=-1
if BF[s]!=None:
nls = BF[s].get_roi_feature('label')
nls[nls==-1] = default_idx
for k in range(BF[s].k):
xyzk = BF[s].xyz[k].T
Label[xyzk[0],xyzk[1],xyzk[2],s+1] = nls[k]
wim = Nifti1Image (Label, affine)
wim.get_header()['descrip'] = 'group Level and individual labels\
from bsa procedure'
save(wim, crpath)
    return AF, BF
| {
"content_hash": "2294ba68e62318706883e7116aa9aa2b",
"timestamp": "",
"source": "github",
"line_count": 316,
"max_line_length": 88,
"avg_line_length": 37.300632911392405,
"alnum_prop": 0.5674047679647068,
"repo_name": "yarikoptic/NiPy-OLD",
"id": "dcbc29dc75f9c993825ee5acff06fb478b0cdc04",
"size": "11787",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nipy/neurospin/spatial_models/bsa_io.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "4411801"
},
{
"name": "Objective-C",
"bytes": "4262"
},
{
"name": "Python",
"bytes": "2617786"
}
],
"symlink_target": ""
} |
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "neighbors.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| {
"content_hash": "b9f67f09eb48dd90ddd5473d7f721717",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 73,
"avg_line_length": 25.555555555555557,
"alnum_prop": 0.7130434782608696,
"repo_name": "pabulumm/neighbors",
"id": "47e79149633f10ad1bbd7b9535367c87759a1d59",
"size": "253",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "manage.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "167622"
},
{
"name": "HTML",
"bytes": "221496"
},
{
"name": "JavaScript",
"bytes": "325471"
},
{
"name": "Python",
"bytes": "7896264"
},
{
"name": "Shell",
"bytes": "12645"
},
{
"name": "Smarty",
"bytes": "789"
}
],
"symlink_target": ""
} |
"""configure script to get build parameters from user."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import errno
import os
import platform
import re
import subprocess
import sys
# pylint: disable=g-import-not-at-top
try:
from shutil import which
except ImportError:
from distutils.spawn import find_executable as which
# pylint: enable=g-import-not-at-top
_DEFAULT_CUDA_VERSION = '9.0'
_DEFAULT_CUDNN_VERSION = '7'
_DEFAULT_CUDA_COMPUTE_CAPABILITIES = '3.5,7.0'
_DEFAULT_CUDA_PATH = '/usr/local/cuda'
_DEFAULT_CUDA_PATH_LINUX = '/opt/cuda'
_DEFAULT_CUDA_PATH_WIN = ('C:/Program Files/NVIDIA GPU Computing '
'Toolkit/CUDA/v%s' % _DEFAULT_CUDA_VERSION)
_TF_OPENCL_VERSION = '1.2'
_DEFAULT_COMPUTECPP_TOOLKIT_PATH = '/usr/local/computecpp'
_DEFAULT_TRISYCL_INCLUDE_DIR = '/usr/local/triSYCL/include'
_SUPPORTED_ANDROID_NDK_VERSIONS = [10, 11, 12, 13, 14, 15, 16]
_DEFAULT_PROMPT_ASK_ATTEMPTS = 10
_TF_BAZELRC_FILENAME = '.tf_configure.bazelrc'
_TF_WORKSPACE_ROOT = ''
_TF_BAZELRC = ''
NCCL_LIB_PATHS = [
'lib64/', 'lib/powerpc64le-linux-gnu/', 'lib/x86_64-linux-gnu/', ''
]
if platform.machine() == 'ppc64le':
_DEFAULT_TENSORRT_PATH_LINUX = '/usr/lib/powerpc64le-linux-gnu/'
else:
_DEFAULT_TENSORRT_PATH_LINUX = '/usr/lib/%s-linux-gnu' % platform.machine()
class UserInputError(Exception):
pass
def is_windows():
return platform.system() == 'Windows'
def is_linux():
return platform.system() == 'Linux'
def is_macos():
return platform.system() == 'Darwin'
def is_ppc64le():
return platform.machine() == 'ppc64le'
def is_cygwin():
return platform.system().startswith('CYGWIN_NT')
def get_input(question):
try:
try:
answer = raw_input(question)
except NameError:
answer = input(question) # pylint: disable=bad-builtin
except EOFError:
answer = ''
return answer
def symlink_force(target, link_name):
"""Force symlink, equivalent of 'ln -sf'.
Args:
target: items to link to.
link_name: name of the link.
"""
try:
os.symlink(target, link_name)
except OSError as e:
if e.errno == errno.EEXIST:
os.remove(link_name)
os.symlink(target, link_name)
else:
raise e
def sed_in_place(filename, old, new):
"""Replace old string with new string in file.
Args:
filename: string for filename.
old: string to replace.
new: new string to replace to.
"""
with open(filename, 'r') as f:
filedata = f.read()
newdata = filedata.replace(old, new)
with open(filename, 'w') as f:
f.write(newdata)
def write_to_bazelrc(line):
with open(_TF_BAZELRC, 'a') as f:
f.write(line + '\n')
def write_action_env_to_bazelrc(var_name, var):
write_to_bazelrc('build --action_env %s="%s"' % (var_name, str(var)))
def run_shell(cmd, allow_non_zero=False):
if allow_non_zero:
try:
output = subprocess.check_output(cmd)
except subprocess.CalledProcessError as e:
output = e.output
else:
output = subprocess.check_output(cmd)
return output.decode('UTF-8').strip()
def cygpath(path):
"""Convert path from posix to windows."""
return os.path.abspath(path).replace('\\', '/')
def get_python_path(environ_cp, python_bin_path):
"""Get the python site package paths."""
python_paths = []
if environ_cp.get('PYTHONPATH'):
python_paths = environ_cp.get('PYTHONPATH').split(':')
try:
library_paths = run_shell([
python_bin_path, '-c',
'import site; print("\\n".join(site.getsitepackages()))'
]).split('\n')
except subprocess.CalledProcessError:
library_paths = [
run_shell([
python_bin_path, '-c',
'from distutils.sysconfig import get_python_lib;'
'print(get_python_lib())'
])
]
all_paths = set(python_paths + library_paths)
paths = []
for path in all_paths:
if os.path.isdir(path):
paths.append(path)
return paths
def get_python_major_version(python_bin_path):
"""Get the python major version."""
return run_shell([python_bin_path, '-c', 'import sys; print(sys.version[0])'])
def setup_python(environ_cp):
"""Setup python related env variables."""
# Get PYTHON_BIN_PATH, default is the current running python.
default_python_bin_path = sys.executable
ask_python_bin_path = ('Please specify the location of python. [Default is '
'%s]: ') % default_python_bin_path
while True:
python_bin_path = get_from_env_or_user_or_default(
environ_cp, 'PYTHON_BIN_PATH', ask_python_bin_path,
default_python_bin_path)
# Check if the path is valid
if os.path.isfile(python_bin_path) and os.access(python_bin_path, os.X_OK):
break
elif not os.path.exists(python_bin_path):
print('Invalid python path: %s cannot be found.' % python_bin_path)
else:
print('%s is not executable. Is it the python binary?' % python_bin_path)
environ_cp['PYTHON_BIN_PATH'] = ''
# Convert python path to Windows style before checking lib and version
if is_windows() or is_cygwin():
python_bin_path = cygpath(python_bin_path)
# Get PYTHON_LIB_PATH
python_lib_path = environ_cp.get('PYTHON_LIB_PATH')
if not python_lib_path:
python_lib_paths = get_python_path(environ_cp, python_bin_path)
if environ_cp.get('USE_DEFAULT_PYTHON_LIB_PATH') == '1':
python_lib_path = python_lib_paths[0]
else:
print('Found possible Python library paths:\n %s' %
'\n '.join(python_lib_paths))
default_python_lib_path = python_lib_paths[0]
python_lib_path = get_input(
'Please input the desired Python library path to use. '
'Default is [%s]\n' % python_lib_paths[0])
if not python_lib_path:
python_lib_path = default_python_lib_path
environ_cp['PYTHON_LIB_PATH'] = python_lib_path
_ = get_python_major_version(python_bin_path)
# Convert python path to Windows style before writing into bazel.rc
if is_windows() or is_cygwin():
python_lib_path = cygpath(python_lib_path)
# Set-up env variables used by python_configure.bzl
write_action_env_to_bazelrc('PYTHON_BIN_PATH', python_bin_path)
write_action_env_to_bazelrc('PYTHON_LIB_PATH', python_lib_path)
write_to_bazelrc('build --python_path=\"%s"' % python_bin_path)
environ_cp['PYTHON_BIN_PATH'] = python_bin_path
# Write tools/python_bin_path.sh
with open(
os.path.join(_TF_WORKSPACE_ROOT, 'tools', 'python_bin_path.sh'),
'w') as f:
f.write('export PYTHON_BIN_PATH="%s"' % python_bin_path)
def reset_tf_configure_bazelrc():
"""Reset file that contains customized config settings."""
open(_TF_BAZELRC, 'w').close()
bazelrc_path = os.path.join(_TF_WORKSPACE_ROOT, '.bazelrc')
data = []
if os.path.exists(bazelrc_path):
with open(bazelrc_path, 'r') as f:
data = f.read().splitlines()
with open(bazelrc_path, 'w') as f:
for l in data:
if _TF_BAZELRC_FILENAME in l:
continue
f.write('%s\n' % l)
f.write('import %%workspace%%/%s\n' % _TF_BAZELRC_FILENAME)
def cleanup_makefile():
"""Delete any leftover BUILD files from the Makefile build.
These files could interfere with Bazel parsing.
"""
makefile_download_dir = os.path.join(_TF_WORKSPACE_ROOT, 'tensorflow',
'contrib', 'makefile', 'downloads')
if os.path.isdir(makefile_download_dir):
for root, _, filenames in os.walk(makefile_download_dir):
for f in filenames:
if f.endswith('BUILD'):
os.remove(os.path.join(root, f))
def get_var(environ_cp,
var_name,
query_item,
enabled_by_default,
question=None,
yes_reply=None,
no_reply=None):
"""Get boolean input from user.
If var_name is not set in env, ask user to enable query_item or not. If the
response is empty, use the default.
Args:
environ_cp: copy of the os.environ.
var_name: string for name of environment variable, e.g. "TF_NEED_HDFS".
query_item: string for feature related to the variable, e.g. "Hadoop File
System".
enabled_by_default: boolean for default behavior.
question: optional string for how to ask for user input.
yes_reply: optional string for reply when feature is enabled.
no_reply: optional string for reply when feature is disabled.
Returns:
boolean value of the variable.
Raises:
UserInputError: if an environment variable is set, but it cannot be
interpreted as a boolean indicator, assume that the user has made a
scripting error, and will continue to provide invalid input.
Raise the error to avoid infinitely looping.
"""
if not question:
question = 'Do you wish to build TensorFlow with %s support?' % query_item
if not yes_reply:
yes_reply = '%s support will be enabled for TensorFlow.' % query_item
if not no_reply:
no_reply = 'No %s' % yes_reply
yes_reply += '\n'
no_reply += '\n'
if enabled_by_default:
question += ' [Y/n]: '
else:
question += ' [y/N]: '
var = environ_cp.get(var_name)
if var is not None:
var_content = var.strip().lower()
true_strings = ('1', 't', 'true', 'y', 'yes')
false_strings = ('0', 'f', 'false', 'n', 'no')
if var_content in true_strings:
var = True
elif var_content in false_strings:
var = False
else:
raise UserInputError(
'Environment variable %s must be set as a boolean indicator.\n'
'The following are accepted as TRUE : %s.\n'
'The following are accepted as FALSE: %s.\n'
'Current value is %s.' % (var_name, ', '.join(true_strings),
', '.join(false_strings), var))
while var is None:
user_input_origin = get_input(question)
user_input = user_input_origin.strip().lower()
if user_input == 'y':
print(yes_reply)
var = True
elif user_input == 'n':
print(no_reply)
var = False
elif not user_input:
if enabled_by_default:
print(yes_reply)
var = True
else:
print(no_reply)
var = False
else:
print('Invalid selection: %s' % user_input_origin)
return var
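# --- Editor's hedged example; not part of the original script. ---
# When the variable is preset in the environment the prompt is skipped and the
# string is parsed as a boolean; otherwise the user is asked interactively:
#
#   environ_cp = dict(os.environ)
#   environ_cp['TF_NEED_HDFS'] = 'yes'
#   get_var(environ_cp, 'TF_NEED_HDFS', 'Hadoop File System', True)  # -> True
#   environ_cp['TF_NEED_HDFS'] = '0'
#   get_var(environ_cp, 'TF_NEED_HDFS', 'Hadoop File System', True)  # -> False
#   environ_cp['TF_NEED_HDFS'] = 'maybe'  # raises UserInputError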
def set_build_var(environ_cp,
var_name,
query_item,
option_name,
enabled_by_default,
bazel_config_name=None):
"""Set if query_item will be enabled for the build.
Ask user if query_item will be enabled. Default is used if no input is given.
Set subprocess environment variable and write to .bazelrc if enabled.
Args:
environ_cp: copy of the os.environ.
var_name: string for name of environment variable, e.g. "TF_NEED_HDFS".
query_item: string for feature related to the variable, e.g. "Hadoop File
System".
option_name: string for option to define in .bazelrc.
enabled_by_default: boolean for default behavior.
bazel_config_name: Name for Bazel --config argument to enable build feature.
"""
var = str(int(get_var(environ_cp, var_name, query_item, enabled_by_default)))
environ_cp[var_name] = var
if var == '1':
write_to_bazelrc(
'build:%s --define %s=true' % (bazel_config_name, option_name))
write_to_bazelrc('build --config=%s' % bazel_config_name)
elif bazel_config_name is not None:
# TODO(mikecase): Migrate all users of configure.py to use --config Bazel
# options and not to set build configs through environment variables.
write_to_bazelrc(
'build:%s --define %s=true' % (bazel_config_name, option_name))
def set_action_env_var(environ_cp,
var_name,
query_item,
enabled_by_default,
question=None,
yes_reply=None,
no_reply=None):
"""Set boolean action_env variable.
Ask user if query_item will be enabled. Default is used if no input is given.
Set environment variable and write to .bazelrc.
Args:
environ_cp: copy of the os.environ.
var_name: string for name of environment variable, e.g. "TF_NEED_HDFS".
query_item: string for feature related to the variable, e.g. "Hadoop File
System".
enabled_by_default: boolean for default behavior.
question: optional string for how to ask for user input.
yes_reply: optional string for reply when feature is enabled.
no_reply: optional string for reply when feature is disabled.
"""
var = int(
get_var(environ_cp, var_name, query_item, enabled_by_default, question,
yes_reply, no_reply))
write_action_env_to_bazelrc(var_name, var)
environ_cp[var_name] = str(var)
def convert_version_to_int(version):
"""Convert a version number to a integer that can be used to compare.
Version strings of the form X.YZ and X.Y.Z-xxxxx are supported. The
'xxxxx' part, for instance 'homebrew' on OS/X, is ignored.
Args:
version: a version to be converted
Returns:
An integer if converted successfully, otherwise return None.
"""
version = version.split('-')[0]
version_segments = version.split('.')
for seg in version_segments:
if not seg.isdigit():
return None
version_str = ''.join(['%03d' % int(seg) for seg in version_segments])
return int(version_str)
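# --- Editor's worked examples; not part of the original script. ---
# Each dot-separated segment is zero-padded to three digits before joining,
# so the resulting integers compare in version order:
#
#   convert_version_to_int('0.15.0')           # -> 15000   ('000'+'015'+'000')
#   convert_version_to_int('1.12.2')           # -> 1012002 ('001'+'012'+'002')
#   convert_version_to_int('0.15.0-homebrew')  # -> 15000   (suffix after '-' dropped)
#   convert_version_to_int('abc')              # -> None    (non-numeric segment)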
def check_bazel_version(min_version):
"""Check installed bazel version is at least min_version.
Args:
min_version: string for minimum bazel version.
Returns:
The bazel version detected.
"""
if which('bazel') is None:
print('Cannot find bazel. Please install bazel.')
sys.exit(0)
curr_version = run_shell(
['bazel', '--batch', '--bazelrc=/dev/null', 'version'])
for line in curr_version.split('\n'):
if 'Build label: ' in line:
curr_version = line.split('Build label: ')[1]
break
min_version_int = convert_version_to_int(min_version)
curr_version_int = convert_version_to_int(curr_version)
# Check if current bazel version can be detected properly.
if not curr_version_int:
print('WARNING: current bazel installation is not a release version.')
print('Make sure you are running at least bazel %s' % min_version)
return curr_version
print('You have bazel %s installed.' % curr_version)
if curr_version_int < min_version_int:
print('Please upgrade your bazel installation to version %s or higher to '
'build TensorFlow!' % min_version)
sys.exit(0)
return curr_version
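# --- Editor's hedged example; not part of the original script. ---
# `bazel --batch --bazelrc=/dev/null version` prints, among other lines,
# something like "Build label: 0.15.0"; that string is extracted and compared
# to min_version via convert_version_to_int:
#
#   check_bazel_version('0.15.0')  # returns the detected version (warning only
#                                  # for non-release builds); exits if bazel is
#                                  # missing or older than 0.15.0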
def set_cc_opt_flags(environ_cp):
"""Set up architecture-dependent optimization flags.
  Also append CC optimization flags to bazel.rc.
Args:
environ_cp: copy of the os.environ.
"""
if is_ppc64le():
# gcc on ppc64le does not support -march, use mcpu instead
default_cc_opt_flags = '-mcpu=native'
elif is_windows():
default_cc_opt_flags = '/arch:AVX'
else:
default_cc_opt_flags = '-march=native -Wno-sign-compare'
question = ('Please specify optimization flags to use during compilation when'
' bazel option "--config=opt" is specified [Default is %s]: '
) % default_cc_opt_flags
cc_opt_flags = get_from_env_or_user_or_default(environ_cp, 'CC_OPT_FLAGS',
question, default_cc_opt_flags)
for opt in cc_opt_flags.split():
write_to_bazelrc('build:opt --copt=%s' % opt)
# It should be safe on the same build host.
if not is_ppc64le() and not is_windows():
write_to_bazelrc('build:opt --host_copt=-march=native')
write_to_bazelrc('build:opt --define with_default_optimizations=true')
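# --- Editor's hedged example; not part of the original script. ---
# On a typical x86-64 Linux host, accepting the default flags appends roughly
# the following lines to .tf_configure.bazelrc (the exact --copt values depend
# on the answer given at the prompt):
#
#   build:opt --copt=-march=native
#   build:opt --copt=-Wno-sign-compare
#   build:opt --host_copt=-march=native
#   build:opt --define with_default_optimizations=true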
def set_tf_cuda_clang(environ_cp):
"""set TF_CUDA_CLANG action_env.
Args:
environ_cp: copy of the os.environ.
"""
question = 'Do you want to use clang as CUDA compiler?'
yes_reply = 'Clang will be used as CUDA compiler.'
no_reply = 'nvcc will be used as CUDA compiler.'
set_action_env_var(
environ_cp,
'TF_CUDA_CLANG',
None,
False,
question=question,
yes_reply=yes_reply,
no_reply=no_reply)
def set_tf_download_clang(environ_cp):
"""Set TF_DOWNLOAD_CLANG action_env."""
question = 'Do you wish to download a fresh release of clang? (Experimental)'
yes_reply = 'Clang will be downloaded and used to compile tensorflow.'
no_reply = 'Clang will not be downloaded.'
set_action_env_var(
environ_cp,
'TF_DOWNLOAD_CLANG',
None,
False,
question=question,
yes_reply=yes_reply,
no_reply=no_reply)
def get_from_env_or_user_or_default(environ_cp, var_name, ask_for_var,
var_default):
"""Get var_name either from env, or user or default.
If var_name has been set as environment variable, use the preset value, else
ask for user input. If no input is provided, the default is used.
Args:
environ_cp: copy of the os.environ.
var_name: string for name of environment variable, e.g. "TF_NEED_HDFS".
ask_for_var: string for how to ask for user input.
var_default: default value string.
Returns:
string value for var_name
"""
var = environ_cp.get(var_name)
if not var:
var = get_input(ask_for_var)
print('\n')
if not var:
var = var_default
return var
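# --- Editor's hedged example; not part of the original script. ---
# Precedence is: preset environment value, then interactive input, then the
# default. With TF_CUDA_VERSION already set, the prompt is skipped entirely:
#
#   environ_cp = dict(os.environ)
#   environ_cp['TF_CUDA_VERSION'] = '9.2'
#   get_from_env_or_user_or_default(environ_cp, 'TF_CUDA_VERSION',
#                                   'Which CUDA version? ', '9.0')  # -> '9.2'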
def set_clang_cuda_compiler_path(environ_cp):
"""Set CLANG_CUDA_COMPILER_PATH."""
default_clang_path = which('clang') or ''
ask_clang_path = ('Please specify which clang should be used as device and '
'host compiler. [Default is %s]: ') % default_clang_path
while True:
clang_cuda_compiler_path = get_from_env_or_user_or_default(
environ_cp, 'CLANG_CUDA_COMPILER_PATH', ask_clang_path,
default_clang_path)
if os.path.exists(clang_cuda_compiler_path):
break
# Reset and retry
print('Invalid clang path: %s cannot be found.' % clang_cuda_compiler_path)
environ_cp['CLANG_CUDA_COMPILER_PATH'] = ''
# Set CLANG_CUDA_COMPILER_PATH
environ_cp['CLANG_CUDA_COMPILER_PATH'] = clang_cuda_compiler_path
write_action_env_to_bazelrc('CLANG_CUDA_COMPILER_PATH',
clang_cuda_compiler_path)
def prompt_loop_or_load_from_env(environ_cp,
var_name,
var_default,
ask_for_var,
check_success,
error_msg,
suppress_default_error=False,
n_ask_attempts=_DEFAULT_PROMPT_ASK_ATTEMPTS):
"""Loop over user prompts for an ENV param until receiving a valid response.
For the env param var_name, read from the environment or verify user input
until receiving valid input. When done, set var_name in the environ_cp to its
new value.
Args:
environ_cp: (Dict) copy of the os.environ.
var_name: (String) string for name of environment variable, e.g. "TF_MYVAR".
var_default: (String) default value string.
ask_for_var: (String) string for how to ask for user input.
check_success: (Function) function that takes one argument and returns a
boolean. Should return True if the value provided is considered valid. May
contain a complex error message if error_msg does not provide enough
information. In that case, set suppress_default_error to True.
error_msg: (String) String with one and only one '%s'. Formatted with each
invalid response upon check_success(input) failure.
suppress_default_error: (Bool) Suppress the above error message in favor of
one from the check_success function.
n_ask_attempts: (Integer) Number of times to query for valid input before
raising an error and quitting.
Returns:
[String] The value of var_name after querying for input.
Raises:
UserInputError: if a query has been attempted n_ask_attempts times without
success, assume that the user has made a scripting error, and will
continue to provide invalid input. Raise the error to avoid infinitely
looping.
"""
default = environ_cp.get(var_name) or var_default
full_query = '%s [Default is %s]: ' % (
ask_for_var,
default,
)
for _ in range(n_ask_attempts):
val = get_from_env_or_user_or_default(environ_cp, var_name, full_query,
default)
if check_success(val):
break
if not suppress_default_error:
print(error_msg % val)
environ_cp[var_name] = ''
else:
raise UserInputError(
'Invalid %s setting was provided %d times in a row. '
'Assuming to be a scripting mistake.' % (var_name, n_ask_attempts))
environ_cp[var_name] = val
return val
def create_android_ndk_rule(environ_cp):
"""Set ANDROID_NDK_HOME and write Android NDK WORKSPACE rule."""
if is_windows() or is_cygwin():
default_ndk_path = cygpath(
'%s/Android/Sdk/ndk-bundle' % environ_cp['APPDATA'])
elif is_macos():
default_ndk_path = '%s/library/Android/Sdk/ndk-bundle' % environ_cp['HOME']
else:
default_ndk_path = '%s/Android/Sdk/ndk-bundle' % environ_cp['HOME']
def valid_ndk_path(path):
return (os.path.exists(path) and
os.path.exists(os.path.join(path, 'source.properties')))
android_ndk_home_path = prompt_loop_or_load_from_env(
environ_cp,
var_name='ANDROID_NDK_HOME',
var_default=default_ndk_path,
ask_for_var='Please specify the home path of the Android NDK to use.',
check_success=valid_ndk_path,
error_msg=('The path %s or its child file "source.properties" '
'does not exist.'))
write_action_env_to_bazelrc('ANDROID_NDK_HOME', android_ndk_home_path)
write_action_env_to_bazelrc('ANDROID_NDK_API_LEVEL',
check_ndk_level(android_ndk_home_path))
def create_android_sdk_rule(environ_cp):
"""Set Android variables and write Android SDK WORKSPACE rule."""
if is_windows() or is_cygwin():
default_sdk_path = cygpath('%s/Android/Sdk' % environ_cp['APPDATA'])
elif is_macos():
default_sdk_path = '%s/library/Android/Sdk' % environ_cp['HOME']
else:
default_sdk_path = '%s/Android/Sdk' % environ_cp['HOME']
def valid_sdk_path(path):
return (os.path.exists(path) and
os.path.exists(os.path.join(path, 'platforms')) and
os.path.exists(os.path.join(path, 'build-tools')))
android_sdk_home_path = prompt_loop_or_load_from_env(
environ_cp,
var_name='ANDROID_SDK_HOME',
var_default=default_sdk_path,
ask_for_var='Please specify the home path of the Android SDK to use.',
check_success=valid_sdk_path,
error_msg=('Either %s does not exist, or it does not contain the '
'subdirectories "platforms" and "build-tools".'))
platforms = os.path.join(android_sdk_home_path, 'platforms')
api_levels = sorted(os.listdir(platforms))
api_levels = [x.replace('android-', '') for x in api_levels]
def valid_api_level(api_level):
return os.path.exists(
os.path.join(android_sdk_home_path, 'platforms',
'android-' + api_level))
android_api_level = prompt_loop_or_load_from_env(
environ_cp,
var_name='ANDROID_API_LEVEL',
var_default=api_levels[-1],
ask_for_var=('Please specify the Android SDK API level to use. '
'[Available levels: %s]') % api_levels,
check_success=valid_api_level,
error_msg='Android-%s is not present in the SDK path.')
build_tools = os.path.join(android_sdk_home_path, 'build-tools')
versions = sorted(os.listdir(build_tools))
def valid_build_tools(version):
return os.path.exists(
os.path.join(android_sdk_home_path, 'build-tools', version))
android_build_tools_version = prompt_loop_or_load_from_env(
environ_cp,
var_name='ANDROID_BUILD_TOOLS_VERSION',
var_default=versions[-1],
ask_for_var=('Please specify an Android build tools version to use. '
'[Available versions: %s]') % versions,
check_success=valid_build_tools,
error_msg=('The selected SDK does not have build-tools version %s '
'available.'))
write_action_env_to_bazelrc('ANDROID_BUILD_TOOLS_VERSION',
android_build_tools_version)
write_action_env_to_bazelrc('ANDROID_SDK_API_LEVEL', android_api_level)
write_action_env_to_bazelrc('ANDROID_SDK_HOME', android_sdk_home_path)
def check_ndk_level(android_ndk_home_path):
"""Check the revision number of an Android NDK path."""
properties_path = '%s/source.properties' % android_ndk_home_path
if is_windows() or is_cygwin():
properties_path = cygpath(properties_path)
with open(properties_path, 'r') as f:
filedata = f.read()
revision = re.search(r'Pkg.Revision = (\d+)', filedata)
if revision:
ndk_api_level = revision.group(1)
else:
raise Exception('Unable to parse NDK revision.')
if int(ndk_api_level) not in _SUPPORTED_ANDROID_NDK_VERSIONS:
print('WARNING: The API level of the NDK in %s is %s, which is not '
'supported by Bazel (officially supported versions: %s). Please use '
'another version. Compiling Android targets may result in confusing '
'errors.\n' % (android_ndk_home_path, ndk_api_level,
_SUPPORTED_ANDROID_NDK_VERSIONS))
return ndk_api_level
def set_gcc_host_compiler_path(environ_cp):
"""Set GCC_HOST_COMPILER_PATH."""
default_gcc_host_compiler_path = which('gcc') or ''
cuda_bin_symlink = '%s/bin/gcc' % environ_cp.get('CUDA_TOOLKIT_PATH')
if os.path.islink(cuda_bin_symlink):
# os.readlink is only available in linux
default_gcc_host_compiler_path = os.path.realpath(cuda_bin_symlink)
gcc_host_compiler_path = prompt_loop_or_load_from_env(
environ_cp,
var_name='GCC_HOST_COMPILER_PATH',
var_default=default_gcc_host_compiler_path,
ask_for_var=
'Please specify which gcc should be used by nvcc as the host compiler.',
check_success=os.path.exists,
error_msg='Invalid gcc path. %s cannot be found.',
)
write_action_env_to_bazelrc('GCC_HOST_COMPILER_PATH', gcc_host_compiler_path)
def reformat_version_sequence(version_str, sequence_count):
"""Reformat the version string to have the given number of sequences.
For example:
Given (7, 2) -> 7.0
(7.0.1, 2) -> 7.0
(5, 1) -> 5
(5.0.3.2, 1) -> 5
Args:
version_str: String, the version string.
sequence_count: int, an integer.
Returns:
string, reformatted version string.
"""
v = version_str.split('.')
if len(v) < sequence_count:
v = v + (['0'] * (sequence_count - len(v)))
return '.'.join(v[:sequence_count])
def set_tf_cuda_version(environ_cp):
"""Set CUDA_TOOLKIT_PATH and TF_CUDA_VERSION."""
ask_cuda_version = (
'Please specify the CUDA SDK version you want to use. '
'[Leave empty to default to CUDA %s]: ') % _DEFAULT_CUDA_VERSION
for _ in range(_DEFAULT_PROMPT_ASK_ATTEMPTS):
# Configure the Cuda SDK version to use.
tf_cuda_version = get_from_env_or_user_or_default(
environ_cp, 'TF_CUDA_VERSION', ask_cuda_version, _DEFAULT_CUDA_VERSION)
tf_cuda_version = reformat_version_sequence(str(tf_cuda_version), 2)
# Find out where the CUDA toolkit is installed
default_cuda_path = _DEFAULT_CUDA_PATH
if is_windows() or is_cygwin():
default_cuda_path = cygpath(
environ_cp.get('CUDA_PATH', _DEFAULT_CUDA_PATH_WIN))
elif is_linux():
# If the default doesn't exist, try an alternative default.
if (not os.path.exists(default_cuda_path)
) and os.path.exists(_DEFAULT_CUDA_PATH_LINUX):
default_cuda_path = _DEFAULT_CUDA_PATH_LINUX
ask_cuda_path = ('Please specify the location where CUDA %s toolkit is'
' installed. Refer to README.md for more details. '
'[Default is %s]: ') % (tf_cuda_version, default_cuda_path)
cuda_toolkit_path = get_from_env_or_user_or_default(
environ_cp, 'CUDA_TOOLKIT_PATH', ask_cuda_path, default_cuda_path)
if is_windows() or is_cygwin():
cuda_toolkit_path = cygpath(cuda_toolkit_path)
if is_windows():
cuda_rt_lib_paths = ['lib/x64/cudart.lib']
elif is_linux():
cuda_rt_lib_paths = [
'%s/libcudart.so.%s' % (x, tf_cuda_version) for x in [
'lib64',
'lib/powerpc64le-linux-gnu',
'lib/x86_64-linux-gnu',
]
]
elif is_macos():
cuda_rt_lib_paths = ['lib/libcudart.%s.dylib' % tf_cuda_version]
cuda_toolkit_paths_full = [
os.path.join(cuda_toolkit_path, x) for x in cuda_rt_lib_paths
]
if any([os.path.exists(x) for x in cuda_toolkit_paths_full]):
break
# Reset and retry
print('Invalid path to CUDA %s toolkit. %s cannot be found' %
(tf_cuda_version, cuda_toolkit_paths_full))
environ_cp['TF_CUDA_VERSION'] = ''
environ_cp['CUDA_TOOLKIT_PATH'] = ''
else:
raise UserInputError('Invalid TF_CUDA_SETTING setting was provided %d '
'times in a row. Assuming to be a scripting mistake.' %
_DEFAULT_PROMPT_ASK_ATTEMPTS)
# Set CUDA_TOOLKIT_PATH and TF_CUDA_VERSION
environ_cp['CUDA_TOOLKIT_PATH'] = cuda_toolkit_path
write_action_env_to_bazelrc('CUDA_TOOLKIT_PATH', cuda_toolkit_path)
environ_cp['TF_CUDA_VERSION'] = tf_cuda_version
write_action_env_to_bazelrc('TF_CUDA_VERSION', tf_cuda_version)
def set_tf_cudnn_version(environ_cp):
"""Set CUDNN_INSTALL_PATH and TF_CUDNN_VERSION."""
ask_cudnn_version = (
'Please specify the cuDNN version you want to use. '
'[Leave empty to default to cuDNN %s]: ') % _DEFAULT_CUDNN_VERSION
for _ in range(_DEFAULT_PROMPT_ASK_ATTEMPTS):
tf_cudnn_version = get_from_env_or_user_or_default(
environ_cp, 'TF_CUDNN_VERSION', ask_cudnn_version,
_DEFAULT_CUDNN_VERSION)
tf_cudnn_version = reformat_version_sequence(str(tf_cudnn_version), 1)
default_cudnn_path = environ_cp.get('CUDA_TOOLKIT_PATH')
ask_cudnn_path = (r'Please specify the location where cuDNN %s library is '
'installed. Refer to README.md for more details. [Default'
' is %s]: ') % (tf_cudnn_version, default_cudnn_path)
cudnn_install_path = get_from_env_or_user_or_default(
environ_cp, 'CUDNN_INSTALL_PATH', ask_cudnn_path, default_cudnn_path)
    # Result returned from "read" will be used unexpanded. That makes "~"
# unusable. Going through one more level of expansion to handle that.
cudnn_install_path = os.path.realpath(
os.path.expanduser(cudnn_install_path))
if is_windows() or is_cygwin():
cudnn_install_path = cygpath(cudnn_install_path)
if is_windows():
cuda_dnn_lib_path = 'lib/x64/cudnn.lib'
cuda_dnn_lib_alt_path = 'lib/x64/cudnn.lib'
elif is_linux():
cuda_dnn_lib_path = 'lib64/libcudnn.so.%s' % tf_cudnn_version
cuda_dnn_lib_alt_path = 'libcudnn.so.%s' % tf_cudnn_version
elif is_macos():
cuda_dnn_lib_path = 'lib/libcudnn.%s.dylib' % tf_cudnn_version
cuda_dnn_lib_alt_path = 'libcudnn.%s.dylib' % tf_cudnn_version
cuda_dnn_lib_path_full = os.path.join(cudnn_install_path, cuda_dnn_lib_path)
cuda_dnn_lib_alt_path_full = os.path.join(cudnn_install_path,
cuda_dnn_lib_alt_path)
if os.path.exists(cuda_dnn_lib_path_full) or os.path.exists(
cuda_dnn_lib_alt_path_full):
break
# Try another alternative for Linux
if is_linux():
ldconfig_bin = which('ldconfig') or '/sbin/ldconfig'
cudnn_path_from_ldconfig = run_shell([ldconfig_bin, '-p'])
cudnn_path_from_ldconfig = re.search('.*libcudnn.so .* => (.*)',
cudnn_path_from_ldconfig)
if cudnn_path_from_ldconfig:
cudnn_path_from_ldconfig = cudnn_path_from_ldconfig.group(1)
if os.path.exists(
'%s.%s' % (cudnn_path_from_ldconfig, tf_cudnn_version)):
cudnn_install_path = os.path.dirname(cudnn_path_from_ldconfig)
break
# Reset and Retry
print(
'Invalid path to cuDNN %s toolkit. None of the following files can be '
'found:' % tf_cudnn_version)
print(cuda_dnn_lib_path_full)
print(cuda_dnn_lib_alt_path_full)
if is_linux():
print('%s.%s' % (cudnn_path_from_ldconfig, tf_cudnn_version))
environ_cp['TF_CUDNN_VERSION'] = ''
else:
raise UserInputError('Invalid TF_CUDNN setting was provided %d '
'times in a row. Assuming to be a scripting mistake.' %
_DEFAULT_PROMPT_ASK_ATTEMPTS)
# Set CUDNN_INSTALL_PATH and TF_CUDNN_VERSION
environ_cp['CUDNN_INSTALL_PATH'] = cudnn_install_path
write_action_env_to_bazelrc('CUDNN_INSTALL_PATH', cudnn_install_path)
environ_cp['TF_CUDNN_VERSION'] = tf_cudnn_version
write_action_env_to_bazelrc('TF_CUDNN_VERSION', tf_cudnn_version)
def is_cuda_compatible(lib, cuda_ver, cudnn_ver):
"""Check compatibility between given library and cudnn/cudart libraries."""
ldd_bin = which('ldd') or '/usr/bin/ldd'
ldd_out = run_shell([ldd_bin, lib], True)
ldd_out = ldd_out.split(os.linesep)
cudnn_pattern = re.compile('.*libcudnn.so\\.?(.*) =>.*$')
cuda_pattern = re.compile('.*libcudart.so\\.?(.*) =>.*$')
cudnn = None
cudart = None
cudnn_ok = True # assume no cudnn dependency by default
cuda_ok = True # assume no cuda dependency by default
for line in ldd_out:
if 'libcudnn.so' in line:
cudnn = cudnn_pattern.search(line)
cudnn_ok = False
elif 'libcudart.so' in line:
cudart = cuda_pattern.search(line)
cuda_ok = False
if cudnn and len(cudnn.group(1)):
cudnn = convert_version_to_int(cudnn.group(1))
if cudart and len(cudart.group(1)):
cudart = convert_version_to_int(cudart.group(1))
if cudnn is not None:
cudnn_ok = (cudnn == cudnn_ver)
if cudart is not None:
cuda_ok = (cudart == cuda_ver)
return cudnn_ok and cuda_ok
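# --- Editor's hedged example; not part of the original script. ---
# The check inspects `ldd <lib>` output lines such as
#
#   libcudnn.so.7 => /usr/lib/x86_64-linux-gnu/libcudnn.so.7 (0x00007f...)
#   libcudart.so.9.0 => /usr/local/cuda/lib64/libcudart.so.9.0 (0x00007f...)
#
# so, with versions converted the same way the callers do (the library path
# below is a placeholder),
#
#   is_cuda_compatible('/usr/lib/x86_64-linux-gnu/libnvinfer.so.4',
#                      convert_version_to_int('9.0'),
#                      convert_version_to_int('7'))
#
# returns True only if every cudnn/cudart dependency of the library matches;
# libraries with no such dependency are accepted unconditionally.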
def set_tf_tensorrt_install_path(environ_cp):
"""Set TENSORRT_INSTALL_PATH and TF_TENSORRT_VERSION.
Adapted from code contributed by Sami Kama (https://github.com/samikama).
Args:
environ_cp: copy of the os.environ.
Raises:
ValueError: if this method was called under non-Linux platform.
UserInputError: if user has provided invalid input multiple times.
"""
if not is_linux():
raise ValueError('Currently TensorRT is only supported on Linux platform.')
# Ask user whether to add TensorRT support.
if str(int(get_var(environ_cp, 'TF_NEED_TENSORRT', 'TensorRT',
False))) != '1':
return
for _ in range(_DEFAULT_PROMPT_ASK_ATTEMPTS):
ask_tensorrt_path = (r'Please specify the location where TensorRT is '
'installed. [Default is %s]:') % (
_DEFAULT_TENSORRT_PATH_LINUX)
trt_install_path = get_from_env_or_user_or_default(
environ_cp, 'TENSORRT_INSTALL_PATH', ask_tensorrt_path,
_DEFAULT_TENSORRT_PATH_LINUX)
    # Result returned from "read" will be used unexpanded. That makes "~"
# unusable. Going through one more level of expansion to handle that.
trt_install_path = os.path.realpath(os.path.expanduser(trt_install_path))
def find_libs(search_path):
"""Search for libnvinfer.so in "search_path"."""
fl = set()
if os.path.exists(search_path) and os.path.isdir(search_path):
fl.update([
os.path.realpath(os.path.join(search_path, x))
for x in os.listdir(search_path)
if 'libnvinfer.so' in x
])
return fl
possible_files = find_libs(trt_install_path)
possible_files.update(find_libs(os.path.join(trt_install_path, 'lib')))
possible_files.update(find_libs(os.path.join(trt_install_path, 'lib64')))
cuda_ver = convert_version_to_int(environ_cp['TF_CUDA_VERSION'])
cudnn_ver = convert_version_to_int(environ_cp['TF_CUDNN_VERSION'])
nvinfer_pattern = re.compile('.*libnvinfer.so.?(.*)$')
highest_ver = [0, None, None]
for lib_file in possible_files:
if is_cuda_compatible(lib_file, cuda_ver, cudnn_ver):
matches = nvinfer_pattern.search(lib_file)
if not matches.groups():
continue
ver_str = matches.group(1)
ver = convert_version_to_int(ver_str) if len(ver_str) else 0
if ver > highest_ver[0]:
highest_ver = [ver, ver_str, lib_file]
if highest_ver[1] is not None:
trt_install_path = os.path.dirname(highest_ver[2])
tf_tensorrt_version = highest_ver[1]
break
# Try another alternative from ldconfig.
ldconfig_bin = which('ldconfig') or '/sbin/ldconfig'
ldconfig_output = run_shell([ldconfig_bin, '-p'])
search_result = re.search('.*libnvinfer.so\\.?([0-9.]*).* => (.*)',
ldconfig_output)
if search_result:
libnvinfer_path_from_ldconfig = search_result.group(2)
if os.path.exists(libnvinfer_path_from_ldconfig):
if is_cuda_compatible(libnvinfer_path_from_ldconfig, cuda_ver,
cudnn_ver):
trt_install_path = os.path.dirname(libnvinfer_path_from_ldconfig)
tf_tensorrt_version = search_result.group(1)
break
# Reset and Retry
if possible_files:
      print('TensorRT libraries found in one of the following directories',
'are not compatible with selected cuda and cudnn installations')
print(trt_install_path)
print(os.path.join(trt_install_path, 'lib'))
print(os.path.join(trt_install_path, 'lib64'))
if search_result:
print(libnvinfer_path_from_ldconfig)
else:
print(
'Invalid path to TensorRT. None of the following files can be found:')
print(trt_install_path)
print(os.path.join(trt_install_path, 'lib'))
print(os.path.join(trt_install_path, 'lib64'))
if search_result:
print(libnvinfer_path_from_ldconfig)
else:
raise UserInputError('Invalid TF_TENSORRT setting was provided %d '
'times in a row. Assuming to be a scripting mistake.' %
_DEFAULT_PROMPT_ASK_ATTEMPTS)
# Set TENSORRT_INSTALL_PATH and TF_TENSORRT_VERSION
environ_cp['TENSORRT_INSTALL_PATH'] = trt_install_path
write_action_env_to_bazelrc('TENSORRT_INSTALL_PATH', trt_install_path)
environ_cp['TF_TENSORRT_VERSION'] = tf_tensorrt_version
write_action_env_to_bazelrc('TF_TENSORRT_VERSION', tf_tensorrt_version)
def set_tf_nccl_install_path(environ_cp):
"""Set NCCL_INSTALL_PATH, NCCL_HDR_PATH and TF_NCCL_VERSION.
Args:
environ_cp: copy of the os.environ.
Raises:
ValueError: if this method was called under non-Linux platform.
UserInputError: if user has provided invalid input multiple times.
"""
if not is_linux():
raise ValueError('Currently NCCL is only supported on Linux platforms.')
ask_nccl_version = (
'Please specify the locally installed NCCL version you want to use. '
'[Default is to use https://github.com/nvidia/nccl]: ')
for _ in range(_DEFAULT_PROMPT_ASK_ATTEMPTS):
tf_nccl_version = get_from_env_or_user_or_default(
environ_cp, 'TF_NCCL_VERSION', ask_nccl_version, '')
if not tf_nccl_version:
break # No need to get install path, building the open source code.
tf_nccl_version = reformat_version_sequence(str(tf_nccl_version), 1)
# Look with ldconfig first if we can find the library in paths
# like /usr/lib/x86_64-linux-gnu and the header file in the corresponding
# include directory. This is where the NCCL .deb packages install them.
# First check to see if NCCL is in the ldconfig.
# If its found, use that location.
if is_linux():
ldconfig_bin = which('ldconfig') or '/sbin/ldconfig'
nccl2_path_from_ldconfig = run_shell([ldconfig_bin, '-p'])
nccl2_path_from_ldconfig = re.search('.*libnccl.so .* => (.*)',
nccl2_path_from_ldconfig)
if nccl2_path_from_ldconfig:
nccl2_path_from_ldconfig = nccl2_path_from_ldconfig.group(1)
if os.path.exists('%s.%s' % (nccl2_path_from_ldconfig, tf_nccl_version)):
nccl_install_path = os.path.dirname(nccl2_path_from_ldconfig)
print('NCCL libraries found in ' + nccl2_path_from_ldconfig)
# Check if this is the main system lib location
if re.search('.*linux-gnu', nccl_install_path):
trunc_nccl_install_path = '/usr'
print('This looks like a system path.')
else:
trunc_nccl_install_path = nccl_install_path + '/..'
# Look for header
nccl_hdr_path = trunc_nccl_install_path + '/include'
print('Assuming NCCL header path is ' + nccl_hdr_path)
if os.path.exists(nccl_hdr_path + '/nccl.h'):
# Set NCCL_INSTALL_PATH
environ_cp['NCCL_INSTALL_PATH'] = nccl_install_path
write_action_env_to_bazelrc('NCCL_INSTALL_PATH', nccl_install_path)
# Set NCCL_HDR_PATH
environ_cp['NCCL_HDR_PATH'] = nccl_hdr_path
write_action_env_to_bazelrc('NCCL_HDR_PATH', nccl_hdr_path)
break
else:
print(
'The header for NCCL2 cannot be found. Please install the libnccl-dev package.'
)
else:
print('NCCL2 is listed by ldconfig but the library is not found. '
'Your ldconfig is out of date. Please run sudo ldconfig.')
else:
# NCCL is not found in ldconfig. Ask the user for the location.
default_nccl_path = environ_cp.get('CUDA_TOOLKIT_PATH')
ask_nccl_path = (
r'Please specify the location where NCCL %s library is '
'installed. Refer to README.md for more details. [Default '
'is %s]:') % (tf_nccl_version, default_nccl_path)
nccl_install_path = get_from_env_or_user_or_default(
environ_cp, 'NCCL_INSTALL_PATH', ask_nccl_path, default_nccl_path)
      # Result returned from "read" will be used unexpanded. That makes "~"
# unusable. Going through one more level of expansion to handle that.
nccl_install_path = os.path.realpath(
os.path.expanduser(nccl_install_path))
if is_windows() or is_cygwin():
nccl_install_path = cygpath(nccl_install_path)
if is_windows():
nccl_lib_path = 'lib/x64/nccl.lib'
elif is_linux():
nccl_lib_filename = 'libnccl.so.%s' % tf_nccl_version
nccl_lpath = '%s/lib/%s' % (nccl_install_path, nccl_lib_filename)
if not os.path.exists(nccl_lpath):
for relative_path in NCCL_LIB_PATHS:
path = '%s/%s%s' % (nccl_install_path, relative_path,
nccl_lib_filename)
if os.path.exists(path):
print('NCCL found at ' + path)
nccl_lib_path = path
break
else:
nccl_lib_path = nccl_lpath
elif is_macos():
nccl_lib_path = 'lib/libnccl.%s.dylib' % tf_nccl_version
nccl_lib_path = os.path.join(nccl_install_path, nccl_lib_path)
nccl_hdr_path = os.path.join(
os.path.dirname(nccl_lib_path), '../include/nccl.h')
print('Assuming NCCL header path is ' + nccl_hdr_path)
if os.path.exists(nccl_lib_path) and os.path.exists(nccl_hdr_path):
# Set NCCL_INSTALL_PATH
environ_cp['NCCL_INSTALL_PATH'] = os.path.dirname(nccl_lib_path)
write_action_env_to_bazelrc('NCCL_INSTALL_PATH',
os.path.dirname(nccl_lib_path))
# Set NCCL_HDR_PATH
environ_cp['NCCL_HDR_PATH'] = os.path.dirname(nccl_hdr_path)
write_action_env_to_bazelrc('NCCL_HDR_PATH',
os.path.dirname(nccl_hdr_path))
break
# Reset and Retry
print(
'Invalid path to NCCL %s toolkit, %s or %s not found. Please use the '
'O/S agnostic package of NCCL 2' % (tf_nccl_version, nccl_lib_path,
nccl_hdr_path))
environ_cp['TF_NCCL_VERSION'] = ''
else:
raise UserInputError('Invalid TF_NCCL setting was provided %d '
'times in a row. Assuming to be a scripting mistake.' %
_DEFAULT_PROMPT_ASK_ATTEMPTS)
# Set TF_NCCL_VERSION
environ_cp['TF_NCCL_VERSION'] = tf_nccl_version
write_action_env_to_bazelrc('TF_NCCL_VERSION', tf_nccl_version)
def get_native_cuda_compute_capabilities(environ_cp):
"""Get native cuda compute capabilities.
Args:
environ_cp: copy of the os.environ.
Returns:
string of native cuda compute capabilities, separated by comma.
"""
device_query_bin = os.path.join(
environ_cp.get('CUDA_TOOLKIT_PATH'), 'extras/demo_suite/deviceQuery')
if os.path.isfile(device_query_bin) and os.access(device_query_bin, os.X_OK):
try:
output = run_shell(device_query_bin).split('\n')
pattern = re.compile('[0-9]*\\.[0-9]*')
output = [pattern.search(x) for x in output if 'Capability' in x]
output = ','.join(x.group() for x in output if x is not None)
except subprocess.CalledProcessError:
output = ''
else:
output = ''
return output
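# --- Editor's hedged example; not part of the original script. ---
# deviceQuery prints one line per GPU containing the compute capability, e.g.
#   "  CUDA Capability Major/Minor version number:    7.0"
# so on a machine with a V100 and a K80 this helper would return '7.0,3.7'.
# If deviceQuery is missing or fails, the empty string is returned and the
# caller falls back to _DEFAULT_CUDA_COMPUTE_CAPABILITIES.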
def set_tf_cuda_compute_capabilities(environ_cp):
"""Set TF_CUDA_COMPUTE_CAPABILITIES."""
while True:
native_cuda_compute_capabilities = get_native_cuda_compute_capabilities(
environ_cp)
if not native_cuda_compute_capabilities:
default_cuda_compute_capabilities = _DEFAULT_CUDA_COMPUTE_CAPABILITIES
else:
default_cuda_compute_capabilities = native_cuda_compute_capabilities
ask_cuda_compute_capabilities = (
'Please specify a list of comma-separated '
'Cuda compute capabilities you want to '
'build with.\nYou can find the compute '
'capability of your device at: '
'https://developer.nvidia.com/cuda-gpus.\nPlease'
' note that each additional compute '
'capability significantly increases your '
'build time and binary size. [Default is: %s]: ' %
default_cuda_compute_capabilities)
tf_cuda_compute_capabilities = get_from_env_or_user_or_default(
environ_cp, 'TF_CUDA_COMPUTE_CAPABILITIES',
ask_cuda_compute_capabilities, default_cuda_compute_capabilities)
# Check whether all capabilities from the input is valid
all_valid = True
# Remove all whitespace characters before splitting the string
# that users may insert by accident, as this will result in error
tf_cuda_compute_capabilities = ''.join(tf_cuda_compute_capabilities.split())
for compute_capability in tf_cuda_compute_capabilities.split(','):
      m = re.match('[0-9]+\\.[0-9]+', compute_capability)
if not m:
        print('Invalid compute capability: %s' % compute_capability)
all_valid = False
else:
ver = int(m.group(0).split('.')[0])
if ver < 3:
print('Only compute capabilities 3.0 or higher are supported.')
all_valid = False
if all_valid:
break
# Reset and Retry
environ_cp['TF_CUDA_COMPUTE_CAPABILITIES'] = ''
# Set TF_CUDA_COMPUTE_CAPABILITIES
environ_cp['TF_CUDA_COMPUTE_CAPABILITIES'] = tf_cuda_compute_capabilities
write_action_env_to_bazelrc('TF_CUDA_COMPUTE_CAPABILITIES',
tf_cuda_compute_capabilities)
def set_other_cuda_vars(environ_cp):
"""Set other CUDA related variables."""
# If CUDA is enabled, always use GPU during build and test.
if environ_cp.get('TF_CUDA_CLANG') == '1':
write_to_bazelrc('build --config=cuda_clang')
write_to_bazelrc('test --config=cuda_clang')
else:
write_to_bazelrc('build --config=cuda')
write_to_bazelrc('test --config=cuda')
def set_host_cxx_compiler(environ_cp):
"""Set HOST_CXX_COMPILER."""
default_cxx_host_compiler = which('g++') or ''
host_cxx_compiler = prompt_loop_or_load_from_env(
environ_cp,
var_name='HOST_CXX_COMPILER',
var_default=default_cxx_host_compiler,
ask_for_var=('Please specify which C++ compiler should be used as the '
'host C++ compiler.'),
check_success=os.path.exists,
error_msg='Invalid C++ compiler path. %s cannot be found.',
)
write_action_env_to_bazelrc('HOST_CXX_COMPILER', host_cxx_compiler)
def set_host_c_compiler(environ_cp):
"""Set HOST_C_COMPILER."""
default_c_host_compiler = which('gcc') or ''
host_c_compiler = prompt_loop_or_load_from_env(
environ_cp,
var_name='HOST_C_COMPILER',
var_default=default_c_host_compiler,
ask_for_var=('Please specify which C compiler should be used as the host '
'C compiler.'),
check_success=os.path.exists,
error_msg='Invalid C compiler path. %s cannot be found.',
)
write_action_env_to_bazelrc('HOST_C_COMPILER', host_c_compiler)
def set_computecpp_toolkit_path(environ_cp):
"""Set COMPUTECPP_TOOLKIT_PATH."""
def toolkit_exists(toolkit_path):
"""Check if a computecpp toolkit path is valid."""
if is_linux():
sycl_rt_lib_path = 'lib/libComputeCpp.so'
else:
sycl_rt_lib_path = ''
sycl_rt_lib_path_full = os.path.join(toolkit_path, sycl_rt_lib_path)
exists = os.path.exists(sycl_rt_lib_path_full)
if not exists:
print('Invalid SYCL %s library path. %s cannot be found' %
(_TF_OPENCL_VERSION, sycl_rt_lib_path_full))
return exists
computecpp_toolkit_path = prompt_loop_or_load_from_env(
environ_cp,
var_name='COMPUTECPP_TOOLKIT_PATH',
var_default=_DEFAULT_COMPUTECPP_TOOLKIT_PATH,
ask_for_var=(
'Please specify the location where ComputeCpp for SYCL %s is '
'installed.' % _TF_OPENCL_VERSION),
check_success=toolkit_exists,
error_msg='Invalid SYCL compiler path. %s cannot be found.',
suppress_default_error=True)
write_action_env_to_bazelrc('COMPUTECPP_TOOLKIT_PATH',
computecpp_toolkit_path)
def set_trisycl_include_dir(environ_cp):
"""Set TRISYCL_INCLUDE_DIR."""
ask_trisycl_include_dir = ('Please specify the location of the triSYCL '
'include directory. (Use --config=sycl_trisycl '
'when building with Bazel) '
'[Default is %s]: ') % (
_DEFAULT_TRISYCL_INCLUDE_DIR)
while True:
trisycl_include_dir = get_from_env_or_user_or_default(
environ_cp, 'TRISYCL_INCLUDE_DIR', ask_trisycl_include_dir,
_DEFAULT_TRISYCL_INCLUDE_DIR)
if os.path.exists(trisycl_include_dir):
break
print('Invalid triSYCL include directory, %s cannot be found' %
(trisycl_include_dir))
# Set TRISYCL_INCLUDE_DIR
environ_cp['TRISYCL_INCLUDE_DIR'] = trisycl_include_dir
write_action_env_to_bazelrc('TRISYCL_INCLUDE_DIR', trisycl_include_dir)
def set_mpi_home(environ_cp):
"""Set MPI_HOME."""
default_mpi_home = which('mpirun') or which('mpiexec') or ''
default_mpi_home = os.path.dirname(os.path.dirname(default_mpi_home))
def valid_mpi_path(mpi_home):
exists = (
os.path.exists(os.path.join(mpi_home, 'include')) and
os.path.exists(os.path.join(mpi_home, 'lib')))
if not exists:
      print('Invalid path to the MPI Toolkit. %s or %s cannot be found' %
            (os.path.join(mpi_home, 'include'),
             os.path.join(mpi_home, 'lib')))
return exists
_ = prompt_loop_or_load_from_env(
environ_cp,
var_name='MPI_HOME',
var_default=default_mpi_home,
ask_for_var='Please specify the MPI toolkit folder.',
check_success=valid_mpi_path,
error_msg='',
suppress_default_error=True)
def set_other_mpi_vars(environ_cp):
"""Set other MPI related variables."""
# Link the MPI header files
mpi_home = environ_cp.get('MPI_HOME')
symlink_force('%s/include/mpi.h' % mpi_home, 'third_party/mpi/mpi.h')
  # Determine if we use OpenMPI or MVAPICH; these require different header
  # files to be included here to keep the bazel dependency checker happy.
if os.path.exists(os.path.join(mpi_home, 'include/mpi_portable_platform.h')):
symlink_force(
os.path.join(mpi_home, 'include/mpi_portable_platform.h'),
'third_party/mpi/mpi_portable_platform.h')
# TODO(gunan): avoid editing files in configure
sed_in_place('third_party/mpi/mpi.bzl', 'MPI_LIB_IS_OPENMPI=False',
'MPI_LIB_IS_OPENMPI=True')
else:
# MVAPICH / MPICH
symlink_force(
os.path.join(mpi_home, 'include/mpio.h'), 'third_party/mpi/mpio.h')
symlink_force(
os.path.join(mpi_home, 'include/mpicxx.h'), 'third_party/mpi/mpicxx.h')
# TODO(gunan): avoid editing files in configure
sed_in_place('third_party/mpi/mpi.bzl', 'MPI_LIB_IS_OPENMPI=True',
'MPI_LIB_IS_OPENMPI=False')
if os.path.exists(os.path.join(mpi_home, 'lib/libmpi.so')):
symlink_force(
os.path.join(mpi_home, 'lib/libmpi.so'), 'third_party/mpi/libmpi.so')
else:
raise ValueError('Cannot find the MPI library file in %s/lib' % mpi_home)
def set_system_libs_flag(environ_cp):
syslibs = environ_cp.get('TF_SYSTEM_LIBS', '')
if syslibs:
if ',' in syslibs:
syslibs = ','.join(sorted(syslibs.split(',')))
else:
syslibs = ','.join(sorted(syslibs.split()))
write_action_env_to_bazelrc('TF_SYSTEM_LIBS', syslibs)
if 'PREFIX' in environ_cp:
write_to_bazelrc('build --define=PREFIX=%s' % environ_cp['PREFIX'])
if 'LIBDIR' in environ_cp:
write_to_bazelrc('build --define=LIBDIR=%s' % environ_cp['LIBDIR'])
if 'INCLUDEDIR' in environ_cp:
write_to_bazelrc('build --define=INCLUDEDIR=%s' % environ_cp['INCLUDEDIR'])
def set_windows_build_flags(environ_cp):
"""Set Windows specific build options."""
# The non-monolithic build is not supported yet
write_to_bazelrc('build --config monolithic')
# Suppress warning messages
write_to_bazelrc('build --copt=-w --host_copt=-w')
# Output more verbose information when something goes wrong
write_to_bazelrc('build --verbose_failures')
  # The host and target platforms are the same in the Windows build, so we
  # don't have to distinguish them. This avoids building the same targets twice.
write_to_bazelrc('build --distinct_host_configuration=false')
  # Enable short object file paths to avoid the long-path issue on Windows.
  # TODO(pcloudy): Remove this flag when upgrading Bazel to 0.16.0, where
  # short object file paths will be enabled by default.
write_to_bazelrc('build --experimental_shortened_obj_file_path=true')
if get_var(
environ_cp, 'TF_OVERRIDE_EIGEN_STRONG_INLINE', 'Eigen strong inline',
True, ('Would you like to override eigen strong inline for some C++ '
'compilation to reduce the compilation time?'),
'Eigen strong inline overridden.', 'Not overriding eigen strong inline, '
'some compilations could take more than 20 mins.'):
# Due to a known MSVC compiler issue
# https://github.com/tensorflow/tensorflow/issues/10521
    # Overriding eigen strong inline speeds up compilation of
    # conv_grad_ops_3d.cc and conv_ops_3d.cc by 20 minutes,
    # but it also hurts performance. Let users decide what they want.
write_to_bazelrc('build --define=override_eigen_strong_inline=true')
def config_info_line(name, help_text):
"""Helper function to print formatted help text for Bazel config options."""
print('\t--config=%-12s\t# %s' % (name, help_text))
def main():
global _TF_WORKSPACE_ROOT
global _TF_BAZELRC
parser = argparse.ArgumentParser()
parser.add_argument(
'--workspace',
type=str,
default=os.path.abspath(os.path.dirname(__file__)),
help='The absolute path to your active Bazel workspace.')
args = parser.parse_args()
_TF_WORKSPACE_ROOT = args.workspace
_TF_BAZELRC = os.path.join(_TF_WORKSPACE_ROOT, _TF_BAZELRC_FILENAME)
  # Make a copy of os.environ so it is clear when functions read and modify
  # environment variables.
environ_cp = dict(os.environ)
check_bazel_version('0.15.0')
reset_tf_configure_bazelrc()
cleanup_makefile()
setup_python(environ_cp)
if is_windows():
environ_cp['TF_NEED_OPENCL_SYCL'] = '0'
environ_cp['TF_NEED_COMPUTECPP'] = '0'
environ_cp['TF_NEED_OPENCL'] = '0'
environ_cp['TF_CUDA_CLANG'] = '0'
environ_cp['TF_NEED_TENSORRT'] = '0'
# TODO(ibiryukov): Investigate using clang as a cpu or cuda compiler on
# Windows.
environ_cp['TF_DOWNLOAD_CLANG'] = '0'
environ_cp['TF_NEED_MPI'] = '0'
environ_cp['TF_SET_ANDROID_WORKSPACE'] = '0'
if is_macos():
environ_cp['TF_NEED_TENSORRT'] = '0'
# The numpy package on ppc64le uses OpenBLAS which has multi-threading
# issues that lead to incorrect answers. Set OMP_NUM_THREADS=1 at
# runtime to allow the Tensorflow testcases which compare numpy
# results to Tensorflow results to succeed.
if is_ppc64le():
write_action_env_to_bazelrc('OMP_NUM_THREADS', 1)
xla_enabled_by_default = is_linux()
set_build_var(environ_cp, 'TF_ENABLE_XLA', 'XLA JIT', 'with_xla_support',
xla_enabled_by_default, 'xla')
set_action_env_var(environ_cp, 'TF_NEED_OPENCL_SYCL', 'OpenCL SYCL', False)
if environ_cp.get('TF_NEED_OPENCL_SYCL') == '1':
set_host_cxx_compiler(environ_cp)
set_host_c_compiler(environ_cp)
set_action_env_var(environ_cp, 'TF_NEED_COMPUTECPP', 'ComputeCPP', True)
if environ_cp.get('TF_NEED_COMPUTECPP') == '1':
set_computecpp_toolkit_path(environ_cp)
else:
set_trisycl_include_dir(environ_cp)
set_action_env_var(environ_cp, 'TF_NEED_ROCM', 'ROCm', False)
if (environ_cp.get('TF_NEED_ROCM') == '1' and
'LD_LIBRARY_PATH' in environ_cp and
environ_cp.get('LD_LIBRARY_PATH') != '1'):
write_action_env_to_bazelrc('LD_LIBRARY_PATH',
environ_cp.get('LD_LIBRARY_PATH'))
set_action_env_var(environ_cp, 'TF_NEED_CUDA', 'CUDA', False)
if (environ_cp.get('TF_NEED_CUDA') == '1' and
'TF_CUDA_CONFIG_REPO' not in environ_cp):
set_tf_cuda_version(environ_cp)
set_tf_cudnn_version(environ_cp)
if is_linux():
set_tf_tensorrt_install_path(environ_cp)
set_tf_nccl_install_path(environ_cp)
set_tf_cuda_compute_capabilities(environ_cp)
if 'LD_LIBRARY_PATH' in environ_cp and environ_cp.get(
'LD_LIBRARY_PATH') != '1':
write_action_env_to_bazelrc('LD_LIBRARY_PATH',
environ_cp.get('LD_LIBRARY_PATH'))
set_tf_cuda_clang(environ_cp)
if environ_cp.get('TF_CUDA_CLANG') == '1':
# Ask whether we should download the clang toolchain.
set_tf_download_clang(environ_cp)
if environ_cp.get('TF_DOWNLOAD_CLANG') != '1':
# Set up which clang we should use as the cuda / host compiler.
set_clang_cuda_compiler_path(environ_cp)
else:
# Use downloaded LLD for linking.
write_to_bazelrc('build:cuda_clang --config=download_clang_use_lld')
write_to_bazelrc('test:cuda_clang --config=download_clang_use_lld')
else:
# Set up which gcc nvcc should use as the host compiler
# No need to set this on Windows
if not is_windows():
set_gcc_host_compiler_path(environ_cp)
set_other_cuda_vars(environ_cp)
else:
# CUDA not required. Ask whether we should download the clang toolchain and
# use it for the CPU build.
set_tf_download_clang(environ_cp)
if environ_cp.get('TF_DOWNLOAD_CLANG') == '1':
write_to_bazelrc('build --config=download_clang')
write_to_bazelrc('test --config=download_clang')
# SYCL / ROCm / CUDA are mutually exclusive.
# At most 1 GPU platform can be configured.
gpu_platform_count = 0
if environ_cp.get('TF_NEED_OPENCL_SYCL') == '1':
gpu_platform_count += 1
if environ_cp.get('TF_NEED_ROCM') == '1':
gpu_platform_count += 1
if environ_cp.get('TF_NEED_CUDA') == '1':
gpu_platform_count += 1
if gpu_platform_count >= 2:
    raise UserInputError('SYCL / CUDA / ROCm are mutually exclusive. '
'At most 1 GPU platform can be configured.')
set_build_var(environ_cp, 'TF_NEED_MPI', 'MPI', 'with_mpi_support', False)
if environ_cp.get('TF_NEED_MPI') == '1':
set_mpi_home(environ_cp)
set_other_mpi_vars(environ_cp)
set_cc_opt_flags(environ_cp)
set_system_libs_flag(environ_cp)
if is_windows():
set_windows_build_flags(environ_cp)
# Add a config option to build TensorFlow 2.0 API.
write_to_bazelrc('build:v2 --define=tf_api_version=2')
if get_var(environ_cp, 'TF_SET_ANDROID_WORKSPACE', 'android workspace', False,
('Would you like to interactively configure ./WORKSPACE for '
'Android builds?'), 'Searching for NDK and SDK installations.',
'Not configuring the WORKSPACE for Android builds.'):
create_android_ndk_rule(environ_cp)
create_android_sdk_rule(environ_cp)
print('Preconfigured Bazel build configs. You can use any of the below by '
'adding "--config=<>" to your build command. See .bazelrc for more '
'details.')
config_info_line('mkl', 'Build with MKL support.')
config_info_line('monolithic', 'Config for mostly static monolithic build.')
config_info_line('gdr', 'Build with GDR support.')
config_info_line('verbs', 'Build with libverbs support.')
config_info_line('ngraph', 'Build with Intel nGraph support.')
print('Preconfigured Bazel build configs to DISABLE default on features:')
config_info_line('noaws', 'Disable AWS S3 filesystem support.')
config_info_line('nogcp', 'Disable GCP support.')
config_info_line('nohdfs', 'Disable HDFS support.')
  config_info_line('noignite', 'Disable Apache Ignite support.')
config_info_line('nokafka', 'Disable Apache Kafka support.')
if __name__ == '__main__':
main()
| {
"content_hash": "6f5c57223bc496a7a5215c20dad27b40",
"timestamp": "",
"source": "github",
"line_count": 1667,
"max_line_length": 93,
"avg_line_length": 37.2255548890222,
"alnum_prop": 0.6415276770606719,
"repo_name": "girving/tensorflow",
"id": "b564da27227ec07713f91e925ea292b35f0f02df",
"size": "62744",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "configure.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "3325"
},
{
"name": "Batchfile",
"bytes": "10132"
},
{
"name": "C",
"bytes": "343258"
},
{
"name": "C#",
"bytes": "8446"
},
{
"name": "C++",
"bytes": "50036869"
},
{
"name": "CMake",
"bytes": "196127"
},
{
"name": "Dockerfile",
"bytes": "36386"
},
{
"name": "Go",
"bytes": "1254086"
},
{
"name": "HTML",
"bytes": "4681865"
},
{
"name": "Java",
"bytes": "867313"
},
{
"name": "Jupyter Notebook",
"bytes": "2604735"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "58787"
},
{
"name": "Objective-C",
"bytes": "15650"
},
{
"name": "Objective-C++",
"bytes": "99243"
},
{
"name": "PHP",
"bytes": "1357"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "PureBasic",
"bytes": "25356"
},
{
"name": "Python",
"bytes": "42041620"
},
{
"name": "Ruby",
"bytes": "553"
},
{
"name": "Shell",
"bytes": "477299"
},
{
"name": "Smarty",
"bytes": "6976"
}
],
"symlink_target": ""
} |
from django.contrib import admin
from .models import Message
# Register your models here.
admin.site.register(Message) | {
"content_hash": "1817501fc32064d25a04ee38ef13279c",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 32,
"avg_line_length": 20,
"alnum_prop": 0.8083333333333333,
"repo_name": "lyubomir1993/AlohaServer",
"id": "a45176be271cbbc190103636e028da945be9d30f",
"size": "120",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "api/admin.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "40"
},
{
"name": "HTML",
"bytes": "5144"
},
{
"name": "JavaScript",
"bytes": "2637"
},
{
"name": "Python",
"bytes": "71954"
}
],
"symlink_target": ""
} |
from flask import *
from config import app
import scrapers as scraper
from utils import *
jobs = Blueprint('jobs', __name__, template_folder = 'views')
#Exclusions, Skills, Position-type (PM, software dev), Field (AI, Medicine, sports), Experience Level
@jobs.route('/jobs', methods = ['GET'])
def jobs_route_get():
if session.get('signedIn') == None:
return redirect('/login')
sites = '-site:yelp.com/* -site:dice.com/* -site:indeed.com/* -site:monster.com/* -site:glassdoor.com/ -site:jobs.climber.com/* -site:ziprecruiter.com/* site:jobs.*.com/* OR site:careers.*.com/* OR site:*.com/careers/* OR site:*.com/jobs/* OR site:*.org/careers/* OR site:*.org/jobs/* OR site:jobs.lever.co/* OR site:boards.greenhouse.io/* OR site:linkedin.com/jobs/view/* '
results = pref_sql("SELECT skills, exclusions, postype, field, explevel FROM user WHERE uid = '{0}'", (session['uid'],))
    if len(results) and None not in results[0][:5]:  # if we have something in the database
skills = results[0][0].split(";")
exclusions = results[0][1].split(";")
postype = results[0][2].split(";")
fields = results[0][3].split(";")
experience_level = results[0][4].split(";")
experience_level = experience_level[0]
all_fields = fields[0]
for field in range(1, len(fields)):
if fields[field] != '':
all_fields+= ' OR ' + fields[field]
all_positions = postype[0]
for pos in range(1, len(postype)):
if postype[pos] != '':
all_positions += ' ' + postype[pos]
all_exclusions = exclusions[0]
for exclusion in range(1, len(exclusions)):
if exclusions[exclusion] != '':
all_exclusions+= ' -' + exclusions[exclusion]
if experience_level == 'New Grad' or experience_level == 'Intern' or experience_level == 'Entry Level':
all_exclusions += " -senior -lead"
all_skills = '"' + skills[0] + '"'
for skill in range(1, len(skills)):
if skills[skill] != '':
all_skills += ' OR ' + '"' + skills[skill] + '"'
query = sites + all_positions + ' ' + experience_level + ' ' + all_fields + ' ' + all_skills + ' -' + all_exclusions
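        # Illustrative shape of the final query string (actual values come from
        # the user's stored preferences), e.g.:
        #   <site filters> Software Engineer Intern AI OR Sports "python" OR "sql" -senior -lead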
jobs, summaries, num, full_desc = scraper.scrape(query)
match_skills = []
match_pos = []
match_fields = []
for desc in full_desc:
skilz = findAllMatches(skills, desc)
positionz = findAllMatches(postype, desc)
fieldz = findAllMatches(fields, desc)
match_skills.append(skilz)
match_pos.append(positionz)
match_fields.append(fieldz)
print(match_skills)
return render_template("jobs.html", jobs=jobs, summaries=summaries, num=num, match_skills=match_skills, match_pos=match_pos, match_fields=match_fields, signedIn=True)
| {
"content_hash": "943b99f919a956a25269a449a3609ccc",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 378,
"avg_line_length": 41.279411764705884,
"alnum_prop": 0.6134663341645885,
"repo_name": "preetsmohan/check-ai",
"id": "48dbcbc6e49dd53cc0091dc85f5d6a8156abeae0",
"size": "2807",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "controllers/jobs.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1508"
},
{
"name": "HTML",
"bytes": "17982"
},
{
"name": "JavaScript",
"bytes": "12380"
},
{
"name": "Python",
"bytes": "20181"
}
],
"symlink_target": ""
} |
import re
import copy
import random
import os, sys
import MySQLdb
import requests
from time import sleep
from threading import Thread
from bs4 import BeautifulSoup
reload(sys)
sys.setdefaultencoding('utf-8')
clade = 'http://dblp.uni-trier.de/db/conf/mobisys/'
months = {
'January': '01',
'February': '02',
'March': '03',
'April': '04',
'May': '05',
'June': '06',
'July': '07',
'August': '08',
'September': '09',
'October': '10',
'November': '11',
'December': '12'
}
# regex to match months in <h2> tags
re_mons=r'(January|February|March|April|May|June|July|August|September|October|November|December)'
repeato_mons=r'([ /-]*'+re_mons+r'*)*'
pattern_mons=re_mons+repeato_mons
# regex to match years in <h2> tags
re_year=r'((19|20)\d+)'
repeato_year=r'([ /-]*'+re_year+r'*)*'
pattern_year=re_year+repeato_year
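# e.g. for a dblp heading such as "MobiSys 2015, Florence, Italy, May 18-22, 2015"
# (illustrative), pattern_mons picks out the month part and pattern_year the year.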
def get_leaves(clade):
r = requests.get(clade)
if r.status_code == 200:
soup = BeautifulSoup(r.text, 'lxml')
leaves = []
late = soup.find('ul', class_='publ-list')
tags = late.find_all('div', class_='data', itemprop='headline')
for tag in tags:
leaves.append(tag.find_all('a')[-1]['href'])
return leaves
def sub_months(match_obj):
""" transfer months to digital form (in-place change)
"""
for m in months:
match_obj = re.sub(m, months[m], match_obj)
return match_obj
def get_yymm(leaf):
r = requests.get(leaf)
if r.status_code == 200:
soup = BeautifulSoup(r.text, 'lxml')
lat = soup.find('div', class_='data', itemprop='headline')
tag = lat.find('span', class_='title', itemprop='name')
txt = tag.get_text()
try:
match_obj_mons = re.search(pattern_mons, txt)
match_obj_mons = match_obj_mons.group().strip()
match_obj_mons = sub_months(match_obj_mons)
month = match_obj_mons
except Exception, error_mons:
print '[-]', error_mons
month = None
try:
match_obj_year = re.search(pattern_year, txt)
match_obj_year = match_obj_year.group().strip()
year = match_obj_year
except Exception, error_year:
print '[-]', error_year
year = None
return year, month
def get_titles(leaf):
r = requests.get(leaf)
if r.status_code == 200:
soup = BeautifulSoup(r.text, 'lxml')
title_lst = []
tags = soup.find_all('span', class_='title', itemprop='name')
for tag in tags:
title_lst.append(tag.get_text())
return title_lst
def incert_mysql(year, month, title_lst):
try:
tablename = 'papertitle'
conn = MySQLdb.connect(host='127.0.0.1', user='root', passwd='13917331612', db='conference')
c = conn.cursor()
conn.set_character_set('utf8')
c.execute('SET NAMES utf8;')
c.execute('SET CHARACTER SET utf8;')
c.execute('SET character_set_connection=utf8;')
for p in title_lst:
try:
sql = "insert into " + tablename + "(year, month, name, title, class, category) \
values(%s, %s, %s, %s, %s, %s)"
param = (year, month, 'MobiSys', p, 'B', 'network')
c.execute(sql, param)
print ">>>> [+] Insert paper <%s> : done." %(p)
except MySQLdb.Error, e:
print "[-] Mysql Error %d: %s" % (e.args[0], e.args[1])
continue
conn.commit()
c.close()
except MySQLdb.Error, e:
print "[-] Mysql Error %d: %s" % (e.args[0], e.args[1])
return None
def build():
leaves = get_leaves(clade)
for leaf in leaves:
title_lst = get_titles(leaf)
year, month = get_yymm(leaf)
incert_mysql(year, month, title_lst)
return None
build() | {
"content_hash": "28aafbb67852bf95ab5f38e225622305",
"timestamp": "",
"source": "github",
"line_count": 135,
"max_line_length": 98,
"avg_line_length": 24.985185185185184,
"alnum_prop": 0.6406759561221465,
"repo_name": "dcclogin/TextGenerator",
"id": "3115bd62c0516881ac275d0d2ba4fce8919d2dbf",
"size": "3398",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "TitleCrawler/ccf_conference/categories/network/mobisys2015.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "314159"
}
],
"symlink_target": ""
} |
'''
Created on 11 août 2014
@author: Marc-Antoine
'''
from django.apps import AppConfig
from django.db.models.signals import post_migrate
class RefAppConfig(AppConfig):
name='ref'
verbose_name = u'Gestion du référentiel'
def ready(self):
import ref.cache
from ref.management import post_migrate_handler
        # Listen to the post_migrate signal
post_migrate.connect(post_migrate_handler, sender=self)
| {
"content_hash": "b2ed04c185976649ddf256c75275ad48",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 63,
"avg_line_length": 23.31578947368421,
"alnum_prop": 0.6952595936794582,
"repo_name": "marcanpilami/MAGE",
"id": "bde64b052e66eb98514c29f474b054236b214931",
"size": "463",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ref/apps.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "16033"
},
{
"name": "Dockerfile",
"bytes": "1730"
},
{
"name": "HTML",
"bytes": "88971"
},
{
"name": "JavaScript",
"bytes": "6024"
},
{
"name": "Python",
"bytes": "401724"
},
{
"name": "Shell",
"bytes": "20159"
}
],
"symlink_target": ""
} |
"""
Use scikit-learn regressor interface with GPU histogram tree method
===================================================================
"""
from dask import array as da
from dask.distributed import Client
# It's recommended to use dask_cuda for GPU assignment
from dask_cuda import LocalCUDACluster
import xgboost
def main(client):
# generate some random data for demonstration
n = 100
m = 1000000
partition_size = 10000
X = da.random.random((m, n), partition_size)
y = da.random.random(m, partition_size)
regressor = xgboost.dask.DaskXGBRegressor(verbosity=1)
regressor.set_params(tree_method='gpu_hist')
# assigning client here is optional
regressor.client = client
regressor.fit(X, y, eval_set=[(X, y)])
prediction = regressor.predict(X)
bst = regressor.get_booster()
history = regressor.evals_result()
print('Evaluation history:', history)
# returned prediction is always a dask array.
assert isinstance(prediction, da.Array)
return bst # returning the trained model
if __name__ == '__main__':
# With dask cuda, one can scale up XGBoost to arbitrary GPU clusters.
# `LocalCUDACluster` used here is only for demonstration purpose.
with LocalCUDACluster() as cluster:
with Client(cluster) as client:
main(client)
| {
"content_hash": "198a8df15d5ce6f6acb310056e57b3d3",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 73,
"avg_line_length": 30.022222222222222,
"alnum_prop": 0.6543301258327165,
"repo_name": "dmlc/xgboost",
"id": "4c544e4e88c4f5f9cc0a5409f93b14f7a4cab07c",
"size": "1351",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "demo/dask/sklearn_gpu_training.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1383"
},
{
"name": "C",
"bytes": "23067"
},
{
"name": "C++",
"bytes": "2182522"
},
{
"name": "CMake",
"bytes": "52394"
},
{
"name": "CSS",
"bytes": "3812"
},
{
"name": "Cuda",
"bytes": "855374"
},
{
"name": "Dockerfile",
"bytes": "2364"
},
{
"name": "Groovy",
"bytes": "1251"
},
{
"name": "Java",
"bytes": "206549"
},
{
"name": "M4",
"bytes": "2131"
},
{
"name": "Makefile",
"bytes": "8179"
},
{
"name": "PowerShell",
"bytes": "4308"
},
{
"name": "Python",
"bytes": "1189411"
},
{
"name": "R",
"bytes": "342898"
},
{
"name": "Scala",
"bytes": "471040"
},
{
"name": "Shell",
"bytes": "45815"
},
{
"name": "TeX",
"bytes": "913"
}
],
"symlink_target": ""
} |
from __future__ import division
import time
import math
import smbus
class Hat:
address = 0x40
mode1 = 0x00
mode2 = 0x01
prescale = 0xFE
bus = smbus.SMBus(1)
def __init__(self):
self.set_all_pwm(0, 0)
self.bus.write_byte_data(self.address, self.mode2, 0x04)
self.bus.write_byte_data(self.address, self.mode1, 0x01)
time.sleep(0.005) # wait for oscillator
mode1 = self.bus.read_byte_data(self.address, self.mode1) & 0xFF
mode1 = mode1 & ~0x10 # wake up (reset sleep)
self.bus.write_byte_data(self.address, self.mode1, mode1)
time.sleep(0.005) # wait for oscillator
def set_pwm_freq(self, freq_hz):
prescaleval = 25000000.0 # 25MHz
prescaleval /= 4096.0 # 12-bit
prescaleval /= float(freq_hz)
prescaleval -= 1.0
prescale = int(math.floor(prescaleval + 0.5))
oldmode = self.bus.read_byte_data(self.address, self.mode1) & 0xFF
newmode = (oldmode & 0x7F) | 0x10 # sleep
self.bus.write_byte_data(self.address, self.mode1, newmode) # go to sleep
self.bus.write_byte_data(self.address, self.prescale, prescale)
self.bus.write_byte_data(self.address, self.mode1, oldmode)
time.sleep(0.005)
self.bus.write_byte_data(self.address, self.mode1, oldmode | 0x80)
def set_pwm(self, channel, on, off):
self.bus.write_byte_data(self.address, 0x06 + 4 * channel, on & 0xFF)
self.bus.write_byte_data(self.address, 0x07 + 4 * channel, on >> 8)
self.bus.write_byte_data(self.address, 0x08 + 4 * channel, off & 0xFF)
self.bus.write_byte_data(self.address, 0x09 + 4 * channel, off >> 8)
def set_all_pwm(self, on, off):
self.bus.write_byte_data(self.address, 0xFA, on & 0xFF)
self.bus.write_byte_data(self.address, 0xFB, on >> 8)
self.bus.write_byte_data(self.address, 0xFC, off & 0xFF)
self.bus.write_byte_data(self.address, 0xFD, off >> 8)
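# Minimal usage sketch (assumes a PCA9685-compatible PWM controller at the
# default 0x40 address on I2C bus 1; channel and pulse values are illustrative):
#
#   hat = Hat()
#   hat.set_pwm_freq(50)    # 50 Hz is a typical servo frequency
#   hat.set_pwm(0, 0, 307)  # ~1.5 ms pulse on channel 0 (307/4096 of a 20 ms period)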
| {
"content_hash": "8b2bc5a7b01ff9afcd46b54fd44e8002",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 82,
"avg_line_length": 34.60344827586207,
"alnum_prop": 0.6203288490284006,
"repo_name": "romaneckert/robot",
"id": "c0a76f7239c96ee9b69509bb06829fcec0f0266b",
"size": "2031",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "archive/bionics_old/hat.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "8913"
},
{
"name": "HTML",
"bytes": "1179"
},
{
"name": "JavaScript",
"bytes": "44153"
},
{
"name": "Python",
"bytes": "18355"
}
],
"symlink_target": ""
} |
import inspect
import json
import logging
import os
import re
import shutil
from collections import OrderedDict
from traceback import print_exc
from cave.__version__ import __version__ as version
from cave.html.html_helpers import figure_to_html
from cave.utils.tooltips import get_tooltip
__author__ = "Marius Lindauer"
__copyright__ = "Copyright 2016, ML4AAD"
__license__ = "MIT"
__email__ = "[email protected]"
class HTMLBuilder(object):
def __init__(self,
output_dn: str,
scenario_name: str,
logo_fn: str,
logo_custom: bool=False):
'''
The dictionary structure in the HTML-Builder follows the following syntax:
::
{"top1" : {
"tooltip": str|None,
"subtop1: { # generates a further bottom if it is dictionary
"tooltip": str|None,
...
}
"table": str|None (html table)
"figure" : str|None (file name)
"bokeh" : (str, str)|None # (script, div as returned by components())
}
"top2: { ... }
}
Arguments
---------
output_dn:str
output directory name
scenario_name:str
name of scenario
logo_fn: str
path to the logo of the configurator
        logo_custom: bool
            if True, the logo is treated as an external logo that needs to be copied
'''
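        # A minimal data_dict following the structure above might look like
        # (illustrative, not taken from an actual report):
        #
        #   OrderedDict([("Cost analysis", {
        #       "tooltip": "Short help text",
        #       "figure": ["cost_over_time.png"],
        #       "table": "<table>...</table>",
        #   })])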
self.logger = logging.getLogger("HTMLBuilder")
self.own_folder = os.path.realpath(os.path.abspath(os.path.split(inspect.getfile(inspect.currentframe()))[0]))
self.logo_fn = logo_fn
self.logo_custom = logo_custom
self.output_dn = output_dn
self.unique_id_counter = 0
self.budget = ''
self.relative_content_js = os.path.join('content', 'js')
self.relative_content_images = os.path.join('content', 'images')
if output_dn:
os.makedirs(os.path.join(self.output_dn, self.relative_content_js), exist_ok=True)
os.makedirs(os.path.join(self.output_dn, self.relative_content_images), exist_ok=True)
# todo make relative dirs again
# Copy subfolders
subfolders = ["css", "images", "js", "font"]
for sf in subfolders:
try:
shutil.rmtree(os.path.join(self.output_dn, "html", sf), ignore_errors=True)
shutil.copytree(os.path.join(self.own_folder, "web_files", sf),
os.path.join(self.output_dn, "html", sf))
except OSError:
print_exc()
self.header_part_1 = '''
<!DOCTYPE html>
<html>
<head>
<meta content="text/html;charset=utf-8" http-equiv="Content-Type">
<meta content="utf-8" http-equiv="encoding">
<title>CAVE</title>
<link href="html/css/accordion.css" rel="stylesheet" />
<link href="html/css/table.css" rel="stylesheet" />
<link href="html/css/lightbox.min.css" rel="stylesheet" />
<link href="html/css/help-tip.css" rel="stylesheet" />
<link href="html/css/global.css" rel="stylesheet" />
<link href="html/css/back-to-top.css" rel="stylesheet" />
<link href="html/css/tabs.css" rel="stylesheet" />
<link href="html/css/bokeh-1.1.0.min.css" rel="stylesheet" type="text/css">
<link href="html/css/bokeh-widgets-1.1.0.min.css" rel="stylesheet" type="text/css">
<link href="html/css/bokeh-tables-1.1.0.min.css" rel="stylesheet" type="text/css">
<script src="html/js/tabs.js"></script>
<script src="html/js/bokeh-1.1.0.min.js"></script>
<script src="html/js/bokeh-widgets-1.1.0.min.js"></script>
<script src="html/js/bokeh-tables-1.1.0.min.js"></script>
<!--Below here are the includes of scripts for the report (e.g. bokeh)-->
'''
self.header_part_2 = '''
<!--Above here are the includes of scripts for the report (e.g. bokeh)-->
</head>
<body>
<script src="http://www.w3schools.com/lib/w3data.js"></script>
<script src="html/js/lightbox-plus-jquery.min.js"></script>
<header>
<div class='l-wrapper'>
<img class='logo logo--configurator' src="html/images/{}" />
<img class='logo logo--cave' src="html/images/CAVE_logo.png" />
<img class='logo logo--ml' src="html/images/automl-logo.png" />
</div>
</header>
<div class='l-wrapper'>
<h1></h1>
'''.format(self.logo_fn if not self.logo_custom else 'custom_logo.png')
self.footer = '''
</div>
<footer>
<div class='l-wrapper'>
Generated by <a href="https://github.com/automl/CAVE">CAVE v{}</a> and developed by
<a href="http://www.automl.org">autoML</a> | Optimized for Chrome and Firefox
</div>
</footer>'''.format(version) + '''
<script>
var acc = document.getElementsByClassName("accordion");
var i;
for (i = 0; i < acc.length; i++) {
acc[i].onclick = function(){
this.classList.toggle("active");
this.nextElementSibling.classList.toggle("show");
}
}
</script>
<script src="html/js/back-to-top.js"></script>
</body>
</html>
'''
def generate_webpage(self, data_dict: OrderedDict):
'''
Arguments
---------
data_dict : OrderedDict
see constructor
'''
html_head, html_body = "", ""
html_head += self.header_part_1
# Get components (script, div) for each entry in report
scripts, divs = self.generate_html(data_dict)
# Scripts go into header, divs go into body
for script in scripts:
html_head += script # e.g. bokeh-scripts used for hover
for div in divs:
html_body += div
html_head += self.header_part_2 # Close header after adding all scripts
html = html_head + html_body + self.footer
# Write webpage to file
with open(os.path.join(self.output_dn, "report.html"), "w") as fp:
fp.write(html)
# If available, add custom logo
if self.logo_custom:
original_path = self.logo_fn
self.logo_fn = os.path.join(self.output_dn, "html", 'images', 'custom_logo.png')
self.logger.debug("Attempting to copy %s to %s", original_path, self.logo_fn)
shutil.copyfile(original_path, self.logo_fn)
self.logo_custom = False
def generate_html(self, data_dict: OrderedDict):
with open(os.path.join(self.output_dn, 'debug', 'webpage_dict.json'), 'w') as f:
f.write(json.dumps(data_dict, indent=2))
# Generate
scripts, divs = [], []
for k, v in data_dict.items():
if not v: # ignore empty entry
self.logger.debug("No content for %s, skipping in html-generation", k)
continue
script, div = self.add_layer(layer_name=k, data_dict=v)
if script:
scripts.append(script)
divs.append(div)
return scripts, divs
def add_layer(self, layer_name, data_dict: OrderedDict, is_tab: bool=False):
'''
        Add a further layer for the top-level keys of data_dict.
Parameters
----------
layer_name: str
name of the layer
data_dict : OrderedDict
see constructor
is_tab: bool
if True, don't use accordion but tab-structure to wrap content
Returns
-------
(script, div): (str, str)
script goes into header, div goes into body
'''
script, div = "", ""
if layer_name is None:
layer_name = ""
unique_layer_name = layer_name + self.get_unique_id()
# Add tooltip, if possible
tooltip = data_dict.get("tooltip", None)
if tooltip is not None:
tooltip = "<div class=\"help-tip\"><p>{}</p></div>".format(tooltip)
# TODO elif is obsolete / can be merged into first option (simplify!)
elif get_tooltip(layer_name): # if no tooltip is parsed, try to look it up
tooltip = "<div class=\"help-tip\"><p>{}</p></div>".format(get_tooltip(layer_name))
else:
tooltip = ""
# Start accordion-panel
if not is_tab:
div += "<div class=\"accordion\">{0} {1}</div>\n".format(layer_name, tooltip)
div += "<div class=\"panel\">\n"
# If this layer represents budgets, add tabs for this layer, add tabs-code
sublayer_names = [k for k, v in data_dict.items() if isinstance(v, dict)]
use_tabs = False
if len(sublayer_names) >= 1 and all([sn.lower().startswith('budget') for sn in sublayer_names]):
use_tabs = True
if use_tabs:
div += "<div class=\"tab\">\n"
tabs_names = [k.replace('_', ' ') for k, v in data_dict.items() if isinstance(v, dict)]
default_open_id = "defaultOpen" + self.get_unique_id()
div += " <button class=\"tablinks\" onclick=\"openTab(event, '{0}', '{1}')\" "\
"id=\"{2}\">{1}</button>\n".format(unique_layer_name, tabs_names[0], default_open_id)
for name in tabs_names[1:]:
div += " <button class=\"tablinks\" onclick=\"openTab(event, '{0}', '{1}')\">{1}</button>\n".format(
unique_layer_name, name)
div += "</div>\n"
for k, v in data_dict.items():
if k == "tooltip":
continue
if k.startswith('budget'):
self.budget = k[7:]
if not v:
if isinstance(v, dict):
continue
else:
return '', ''
elif isinstance(v, dict):
if use_tabs:
div += "<div id=\"{0}\" class=\"tabcontent\">\n".format(unique_layer_name + k.replace('_', ' '))
div += "<div class=\"pane\">\n"
add_script, add_div = self.add_layer(k, v, is_tab=use_tabs)
script += add_script
div += add_div
if use_tabs: # close div
div += "</div>\n"
div += "</div>\n"
elif k == "figure":
div += figure_to_html(v, prefix=self.output_dn)
elif k == "figure_x2":
div += figure_to_html(v, prefix=self.output_dn, max_in_a_row=2)
elif k == "table":
div += "<div style=\"overflow-x: auto\" align=\"center\">\n{}\n</div>\n".format(v)
elif k == "html":
div += ("<div align=\"center\">\n<a href='{}'>Interactive "
"Plot</a>\n</div>\n".format(v[len(self.output_dn):].lstrip("/")))
elif k == "bokeh":
# Escape path for URL (remove spaces, slashes and single quotes)
path_script = os.path.join(self.relative_content_js, '_'.join([layer_name, self.budget,
self.get_unique_id(), 'script.js']))
path_script = path_script.translate({ord(c): None for c in ' \''})
# Write script to file
if self.output_dn:
with open(os.path.join(self.output_dn, path_script), 'w') as fn:
js_code = re.sub('<.*?>', '', v[0].strip()) # Remove script-tags
fn.write(js_code)
script += "<script src=\"" + path_script + "\"></script>\n"
else:
script += v[0]
div += "<div align=\"center\">\n{}\n</div>\n".format(v[1])
else:
try:
div += v
except Exception as err:
self.logger.warning("Failed on interpreting: %s, %s, %s (Error: %s)",
str(layer_name), str(k), str(v), err, exc_info=1)
if use_tabs: # close tab with selecting first element by default
div += "<script> \n"
div += "// Get the element with id=\"{}\" and click on it \n".format(default_open_id)
div += "document.getElementById(\"{}\").click(); \n".format(default_open_id)
div += "</script> \n"
if not is_tab:
div += "</div>"
return script, div
def get_unique_id(self):
self.unique_id_counter += 1
return str(self.unique_id_counter)
| {
"content_hash": "1c0d06d53d55eede9ed4b263b7f4cfe5",
"timestamp": "",
"source": "github",
"line_count": 320,
"max_line_length": 118,
"avg_line_length": 38.84375,
"alnum_prop": 0.53266291230893,
"repo_name": "automl/SpySMAC",
"id": "4a55ee83f85d6fb3e04f5a446b926d913839f113",
"size": "12430",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cave/html/html_builder.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "7251"
},
{
"name": "CSS",
"bytes": "7272"
},
{
"name": "JavaScript",
"bytes": "715"
},
{
"name": "Python",
"bytes": "162357"
},
{
"name": "Shell",
"bytes": "1841"
}
],
"symlink_target": ""
} |
import argparse
import re
import tempfile
import os
import util
logger = util.get_logger("MF")
def finish (i, o, p):
i.close()
os.close(o)
os.rename(p,i.name)
util.wrote(i.name,logger)
def run (rex, main, others):
util.reading(main.name,logger)
mainH,mainP = tempfile.mkstemp(dir=os.path.dirname(main.name))
othersIO = list()
for o in others:
util.reading(o.name,logger)
h,p = tempfile.mkstemp(dir=os.path.dirname(o.name))
othersIO.append((o,h,p))
    # read the files in lockstep, one line at a time
dropped = 0
lines = 0
for line in main:
lines += 1
prefix = line.split('\t')[0]
keep = rex.search(line) is None
if keep:
os.write(mainH,line)
else:
dropped += 1
for i,o,_p in othersIO:
line1 = i.readline()
prefix1 = line1.split('\t')[0].rstrip()
if prefix1 != prefix:
raise Exception("prefix mismatch",prefix,prefix,i.name)
if keep:
os.write(o,line1)
for i in others:
line1 = i.readline()
if line1 != '':
raise Exception('uneven files',line1,i.name)
logger.info("Dropped {:,d} lines out of {:,d}".format(dropped,lines))
# close streams and rename
finish(main,mainH,mainP)
for i,o,p in othersIO:
finish(i,o,p)
if __name__ == '__main__':
ap = argparse.ArgumentParser(description='Filter several parallel files')
ap.add_argument('-rex',help='the regular expression to filter on',
required=True)
ap.add_argument('main',help='the main file searched in',
type=argparse.FileType('r'))
ap.add_argument('others',help='other filtered files',nargs='*',
type=argparse.FileType('r'))
args = ap.parse_args()
run(re.compile(args.rex),args.main,args.others)
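# Example invocation (illustrative file names): drop every line whose first
# field is "spam" from main.tsv and keep the companion files aligned:
#   python multifilter.py -rex '^spam\t' main.tsv labels.tsv scores.tsv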
| {
"content_hash": "8df86120773c174db99700e0a3fef954",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 77,
"avg_line_length": 31.966101694915253,
"alnum_prop": 0.5768822905620361,
"repo_name": "Magnetic/proficiency-metric",
"id": "bfe0b98ae6c7cef92a6db6763458646ced3090be",
"size": "2044",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "predeval/multifilter.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "75803"
}
],
"symlink_target": ""
} |
import time
def test_within_timeout():
time.sleep(0.001)
def test_exceeds_timeout():
time.sleep(1000)
| {
"content_hash": "bd16b500bbbe8de7cf6a2740521fdaaf",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 27,
"avg_line_length": 12.666666666666666,
"alnum_prop": 0.6754385964912281,
"repo_name": "tdyas/pants",
"id": "d9abffb6edf2602a035602899491181237d870d0",
"size": "246",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "testprojects/tests/python/pants/timeout/test_exceeds_timeout.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "655"
},
{
"name": "C++",
"bytes": "2010"
},
{
"name": "CSS",
"bytes": "9444"
},
{
"name": "Dockerfile",
"bytes": "5596"
},
{
"name": "GAP",
"bytes": "1283"
},
{
"name": "Gherkin",
"bytes": "919"
},
{
"name": "Go",
"bytes": "2765"
},
{
"name": "HTML",
"bytes": "44381"
},
{
"name": "Java",
"bytes": "518180"
},
{
"name": "JavaScript",
"bytes": "22906"
},
{
"name": "Python",
"bytes": "7955590"
},
{
"name": "Rust",
"bytes": "1031208"
},
{
"name": "Scala",
"bytes": "106520"
},
{
"name": "Shell",
"bytes": "109904"
},
{
"name": "Starlark",
"bytes": "502255"
},
{
"name": "Thrift",
"bytes": "2953"
}
],
"symlink_target": ""
} |
import os
from jinja2 import Environment, FileSystemLoader
DIR = os.path.abspath(os.path.dirname(__file__))
PRODUCTION = int(os.environ.get("PRODUCTION", "1"))
KG_PORT = 10100
deployments = {}
for deployment in os.environ.get("DEPLOYMENTS", "ml:10002").split(","):
name, db, s3, tm, api_port = deployment.split(":")
portal_port = 8080 + int(api_port) % 10000
deployments[name] = {
"api_port": api_port,
"portal_port": portal_port,
"db": db,
"s3": s3,
"tm": tm.upper()
}
kwargs = {
"production": PRODUCTION,
"deployments": deployments,
"nworkers": 2 if PRODUCTION else 1,
"reload": int(not PRODUCTION),
"node_env": "production" if PRODUCTION else "development",
"flask_log_level": "INFO" if PRODUCTION else "DEBUG",
"jupyter_gateway_host": f"localhost:{KG_PORT}" if PRODUCTION else f"kernel-gateway:{KG_PORT}",
"dd_agent_host": "localhost" if PRODUCTION else "datadog",
"mpcontribs_api_host": "localhost" if PRODUCTION else "contribs-apis",
}
kwargs["flask_env"] = kwargs["node_env"]
kwargs["jupyter_gateway_url"] = "http://" + kwargs["jupyter_gateway_host"]
env = Environment(loader=FileSystemLoader(DIR))
template = env.get_template("supervisord.conf.jinja")
template.stream(**kwargs).dump("supervisord.conf")
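# Illustrative DEPLOYMENTS value (comma-separated "name:db:s3:tm:api_port" entries):
#   DEPLOYMENTS="ml:ml-db:ml-bucket:prod:10002,ls:ls-db:ls-bucket:dev:10003"
# Each entry yields an API port and a derived portal port (8080 + api_port % 10000).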
| {
"content_hash": "a2886acacbf6788d10ac1555314fc648",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 98,
"avg_line_length": 35.351351351351354,
"alnum_prop": 0.6590214067278287,
"repo_name": "materialsproject/MPContribs",
"id": "9451e26235946b01fd088514da7056fd241e386e",
"size": "1308",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mpcontribs-api/supervisord/conf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "7070"
},
{
"name": "HTML",
"bytes": "93413"
},
{
"name": "JavaScript",
"bytes": "86685"
},
{
"name": "Jinja",
"bytes": "4696"
},
{
"name": "Jupyter Notebook",
"bytes": "244012"
},
{
"name": "Makefile",
"bytes": "1682"
},
{
"name": "Python",
"bytes": "349352"
},
{
"name": "SCSS",
"bytes": "4141"
},
{
"name": "Shell",
"bytes": "2354"
}
],
"symlink_target": ""
} |
from rdmo.core.renderers import BaseXMLRenderer
from rdmo.core.utils import get_languages
class TasksRenderer(BaseXMLRenderer):
def render_task(self, xml, task):
xml.startElement('task', {'dc:uri': task['uri']})
self.render_text_element(xml, 'uri_prefix', {}, task['uri_prefix'])
self.render_text_element(xml, 'key', {}, task['key'])
self.render_text_element(xml, 'dc:comment', {}, task['comment'])
for lang_code, lang_string, lang_field in get_languages():
self.render_text_element(xml, 'title', {'lang': lang_code}, task['title_%s' % lang_code])
self.render_text_element(xml, 'text', {'lang': lang_code}, task['text_%s' % lang_code])
self.render_text_element(xml, 'start_attribute', {'dc:uri': task['start_attribute']}, None)
self.render_text_element(xml, 'end_attribute', {'dc:uri': task['end_attribute']}, None)
self.render_text_element(xml, 'days_before', {}, task['days_before'])
self.render_text_element(xml, 'days_after', {}, task['days_after'])
xml.startElement('conditions', {})
if 'conditions' in task and task['conditions']:
for condition in task['conditions']:
self.render_text_element(xml, 'condition', {'dc:uri': condition}, None)
xml.endElement('conditions')
xml.startElement('catalogs', {})
if 'catalogs' in task and task['catalogs']:
for catalog in task['catalogs']:
self.render_text_element(xml, 'catalog', {'dc:uri': catalog}, None)
xml.endElement('catalogs')
xml.endElement('task')
class TaskRenderer(TasksRenderer):
def render_document(self, xml, tasks):
xml.startElement('rdmo', {
'xmlns:dc': 'http://purl.org/dc/elements/1.1/',
'created': self.created
})
for task in tasks:
self.render_task(xml, task)
xml.endElement('rdmo')
| {
"content_hash": "1af326173ad8614b9d782355cbc2f090",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 101,
"avg_line_length": 42.30434782608695,
"alnum_prop": 0.6022610483042138,
"repo_name": "rdmorganiser/rdmo",
"id": "d618eba3f44cd6ecc8bd34ae90bab550aa316d0c",
"size": "1946",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rdmo/tasks/renderers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "426256"
},
{
"name": "JavaScript",
"bytes": "110821"
},
{
"name": "Python",
"bytes": "1265092"
},
{
"name": "SCSS",
"bytes": "20373"
}
],
"symlink_target": ""
} |
__author__ = 'User'
from telnetlib import Telnet
class JamesHelper:
def __init__(self, app):
self.app = app
def unsure_user_exists(self, username, password):
james_config = self.app.config['james']
session = JamesHelper.Session(
james_config['host'], james_config['port'], james_config['username'], james_config['password'])
if session.is_user_registred(username):
session.reset_password(username, password)
else:
session.create_user(username, password)
session.quit()
class Session:
def __init__(self, host, port, username, password):
self.telnet = Telnet(host, port, 5)
self.read_until("Login id:")
self.write(username + "\n")
self.read_until("Password:")
self.write(password + "\n")
self.read_until("Welcome root. HELP for a list of commands ")
def is_user_registred(self, username):
self.write("verify %s\n" % (username))
res = self.telnet.expect([b"exists", b"does not exist"])
return res[0] == 0
def create_user(self, username, password):
self.write("adduser %s %s\n" % (username, password))
self.read_until("User %s added" % username)
def reset_password(self, username, password):
self.write("setpassword %s %s\n" % (username, password))
self.read_until("Password for %s reset" % username)
def quit(self):
self.write("quit\n")
def read_until(self, text):
self.telnet.read_until(text.encode('ascii'), 5)
        def write(self, text):
self.telnet.write(text.encode('ascii')) | {
"content_hash": "ee4c28f2b58a6a4f611c763826eb6989",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 107,
"avg_line_length": 34.56,
"alnum_prop": 0.5729166666666666,
"repo_name": "SvetlanaPopova/python_mantis",
"id": "081dc05bb00916c9d3732f64ef49eddd79fa84cb",
"size": "1728",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fixture/james.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "PHP",
"bytes": "163"
},
{
"name": "Python",
"bytes": "22935"
}
],
"symlink_target": ""
} |
"""Count number of features in all building dataset"""
import codecs
import os
import sys
sys.path.insert(0, os.path.abspath('.'))
import catatom2osm
import download
import layer
import setup
from osmxml import etree
baseurl = "http://www.catastro.minhap.es/INSPIRE/Buildings/"
fh = codecs.open('count_buildings.csv', 'w', 'utf-8')
ns = {
'atom': 'http://www.w3.org/2005/Atom',
'georss': 'http://www.georss.org/georss',
'gco': 'http://www.isotc211.org/2005/gco',
'gmd': 'http://www.isotc211.org/2005/gmd'
}
def run():
qgs = catatom2osm.QgsSingleton()
for prov_code in setup.valid_provinces:
url = setup.prov_url['BU'].format(code=prov_code)
response = download.get_response(url)
root = etree.fromstring(response.content)
for entry in root.findall("atom:entry[atom:title]", namespaces=ns):
title = entry.find('atom:title', ns).text
zip_code = title[1:6]
mun = title.replace('buildings', '').strip()[6:]
url = u"{0}{1}/{2}-{3}/A.ES.SDGC.BU.{2}.zip".format(baseurl, prov_code, zip_code, mun)
gml_fn = ".".join((setup.fn_prefix, 'BU', zip_code, 'building.gml'))
download.wget(url, 'temp.zip')
gml = layer.BaseLayer('/vsizip/temp.zip/'+gml_fn, 'temp', 'ogr')
sys.stdout.write(' '*70+'\r')
c = gml.featureCount()
print zip_code, mun, c
fh.write(u'{}\t{}\t{}\n'.format(zip_code, mun, c))
if os.path.exists('temp'):
os.remove('temp')
if __name__ == "__main__":
run()
| {
"content_hash": "8d64193ff36cfe4c3dfd0ea5c2d4a6bb",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 98,
"avg_line_length": 35.68181818181818,
"alnum_prop": 0.5898089171974522,
"repo_name": "javiersanp/CatAtom2Osm",
"id": "c2dc4daaaf4c02085579ca99ee2661b7eabacce9",
"size": "1594",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/count_buildings.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "3436"
},
{
"name": "Makefile",
"bytes": "17385"
},
{
"name": "Python",
"bytes": "400631"
},
{
"name": "Shell",
"bytes": "3020"
}
],
"symlink_target": ""
} |
import base64
from datetime import datetime
import hashlib
import hmac
import time
from hashlib import sha256
try:
import simplejson as json
except ImportError:
import json
from libcloud.utils.py3 import ET
from libcloud.common.base import ConnectionUserAndKey, XmlResponse, BaseDriver
from libcloud.common.base import JsonResponse
from libcloud.common.types import InvalidCredsError, MalformedResponseError
from libcloud.utils.py3 import b, httplib, urlquote
from libcloud.utils.xml import findtext, findall
__all__ = [
'AWSBaseResponse',
'AWSGenericResponse',
'AWSTokenConnection',
'SignedAWSConnection',
'AWSRequestSignerAlgorithmV2',
'AWSRequestSignerAlgorithmV4',
'AWSDriver'
]
DEFAULT_SIGNATURE_VERSION = '2'
UNSIGNED_PAYLOAD = 'UNSIGNED-PAYLOAD'
class AWSBaseResponse(XmlResponse):
namespace = None
def _parse_error_details(self, element):
"""
Parse code and message from the provided error element.
:return: ``tuple`` with two elements: (code, message)
:rtype: ``tuple``
"""
code = findtext(element=element, xpath='Code',
namespace=self.namespace)
message = findtext(element=element, xpath='Message',
namespace=self.namespace)
return code, message
class AWSGenericResponse(AWSBaseResponse):
# There are multiple error messages in AWS, but they all have an Error node
# with Code and Message child nodes. Xpath to select them
# None if the root node *is* the Error node
xpath = None
# This dict maps <Error><Code>CodeName</Code></Error> to a specific
# exception class that is raised immediately.
# If a custom exception class is not defined, errors are accumulated and
# returned from the parse_error method.
exceptions = {}
def success(self):
return self.status in [httplib.OK, httplib.CREATED, httplib.ACCEPTED]
def parse_error(self):
context = self.connection.context
status = int(self.status)
# FIXME: Probably ditch this as the forbidden message will have
# corresponding XML.
if status == httplib.FORBIDDEN:
if not self.body:
raise InvalidCredsError(str(self.status) + ': ' + self.error)
else:
raise InvalidCredsError(self.body)
try:
body = self.parse_body()
except Exception:
raise MalformedResponseError('Failed to parse XML',
body=self.body,
driver=self.connection.driver)
if self.xpath:
errs = findall(element=body, xpath=self.xpath,
namespace=self.namespace)
else:
errs = [body]
msgs = []
for err in errs:
code, message = self._parse_error_details(element=err)
exceptionCls = self.exceptions.get(code, None)
if exceptionCls is None:
msgs.append('%s: %s' % (code, message))
continue
            # A custom exception class is defined; raise it immediately.
params = {}
if hasattr(exceptionCls, 'kwargs'):
for key in exceptionCls.kwargs:
if key in context:
params[key] = context[key]
raise exceptionCls(value=message, driver=self.connection.driver,
**params)
return "\n".join(msgs)
class AWSTokenConnection(ConnectionUserAndKey):
def __init__(self, user_id, key, secure=True,
host=None, port=None, url=None, timeout=None, proxy_url=None,
token=None, retry_delay=None, backoff=None):
self.token = token
super(AWSTokenConnection, self).__init__(user_id, key, secure=secure,
host=host, port=port, url=url,
timeout=timeout,
retry_delay=retry_delay,
backoff=backoff,
proxy_url=proxy_url)
def add_default_params(self, params):
# Even though we are adding it to the headers, we need it here too
# so that the token is added to the signature.
if self.token:
params['x-amz-security-token'] = self.token
return super(AWSTokenConnection, self).add_default_params(params)
def add_default_headers(self, headers):
if self.token:
headers['x-amz-security-token'] = self.token
return super(AWSTokenConnection, self).add_default_headers(headers)
class AWSRequestSigner(object):
"""
Class which handles signing the outgoing AWS requests.
"""
def __init__(self, access_key, access_secret, version, connection):
"""
:param access_key: Access key.
:type access_key: ``str``
:param access_secret: Access secret.
:type access_secret: ``str``
:param version: API version.
:type version: ``str``
:param connection: Connection instance.
:type connection: :class:`Connection`
"""
self.access_key = access_key
self.access_secret = access_secret
self.version = version
        # TODO: Remove cyclic dependency between connection and signer
self.connection = connection
def get_request_params(self, params, method='GET', path='/'):
return params
def get_request_headers(self, params, headers, method='GET', path='/',
data=None):
return params, headers
class AWSRequestSignerAlgorithmV2(AWSRequestSigner):
def get_request_params(self, params, method='GET', path='/'):
params['SignatureVersion'] = '2'
params['SignatureMethod'] = 'HmacSHA256'
params['AWSAccessKeyId'] = self.access_key
params['Version'] = self.version
params['Timestamp'] = time.strftime('%Y-%m-%dT%H:%M:%SZ',
time.gmtime())
params['Signature'] = self._get_aws_auth_param(
params=params,
secret_key=self.access_secret,
path=path)
return params
def _get_aws_auth_param(self, params, secret_key, path='/'):
"""
Creates the signature required for AWS, per
http://bit.ly/aR7GaQ [docs.amazonwebservices.com]:
StringToSign = HTTPVerb + "\n" +
ValueOfHostHeaderInLowercase + "\n" +
HTTPRequestURI + "\n" +
CanonicalizedQueryString <from the preceding step>
"""
connection = self.connection
keys = list(params.keys())
keys.sort()
pairs = []
for key in keys:
value = str(params[key])
pairs.append(urlquote(key, safe='') + '=' +
urlquote(value, safe='-_~'))
qs = '&'.join(pairs)
hostname = connection.host
if (connection.secure and connection.port != 443) or \
(not connection.secure and connection.port != 80):
hostname += ':' + str(connection.port)
string_to_sign = '\n'.join(('GET', hostname, path, qs))
b64_hmac = base64.b64encode(
hmac.new(b(secret_key), b(string_to_sign),
digestmod=sha256).digest()
)
return b64_hmac.decode('utf-8')
class AWSRequestSignerAlgorithmV4(AWSRequestSigner):
def get_request_params(self, params, method='GET', path='/'):
if method == 'GET':
params['Version'] = self.version
return params
def get_request_headers(self, params, headers, method='GET', path='/',
data=None):
now = datetime.utcnow()
headers['X-AMZ-Date'] = now.strftime('%Y%m%dT%H%M%SZ')
headers['X-AMZ-Content-SHA256'] = self._get_payload_hash(method, data)
headers['Authorization'] = \
self._get_authorization_v4_header(params=params, headers=headers,
dt=now, method=method, path=path,
data=data)
return params, headers
def _get_authorization_v4_header(self, params, headers, dt, method='GET',
path='/', data=None):
credentials_scope = self._get_credential_scope(dt=dt)
signed_headers = self._get_signed_headers(headers=headers)
signature = self._get_signature(params=params, headers=headers,
dt=dt, method=method, path=path,
data=data)
return 'AWS4-HMAC-SHA256 Credential=%(u)s/%(c)s, ' \
'SignedHeaders=%(sh)s, Signature=%(s)s' % {
'u': self.access_key,
'c': credentials_scope,
'sh': signed_headers,
's': signature
}
def _get_signature(self, params, headers, dt, method, path, data):
key = self._get_key_to_sign_with(dt)
string_to_sign = self._get_string_to_sign(params=params,
headers=headers, dt=dt,
method=method, path=path,
data=data)
return _sign(key=key, msg=string_to_sign, hex=True)
def _get_key_to_sign_with(self, dt):
return _sign(
_sign(
_sign(
_sign(('AWS4' + self.access_secret),
dt.strftime('%Y%m%d')),
self.connection.driver.region_name),
self.connection.service_name),
'aws4_request')
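        # i.e. the signing key is derived as
        #   HMAC(HMAC(HMAC(HMAC("AWS4" + secret, date), region), service), "aws4_request")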
def _get_string_to_sign(self, params, headers, dt, method, path, data):
canonical_request = self._get_canonical_request(params=params,
headers=headers,
method=method,
path=path,
data=data)
return '\n'.join(['AWS4-HMAC-SHA256',
dt.strftime('%Y%m%dT%H%M%SZ'),
self._get_credential_scope(dt),
_hash(canonical_request)])
def _get_credential_scope(self, dt):
return '/'.join([dt.strftime('%Y%m%d'),
self.connection.driver.region_name,
self.connection.service_name,
'aws4_request'])
def _get_signed_headers(self, headers):
return ';'.join([k.lower() for k in sorted(headers.keys())])
def _get_canonical_headers(self, headers):
return '\n'.join([':'.join([k.lower(), str(v).strip()])
for k, v in sorted(headers.items())]) + '\n'
def _get_payload_hash(self, method, data=None):
if method in ('POST', 'PUT'):
if data:
if hasattr(data, 'next') or hasattr(data, '__next__'):
# File upload; don't try to read the entire payload
return UNSIGNED_PAYLOAD
return _hash(data)
else:
return UNSIGNED_PAYLOAD
else:
return _hash('')
def _get_request_params(self, params):
# For self.method == GET
return '&'.join(["%s=%s" %
(urlquote(k, safe=''), urlquote(str(v), safe='~'))
for k, v in sorted(params.items())])
def _get_canonical_request(self, params, headers, method, path, data):
return '\n'.join([
method,
path,
self._get_request_params(params),
self._get_canonical_headers(headers),
self._get_signed_headers(headers),
self._get_payload_hash(method, data)
])
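# Summary of the V4 pieces assembled by the methods above (comment only, not
# original code); values in angle brackets are placeholders:
#
#   CanonicalRequest = <method> \n <path> \n <sorted query string> \n
#                      <canonical headers, each ending in \n> \n
#                      <signed headers> \n <payload hash>
#   StringToSign     = 'AWS4-HMAC-SHA256' \n <YYYYMMDDTHHMMSSZ> \n
#                      <date>/<region>/<service>/aws4_request \n
#                      SHA256(CanonicalRequest)
#   SigningKey       = HMAC(HMAC(HMAC(HMAC('AWS4' + secret, <date>),
#                      <region>), <service>), 'aws4_request')
#   Signature        = hex(HMAC(SigningKey, StringToSign))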
class SignedAWSConnection(AWSTokenConnection):
version = None
def __init__(self, user_id, key, secure=True, host=None, port=None,
url=None, timeout=None, proxy_url=None, token=None,
retry_delay=None, backoff=None,
signature_version=DEFAULT_SIGNATURE_VERSION):
super(SignedAWSConnection, self).__init__(user_id=user_id, key=key,
secure=secure, host=host,
port=port, url=url,
timeout=timeout, token=token,
retry_delay=retry_delay,
backoff=backoff,
proxy_url=proxy_url)
self.signature_version = str(signature_version)
if self.signature_version == '2':
signer_cls = AWSRequestSignerAlgorithmV2
elif self.signature_version == '4':
signer_cls = AWSRequestSignerAlgorithmV4
else:
raise ValueError('Unsupported signature_version: %s' %
(signature_version))
self.signer = signer_cls(access_key=self.user_id,
access_secret=self.key,
version=self.version,
connection=self)
def add_default_params(self, params):
params = self.signer.get_request_params(params=params,
method=self.method,
path=self.action)
return params
def pre_connect_hook(self, params, headers):
params, headers = self.signer.get_request_headers(params=params,
headers=headers,
method=self.method,
path=self.action,
data=self.data)
return params, headers
class AWSJsonResponse(JsonResponse):
"""
Amazon ECS response class.
    The ECS API uses JSON, unlike the S3 and ELB drivers.
"""
def parse_error(self):
response = json.loads(self.body)
code = response.get('ErrorCode') or response.get('__type', '')
message = response.get('Message') or response.get('message', '')
return '%s: %s' % (code, message)
def _sign(key, msg, hex=False):
if hex:
return hmac.new(b(key), b(msg), hashlib.sha256).hexdigest()
else:
return hmac.new(b(key), b(msg), hashlib.sha256).digest()
def _hash(msg):
return hashlib.sha256(b(msg)).hexdigest()
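# Illustrative sketch (not part of the original module): deriving a SigV4
# signing key with the _sign helper above.  The secret, date, region and
# service below are made-up example values.
def _example_sigv4_signing_key():
    secret = 'wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY'
    k_date = _sign('AWS4' + secret, '20150830')
    k_region = _sign(k_date, 'us-east-1')
    k_service = _sign(k_region, 'iam')
    k_signing = _sign(k_service, 'aws4_request')
    # the final signature would be _sign(k_signing, string_to_sign, hex=True)
    return k_signing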
class AWSDriver(BaseDriver):
def __init__(self, key, secret=None, secure=True, host=None, port=None,
api_version=None, region=None, token=None, **kwargs):
self.token = token
super(AWSDriver, self).__init__(key, secret=secret, secure=secure,
host=host, port=port,
api_version=api_version, region=region,
token=token, **kwargs)
def _ex_connection_class_kwargs(self):
kwargs = super(AWSDriver, self)._ex_connection_class_kwargs()
kwargs['token'] = self.token
return kwargs
| {
"content_hash": "9e459e3a91fa3a854cef07044720f389",
"timestamp": "",
"source": "github",
"line_count": 412,
"max_line_length": 79,
"avg_line_length": 37.8252427184466,
"alnum_prop": 0.5251540041067762,
"repo_name": "Scalr/libcloud",
"id": "2f7a1c5d3ca993376ddd69496589bf9013a5d3bc",
"size": "16366",
"binary": false,
"copies": "1",
"ref": "refs/heads/trunk",
"path": "libcloud/common/aws.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "859"
},
{
"name": "HTML",
"bytes": "2545"
},
{
"name": "PowerShell",
"bytes": "410"
},
{
"name": "Python",
"bytes": "7402898"
},
{
"name": "Shell",
"bytes": "5933"
}
],
"symlink_target": ""
} |
"""Project class representing a project directory."""
from __future__ import absolute_import
from copy import deepcopy, copy
import os
from conda_kapsel.env_spec import (EnvSpec, _anaconda_default_env_spec, _find_importable_spec,
_find_out_of_sync_importable_spec)
from conda_kapsel.conda_meta_file import CondaMetaFile, META_DIRECTORY
from conda_kapsel.plugins.registry import PluginRegistry
from conda_kapsel.plugins.requirement import EnvVarRequirement
from conda_kapsel.plugins.requirements.conda_env import CondaEnvRequirement
from conda_kapsel.plugins.requirements.download import DownloadRequirement
from conda_kapsel.plugins.requirements.service import ServiceRequirement
from conda_kapsel.project_commands import ProjectCommand
from conda_kapsel.project_file import ProjectFile
from conda_kapsel.archiver import _list_relative_paths_for_unignored_project_files
from conda_kapsel.internal.py2_compat import is_string
from conda_kapsel.internal.simple_status import SimpleStatus
from conda_kapsel.internal.slugify import slugify
import conda_kapsel.internal.conda_api as conda_api
import conda_kapsel.internal.pip_api as pip_api
# These strings are used in the command line options to conda-kapsel,
# so changing them has back-compat consequences.
COMMAND_TYPE_CONDA_APP_ENTRY = 'conda_app_entry'
COMMAND_TYPE_SHELL = 'unix'
COMMAND_TYPE_WINDOWS = 'windows'
COMMAND_TYPE_NOTEBOOK = 'notebook'
COMMAND_TYPE_BOKEH_APP = 'bokeh_app'
ALL_COMMAND_TYPES = (COMMAND_TYPE_CONDA_APP_ENTRY, COMMAND_TYPE_SHELL, COMMAND_TYPE_WINDOWS, COMMAND_TYPE_NOTEBOOK,
COMMAND_TYPE_BOKEH_APP)
class ProjectProblem(object):
"""A possibly-autofixable problem with a project."""
def __init__(self, text, fix_prompt=None, fix_function=None, no_fix_function=None, only_a_suggestion=False):
"""Create a project problem."""
self.text = text
self.fix_prompt = fix_prompt
self.fix_function = fix_function
self.no_fix_function = no_fix_function
self.only_a_suggestion = only_a_suggestion
@property
def can_fix(self):
"""True if the problem can be auto-fixed."""
return self.fix_function is not None
def fix(self, project):
"""Perform the auto-fix."""
if self.fix_function is not None:
return self.fix_function(project)
else:
return None
def no_fix(self, project):
"""Take an action on deciding not to fix."""
if self.no_fix_function is not None:
return self.no_fix_function(project)
else:
return None
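# Illustrative sketch (not part of the original module): a minimal fixable
# problem.  The message, prompt and fix below are assumptions made for this
# example; real fix functions mutate project.project_file the way the ones
# used later in this file do.
def _example_fixable_problem():
    def _fix(project):
        project.project_file.set_value('name', 'renamed-project')
    return ProjectProblem(text="Project has no usable name.",
                          fix_prompt="Rename the project?",
                          fix_function=_fix)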
# given a list of mixed strings and ProjectProblem, make
# them all into ProjectProblem
def _make_problems_into_objects(problems):
new_problems = []
for p in problems:
if isinstance(p, ProjectProblem):
new_problems.append(p)
else:
new_problems.append(ProjectProblem(text=p))
return new_problems
class _ConfigCache(object):
def __init__(self, directory_path, registry):
self.directory_path = directory_path
if registry is None:
registry = PluginRegistry()
self.registry = registry
self.name = None
self.description = ''
self.icon = None
self.commands = dict()
self.default_command_name = None
self.project_file_count = 0
self.conda_meta_file_count = 0
self.env_specs = dict()
self.default_env_spec_name = None
self.global_base_env_spec = None
def update(self, project_file, conda_meta_file):
if project_file.change_count == self.project_file_count and \
conda_meta_file.change_count == self.conda_meta_file_count:
return
self.project_file_count = project_file.change_count
self.conda_meta_file_count = conda_meta_file.change_count
requirements = []
problems = []
project_exists = os.path.isdir(self.directory_path)
if not project_exists:
problems.append("Project directory '%s' does not exist." % self.directory_path)
if project_file.corrupted:
problems.append("%s has a syntax error that needs to be fixed by hand: %s" %
(project_file.filename, project_file.corrupted_error_message))
if conda_meta_file.corrupted:
problems.append("%s has a syntax error that needs to be fixed by hand: %s" %
(conda_meta_file.filename, conda_meta_file.corrupted_error_message))
if project_exists and not (project_file.corrupted or conda_meta_file.corrupted):
self._update_name(problems, project_file, conda_meta_file)
self._update_description(problems, project_file)
self._update_icon(problems, project_file, conda_meta_file)
# future: we could un-hardcode this so plugins can add stuff here
self._update_variables(requirements, problems, project_file)
self._update_downloads(requirements, problems, project_file)
self._update_services(requirements, problems, project_file)
self._update_env_specs(problems, project_file)
# this MUST be after we _update_variables since we may get CondaEnvRequirement
# options in the variables section, and after _update_env_specs
# since we use those
self._update_conda_env_requirements(requirements, problems, project_file)
# this MUST be after we update env reqs so we have the valid env spec names
self._update_commands(problems, project_file, conda_meta_file, requirements)
self._verify_command_dependencies(problems, project_file)
self.requirements = requirements
self.problems = _make_problems_into_objects(problems)
self.problem_strings = list([p.text for p in self.problems if not p.only_a_suggestion])
def _update_name(self, problems, project_file, conda_meta_file):
name = project_file.get_value('name', None)
if name is not None:
if not is_string(name):
problems.append("%s: name: field should have a string value not %r" % (project_file.filename, name))
name = None
elif len(name.strip()) == 0:
problems.append("%s: name: field is an empty or all-whitespace string." % (project_file.filename))
name = None
if name is None:
name = conda_meta_file.name
if name is not None and not is_string(name):
problems.append("%s: package: name: field should have a string value not %r" %
(conda_meta_file.filename, name))
name = None
if name is None:
name = os.path.basename(self.directory_path)
self.name = name
def _update_description(self, problems, project_file):
desc = project_file.get_value('description', None)
if desc is not None and not is_string(desc):
problems.append("%s: description: field should have a string value not %r" % (project_file.filename, desc))
desc = None
if desc is None:
desc = ''
self.description = desc
def _update_icon(self, problems, project_file, conda_meta_file):
icon = project_file.get_value('icon', None)
if icon is not None and not is_string(icon):
problems.append("%s: icon: field should have a string value not %r" % (project_file.filename, icon))
icon = None
if icon is None:
icon = conda_meta_file.icon
if icon is not None and not is_string(icon):
problems.append("%s: app: icon: field should have a string value not %r" %
(conda_meta_file.filename, icon))
icon = None
if icon is not None:
# relative to conda.recipe
icon = os.path.join(META_DIRECTORY, icon)
if icon is not None:
icon = os.path.join(self.directory_path, icon)
if not os.path.isfile(icon):
problems.append("Icon file %s does not exist." % icon)
icon = None
self.icon = icon
def _update_variables(self, requirements, problems, project_file):
variables = project_file.get_value("variables")
def check_conda_reserved(key):
if key in ('CONDA_DEFAULT_ENV', 'CONDA_ENV_PATH', 'CONDA_PREFIX'):
problems.append(("Environment variable %s is reserved for Conda's use, " +
"so it can't appear in the variables section.") % key)
return True
else:
return False
# variables: section can contain a list of var names or a dict from
# var names to options OR default values. it can also be missing
# entirely which is the same as empty.
if variables is None:
pass
elif isinstance(variables, dict):
for key in variables.keys():
if check_conda_reserved(key):
continue
if key.strip() == '':
problems.append("Variable name cannot be empty string, found: '{}' as name".format(key))
continue
raw_options = variables[key]
if raw_options is None:
options = {}
elif isinstance(raw_options, dict):
options = deepcopy(raw_options) # so we can modify it below
else:
options = dict(default=raw_options)
assert (isinstance(options, dict))
if EnvVarRequirement._parse_default(options, key, problems):
requirement = self.registry.find_requirement_by_env_var(key, options)
requirements.append(requirement)
elif isinstance(variables, list):
for item in variables:
if is_string(item):
if item.strip() == '':
problems.append("Variable name cannot be empty string, found: '{}' as name".format(item))
continue
if check_conda_reserved(item):
continue
requirement = self.registry.find_requirement_by_env_var(item, options=dict())
requirements.append(requirement)
else:
problems.append(
"variables section should contain environment variable names, {item} is not a string".format(
item=item))
else:
problems.append(
"variables section contains wrong value type {value}, should be dict or list of requirements".format(
value=variables))
def _update_downloads(self, requirements, problems, project_file):
downloads = project_file.get_value('downloads')
if downloads is None:
return
if not isinstance(downloads, dict):
problems.append("{}: 'downloads:' section should be a dictionary, found {}".format(project_file.filename,
repr(downloads)))
return
for varname, item in downloads.items():
if varname.strip() == '':
problems.append("Download name cannot be empty string, found: '{}' as name".format(varname))
continue
DownloadRequirement._parse(self.registry, varname, item, problems, requirements)
def _update_services(self, requirements, problems, project_file):
services = project_file.get_value('services')
if services is None:
return
if not isinstance(services, dict):
problems.append(("{}: 'services:' section should be a dictionary from environment variable to " +
"service type, found {}").format(project_file.filename, repr(services)))
return
for varname, item in services.items():
if varname.strip() == '':
problems.append("Service name cannot be empty string, found: '{}' as name".format(varname))
continue
ServiceRequirement._parse(self.registry, varname, item, problems, requirements)
def _update_env_specs(self, problems, project_file):
def _parse_string_list_with_special(parent_dict, key, what, special_filter):
items = parent_dict.get(key, [])
if not isinstance(items, (list, tuple)):
problems.append("%s: %s: value should be a list of %ss, not '%r'" %
(project_file.filename, key, what, items))
return ([], [])
cleaned = []
special = []
for item in items:
if is_string(item):
cleaned.append(item.strip())
elif special_filter(item):
special.append(item)
else:
problems.append("%s: %s: value should be a %s (as a string) not '%r'" %
(project_file.filename, key, what, item))
return (cleaned, special)
def _parse_string_list(parent_dict, key, what):
return _parse_string_list_with_special(parent_dict, key, what, special_filter=lambda x: False)[0]
def _parse_channels(parent_dict):
return _parse_string_list(parent_dict, 'channels', 'channel name')
def _parse_packages(parent_dict):
(deps, pip_dicts) = _parse_string_list_with_special(parent_dict, 'packages', 'package name',
lambda x: isinstance(x, dict) and ('pip' in x))
for dep in deps:
parsed = conda_api.parse_spec(dep)
if parsed is None:
problems.append("%s: invalid package specification: %s" % (project_file.filename, dep))
# note that multiple "pip:" dicts are allowed
pip_deps = []
for pip_dict in pip_dicts:
pip_list = _parse_string_list(pip_dict, 'pip', 'pip package name')
pip_deps.extend(pip_list)
for dep in pip_deps:
parsed = pip_api.parse_spec(dep)
if parsed is None:
problems.append("%s: invalid pip package specifier: %s" % (project_file.filename, dep))
return (deps, pip_deps)
(shared_deps, shared_pip_deps) = _parse_packages(project_file.root)
shared_channels = _parse_channels(project_file.root)
env_specs = project_file.get_value('env_specs', default={})
first_env_spec_name = None
env_specs_is_empty_or_missing = False # this should be iff it's an empty dict or absent entirely
# this one isn't in the env_specs dict
self.global_base_env_spec = EnvSpec(name=None,
conda_packages=shared_deps,
pip_packages=shared_pip_deps,
channels=shared_channels,
description="Global packages and channels",
inherit_from_names=(),
inherit_from=())
env_spec_attrs = dict()
if isinstance(env_specs, dict):
if len(env_specs) == 0:
env_specs_is_empty_or_missing = True
for (name, attrs) in env_specs.items():
if name.strip() == '':
problems.append("Environment spec name cannot be empty string, found: '{}' as name".format(name))
continue
description = attrs.get('description', None)
if description is not None and not is_string(description):
problems.append("{}: 'description' field of environment {} must be a string".format(
project_file.filename, name))
continue
problem_count = len(problems)
inherit_from_names = attrs.get('inherit_from', None)
if inherit_from_names is None:
inherit_from_names = []
elif is_string(inherit_from_names):
inherit_from_names = [inherit_from_names.strip()]
else:
inherit_from_names = _parse_string_list(attrs, 'inherit_from', 'env spec name')
if len(problems) > problem_count:
# we got a new problem from the bad inherit_from
continue
(deps, pip_deps) = _parse_packages(attrs)
channels = _parse_channels(attrs)
env_spec_attrs[name] = dict(name=name,
conda_packages=deps,
pip_packages=pip_deps,
channels=channels,
description=description,
inherit_from_names=tuple(inherit_from_names),
inherit_from=())
if first_env_spec_name is None:
first_env_spec_name = name
else:
problems.append(
"%s: env_specs should be a dictionary from environment name to environment attributes, not %r" %
(project_file.filename, env_specs))
self.env_specs = dict()
def make_env_spec(name, trail):
assert name in env_spec_attrs
if name not in self.env_specs:
was_cycle = False
if name in trail:
problems.append(
"{}: 'inherit_from' fields create circular inheritance among these env specs: {}".format(
project_file.filename, ", ".join(sorted(trail))))
was_cycle = True
trail.append(name)
attrs = env_spec_attrs[name]
if not was_cycle:
inherit_from_names = attrs['inherit_from_names']
for parent in inherit_from_names:
if parent not in env_spec_attrs:
problems.append(("{}: name '{}' in 'inherit_from' field of env spec {} does not match " +
"the name of another env spec").format(project_file.filename, parent,
attrs['name']))
else:
inherit_from = make_env_spec(parent, trail)
attrs['inherit_from'] = attrs['inherit_from'] + (inherit_from, )
# All parent-less env specs get the global base spec as parent,
# which means the global base spec is in everyone's ancestry
if attrs['inherit_from'] == ():
attrs['inherit_from'] = (self.global_base_env_spec, )
self.env_specs[name] = EnvSpec(**attrs)
return self.env_specs[name]
for name in env_spec_attrs.keys():
make_env_spec(name, [])
assert name in self.env_specs
# it's important to create all the env specs when possible
# even if they are broken (e.g. bad inherit_from), so they
# can be edited in order to fix them
(importable_spec, importable_filename) = _find_out_of_sync_importable_spec(self.env_specs.values(),
self.directory_path)
if importable_spec is not None:
skip_spec_import = project_file.get_value(['skip_imports', 'environment'])
if skip_spec_import == importable_spec.channels_and_packages_hash:
importable_spec = None
if importable_spec is not None:
old = self.env_specs.get(importable_spec.name)
# this is a pretty bad hack, but if we injected "notebook"
# or "bokeh" deps to make a notebook/bokeh command work,
# we will end up out-of-sync for that reason
# alone. environment.yml seems to typically not have
# "notebook" in it because the environment.yml is used for
# the kernel but not Jupyter itself.
# We then end up in a problem loop where we complain about
# missing notebook dep, add it, then complain about environment.yml
# out of sync, and `conda-kapsel init` in a directory with a .ipynb
# and an environment.yml doesn't result in a valid project.
if importable_spec is not None and old is not None and \
importable_spec.diff_only_removes_notebook_or_bokeh(old):
importable_spec = None
if importable_spec is not None:
if old is None:
text = "Environment spec '%s' from %s is not in %s." % (importable_spec.name, importable_filename,
os.path.basename(project_file.filename))
prompt = "Add env spec %s to %s?" % (importable_spec.name, os.path.basename(project_file.filename))
else:
text = "Environment spec '%s' from %s is out of sync with %s. Diff:\n%s" % (
importable_spec.name, importable_filename, os.path.basename(project_file.filename),
importable_spec.diff_from(old))
prompt = "Overwrite env spec %s with the changes from %s?" % (importable_spec.name, importable_filename)
def overwrite_env_spec_from_importable(project):
project.project_file.set_value(['env_specs', importable_spec.name], importable_spec.to_json())
def remember_no_import_importable(project):
project.project_file.set_value(['skip_imports', 'environment'],
importable_spec.channels_and_packages_hash)
problems.append(ProjectProblem(text=text,
fix_prompt=prompt,
fix_function=overwrite_env_spec_from_importable,
no_fix_function=remember_no_import_importable))
elif env_specs_is_empty_or_missing:
# we do NOT want to add this problem if we merely
# failed to parse individual env specs; it must be
# safe to overwrite the env_specs key, so it has to
# be empty or missing entirely. Also, we do NOT want
# to add this if we are going to ask about environment.yml
# import, above.
def add_default_env_spec(project):
default_spec = _anaconda_default_env_spec(self.global_base_env_spec)
project.project_file.set_value(['env_specs', default_spec.name], default_spec.to_json())
problems.append(ProjectProblem(text=("%s has an empty env_specs section." % project_file.filename),
fix_prompt=("Add an environment spec to %s?" % os.path.basename(
project_file.filename)),
fix_function=add_default_env_spec))
# this is only used for commands that don't specify anything
# (when/if we require all commands to specify, then remove this.)
if 'default' in self.env_specs:
self.default_env_spec_name = 'default'
else:
self.default_env_spec_name = first_env_spec_name
def _update_conda_env_requirements(self, requirements, problems, project_file):
if problems:
return
env_requirement = CondaEnvRequirement(registry=self.registry, env_specs=self.env_specs)
requirements.append(env_requirement)
def _update_commands(self, problems, project_file, conda_meta_file, requirements):
failed = False
first_command_name = None
commands = dict()
commands_section = project_file.get_value('commands', None)
if commands_section is not None and not isinstance(commands_section, dict):
problems.append("%s: 'commands:' section should be a dictionary from command names to attributes, not %r" %
(project_file.filename, commands_section))
failed = True
elif commands_section is not None:
for (name, attrs) in commands_section.items():
if name.strip() == '':
problems.append("Command variable name cannot be empty string, found: '{}' as name".format(name))
failed = True
continue
if first_command_name is None:
first_command_name = name
if not isinstance(attrs, dict):
problems.append("%s: command name '%s' should be followed by a dictionary of attributes not %r" %
(project_file.filename, name, attrs))
failed = True
continue
if 'description' in attrs and not is_string(attrs['description']):
problems.append("{}: 'description' field of command {} must be a string".format(
project_file.filename, name))
failed = True
if 'supports_http_options' in attrs and not isinstance(attrs['supports_http_options'], bool):
problems.append("{}: 'supports_http_options' field of command {} must be a boolean".format(
project_file.filename, name))
failed = True
if 'env_spec' in attrs:
if not is_string(attrs['env_spec']):
problems.append(
"{}: 'env_spec' field of command {} must be a string (an environment spec name)".format(
project_file.filename, name))
failed = True
elif attrs['env_spec'] not in self.env_specs:
problems.append("%s: env_spec '%s' for command '%s' does not appear in the env_specs section" %
(project_file.filename, attrs['env_spec'], name))
failed = True
copied_attrs = deepcopy(attrs)
if 'env_spec' not in copied_attrs:
copied_attrs['env_spec'] = self.default_env_spec_name
command_types = []
for attr in ALL_COMMAND_TYPES:
if attr not in copied_attrs:
continue
# be sure we add this even if the command is broken, since it's
# confusing to say "does not have a command line in it" below
# if the issue is that the command line is broken.
command_types.append(attr)
if not is_string(copied_attrs[attr]):
problems.append("%s: command '%s' attribute '%s' should be a string not '%r'" %
(project_file.filename, name, attr, copied_attrs[attr]))
failed = True
if len(command_types) == 0:
problems.append("%s: command '%s' does not have a command line in it" %
(project_file.filename, name))
failed = True
if ('notebook' in copied_attrs or 'bokeh_app' in copied_attrs) and (len(command_types) > 1):
label = 'bokeh_app' if 'bokeh_app' in copied_attrs else 'notebook'
others = copy(command_types)
others.remove(label)
others = [("'%s'" % other) for other in others]
problems.append("%s: command '%s' has multiple commands in it, '%s' can't go with %s" %
(project_file.filename, name, label, ", ".join(others)))
failed = True
# note that once one command fails, we don't add any more
if not failed:
commands[name] = ProjectCommand(name=name, attributes=copied_attrs)
self._verify_notebook_commands(commands, problems, requirements, project_file)
if failed:
self.commands = dict()
self.default_command_name = None
else:
self.commands = commands
if 'default' in self.commands:
self.default_command_name = 'default'
else:
# 'default' is always mapped to the first-listed if none is named 'default'
# note: this may be None
self.default_command_name = first_command_name
def _verify_notebook_commands(self, commands, problems, requirements, project_file):
skipped_notebooks = project_file.get_value(['skip_imports', 'notebooks'])
if skipped_notebooks is not None:
if skipped_notebooks is True:
# skip ALL notebooks forever
return
elif not isinstance(skipped_notebooks, list):
problems.append("{}: 'skip_imports: notebooks:' value should be a list, found {}".format(
project_file.filename, repr(skipped_notebooks)))
return
else:
skipped_notebooks = []
files = _list_relative_paths_for_unignored_project_files(self.directory_path,
problems,
requirements=requirements)
if files is None:
assert problems != []
return
# chop out hidden directories. The
# main reason to ignore dot directories is that they
# might contain packages or git cache data or other
# such gunk, not because we really care about
# ".foo.ipynb" per se.
files = [f for f in files if not f[0] == '.']
# always use unix file separator
files = [f.replace("\\", "/") for f in files]
# use a deterministic order because the first command is the default
files = sorted(files)
def need_to_import_notebook(relative_name):
for command in commands.values():
if command.notebook == relative_name:
return False
if relative_name in skipped_notebooks:
return False
return True
def make_add_notebook_func(relative_name, env_spec_name):
def add_notebook(project):
command_dict = {'notebook': relative_name, 'env_spec': env_spec_name}
project.project_file.set_value(['commands', relative_name], command_dict)
return add_notebook
def make_no_add_notebook_func(relative_name):
def no_add_notebook(project):
skipped_notebooks = project.project_file.get_value(['skip_imports', 'notebooks'], default=[])
skipped_notebooks.append(relative_name)
project.project_file.set_value(['skip_imports', 'notebooks'], skipped_notebooks)
return no_add_notebook
need_to_import = []
for relative_name in files:
if relative_name.endswith('.ipynb'):
if need_to_import_notebook(relative_name):
need_to_import.append(relative_name)
# make tests deterministic
need_to_import.sort()
if len(need_to_import) == 1:
relative_name = need_to_import[0]
problem = ProjectProblem(
text="%s: No command runs notebook %s" % (project_file.filename, relative_name),
fix_prompt="Create a command in %s for %s?" % (os.path.basename(project_file.filename), relative_name),
fix_function=make_add_notebook_func(relative_name, self.default_env_spec_name),
no_fix_function=make_no_add_notebook_func(relative_name),
only_a_suggestion=True)
problems.append(problem)
elif len(need_to_import) > 1:
add_funcs = [make_add_notebook_func(relative_name, self.default_env_spec_name)
for relative_name in need_to_import]
no_add_funcs = [make_no_add_notebook_func(relative_name) for relative_name in need_to_import]
def add_all(project):
for f in add_funcs:
f(project)
def no_add_all(project):
for f in no_add_funcs:
f(project)
problem = ProjectProblem(
text="%s: No commands run notebooks %s" % (project_file.filename, ", ".join(need_to_import)),
fix_prompt="Create commands in %s for all missing notebooks?" % (os.path.basename(project_file.filename)
),
fix_function=add_all,
no_fix_function=no_add_all,
only_a_suggestion=True)
problems.append(problem)
def _verify_command_dependencies(self, problems, project_file):
for command in self.commands.values():
env_spec = self.env_specs[command.default_env_spec_name]
missing = command.missing_packages(env_spec)
if len(missing) > 0:
def add_packages_to_env_spec(project):
env_dict = project.project_file.get_value(['env_specs', env_spec.name])
assert env_dict is not None
packages = env_dict.get('packages', [])
for m in missing:
# m would already be in there if we fixed the same env spec
# twice because two commands used it, for example.
if m not in packages:
packages.append(m)
project.project_file.set_value(['env_specs', env_spec.name, 'packages'], packages)
problem = ProjectProblem(
text=("%s: Command %s uses env spec %s which does not have the packages: %s" % (
project_file.filename, command.name, env_spec.name, ", ".join(missing))),
fix_prompt=("Add %s to env spec %s in %s?" % (", ".join(missing), env_spec.name, os.path.basename(
project_file.filename))),
fix_function=add_packages_to_env_spec,
only_a_suggestion=True)
problems.append(problem)
class Project(object):
"""Represents the information we've inferred about a project.
The Project class encapsulates information from the project
file, and also anything else we've guessed by snooping around in
the project directory or global user configuration.
"""
def __init__(self, directory_path, plugin_registry=None):
"""Construct a Project with the given directory and plugin registry.
Args:
directory_path (str): path to the project directory
plugin_registry (PluginRegistry): where to look up Requirement and Provider instances, None for default
"""
self._directory_path = os.path.realpath(directory_path)
def load_default_specs():
(importable_spec, importable_filename) = _find_importable_spec(directory_path)
if importable_spec is not None:
return [importable_spec]
else:
return [_anaconda_default_env_spec(shared_base_spec=None)]
self._project_file = ProjectFile.load_for_directory(directory_path, default_env_specs_func=load_default_specs)
self._conda_meta_file = CondaMetaFile.load_for_directory(directory_path)
self._directory_basename = os.path.basename(self._directory_path)
self._config_cache = _ConfigCache(self._directory_path, plugin_registry)
def _updated_cache(self):
self._config_cache.update(self._project_file, self._conda_meta_file)
return self._config_cache
@property
def directory_path(self):
"""Get path to the project directory."""
return self._directory_path
@property
def project_file(self):
"""Get the ``ProjectFile`` for this project."""
return self._project_file
@property
def plugin_registry(self):
"""Get the ``PluginRegistry`` for this project."""
return self._config_cache.registry
@property
def conda_meta_file(self):
"""Get the ``CondaMetaFile`` for this project."""
return self._conda_meta_file
@property
def requirements(self):
"""Required items in order to run this project (list of ``Requirement`` instances)."""
return self._updated_cache().requirements
@property
def service_requirements(self):
"""All requirements that are ServiceRequirement instances."""
return self.find_requirements(klass=ServiceRequirement)
@property
def download_requirements(self):
"""All requirements that are DownloadRequirement instances."""
return self.find_requirements(klass=DownloadRequirement)
@property
def all_variable_requirements(self):
"""All requirements that have an associated environment variable.
Note: this will include services, downloads, and even CondaEnvRequirement.
"""
return self.find_requirements(klass=EnvVarRequirement)
@property
def plain_variable_requirements(self):
"""All 'plain' variables (that aren't services, downloads, or a Conda environment for example).
Use the ``all_variable_requirements`` property to get every variable.
"""
return [req for req in self.all_variable_requirements if req.__class__ is EnvVarRequirement]
def find_requirements(self, env_var=None, klass=None):
"""Find requirements that match the given env var and class.
If env_var and klass are both provided, BOTH must match.
Args:
env_var (str): if not None, filter requirements that have this env_var
klass (class): if not None, filter requirements that are an instance of this class
Returns:
list of matching requirements (may be empty)
"""
found = []
for req in self.requirements:
if env_var is not None and not (isinstance(req, EnvVarRequirement) and req.env_var == env_var):
continue
if klass is not None and not isinstance(req, klass):
continue
found.append(req)
return found
@property
def problems(self):
"""List of strings describing problems with the project configuration.
This list contains problems which keep the project from loading, such as corrupt
config files; it does not contain missing requirements and other "expected"
problems.
"""
return self._updated_cache().problem_strings
@property
def problem_objects(self):
"""List of ProjectProblem instances describing problems with the project configuration."""
return [problem for problem in self._updated_cache().problems if not problem.only_a_suggestion]
@property
def fixable_problems(self):
"""List of ProjectProblem that have associated fix prompts."""
return [p for p in self.problem_objects if p.can_fix and not p.only_a_suggestion]
def problems_status(self, description=None):
"""Get a ``Status`` describing project problems, or ``None`` if no problems."""
if len(self.problems) > 0:
errors = []
for problem in self.problems:
errors.append(problem)
if description is None:
description = "Unable to load the project."
return SimpleStatus(success=False, description=description, logs=[], errors=errors)
else:
return None
@property
def suggestions(self):
"""List of strings describing suggested changes to the project configuration."""
return [problem.text for problem in self.suggestion_objects]
@property
def suggestion_objects(self):
"""List of ProjectProblem instances describing suggested changes to the project configuration."""
return [problem for problem in self._updated_cache().problems if problem.only_a_suggestion]
def fix_problems_and_suggestions(self):
"""Fix fixable problems and suggestions."""
# the idea of this loop is that by fixing a problem we may
# create a new one, for example we add a notebook command
# and then the env spec needs to depend on "notebook".
# However, we have no real way to detect an infinite
# ping-pong of mutually-causing problems, so we cap
# the iterations at an arbitrary number.
iterations = 5
while iterations > 0:
fixed_a_thing = False
for problem in self._updated_cache().problems:
if problem.can_fix:
problem.fix(self)
fixed_a_thing = True
if fixed_a_thing:
self.project_file.use_changes_without_saving()
iterations -= 1
@property
def name(self):
"""Get the project's human-readable name.
Prefers in order: `name` field from kapsel.yml, `package:
name:` from meta.yaml, then project directory name.
"""
return self._updated_cache().name
@property
def url_friendly_name(self):
"""Get the project's url-friendly name."""
return slugify(self.name)
@property
def description(self):
"""Get the project description."""
return self._updated_cache().description
@property
def icon(self):
"""Get the project's icon as an absolute path or None if no icon.
Prefers in order: `icon` field from kapsel.yml, `app:
icon:` from meta.yaml.
"""
return self._updated_cache().icon
@property
def env_specs(self):
"""Get a dictionary of environment names to CondaEnvironment instances."""
return self._updated_cache().env_specs
@property
def global_base_env_spec(self):
"""Get the env spec representing global packages and channels sections.
This env spec has no name (its name is None) and can't be used directly
to create environments, but every other env spec inherits from it.
"""
return self._updated_cache().global_base_env_spec
@property
def all_variables(self):
"""Get a list of strings with the variables names from ``all_variable_requirements``."""
return [r.env_var for r in self.all_variable_requirements]
@property
def plain_variables(self):
"""Get a list of strings with the variables names from ``plain_variable_requirements``."""
return [r.env_var for r in self.plain_variable_requirements]
@property
def services(self):
"""Get a list of strings with the variable names for the project services requirements."""
return [r.env_var for r in self.service_requirements]
@property
def downloads(self):
"""Get a list of strings with the variable names for the project download requirements."""
return [r.env_var for r in self.download_requirements]
@property
def default_env_spec_name(self):
"""Get the named environment to use by default.
This will be the one named "default" if it exists, and
otherwise the first-listed one.
Note that each command may have its own default, so
this should only be used in contexts with no known
command.
"""
return self._updated_cache().default_env_spec_name
def default_env_spec_name_for_command(self, command):
"""Get the named environment to use by default for a given ProjectCommand.
        The command may be ``None``.
"""
if command is None:
return self.default_env_spec_name
else:
assert isinstance(command, ProjectCommand)
return command.default_env_spec_name
@property
def commands(self):
"""Get the dictionary of commands to run the project.
This dictionary can be empty.
Returns:
dictionary of command names to ``ProjectCommand``
"""
return self._updated_cache().commands
@property
def default_command(self):
"""Get the default ``ProjectCommand`` or None if we don't have one.
Returns:
the default ``ProjectCommand``
"""
cache = self._updated_cache()
if cache.default_command_name is None:
return None
else:
return cache.commands[cache.default_command_name]
def default_exec_info_for_environment(self, environ, extra_args=None):
"""Get the information needed to run the project's default command.
Args:
environ (dict): the environment
extra_args (list of str): extra args to append to the command line
Returns:
a CommandExecInfo instance
"""
command = self.default_command
if command is None:
return None
else:
return command.exec_info_for_environment(environ=environ, extra_args=extra_args)
def command_for_name(self, command_name):
"""Get the ProjectCommand for the given command name, or None if no commands.
Args:
command_name (str): the command name
Returns:
a ProjectCommand instance or None
"""
if command_name is None:
command_name = self._updated_cache().default_command_name
if command_name is None:
return None
if command_name in self._updated_cache().commands:
return self._updated_cache().commands[command_name]
else:
return None
def publication_info(self):
"""Get JSON-serializable information to be stored as metadata when publishing the project.
This is a "baked" version of kapsel.yml which also
includes any defaults or automatic configuration.
Before calling this, check that Project.problems is empty.
Returns:
A dictionary containing JSON-compatible types.
"""
json = dict()
json['name'] = self.name
# the recipient will have to validate this; including it here
# mostly because we might eventually allow the kapsel.yml to
# manually provide it.
json['url_friendly_name'] = self.url_friendly_name
json['description'] = self.description
commands = dict()
for key, command in self.commands.items():
commands[key] = dict(description=command.description)
if command.bokeh_app is not None:
commands[key]['bokeh_app'] = command.bokeh_app
if command.notebook is not None:
commands[key]['notebook'] = command.notebook
if command is self.default_command:
commands[key]['default'] = True
commands[key]['env_spec'] = command.default_env_spec_name
commands[key]['supports_http_options'] = command.supports_http_options
json['commands'] = commands
envs = dict()
for key, env in self.env_specs.items():
envs[key] = dict(packages=list(env.conda_packages),
channels=list(env.channels),
description=env.description)
json['env_specs'] = envs
variables = dict()
downloads = dict()
services = dict()
for req in self.requirements:
if isinstance(req, CondaEnvRequirement):
continue
elif isinstance(req, DownloadRequirement):
downloads[req.env_var] = dict(title=req.title,
description=req.description,
encrypted=req.encrypted,
url=req.url)
elif isinstance(req, ServiceRequirement):
services[req.env_var] = dict(title=req.title, description=req.description, type=req.service_type)
elif isinstance(req, EnvVarRequirement):
variables[req.env_var] = dict(title=req.title, description=req.description, encrypted=req.encrypted)
json['downloads'] = downloads
json['variables'] = variables
json['services'] = services
return json
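# Illustrative sketch (not part of the original module): typical read-only use
# of the Project class.  The directory path is a made-up example value.
def _example_load_project():
    project = Project('/tmp/some-kapsel-project')
    status = project.problems_status()
    if status is not None:
        # could not load cleanly; status.errors lists what to fix by hand
        return status.errors
    command = project.default_command
    return (project.name, None if command is None else command.name)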
| {
"content_hash": "c332445717d93742ede7adc2d0bce10d",
"timestamp": "",
"source": "github",
"line_count": 1104,
"max_line_length": 120,
"avg_line_length": 44.52445652173913,
"alnum_prop": 0.5734309836232326,
"repo_name": "conda/kapsel",
"id": "a5b86288fcdeb95747ae01be73d8e8cb85a68c1e",
"size": "49486",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "conda_kapsel/project.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "107"
},
{
"name": "Python",
"bytes": "1285062"
},
{
"name": "Shell",
"bytes": "378"
}
],
"symlink_target": ""
} |
from cryptography.fernet import Fernet
from girder.utility import config
from girder.utility.model_importer import ModelImporter
from girder.exceptions import AccessException
from girder.plugins.minerva.constants import PluginSettings
def findNamedFolder(currentUser, user, parent, parentType, name, create=False,
joinShareGroup=None, public=False):
folders = \
[ModelImporter.model('folder').filter(folder, currentUser) for folder in
ModelImporter.model('folder').childFolders(
parent=parent, parentType=parentType,
user=currentUser, filters={'name': name})]
# folders should have len of 0 or 1, since we are looking in a
# user folder for a folder with a certain name
if len(folders) == 0:
if create and currentUser:
folder = ModelImporter.model('folder').createFolder(
parent, name, parentType=parentType, public=public,
creator=currentUser)
if joinShareGroup:
groupModel = ModelImporter.model('group')
datasetSharingGroup = groupModel.findOne(query={
'name': PluginSettings.DATASET_SHARING_GROUP_NAME
})
ModelImporter.model('folder').setGroupAccess(
folder, datasetSharingGroup, 0, currentUser=currentUser, save=True)
return folder
else:
return None
else:
return folders[0]
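# Illustrative sketch (not part of the original plugin): locating (or lazily
# creating) a private folder directly under a user.  The folder name 'Notes'
# is a made-up example value.
def _example_find_notes_folder(currentUser):
    return findNamedFolder(currentUser, currentUser, currentUser, 'user',
                           'Notes', create=True, public=False)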
def findMinervaFolder(currentUser, user, create=False):
return findNamedFolder(currentUser, user, user, 'user',
PluginSettings.MINERVA_FOLDER, create)
def findPublicFolder(currentUser, user, create=False):
return findNamedFolder(currentUser, user, user, 'user',
'Public', create)
def findSharedFolder(currentUser, user, create=False):
minervaSharedFolder = findNamedFolder(
currentUser, user, user, 'user', PluginSettings.MINERVA_SHARED_DATASET,
create, joinShareGroup=True, public=False)
return minervaSharedFolder
def findDatasetFolder(currentUser, user, create=False):
minervaFolder = findMinervaFolder(currentUser, user, create)
if minervaFolder is None:
return minervaFolder
else:
return findNamedFolder(currentUser, user, minervaFolder, 'folder',
PluginSettings.DATASET_FOLDER, create)
def findSharedDatasetFolders(currentUser):
folderModel = ModelImporter.model('folder')
groupModel = ModelImporter.model('group')
datasetSharingGroup = groupModel.findOne(query={
'name': PluginSettings.DATASET_SHARING_GROUP_NAME
})
if not datasetSharingGroup:
raise AccessException('user group "{0}" doesn\'t exist'.format(
PluginSettings.DATASET_SHARING_GROUP_NAME))
if datasetSharingGroup['_id'] not in currentUser['groups']:
raise AccessException('user doesn\'t belong to user group "{0}"'.format(
PluginSettings.DATASET_SHARING_GROUP_NAME))
folders = folderModel.find({
'baseParentType': 'user',
'parentCollection': 'user',
'access.groups.id': datasetSharingGroup['_id'],
'name': PluginSettings.MINERVA_SHARED_DATASET
})
return folders
def findSourceFolder(currentUser, user, create=False):
minervaFolder = findMinervaFolder(currentUser, user, create)
if minervaFolder is None:
return minervaFolder
else:
return findNamedFolder(currentUser, user, minervaFolder, 'folder',
PluginSettings.SOURCE_FOLDER, create)
def findSessionFolder(currentUser, user, create=False):
minervaFolder = findMinervaFolder(currentUser, user, create)
if minervaFolder is None:
return minervaFolder
else:
return findNamedFolder(currentUser, user, minervaFolder, 'folder',
PluginSettings.SESSION_FOLDER, create)
def findNamedCollection(currentUser, name, create=False):
collections = \
[ModelImporter.model('collection').filter(c, currentUser) for c in
ModelImporter.model('collection').textSearch(name, user=currentUser)]
# collections should have len of 0 or 1, since we are looking
# for a collection with a certain name
if len(collections) == 0:
if create:
return ModelImporter.model('collection').createCollection(
name, description='', public=True, creator=currentUser)
else:
return None
else:
return collections[0]
def findMinervaCollection(currentUser, create=False):
return findNamedCollection(currentUser, PluginSettings.MINERVA_COLLECTION,
create)
def findAnalysisFolder(currentUser, create=False):
minervaCollection = findMinervaCollection(currentUser, create)
if minervaCollection is None:
return None
else:
analysisFolder = findNamedFolder(currentUser, currentUser,
minervaCollection, 'collection',
'analysis', create, public=True)
return analysisFolder
def findAnalysisByName(currentUser, name):
analysisFolder = findAnalysisFolder(currentUser)
filters = {}
filters['$text'] = {
'$search': name
}
analyses = [ModelImporter.model('item').filter(item, currentUser)
for item in
ModelImporter.model('folder').childItems(folder=analysisFolder,
filters=filters)]
if len(analyses) > 0:
return analyses[0]
else:
return None
def mM(item, minerva_metadata=None):
if minerva_metadata is None:
if 'meta' not in item or 'minerva' not in item['meta']:
return {}
return item['meta']['minerva']
else:
return updateMinervaMetadata(item, minerva_metadata)
def updateMinervaMetadata(item, minerva_metadata):
if 'meta' not in item:
item['meta'] = {}
item['meta']['minerva'] = minerva_metadata
ModelImporter.model('item').setMetadata(item, item['meta'])
return item['meta']['minerva']
def decryptCredentials(credentials):
cur_config = config.getConfig()
key = cur_config['minerva']['crypto_key']
f = Fernet(key)
return f.decrypt(bytes(credentials))
def encryptCredentials(credentials):
cur_config = config.getConfig()
key = cur_config['minerva']['crypto_key']
f = Fernet(key)
return f.encrypt(bytes(credentials))
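# Illustrative sketch (not part of the original plugin): the two helpers above
# round-trip through Fernet, keyed by the 'crypto_key' entry in the minerva
# section of the Girder config.  The password literal is a made-up value.
def _example_credentials_roundtrip():
    token = encryptCredentials('s3cret-password')
    assert decryptCredentials(token) == 's3cret-password'
    return token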
def jobMM(job, minerva_metadata=None, save=True):
if minerva_metadata is None:
if 'meta' not in job or 'minerva' not in job['meta']:
return {}
return job['meta']['minerva']
else:
if 'meta' not in job:
job['meta'] = {}
job['meta']['minerva'] = minerva_metadata
if save:
ModelImporter.model('job', 'jobs').save(job)
return job['meta']['minerva']
def addJobOutput(job, output, output_type='dataset', save=True):
mm = jobMM(job)
outputs = mm.get('outputs', [])
job_output = None
if output_type == 'dataset':
job_output = {
'type': 'dataset',
'dataset_id': output.get('_id')
}
else:
raise NotImplementedError('unknown job output %s' % output_type)
outputs.append(job_output)
mm['outputs'] = outputs
jobMM(job, mm, save)
| {
"content_hash": "8323f2354983c699712d2ffe4803f073",
"timestamp": "",
"source": "github",
"line_count": 210,
"max_line_length": 87,
"avg_line_length": 35.44761904761905,
"alnum_prop": 0.6386351423965609,
"repo_name": "Kitware/minerva",
"id": "efed1b56f1a5609be49cb3cd2660ce631aadf3cd",
"size": "8232",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "server/utility/minerva_utility.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CMake",
"bytes": "4557"
},
{
"name": "CSS",
"bytes": "197"
},
{
"name": "HTML",
"bytes": "320"
},
{
"name": "JavaScript",
"bytes": "32194"
},
{
"name": "Mako",
"bytes": "2037"
},
{
"name": "Python",
"bytes": "210330"
}
],
"symlink_target": ""
} |
def test_simple(qibuild_action, record_messages):
# More complex tests should be written at a lower level
qibuild_action.create_project("world")
qibuild_action.create_project("hello", build_depends=["world"])
qibuild_action("depends", "hello")
| {
"content_hash": "f8e3567873f47044c40206c780181fef",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 67,
"avg_line_length": 52,
"alnum_prop": 0.7230769230769231,
"repo_name": "dmerejkowsky/qibuild",
"id": "a6e84472a3c5278d3240b3d4cfdaaea70fbdf8dc",
"size": "432",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/qibuild/test/test_qibuild_depends.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "7442"
},
{
"name": "C++",
"bytes": "22059"
},
{
"name": "CMake",
"bytes": "267118"
},
{
"name": "Java",
"bytes": "4132"
},
{
"name": "Makefile",
"bytes": "2222"
},
{
"name": "Nix",
"bytes": "563"
},
{
"name": "Python",
"bytes": "1145711"
},
{
"name": "Shell",
"bytes": "1085"
}
],
"symlink_target": ""
} |
from django.conf.urls import patterns, include, url
# Uncomment the next two lines to enable the admin:
# from django.contrib import admin
# admin.autodiscover()
urlpatterns = patterns('',
# Examples:
# url(r'^$', 'opensourcedjango.views.home', name='home'),
# url(r'^opensourcedjango/', include('opensourcedjango.foo.urls')),
# Uncomment the admin/doc line below to enable admin documentation:
# url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Uncomment the next line to enable the admin:
# url(r'^admin/', include(admin.site.urls)),
)
| {
"content_hash": "d8794c41184988940e49e2996df49fa4",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 71,
"avg_line_length": 34.470588235294116,
"alnum_prop": 0.6928327645051194,
"repo_name": "juliomenendez/opensourcedjango.com",
"id": "82eab6f0c0ba12a53788835691b1b9b35a3008e6",
"size": "586",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "opensourcedjango/opensourcedjango/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "88056"
},
{
"name": "JavaScript",
"bytes": "61782"
},
{
"name": "Python",
"bytes": "7488"
}
],
"symlink_target": ""
} |
import sys
import pytest
def supported():
import django.test.testcases
return hasattr(django.test.testcases, 'LiveServerThread')
class LiveServer(object):
"""The liveserver fixture
This is the object which is returned to the actual user when they
request the ``live_server`` fixture. The fixture handles creation
and stopping however.
"""
def __init__(self, addr):
try:
from django.test.testcases import LiveServerThread
except ImportError:
pytest.skip('live_server tests not supported in Django < 1.4')
from django.db import connections
connections_override = {}
for conn in connections.all():
# If using in-memory sqlite databases, pass the connections to
# the server thread.
if (conn.settings_dict['ENGINE'] == 'django.db.backends.sqlite3'
and conn.settings_dict['NAME'] == ':memory:'):
# Explicitly enable thread-shareability for this connection
conn.allow_thread_sharing = True
connections_override[conn.alias] = conn
try:
from django.test.testcases import _StaticFilesHandler
static_handler_kwargs = {'static_handler': _StaticFilesHandler}
except ImportError:
static_handler_kwargs = {}
host, possible_ports = parse_addr(addr)
self.thread = LiveServerThread(host, possible_ports,
connections_override=connections_override,
**static_handler_kwargs)
self.thread.daemon = True
self.thread.start()
self.thread.is_ready.wait()
if self.thread.error:
raise self.thread.error
def stop(self):
"""Stop the server"""
# .terminate() was added in Django 1.7
terminate = getattr(self.thread, 'terminate', lambda: None)
terminate()
self.thread.join()
@property
def url(self):
return 'http://%s:%s' % (self.thread.host, self.thread.port)
if sys.version_info < (3, 0):
def __unicode__(self):
return self.url
def __add__(self, other):
return unicode(self) + other
else:
def __str__(self):
return self.url
def __add__(self, other):
return str(self) + other
def __repr__(self):
return '<LiveServer listening at %s>' % self.url
def parse_addr(specified_address):
"""Parse the --liveserver argument into a host/IP address and port range"""
# This code is based on
# django.test.testcases.LiveServerTestCase.setUpClass
# The specified ports may be of the form '8000-8010,8080,9200-9300'
# i.e. a comma-separated list of ports or ranges of ports, so we break
# it down into a detailed list of all possible ports.
possible_ports = []
try:
host, port_ranges = specified_address.split(':')
for port_range in port_ranges.split(','):
# A port range can be of either form: '8000' or '8000-8010'.
extremes = list(map(int, port_range.split('-')))
assert len(extremes) in [1, 2]
if len(extremes) == 1:
# Port range of the form '8000'
possible_ports.append(extremes[0])
else:
# Port range of the form '8000-8010'
for port in range(extremes[0], extremes[1] + 1):
possible_ports.append(port)
except Exception:
raise Exception(
'Invalid address ("%s") for live server.' % specified_address)
return (host, possible_ports)
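# Illustrative sketch (not part of the original module): what parse_addr
# returns for a typical --liveserver value.
def _example_parse_addr():
    host, ports = parse_addr('localhost:8000-8002,8080')
    assert host == 'localhost'
    assert ports == [8000, 8001, 8002, 8080]
    return host, ports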
| {
"content_hash": "c397131409d86af5215a1bdfaff2e713",
"timestamp": "",
"source": "github",
"line_count": 107,
"max_line_length": 81,
"avg_line_length": 34.42056074766355,
"alnum_prop": 0.5848493076296497,
"repo_name": "blueyed/pytest_django",
"id": "ed03a1a405dd1a4d851315adb875603251e3dbd8",
"size": "3683",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pytest_django/live_server_helper.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
"""Implementation of JSONEncoder
"""
import re
try:
_speedups = __import__('simplejson._speedups')._speedups
c_encode_basestring_ascii = _speedups.encode_basestring_ascii
c_make_encoder = _speedups.make_encoder
except ImportError:
c_encode_basestring_ascii = None
c_make_encoder = None
ESCAPE = re.compile(r'[\x00-\x1f\\"\b\f\n\r\t]')
ESCAPE_ASCII = re.compile(r'([\\"]|[^\ -~])')
HAS_UTF8 = re.compile(r'[\x80-\xff]')
ESCAPE_DCT = {
'\\': '\\\\',
'"': '\\"',
'\b': '\\b',
'\f': '\\f',
'\n': '\\n',
'\r': '\\r',
'\t': '\\t',
}
for i in range(0x20):
ESCAPE_DCT.setdefault(chr(i), '\\u%04x' % (i,))
# Assume this produces an infinity on all machines (probably not guaranteed)
INFINITY = float('1e66666')
FLOAT_REPR = repr
def encode_basestring(s):
"""Return a JSON representation of a Python string
"""
def replace(match):
return ESCAPE_DCT[match.group(0)]
return '"' + ESCAPE.sub(replace, s) + '"'
def py_encode_basestring_ascii(s):
if isinstance(s, str) and HAS_UTF8.search(s) is not None:
s = s.decode('utf-8')
def replace(match):
s = match.group(0)
try:
return ESCAPE_DCT[s]
except KeyError:
n = ord(s)
if n < 0x10000:
return '\\u%04x' % (n,)
else:
# surrogate pair
n -= 0x10000
s1 = 0xd800 | ((n >> 10) & 0x3ff)
s2 = 0xdc00 | (n & 0x3ff)
return '\\u%04x\\u%04x' % (s1, s2)
return '"' + str(ESCAPE_ASCII.sub(replace, s)) + '"'
encode_basestring_ascii = c_encode_basestring_ascii or py_encode_basestring_ascii
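# Illustrative sketch (not part of the original module): the two escaping
# helpers above differ only in how non-ASCII text is represented.
def _example_escaping():
    assert encode_basestring(u'caf\xe9') == u'"caf\xe9"'
    assert encode_basestring_ascii(u'caf\xe9') == '"caf\\u00e9"'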
class JSONEncoder(object):
"""Extensible JSON <http://json.org> encoder for Python data structures.
Supports the following objects and types by default:
+-------------------+---------------+
| Python | JSON |
+===================+===============+
| dict | object |
+-------------------+---------------+
| list, tuple | array |
+-------------------+---------------+
| str, unicode | string |
+-------------------+---------------+
| int, long, float | number |
+-------------------+---------------+
| True | true |
+-------------------+---------------+
| False | false |
+-------------------+---------------+
| None | null |
+-------------------+---------------+
    To extend this to recognize other objects, subclass and implement a
    ``.default()`` method that returns a serializable object for ``o`` if
    possible; otherwise it should call the superclass implementation
    (to raise ``TypeError``).
"""
item_separator = ', '
key_separator = ': '
def __init__(self, skipkeys=False, ensure_ascii=True,
check_circular=True, allow_nan=True, sort_keys=False,
indent=None, separators=None, encoding='utf-8', default=None):
"""Constructor for JSONEncoder, with sensible defaults.
If skipkeys is False, then it is a TypeError to attempt
encoding of keys that are not str, int, long, float or None. If
skipkeys is True, such items are simply skipped.
If ensure_ascii is True, the output is guaranteed to be str
objects with all incoming unicode characters escaped. If
        ensure_ascii is False, the output will be a unicode object.
If check_circular is True, then lists, dicts, and custom encoded
objects will be checked for circular references during encoding to
prevent an infinite recursion (which would cause an OverflowError).
Otherwise, no such check takes place.
If allow_nan is True, then NaN, Infinity, and -Infinity will be
encoded as such. This behavior is not JSON specification compliant,
but is consistent with most JavaScript based encoders and decoders.
Otherwise, it will be a ValueError to encode such floats.
If sort_keys is True, then the output of dictionaries will be
sorted by key; this is useful for regression tests to ensure
that JSON serializations can be compared on a day-to-day basis.
If indent is a non-negative integer, then JSON array
elements and object members will be pretty-printed with that
indent level. An indent level of 0 will only insert newlines.
None is the most compact representation.
If specified, separators should be a (item_separator, key_separator)
tuple. The default is (', ', ': '). To get the most compact JSON
representation you should specify (',', ':') to eliminate whitespace.
If specified, default is a function that gets called for objects
that can't otherwise be serialized. It should return a JSON encodable
version of the object or raise a ``TypeError``.
If encoding is not None, then all input strings will be
transformed into unicode using that encoding prior to JSON-encoding.
The default is UTF-8.
"""
self.skipkeys = skipkeys
self.ensure_ascii = ensure_ascii
self.check_circular = check_circular
self.allow_nan = allow_nan
self.sort_keys = sort_keys
self.indent = indent
if separators is not None:
self.item_separator, self.key_separator = separators
if default is not None:
self.default = default
self.encoding = encoding
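
    # A brief sketch of how a couple of the options above interact (values
    # are illustrative; results shown as comments, not asserted anywhere):
    #
    #     JSONEncoder(separators=(',', ':')).encode({'a': 1})
    #         -> '{"a":1}'          (most compact form, no extra whitespace)
    #     JSONEncoder(sort_keys=True).encode({'b': 2, 'a': 1})
    #         -> '{"a": 1, "b": 2}' (keys emitted in sorted order)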
def default(self, o):
"""Implement this method in a subclass such that it returns
a serializable object for ``o``, or calls the base implementation
(to raise a ``TypeError``).
For example, to support arbitrary iterators, you could
implement default like this::
def default(self, o):
try:
iterable = iter(o)
except TypeError:
pass
else:
return list(iterable)
return JSONEncoder.default(self, o)
"""
raise TypeError("%r is not JSON serializable" % (o,))
def encode(self, o):
"""Return a JSON string representation of a Python data structure.
>>> JSONEncoder().encode({"foo": ["bar", "baz"]})
'{"foo": ["bar", "baz"]}'
"""
# This is for extremely simple cases and benchmarks.
if isinstance(o, basestring):
if isinstance(o, str):
_encoding = self.encoding
if (_encoding is not None
and not (_encoding == 'utf-8')):
o = o.decode(_encoding)
if self.ensure_ascii:
return encode_basestring_ascii(o)
else:
return encode_basestring(o)
# This doesn't pass the iterator directly to ''.join() because the
# exceptions aren't as detailed. The list call should be roughly
# equivalent to the PySequence_Fast that ''.join() would do.
chunks = self.iterencode(o, _one_shot=True)
if not isinstance(chunks, (list, tuple)):
chunks = list(chunks)
return ''.join(chunks)
def iterencode(self, o, _one_shot=False):
"""Encode the given object and yield each string
representation as available.
For example::
for chunk in JSONEncoder().iterencode(bigobject):
mysocket.write(chunk)
"""
if self.check_circular:
markers = {}
else:
markers = None
if self.ensure_ascii:
_encoder = encode_basestring_ascii
else:
_encoder = encode_basestring
if self.encoding != 'utf-8':
def _encoder(o, _orig_encoder=_encoder, _encoding=self.encoding):
if isinstance(o, str):
o = o.decode(_encoding)
return _orig_encoder(o)
def floatstr(o, allow_nan=self.allow_nan, _repr=FLOAT_REPR, _inf=INFINITY, _neginf=-INFINITY):
# Check for specials. Note that this type of test is processor- and/or
# platform-specific, so do tests which don't depend on the internals.
if o != o:
text = 'NaN'
elif o == _inf:
text = 'Infinity'
elif o == _neginf:
text = '-Infinity'
else:
return _repr(o)
if not allow_nan:
raise ValueError("Out of range float values are not JSON compliant: %r"
% (o,))
return text
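
        # Sketch of the special-value handling in floatstr above (assuming
        # the encoder's default allow_nan=True); shown as comments only:
        #   floatstr(float('nan'))   -> 'NaN'       (o != o is true only for NaN)
        #   floatstr(float('inf'))   -> 'Infinity'
        #   floatstr(-float('inf'))  -> '-Infinity'
        #   floatstr(1.5)            -> '1.5'       (falls through to repr)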
if _one_shot and c_make_encoder is not None and not self.indent and not self.sort_keys:
_iterencode = c_make_encoder(
markers, self.default, _encoder, self.indent,
self.key_separator, self.item_separator, self.sort_keys,
self.skipkeys, self.allow_nan)
else:
_iterencode = _make_iterencode(
markers, self.default, _encoder, self.indent, floatstr,
self.key_separator, self.item_separator, self.sort_keys,
self.skipkeys, _one_shot)
return _iterencode(o, 0)
def _make_iterencode(markers, _default, _encoder, _indent, _floatstr, _key_separator, _item_separator, _sort_keys, _skipkeys, _one_shot,
## HACK: hand-optimized bytecode; turn globals into locals
False=False,
True=True,
ValueError=ValueError,
basestring=basestring,
dict=dict,
float=float,
id=id,
int=int,
isinstance=isinstance,
list=list,
long=long,
str=str,
tuple=tuple,
):
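    # Binding these builtins and constants as default arguments turns them
    # into local names inside the closures below, which avoids repeated
    # global lookups in the hot encoding loops (a CPython-level
    # micro-optimization, as the HACK note above indicates).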
def _iterencode_list(lst, _current_indent_level):
if not lst:
yield '[]'
return
if markers is not None:
markerid = id(lst)
if markerid in markers:
raise ValueError("Circular reference detected")
markers[markerid] = lst
buf = '['
if _indent is not None:
_current_indent_level += 1
newline_indent = '\n' + (' ' * (_indent * _current_indent_level))
separator = _item_separator + newline_indent
buf += newline_indent
else:
newline_indent = None
separator = _item_separator
first = True
for value in lst:
if first:
first = False
else:
buf = separator
if isinstance(value, basestring):
yield buf + _encoder(value)
elif value is None:
yield buf + 'null'
elif value is True:
yield buf + 'true'
elif value is False:
yield buf + 'false'
elif isinstance(value, (int, long)):
yield buf + str(value)
elif isinstance(value, float):
yield buf + _floatstr(value)
else:
yield buf
if isinstance(value, (list, tuple)):
chunks = _iterencode_list(value, _current_indent_level)
elif isinstance(value, dict):
chunks = _iterencode_dict(value, _current_indent_level)
else:
chunks = _iterencode(value, _current_indent_level)
for chunk in chunks:
yield chunk
if newline_indent is not None:
_current_indent_level -= 1
yield '\n' + (' ' * (_indent * _current_indent_level))
yield ']'
if markers is not None:
del markers[markerid]
def _iterencode_dict(dct, _current_indent_level):
if not dct:
yield '{}'
return
if markers is not None:
markerid = id(dct)
if markerid in markers:
raise ValueError("Circular reference detected")
markers[markerid] = dct
yield '{'
if _indent is not None:
_current_indent_level += 1
newline_indent = '\n' + (' ' * (_indent * _current_indent_level))
item_separator = _item_separator + newline_indent
yield newline_indent
else:
newline_indent = None
item_separator = _item_separator
first = True
if _sort_keys:
items = dct.items()
items.sort(key=lambda kv: kv[0])
else:
items = dct.iteritems()
for key, value in items:
if isinstance(key, basestring):
pass
# JavaScript is weakly typed for these, so it makes sense to
# also allow them. Many encoders seem to do something like this.
elif isinstance(key, float):
key = _floatstr(key)
elif isinstance(key, (int, long)):
key = str(key)
elif key is True:
key = 'true'
elif key is False:
key = 'false'
elif key is None:
key = 'null'
elif _skipkeys:
continue
else:
raise TypeError("key %r is not a string" % (key,))
if first:
first = False
else:
yield item_separator
yield _encoder(key)
yield _key_separator
if isinstance(value, basestring):
yield _encoder(value)
elif value is None:
yield 'null'
elif value is True:
yield 'true'
elif value is False:
yield 'false'
elif isinstance(value, (int, long)):
yield str(value)
elif isinstance(value, float):
yield _floatstr(value)
else:
if isinstance(value, (list, tuple)):
chunks = _iterencode_list(value, _current_indent_level)
elif isinstance(value, dict):
chunks = _iterencode_dict(value, _current_indent_level)
else:
chunks = _iterencode(value, _current_indent_level)
for chunk in chunks:
yield chunk
if newline_indent is not None:
_current_indent_level -= 1
yield '\n' + (' ' * (_indent * _current_indent_level))
yield '}'
if markers is not None:
del markers[markerid]
def _iterencode(o, _current_indent_level):
if isinstance(o, basestring):
yield _encoder(o)
elif o is None:
yield 'null'
elif o is True:
yield 'true'
elif o is False:
yield 'false'
elif isinstance(o, (int, long)):
yield str(o)
elif isinstance(o, float):
yield _floatstr(o)
elif isinstance(o, (list, tuple)):
for chunk in _iterencode_list(o, _current_indent_level):
yield chunk
elif isinstance(o, dict):
for chunk in _iterencode_dict(o, _current_indent_level):
yield chunk
else:
if markers is not None:
markerid = id(o)
if markerid in markers:
raise ValueError("Circular reference detected")
markers[markerid] = o
o = _default(o)
for chunk in _iterencode(o, _current_indent_level):
yield chunk
if markers is not None:
del markers[markerid]
return _iterencode
| {
"content_hash": "4c9351b4d61fdfc98f3f31b153804663",
"timestamp": "",
"source": "github",
"line_count": 432,
"max_line_length": 136,
"avg_line_length": 36.4537037037037,
"alnum_prop": 0.5254000508001015,
"repo_name": "SRabbelier/Melange",
"id": "a33f4730a46d24c66aa5ba1e5e06829d62d8cb09",
"size": "15748",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "thirdparty/google_appengine/lib/simplejson/simplejson/encoder.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "400472"
},
{
"name": "C++",
"bytes": "20"
},
{
"name": "Java",
"bytes": "1496"
},
{
"name": "JavaScript",
"bytes": "1623582"
},
{
"name": "PHP",
"bytes": "1032"
},
{
"name": "Perl",
"bytes": "177565"
},
{
"name": "Python",
"bytes": "15317793"
},
{
"name": "Ruby",
"bytes": "59"
},
{
"name": "Shell",
"bytes": "15303"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gce
version_added: "1.4"
short_description: create or terminate GCE instances
description:
- Creates or terminates Google Compute Engine (GCE) instances. See
U(https://cloud.google.com/compute) for an overview.
Full install/configuration instructions for the gce* modules can
be found in the comments of ansible/test/gce_tests.py.
options:
image:
description:
- image string to use for the instance (default will follow latest
stable debian image)
required: false
default: "debian-8"
image_family:
description:
- image family from which to select the image. The most recent
non-deprecated image in the family will be used.
required: false
default: null
version_added: "2.4"
external_projects:
description:
- A list of other projects (accessible with the provisioning credentials)
to be searched for the image.
required: false
default: null
version_added: "2.4"
instance_names:
description:
- a comma-separated list of instance names to create or destroy
required: false
default: null
machine_type:
description:
- machine type to use for the instance, use 'n1-standard-1' by default
required: false
default: "n1-standard-1"
metadata:
description:
- a hash/dictionary of custom data for the instance;
'{"key":"value", ...}'
required: false
default: null
service_account_email:
version_added: "1.5.1"
description:
- service account email
required: false
default: null
service_account_permissions:
version_added: "2.0"
description:
- service account permissions (see
U(https://cloud.google.com/sdk/gcloud/reference/compute/instances/create),
--scopes section for detailed information)
required: false
default: null
choices: [
"bigquery", "cloud-platform", "compute-ro", "compute-rw",
"useraccounts-ro", "useraccounts-rw", "datastore", "logging-write",
"monitoring", "sql-admin", "storage-full", "storage-ro",
"storage-rw", "taskqueue", "userinfo-email"
]
pem_file:
version_added: "1.5.1"
description:
- path to the pem file associated with the service account email
This option is deprecated. Use 'credentials_file'.
required: false
default: null
credentials_file:
version_added: "2.1.0"
description:
- path to the JSON file associated with the service account email
default: null
required: false
project_id:
version_added: "1.5.1"
description:
- your GCE project ID
required: false
default: null
name:
description:
- either a name of a single instance or when used with 'num_instances',
the base name of a cluster of nodes
required: false
aliases: ['base_name']
num_instances:
description:
- can be used with 'name', specifies
the number of nodes to provision using 'name'
as a base name
required: false
version_added: "2.3"
network:
description:
- name of the network, 'default' will be used if not specified
required: false
default: "default"
subnetwork:
description:
- name of the subnetwork in which the instance should be created
required: false
default: null
version_added: "2.2"
persistent_boot_disk:
description:
- if set, create the instance with a persistent boot disk
required: false
default: "false"
disks:
description:
- a list of persistent disks to attach to the instance; a string value
gives the name of the disk; alternatively, a dictionary value can
define 'name' and 'mode' ('READ_ONLY' or 'READ_WRITE'). The first entry
will be the boot disk (which must be READ_WRITE).
required: false
default: null
version_added: "1.7"
state:
description:
- desired state of the resource
required: false
default: "present"
choices: ["active", "present", "absent", "deleted", "started", "stopped", "terminated"]
tags:
description:
- a comma-separated list of tags to associate with the instance
required: false
default: null
zone:
description:
- the GCE zone to use. The list of available zones is at U(https://cloud.google.com/compute/docs/regions-zones/regions-zones#available).
required: true
default: "us-central1-a"
ip_forward:
version_added: "1.9"
description:
- set to true if the instance can forward ip packets (useful for
gateways)
required: false
default: "false"
external_ip:
version_added: "1.9"
description:
- type of external ip, ephemeral by default; alternatively, a fixed gce ip or ip name can be given. Specify 'none' if no external ip is desired.
required: false
default: "ephemeral"
disk_auto_delete:
version_added: "1.9"
description:
- if set boot disk will be removed after instance destruction
required: false
default: "true"
preemptible:
version_added: "2.1"
description:
- if set to true, instances will be preemptible and time-limited.
(requires libcloud >= 0.20.0)
required: false
default: "false"
disk_size:
description:
- The size of the boot disk created for this instance (in GB)
required: false
default: 10
version_added: "2.3"
requirements:
- "python >= 2.6"
- "apache-libcloud >= 0.13.3, >= 0.17.0 if using JSON credentials,
>= 0.20.0 if using preemptible option"
notes:
- Either I(instance_names) or I(name) is required.
- JSON credentials strongly preferred.
author: "Eric Johnson (@erjohnso) <[email protected]>, Tom Melendez (@supertom) <[email protected]>"
'''
EXAMPLES = '''
# Basic provisioning example. Create a single Debian 8 instance in the
# us-central1-a Zone of the n1-standard-1 machine type.
# Create multiple instances by specifying multiple names, separated by
# commas in the instance_names field
# (e.g. my-test-instance1,my-test-instance2)
gce:
instance_names: my-test-instance1
zone: us-central1-a
machine_type: n1-standard-1
image: debian-8
state: present
service_account_email: "[email protected]"
credentials_file: "/path/to/your-key.json"
project_id: "your-project-name"
disk_size: 32
# Create a single instance of an image from the "my-base-image" image family
# in the us-central1-a Zone of the n1-standard-1 machine type.
# This image family is in the "my-other-project" GCP project.
gce:
instance_names: my-test-instance1
zone: us-central1-a
machine_type: n1-standard-1
image_family: my-base-image
external_projects:
- my-other-project
state: present
service_account_email: "[email protected]"
credentials_file: "/path/to/your-key.json"
project_id: "your-project-name"
disk_size: 32
# Create a single Debian 8 instance in the us-central1-a Zone
# Use existing disks, custom network/subnetwork, set service account permissions
# add tags and metadata.
gce:
instance_names: my-test-instance
zone: us-central1-a
machine_type: n1-standard-1
state: present
metadata: '{"db":"postgres", "group":"qa", "id":500}'
tags:
- http-server
- my-other-tag
disks:
- name: disk-2
mode: READ_WRITE
- name: disk-3
mode: READ_ONLY
disk_auto_delete: false
network: foobar-network
subnetwork: foobar-subnetwork-1
preemptible: true
ip_forward: true
service_account_permissions:
- storage-full
- taskqueue
- bigquery
- https://www.googleapis.com/auth/ndev.clouddns.readwrite
service_account_email: "[email protected]"
credentials_file: "/path/to/your-key.json"
project_id: "your-project-name"
---
# Example Playbook
- name: Compute Engine Instance Examples
hosts: localhost
vars:
service_account_email: "[email protected]"
credentials_file: "/path/to/your-key.json"
project_id: "your-project-name"
tasks:
- name: create multiple instances
# Basic provisioning example. Create multiple Debian 8 instances in the
# us-central1-a Zone of n1-standard-1 machine type.
gce:
instance_names: test1,test2,test3
zone: us-central1-a
machine_type: n1-standard-1
image: debian-8
state: present
service_account_email: "{{ service_account_email }}"
credentials_file: "{{ credentials_file }}"
project_id: "{{ project_id }}"
metadata : '{ "startup-script" : "apt-get update" }'
register: gce
- name: Save host data
add_host:
hostname: "{{ item.public_ip }}"
groupname: gce_instances_ips
with_items: "{{ gce.instance_data }}"
- name: Wait for SSH for instances
wait_for:
delay: 1
host: "{{ item.public_ip }}"
port: 22
state: started
timeout: 30
with_items: "{{ gce.instance_data }}"
- name: Configure Hosts
hosts: gce_instances_ips
become: yes
become_method: sudo
roles:
- my-role-one
- my-role-two
tags:
- config
- name: delete test-instances
# Basic termination of instance.
gce:
service_account_email: "{{ service_account_email }}"
credentials_file: "{{ credentials_file }}"
project_id: "{{ project_id }}"
instance_names: "{{ gce.instance_names }}"
zone: us-central1-a
state: absent
tags:
- delete
'''
import socket
import logging
try:
from ast import literal_eval
HAS_PYTHON26 = True
except ImportError:
HAS_PYTHON26 = False
try:
import libcloud
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
from libcloud.common.google import GoogleBaseError, QuotaExceededError, \
ResourceExistsError, ResourceInUseError, ResourceNotFoundError
from libcloud.compute.drivers.gce import GCEAddress
_ = Provider.GCE
HAS_LIBCLOUD = True
except ImportError:
HAS_LIBCLOUD = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.gce import gce_connect, unexpected_error_msg
from ansible.module_utils.gcp import get_valid_location
from ansible.module_utils.six.moves import reduce
def get_instance_info(inst):
"""Retrieves instance information from an instance object and returns it
as a dictionary.
"""
metadata = {}
if 'metadata' in inst.extra and 'items' in inst.extra['metadata']:
for md in inst.extra['metadata']['items']:
metadata[md['key']] = md['value']
try:
netname = inst.extra['networkInterfaces'][0]['network'].split('/')[-1]
    except Exception:
netname = None
try:
subnetname = inst.extra['networkInterfaces'][0]['subnetwork'].split('/')[-1]
    except Exception:
subnetname = None
if 'disks' in inst.extra:
disk_names = [disk_info['source'].split('/')[-1]
for disk_info
in sorted(inst.extra['disks'],
key=lambda disk_info: disk_info['index'])]
else:
disk_names = []
if len(inst.public_ips) == 0:
public_ip = None
else:
public_ip = inst.public_ips[0]
return({
'image': inst.image is not None and inst.image.split('/')[-1] or None,
'disks': disk_names,
'machine_type': inst.size,
'metadata': metadata,
'name': inst.name,
'network': netname,
'subnetwork': subnetname,
'private_ip': inst.private_ips[0],
'public_ip': public_ip,
'status': ('status' in inst.extra) and inst.extra['status'] or None,
'tags': ('tags' in inst.extra) and inst.extra['tags'] or [],
'zone': ('zone' in inst.extra) and inst.extra['zone'].name or None,
})
def create_instances(module, gce, instance_names, number, lc_zone):
"""Creates new instances. Attributes other than instance_names are picked
up from 'module'
module : AnsibleModule object
gce: authenticated GCE libcloud driver
instance_names: python list of instance names to create
number: number of instances to create
lc_zone: GCEZone object
    Returns:
        A tuple (changed, instance_json_data, instance_names): a changed
        flag, a list of dictionaries with instance information, and the
        list of names of the instances that were launched.
"""
image = module.params.get('image')
image_family = module.params.get('image_family')
external_projects = module.params.get('external_projects')
machine_type = module.params.get('machine_type')
metadata = module.params.get('metadata')
network = module.params.get('network')
subnetwork = module.params.get('subnetwork')
persistent_boot_disk = module.params.get('persistent_boot_disk')
disks = module.params.get('disks')
tags = module.params.get('tags')
ip_forward = module.params.get('ip_forward')
external_ip = module.params.get('external_ip')
disk_auto_delete = module.params.get('disk_auto_delete')
preemptible = module.params.get('preemptible')
disk_size = module.params.get('disk_size')
service_account_permissions = module.params.get('service_account_permissions')
if external_ip == "none":
instance_external_ip = None
elif external_ip != "ephemeral":
instance_external_ip = external_ip
try:
# check if instance_external_ip is an ip or a name
try:
socket.inet_aton(instance_external_ip)
instance_external_ip = GCEAddress(id='unknown', name='unknown', address=instance_external_ip, region='unknown', driver=gce)
except socket.error:
instance_external_ip = gce.ex_get_address(instance_external_ip)
except GoogleBaseError as e:
module.fail_json(msg='Unexpected error attempting to get a static ip %s, error: %s' % (external_ip, e.value))
else:
instance_external_ip = external_ip
new_instances = []
changed = False
lc_disks = []
disk_modes = []
for i, disk in enumerate(disks or []):
if isinstance(disk, dict):
lc_disks.append(gce.ex_get_volume(disk['name'], lc_zone))
disk_modes.append(disk['mode'])
else:
lc_disks.append(gce.ex_get_volume(disk, lc_zone))
# boot disk is implicitly READ_WRITE
disk_modes.append('READ_ONLY' if i > 0 else 'READ_WRITE')
lc_network = gce.ex_get_network(network)
lc_machine_type = gce.ex_get_size(machine_type, lc_zone)
# Try to convert the user's metadata value into the format expected
# by GCE. First try to ensure user has proper quoting of a
# dictionary-like syntax using 'literal_eval', then convert the python
# dict into a python list of 'key' / 'value' dicts. Should end up
# with:
# [ {'key': key1, 'value': value1}, {'key': key2, 'value': value2}, ...]
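    # For instance (hypothetical input), metadata='{"db": "postgres"}' is
    # parsed into {'db': 'postgres'} and, on libcloud < 0.15, rewritten as
    # {'items': [{'key': 'db', 'value': 'postgres'}]}; on newer libcloud
    # the plain dict is passed through unchanged.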
if metadata:
if isinstance(metadata, dict):
md = metadata
else:
try:
md = literal_eval(str(metadata))
if not isinstance(md, dict):
raise ValueError('metadata must be a dict')
except ValueError as e:
module.fail_json(msg='bad metadata: %s' % str(e))
            except SyntaxError:
module.fail_json(msg='bad metadata syntax')
if hasattr(libcloud, '__version__') and libcloud.__version__ < '0.15':
items = []
for k, v in md.items():
items.append({"key": k, "value": v})
metadata = {'items': items}
else:
metadata = md
lc_image = LazyDiskImage(module, gce, image, lc_disks, family=image_family, projects=external_projects)
ex_sa_perms = []
bad_perms = []
if service_account_permissions:
for perm in service_account_permissions:
if perm not in gce.SA_SCOPES_MAP and not perm.startswith('https://www.googleapis.com/auth'):
bad_perms.append(perm)
if len(bad_perms) > 0:
module.fail_json(msg='bad permissions: %s' % str(bad_perms))
ex_sa_perms.append({'email': "default"})
ex_sa_perms[0]['scopes'] = service_account_permissions
# These variables all have default values but check just in case
if not lc_network or not lc_machine_type or not lc_zone:
module.fail_json(msg='Missing required create instance variable',
changed=False)
gce_args = dict(
location=lc_zone,
ex_network=network, ex_tags=tags, ex_metadata=metadata,
ex_can_ip_forward=ip_forward,
external_ip=instance_external_ip, ex_disk_auto_delete=disk_auto_delete,
ex_service_accounts=ex_sa_perms
)
if preemptible is not None:
gce_args['ex_preemptible'] = preemptible
if subnetwork is not None:
gce_args['ex_subnetwork'] = subnetwork
if isinstance(instance_names, str) and not number:
instance_names = [instance_names]
if isinstance(instance_names, str) and number:
instance_responses = gce.ex_create_multiple_nodes(instance_names, lc_machine_type,
lc_image(), number, **gce_args)
for resp in instance_responses:
n = resp
if isinstance(resp, libcloud.compute.drivers.gce.GCEFailedNode):
try:
n = gce.ex_get_node(n.name, lc_zone)
except ResourceNotFoundError:
pass
else:
# Assure that at least one node has been created to set changed=True
changed = True
new_instances.append(n)
else:
for instance in instance_names:
pd = None
if lc_disks:
pd = lc_disks[0]
elif persistent_boot_disk:
try:
pd = gce.ex_get_volume("%s" % instance, lc_zone)
except ResourceNotFoundError:
pd = gce.create_volume(disk_size, "%s" % instance, image=lc_image())
gce_args['ex_boot_disk'] = pd
inst = None
try:
inst = gce.ex_get_node(instance, lc_zone)
except ResourceNotFoundError:
inst = gce.create_node(
instance, lc_machine_type, lc_image(), **gce_args
)
changed = True
except GoogleBaseError as e:
module.fail_json(msg='Unexpected error attempting to create ' +
'instance %s, error: %s' % (instance, e.value))
if inst:
new_instances.append(inst)
for inst in new_instances:
for i, lc_disk in enumerate(lc_disks):
# Check whether the disk is already attached
if (len(inst.extra['disks']) > i):
attached_disk = inst.extra['disks'][i]
if attached_disk['source'] != lc_disk.extra['selfLink']:
module.fail_json(
msg=("Disk at index %d does not match: requested=%s found=%s" % (
i, lc_disk.extra['selfLink'], attached_disk['source'])))
elif attached_disk['mode'] != disk_modes[i]:
module.fail_json(
msg=("Disk at index %d is in the wrong mode: requested=%s found=%s" % (
i, disk_modes[i], attached_disk['mode'])))
else:
continue
gce.attach_volume(inst, lc_disk, ex_mode=disk_modes[i])
# Work around libcloud bug: attached volumes don't get added
# to the instance metadata. get_instance_info() only cares about
# source and index.
if len(inst.extra['disks']) != i+1:
inst.extra['disks'].append(
{'source': lc_disk.extra['selfLink'], 'index': i})
instance_names = []
instance_json_data = []
for inst in new_instances:
d = get_instance_info(inst)
instance_names.append(d['name'])
instance_json_data.append(d)
return (changed, instance_json_data, instance_names)
def change_instance_state(module, gce, instance_names, number, zone, state):
"""Changes the state of a list of instances. For example,
change from started to stopped, or started to absent.
module: Ansible module object
gce: authenticated GCE connection object
instance_names: a list of instance names to terminate
zone: GCEZone object where the instances reside prior to termination
state: 'state' parameter passed into module as argument
    Returns a tuple (changed, state_instance_names): a changed flag and the
    list of instance names that were affected.
"""
changed = False
nodes = []
state_instance_names = []
if isinstance(instance_names, str) and number:
node_names = ['%s-%03d' % (instance_names, i) for i in range(number)]
elif isinstance(instance_names, str) and not number:
node_names = [instance_names]
else:
node_names = instance_names
for name in node_names:
inst = None
try:
inst = gce.ex_get_node(name, zone)
except ResourceNotFoundError:
state_instance_names.append(name)
except Exception as e:
module.fail_json(msg=unexpected_error_msg(e), changed=False)
else:
nodes.append(inst)
state_instance_names.append(name)
if state in ['absent', 'deleted'] and number:
changed_nodes = gce.ex_destroy_multiple_nodes(nodes) or [False]
changed = reduce(lambda x, y: x or y, changed_nodes)
else:
for node in nodes:
if state in ['absent', 'deleted']:
gce.destroy_node(node)
changed = True
elif state == 'started' and \
node.state == libcloud.compute.types.NodeState.STOPPED:
gce.ex_start_node(node)
changed = True
elif state in ['stopped', 'terminated'] and \
node.state == libcloud.compute.types.NodeState.RUNNING:
gce.ex_stop_node(node)
changed = True
return (changed, state_instance_names)
def main():
module = AnsibleModule(
argument_spec = dict(
image = dict(default='debian-8'),
image_family = dict(),
external_projects = dict(type='list'),
instance_names = dict(),
machine_type = dict(default='n1-standard-1'),
metadata = dict(),
name = dict(aliases=['base_name']),
num_instances = dict(type='int'),
network = dict(default='default'),
subnetwork = dict(),
persistent_boot_disk = dict(type='bool', default=False),
disks = dict(type='list'),
state = dict(choices=['active', 'present', 'absent', 'deleted',
'started', 'stopped', 'terminated'],
default='present'),
tags = dict(type='list'),
zone = dict(default='us-central1-a'),
service_account_email = dict(),
service_account_permissions = dict(type='list'),
pem_file = dict(type='path'),
credentials_file = dict(type='path'),
project_id = dict(),
ip_forward = dict(type='bool', default=False),
external_ip=dict(default='ephemeral'),
disk_auto_delete = dict(type='bool', default=True),
disk_size = dict(type='int', default=10),
preemptible = dict(type='bool', default=None),
),
mutually_exclusive=[('instance_names', 'name')]
)
if not HAS_PYTHON26:
module.fail_json(msg="GCE module requires python's 'ast' module, python v2.6+")
if not HAS_LIBCLOUD:
module.fail_json(msg='libcloud with GCE support (0.17.0+) required for this module')
gce = gce_connect(module)
image = module.params.get('image')
image_family = module.params.get('image_family')
external_projects = module.params.get('external_projects')
instance_names = module.params.get('instance_names')
name = module.params.get('name')
number = module.params.get('num_instances')
subnetwork = module.params.get('subnetwork')
state = module.params.get('state')
zone = module.params.get('zone')
preemptible = module.params.get('preemptible')
changed = False
inames = None
if isinstance(instance_names, list):
inames = instance_names
elif isinstance(instance_names, str):
inames = instance_names.split(',')
if name:
inames = name
if not inames:
module.fail_json(msg='Must specify a "name" or "instance_names"',
changed=False)
if not zone:
module.fail_json(msg='Must specify a "zone"', changed=False)
lc_zone = get_valid_location(module, gce, zone)
if preemptible is not None and hasattr(libcloud, '__version__') and libcloud.__version__ < '0.20':
module.fail_json(msg="Apache Libcloud 0.20.0+ is required to use 'preemptible' option",
changed=False)
if subnetwork is not None and not hasattr(gce, 'ex_get_subnetwork'):
module.fail_json(msg="Apache Libcloud 1.0.0+ is required to use 'subnetwork' option",
changed=False)
json_output = {'zone': zone}
if state in ['absent', 'deleted', 'started', 'stopped', 'terminated']:
json_output['state'] = state
(changed, state_instance_names) = change_instance_state(
module, gce, inames, number, lc_zone, state)
        # based on what the user specified, return the same variable,
        # although its value could be different if an instance could not
        # be destroyed
        if instance_names or (name and number):
json_output['instance_names'] = state_instance_names
elif name:
json_output['name'] = name
elif state in ['active', 'present']:
json_output['state'] = 'present'
(changed, instance_data, instance_name_list) = create_instances(
module, gce, inames, number, lc_zone)
json_output['instance_data'] = instance_data
if instance_names:
json_output['instance_names'] = instance_name_list
elif name:
json_output['name'] = name
json_output['changed'] = changed
module.exit_json(**json_output)
class LazyDiskImage:
"""
Object for lazy instantiation of disk image
gce.ex_get_image is a very expensive call, so we want to avoid calling it as much as possible.
"""
def __init__(self, module, gce, name, has_pd, family=None, projects=None):
self.image = None
self.was_called = False
self.gce = gce
self.name = name
self.has_pd = has_pd
self.module = module
self.family = family
self.projects = projects
def __call__(self):
if not self.was_called:
self.was_called = True
if not self.has_pd:
if self.family:
self.image = self.gce.ex_get_image_from_family(self.family, ex_project_list=self.projects)
else:
self.image = self.gce.ex_get_image(self.name, ex_project_list=self.projects)
if not self.image:
self.module.fail_json(msg='image or disks missing for create instance', changed=False)
return self.image
if __name__ == '__main__':
main()
| {
"content_hash": "4c4f7676989881d8e7ab78b467e40114",
"timestamp": "",
"source": "github",
"line_count": 774,
"max_line_length": 150,
"avg_line_length": 36.33204134366925,
"alnum_prop": 0.6070907862451549,
"repo_name": "e-gob/plataforma-kioscos-autoatencion",
"id": "9aab4cf6ca00eae21d0e52366e354925e0761b23",
"size": "28262",
"binary": false,
"copies": "25",
"ref": "refs/heads/master",
"path": "scripts/ansible-play/.venv/lib/python2.7/site-packages/ansible/modules/cloud/google/gce.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "41110"
},
{
"name": "C++",
"bytes": "3804"
},
{
"name": "CSS",
"bytes": "34823"
},
{
"name": "CoffeeScript",
"bytes": "8521"
},
{
"name": "HTML",
"bytes": "61168"
},
{
"name": "JavaScript",
"bytes": "7206"
},
{
"name": "Makefile",
"bytes": "1347"
},
{
"name": "PowerShell",
"bytes": "584344"
},
{
"name": "Python",
"bytes": "25506593"
},
{
"name": "Ruby",
"bytes": "245726"
},
{
"name": "Shell",
"bytes": "5075"
}
],
"symlink_target": ""
} |
"""
======================================
Plotting sensor layouts of MEG systems
======================================
In this example, sensor layouts of different MEG systems are shown.
"""
# Author: Eric Larson <[email protected]>
#
# License: BSD (3-clause)
import os.path as op
from mayavi import mlab
import mne
from mne.io import read_raw_fif, read_raw_ctf, read_raw_bti, read_raw_kit
from mne.io import read_raw_artemis123
from mne.datasets import sample, spm_face, testing
from mne.viz import plot_alignment
print(__doc__)
bti_path = op.abspath(op.dirname(mne.__file__)) + '/io/bti/tests/data/'
kit_path = op.abspath(op.dirname(mne.__file__)) + '/io/kit/tests/data/'
raws = {
'Neuromag': read_raw_fif(sample.data_path() +
'/MEG/sample/sample_audvis_raw.fif'),
'CTF 275': read_raw_ctf(spm_face.data_path() +
'/MEG/spm/SPM_CTF_MEG_example_faces1_3D.ds'),
'Magnes 3600wh': read_raw_bti(op.join(bti_path, 'test_pdf_linux'),
op.join(bti_path, 'test_config_linux'),
op.join(bti_path, 'test_hs_linux')),
'KIT': read_raw_kit(op.join(kit_path, 'test.sqd')),
'Artemis123': read_raw_artemis123(op.join(
testing.data_path(), 'ARTEMIS123',
'Artemis_Data_2017-04-14-10h-38m-59s_Phantom_1k_HPI_1s.bin')),
}
for system, raw in sorted(raws.items()):
meg = ['helmet', 'sensors']
# We don't have coil definitions for KIT refs, so exclude them
if system != 'KIT':
meg.append('ref')
fig = plot_alignment(raw.info, trans=None, dig=False, eeg=False,
surfaces=[], meg=meg, coord_frame='meg',
verbose=True)
text = mlab.title(system)
text.x_position = 0.5
text.y_position = 0.95
text.property.vertical_justification = 'top'
text.property.justification = 'center'
text.actor.text_scale_mode = 'none'
text.property.bold = True
text.property.font_size = 40
mlab.draw(fig)
| {
"content_hash": "4d71ee9a24b0851cc6dd751c0c842d03",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 73,
"avg_line_length": 36.375,
"alnum_prop": 0.5895925380461463,
"repo_name": "teonlamont/mne-python",
"id": "8a1bbee0a2bf3bb63e445e175d8ed8befdd93f41",
"size": "2037",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "examples/visualization/plot_meg_sensors.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "3117"
},
{
"name": "PowerShell",
"bytes": "2988"
},
{
"name": "Python",
"bytes": "4354605"
},
{
"name": "Shell",
"bytes": "936"
}
],
"symlink_target": ""
} |
from datetime import date
from datetime import datetime
from unittest import TestCase
from unittest.mock import patch
import dateutil.relativedelta as rd
import pandas
from . import CoreCalendarTest, GenericCalendarTest
from ..core import Holiday
from ..core import (
MON, TUE, THU, FRI, WED, SAT, SUN,
ISO_TUE, ISO_FRI,
Calendar, LunarMixin, WesternCalendar,
CalverterMixin, IslamicMixin,
daterange,
)
from ..exceptions import UnsupportedDateType, CalendarError
class CalendarTest(CoreCalendarTest):
def test_private_variables(self):
self.assertTrue(hasattr(self.cal, '_holidays'))
private_holidays = self.cal._holidays
self.assertTrue(isinstance(private_holidays, dict))
self.cal.holidays(2011)
self.cal.holidays(2012)
private_holidays = self.cal._holidays
self.assertTrue(isinstance(private_holidays, dict))
self.assertIn(2011, self.cal._holidays)
self.assertIn(2012, self.cal._holidays)
def test_year(self):
holidays = self.cal.holidays()
self.assertTrue(isinstance(holidays, (tuple, list)))
self.assertEqual(self.cal._holidays[self.year], holidays)
def test_another_year(self):
holidays = self.cal.holidays(2011)
self.assertTrue(isinstance(holidays, (tuple, list)))
self.assertEqual(self.cal._holidays[2011], holidays)
def test_is_working_day(self):
self.assertRaises(
NotImplementedError,
self.cal.is_working_day, date(2012, 1, 1))
def test_nth_weekday(self):
# first monday in january 2013
self.assertEqual(
Calendar.get_nth_weekday_in_month(2013, 1, MON),
date(2013, 1, 7)
)
# second monday in january 2013
self.assertEqual(
Calendar.get_nth_weekday_in_month(2013, 1, MON, 2),
date(2013, 1, 14)
)
# let's test the limits
# Jan 1st is a TUE
self.assertEqual(
Calendar.get_nth_weekday_in_month(2013, 1, TUE),
date(2013, 1, 1)
)
# There's no 6th MONday
self.assertEqual(
Calendar.get_nth_weekday_in_month(2013, 1, MON, 6),
None
)
def test_nth_weekday_start(self):
# first thursday after 18th april
start = date(2013, 4, 18)
self.assertEqual(
Calendar.get_nth_weekday_in_month(2013, 4, THU, start=start),
date(2013, 4, 18)
)
# first friday after 18th april
start = date(2013, 4, 18)
self.assertEqual(
Calendar.get_nth_weekday_in_month(2013, 4, FRI, start=start),
date(2013, 4, 19)
)
def test_last_weekday(self):
# last monday in january 2013
self.assertEqual(
Calendar.get_last_weekday_in_month(2013, 1, MON),
date(2013, 1, 28)
)
# last thursday
self.assertEqual(
Calendar.get_last_weekday_in_month(2013, 1, THU),
date(2013, 1, 31)
)
def test_get_next_weekday_after(self):
# the first monday after Apr 1 2015
self.assertEqual(
Calendar.get_first_weekday_after(date(2015, 4, 1), MON),
date(2015, 4, 6)
)
# the first tuesday after Apr 14 2015
self.assertEqual(
Calendar.get_first_weekday_after(date(2015, 4, 14), TUE),
date(2015, 4, 14)
)
def test_get_iso_week_date(self):
# Find the MON of the week 1 in 2021
self.assertEqual(
Calendar.get_iso_week_date(2021, 1),
date(2021, 1, 4)
)
# Find the FRI of the week 1 in 2021
self.assertEqual(
Calendar.get_iso_week_date(2021, 1, ISO_FRI),
date(2021, 1, 8)
)
# Find the TUE of the week 44 in 2021
self.assertEqual(
Calendar.get_iso_week_date(2021, 44, ISO_TUE),
date(2021, 11, 2)
)
# Remove this test when dropping support for Python 3.7
@patch('calendra.core.sys')
def test_get_iso_week_date_patched(self, mock_sys):
# The Python 3.6-3.7 backport should always work
mock_sys.version_info = (3, 6, 0)
self.assertEqual(
Calendar.get_iso_week_date(2021, 44, ISO_TUE),
date(2021, 11, 2)
)
class LunarCalendarTest(TestCase):
def test_lunar_new_year(self):
self.assertEqual(
LunarMixin.lunar(2014, 1, 1),
date(2014, 1, 31)
)
class MockCalendar(Calendar):
def holidays(self, year=None):
return (
Holiday(date(year, 12, 25), 'Christmas'),
Holiday(date(year, 1, 1), 'New year'),
)
def get_weekend_days(self):
return [] # no week-end, yes, it's sad
class MockCalendarTest(CoreCalendarTest):
cal_class = MockCalendar
def test_holidays_set(self):
self.assertIn(
date(self.year, 12, 25), self.cal.holidays_set(self.year))
self.assertIn(
date(self.year, 1, 1), self.cal.holidays_set(self.year))
def test_sorted_dates(self):
holidays = list(self.cal.holidays(self.year))
day, label = holidays.pop()
for next_day, label in holidays:
self.assertTrue(day <= next_day)
day = next_day
def test_add_workingdays_simple(self):
# day is out of non-working-day
self.assertEqual(
self.cal.add_working_days(date(self.year, 12, 20), 0),
date(self.year, 12, 20)
)
self.assertEqual(
self.cal.add_working_days(date(self.year, 12, 20), 1),
date(self.year, 12, 21)
)
def test_add_workingdays_on_holiday(self):
# day is in holidays
self.assertEqual(
self.cal.add_working_days(date(self.year, 12, 25), 0),
date(self.year, 12, 25)
)
self.assertEqual(
self.cal.add_working_days(date(self.year, 12, 24), 1),
date(self.year, 12, 26)
)
self.assertEqual(
self.cal.add_working_days(date(self.year, 12, 24), 2),
date(self.year, 12, 27)
)
def test_add_workingdays_span(self):
day = date(self.year, 12, 20)
# since this calendar has no weekends, we'll just have a 2-day-shift
self.assertEqual(
self.cal.add_working_days(day, 20),
date(self.year + 1, 1, 11)
)
def test_add_working_days_exceptions(self):
day = date(self.year, 12, 20)
christmas = date(self.year, 12, 25)
boxing = date(self.year, 12, 26)
# exceptional workday
self.assertEqual(
self.cal.add_working_days(day, 20, extra_working_days=[christmas]),
date(self.year + 1, 1, 10)
)
# exceptional holiday + exceptional workday
self.assertEqual(
self.cal.add_working_days(day, 20,
extra_working_days=[christmas],
extra_holidays=[boxing]),
date(self.year + 1, 1, 11)
)
def test_add_exceptions(self):
december_20th = date(self.year, 12, 20)
christmas = date(self.year, 12, 25)
# target_working_day *is* a working day
target_working_day = self.cal.add_working_days(december_20th, 1)
# Add extra working days
extra_working_days = [christmas]
# add extra holidays
extra_holidays = [target_working_day]
self.assertFalse(self.cal.is_working_day(christmas))
self.assertTrue(
self.cal.is_working_day(christmas,
extra_working_days=extra_working_days))
self.assertTrue(self.cal.is_working_day(target_working_day))
self.assertFalse(
self.cal.is_working_day(target_working_day,
extra_holidays=extra_holidays))
# test is_holiday
self.assertTrue(self.cal.is_holiday(christmas))
def test_get_holiday_label(self):
self.assertEqual(
self.cal.get_holiday_label(date(2014, 1, 1)), 'New year')
self.assertIsNone(
self.cal.get_holiday_label(date(2014, 1, 2)))
def test_add_working_days_backwards(self):
day = date(self.year, 1, 3)
# since this calendar has no weekends, we'll just have a 1-day-shift
self.assertEqual(
self.cal.add_working_days(day, -7),
date(self.year - 1, 12, 26)
)
self.assertEqual(
self.cal.sub_working_days(day, 7),
date(self.year - 1, 12, 26)
)
# Negative argument to sub_working_days -> converted to positive.
self.assertEqual(
self.cal.sub_working_days(day, -7),
date(self.year - 1, 12, 26)
)
class SimpleObservanceCalendar(Calendar):
"""
A simple calendar with a couple of holidays with typical observance rules:
If a holiday falls on a weekend, then its observance is shifted to a
nearby weekday.
"""
include_new_years_day = False
FIXED_HOLIDAYS = (
Holiday(
date(2000, 12, 24), 'Christmas Eve', indication='December 24th',
observance_shift=dict(weekday=rd.FR(-1)),
),
Holiday(date(2000, 12, 25), 'Christmas', indication='December 25th'),
)
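    # With the shift above (dict(weekday=rd.FR(-1))), a Christmas Eve that
    # falls on a weekend is observed on the preceding Friday; Christmas
    # itself keeps the calendar's default shift (in ObservanceCalendarTest
    # below, the 2011 dates become Dec 23 and Dec 26 respectively).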
def get_weekend_days(self):
return SAT, SUN
class ObservanceCalendarTest(CoreCalendarTest):
"""
A simple calendar with days shifted for observance.
"""
cal_class = SimpleObservanceCalendar
def test_observance(self):
"""
Each Holiday returned by the calendar should be aware of its indicated
date and observance date.
"""
holidays = list(self.cal.holidays(2011))
assert len(holidays) == 2
xmas_eve, xmas_day = holidays
assert xmas_eve == date(2011, 12, 24)
assert xmas_eve.get_observed_date(self.cal) == date(2011, 12, 23)
assert xmas_day == date(2011, 12, 25)
assert xmas_day.get_observed_date(self.cal) == date(2011, 12, 26)
class IslamicMixinTest(CoreCalendarTest):
cal_class = IslamicMixin
def test_year_conversion(self):
days = self.cal.converted(2013)
self.assertEqual(len(days), 365)
class CalverterClassNoConversionMethod(CalverterMixin):
pass
class NoConversionMethodTest(TestCase):
def test_no_conversion_method(self):
with self.assertRaises(NotImplementedError):
CalverterClassNoConversionMethod()
class IncludeLaylatAlQadr(IslamicMixin):
include_laylat_al_qadr = True
class DoesNotIncludeLaylatAlQadr(IslamicMixin):
include_laylat_al_qadr = False
class LaylatAlQadrTest(TestCase):
def test_warning_laylat_al_qadr(self):
cal = IncludeLaylatAlQadr()
with patch('warnings.warn') as patched:
cal.get_islamic_holidays()
patched.assert_called_with(
'The Islamic holiday named Laylat al-Qadr is decided by the '
'religious authorities. It is not possible to compute it. '
"You'll have to add it manually."
)
def test_no_warning_laylat_al_qadr(self):
cal = DoesNotIncludeLaylatAlQadr()
with patch('warnings.warn') as patched:
cal.get_islamic_holidays()
patched.assert_not_called()
class MockChristianCalendar(WesternCalendar):
# WesternCalendar inherits from ChristianMixin
pass
class MockChristianCalendarTest(CoreCalendarTest):
cal_class = MockChristianCalendar
def test_year_2014(self):
holidays = self.cal.holidays_set(2014)
self.assertNotIn(date(2014, 1, 6), holidays) # Epiphany
self.assertNotIn(date(2014, 3, 3), holidays) # Clean Monday
self.assertNotIn(date(2014, 3, 5), holidays) # Ash Wednesday
self.assertNotIn(date(2014, 3, 25), holidays) # Annunciation
self.assertNotIn(date(2014, 4, 17), holidays) # Holy Thursday
        self.assertNotIn(date(2014, 4, 18), holidays)  # Good Friday
self.assertNotIn(date(2014, 4, 19), holidays) # Easter sat
self.assertNotIn(date(2014, 4, 20), holidays) # Easter Sun
self.assertNotIn(date(2014, 4, 21), holidays) # Easter Mon
self.assertNotIn(date(2014, 5, 29), holidays) # Ascension
self.assertNotIn(date(2014, 6, 8), holidays) # Whit Sunday
self.assertNotIn(date(2014, 6, 9), holidays) # Whit Monday
self.assertNotIn(date(2014, 6, 19), holidays) # Corp. Christi
self.assertNotIn(date(2014, 8, 15), holidays) # Assumption
self.assertNotIn(date(2014, 11, 1), holidays) # All Saints
self.assertNotIn(date(2014, 12, 8), holidays) # Imm. Conc.
self.assertNotIn(date(2014, 12, 24), holidays) # Xmas Eve
self.assertNotIn(date(2014, 12, 26), holidays) # Boxing Day
# The only Christian day that is a holiday for every calendar
self.assertIn(date(2014, 12, 25), holidays) # XMas
# Only 2 days: Jan 1st and Christmas
self.assertEqual(len(holidays), 2)
class NoWeekendCalendar(Calendar):
"""
This calendar class has no WEEKEND_DAYS and no `get_weekend_days()` method.
    It has to fail when trying to fetch its weekend days / holidays.
"""
class NoWeekendCalendarTest(CoreCalendarTest):
cal_class = NoWeekendCalendar
def test_weekend(self):
day = date(2017, 5, 13) # This is a Saturday
with self.assertRaises(NotImplementedError):
self.cal.is_working_day(day)
day = date(2017, 5, 17) # This is a Wednesday
with self.assertRaises(NotImplementedError):
self.cal.is_working_day(day)
class GenericCalendarTestTest(GenericCalendarTest):
cal_class = NoWeekendCalendar
def test_weekend_days(self):
with self.assertRaises(AssertionError):
super().test_weekend_days()
class WeekendOnWednesdayCalendar(Calendar):
"""
    This calendar class's weekend day is Wednesday and we don't overwrite
    the `get_weekend_days()` method. It should be fine.
"""
WEEKEND_DAYS = (WED,)
class WeekendOnWednesdayCalendarTest(CoreCalendarTest):
cal_class = WeekendOnWednesdayCalendar
def test_weekend(self):
day = date(2017, 5, 13) # This is a Saturday
self.assertTrue(self.cal.is_working_day(day))
day = date(2017, 5, 17) # This is a Wednesday
self.assertFalse(self.cal.is_working_day(day))
class OverwriteGetWeekendDaysCalendar(Calendar):
"""
This calendar class has no WEEKEND_DAYS and we overwrite
its `get_weekend_days` method.
Should work.
"""
def get_weekend_days(self):
return WED,
class OverwriteGetWeekendDaysCalendarTest(CoreCalendarTest):
cal_class = OverwriteGetWeekendDaysCalendar
def test_weekend(self):
day = date(2017, 5, 13) # This is a Saturday
self.assertTrue(self.cal.is_working_day(day))
day = date(2017, 5, 17) # This is a Wednesday
self.assertFalse(self.cal.is_working_day(day))
class NoHolidayCalendar(Calendar):
include_new_years_day = False
WEEKEND_DAYS = (SAT, SUN)
class WorkingDaysDeltatest(TestCase):
def test_zero(self):
days = (
date(2018, 12, 21), # a Thursday
date(2018, 12, 23), # a Sunday
date(2018, 12, 25), # a holiday in Christian calendars
)
for day in days:
cal = NoHolidayCalendar()
self.assertEqual(cal.get_working_days_delta(day, day), 0)
cal = MockChristianCalendar()
self.assertEqual(cal.get_working_days_delta(day, day), 0)
def test_no_holidays_simple(self):
cal = NoHolidayCalendar()
day1 = date(2018, 12, 21)
day2 = date(2018, 12, 26)
delta = cal.get_working_days_delta(day1, day2)
# there are 3 days, because of the week-ends
self.assertEqual(delta, 3)
# No difference if you swap the two dates
delta = cal.get_working_days_delta(day2, day1)
self.assertEqual(delta, 3)
def test_no_holidays_over_2_years(self):
cal = NoHolidayCalendar()
day1 = date(2018, 12, 21)
day2 = date(2019, 1, 4)
delta = cal.get_working_days_delta(day1, day2)
# there are 10 days, because of the week-ends
self.assertEqual(delta, 10)
# No difference if you swap the two dates
delta = cal.get_working_days_delta(day2, day1)
self.assertEqual(delta, 10)
def test_christian_simple(self):
cal = MockChristianCalendar()
day1 = date(2018, 12, 21)
day2 = date(2018, 12, 26)
delta = cal.get_working_days_delta(day1, day2)
# there are 2 days, because of the week-end + Christmas Day
self.assertEqual(delta, 2)
# No difference if you swap the two dates
delta = cal.get_working_days_delta(day2, day1)
self.assertEqual(delta, 2)
def test_christian_over_2_years(self):
cal = MockChristianCalendar()
day1 = date(2018, 12, 21)
day2 = date(2019, 1, 4)
delta = cal.get_working_days_delta(day1, day2)
# there are 8 days, because of the week-ends + Xmas day + New Year
self.assertEqual(delta, 8)
# No difference if you swap the two dates
delta = cal.get_working_days_delta(day2, day1)
self.assertEqual(delta, 8)
def test_with_datetimes(self):
cal = MockChristianCalendar()
day1 = datetime(2018, 12, 21)
day2 = date(2018, 12, 26)
delta = cal.get_working_days_delta(day1, day2)
# there are 2 days, because of the week-end + Christmas Day
self.assertEqual(delta, 2)
# No difference if you swap the two dates
delta = cal.get_working_days_delta(day2, day1)
self.assertEqual(delta, 2)
def test_with_including_first_day(self):
# linked to #393
cal = MockChristianCalendar()
day1 = date(2018, 12, 24) # December 24th: not holiday so working day
day2 = date(2018, 12, 25) # December 25th: Christmas
# not including the first day, should return 0
delta = cal.get_working_days_delta(day1, day2)
self.assertEqual(delta, 0)
# including the first day, should return 1
delta = cal.get_working_days_delta(day1, day2, include_start=True)
self.assertEqual(delta, 1)
def test_use_extra_working_days(self):
cal = MockChristianCalendar()
day1 = date(2018, 12, 21)
day2 = date(2018, 12, 26)
delta = cal.get_working_days_delta(
day1, day2,
extra_working_days=[date(2018, 12, 25)]
)
# there are 3 days, because of the week-end
# and Christmas Day is a working day
self.assertEqual(delta, 3)
# No difference if you swap the two dates
delta = cal.get_working_days_delta(
day2, day1,
extra_working_days=[date(2018, 12, 25)])
self.assertEqual(delta, 3)
def test_use_extra_holidays(self):
cal = MockChristianCalendar()
day1 = date(2018, 12, 21)
day2 = date(2018, 12, 26)
delta = cal.get_working_days_delta(
day1, day2,
extra_holidays=[date(2018, 12, 24)]
)
# Only 1 day, because of the week-end + XMas + XMas Eve
self.assertEqual(delta, 1)
# No difference if you swap the two dates
delta = cal.get_working_days_delta(
day2, day1,
extra_holidays=[date(2018, 12, 24)])
self.assertEqual(delta, 1)
def test_use_both_extra(self):
cal = MockChristianCalendar()
day1 = date(2018, 12, 21)
day2 = date(2018, 12, 26)
delta = cal.get_working_days_delta(
day1, day2,
extra_working_days=[date(2018, 12, 25)],
extra_holidays=[date(2018, 12, 24)]
)
        # 2 days, because of the week-end + XMas Eve as an extra holiday,
        # while Christmas counts as an extra working day
self.assertEqual(delta, 2)
# No difference if you swap the two dates
delta = cal.get_working_days_delta(
day2, day1,
extra_working_days=[date(2018, 12, 25)],
extra_holidays=[date(2018, 12, 24)])
self.assertEqual(delta, 2)
class NoDocstring(Calendar):
pass
class EmptyDocstring(Calendar):
""
class OneLineDocstring(Calendar):
"One line"
class MultipleLineDocstring(Calendar):
"""Multiple line
docstrings can span over multiple lines.
"""
class MultipleLineEmptyFirstDocstring(Calendar):
"""
Multiple line empty first
docstrings can span over multiple lines.
"""
class CalendarClassNameTest(TestCase):
def test_no_docstring(self):
self.assertEqual(NoDocstring.name, "NoDocstring")
def test_empty_docstring(self):
self.assertEqual(EmptyDocstring.name, "EmptyDocstring")
def test_oneline_docstring(self):
self.assertEqual(OneLineDocstring.name, "One line")
def test_multiple_line_docstring(self):
self.assertEqual(MultipleLineDocstring.name, "Multiple line")
def test_multiple_line_empty_first_docstring(self):
self.assertEqual(
MultipleLineEmptyFirstDocstring.name, "Multiple line empty first"
)
class TestAcceptableDateTypes(CoreCalendarTest):
"""
Test cases about accepted date and datetime types.
"""
cal_class = MockCalendar
unsupported = ('hello', 1)
def test_unsupported_type_is_working_day(self):
for arg in self.unsupported:
with self.assertRaises(UnsupportedDateType):
self.cal.is_working_day(arg)
# Extra holidays optional argument
for arg in self.unsupported:
with self.assertRaises(UnsupportedDateType):
self.cal.is_working_day(
date(2018, 1, 1),
extra_holidays=[arg]
)
# Extra working days optional argument
for arg in self.unsupported:
with self.assertRaises(UnsupportedDateType):
self.cal.is_working_day(
date(2018, 1, 1),
extra_working_days=[arg]
)
def test_unsupported_type_is_holiday(self):
for arg in self.unsupported:
with self.assertRaises(UnsupportedDateType):
self.cal.is_holiday(arg)
# Extra holidays optional argument
for arg in self.unsupported:
with self.assertRaises(UnsupportedDateType):
self.cal.is_holiday(
date(2018, 1, 1),
extra_holidays=[arg]
)
def test_unsupported_type_holiday_label(self):
for arg in self.unsupported:
with self.assertRaises(UnsupportedDateType):
self.cal.get_holiday_label(arg)
def test_unsupported_type_add_sub_working_days(self):
for arg in self.unsupported:
with self.assertRaises(UnsupportedDateType):
self.cal.add_working_days(arg, 1)
for arg in self.unsupported:
with self.assertRaises(UnsupportedDateType):
self.cal.sub_working_days(arg, 1)
# Extra holidays optional argument
for arg in self.unsupported:
with self.assertRaises(UnsupportedDateType):
self.cal.add_working_days(
date(2018, 1, 1), 1,
extra_holidays=[arg]
)
# Extra working days optional argument
for arg in self.unsupported:
with self.assertRaises(UnsupportedDateType):
self.cal.add_working_days(
date(2018, 1, 1), 1,
extra_working_days=[arg]
)
# NOTE: no need to test "sub", they're calling each other.
def test_unsupported_type_find_following_working_day(self):
for arg in self.unsupported:
with self.assertRaises(UnsupportedDateType):
self.cal.find_following_working_day(arg)
def test_unsupported_type_get_nth_weekday_in_month(self):
for arg in self.unsupported:
with self.assertRaises(UnsupportedDateType):
self.cal.get_nth_weekday_in_month(2018, 1, MON, start=arg)
def test_unsupported_type_get_working_days_delta(self):
for arg in self.unsupported:
with self.assertRaises(UnsupportedDateType):
self.cal.get_working_days_delta(date(2018, 1, 1), arg)
with self.assertRaises(UnsupportedDateType):
self.cal.get_working_days_delta(arg, date(2018, 1, 1))
def test_datetime(self):
self.assertFalse(
self.cal.is_working_day(datetime(2014, 1, 1)))
self.assertTrue(
self.cal.is_holiday(datetime(2014, 1, 1)))
def test_add_working_days_datetime(self):
# datetime inside, date outside
self.assertEqual(
self.cal.add_working_days(
datetime(self.year, 12, 20, 12, 34, 56), 0),
date(self.year, 12, 20)
)
self.assertEqual(
self.cal.add_working_days(
datetime(self.year, 12, 20, 12, 34, 56), 1),
date(self.year, 12, 21)
)
# Use the `keep_datetime` option
self.assertEqual(
self.cal.add_working_days(
datetime(self.year, 12, 20, 12, 34, 56),
0, keep_datetime=True),
datetime(self.year, 12, 20, 12, 34, 56)
)
self.assertEqual(
self.cal.add_working_days(
datetime(self.year, 12, 20, 12, 34, 56),
1, keep_datetime=True),
datetime(self.year, 12, 21, 12, 34, 56)
)
def test_sub_working_days_datetime(self):
# datetime inside, date outside
self.assertEqual(
self.cal.sub_working_days(
datetime(self.year, 12, 20, 12, 34, 56), 0),
date(self.year, 12, 20)
)
self.assertEqual(
self.cal.sub_working_days(
datetime(self.year, 12, 20, 12, 34, 56), 1),
date(self.year, 12, 19)
)
# Use the `keep_datetime` option
self.assertEqual(
self.cal.sub_working_days(
datetime(self.year, 12, 20, 12, 34, 56),
0, keep_datetime=True),
datetime(self.year, 12, 20, 12, 34, 56)
)
self.assertEqual(
self.cal.sub_working_days(
datetime(self.year, 12, 20, 12, 34, 56),
1, keep_datetime=True),
datetime(self.year, 12, 19, 12, 34, 56)
)
def test_get_holiday_label_with_datetime(self):
self.assertEqual(
self.cal.get_holiday_label(datetime(2014, 1, 1)), 'New year')
self.assertIsNone(
self.cal.get_holiday_label(datetime(2014, 1, 2)))
class PandasTimestampTest(CoreCalendarTest):
cal_class = MockCalendar
def test_panda_type_is_working_day(self):
self.assertFalse(
self.cal.is_working_day(pandas.to_datetime("2018-1-1"))
)
# Extra holidays optional argument
self.assertFalse(
self.cal.is_working_day(
date(2018, 1, 2),
extra_holidays=[pandas.to_datetime("2018-1-2")]
)
)
# Extra working days optional argument
self.assertTrue(
self.cal.is_working_day(
date(2018, 1, 1),
extra_working_days=[pandas.to_datetime("2018-1-1")]
)
)
def test_panda_type_is_holiday(self):
self.assertTrue(self.cal.is_holiday(pandas.to_datetime("2018-1-1")))
# Extra holidays optional argument
self.assertTrue(
self.cal.is_holiday(
date(2018, 2, 1),
extra_holidays=[pandas.to_datetime("2018-2-1")]
)
)
def test_panda_type_holiday_label(self):
label = self.cal.get_holiday_label(pandas.to_datetime("2018-1-1"))
self.assertEqual(label, "New year")
def test_panda_type_add_sub_working_days(self):
day = pandas.to_datetime("2018-12-24")
next_day = self.cal.add_working_days(day, 1)
self.assertEqual(next_day, date(2018, 12, 26))
previous_day = self.cal.sub_working_days(next_day, 1)
self.assertEqual(previous_day, date(2018, 12, 24))
next_day = self.cal.add_working_days(
date(2018, 12, 24), 1,
extra_holidays=[pandas.to_datetime("2018-12-26")]
)
self.assertEqual(next_day, date(2018, 12, 27))
next_day = self.cal.add_working_days(
date(2018, 12, 24), 1,
extra_working_days=[pandas.to_datetime("2018-12-25")]
)
self.assertEqual(next_day, date(2018, 12, 25))
    def test_panda_type_find_following_working_day(self):
following_day = self.cal.find_following_working_day(
pandas.to_datetime("2018-1-1")
)
# No weekend days, the next day is "today"
self.assertEqual(following_day, date(2018, 1, 1))
    def test_panda_type_get_nth_weekday_in_month(self):
start = pandas.to_datetime("2018-1-4")
monday = self.cal.get_nth_weekday_in_month(2018, 1, MON, start=start)
self.assertEqual(monday, date(2018, 1, 8))
    def test_panda_type_get_working_days_delta(self):
start, end = date(2018, 12, 23), pandas.to_datetime("2018-12-26")
delta = self.cal.get_working_days_delta(start, end)
self.assertEqual(delta, 2)
delta = self.cal.get_working_days_delta(end, start)
self.assertEqual(delta, 2)
start, end = pandas.to_datetime("2018-12-23"), date(2018, 12, 26)
delta = self.cal.get_working_days_delta(start, end)
self.assertEqual(delta, 2)
delta = self.cal.get_working_days_delta(end, start)
self.assertEqual(delta, 2)
class MockCalendarNoFatTuesdayLabel(WesternCalendar):
fat_tuesday_label = None
class FatTuesdayLabelTest(TestCase):
def test_fat_tuesday_label(self):
cal = MockCalendarNoFatTuesdayLabel()
with self.assertRaises(CalendarError):
cal.get_fat_tuesday(2020)
def test_daterange_start_end():
start = date(2020, 4, 1)
end = date(2020, 4, 10)
date_list = list(daterange(start, end))
assert date_list == [
date(2020, 4, 1),
date(2020, 4, 2),
date(2020, 4, 3),
date(2020, 4, 4),
date(2020, 4, 5),
date(2020, 4, 6),
date(2020, 4, 7),
date(2020, 4, 8),
date(2020, 4, 9),
date(2020, 4, 10),
]
def test_daterange_end_start():
end = date(2020, 4, 1)
start = date(2020, 4, 10)
date_list = list(daterange(start, end))
assert date_list == [
date(2020, 4, 1),
date(2020, 4, 2),
date(2020, 4, 3),
date(2020, 4, 4),
date(2020, 4, 5),
date(2020, 4, 6),
date(2020, 4, 7),
date(2020, 4, 8),
date(2020, 4, 9),
date(2020, 4, 10),
]
def test_daterange_same_date():
    # Degenerate use case, but worth testing nonetheless
start = end = date(2020, 4, 1)
date_list = list(daterange(start, end))
assert date_list == [date(2020, 4, 1)]
| {
"content_hash": "225416e800e844b04a4fd104d56122fb",
"timestamp": "",
"source": "github",
"line_count": 947,
"max_line_length": 79,
"avg_line_length": 33.24498416050687,
"alnum_prop": 0.5920020328431217,
"repo_name": "jaraco/calendra",
"id": "8ba14c7a06da012cfeba5d860660c46ef5a688e3",
"size": "31483",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "calendra/tests/test_core.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "854988"
}
],
"symlink_target": ""
} |
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from .StarFile import StarBlock,StarFile,StarList,StarDict
from io import StringIO
# An alternative specification for the Cif Parser, based on Yapps2
# by Amit Patel (http://theory.stanford.edu/~amitp/Yapps)
#
# helper code: we define our match tokens
lastval = ''
def monitor(location,value):
global lastval
#print 'At %s: %s' % (location,repr(value))
lastval = repr(value)
return value
# Strip extras gets rid of leading and trailing whitespace, and
# semicolons.
def stripextras(value):
from .StarFile import remove_line_folding, remove_line_prefix
# we get rid of semicolons and leading/trailing terminators etc.
import re
jj = re.compile("[\n\r\f \t\v]*")
semis = re.compile("[\n\r\f \t\v]*[\n\r\f]\n*;")
cut = semis.match(value)
if cut: #we have a semicolon-delimited string
nv = value[cut.end():len(value)-2]
try:
if nv[-1]=='\r': nv = nv[:-1]
except IndexError: #empty data value
pass
# apply protocols
nv = remove_line_prefix(nv)
nv = remove_line_folding(nv)
return nv
else:
cut = jj.match(value)
if cut:
return stripstring(value[cut.end():])
return value
# helper function to get rid of inverted commas etc.
def stripstring(value):
if value:
if value[0]== '\'' and value[-1]=='\'':
return value[1:-1]
if value[0]=='"' and value[-1]=='"':
return value[1:-1]
return value
# helper function to get rid of triple quotes
def striptriple(value):
if value:
if value[:3] == '"""' and value[-3:] == '"""':
return value[3:-3]
if value[:3] == "'''" and value[-3:] == "'''":
return value[3:-3]
return value
# helper function to populate a StarBlock given a list of names
# and values .
#
# Note that there may be an empty list at the very end of our itemlists,
# so we remove that if necessary.
#
def makeloop(target_block,loopdata):
loop_seq,itemlists = loopdata
if itemlists[-1] == []: itemlists.pop(-1)
# print('Making loop with %s' % repr(itemlists))
step_size = len(loop_seq)
for col_no in range(step_size):
target_block.AddItem(loop_seq[col_no], itemlists[col_no::step_size],precheck=True)
# now construct the loop
try:
target_block.CreateLoop(loop_seq) #will raise ValueError on problem
except ValueError:
error_string = 'Incorrect number of loop values for loop containing %s' % repr(loop_seq)
print(error_string, file=sys.stderr)
raise ValueError(error_string)
# return an object with the appropriate amount of nesting
def make_empty(nestlevel):
gd = []
for i in range(1,nestlevel):
gd = [gd]
return gd
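# For example, make_empty(1) returns [], make_empty(2) returns [[]] and
# make_empty(3) returns [[[]]].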
# this function updates a dictionary first checking for name collisions,
# which imply that the CIF is invalid. We need case insensitivity for
# names.
# Unfortunately we cannot check loop item contents against non-loop contents
# in a non-messy way during parsing, as we may not have easy access to previous
# key value pairs in the context of our call (unlike our built-in access to all
# previous loops).
# For this reason, we don't waste time checking looped items against non-looped
# names during parsing of a data block. This would only match a subset of the
# final items. We do check against ordinary items, however.
#
# Note the following situations:
# (1) new_dict is empty -> we have just added a loop; do no checking
# (2) new_dict is not empty -> we have some new key-value pairs
#
def cif_update(old_dict,new_dict,loops):
    # build a real list (not a lazy map object) so the membership test below
    # can be repeated for every new key
    old_keys = [key.lower() for key in old_dict.keys()]
if new_dict != {}: # otherwise we have a new loop
#print 'Comparing %s to %s' % (repr(old_keys),repr(new_dict.keys()))
for new_key in new_dict.keys():
if new_key.lower() in old_keys:
raise CifError("Duplicate dataname or blockname %s in input file" % new_key)
old_dict[new_key] = new_dict[new_key]
#
# this takes two lines, so we couldn't fit it into a one line execution statement...
def order_update(order_array,new_name):
order_array.append(new_name)
return new_name
# and finally...turn a sequence into a python dict (thanks to Stackoverflow)
def pairwise(iterable):
try:
it = iter(iterable)
while 1:
yield next(it), next(it)
except StopIteration:
return
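# For example, dict(pairwise(['_a', 1, '_b', 2])) gives {'_a': 1, '_b': 2};
# a trailing unpaired element is silently dropped.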
# Begin -- grammar generated by Yapps
import sys, re
from . import yapps3_compiled_rt as yappsrt
class StarParserScanner(yappsrt.Scanner):
def __init__(self, *args,**kwargs):
patterns = [
('([ \t\n\r](?!;))|[ \t]', '([ \t\n\r](?!;))|[ \t]'),
('(#.*[\n\r](?!;))|(#.*)', '(#.*[\n\r](?!;))|(#.*)'),
('LBLOCK', '(L|l)(O|o)(O|o)(P|p)_'),
('GLOBAL', '(G|g)(L|l)(O|o)(B|b)(A|a)(L|l)_'),
('STOP', '(S|s)(T|t)(O|o)(P|p)_'),
('save_heading', '(S|s)(A|a)(V|v)(E|e)_[][!%&\\(\\)*+,./:<=>?@0-9A-Za-z\\\\^`{}\\|~"#$\';_-]+'),
('save_end', '(S|s)(A|a)(V|v)(E|e)_'),
('data_name', '_[][!%&\\(\\)*+,./:<=>?@0-9A-Za-z\\\\^`{}\\|~"#$\';_-]+'),
('data_heading', '(D|d)(A|a)(T|t)(A|a)_[][!%&\\(\\)*+,./:<=>?@0-9A-Za-z\\\\^`{}\\|~"#$\';_-]+'),
('start_sc_line', '(\n|\r\n);([^\n\r])*(\r\n|\r|\n)+'),
('sc_line_of_text', '[^;\r\n]([^\r\n])*(\r\n|\r|\n)+'),
('end_sc_line', ';'),
('data_value_1', '((?!(((S|s)(A|a)(V|v)(E|e)_[^\\s]*)|((G|g)(L|l)(O|o)(B|b)(A|a)(L|l)_[^\\s]*)|((S|s)(T|t)(O|o)(P|p)_[^\\s]*)|((D|d)(A|a)(T|t)(A|a)_[^\\s]*)))[^\\s"#$\'_][^\\s]*)|\'((\'(?=\\S))|([^\n\r\x0c\']))*\'+|"(("(?=\\S))|([^\n\r"]))*"+'),
('END', '$'),
]
yappsrt.Scanner.__init__(self,patterns,['([ \t\n\r](?!;))|[ \t]', '(#.*[\n\r](?!;))|(#.*)'],*args,**kwargs)
class StarParser(yappsrt.Parser):
Context = yappsrt.Context
def input(self, prepared, _parent=None):
_context = self.Context(_parent, self._scanner, self._pos, 'input', [prepared])
_token = self._peek('END', 'data_heading')
if _token == 'data_heading':
dblock = self.dblock(prepared, _context)
allblocks = prepared;allblocks.merge_fast(dblock)
while self._peek('END', 'data_heading') == 'data_heading':
dblock = self.dblock(prepared, _context)
allblocks.merge_fast(dblock)
if self._peek() not in ['END', 'data_heading']:
raise yappsrt.YappsSyntaxError(charpos=self._scanner.get_prev_char_pos(), context=_context, msg='Need one of ' + ', '.join(['END', 'data_heading']))
END = self._scan('END')
else: # == 'END'
END = self._scan('END')
allblocks = prepared
allblocks.unlock();return allblocks
def dblock(self, prepared, _parent=None):
_context = self.Context(_parent, self._scanner, self._pos, 'dblock', [prepared])
data_heading = self._scan('data_heading')
heading = data_heading[5:];thisbc=StarFile(characterset='unicode',standard=prepared.standard);newname = thisbc.NewBlock(heading,prepared.blocktype(overwrite=False));act_block=thisbc[newname]
while self._peek('save_heading', 'LBLOCK', 'data_name', 'save_end', 'END', 'data_heading') in ['save_heading', 'LBLOCK', 'data_name']:
_token = self._peek('save_heading', 'LBLOCK', 'data_name')
if _token != 'save_heading':
dataseq = self.dataseq(thisbc[heading], _context)
else: # == 'save_heading'
save_frame = self.save_frame(prepared, _context)
thisbc.merge_fast(save_frame,parent=act_block)
if self._peek() not in ['save_heading', 'LBLOCK', 'data_name', 'save_end', 'END', 'data_heading']:
raise yappsrt.YappsSyntaxError(charpos=self._scanner.get_prev_char_pos(), context=_context, msg='Need one of ' + ', '.join(['save_heading', 'LBLOCK', 'data_name', 'save_end', 'END', 'data_heading']))
thisbc[heading].setmaxnamelength(thisbc[heading].maxnamelength);return (monitor('dblock',thisbc))
def dataseq(self, starblock, _parent=None):
_context = self.Context(_parent, self._scanner, self._pos, 'dataseq', [starblock])
data = self.data(starblock, _context)
while self._peek('LBLOCK', 'data_name', 'save_heading', 'save_end', 'END', 'data_heading') in ['LBLOCK', 'data_name']:
data = self.data(starblock, _context)
if self._peek() not in ['LBLOCK', 'data_name', 'save_heading', 'save_end', 'END', 'data_heading']:
raise yappsrt.YappsSyntaxError(charpos=self._scanner.get_prev_char_pos(), context=_context, msg='Need one of ' + ', '.join(['LBLOCK', 'data_name', 'save_heading', 'save_end', 'END', 'data_heading']))
def data(self, currentblock, _parent=None):
_context = self.Context(_parent, self._scanner, self._pos, 'data', [currentblock])
_token = self._peek('LBLOCK', 'data_name')
if _token == 'LBLOCK':
top_loop = self.top_loop(_context)
makeloop(currentblock,top_loop)
else: # == 'data_name'
datakvpair = self.datakvpair(_context)
currentblock.AddItem(datakvpair[0],datakvpair[1],precheck=True)
def datakvpair(self, _parent=None):
_context = self.Context(_parent, self._scanner, self._pos, 'datakvpair', [])
data_name = self._scan('data_name')
data_value = self.data_value(_context)
return [data_name,data_value]
def data_value(self, _parent=None):
_context = self.Context(_parent, self._scanner, self._pos, 'data_value', [])
_token = self._peek('data_value_1', 'start_sc_line')
if _token == 'data_value_1':
data_value_1 = self._scan('data_value_1')
thisval = stripstring(data_value_1)
else: # == 'start_sc_line'
sc_lines_of_text = self.sc_lines_of_text(_context)
thisval = stripextras(sc_lines_of_text)
return monitor('data_value',thisval)
def sc_lines_of_text(self, _parent=None):
_context = self.Context(_parent, self._scanner, self._pos, 'sc_lines_of_text', [])
start_sc_line = self._scan('start_sc_line')
lines = StringIO();lines.write(start_sc_line)
while self._peek('end_sc_line', 'sc_line_of_text') == 'sc_line_of_text':
sc_line_of_text = self._scan('sc_line_of_text')
lines.write(sc_line_of_text)
if self._peek() not in ['end_sc_line', 'sc_line_of_text']:
raise yappsrt.YappsSyntaxError(charpos=self._scanner.get_prev_char_pos(), context=_context, msg='Need one of ' + ', '.join(['sc_line_of_text', 'end_sc_line']))
end_sc_line = self._scan('end_sc_line')
lines.write(end_sc_line);return monitor('sc_line_of_text',lines.getvalue())
def top_loop(self, _parent=None):
_context = self.Context(_parent, self._scanner, self._pos, 'top_loop', [])
LBLOCK = self._scan('LBLOCK')
loopfield = self.loopfield(_context)
loopvalues = self.loopvalues(_context)
return loopfield,loopvalues
def loopfield(self, _parent=None):
_context = self.Context(_parent, self._scanner, self._pos, 'loopfield', [])
toploop=[]
while self._peek('data_name', 'data_value_1', 'start_sc_line') == 'data_name':
data_name = self._scan('data_name')
toploop.append(data_name)
if self._peek() not in ['data_name', 'data_value_1', 'start_sc_line']:
raise yappsrt.YappsSyntaxError(charpos=self._scanner.get_prev_char_pos(), context=_context, msg='Need one of ' + ', '.join(['data_name', 'data_value_1', 'start_sc_line']))
return toploop
def loopvalues(self, _parent=None):
_context = self.Context(_parent, self._scanner, self._pos, 'loopvalues', [])
data_value = self.data_value(_context)
dataloop=[data_value]
while self._peek('data_value_1', 'start_sc_line', 'LBLOCK', 'data_name', 'save_heading', 'save_end', 'END', 'data_heading') in ['data_value_1', 'start_sc_line']:
data_value = self.data_value(_context)
dataloop.append(monitor('loopval',data_value))
if self._peek() not in ['data_value_1', 'start_sc_line', 'LBLOCK', 'data_name', 'save_heading', 'save_end', 'END', 'data_heading']:
raise yappsrt.YappsSyntaxError(charpos=self._scanner.get_prev_char_pos(), context=_context, msg='Need one of ' + ', '.join(['data_value_1', 'start_sc_line', 'LBLOCK', 'data_name', 'save_heading', 'save_end', 'END', 'data_heading']))
return dataloop
def save_frame(self, prepared, _parent=None):
_context = self.Context(_parent, self._scanner, self._pos, 'save_frame', [prepared])
save_heading = self._scan('save_heading')
savehead = save_heading[5:];savebc = StarFile();newname=savebc.NewBlock(savehead,prepared.blocktype(overwrite=False));act_block=savebc[newname]
while self._peek('save_end', 'save_heading', 'LBLOCK', 'data_name', 'END', 'data_heading') in ['save_heading', 'LBLOCK', 'data_name']:
_token = self._peek('save_heading', 'LBLOCK', 'data_name')
if _token != 'save_heading':
dataseq = self.dataseq(savebc[savehead], _context)
else: # == 'save_heading'
save_frame = self.save_frame(prepared, _context)
savebc.merge_fast(save_frame,parent=act_block)
if self._peek() not in ['save_end', 'save_heading', 'LBLOCK', 'data_name', 'END', 'data_heading']:
raise yappsrt.YappsSyntaxError(charpos=self._scanner.get_prev_char_pos(), context=_context, msg='Need one of ' + ', '.join(['save_end', 'save_heading', 'LBLOCK', 'data_name', 'END', 'data_heading']))
save_end = self._scan('save_end')
return monitor('save_frame',savebc)
def parse(rule, text):
P = StarParser(StarParserScanner(text))
return yappsrt.wrap_error_reporter(P, rule)
# End -- grammar generated by Yapps
| {
"content_hash": "1ea103d0acd427ea64c6c49c063f6df9",
"timestamp": "",
"source": "github",
"line_count": 288,
"max_line_length": 254,
"avg_line_length": 49.28472222222222,
"alnum_prop": 0.5896153304213048,
"repo_name": "heprom/pymicro",
"id": "02225c927ee23d706a2d7c558d93ca96c010e0cc",
"size": "14238",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pymicro/external/YappsStarParser_1_0.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1221411"
}
],
"symlink_target": ""
} |
import os
import numpy as np
import tensorflow as tf
from tensorflow.python.framework import test_util
from tensorflow.python.platform import resource_loader
from tensorflow.python.platform import test
from tflite_micro.tensorflow.lite.micro.python.interpreter.src import tflm_runtime
from tflite_micro.tensorflow.lite.micro.examples.mnist_lstm import evaluate
PREFIX_PATH = resource_loader.get_path_to_datafile("")
class LSTMModelTest(test_util.TensorFlowTestCase):
model_path = os.path.join(PREFIX_PATH, "trained_lstm.tflite")
input_shape = (1, 28, 28)
output_shape = (1, 10)
tflm_interpreter = tflm_runtime.Interpreter.from_file(model_path)
def testCompareWithTFLite(self):
tflite_interpreter = tf.lite.Interpreter(model_path=self.model_path)
tflite_interpreter.allocate_tensors()
tflite_output_details = tflite_interpreter.get_output_details()[0]
tflite_input_details = tflite_interpreter.get_input_details()[0]
np.random.seed(42) #Seed the random number generator
num_steps = 100
for _ in range(0, num_steps):
# Clear the internal states of the TfLite and TFLM interpreters so that we can call invoke multiple times (LSTM is stateful).
tflite_interpreter.reset_all_variables()
self.tflm_interpreter.reset()
      # Give the same (random) input to both interpreters to confirm that the output is identical.
data_x = np.random.random(self.input_shape)
data_x = data_x.astype("float32")
# Run inference on TFLite
tflite_interpreter.set_tensor(tflite_input_details["index"], data_x)
tflite_interpreter.invoke()
tflite_output = tflite_interpreter.get_tensor(
tflite_output_details["index"])
# Run inference on TFLM
self.tflm_interpreter.set_input(data_x, 0)
self.tflm_interpreter.invoke()
tflm_output = self.tflm_interpreter.get_output(0)
# Check that TFLM has correct output
self.assertDTypeEqual(tflm_output, np.float32)
self.assertEqual(tflm_output.shape, self.output_shape)
      # Compare absolute differences so deviations in either direction count.
      self.assertAllLess(np.abs(tflite_output - tflm_output), 1e-5)
def testInputErrHandling(self):
wrong_size_image_path = os.path.join(PREFIX_PATH, "samples/resized9.png")
with self.assertRaises(RuntimeError):
evaluate.predict_image(self.tflm_interpreter, wrong_size_image_path)
def testModelAccuracy(self):
# Test prediction accuracy on digits 0-9 using sample images
for label in range(10):
image_path = os.path.join(PREFIX_PATH, f"samples/sample{label}.png")
# Run inference on the sample image
# Note that the TFLM state is reset inside the predict_image function.
category_probabilities = evaluate.predict_image(self.tflm_interpreter,
image_path)
# Check the prediction result
predicted_category = np.argmax(category_probabilities)
self.assertEqual(predicted_category, label)
if __name__ == "__main__":
test.main()
| {
"content_hash": "bb788785c6c5bfb03c6d3b0561fc761e",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 131,
"avg_line_length": 39.23684210526316,
"alnum_prop": 0.7116029510395707,
"repo_name": "google/CFU-Playground",
"id": "24d288042b030342e4650732734823d5487e4929",
"size": "3670",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "third_party/tflite-micro/tensorflow/lite/micro/examples/mnist_lstm/evaluate_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "3800"
},
{
"name": "C",
"bytes": "449862"
},
{
"name": "C++",
"bytes": "4931362"
},
{
"name": "CMake",
"bytes": "976"
},
{
"name": "Dockerfile",
"bytes": "1026"
},
{
"name": "Jupyter Notebook",
"bytes": "35820"
},
{
"name": "Makefile",
"bytes": "40046"
},
{
"name": "Python",
"bytes": "1764584"
},
{
"name": "RobotFramework",
"bytes": "6125"
},
{
"name": "Scala",
"bytes": "18649"
},
{
"name": "Shell",
"bytes": "25687"
},
{
"name": "SystemVerilog",
"bytes": "6923"
},
{
"name": "Verilog",
"bytes": "6884686"
}
],
"symlink_target": ""
} |
import unittest
import mock
import sys
import os
sys.path.append(os.path.join('..', '..', 'src'))
from backend.game import Game
from backend.menus.menus import GameMenu, StartGameMenu, PauseGameMenu
class GameMenuTest(unittest.TestCase):
@mock.patch('backend.game.Game')
def setUp(self, mock_game):
self.mock_game = mock_game
self.menus = [
StartGameMenu(mock_game),
PauseGameMenu(mock_game)
]
def test_select(self):
for menu in self.menus:
test_option = menu.get_available_options()[0]
            if menu.options[test_option] != "":
expected_prop = menu.options[test_option]
menu.select(test_option)
callable_attr = getattr(
self.mock_game, expected_prop)
self.assertTrue(callable_attr)
def test_select_error(self):
non_existent_option = "TestTest"
for menu in self.menus:
with self.assertRaises(ValueError):
menu.select(non_existent_option)
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "adac1083339672a5be2f5c87d68ab0d2",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 70,
"avg_line_length": 28.512820512820515,
"alnum_prop": 0.5953237410071942,
"repo_name": "tsvetelina-aleksandrova/Spacekatz",
"id": "4fb432848b2fbd4526bc0798e50cefaa2985881b",
"size": "1112",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/menus/menus_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "39082"
}
],
"symlink_target": ""
} |
"""
Handles basic connections to AWS
"""
from datetime import datetime
import errno
import os
import random
import re
import socket
import sys
import time
import xml.sax
import copy
from boto import auth
from boto import auth_handler
import boto
import boto.utils
import boto.handler
import boto.cacerts
from boto import config, UserAgent
from boto.compat import six, http_client, urlparse, quote, encodebytes
from boto.exception import AWSConnectionError
from boto.exception import BotoClientError
from boto.exception import BotoServerError
from boto.exception import PleaseRetryException
from boto.provider import Provider
from boto.resultset import ResultSet
HAVE_HTTPS_CONNECTION = False
try:
import ssl
from boto import https_connection
# Google App Engine runs on Python 2.5 so doesn't have ssl.SSLError.
if hasattr(ssl, 'SSLError'):
HAVE_HTTPS_CONNECTION = True
except ImportError:
pass
try:
import threading
except ImportError:
import dummy_threading as threading
ON_APP_ENGINE = all(key in os.environ for key in (
'USER_IS_ADMIN', 'CURRENT_VERSION_ID', 'APPLICATION_ID'))
PORTS_BY_SECURITY = {True: 443,
False: 80}
DEFAULT_CA_CERTS_FILE = os.path.join(os.path.dirname(os.path.abspath(boto.cacerts.__file__)), "cacerts.txt")
class HostConnectionPool(object):
"""
A pool of connections for one remote (host,port,is_secure).
When connections are added to the pool, they are put into a
pending queue. The _mexe method returns connections to the pool
before the response body has been read, so they connections aren't
ready to send another request yet. They stay in the pending queue
until they are ready for another request, at which point they are
returned to the pool of ready connections.
The pool of ready connections is an ordered list of
(connection,time) pairs, where the time is the time the connection
was returned from _mexe. After a certain period of time,
connections are considered stale, and discarded rather than being
reused. This saves having to wait for the connection to time out
if AWS has decided to close it on the other end because of
inactivity.
Thread Safety:
    This class is used only from ConnectionPool while its mutex
is held.
"""
def __init__(self):
self.queue = []
def size(self):
"""
Returns the number of connections in the pool for this host.
Some of the connections may still be in use, and may not be
ready to be returned by get().
"""
return len(self.queue)
def put(self, conn):
"""
Adds a connection to the pool, along with the time it was
added.
"""
self.queue.append((conn, time.time()))
def get(self):
"""
Returns the next connection in this pool that is ready to be
reused. Returns None if there aren't any.
"""
# Discard ready connections that are too old.
self.clean()
# Return the first connection that is ready, and remove it
# from the queue. Connections that aren't ready are returned
# to the end of the queue with an updated time, on the
# assumption that somebody is actively reading the response.
for _ in range(len(self.queue)):
(conn, _) = self.queue.pop(0)
if self._conn_ready(conn):
return conn
else:
self.put(conn)
return None
def _conn_ready(self, conn):
"""
There is a nice state diagram at the top of http_client.py. It
indicates that once the response headers have been read (which
_mexe does before adding the connection to the pool), a
response is attached to the connection, and it stays there
until it's done reading. This isn't entirely true: even after
the client is done reading, the response may be closed, but
not removed from the connection yet.
This is ugly, reading a private instance variable, but the
state we care about isn't available in any public methods.
"""
if ON_APP_ENGINE:
# Google AppEngine implementation of HTTPConnection doesn't contain
# _HTTPConnection__response attribute. Moreover, it's not possible
# to determine if given connection is ready. Reusing connections
# simply doesn't make sense with App Engine urlfetch service.
return False
else:
response = getattr(conn, '_HTTPConnection__response', None)
return (response is None) or response.isclosed()
def clean(self):
"""
Get rid of stale connections.
"""
# Note that we do not close the connection here -- somebody
# may still be reading from it.
while len(self.queue) > 0 and self._pair_stale(self.queue[0]):
self.queue.pop(0)
def _pair_stale(self, pair):
"""
        Returns true if the (connection,time) pair is too old to be
used.
"""
(_conn, return_time) = pair
now = time.time()
return return_time + ConnectionPool.STALE_DURATION < now
class ConnectionPool(object):
"""
A connection pool that expires connections after a fixed period of
time. This saves time spent waiting for a connection that AWS has
timed out on the other end.
This class is thread-safe.
"""
#
    # The amount of time between calls to clean.
#
CLEAN_INTERVAL = 5.0
#
# How long before a connection becomes "stale" and won't be reused
    # again. The intention is that this time is less than the timeout
# period that AWS uses, so we'll never try to reuse a connection
# and find that AWS is timing it out.
#
# Experimentation in July 2011 shows that AWS starts timing things
# out after three minutes. The 60 seconds here is conservative so
    # we should never hit that 3-minute timeout.
#
STALE_DURATION = 60.0
def __init__(self):
# Mapping from (host,port,is_secure) to HostConnectionPool.
# If a pool becomes empty, it is removed.
self.host_to_pool = {}
# The last time the pool was cleaned.
self.last_clean_time = 0.0
self.mutex = threading.Lock()
ConnectionPool.STALE_DURATION = \
config.getfloat('Boto', 'connection_stale_duration',
ConnectionPool.STALE_DURATION)
def __getstate__(self):
pickled_dict = copy.copy(self.__dict__)
pickled_dict['host_to_pool'] = {}
del pickled_dict['mutex']
return pickled_dict
def __setstate__(self, dct):
self.__init__()
def size(self):
"""
Returns the number of connections in the pool.
"""
return sum(pool.size() for pool in self.host_to_pool.values())
def get_http_connection(self, host, port, is_secure):
"""
Gets a connection from the pool for the named host. Returns
None if there is no connection that can be reused. It's the caller's
responsibility to call close() on the connection when it's no longer
needed.
"""
self.clean()
with self.mutex:
key = (host, port, is_secure)
if key not in self.host_to_pool:
return None
return self.host_to_pool[key].get()
def put_http_connection(self, host, port, is_secure, conn):
"""
Adds a connection to the pool of connections that can be
reused for the named host.
"""
with self.mutex:
key = (host, port, is_secure)
if key not in self.host_to_pool:
self.host_to_pool[key] = HostConnectionPool()
self.host_to_pool[key].put(conn)
def clean(self):
"""
Clean up the stale connections in all of the pools, and then
get rid of empty pools. Pools clean themselves every time a
connection is fetched; this cleaning takes care of pools that
        aren't being used any more, so nothing would ever be fetched from
        them.
"""
with self.mutex:
now = time.time()
if self.last_clean_time + self.CLEAN_INTERVAL < now:
to_remove = []
for (host, pool) in self.host_to_pool.items():
pool.clean()
if pool.size() == 0:
to_remove.append(host)
for host in to_remove:
del self.host_to_pool[host]
self.last_clean_time = now
class HTTPRequest(object):
def __init__(self, method, protocol, host, port, path, auth_path,
params, headers, body):
"""Represents an HTTP request.
:type method: string
:param method: The HTTP method name, 'GET', 'POST', 'PUT' etc.
:type protocol: string
:param protocol: The http protocol used, 'http' or 'https'.
:type host: string
:param host: Host to which the request is addressed. eg. abc.com
:type port: int
:param port: port on which the request is being sent. Zero means unset,
in which case default port will be chosen.
:type path: string
:param path: URL path that is being accessed.
:type auth_path: string
        :param auth_path: The part of the URL path used when creating the
authentication string.
:type params: dict
:param params: HTTP url query parameters, with key as name of
the param, and value as value of param.
:type headers: dict
:param headers: HTTP headers, with key as name of the header and value
as value of header.
:type body: string
:param body: Body of the HTTP request. If not present, will be None or
empty string ('').
"""
self.method = method
self.protocol = protocol
self.host = host
self.port = port
self.path = path
if auth_path is None:
auth_path = path
self.auth_path = auth_path
self.params = params
# chunked Transfer-Encoding should act only on PUT request.
if headers and 'Transfer-Encoding' in headers and \
headers['Transfer-Encoding'] == 'chunked' and \
self.method != 'PUT':
self.headers = headers.copy()
del self.headers['Transfer-Encoding']
else:
self.headers = headers
self.body = body
def __str__(self):
return (('method:(%s) protocol:(%s) host(%s) port(%s) path(%s) '
'params(%s) headers(%s) body(%s)') % (self.method,
self.protocol, self.host, self.port, self.path, self.params,
self.headers, self.body))
def authorize(self, connection, **kwargs):
if not getattr(self, '_headers_quoted', False):
for key in self.headers:
val = self.headers[key]
if isinstance(val, six.text_type):
safe = '!"#$%&\'()*+,/:;<=>?@[\\]^`{|}~'
self.headers[key] = quote(val.encode('utf-8'), safe)
setattr(self, '_headers_quoted', True)
self.headers['User-Agent'] = UserAgent
connection._auth_handler.add_auth(self, **kwargs)
# I'm not sure if this is still needed, now that add_auth is
# setting the content-length for POST requests.
if 'Content-Length' not in self.headers:
if 'Transfer-Encoding' not in self.headers or \
self.headers['Transfer-Encoding'] != 'chunked':
self.headers['Content-Length'] = str(len(self.body))
class HTTPResponse(http_client.HTTPResponse):
def __init__(self, *args, **kwargs):
http_client.HTTPResponse.__init__(self, *args, **kwargs)
self._cached_response = ''
def read(self, amt=None):
"""Read the response.
This method does not have the same behavior as
http_client.HTTPResponse.read. Instead, if this method is called with
no ``amt`` arg, then the response body will be cached. Subsequent
calls to ``read()`` with no args **will return the cached response**.
"""
if amt is None:
# The reason for doing this is that many places in boto call
            # response.read() and expect to get the response body that they
# can then process. To make sure this always works as they expect
# we're caching the response so that multiple calls to read()
# will return the full body. Note that this behavior only
# happens if the amt arg is not specified.
if not self._cached_response:
self._cached_response = http_client.HTTPResponse.read(self)
return self._cached_response
else:
return http_client.HTTPResponse.read(self, amt)
class AWSAuthConnection(object):
def __init__(self, host, aws_access_key_id=None,
aws_secret_access_key=None,
is_secure=True, port=None, proxy=None, proxy_port=None,
proxy_user=None, proxy_pass=None, debug=0,
https_connection_factory=None, path='/',
provider='aws', security_token=None,
suppress_consec_slashes=True,
validate_certs=True, profile_name=None):
"""
:type host: str
:param host: The host to make the connection to
:keyword str aws_access_key_id: Your AWS Access Key ID (provided by
Amazon). If none is specified, the value in your
``AWS_ACCESS_KEY_ID`` environmental variable is used.
:keyword str aws_secret_access_key: Your AWS Secret Access Key
(provided by Amazon). If none is specified, the value in your
``AWS_SECRET_ACCESS_KEY`` environmental variable is used.
:keyword str security_token: The security token associated with
temporary credentials issued by STS. Optional unless using
temporary credentials. If none is specified, the environment
variable ``AWS_SECURITY_TOKEN`` is used if defined.
:type is_secure: boolean
:param is_secure: Whether the connection is over SSL
:type https_connection_factory: list or tuple
:param https_connection_factory: A pair of an HTTP connection
factory and the exceptions to catch. The factory should have
a similar interface to L{http_client.HTTPSConnection}.
:param str proxy: Address/hostname for a proxy server
:type proxy_port: int
:param proxy_port: The port to use when connecting over a proxy
:type proxy_user: str
:param proxy_user: The username to connect with on the proxy
:type proxy_pass: str
        :param proxy_pass: The password to use when connecting over a proxy.
:type port: int
:param port: The port to use to connect
:type suppress_consec_slashes: bool
:param suppress_consec_slashes: If provided, controls whether
consecutive slashes will be suppressed in key paths.
:type validate_certs: bool
:param validate_certs: Controls whether SSL certificates
will be validated or not. Defaults to True.
:type profile_name: str
:param profile_name: Override usual Credentials section in config
file to use a named set of keys instead.
"""
self.suppress_consec_slashes = suppress_consec_slashes
self.num_retries = 6
# Override passed-in is_secure setting if value was defined in config.
if config.has_option('Boto', 'is_secure'):
is_secure = config.getboolean('Boto', 'is_secure')
self.is_secure = is_secure
# Whether or not to validate server certificates.
# The default is now to validate certificates. This can be
        # overridden in the boto config file or by passing an
# explicit validate_certs parameter to the class constructor.
self.https_validate_certificates = config.getbool(
'Boto', 'https_validate_certificates',
validate_certs)
if self.https_validate_certificates and not HAVE_HTTPS_CONNECTION:
raise BotoClientError(
"SSL server certificate validation is enabled in boto "
"configuration, but Python dependencies required to "
"support this feature are not available. Certificate "
"validation is only supported when running under Python "
"2.6 or later.")
certs_file = config.get_value(
'Boto', 'ca_certificates_file', DEFAULT_CA_CERTS_FILE)
if certs_file == 'system':
certs_file = None
self.ca_certificates_file = certs_file
if port:
self.port = port
else:
self.port = PORTS_BY_SECURITY[is_secure]
self.handle_proxy(proxy, proxy_port, proxy_user, proxy_pass)
# define exceptions from http_client that we want to catch and retry
self.http_exceptions = (http_client.HTTPException, socket.error,
socket.gaierror, http_client.BadStatusLine)
# define subclasses of the above that are not retryable.
self.http_unretryable_exceptions = []
if HAVE_HTTPS_CONNECTION:
self.http_unretryable_exceptions.append(
https_connection.InvalidCertificateException)
# define values in socket exceptions we don't want to catch
self.socket_exception_values = (errno.EINTR,)
if https_connection_factory is not None:
self.https_connection_factory = https_connection_factory[0]
self.http_exceptions += https_connection_factory[1]
else:
self.https_connection_factory = None
if (is_secure):
self.protocol = 'https'
else:
self.protocol = 'http'
self.host = host
self.path = path
        # if the value passed in for debug is not an integer, fall back to 0
if not isinstance(debug, six.integer_types):
debug = 0
self.debug = config.getint('Boto', 'debug', debug)
self.host_header = None
# Timeout used to tell http_client how long to wait for socket timeouts.
# Default is to leave timeout unchanged, which will in turn result in
# the socket's default global timeout being used. To specify a
# timeout, set http_socket_timeout in Boto config. Regardless,
# timeouts will only be applied if Python is 2.6 or greater.
self.http_connection_kwargs = {}
if (sys.version_info[0], sys.version_info[1]) >= (2, 6):
# If timeout isn't defined in boto config file, use 70 second
# default as recommended by
# http://docs.aws.amazon.com/amazonswf/latest/apireference/API_PollForActivityTask.html
self.http_connection_kwargs['timeout'] = config.getint(
'Boto', 'http_socket_timeout', 70)
if isinstance(provider, Provider):
# Allow overriding Provider
self.provider = provider
else:
self._provider_type = provider
self.provider = Provider(self._provider_type,
aws_access_key_id,
aws_secret_access_key,
security_token,
profile_name)
# Allow config file to override default host, port, and host header.
if self.provider.host:
self.host = self.provider.host
if self.provider.port:
self.port = self.provider.port
if self.provider.host_header:
self.host_header = self.provider.host_header
self._pool = ConnectionPool()
self._connection = (self.host, self.port, self.is_secure)
self._last_rs = None
self._auth_handler = auth.get_auth_handler(
host, config, self.provider, self._required_auth_capability())
if getattr(self, 'AuthServiceName', None) is not None:
self.auth_service_name = self.AuthServiceName
self.request_hook = None
def __repr__(self):
return '%s:%s' % (self.__class__.__name__, self.host)
def _required_auth_capability(self):
return []
def _get_auth_service_name(self):
return getattr(self._auth_handler, 'service_name')
# For Sigv4, the auth_service_name/auth_region_name properties allow
# the service_name/region_name to be explicitly set instead of being
# derived from the endpoint url.
def _set_auth_service_name(self, value):
self._auth_handler.service_name = value
auth_service_name = property(_get_auth_service_name, _set_auth_service_name)
def _get_auth_region_name(self):
return getattr(self._auth_handler, 'region_name')
def _set_auth_region_name(self, value):
self._auth_handler.region_name = value
auth_region_name = property(_get_auth_region_name, _set_auth_region_name)
def connection(self):
return self.get_http_connection(*self._connection)
connection = property(connection)
def aws_access_key_id(self):
return self.provider.access_key
aws_access_key_id = property(aws_access_key_id)
gs_access_key_id = aws_access_key_id
access_key = aws_access_key_id
def aws_secret_access_key(self):
return self.provider.secret_key
aws_secret_access_key = property(aws_secret_access_key)
gs_secret_access_key = aws_secret_access_key
secret_key = aws_secret_access_key
def profile_name(self):
return self.provider.profile_name
profile_name = property(profile_name)
def get_path(self, path='/'):
# The default behavior is to suppress consecutive slashes for reasons
# discussed at
# https://groups.google.com/forum/#!topic/boto-dev/-ft0XPUy0y8
# You can override that behavior with the suppress_consec_slashes param.
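        # For example, with the default self.path of '/' this collapses
        # '//foo//bar/' to '/foo/bar/', keeping the trailing slash and any
        # '?query' suffix intact.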
if not self.suppress_consec_slashes:
return self.path + re.sub('^(/*)/', "\\1", path)
pos = path.find('?')
if pos >= 0:
params = path[pos:]
path = path[:pos]
else:
params = None
if path[-1] == '/':
need_trailing = True
else:
need_trailing = False
path_elements = self.path.split('/')
path_elements.extend(path.split('/'))
path_elements = [p for p in path_elements if p]
path = '/' + '/'.join(path_elements)
if path[-1] != '/' and need_trailing:
path += '/'
if params:
path = path + params
return path
def server_name(self, port=None):
if not port:
port = self.port
if port == 80:
signature_host = self.host
else:
# This unfortunate little hack can be attributed to
# a difference in the 2.6 version of http_client. In old
# versions, it would append ":443" to the hostname sent
# in the Host header and so we needed to make sure we
# did the same when calculating the V2 signature. In 2.6
# (and higher!)
# it no longer does that. Hence, this kludge.
if ((ON_APP_ENGINE and sys.version[:3] == '2.5') or
sys.version[:3] in ('2.6', '2.7')) and port == 443:
signature_host = self.host
else:
signature_host = '%s:%d' % (self.host, port)
return signature_host
def handle_proxy(self, proxy, proxy_port, proxy_user, proxy_pass):
self.proxy = proxy
self.proxy_port = proxy_port
self.proxy_user = proxy_user
self.proxy_pass = proxy_pass
if 'http_proxy' in os.environ and not self.proxy:
            pattern = re.compile(
                r'(?:http://)?'
                r'(?:(?P<user>[\w\-\.]+):(?P<pass>.*)@)?'
                r'(?P<host>[\w\-\.]+)'
                r'(?::(?P<port>\d+))?'
            )
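            # e.g. 'http://user:secret@proxy.example.com:8080' parses into
            # user='user', pass='secret', host='proxy.example.com', port='8080'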
match = pattern.match(os.environ['http_proxy'])
if match:
self.proxy = match.group('host')
self.proxy_port = match.group('port')
self.proxy_user = match.group('user')
self.proxy_pass = match.group('pass')
else:
if not self.proxy:
self.proxy = config.get_value('Boto', 'proxy', None)
if not self.proxy_port:
self.proxy_port = config.get_value('Boto', 'proxy_port', None)
if not self.proxy_user:
self.proxy_user = config.get_value('Boto', 'proxy_user', None)
if not self.proxy_pass:
self.proxy_pass = config.get_value('Boto', 'proxy_pass', None)
if not self.proxy_port and self.proxy:
print("http_proxy environment variable does not specify "
"a port, using default")
self.proxy_port = self.port
self.no_proxy = os.environ.get('no_proxy', '') or os.environ.get('NO_PROXY', '')
self.use_proxy = (self.proxy is not None)
def get_http_connection(self, host, port, is_secure):
conn = self._pool.get_http_connection(host, port, is_secure)
if conn is not None:
return conn
else:
return self.new_http_connection(host, port, is_secure)
def skip_proxy(self, host):
if not self.no_proxy:
return False
if self.no_proxy == "*":
return True
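        # For example, with no_proxy='internal.example.com,localhost' the
        # hosts 'internal.example.com:8080' and 'sub.localhost' both match
        # and the proxy is skipped for them.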
        hostonly = host.split(':')[0]
for name in self.no_proxy.split(','):
if name and (hostonly.endswith(name) or host.endswith(name)):
return True
return False
def new_http_connection(self, host, port, is_secure):
if host is None:
host = self.server_name()
# Make sure the host is really just the host, not including
# the port number
host = boto.utils.parse_host(host)
http_connection_kwargs = self.http_connection_kwargs.copy()
# Connection factories below expect a port keyword argument
http_connection_kwargs['port'] = port
# Override host with proxy settings if needed
if self.use_proxy and not is_secure and \
not self.skip_proxy(host):
host = self.proxy
http_connection_kwargs['port'] = int(self.proxy_port)
if is_secure:
boto.log.debug(
'establishing HTTPS connection: host=%s, kwargs=%s',
host, http_connection_kwargs)
if self.use_proxy and not self.skip_proxy(host):
connection = self.proxy_ssl(host, is_secure and 443 or 80)
elif self.https_connection_factory:
connection = self.https_connection_factory(host)
elif self.https_validate_certificates and HAVE_HTTPS_CONNECTION:
connection = https_connection.CertValidatingHTTPSConnection(
host, ca_certs=self.ca_certificates_file,
**http_connection_kwargs)
else:
connection = http_client.HTTPSConnection(
host, **http_connection_kwargs)
else:
boto.log.debug('establishing HTTP connection: kwargs=%s' %
http_connection_kwargs)
if self.https_connection_factory:
                # even though the factory says https, it is too handy not to
                # allow it to override plain http connections as well.
connection = self.https_connection_factory(
host, **http_connection_kwargs)
else:
connection = http_client.HTTPConnection(
host, **http_connection_kwargs)
if self.debug > 1:
connection.set_debuglevel(self.debug)
# self.connection must be maintained for backwards-compatibility
# however, it must be dynamically pulled from the connection pool
# set a private variable which will enable that
if host.split(':')[0] == self.host and is_secure == self.is_secure:
self._connection = (host, port, is_secure)
# Set the response class of the http connection to use our custom
# class.
connection.response_class = HTTPResponse
return connection
def put_http_connection(self, host, port, is_secure, connection):
self._pool.put_http_connection(host, port, is_secure, connection)
def proxy_ssl(self, host=None, port=None):
if host and port:
host = '%s:%d' % (host, port)
else:
host = '%s:%d' % (self.host, self.port)
        # It seems proper to use the timeout for the connect as well
timeout = self.http_connection_kwargs.get("timeout")
if timeout is not None:
sock = socket.create_connection((self.proxy,
int(self.proxy_port)), timeout)
else:
sock = socket.create_connection((self.proxy, int(self.proxy_port)))
boto.log.debug("Proxy connection: CONNECT %s HTTP/1.0\r\n", host)
sock.sendall(("CONNECT %s HTTP/1.0\r\n" % host).encode())
sock.sendall(("User-Agent: %s\r\n" % UserAgent).encode())
if self.proxy_user and self.proxy_pass:
for k, v in self.get_proxy_auth_header().items():
sock.sendall(("%s: %s\r\n" % (k, v)).encode())
# See discussion about this config option at
# https://groups.google.com/forum/?fromgroups#!topic/boto-dev/teenFvOq2Cc
if config.getbool('Boto', 'send_crlf_after_proxy_auth_headers', False):
sock.sendall(("\r\n").encode())
else:
sock.sendall(("\r\n").encode())
resp = http_client.HTTPResponse(sock, debuglevel=self.debug)
resp.begin()
if resp.status != 200:
            # Fake a socket error, use a code that makes it obvious it hasn't
# been generated by the socket library
raise socket.error(-71,
"Error talking to HTTP proxy %s:%s: %s (%s)" %
(self.proxy, self.proxy_port,
resp.status, resp.reason))
# We can safely close the response, it duped the original socket
resp.close()
h = http_client.HTTPConnection(host)
if self.https_validate_certificates and HAVE_HTTPS_CONNECTION:
msg = "wrapping ssl socket for proxied connection; "
if self.ca_certificates_file:
msg += "CA certificate file=%s" % self.ca_certificates_file
else:
msg += "using system provided SSL certs"
boto.log.debug(msg)
key_file = self.http_connection_kwargs.get('key_file', None)
cert_file = self.http_connection_kwargs.get('cert_file', None)
sslSock = ssl.wrap_socket(sock, keyfile=key_file,
certfile=cert_file,
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=self.ca_certificates_file)
cert = sslSock.getpeercert()
            # strip off any port before validating the certificate hostname
            hostname = self.host.split(':', 1)[0]
if not https_connection.ValidateCertificateHostname(cert, hostname):
raise https_connection.InvalidCertificateException(
hostname, cert, 'hostname mismatch')
else:
# Fallback for old Python without ssl.wrap_socket
if hasattr(http_client, 'ssl'):
sslSock = http_client.ssl.SSLSocket(sock)
else:
sslSock = socket.ssl(sock, None, None)
sslSock = http_client.FakeSocket(sock, sslSock)
# This is a bit unclean
h.sock = sslSock
return h
def prefix_proxy_to_path(self, path, host=None):
path = self.protocol + '://' + (host or self.server_name()) + path
return path
def get_proxy_auth_header(self):
        # base64 encoding works on bytes and appends a trailing newline, so
        # encode the credentials first and strip the newline from the result.
        auth = encodebytes((self.proxy_user + ':' + self.proxy_pass).encode('utf-8'))
        return {'Proxy-Authorization': 'Basic %s' % auth.decode('utf-8').strip()}
# For passing proxy information to other connection libraries, e.g. cloudsearch2
def get_proxy_url_with_auth(self):
if not self.use_proxy:
return None
if self.proxy_user or self.proxy_pass:
if self.proxy_pass:
login_info = '%s:%s@' % (self.proxy_user, self.proxy_pass)
else:
login_info = '%s@' % self.proxy_user
else:
login_info = ''
return 'http://%s%s:%s' % (login_info, self.proxy, str(self.proxy_port or self.port))
def set_host_header(self, request):
try:
request.headers['Host'] = \
self._auth_handler.host_header(self.host, request)
except AttributeError:
request.headers['Host'] = self.host.split(':', 1)[0]
def set_request_hook(self, hook):
self.request_hook = hook
def _mexe(self, request, sender=None, override_num_retries=None,
retry_handler=None):
"""
mexe - Multi-execute inside a loop, retrying multiple times to handle
transient Internet errors by simply trying again.
Also handles redirects.
This code was inspired by the S3Utils classes posted to the boto-users
Google group by Larry Bates. Thanks!
"""
boto.log.debug('Method: %s' % request.method)
boto.log.debug('Path: %s' % request.path)
boto.log.debug('Data: %s' % request.body)
boto.log.debug('Headers: %s' % request.headers)
boto.log.debug('Host: %s' % request.host)
boto.log.debug('Port: %s' % request.port)
boto.log.debug('Params: %s' % request.params)
response = None
body = None
ex = None
if override_num_retries is None:
num_retries = config.getint('Boto', 'num_retries', self.num_retries)
else:
num_retries = override_num_retries
i = 0
connection = self.get_http_connection(request.host, request.port,
self.is_secure)
# Convert body to bytes if needed
if not isinstance(request.body, bytes) and hasattr(request.body,
'encode'):
request.body = request.body.encode('utf-8')
while i <= num_retries:
# Use binary exponential backoff to desynchronize client requests.
next_sleep = min(random.random() * (2 ** i),
boto.config.get('Boto', 'max_retry_delay', 60))
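            # e.g. attempt i=0 sleeps up to 1s, i=3 up to 8s, capped by the
            # configured max_retry_delay (60s by default).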
try:
# we now re-sign each request before it is retried
boto.log.debug('Token: %s' % self.provider.security_token)
request.authorize(connection=self)
# Only force header for non-s3 connections, because s3 uses
# an older signing method + bucket resource URLs that include
                # the port info. All others should now be up to date and
# not include the port.
if 's3' not in self._required_auth_capability():
if not getattr(self, 'anon', False):
if not request.headers.get('Host'):
self.set_host_header(request)
boto.log.debug('Final headers: %s' % request.headers)
request.start_time = datetime.now()
if callable(sender):
response = sender(connection, request.method, request.path,
request.body, request.headers)
else:
connection.request(request.method, request.path,
request.body, request.headers)
response = connection.getresponse()
boto.log.debug('Response headers: %s' % response.getheaders())
location = response.getheader('location')
# -- gross hack --
# http_client gets confused with chunked responses to HEAD requests
# so I have to fake it out
if request.method == 'HEAD' and getattr(response,
'chunked', False):
response.chunked = 0
if callable(retry_handler):
status = retry_handler(response, i, next_sleep)
if status:
msg, i, next_sleep = status
if msg:
boto.log.debug(msg)
time.sleep(next_sleep)
continue
if response.status in [500, 502, 503, 504]:
msg = 'Received %d response. ' % response.status
msg += 'Retrying in %3.1f seconds' % next_sleep
boto.log.debug(msg)
body = response.read()
if isinstance(body, bytes):
body = body.decode('utf-8')
elif response.status < 300 or response.status >= 400 or \
not location:
# don't return connection to the pool if response contains
# Connection:close header, because the connection has been
# closed and default reconnect behavior may do something
# different than new_http_connection. Also, it's probably
# less efficient to try to reuse a closed connection.
conn_header_value = response.getheader('connection')
if conn_header_value == 'close':
connection.close()
else:
self.put_http_connection(request.host, request.port,
self.is_secure, connection)
if self.request_hook is not None:
self.request_hook.handle_request_data(request, response)
return response
else:
scheme, request.host, request.path, \
params, query, fragment = urlparse(location)
if query:
request.path += '?' + query
# urlparse can return both host and port in netloc, so if
# that's the case we need to split them up properly
if ':' in request.host:
request.host, request.port = request.host.split(':', 1)
msg = 'Redirecting: %s' % scheme + '://'
msg += request.host + request.path
boto.log.debug(msg)
connection = self.get_http_connection(request.host,
request.port,
scheme == 'https')
response = None
continue
except PleaseRetryException as e:
boto.log.debug('encountered a retry exception: %s' % e)
connection = self.new_http_connection(request.host, request.port,
self.is_secure)
response = e.response
ex = e
except self.http_exceptions as e:
for unretryable in self.http_unretryable_exceptions:
if isinstance(e, unretryable):
boto.log.debug(
'encountered unretryable %s exception, re-raising' %
e.__class__.__name__)
raise
boto.log.debug('encountered %s exception, reconnecting' %
e.__class__.__name__)
connection = self.new_http_connection(request.host, request.port,
self.is_secure)
ex = e
time.sleep(next_sleep)
i += 1
# If we made it here, it's because we have exhausted our retries
        # and still haven't succeeded. So, if we have a response object,
# use it to raise an exception.
# Otherwise, raise the exception that must have already happened.
if self.request_hook is not None:
self.request_hook.handle_request_data(request, response, error=True)
if response:
raise BotoServerError(response.status, response.reason, body)
elif ex:
raise ex
else:
msg = 'Please report this exception as a Boto Issue!'
raise BotoClientError(msg)
def build_base_http_request(self, method, path, auth_path,
params=None, headers=None, data='', host=None):
path = self.get_path(path)
if auth_path is not None:
auth_path = self.get_path(auth_path)
if params is None:
params = {}
else:
params = params.copy()
if headers is None:
headers = {}
else:
headers = headers.copy()
if self.host_header and not boto.utils.find_matching_headers('host', headers):
headers['host'] = self.host_header
host = host or self.host
if self.use_proxy:
if not auth_path:
auth_path = path
path = self.prefix_proxy_to_path(path, host)
if self.proxy_user and self.proxy_pass and not self.is_secure:
# If is_secure, we don't have to set the proxy authentication
# header here, we did that in the CONNECT to the proxy.
headers.update(self.get_proxy_auth_header())
return HTTPRequest(method, self.protocol, host, self.port,
path, auth_path, params, headers, data)
def make_request(self, method, path, headers=None, data='', host=None,
auth_path=None, sender=None, override_num_retries=None,
params=None, retry_handler=None):
"""Makes a request to the server, with stock multiple-retry logic."""
if params is None:
params = {}
http_request = self.build_base_http_request(method, path, auth_path,
params, headers, data, host)
return self._mexe(http_request, sender, override_num_retries,
retry_handler=retry_handler)
def close(self):
"""(Optional) Close any open HTTP connections. This is non-destructive,
and making a new request will open a connection again."""
boto.log.debug('closing all HTTP connections')
self._connection = None # compat field
class AWSQueryConnection(AWSAuthConnection):
APIVersion = ''
ResponseError = BotoServerError
def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
is_secure=True, port=None, proxy=None, proxy_port=None,
proxy_user=None, proxy_pass=None, host=None, debug=0,
https_connection_factory=None, path='/', security_token=None,
validate_certs=True, profile_name=None, provider='aws'):
super(AWSQueryConnection, self).__init__(
host, aws_access_key_id,
aws_secret_access_key,
is_secure, port, proxy,
proxy_port, proxy_user, proxy_pass,
debug, https_connection_factory, path,
security_token=security_token,
validate_certs=validate_certs,
profile_name=profile_name,
provider=provider)
def _required_auth_capability(self):
return []
def get_utf8_value(self, value):
return boto.utils.get_utf8_value(value)
def make_request(self, action, params=None, path='/', verb='GET'):
http_request = self.build_base_http_request(verb, path, None,
params, {}, '',
self.host)
if action:
http_request.params['Action'] = action
if self.APIVersion:
http_request.params['Version'] = self.APIVersion
return self._mexe(http_request)
def build_list_params(self, params, items, label):
if isinstance(items, six.string_types):
items = [items]
for i in range(1, len(items) + 1):
params['%s.%d' % (label, i)] = items[i - 1]
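    # Illustrative sketch (added commentary, not part of boto): given
    # items=['i-1', 'i-2'] and label='InstanceId', the loop above leaves
    # params as {'InstanceId.1': 'i-1', 'InstanceId.2': 'i-2'} -- the numbered
    # query-string convention expected by the AWS Query APIs.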
def build_complex_list_params(self, params, items, label, names):
"""Serialize a list of structures.
For example::
items = [('foo', 'bar', 'baz'), ('foo2', 'bar2', 'baz2')]
label = 'ParamName.member'
names = ('One', 'Two', 'Three')
self.build_complex_list_params(params, items, label, names)
would result in the params dict being updated with these params::
ParamName.member.1.One = foo
ParamName.member.1.Two = bar
ParamName.member.1.Three = baz
ParamName.member.2.One = foo2
ParamName.member.2.Two = bar2
ParamName.member.2.Three = baz2
:type params: dict
:param params: The params dict. The complex list params
will be added to this dict.
:type items: list of tuples
:param items: The list to serialize.
:type label: string
:param label: The prefix to apply to the parameter.
:type names: tuple of strings
:param names: The names associated with each tuple element.
"""
for i, item in enumerate(items, 1):
current_prefix = '%s.%s' % (label, i)
for key, value in zip(names, item):
full_key = '%s.%s' % (current_prefix, key)
params[full_key] = value
# generics
def get_list(self, action, params, markers, path='/',
parent=None, verb='GET'):
if not parent:
parent = self
response = self.make_request(action, params, path, verb)
body = response.read()
boto.log.debug(body)
if not body:
boto.log.error('Null body %s' % body)
raise self.ResponseError(response.status, response.reason, body)
elif response.status == 200:
rs = ResultSet(markers)
h = boto.handler.XmlHandler(rs, parent)
if isinstance(body, six.text_type):
body = body.encode('utf-8')
xml.sax.parseString(body, h)
return rs
else:
boto.log.error('%s %s' % (response.status, response.reason))
boto.log.error('%s' % body)
raise self.ResponseError(response.status, response.reason, body)
def get_object(self, action, params, cls, path='/',
parent=None, verb='GET'):
if not parent:
parent = self
response = self.make_request(action, params, path, verb)
body = response.read()
boto.log.debug(body)
if not body:
boto.log.error('Null body %s' % body)
raise self.ResponseError(response.status, response.reason, body)
elif response.status == 200:
obj = cls(parent)
h = boto.handler.XmlHandler(obj, parent)
if isinstance(body, six.text_type):
body = body.encode('utf-8')
xml.sax.parseString(body, h)
return obj
else:
boto.log.error('%s %s' % (response.status, response.reason))
boto.log.error('%s' % body)
raise self.ResponseError(response.status, response.reason, body)
def get_status(self, action, params, path='/', parent=None, verb='GET'):
if not parent:
parent = self
response = self.make_request(action, params, path, verb)
body = response.read()
boto.log.debug(body)
if not body:
boto.log.error('Null body %s' % body)
raise self.ResponseError(response.status, response.reason, body)
elif response.status == 200:
rs = ResultSet()
h = boto.handler.XmlHandler(rs, parent)
xml.sax.parseString(body, h)
return rs.status
else:
boto.log.error('%s %s' % (response.status, response.reason))
boto.log.error('%s' % body)
raise self.ResponseError(response.status, response.reason, body)
| {
"content_hash": "0fd0bb539d89d5f3ac6b7947539b0d7a",
"timestamp": "",
"source": "github",
"line_count": 1186,
"max_line_length": 108,
"avg_line_length": 41.30016863406408,
"alnum_prop": 0.5699236454207668,
"repo_name": "felix-d/boto",
"id": "cfd0fff4d61dddde289ad6f3a0a627be4dbfd078",
"size": "51006",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "boto/connection.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "6536649"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('core', '0002_auto_20170308_1026'),
]
operations = [
migrations.CreateModel(
name='Amis',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
],
),
migrations.CreateModel(
name='Utilisateur',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('pseudo', models.CharField(max_length=20)),
('mdp', models.CharField(max_length=32)),
('mail', models.CharField(max_length=20)),
],
),
migrations.AddField(
model_name='amis',
name='amisde',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='amis_ami', to='core.Utilisateur'),
),
migrations.AddField(
model_name='amis',
name='utilisateur',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='amis_base', to='core.Utilisateur'),
),
]
| {
"content_hash": "25cf46bd24eb6fa7ed235f8a7c2dca55",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 130,
"avg_line_length": 34.35897435897436,
"alnum_prop": 0.5671641791044776,
"repo_name": "batebates/L3ProjetWeb",
"id": "beac3b6a9c9cb7259a3c286648401593f9a4fa40",
"size": "1413",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "BDR/core/migrations/0003_auto_20170308_1105.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2445"
},
{
"name": "HTML",
"bytes": "11469"
},
{
"name": "JavaScript",
"bytes": "1521"
},
{
"name": "Python",
"bytes": "27997"
}
],
"symlink_target": ""
} |
import six
import sys
import os
import unittest
from unittest import TestCase
from unittest.mock import patch
from msgiver.configure import Configure
Configure.CONF_FILE_PATH = "._test_msgiver.yml"
class TestSlack(TestCase):
@classmethod
def tearDownClass(self):
os.remove(Configure.CONF_FILE_PATH)
def test_load_config(self):
pass
def test_generate_config(self):
configure = Configure()
with patch.object(configure, "_Configure__input_slack", return_value={ "token": "aaa", "channel": "bbbb", "bot_icon": "" }) as method:
# six.print_(configure.generate())
self.assertIsNotNone(configure.generate())
method.assert_called_once_with()
configure = Configure()
self.assertIsNotNone(configure.all())
self.assertIsNotNone(configure.slack())
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "0372249d5327469be4c69a00e939b9b4",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 142,
"avg_line_length": 30.75862068965517,
"alnum_prop": 0.647982062780269,
"repo_name": "kitaro-tn/msgiver",
"id": "df7218d30e6e8021af4f4159eb51f58eb41ba25b",
"size": "917",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_configure.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "25758"
},
{
"name": "HTML",
"bytes": "33855"
},
{
"name": "JavaScript",
"bytes": "95397"
},
{
"name": "Makefile",
"bytes": "1794"
},
{
"name": "Python",
"bytes": "19141"
}
],
"symlink_target": ""
} |
"""
[12/11/13] Challenge #144 [Easy] Nuts & Bolts
https://www.reddit.com/r/dailyprogrammer/comments/1sob1e/121113_challenge_144_easy_nuts_bolts/
# [](#EasyIcon) *(Easy)*: Nuts & Bolts
You have just been hired at a local home improvement store to help compute the proper costs of inventory. The current
prices are out of date and wrong; you have to figure out which items need to be re-labeled with the correct price.
You will be first given a list of item-names and their current price. You will then be given another list of the same
item-names but with the correct price. You must then print a list of items that have changed, and by how much.
# Formal Inputs & Outputs
## Input Description
The first line of input will be an integer N, which is for the number of rows in each list. Each list has N-lines of
two space-delimited strings: the first string will be the unique item name (without spaces), the second string will be
the price (in whole-integer cents). The second list, following the same format, will have the same unique item-names,
but with the correct price. Note that the lists may not be in the same order!
## Output Description
For each item that has had its price changed, print a row with the item name and the price difference (in cents). Print
the sign of the change (e.g. '+' for a growth in price, or '-' for a loss in price). Order does not matter for output.
# Sample Inputs & Outputs
## Sample Input 1
4
CarriageBolt 45
Eyebolt 50
Washer 120
Rivet 10
CarriageBolt 45
Eyebolt 45
Washer 140
Rivet 10
## Sample Output 1
Eyebolt -5
Washer +20
## Sample Input 2
3
2DNail 3
4DNail 5
8DNail 10
8DNail 11
4DNail 5
2DNail 2
## Sample Output 2
2DNail -1
8DNail +1
"""
def main():
pass
if __name__ == "__main__":
main()
| {
"content_hash": "6a8eea83635382346094a8a18f41e2fe",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 119,
"avg_line_length": 34.54716981132076,
"alnum_prop": 0.7127252867285636,
"repo_name": "DayGitH/Python-Challenges",
"id": "a236f68bd7730c27113e3bc4169559b21b024b79",
"size": "1831",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "DailyProgrammer/DP20131211A.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "OpenEdge ABL",
"bytes": "5002"
},
{
"name": "Python",
"bytes": "2471582"
}
],
"symlink_target": ""
} |
from conan.packager import ConanMultiPackager
import os, platform
if __name__ == "__main__":
builder = ConanMultiPackager(args="--build missing")
builder.add_common_builds()
filtered_builds = []
for settings, options, env_vars, build_requires in builder.builds:
if not (settings["arch"] == "x86"):
filtered_builds.append([settings, options, env_vars, build_requires])
builder.builds = filtered_builds
builder.run()
| {
"content_hash": "8cff8280eeb754f9f1708357ee5e7725",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 81,
"avg_line_length": 38.333333333333336,
"alnum_prop": 0.6717391304347826,
"repo_name": "DiligentGraphics/DiligentCore",
"id": "43b1b55a2730f89eb7bc44a3f6d6ca957444e1ca",
"size": "460",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "ThirdParty/glew/build/conan/build.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "2069"
},
{
"name": "C",
"bytes": "1167087"
},
{
"name": "C++",
"bytes": "10521014"
},
{
"name": "CMake",
"bytes": "140252"
},
{
"name": "GLSL",
"bytes": "30386"
},
{
"name": "HLSL",
"bytes": "140428"
},
{
"name": "Objective-C",
"bytes": "49891"
},
{
"name": "Objective-C++",
"bytes": "163885"
},
{
"name": "Python",
"bytes": "14195"
},
{
"name": "Shell",
"bytes": "6853"
}
],
"symlink_target": ""
} |
import numpy
from PyQt5 import Qt
class Property(property):
"""A convenience class for making properties that have a default value and a change Qt-signal. An
example of a class with two Property attributes, with the default value of the "bar" Property
depending on the value of "foo" (if .foo is None, .bar defaults to 42):
class C(Qt.QObject):
changed = Qt.pyqtSignal(object)
def __init__(self, parent=None):
super().__init__(parent)
for property in self.properties:
property.instantiate(self)
properties = []
foo = Property(
properties,
"foo",
default_value_callback=lambda c_instance: None,
take_arg_callback=lambda c_instance, in_: None if (in_ is None or in_ == '') else str(in_),
post_set_callback=lambda c_instance, in_: c_instance.__class__.bar.update_default(c_instance),
doc="From wiktionary: \"[foo is a] metasyntactic variable used to represent an unspecified entity. "
"If part of a series of such entities, it is often the first in the series, and followed immediately by bar.\"")
bar = Property(
properties,
"bar",
default_value_callback=lambda c_instance: None if c_instance.foo is None else 42)
for property in properties:
exec(property.changed_signal_name + ' = Qt.pyqtSignal(object)')
del property
c = C()
c.bar_changed.connect(lambda: print("bar's apparent value changed to:", c.bar))
print(c.foo, c.bar) # -> None None
c.foo='baz' # -> bar's apparent value changed to: 42
print(c.foo, c.bar) # -> baz 42
c.bar=55 # -> bar's apparent value changed to: 55
print(c.foo, c.bar) # -> baz 55
del c.foo
print(c.foo, c.bar) # -> None 55
    del c.bar # -> bar's apparent value changed to: None
print(c.foo, c.bar) # -> None None
Additionally, if the class bearing a Property has a .changed attribute and that .changed attribute is a Qt
signal, a_property.instantiate(bearer) connects the a_changed signal created for that property with the .changed signal.
NB: Property is derived from "property" for the sole reason that IPython's question-mark magic is special-cased for
properties. Deriving from property causes Property to receive the same treatment, providing useful output for
something.prop? in IPython (where prop is a Property instance)."""
def __init__(self, properties, name, default_value_callback, take_arg_callback=None, pre_set_callback=None, post_set_callback=None, doc=None):
self.name = name
self.var_name = '_' + name
self.default_val_var_name = '_default_' + name
self.changed_signal_name = name + '_changed'
self.default_value_callback = default_value_callback
self.take_arg_callback = take_arg_callback
self.pre_set_callback = pre_set_callback
self.post_set_callback = post_set_callback
if doc is not None:
self.__doc__ = doc
properties.append(self)
@staticmethod
def eq(a, b):
r = a == b
if isinstance(r, bool):
return r
if isinstance(r, numpy.bool_):
return bool(r)
return all(r)
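    # Added commentary (not in the original): eq() exists because '==' on numpy
    # arrays returns an element-wise array rather than a single bool; plain
    # bool/numpy.bool_ results pass through the isinstance checks above, and
    # all(r) collapses the array case.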
def instantiate(self, obj):
setattr(obj, self.default_val_var_name, self.default_value_callback(obj))
if hasattr(obj, 'changed') and isinstance(obj.changed, Qt.pyqtBoundSignal):
getattr(obj, self.changed_signal_name).connect(obj.changed)
def update_default(self, obj):
if hasattr(obj, self.var_name):
# An explicitly set value is overriding the default, so even if the default has changed, the apparent value of the property has not
setattr(obj, self.default_val_var_name, self.default_value_callback(obj))
else:
# The default value is the apparent value, meaning that we must check if the default has changed and signal an apparent value change
# if it has
old_default = getattr(obj, self.default_val_var_name)
new_default = self.default_value_callback(obj)
if not self.eq(new_default, old_default):
setattr(obj, self.default_val_var_name, new_default)
getattr(obj, self.changed_signal_name).emit(obj)
def copy_instance_value(self, src_obj, dst_obj):
"""Replace value for this property in dst_obj if src_obj has a non-default value
for this property."""
try:
v = getattr(src_obj, self.var_name)
except AttributeError:
return
setattr(dst_obj, self.var_name, v)
def __get__(self, obj, _=None):
if obj is None:
return self
try:
return getattr(obj, self.var_name)
except AttributeError:
return getattr(obj, self.default_val_var_name)
def __set__(self, obj, v):
if self.take_arg_callback is not None:
v = self.take_arg_callback(obj, v)
if not hasattr(obj, self.var_name) or not self.eq(v, getattr(obj, self.var_name)):
if self.pre_set_callback is not None:
if self.pre_set_callback(obj, v) == False:
return
setattr(obj, self.var_name, v)
if self.post_set_callback is not None:
self.post_set_callback(obj, v)
getattr(obj, self.changed_signal_name).emit(obj)
def __delete__(self, obj):
"""Reset to default value by way of removing the explicitly set override, causing the apparent value to be default."""
try:
old_value = getattr(obj, self.var_name)
delattr(obj, self.var_name)
new_value = getattr(obj, self.default_val_var_name)
if not self.eq(old_value, new_value):
if self.post_set_callback is not None:
self.post_set_callback(obj, new_value)
getattr(obj, self.changed_signal_name).emit(obj)
except AttributeError:
# Property was already using default value
pass
def is_default(self, obj):
if not hasattr(obj, self.var_name):
return True
val = getattr(obj, self.var_name)
def_val = getattr(obj, self.default_val_var_name)
eq = self.eq(val, def_val)
return eq | {
"content_hash": "0e1bfc44576460037187837e71ab89cd",
"timestamp": "",
"source": "github",
"line_count": 145,
"max_line_length": 146,
"avg_line_length": 44.2,
"alnum_prop": 0.6127320954907162,
"repo_name": "erikhvatum/RisWidget",
"id": "f1af8b532ed4a4f99a5d7d1613d12f989e20309a",
"size": "7572",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ris_widget/om/property.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "276275"
},
{
"name": "GLSL",
"bytes": "7012"
},
{
"name": "Python",
"bytes": "600996"
},
{
"name": "QML",
"bytes": "2311"
}
],
"symlink_target": ""
} |
import os
try:
from setuptools import setup, find_packages
except ImportError:
from distutils.core import setup, find_packages
VERSION = '0.10.0'
PATH = os.path.dirname(os.path.abspath(__file__))
try:
LONG_DESC = '\n===='+open(os.path.join(PATH, 'README.rst'), 'r').read().split('====', 1)[-1]
except IOError: #happens when using tox
LONG_DESC = ''
setup(name='django-hyperadmin',
version=VERSION,
description="A hypermedia API framework for Django.",
long_description=LONG_DESC,
classifiers=[
'Programming Language :: Python',
'Environment :: Web Environment',
'Framework :: Django',
'Operating System :: OS Independent',
'Natural Language :: English',
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
],
keywords='django hypermedia HATEOAS REST',
author = 'Jason Kraus',
author_email = '[email protected]',
maintainer = 'Jason Kraus',
maintainer_email = '[email protected]',
url='http://github.com/webcube/django-hyperadmin',
license='New BSD License',
packages=find_packages(exclude=['tests']),
test_suite='tests.runtests.runtests',
tests_require=(
'pep8',
'coverage',
'django',
'Mock',
'nose',
'django-nose',
),
install_requires=[
'mimeparse',
'django-datatap',
],
include_package_data = True,
zip_safe = False,
)
| {
"content_hash": "59f7a384a90eadabaa394f1177cda0c3",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 96,
"avg_line_length": 31.26923076923077,
"alnum_prop": 0.5836408364083641,
"repo_name": "zbyte64/django-hyperadmin",
"id": "b88232f972e85542be5a2f384d0dbae4a2204181",
"size": "1648",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "307844"
}
],
"symlink_target": ""
} |
__revision__ = "test/MSVC/pch-basics.py rel_2.5.1:3735:9dc6cee5c168 2016/11/03 14:02:02 bdbaddog"
"""
Verify PCH works to build a simple exe and a simple dll.
"""
import time
import TestSCons
test = TestSCons.TestSCons(match = TestSCons.match_re)
test.skip_if_not_msvc()
test.write('Main.cpp', """\
#include "Precompiled.h"
int main()
{
return testf();
}
""")
test.write('Precompiled.cpp', """\
#include "Precompiled.h"
""")
test.write('Precompiled.h', """\
#pragma once
static int testf()
{
return 0;
}
""")
test.write('SConstruct', """\
env = Environment()
env['PCHSTOP'] = 'Precompiled.h'
env['PCH'] = env.PCH('Precompiled.cpp')[0]
env.SharedLibrary('pch_dll', 'Main.cpp')
env.Program('pch_exe', 'Main.cpp')
""")
test.run(arguments='.', stderr=None)
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| {
"content_hash": "096700125850f775ecd2581512affe03",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 97,
"avg_line_length": 16.428571428571427,
"alnum_prop": 0.6478260869565218,
"repo_name": "EmanueleCannizzaro/scons",
"id": "37effb10f8f1104b1947bef9b43d8bcfd5d51465",
"size": "2054",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/MSVC/pch-basics.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "2491"
},
{
"name": "C",
"bytes": "659"
},
{
"name": "C++",
"bytes": "598"
},
{
"name": "CSS",
"bytes": "18502"
},
{
"name": "D",
"bytes": "1997"
},
{
"name": "HTML",
"bytes": "817651"
},
{
"name": "Java",
"bytes": "6860"
},
{
"name": "JavaScript",
"bytes": "215495"
},
{
"name": "Makefile",
"bytes": "3795"
},
{
"name": "Perl",
"bytes": "29978"
},
{
"name": "Python",
"bytes": "7510453"
},
{
"name": "Roff",
"bytes": "556545"
},
{
"name": "Ruby",
"bytes": "11074"
},
{
"name": "Shell",
"bytes": "52682"
},
{
"name": "XSLT",
"bytes": "7567242"
}
],
"symlink_target": ""
} |
from __future__ import division
#for training part in classification
from misc import partitionSampleByMetadatumValue
from randomSampling import randomChoice
import numpy as np
from multiDimList import MultiDimList
#@dataArray = [samplesInfoList,infoList,idSequences,sampleList,matchingNodes]
#Computes classes according to metadatum values
#Returns the MDL @classes of the expected partition of the set of samples by values of metadata
def computeClasses(dataArray,metadata):
#@clustersOneMetadatum[i] is a list of classes according to the value of metadatum @metadata[i]
#@valueSets[i] is the set of (known) values of @metadata[i]
clustersOneMetadatum,valueSets = [],[]
for metadatum in metadata:
valueSet,classes = partitionSampleByMetadatumValue(metadatum,dataArray[1],dataArray[0])
if not valueSet:
print "\n/!\ ERROR: metadatum",metadatum,"having abnormal values."
raise ValueError
valueSets.append(valueSet)
clustersOneMetadatum.append(classes)
shape = []
n = len(valueSets)
for valueSetID in range(n):
shape.append(len(valueSets[valueSetID]))
#Initializing the list with empty classes
classes = MultiDimList([],shape)
#@classes is a list containing partition of the samples ID according to the value of the metadata
#@dataArray[3] = sampleList
for sample in dataArray[3]:
#path to the class of this sample in @classes
dimList = []
#n = len(valueSets) = len(clustersOneMetadatum) = len(metadata) = len(shape)
for clustersID in range(n):
i = 0
            while i < shape[clustersID] and not (sample in clustersOneMetadatum[clustersID][i]):
i += 1
if i == shape[clustersID]:
#Sample not in partition: must have an unknown value
print "Sample",sample,"not in partition."
else:
dimList.append(i)
#Assigns the sample to its corresponding class
previousClass = classes.accessMDL(dimList)
classes = classes.modifyMDL(dimList,previousClass + [sample])
return classes,valueSets
#______________________________________________________________________________________________________
#Training step #1: selects a random subset of the set of features vectors (samples)
#knuth=True uses Knuth's algorithm S, knuth=False uses Algorithm R
def selectTrainingSample(dataArray,n,knuth=False):
#@dataArray[3] = sampleList
trainSubset,unchosen = randomChoice(dataArray[3],n,knuth)
return trainSubset,unchosen
#______________________________________________________________________________________________________
#Training step #2: according to the values of metadata, assigns a class to each sample of the training subset ONLY
#@classes (see @computeClasses) is the known partition of the whole set of samples ID, that will be useful to
#compute the Youden's J coefficient
#returns @assignedClasses that is the partial partition of the set of samples restricted to the samples in @trainSubset
def assignClass(trainSubset,classes):
classLength = classes.mapIntoListMDL(len)
assignedClasses = MultiDimList([],classes.shape)
for sampleID in trainSubset:
dimList = classes.searchMDL(sampleID)
#if sampleID is in @classes
if dimList:
#assigns sampleID in the corresponding class
            previousClass = assignedClasses.accessMDL(dimList)
            assignedClasses = assignedClasses.modifyMDL(dimList,previousClass + [sampleID])
return assignedClasses
#______________________________________________________________________________________________________
#Training step #3: computes the prior probability (also called posterior probability)
#of a certain node n of being in the whole training subset using Bayesian average (to deal with zero probabilities)
#Computes mean for a list of integer values
def computeMean(vList):
n = len(vList)
s = 0
for v in vList:
s += v
return 1/n*s
#Returns an array @probList such as @probList[i] is the probability of having node @nodesList[i]
#See report at section about Bayesian average for formula below
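#Illustrative arithmetic (added here, not from the report): with numberSstart = 4
#samples and a node present in exactly one of them, M = 0.25 and a presence row of
#[1,0,0,0] gives v = sqrt(0.5625 + 3*0.0625) ~ 0.866, c = 4/(2*(v+1)) ~ 1.07 and
#m = c/4, so the Bayesian-averaged probability (c*m + 1)/(c + 4) ~ 0.25 -- strictly
#between 0 and 1 even for counts of 0 or numberSstart, which is the point of the
#correction.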
def getPriorProbability(nodesList,trainSubset,dataArray):
probList = []
#The number of nodes being both in @nodesList and in the matching lists of samples in the training set
numberNodesInTrainSubset = 0
numberNodes = len(nodesList)
numberSstart = len(trainSubset)
#matchingNodes = @dataArray[4] is a dictionary of (key=name of sample,value=list of nodes matching in sample) pairs
n = len(dataArray[4])
#@nodesPresence is a list such as @nodesPresence[i][j] = 1 if node nodesList[i] matches in sample matchingNodes[j][0]
#@dataArray[8] = @matchingNodes
    nodesPresence = [[0]*n for _ in range(numberNodes)]
#@nodesPositive is a list such as @nodesPositive[i] is the number of samples in the training subset containing node @nodesList[i]
nodesPositive = [0]*numberNodes
    j = 0
    for sample in trainSubset:
nodesSampleList = dataArray[4].get(sample)
i = 0
for node in nodesList:
nodesPresence[i][j] = int((node in nodesSampleList))
#if @nodesPresence[i][j] == 1
if nodesPresence[i][j]:
nodesPositive[i] += 1
numberNodesInTrainSubset += 1
            i += 1
        j += 1
for i in range(numberNodes):
M = nodesPositive[i]/numberSstart
v = 0
for j in range(numberSstart):
v += (nodesPresence[i][j]-M)*(nodesPresence[i][j]-M)
v = np.sqrt(v)
c = numberSstart/(2*(v+1))
m = c/numberSstart
probList.append((c*m+nodesPositive[i])/(c+numberSstart))
return probList,nodesPresence
#Returns @classes, which is the partition of the whole set of samples according to the values of metadatum
#and @assignedClasses the partial partition of the training subset of samples
#and @valuesClasses is the list of lists of (expectation,standard deviation) pairs for each node considered
#and @unchosen is the set of remaining samples to cluster
def trainingPart(dataArray,metadatum,nodesList,numberStartingSamples):
n = len(nodesList)
classes,valueSets = computeClasses(dataArray,metadatum)
trainSubset,unchosen = selectTrainingSample(dataArray,numberStartingSamples)
probList,nodesPresence = getPriorProbability(nodesList,trainSubset,dataArray)
assignedClasses = assignClass(trainSubset,classes)
return classes,valueSets,assignedClasses,unchosen,probList,nodesPresence
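#Illustrative usage (the metadatum name is assumed for the example only):
# classes,valueSets,assigned,unchosen,probList,nodesPresence = trainingPart(dataArray,["habitat"],nodesList,30)
#probList and nodesPresence can then be used to score the samples left in @unchosen.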
| {
"content_hash": "c3bd2374bad28f738640a99aed826f76",
"timestamp": "",
"source": "github",
"line_count": 137,
"max_line_length": 133,
"avg_line_length": 47.67883211678832,
"alnum_prop": 0.6722290263319045,
"repo_name": "kuredatan/taxoclassifier",
"id": "e1afeb65521ae1afa79a810d1fe4e3b56d86f35c",
"size": "6532",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "training.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "52677"
},
{
"name": "Shell",
"bytes": "922"
}
],
"symlink_target": ""
} |
import os
import sys
import ray
from ray.streaming import StreamingContext
def test_union_stream():
ray.init(job_config=ray.job_config.JobConfig(code_search_path=sys.path))
ctx = StreamingContext.Builder() \
.option("streaming.metrics.reporters", "") \
.build()
sink_file = "/tmp/test_union_stream.txt"
if os.path.exists(sink_file):
os.remove(sink_file)
def sink_func(x):
with open(sink_file, "a") as f:
print("sink_func", x)
f.write(str(x))
stream1 = ctx.from_values(1, 2)
stream2 = ctx.from_values(3, 4)
stream3 = ctx.from_values(5, 6)
stream1.union(stream2, stream3).sink(sink_func)
ctx.submit("test_union_stream")
import time
slept_time = 0
while True:
if os.path.exists(sink_file):
time.sleep(3)
with open(sink_file, "r") as f:
result = f.read()
print("sink result", result)
assert set(result) == {"1", "2", "3", "4", "5", "6"}
print("Execution succeed")
break
if slept_time >= 60:
raise Exception("Execution not finished")
slept_time = slept_time + 1
print("Wait finish...")
time.sleep(1)
ray.shutdown()
if __name__ == "__main__":
test_union_stream()
| {
"content_hash": "7d9acb8b6077f21476236800bf8ff0b9",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 76,
"avg_line_length": 27.6875,
"alnum_prop": 0.5568096313017307,
"repo_name": "pcmoritz/ray-1",
"id": "bab75e624ac00ea46e64b3020e0e5bfc5467847a",
"size": "1329",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "streaming/python/tests/test_union_stream.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "70670"
},
{
"name": "C++",
"bytes": "4670851"
},
{
"name": "CSS",
"bytes": "10912"
},
{
"name": "Dockerfile",
"bytes": "14159"
},
{
"name": "HTML",
"bytes": "30414"
},
{
"name": "Java",
"bytes": "1338604"
},
{
"name": "JavaScript",
"bytes": "914"
},
{
"name": "Jupyter Notebook",
"bytes": "1615"
},
{
"name": "Makefile",
"bytes": "234"
},
{
"name": "Python",
"bytes": "10523389"
},
{
"name": "Shell",
"bytes": "117557"
},
{
"name": "Smarty",
"bytes": "239"
},
{
"name": "Starlark",
"bytes": "238506"
},
{
"name": "TypeScript",
"bytes": "259269"
}
],
"symlink_target": ""
} |
import uasyncio as asyncio
from gui.core.lcd160_gui import Screen, IFont
from gui.core.constants import *
from gui.widgets.buttons import Button, RadioButtons
from gui.widgets.label import Label
from gui.widgets.textbox import Textbox
import font10
import font6
font3 = IFont(3)
from lcd_local import setup
# **** STANDARD BUTTON TYPES ****
def quitbutton():
def quit(button):
Screen.shutdown()
Button((109, 107), font = font10, callback = quit, fgcolor = RED, text = 'Quit')
def fwdbutton(x, y, cls_screen, text='Next'):
def fwd(button):
Screen.change(cls_screen)
Button((x, y), font = font10, callback = fwd, fgcolor = RED,
text = text, shape = RECTANGLE)
def backbutton():
def back(button):
Screen.back()
Button((109, 107), font = font10, fontcolor = BLACK, callback = back,
fgcolor = CYAN, text = 'Back', shape = RECTANGLE)
# **** STANDARDISE CONTROLS ACROSS SCREENS ****
# Appearance for Textbox instances
tbargs = {'fontcolor' : GREEN,
'fgcolor' : RED,
'bgcolor' : DARKGREEN,
'repeat' : False,
}
# Appearance for buttons
btntable = {'fgcolor' : LIGHTBLUE,
'font' : font10,
'width' : 50,
'litcolor' : GREEN,
}
# Appearance for labels
labels = {'fontcolor' : WHITE,
'border' : 2,
'fgcolor' : RED,
'bgcolor' : DARKGREEN,
'font' : font10,
}
# **** NEXT SCREEN CLASS ****
# Fast populate a textbox
def populate(button, tb):
s = '''The textbox displays multiple lines of text in a field of fixed dimensions. \
Text may be clipped to the width of the control or may be word-wrapped. If the number \
of lines of text exceeds the height available, scrolling may be performed, either \
by calling a method or by touching the control.
'''
tb.append(s, ntrim = 100, line = 0)
def clear(button, tb):
tb.clear()
class FastScreen(Screen):
def __init__(self, clip):
super().__init__()
backbutton()
tb = Textbox((0, 0), 159, 7, font=font3, clip=clip, tab=64, **tbargs)
Button((0, 107), text = 'Fill', callback = populate, args = (tb,), **btntable)
Button((54, 107), text = 'Clear', callback = lambda b, tb: tb.clear(), args = (tb,), **btntable)
# **** TAB SCREEN CLASS ****
def pop_tabs(button, tb):
s = '''x\t100\t1
alpha\t173\t251
beta\t9184\t876
gamma\t929\t0
abc\tdef\tghi
tabs are text tabs, not decimal tabs.
'''
tb.append(s)
def clear(button, tb):
tb.clear()
class TabScreen(Screen):
def __init__(self, clip):
super().__init__()
backbutton()
tb = Textbox((0, 0), 159, 7, font=font6, clip=clip, tab=50, **tbargs)
Button((0, 107), text = 'Fill', callback = pop_tabs, args = (tb,), **btntable)
Button((54, 107), text = 'Clear', callback = lambda b, tb: tb.clear(), args = (tb,), **btntable)
# **** MAIN SCREEEN CLASS ****
# Coroutine slowly populates a text box
async def txt_test(textbox, btns):
phr0 = ('short', 'longer line', 'much longer line with spaces',
'antidisestablishmentarianism', 'with\nline break')
for n in range(len(phr0)):
textbox.append('Test {:3d} {:s}'.format(n, phr0[n]), 15)
await asyncio.sleep(1)
for n in range(n, 15):
textbox.append('Scroll test {:3d}'.format(n), 15)
await asyncio.sleep(1)
if isinstance(btns, tuple):
for btn in btns:
btn.greyed_out(False)
# Callback for scroll buttons
def btn_cb(button, tb, n):
tb.scroll(n)
class SScreen(Screen):
def __init__(self, clip):
super().__init__()
backbutton()
tb = Textbox((0, 0), 159, 7, font=font3, clip=clip, **tbargs)
btns = (Button((0, 107), text = 'Up', callback = btn_cb, args = (tb, 1), **btntable),
Button((54, 107), text = 'Down', callback = btn_cb, args = (tb, -1), **btntable))
for btn in btns:
btn.greyed_out(True) # Disallow until textboxes are populated
self.reg_task(txt_test(tb, btns))
# **** BASE SCREEN ****
class BaseScreen(Screen):
def __init__(self):
super().__init__()
table_highlight = [
{'text' : 'Slow', 'args' : (SScreen, False)},
{'text' : 'Fast', 'args' : (FastScreen, False)},
{'text' : 'Tabs', 'args' : (TabScreen, False)},
{'text' : 'Clip', 'args' : (SScreen, True)},
]
quitbutton()
def rbcb(button, screen, clip): # RadioButton callback
Screen.change(screen, args=(clip,))
rb = RadioButtons(BLUE, rbcb) # color of selected button
y = 0
for t in table_highlight:
rb.add_button((0, y), font = font10, fgcolor = DARKBLUE,
onrelease = False, width = 80, fontcolor = WHITE, **t)
y += 25
def test():
print('''Main screen populates text box slowly to show
wrapping in action. "Fast" screen
shows fast updates using internal font. "Tab"
screen shows use of tab characters with Python
font.
Text boxes may be scrolled by touching them near
the top or bottom.''')
setup()
Screen.change(BaseScreen)
test()
| {
"content_hash": "31a3c4b5cce46c615254c95bcbcb772c",
"timestamp": "",
"source": "github",
"line_count": 169,
"max_line_length": 104,
"avg_line_length": 31.047337278106507,
"alnum_prop": 0.5829998094149037,
"repo_name": "peterhinch/micropython-lcd160cr-gui",
"id": "b1840d995480cd84320b2b9efd85c2881e96febb",
"size": "5372",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gui/demos/tbox.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "193374"
}
],
"symlink_target": ""
} |
'''
--------------------------------------------------------------------
This file is part of the raster object recognition project.
https://github.com/woodbri/raster-object-recognition
MIT License. See LICENSE file for details.
Copyright 2017, Stephen Woodbridge
--------------------------------------------------------------------
'''
import os
import sys
import re
import subprocess
import psycopg2
from config import *
DEVNULL = open(os.devnull, 'w')
def unique(seq, idfun=None):
# order preserving
if idfun is None:
def idfun(x): return x
seen = {}
result = []
for item in seq:
marker = idfun(item)
# in old Python versions:
# if seen.has_key(marker)
# but in new ones:
if marker in seen: continue
seen[marker] = 1
result.append(item)
return result
def runCommand(cmd, verbose):
if verbose:
print ' '.join( cmd )
subprocess.call( cmd )
else:
subprocess.call( cmd, stdout=DEVNULL, stderr=subprocess.STDOUT )
def getDatabase():
try:
conn = psycopg2.connect( CONFIG['dsn'] )
except:
print "ERROR: failed to connect to database '%s'" % ( CONFIG['dbname'] )
sys.exit(1)
conn.set_session(autocommit=True)
cur = conn.cursor()
return ( conn, cur )
def parseBBOX(bbox):
bbox = bbox.strip("BOX() ")
tmp = bbox.split(',')
(xmin, ymin) = tmp[0].split()
(xmax, ymax) = tmp[1].split()
return [float(xmin), float(ymin), float(xmax), float(ymax)]
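# Illustrative only: parseBBOX('BOX(-122.6 45.4,-122.5 45.5)') returns
# [-122.6, 45.4, -122.5, 45.5], i.e. [xmin, ymin, xmax, ymax].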
def getBboxFromFIPS(cur, fips):
if len(fips) == 2:
sql = "select Box2D(st_extent(geom)) from census.county where statefp='%s'" % (fips)
elif len(fips) == 5:
sql = "select Box2D(st_extent(geom)) from census.county where geoid='%s'" % ( fips )
elif len(fips) == 10:
sql = "select Box2D(st_extent(geom)) from census.cousub where geoid='%s'" % ( fips )
else:
print "ERROR: fips code must be ss[ccc[bbbbb]]!"
sys.exit(2)
cur.execute( sql )
row = cur.fetchone()
return parseBBOX(row[0])
def getDoqqFiles( cur, bbox ):
table = CONFIG.get('naip.shptable', '')
doqqs = CONFIG.get('naip.doqq_dir', '')
if table == '' or doqqs == '':
print "ERROR: naip.shptable: '%s' or naip.doqq_dir: '%s' are not set in config.py!"
sys.exit(1)
polygon = 'POLYGON((%f %f,%f %f,%f %f,%f %f,%f %f))' % (bbox[0], bbox[1], bbox[0], bbox[3], bbox[2], bbox[3], bbox[2], bbox[1], bbox[0], bbox[1])
sql = "select filename from %s where geom && st_setsrid('%s'::geometry, 4326)" % ( table, polygon )
cur.execute( sql )
rows = cur.fetchall()
if len(rows) == 0: return []
files = []
for row in rows:
filename = row[0]
name = filename[:26] + '.tif'
subdir = filename[2:7]
        f = os.path.join( doqqs, subdir, name )
files.append(f)
return files
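# Illustrative only (pattern implied by the slicing above): a row whose filename
# starts 'm_3008601_ne_16_1_20150804' maps to subdir '30086' (characters 2-6) and
# the on-disk name 'm_3008601_ne_16_1_20150804.tif' (first 26 characters + '.tif')
# under naip.doqq_dir.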
def getStatesCountiesFromBbox( cur, bbox ):
polygon = 'POLYGON((%f %f,%f %f,%f %f,%f %f,%f %f))' % (bbox[0], bbox[1], bbox[0], bbox[3], bbox[2], bbox[3], bbox[2], bbox[1], bbox[0], bbox[1])
try:
conn = psycopg2.connect( CONFIG['dsn'] )
except:
print "ERROR: failed to connect to database!"
sys.exit(1)
cur = conn.cursor()
sql = "select geoid from census.county where geom && st_setsrid('%s'::geometry, 4326)" % ( polygon )
cur.execute( sql )
states = []
counties = []
for row in cur:
states.append( row[0][:2] )
counties.append( row[0] )
states = unique(states)
counties = unique(counties)
return (states, counties)
def loadZippedShape( table, path, re_zipfile, geomType ):
# create a tmp dir
tmp = os.path.join( CONFIG.get('tmpdir', path), 'tmp-'+str(os.getpid()) )
if not os.path.exists( tmp ):
os.makedirs(tmp)
verbose = CONFIG.get('verbose', False)
dsn = 'PG:' + CONFIG['dsn'] + ' active_schema=census'
ogr_opts = ['-overwrite', '-lco', 'OVERWRITE=YES', '-lco', 'PRECISION=NO',
'-lco', 'GEOMETRY_NAME=geom', '-lco', 'FID=gid']
# gdal/ogr 1.10 does not drop the tables with -overwrite
# so we will do it like this.
conn, cur = getDatabase()
cur.execute( 'drop table if exists %s cascade' % ( table ) )
conn.commit()
conn.close()
first = True
for root, dirs, files in os.walk( path ):
for f in files:
if not re.match( re_zipfile, f ): continue
# unzip it to tmp
cmd = ['unzip', '-q', '-d', tmp, '-j', os.path.join( root, f )]
if verbose:
print ' '.join( cmd )
subprocess.call( cmd )
for root2, dirs2, files2 in os.walk( tmp ):
for shpfile in files2:
if not re.match( r'.*.shp$', shpfile ): continue
# ogr2ogr to load it
cmd = ['ogr2ogr', '-t_srs', 'EPSG:4326', '-nln', table,
'-nlt', geomType,
'-f', 'PostgreSQL'] + ogr_opts + \
[ dsn, os.path.join( root2, shpfile) ]
if verbose:
print ' '.join( cmd )
subprocess.call( cmd )
if first:
first = False
ogr_opts = [ '-append' ]
# remove files in tmp
for root2, dirs2, files2 in os.walk( tmp ):
for f2 in files2:
os.remove( os.path.join( root2, f2 ) )
# remove tmp dir
os.rmdir( tmp )
| {
"content_hash": "d52bd3b335ae990598e2dbb7db034806",
"timestamp": "",
"source": "github",
"line_count": 206,
"max_line_length": 149,
"avg_line_length": 27.398058252427184,
"alnum_prop": 0.5249822820694543,
"repo_name": "woodbri/raster-object-recognition",
"id": "10609b29df4b8ff696181425f3dd560320e0e95d",
"size": "5644",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "src/ror/utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "103246"
},
{
"name": "Shell",
"bytes": "2232"
}
],
"symlink_target": ""
} |
import logging
import os
import random
import time
from webkitpy.port.server_process import ServerProcess
from webkitpy.port.driver import Driver
_log = logging.getLogger(__name__)
class WestonDriver(Driver):
@staticmethod
def check_driver(port):
        weston_found = port.host.executive.run_command(['which', 'weston'], return_exit_code=True) == 0
if not weston_found:
_log.error("No weston found. Cannot run layout tests.")
return weston_found
def __init__(self, *args, **kwargs):
Driver.__init__(self, *args, **kwargs)
self._startup_delay_secs = 1.0
def _start(self, pixel_tests, per_test_args):
self.stop()
driver_name = self._port.driver_name()
self._driver_directory = self._port.host.filesystem.mkdtemp(prefix='%s-' % driver_name)
weston_socket = 'WKTesting-weston-%032x' % random.getrandbits(128)
weston_command = ['weston', '--socket=%s' % weston_socket, '--width=800', '--height=600']
with open(os.devnull, 'w') as devnull:
self._weston_process = self._port.host.executive.popen(weston_command, stderr=devnull)
# Give Weston a bit of time to set itself up.
time.sleep(self._startup_delay_secs)
driver_environment = self._port.setup_environ_for_server(driver_name)
driver_environment['LOCAL_RESOURCE_ROOT'] = self._port.layout_tests_dir()
# Currently on WebKit2, there is no API for setting the application cache directory.
# Each worker should have its own and it should be cleaned afterwards, when the worker stops.
driver_environment['XDG_CACHE_HOME'] = self._ensure_driver_tmpdir_subdirectory('appcache')
driver_environment['DUMPRENDERTREE_TEMP'] = self._ensure_driver_tmpdir_subdirectory('drt-temp')
driver_environment['WAYLAND_DISPLAY'] = weston_socket
driver_environment['GDK_BACKEND'] = 'wayland'
if driver_environment.get('DISPLAY'):
del driver_environment['DISPLAY']
self._crashed_process_name = None
self._crashed_pid = None
self._server_process = self._port._server_process_constructor(self._port, driver_name, self.cmd_line(pixel_tests, per_test_args), driver_environment)
self._server_process.start()
def stop(self):
super(WestonDriver, self).stop()
if getattr(self, '_weston_process', None):
# The Weston process is terminated instead of killed, giving the Weston a chance to clean up after itself.
self._weston_process.terminate()
self._weston_process = None
if getattr(self, '_driver_directory', None):
self._port.host.filesystem.rmtree(str(self._driver_directory))
def _ensure_driver_tmpdir_subdirectory(self, subdirectory):
assert getattr(self, '_driver_directory', None)
directory_path = self._port.host.filesystem.join(str(self._driver_directory), subdirectory)
self._port.host.filesystem.maybe_make_directory(directory_path)
return directory_path
| {
"content_hash": "923af9c04dc64a780aadd88b7044ec2b",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 157,
"avg_line_length": 44.333333333333336,
"alnum_prop": 0.6655769859431186,
"repo_name": "klim-iv/phantomjs-qt5",
"id": "7fdffd3a12dbea92c74aa6c789c540bca8e0a0bd",
"size": "4585",
"binary": false,
"copies": "5",
"ref": "refs/heads/qt5",
"path": "src/webkit/Tools/Scripts/webkitpy/port/westondriver.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "291913"
},
{
"name": "Awk",
"bytes": "3965"
},
{
"name": "C",
"bytes": "42431954"
},
{
"name": "C++",
"bytes": "128347641"
},
{
"name": "CSS",
"bytes": "778535"
},
{
"name": "CoffeeScript",
"bytes": "46367"
},
{
"name": "D",
"bytes": "1931"
},
{
"name": "Emacs Lisp",
"bytes": "8191"
},
{
"name": "IDL",
"bytes": "191984"
},
{
"name": "Java",
"bytes": "232840"
},
{
"name": "JavaScript",
"bytes": "16127527"
},
{
"name": "Objective-C",
"bytes": "10465655"
},
{
"name": "PHP",
"bytes": "1223"
},
{
"name": "Perl",
"bytes": "1296504"
},
{
"name": "Python",
"bytes": "5916339"
},
{
"name": "Ruby",
"bytes": "381483"
},
{
"name": "Shell",
"bytes": "1005210"
},
{
"name": "Smalltalk",
"bytes": "1308"
},
{
"name": "VimL",
"bytes": "3731"
},
{
"name": "XSLT",
"bytes": "50637"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import pytest
from .. import idfn, assert_eq, assert_in
from rakuten_ws.compat import callable
def assert_response(params, result, callback=None):
assert 'error' not in result, result['error_description']
assert len(result['Items']) > 0
if callable(callback):
callback(params, result, result['Items'][0])
total_search_parameters = [
({'keyword': "ドン・キホーテ"}, lambda p, r, i: assert_in(p['keyword'], i['title'])),
# The booksGenreId returned is '001025001'
({'booksGenreId': '001025'}, lambda p, r, i: assert_eq(i['booksGenreId'].startswith(p['booksGenreId']), True)),
({'isbnjan': '9784088820545'}, lambda p, r, i: assert_eq(i['isbn'], p['isbnjan'])),
({'isbnjan': '4988021149594'}, lambda p, r, i: assert_eq(i['jan'], p['isbnjan'])),
({'booksGenreId': '001025', 'page': '2'}, lambda p, r, i: assert_eq(r['page'], 2)),
({'availability': '1'}, lambda p, r, i: assert_eq(i['availability'], p['availability'])),
({'availability': '3'}, lambda p, r, i: assert_eq(i['availability'], p['availability'])),
({'outOfStockFlag': '0'}, lambda p, r, i: assert_eq(i['availability'], '1')),
({'chirayomiFlag': '1'}, lambda *args: True),
({'sort': 'standard'}, lambda *args: True),
({'sort': 'sales'}, lambda *args: True),
({'sort': '+itemPrice'}, lambda *args: True),
({'sort': '-itemPrice'}, lambda *args: True),
({'limitedFlag': 1}, lambda *args: True),
({'field': '1'}, lambda *args: True),
({'carrier': '1'}, lambda *args: True),
({'genreInformationFlag': 1}, lambda p, r, i: assert_in('GenreInformation', r)),
]
@pytest.mark.parametrize('params,check', total_search_parameters, ids=idfn)
def test_total_search(ws, params, check):
params.update({'hits': 3})
if all(key not in params for key in ('keyword', 'booksGenreId', 'isbnjan')):
params.update({'keyword': 'One Piece'})
assert_response(params, ws.books.total.search(**params), callback=check)
book_search_parameters = [
({'title': "ワンピース"}, lambda p, r, i: assert_in(p['title'], i['titleKana'])),
({'author': "尾田・栄一郎"}, lambda p, r, i: assert_eq(i['author'], i['author'])),
({'isbn': "9784088701752"}, lambda p, r, i: assert_in(p['isbn'], i['isbn'])),
({'size': "9"}, lambda p, r, i: assert_in(i['size'], 'コミック')),
({'sort': 'standard'}, lambda p, r, i: assert_eq(len(r['Items']), p['hits'])),
({'sort': 'sales'}, lambda p, r, i: assert_eq(len(r['Items']), p['hits'])),
({'sort': '+releaseDate'}, lambda p, r, i: assert_eq(len(r['Items']), p['hits'])),
({'sort': '-releaseDate'}, lambda p, r, i: assert_eq(len(r['Items']), p['hits'])),
({'sort': '+itemPrice'}, lambda p, r, i: assert_eq(len(r['Items']), p['hits'])),
({'sort': '-itemPrice'}, lambda p, r, i: assert_eq(len(r['Items']), p['hits'])),
({'sort': 'reviewCount'}, lambda p, r, i: assert_eq(len(r['Items']), p['hits'])),
({'sort': 'reviewAverage'}, lambda p, r, i: assert_eq(len(r['Items']), p['hits'])),
({'booksGenreId': '001001001008'}, lambda p, r, i: assert_eq(i['booksGenreId'], p['booksGenreId'])),
({'elements': 'title,author'}, lambda p, r, i: assert_eq(set(i.keys()), set(p['elements'].split(',')))),
]
@pytest.mark.parametrize('params,check', book_search_parameters, ids=idfn)
def test_book_search(ws, params, check):
params.update({'hits': 3})
assert_response(params, ws.books.book.search(**params), callback=check)
def test_cd_search(ws):
params = {'artistName': "Speed", 'hits': 3}
assert_response(params, ws.books.cd.search(**params))
def test_dvd_search(ws):
params = {'title': "ワンピース", 'hits': 3}
assert_response(params, ws.books.dvd.search(**params))
def test_foreign_book_search(ws):
params = {'author': "cervantes", 'hits': 3}
assert_response(params, ws.books.foreign_book.search(**params))
def test_magazine_search(ws):
params = {'title': "ファミ通", 'hits': 3}
assert_response(params, ws.books.magazine.search(**params))
def test_game_search(ws):
params = {'title': "mario", 'hits': 3}
assert_response(params, ws.books.game.search(**params))
def test_software_search(ws):
params = {'os': "windows", 'hits': 3}
assert_response(params, ws.books.software.search(**params))
def test_genre_search(ws):
params = {'booksGenreId': "001", 'hits': 3}
result = ws.books.genre.search(**params)
assert len(result["children"]) > 0
assert len(result["parents"]) == 0
| {
"content_hash": "6e6ed32681032627ec41c9137796e34e",
"timestamp": "",
"source": "github",
"line_count": 104,
"max_line_length": 115,
"avg_line_length": 43.06730769230769,
"alnum_prop": 0.6068318821165438,
"repo_name": "alexandriagroup/rakuten-ws",
"id": "4af43f4596e7d4975d189f0bb04365c37c0bf6fb",
"size": "4557",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/webservice/test_book.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "3127"
},
{
"name": "Python",
"bytes": "104602"
}
],
"symlink_target": ""
} |
"""
Copyright (c) 2009 John Markus Bjoerndalen <[email protected]>,
Brian Vinter <[email protected]>, Rune M. Friborg <[email protected]>.
See LICENSE.txt for licensing details (MIT License).
"""
from pycsp_import import *
# Based on the exercise q7.occ from the source distribution of kroc-1.4
#
# This is an example, showing how to do the equivalent in python using PyCSP
#
@process
def philosopher(id, left, right, down, up):
try:
eat = 0
while True:
# think
# Skip
# get permission to sit down
down(True)
# pick up the forks (left and right)
FairSelect(
OutputGuard(left, msg=True, action="right(True)"),
OutputGuard(right, msg=True, action="left(True)")
)
# eat
eat += 1
# put down the forks (left and right)
FairSelect(
OutputGuard(left, msg=True, action="right(True)"),
OutputGuard(right, msg=True, action="left(True)")
)
# notify security you have finished
up(True)
except ChannelRetireException:
print('philosopher '+str(id)+' has eaten '+str(eat)+' times')
retire(left, right)
@process
def fork(left, right):
while True:
FairSelect(
# philosopher left picks up fork
# philosopher left puts down fork
InputGuard(left, "left()"),
# philosopher right picks up fork
# philosopher right puts down fork
InputGuard(right, "right()")
)
@process
def security(steps, down, up):
max = 4
n_sat_down = [0] # use call by reference
for step in range(steps):
guards = []
if n_sat_down[0] < max: # don't allow max at a time
for i in range(5):
# philosopher wanting to sit down
guards.append(InputGuard(down[i], action="n_sat_down[0] += 1"))
for i in range(5):
# philosopher wanting to stand up
# always allow this
guards.append(InputGuard(up[i], action="n_sat_down[0] -= 1"))
FairSelect(*guards)
retire(*down)
retire(*up)
@process
def secure_college(steps):
left = Channel() * 5
right = Channel() * 5
up = Channel() * 5
down = Channel() * 5
Parallel(
security(steps, [d.reader() for d in down] , [u.reader() for u in up]),
[philosopher(i, left[i].writer(), right[i].writer(), down[i].writer(), up[i].writer()) for i in range(5)],
[fork(left[i].reader(), right[(i+1) % 5].reader()) for i in range(5)]
)
Sequence(secure_college(1000))
shutdown()
| {
"content_hash": "259274a08b74193d95cf53fce85178b3",
"timestamp": "",
"source": "github",
"line_count": 99,
"max_line_length": 114,
"avg_line_length": 28.636363636363637,
"alnum_prop": 0.5301587301587302,
"repo_name": "runefriborg/pycsp",
"id": "821a3e91e11d9af050b61e61ba42498e713f8e6a",
"size": "2835",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/Dining-Phil.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "309"
},
{
"name": "Makefile",
"bytes": "1020"
},
{
"name": "Python",
"bytes": "373060"
},
{
"name": "Shell",
"bytes": "531"
}
],
"symlink_target": ""
} |
def linkEndFromDefCode(defCode):
"""
    returns a Param representing the entry on a link into a NodeType. This allows the Nodes to have their own internal names
    for their inputs and outputs that are separate from the names of Links. The types should match, though!
"""
t=defCode.split()
name=t[-1]
type=" ".join(t[:-1])
return Param(name,type)
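# For example (illustrative, not from the original): linkEndFromDefCode("float4 normal")
# returns Param(name="normal", type="float4").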
def shaderParamFromDefCode(defCode):
"""
example usage:
shaderParam=shaderParamFromDefCode("uniform sampler2D k_grassData: TEXUNIT0")
shaderParam=shaderParamFromDefCode("float4 o_color")
"""
i=defCode.find(':')
if i==-1:
semantic=None
t=defCode.split()
else:
semantic=defCode[i+1:].strip()
t=defCode[:i].split()
name=t[-1]
type=" ".join(t[:-1])
return ShaderParam(name,type,semantic)
class Param(object):
def __init__(self,name,type):
self.name=name
self.type=type
def getName(self): return self.name
def getType(self): return self.type
def __repr__(self): return self.__class__.__name__+"("+self.name+", "+self.type+")"
def __str__(self): return self.type+" "+self.name
def __hash__(self):
return hash(self.name)^hash(self.type)
def __eq__(self,other):
return self.__class__==other.__class__ and self.name==other.name and self.type==other.type
class ShaderParam(Param):
def __init__(self,name,type,semantic=None):
Param.__init__(self,name,type)
self.semantic=semantic
def getSemantic(self): return self.semantic
def getDefCode(self): return self.type+" "+self.name+((" : "+self.semantic) if self.semantic else "")
def __eq__(self,other):
return Param.__eq__(self,other) and self.semantic==other.semantic
def getShortType(self): return self.type.split()[-1]
def __str__(self):
s=Param.__str__(self)
if self.semantic:
return s+" : "+self.semantic
else:
return s
class ShaderInput(ShaderParam): pass
class ShaderOutput(ShaderParam): pass
| {
"content_hash": "ab95eb5437aee81c0dca1c5a748dbd62",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 124,
"avg_line_length": 32.04615384615385,
"alnum_prop": 0.6178588574171867,
"repo_name": "Craig-Macomber/Panda3D-Shader-Generator",
"id": "723c81423d84ff79f86e8ea915557c3bfb8dfb75",
"size": "2083",
"binary": false,
"copies": "1",
"ref": "refs/heads/v3",
"path": "param.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "54574"
}
],
"symlink_target": ""
} |
import os
import sys
import contextlib
import subprocess
import argparse
import smtplib
from datetime import datetime
def sendEmail(starttime, usr, pw, fromaddr, toaddr, i, idx_start, idx_end):
"""
Sends an email message through Gmail once the script is completed.
Developed to be used with AWS so that instances can be terminated
once a long job is done. Only works for those with Gmail accounts.
starttime : a datetime() object for when to start run time clock
usr : the Gmail username, as a string
    pw : the Gmail password, as a string
    fromaddr : the email address the message will be from, as a string
    toaddr : an email address, or a list of addresses, to send the
message to
"""
# calculate runtime
runtime = datetime.now() - starttime
# initialize SMTP server
server = smtplib.SMTP('smtp.gmail.com', 587)
server.ehlo()
server.starttls()
server.login(usr, pw)
# send email
send_date = datetime.strftime(datetime.now(), '%Y-%m-%d')
subject = 'AWS Process Complete'
body = 'The process number ' + str(i) + ' (' + str(i - idx_start + 1) + ' of ' + str(idx_end - idx_start + 1) + ') has completed.'
email_text = """\
Date: %s
From: %s
To: %s
Subject: %s
%s
Elapsed time: %s
""" % (send_date, fromaddr, toaddr, subject, body, str(runtime))
server.sendmail(fromaddr, toaddr, email_text)
server.close()
print('Email sent!')
parser = argparse.ArgumentParser()
parser.add_argument('iterations', help='number of iterations')
parser.add_argument('inputpath', help='path to data folder')
parser.add_argument('outputpath', help='path to output folder')
args = parser.parse_args()
filepath = os.path.dirname(args.inputpath)
fileoutpath = os.path.dirname(args.outputpath)
iterations = int(args.iterations)
os.makedirs(fileoutpath, exist_ok=True)
N = iterations // 100
gmail_user = '[email protected]'
gmail_password = 'put password here'
filename = 'init_args.rds'
starttime = datetime.now()
subprocess.call(['Rscript', '02-createdata.R', filepath, str(15), str(50000)])
subprocess.call(['Rscript', '03-fitdamped.R', filepath, filename, fileoutpath, str(iterations)])
sendEmail(starttime, gmail_user, gmail_password, '[email protected]', '[email protected]', 1, 1, 1)
| {
"content_hash": "76669ee09bd7d8335cdf2b6e82ac1660",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 134,
"avg_line_length": 29.4,
"alnum_prop": 0.6794217687074829,
"repo_name": "ensley/thesis",
"id": "bab727aaa98f3b2c81ebb28d5c85eece1eda5cec",
"size": "2352",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "realdata/script-fitdamped.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "36794"
},
{
"name": "Cuda",
"bytes": "2990"
},
{
"name": "HTML",
"bytes": "3746100"
},
{
"name": "M4",
"bytes": "862"
},
{
"name": "PostScript",
"bytes": "454526"
},
{
"name": "Python",
"bytes": "62254"
},
{
"name": "R",
"bytes": "224857"
},
{
"name": "Shell",
"bytes": "2085"
},
{
"name": "TeX",
"bytes": "247498"
}
],
"symlink_target": ""
} |
import csv
import os
from os.path import join
import sys
filePath1 = str(sys.argv[1])
filePath2 = str(sys.argv[2])
filePath3 = str(sys.argv[3])
filePath4 = str(sys.argv[4])
assetDict = {}
dupeDict = {}
with open(filePath1, newline = '', encoding='utf-8') as f:
assetData = csv.reader(f, dialect='excel', delimiter=',', quotechar='"')
next(assetData, None) #skip headers
idChecklist = []
for row in assetData:
assetID = row[0]
eventFolderID = row[1]
assetDict[str(eventFolderID)] = {}
assetDict[str(eventFolderID)]['Asset ID'] = assetID
assetDict[str(eventFolderID)]['Keywords'] = ''
if eventFolderID not in idChecklist:
idChecklist.append(eventFolderID)
else:
print(eventFolderID, '\t', assetID)
with open(filePath2, newline = '', encoding='utf-8') as h:
eventData = csv.reader(h, dialect='excel', delimiter=',', quotechar='"')
next(eventData, None)
for row in eventData:
eventFolderID = row[0]
eventPerformers = row[1]
eventPresenters = row[2]
if eventPerformers:
keywordString = f'{eventPerformers}{eventPresenters}'
else:
keywordString = f'{eventPresenters}'
try:
assetDict[str(eventFolderID)]['Keywords'] = keywordString
except KeyError:
pass
with open(filePath3, newline = '', encoding='utf-8') as j:
workPerfData = csv.reader(j, dialect='excel', delimiter=',', quotechar='"')
next(workPerfData, None)
idChecklist = []
keywordList = []
workPerfDict = {}
for row in workPerfData:
workPerfFolderID = row[0]
eventFolderID = row[1]
workPerfEntities = row[2]
if workPerfEntities:
workPerfKeywords = workPerfEntities.split('|')
if eventFolderID not in idChecklist:
idChecklist.append(eventFolderID)
keywordList = []
for keyword in workPerfKeywords:
if keyword:
keywordList.append(keyword)
workPerfDict[str(eventFolderID)] = keywordList
else:
keywordList = workPerfDict[str(eventFolderID)]
for keyword in workPerfKeywords:
if keyword:
if keyword not in keywordList:
keywordList.append(keyword)
workPerfDict[str(eventFolderID)] = keywordList
for key in workPerfDict:
keywords = workPerfDict[key]
s = '|'
keywordString = s.join(keywords)
workPerfDict[key] = keywordString
for key in assetDict:
assetKeywords = assetDict[key]['Keywords']
try:
workPerfKeywords = workPerfDict[key]
keywordString = f'{assetKeywords}{workPerfKeywords}'
assetDict[key]['Keywords'] = keywordString
except KeyError:
pass
outputPath = ''.join([str(filePath4), '/assetKeywords.csv'])
fields = ['Asset ID', 'Keywords']
with open(outputPath, 'w', newline='') as csvfile:
w = csv.DictWriter(csvfile, fields)
w.writeheader()
for k in assetDict:
w.writerow({field: assetDict[k].get(field) for field in fields})
print('Done assembling asset keywords')
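# Illustrative invocation sketch (argument order follows the sys.argv indexing
# above; the file names are placeholders):
#
#     python assetKeywords.py assetData.csv eventData.csv workPerfData.csv /output/dir
#
# which writes /output/dir/assetKeywords.csv with 'Asset ID' and 'Keywords' columns.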
| {
"content_hash": "163176c3158d0cbbe9fe66fbbd43995c",
"timestamp": "",
"source": "github",
"line_count": 107,
"max_line_length": 102,
"avg_line_length": 35.12149532710281,
"alnum_prop": 0.5231506120276743,
"repo_name": "CarnegieHall/metadata-matching",
"id": "68066f8c377c2e9e21dae67df8c07b11bdaea14e",
"size": "4239",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "assetKeywords.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "35192"
}
],
"symlink_target": ""
} |
"""Command-line flag library.
Emulates gflags by wrapping cfg.ConfigOpts.
The idea is to move fully to cfg eventually, and this wrapper is a
stepping stone.
"""
import socket
from oslo_config import cfg
from oslo_log import log as logging
from oslo_middleware import cors
from jacket.common import config
CONF = config.CONF
# logging.register_options(CONF)
core_opts = []
debug_opts = [
]
CONF.register_cli_opts(core_opts)
CONF.register_cli_opts(debug_opts)
def set_middleware_defaults():
"""Update default configuration options for oslo.middleware."""
# CORS Defaults
# TODO(krotscheck): Update with https://review.openstack.org/#/c/285368/
cfg.set_defaults(cors.CORS_OPTS,
allow_headers=['X-Auth-Token',
'X-Identity-Status',
'X-Roles',
'X-Service-Catalog',
'X-User-Id',
'X-Tenant-Id',
'X-OpenStack-Request-ID',
'X-Trace-Info',
'X-Trace-HMAC',
'OpenStack-API-Version'],
expose_headers=['X-Auth-Token',
'X-Subject-Token',
'X-Service-Token',
'X-OpenStack-Request-ID',
'OpenStack-API-Version'],
allow_methods=['GET',
'PUT',
'POST',
'DELETE',
'PATCH',
'HEAD']
)
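# Illustrative bootstrap sketch (the project name and argv handling are
# assumptions about the surrounding service code, not taken from this module):
#
#     import sys
#     from jacket.common.storage import config as storage_config
#
#     storage_config.set_middleware_defaults()
#     storage_config.CONF(sys.argv[1:], project='jacket')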
| {
"content_hash": "369bf32e4da3392410c167a7fc6204f3",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 76,
"avg_line_length": 32.55357142857143,
"alnum_prop": 0.4267690619857378,
"repo_name": "HybridF5/jacket",
"id": "c4a74b8aad79eb07dbd0ee686142a1c10402fa85",
"size": "2613",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "jacket/common/storage/config.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "26995056"
},
{
"name": "Shell",
"bytes": "28464"
},
{
"name": "Smarty",
"bytes": "291947"
}
],
"symlink_target": ""
} |
import pytest
from django.conf import settings
from django_docutils.lib.fixtures.publisher import publish_post
@pytest.mark.django_db(transaction=True)
def test_publish_post_from_source_file(tmpdir):
test_file = tmpdir.join("test.rst")
test_file.write(
"""
===
moo
===
:Author: anonymous
:Slug_Id: tEst
foo
""".strip()
)
post_data = publish_post(source_path=str(test_file))
assert isinstance(post_data, dict)
@pytest.mark.django_db(transaction=True)
def test_publish_post_explicitness():
"""If title, subtitle, created, updated, etc. is declared in metadata,
that is treated as a source of truth, and the database entry must
respect that, upon initial import and subsequent re-imports.
@todo a test also needs to be made for this and the directory style
configurations.
@todo parametrize this with message, source, results
"""
assert publish_post(source="") == {}
# test with title
assert (
publish_post(
source="""
==============
Document title
==============
""".strip()
)
== {"title": "Document title"}
)
# test with subtitle
assert (
publish_post(
source="""
==============
Document title
==============
-----------------
Document subtitle
-----------------
""".strip()
)
== {"title": "Document title", "pages": [{"subtitle": "Document subtitle"}]}
)
# test with content
assert (
publish_post(
source="""
==============
Document title
==============
-----------------
Document subtitle
-----------------
Content
-------
hi
""".strip()
)
== {
"title": "Document title",
"pages": [
{
"subtitle": "Document subtitle",
"body": """
Content
-------
hi""".strip(),
}
],
}
)
# assert with header content
assert (
publish_post(
source="""
==============
Document title
==============
-----------------
Document subtitle
-----------------
Content
-------
hi1
""".strip()
)
== {
"title": "Document title",
"pages": [
{
"subtitle": "Document subtitle",
"body": """
Content
-------
hi1""".strip(),
}
],
}
)
# assert with basic docinfo
assert (
publish_post(
source="""
==============
Document title
==============
-----------------
Document subtitle
-----------------
:Author: anonymous
:Slug_Id: tEst
Content
-------
hi2
""".strip()
)
== {
"title": "Document title",
"author": settings.ANONYMOUS_USER_NAME,
"slug_id": "tEst",
"pages": [
{
"subtitle": "Document subtitle",
"body": """
Content
-------
hi2""".strip(),
}
],
}
)
# assert with title/subtitle docinfo override
assert (
publish_post(
source="""
==============
Document title
==============
-----------------
Document subtitle
-----------------
:Title: Overridden Title
:Subtitle: Overridden Subtitle
Content
-------
moo
""".strip()
)
== {
"title": "Overridden Title",
"pages": [
{
"subtitle": "Overridden Subtitle",
"body": """
Content
-------
moo""".strip(),
}
],
}
)
# assert with title/subtitle docinfo override + new fields
assert (
publish_post(
source="""
==============
Document title
==============
-----------------
Document subtitle
-----------------
:Title: Overridden Title
:Subtitle: Overridden Subtitle
:Slug_Id: tEst
Content
-------
hi
""".strip()
)
== {
"title": "Overridden Title",
"slug_id": "tEst",
"pages": [
{
"subtitle": "Overridden Subtitle",
"body": """
Content
-------
hi""".strip(),
}
],
}
)
def test_publish_post_defaults():
# default value pass-through
assert publish_post(
source="", defaults={"moo": "moo", "title": "default title"}
) == {"moo": "moo", "title": "default title"}
# title and subtitle from doc override defaults
assert (
publish_post(
source="""
==============
Document title
==============
-----------------
Document subtitle
-----------------
""".strip(),
defaults={"moo": "moo"},
)
== {
"title": "Document title",
"moo": "moo",
"pages": [{"subtitle": "Document subtitle"}],
}
)
# Docinfo overrides doc nodes and defaults
assert (
publish_post(
source="""
==============
Document title
==============
-----------------
Document subtitle
-----------------
:Title: Overridden Title
:Subtitle: Overridden Subtitle
:Slug_Id: tEst
Content
-------
hi
""".strip(),
defaults={
"title": "You should not",
"subtitle": "See this",
"a_default_property": "a_default_value",
},
)
== {
"title": "Overridden Title",
"slug_id": "tEst",
"a_default_property": "a_default_value",
"pages": [
{
"subtitle": "Overridden Subtitle",
"body": """
Content
-------
hi""".strip(),
}
],
}
)
def test_publish_post_overrides():
# default value pass-through
assert publish_post(source="", overrides={"moo": "moo"}) == {"moo": "moo"}
# override defaults
assert publish_post(
source="",
defaults={"moo": "moo", "title": "default title"},
overrides={"moo": "moo2"},
) == {"moo": "moo2", "title": "default title"}
# override overrides doc title/subtitle
assert (
publish_post(
source="""
==============
Document title
==============
-----------------
Document subtitle
-----------------
""".strip(),
overrides={"title": "Over", "subtitle": "Written"},
)
== {"title": "Over", "pages": [{"subtitle": "Written"}]}
)
# overrides overrides docinfo
assert (
publish_post(
source="""
==============
Document title
==============
-----------------
Document subtitle
-----------------
:Title: Overridden Title
:Subtitle: Overridden Subtitle
:Slug_Id: tEst
Content
-------
hi
""".strip(),
overrides={
"title": "You should",
"subtitle": "See this",
"slug_id": "and this",
"a_default_property": "a_default_value",
},
)
== {
"title": "You should",
"slug_id": "and this",
"a_default_property": "a_default_value",
"pages": [
{
"subtitle": "See this",
"body": """
Content
-------
hi""".strip(),
}
],
}
)
| {
"content_hash": "136cf654df5c49cff48eff785e25a4db",
"timestamp": "",
"source": "github",
"line_count": 380,
"max_line_length": 84,
"avg_line_length": 19.276315789473685,
"alnum_prop": 0.4289419795221843,
"repo_name": "tony/django-docutils",
"id": "8261b8a7e5d8d714ebe891e7e177a6b79ebba7ae",
"size": "7325",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/django_docutils/lib/fixtures/tests/test_publishers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "493"
},
{
"name": "Makefile",
"bytes": "1463"
},
{
"name": "Python",
"bytes": "221630"
}
],
"symlink_target": ""
} |
from robotide.lib.robot.errors import DataError
from robotide.lib.robot.utils import (get_error_details, is_string,
split_args_from_name_or_path, type_name, Importer)
from .visitor import SuiteVisitor
class ModelModifier(SuiteVisitor):
def __init__(self, visitors, empty_suite_ok, logger):
self._log_error = logger.error
self._empty_suite_ok = empty_suite_ok
self._visitors = list(self._yield_visitors(visitors))
def visit_suite(self, suite):
for visitor in self._visitors:
try:
suite.visit(visitor)
except:
message, details = get_error_details()
self._log_error("Executing model modifier '%s' failed: %s\n%s"
% (type_name(visitor), message, details))
if not (suite.test_count or self._empty_suite_ok):
raise DataError("Suite '%s' contains no tests after model "
"modifiers." % suite.name)
def _yield_visitors(self, visitors):
importer = Importer('model modifier')
for visitor in visitors:
try:
if not is_string(visitor):
yield visitor
else:
name, args = split_args_from_name_or_path(visitor)
yield importer.import_class_or_module(name, args)
except DataError as err:
self._log_error(err.message)
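# Illustrative construction sketch (the modifier name and argument are examples
# only): visitors may be SuiteVisitor instances or "name:arg" strings in the
# style of Robot Framework's --prerunmodifier option, e.g.
#
#     ModelModifier(['MyModifier.py:arg1'], empty_suite_ok=False, logger=logger)
#
# String entries are imported via Importer and instantiated with the parsed
# arguments inside _yield_visitors().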
| {
"content_hash": "a21531a8c086c8eae6ac34d7aa5c9dc4",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 78,
"avg_line_length": 39.78378378378378,
"alnum_prop": 0.5652173913043478,
"repo_name": "robotframework/RIDE",
"id": "94c3c88503b33a67503e25e0ff4558eff5b557a1",
"size": "2116",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/robotide/lib/robot/model/modifier.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "31131"
},
{
"name": "HTML",
"bytes": "96342"
},
{
"name": "JavaScript",
"bytes": "42656"
},
{
"name": "Python",
"bytes": "3703410"
},
{
"name": "RobotFramework",
"bytes": "378004"
},
{
"name": "Shell",
"bytes": "1873"
}
],
"symlink_target": ""
} |
from collections import OrderedDict
from django import template
from django.db import models
from django.db.models import Q
from tola.util import getCountry
from indicators.models import Indicator
from workflow.models import Program, Country
register = template.Library()
@register.inclusion_tag('workflow/tags/program_menu.html', takes_context=True)
def program_menu(context):
request = context['request']
if request.user.is_authenticated:
countries = request.user.tola_user.available_countries
indicator_query = Indicator.objects.filter(
deleted__isnull=True,
program=models.OuterRef('pk')
).order_by().values('program').annotate(i_count=models.Count('pk')).values('i_count')
programs = request.user.tola_user.available_programs.annotate(
indicator_count=models.Subquery(indicator_query[:1], output_field=models.IntegerField())
).filter(
funding_status="Funded",
indicator_count__gt=0
).prefetch_related('country')
else:
countries = Country.objects.none()
programs = Program.objects.none()
programs_by_country = OrderedDict((country.country, []) for country in countries)
if request.user.is_authenticated:
for program in programs:
for country in program.country.all():
# a program can be in multiple countries, including a country a user is not privy to
if country.country in programs_by_country:
programs_by_country[country.country].append(program)
return {
'programs': programs,
'countries': countries,
'programs_by_country': programs_by_country,
}
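# Illustrative template usage sketch (the template path comes from the
# inclusion_tag decorator above; the tag library name matches this module's
# filename):
#
#     {% load menu_tag %}
#     {% program_menu %}
#
# which renders 'workflow/tags/program_menu.html' with the context dict
# returned by program_menu().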
| {
"content_hash": "3467ec0d3bbb74a30c3bb4d1da180eb2",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 100,
"avg_line_length": 37.955555555555556,
"alnum_prop": 0.6715456674473068,
"repo_name": "mercycorps/TolaActivity",
"id": "9b91d0c8c48bfe252d750955da84d19b1f422700",
"size": "1708",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "workflow/templatetags/menu_tag.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "432462"
},
{
"name": "Dockerfile",
"bytes": "109"
},
{
"name": "HTML",
"bytes": "437661"
},
{
"name": "JavaScript",
"bytes": "5654491"
},
{
"name": "Python",
"bytes": "1741812"
},
{
"name": "Shell",
"bytes": "4752"
}
],
"symlink_target": ""
} |
"""Provide a (g)dbm-compatible interface to bsdhash.hashopen."""
import bsddb
error = bsddb.error # Exported for anydbm
def open(file, flag, mode=0666):
return bsddb.hashopen(file, flag, mode)
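# Minimal usage sketch ('scratch.db' is just an example filename):
#
#     import dbhash
#     db = dbhash.open('scratch.db', 'c')   # 'c' creates the file if missing
#     db['key'] = 'value'
#     print db['key']
#     db.close()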
| {
"content_hash": "db7d2d49a2f954d6285af5e08524a137",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 64,
"avg_line_length": 27.5,
"alnum_prop": 0.6590909090909091,
"repo_name": "MalloyPower/parsing-python",
"id": "ff51630bb338eac23ac4ee3a4f240be6e6563a83",
"size": "220",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "front-end/testsuite-python-lib/Python-2.0/Lib/dbhash.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1963"
},
{
"name": "Lex",
"bytes": "238458"
},
{
"name": "Makefile",
"bytes": "4513"
},
{
"name": "OCaml",
"bytes": "412695"
},
{
"name": "Python",
"bytes": "17319"
},
{
"name": "Rascal",
"bytes": "523063"
},
{
"name": "Yacc",
"bytes": "429659"
}
],
"symlink_target": ""
} |
"""Test flash_air_music.setup_logging functions/classes."""
import logging
import logging.handlers
import time
from io import StringIO
import pytest
from flash_air_music import setup_logging
@pytest.mark.parametrize('mode', ['', 'rem file', 'bad file', 'dup file', 'rem con', 'bad con', 'dup out', 'dup err'])
def test_cleanup_logging(monkeypatch, request, tmpdir, mode):
"""Test _cleanup_logging().
:param monkeypatch: pytest fixture.
:param request: pytest fixture.
:param tmpdir: pytest fixture.
:param str mode: Test scenario.
"""
stdout, stderr = StringIO(), StringIO()
monkeypatch.setattr('flash_air_music.setup_logging.sys', type('', (), {'stdout': stdout, 'stderr': stderr}))
log, name, quiet = str(tmpdir.join('sample.log')), request.function.__name__, False
logger = logging.getLogger(name)
if mode == 'rem file':
logger.addHandler(logging.handlers.WatchedFileHandler(log))
log = ''
elif mode == 'bad file':
logger.addHandler(logging.handlers.WatchedFileHandler(str(tmpdir.join('bad.log'))))
elif mode == 'dup file':
logger.addHandler(logging.handlers.WatchedFileHandler(log))
logger.addHandler(logging.handlers.WatchedFileHandler(log))
elif mode == 'rem con':
logger.addHandler(logging.StreamHandler(stdout))
quiet = True
elif mode == 'bad con':
logger.addHandler(logging.StreamHandler(StringIO()))
elif mode == 'dup out':
logger.addHandler(logging.StreamHandler(stdout))
logger.addHandler(logging.StreamHandler(stdout))
elif mode == 'dup err':
logger.addHandler(logging.StreamHandler(stderr))
logger.addHandler(logging.StreamHandler(stderr))
handlers_file, handlers_out, handlers_err = getattr(setup_logging, '_cleanup_logging')(logger, quiet, log)
if mode == 'dup file':
assert [h.baseFilename for h in handlers_file] == [str(tmpdir.join('sample.log'))]
else:
assert not handlers_file
if mode == 'dup out':
assert [h.stream for h in handlers_out] == [stdout]
else:
assert not handlers_out
if mode == 'dup err':
assert [h.stream for h in handlers_err] == [stderr]
else:
assert not handlers_err
@pytest.mark.parametrize('log', ['sample.log', ''])
@pytest.mark.parametrize('quiet', [True, False])
@pytest.mark.parametrize('verbose', [True, False])
def test_setup_logging_new(capsys, request, tmpdir, log, quiet, verbose):
"""Test setup_logging() function with no previous config.
:param capsys: pytest fixture.
:param request: pytest fixture.
:param tmpdir: pytest fixture.
:param str log: Test --log (to file) option.
:param bool quiet: Test --quiet (console only) option.
:param bool verbose: Test --verbose (debug logging) option.
"""
config = {'--log': str(tmpdir.join(log)) if log else log, '--quiet': quiet, '--verbose': verbose}
name = '{}_{}'.format(request.function.__name__, '_'.join(k[2:] for k, v in sorted(config.items()) if v))
setup_logging.setup_logging(config, name)
# Emit.
logger = logging.getLogger(name)
for attr in ('debug', 'info', 'warning', 'error', 'critical'):
getattr(logger, attr)('Test {}.'.format(attr))
time.sleep(0.01)
# Collect.
stdout, stderr = capsys.readouterr()
disk = tmpdir.join(log).read() if log else None
# Check log file.
if log:
assert 'Test critical.' in disk
assert 'Test error.' in disk
assert 'Test warning.' in disk
assert 'Test info.' in disk
if verbose:
assert 'Test debug.' in disk
else:
assert 'Test debug.' not in disk
else:
assert not tmpdir.listdir()
# Check quiet console.
if quiet:
assert not stdout
assert not stderr
return
# Check normal/verbose console.
if verbose:
assert name in stdout
assert name in stderr
assert 'Test debug.' in stdout
else:
assert name not in stdout
assert name not in stderr
assert 'Test debug.' not in stdout
assert 'Test debug.' not in stderr
assert 'Test info.' in stdout
assert 'Test warning.' not in stdout
assert 'Test error.' not in stdout
assert 'Test critical.' not in stdout
assert 'Test info.' not in stderr
assert 'Test warning.' in stderr
assert 'Test error.' in stderr
assert 'Test critical.' in stderr
@pytest.mark.parametrize('log', range(3))
def test_setup_logging_on_off_on(capsys, request, tmpdir, log):
"""Test enabling, disabling, and re-enabling logging.
:param capsys: pytest fixture.
:param request: pytest fixture.
:param tmpdir: pytest fixture.
:param int log: Test iteration (0 1 2: on off on).
"""
# Setup config.
config = {'--log': str(tmpdir.join('sample.log')), '--quiet': False, '--verbose': False}
if log == 1:
config['--log'] = ''
config['--quiet'] = True
# Check for loggers before logging.
name = request.function.__name__
logger = logging.getLogger(name)
if log == 1:
assert logger.handlers
else:
assert not logger.handlers
# Log.
setup_logging.setup_logging(config, name)
logger.info('Test info.')
# Collect.
stdout, stderr = capsys.readouterr()
disk = tmpdir.join('sample.log').read() if log != 1 else None
# Check.
assert not stderr
if log == 1:
assert not stdout
assert not tmpdir.listdir()
else:
assert 'Test info.' in stdout
assert 'Test info.' in disk
def test_logrotate(request, tmpdir):
"""Test logrotate support.
:param request: pytest fixture.
:param tmpdir: pytest fixture.
"""
# Setup.
config = {'--log': str(tmpdir.join('sample.log')), '--quiet': True, '--verbose': False}
name = request.function.__name__
logger = logging.getLogger(name)
# Log.
setup_logging.setup_logging(config, name)
logger.info('Test one.')
tmpdir.join('sample.log').move(tmpdir.join('sample.log.old'))
logger.info('Test two.')
# Collect.
sample_one = tmpdir.join('sample.log.old').read()
sample_two = tmpdir.join('sample.log').read()
# Check.
assert 'Test one.' in sample_one
assert 'Test two.' not in sample_one
assert 'Test one.' not in sample_two
assert 'Test two.' in sample_two
| {
"content_hash": "4847a108074c8cba1c943019d7adf5f2",
"timestamp": "",
"source": "github",
"line_count": 199,
"max_line_length": 118,
"avg_line_length": 32.28643216080402,
"alnum_prop": 0.6314396887159534,
"repo_name": "Robpol86/FlashAirMusic",
"id": "c9d11fc7367a2f76ca2d6cd9b4757f270f8022de",
"size": "6425",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_setup_logging.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Lua",
"bytes": "4920"
},
{
"name": "Makefile",
"bytes": "2929"
},
{
"name": "Python",
"bytes": "187861"
}
],
"symlink_target": ""
} |
from django.conf.urls import include
from django.contrib import admin
from django.urls import path, reverse_lazy
from django.views.generic import RedirectView
urlpatterns = [
    path('', RedirectView.as_view(
url=reverse_lazy('list_index'),
permanent=True)),
path(r'postorius/', include('postorius.urls')),
path(r'', include('django_mailman3.urls')),
path(r'accounts/', include('allauth.urls')),
# Django admin
    path('admin/', admin.site.urls),
]
| {
"content_hash": "04882c06f25d6bb38f4880a1171bfcce",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 51,
"avg_line_length": 32.6,
"alnum_prop": 0.6809815950920245,
"repo_name": "maxking/docker-mailman",
"id": "6a9bbafc2c4c01dae0c27656c3fe2e6f2d2d6e5b",
"size": "1241",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "postorius/mailman-web/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "4492"
},
{
"name": "Python",
"bytes": "33326"
},
{
"name": "Shell",
"bytes": "18589"
}
],
"symlink_target": ""
} |
import imp
import sys
import os
import unittest
import subprocess
from test import test_support
# This little helper class is essential for testing pdb under doctest.
from test_doctest import _FakeInput
class PdbTestInput(object):
"""Context manager that makes testing Pdb in doctests easier."""
def __init__(self, input):
self.input = input
def __enter__(self):
self.real_stdin = sys.stdin
sys.stdin = _FakeInput(self.input)
def __exit__(self, *exc):
sys.stdin = self.real_stdin
def write(x):
print x
def test_pdb_displayhook():
"""This tests the custom displayhook for pdb.
>>> def test_function(foo, bar):
... import pdb; pdb.Pdb().set_trace()
... pass
>>> with PdbTestInput([
... 'foo',
... 'bar',
... 'for i in range(5): write(i)',
... 'continue',
... ]):
... test_function(1, None)
> <doctest test.test_pdb.test_pdb_displayhook[0]>(3)test_function()
-> pass
(Pdb) foo
1
(Pdb) bar
(Pdb) for i in range(5): write(i)
0
1
2
3
4
(Pdb) continue
"""
def test_pdb_breakpoint_commands():
"""Test basic commands related to breakpoints.
>>> def test_function():
... import pdb; pdb.Pdb().set_trace()
... print(1)
... print(2)
... print(3)
... print(4)
First, need to clear bdb state that might be left over from previous tests.
Otherwise, the new breakpoints might get assigned different numbers.
>>> from bdb import Breakpoint
>>> Breakpoint.next = 1
>>> Breakpoint.bplist = {}
>>> Breakpoint.bpbynumber = [None]
Now test the breakpoint commands. NORMALIZE_WHITESPACE is needed because
the breakpoint list outputs a tab for the "stop only" and "ignore next"
lines, which we don't want to put in here.
>>> with PdbTestInput([ # doctest: +NORMALIZE_WHITESPACE
... 'break 3',
... 'disable 1',
... 'ignore 1 10',
... 'condition 1 1 < 2',
... 'break 4',
... 'break 4',
... 'break',
... 'clear 3',
... 'break',
... 'condition 1',
... 'enable 1',
... 'clear 1',
... 'commands 2',
... 'print 42',
... 'end',
... 'continue', # will stop at breakpoint 2 (line 4)
... 'clear', # clear all!
... 'y',
... 'tbreak 5',
... 'continue', # will stop at temporary breakpoint
... 'break', # make sure breakpoint is gone
... 'continue',
... ]):
... test_function()
> <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>(3)test_function()
-> print(1)
(Pdb) break 3
Breakpoint 1 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:3
(Pdb) disable 1
(Pdb) ignore 1 10
Will ignore next 10 crossings of breakpoint 1.
(Pdb) condition 1 1 < 2
(Pdb) break 4
Breakpoint 2 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:4
(Pdb) break 4
Breakpoint 3 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:4
(Pdb) break
Num Type Disp Enb Where
1 breakpoint keep no at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:3
stop only if 1 < 2
ignore next 10 hits
2 breakpoint keep yes at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:4
3 breakpoint keep yes at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:4
(Pdb) clear 3
Deleted breakpoint 3
(Pdb) break
Num Type Disp Enb Where
1 breakpoint keep no at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:3
stop only if 1 < 2
ignore next 10 hits
2 breakpoint keep yes at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:4
(Pdb) condition 1
Breakpoint 1 is now unconditional.
(Pdb) enable 1
(Pdb) clear 1
Deleted breakpoint 1
(Pdb) commands 2
(com) print 42
(com) end
(Pdb) continue
1
42
> <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>(4)test_function()
-> print(2)
(Pdb) clear
Clear all breaks? y
(Pdb) tbreak 5
Breakpoint 4 at <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>:5
(Pdb) continue
2
Deleted breakpoint 4
> <doctest test.test_pdb.test_pdb_breakpoint_commands[0]>(5)test_function()
-> print(3)
(Pdb) break
(Pdb) continue
3
4
"""
def test_pdb_skip_modules():
"""This illustrates the simple case of module skipping.
>>> def skip_module():
... import string
... import pdb; pdb.Pdb(skip=['string*']).set_trace()
... string.lower('FOO')
>>> with PdbTestInput([
... 'step',
... 'continue',
... ]):
... skip_module()
> <doctest test.test_pdb.test_pdb_skip_modules[0]>(4)skip_module()
-> string.lower('FOO')
(Pdb) step
--Return--
> <doctest test.test_pdb.test_pdb_skip_modules[0]>(4)skip_module()->None
-> string.lower('FOO')
(Pdb) continue
"""
# Module for testing skipping of module that makes a callback
mod = imp.new_module('module_to_skip')
exec 'def foo_pony(callback): x = 1; callback(); return None' in mod.__dict__
def test_pdb_skip_modules_with_callback():
"""This illustrates skipping of modules that call into other code.
>>> def skip_module():
... def callback():
... return None
... import pdb; pdb.Pdb(skip=['module_to_skip*']).set_trace()
... mod.foo_pony(callback)
>>> with PdbTestInput([
... 'step',
... 'step',
... 'step',
... 'step',
... 'step',
... 'continue',
... ]):
... skip_module()
... pass # provides something to "step" to
> <doctest test.test_pdb.test_pdb_skip_modules_with_callback[0]>(5)skip_module()
-> mod.foo_pony(callback)
(Pdb) step
--Call--
> <doctest test.test_pdb.test_pdb_skip_modules_with_callback[0]>(2)callback()
-> def callback():
(Pdb) step
> <doctest test.test_pdb.test_pdb_skip_modules_with_callback[0]>(3)callback()
-> return None
(Pdb) step
--Return--
> <doctest test.test_pdb.test_pdb_skip_modules_with_callback[0]>(3)callback()->None
-> return None
(Pdb) step
--Return--
> <doctest test.test_pdb.test_pdb_skip_modules_with_callback[0]>(5)skip_module()->None
-> mod.foo_pony(callback)
(Pdb) step
> <doctest test.test_pdb.test_pdb_skip_modules_with_callback[1]>(10)<module>()
-> pass # provides something to "step" to
(Pdb) continue
"""
def test_pdb_continue_in_bottomframe():
"""Test that "continue" and "next" work properly in bottom frame (issue #5294).
>>> def test_function():
... import pdb, sys; inst = pdb.Pdb()
... inst.set_trace()
... inst.botframe = sys._getframe() # hackery to get the right botframe
... print(1)
... print(2)
... print(3)
... print(4)
First, need to clear bdb state that might be left over from previous tests.
Otherwise, the new breakpoints might get assigned different numbers.
>>> from bdb import Breakpoint
>>> Breakpoint.next = 1
>>> Breakpoint.bplist = {}
>>> Breakpoint.bpbynumber = [None]
>>> with PdbTestInput([
... 'next',
... 'break 7',
... 'continue',
... 'next',
... 'continue',
... 'continue',
... ]):
... test_function()
> <doctest test.test_pdb.test_pdb_continue_in_bottomframe[0]>(4)test_function()
-> inst.botframe = sys._getframe() # hackery to get the right botframe
(Pdb) next
> <doctest test.test_pdb.test_pdb_continue_in_bottomframe[0]>(5)test_function()
-> print(1)
(Pdb) break 7
Breakpoint 1 at <doctest test.test_pdb.test_pdb_continue_in_bottomframe[0]>:7
(Pdb) continue
1
2
> <doctest test.test_pdb.test_pdb_continue_in_bottomframe[0]>(7)test_function()
-> print(3)
(Pdb) next
3
> <doctest test.test_pdb.test_pdb_continue_in_bottomframe[0]>(8)test_function()
-> print(4)
(Pdb) continue
4
"""
class ModuleInitTester(unittest.TestCase):
@unittest.skipIf(test_support.is_jython, "FIXME: not working in Jython")
def test_filename_correct(self):
"""
In issue 7750, it was found that if the filename has a sequence that
resolves to an escape character in a Python string (such as \t), it
will be treated as the escaped character.
"""
# the test_fn must contain something like \t
# on Windows, this will create 'test_mod.py' in the current directory.
# on Unix, this will create '.\test_mod.py' in the current directory.
test_fn = '.\\test_mod.py'
code = 'print("testing pdb")'
with open(test_fn, 'w') as f:
f.write(code)
self.addCleanup(os.remove, test_fn)
cmd = [sys.executable, '-m', 'pdb', test_fn,]
proc = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
stdout, stderr = proc.communicate('quit\n')
self.assertIn(code, stdout, "pdb munged the filename")
def test_main():
from test import test_pdb
test_support.run_doctest(test_pdb, verbosity=True)
test_support.run_unittest(ModuleInitTester)
if __name__ == '__main__':
test_main()
| {
"content_hash": "cd9245d10a038f6daa8bf01589fd7c62",
"timestamp": "",
"source": "github",
"line_count": 313,
"max_line_length": 92,
"avg_line_length": 30.4185303514377,
"alnum_prop": 0.5799810944228547,
"repo_name": "azoft-dev-team/imagrium",
"id": "bd1ae508e7b0584fee25d7d10a4bf5383549c4b9",
"size": "9631",
"binary": false,
"copies": "4",
"ref": "refs/heads/win",
"path": "env/Lib/test/test_pdb.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "22116"
},
{
"name": "Groff",
"bytes": "21"
},
{
"name": "HTML",
"bytes": "111703"
},
{
"name": "Java",
"bytes": "448343"
},
{
"name": "Python",
"bytes": "14076342"
},
{
"name": "R",
"bytes": "1370"
},
{
"name": "Ruby",
"bytes": "5269"
},
{
"name": "Shell",
"bytes": "3193"
},
{
"name": "Visual Basic",
"bytes": "481"
}
],
"symlink_target": ""
} |
"""Tests for TPUEstimator with model parallelism."""
from absl import flags
import numpy as np
import tensorflow as tf
from tensorflow.python.tpu import tpu_feed
from tensorflow.python.tpu.device_assignment import device_assignment
from tensorflow.python.tpu.topology import Topology
from tensorflow.python.training import evaluation
from tensorflow_estimator.python.estimator import model_fn as model_fn_lib
from tensorflow_estimator.python.estimator.export import export_output
from tensorflow_estimator.python.estimator.tpu import tpu_config
from tensorflow_estimator.python.estimator.tpu import tpu_estimator
# pylint: enable=g-direct-tensorflow-import
FLAGS = flags.FLAGS
_TRAIN = model_fn_lib.ModeKeys.TRAIN
_EVAL = model_fn_lib.ModeKeys.EVAL
_PREDICT = model_fn_lib.ModeKeys.PREDICT
_PER_HOST = 'per_host_sharding'
_PER_SHARD = 'per_shard_sharding'
_UNSHARDED = 'unsharded'
_INPUT_PIPELINE_WITH_QUEUE_RUNNER = (
'Input pipeline contains one or more QueueRunners')
def dense_computation(features):
return tf.compat.v1.layers.dense(
features['x'], 1, kernel_initializer=tf.compat.v1.zeros_initializer())
def model_fn_global_step_incrementer(features, labels, mode, params):
del params
loss = None
train_op = None
predictions = dense_computation(features)
if mode != _PREDICT:
loss = tf.compat.v1.losses.mean_squared_error(labels, predictions)
optimizer = tf.compat.v1.tpu.CrossShardOptimizer(
tf.compat.v1.train.GradientDescentOptimizer(learning_rate=0.5))
train_op = optimizer.minimize(loss, tf.compat.v1.train.get_global_step())
return tpu_estimator.TPUEstimatorSpec(
mode,
loss=loss,
train_op=train_op,
predictions={'predictions': predictions},
export_outputs={
'test': export_output.PredictOutput({
'prediction': predictions
})
})
def dummy_input_fn_with_dataset(batch_size, repeat=True, x=None):
if x is None:
x = np.random.normal(size=[batch_size, 1]).astype(np.float32)
labels = [[2.0]] * batch_size
dataset1 = tf.compat.v1.data.Dataset.from_tensor_slices(x)
dataset2 = tf.compat.v1.data.Dataset.from_tensor_slices(labels)
dataset = tf.compat.v1.data.Dataset.zip((dataset1, dataset2))
if repeat:
dataset = dataset.repeat()
dataset = dataset.batch(batch_size, drop_remainder=True)
def _map(x, y):
return {'x': x}, y
return dataset.map(_map)
def dummy_input_fn(batch_size, repeat=True):
dataset = dummy_input_fn_with_dataset(batch_size, repeat)
iterator = tf.compat.v1.data.make_one_shot_iterator(dataset)
return iterator.get_next()
def create_run_config(iterations_per_loop, num_shards, num_cores_per_replica,
**kwargs):
return tpu_config.RunConfig(
master='',
tpu_config=tpu_config.TPUConfig(
iterations_per_loop=iterations_per_loop,
num_shards=num_shards,
num_cores_per_replica=num_cores_per_replica,
**kwargs))
class TPUEstimatorModelParallelismConstructorTest(tf.test.TestCase):
def test_fail_model_parallelism_for_per_core_input(self):
run_config = create_run_config(
iterations_per_loop=4,
num_shards=1,
num_cores_per_replica=2,
per_host_input_for_training=False)
with self.assertRaisesRegex(ValueError, 'Model parallelism only supports'):
tpu_estimator.TPUEstimator(
model_fn=model_fn_global_step_incrementer,
config=run_config,
train_batch_size=128)
class TPUEstimatorModelParallelismTrainingTest(tf.test.TestCase):
def _train_and_return_global_steps(self,
iterations_per_loop,
steps=None,
max_steps=None,
pre_train_steps=None,
**kwargs):
"""Trains the model and returns the list of global steps after each loop."""
def input_fn(params):
return dummy_input_fn(params['batch_size'])
def _model_fn(features, labels, mode, params):
return model_fn_global_step_incrementer(features, labels, mode, params)
run_config = create_run_config(
iterations_per_loop=iterations_per_loop,
num_shards=1,
num_cores_per_replica=2,
**kwargs)
est = tpu_estimator.TPUEstimator(
model_fn=_model_fn,
config=run_config,
train_batch_size=16,
eval_batch_size=16)
class _TrainStepCheckHook(tf.compat.v1.train.SessionRunHook):
"""Check eval step counter after one session.run."""
def __init__(self):
"""Constructs the run hook."""
self._global_steps = []
@property
def global_steps(self):
return self._global_steps
def after_run(self, run_context, run_values):
global_step = run_context.session.run(tf.compat.v1.train.get_global_step())
self._global_steps.append(global_step)
if pre_train_steps:
est.train(input_fn, steps=pre_train_steps)
hook = _TrainStepCheckHook()
est.train(input_fn, steps=steps, max_steps=max_steps, hooks=[hook])
return hook.global_steps
def test_train_steps_with_model_parallelism(self):
# From scratch.
global_steps_per_loop = self._train_and_return_global_steps(
iterations_per_loop=40, steps=12)
self.assertEqual([12], global_steps_per_loop)
# From existing checkpoint.
global_steps_per_loop = self._train_and_return_global_steps(
iterations_per_loop=40, steps=12, pre_train_steps=3)
self.assertEqual([15], global_steps_per_loop)
class TPUEstimatorModelParallelismEvaluationTest(tf.test.TestCase):
def _create_input_fn(self):
def _input_fn(params):
return dummy_input_fn(params['batch_size'])
return _input_fn
def _create_head(self, mode, loss, eval_metrics):
"""Creates a head returning `TPUEstimatorSpec` based on mode."""
if mode == _EVAL:
return tpu_estimator.TPUEstimatorSpec(
mode=mode, eval_metrics=eval_metrics, loss=loss)
# Train
optimizer = tf.compat.v1.tpu.CrossShardOptimizer(
tf.compat.v1.train.GradientDescentOptimizer(learning_rate=0.5))
train_op = optimizer.minimize(loss, global_step=tf.compat.v1.train.get_global_step())
return tpu_estimator.TPUEstimatorSpec(
mode=mode, train_op=train_op, loss=loss)
def _metric_fn_on_cpu(self, labels, predictions):
return {
'mse': tf.compat.v1.metrics.mean_absolute_error(labels, predictions),
}
def _model_fn_with_eval_tensor_list(self, features, labels, mode, params):
del params # unused.
predictions = tf.compat.v1.layers.dense(
features['x'], 1, kernel_initializer=tf.compat.v1.zeros_initializer())
loss = tf.compat.v1.losses.mean_squared_error(labels, predictions)
return self._create_head(
mode,
loss,
eval_metrics=(self._metric_fn_on_cpu, [labels, predictions]))
def _model_fn_with_eval_dict(self, features, labels, mode, params):
del params # unused.
predictions = tf.compat.v1.layers.dense(
features['x'], 1, kernel_initializer=tf.compat.v1.zeros_initializer())
loss = tf.compat.v1.losses.mean_squared_error(labels, predictions)
return self._create_head(
mode,
loss,
eval_metrics=(self._metric_fn_on_cpu, {
'labels': labels,
'predictions': predictions
}))
def _test_eval_steps(self, expected_eval_steps, iterations):
run_config = create_run_config(
iterations_per_loop=iterations, num_shards=1, num_cores_per_replica=2)
est = tpu_estimator.TPUEstimator(
model_fn=self._model_fn_with_eval_tensor_list,
config=run_config,
train_batch_size=16,
eval_batch_size=16)
est.train(self._create_input_fn(), steps=1)
class _EvalStepCheckHook(tf.compat.v1.train.SessionRunHook):
"""Check eval step counter after one session.run.
As the evaluation sets the eval iterations as the eval steps, the
after_run should be invoked only once.
"""
def __init__(self, iterations_per_loop, test_case):
"""Constructs the run hook."""
self._iterations = iterations_per_loop
self._invoked = False
self._test_case = test_case
def before_run(self, run_context):
return tf.compat.v1.train.SessionRunArgs({
'eval_steps': evaluation._get_or_create_eval_step()
})
def after_run(self, run_context, run_values):
eval_steps = run_values.results['eval_steps']
self._test_case.assertEqual(expected_eval_steps, eval_steps)
self._test_case.assertFalse(self._invoked)
self._invoked = True
est.evaluate(
self._create_input_fn(),
steps=expected_eval_steps,
hooks=[_EvalStepCheckHook(iterations, self)])
def test_eval_metrics_with_tensor_list(self):
run_config = create_run_config(
iterations_per_loop=2, num_shards=1, num_cores_per_replica=2)
est = tpu_estimator.TPUEstimator(
model_fn=self._model_fn_with_eval_tensor_list,
config=run_config,
train_batch_size=16,
eval_batch_size=16)
est.train(self._create_input_fn(), steps=1)
est.evaluate(self._create_input_fn(), steps=1)
def test_eval_metrics_with_dict(self):
run_config = create_run_config(
iterations_per_loop=2, num_shards=1, num_cores_per_replica=2)
est = tpu_estimator.TPUEstimator(
model_fn=self._model_fn_with_eval_dict,
config=run_config,
train_batch_size=16,
eval_batch_size=16)
est.train(self._create_input_fn(), steps=1)
est.evaluate(self._create_input_fn(), steps=1)
def test_fail_with_wrong_num_shards(self):
run_config = create_run_config(
iterations_per_loop=2, num_shards=2, num_cores_per_replica=2)
est = tpu_estimator.TPUEstimator(
model_fn=self._model_fn_with_eval_tensor_list,
config=run_config,
train_batch_size=16,
eval_batch_size=16)
with self.assertRaisesRegex(ValueError, 'num_shards is not set correctly'):
est.train(self._create_input_fn(), steps=1)
class TPUEstimatorModelParallelismInFeedTest(tf.test.TestCase):
def setUp(self):
self._topology_2x2x2 = Topology(
device_coordinates=np.array(
[[[0, 0, 0, 0], [0, 0, 0, 1], [0, 1, 0, 0], [0, 1, 0, 1],
[1, 0, 0, 0], [1, 0, 0, 1], [1, 1, 0, 0], [1, 1, 0, 1]]],
dtype=np.int32),
mesh_shape=np.array([2, 2, 1, 2], dtype=np.int32))
def test_infeed_even_partition(self):
"""Tests even infeed tensors partition."""
ds = device_assignment(
self._topology_2x2x2, num_replicas=1, computation_shape=[1, 1, 1, 2])
input_partition_dims = [[2, 1]]
# pylint: disable=protected-access
partitioned_infeed = tpu_feed._PartitionedInfeedQueue(
number_of_tuple_elements=1,
host_id=0,
input_partition_dims=input_partition_dims,
device_assignment=ds)
x = tf.zeros((14, 5))
tensors = partitioned_infeed._check_dims_and_partition_or_replicate_on_host(
x, dims=input_partition_dims[0])
self.assertEqual(2, len(tensors))
self.assertEqual([(7, 5), (7, 5)], [t.shape for t in tensors])
# pylint: enable=protected-access
def test_infeed_uneven_partition(self):
"""Tests uneven infeed tensors partition."""
ds = device_assignment(
self._topology_2x2x2, num_replicas=1, computation_shape=[2, 2, 1, 2])
input_partition_dims = [[4, 2]]
# pylint: disable=protected-access
partitioned_infeed = tpu_feed._PartitionedInfeedQueue(
number_of_tuple_elements=1,
host_id=0,
input_partition_dims=input_partition_dims,
device_assignment=ds)
x = tf.zeros((14, 5))
tensors = partitioned_infeed._check_dims_and_partition_or_replicate_on_host(
x, dims=input_partition_dims[0])
self.assertEqual(8, len(tensors))
self.assertEqual((2, 2), tensors[-1].shape)
# pylint: enable=protected-access
def test_infeed_tailing_zero_partition(self):
"""Tests infeed tensors partition which causes zero-size tensors."""
ds = device_assignment(
self._topology_2x2x2, num_replicas=1, computation_shape=[1, 2, 1, 2])
input_partition_dims = [[4, 1]]
# pylint: disable=protected-access
partitioned_infeed = tpu_feed._PartitionedInfeedQueue(
number_of_tuple_elements=1,
host_id=0,
input_partition_dims=input_partition_dims,
device_assignment=ds)
x = tf.zeros((5, 5))
tensors = partitioned_infeed._check_dims_and_partition_or_replicate_on_host(
x, dims=input_partition_dims[0])
self.assertEqual(4, len(tensors))
self.assertEqual((1, 5), tensors[2].shape)
self.assertEqual((0, 5), tensors[3].shape)
# pylint: enable=protected-access
if __name__ == '__main__':
tf.test.main()
| {
"content_hash": "c2c2a0a75fba52f0f2769fed97c02160",
"timestamp": "",
"source": "github",
"line_count": 362,
"max_line_length": 89,
"avg_line_length": 35.693370165745854,
"alnum_prop": 0.6568377060599024,
"repo_name": "tensorflow/estimator",
"id": "d276004b329cd4713d405ad3c42c3f7c3c2c37bc",
"size": "13610",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow_estimator/python/estimator/tpu/tpu_estimator_model_parallelism_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "11293"
},
{
"name": "Python",
"bytes": "3919795"
},
{
"name": "Shell",
"bytes": "4038"
},
{
"name": "Starlark",
"bytes": "86773"
}
],
"symlink_target": ""
} |
from cloudferry_devlab.tests import test_exceptions
class Blacklisted(object):
def __init__(self, config, glanceclient):
self.config = config
self.glanceclient = glanceclient
def get_blacklisted_img_ids(self):
"""Get Blacklisted Image IDs."""
blacklisted_img_ids = []
try:
blacklisted_img_ids = [self._get_image_id(img)
for img in self.config.images_blacklisted]
except test_exceptions.NotFound:
pass
return blacklisted_img_ids
def _get_image_id(self, image_name):
for image in self.glanceclient.images.list():
if image.name == image_name:
return image.id
raise test_exceptions.NotFound('Image with name "%s" was not found'
% image_name)
def filter_images(self):
img_list = [x.__dict__['id'] for x in
self.glanceclient.images.list(search_opts={
'is_public': False})]
exclude_img_ids = self.get_blacklisted_img_ids()
included_img_ids = [img for img in img_list
if img not in exclude_img_ids]
return included_img_ids
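# Illustrative usage sketch (the config object and glance client are assumed
# to be constructed elsewhere):
#
#     blacklist = Blacklisted(config, glanceclient)
#     included_ids = blacklist.filter_images()
#
# filter_images() returns the IDs of non-public images that are not listed in
# config.images_blacklisted.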
| {
"content_hash": "2ae9ed10fadf939030cee8b6af8435a3",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 77,
"avg_line_length": 35.42857142857143,
"alnum_prop": 0.5556451612903226,
"repo_name": "SVilgelm/CloudFerry",
"id": "a1863cd66f3a2a07f76281ea7c7e814fe9b0652a",
"size": "1817",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cloudferry_devlab/cloudferry_devlab/tests/images.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "2615"
},
{
"name": "Python",
"bytes": "1718937"
},
{
"name": "Ruby",
"bytes": "2507"
},
{
"name": "Shell",
"bytes": "11910"
}
],
"symlink_target": ""
} |
from .Code import SExpression, Atom, StringLiteral, Syntax
from .Closure import Closure
from .SymbolTable import SymbolTable
from .Continuation import ( TopCont, IfCont, SeqCont, DefineCont, TryCont, LetCont, ForCont,
UnaryOpCont, BinaryOpCont, TernaryOpCont, Ambi1OpCont,
ConcatCont, MkCont,
AndCont, OrCont,
ApplyCont, InvokeCont, GetAttrCont )
from .PLambdaException import PLambdaException
from .Flags import DONE, EVAL, RETURN, CONTINUE
from ..util.Util import string2error
class State:
def __init__(self, interpreter, exp, env):
self.interpreter = interpreter
self.tag = EVAL
self.val = None
self.exp = exp
self.env = env
self.k = TopCont()
def isDone(self):
return self.tag is DONE
def step(self):
if self.tag is EVAL:
if self.exp is None:
self.val = None
self.tag = RETURN
elif isinstance(self.exp, StringLiteral):
self.val = self.exp.string
self.tag = RETURN
elif isinstance(self.exp, Atom):
(ok, value) = self.interpreter.lookup(self.exp, self.env)
if ok:
self.val = value
self.tag = RETURN
else:
self.k.excep = value
self.tag = RETURN
elif isinstance(self.exp, SExpression):
code = self.exp.code
opexp = self.exp.spine[0]
if not isinstance(opexp, Atom):
string2error(f'not atom: {opexp} {type(opexp)} {self.exp} {self.exp.code}')
assert isinstance(opexp, Atom)
op = opexp.string
if code is Syntax.SEQ:
self.k = SeqCont(self.exp, self.exp.spine[1:], self.env, self.k)
self.tag = CONTINUE
elif code is Syntax.LET:
self.k = LetCont(self.exp, self.exp.spine[1:], self.env, self.k)
self.tag = CONTINUE
elif code is Syntax.DEFINE:
self.k = DefineCont(self.exp, self.exp.spine[1:], self.env, self.k)
self.tag = CONTINUE
elif code is Syntax.LAMBDA:
spine = self.exp.spine
self.val = Closure(self.interpreter, spine[1], spine[2], self.env, spine[0].location)
self.tag = RETURN
elif code is Syntax.INVOKE:
self.k = InvokeCont(self.exp, self.exp.spine[1:], self.env, self.k)
self.tag = CONTINUE
elif code is Syntax.APPLY:
self.k = ApplyCont(self.exp, self.exp.spine[1:], self.env, self.k)
self.tag = CONTINUE
elif code is Syntax.PRIMITIVE_DATA_OP:
self.val = self.interpreter.evalPrimitiveDataOp(self.exp, self.env)
self.tag = RETURN
elif code is Syntax.UNARY_OP:
self.k = UnaryOpCont(self.exp, self.exp.spine[1:], self.env, self.k)
self.tag = CONTINUE
elif code is Syntax.BINARY_OP:
self.k = BinaryOpCont(self.exp, self.exp.spine[1:], self.env, self.k)
self.tag = CONTINUE
elif code is Syntax.TERNARY_OP:
self.k = TernaryOpCont(self.exp, self.exp.spine[1:], self.env, self.k)
self.tag = CONTINUE
elif code is Syntax.AMBI1_OP:
self.k = Ambi1OpCont(op, self.exp, self.exp.spine[1:], self.env, self.k)
self.tag = CONTINUE
elif code is Syntax.AMBI2_OP:
if op is SymbolTable.IF:
self.k = IfCont(self.exp, self.exp.spine[1:], self.env, self.k)
self.tag = CONTINUE
elif op is SymbolTable.GETATTR:
self.k = GetAttrCont(self.exp, self.exp.spine[1:], self.env, self.k)
self.tag = CONTINUE
else:
raise PLambdaException(f'Unhandled ambi2 op form in State.step {op} {self.exp.spine[0].location}')
elif code is Syntax.N_ARY_OP:
if op is SymbolTable.AND:
self.k = AndCont(self.exp, self.exp.spine[1:], self.env, self.k)
self.tag = CONTINUE
elif op is SymbolTable.OR:
self.k = OrCont(self.exp, self.exp.spine[1:], self.env, self.k)
self.tag = CONTINUE
elif op is SymbolTable.CONCAT:
self.k = ConcatCont(self.exp, self.exp.spine[1:], self.env, self.k)
self.tag = CONTINUE
elif op in (SymbolTable.MKTUPLE, SymbolTable.MKLIST, SymbolTable.MKDICT):
self.k = MkCont(op, self.exp, self.exp.spine[1:], self.env, self.k)
self.tag = CONTINUE
else:
raise PLambdaException(f'Unhandled n-ary op form in State.step {op} {self.exp.spine[0].location}')
elif code is Syntax.TRY:
self.k = TryCont(self.exp, self.exp.spine[1:], self.env, self.k)
self.tag = CONTINUE
elif code is Syntax.FOR:
self.k = ForCont(self.exp, self.exp.spine[1:], self.env, self.k)
self.tag = CONTINUE
elif code is Syntax.CATCH:
raise PLambdaException("Orphan catch. Should not happen")
else:
raise PLambdaException("Unhandled form in State.step")
else:
self.val = self.exp
self.tag = RETURN
elif self.tag is CONTINUE:
self.k.cont(self)
elif self.tag is RETURN:
self.k.ret(self)
elif self.tag is DONE:
pass
else:
pass
| {
"content_hash": "a6e79b15cfd0980f0ac0ba10452c4c72",
"timestamp": "",
"source": "github",
"line_count": 136,
"max_line_length": 122,
"avg_line_length": 45.38970588235294,
"alnum_prop": 0.4994330147416167,
"repo_name": "SRI-CSL/PLambda",
"id": "eaec4e39b224fe68c8ee144fb85a93cd00c22a27",
"size": "6173",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "plambda/eval/State.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ANTLR",
"bytes": "7484"
},
{
"name": "Common Lisp",
"bytes": "244"
},
{
"name": "Makefile",
"bytes": "2718"
},
{
"name": "NewLisp",
"bytes": "6179"
},
{
"name": "Python",
"bytes": "199073"
}
],
"symlink_target": ""
} |
import json
import os
import sys
from multiprocessing.dummy import Pool as ThreadPool
from typing import Iterable, Optional
from license_grep.deduction import deduce_license_from_dir
from license_grep.models import PackageInfo
from license_grep.utils import is_excluded_dir
def find_js_manifests(base_path):
for path, dirs, files in os.walk(base_path):
dirs[:] = [dir for dir in dirs if not is_excluded_dir(dir)]
for filename in files:
if filename == "package.json":
path = os.path.join(path, filename)
if "/test/" not in path:
yield path
def process_js_manifest(package_json_path, allow_parent=True) -> Optional[PackageInfo]:
package_json_dir = os.path.dirname(package_json_path)
with open(package_json_path) as fp:
data = json.load(fp)
name = data.get("name")
version = data.get("version")
if not name or data.get("private"):
return None
raw_licenses = data.get("licenses", [])
if raw_licenses and isinstance(raw_licenses, dict):
raw_licenses = [l["type"] for l in raw_licenses]
elif raw_licenses and isinstance(raw_licenses, str):
raw_licenses = [raw_licenses]
else:
package_json_license = data.get("license")
if package_json_license:
if isinstance(package_json_license, dict):
package_json_license = package_json_license.get("type", False) or package_json_license.get("name")
raw_licenses = [package_json_license]
pkg_info = PackageInfo(
name=name,
version=version,
type="JavaScript",
location=package_json_path,
context=None,
raw_licenses=raw_licenses,
)
if not raw_licenses:
deduced_license = deduce_license_from_dir(package_json_dir)
if deduced_license:
pkg_info.raw_licenses = [deduced_license]
else:
if allow_parent:
parent_node_manifest = os.path.join(
package_json_dir, "..", "package.json"
)
if os.path.isfile(parent_node_manifest):
parent_pkg_info = process_js_manifest(
parent_node_manifest, allow_parent=False
)
if parent_pkg_info:
print(
f"{package_json_path}: using parent {parent_node_manifest} licenses for license",
file=sys.stderr,
)
pkg_info.raw_licenses = parent_pkg_info.raw_licenses
if not pkg_info.raw_licenses:
print(f"No license: {package_json_path}", file=sys.stderr)
if not version:
print(f"No version: {package_json_path}", file=sys.stderr)
return pkg_info
def process_js_environment(directory) -> Iterable[PackageInfo]:
manifests = find_js_manifests(directory)
with ThreadPool() as pool:
for package_info in pool.imap_unordered(process_js_manifest, manifests):
if package_info:
package_info.context = directory
yield package_info
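# Illustrative usage sketch (the directory path is a placeholder):
#
#     for pkg in process_js_environment('/path/to/checkout'):
#         print(pkg.name, pkg.version, pkg.raw_licenses)
#
# Each yielded PackageInfo corresponds to one non-private package.json found
# under the directory, with a license deduced from the package directory (or
# its parent manifest) when the manifest itself omits one.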
| {
"content_hash": "677d4ec91811d48311bff2e1cf33af84",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 114,
"avg_line_length": 36.35632183908046,
"alnum_prop": 0.5940562756876383,
"repo_name": "akx/license-grep",
"id": "671eb4018d3d2982fef5c287dce5ce7faa38d40f",
"size": "3163",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "license_grep/input/javascript.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "22318"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, unicode_literals
import os
from django.conf import settings
from celery import shared_task
from celery.task.schedules import crontab
from celery.decorators import periodic_task
from celery.utils.log import get_task_logger
logger = get_task_logger(__name__)
@periodic_task(run_every=(crontab(minute="*/1")), name="check_folders", ignore_result=False)
def check_folders():
print("WHY!")
    directories = [[x[0], os.stat(x[0])] for x in os.walk(settings.MEDIA_ROOT)]
print(directories)
logger.info(directories) | {
"content_hash": "ca8d28785db037d49e63d5f2caf2dec9",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 92,
"avg_line_length": 34.0625,
"alnum_prop": 0.7651376146788991,
"repo_name": "Yeasayer/YoutubeVideoPage",
"id": "e793a4129d4f04317f93ed9b215707aa47dd86bc",
"size": "545",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ytdownload/tasks.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "53556"
},
{
"name": "HTML",
"bytes": "73191"
},
{
"name": "JavaScript",
"bytes": "157652"
},
{
"name": "Python",
"bytes": "20804"
},
{
"name": "Shell",
"bytes": "24010"
}
],
"symlink_target": ""
} |
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from twilio.base import deserialize
from twilio.base import values
from twilio.base.instance_context import InstanceContext
from twilio.base.instance_resource import InstanceResource
from twilio.base.list_resource import ListResource
from twilio.base.page import Page
from twilio.rest.preview.deployed_devices.fleet.certificate import CertificateList
from twilio.rest.preview.deployed_devices.fleet.deployment import DeploymentList
from twilio.rest.preview.deployed_devices.fleet.device import DeviceList
from twilio.rest.preview.deployed_devices.fleet.key import KeyList
class FleetList(ListResource):
""" PLEASE NOTE that this class contains preview products that are subject
to change. Use them with caution. If you currently do not have developer
preview access, please contact [email protected]. """
def __init__(self, version):
"""
Initialize the FleetList
:param Version version: Version that contains the resource
:returns: twilio.rest.preview.deployed_devices.fleet.FleetList
:rtype: twilio.rest.preview.deployed_devices.fleet.FleetList
"""
super(FleetList, self).__init__(version)
# Path Solution
self._solution = {}
self._uri = '/Fleets'.format(**self._solution)
def create(self, friendly_name=values.unset):
"""
Create the FleetInstance
:param unicode friendly_name: A human readable description for this Fleet.
:returns: The created FleetInstance
:rtype: twilio.rest.preview.deployed_devices.fleet.FleetInstance
"""
data = values.of({'FriendlyName': friendly_name, })
payload = self._version.create(method='POST', uri=self._uri, data=data, )
return FleetInstance(self._version, payload, )
def stream(self, limit=None, page_size=None):
"""
Streams FleetInstance records from the API as a generator stream.
This operation lazily loads records as efficiently as possible until the limit
is reached.
The results are returned as a generator, so this operation is memory efficient.
:param int limit: Upper limit for the number of records to return. stream()
guarantees to never return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, stream() will attempt to read the
limit with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.preview.deployed_devices.fleet.FleetInstance]
"""
limits = self._version.read_limits(limit, page_size)
page = self.page(page_size=limits['page_size'], )
return self._version.stream(page, limits['limit'])
def list(self, limit=None, page_size=None):
"""
Lists FleetInstance records from the API as a list.
Unlike stream(), this operation is eager and will load `limit` records into
memory before returning.
:param int limit: Upper limit for the number of records to return. list() guarantees
never to return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, list() will attempt to read the limit
with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.preview.deployed_devices.fleet.FleetInstance]
"""
return list(self.stream(limit=limit, page_size=page_size, ))
def page(self, page_token=values.unset, page_number=values.unset,
page_size=values.unset):
"""
Retrieve a single page of FleetInstance records from the API.
Request is executed immediately
:param str page_token: PageToken provided by the API
:param int page_number: Page Number, this value is simply for client state
:param int page_size: Number of records to return, defaults to 50
:returns: Page of FleetInstance
:rtype: twilio.rest.preview.deployed_devices.fleet.FleetPage
"""
data = values.of({'PageToken': page_token, 'Page': page_number, 'PageSize': page_size, })
response = self._version.page(method='GET', uri=self._uri, params=data, )
return FleetPage(self._version, response, self._solution)
def get_page(self, target_url):
"""
Retrieve a specific page of FleetInstance records from the API.
Request is executed immediately
:param str target_url: API-generated URL for the requested results page
:returns: Page of FleetInstance
:rtype: twilio.rest.preview.deployed_devices.fleet.FleetPage
"""
response = self._version.domain.twilio.request(
'GET',
target_url,
)
return FleetPage(self._version, response, self._solution)
def get(self, sid):
"""
Constructs a FleetContext
:param sid: A string that uniquely identifies the Fleet.
:returns: twilio.rest.preview.deployed_devices.fleet.FleetContext
:rtype: twilio.rest.preview.deployed_devices.fleet.FleetContext
"""
return FleetContext(self._version, sid=sid, )
def __call__(self, sid):
"""
Constructs a FleetContext
:param sid: A string that uniquely identifies the Fleet.
:returns: twilio.rest.preview.deployed_devices.fleet.FleetContext
:rtype: twilio.rest.preview.deployed_devices.fleet.FleetContext
"""
return FleetContext(self._version, sid=sid, )
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Preview.DeployedDevices.FleetList>'
class FleetPage(Page):
""" PLEASE NOTE that this class contains preview products that are subject
to change. Use them with caution. If you currently do not have developer
preview access, please contact [email protected]. """
def __init__(self, version, response, solution):
"""
Initialize the FleetPage
:param Version version: Version that contains the resource
:param Response response: Response from the API
:returns: twilio.rest.preview.deployed_devices.fleet.FleetPage
:rtype: twilio.rest.preview.deployed_devices.fleet.FleetPage
"""
super(FleetPage, self).__init__(version, response)
# Path Solution
self._solution = solution
def get_instance(self, payload):
"""
Build an instance of FleetInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.preview.deployed_devices.fleet.FleetInstance
:rtype: twilio.rest.preview.deployed_devices.fleet.FleetInstance
"""
return FleetInstance(self._version, payload, )
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Preview.DeployedDevices.FleetPage>'
class FleetContext(InstanceContext):
""" PLEASE NOTE that this class contains preview products that are subject
to change. Use them with caution. If you currently do not have developer
preview access, please contact [email protected]. """
def __init__(self, version, sid):
"""
Initialize the FleetContext
:param Version version: Version that contains the resource
:param sid: A string that uniquely identifies the Fleet.
:returns: twilio.rest.preview.deployed_devices.fleet.FleetContext
:rtype: twilio.rest.preview.deployed_devices.fleet.FleetContext
"""
super(FleetContext, self).__init__(version)
# Path Solution
self._solution = {'sid': sid, }
self._uri = '/Fleets/{sid}'.format(**self._solution)
# Dependents
self._devices = None
self._deployments = None
self._certificates = None
self._keys = None
def fetch(self):
"""
Fetch the FleetInstance
:returns: The fetched FleetInstance
:rtype: twilio.rest.preview.deployed_devices.fleet.FleetInstance
"""
payload = self._version.fetch(method='GET', uri=self._uri, )
return FleetInstance(self._version, payload, sid=self._solution['sid'], )
def delete(self):
"""
Deletes the FleetInstance
:returns: True if delete succeeds, False otherwise
:rtype: bool
"""
return self._version.delete(method='DELETE', uri=self._uri, )
def update(self, friendly_name=values.unset,
default_deployment_sid=values.unset):
"""
Update the FleetInstance
:param unicode friendly_name: A human readable description for this Fleet.
:param unicode default_deployment_sid: A default Deployment SID.
:returns: The updated FleetInstance
:rtype: twilio.rest.preview.deployed_devices.fleet.FleetInstance
"""
data = values.of({'FriendlyName': friendly_name, 'DefaultDeploymentSid': default_deployment_sid, })
payload = self._version.update(method='POST', uri=self._uri, data=data, )
return FleetInstance(self._version, payload, sid=self._solution['sid'], )
@property
def devices(self):
"""
Access the devices
:returns: twilio.rest.preview.deployed_devices.fleet.device.DeviceList
:rtype: twilio.rest.preview.deployed_devices.fleet.device.DeviceList
"""
if self._devices is None:
self._devices = DeviceList(self._version, fleet_sid=self._solution['sid'], )
return self._devices
@property
def deployments(self):
"""
Access the deployments
:returns: twilio.rest.preview.deployed_devices.fleet.deployment.DeploymentList
:rtype: twilio.rest.preview.deployed_devices.fleet.deployment.DeploymentList
"""
if self._deployments is None:
self._deployments = DeploymentList(self._version, fleet_sid=self._solution['sid'], )
return self._deployments
@property
def certificates(self):
"""
Access the certificates
:returns: twilio.rest.preview.deployed_devices.fleet.certificate.CertificateList
:rtype: twilio.rest.preview.deployed_devices.fleet.certificate.CertificateList
"""
if self._certificates is None:
self._certificates = CertificateList(self._version, fleet_sid=self._solution['sid'], )
return self._certificates
@property
def keys(self):
"""
Access the keys
:returns: twilio.rest.preview.deployed_devices.fleet.key.KeyList
:rtype: twilio.rest.preview.deployed_devices.fleet.key.KeyList
"""
if self._keys is None:
self._keys = KeyList(self._version, fleet_sid=self._solution['sid'], )
return self._keys
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
return '<Twilio.Preview.DeployedDevices.FleetContext {}>'.format(context)
class FleetInstance(InstanceResource):
""" PLEASE NOTE that this class contains preview products that are subject
to change. Use them with caution. If you currently do not have developer
preview access, please contact [email protected]. """
def __init__(self, version, payload, sid=None):
"""
Initialize the FleetInstance
:returns: twilio.rest.preview.deployed_devices.fleet.FleetInstance
:rtype: twilio.rest.preview.deployed_devices.fleet.FleetInstance
"""
super(FleetInstance, self).__init__(version)
# Marshaled Properties
self._properties = {
'sid': payload.get('sid'),
'url': payload.get('url'),
'unique_name': payload.get('unique_name'),
'friendly_name': payload.get('friendly_name'),
'account_sid': payload.get('account_sid'),
'default_deployment_sid': payload.get('default_deployment_sid'),
'date_created': deserialize.iso8601_datetime(payload.get('date_created')),
'date_updated': deserialize.iso8601_datetime(payload.get('date_updated')),
'links': payload.get('links'),
}
# Context
self._context = None
self._solution = {'sid': sid or self._properties['sid'], }
@property
def _proxy(self):
"""
Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: FleetContext for this FleetInstance
:rtype: twilio.rest.preview.deployed_devices.fleet.FleetContext
"""
if self._context is None:
self._context = FleetContext(self._version, sid=self._solution['sid'], )
return self._context
@property
def sid(self):
"""
:returns: A string that uniquely identifies this Fleet.
:rtype: unicode
"""
return self._properties['sid']
@property
def url(self):
"""
:returns: URL of this Fleet.
:rtype: unicode
"""
return self._properties['url']
@property
def unique_name(self):
"""
:returns: A unique, addressable name of this Fleet.
:rtype: unicode
"""
return self._properties['unique_name']
@property
def friendly_name(self):
"""
:returns: A human readable description for this Fleet.
:rtype: unicode
"""
return self._properties['friendly_name']
@property
def account_sid(self):
"""
:returns: The unique SID that identifies this Account.
:rtype: unicode
"""
return self._properties['account_sid']
@property
def default_deployment_sid(self):
"""
:returns: The unique SID that identifies this Fleet's default Deployment.
:rtype: unicode
"""
return self._properties['default_deployment_sid']
@property
def date_created(self):
"""
:returns: The date this Fleet was created.
:rtype: datetime
"""
return self._properties['date_created']
@property
def date_updated(self):
"""
:returns: The date this Fleet was updated.
:rtype: datetime
"""
return self._properties['date_updated']
@property
def links(self):
"""
:returns: Nested resource URLs.
:rtype: unicode
"""
return self._properties['links']
def fetch(self):
"""
Fetch the FleetInstance
:returns: The fetched FleetInstance
:rtype: twilio.rest.preview.deployed_devices.fleet.FleetInstance
"""
return self._proxy.fetch()
def delete(self):
"""
Deletes the FleetInstance
:returns: True if delete succeeds, False otherwise
:rtype: bool
"""
return self._proxy.delete()
def update(self, friendly_name=values.unset,
default_deployment_sid=values.unset):
"""
Update the FleetInstance
:param unicode friendly_name: A human readable description for this Fleet.
:param unicode default_deployment_sid: A default Deployment SID.
:returns: The updated FleetInstance
:rtype: twilio.rest.preview.deployed_devices.fleet.FleetInstance
"""
return self._proxy.update(
friendly_name=friendly_name,
default_deployment_sid=default_deployment_sid,
)
@property
def devices(self):
"""
Access the devices
:returns: twilio.rest.preview.deployed_devices.fleet.device.DeviceList
:rtype: twilio.rest.preview.deployed_devices.fleet.device.DeviceList
"""
return self._proxy.devices
@property
def deployments(self):
"""
Access the deployments
:returns: twilio.rest.preview.deployed_devices.fleet.deployment.DeploymentList
:rtype: twilio.rest.preview.deployed_devices.fleet.deployment.DeploymentList
"""
return self._proxy.deployments
@property
def certificates(self):
"""
Access the certificates
:returns: twilio.rest.preview.deployed_devices.fleet.certificate.CertificateList
:rtype: twilio.rest.preview.deployed_devices.fleet.certificate.CertificateList
"""
return self._proxy.certificates
@property
def keys(self):
"""
Access the keys
:returns: twilio.rest.preview.deployed_devices.fleet.key.KeyList
:rtype: twilio.rest.preview.deployed_devices.fleet.key.KeyList
"""
return self._proxy.keys
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
return '<Twilio.Preview.DeployedDevices.FleetInstance {}>'.format(context)
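# A minimal usage sketch (not part of this generated module), assuming valid
# credentials in TWILIO_ACCOUNT_SID / TWILIO_AUTH_TOKEN; the friendly names
# below are placeholders.
if __name__ == '__main__':
    import os
    from twilio.rest import Client
    client = Client(os.environ['TWILIO_ACCOUNT_SID'],
                    os.environ['TWILIO_AUTH_TOKEN'])
    fleets = client.preview.deployed_devices.fleets
    fleet = fleets.create(friendly_name='My IoT fleet')      # FleetList.create()
    for existing in fleets.list(limit=20):                   # FleetList.list()
        print('{} {}'.format(existing.sid, existing.friendly_name))
    fleets(fleet.sid).update(friendly_name='Renamed fleet')  # FleetContext.update()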
| {
"content_hash": "b33c2800c4a30c9b44817f50b47380e3",
"timestamp": "",
"source": "github",
"line_count": 526,
"max_line_length": 107,
"avg_line_length": 34.14448669201521,
"alnum_prop": 0.6284521158129176,
"repo_name": "twilio/twilio-python",
"id": "ae5c09614fb6e23bdea81a899a298cee4a782d6e",
"size": "17975",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "twilio/rest/preview/deployed_devices/fleet/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "234"
},
{
"name": "Makefile",
"bytes": "2157"
},
{
"name": "Python",
"bytes": "11241545"
}
],
"symlink_target": ""
} |
'''
The following code requires python-stix v1.1.0.4 or greater installed.
For installation instructions, please refer to https://github.com/STIXProject/python-stix.
'''
from stix.core import STIXPackage
from stix.exploit_target import ExploitTarget,Vulnerability
def main():
pkg = STIXPackage()
vuln = Vulnerability()
vuln.cve_id = "CVE-2013-3893"
et = ExploitTarget(title="Javascript vulnerability in MSIE 6-11")
et.add_vulnerability(vuln)
pkg.add_exploit_target(et)
    print(pkg.to_xml())
if __name__ == '__main__':
main()
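# A possible consumer-side counterpart (a sketch, not part of this idiom),
# assuming the XML printed above was saved to 'cve-in-exploit-target.xml':
#
#     from stix.core import STIXPackage
#     pkg = STIXPackage.from_xml('cve-in-exploit-target.xml')
#     for et in pkg.exploit_targets:
#         for vuln in et.vulnerabilities:
#             print(vuln.cve_id)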
| {
"content_hash": "2448e072fef1ce004016f158cae2b8aa",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 90,
"avg_line_length": 24.91304347826087,
"alnum_prop": 0.68760907504363,
"repo_name": "johnwunder/johnwunder.github.io",
"id": "c4a1dd7744e3e0dbfc183a680fd539afd3a4afd2",
"size": "700",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "documentation/idioms/cve/cve-in-exploit-target_producer.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "9605"
},
{
"name": "HTML",
"bytes": "18133902"
},
{
"name": "JavaScript",
"bytes": "3731"
},
{
"name": "Ruby",
"bytes": "24306"
},
{
"name": "Shell",
"bytes": "1640"
}
],
"symlink_target": ""
} |
from hidparser.Item import ItemType, Item
from hidparser.enums import CollectionType, ReportFlags, ReportType
from hidparser.DeviceBuilder import DeviceBuilder
class InputItem(Item):
flags = None # type: ReportFlags
def visit(self, descriptor: DeviceBuilder):
descriptor.add_report(ReportType.INPUT, self.flags)
@classmethod
def _get_tag(cls):
return 0x80
@classmethod
def _get_type(cls):
return ItemType.MAIN
def __init__(self, **kwargs):
super(InputItem, self).__init__(**kwargs)
self.flags = ReportFlags.from_bytes(self.data)
def __repr__(self):
return "<{0}: {1}>".format(self.__class__.__name__, self.flags)
class OutputItem(Item):
flags = None
def visit(self, descriptor: DeviceBuilder):
descriptor.add_report(ReportType.OUTPUT, self.flags)
@classmethod
def _get_tag(cls):
return 0x90
@classmethod
def _get_type(cls):
return ItemType.MAIN
def __init__(self, **kwargs):
super(OutputItem, self).__init__(**kwargs)
self.flags = ReportFlags.from_bytes(self.data)
def __repr__(self):
return "<{0}: {1}>".format(self.__class__.__name__, self.flags)
class FeatureItem(Item):
flags = None
def visit(self, descriptor: DeviceBuilder):
descriptor.add_report(ReportType.FEATURE, self.flags)
@classmethod
def _get_tag(cls):
return 0xB0
@classmethod
def _get_type(cls):
return ItemType.MAIN
def __init__(self, **kwargs):
super(FeatureItem, self).__init__(**kwargs)
self.flags = ReportFlags.from_bytes(self.data)
def __repr__(self):
return "<{0}: {1}>".format(self.__class__.__name__, self.flags)
class CollectionItem(Item):
collection = None
@classmethod
def _get_tag(cls):
return 0xA0
@classmethod
def _get_type(cls):
return ItemType.MAIN
def visit(self, descriptor: DeviceBuilder):
if not isinstance(self.collection, CollectionType):
raise ValueError("CollectionItem does not have a valid collection set")
descriptor.push_collection(self.collection)
def __init__(self, **kwargs):
super(CollectionItem, self).__init__(**kwargs)
        if self.data is None or len(self.data) != 1:
raise ValueError("Collection must contain one byte of data")
self.collection = CollectionType(self.data[0])
def __repr__(self):
return "<{}: {}>".format(self.__class__.__name__, self.collection)
class EndCollectionItem(Item):
def visit(self, descriptor: DeviceBuilder):
descriptor.pop_collection()
@classmethod
def _get_tag(cls):
return 0xC0
@classmethod
def _get_type(cls):
return ItemType.MAIN
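# A small illustrative sketch (not part of hidparser's public API): print the
# HID main item tags defined above using the private classmethods shown in
# this module.
if __name__ == '__main__':
    for item_cls in (InputItem, OutputItem, FeatureItem,
                     CollectionItem, EndCollectionItem):
        print('{0:<18} tag=0x{1:02X} type={2}'.format(
            item_cls.__name__, item_cls._get_tag(), item_cls._get_type()))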
| {
"content_hash": "59f12682e22880681ed81c1d3009283a",
"timestamp": "",
"source": "github",
"line_count": 112,
"max_line_length": 83,
"avg_line_length": 25.053571428571427,
"alnum_prop": 0.6261582323592302,
"repo_name": "NZSmartie/PyHIDParser",
"id": "4fca8ce70dc07e62e2403d378336236e4d51b2cc",
"size": "2806",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hidparser/ItemMain.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "127050"
}
],
"symlink_target": ""
} |
"""
Django settings for forocacao project.
For more information on this file, see
https://docs.djangoproject.com/en/dev/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/dev/ref/settings/
"""
from __future__ import absolute_import, unicode_literals
import environ
ROOT_DIR = environ.Path(__file__) - 3 # (/a/b/myfile.py - 3 = /)
APPS_DIR = ROOT_DIR.path('forocacao')
env = environ.Env()
# APP CONFIGURATION
# ------------------------------------------------------------------------------
DJANGO_APPS = (
# Default Django apps:
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Useful template tags:
# 'django.contrib.humanize',
# Admin
'suit',
'django.contrib.admin',
)
THIRD_PARTY_APPS = (
'crispy_forms', # Form layouts
'allauth', # registration
'allauth.account', # registration
'allauth.socialaccount', # registration
'suit_redactor',
'filer',
'mptt',
'easy_thumbnails',
'django_countries',
'colorfield',
'import_export',
'photologue', # photo gallery
'sortedm2m',
'modeltranslation', # model translations
'rosetta', # translations
'embed_video', # youtube thumbs and videos
)
# Apps specific for this project go here.
LOCAL_APPS = (
'forocacao.users', # custom users app
'forocacao.app',
# Your stuff: custom apps go here
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
# MIDDLEWARE CONFIGURATION
# ------------------------------------------------------------------------------
MIDDLEWARE_CLASSES = (
# Make sure djangosecure.middleware.SecurityMiddleware is listed first
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
# MIGRATIONS CONFIGURATION
# ------------------------------------------------------------------------------
MIGRATION_MODULES = {
'sites': 'forocacao.contrib.sites.migrations'
}
# DEBUG
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = env.bool("DJANGO_DEBUG", False)
# FIXTURE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-FIXTURE_DIRS
FIXTURE_DIRS = (
str(APPS_DIR.path('fixtures')),
)
# EMAIL CONFIGURATION
# ------------------------------------------------------------------------------
EMAIL_BACKEND = env('DJANGO_EMAIL_BACKEND', default='django.core.mail.backends.smtp.EmailBackend')
# MANAGER CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#admins
ADMINS = (
('Admin', env('DJANGO_DEFAULT_FROM_EMAIL', default='root@localhost')),
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#managers
MANAGERS = ADMINS
# DATABASE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {
# Raises ImproperlyConfigured exception if DATABASE_URL not in os.environ
'default': env.db("DATABASE_URL", default="postgres:///forocacao"),
}
DATABASES['default']['ATOMIC_REQUESTS'] = True
# GENERAL CONFIGURATION
# ------------------------------------------------------------------------------
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/Managua'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#language-code
LANGUAGE_CODE = 'es-es'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#site-id
SITE_ID = 1
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n
USE_I18N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n
USE_L10N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-tz
USE_TZ = True
# TEMPLATE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#templates
TEMPLATES = [
{
# See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-TEMPLATES-BACKEND
'BACKEND': 'django.template.backends.django.DjangoTemplates',
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
'DIRS': [
str(APPS_DIR.path('templates')),
],
'OPTIONS': {
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-debug
'debug': DEBUG,
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-loaders
# https://docs.djangoproject.com/en/dev/ref/templates/api/#loader-types
'loaders': [
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
],
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
# Your stuff: custom template context processors go here
'forocacao.app.context_processors.current_event',
'forocacao.app.context_processors.google_analytics',
],
},
},
]
# See: http://django-crispy-forms.readthedocs.org/en/latest/install.html#template-packs
CRISPY_TEMPLATE_PACK = 'bootstrap3'
# STATIC FILE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-root
STATIC_ROOT = str(ROOT_DIR('staticfiles'))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = '/static/'
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS
STATICFILES_DIRS = (
str(APPS_DIR.path('static')),
)
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
# MEDIA CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-root
MEDIA_ROOT = str(APPS_DIR('media'))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-url
MEDIA_URL = '/media/'
# URL Configuration
# ------------------------------------------------------------------------------
ROOT_URLCONF = 'config.urls'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application
WSGI_APPLICATION = 'config.wsgi.application'
# AUTHENTICATION CONFIGURATION
# ------------------------------------------------------------------------------
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend',
)
# Some really nice defaults
ACCOUNT_AUTHENTICATION_METHOD = 'username'
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_EMAIL_VERIFICATION = 'mandatory'
# Determines whether or not an e-mail address is automatically confirmed by a mere GET request.
ACCOUNT_CONFIRM_EMAIL_ON_GET = True
# The URL to redirect to after a successful e-mail confirmation, in case no user is logged in.
ACCOUNT_EMAIL_CONFIRMATION_ANONYMOUS_REDIRECT_URL = 'app:confirmationmail'
# The URL to redirect to after a successful e-mail confirmation, in case of an authenticated user. Set to None to use settings.
ACCOUNT_EMAIL_CONFIRMATION_AUTHENTICATED_REDIRECT_URL = 'app:home'
ACCOUNT_EMAIL_CONFIRMATION_EXPIRE_DAYS = 15
# Custom user app defaults
# Select the correct user model
AUTH_USER_MODEL = 'users.User'
#LOGIN_REDIRECT_URL = 'users:redirect'
LOGIN_REDIRECT_URL = 'app:home'
LOGIN_URL = 'account_login'
# SLUGLIFIER
AUTOSLUG_SLUGIFY_FUNCTION = 'slugify.slugify'
# LOGGING CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#logging
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
},
        'console': {
            'level': 'INFO',
            'class': 'logging.StreamHandler',
},
},
'loggers': {
'django': {
'handlers': ['console'],
            'level': 'INFO',
'propagate': True,
},
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
# Your common stuff: Below this line define 3rd party library settings
ACCOUNT_SIGNUP_FORM_CLASS = 'forocacao.users.forms.SignupForm'
ACCOUNT_USERNAME_REQUIRED = False
GOOGLE_ANALYTICS_KEY = env('GOOGLE_ANALYTICS_KEY', default='')
from django.utils.translation import ugettext_lazy as _
LANGUAGES = [
('es', _('Spanish')),
('en', _('English')),
]
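# Illustration (hypothetical values, not part of this project): the settings
# above are driven by django-environ, so a deployment typically exports the
# variables this module reads before starting Django, e.g.:
#
#     export DJANGO_DEBUG=False
#     export DATABASE_URL=postgres://user:password@localhost/forocacao
#     export DJANGO_EMAIL_BACKEND=django.core.mail.backends.console.EmailBackend
#     export DJANGO_DEFAULT_FROM_EMAIL=admin@example.com
#     export GOOGLE_ANALYTICS_KEY=UA-XXXXXXX-1
#
# A stage-specific module would then extend this one, for example:
#
#     # config/settings/local.py (hypothetical)
#     from .common import *  # noqa
#     DEBUG = True
#     EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'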
| {
"content_hash": "f68f55184298ff9c640aaa978576262f",
"timestamp": "",
"source": "github",
"line_count": 301,
"max_line_length": 127,
"avg_line_length": 35.63787375415282,
"alnum_prop": 0.6087442901090706,
"repo_name": "javierwilson/resilienciacafe",
"id": "222ce3458cf598fb299386ec4703fb35f7475928",
"size": "10751",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "config/settings/common.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "117761"
},
{
"name": "HTML",
"bytes": "91091"
},
{
"name": "JavaScript",
"bytes": "2394"
},
{
"name": "Python",
"bytes": "143840"
},
{
"name": "Shell",
"bytes": "3620"
}
],
"symlink_target": ""
} |
import logging
import sys
import subprocess
from threading import Thread
import time
import fcntl
import os
from socketio.namespace import BaseNamespace
from socketio.mixins import RoomsMixin, BroadcastMixin
from socketio.sdjango import namespace
from views import get_fabfile_path, fabric_special_options
from fabric_bolt.projects.models import Deployment
@namespace('/deployment')
class ChatNamespace(BaseNamespace, RoomsMixin, BroadcastMixin):
def initialize(self):
self.logger = logging.getLogger("socketio.deployment")
self.log("Socketio session started")
def log(self, message):
self.logger.info("[{0}] {1}".format(self.socket.sessid, message))
def on_join(self, deployment_id):
self.deployment = Deployment.objects.get(pk=deployment_id)
if self.deployment.status != self.deployment.PENDING:
return True
update_thread = Thread(target=self.output_stream_generator, args=(self,))
update_thread.daemon = True
update_thread.start()
return True
def on_input(self, text):
self.process.stdin.write(text + '\n')
return True
def recv_disconnect(self):
# Remove nickname from the list.
self.log('Disconnected')
self.disconnect(silent=True)
return True
def build_command(self):
command = ['fab', self.deployment.task.name]
hosts = self.deployment.stage.hosts.values_list('name', flat=True)
if hosts:
command.append('--hosts=' + ','.join(hosts))
# Get the dictionary of configurations for this stage
config = self.deployment.stage.get_configurations()
config.update(self.request.session.get('configuration_values', {}))
command_to_config = {x.replace('-', '_'): x for x in fabric_special_options}
# Take the special env variables out
normal_options = list(set(config.keys()) - set(command_to_config.keys()))
# Special ones get set a different way
special_options = list(set(config.keys()) & set(command_to_config.keys()))
def get_key_value_string(key, value):
if isinstance(value, bool):
return key + ('' if value else '=')
elif isinstance(value, float):
return key + '=' + str(value)
else:
return '{}={}'.format(key, value.replace('"', '\\"'))
if normal_options:
command.append('--set')
command.append(','.join(get_key_value_string(key, config[key]) for key in normal_options))
if special_options:
for key in special_options:
command.append('--' + get_key_value_string(command_to_config[key], config[key]))
command.append('--fabfile={}'.format(get_fabfile_path(self.deployment.stage.project)))
return command
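    # Illustration (hypothetical values): for a task named 'deploy', hosts
    # ['web1', 'web2'] and stage configuration {'branch': 'master',
    # 'parallel': True} (treating 'parallel' as one of fabric_special_options),
    # build_command() assembles roughly:
    #
    #   fab deploy --hosts=web1,web2 --set branch=master --parallel \
    #       --fabfile=<path returned by get_fabfile_path(project)>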
def output_stream_generator(self, *args, **kwargs):
self.process = subprocess.Popen(self.build_command(), stdout=subprocess.PIPE, stderr=subprocess.STDOUT, stdin=subprocess.PIPE)
fd = self.process.stdout.fileno()
fl = fcntl.fcntl(fd, fcntl.F_GETFL)
fcntl.fcntl(fd, fcntl.F_SETFL, fl | os.O_NONBLOCK)
all_output = ''
while True:
try:
nextline = self.process.stdout.read()
except IOError:
nextline = ''
            if nextline == '' and self.process.poll() is not None:
break
all_output += nextline
if nextline:
self.broadcast_event('output', {'status': 'pending', 'lines': str(nextline)})
time.sleep(0.00001)
sys.stdout.flush()
self.deployment.status = self.deployment.SUCCESS if self.process.returncode == 0 else self.deployment.FAILED
self.deployment.output = all_output
self.deployment.save()
self.broadcast_event('output', {'status': self.deployment.status})
self.disconnect()
| {
"content_hash": "d61a7bb89f46d5847be87e79ef096f02",
"timestamp": "",
"source": "github",
"line_count": 122,
"max_line_length": 134,
"avg_line_length": 32.68852459016394,
"alnum_prop": 0.6176028084252758,
"repo_name": "Hedde/fabric-bolt",
"id": "49c626a1bbaa08a0d9bc80537bd5584206afe8a6",
"size": "3988",
"binary": false,
"copies": "1",
"ref": "refs/heads/feature/venv_support",
"path": "fabric_bolt/projects/sockets.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "779"
},
{
"name": "JavaScript",
"bytes": "102422"
},
{
"name": "Python",
"bytes": "128175"
}
],
"symlink_target": ""
} |
"""
kombu.messaging
===============
Sending and receiving messages.
"""
from __future__ import absolute_import
from itertools import count
from .compression import compress
from .connection import maybe_channel, is_connection
from .entity import Exchange, Queue, DELIVERY_MODES
from .five import int_types, text_t, values
from .serialization import encode
from .utils import ChannelPromise, maybe_list
__all__ = ['Exchange', 'Queue', 'Producer', 'Consumer']
# XXX compat attribute
entry_to_queue = Queue.from_dict
class Producer(object):
"""Message Producer.
:param channel: Connection or channel.
:keyword exchange: Optional default exchange.
:keyword routing_key: Optional default routing key.
:keyword serializer: Default serializer. Default is `"json"`.
:keyword compression: Default compression method. Default is no
compression.
:keyword auto_declare: Automatically declare the default exchange
at instantiation. Default is :const:`True`.
:keyword on_return: Callback to call for undeliverable messages,
when the `mandatory` or `immediate` arguments to
:meth:`publish` is used. This callback needs the following
signature: `(exception, exchange, routing_key, message)`.
Note that the producer needs to drain events to use this feature.
"""
#: Default exchange
exchange = None
#: Default routing key.
routing_key = ''
#: Default serializer to use. Default is JSON.
serializer = None
#: Default compression method. Disabled by default.
compression = None
#: By default the exchange is declared at instantiation.
#: If you want to declare manually then you can set this
#: to :const:`False`.
auto_declare = True
#: Basic return callback.
on_return = None
#: Set if channel argument was a Connection instance (using
#: default_channel).
__connection__ = None
def __init__(self, channel, exchange=None, routing_key=None,
serializer=None, auto_declare=None, compression=None,
on_return=None):
self._channel = channel
self.exchange = exchange
self.routing_key = routing_key or self.routing_key
self.serializer = serializer or self.serializer
self.compression = compression or self.compression
self.on_return = on_return or self.on_return
self._channel_promise = None
if self.exchange is None:
self.exchange = Exchange('')
if auto_declare is not None:
self.auto_declare = auto_declare
if self._channel:
self.revive(self._channel)
def __repr__(self):
return '<Producer: {0.channel}>'.format(self)
def __reduce__(self):
return self.__class__, self.__reduce_args__()
def __reduce_args__(self):
return (None, self.exchange, self.routing_key, self.serializer,
self.auto_declare, self.compression)
def declare(self):
"""Declare the exchange.
This happens automatically at instantiation if
:attr:`auto_declare` is enabled.
"""
if self.exchange.name:
self.exchange.declare()
def maybe_declare(self, entity, retry=False, **retry_policy):
"""Declare the exchange if it hasn't already been declared
during this session."""
if entity:
from .common import maybe_declare
return maybe_declare(entity, self.channel, retry, **retry_policy)
def publish(self, body, routing_key=None, delivery_mode=None,
mandatory=False, immediate=False, priority=0,
content_type=None, content_encoding=None, serializer=None,
headers=None, compression=None, exchange=None, retry=False,
retry_policy=None, declare=[], **properties):
"""Publish message to the specified exchange.
:param body: Message body.
:keyword routing_key: Message routing key.
:keyword delivery_mode: See :attr:`delivery_mode`.
:keyword mandatory: Currently not supported.
:keyword immediate: Currently not supported.
:keyword priority: Message priority. A number between 0 and 9.
:keyword content_type: Content type. Default is auto-detect.
:keyword content_encoding: Content encoding. Default is auto-detect.
:keyword serializer: Serializer to use. Default is auto-detect.
:keyword compression: Compression method to use. Default is none.
:keyword headers: Mapping of arbitrary headers to pass along
with the message body.
:keyword exchange: Override the exchange. Note that this exchange
must have been declared.
:keyword declare: Optional list of required entities that must
have been declared before publishing the message. The entities
will be declared using :func:`~kombu.common.maybe_declare`.
:keyword retry: Retry publishing, or declaring entities if the
connection is lost.
:keyword retry_policy: Retry configuration, this is the keywords
supported by :meth:`~kombu.Connection.ensure`.
:keyword \*\*properties: Additional message properties, see AMQP spec.
"""
headers = {} if headers is None else headers
retry_policy = {} if retry_policy is None else retry_policy
routing_key = self.routing_key if routing_key is None else routing_key
compression = self.compression if compression is None else compression
exchange = exchange or self.exchange
if isinstance(exchange, Exchange):
delivery_mode = delivery_mode or exchange.delivery_mode
exchange = exchange.name
else:
delivery_mode = delivery_mode or self.exchange.delivery_mode
if not isinstance(delivery_mode, int_types):
delivery_mode = DELIVERY_MODES[delivery_mode]
properties['delivery_mode'] = delivery_mode
body, content_type, content_encoding = self._prepare(
body, serializer, content_type, content_encoding,
compression, headers)
publish = self._publish
if retry:
publish = self.connection.ensure(self, publish, **retry_policy)
return publish(body, priority, content_type,
content_encoding, headers, properties,
routing_key, mandatory, immediate, exchange, declare)
def _publish(self, body, priority, content_type, content_encoding,
headers, properties, routing_key, mandatory,
immediate, exchange, declare):
channel = self.channel
message = channel.prepare_message(
body, priority, content_type,
content_encoding, headers, properties,
)
if declare:
maybe_declare = self.maybe_declare
[maybe_declare(entity) for entity in declare]
return channel.basic_publish(
message,
exchange=exchange, routing_key=routing_key,
mandatory=mandatory, immediate=immediate,
)
def _get_channel(self):
channel = self._channel
if isinstance(channel, ChannelPromise):
channel = self._channel = channel()
self.exchange.revive(channel)
if self.on_return:
channel.events['basic_return'].add(self.on_return)
return channel
def _set_channel(self, channel):
self._channel = channel
channel = property(_get_channel, _set_channel)
def revive(self, channel):
"""Revive the producer after connection loss."""
if is_connection(channel):
connection = channel
self.__connection__ = connection
channel = ChannelPromise(lambda: connection.default_channel)
if isinstance(channel, ChannelPromise):
self._channel = channel
self.exchange = self.exchange(channel)
else:
# Channel already concrete
self._channel = channel
if self.on_return:
self._channel.events['basic_return'].add(self.on_return)
self.exchange = self.exchange(channel)
if self.auto_declare:
            # auto_declare is not recommended as this will force
# evaluation of the channel.
self.declare()
def __enter__(self):
return self
def __exit__(self, *exc_info):
self.release()
def release(self):
pass
close = release
def _prepare(self, body, serializer=None, content_type=None,
content_encoding=None, compression=None, headers=None):
# No content_type? Then we're serializing the data internally.
if not content_type:
serializer = serializer or self.serializer
(content_type, content_encoding,
body) = encode(body, serializer=serializer)
else:
# If the programmer doesn't want us to serialize,
# make sure content_encoding is set.
if isinstance(body, text_t):
if not content_encoding:
content_encoding = 'utf-8'
body = body.encode(content_encoding)
# If they passed in a string, we can't know anything
# about it. So assume it's binary data.
elif not content_encoding:
content_encoding = 'binary'
if compression:
body, headers['compression'] = compress(body, compression)
return body, content_type, content_encoding
@property
def connection(self):
try:
return self.__connection__ or self.channel.connection.client
except AttributeError:
pass
class Consumer(object):
"""Message consumer.
:param channel: see :attr:`channel`.
:param queues: see :attr:`queues`.
:keyword no_ack: see :attr:`no_ack`.
:keyword auto_declare: see :attr:`auto_declare`
:keyword callbacks: see :attr:`callbacks`.
:keyword on_message: See :attr:`on_message`
:keyword on_decode_error: see :attr:`on_decode_error`.
"""
#: The connection/channel to use for this consumer.
channel = None
#: A single :class:`~kombu.Queue`, or a list of queues to
#: consume from.
queues = None
#: Flag for message acknowledgment disabled/enabled.
#: Enabled by default.
no_ack = None
#: By default all entities will be declared at instantiation, if you
#: want to handle this manually you can set this to :const:`False`.
auto_declare = True
#: List of callbacks called in order when a message is received.
#:
#: The signature of the callbacks must take two arguments:
#: `(body, message)`, which is the decoded message body and
#: the `Message` instance (a subclass of
#: :class:`~kombu.transport.base.Message`).
callbacks = None
#: Optional function called whenever a message is received.
#:
#: When defined this function will be called instead of the
#: :meth:`receive` method, and :attr:`callbacks` will be disabled.
#:
#: So this can be used as an alternative to :attr:`callbacks` when
#: you don't want the body to be automatically decoded.
#: Note that the message will still be decompressed if the message
#: has the ``compression`` header set.
#:
#: The signature of the callback must take a single argument,
#: which is the raw message object (a subclass of
#: :class:`~kombu.transport.base.Message`).
#:
#: Also note that the ``message.body`` attribute, which is the raw
#: contents of the message body, may in some cases be a read-only
#: :class:`buffer` object.
on_message = None
#: Callback called when a message can't be decoded.
#:
#: The signature of the callback must take two arguments: `(message,
#: exc)`, which is the message that can't be decoded and the exception
#: that occurred while trying to decode it.
on_decode_error = None
_tags = count(1) # global
def __init__(self, channel, queues=None, no_ack=None, auto_declare=None,
callbacks=None, on_decode_error=None, on_message=None):
self.channel = channel
self.queues = self.queues or [] if queues is None else queues
self.no_ack = self.no_ack if no_ack is None else no_ack
self.callbacks = (self.callbacks or [] if callbacks is None
else callbacks)
self.on_message = on_message
self._active_tags = {}
if auto_declare is not None:
self.auto_declare = auto_declare
if on_decode_error is not None:
self.on_decode_error = on_decode_error
if self.channel:
self.revive(self.channel)
def revive(self, channel):
"""Revive consumer after connection loss."""
self._active_tags.clear()
channel = self.channel = maybe_channel(channel)
self.queues = [queue(self.channel)
for queue in maybe_list(self.queues)]
for queue in self.queues:
queue.revive(channel)
if self.auto_declare:
self.declare()
def declare(self):
"""Declare queues, exchanges and bindings.
This is done automatically at instantiation if :attr:`auto_declare`
is set.
"""
for queue in self.queues:
queue.declare()
def register_callback(self, callback):
"""Register a new callback to be called when a message
is received.
The signature of the callback needs to accept two arguments:
`(body, message)`, which is the decoded message body
and the `Message` instance (a subclass of
:class:`~kombu.transport.base.Message`.
"""
self.callbacks.append(callback)
def __enter__(self):
self.consume()
return self
def __exit__(self, *exc_info):
try:
self.cancel()
except Exception:
pass
def add_queue(self, queue):
queue = queue(self.channel)
if self.auto_declare:
queue.declare()
self.queues.append(queue)
return queue
def add_queue_from_dict(self, queue, **options):
return self.add_queue(Queue.from_dict(queue, **options))
def consume(self, no_ack=None):
if self.queues:
no_ack = self.no_ack if no_ack is None else no_ack
H, T = self.queues[:-1], self.queues[-1]
for queue in H:
self._basic_consume(queue, no_ack=no_ack, nowait=True)
self._basic_consume(T, no_ack=no_ack, nowait=False)
def cancel(self):
"""End all active queue consumers.
This does not affect already delivered messages, but it does
mean the server will not send any more messages for this consumer.
"""
cancel = self.channel.basic_cancel
for tag in values(self._active_tags):
cancel(tag)
self._active_tags.clear()
close = cancel
def cancel_by_queue(self, queue):
"""Cancel consumer by queue name."""
try:
tag = self._active_tags.pop(queue)
except KeyError:
pass
else:
self.queues[:] = [q for q in self.queues if q.name != queue]
self.channel.basic_cancel(tag)
def consuming_from(self, queue):
name = queue
if isinstance(queue, Queue):
name = queue.name
return any(q.name == name for q in self.queues)
def purge(self):
"""Purge messages from all queues.
.. warning::
This will *delete all ready messages*, there is no
undo operation.
"""
return sum(queue.purge() for queue in self.queues)
def flow(self, active):
"""Enable/disable flow from peer.
This is a simple flow-control mechanism that a peer can use
to avoid overflowing its queues or otherwise finding itself
receiving more messages than it can process.
The peer that receives a request to stop sending content
will finish sending the current content (if any), and then wait
until flow is reactivated.
"""
self.channel.flow(active)
def qos(self, prefetch_size=0, prefetch_count=0, apply_global=False):
"""Specify quality of service.
The client can request that messages should be sent in
advance so that when the client finishes processing a message,
the following message is already held locally, rather than needing
to be sent down the channel. Prefetching gives a performance
improvement.
        The prefetch window is ignored if the :attr:`no_ack` option is set.
:param prefetch_size: Specify the prefetch window in octets.
The server will send a message in advance if it is equal to
or smaller in size than the available prefetch size (and
also falls within other prefetch limits). May be set to zero,
meaning "no specific limit", although other prefetch limits
may still apply.
:param prefetch_count: Specify the prefetch window in terms of
whole messages.
:param apply_global: Apply new settings globally on all channels.
Currently not supported by RabbitMQ.
"""
return self.channel.basic_qos(prefetch_size,
prefetch_count,
apply_global)
def recover(self, requeue=False):
"""Redeliver unacknowledged messages.
Asks the broker to redeliver all unacknowledged messages
on the specified channel.
:keyword requeue: By default the messages will be redelivered
to the original recipient. With `requeue` set to true, the
server will attempt to requeue the message, potentially then
delivering it to an alternative subscriber.
"""
return self.channel.basic_recover(requeue=requeue)
def receive(self, body, message):
"""Method called when a message is received.
This dispatches to the registered :attr:`callbacks`.
:param body: The decoded message body.
:param message: The `Message` instance.
:raises NotImplementedError: If no consumer callbacks have been
registered.
"""
callbacks = self.callbacks
if not callbacks:
raise NotImplementedError('Consumer does not have any callbacks')
[callback(body, message) for callback in callbacks]
def _basic_consume(self, queue, consumer_tag=None,
no_ack=no_ack, nowait=True):
tag = self._active_tags.get(queue.name)
if tag is None:
tag = self._add_tag(queue, consumer_tag)
queue.consume(tag, self._receive_callback,
no_ack=no_ack, nowait=nowait)
return tag
def _add_tag(self, queue, consumer_tag=None):
tag = consumer_tag or str(next(self._tags))
self._active_tags[queue.name] = tag
return tag
def _receive_callback(self, message):
on_m, channel, decoded = self.on_message, self.channel, None
try:
m2p = getattr(channel, 'message_to_python', None)
if m2p:
message = m2p(message)
decoded = None if on_m else message.decode()
except Exception as exc:
if not self.on_decode_error:
raise
self.on_decode_error(message, exc)
else:
return on_m(message) if on_m else self.receive(decoded, message)
def __repr__(self):
return '<Consumer: {0.queues}>'.format(self)
@property
def connection(self):
try:
return self.channel.connection.client
except AttributeError:
pass
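# A minimal end-to-end sketch (not part of this module), using kombu's
# in-memory transport so it runs without a broker; Connection is imported
# from the top-level kombu package.
if __name__ == '__main__':
    import socket
    from kombu import Connection
    demo_queue = Queue('kombu_demo', Exchange('kombu_demo', type='direct'),
                       routing_key='demo')
    def handle_message(body, message):
        print('received: %r' % (body, ))
        message.ack()
    with Connection('memory://') as connection:
        # declare=[demo_queue] creates the exchange/queue/binding before the
        # message is published, so the message is waiting when the consumer
        # starts draining events.
        with Producer(connection) as producer:
            producer.publish({'hello': 'world'}, exchange=demo_queue.exchange,
                             routing_key='demo', declare=[demo_queue],
                             serializer='json')
        with Consumer(connection, queues=[demo_queue],
                      callbacks=[handle_message]):
            try:
                connection.drain_events(timeout=1)
            except socket.timeout:
                pass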
| {
"content_hash": "aceb94e1a9172de119bbf80744576dc5",
"timestamp": "",
"source": "github",
"line_count": 556,
"max_line_length": 78,
"avg_line_length": 35.92805755395683,
"alnum_prop": 0.6190929114937925,
"repo_name": "mathom/kombu",
"id": "c1e0f7decca692a144bc36d271e7a8fb4581056a",
"size": "19976",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kombu/messaging.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "623852"
},
{
"name": "Shell",
"bytes": "2059"
}
],
"symlink_target": ""
} |
import names
import random
names_list = []
# generate a list of random names
print("sender, receiver, num_texts_sent")
for i in range(1, 31):
    names_list.append(names.get_full_name())
# remove duplicates
unique_names = set(names_list)
# pair every sender with every other name as a receiver and print a row
# with a random message count
for name in unique_names:
    receivers = unique_names - {name}
    for name2 in receivers:
        print("{name}, {name2}, {numtexts}".format(name=name, name2=name2,
                                                   numtexts=random.randint(0, 1000)))
| {
"content_hash": "baee8fb1418b59a13d3a1ebd0086e878",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 76,
"avg_line_length": 26.52173913043478,
"alnum_prop": 0.6311475409836066,
"repo_name": "rajivjk/data.scripts",
"id": "a54a8ec49e5b02cd343d7c45d99a2193b0bb9f7f",
"size": "611",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sms.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "611"
},
{
"name": "R",
"bytes": "515"
}
],
"symlink_target": ""
} |
import re
import shlex
import threading
import time
from oslo_log import log as logging
from oslo_service import loopingcall
from oslo_utils import excutils
from oslo_utils import units
import six
from cinder import exception
from cinder import utils
from cinder.volume.drivers.hitachi import hbsd_basiclib as basic_lib
LOG = logging.getLogger(__name__)
SNM2_ENV = ('LANG=C STONAVM_HOME=/usr/stonavm '
'LD_LIBRARY_PATH=/usr/stonavm/lib '
'STONAVM_RSP_PASS=on STONAVM_ACT=on')
MAX_HOSTGROUPS = 127
MAX_HOSTGROUPS_ISCSI = 254
MAX_HLUN = 2047
EXEC_LOCK_PATH_BASE = basic_lib.LOCK_DIR + 'hsnm_'
EXEC_TIMEOUT = 10
EXEC_INTERVAL = 1
CHAP_TIMEOUT = 5
PAIRED = 12
DUMMY_LU = -1
class HBSDSNM2(basic_lib.HBSDBasicLib):
def __init__(self, conf):
super(HBSDSNM2, self).__init__(conf=conf)
self.unit_name = conf.hitachi_unit_name
self.hsnm_lock = threading.Lock()
self.hsnm_lock_file = ('%s%s'
% (EXEC_LOCK_PATH_BASE, self.unit_name))
copy_speed = conf.hitachi_copy_speed
if copy_speed <= 2:
self.pace = 'slow'
elif copy_speed == 3:
self.pace = 'normal'
else:
self.pace = 'prior'
def _wait_for_exec_hsnm(self, args, printflag, noretry, timeout, start):
lock = basic_lib.get_process_lock(self.hsnm_lock_file)
with self.hsnm_lock, lock:
ret, stdout, stderr = self.exec_command('env', args=args,
printflag=printflag)
if not ret or noretry:
raise loopingcall.LoopingCallDone((ret, stdout, stderr))
if time.time() - start >= timeout:
LOG.error("snm2 command timeout.")
raise loopingcall.LoopingCallDone((ret, stdout, stderr))
if (re.search('DMEC002047', stderr)
or re.search('DMEC002048', stderr)
or re.search('DMED09000A', stderr)
or re.search('DMED090026', stderr)
or re.search('DMED0E002B', stderr)
or re.search('DMER03006A', stderr)
or re.search('DMER030080', stderr)
or re.search('DMER0300B8', stderr)
or re.search('DMER0800CF', stderr)
or re.search('DMER0800D[0-6D]', stderr)
or re.search('DMES052602', stderr)):
LOG.error("Unexpected error occurs in snm2.")
raise loopingcall.LoopingCallDone((ret, stdout, stderr))
def exec_hsnm(self, command, args, printflag=True, noretry=False,
timeout=EXEC_TIMEOUT, interval=EXEC_INTERVAL):
args = '%s %s %s' % (SNM2_ENV, command, args)
loop = loopingcall.FixedIntervalLoopingCall(
self._wait_for_exec_hsnm, args, printflag,
noretry, timeout, time.time())
return loop.start(interval=interval).wait()
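    # Illustration (hypothetical unit name): exec_hsnm('auluref', '-unit array0')
    # ends up invoking, through exec_command('env', ...):
    #
    #   env LANG=C STONAVM_HOME=/usr/stonavm LD_LIBRARY_PATH=/usr/stonavm/lib \
    #       STONAVM_RSP_PASS=on STONAVM_ACT=on auluref -unit array0
    #
    # and retries every EXEC_INTERVAL seconds (serialized by the per-unit file
    # lock) until the command succeeds, EXEC_TIMEOUT elapses, or one of the
    # DMEC/DMED/DMER/DMES error codes listed above is returned.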
def _execute_with_exception(self, cmd, args, **kwargs):
ret, stdout, stderr = self.exec_hsnm(cmd, args, **kwargs)
if ret:
cmds = '%(cmd)s %(args)s' % {'cmd': cmd, 'args': args}
msg = basic_lib.output_err(
600, cmd=cmds, ret=ret, out=stdout, err=stderr)
raise exception.HBSDError(data=msg)
return ret, stdout, stderr
def _execute_and_return_stdout(self, cmd, args, **kwargs):
result = self._execute_with_exception(cmd, args, **kwargs)
return result[1]
def get_comm_version(self):
ret, stdout, stderr = self.exec_hsnm('auman', '-help')
m = re.search(r'Version (\d+).(\d+)', stdout)
if not m:
msg = basic_lib.output_err(
600, cmd='auman', ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
return '%s.%s' % (m.group(1), m.group(2))
def add_used_hlun(self, command, port, gid, used_list, ldev):
unit = self.unit_name
ret, stdout, stderr = self.exec_hsnm(command,
'-unit %s -refer' % unit)
if ret:
msg = basic_lib.output_err(
600, cmd=command, ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
lines = stdout.splitlines()
for line in lines[2:]:
line = shlex.split(line)
if not line:
continue
if line[0] == port and int(line[1][0:3]) == gid:
if int(line[2]) not in used_list:
used_list.append(int(line[2]))
if int(line[3]) == ldev:
hlu = int(line[2])
LOG.warning('ldev(%(ldev)d) is already mapped '
'(hlun: %(hlu)d)',
{'ldev': ldev, 'hlu': hlu})
return hlu
return None
def _get_lu(self, lu=None):
# When 'lu' is 0, it should be true. So, it cannot remove 'is None'.
if lu is None:
args = '-unit %s' % self.unit_name
else:
args = '-unit %s -lu %s' % (self.unit_name, lu)
return self._execute_and_return_stdout('auluref', args)
def get_unused_ldev(self, ldev_range):
start = ldev_range[0]
end = ldev_range[1]
unit = self.unit_name
ret, stdout, stderr = self.exec_hsnm('auluref', '-unit %s' % unit)
if ret:
msg = basic_lib.output_err(
600, cmd='auluref', ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
free_ldev = start
lines = stdout.splitlines()
found = False
for line in lines[2:]:
line = shlex.split(line)
if not line:
continue
ldev_num = int(line[0])
if free_ldev > ldev_num:
continue
if free_ldev == ldev_num:
free_ldev += 1
else:
found = True
break
if free_ldev > end:
break
else:
found = True
if not found:
msg = basic_lib.output_err(648, resource='LDEV')
raise exception.HBSDError(message=msg)
return free_ldev
def get_hgname_gid(self, port, host_grp_name):
unit = self.unit_name
ret, stdout, stderr = self.exec_hsnm('auhgdef',
'-unit %s -refer' % unit)
if ret:
msg = basic_lib.output_err(
600, cmd='auhgdef', ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
lines = stdout.splitlines()
is_target_port = False
for line in lines:
line = shlex.split(line)
if not line:
continue
if line[0] == 'Port' and line[1] == port:
is_target_port = True
continue
if is_target_port:
if line[0] == 'Port':
break
if not line[0].isdigit():
continue
gid = int(line[0])
if line[1] == host_grp_name:
return gid
return None
def get_unused_gid(self, group_range, port):
start = group_range[0]
end = group_range[1]
unit = self.unit_name
ret, stdout, stderr = self.exec_hsnm('auhgdef',
'-unit %s -refer' % unit)
if ret:
msg = basic_lib.output_err(
600, cmd='auhgdef', ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
lines = stdout.splitlines()
is_target_port = False
free_gid = start
found = False
for line in lines:
line = shlex.split(line)
if not line:
continue
if line[0] == 'Port' and line[1] == port:
is_target_port = True
continue
if is_target_port:
if line[0] == 'Port':
found = True
break
if not line[0].isdigit():
continue
gid = int(line[0])
if free_gid > gid:
continue
if free_gid == gid:
free_gid += 1
else:
found = True
break
if free_gid > end or free_gid > MAX_HOSTGROUPS:
break
else:
found = True
if not found:
msg = basic_lib.output_err(648, resource='GID')
raise exception.HBSDError(message=msg)
return free_gid
def comm_set_target_wwns(self, target_ports):
unit = self.unit_name
ret, stdout, stderr = self.exec_hsnm('aufibre1',
'-unit %s -refer' % unit)
if ret:
msg = basic_lib.output_err(
600, cmd='aufibre1', ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
lines = stdout.splitlines()
target_wwns = {}
for line in lines[3:]:
if re.match('Transfer', line):
break
line = shlex.split(line)
if len(line) < 4:
continue
port = '%s%s' % (line[0], line[1])
if target_ports:
if port in target_ports:
target_wwns[port] = line[3]
else:
target_wwns[port] = line[3]
LOG.debug('target wwns: %s', target_wwns)
return target_wwns
def get_hostgroup_from_wwns(self, hostgroups, port, wwns, buf, login):
for pt in wwns:
for line in buf[port]['assigned']:
hgname = shlex.split(line[38:])[1][4:]
if not re.match(basic_lib.NAME_PREFIX, hgname):
continue
if pt.search(line[38:54]):
wwn = line[38:54]
gid = int(shlex.split(line[38:])[1][0:3])
is_detected = None
if login:
for line in buf[port]['detected']:
if pt.search(line[38:54]):
is_detected = True
break
else:
is_detected = False
hostgroups.append({'port': six.text_type(port), 'gid': gid,
'initiator_wwn': wwn,
'detected': is_detected})
def comm_get_hostgroup_info(self, hgs, wwns, target_ports, login=True):
unit = self.unit_name
ret, stdout, stderr = self.exec_hsnm('auhgwwn',
'-unit %s -refer' % unit)
if ret:
msg = basic_lib.output_err(
600, cmd='auhgwwn', ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
security_ports = []
patterns = []
for wwn in wwns:
pt = re.compile(wwn, re.IGNORECASE)
patterns.append(pt)
lines = stdout.splitlines()
buf = {}
_buffer = []
port = None
security = None
for line in lines:
if re.match('Port', line):
port = shlex.split(line)[1]
if target_ports and port not in target_ports:
port = None
else:
security = True if shlex.split(line)[5] == 'ON' else False
buf[port] = {'detected': [], 'assigned': [],
'assignable': []}
if security:
security_ports.append(port)
continue
if port and security:
if re.search('Detected WWN', line):
_buffer = buf[port]['detected']
continue
elif re.search('Assigned WWN', line):
_buffer = buf[port]['assigned']
continue
elif re.search('Assignable WWN', line):
_buffer = buf[port]['assignable']
continue
_buffer.append(line)
hostgroups = []
for port in buf.keys():
self.get_hostgroup_from_wwns(
hostgroups, port, patterns, buf, login)
for hostgroup in hostgroups:
hgs.append(hostgroup)
return security_ports
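    # Illustrative shape of the buffer parsed above (not part of the driver);
    # port names such as '0A' are controller number plus port letter:
    #
    #     buf = {'0A': {'detected':   [<raw 'Detected WWN' lines>],
    #                   'assigned':   [<raw 'Assigned WWN' lines>],
    #                   'assignable': [<raw 'Assignable WWN' lines>]},
    #            ...}
    #
    # The buckets are only filled for ports whose WWN security is ON, and
    # those port names are what comm_get_hostgroup_info() returns as
    # security_ports.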
def comm_delete_lun_core(self, command, hostgroups, lun):
unit = self.unit_name
no_lun_cnt = 0
deleted_hostgroups = []
for hostgroup in hostgroups:
LOG.debug('comm_delete_lun: hostgroup is %s', hostgroup)
port = hostgroup['port']
gid = hostgroup['gid']
ctl_no = port[0]
port_no = port[1]
is_deleted = False
for deleted in deleted_hostgroups:
if port == deleted['port'] and gid == deleted['gid']:
is_deleted = True
if is_deleted:
continue
ret, stdout, stderr = self.exec_hsnm(command,
'-unit %s -refer' % unit)
if ret:
msg = basic_lib.output_err(
600, cmd=command, ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
lines = stdout.splitlines()
for line in lines[2:]:
line = shlex.split(line)
if not line:
continue
if (line[0] == port and int(line[1][0:3]) == gid
and int(line[3]) == lun):
hlu = int(line[2])
break
else:
no_lun_cnt += 1
if no_lun_cnt == len(hostgroups):
raise exception.HBSDNotFound
else:
continue
opt = '-unit %s -rm %s %s %d %d %d' % (unit, ctl_no, port_no,
gid, hlu, lun)
ret, stdout, stderr = self.exec_hsnm(command, opt)
if ret:
msg = basic_lib.output_err(
600, cmd=command, ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
deleted_hostgroups.append({'port': port, 'gid': gid})
LOG.debug('comm_delete_lun is over (%d)', lun)
def comm_delete_lun(self, hostgroups, ldev):
self.comm_delete_lun_core('auhgmap', hostgroups, ldev)
def comm_delete_lun_iscsi(self, hostgroups, ldev):
self.comm_delete_lun_core('autargetmap', hostgroups, ldev)
def comm_add_ldev(self, pool_id, ldev, capacity, is_vvol):
unit = self.unit_name
if is_vvol:
command = 'aureplicationvvol'
opt = ('-unit %s -add -lu %d -size %dg'
% (unit, ldev, capacity))
else:
command = 'auluadd'
opt = ('-unit %s -lu %d -dppoolno %d -size %dg'
% (unit, ldev, pool_id, capacity))
ret, stdout, stderr = self.exec_hsnm(command, opt)
if ret:
if (re.search('DMEC002047', stderr)
or re.search('DMES052602', stderr)
or re.search('DMED09000A', stderr)):
raise exception.HBSDNotFound
else:
msg = basic_lib.output_err(
600, cmd=command, ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
def comm_add_hostgrp(self, port, gid, host_grp_name):
unit = self.unit_name
ctl_no = port[0]
port_no = port[1]
opt = '-unit %s -add %s %s -gno %d -gname %s' % (unit, ctl_no,
port_no, gid,
host_grp_name)
ret, stdout, stderr = self.exec_hsnm('auhgdef', opt)
if ret:
raise exception.HBSDNotFound
def comm_del_hostgrp(self, port, gid, host_grp_name):
unit = self.unit_name
ctl_no = port[0]
port_no = port[1]
opt = '-unit %s -rm %s %s -gname %s' % (unit, ctl_no, port_no,
host_grp_name)
ret, stdout, stderr = self.exec_hsnm('auhgdef', opt)
if ret:
msg = basic_lib.output_err(
600, cmd='auhgdef', ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
def comm_add_hbawwn(self, port, gid, wwn):
unit = self.unit_name
ctl_no = port[0]
port_no = port[1]
opt = '-unit %s -set -permhg %s %s %s -gno %d' % (unit, ctl_no,
port_no, wwn, gid)
ret, stdout, stderr = self.exec_hsnm('auhgwwn', opt)
if ret:
opt = '-unit %s -assign -permhg %s %s %s -gno %d' % (unit, ctl_no,
port_no, wwn,
gid)
ret, stdout, stderr = self.exec_hsnm('auhgwwn', opt)
if ret:
msg = basic_lib.output_err(
600, cmd='auhgwwn', ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
def comm_add_lun(self, command, hostgroups, ldev, is_once=False):
unit = self.unit_name
tmp_hostgroups = hostgroups[:]
used_list = []
is_ok = False
hlu = None
old_hlu = None
for hostgroup in hostgroups:
port = hostgroup['port']
gid = hostgroup['gid']
hlu = self.add_used_hlun(command, port, gid, used_list, ldev)
            # 'hlu' and 'old_hlu' can legitimately be 0, which is falsy, so
            # the 'is not None' checks below must not be replaced with
            # truthiness tests.
if hlu is not None:
if old_hlu is not None and old_hlu != hlu:
msg = basic_lib.output_err(648, resource='LUN (HLUN)')
raise exception.HBSDError(message=msg)
is_ok = True
hostgroup['lun'] = hlu
tmp_hostgroups.remove(hostgroup)
old_hlu = hlu
else:
hlu = old_hlu
if not used_list:
hlu = 0
elif hlu is None:
for i in range(MAX_HLUN + 1):
if i not in used_list:
hlu = i
break
else:
raise exception.HBSDNotFound
ret = 0
stdout = None
stderr = None
invalid_hgs_str = None
for hostgroup in tmp_hostgroups:
port = hostgroup['port']
gid = hostgroup['gid']
ctl_no = port[0]
port_no = port[1]
if not hostgroup['detected']:
if invalid_hgs_str:
invalid_hgs_str = '%s, %s:%d' % (invalid_hgs_str,
port, gid)
else:
invalid_hgs_str = '%s:%d' % (port, gid)
continue
opt = '-unit %s -add %s %s %d %d %d' % (unit, ctl_no, port_no,
gid, hlu, ldev)
ret, stdout, stderr = self.exec_hsnm(command, opt)
if ret == 0:
is_ok = True
hostgroup['lun'] = hlu
if is_once:
break
else:
LOG.warning(basic_lib.set_msg(
314, ldev=ldev, lun=hlu, port=port, id=gid))
if not is_ok:
if stderr:
msg = basic_lib.output_err(
600, cmd=command, ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
else:
msg = basic_lib.output_err(659, gid=invalid_hgs_str)
raise exception.HBSDError(message=msg)
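    # Illustrative note (not part of the original driver): the 'is not None'
    # checks in comm_add_lun above matter because HLU 0 is a valid value and
    # is falsy, e.g.:
    #
    #     hlu = 0
    #     if hlu:              # wrong: treats an existing mapping at HLU 0
    #         pass             #        as "no mapping"
    #     if hlu is not None:  # right: only None means "not mapped yet"
    #         pass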
def comm_delete_ldev(self, ldev, is_vvol):
unit = self.unit_name
if is_vvol:
command = 'aureplicationvvol'
opt = '-unit %s -rm -lu %d' % (unit, ldev)
else:
command = 'auludel'
opt = '-unit %s -lu %d -f' % (unit, ldev)
ret, stdout, stderr = self.exec_hsnm(command, opt,
timeout=30, interval=3)
if ret:
if (re.search('DMEC002048', stderr)
or re.search('DMED090026', stderr)):
raise exception.HBSDNotFound
msg = basic_lib.output_err(
600, cmd=command, ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
return ret
def comm_extend_ldev(self, ldev, old_size, new_size):
unit = self.unit_name
command = 'auluchgsize'
options = '-unit %s -lu %d -size %dg' % (unit, ldev, new_size)
ret, stdout, stderr = self.exec_hsnm(command, options)
if ret:
msg = basic_lib.output_err(
600, cmd=command, ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
def delete_chap_user(self, port):
unit = self.unit_name
ctl_no = port[0]
port_no = port[1]
auth_username = self.conf.hitachi_auth_user
opt = '-unit %s -rm %s %s -user %s' % (unit, ctl_no, port_no,
auth_username)
return self.exec_hsnm('auchapuser', opt)
def _wait_for_add_chap_user(self, cmd, auth_username,
auth_password, start):
        # Keep 'import pexpect' local to this method instead of importing it
        # at module level so that tempest can still work.
import pexpect
lock = basic_lib.get_process_lock(self.hsnm_lock_file)
with self.hsnm_lock, lock:
try:
child = pexpect.spawn(cmd)
child.expect('Secret: ', timeout=CHAP_TIMEOUT)
child.sendline(auth_password)
child.expect('Re-enter Secret: ',
timeout=CHAP_TIMEOUT)
child.sendline(auth_password)
child.expect('The CHAP user information has '
'been added successfully.',
timeout=CHAP_TIMEOUT)
except Exception:
if time.time() - start >= EXEC_TIMEOUT:
msg = basic_lib.output_err(642, user=auth_username)
raise exception.HBSDError(message=msg)
else:
raise loopingcall.LoopingCallDone(True)
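    # Illustrative note (not part of the original driver): this method is
    # driven by a FixedIntervalLoopingCall (see set_chap_authention below),
    # so its outcomes are: success -> raise LoopingCallDone(True) to stop the
    # loop and return True; a pexpect failure before EXEC_TIMEOUT -> return
    # normally so the loop retries; failure at or past EXEC_TIMEOUT -> raise
    # HBSDError to abort. A minimal sketch of that polling shape:
    #
    #     def _poll(start):
    #         try:
    #             interactive_step()          # hypothetical; may mismatch
    #         except Exception:
    #             if time.time() - start >= EXEC_TIMEOUT:
    #                 raise exception.HBSDError(...)
    #         else:
    #             raise loopingcall.LoopingCallDone(True)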
def set_chap_authention(self, port, gid):
ctl_no = port[0]
port_no = port[1]
unit = self.unit_name
auth_username = self.conf.hitachi_auth_user
auth_password = self.conf.hitachi_auth_password
add_chap_user = self.conf.hitachi_add_chap_user
assign_flag = True
added_flag = False
opt = '-unit %s -refer %s %s -user %s' % (unit, ctl_no, port_no,
auth_username)
ret, stdout, stderr = self.exec_hsnm('auchapuser', opt, noretry=True)
if ret:
if not add_chap_user:
msg = basic_lib.output_err(643, user=auth_username)
raise exception.HBSDError(message=msg)
root_helper = utils.get_root_helper()
cmd = ('%s env %s auchapuser -unit %s -add %s %s '
'-tno %d -user %s' % (root_helper, SNM2_ENV, unit, ctl_no,
port_no, gid, auth_username))
LOG.debug('Add CHAP user')
loop = loopingcall.FixedIntervalLoopingCall(
self._wait_for_add_chap_user, cmd,
auth_username, auth_password, time.time())
added_flag = loop.start(interval=EXEC_INTERVAL).wait()
else:
lines = stdout.splitlines()[4:]
for line in lines:
if int(shlex.split(line)[0][0:3]) == gid:
assign_flag = False
break
if assign_flag:
opt = '-unit %s -assign %s %s -tno %d -user %s' % (unit, ctl_no,
port_no, gid,
auth_username)
ret, stdout, stderr = self.exec_hsnm('auchapuser', opt)
if ret:
if added_flag:
_ret, _stdout, _stderr = self.delete_chap_user(port)
if _ret:
LOG.warning(basic_lib.set_msg(
303, user=auth_username))
msg = basic_lib.output_err(
600, cmd='auchapuser', ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
return added_flag
def comm_add_hostgrp_iscsi(self, port, gid, target_alias, target_iqn):
auth_method = self.conf.hitachi_auth_method
unit = self.unit_name
ctl_no = port[0]
port_no = port[1]
if auth_method:
auth_arg = '-authmethod %s -mutual disable' % auth_method
else:
auth_arg = '-authmethod None'
opt = '-unit %s -add %s %s -tno %d' % (unit, ctl_no, port_no, gid)
opt = '%s -talias %s -iname %s %s' % (opt, target_alias, target_iqn,
auth_arg)
ret, stdout, stderr = self.exec_hsnm('autargetdef', opt)
if ret:
raise exception.HBSDNotFound
def delete_iscsi_target(self, port, _target_no, target_alias):
unit = self.unit_name
ctl_no = port[0]
port_no = port[1]
opt = '-unit %s -rm %s %s -talias %s' % (unit, ctl_no, port_no,
target_alias)
return self.exec_hsnm('autargetdef', opt)
def comm_set_hostgrp_reportportal(self, port, target_alias):
unit = self.unit_name
ctl_no = port[0]
port_no = port[1]
opt = '-unit %s -set %s %s -talias %s' % (unit, ctl_no, port_no,
target_alias)
opt = '%s -ReportFullPortalList enable' % opt
ret, stdout, stderr = self.exec_hsnm('autargetopt', opt)
if ret:
msg = basic_lib.output_err(
600, cmd='autargetopt', ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
def comm_add_initiator(self, port, gid, host_iqn):
unit = self.unit_name
ctl_no = port[0]
port_no = port[1]
opt = '-unit %s -add %s %s -tno %d -iname %s' % (unit, ctl_no,
port_no, gid,
host_iqn)
ret, stdout, stderr = self.exec_hsnm('autargetini', opt)
if ret:
msg = basic_lib.output_err(
600, cmd='autargetini', ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
def comm_get_hostgroup_info_iscsi(self, hgs, host_iqn, target_ports):
unit = self.unit_name
ret, stdout, stderr = self.exec_hsnm('autargetini',
'-unit %s -refer' % unit)
if ret:
msg = basic_lib.output_err(
600, cmd='autargetini', ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
security_ports = []
lines = stdout.splitlines()
hostgroups = []
security = True
for line in lines:
if not shlex.split(line):
continue
if re.match('Port', line):
line = shlex.split(line)
port = line[1]
security = True if line[4] == 'ON' else False
continue
if target_ports and port not in target_ports:
continue
if security:
if (host_iqn in shlex.split(line[72:]) and
re.match(basic_lib.NAME_PREFIX,
shlex.split(line)[0][4:])):
gid = int(shlex.split(line)[0][0:3])
hostgroups.append(
{'port': port, 'gid': gid, 'detected': True})
LOG.debug('Find port=%(port)s gid=%(gid)d',
{'port': port, 'gid': gid})
if port not in security_ports:
security_ports.append(port)
for hostgroup in hostgroups:
hgs.append(hostgroup)
return security_ports
def comm_get_iscsi_ip(self, port):
unit = self.unit_name
ret, stdout, stderr = self.exec_hsnm('auiscsi',
'-unit %s -refer' % unit)
if ret:
msg = basic_lib.output_err(
600, cmd='auiscsi', ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
lines = stdout.splitlines()
is_target_port = False
for line in lines:
line_array = shlex.split(line)
if not line_array:
continue
if line_array[0] == 'Port' and line_array[1] != 'Number':
if line_array[1] == port:
is_target_port = True
else:
is_target_port = False
continue
if is_target_port and re.search('IPv4 Address', line):
ip_addr = shlex.split(line)[3]
break
if is_target_port and re.search('Port Number', line):
ip_port = shlex.split(line)[3]
else:
msg = basic_lib.output_err(651)
raise exception.HBSDError(message=msg)
return ip_addr, ip_port
def comm_get_target_iqn(self, port, gid):
unit = self.unit_name
ret, stdout, stderr = self.exec_hsnm('autargetdef',
'-unit %s -refer' % unit)
if ret:
msg = basic_lib.output_err(
600, cmd='autargetdef', ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
is_target_host = False
tmp_port = None
lines = stdout.splitlines()
for line in lines:
line = shlex.split(line)
if not line:
continue
if line[0] == "Port":
tmp_port = line[1]
continue
if port != tmp_port:
continue
gid_tmp = line[0][0:3]
if gid_tmp.isdigit() and int(gid_tmp) == gid:
is_target_host = True
continue
if is_target_host and line[0] == "iSCSI":
target_iqn = line[3]
break
else:
msg = basic_lib.output_err(650, resource='IQN')
raise exception.HBSDError(message=msg)
return target_iqn
def get_unused_gid_iscsi(self, group_range, port):
start = group_range[0]
end = min(group_range[1], MAX_HOSTGROUPS_ISCSI)
unit = self.unit_name
ret, stdout, stderr = self.exec_hsnm('autargetdef',
'-unit %s -refer' % unit)
if ret:
msg = basic_lib.output_err(
600, cmd='autargetdef', ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
used_list = []
tmp_port = None
lines = stdout.splitlines()
for line in lines:
line = shlex.split(line)
if not line:
continue
if line[0] == "Port":
tmp_port = line[1]
continue
if port != tmp_port:
continue
if line[0][0:3].isdigit():
gid = int(line[0][0:3])
if start <= gid <= end:
used_list.append(gid)
if not used_list:
return start
for gid in range(start, end + 1):
if gid not in used_list:
break
else:
msg = basic_lib.output_err(648, resource='GID')
raise exception.HBSDError(message=msg)
return gid
def get_gid_from_targetiqn(self, target_iqn, target_alias, port):
unit = self.unit_name
ret, stdout, stderr = self.exec_hsnm('autargetdef',
'-unit %s -refer' % unit)
if ret:
msg = basic_lib.output_err(
600, cmd='autargetdef', ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
gid = None
tmp_port = None
found_alias_full = False
found_alias_part = False
lines = stdout.splitlines()
for line in lines:
line = shlex.split(line)
if not line:
continue
if line[0] == "Port":
tmp_port = line[1]
continue
if port != tmp_port:
continue
if line[0][0:3].isdigit():
tmp_gid = int(line[0][0:3])
if re.match(basic_lib.NAME_PREFIX, line[0][4:]):
found_alias_part = True
if line[0][4:] == target_alias:
found_alias_full = True
continue
if line[0] == "iSCSI":
if line[3] == target_iqn:
gid = tmp_gid
break
else:
found_alias_part = False
if found_alias_full and gid is None:
msg = basic_lib.output_err(641)
raise exception.HBSDError(message=msg)
        # 'gid' can legitimately be 0, which is falsy, so the 'is not None'
        # check below must not be replaced with a truthiness test.
if not found_alias_part and gid is not None:
msg = basic_lib.output_err(641)
raise exception.HBSDError(message=msg)
return gid
def comm_get_dp_pool(self, pool_id):
unit = self.unit_name
ret, stdout, stderr = self.exec_hsnm('audppool',
'-unit %s -refer -g' % unit,
printflag=False)
if ret:
msg = basic_lib.output_err(
600, cmd='audppool', ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
lines = stdout.splitlines()
for line in lines[2:]:
tc_cc = re.search(r'\s(\d+\.\d) GB\s+(\d+\.\d) GB\s', line)
pool_tmp = re.match(r'\s*\d+', line)
if (pool_tmp and tc_cc
and int(pool_tmp.group(0)) == pool_id):
total_gb = int(float(tc_cc.group(1)))
free_gb = total_gb - int(float(tc_cc.group(2)))
return total_gb, free_gb
msg = basic_lib.output_err(640, pool_id=pool_id)
raise exception.HBSDError(message=msg)
def is_detected(self, port, wwn):
hgs = []
self.comm_get_hostgroup_info(hgs, [wwn], [port], login=True)
return hgs[0]['detected']
def pairoperate(self, opr, pvol, svol, is_vvol, args=None):
unit = self.unit_name
method = '-ss' if is_vvol else '-si'
opt = '-unit %s -%s %s -pvol %d -svol %d' % (unit, opr, method,
pvol, svol)
if args:
opt = '%s %s' % (opt, args)
ret, stdout, stderr = self.exec_hsnm('aureplicationlocal', opt)
if ret:
opt = '%s %s' % ('aureplicationlocal', opt)
msg = basic_lib.output_err(
600, cmd=opt, ret=ret, out=stdout, err=stderr)
raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
def comm_create_pair(self, pvol, svol, is_vvol):
if not is_vvol:
args = '-compsplit -pace %s' % self.pace
method = basic_lib.FULL
else:
pool = self.conf.hitachi_thin_pool_id
args = ('-localrepdppoolno %d -localmngdppoolno %d '
'-compsplit -pace %s' % (pool, pool, self.pace))
method = basic_lib.THIN
try:
self.pairoperate('create', pvol, svol, is_vvol, args=args)
except exception.HBSDCmdError as ex:
if (re.search('DMER0300B8', ex.stderr)
or re.search('DMER0800CF', ex.stderr)
or re.search('DMER0800D[0-6D]', ex.stderr)
or re.search('DMER03006A', ex.stderr)
or re.search('DMER030080', ex.stderr)):
msg = basic_lib.output_err(615, copy_method=method, pvol=pvol)
raise exception.HBSDBusy(message=msg)
else:
raise
def _comm_pairevtwait(self, pvol, svol, is_vvol):
unit = self.unit_name
if not is_vvol:
pairname = 'SI_LU%04d_LU%04d' % (pvol, svol)
method = '-si'
else:
pairname = 'SS_LU%04d_LU%04d' % (pvol, svol)
method = '-ss'
opt = ('-unit %s -evwait %s -pairname %s -gname Ungrouped -nowait' %
(unit, method, pairname))
ret, stdout, stderr = self.exec_hsnm('aureplicationmon',
opt, noretry=True)
return ret
def _wait_for_pair_status(self, pvol, svol, is_vvol,
status, timeout, start):
if self._comm_pairevtwait(pvol, svol, is_vvol) in status:
raise loopingcall.LoopingCallDone()
if time.time() - start >= timeout:
msg = basic_lib.output_err(
637, method='_wait_for_pair_status', timeout=timeout)
raise exception.HBSDError(message=msg)
def comm_pairevtwait(self, pvol, svol, is_vvol, status, timeout, interval):
loop = loopingcall.FixedIntervalLoopingCall(
self._wait_for_pair_status, pvol, svol, is_vvol,
status, timeout, time.time())
loop.start(interval=interval).wait()
def delete_pair(self, pvol, svol, is_vvol):
self.pairoperate('simplex', pvol, svol, is_vvol)
def trans_status_hsnm2raid(self, str):
status = None
obj = re.search(r'Split\((.*)%\)', str)
if obj:
status = basic_lib.PSUS
obj = re.search(r'Paired\((.*)%\)', str)
if obj:
status = basic_lib.PAIR
return status
def get_paired_info(self, ldev, only_flag=False):
opt_base = '-unit %s -refer' % self.unit_name
if only_flag:
opt_base = '%s -ss' % opt_base
opt = '%s -pvol %d' % (opt_base, ldev)
ret, stdout, stderr = self.exec_hsnm('aureplicationlocal',
opt, noretry=True)
if ret == 0:
lines = stdout.splitlines()
pair_info = {'pvol': ldev, 'svol': []}
for line in lines[1:]:
status = self.trans_status_hsnm2raid(line)
if re.search('SnapShot', line[100:]):
is_vvol = True
else:
is_vvol = False
line = shlex.split(line)
if not line:
break
svol = int(line[2])
pair_info['svol'].append({'lun': svol,
'status': status,
'is_vvol': is_vvol})
return pair_info
opt = '%s -svol %d' % (opt_base, ldev)
ret, stdout, stderr = self.exec_hsnm('aureplicationlocal',
opt, noretry=True)
if ret == 1:
return {'pvol': None, 'svol': []}
lines = stdout.splitlines()
status = self.trans_status_hsnm2raid(lines[1])
if re.search('SnapShot', lines[1][100:]):
is_vvol = True
else:
is_vvol = False
line = shlex.split(lines[1])
pvol = int(line[1])
return {'pvol': pvol, 'svol': [{'lun': ldev,
'status': status,
'is_vvol': is_vvol}]}
def create_lock_file(self):
basic_lib.create_empty_file(self.hsnm_lock_file)
def get_hostgroup_luns(self, port, gid):
list = []
self.add_used_hlun('auhgmap', port, gid, list, DUMMY_LU)
return list
def get_ldev_size_in_gigabyte(self, ldev, existing_ref):
param = 'unit_name'
if param not in existing_ref:
msg = basic_lib.output_err(700, param=param)
raise exception.HBSDError(data=msg)
storage = existing_ref.get(param)
if storage != self.conf.hitachi_unit_name:
msg = basic_lib.output_err(648, resource=param)
raise exception.HBSDError(data=msg)
try:
stdout = self._get_lu(ldev)
except exception.HBSDError:
with excutils.save_and_reraise_exception():
basic_lib.output_err(648, resource='LDEV')
lines = stdout.splitlines()
line = lines[2]
splits = shlex.split(line)
vol_type = splits[len(splits) - 1]
if basic_lib.NORMAL_VOLUME_TYPE != vol_type:
msg = basic_lib.output_err(702, ldev=ldev)
raise exception.HBSDError(data=msg)
dppool = splits[5]
if 'N/A' == dppool:
msg = basic_lib.output_err(702, ldev=ldev)
raise exception.HBSDError(data=msg)
        # Hitachi storage reports volume sizes in 512-byte blocks, so
        # units.Gi is divided by 512 to get the number of blocks per GiB.
size = int(splits[1])
if size % (units.Gi / 512):
msg = basic_lib.output_err(703, ldev=ldev)
raise exception.HBSDError(data=msg)
num_port = int(splits[len(splits) - 2])
if num_port:
msg = basic_lib.output_err(704, ldev=ldev)
raise exception.HBSDError(data=msg)
return size / (units.Gi / 512)
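    # Worked example (illustrative): units.Gi / 512 == 2097152 blocks per GiB,
    # so an LDEV reported as 41943040 blocks maps to 41943040 / 2097152 == 20
    # GiB, while a size such as 41943041 blocks fails the modulo check above
    # and raises error 703 (not a whole number of gigabytes).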
| {
"content_hash": "fcbc545dd0a4c960e5d83d4ce8c54da4",
"timestamp": "",
"source": "github",
"line_count": 1140,
"max_line_length": 79,
"avg_line_length": 37.73508771929824,
"alnum_prop": 0.48637779534148495,
"repo_name": "eharney/cinder",
"id": "14a9905791cb222b99ac84a20a28b67edeed746c",
"size": "43629",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cinder/volume/drivers/hitachi/hbsd_snm2.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "561"
},
{
"name": "Python",
"bytes": "19839107"
},
{
"name": "Shell",
"bytes": "6453"
}
],
"symlink_target": ""
} |
from cms.api import add_plugin
from cms.utils.copy_plugins import copy_plugins_to
from cms.utils.plugins import downcast_plugins
from copy import deepcopy
from django.contrib import admin
from django.contrib.auth.models import AnonymousUser
from django.contrib.messages.middleware import MessageMiddleware
from django.contrib.sites.models import Site
from django.core.urlresolvers import reverse
from django.utils.timezone import now
from django.utils.translation import get_language, override
import parler
from taggit.models import Tag
from djangocms_blog.models import Post
from djangocms_blog.settings import get_setting
from . import BaseTest
class AdminTest(BaseTest):
def test_admin_fieldsets(self):
request = self.get_page_request('/', self.user_staff, r'/en/blog/', edit=False)
post_admin = admin.site._registry[Post]
with self.settings(BLOG_USE_PLACEHOLDER=True):
fsets = post_admin.get_fieldsets(request)
self.assertFalse('post_text' in fsets[0][1]['fields'])
with self.settings(BLOG_USE_PLACEHOLDER=False):
fsets = post_admin.get_fieldsets(request)
self.assertTrue('post_text' in fsets[0][1]['fields'])
with self.settings(BLOG_MULTISITE=True):
fsets = post_admin.get_fieldsets(request)
self.assertTrue('sites' in fsets[1][1]['fields'][0])
with self.settings(BLOG_MULTISITE=False):
fsets = post_admin.get_fieldsets(request)
self.assertFalse('sites' in fsets[1][1]['fields'][0])
request = self.get_page_request('/', self.user, r'/en/blog/', edit=False)
fsets = post_admin.get_fieldsets(request)
self.assertTrue('author' in fsets[1][1]['fields'][0])
def test_admin_auto_author(self):
page1, page2 = self.get_pages()
request = self.get_page_request('/', self.user_staff, r'/en/blog/', edit=False)
data = deepcopy(self.data['en'][0])
with self.settings(BLOG_AUTHOR_DEFAULT=True):
data['date_published_0'] = now().strftime('%Y-%m-%d')
data['date_published_1'] = now().strftime('%H:%M:%S')
data['categories'] = self.category_1.pk
request = self.post_request(page1, 'en', data=data)
msg_mid = MessageMiddleware()
msg_mid.process_request(request)
post_admin = admin.site._registry[Post]
post_admin.add_view(request)
self.assertEqual(Post.objects.count(), 1)
self.assertEqual(Post.objects.get(translations__slug='first-post').author_id,
request.user.pk)
with self.settings(BLOG_AUTHOR_DEFAULT=False):
data = deepcopy(self.data['en'][1])
data['date_published_0'] = now().strftime('%Y-%m-%d')
data['date_published_1'] = now().strftime('%H:%M:%S')
data['categories'] = self.category_1.pk
request = self.post_request(page1, 'en', data=data)
msg_mid = MessageMiddleware()
msg_mid.process_request(request)
post_admin = admin.site._registry[Post]
post_admin.add_view(request)
self.assertEqual(Post.objects.count(), 2)
self.assertEqual(Post.objects.get(translations__slug='second-post').author_id, None)
with self.settings(BLOG_AUTHOR_DEFAULT='staff'):
data = deepcopy(self.data['en'][2])
data['date_published_0'] = now().strftime('%Y-%m-%d')
data['date_published_1'] = now().strftime('%H:%M:%S')
data['categories'] = self.category_1.pk
request = self.post_request(page1, 'en', data=data)
msg_mid = MessageMiddleware()
msg_mid.process_request(request)
post_admin = admin.site._registry[Post]
post_admin.add_view(request)
self.assertEqual(Post.objects.count(), 3)
self.assertEqual(Post.objects.get(translations__slug='third-post').author.username, 'staff')
class ModelsTest(BaseTest):
def test_model_attributes(self):
post = self._get_post(self.data['en'][0])
post = self._get_post(self.data['it'][0], post, 'it')
post.main_image = self.img
post.save()
post.set_current_language('en')
meta_en = post.as_meta()
self.assertEqual(meta_en.og_type, get_setting('FB_TYPE'))
self.assertEqual(meta_en.title, post.title)
self.assertTrue(meta_en.url.endswith(post.get_absolute_url()))
self.assertEqual(meta_en.description, post.meta_description)
self.assertEqual(meta_en.keywords, post.meta_keywords.split(','))
self.assertEqual(meta_en.published_time, post.date_published)
post.set_current_language('it')
meta_it = post.as_meta()
self.assertEqual(meta_it.title, post.title)
self.assertTrue(meta_it.url.endswith(post.get_absolute_url()))
self.assertNotEqual(meta_it.title, meta_en.title)
self.assertEqual(meta_it.description, post.meta_description)
with override('en'):
post.set_current_language(get_language())
kwargs = {'year': post.date_published.year,
'month': '%02d' % post.date_published.month,
'day': '%02d' % post.date_published.day,
'slug': post.safe_translation_getter('slug', any_language=get_language())}
url_en = reverse('djangocms_blog:post-detail', kwargs=kwargs)
self.assertEqual(url_en, post.get_absolute_url())
with override('it'):
post.set_current_language(get_language())
kwargs = {'year': post.date_published.year,
'month': '%02d' % post.date_published.month,
'day': '%02d' % post.date_published.day,
'slug': post.safe_translation_getter('slug', any_language=get_language())}
url_it = reverse('djangocms_blog:post-detail', kwargs=kwargs)
self.assertEqual(url_it, post.get_absolute_url())
self.assertNotEqual(url_it, url_en)
self.assertEqual(post.get_full_url(), 'http://example.com%s' % url_it)
self.assertEqual(post.get_image_full_url(), 'http://example.com%s' % post.main_image.url)
self.assertEqual(post.thumbnail_options(), get_setting('IMAGE_THUMBNAIL_SIZE'))
self.assertEqual(post.full_image_options(), get_setting('IMAGE_FULL_SIZE'))
post.main_image_thumbnail = self.thumb_1
post.main_image_full = self.thumb_2
self.assertEqual(post.thumbnail_options(), {
'size': (100, 100),
'width': 100, 'height': 100,
'crop': True,
'upscale': False
})
self.assertEqual(post.full_image_options(), {
'size': (200, 200),
'width': 200, 'height': 200,
'crop': False,
'upscale': False
})
post.set_current_language('en')
post.meta_title = 'meta title'
self.assertEqual(post.get_title(), 'meta title')
def test_manager(self):
post1 = self._get_post(self.data['en'][0])
post2 = self._get_post(self.data['en'][1])
# default queryset, published and unpublished posts
months = Post.objects.get_months()
for data in months:
self.assertEqual(data['date'].date(), now().replace(year=now().year, month=now().month, day=1).date())
self.assertEqual(data['count'], 2)
# custom queryset, only published
post1.publish = True
post1.save()
months = Post.objects.get_months(Post.objects.published())
for data in months:
self.assertEqual(data['date'].date(), now().replace(year=now().year, month=now().month, day=1).date())
self.assertEqual(data['count'], 1)
self.assertEqual(len(Post.objects.available()), 1)
# If post is published but publishing date is in the future
post2.date_published = now().replace(year=now().year+1, month=now().month, day=1)
post2.publish = True
post2.save()
self.assertEqual(len(Post.objects.available()), 2)
self.assertEqual(len(Post.objects.published()), 1)
self.assertEqual(len(Post.objects.archived()), 0)
# If post is published but end publishing date is in the past
post2.date_published = now().replace(year=now().year-2, month=now().month, day=1)
post2.date_published_end = now().replace(year=now().year-1, month=now().month, day=1)
post2.save()
self.assertEqual(len(Post.objects.available()), 2)
self.assertEqual(len(Post.objects.published()), 1)
self.assertEqual(len(Post.objects.archived()), 1)
# counting with language fallback enabled
post = self._get_post(self.data['it'][0], post1, 'it')
self.assertEqual(len(Post.objects.filter_by_language('it')), 2)
# No fallback
parler.appsettings.PARLER_LANGUAGES['default']['hide_untranslated'] = True
for index, lang in enumerate(parler.appsettings.PARLER_LANGUAGES[Site.objects.get_current().pk]):
parler.appsettings.PARLER_LANGUAGES[Site.objects.get_current().pk][index]['hide_untranslated'] = True
self.assertEqual(len(Post.objects.filter_by_language('it')), 1)
parler.appsettings.PARLER_LANGUAGES['default']['hide_untranslated'] = False
for index, lang in enumerate(parler.appsettings.PARLER_LANGUAGES[Site.objects.get_current().pk]):
parler.appsettings.PARLER_LANGUAGES[Site.objects.get_current().pk][index]['hide_untranslated'] = False
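    # Illustrative recap of the queryset checks in test_manager above
    # (not part of the test): with the dates used there,
    #
    #     Post.objects.available()  -> {post1, post2}  # publish=True, any window
    #     Post.objects.published()  -> {post1}         # window includes now()
    #     Post.objects.archived()   -> {post2}         # window already closed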
def test_tag_cloud(self):
post1 = self._get_post(self.data['en'][0])
post2 = self._get_post(self.data['en'][1])
post1.tags.add('tag 1', 'tag 2', 'tag 3', 'tag 4')
post1.save()
post2.tags.add('tag 6', 'tag 2', 'tag 5', 'tag 8')
post2.save()
self.assertEqual(len(Post.objects.tag_cloud()), 0)
tags = []
for tag in Tag.objects.all():
if tag.slug == 'tag-2':
tag.count = 2
else:
tag.count = 1
tags.append(tag)
self.assertEqual(Post.objects.tag_cloud(published=True), [])
self.assertEqual(set(Post.objects.tag_cloud(published=False)), set(tags))
tags_1 = []
for tag in Tag.objects.all():
if tag.slug == 'tag-2':
tag.count = 2
tags_1.append(tag)
elif tag.slug in ('tag-1', 'tag-3', 'tag-4'):
tag.count = 1
tags_1.append(tag)
post1.publish = True
post1.save()
self.assertEqual(set(Post.objects.tag_cloud()), set(tags_1))
self.assertEqual(set(Post.objects.tag_cloud(published=False)), set(tags))
def test_plugin_latest(self):
post1 = self._get_post(self.data['en'][0])
post2 = self._get_post(self.data['en'][1])
post1.tags.add('tag 1')
post1.save()
request = self.get_page_request('/', AnonymousUser(), r'/en/blog/', edit=False)
request_auth = self.get_page_request('/', self.user_staff, r'/en/blog/', edit=False)
request_edit = self.get_page_request('/', self.user_staff, r'/en/blog/', edit=True)
plugin = add_plugin(post1.content, 'BlogLatestEntriesPlugin', language='en')
tag = Tag.objects.get(slug='tag-1')
plugin.tags.add(tag)
# unauthenticated users get no post
self.assertEqual(len(plugin.get_posts(request)), 0)
# staff users not in edit mode get no post
self.assertEqual(len(plugin.get_posts(request_auth)), 0)
# staff users in edit mode get the post
self.assertEqual(len(plugin.get_posts(request_edit)), 1)
post1.publish = True
post1.save()
self.assertEqual(len(plugin.get_posts(request)), 1)
def test_copy_plugin_latest(self):
post1 = self._get_post(self.data['en'][0])
post2 = self._get_post(self.data['en'][1])
tag = Tag.objects.create(name='tag 1')
plugin = add_plugin(post1.content, 'BlogLatestEntriesPlugin', language='en')
plugin.tags.add(tag)
plugins = list(post1.content.cmsplugin_set.filter(language='en').order_by('tree_id', 'level', 'position'))
copy_plugins_to(plugins, post2.content)
new = downcast_plugins(post2.content.cmsplugin_set.all())
self.assertEqual(set(new[0].tags.all()), set([tag]))
def test_plugin_author(self):
post1 = self._get_post(self.data['en'][0])
post2 = self._get_post(self.data['en'][1])
request = self.get_page_request('/', AnonymousUser(), r'/en/blog/', edit=False)
plugin = add_plugin(post1.content, 'BlogAuthorPostsPlugin', language='en')
plugin.authors.add(self.user)
self.assertEqual(len(plugin.get_posts(request)), 0)
self.assertEqual(plugin.get_authors()[0].count, 0)
post1.publish = True
post1.save()
self.assertEqual(len(plugin.get_posts(request)), 1)
self.assertEqual(plugin.get_authors()[0].count, 1)
post2.publish = True
post2.save()
self.assertEqual(len(plugin.get_posts(request)), 2)
self.assertEqual(plugin.get_authors()[0].count, 2)
def test_copy_plugin_author(self):
post1 = self._get_post(self.data['en'][0])
post2 = self._get_post(self.data['en'][1])
plugin = add_plugin(post1.content, 'BlogAuthorPostsPlugin', language='en')
plugin.authors.add(self.user)
plugins = list(post1.content.cmsplugin_set.filter(language='en').order_by('tree_id', 'level', 'position'))
copy_plugins_to(plugins, post2.content)
new = downcast_plugins(post2.content.cmsplugin_set.all())
self.assertEqual(set(new[0].authors.all()), set([self.user]))
def test_multisite(self):
with override('en'):
post1 = self._get_post(self.data['en'][0], sites=(self.site_1,))
post2 = self._get_post(self.data['en'][1], sites=(self.site_2,))
post3 = self._get_post(self.data['en'][2], sites=(self.site_2, self.site_1))
self.assertEqual(len(Post.objects.all()), 3)
with self.settings(**{'SITE_ID': self.site_1.pk}):
self.assertEqual(len(Post.objects.all().on_site()), 2)
self.assertEqual(set(list(Post.objects.all().on_site())), set([post1, post3]))
with self.settings(**{'SITE_ID': self.site_2.pk}):
self.assertEqual(len(Post.objects.all().on_site()), 2)
self.assertEqual(set(list(Post.objects.all().on_site())), set([post2, post3]))
| {
"content_hash": "88f027b2b887316ca09671c4d26dc104",
"timestamp": "",
"source": "github",
"line_count": 316,
"max_line_length": 114,
"avg_line_length": 46.37658227848101,
"alnum_prop": 0.6060047765267826,
"repo_name": "DjangoBeer/djangocms-blog",
"id": "a06fbda98302d6f335143519a3b6bb025a764d39",
"size": "14679",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "tests/test_models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "761"
},
{
"name": "HTML",
"bytes": "10182"
},
{
"name": "Makefile",
"bytes": "1053"
},
{
"name": "Python",
"bytes": "286049"
},
{
"name": "Shell",
"bytes": "61"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, print_function, unicode_literals
import argparse
import sys
from thesis.processors import SemevalTrainCorpusReader, SemevalTestCorpusReader
from tqdm import tqdm
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("corpus",
help="Semeval Corpus root directory")
parser.add_argument("--output",
default=None,
help="Output file to write (defaults to STDOUT)")
args = parser.parse_args()
train_reader = SemevalTrainCorpusReader(args.corpus)
test_reader = SemevalTestCorpusReader(args.corpus)
output = sys.stdout if args.output is None else open(args.output, "w")
print("Parsing Semeval corpus", file=sys.stderr)
for sidx, (metadata, sentence) in tqdm(enumerate(train_reader.sentences, start=1)):
print("META:semeval\tsentence:%05d\tcorpus:train\t%s" %
(sidx, "\t".join(map(lambda d: ":".join(d), metadata))), file=output)
for word in sentence:
print("\t".join(word), file=output)
print("", file=output)
for sidx, (metadata, sentence) in tqdm(enumerate(test_reader.sentences, start=1)):
print("META:semeval\tsentence:%05d\tcorpus:test\t%s" %
(sidx, "\t".join(map(lambda d: ":".join(d), metadata))), file=output)
for word in sentence:
print("\t".join(word), file=output)
print("", file=output)
if args.output is not None:
output.close()
print("SenSem corpus parsed", file=sys.stderr)
| {
"content_hash": "09efccad02a8ab5d83b669b0dee57fc0",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 87,
"avg_line_length": 36.11363636363637,
"alnum_prop": 0.632473253618628,
"repo_name": "crscardellino/thesis",
"id": "d95c44af65b4c19870b19636df2bece5fa00233e",
"size": "1636",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "thesis/scripts/semeval_columnize.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "31212"
},
{
"name": "Python",
"bytes": "159896"
},
{
"name": "Ruby",
"bytes": "10684"
},
{
"name": "Shell",
"bytes": "28921"
}
],
"symlink_target": ""
} |
"""
Example use:
cat ls_data.csv | python3 linear_search_recursive.py > lsr.csv
"""
import sys
sys.path.append('..')
from util import benchmark_search, test_search
alg = "linear_search"
kind = "recursive"
def linear_search(arr, x):
return linear_search_rec(arr, 0, x)
def linear_search_rec(arr, cur, x):
if cur == len(arr):
return -1
else:
if arr[cur] == x:
return cur
else:
return linear_search_rec(arr, cur + 1, x)
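# Illustrative only (not exercised by the benchmark below): the same search
# written iteratively. Useful as a cross-check against the recursive version
# above, and it avoids Python's default recursion limit (~1000 frames), which
# the recursive form hits on sufficiently long inputs.
def linear_search_iterative(arr, x):
    for i, value in enumerate(arr):
        if value == x:
            return i
    return -1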
if __name__ == '__main__':
test_search(linear_search)
benchmark_search(linear_search, alg, kind)
| {
"content_hash": "72bbdc500f0f83684a2432b66802de4f",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 62,
"avg_line_length": 22.037037037037038,
"alnum_prop": 0.5983193277310924,
"repo_name": "kraemerd17/algorithm-benchmarking-analysis",
"id": "b2aac7ca0aab7ac2efd916be2c42527dc8ded0ae",
"size": "595",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python_code/search/linear_search_recursive.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "13452"
},
{
"name": "Makefile",
"bytes": "2333"
},
{
"name": "Python",
"bytes": "11536"
},
{
"name": "R",
"bytes": "11224"
},
{
"name": "Racket",
"bytes": "8190"
},
{
"name": "Scheme",
"bytes": "124429635"
},
{
"name": "TeX",
"bytes": "11770"
}
],
"symlink_target": ""
} |
__author__ = 'phoetrymaster'
import os
| {
"content_hash": "ad4513359d51d6c5783e9e5ebf22166d",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 28,
"avg_line_length": 7.166666666666667,
"alnum_prop": 0.627906976744186,
"repo_name": "jkeifer/pyHytemporal",
"id": "477840cdbf3376226430a608c4fd72dac99893b9",
"size": "43",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "old_TO_MIGRATE/test3.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "254268"
}
],
"symlink_target": ""
} |
"""
test_transifex
~~~~~~~~~~~~~~
    Test functions that implement Transifex-related features.
:copyright: Copyright 2019 by Takayuki SHIMIZUKAWA.
:license: BSD, see LICENSE for details.
"""
import re
from textwrap import dedent
import pytest
from sphinx_intl import transifex
def test_create_transifexrc(home_in_temp):
transifex.create_transifexrc('spam-token')
def test_create_txconfig(home_in_temp, temp):
transifex.create_txconfig()
def test_update_txconfig_resources(home_in_temp, temp):
transifex.create_txconfig()
transifex.update_txconfig_resources('eggs-org', 'ham-project', 'locale', '_build/locale')
def test_update_txconfig_resources_with_config(home_in_temp, temp):
tx_dir = temp / '.tx'
tx_dir.makedirs()
(tx_dir / 'config').write_text(dedent("""\
[main]
host = https://www.transifex.com
"""))
transifex.update_txconfig_resources('eggs-org', 'ham-project', 'locale', '_build/locale')
data = (tx_dir / 'config').text().replace('\\', '/')
assert re.search(r'\[o:eggs-org:p:ham-project:r:README\]', data)
assert re.search(r'source_file\W*=\W*_build/locale/README.pot', data)
def test_update_txconfig_resources_with_another_pot_dir(home_in_temp, temp):
tx_dir = temp / '.tx'
tx_dir.makedirs()
(tx_dir / 'config').write_text(dedent("""\
[main]
host = https://www.transifex.com
[ham-project.domain1]
"""))
(temp / '_build' / 'locale').copytree(temp / 'locale' / 'pot')
transifex.update_txconfig_resources('eggs-org', 'ham-project', 'locale', 'locale/pot')
data = (tx_dir / 'config').text()
assert re.search(r'\[o:eggs-org:p:ham-project:r:README\]', data)
def test_update_txconfig_resources_with_project_name_including_dots(home_in_temp, temp):
tx_dir = temp / '.tx'
tx_dir.makedirs()
(tx_dir / 'config').write_text(dedent("""\
[main]
host = https://www.transifex.com
"""))
(temp / '_build' / 'locale').copytree(temp / 'locale' / 'pot')
transifex.update_txconfig_resources('eggs-org', 'ham-project.com', 'locale', '_build/locale')
data = (tx_dir / 'config').text()
assert re.search(r'\[o:eggs-org:p:ham-projectcom:r:README\]', data)
def test_update_txconfig_resources_with_project_name_including_spaces(home_in_temp, temp):
tx_dir = temp / '.tx'
tx_dir.makedirs()
(tx_dir / 'config').write_text(dedent("""\
[main]
host = https://www.transifex.com
"""))
(temp / '_build' / 'locale').copytree(temp / 'locale' / 'pot')
transifex.update_txconfig_resources('eggs-org', 'ham project com', 'locale', '_build/locale')
data = (tx_dir / 'config').text()
assert re.search(r'\[o:eggs-org:p:ham-project-com:r:README\]', data)
def test_update_txconfig_resources_with_potfile_including_symbols(home_in_temp, temp):
tx_dir = temp / '.tx'
tx_dir.makedirs()
(tx_dir / 'config').write_text(dedent("""\
[main]
host = https://www.transifex.com
"""))
(temp / '_build' / 'locale').copytree(temp / 'locale' / 'pot')
# copy README.pot to 'example document.pot'
readme = (temp / '_build' / 'locale' / 'README.pot').text()
(temp / '_build' / 'locale' / 'example document.pot').write_text(readme)
# copy README.pot to 'test.document.pot'
(temp / '_build' / 'locale' / 'test.document.pot').write_text(readme)
transifex.update_txconfig_resources('eggs-org', 'ham project com', 'locale', '_build/locale')
data = (tx_dir / 'config').text()
assert re.search(r'\[o:eggs-org:p:ham-project-com:r:example_document\]', data)
assert re.search(r'\[o:eggs-org:p:ham-project-com:r:test_document\]', data)
@pytest.mark.parametrize("input,expected", [
('spam/ham', 'spam--ham'),
('spam\\ham', 'spam--ham'),
('ham egg.pot', 'ham_egg_pot'),
('spam-ham/egg.pot', 'spam-ham--egg_pot'),
('glossary', 'glossary_'),
('glossary_', 'glossary_'),
('settings', 'settings_'),
])
def test_normalize_resource_name(input, expected):
_callSUT = transifex.normalize_resource_name
assert _callSUT(input) == expected
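# Illustrative helper (not used by the tests above): every section name
# asserted in this module follows the same '[o:<org>:p:<project>:r:<resource>]'
# shape, with the project slug as it appears in .tx/config and the resource
# part produced by transifex.normalize_resource_name(). This composes the
# literal header string; the asserts above match escaped regex variants of it.
def _expected_section(organization, project, resource):
    return '[o:%s:p:%s:r:%s]' % (
        organization, project, transifex.normalize_resource_name(resource))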
| {
"content_hash": "6dd710d438556f45e94d197f00b47c1c",
"timestamp": "",
"source": "github",
"line_count": 133,
"max_line_length": 97,
"avg_line_length": 30.827067669172934,
"alnum_prop": 0.631219512195122,
"repo_name": "sphinx-doc/sphinx-intl",
"id": "7ae93f380737543a6d6e55454a34ab1c0eb8b755",
"size": "4124",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_transifex.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "49870"
}
],
"symlink_target": ""
} |
"""Test suite for XenAPI."""
import ast
import base64
import contextlib
import copy
import functools
import os
import re
import mock
import mox
from oslo.config import cfg
from nova.compute import api as compute_api
from nova.compute import flavors
from nova.compute import power_state
from nova.compute import task_states
from nova.compute import utils as compute_utils
from nova.compute import vm_states
from nova.conductor import api as conductor_api
from nova import context
from nova import crypto
from nova import db
from nova import exception
from nova.objects import aggregate as aggregate_obj
from nova.objects import instance as instance_obj
from nova.openstack.common.gettextutils import _
from nova.openstack.common import importutils
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova import test
from nova.tests.db import fakes as db_fakes
from nova.tests import fake_instance
from nova.tests import fake_network
from nova.tests import fake_processutils
import nova.tests.image.fake as fake_image
from nova.tests import matchers
from nova.tests.objects import test_aggregate
from nova.tests.virt.xenapi import stubs
from nova.virt import fake
from nova.virt.xenapi import agent
from nova.virt.xenapi.client import session as xenapi_session
from nova.virt.xenapi import driver as xenapi_conn
from nova.virt.xenapi import fake as xenapi_fake
from nova.virt.xenapi import host
from nova.virt.xenapi.image import glance
from nova.virt.xenapi import pool
from nova.virt.xenapi import pool_states
from nova.virt.xenapi import vm_utils
from nova.virt.xenapi import vmops
from nova.virt.xenapi import volume_utils
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
CONF.import_opt('compute_manager', 'nova.service')
CONF.import_opt('network_manager', 'nova.service')
CONF.import_opt('compute_driver', 'nova.virt.driver')
CONF.import_opt('host', 'nova.netconf')
CONF.import_opt('default_availability_zone', 'nova.availability_zones')
CONF.import_opt('login_timeout', 'nova.virt.xenapi.client.session',
group="xenserver")
IMAGE_MACHINE = '1'
IMAGE_KERNEL = '2'
IMAGE_RAMDISK = '3'
IMAGE_RAW = '4'
IMAGE_VHD = '5'
IMAGE_ISO = '6'
IMAGE_IPXE_ISO = '7'
IMAGE_FIXTURES = {
IMAGE_MACHINE: {
'image_meta': {'name': 'fakemachine', 'size': 0,
'disk_format': 'ami',
'container_format': 'ami'},
},
IMAGE_KERNEL: {
'image_meta': {'name': 'fakekernel', 'size': 0,
'disk_format': 'aki',
'container_format': 'aki'},
},
IMAGE_RAMDISK: {
'image_meta': {'name': 'fakeramdisk', 'size': 0,
'disk_format': 'ari',
'container_format': 'ari'},
},
IMAGE_RAW: {
'image_meta': {'name': 'fakeraw', 'size': 0,
'disk_format': 'raw',
'container_format': 'bare'},
},
IMAGE_VHD: {
'image_meta': {'name': 'fakevhd', 'size': 0,
'disk_format': 'vhd',
'container_format': 'ovf'},
},
IMAGE_ISO: {
'image_meta': {'name': 'fakeiso', 'size': 0,
'disk_format': 'iso',
'container_format': 'bare'},
},
IMAGE_IPXE_ISO: {
'image_meta': {'name': 'fake_ipxe_iso', 'size': 0,
'disk_format': 'iso',
'container_format': 'bare',
'properties': {'ipxe_boot': 'true'}},
},
}
def get_session():
return xenapi_session.XenAPISession('test_url', 'root', 'test_pass')
def set_image_fixtures():
image_service = fake_image.FakeImageService()
image_service.images.clear()
for image_id, image_meta in IMAGE_FIXTURES.items():
image_meta = image_meta['image_meta']
image_meta['id'] = image_id
image_service.create(None, image_meta)
def get_fake_device_info():
# FIXME: 'sr_uuid', 'introduce_sr_keys', sr_type and vdi_uuid
# can be removed from the dict when LP bug #1087308 is fixed
fake_vdi_ref = xenapi_fake.create_vdi('fake-vdi', None)
fake_vdi_uuid = xenapi_fake.get_record('VDI', fake_vdi_ref)['uuid']
fake = {'block_device_mapping':
[{'connection_info': {'driver_volume_type': 'iscsi',
'data': {'sr_uuid': 'falseSR',
'introduce_sr_keys': ['sr_type'],
'sr_type': 'iscsi',
'vdi_uuid': fake_vdi_uuid,
'target_discovered': False,
'target_iqn': 'foo_iqn:foo_volid',
'target_portal': 'localhost:3260',
'volume_id': 'foo_volid',
'target_lun': 1,
'auth_password': 'my-p@55w0rd',
'auth_username': 'johndoe',
'auth_method': u'CHAP'}, },
'mount_device': 'vda',
'delete_on_termination': False}, ],
'root_device_name': '/dev/sda',
'ephemerals': [],
'swap': None, }
return fake
def stub_vm_utils_with_vdi_attached_here(function):
"""vm_utils.with_vdi_attached_here needs to be stubbed out because it
calls down to the filesystem to attach a vdi. This provides a
decorator to handle that.
"""
@functools.wraps(function)
def decorated_function(self, *args, **kwargs):
@contextlib.contextmanager
def fake_vdi_attached_here(*args, **kwargs):
fake_dev = 'fakedev'
yield fake_dev
def fake_image_download(*args, **kwargs):
pass
orig_vdi_attached_here = vm_utils.vdi_attached_here
orig_image_download = fake_image._FakeImageService.download
try:
vm_utils.vdi_attached_here = fake_vdi_attached_here
fake_image._FakeImageService.download = fake_image_download
return function(self, *args, **kwargs)
finally:
fake_image._FakeImageService.download = orig_image_download
vm_utils.vdi_attached_here = orig_vdi_attached_here
return decorated_function
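# Typical use of the decorator above (illustrative): applied to a test method,
# it swaps vm_utils.vdi_attached_here for a context manager that yields the
# fake device name 'fakedev' and turns image downloads into no-ops for the
# duration of the test, restoring both afterwards.
#
#     @stub_vm_utils_with_vdi_attached_here
#     def test_spawn_touches_vdis(self):
#         ...  # no real filesystem attach happens here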
def create_instance_with_system_metadata(context, instance_values):
flavor = db.flavor_get(context,
instance_values['instance_type_id'])
sys_meta = flavors.save_flavor_info({}, flavor)
instance_values['system_metadata'] = sys_meta
return db.instance_create(context, instance_values)
class XenAPIVolumeTestCase(stubs.XenAPITestBaseNoDB):
"""Unit tests for Volume operations."""
def setUp(self):
super(XenAPIVolumeTestCase, self).setUp()
self.flags(disable_process_locking=True,
firewall_driver='nova.virt.xenapi.firewall.'
'Dom0IptablesFirewallDriver')
self.flags(connection_url='test_url',
connection_password='test_pass',
group='xenserver')
self.instance = fake_instance.fake_db_instance(name='foo')
@classmethod
def _make_connection_info(cls):
target_iqn = 'iqn.2010-10.org.openstack:volume-00000001'
return {'driver_volume_type': 'iscsi',
'data': {'volume_id': 1,
'target_iqn': target_iqn,
'target_portal': '127.0.0.1:3260,fake',
'target_lun': None,
'auth_method': 'CHAP',
'auth_username': 'username',
'auth_password': 'password'}}
def test_mountpoint_to_number(self):
cases = {
'sda': 0,
'sdp': 15,
'hda': 0,
'hdp': 15,
'vda': 0,
'xvda': 0,
'0': 0,
'10': 10,
'vdq': -1,
'sdq': -1,
'hdq': -1,
'xvdq': -1,
}
for (input, expected) in cases.iteritems():
actual = volume_utils.mountpoint_to_number(input)
self.assertEqual(actual, expected,
'%s yielded %s, not %s' % (input, actual, expected))
def test_parse_volume_info_parsing_auth_details(self):
conn_info = self._make_connection_info()
result = volume_utils.parse_volume_info(conn_info['data'])
self.assertEqual('username', result['chapuser'])
self.assertEqual('password', result['chappassword'])
def test_get_device_number_raise_exception_on_wrong_mountpoint(self):
self.assertRaises(
volume_utils.StorageError,
volume_utils.get_device_number,
'dev/sd')
def test_attach_volume(self):
# This shows how to test Ops classes' methods.
stubs.stubout_session(self.stubs, stubs.FakeSessionForVolumeTests)
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
vm = xenapi_fake.create_vm(self.instance['name'], 'Running')
conn_info = self._make_connection_info()
result = conn.attach_volume(
None, conn_info, self.instance, '/dev/sdc')
# check that the VM has a VBD attached to it
# Get XenAPI record for VBD
vbds = xenapi_fake.get_all('VBD')
vbd = xenapi_fake.get_record('VBD', vbds[0])
vm_ref = vbd['VM']
self.assertEqual(vm_ref, vm)
def test_attach_volume_raise_exception(self):
# This shows how to test when exceptions are raised.
stubs.stubout_session(self.stubs,
stubs.FakeSessionForVolumeFailedTests)
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
xenapi_fake.create_vm(self.instance['name'], 'Running')
self.assertRaises(exception.VolumeDriverNotFound,
conn.attach_volume,
None, {'driver_volume_type': 'nonexist'},
self.instance, '/dev/sdc')
# FIXME(sirp): convert this to use XenAPITestBaseNoDB
class XenAPIVMTestCase(stubs.XenAPITestBase):
"""Unit tests for VM operations."""
def setUp(self):
super(XenAPIVMTestCase, self).setUp()
self.useFixture(test.SampleNetworks())
self.network = importutils.import_object(CONF.network_manager)
self.flags(disable_process_locking=True,
instance_name_template='%d',
firewall_driver='nova.virt.xenapi.firewall.'
'Dom0IptablesFirewallDriver')
self.flags(connection_url='test_url',
connection_password='test_pass',
group='xenserver')
# Disable conductor so we don't wait on a reply that will never come
self.flags(use_local=True, group='conductor')
db_fakes.stub_out_db_instance_api(self.stubs)
xenapi_fake.create_network('fake', 'fake_br1')
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
stubs.stubout_get_this_vm_uuid(self.stubs)
stubs.stub_out_vm_methods(self.stubs)
fake_processutils.stub_out_processutils_execute(self.stubs)
self.user_id = 'fake'
self.project_id = 'fake'
self.context = context.RequestContext(self.user_id, self.project_id)
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
self.conn._session.is_local_connection = False
self.stubs.Set(fake.FakeVirtAPI, 'instance_update',
lambda *args, **kwargs: ('fake-oldref', 'fake-newref'))
fake_image.stub_out_image_service(self.stubs)
set_image_fixtures()
stubs.stubout_image_service_download(self.stubs)
stubs.stubout_stream_disk(self.stubs)
def fake_inject_instance_metadata(self, instance, vm):
pass
self.stubs.Set(vmops.VMOps, '_inject_instance_metadata',
fake_inject_instance_metadata)
def fake_safe_copy_vdi(session, sr_ref, instance, vdi_to_copy_ref):
name_label = "fakenamelabel"
disk_type = "fakedisktype"
virtual_size = 777
return vm_utils.create_vdi(
session, sr_ref, instance, name_label, disk_type,
virtual_size)
self.stubs.Set(vm_utils, '_safe_copy_vdi', fake_safe_copy_vdi)
def tearDown(self):
fake_image.FakeImageService_reset()
super(XenAPIVMTestCase, self).tearDown()
def test_init_host(self):
session = get_session()
vm = vm_utils._get_this_vm_ref(session)
# Local root disk
vdi0 = xenapi_fake.create_vdi('compute', None)
vbd0 = xenapi_fake.create_vbd(vm, vdi0)
# Instance VDI
vdi1 = xenapi_fake.create_vdi('instance-aaaa', None,
other_config={'nova_instance_uuid': 'aaaa'})
vbd1 = xenapi_fake.create_vbd(vm, vdi1)
# Only looks like instance VDI
vdi2 = xenapi_fake.create_vdi('instance-bbbb', None)
vbd2 = xenapi_fake.create_vbd(vm, vdi2)
self.conn.init_host(None)
self.assertEqual(set(xenapi_fake.get_all('VBD')), set([vbd0, vbd2]))
def test_instance_exists(self):
self.mox.StubOutWithMock(vm_utils, 'lookup')
vm_utils.lookup(mox.IgnoreArg(), 'foo').AndReturn(True)
self.mox.ReplayAll()
self.assertTrue(self.conn.instance_exists('foo'))
def test_instance_not_exists(self):
self.mox.StubOutWithMock(vm_utils, 'lookup')
vm_utils.lookup(mox.IgnoreArg(), 'bar').AndReturn(None)
self.mox.ReplayAll()
self.assertFalse(self.conn.instance_exists('bar'))
def test_list_instances_0(self):
instances = self.conn.list_instances()
self.assertEqual(instances, [])
def test_list_instance_uuids_0(self):
instance_uuids = self.conn.list_instance_uuids()
self.assertEqual(instance_uuids, [])
def test_list_instance_uuids(self):
uuids = []
for x in xrange(1, 4):
instance = self._create_instance(x)
uuids.append(instance['uuid'])
instance_uuids = self.conn.list_instance_uuids()
self.assertEqual(len(uuids), len(instance_uuids))
self.assertEqual(set(uuids), set(instance_uuids))
def test_get_rrd_server(self):
self.flags(connection_url='myscheme://myaddress/',
group='xenserver')
server_info = vm_utils._get_rrd_server()
self.assertEqual(server_info[0], 'myscheme')
self.assertEqual(server_info[1], 'myaddress')
def test_get_diagnostics(self):
def fake_get_rrd(host, vm_uuid):
path = os.path.dirname(os.path.realpath(__file__))
with open(os.path.join(path, 'vm_rrd.xml')) as f:
return re.sub(r'\s', '', f.read())
self.stubs.Set(vm_utils, '_get_rrd', fake_get_rrd)
fake_diagnostics = {
'vbd_xvdb_write': '0.0',
'memory_target': '4294967296.0000',
'memory_internal_free': '1415564.0000',
'memory': '4294967296.0000',
'vbd_xvda_write': '0.0',
'cpu0': '0.0042',
'vif_0_tx': '287.4134',
'vbd_xvda_read': '0.0',
'vif_0_rx': '1816.0144',
'vif_2_rx': '0.0',
'vif_2_tx': '0.0',
'vbd_xvdb_read': '0.0',
'last_update': '1328795567',
}
instance = self._create_instance()
expected = self.conn.get_diagnostics(instance)
self.assertThat(fake_diagnostics, matchers.DictMatches(expected))
def test_get_vnc_console(self):
instance = self._create_instance(obj=True)
session = get_session()
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
vm_ref = vm_utils.lookup(session, instance['name'])
console = conn.get_vnc_console(self.context, instance)
        # Note(sulo): We don't care about the session id in this test;
        # it always differs, so strip it out.
actual_path = console['internal_access_path'].split('&')[0]
expected_path = "/console?ref=%s" % str(vm_ref)
self.assertEqual(expected_path, actual_path)
def test_get_vnc_console_for_rescue(self):
instance = self._create_instance(obj=True)
session = get_session()
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
rescue_vm = xenapi_fake.create_vm(instance['name'] + '-rescue',
'Running')
# Set instance state to rescued
instance['vm_state'] = 'rescued'
console = conn.get_vnc_console(self.context, instance)
        # Note(sulo): We don't care about the session id in this test;
        # it always differs, so strip it out.
actual_path = console['internal_access_path'].split('&')[0]
expected_path = "/console?ref=%s" % str(rescue_vm)
self.assertEqual(expected_path, actual_path)
def test_get_vnc_console_instance_not_ready(self):
instance = self._create_instance(obj=True, spawn=False)
instance.vm_state = 'building'
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
self.assertRaises(exception.InstanceNotFound,
conn.get_vnc_console, self.context, instance)
def test_get_vnc_console_rescue_not_ready(self):
instance = self._create_instance(obj=True, spawn=False)
instance.vm_state = 'rescued'
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
self.assertRaises(exception.InstanceNotReady,
conn.get_vnc_console, self.context, instance)
def test_instance_snapshot_fails_with_no_primary_vdi(self):
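        # Replace create_vbd with a stub that records a bogus (non-root)
        # userdevice, so the snapshot code cannot find a primary VDI to copy.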
def create_bad_vbd(session, vm_ref, vdi_ref, userdevice,
vbd_type='disk', read_only=False, bootable=False,
osvol=False):
vbd_rec = {'VM': vm_ref,
'VDI': vdi_ref,
'userdevice': 'fake',
'currently_attached': False}
vbd_ref = xenapi_fake._create_object('VBD', vbd_rec)
xenapi_fake.after_VBD_create(vbd_ref, vbd_rec)
return vbd_ref
self.stubs.Set(vm_utils, 'create_vbd', create_bad_vbd)
stubs.stubout_instance_snapshot(self.stubs)
        # Stub out the firewall driver, as the previous stub setup alters
        # XML-RPC result parsing.
stubs.stubout_firewall_driver(self.stubs, self.conn)
instance = self._create_instance()
image_id = "my_snapshot_id"
self.assertRaises(exception.NovaException, self.conn.snapshot,
self.context, instance, image_id,
lambda *args, **kwargs: None)
def test_instance_snapshot(self):
expected_calls = [
{'args': (),
'kwargs':
{'task_state': task_states.IMAGE_PENDING_UPLOAD}},
{'args': (),
'kwargs':
{'task_state': task_states.IMAGE_UPLOADING,
'expected_state': task_states.IMAGE_PENDING_UPLOAD}}]
func_call_matcher = matchers.FunctionCallMatcher(expected_calls)
image_id = "my_snapshot_id"
stubs.stubout_instance_snapshot(self.stubs)
stubs.stubout_is_snapshot(self.stubs)
        # Stub out the firewall driver, as the previous stub setup alters
        # XML-RPC result parsing.
stubs.stubout_firewall_driver(self.stubs, self.conn)
instance = self._create_instance()
self.fake_upload_called = False
def fake_image_upload(_self, ctx, session, inst, vdi_uuids,
img_id):
self.fake_upload_called = True
self.assertEqual(ctx, self.context)
self.assertEqual(inst, instance)
self.assertIsInstance(vdi_uuids, list)
self.assertEqual(img_id, image_id)
self.stubs.Set(glance.GlanceStore, 'upload_image',
fake_image_upload)
self.conn.snapshot(self.context, instance, image_id,
func_call_matcher.call)
# Ensure VM was torn down
vm_labels = []
for vm_ref in xenapi_fake.get_all('VM'):
vm_rec = xenapi_fake.get_record('VM', vm_ref)
if not vm_rec["is_control_domain"]:
vm_labels.append(vm_rec["name_label"])
self.assertEqual(vm_labels, [instance['name']])
# Ensure VBDs were torn down
vbd_labels = []
for vbd_ref in xenapi_fake.get_all('VBD'):
vbd_rec = xenapi_fake.get_record('VBD', vbd_ref)
vbd_labels.append(vbd_rec["vm_name_label"])
self.assertEqual(vbd_labels, [instance['name']])
# Ensure task states changed in correct order
self.assertIsNone(func_call_matcher.match())
# Ensure VDIs were torn down
for vdi_ref in xenapi_fake.get_all('VDI'):
vdi_rec = xenapi_fake.get_record('VDI', vdi_ref)
name_label = vdi_rec["name_label"]
self.assertTrue(not name_label.endswith('snapshot'))
self.assertTrue(self.fake_upload_called)
def create_vm_record(self, conn, os_type, name):
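        # Look up the spawned VM through both the Nova API and the fake XenAPI
        # layer and stash the records on self for the check_vm_* helpers below.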
instances = conn.list_instances()
self.assertEqual(instances, [name])
# Get Nova record for VM
vm_info = conn.get_info({'name': name})
# Get XenAPI record for VM
vms = [rec for ref, rec
in xenapi_fake.get_all_records('VM').iteritems()
if not rec['is_control_domain']]
vm = vms[0]
self.vm_info = vm_info
self.vm = vm
def check_vm_record(self, conn, instance_type_id, check_injection):
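        # Compare the cached VM records against the flavor's memory and vcpu
        # settings and, optionally, the network data injected into xenstore.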
flavor = db.flavor_get(conn, instance_type_id)
mem_kib = long(flavor['memory_mb']) << 10
mem_bytes = str(mem_kib << 10)
vcpus = flavor['vcpus']
vcpu_weight = flavor['vcpu_weight']
self.assertEqual(self.vm_info['max_mem'], mem_kib)
self.assertEqual(self.vm_info['mem'], mem_kib)
self.assertEqual(self.vm['memory_static_max'], mem_bytes)
self.assertEqual(self.vm['memory_dynamic_max'], mem_bytes)
self.assertEqual(self.vm['memory_dynamic_min'], mem_bytes)
self.assertEqual(self.vm['VCPUs_max'], str(vcpus))
self.assertEqual(self.vm['VCPUs_at_startup'], str(vcpus))
        if vcpu_weight is None:
self.assertEqual(self.vm['VCPUs_params'], {})
else:
self.assertEqual(self.vm['VCPUs_params'],
{'weight': str(vcpu_weight), 'cap': '0'})
# Check that the VM is running according to Nova
self.assertEqual(self.vm_info['state'], power_state.RUNNING)
# Check that the VM is running according to XenAPI.
self.assertEqual(self.vm['power_state'], 'Running')
if check_injection:
xenstore_data = self.vm['xenstore_data']
self.assertNotIn('vm-data/hostname', xenstore_data)
key = 'vm-data/networking/DEADBEEF0001'
xenstore_value = xenstore_data[key]
tcpip_data = ast.literal_eval(xenstore_value)
self.assertEqual(tcpip_data,
{'broadcast': '192.168.1.255',
'dns': ['192.168.1.4', '192.168.1.3'],
'gateway': '192.168.1.1',
'gateway_v6': '2001:db8:0:1::1',
'ip6s': [{'enabled': '1',
'ip': '2001:db8:0:1::1',
'netmask': 64,
'gateway': '2001:db8:0:1::1'}],
'ips': [{'enabled': '1',
'ip': '192.168.1.100',
'netmask': '255.255.255.0',
'gateway': '192.168.1.1'},
{'enabled': '1',
'ip': '192.168.1.101',
'netmask': '255.255.255.0',
'gateway': '192.168.1.1'}],
'label': 'test1',
'mac': 'DE:AD:BE:EF:00:01'})
def check_vm_params_for_windows(self):
self.assertEqual(self.vm['platform']['nx'], 'true')
self.assertEqual(self.vm['HVM_boot_params'], {'order': 'dc'})
self.assertEqual(self.vm['HVM_boot_policy'], 'BIOS order')
# check that these are not set
self.assertEqual(self.vm['PV_args'], '')
self.assertEqual(self.vm['PV_bootloader'], '')
self.assertEqual(self.vm['PV_kernel'], '')
self.assertEqual(self.vm['PV_ramdisk'], '')
def check_vm_params_for_linux(self):
self.assertEqual(self.vm['platform']['nx'], 'false')
self.assertEqual(self.vm['PV_args'], '')
self.assertEqual(self.vm['PV_bootloader'], 'pygrub')
# check that these are not set
self.assertEqual(self.vm['PV_kernel'], '')
self.assertEqual(self.vm['PV_ramdisk'], '')
self.assertEqual(self.vm['HVM_boot_params'], {})
self.assertEqual(self.vm['HVM_boot_policy'], '')
def check_vm_params_for_linux_with_external_kernel(self):
self.assertEqual(self.vm['platform']['nx'], 'false')
self.assertEqual(self.vm['PV_args'], 'root=/dev/xvda1')
self.assertNotEqual(self.vm['PV_kernel'], '')
self.assertNotEqual(self.vm['PV_ramdisk'], '')
# check that these are not set
self.assertEqual(self.vm['HVM_boot_params'], {})
self.assertEqual(self.vm['HVM_boot_policy'], '')
def _list_vdis(self):
session = get_session()
return session.call_xenapi('VDI.get_all')
def _list_vms(self):
session = get_session()
return session.call_xenapi('VM.get_all')
def _check_vdis(self, start_list, end_list):
for vdi_ref in end_list:
if vdi_ref not in start_list:
vdi_rec = xenapi_fake.get_record('VDI', vdi_ref)
# If the cache is turned on then the base disk will be
# there even after the cleanup
if 'other_config' in vdi_rec:
if 'image-id' not in vdi_rec['other_config']:
self.fail('Found unexpected VDI:%s' % vdi_ref)
else:
self.fail('Found unexpected VDI:%s' % vdi_ref)
def _test_spawn(self, image_ref, kernel_id, ramdisk_id,
instance_type_id="3", os_type="linux",
hostname="test", architecture="x86-64", instance_id=1,
injected_files=None, check_injection=False,
create_record=True, empty_dns=False,
block_device_info=None,
key_data=None):
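        # Shared spawn helper: optionally create the instance record, spawn it
        # through the driver, then verify the resulting VM record and metadata.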
if injected_files is None:
injected_files = []
# Fake out inject_instance_metadata
def fake_inject_instance_metadata(self, instance, vm):
pass
self.stubs.Set(vmops.VMOps, '_inject_instance_metadata',
fake_inject_instance_metadata)
if create_record:
instance_values = {'id': instance_id,
'project_id': self.project_id,
'user_id': self.user_id,
'image_ref': image_ref,
'kernel_id': kernel_id,
'ramdisk_id': ramdisk_id,
'root_gb': 20,
'ephemeral_gb': 0,
'instance_type_id': instance_type_id,
'os_type': os_type,
'hostname': hostname,
'key_data': key_data,
'architecture': architecture}
instance = create_instance_with_system_metadata(self.context,
instance_values)
else:
instance = db.instance_get(self.context, instance_id)
network_info = fake_network.fake_get_instance_nw_info(self.stubs)
if empty_dns:
# NOTE(tr3buchet): this is a terrible way to do this...
network_info[0]['network']['subnets'][0]['dns'] = []
image_meta = {}
if image_ref:
image_meta = IMAGE_FIXTURES[image_ref]["image_meta"]
self.conn.spawn(self.context, instance, image_meta, injected_files,
'herp', network_info, block_device_info)
self.create_vm_record(self.conn, os_type, instance['name'])
self.check_vm_record(self.conn, instance_type_id, check_injection)
self.assertEqual(instance['os_type'], os_type)
self.assertEqual(instance['architecture'], architecture)
def test_spawn_ipxe_iso_success(self):
self.mox.StubOutWithMock(vm_utils, 'get_sr_path')
vm_utils.get_sr_path(mox.IgnoreArg()).AndReturn('/sr/path')
self.flags(ipxe_network_name='test1',
ipxe_boot_menu_url='http://boot.example.com',
ipxe_mkisofs_cmd='/root/mkisofs',
group='xenserver')
self.mox.StubOutWithMock(self.conn._session, 'call_plugin_serialized')
self.conn._session.call_plugin_serialized(
'ipxe', 'inject', '/sr/path', mox.IgnoreArg(),
'http://boot.example.com', '192.168.1.100', '255.255.255.0',
'192.168.1.1', '192.168.1.3', '/root/mkisofs')
self.mox.ReplayAll()
self._test_spawn(IMAGE_IPXE_ISO, None, None)
def test_spawn_ipxe_iso_no_network_name(self):
self.flags(ipxe_network_name=None,
ipxe_boot_menu_url='http://boot.example.com',
group='xenserver')
# call_plugin_serialized shouldn't be called
self.mox.StubOutWithMock(self.conn._session, 'call_plugin_serialized')
self.mox.ReplayAll()
self._test_spawn(IMAGE_IPXE_ISO, None, None)
def test_spawn_ipxe_iso_no_boot_menu_url(self):
self.flags(ipxe_network_name='test1',
ipxe_boot_menu_url=None,
group='xenserver')
# call_plugin_serialized shouldn't be called
self.mox.StubOutWithMock(self.conn._session, 'call_plugin_serialized')
self.mox.ReplayAll()
self._test_spawn(IMAGE_IPXE_ISO, None, None)
def test_spawn_ipxe_iso_unknown_network_name(self):
self.flags(ipxe_network_name='test2',
ipxe_boot_menu_url='http://boot.example.com',
group='xenserver')
# call_plugin_serialized shouldn't be called
self.mox.StubOutWithMock(self.conn._session, 'call_plugin_serialized')
self.mox.ReplayAll()
self._test_spawn(IMAGE_IPXE_ISO, None, None)
def test_spawn_empty_dns(self):
# Test spawning with an empty dns list.
self._test_spawn(IMAGE_VHD, None, None,
os_type="linux", architecture="x86-64",
empty_dns=True)
self.check_vm_params_for_linux()
def test_spawn_not_enough_memory(self):
self.assertRaises(exception.InsufficientFreeMemory,
self._test_spawn,
'1', 2, 3, "4") # m1.xlarge
def test_spawn_fail_cleanup_1(self):
"""Simulates an error while downloading an image.
Verifies that the VM and VDIs created are properly cleaned up.
"""
vdi_recs_start = self._list_vdis()
start_vms = self._list_vms()
stubs.stubout_fetch_disk_image(self.stubs, raise_failure=True)
self.assertRaises(xenapi_fake.Failure,
self._test_spawn, '1', 2, 3)
# No additional VDI should be found.
vdi_recs_end = self._list_vdis()
end_vms = self._list_vms()
self._check_vdis(vdi_recs_start, vdi_recs_end)
# No additional VMs should be found.
self.assertEqual(start_vms, end_vms)
def test_spawn_fail_cleanup_2(self):
"""Simulates an error while creating VM record.
Verifies that the VM and VDIs created are properly cleaned up.
"""
vdi_recs_start = self._list_vdis()
start_vms = self._list_vms()
stubs.stubout_create_vm(self.stubs)
self.assertRaises(xenapi_fake.Failure,
self._test_spawn, '1', 2, 3)
# No additional VDI should be found.
vdi_recs_end = self._list_vdis()
end_vms = self._list_vms()
self._check_vdis(vdi_recs_start, vdi_recs_end)
# No additional VMs should be found.
self.assertEqual(start_vms, end_vms)
def test_spawn_fail_cleanup_3(self):
"""Simulates an error while attaching disks.
Verifies that the VM and VDIs created are properly cleaned up.
"""
stubs.stubout_attach_disks(self.stubs)
vdi_recs_start = self._list_vdis()
start_vms = self._list_vms()
self.assertRaises(xenapi_fake.Failure,
self._test_spawn, '1', 2, 3)
# No additional VDI should be found.
vdi_recs_end = self._list_vdis()
end_vms = self._list_vms()
self._check_vdis(vdi_recs_start, vdi_recs_end)
# No additional VMs should be found.
self.assertEqual(start_vms, end_vms)
def test_spawn_raw_glance(self):
self._test_spawn(IMAGE_RAW, None, None, os_type=None)
self.check_vm_params_for_windows()
def test_spawn_vhd_glance_linux(self):
self._test_spawn(IMAGE_VHD, None, None,
os_type="linux", architecture="x86-64")
self.check_vm_params_for_linux()
def test_spawn_vhd_glance_windows(self):
self._test_spawn(IMAGE_VHD, None, None,
os_type="windows", architecture="i386",
instance_type_id=5)
self.check_vm_params_for_windows()
def test_spawn_iso_glance(self):
self._test_spawn(IMAGE_ISO, None, None,
os_type="windows", architecture="i386")
self.check_vm_params_for_windows()
def test_spawn_glance(self):
def fake_fetch_disk_image(context, session, instance, name_label,
image_id, image_type):
sr_ref = vm_utils.safe_find_sr(session)
image_type_str = vm_utils.ImageType.to_string(image_type)
vdi_ref = vm_utils.create_vdi(session, sr_ref, instance,
name_label, image_type_str, "20")
vdi_role = vm_utils.ImageType.get_role(image_type)
vdi_uuid = session.call_xenapi("VDI.get_uuid", vdi_ref)
return {vdi_role: dict(uuid=vdi_uuid, file=None)}
self.stubs.Set(vm_utils, '_fetch_disk_image',
fake_fetch_disk_image)
self._test_spawn(IMAGE_MACHINE,
IMAGE_KERNEL,
IMAGE_RAMDISK)
self.check_vm_params_for_linux_with_external_kernel()
def test_spawn_boot_from_volume_no_image_meta(self):
dev_info = get_fake_device_info()
self._test_spawn(None, None, None,
block_device_info=dev_info)
def test_spawn_boot_from_volume_with_image_meta(self):
dev_info = get_fake_device_info()
self._test_spawn(None, None, None,
block_device_info=dev_info)
def test_spawn_netinject_file(self):
self.flags(flat_injected=True)
db_fakes.stub_out_db_instance_api(self.stubs, injected=True)
self._tee_executed = False
def _tee_handler(cmd, **kwargs):
input = kwargs.get('process_input', None)
self.assertIsNotNone(input)
config = [line.strip() for line in input.split("\n")]
# Find the start of eth0 configuration and check it
index = config.index('auto eth0')
self.assertEqual(config[index + 1:index + 8], [
'iface eth0 inet static',
'address 192.168.1.100',
'netmask 255.255.255.0',
'broadcast 192.168.1.255',
'gateway 192.168.1.1',
'dns-nameservers 192.168.1.3 192.168.1.4',
''])
self._tee_executed = True
return '', ''
def _readlink_handler(cmd_parts, **kwargs):
return os.path.realpath(cmd_parts[2]), ''
fake_processutils.fake_execute_set_repliers([
# Capture the tee .../etc/network/interfaces command
(r'tee.*interfaces', _tee_handler),
(r'readlink -nm.*', _readlink_handler),
])
self._test_spawn(IMAGE_MACHINE,
IMAGE_KERNEL,
IMAGE_RAMDISK,
check_injection=True)
self.assertTrue(self._tee_executed)
def test_spawn_netinject_xenstore(self):
db_fakes.stub_out_db_instance_api(self.stubs, injected=True)
self._tee_executed = False
def _mount_handler(cmd, *ignore_args, **ignore_kwargs):
            # When mounting, create real files under the mount point to
            # simulate files in the mounted filesystem. The mount point is
            # the last item in the command list.
self._tmpdir = cmd[len(cmd) - 1]
LOG.debug(_('Creating files in %s to simulate guest agent'),
self._tmpdir)
os.makedirs(os.path.join(self._tmpdir, 'usr', 'sbin'))
# Touch the file using open
open(os.path.join(self._tmpdir, 'usr', 'sbin',
'xe-update-networking'), 'w').close()
return '', ''
def _umount_handler(cmd, *ignore_args, **ignore_kwargs):
            # Unmounting would normally make files in the mounted filesystem
            # disappear, so do that here.
LOG.debug(_('Removing simulated guest agent files in %s'),
self._tmpdir)
os.remove(os.path.join(self._tmpdir, 'usr', 'sbin',
'xe-update-networking'))
os.rmdir(os.path.join(self._tmpdir, 'usr', 'sbin'))
os.rmdir(os.path.join(self._tmpdir, 'usr'))
return '', ''
def _tee_handler(cmd, *ignore_args, **ignore_kwargs):
self._tee_executed = True
return '', ''
fake_processutils.fake_execute_set_repliers([
(r'mount', _mount_handler),
(r'umount', _umount_handler),
(r'tee.*interfaces', _tee_handler)])
self._test_spawn('1', 2, 3, check_injection=True)
# tee must not run in this case, where an injection-capable
# guest agent is detected
self.assertFalse(self._tee_executed)
def test_spawn_injects_auto_disk_config_to_xenstore(self):
instance = self._create_instance(spawn=False)
self.mox.StubOutWithMock(self.conn._vmops, '_inject_auto_disk_config')
self.conn._vmops._inject_auto_disk_config(instance, mox.IgnoreArg())
self.mox.ReplayAll()
self.conn.spawn(self.context, instance,
IMAGE_FIXTURES['1']["image_meta"], [], 'herp', '')
def test_spawn_vlanmanager(self):
self.flags(network_manager='nova.network.manager.VlanManager',
vlan_interface='fake0')
def dummy(*args, **kwargs):
pass
self.stubs.Set(vmops.VMOps, '_create_vifs', dummy)
# Reset network table
xenapi_fake.reset_table('network')
# Instance id = 2 will use vlan network (see db/fakes.py)
ctxt = self.context.elevated()
self.network.conductor_api = conductor_api.LocalAPI()
instance = self._create_instance(2, False)
networks = self.network.db.network_get_all(ctxt)
with mock.patch('nova.objects.network.Network._from_db_object'):
for network in networks:
self.network.set_network_host(ctxt, network)
self.network.allocate_for_instance(ctxt,
instance_id=2,
instance_uuid='00000000-0000-0000-0000-000000000002',
host=CONF.host,
vpn=None,
rxtx_factor=3,
project_id=self.project_id,
macs=None)
self._test_spawn(IMAGE_MACHINE,
IMAGE_KERNEL,
IMAGE_RAMDISK,
instance_id=2,
create_record=False)
        # TODO(salvatore-orlando): a complete test here would require
        # a check to make sure the bridge for the VM's VIF is consistent
        # with the bridge specified in the nova db.
def test_spawn_with_network_qos(self):
self._create_instance()
for vif_ref in xenapi_fake.get_all('VIF'):
vif_rec = xenapi_fake.get_record('VIF', vif_ref)
self.assertEqual(vif_rec['qos_algorithm_type'], 'ratelimit')
self.assertEqual(vif_rec['qos_algorithm_params']['kbps'],
str(3 * 10 * 1024))
def test_spawn_ssh_key_injection(self):
# Test spawning with key_data on an instance. Should use
# agent file injection.
self.flags(use_agent_default=True,
group='xenserver')
actual_injected_files = []
def fake_inject_file(self, method, args):
path = base64.b64decode(args['b64_path'])
contents = base64.b64decode(args['b64_contents'])
actual_injected_files.append((path, contents))
return jsonutils.dumps({'returncode': '0', 'message': 'success'})
self.stubs.Set(stubs.FakeSessionForVMTests,
'_plugin_agent_inject_file', fake_inject_file)
def fake_encrypt_text(sshkey, new_pass):
self.assertEqual("ssh-rsa fake_keydata", sshkey)
return "fake"
self.stubs.Set(crypto, 'ssh_encrypt_text', fake_encrypt_text)
expected_data = ('\n# The following ssh key was injected by '
'Nova\nssh-rsa fake_keydata\n')
injected_files = [('/root/.ssh/authorized_keys', expected_data)]
self._test_spawn(IMAGE_VHD, None, None,
os_type="linux", architecture="x86-64",
key_data='ssh-rsa fake_keydata')
self.assertEqual(actual_injected_files, injected_files)
def test_spawn_ssh_key_injection_non_rsa(self):
# Test spawning with key_data on an instance. Should use
# agent file injection.
self.flags(use_agent_default=True,
group='xenserver')
actual_injected_files = []
def fake_inject_file(self, method, args):
path = base64.b64decode(args['b64_path'])
contents = base64.b64decode(args['b64_contents'])
actual_injected_files.append((path, contents))
return jsonutils.dumps({'returncode': '0', 'message': 'success'})
self.stubs.Set(stubs.FakeSessionForVMTests,
'_plugin_agent_inject_file', fake_inject_file)
def fake_encrypt_text(sshkey, new_pass):
raise NotImplementedError("Should not be called")
self.stubs.Set(crypto, 'ssh_encrypt_text', fake_encrypt_text)
expected_data = ('\n# The following ssh key was injected by '
'Nova\nssh-dsa fake_keydata\n')
injected_files = [('/root/.ssh/authorized_keys', expected_data)]
self._test_spawn(IMAGE_VHD, None, None,
os_type="linux", architecture="x86-64",
key_data='ssh-dsa fake_keydata')
self.assertEqual(actual_injected_files, injected_files)
def test_spawn_injected_files(self):
# Test spawning with injected_files.
self.flags(use_agent_default=True,
group='xenserver')
actual_injected_files = []
def fake_inject_file(self, method, args):
path = base64.b64decode(args['b64_path'])
contents = base64.b64decode(args['b64_contents'])
actual_injected_files.append((path, contents))
return jsonutils.dumps({'returncode': '0', 'message': 'success'})
self.stubs.Set(stubs.FakeSessionForVMTests,
'_plugin_agent_inject_file', fake_inject_file)
injected_files = [('/tmp/foo', 'foobar')]
self._test_spawn(IMAGE_VHD, None, None,
os_type="linux", architecture="x86-64",
injected_files=injected_files)
self.check_vm_params_for_linux()
self.assertEqual(actual_injected_files, injected_files)
def test_spawn_agent_upgrade(self):
self.flags(use_agent_default=True,
group='xenserver')
actual_injected_files = []
def fake_agent_build(_self, *args):
return {"version": "1.1.0", "architecture": "x86-64",
"hypervisor": "xen", "os": "windows",
"url": "url", "md5hash": "asdf"}
self.stubs.Set(self.conn.virtapi, 'agent_build_get_by_triple',
fake_agent_build)
self._test_spawn(IMAGE_VHD, None, None,
os_type="linux", architecture="x86-64")
def test_spawn_agent_upgrade_fails_silently(self):
self.flags(use_agent_default=True,
group='xenserver')
actual_injected_files = []
def fake_agent_build(_self, *args):
return {"version": "1.1.0", "architecture": "x86-64",
"hypervisor": "xen", "os": "windows",
"url": "url", "md5hash": "asdf"}
self.stubs.Set(self.conn.virtapi, 'agent_build_get_by_triple',
fake_agent_build)
def fake_agent_update(self, method, args):
raise xenapi_fake.Failure(["fake_error"])
self.stubs.Set(stubs.FakeSessionForVMTests,
'_plugin_agent_agentupdate', fake_agent_update)
self._test_spawn(IMAGE_VHD, None, None,
os_type="linux", architecture="x86-64")
def test_spawn_with_resetnetwork_alternative_returncode(self):
self.flags(use_agent_default=True,
group='xenserver')
def fake_resetnetwork(self, method, args):
fake_resetnetwork.called = True
            # NOTE(johngarbutt): as returned by FreeBSD and Gentoo
return jsonutils.dumps({'returncode': '500',
'message': 'success'})
self.stubs.Set(stubs.FakeSessionForVMTests,
'_plugin_agent_resetnetwork', fake_resetnetwork)
fake_resetnetwork.called = False
self._test_spawn(IMAGE_VHD, None, None,
os_type="linux", architecture="x86-64")
self.assertTrue(fake_resetnetwork.called)
def _test_spawn_fails_silently_with(self, trigger, expected_exception):
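        # Make the agent version call fail with the given trigger and check
        # that the expected exception is recorded as an instance fault while
        # the spawn itself still completes.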
self.flags(use_agent_default=True,
agent_version_timeout=0,
group='xenserver')
actual_injected_files = []
def fake_agent_version(self, method, args):
raise xenapi_fake.Failure([trigger])
self.stubs.Set(stubs.FakeSessionForVMTests,
'_plugin_agent_version', fake_agent_version)
def fake_add_instance_fault(*args, **kwargs):
self.assertEqual(expected_exception, args[3])
self.stubs.Set(compute_utils, 'add_instance_fault_from_exc',
fake_add_instance_fault)
self._test_spawn(IMAGE_VHD, None, None,
os_type="linux", architecture="x86-64")
def test_spawn_fails_with_agent_timeout(self):
self._test_spawn_fails_silently_with("TIMEOUT:fake",
exception.AgentTimeout)
def test_spawn_fails_with_agent_not_implemented(self):
self._test_spawn_fails_silently_with("NOT IMPLEMENTED:fake",
exception.AgentNotImplemented)
def test_spawn_fails_with_agent_error(self):
self._test_spawn_fails_silently_with("fake_error",
exception.AgentError)
def test_spawn_fails_with_agent_bad_return(self):
self.flags(use_agent_default=True,
agent_version_timeout=0,
group='xenserver')
actual_injected_files = []
def fake_agent_version(self, method, args):
return xenapi_fake.as_json(returncode='-1', message='fake')
self.stubs.Set(stubs.FakeSessionForVMTests,
'_plugin_agent_version', fake_agent_version)
        # A bad agent return code must not abort the spawn.
self._test_spawn(IMAGE_VHD, None, None,
os_type="linux", architecture="x86-64")
def test_spawn_fails_agent_not_implemented(self):
        # Spawn should succeed even though the agent version call reports
        # NOT IMPLEMENTED.
self.flags(use_agent_default=True,
agent_version_timeout=0,
group='xenserver')
actual_injected_files = []
def fake_agent_version(self, method, args):
raise xenapi_fake.Failure(["NOT IMPLEMENTED:fake"])
self.stubs.Set(stubs.FakeSessionForVMTests,
'_plugin_agent_version', fake_agent_version)
        # The NOT IMPLEMENTED agent failure must not abort the spawn.
self._test_spawn(IMAGE_VHD, None, None,
os_type="linux", architecture="x86-64")
def test_rescue(self):
instance = self._create_instance(spawn=False)
xenapi_fake.create_vm(instance['name'], 'Running')
session = get_session()
vm_ref = vm_utils.lookup(session, instance['name'])
swap_vdi_ref = xenapi_fake.create_vdi('swap', None)
root_vdi_ref = xenapi_fake.create_vdi('root', None)
xenapi_fake.create_vbd(vm_ref, swap_vdi_ref, userdevice=1)
xenapi_fake.create_vbd(vm_ref, root_vdi_ref, userdevice=0)
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
image_meta = {'id': IMAGE_VHD,
'disk_format': 'vhd'}
conn.rescue(self.context, instance, [], image_meta, '')
vm = xenapi_fake.get_record('VM', vm_ref)
rescue_name = "%s-rescue" % vm["name_label"]
rescue_ref = vm_utils.lookup(session, rescue_name)
rescue_vm = xenapi_fake.get_record('VM', rescue_ref)
vdi_refs = []
for vbd_ref in rescue_vm['VBDs']:
vdi_refs.append(xenapi_fake.get_record('VBD', vbd_ref)['VDI'])
self.assertNotIn(swap_vdi_ref, vdi_refs)
self.assertIn(root_vdi_ref, vdi_refs)
def test_rescue_preserve_disk_on_failure(self):
# test that the original disk is preserved if rescue setup fails
# bug #1227898
instance = self._create_instance()
session = get_session()
image_meta = {'id': IMAGE_VHD,
'disk_format': 'vhd'}
vm_ref = vm_utils.lookup(session, instance['name'])
vdi_ref, vdi_rec = vm_utils.get_vdi_for_vm_safely(session, vm_ref)
# raise an error in the spawn setup process and trigger the
# undo manager logic:
def fake_start(*args, **kwargs):
raise test.TestingException('Start Error')
self.stubs.Set(self.conn._vmops, '_start', fake_start)
self.assertRaises(test.TestingException, self.conn.rescue,
self.context, instance, [], image_meta, '')
# confirm original disk still exists:
vdi_ref2, vdi_rec2 = vm_utils.get_vdi_for_vm_safely(session, vm_ref)
self.assertEqual(vdi_ref, vdi_ref2)
self.assertEqual(vdi_rec['uuid'], vdi_rec2['uuid'])
def test_unrescue(self):
instance = self._create_instance()
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
# Unrescue expects the original instance to be powered off
conn.power_off(instance)
rescue_vm = xenapi_fake.create_vm(instance['name'] + '-rescue',
'Running')
conn.unrescue(instance, None)
def test_unrescue_not_in_rescue(self):
instance = self._create_instance()
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
# Ensure that it will not unrescue a non-rescued instance.
self.assertRaises(exception.InstanceNotInRescueMode, conn.unrescue,
instance, None)
def test_finish_revert_migration(self):
instance = self._create_instance()
class VMOpsMock():
def __init__(self):
self.finish_revert_migration_called = False
def finish_revert_migration(self, context, instance, block_info,
power_on):
self.finish_revert_migration_called = True
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
conn._vmops = VMOpsMock()
conn.finish_revert_migration(self.context, instance, None)
self.assertTrue(conn._vmops.finish_revert_migration_called)
def test_reboot_hard(self):
instance = self._create_instance()
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
conn.reboot(self.context, instance, None, "HARD")
def test_poll_rebooting_instances(self):
self.mox.StubOutWithMock(compute_api.API, 'reboot')
compute_api.API.reboot(mox.IgnoreArg(), mox.IgnoreArg(),
mox.IgnoreArg())
self.mox.ReplayAll()
instance = self._create_instance()
instances = [instance]
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
conn.poll_rebooting_instances(60, instances)
def test_reboot_soft(self):
instance = self._create_instance()
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
conn.reboot(self.context, instance, None, "SOFT")
def test_reboot_halted(self):
session = get_session()
instance = self._create_instance(spawn=False)
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
xenapi_fake.create_vm(instance['name'], 'Halted')
conn.reboot(self.context, instance, None, "SOFT")
vm_ref = vm_utils.lookup(session, instance['name'])
vm = xenapi_fake.get_record('VM', vm_ref)
self.assertEqual(vm['power_state'], 'Running')
def test_reboot_unknown_state(self):
instance = self._create_instance(spawn=False)
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
xenapi_fake.create_vm(instance['name'], 'Unknown')
self.assertRaises(xenapi_fake.Failure, conn.reboot, self.context,
instance, None, "SOFT")
def test_reboot_rescued(self):
instance = self._create_instance()
instance['vm_state'] = vm_states.RESCUED
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
real_result = vm_utils.lookup(conn._session, instance['name'])
self.mox.StubOutWithMock(vm_utils, 'lookup')
vm_utils.lookup(conn._session, instance['name'],
True).AndReturn(real_result)
self.mox.ReplayAll()
conn.reboot(self.context, instance, None, "SOFT")
def test_get_console_output_succeeds(self):
def fake_get_console_output(instance):
self.assertEqual("instance", instance)
return "console_log"
self.stubs.Set(self.conn._vmops, 'get_console_output',
fake_get_console_output)
self.assertEqual(self.conn.get_console_output('context', "instance"),
"console_log")
def _test_maintenance_mode(self, find_host, find_aggregate):
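        # Drive host_maintenance_mode with configurable host/aggregate lookup
        # results and verify the instance gets live-migrated off the host.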
real_call_xenapi = self.conn._session.call_xenapi
instance = self._create_instance(spawn=True)
api_calls = {}
# Record all the xenapi calls, and return a fake list of hosts
# for the host.get_all call
def fake_call_xenapi(method, *args):
api_calls[method] = args
if method == 'host.get_all':
return ['foo', 'bar', 'baz']
return real_call_xenapi(method, *args)
self.stubs.Set(self.conn._session, 'call_xenapi', fake_call_xenapi)
def fake_aggregate_get(context, host, key):
if find_aggregate:
return [test_aggregate.fake_aggregate]
else:
return []
self.stubs.Set(db, 'aggregate_get_by_host',
fake_aggregate_get)
def fake_host_find(context, session, src, dst):
if find_host:
return 'bar'
else:
raise exception.NoValidHost("I saw this one coming...")
self.stubs.Set(host, '_host_find', fake_host_find)
result = self.conn.host_maintenance_mode('bar', 'on_maintenance')
self.assertEqual(result, 'on_maintenance')
# We expect the VM.pool_migrate call to have been called to
# migrate our instance to the 'bar' host
vm_ref = vm_utils.lookup(self.conn._session, instance['name'])
host_ref = "foo"
expected = (vm_ref, host_ref, {"live": "true"})
self.assertEqual(api_calls.get('VM.pool_migrate'), expected)
instance = db.instance_get_by_uuid(self.context, instance['uuid'])
self.assertEqual(instance['vm_state'], vm_states.ACTIVE)
self.assertEqual(instance['task_state'], task_states.MIGRATING)
def test_maintenance_mode(self):
self._test_maintenance_mode(True, True)
def test_maintenance_mode_no_host(self):
self.assertRaises(exception.NoValidHost,
self._test_maintenance_mode, False, True)
def test_maintenance_mode_no_aggregate(self):
self.assertRaises(exception.NotFound,
self._test_maintenance_mode, True, False)
def test_uuid_find(self):
self.mox.StubOutWithMock(db, 'instance_get_all_by_host')
fake_inst = fake_instance.fake_db_instance(id=123)
fake_inst2 = fake_instance.fake_db_instance(id=456)
db.instance_get_all_by_host(self.context, fake_inst['host'],
columns_to_join=None,
use_slave=False
).AndReturn([fake_inst, fake_inst2])
self.mox.ReplayAll()
expected_name = CONF.instance_name_template % fake_inst['id']
inst_uuid = host._uuid_find(self.context, fake_inst['host'],
expected_name)
self.assertEqual(inst_uuid, fake_inst['uuid'])
def test_session_virtapi(self):
was = {'called': False}
def fake_aggregate_get_by_host(self, *args, **kwargs):
was['called'] = True
raise test.TestingException()
self.stubs.Set(db, "aggregate_get_by_host",
fake_aggregate_get_by_host)
self.stubs.Set(self.conn._session, "is_slave", True)
self.assertRaises(test.TestingException,
self.conn._session._get_host_uuid)
self.assertTrue(was['called'])
def test_per_instance_usage_running(self):
instance = self._create_instance(spawn=True)
flavor = flavors.get_flavor(3)
expected = {instance['uuid']: {'memory_mb': flavor['memory_mb'],
'uuid': instance['uuid']}}
actual = self.conn.get_per_instance_usage()
self.assertEqual(expected, actual)
# Paused instances still consume resources:
self.conn.pause(instance)
actual = self.conn.get_per_instance_usage()
self.assertEqual(expected, actual)
def test_per_instance_usage_suspended(self):
# Suspended instances do not consume memory:
instance = self._create_instance(spawn=True)
self.conn.suspend(instance)
actual = self.conn.get_per_instance_usage()
self.assertEqual({}, actual)
def test_per_instance_usage_halted(self):
instance = self._create_instance(spawn=True)
self.conn.power_off(instance)
actual = self.conn.get_per_instance_usage()
self.assertEqual({}, actual)
def _create_instance(self, instance_id=1, spawn=True, obj=False, **attrs):
"""Creates and spawns a test instance."""
instance_values = {
'id': instance_id,
'uuid': '00000000-0000-0000-0000-00000000000%d' % instance_id,
'display_name': 'host-%d' % instance_id,
'project_id': self.project_id,
'user_id': self.user_id,
'image_ref': 1,
'kernel_id': 2,
'ramdisk_id': 3,
'root_gb': 80,
'ephemeral_gb': 0,
'instance_type_id': '3', # m1.large
'os_type': 'linux',
'vm_mode': 'hvm',
'architecture': 'x86-64'}
instance_values.update(attrs)
instance = create_instance_with_system_metadata(self.context,
instance_values)
network_info = fake_network.fake_get_instance_nw_info(self.stubs)
image_meta = {'id': IMAGE_VHD,
'disk_format': 'vhd'}
if spawn:
self.conn.spawn(self.context, instance, image_meta, [], 'herp',
network_info)
if obj:
instance = instance_obj.Instance._from_db_object(
self.context, instance_obj.Instance(), instance,
expected_attrs=instance_obj.INSTANCE_DEFAULT_FIELDS)
return instance
def test_destroy_clean_up_kernel_and_ramdisk(self):
def fake_lookup_kernel_ramdisk(session, vm_ref):
return "kernel", "ramdisk"
self.stubs.Set(vm_utils, "lookup_kernel_ramdisk",
fake_lookup_kernel_ramdisk)
def fake_destroy_kernel_ramdisk(session, instance, kernel, ramdisk):
fake_destroy_kernel_ramdisk.called = True
self.assertEqual("kernel", kernel)
self.assertEqual("ramdisk", ramdisk)
fake_destroy_kernel_ramdisk.called = False
self.stubs.Set(vm_utils, "destroy_kernel_ramdisk",
fake_destroy_kernel_ramdisk)
instance = self._create_instance(spawn=True)
network_info = fake_network.fake_get_instance_nw_info(self.stubs)
self.conn.destroy(self.context, instance, network_info)
vm_ref = vm_utils.lookup(self.conn._session, instance['name'])
self.assertIsNone(vm_ref)
self.assertTrue(fake_destroy_kernel_ramdisk.called)
class XenAPIDiffieHellmanTestCase(test.NoDBTestCase):
"""Unit tests for Diffie-Hellman code."""
def setUp(self):
super(XenAPIDiffieHellmanTestCase, self).setUp()
self.alice = agent.SimpleDH()
self.bob = agent.SimpleDH()
def test_shared(self):
alice_pub = self.alice.get_public()
bob_pub = self.bob.get_public()
alice_shared = self.alice.compute_shared(bob_pub)
bob_shared = self.bob.compute_shared(alice_pub)
self.assertEqual(alice_shared, bob_shared)
def _test_encryption(self, message):
enc = self.alice.encrypt(message)
self.assertFalse(enc.endswith('\n'))
dec = self.bob.decrypt(enc)
self.assertEqual(dec, message)
def test_encrypt_simple_message(self):
self._test_encryption('This is a simple message.')
def test_encrypt_message_with_newlines_at_end(self):
self._test_encryption('This message has a newline at the end.\n')
def test_encrypt_many_newlines_at_end(self):
self._test_encryption('Message with lotsa newlines.\n\n\n')
def test_encrypt_newlines_inside_message(self):
self._test_encryption('Message\nwith\ninterior\nnewlines.')
def test_encrypt_with_leading_newlines(self):
self._test_encryption('\n\nMessage with leading newlines.')
def test_encrypt_really_long_message(self):
self._test_encryption(''.join(['abcd' for i in xrange(1024)]))
# FIXME(sirp): convert this to use XenAPITestBaseNoDB
class XenAPIMigrateInstance(stubs.XenAPITestBase):
"""Unit test for verifying migration-related actions."""
def setUp(self):
super(XenAPIMigrateInstance, self).setUp()
self.flags(connection_url='test_url',
connection_password='test_pass',
group='xenserver')
self.flags(firewall_driver='nova.virt.xenapi.firewall.'
'Dom0IptablesFirewallDriver')
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
db_fakes.stub_out_db_instance_api(self.stubs)
xenapi_fake.create_network('fake', 'fake_br1')
self.user_id = 'fake'
self.project_id = 'fake'
self.context = context.RequestContext(self.user_id, self.project_id)
self.instance_values = {'id': 1,
'project_id': self.project_id,
'user_id': self.user_id,
'image_ref': 1,
'kernel_id': None,
'ramdisk_id': None,
'root_gb': 80,
'ephemeral_gb': 0,
'instance_type_id': '3', # m1.large
'os_type': 'linux',
'architecture': 'x86-64'}
migration_values = {
'source_compute': 'nova-compute',
'dest_compute': 'nova-compute',
'dest_host': '10.127.5.114',
'status': 'post-migrating',
'instance_uuid': '15f23e6a-cc6e-4d22-b651-d9bdaac316f7',
'old_instance_type_id': 5,
'new_instance_type_id': 1
}
self.migration = db.migration_create(
context.get_admin_context(), migration_values)
fake_processutils.stub_out_processutils_execute(self.stubs)
stubs.stub_out_migration_methods(self.stubs)
stubs.stubout_get_this_vm_uuid(self.stubs)
def fake_inject_instance_metadata(self, instance, vm):
pass
self.stubs.Set(vmops.VMOps, '_inject_instance_metadata',
fake_inject_instance_metadata)
def test_migrate_disk_and_power_off(self):
instance = db.instance_create(self.context, self.instance_values)
xenapi_fake.create_vm(instance['name'], 'Running')
flavor = {"root_gb": 80, 'ephemeral_gb': 0}
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
conn.migrate_disk_and_power_off(self.context, instance,
'127.0.0.1', flavor, None)
def test_migrate_disk_and_power_off_passes_exceptions(self):
instance = db.instance_create(self.context, self.instance_values)
xenapi_fake.create_vm(instance['name'], 'Running')
flavor = {"root_gb": 80, 'ephemeral_gb': 0}
def fake_raise(*args, **kwargs):
raise exception.MigrationError(reason='test failure')
self.stubs.Set(vmops.VMOps, "_migrate_disk_resizing_up", fake_raise)
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
self.assertRaises(exception.MigrationError,
conn.migrate_disk_and_power_off,
self.context, instance,
'127.0.0.1', flavor, None)
def test_migrate_disk_and_power_off_throws_on_zero_gb_resize_down(self):
instance = db.instance_create(self.context, self.instance_values)
flavor = {"root_gb": 0, 'ephemeral_gb': 0}
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
self.assertRaises(exception.ResizeError,
conn.migrate_disk_and_power_off,
self.context, instance,
'fake_dest', flavor, None)
def test_migrate_disk_and_power_off_with_zero_gb_old_and_new_works(self):
flavor = {"root_gb": 0, 'ephemeral_gb': 0}
values = copy.copy(self.instance_values)
values["root_gb"] = 0
values["ephemeral_gb"] = 0
instance = db.instance_create(self.context, values)
xenapi_fake.create_vm(instance['name'], 'Running')
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
conn.migrate_disk_and_power_off(self.context, instance,
'127.0.0.1', flavor, None)
def _test_revert_migrate(self, power_on):
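        # Run finish_migration (resize) followed by finish_revert_migration,
        # checking that the VDI resize happens and the VM is only started when
        # power_on is requested.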
instance = create_instance_with_system_metadata(self.context,
self.instance_values)
self.called = False
self.fake_vm_start_called = False
self.fake_finish_revert_migration_called = False
context = 'fake_context'
def fake_vm_start(*args, **kwargs):
self.fake_vm_start_called = True
def fake_vdi_resize(*args, **kwargs):
self.called = True
def fake_finish_revert_migration(*args, **kwargs):
self.fake_finish_revert_migration_called = True
self.stubs.Set(stubs.FakeSessionForVMTests,
"VDI_resize_online", fake_vdi_resize)
self.stubs.Set(vmops.VMOps, '_start', fake_vm_start)
self.stubs.Set(vmops.VMOps, 'finish_revert_migration',
fake_finish_revert_migration)
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests,
product_version=(4, 0, 0),
product_brand='XenServer')
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
network_info = fake_network.fake_get_instance_nw_info(self.stubs)
image_meta = {'id': instance['image_ref'], 'disk_format': 'vhd'}
base = xenapi_fake.create_vdi('hurr', 'fake')
base_uuid = xenapi_fake.get_record('VDI', base)['uuid']
cow = xenapi_fake.create_vdi('durr', 'fake')
cow_uuid = xenapi_fake.get_record('VDI', cow)['uuid']
conn.finish_migration(self.context, self.migration, instance,
dict(base_copy=base_uuid, cow=cow_uuid),
network_info, image_meta, resize_instance=True,
block_device_info=None, power_on=power_on)
self.assertEqual(self.called, True)
self.assertEqual(self.fake_vm_start_called, power_on)
conn.finish_revert_migration(context, instance, network_info)
self.assertEqual(self.fake_finish_revert_migration_called, True)
def test_revert_migrate_power_on(self):
self._test_revert_migrate(True)
def test_revert_migrate_power_off(self):
self._test_revert_migrate(False)
def _test_finish_migrate(self, power_on):
instance = create_instance_with_system_metadata(self.context,
self.instance_values)
self.called = False
self.fake_vm_start_called = False
def fake_vm_start(*args, **kwargs):
self.fake_vm_start_called = True
def fake_vdi_resize(*args, **kwargs):
self.called = True
self.stubs.Set(vmops.VMOps, '_start', fake_vm_start)
self.stubs.Set(stubs.FakeSessionForVMTests,
"VDI_resize_online", fake_vdi_resize)
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests,
product_version=(4, 0, 0),
product_brand='XenServer')
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
network_info = fake_network.fake_get_instance_nw_info(self.stubs)
image_meta = {'id': instance['image_ref'], 'disk_format': 'vhd'}
conn.finish_migration(self.context, self.migration, instance,
dict(base_copy='hurr', cow='durr'),
network_info, image_meta, resize_instance=True,
block_device_info=None, power_on=power_on)
self.assertEqual(self.called, True)
self.assertEqual(self.fake_vm_start_called, power_on)
def test_finish_migrate_power_on(self):
self._test_finish_migrate(True)
def test_finish_migrate_power_off(self):
self._test_finish_migrate(False)
def test_finish_migrate_no_local_storage(self):
values = copy.copy(self.instance_values)
values["root_gb"] = 0
values["ephemeral_gb"] = 0
instance = create_instance_with_system_metadata(self.context, values)
def fake_vdi_resize(*args, **kwargs):
raise Exception("This shouldn't be called")
self.stubs.Set(stubs.FakeSessionForVMTests,
"VDI_resize_online", fake_vdi_resize)
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
network_info = fake_network.fake_get_instance_nw_info(self.stubs)
image_meta = {'id': instance['image_ref'], 'disk_format': 'vhd'}
conn.finish_migration(self.context, self.migration, instance,
dict(base_copy='hurr', cow='durr'),
network_info, image_meta, resize_instance=True)
def test_finish_migrate_no_resize_vdi(self):
instance = create_instance_with_system_metadata(self.context,
self.instance_values)
def fake_vdi_resize(*args, **kwargs):
raise Exception("This shouldn't be called")
self.stubs.Set(stubs.FakeSessionForVMTests,
"VDI_resize_online", fake_vdi_resize)
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
network_info = fake_network.fake_get_instance_nw_info(self.stubs)
# Resize instance would be determined by the compute call
image_meta = {'id': instance['image_ref'], 'disk_format': 'vhd'}
conn.finish_migration(self.context, self.migration, instance,
dict(base_copy='hurr', cow='durr'),
network_info, image_meta, resize_instance=False)
@stub_vm_utils_with_vdi_attached_here
def test_migrate_too_many_partitions_no_resize_down(self):
instance_values = self.instance_values
instance = db.instance_create(self.context, instance_values)
xenapi_fake.create_vm(instance['name'], 'Running')
flavor = db.flavor_get_by_name(self.context, 'm1.small')
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
def fake_get_partitions(partition):
return [(1, 2, 3, 4), (1, 2, 3, 4)]
self.stubs.Set(vm_utils, '_get_partitions', fake_get_partitions)
self.assertRaises(exception.InstanceFaultRollback,
conn.migrate_disk_and_power_off,
self.context, instance,
'127.0.0.1', flavor, None)
@stub_vm_utils_with_vdi_attached_here
def test_migrate_bad_fs_type_no_resize_down(self):
instance_values = self.instance_values
instance = db.instance_create(self.context, instance_values)
xenapi_fake.create_vm(instance['name'], 'Running')
flavor = db.flavor_get_by_name(self.context, 'm1.small')
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
def fake_get_partitions(partition):
return [(1, 2, 3, "ext2")]
self.stubs.Set(vm_utils, '_get_partitions', fake_get_partitions)
self.assertRaises(exception.InstanceFaultRollback,
conn.migrate_disk_and_power_off,
self.context, instance,
'127.0.0.1', flavor, None)
def test_migrate_rollback_when_resize_down_fs_fails(self):
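        # Mock the resize-down path so migrate_vhd raises, then verify the new
        # VDI is destroyed and the original VM is restored before the failure
        # surfaces as InstanceFaultRollback.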
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
vmops = conn._vmops
virtapi = vmops._virtapi
self.mox.StubOutWithMock(vmops, '_resize_ensure_vm_is_shutdown')
self.mox.StubOutWithMock(vmops, '_apply_orig_vm_name_label')
self.mox.StubOutWithMock(vm_utils, 'resize_disk')
self.mox.StubOutWithMock(vm_utils, 'migrate_vhd')
self.mox.StubOutWithMock(vm_utils, 'destroy_vdi')
self.mox.StubOutWithMock(vm_utils, 'get_vdi_for_vm_safely')
self.mox.StubOutWithMock(vmops, '_restore_orig_vm_and_cleanup_orphan')
self.mox.StubOutWithMock(virtapi, 'instance_update')
instance = {'auto_disk_config': True, 'uuid': 'uuid'}
vm_ref = "vm_ref"
dest = "dest"
flavor = "type"
sr_path = "sr_path"
virtapi.instance_update(self.context, 'uuid', {'progress': 20.0})
vmops._resize_ensure_vm_is_shutdown(instance, vm_ref)
vmops._apply_orig_vm_name_label(instance, vm_ref)
old_vdi_ref = "old_ref"
vm_utils.get_vdi_for_vm_safely(vmops._session, vm_ref).AndReturn(
(old_vdi_ref, None))
virtapi.instance_update(self.context, 'uuid', {'progress': 40.0})
new_vdi_ref = "new_ref"
new_vdi_uuid = "new_uuid"
vm_utils.resize_disk(vmops._session, instance, old_vdi_ref,
flavor).AndReturn((new_vdi_ref, new_vdi_uuid))
virtapi.instance_update(self.context, 'uuid', {'progress': 60.0})
vm_utils.migrate_vhd(vmops._session, instance, new_vdi_uuid, dest,
sr_path, 0).AndRaise(
exception.ResizeError(reason="asdf"))
vm_utils.destroy_vdi(vmops._session, new_vdi_ref)
vmops._restore_orig_vm_and_cleanup_orphan(instance)
self.mox.ReplayAll()
self.assertRaises(exception.InstanceFaultRollback,
vmops._migrate_disk_resizing_down, self.context,
instance, dest, flavor, vm_ref, sr_path)
def test_resize_ensure_vm_is_shutdown_cleanly(self):
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
vmops = conn._vmops
fake_instance = {'uuid': 'uuid'}
self.mox.StubOutWithMock(vm_utils, 'is_vm_shutdown')
self.mox.StubOutWithMock(vm_utils, 'clean_shutdown_vm')
self.mox.StubOutWithMock(vm_utils, 'hard_shutdown_vm')
vm_utils.is_vm_shutdown(vmops._session, "ref").AndReturn(False)
vm_utils.clean_shutdown_vm(vmops._session, fake_instance,
"ref").AndReturn(True)
self.mox.ReplayAll()
vmops._resize_ensure_vm_is_shutdown(fake_instance, "ref")
def test_resize_ensure_vm_is_shutdown_forced(self):
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
vmops = conn._vmops
fake_instance = {'uuid': 'uuid'}
self.mox.StubOutWithMock(vm_utils, 'is_vm_shutdown')
self.mox.StubOutWithMock(vm_utils, 'clean_shutdown_vm')
self.mox.StubOutWithMock(vm_utils, 'hard_shutdown_vm')
vm_utils.is_vm_shutdown(vmops._session, "ref").AndReturn(False)
vm_utils.clean_shutdown_vm(vmops._session, fake_instance,
"ref").AndReturn(False)
vm_utils.hard_shutdown_vm(vmops._session, fake_instance,
"ref").AndReturn(True)
self.mox.ReplayAll()
vmops._resize_ensure_vm_is_shutdown(fake_instance, "ref")
def test_resize_ensure_vm_is_shutdown_fails(self):
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
vmops = conn._vmops
fake_instance = {'uuid': 'uuid'}
self.mox.StubOutWithMock(vm_utils, 'is_vm_shutdown')
self.mox.StubOutWithMock(vm_utils, 'clean_shutdown_vm')
self.mox.StubOutWithMock(vm_utils, 'hard_shutdown_vm')
vm_utils.is_vm_shutdown(vmops._session, "ref").AndReturn(False)
vm_utils.clean_shutdown_vm(vmops._session, fake_instance,
"ref").AndReturn(False)
vm_utils.hard_shutdown_vm(vmops._session, fake_instance,
"ref").AndReturn(False)
self.mox.ReplayAll()
self.assertRaises(exception.ResizeError,
vmops._resize_ensure_vm_is_shutdown, fake_instance, "ref")
def test_resize_ensure_vm_is_shutdown_already_shutdown(self):
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
vmops = conn._vmops
fake_instance = {'uuid': 'uuid'}
self.mox.StubOutWithMock(vm_utils, 'is_vm_shutdown')
self.mox.StubOutWithMock(vm_utils, 'clean_shutdown_vm')
self.mox.StubOutWithMock(vm_utils, 'hard_shutdown_vm')
vm_utils.is_vm_shutdown(vmops._session, "ref").AndReturn(True)
self.mox.ReplayAll()
vmops._resize_ensure_vm_is_shutdown(fake_instance, "ref")
class XenAPIImageTypeTestCase(test.NoDBTestCase):
"""Test ImageType class."""
def test_to_string(self):
# Can convert from type id to type string.
self.assertEqual(
vm_utils.ImageType.to_string(vm_utils.ImageType.KERNEL),
vm_utils.ImageType.KERNEL_STR)
def _assert_role(self, expected_role, image_type_id):
self.assertEqual(
expected_role,
vm_utils.ImageType.get_role(image_type_id))
def test_get_image_role_kernel(self):
self._assert_role('kernel', vm_utils.ImageType.KERNEL)
def test_get_image_role_ramdisk(self):
self._assert_role('ramdisk', vm_utils.ImageType.RAMDISK)
def test_get_image_role_disk(self):
self._assert_role('root', vm_utils.ImageType.DISK)
def test_get_image_role_disk_raw(self):
self._assert_role('root', vm_utils.ImageType.DISK_RAW)
def test_get_image_role_disk_vhd(self):
self._assert_role('root', vm_utils.ImageType.DISK_VHD)
class XenAPIDetermineDiskImageTestCase(test.NoDBTestCase):
"""Unit tests for code that detects the ImageType."""
def assert_disk_type(self, image_meta, expected_disk_type):
actual = vm_utils.determine_disk_image_type(image_meta)
self.assertEqual(expected_disk_type, actual)
def test_machine(self):
image_meta = {'id': 'a', 'disk_format': 'ami'}
self.assert_disk_type(image_meta, vm_utils.ImageType.DISK)
def test_raw(self):
image_meta = {'id': 'a', 'disk_format': 'raw'}
self.assert_disk_type(image_meta, vm_utils.ImageType.DISK_RAW)
def test_vhd(self):
image_meta = {'id': 'a', 'disk_format': 'vhd'}
self.assert_disk_type(image_meta, vm_utils.ImageType.DISK_VHD)
def test_none(self):
image_meta = None
self.assert_disk_type(image_meta, None)
# FIXME(sirp): convert this to use XenAPITestBaseNoDB
class XenAPIHostTestCase(stubs.XenAPITestBase):
"""Tests HostState, which holds metrics from XenServer that get
reported back to the Schedulers.
"""
def setUp(self):
super(XenAPIHostTestCase, self).setUp()
self.flags(connection_url='test_url',
connection_password='test_pass',
group='xenserver')
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
self.context = context.get_admin_context()
self.flags(use_local=True, group='conductor')
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
self.instance = fake_instance.fake_db_instance(name='foo')
def test_host_state(self):
stats = self.conn.get_host_stats()
# Values from fake.create_local_srs (ext SR)
self.assertEqual(stats['disk_total'], 40000)
self.assertEqual(stats['disk_used'], 20000)
# Values from fake._plugin_xenhost_host_data
self.assertEqual(stats['host_memory_total'], 10)
self.assertEqual(stats['host_memory_overhead'], 20)
self.assertEqual(stats['host_memory_free'], 30)
self.assertEqual(stats['host_memory_free_computed'], 40)
self.assertEqual(stats['hypervisor_hostname'], 'fake-xenhost')
self.assertThat({'cpu_count': 50},
matchers.DictMatches(stats['host_cpu_info']))
# No VMs running
self.assertEqual(stats['vcpus_used'], 0)
def test_host_state_vcpus_used(self):
stats = self.conn.get_host_stats(True)
self.assertEqual(stats['vcpus_used'], 0)
vm = xenapi_fake.create_vm(self.instance['name'], 'Running')
stats = self.conn.get_host_stats(True)
self.assertEqual(stats['vcpus_used'], 4)
def test_pci_passthrough_devices_whitelist(self):
# NOTE(guillaume-thouvenin): This pci whitelist will be used to
# match with _plugin_xenhost_get_pci_device_details method in fake.py.
self.flags(pci_passthrough_whitelist=
['[{"vendor_id":"10de", "product_id":"11bf"}]'])
stats = self.conn.get_host_stats()
self.assertEqual(len(stats['pci_passthrough_devices']), 1)
def test_pci_passthrough_devices_no_whitelist(self):
stats = self.conn.get_host_stats()
self.assertEqual(len(stats['pci_passthrough_devices']), 0)
def test_host_state_missing_sr(self):
def fake_safe_find_sr(session):
raise exception.StorageRepositoryNotFound('not there')
self.stubs.Set(vm_utils, 'safe_find_sr', fake_safe_find_sr)
self.assertRaises(exception.StorageRepositoryNotFound,
self.conn.get_host_stats)
def _test_host_action(self, method, action, expected=None):
result = method('host', action)
if not expected:
expected = action
self.assertEqual(result, expected)
def test_host_reboot(self):
self._test_host_action(self.conn.host_power_action, 'reboot')
def test_host_shutdown(self):
self._test_host_action(self.conn.host_power_action, 'shutdown')
def test_host_startup(self):
self.assertRaises(NotImplementedError,
self.conn.host_power_action, 'host', 'startup')
def test_host_maintenance_on(self):
self._test_host_action(self.conn.host_maintenance_mode,
True, 'on_maintenance')
def test_host_maintenance_off(self):
self._test_host_action(self.conn.host_maintenance_mode,
False, 'off_maintenance')
def test_set_enable_host_enable(self):
values = _create_service_entries(self.context, values={'nova':
['host']})
self._test_host_action(self.conn.set_host_enabled, True, 'enabled')
service = db.service_get_by_args(self.context, 'host', 'nova-compute')
self.assertEqual(service.disabled, False)
def test_set_enable_host_disable(self):
values = _create_service_entries(self.context, values={'nova':
['host']})
self._test_host_action(self.conn.set_host_enabled, False, 'disabled')
service = db.service_get_by_args(self.context, 'host', 'nova-compute')
self.assertEqual(service.disabled, True)
def test_get_host_uptime(self):
result = self.conn.get_host_uptime('host')
self.assertEqual(result, 'fake uptime')
def test_supported_instances_is_included_in_host_state(self):
stats = self.conn.get_host_stats()
self.assertIn('supported_instances', stats)
def test_supported_instances_is_calculated_by_to_supported_instances(self):
def to_supported_instances(somedata):
self.assertIsNone(somedata)
return "SOMERETURNVALUE"
self.stubs.Set(host, 'to_supported_instances', to_supported_instances)
stats = self.conn.get_host_stats()
self.assertEqual("SOMERETURNVALUE", stats['supported_instances'])
def test_update_stats_caches_hostname(self):
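        # The hypervisor hostname reported in host stats should come from the
        # value cached on the first refresh.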
self.mox.StubOutWithMock(host, 'call_xenhost')
self.mox.StubOutWithMock(vm_utils, 'scan_default_sr')
self.mox.StubOutWithMock(vm_utils, 'list_vms')
self.mox.StubOutWithMock(self.conn._session, 'call_xenapi')
data = {'disk_total': 0,
'disk_used': 0,
'disk_available': 0,
'supported_instances': 0,
'host_capabilities': [],
'host_hostname': 'foo',
'vcpus_used': 0,
}
sr_rec = {
'physical_size': 0,
'physical_utilisation': 0,
}
for i in range(3):
host.call_xenhost(mox.IgnoreArg(), 'host_data', {}).AndReturn(data)
vm_utils.scan_default_sr(self.conn._session).AndReturn("ref")
vm_utils.list_vms(self.conn._session).AndReturn([])
self.conn._session.call_xenapi('SR.get_record', "ref").AndReturn(
sr_rec)
if i == 2:
                # On the third recorded call (hit by the second get_host_stats
                # below), change the hostname returned by the fake.
data = dict(data, host_hostname='bar')
self.mox.ReplayAll()
stats = self.conn.get_host_stats(refresh=True)
self.assertEqual('foo', stats['hypervisor_hostname'])
stats = self.conn.get_host_stats(refresh=True)
self.assertEqual('foo', stats['hypervisor_hostname'])
class ToSupportedInstancesTestCase(test.NoDBTestCase):
def test_default_return_value(self):
self.assertEqual([],
host.to_supported_instances(None))
def test_return_value(self):
self.assertEqual([('x86_64', 'xapi', 'xen')],
host.to_supported_instances([u'xen-3.0-x86_64']))
def test_invalid_values_do_not_break(self):
self.assertEqual([('x86_64', 'xapi', 'xen')],
host.to_supported_instances([u'xen-3.0-x86_64', 'spam']))
def test_multiple_values(self):
self.assertEqual(
[
('x86_64', 'xapi', 'xen'),
('x86_32', 'xapi', 'hvm')
],
host.to_supported_instances([u'xen-3.0-x86_64', 'hvm-3.0-x86_32'])
)
# FIXME(sirp): convert this to use XenAPITestBaseNoDB
class XenAPIAutoDiskConfigTestCase(stubs.XenAPITestBase):
def setUp(self):
super(XenAPIAutoDiskConfigTestCase, self).setUp()
self.flags(connection_url='test_url',
connection_password='test_pass',
group='xenserver')
self.flags(firewall_driver='nova.virt.xenapi.firewall.'
'Dom0IptablesFirewallDriver')
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
self.user_id = 'fake'
self.project_id = 'fake'
self.instance_values = {'id': 1,
'project_id': self.project_id,
'user_id': self.user_id,
'image_ref': 1,
'kernel_id': 2,
'ramdisk_id': 3,
'root_gb': 80,
'ephemeral_gb': 0,
'instance_type_id': '3', # m1.large
'os_type': 'linux',
'architecture': 'x86-64'}
self.context = context.RequestContext(self.user_id, self.project_id)
def fake_create_vbd(session, vm_ref, vdi_ref, userdevice,
vbd_type='disk', read_only=False, bootable=True,
osvol=False):
pass
self.stubs.Set(vm_utils, 'create_vbd', fake_create_vbd)
def assertIsPartitionCalled(self, called):
marker = {"partition_called": False}
def fake_resize_part_and_fs(dev, start, old, new):
marker["partition_called"] = True
self.stubs.Set(vm_utils, "_resize_part_and_fs",
fake_resize_part_and_fs)
ctx = context.RequestContext(self.user_id, self.project_id)
session = get_session()
disk_image_type = vm_utils.ImageType.DISK_VHD
instance = create_instance_with_system_metadata(self.context,
self.instance_values)
vm_ref = xenapi_fake.create_vm(instance['name'], 'Halted')
vdi_ref = xenapi_fake.create_vdi(instance['name'], 'fake')
vdi_uuid = session.call_xenapi('VDI.get_record', vdi_ref)['uuid']
vdis = {'root': {'uuid': vdi_uuid, 'ref': vdi_ref}}
self.conn._vmops._attach_disks(instance, vm_ref, instance['name'],
vdis, disk_image_type, "fake_nw_inf")
self.assertEqual(marker["partition_called"], called)
def test_instance_not_auto_disk_config(self):
"""Should not partition unless instance is marked as
auto_disk_config.
"""
self.instance_values['auto_disk_config'] = False
self.assertIsPartitionCalled(False)
@stub_vm_utils_with_vdi_attached_here
def test_instance_auto_disk_config_doesnt_pass_fail_safes(self):
# Should not partition unless fail safes pass.
self.instance_values['auto_disk_config'] = True
def fake_get_partitions(dev):
return [(1, 0, 100, 'ext4'), (2, 100, 200, 'ext4')]
self.stubs.Set(vm_utils, "_get_partitions",
fake_get_partitions)
self.assertIsPartitionCalled(False)
@stub_vm_utils_with_vdi_attached_here
def test_instance_auto_disk_config_passes_fail_safes(self):
"""Should partition if instance is marked as auto_disk_config=True and
virt-layer specific fail-safe checks pass.
"""
self.instance_values['auto_disk_config'] = True
def fake_get_partitions(dev):
return [(1, 0, 100, 'ext4')]
self.stubs.Set(vm_utils, "_get_partitions",
fake_get_partitions)
self.assertIsPartitionCalled(True)
# FIXME(sirp): convert this to use XenAPITestBaseNoDB
class XenAPIGenerateLocal(stubs.XenAPITestBase):
"""Test generating of local disks, like swap and ephemeral."""
def setUp(self):
super(XenAPIGenerateLocal, self).setUp()
self.flags(connection_url='test_url',
connection_password='test_pass',
group='xenserver')
self.flags(firewall_driver='nova.virt.xenapi.firewall.'
'Dom0IptablesFirewallDriver')
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
db_fakes.stub_out_db_instance_api(self.stubs)
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
self.user_id = 'fake'
self.project_id = 'fake'
self.instance_values = {'id': 1,
'project_id': self.project_id,
'user_id': self.user_id,
'image_ref': 1,
'kernel_id': 2,
'ramdisk_id': 3,
'root_gb': 80,
'ephemeral_gb': 0,
'instance_type_id': '3', # m1.large
'os_type': 'linux',
'architecture': 'x86-64'}
self.context = context.RequestContext(self.user_id, self.project_id)
def fake_create_vbd(session, vm_ref, vdi_ref, userdevice,
vbd_type='disk', read_only=False, bootable=True,
osvol=False, empty=False, unpluggable=True):
return session.call_xenapi('VBD.create', {'VM': vm_ref,
'VDI': vdi_ref})
self.stubs.Set(vm_utils, 'create_vbd', fake_create_vbd)
def assertCalled(self, instance,
disk_image_type=vm_utils.ImageType.DISK_VHD):
ctx = context.RequestContext(self.user_id, self.project_id)
session = get_session()
vm_ref = xenapi_fake.create_vm(instance['name'], 'Halted')
vdi_ref = xenapi_fake.create_vdi(instance['name'], 'fake')
vdi_uuid = session.call_xenapi('VDI.get_record', vdi_ref)['uuid']
vdi_key = 'root'
if disk_image_type == vm_utils.ImageType.DISK_ISO:
vdi_key = 'iso'
vdis = {vdi_key: {'uuid': vdi_uuid, 'ref': vdi_ref}}
self.called = False
self.conn._vmops._attach_disks(instance, vm_ref, instance['name'],
vdis, disk_image_type, "fake_nw_inf")
self.assertTrue(self.called)
def test_generate_swap(self):
# Test swap disk generation.
instance_values = dict(self.instance_values, instance_type_id=5)
instance = create_instance_with_system_metadata(self.context,
instance_values)
def fake_generate_swap(*args, **kwargs):
self.called = True
self.stubs.Set(vm_utils, 'generate_swap', fake_generate_swap)
self.assertCalled(instance)
def test_generate_ephemeral(self):
# Test ephemeral disk generation.
instance_values = dict(self.instance_values, instance_type_id=4)
instance = create_instance_with_system_metadata(self.context,
instance_values)
def fake_generate_ephemeral(*args):
self.called = True
self.stubs.Set(vm_utils, 'generate_ephemeral', fake_generate_ephemeral)
self.assertCalled(instance)
def test_generate_iso_blank_root_disk(self):
instance_values = dict(self.instance_values, instance_type_id=4)
instance_values.pop('kernel_id')
instance_values.pop('ramdisk_id')
instance = create_instance_with_system_metadata(self.context,
instance_values)
def fake_generate_ephemeral(*args):
pass
self.stubs.Set(vm_utils, 'generate_ephemeral', fake_generate_ephemeral)
def fake_generate_iso(*args):
self.called = True
self.stubs.Set(vm_utils, 'generate_iso_blank_root_disk',
fake_generate_iso)
self.assertCalled(instance, vm_utils.ImageType.DISK_ISO)
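    # The three tests above follow one pattern: swap the relevant vm_utils helper
    # (generate_swap, generate_ephemeral or generate_iso_blank_root_disk) for a
    # stub that flips self.called, drive _attach_disks through assertCalled, and
    # then assert the flag. Only the fact that the helper was invoked is checked,
    # not the arguments it received.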
class XenAPIBWCountersTestCase(stubs.XenAPITestBaseNoDB):
FAKE_VMS = {'test1:ref': dict(name_label='test1',
other_config=dict(nova_uuid='hash'),
domid='12',
_vifmap={'0': "a:b:c:d...",
'1': "e:f:12:q..."}),
'test2:ref': dict(name_label='test2',
other_config=dict(nova_uuid='hash'),
domid='42',
_vifmap={'0': "a:3:c:d...",
'1': "e:f:42:q..."}),
}
def setUp(self):
super(XenAPIBWCountersTestCase, self).setUp()
self.stubs.Set(vm_utils, 'list_vms',
XenAPIBWCountersTestCase._fake_list_vms)
self.flags(connection_url='test_url',
connection_password='test_pass',
group='xenserver')
self.flags(firewall_driver='nova.virt.xenapi.firewall.'
'Dom0IptablesFirewallDriver')
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
def _fake_get_vif_device_map(vm_rec):
return vm_rec['_vifmap']
self.stubs.Set(self.conn._vmops, "_get_vif_device_map",
_fake_get_vif_device_map)
@classmethod
def _fake_list_vms(cls, session):
return cls.FAKE_VMS.iteritems()
@staticmethod
def _fake_fetch_bandwidth_mt(session):
return {}
@staticmethod
def _fake_fetch_bandwidth(session):
return {'42':
{'0': {'bw_in': 21024, 'bw_out': 22048},
'1': {'bw_in': 231337, 'bw_out': 221212121}},
'12':
{'0': {'bw_in': 1024, 'bw_out': 2048},
'1': {'bw_in': 31337, 'bw_out': 21212121}},
}
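    # The fake bandwidth data is keyed first by domid (as a string, matching the
    # 'domid' values in FAKE_VMS) and then by VIF device id, mirroring what the
    # stubbed vm_utils.fetch_bandwidth is expected to return. get_all_bw_counters
    # joins it with each VM's _vifmap (via the stubbed _get_vif_device_map) to
    # yield one entry per (instance uuid, mac address) pair, as asserted below.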
def test_get_all_bw_counters(self):
instances = [dict(name='test1', uuid='1-2-3'),
dict(name='test2', uuid='4-5-6')]
self.stubs.Set(vm_utils, 'fetch_bandwidth',
self._fake_fetch_bandwidth)
result = self.conn.get_all_bw_counters(instances)
self.assertEqual(len(result), 4)
self.assertIn(dict(uuid='1-2-3',
mac_address="a:b:c:d...",
bw_in=1024,
bw_out=2048), result)
self.assertIn(dict(uuid='1-2-3',
mac_address="e:f:12:q...",
bw_in=31337,
bw_out=21212121), result)
self.assertIn(dict(uuid='4-5-6',
mac_address="a:3:c:d...",
bw_in=21024,
bw_out=22048), result)
self.assertIn(dict(uuid='4-5-6',
mac_address="e:f:42:q...",
bw_in=231337,
bw_out=221212121), result)
def test_get_all_bw_counters_in_failure_case(self):
"""Test that get_all_bw_conters returns an empty list when
no data returned from Xenserver. c.f. bug #910045.
"""
instances = [dict(name='instance-0001', uuid='1-2-3-4-5')]
self.stubs.Set(vm_utils, 'fetch_bandwidth',
self._fake_fetch_bandwidth_mt)
result = self.conn.get_all_bw_counters(instances)
self.assertEqual(result, [])
# TODO(salvatore-orlando): this class and
# nova.tests.virt.test_libvirt.IPTablesFirewallDriverTestCase share a lot of
# code. Consider abstracting common code in a base class for firewall driver
# testing.
# FIXME(sirp): convert this to use XenAPITestBaseNoDB
class XenAPIDom0IptablesFirewallTestCase(stubs.XenAPITestBase):
_in_rules = [
'# Generated by iptables-save v1.4.10 on Sat Feb 19 00:03:19 2011',
'*nat',
':PREROUTING ACCEPT [1170:189210]',
':INPUT ACCEPT [844:71028]',
':OUTPUT ACCEPT [5149:405186]',
':POSTROUTING ACCEPT [5063:386098]',
'# Completed on Mon Dec 6 11:54:13 2010',
'# Generated by iptables-save v1.4.4 on Mon Dec 6 11:54:13 2010',
'*mangle',
':INPUT ACCEPT [969615:281627771]',
':FORWARD ACCEPT [0:0]',
':OUTPUT ACCEPT [915599:63811649]',
':nova-block-ipv4 - [0:0]',
'[0:0] -A INPUT -i virbr0 -p tcp -m tcp --dport 67 -j ACCEPT ',
'[0:0] -A FORWARD -d 192.168.122.0/24 -o virbr0 -m state --state RELATED'
',ESTABLISHED -j ACCEPT ',
'[0:0] -A FORWARD -s 192.168.122.0/24 -i virbr0 -j ACCEPT ',
'[0:0] -A FORWARD -i virbr0 -o virbr0 -j ACCEPT ',
'[0:0] -A FORWARD -o virbr0 -j REJECT '
'--reject-with icmp-port-unreachable ',
'[0:0] -A FORWARD -i virbr0 -j REJECT '
'--reject-with icmp-port-unreachable ',
'COMMIT',
'# Completed on Mon Dec 6 11:54:13 2010',
'# Generated by iptables-save v1.4.4 on Mon Dec 6 11:54:13 2010',
'*filter',
':INPUT ACCEPT [969615:281627771]',
':FORWARD ACCEPT [0:0]',
':OUTPUT ACCEPT [915599:63811649]',
':nova-block-ipv4 - [0:0]',
'[0:0] -A INPUT -i virbr0 -p tcp -m tcp --dport 67 -j ACCEPT ',
'[0:0] -A FORWARD -d 192.168.122.0/24 -o virbr0 -m state --state RELATED'
',ESTABLISHED -j ACCEPT ',
'[0:0] -A FORWARD -s 192.168.122.0/24 -i virbr0 -j ACCEPT ',
'[0:0] -A FORWARD -i virbr0 -o virbr0 -j ACCEPT ',
'[0:0] -A FORWARD -o virbr0 -j REJECT '
'--reject-with icmp-port-unreachable ',
'[0:0] -A FORWARD -i virbr0 -j REJECT '
'--reject-with icmp-port-unreachable ',
'COMMIT',
'# Completed on Mon Dec 6 11:54:13 2010',
]
_in6_filter_rules = [
'# Generated by ip6tables-save v1.4.4 on Tue Jan 18 23:47:56 2011',
'*filter',
':INPUT ACCEPT [349155:75810423]',
':FORWARD ACCEPT [0:0]',
':OUTPUT ACCEPT [349256:75777230]',
'COMMIT',
'# Completed on Tue Jan 18 23:47:56 2011',
]
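    # These canned iptables-save / ip6tables-save dumps are presumably what
    # stubs.FakeSessionForFirewallTests (created in setUp below with
    # test_case=self) feeds back to the firewall driver; the rules the driver
    # writes out end up in self._out_rules, which _validate_security_group and
    # the tests below inspect.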
def setUp(self):
super(XenAPIDom0IptablesFirewallTestCase, self).setUp()
self.flags(connection_url='test_url',
connection_password='test_pass',
group='xenserver')
self.flags(instance_name_template='%d',
firewall_driver='nova.virt.xenapi.firewall.'
'Dom0IptablesFirewallDriver')
self.user_id = 'mappin'
self.project_id = 'fake'
stubs.stubout_session(self.stubs, stubs.FakeSessionForFirewallTests,
test_case=self)
self.context = context.RequestContext(self.user_id, self.project_id)
self.network = importutils.import_object(CONF.network_manager)
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
self.fw = self.conn._vmops.firewall_driver
def _create_instance_ref(self):
return db.instance_create(self.context,
{'user_id': self.user_id,
'project_id': self.project_id,
'instance_type_id': 1})
def _create_test_security_group(self):
admin_ctxt = context.get_admin_context()
secgroup = db.security_group_create(admin_ctxt,
{'user_id': self.user_id,
'project_id': self.project_id,
'name': 'testgroup',
'description': 'test group'})
db.security_group_rule_create(admin_ctxt,
{'parent_group_id': secgroup['id'],
'protocol': 'icmp',
'from_port': -1,
'to_port': -1,
'cidr': '192.168.11.0/24'})
db.security_group_rule_create(admin_ctxt,
{'parent_group_id': secgroup['id'],
'protocol': 'icmp',
'from_port': 8,
'to_port': -1,
'cidr': '192.168.11.0/24'})
db.security_group_rule_create(admin_ctxt,
{'parent_group_id': secgroup['id'],
'protocol': 'tcp',
'from_port': 80,
'to_port': 81,
'cidr': '192.168.10.0/24'})
return secgroup
def _validate_security_group(self):
in_rules = filter(lambda l: not l.startswith('#'),
self._in_rules)
for rule in in_rules:
if 'nova' not in rule:
self.assertTrue(rule in self._out_rules,
'Rule went missing: %s' % rule)
instance_chain = None
for rule in self._out_rules:
# This is pretty crude, but it'll do for now
# last two octets change
if re.search('-d 192.168.[0-9]{1,3}.[0-9]{1,3} -j', rule):
instance_chain = rule.split(' ')[-1]
break
self.assertTrue(instance_chain, "The instance chain wasn't added")
security_group_chain = None
for rule in self._out_rules:
# This is pretty crude, but it'll do for now
if '-A %s -j' % instance_chain in rule:
security_group_chain = rule.split(' ')[-1]
break
self.assertTrue(security_group_chain,
"The security group chain wasn't added")
regex = re.compile('\[0\:0\] -A .* -j ACCEPT -p icmp'
' -s 192.168.11.0/24')
self.assertTrue(len(filter(regex.match, self._out_rules)) > 0,
"ICMP acceptance rule wasn't added")
regex = re.compile('\[0\:0\] -A .* -j ACCEPT -p icmp -m icmp'
' --icmp-type 8 -s 192.168.11.0/24')
self.assertTrue(len(filter(regex.match, self._out_rules)) > 0,
"ICMP Echo Request acceptance rule wasn't added")
regex = re.compile('\[0\:0\] -A .* -j ACCEPT -p tcp --dport 80:81'
' -s 192.168.10.0/24')
self.assertTrue(len(filter(regex.match, self._out_rules)) > 0,
"TCP port 80/81 acceptance rule wasn't added")
def test_static_filters(self):
instance_ref = self._create_instance_ref()
src_instance_ref = self._create_instance_ref()
admin_ctxt = context.get_admin_context()
secgroup = self._create_test_security_group()
src_secgroup = db.security_group_create(admin_ctxt,
{'user_id': self.user_id,
'project_id': self.project_id,
'name': 'testsourcegroup',
'description': 'src group'})
db.security_group_rule_create(admin_ctxt,
{'parent_group_id': secgroup['id'],
'protocol': 'tcp',
'from_port': 80,
'to_port': 81,
'group_id': src_secgroup['id']})
db.instance_add_security_group(admin_ctxt, instance_ref['uuid'],
secgroup['id'])
db.instance_add_security_group(admin_ctxt, src_instance_ref['uuid'],
src_secgroup['id'])
instance_ref = db.instance_get(admin_ctxt, instance_ref['id'])
src_instance_ref = db.instance_get(admin_ctxt, src_instance_ref['id'])
network_model = fake_network.fake_get_instance_nw_info(self.stubs, 1)
from nova.compute import utils as compute_utils
self.stubs.Set(compute_utils, 'get_nw_info_for_instance',
lambda instance: network_model)
self.fw.prepare_instance_filter(instance_ref, network_model)
self.fw.apply_instance_filter(instance_ref, network_model)
self._validate_security_group()
# Extra test for TCP acceptance rules
for ip in network_model.fixed_ips():
if ip['version'] != 4:
continue
regex = re.compile('\[0\:0\] -A .* -j ACCEPT -p tcp'
' --dport 80:81 -s %s' % ip['address'])
self.assertTrue(len(filter(regex.match, self._out_rules)) > 0,
"TCP port 80/81 acceptance rule wasn't added")
db.instance_destroy(admin_ctxt, instance_ref['uuid'])
def test_filters_for_instance_with_ip_v6(self):
self.flags(use_ipv6=True)
network_info = fake_network.fake_get_instance_nw_info(self.stubs, 1)
rulesv4, rulesv6 = self.fw._filters_for_instance("fake", network_info)
self.assertEqual(len(rulesv4), 2)
self.assertEqual(len(rulesv6), 1)
def test_filters_for_instance_without_ip_v6(self):
self.flags(use_ipv6=False)
network_info = fake_network.fake_get_instance_nw_info(self.stubs, 1)
rulesv4, rulesv6 = self.fw._filters_for_instance("fake", network_info)
self.assertEqual(len(rulesv4), 2)
self.assertEqual(len(rulesv6), 0)
def test_multinic_iptables(self):
ipv4_rules_per_addr = 1
ipv4_addr_per_network = 2
ipv6_rules_per_addr = 1
ipv6_addr_per_network = 1
networks_count = 5
instance_ref = self._create_instance_ref()
_get_instance_nw_info = fake_network.fake_get_instance_nw_info
network_info = _get_instance_nw_info(self.stubs,
networks_count,
ipv4_addr_per_network)
network_info[0]['network']['subnets'][0]['meta']['dhcp_server'] = \
'1.1.1.1'
ipv4_len = len(self.fw.iptables.ipv4['filter'].rules)
ipv6_len = len(self.fw.iptables.ipv6['filter'].rules)
inst_ipv4, inst_ipv6 = self.fw.instance_rules(instance_ref,
network_info)
self.fw.prepare_instance_filter(instance_ref, network_info)
ipv4 = self.fw.iptables.ipv4['filter'].rules
ipv6 = self.fw.iptables.ipv6['filter'].rules
ipv4_network_rules = len(ipv4) - len(inst_ipv4) - ipv4_len
ipv6_network_rules = len(ipv6) - len(inst_ipv6) - ipv6_len
# Extra rules are for the DHCP request
rules = (ipv4_rules_per_addr * ipv4_addr_per_network *
networks_count) + 2
self.assertEqual(ipv4_network_rules, rules)
self.assertEqual(ipv6_network_rules,
ipv6_rules_per_addr * ipv6_addr_per_network * networks_count)
def test_do_refresh_security_group_rules(self):
admin_ctxt = context.get_admin_context()
instance_ref = self._create_instance_ref()
network_info = fake_network.fake_get_instance_nw_info(self.stubs, 1, 1)
secgroup = self._create_test_security_group()
db.instance_add_security_group(admin_ctxt, instance_ref['uuid'],
secgroup['id'])
self.fw.prepare_instance_filter(instance_ref, network_info)
self.fw.instances[instance_ref['id']] = instance_ref
self._validate_security_group()
# add a rule to the security group
db.security_group_rule_create(admin_ctxt,
{'parent_group_id': secgroup['id'],
'protocol': 'udp',
'from_port': 200,
'to_port': 299,
'cidr': '192.168.99.0/24'})
        # validate the extra rule
self.fw.refresh_security_group_rules(secgroup)
regex = re.compile('\[0\:0\] -A .* -j ACCEPT -p udp --dport 200:299'
' -s 192.168.99.0/24')
self.assertTrue(len(filter(regex.match, self._out_rules)) > 0,
"Rules were not updated properly."
"The rule for UDP acceptance is missing")
def test_provider_firewall_rules(self):
# setup basic instance data
instance_ref = self._create_instance_ref()
# FRAGILE: as in libvirt tests
# peeks at how the firewall names chains
chain_name = 'inst-%s' % instance_ref['id']
network_info = fake_network.fake_get_instance_nw_info(self.stubs, 1, 1)
self.fw.prepare_instance_filter(instance_ref, network_info)
self.assertIn('provider', self.fw.iptables.ipv4['filter'].chains)
rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
if rule.chain == 'provider']
self.assertEqual(0, len(rules))
admin_ctxt = context.get_admin_context()
# add a rule and send the update message, check for 1 rule
provider_fw0 = db.provider_fw_rule_create(admin_ctxt,
{'protocol': 'tcp',
'cidr': '10.99.99.99/32',
'from_port': 1,
'to_port': 65535})
self.fw.refresh_provider_fw_rules()
rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
if rule.chain == 'provider']
self.assertEqual(1, len(rules))
# Add another, refresh, and make sure number of rules goes to two
provider_fw1 = db.provider_fw_rule_create(admin_ctxt,
{'protocol': 'udp',
'cidr': '10.99.99.99/32',
'from_port': 1,
'to_port': 65535})
self.fw.refresh_provider_fw_rules()
rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
if rule.chain == 'provider']
self.assertEqual(2, len(rules))
# create the instance filter and make sure it has a jump rule
self.fw.prepare_instance_filter(instance_ref, network_info)
self.fw.apply_instance_filter(instance_ref, network_info)
inst_rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
if rule.chain == chain_name]
jump_rules = [rule for rule in inst_rules if '-j' in rule.rule]
provjump_rules = []
# IptablesTable doesn't make rules unique internally
for rule in jump_rules:
if 'provider' in rule.rule and rule not in provjump_rules:
provjump_rules.append(rule)
self.assertEqual(1, len(provjump_rules))
# remove a rule from the db, cast to compute to refresh rule
db.provider_fw_rule_destroy(admin_ctxt, provider_fw1['id'])
self.fw.refresh_provider_fw_rules()
rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
if rule.chain == 'provider']
self.assertEqual(1, len(rules))
class XenAPISRSelectionTestCase(stubs.XenAPITestBaseNoDB):
"""Unit tests for testing we find the right SR."""
def test_safe_find_sr_raise_exception(self):
        # Ensure StorageRepositoryNotFound is raised when the filter is wrong.
self.flags(sr_matching_filter='yadayadayada', group='xenserver')
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
session = get_session()
self.assertRaises(exception.StorageRepositoryNotFound,
vm_utils.safe_find_sr, session)
def test_safe_find_sr_local_storage(self):
# Ensure the default local-storage is found.
self.flags(sr_matching_filter='other-config:i18n-key=local-storage',
group='xenserver')
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
session = get_session()
# This test is only guaranteed if there is one host in the pool
self.assertEqual(len(xenapi_fake.get_all('host')), 1)
host_ref = xenapi_fake.get_all('host')[0]
pbd_refs = xenapi_fake.get_all('PBD')
for pbd_ref in pbd_refs:
pbd_rec = xenapi_fake.get_record('PBD', pbd_ref)
if pbd_rec['host'] != host_ref:
continue
sr_rec = xenapi_fake.get_record('SR', pbd_rec['SR'])
if sr_rec['other_config']['i18n-key'] == 'local-storage':
local_sr = pbd_rec['SR']
expected = vm_utils.safe_find_sr(session)
self.assertEqual(local_sr, expected)
def test_safe_find_sr_by_other_criteria(self):
# Ensure the SR is found when using a different filter.
self.flags(sr_matching_filter='other-config:my_fake_sr=true',
group='xenserver')
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
session = get_session()
host_ref = xenapi_fake.get_all('host')[0]
local_sr = xenapi_fake.create_sr(name_label='Fake Storage',
type='lvm',
other_config={'my_fake_sr': 'true'},
host_ref=host_ref)
expected = vm_utils.safe_find_sr(session)
self.assertEqual(local_sr, expected)
def test_safe_find_sr_default(self):
# Ensure the default SR is found regardless of other-config.
self.flags(sr_matching_filter='default-sr:true',
group='xenserver')
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
session = get_session()
pool_ref = session.call_xenapi('pool.get_all')[0]
expected = vm_utils.safe_find_sr(session)
self.assertEqual(session.call_xenapi('pool.get_default_SR', pool_ref),
expected)
def _create_service_entries(context, values={'avail_zone1': ['fake_host1',
'fake_host2'],
'avail_zone2': ['fake_host3'], }):
for avail_zone, hosts in values.iteritems():
for host in hosts:
db.service_create(context,
{'host': host,
'binary': 'nova-compute',
'topic': 'compute',
'report_count': 0})
return values
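# _create_service_entries seeds one nova-compute service row per host in each
# availability zone and returns the same mapping, so the host tests above and
# the aggregate tests below can look hosts up by zone.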
# FIXME(sirp): convert this to use XenAPITestBaseNoDB
class XenAPIAggregateTestCase(stubs.XenAPITestBase):
"""Unit tests for aggregate operations."""
def setUp(self):
super(XenAPIAggregateTestCase, self).setUp()
self.flags(connection_url='http://test_url',
connection_username='test_user',
connection_password='test_pass',
group='xenserver')
self.flags(instance_name_template='%d',
firewall_driver='nova.virt.xenapi.firewall.'
'Dom0IptablesFirewallDriver',
host='host',
compute_driver='xenapi.XenAPIDriver',
default_availability_zone='avail_zone1')
self.flags(use_local=True, group='conductor')
host_ref = xenapi_fake.get_all('host')[0]
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
self.context = context.get_admin_context()
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
self.compute = importutils.import_object(CONF.compute_manager)
self.api = compute_api.AggregateAPI()
values = {'name': 'test_aggr',
'metadata': {'availability_zone': 'test_zone',
pool_states.POOL_FLAG: 'XenAPI'}}
self.aggr = db.aggregate_create(self.context, values)
self.fake_metadata = {pool_states.POOL_FLAG: 'XenAPI',
'master_compute': 'host',
'availability_zone': 'fake_zone',
pool_states.KEY: pool_states.ACTIVE,
'host': xenapi_fake.get_record('host',
host_ref)['uuid']}
def test_pool_add_to_aggregate_called_by_driver(self):
calls = []
def pool_add_to_aggregate(context, aggregate, host, slave_info=None):
self.assertEqual("CONTEXT", context)
self.assertEqual("AGGREGATE", aggregate)
self.assertEqual("HOST", host)
self.assertEqual("SLAVEINFO", slave_info)
calls.append(pool_add_to_aggregate)
self.stubs.Set(self.conn._pool,
"add_to_aggregate",
pool_add_to_aggregate)
self.conn.add_to_aggregate("CONTEXT", "AGGREGATE", "HOST",
slave_info="SLAVEINFO")
self.assertIn(pool_add_to_aggregate, calls)
def test_pool_remove_from_aggregate_called_by_driver(self):
calls = []
def pool_remove_from_aggregate(context, aggregate, host,
slave_info=None):
self.assertEqual("CONTEXT", context)
self.assertEqual("AGGREGATE", aggregate)
self.assertEqual("HOST", host)
self.assertEqual("SLAVEINFO", slave_info)
calls.append(pool_remove_from_aggregate)
self.stubs.Set(self.conn._pool,
"remove_from_aggregate",
pool_remove_from_aggregate)
self.conn.remove_from_aggregate("CONTEXT", "AGGREGATE", "HOST",
slave_info="SLAVEINFO")
self.assertIn(pool_remove_from_aggregate, calls)
def test_add_to_aggregate_for_first_host_sets_metadata(self):
def fake_init_pool(id, name):
fake_init_pool.called = True
self.stubs.Set(self.conn._pool, "_init_pool", fake_init_pool)
aggregate = self._aggregate_setup()
self.conn._pool.add_to_aggregate(self.context, aggregate, "host")
result = db.aggregate_get(self.context, aggregate['id'])
self.assertTrue(fake_init_pool.called)
self.assertThat(self.fake_metadata,
matchers.DictMatches(result['metadetails']))
def test_join_slave(self):
# Ensure join_slave gets called when the request gets to master.
def fake_join_slave(id, compute_uuid, host, url, user, password):
fake_join_slave.called = True
self.stubs.Set(self.conn._pool, "_join_slave", fake_join_slave)
aggregate = self._aggregate_setup(hosts=['host', 'host2'],
metadata=self.fake_metadata)
self.conn._pool.add_to_aggregate(self.context, aggregate, "host2",
dict(compute_uuid='fake_uuid',
url='fake_url',
user='fake_user',
passwd='fake_pass',
xenhost_uuid='fake_uuid'))
self.assertTrue(fake_join_slave.called)
def test_add_to_aggregate_first_host(self):
def fake_pool_set_name_label(self, session, pool_ref, name):
fake_pool_set_name_label.called = True
self.stubs.Set(xenapi_fake.SessionBase, "pool_set_name_label",
fake_pool_set_name_label)
self.conn._session.call_xenapi("pool.create", {"name": "asdf"})
metadata = {'availability_zone': 'fake_zone',
pool_states.POOL_FLAG: "XenAPI",
pool_states.KEY: pool_states.CREATED}
aggregate = aggregate_obj.Aggregate()
aggregate.name = 'fake_aggregate'
aggregate.metadata = dict(metadata)
aggregate.create(self.context)
aggregate.add_host('host')
self.assertEqual(["host"], aggregate.hosts)
self.assertEqual(metadata, aggregate.metadata)
self.conn._pool.add_to_aggregate(self.context, aggregate, "host")
self.assertTrue(fake_pool_set_name_label.called)
def test_remove_from_aggregate_called(self):
def fake_remove_from_aggregate(context, aggregate, host):
fake_remove_from_aggregate.called = True
self.stubs.Set(self.conn._pool,
"remove_from_aggregate",
fake_remove_from_aggregate)
self.conn.remove_from_aggregate(None, None, None)
self.assertTrue(fake_remove_from_aggregate.called)
def test_remove_from_empty_aggregate(self):
result = self._aggregate_setup()
self.assertRaises(exception.InvalidAggregateAction,
self.conn._pool.remove_from_aggregate,
self.context, result, "test_host")
def test_remove_slave(self):
# Ensure eject slave gets called.
def fake_eject_slave(id, compute_uuid, host_uuid):
fake_eject_slave.called = True
self.stubs.Set(self.conn._pool, "_eject_slave", fake_eject_slave)
self.fake_metadata['host2'] = 'fake_host2_uuid'
aggregate = self._aggregate_setup(hosts=['host', 'host2'],
metadata=self.fake_metadata, aggr_state=pool_states.ACTIVE)
self.conn._pool.remove_from_aggregate(self.context, aggregate, "host2")
self.assertTrue(fake_eject_slave.called)
def test_remove_master_solo(self):
# Ensure metadata are cleared after removal.
def fake_clear_pool(id):
fake_clear_pool.called = True
self.stubs.Set(self.conn._pool, "_clear_pool", fake_clear_pool)
aggregate = self._aggregate_setup(metadata=self.fake_metadata)
self.conn._pool.remove_from_aggregate(self.context, aggregate, "host")
result = db.aggregate_get(self.context, aggregate['id'])
self.assertTrue(fake_clear_pool.called)
self.assertThat({'availability_zone': 'fake_zone',
pool_states.POOL_FLAG: 'XenAPI',
pool_states.KEY: pool_states.ACTIVE},
matchers.DictMatches(result['metadetails']))
def test_remote_master_non_empty_pool(self):
        # Ensure InvalidAggregateAction is raised when removing the master
        # from a non-empty pool.
aggregate = self._aggregate_setup(hosts=['host', 'host2'],
metadata=self.fake_metadata)
self.assertRaises(exception.InvalidAggregateAction,
self.conn._pool.remove_from_aggregate,
self.context, aggregate, "host")
def _aggregate_setup(self, aggr_name='fake_aggregate',
aggr_zone='fake_zone',
aggr_state=pool_states.CREATED,
hosts=['host'], metadata=None):
aggregate = aggregate_obj.Aggregate()
aggregate.name = aggr_name
aggregate.metadata = {'availability_zone': aggr_zone,
pool_states.POOL_FLAG: 'XenAPI',
pool_states.KEY: aggr_state,
}
if metadata:
aggregate.metadata.update(metadata)
aggregate.create(self.context)
for host in hosts:
aggregate.add_host(host)
return aggregate
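    # _aggregate_setup builds an Aggregate whose metadata carries the XenAPI pool
    # flag plus a pool_states lifecycle value (CREATED by default). The tests
    # below drive it through CHANGING, DISMISSED and ERROR to check that
    # add_to_aggregate and remove_from_aggregate refuse to act on an aggregate
    # that is not ready.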
def test_add_host_to_aggregate_invalid_changing_status(self):
"""Ensure InvalidAggregateAction is raised when adding host while
aggregate is not ready.
"""
aggregate = self._aggregate_setup(aggr_state=pool_states.CHANGING)
self.assertRaises(exception.InvalidAggregateAction,
self.conn.add_to_aggregate, self.context,
aggregate, 'host')
def test_add_host_to_aggregate_invalid_dismissed_status(self):
"""Ensure InvalidAggregateAction is raised when aggregate is
deleted.
"""
aggregate = self._aggregate_setup(aggr_state=pool_states.DISMISSED)
self.assertRaises(exception.InvalidAggregateAction,
self.conn.add_to_aggregate, self.context,
aggregate, 'fake_host')
def test_add_host_to_aggregate_invalid_error_status(self):
"""Ensure InvalidAggregateAction is raised when aggregate is
in error.
"""
aggregate = self._aggregate_setup(aggr_state=pool_states.ERROR)
self.assertRaises(exception.InvalidAggregateAction,
self.conn.add_to_aggregate, self.context,
aggregate, 'fake_host')
def test_remove_host_from_aggregate_error(self):
# Ensure we can remove a host from an aggregate even if in error.
values = _create_service_entries(self.context)
fake_zone = values.keys()[0]
aggr = self.api.create_aggregate(self.context,
'fake_aggregate', fake_zone)
# let's mock the fact that the aggregate is ready!
metadata = {pool_states.POOL_FLAG: "XenAPI",
pool_states.KEY: pool_states.ACTIVE}
db.aggregate_metadata_add(self.context, aggr['id'], metadata)
for host in values[fake_zone]:
aggr = self.api.add_host_to_aggregate(self.context,
aggr['id'], host)
# let's mock the fact that the aggregate is in error!
status = {'operational_state': pool_states.ERROR}
expected = self.api.remove_host_from_aggregate(self.context,
aggr['id'],
values[fake_zone][0])
self.assertEqual(len(aggr['hosts']) - 1, len(expected['hosts']))
self.assertEqual(expected['metadata'][pool_states.KEY],
pool_states.ACTIVE)
def test_remove_host_from_aggregate_invalid_dismissed_status(self):
"""Ensure InvalidAggregateAction is raised when aggregate is
deleted.
"""
aggregate = self._aggregate_setup(aggr_state=pool_states.DISMISSED)
self.assertRaises(exception.InvalidAggregateAction,
self.conn.remove_from_aggregate, self.context,
aggregate, 'fake_host')
def test_remove_host_from_aggregate_invalid_changing_status(self):
"""Ensure InvalidAggregateAction is raised when aggregate is
changing.
"""
aggregate = self._aggregate_setup(aggr_state=pool_states.CHANGING)
self.assertRaises(exception.InvalidAggregateAction,
self.conn.remove_from_aggregate, self.context,
aggregate, 'fake_host')
def test_add_aggregate_host_raise_err(self):
# Ensure the undo operation works correctly on add.
def fake_driver_add_to_aggregate(context, aggregate, host, **_ignore):
raise exception.AggregateError(
aggregate_id='', action='', reason='')
self.stubs.Set(self.compute.driver, "add_to_aggregate",
fake_driver_add_to_aggregate)
metadata = {pool_states.POOL_FLAG: "XenAPI",
pool_states.KEY: pool_states.ACTIVE}
db.aggregate_metadata_add(self.context, self.aggr['id'], metadata)
db.aggregate_host_add(self.context, self.aggr['id'], 'fake_host')
self.assertRaises(exception.AggregateError,
self.compute.add_aggregate_host,
self.context, host="fake_host",
aggregate=jsonutils.to_primitive(self.aggr),
slave_info=None)
        expected = db.aggregate_get(self.context, self.aggr['id'])
        self.assertEqual(expected['metadetails'][pool_states.KEY],
                         pool_states.ERROR)
        self.assertEqual(expected['hosts'], [])
class MockComputeAPI(object):
def __init__(self):
self._mock_calls = []
def add_aggregate_host(self, ctxt, aggregate,
host_param, host, slave_info):
self._mock_calls.append((
self.add_aggregate_host, ctxt, aggregate,
host_param, host, slave_info))
def remove_aggregate_host(self, ctxt, aggregate_id, host_param,
host, slave_info):
self._mock_calls.append((
self.remove_aggregate_host, ctxt, aggregate_id,
host_param, host, slave_info))
class StubDependencies(object):
"""Stub dependencies for ResourcePool."""
def __init__(self):
self.compute_rpcapi = MockComputeAPI()
def _is_hv_pool(self, *_ignore):
return True
def _get_metadata(self, *_ignore):
return {
pool_states.KEY: {},
'master_compute': 'master'
}
def _create_slave_info(self, *ignore):
return "SLAVE_INFO"
class ResourcePoolWithStubs(StubDependencies, pool.ResourcePool):
"""A ResourcePool, use stub dependencies."""
class HypervisorPoolTestCase(test.NoDBTestCase):
fake_aggregate = {
'id': 98,
'hosts': [],
'metadata': {
'master_compute': 'master',
pool_states.POOL_FLAG: {},
pool_states.KEY: {}
}
}
def test_slave_asks_master_to_add_slave_to_pool(self):
slave = ResourcePoolWithStubs()
slave.add_to_aggregate("CONTEXT", self.fake_aggregate, "slave")
self.assertIn(
(slave.compute_rpcapi.add_aggregate_host,
"CONTEXT", jsonutils.to_primitive(self.fake_aggregate),
"slave", "master", "SLAVE_INFO"),
slave.compute_rpcapi._mock_calls)
def test_slave_asks_master_to_remove_slave_from_pool(self):
slave = ResourcePoolWithStubs()
slave.remove_from_aggregate("CONTEXT", self.fake_aggregate, "slave")
self.assertIn(
(slave.compute_rpcapi.remove_aggregate_host,
"CONTEXT", 98, "slave", "master", "SLAVE_INFO"),
slave.compute_rpcapi._mock_calls)
class SwapXapiHostTestCase(test.NoDBTestCase):
def test_swapping(self):
self.assertEqual(
"http://otherserver:8765/somepath",
pool.swap_xapi_host(
"http://someserver:8765/somepath", 'otherserver'))
def test_no_port(self):
self.assertEqual(
"http://otherserver/somepath",
pool.swap_xapi_host(
"http://someserver/somepath", 'otherserver'))
def test_no_path(self):
self.assertEqual(
"http://otherserver",
pool.swap_xapi_host(
"http://someserver", 'otherserver'))
class XenAPILiveMigrateTestCase(stubs.XenAPITestBaseNoDB):
"""Unit tests for live_migration."""
def setUp(self):
super(XenAPILiveMigrateTestCase, self).setUp()
self.flags(connection_url='test_url',
connection_password='test_pass',
group='xenserver')
self.flags(firewall_driver='nova.virt.xenapi.firewall.'
'Dom0IptablesFirewallDriver',
host='host')
db_fakes.stub_out_db_instance_api(self.stubs)
self.context = context.get_admin_context()
def test_live_migration_calls_vmops(self):
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
def fake_live_migrate(context, instance_ref, dest, post_method,
recover_method, block_migration, migrate_data):
fake_live_migrate.called = True
self.stubs.Set(self.conn._vmops, "live_migrate", fake_live_migrate)
self.conn.live_migration(None, None, None, None, None)
self.assertTrue(fake_live_migrate.called)
def test_pre_live_migration(self):
# ensure method is present
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
self.conn.pre_live_migration(None, None, None, None, None)
def test_post_live_migration_at_destination(self):
# ensure method is present
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
fake_instance = {"name": "name"}
fake_network_info = "network_info"
def fake_fw(instance, network_info):
self.assertEqual(instance, fake_instance)
self.assertEqual(network_info, fake_network_info)
fake_fw.call_count += 1
def fake_create_kernel_and_ramdisk(context, session, instance,
name_label):
return "fake-kernel-file", "fake-ramdisk-file"
fake_fw.call_count = 0
_vmops = self.conn._vmops
self.stubs.Set(_vmops.firewall_driver,
'setup_basic_filtering', fake_fw)
self.stubs.Set(_vmops.firewall_driver,
'prepare_instance_filter', fake_fw)
self.stubs.Set(_vmops.firewall_driver,
'apply_instance_filter', fake_fw)
self.stubs.Set(vm_utils, "create_kernel_and_ramdisk",
fake_create_kernel_and_ramdisk)
def fake_get_vm_opaque_ref(instance):
fake_get_vm_opaque_ref.called = True
self.stubs.Set(_vmops, "_get_vm_opaque_ref", fake_get_vm_opaque_ref)
fake_get_vm_opaque_ref.called = False
def fake_strip_base_mirror_from_vdis(session, vm_ref):
fake_strip_base_mirror_from_vdis.called = True
self.stubs.Set(vm_utils, "strip_base_mirror_from_vdis",
fake_strip_base_mirror_from_vdis)
fake_strip_base_mirror_from_vdis.called = False
self.conn.post_live_migration_at_destination(None, fake_instance,
fake_network_info, None)
self.assertEqual(fake_fw.call_count, 3)
self.assertTrue(fake_get_vm_opaque_ref.called)
self.assertTrue(fake_strip_base_mirror_from_vdis.called)
def test_check_can_live_migrate_destination_with_block_migration(self):
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
self.stubs.Set(vm_utils, "safe_find_sr", lambda _x: "asdf")
expected = {'block_migration': True,
'migrate_data': {
'migrate_send_data': "fake_migrate_data",
'destination_sr_ref': 'asdf'
}
}
result = self.conn.check_can_live_migrate_destination(self.context,
{'host': 'host'},
{}, {},
True, False)
self.assertEqual(expected, result)
def test_check_live_migrate_destination_verifies_ip(self):
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
for pif_ref in xenapi_fake.get_all('PIF'):
pif_rec = xenapi_fake.get_record('PIF', pif_ref)
pif_rec['IP'] = ''
pif_rec['IPv6'] = ''
self.stubs.Set(vm_utils, "safe_find_sr", lambda _x: "asdf")
self.assertRaises(exception.MigrationError,
self.conn.check_can_live_migrate_destination,
self.context, {'host': 'host'},
{}, {},
True, False)
def test_check_can_live_migrate_destination_block_migration_fails(self):
stubs.stubout_session(self.stubs,
stubs.FakeSessionForFailedMigrateTests)
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
self.assertRaises(exception.MigrationError,
self.conn.check_can_live_migrate_destination,
self.context, {'host': 'host'},
{}, {},
True, False)
def _add_default_live_migrate_stubs(self, conn):
def fake_generate_vdi_map(destination_sr_ref, _vm_ref):
pass
def fake_get_iscsi_srs(destination_sr_ref, _vm_ref):
return []
def fake_get_vm_opaque_ref(instance):
return "fake_vm"
def fake_lookup_kernel_ramdisk(session, vm):
return ("fake_PV_kernel", "fake_PV_ramdisk")
self.stubs.Set(conn._vmops, "_generate_vdi_map",
fake_generate_vdi_map)
self.stubs.Set(conn._vmops, "_get_iscsi_srs",
fake_get_iscsi_srs)
self.stubs.Set(conn._vmops, "_get_vm_opaque_ref",
fake_get_vm_opaque_ref)
self.stubs.Set(vm_utils, "lookup_kernel_ramdisk",
fake_lookup_kernel_ramdisk)
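    # _add_default_live_migrate_stubs is shared by most of the live-migration
    # tests below: it neutralizes the pieces that would need a real host (VDI
    # map generation, iSCSI SR discovery, VM opaque-ref lookup and PV
    # kernel/ramdisk lookup) so each test can focus on a single path through
    # live_migration or the check_can_live_migrate_* methods.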
def test_check_can_live_migrate_source_with_block_migrate(self):
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
self._add_default_live_migrate_stubs(self.conn)
dest_check_data = {'block_migration': True,
'migrate_data': {
'destination_sr_ref': None,
'migrate_send_data': None
}}
result = self.conn.check_can_live_migrate_source(self.context,
{'host': 'host'},
dest_check_data)
self.assertEqual(dest_check_data, result)
def test_check_can_live_migrate_source_with_block_migrate_iscsi(self):
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
self._add_default_live_migrate_stubs(self.conn)
def fake_get_iscsi_srs(destination_sr_ref, _vm_ref):
return ['sr_ref']
self.stubs.Set(self.conn._vmops, "_get_iscsi_srs",
fake_get_iscsi_srs)
def fake_make_plugin_call(plugin, method, **args):
return "true"
self.stubs.Set(self.conn._vmops, "_make_plugin_call",
fake_make_plugin_call)
dest_check_data = {'block_migration': True,
'migrate_data': {
'destination_sr_ref': None,
'migrate_send_data': None
}}
result = self.conn.check_can_live_migrate_source(self.context,
{'host': 'host'},
dest_check_data)
self.assertEqual(dest_check_data, result)
def test_check_can_live_migrate_source_with_block_iscsi_fails(self):
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
self._add_default_live_migrate_stubs(self.conn)
def fake_get_iscsi_srs(destination_sr_ref, _vm_ref):
return ['sr_ref']
self.stubs.Set(self.conn._vmops, "_get_iscsi_srs",
fake_get_iscsi_srs)
def fake_make_plugin_call(plugin, method, **args):
return {'returncode': 'error', 'message': 'Plugin not found'}
self.stubs.Set(self.conn._vmops, "_make_plugin_call",
fake_make_plugin_call)
dest_check_data = {'block_migration': True,
'migrate_data': {
'destination_sr_ref': None,
'migrate_send_data': None
}}
self.assertRaises(exception.MigrationError,
self.conn.check_can_live_migrate_source,
self.context, {'host': 'host'},
                          dest_check_data)
def test_check_can_live_migrate_source_with_block_migrate_fails(self):
stubs.stubout_session(self.stubs,
stubs.FakeSessionForFailedMigrateTests)
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
self._add_default_live_migrate_stubs(self.conn)
dest_check_data = {'block_migration': True,
'migrate_data': {
'destination_sr_ref': None,
'migrate_send_data': None
}}
self.assertRaises(exception.MigrationError,
self.conn.check_can_live_migrate_source,
self.context,
{'host': 'host'},
dest_check_data)
def test_check_can_live_migrate_works(self):
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
def fake_aggregate_get_by_host(context, host, key=None):
self.assertEqual(CONF.host, host)
return [dict(test_aggregate.fake_aggregate,
metadetails={"host": "test_host_uuid"})]
self.stubs.Set(db, "aggregate_get_by_host",
fake_aggregate_get_by_host)
self.conn.check_can_live_migrate_destination(self.context,
{'host': 'host'}, False, False)
def test_check_can_live_migrate_fails(self):
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
def fake_aggregate_get_by_host(context, host, key=None):
self.assertEqual(CONF.host, host)
return [dict(test_aggregate.fake_aggregate,
metadetails={"dest_other": "test_host_uuid"})]
self.stubs.Set(db, "aggregate_get_by_host",
fake_aggregate_get_by_host)
self.assertRaises(exception.MigrationError,
self.conn.check_can_live_migrate_destination,
self.context, {'host': 'host'}, None, None)
def test_live_migration(self):
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
def fake_get_vm_opaque_ref(instance):
return "fake_vm"
self.stubs.Set(self.conn._vmops, "_get_vm_opaque_ref",
fake_get_vm_opaque_ref)
def fake_get_host_opaque_ref(context, destination_hostname):
return "fake_host"
self.stubs.Set(self.conn._vmops, "_get_host_opaque_ref",
fake_get_host_opaque_ref)
def post_method(context, instance, destination_hostname,
block_migration, migrate_data):
post_method.called = True
self.conn.live_migration(self.conn, None, None, post_method, None)
self.assertTrue(post_method.called, "post_method.called")
def test_live_migration_on_failure(self):
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
def fake_get_vm_opaque_ref(instance):
return "fake_vm"
self.stubs.Set(self.conn._vmops, "_get_vm_opaque_ref",
fake_get_vm_opaque_ref)
def fake_get_host_opaque_ref(context, destination_hostname):
return "fake_host"
self.stubs.Set(self.conn._vmops, "_get_host_opaque_ref",
fake_get_host_opaque_ref)
def fake_call_xenapi(*args):
raise NotImplementedError()
self.stubs.Set(self.conn._vmops._session, "call_xenapi",
fake_call_xenapi)
def recover_method(context, instance, destination_hostname,
block_migration):
recover_method.called = True
self.assertRaises(NotImplementedError, self.conn.live_migration,
self.conn, None, None, None, recover_method)
self.assertTrue(recover_method.called, "recover_method.called")
def test_live_migration_calls_post_migration(self):
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
self._add_default_live_migrate_stubs(self.conn)
def post_method(context, instance, destination_hostname,
block_migration, migrate_data):
post_method.called = True
# pass block_migration = True and migrate data
migrate_data = {"destination_sr_ref": "foo",
"migrate_send_data": "bar"}
self.conn.live_migration(self.conn, None, None, post_method, None,
True, migrate_data)
self.assertTrue(post_method.called, "post_method.called")
def test_live_migration_block_cleans_srs(self):
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
self._add_default_live_migrate_stubs(self.conn)
def fake_get_iscsi_srs(context, instance):
return ['sr_ref']
self.stubs.Set(self.conn._vmops, "_get_iscsi_srs",
fake_get_iscsi_srs)
def fake_forget_sr(context, instance):
fake_forget_sr.called = True
self.stubs.Set(volume_utils, "forget_sr",
fake_forget_sr)
def post_method(context, instance, destination_hostname,
block_migration, migrate_data):
post_method.called = True
migrate_data = {"destination_sr_ref": "foo",
"migrate_send_data": "bar"}
self.conn.live_migration(self.conn, None, None, post_method, None,
True, migrate_data)
self.assertTrue(post_method.called, "post_method.called")
self.assertTrue(fake_forget_sr.called, "forget_sr.called")
def test_live_migration_with_block_migration_raises_invalid_param(self):
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
self._add_default_live_migrate_stubs(self.conn)
def recover_method(context, instance, destination_hostname,
block_migration):
recover_method.called = True
# pass block_migration = True and no migrate data
self.assertRaises(exception.InvalidParameterValue,
self.conn.live_migration, self.conn,
None, None, None, recover_method, True, None)
self.assertTrue(recover_method.called, "recover_method.called")
def test_live_migration_with_block_migration_fails_migrate_send(self):
stubs.stubout_session(self.stubs,
stubs.FakeSessionForFailedMigrateTests)
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
self._add_default_live_migrate_stubs(self.conn)
def recover_method(context, instance, destination_hostname,
block_migration):
recover_method.called = True
# pass block_migration = True and migrate data
migrate_data = dict(destination_sr_ref='foo', migrate_send_data='bar')
self.assertRaises(exception.MigrationError,
self.conn.live_migration, self.conn,
None, None, None, recover_method, True, migrate_data)
self.assertTrue(recover_method.called, "recover_method.called")
def test_live_migrate_block_migration_xapi_call_parameters(self):
fake_vdi_map = object()
class Session(xenapi_fake.SessionBase):
def VM_migrate_send(self_, session, vmref, migrate_data, islive,
vdi_map, vif_map, options):
self.assertEqual('SOMEDATA', migrate_data)
self.assertEqual(fake_vdi_map, vdi_map)
stubs.stubout_session(self.stubs, Session)
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
self._add_default_live_migrate_stubs(conn)
def fake_generate_vdi_map(destination_sr_ref, _vm_ref):
return fake_vdi_map
self.stubs.Set(conn._vmops, "_generate_vdi_map",
fake_generate_vdi_map)
def dummy_callback(*args, **kwargs):
pass
conn.live_migration(
self.context, instance_ref=dict(name='ignore'), dest=None,
post_method=dummy_callback, recover_method=dummy_callback,
block_migration="SOMEDATA",
migrate_data=dict(migrate_send_data='SOMEDATA',
destination_sr_ref="TARGET_SR_OPAQUE_REF"))
def test_live_migrate_pool_migration_xapi_call_parameters(self):
class Session(xenapi_fake.SessionBase):
def VM_pool_migrate(self_, session, vm_ref, host_ref, options):
self.assertEqual("fake_ref", host_ref)
self.assertEqual({"live": "true"}, options)
raise IOError()
stubs.stubout_session(self.stubs, Session)
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
self._add_default_live_migrate_stubs(conn)
def fake_get_host_opaque_ref(context, destination):
return "fake_ref"
self.stubs.Set(conn._vmops, "_get_host_opaque_ref",
fake_get_host_opaque_ref)
def dummy_callback(*args, **kwargs):
pass
self.assertRaises(IOError, conn.live_migration,
self.context, instance_ref=dict(name='ignore'), dest=None,
post_method=dummy_callback, recover_method=dummy_callback,
block_migration=False, migrate_data={})
def test_generate_vdi_map(self):
stubs.stubout_session(self.stubs, xenapi_fake.SessionBase)
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
vm_ref = "fake_vm_ref"
def fake_find_sr(_session):
self.assertEqual(conn._session, _session)
return "source_sr_ref"
self.stubs.Set(vm_utils, "safe_find_sr", fake_find_sr)
def fake_get_instance_vdis_for_sr(_session, _vm_ref, _sr_ref):
self.assertEqual(conn._session, _session)
self.assertEqual(vm_ref, _vm_ref)
self.assertEqual("source_sr_ref", _sr_ref)
return ["vdi0", "vdi1"]
self.stubs.Set(vm_utils, "get_instance_vdis_for_sr",
fake_get_instance_vdis_for_sr)
result = conn._vmops._generate_vdi_map("dest_sr_ref", vm_ref)
self.assertEqual({"vdi0": "dest_sr_ref",
"vdi1": "dest_sr_ref"}, result)
def test_rollback_live_migration_at_destination(self):
stubs.stubout_session(self.stubs, xenapi_fake.SessionBase)
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
with mock.patch.object(conn, "destroy") as mock_destroy:
conn.rollback_live_migration_at_destination("context",
"instance", [], None)
self.assertFalse(mock_destroy.called)
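# In XenAPIInjectMetadataTestCase below, self.xenstore models the two places the
# driver writes metadata to: 'persist' stands in for the VM's param xenstore
# (written through _add_to_param_xenstore) and 'ephem' for live xenstore writes
# (written through _write_to_xenstore), so each test can assert on both at once.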
class XenAPIInjectMetadataTestCase(stubs.XenAPITestBaseNoDB):
def setUp(self):
super(XenAPIInjectMetadataTestCase, self).setUp()
self.flags(connection_url='test_url',
connection_password='test_pass',
group='xenserver')
self.flags(firewall_driver='nova.virt.xenapi.firewall.'
'Dom0IptablesFirewallDriver')
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
self.conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
self.xenstore = dict(persist={}, ephem={})
self.called_fake_get_vm_opaque_ref = False
def fake_get_vm_opaque_ref(inst, instance):
self.called_fake_get_vm_opaque_ref = True
if instance["uuid"] == "not_found":
raise exception.NotFound
self.assertEqual(instance, {'uuid': 'fake'})
return 'vm_ref'
def fake_add_to_param_xenstore(inst, vm_ref, key, val):
self.assertEqual(vm_ref, 'vm_ref')
self.xenstore['persist'][key] = val
def fake_remove_from_param_xenstore(inst, vm_ref, key):
self.assertEqual(vm_ref, 'vm_ref')
if key in self.xenstore['persist']:
del self.xenstore['persist'][key]
def fake_write_to_xenstore(inst, instance, path, value, vm_ref=None):
self.assertEqual(instance, {'uuid': 'fake'})
self.assertEqual(vm_ref, 'vm_ref')
self.xenstore['ephem'][path] = jsonutils.dumps(value)
def fake_delete_from_xenstore(inst, instance, path, vm_ref=None):
self.assertEqual(instance, {'uuid': 'fake'})
self.assertEqual(vm_ref, 'vm_ref')
if path in self.xenstore['ephem']:
del self.xenstore['ephem'][path]
self.stubs.Set(vmops.VMOps, '_get_vm_opaque_ref',
fake_get_vm_opaque_ref)
self.stubs.Set(vmops.VMOps, '_add_to_param_xenstore',
fake_add_to_param_xenstore)
self.stubs.Set(vmops.VMOps, '_remove_from_param_xenstore',
fake_remove_from_param_xenstore)
self.stubs.Set(vmops.VMOps, '_write_to_xenstore',
fake_write_to_xenstore)
self.stubs.Set(vmops.VMOps, '_delete_from_xenstore',
fake_delete_from_xenstore)
def test_inject_instance_metadata(self):
# Add some system_metadata to ensure it doesn't get added
# to xenstore
instance = dict(metadata=[{'key': 'a', 'value': 1},
{'key': 'b', 'value': 2},
{'key': 'c', 'value': 3},
# Check xenstore key sanitizing
{'key': 'hi.there', 'value': 4},
{'key': 'hi!t.e/e', 'value': 5}],
# Check xenstore key sanitizing
system_metadata=[{'key': 'sys_a', 'value': 1},
{'key': 'sys_b', 'value': 2},
{'key': 'sys_c', 'value': 3}],
uuid='fake')
self.conn._vmops._inject_instance_metadata(instance, 'vm_ref')
self.assertEqual(self.xenstore, {
'persist': {
'vm-data/user-metadata/a': '1',
'vm-data/user-metadata/b': '2',
'vm-data/user-metadata/c': '3',
'vm-data/user-metadata/hi_there': '4',
'vm-data/user-metadata/hi_t_e_e': '5',
},
'ephem': {},
})
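    # Judging by the expected keys above, user metadata keys are sanitized before
    # being written under vm-data/user-metadata/: '.' and other unsafe characters
    # appear to be replaced with '_' ('hi.there' -> 'hi_there', 'hi!t.e/e' ->
    # 'hi_t_e_e'), and system_metadata is not written to the xenstore at all.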
def test_change_instance_metadata_add(self):
# Test XenStore key sanitizing here, too.
diff = {'test.key': ['+', 4]}
instance = {'uuid': 'fake'}
self.xenstore = {
'persist': {
'vm-data/user-metadata/a': '1',
'vm-data/user-metadata/b': '2',
'vm-data/user-metadata/c': '3',
},
'ephem': {
'vm-data/user-metadata/a': '1',
'vm-data/user-metadata/b': '2',
'vm-data/user-metadata/c': '3',
},
}
self.conn._vmops.change_instance_metadata(instance, diff)
self.assertEqual(self.xenstore, {
'persist': {
'vm-data/user-metadata/a': '1',
'vm-data/user-metadata/b': '2',
'vm-data/user-metadata/c': '3',
'vm-data/user-metadata/test_key': '4',
},
'ephem': {
'vm-data/user-metadata/a': '1',
'vm-data/user-metadata/b': '2',
'vm-data/user-metadata/c': '3',
'vm-data/user-metadata/test_key': '4',
},
})
def test_change_instance_metadata_update(self):
diff = dict(b=['+', 4])
instance = {'uuid': 'fake'}
self.xenstore = {
'persist': {
'vm-data/user-metadata/a': '1',
'vm-data/user-metadata/b': '2',
'vm-data/user-metadata/c': '3',
},
'ephem': {
'vm-data/user-metadata/a': '1',
'vm-data/user-metadata/b': '2',
'vm-data/user-metadata/c': '3',
},
}
self.conn._vmops.change_instance_metadata(instance, diff)
self.assertEqual(self.xenstore, {
'persist': {
'vm-data/user-metadata/a': '1',
'vm-data/user-metadata/b': '4',
'vm-data/user-metadata/c': '3',
},
'ephem': {
'vm-data/user-metadata/a': '1',
'vm-data/user-metadata/b': '4',
'vm-data/user-metadata/c': '3',
},
})
def test_change_instance_metadata_delete(self):
diff = dict(b=['-'])
instance = {'uuid': 'fake'}
self.xenstore = {
'persist': {
'vm-data/user-metadata/a': '1',
'vm-data/user-metadata/b': '2',
'vm-data/user-metadata/c': '3',
},
'ephem': {
'vm-data/user-metadata/a': '1',
'vm-data/user-metadata/b': '2',
'vm-data/user-metadata/c': '3',
},
}
self.conn._vmops.change_instance_metadata(instance, diff)
self.assertEqual(self.xenstore, {
'persist': {
'vm-data/user-metadata/a': '1',
'vm-data/user-metadata/c': '3',
},
'ephem': {
'vm-data/user-metadata/a': '1',
'vm-data/user-metadata/c': '3',
},
})
def test_change_instance_metadata_not_found(self):
instance = {'uuid': 'not_found'}
self.conn._vmops.change_instance_metadata(instance, "fake_diff")
self.assertTrue(self.called_fake_get_vm_opaque_ref)
class XenAPISessionTestCase(test.NoDBTestCase):
def _get_mock_xapisession(self, software_version):
class MockXapiSession(xenapi_session.XenAPISession):
def __init__(_ignore):
"Skip the superclass's dirty init"
def _get_software_version(_ignore):
return software_version
return MockXapiSession()
def test_local_session(self):
session = self._get_mock_xapisession({})
session.is_local_connection = True
session.XenAPI = self.mox.CreateMockAnything()
session.XenAPI.xapi_local().AndReturn("local_connection")
self.mox.ReplayAll()
self.assertEqual("local_connection",
session._create_session("unix://local"))
def test_remote_session(self):
session = self._get_mock_xapisession({})
session.is_local_connection = False
session.XenAPI = self.mox.CreateMockAnything()
session.XenAPI.Session("url").AndReturn("remote_connection")
self.mox.ReplayAll()
self.assertEqual("remote_connection", session._create_session("url"))
def test_get_product_version_product_brand_does_not_fail(self):
session = self._get_mock_xapisession({
'build_number': '0',
'date': '2012-08-03',
'hostname': 'komainu',
'linux': '3.2.0-27-generic',
'network_backend': 'bridge',
'platform_name': 'XCP_Kronos',
'platform_version': '1.6.0',
'xapi': '1.3',
'xen': '4.1.2',
'xencenter_max': '1.10',
'xencenter_min': '1.10'
})
self.assertEqual(
((1, 6, 0), None),
session._get_product_version_and_brand()
)
def test_get_product_version_product_brand_xs_6(self):
session = self._get_mock_xapisession({
'product_brand': 'XenServer',
'product_version': '6.0.50',
'platform_version': '0.0.1'
})
self.assertEqual(
((6, 0, 50), 'XenServer'),
session._get_product_version_and_brand()
)
def test_verify_plugin_version_same(self):
session = self._get_mock_xapisession({})
session.PLUGIN_REQUIRED_VERSION = '2.4'
self.mox.StubOutWithMock(session, 'call_plugin_serialized')
session.call_plugin_serialized('nova_plugin_version', 'get_version',
).AndReturn("2.4")
self.mox.ReplayAll()
session._verify_plugin_version()
def test_verify_plugin_version_compatible(self):
session = self._get_mock_xapisession({})
session.XenAPI = xenapi_fake.FakeXenAPI()
session.PLUGIN_REQUIRED_VERSION = '2.4'
self.mox.StubOutWithMock(session, 'call_plugin_serialized')
session.call_plugin_serialized('nova_plugin_version', 'get_version',
).AndReturn("2.5")
self.mox.ReplayAll()
session._verify_plugin_version()
def test_verify_plugin_version_bad_maj(self):
session = self._get_mock_xapisession({})
session.XenAPI = xenapi_fake.FakeXenAPI()
session.PLUGIN_REQUIRED_VERSION = '2.4'
self.mox.StubOutWithMock(session, 'call_plugin_serialized')
session.call_plugin_serialized('nova_plugin_version', 'get_version',
).AndReturn("3.0")
self.mox.ReplayAll()
self.assertRaises(xenapi_fake.Failure, session._verify_plugin_version)
def test_verify_plugin_version_bad_min(self):
session = self._get_mock_xapisession({})
session.XenAPI = xenapi_fake.FakeXenAPI()
session.PLUGIN_REQUIRED_VERSION = '2.4'
self.mox.StubOutWithMock(session, 'call_plugin_serialized')
session.call_plugin_serialized('nova_plugin_version', 'get_version',
).AndReturn("2.3")
self.mox.ReplayAll()
self.assertRaises(xenapi_fake.Failure, session._verify_plugin_version)
def test_verify_current_version_matches(self):
session = self._get_mock_xapisession({})
# Import the plugin to extract its version
path = os.path.dirname(__file__)
rel_path_elem = "../../../../plugins/xenserver/xenapi/etc/xapi.d/" \
"plugins/nova_plugin_version"
for elem in rel_path_elem.split('/'):
path = os.path.join(path, elem)
path = os.path.realpath(path)
plugin_version = None
with open(path) as plugin_file:
for line in plugin_file:
if "PLUGIN_VERSION = " in line:
plugin_version = line.strip()[17:].strip('"')
self.assertEqual(session.PLUGIN_REQUIRED_VERSION,
plugin_version)
class XenAPIFakeTestCase(test.NoDBTestCase):
def test_query_matches(self):
record = {'a': '1', 'b': '2', 'c_d': '3'}
tests = {'field "a"="1"': True,
'field "b"="2"': True,
'field "b"="4"': False,
'not field "b"="4"': True,
'field "a"="1" and field "b"="4"': False,
'field "a"="1" or field "b"="4"': True,
'field "c__d"="3"': True,
'field \'b\'=\'2\'': True,
}
for query in tests.keys():
expected = tests[query]
fail_msg = "for test '%s'" % query
self.assertEqual(xenapi_fake._query_matches(record, query),
expected, fail_msg)
def test_query_bad_format(self):
record = {'a': '1', 'b': '2', 'c': '3'}
tests = ['"a"="1" or "b"="4"',
'a=1',
]
for query in tests:
fail_msg = "for test '%s'" % query
self.assertFalse(xenapi_fake._query_matches(record, query),
fail_msg)
| {
"content_hash": "b3cd0d7a3c2d5a3ebeab4aa1e4b45fb5",
"timestamp": "",
"source": "github",
"line_count": 4041,
"max_line_length": 79,
"avg_line_length": 42.33754021281861,
"alnum_prop": 0.5632255123154437,
"repo_name": "ycl2045/nova-master",
"id": "61ecbdd4b35ad4bc6276fe3c4972a7f5d3105dde",
"size": "171706",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nova/tests/virt/xenapi/test_xenapi.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Perl",
"bytes": "2035"
},
{
"name": "Python",
"bytes": "13677408"
},
{
"name": "R",
"bytes": "7817"
},
{
"name": "Ruby",
"bytes": "851"
},
{
"name": "Shell",
"bytes": "14571"
}
],
"symlink_target": ""
} |
import collections
import contextlib
import os
import re
import shlex
import shutil
import string
import subprocess
import textwrap
from bloggertool.str_util import Template, NamespaceFormatter
class Executor(object):
def __init__(self, blog_cmd, regr_folder):
self.blog_cmd = blog_cmd
self.regr_folder = os.path.abspath(regr_folder)
self.cwd = [regr_folder]
self.out = None
self.retcode = None
@contextlib.contextmanager
def cd(self, arg):
self.cwd.append(os.path.abspath(os.path.join(self.cwd[-1], arg)))
yield
self.cwd.pop()
def get_cwd(self):
return self.cwd[-1]
def mkdir(self, arg):
os.makedirs(os.path.join(self.cwd[-1], arg))
def rmtree(self):
folder = self.cwd[-1]
if os.path.exists(folder):
shutil.rmtree(folder)
def write(self, fname, text):
with open(self.full_name(fname), 'wb') as f:
f.write(text)
def read(self, fname):
with open(self.full_name(fname), 'rb') as f:
return f.read()
def full_name(self, fname):
return os.path.join(self.cwd[-1], fname)
def go(self, args, retcode=0):
"""run blog with args, return (retcode, stdout)"""
args_list = shlex.split(args)
proc = subprocess.Popen([self.blog_cmd] + args_list,
cwd=self.get_cwd(),
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
self.out, err = proc.communicate()
self.retcode = proc.returncode
if retcode is not None:
if retcode != self.retcode:
raise RuntimeError("RETCODE %s, EXPECTED %s\n%s" %
(self.retcode,
retcode,
self.out))
return self.out
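# Illustrative usage of Executor (not part of the original module; the command
# name, sandbox folder and arguments below are assumptions for the example):
#
#     ex = Executor('blog', 'regression/sandbox')
#     ex.mkdir('project')
#     with ex.cd('project'):
#         out = ex.go('--help', retcode=0)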
class Q(Template):
"""Query, used for result check"""
NS = NamespaceFormatter(collections.defaultdict(lambda: None))
def format(__self, *__args, **__kwargs):
return Q(__self.NS.vformat(__self, __args, __kwargs))
def eq(self, other, strip=True):
if strip:
other = other.strip()
if str(self) != str(other):
raise RuntimeError(
'Not Equals\n%s\n----------- != ------------\n%s'
% (self, other))
def __eq__(self, other):
return self.eq(other)
def __ne__(self, other):
return not self.eq(other)
def match(self, test, strip=True):
if strip:
test = test.strip()
match = re.match(self, test, re.M)
if match is None:
raise RuntimeError("%s\n doesn't match pattern\n%s" % (test, self))
return Match(match)
def ifind(self, test, strip=True):
if strip:
test = test.strip()
return (Match(m) for m in re.finditer(self, test, re.M))
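# Illustrative checks (not from the original file; the literal strings are
# assumed and Template is assumed to be string-like). Q.eq compares exact
# text, while Q.match treats the Q instance as a regular expression:
#
#     Q('expected output').eq(actual_output)                   # raises on mismatch
#     Q(r'^post (?P<name>\w+)$').match('post hello')['name']   # -> 'hello'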
class Match(object):
def __init__(self, match):
self.match = match
def check(self, **kwargs):
groups = self.match.groupdict()
        for name, val in kwargs.items():
            group = groups[name]
Q(val) == group
def __getitem__(self, key):
return self.match.groupdict()[key]
| {
"content_hash": "19418c5b24dacc32b4f04b2bc7ffd2a7",
"timestamp": "",
"source": "github",
"line_count": 114,
"max_line_length": 79,
"avg_line_length": 28.728070175438596,
"alnum_prop": 0.5383206106870229,
"repo_name": "asvetlov/bloggertool",
"id": "ceade30a41594daf1d8852a5e1de668f476912c7",
"size": "3275",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "regression/util.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "147950"
}
],
"symlink_target": ""
} |
"""
Run Regression Test Suite
This module calls down into individual test cases via subprocess. It will
forward all unrecognized arguments onto the individual test scripts, other
than:
- `-extended`: run the "extended" test suite in addition to the basic one.
- `-win`: signal that this is running in a Windows environment, and we
should run the tests.
- `--coverage`: this generates a basic coverage report for the RPC
interface.
For a description of arguments recognized by test scripts, see
`qa/pull-tester/test_framework/test_framework.py:BitcoinTestFramework.main`.
"""
import os
import time
import shutil
import sys
import subprocess
import tempfile
import re
sys.path.append("qa/pull-tester/")
from tests_config import *
BOLD = ("","")
if os.name == 'posix':
# primitive formatting on supported
# terminal via ANSI escape sequences:
BOLD = ('\033[0m', '\033[1m')
RPC_TESTS_DIR = SRCDIR + '/qa/rpc-tests/'
#If imported values are not defined then set to zero (or disabled)
if 'ENABLE_WALLET' not in vars():
ENABLE_WALLET=0
if 'ENABLE_BITCOIND' not in vars():
ENABLE_BITCOIND=0
if 'ENABLE_UTILS' not in vars():
ENABLE_UTILS=0
if 'ENABLE_ZMQ' not in vars():
ENABLE_ZMQ=0
ENABLE_COVERAGE=0
#Create a set to store arguments and create the passon string
opts = set()
passon_args = []
PASSON_REGEX = re.compile("^--")
PARALLEL_REGEX = re.compile('^-parallel=')
print_help = False
run_parallel = 4
for arg in sys.argv[1:]:
if arg == "--help" or arg == "-h" or arg == "-?":
print_help = True
break
if arg == '--coverage':
ENABLE_COVERAGE = 1
elif PASSON_REGEX.match(arg):
passon_args.append(arg)
elif PARALLEL_REGEX.match(arg):
run_parallel = int(arg.split(sep='=', maxsplit=1)[1])
else:
opts.add(arg)
#Set env vars
if "BITCOIND" not in os.environ:
os.environ["ELEMENTSD"] = BUILDDIR+ '/src/elementsd' + EXEEXT
if "BITCOINCLI" not in os.environ:
os.environ["BITCOINCLI"] = BUILDDIR + '/src/bitcoin-cli' + EXEEXT
if EXEEXT == ".exe" and "-win" not in opts:
# https://github.com/bitcoin/bitcoin/commit/d52802551752140cf41f0d9a225a43e84404d3e9
# https://github.com/bitcoin/bitcoin/pull/5677#issuecomment-136646964
print("Win tests currently disabled by default. Use -win option to enable")
sys.exit(0)
if not (ENABLE_WALLET == 1 and ENABLE_UTILS == 1 and ENABLE_BITCOIND == 1):
print("No rpc tests to run. Wallet, utils, and bitcoind must all be enabled")
sys.exit(0)
# python3-zmq may not be installed. Handle this gracefully and with some helpful info
if ENABLE_ZMQ:
try:
import zmq
except ImportError:
print("ERROR: \"import zmq\" failed. Set ENABLE_ZMQ=0 or "
"to run zmq tests, see dependency info in /qa/README.md.")
# ENABLE_ZMQ=0
raise
testScripts = [
# longest test should go first, to favor running tests in parallel
'p2p-fullblocktest.py',
#'walletbackup.py',
#'bip68-112-113-p2p.py',
'wallet.py',
#'wallet-hd.py',
#'wallet-dump.py',
'listtransactions.py',
'receivedby.py',
'mempool_resurrect_test.py',
'txn_doublespend.py --mineblock',
'txn_clone.py',
'getchaintips.py',
'rawtransactions.py',
'rest.py',
'mempool_spendcoinbase.py',
'mempool_reorg.py',
#'mempool_limit.py',
'httpbasics.py',
'multi_rpc.py',
'zapwallettxes.py',
'proxy_test.py',
'merkle_blocks.py',
'fundrawtransaction.py',
'signrawtransactions.py',
'nodehandling.py',
'reindex.py',
'decodescript.py',
#'blockchain.py',
#'disablewallet.py',
'sendheaders.py',
'keypool.py',
'prioritise_transaction.py',
'invalidblockrequest.py',
'invalidtxrequest.py',
'abandonconflict.py',
'confidential_transactions.py',
#'p2p-versionbits-warning.py',
#'p2p-segwit.py',
'segwit.py',
#'importprunedfunds.py',
'signmessages.py',
#'p2p-compactblocks.py',
#'nulldummy.py',
]
if ENABLE_ZMQ:
testScripts.append('zmq_test.py')
testScriptsExt = [
'bip9-softforks.py',
'bip65-cltv.py',
'bip65-cltv-p2p.py',
'bip68-sequence.py',
'bipdersig-p2p.py',
'bipdersig.py',
'getblocktemplate_longpoll.py',
'getblocktemplate_proposals.py',
'txn_doublespend.py',
'txn_clone.py --mineblock',
'forknotify.py',
'invalidateblock.py',
'rpcbind_test.py',
#'smartfees.py',
'maxblocksinflight.py',
'p2p-acceptblock.py',
'mempool_packages.py',
'maxuploadtarget.py',
'replace-by-fee.py',
'p2p-feefilter.py',
'pruning.py', # leave pruning last as it takes a REALLY long time
]
def runtests():
test_list = []
if '-extended' in opts:
test_list = testScripts + testScriptsExt
elif len(opts) == 0 or (len(opts) == 1 and "-win" in opts):
test_list = testScripts
else:
for t in testScripts + testScriptsExt:
if t in opts or re.sub(".py$", "", t) in opts:
test_list.append(t)
if print_help:
# Only print help of the first script and exit
subprocess.check_call((RPC_TESTS_DIR + test_list[0]).split() + ['-h'])
sys.exit(0)
coverage = None
if ENABLE_COVERAGE:
coverage = RPCCoverage()
print("Initializing coverage directory at %s\n" % coverage.dir)
flags = ["--srcdir=%s/src" % BUILDDIR] + passon_args
if coverage:
flags.append(coverage.flag)
if len(test_list) > 1 and run_parallel > 1:
# Populate cache
subprocess.check_output([RPC_TESTS_DIR + 'create_cache.py'] + flags)
#Run Tests
max_len_name = len(max(test_list, key=len))
time_sum = 0
time0 = time.time()
job_queue = RPCTestHandler(run_parallel, test_list, flags)
results = BOLD[1] + "%s | %s | %s\n\n" % ("TEST".ljust(max_len_name), "PASSED", "DURATION") + BOLD[0]
all_passed = True
for _ in range(len(test_list)):
(name, stdout, stderr, passed, duration) = job_queue.get_next()
all_passed = all_passed and passed
time_sum += duration
print('\n' + BOLD[1] + name + BOLD[0] + ":")
print(stdout)
print('stderr:\n' if not stderr == '' else '', stderr)
results += "%s | %s | %s s\n" % (name.ljust(max_len_name), str(passed).ljust(6), duration)
print("Pass: %s%s%s, Duration: %s s\n" % (BOLD[1], passed, BOLD[0], duration))
results += BOLD[1] + "\n%s | %s | %s s (accumulated)" % ("ALL".ljust(max_len_name), str(all_passed).ljust(6), time_sum) + BOLD[0]
print(results)
print("\nRuntime: %s s" % (int(time.time() - time0)))
if coverage:
coverage.report_rpc_coverage()
print("Cleaning up coverage data")
coverage.cleanup()
sys.exit(not all_passed)
class RPCTestHandler:
"""
    Trigger the test scripts passed in via the list.
"""
def __init__(self, num_tests_parallel, test_list=None, flags=None):
assert(num_tests_parallel >= 1)
self.num_jobs = num_tests_parallel
self.test_list = test_list
self.flags = flags
self.num_running = 0
self.jobs = []
def get_next(self):
while self.num_running < self.num_jobs and self.test_list:
# Add tests
self.num_running += 1
t = self.test_list.pop(0)
port_seed = ["--portseed=%s" % len(self.test_list)]
log_stdout = tempfile.SpooledTemporaryFile(max_size=2**16)
log_stderr = tempfile.SpooledTemporaryFile(max_size=2**16)
self.jobs.append((t,
time.time(),
subprocess.Popen((RPC_TESTS_DIR + t).split() + self.flags + port_seed,
universal_newlines=True,
stdout=log_stdout,
stderr=log_stderr),
log_stdout,
log_stderr))
if not self.jobs:
raise IndexError('pop from empty list')
while True:
# Return first proc that finishes
time.sleep(.5)
for j in self.jobs:
(name, time0, proc, log_out, log_err) = j
if proc.poll() is not None:
log_out.seek(0), log_err.seek(0)
[stdout, stderr] = [l.read().decode('utf-8') for l in (log_out, log_err)]
log_out.close(), log_err.close()
passed = stderr == "" and proc.returncode == 0
self.num_running -= 1
self.jobs.remove(j)
return name, stdout, stderr, passed, int(time.time() - time0)
print('.', end='', flush=True)
class RPCCoverage(object):
"""
Coverage reporting utilities for pull-tester.
Coverage calculation works by having each test script subprocess write
coverage files into a particular directory. These files contain the RPC
commands invoked during testing, as well as a complete listing of RPC
commands per `bitcoin-cli help` (`rpc_interface.txt`).
After all tests complete, the commands run are combined and diff'd against
the complete list to calculate uncovered RPC commands.
See also: qa/rpc-tests/test_framework/coverage.py
"""
def __init__(self):
self.dir = tempfile.mkdtemp(prefix="coverage")
self.flag = '--coveragedir=%s' % self.dir
def report_rpc_coverage(self):
"""
Print out RPC commands that were unexercised by tests.
"""
uncovered = self._get_uncovered_rpc_commands()
if uncovered:
print("Uncovered RPC commands:")
print("".join((" - %s\n" % i) for i in sorted(uncovered)))
else:
print("All RPC commands covered.")
def cleanup(self):
return shutil.rmtree(self.dir)
def _get_uncovered_rpc_commands(self):
"""
Return a set of currently untested RPC commands.
"""
        # This is shared from `qa/rpc-tests/test_framework/coverage.py`
REFERENCE_FILENAME = 'rpc_interface.txt'
COVERAGE_FILE_PREFIX = 'coverage.'
coverage_ref_filename = os.path.join(self.dir, REFERENCE_FILENAME)
coverage_filenames = set()
all_cmds = set()
covered_cmds = set()
if not os.path.isfile(coverage_ref_filename):
raise RuntimeError("No coverage reference found")
with open(coverage_ref_filename, 'r') as f:
all_cmds.update([i.strip() for i in f.readlines()])
for root, dirs, files in os.walk(self.dir):
for filename in files:
if filename.startswith(COVERAGE_FILE_PREFIX):
coverage_filenames.add(os.path.join(root, filename))
for filename in coverage_filenames:
with open(filename, 'r') as f:
covered_cmds.update([i.strip() for i in f.readlines()])
return all_cmds - covered_cmds
if __name__ == '__main__':
runtests()
| {
"content_hash": "0b08801def1aabb96f72c499a81ec489",
"timestamp": "",
"source": "github",
"line_count": 346,
"max_line_length": 133,
"avg_line_length": 32.115606936416185,
"alnum_prop": 0.5971022318214543,
"repo_name": "martindale/elements",
"id": "4631983e8a581a58c3ea772d6f23cb66123790d4",
"size": "11327",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "qa/pull-tester/rpc-tests.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "755016"
},
{
"name": "C++",
"bytes": "4847348"
},
{
"name": "CSS",
"bytes": "1127"
},
{
"name": "HTML",
"bytes": "50621"
},
{
"name": "Java",
"bytes": "2100"
},
{
"name": "M4",
"bytes": "178269"
},
{
"name": "Makefile",
"bytes": "103386"
},
{
"name": "Objective-C",
"bytes": "3771"
},
{
"name": "Objective-C++",
"bytes": "7239"
},
{
"name": "Protocol Buffer",
"bytes": "2328"
},
{
"name": "Python",
"bytes": "1003575"
},
{
"name": "QMake",
"bytes": "2020"
},
{
"name": "Roff",
"bytes": "3792"
},
{
"name": "Shell",
"bytes": "36006"
}
],
"symlink_target": ""
} |
from .clientInterface import generate_address
from .clientInterface import generate_transaction
from .clientInterface import send_transaction
from .client import EsseClient | {
"content_hash": "34d493f22600a47dd949ea200ba0c175",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 49,
"avg_line_length": 43,
"alnum_prop": 0.872093023255814,
"repo_name": "DigitalHills/Esse",
"id": "5f2958fccd1dffd3c41c342c14f48482152294b3",
"size": "173",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "client/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "75173"
},
{
"name": "Shell",
"bytes": "26"
}
],
"symlink_target": ""
} |
import threading
from subprocess import Popen, PIPE
import time, datetime
import shlex
import uuid
import json
import collections
import urllib2, urllib
from os.path import expanduser
from reporting.parsers import MatchParser, SplitParser, DummyParser, JsonGrepParser
from reporting.utilities import getLogger, get_hostname, init_object
from reporting.exceptions import PluginInitialisationError, RemoteServerError, MessageInvalidError
from reporting.crontab import CronEvent
log = getLogger(__name__)
class IDataSource(object):
def get_data(self, **kwargs):
assert 0, "This method must be defined."
class CommandRunner(IDataSource):
def __init__(self, cmd):
self.__cmd=cmd
def get_data(self, **kwargs):
log.debug("running cmd %s"%self.__cmd)
process = Popen(shlex.split(self.__cmd), stdout=PIPE)
pipe = process.stdout
output = ''.join(pipe.readlines()).strip()
process.wait()
return output
class FileReader(IDataSource):
def __init__(self, path):
if path.startswith("~"):
path = expanduser(path)
self.__path = path
def get_data(self, **kwargs):
log.debug("Reading file %s" % self.__path)
with open(self.__path) as f:
content = ''.join(f.readlines()).strip()
return content
class HTTPReader(IDataSource):
def __init__(self, url, headers={}, auth=None):
self.__url=url
self.__headers=headers
self.__auth=auth
def get_data(self, **kwargs):
try:
if self.__auth:
password_manager = urllib2.HTTPPasswordMgrWithDefaultRealm()
password_manager.add_password(None, self.__url, self.__auth['username'], self.__auth['password'])
auth = urllib2.HTTPBasicAuthHandler(password_manager) # create an authentication handler
opener = urllib2.build_opener(auth) # create an opener with the authentication handler
urllib2.install_opener(opener) # install the opener...
req = urllib2.Request(self.__url, None, self.__headers)
handler = urllib2.urlopen(req)
except urllib2.HTTPError as e:
log.error('response code %d' % e.code)
if e.code == 400 or e.code==500:
raise MessageInvalidError()
else:
raise RemoteServerError()
except urllib2.URLError as e:
raise Exception("Error accessing URL %s: %s" % (self.__url, e.args))
else:
# 200
response = handler.read()
log.debug("response %d %s"% (handler.code, response))
handler.close()
return response
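# Illustrative use of HTTPReader (the endpoint, headers and credentials below
# are assumptions, not values from the original project):
#
#     reader = HTTPReader('https://example.org/metrics',
#                         headers={'Accept': 'application/json'},
#                         auth={'username': 'user', 'password': 'secret'})
#     body = reader.get_data()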
class Collector(threading.Thread):
def __init__(self, collector_name, config, output, tailer=None):
threading.Thread.__init__(self, name=collector_name)
self.__collector_name=collector_name
self.__config=config
self.__sleep_time=self.__config['input'].get('frequency',10)
self.__cron=self.__config['input'].get('schedule',None)
self.__schedule=None
if self.__cron is not None:
self.__schedule=CronEvent(self.__cron)
log.debug("job scheduled at %s"%self.__schedule.numerical_tab)
self.__input=None
self.__parser=None
self.__output=output
if self.__config['input']['type']=='command':
self.__input=CommandRunner(self.__config['input']['source'])
elif self.__config['input']['type']=='file':
self.__input=FileReader(self.__config['input']['path'])
elif self.__config['input']['type']=='http':
#log.debug('input %s'%self.__config['input'])
url=self.__config['input']['url']
headers=self.__config['input'].get('headers', {})
#log.debug('headers %s'%headers)
auth=self.__config['input'].get('auth', None)
self.__input=HTTPReader(url, headers, auth)
elif self.__config['input']['type']=='class':
arguments={}
if 'arguments' in self.__config['input']:
arguments=self.__config['input']['arguments']
self.__input=init_object(self.__config['input']['name'], **arguments)
elif self.__config['input']['type']=='tailer':
if tailer is None:
raise AttributeError("Missing tailer in config file for tailer type input")
self.__input=tailer
assert(self.__input)
if 'parser' in self.__config:
if self.__config['parser']['type']=='match':
self.__parser=MatchParser(self.__config['parser']['pattern'].strip(), self.__config['parser']['transform'].strip())
elif self.__config['parser']['type']=='split':
self.__parser=SplitParser(self.__config['parser']['delimiter'].strip(), self.__config['parser']['transform'].strip())
elif self.__config['parser']['type']=='dummy':
self.__parser=DummyParser()
elif self.__config['parser']['type']=='json':
arguments={}
if 'arguments' in self.__config['parser']:
arguments=self.__config['parser']['arguments']
self.__parser=JsonGrepParser(**arguments)
elif self.__config['parser']['type']=='class':
arguments={}
if 'arguments' in self.__config['parser']:
arguments=self.__config['parser']['arguments']
self.__parser=init_object(self.__config['parser']['name'], **arguments)
self.__running=True
self.__session_id=str(uuid.uuid4())
self.__max_error_count=self.__config['input'].get('max_error_count', -1)
self.__current_data=None
self.__number_collected=0
self.__number_failed=0
self.__sleep_count=0
self.__error_count=0
self.__last_check_minute=-1
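    # Illustrative config (shape inferred from the branches above; the keys and
    # values are assumptions, and `output` is any object providing push()/close()):
    #
    #     config = {
    #         'input':  {'type': 'command', 'source': 'uptime', 'frequency': 30},
    #         'parser': {'type': 'dummy'},
    #         'metadata': {'hostname': 'node-1'},
    #     }
    #     Collector('uptime-collector', config, output).start()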
def quit(self):
self.__running=False
def info(self):
col_info={"name":self.__collector_name, "config":self.__config, "sleep_time": self.__sleep_time}
col_info["session_id"]=self.__session_id
col_info["is_running"]=self.__running
col_info["current_data"]=self.__current_data
col_info["number_collected"]=self.__number_collected
col_info["number_failed"]=self.__number_failed
col_info["sleep_count"]=self.__sleep_count
col_info["error_count"]=self.__error_count
col_info["max_error_count"]=self.__max_error_count
if self.__cron is not None:
col_info["cron"]=self.__cron
if self.__config['input']['type']=='tailer':
col_info["tailer"]=self.__input.info(self.__config['input']['path'])
return col_info
def match_time(self):
"""Return True if this event should trigger at the specified datetime"""
if self.__schedule is None:
return False
t=datetime.datetime.now()
if t.minute==self.__last_check_minute:
return False
self.__last_check_minute=t.minute
log.debug("check if cron job can be triggered. %d"%self.__last_check_minute)
return self.__schedule.check_trigger((t.year,t.month,t.day,t.hour,t.minute))
def run(self):
count = self.__sleep_time
error_count = 0
log.info("Collector %s has started.", self.__collector_name)
while self.__running:
args = {'config': self.__config['input']}
if (self.__schedule is None and count == self.__sleep_time) or self.match_time():
log.debug("Starting to collect data.")
count = 0
data = None
no_msgs = 1
try:
data = self.__input.get_data(**args)
if isinstance(data, collections.deque) or isinstance(data, list):
self.__current_data = [l.decode('ASCII', 'ignore') for l in data]
payload = []
no_msgs = len(data)
for line in data:
log.debug("Raw data: %s", line)
payload.append(self.generate_payload(str(line.decode('ASCII', 'ignore'))))
if len(payload) > 0:
self.__output.push(payload)
else:
continue
else:
# a block of data: either string to be parsed or dict
self.__current_data = data
log.debug("Raw data: %s", data)
if isinstance(data, str):
payload = self.generate_payload(str(data.decode('ASCII', 'ignore')))
else:
payload = self.generate_payload(data)
self.__output.push(payload)
except:
self.__current_data = data
log.exception('Unable to get or parse data. data: %s', data)
error_count += 1
if self.__max_error_count > 0 and error_count >= self.__max_error_count:
self.__running = False
self.__error_count = error_count
break
self.__number_failed += no_msgs
if self.__config['input']['type'] == 'tailer':
self.__input.fail(**args)
else:
error_count = 0
self.__number_collected += no_msgs
if self.__config['input']['type'] == 'tailer':
self.__input.success(**args)
self.__error_count = error_count
else:
time.sleep(1)
if self.__schedule is None:
count += 1
self.__sleep_count = count
self.__output.close()
log.info("Collector %s has stopped.", self.__collector_name)
def generate_payload(self, data):
"""Parse raw data and package the result in required format"""
if self.__parser:
data = self.__parser.parse(data)
log.debug("Parser %s parsed data %s: ", self.__parser.__class__.__name__, data)
payload = {"id": str(uuid.uuid4()), "session": self.__session_id}
payload['data'] = data
if 'metadata' in self.__config:
for m in self.__config['metadata']:
payload[m] = self.__config['metadata'][m]
log.debug("payload to push: %s", payload)
return payload
| {
"content_hash": "913bad62943c3bfd46da827446222e47",
"timestamp": "",
"source": "github",
"line_count": 244,
"max_line_length": 133,
"avg_line_length": 43.59426229508197,
"alnum_prop": 0.5387797311271976,
"repo_name": "eResearchSA/reporting-producers",
"id": "9da7dea09845df5e5f16843bab9be2586a76b8b5",
"size": "10692",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "reporting/collectors.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "229293"
},
{
"name": "Shell",
"bytes": "8607"
}
],
"symlink_target": ""
} |
from django.conf.urls import patterns, include, url
# Uncomment the next two lines to enable the admin:
# from django.contrib import admin
# admin.autodiscover()
urlpatterns = patterns('',
# Examples:
# url(r'^$', 'migration_manager_test.views.home', name='home'),
# url(r'^migration_manager_test/', include('migration_manager_test.foo.urls')),
# Uncomment the admin/doc line below to enable admin documentation:
# url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Uncomment the next line to enable the admin:
# url(r'^admin/', include(admin.site.urls)),
)
| {
"content_hash": "dbd2de29611ff4a9c4b0df9af99bdfdc",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 83,
"avg_line_length": 35.529411764705884,
"alnum_prop": 0.6920529801324503,
"repo_name": "jbzdak/migration_manager",
"id": "39da74324634f6fe1588c084aa6c9f09b1b8e886",
"size": "604",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "migration_manager_test/urls.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "47427"
}
],
"symlink_target": ""
} |
"""Implements vlans, bridges, and iptables rules using linux utilities."""
import calendar
import inspect
import netaddr
import os
from nova import db
from nova import exception
from nova import flags
from nova import log as logging
from nova import utils
LOG = logging.getLogger("nova.linux_net")
def _bin_file(script):
"""Return the absolute path to scipt in the bin directory."""
return os.path.abspath(os.path.join(__file__, '../../../bin', script))
FLAGS = flags.FLAGS
flags.DEFINE_string('dhcpbridge_flagfile',
'/etc/nova/nova-dhcpbridge.conf',
'location of flagfile for dhcpbridge')
flags.DEFINE_string('dhcp_domain',
'novalocal',
'domain to use for building the hostnames')
flags.DEFINE_string('networks_path', '$state_path/networks',
'Location to keep network config files')
flags.DEFINE_string('public_interface', 'eth0',
'Interface for public IP addresses')
flags.DEFINE_string('dhcpbridge', _bin_file('nova-dhcpbridge'),
'location of nova-dhcpbridge')
flags.DEFINE_string('routing_source_ip', '$my_ip',
'Public IP of network host')
flags.DEFINE_string('input_chain', 'INPUT',
'chain to add nova_input to')
flags.DEFINE_integer('dhcp_lease_time', 120,
'Lifetime of a DHCP lease')
flags.DEFINE_string('dns_server', None,
'if set, uses specific dns server for dnsmasq')
flags.DEFINE_string('dmz_cidr', '10.128.0.0/24',
'dmz range that should be accepted')
flags.DEFINE_string('dnsmasq_config_file', "",
'Override the default dnsmasq settings with this file')
flags.DEFINE_string('linuxnet_interface_driver',
'nova.network.linux_net.LinuxBridgeInterfaceDriver',
'Driver used to create ethernet devices.')
flags.DEFINE_string('linuxnet_ovs_integration_bridge',
'br-int', 'Name of Open vSwitch bridge used with linuxnet')
flags.DEFINE_bool('send_arp_for_ha', False,
'send gratuitous ARPs for HA setup')
flags.DEFINE_bool('use_single_default_gateway',
False, 'Use single default gateway. Only first nic of vm'
' will get default gateway from dhcp server')
binary_name = os.path.basename(inspect.stack()[-1][1])
class IptablesRule(object):
"""An iptables rule.
You shouldn't need to use this class directly, it's only used by
IptablesManager.
"""
def __init__(self, chain, rule, wrap=True, top=False):
self.chain = chain
self.rule = rule
self.wrap = wrap
self.top = top
def __eq__(self, other):
return ((self.chain == other.chain) and
(self.rule == other.rule) and
(self.top == other.top) and
(self.wrap == other.wrap))
def __ne__(self, other):
return not self == other
def __str__(self):
if self.wrap:
chain = '%s-%s' % (binary_name, self.chain)
else:
chain = self.chain
return '-A %s %s' % (chain, self.rule)
class IptablesTable(object):
"""An iptables table."""
def __init__(self):
self.rules = []
self.chains = set()
self.unwrapped_chains = set()
def add_chain(self, name, wrap=True):
"""Adds a named chain to the table.
The chain name is wrapped to be unique for the component creating
it, so different components of Nova can safely create identically
named chains without interfering with one another.
At the moment, its wrapped name is <binary name>-<chain name>,
so if nova-compute creates a chain named 'OUTPUT', it'll actually
end up named 'nova-compute-OUTPUT'.
"""
if wrap:
self.chains.add(name)
else:
self.unwrapped_chains.add(name)
def remove_chain(self, name, wrap=True):
"""Remove named chain.
This removal "cascades". All rule in the chain are removed, as are
all rules in other chains that jump to it.
If the chain is not found, this is merely logged.
"""
if wrap:
chain_set = self.chains
else:
chain_set = self.unwrapped_chains
if name not in chain_set:
LOG.debug(_('Attempted to remove chain %s which does not exist'),
name)
return
chain_set.remove(name)
self.rules = filter(lambda r: r.chain != name, self.rules)
if wrap:
jump_snippet = '-j %s-%s' % (binary_name, name)
else:
jump_snippet = '-j %s' % (name,)
self.rules = filter(lambda r: jump_snippet not in r.rule, self.rules)
def add_rule(self, chain, rule, wrap=True, top=False):
"""Add a rule to the table.
This is just like what you'd feed to iptables, just without
the '-A <chain name>' bit at the start.
However, if you need to jump to one of your wrapped chains,
prepend its name with a '$' which will ensure the wrapping
is applied correctly.
"""
if wrap and chain not in self.chains:
raise ValueError(_('Unknown chain: %r') % chain)
if '$' in rule:
rule = ' '.join(map(self._wrap_target_chain, rule.split(' ')))
self.rules.append(IptablesRule(chain, rule, wrap, top))
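    # Illustrative use (not part of the original module): '$my-rules' below is
    # expanded to '<binary name>-my-rules' by _wrap_target_chain.
    #
    #     table = IptablesTable()
    #     table.add_chain('my-rules')
    #     table.add_chain('INPUT')
    #     table.add_rule('INPUT', '-j $my-rules')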
def _wrap_target_chain(self, s):
if s.startswith('$'):
return '%s-%s' % (binary_name, s[1:])
return s
def remove_rule(self, chain, rule, wrap=True, top=False):
"""Remove a rule from a chain.
Note: The rule must be exactly identical to the one that was added.
You cannot switch arguments around like you can with the iptables
CLI tool.
"""
try:
self.rules.remove(IptablesRule(chain, rule, wrap, top))
except ValueError:
LOG.debug(_('Tried to remove rule that was not there:'
' %(chain)r %(rule)r %(wrap)r %(top)r'),
{'chain': chain, 'rule': rule,
'top': top, 'wrap': wrap})
def empty_chain(self, chain, wrap=True):
"""Remove all rules from a chain."""
chained_rules = [rule for rule in self.rules
if rule.chain == chain and rule.wrap == wrap]
for rule in chained_rules:
self.rules.remove(rule)
class IptablesManager(object):
"""Wrapper for iptables.
See IptablesTable for some usage docs
A number of chains are set up to begin with.
First, nova-filter-top. It's added at the top of FORWARD and OUTPUT. Its
name is not wrapped, so it's shared between the various nova workers. It's
intended for rules that need to live at the top of the FORWARD and OUTPUT
chains. It's in both the ipv4 and ipv6 set of tables.
For ipv4 and ipv6, the builtin INPUT, OUTPUT, and FORWARD filter chains are
wrapped, meaning that the "real" INPUT chain has a rule that jumps to the
wrapped INPUT chain, etc. Additionally, there's a wrapped chain named
"local" which is jumped to from nova-filter-top.
For ipv4, the builtin PREROUTING, OUTPUT, and POSTROUTING nat chains are
    wrapped in the same way as the builtin filter chains. Additionally, there's
a snat chain that is applied after the POSTROUTING chain.
"""
def __init__(self, execute=None):
if not execute:
self.execute = _execute
else:
self.execute = execute
self.ipv4 = {'filter': IptablesTable(),
'nat': IptablesTable()}
self.ipv6 = {'filter': IptablesTable()}
# Add a nova-filter-top chain. It's intended to be shared
# among the various nova components. It sits at the very top
# of FORWARD and OUTPUT.
for tables in [self.ipv4, self.ipv6]:
tables['filter'].add_chain('nova-filter-top', wrap=False)
tables['filter'].add_rule('FORWARD', '-j nova-filter-top',
wrap=False, top=True)
tables['filter'].add_rule('OUTPUT', '-j nova-filter-top',
wrap=False, top=True)
tables['filter'].add_chain('local')
tables['filter'].add_rule('nova-filter-top', '-j $local',
wrap=False)
# Wrap the builtin chains
builtin_chains = {4: {'filter': ['INPUT', 'OUTPUT', 'FORWARD'],
'nat': ['PREROUTING', 'OUTPUT', 'POSTROUTING']},
6: {'filter': ['INPUT', 'OUTPUT', 'FORWARD']}}
for ip_version in builtin_chains:
if ip_version == 4:
tables = self.ipv4
elif ip_version == 6:
tables = self.ipv6
for table, chains in builtin_chains[ip_version].iteritems():
for chain in chains:
tables[table].add_chain(chain)
tables[table].add_rule(chain, '-j $%s' % (chain,),
wrap=False)
# Add a nova-postrouting-bottom chain. It's intended to be shared
# among the various nova components. We set it as the last chain
# of POSTROUTING chain.
self.ipv4['nat'].add_chain('nova-postrouting-bottom', wrap=False)
self.ipv4['nat'].add_rule('POSTROUTING', '-j nova-postrouting-bottom',
wrap=False)
# We add a snat chain to the shared nova-postrouting-bottom chain
# so that it's applied last.
self.ipv4['nat'].add_chain('snat')
self.ipv4['nat'].add_rule('nova-postrouting-bottom', '-j $snat',
wrap=False)
# And then we add a floating-snat chain and jump to first thing in
# the snat chain.
self.ipv4['nat'].add_chain('floating-snat')
self.ipv4['nat'].add_rule('snat', '-j $floating-snat')
@utils.synchronized('iptables', external=True)
def apply(self):
"""Apply the current in-memory set of iptables rules.
This will blow away any rules left over from previous runs of the
same component of Nova, and replace them with our current set of
rules. This happens atomically, thanks to iptables-restore.
"""
s = [('iptables', self.ipv4)]
if FLAGS.use_ipv6:
s += [('ip6tables', self.ipv6)]
for cmd, tables in s:
for table in tables:
current_table, _ = self.execute('%s-save' % (cmd,),
'-t', '%s' % (table,),
run_as_root=True,
attempts=5)
current_lines = current_table.split('\n')
new_filter = self._modify_rules(current_lines,
tables[table])
self.execute('%s-restore' % (cmd,), run_as_root=True,
process_input='\n'.join(new_filter),
attempts=5)
def _modify_rules(self, current_lines, table, binary=None):
unwrapped_chains = table.unwrapped_chains
chains = table.chains
rules = table.rules
# Remove any trace of our rules
new_filter = filter(lambda line: binary_name not in line,
current_lines)
seen_chains = False
rules_index = 0
for rules_index, rule in enumerate(new_filter):
if not seen_chains:
if rule.startswith(':'):
seen_chains = True
else:
if not rule.startswith(':'):
break
our_rules = []
for rule in rules:
rule_str = str(rule)
if rule.top:
# rule.top == True means we want this rule to be at the top.
# Further down, we weed out duplicates from the bottom of the
# list, so here we remove the dupes ahead of time.
new_filter = filter(lambda s: s.strip() != rule_str.strip(),
new_filter)
our_rules += [rule_str]
new_filter[rules_index:rules_index] = our_rules
new_filter[rules_index:rules_index] = [':%s - [0:0]' % \
(name,) \
for name in unwrapped_chains]
new_filter[rules_index:rules_index] = [':%s-%s - [0:0]' % \
(binary_name, name,) \
for name in chains]
seen_lines = set()
def _weed_out_duplicates(line):
line = line.strip()
if line in seen_lines:
return False
else:
seen_lines.add(line)
return True
# We filter duplicates, letting the *last* occurrence take
        # precedence.
new_filter.reverse()
new_filter = filter(_weed_out_duplicates, new_filter)
new_filter.reverse()
return new_filter
def metadata_forward():
"""Create forwarding rule for metadata."""
iptables_manager.ipv4['nat'].add_rule('PREROUTING',
'-s 0.0.0.0/0 -d 169.254.169.254/32 '
'-p tcp -m tcp --dport 80 -j DNAT '
'--to-destination %s:%s' % \
(FLAGS.ec2_dmz_host, FLAGS.ec2_port))
iptables_manager.apply()
def init_host():
"""Basic networking setup goes here."""
# NOTE(devcamcar): Cloud public SNAT entries and the default
# SNAT rule for outbound traffic.
iptables_manager.ipv4['nat'].add_rule('snat',
'-s %s -j SNAT --to-source %s' % \
(FLAGS.fixed_range,
FLAGS.routing_source_ip))
iptables_manager.ipv4['nat'].add_rule('POSTROUTING',
'-s %s -d %s -j ACCEPT' % \
(FLAGS.fixed_range, FLAGS.dmz_cidr))
iptables_manager.ipv4['nat'].add_rule('POSTROUTING',
'-s %(range)s -d %(range)s '
'-j ACCEPT' % \
{'range': FLAGS.fixed_range})
iptables_manager.apply()
def bind_floating_ip(floating_ip, check_exit_code=True):
"""Bind ip to public interface."""
_execute('ip', 'addr', 'add', floating_ip,
'dev', FLAGS.public_interface,
run_as_root=True, check_exit_code=check_exit_code)
if FLAGS.send_arp_for_ha:
_execute('arping', '-U', floating_ip,
'-A', '-I', FLAGS.public_interface,
'-c', 1, run_as_root=True, check_exit_code=False)
def unbind_floating_ip(floating_ip):
"""Unbind a public ip from public interface."""
_execute('ip', 'addr', 'del', floating_ip,
'dev', FLAGS.public_interface, run_as_root=True)
def ensure_metadata_ip():
"""Sets up local metadata ip."""
_execute('ip', 'addr', 'add', '169.254.169.254/32',
'scope', 'link', 'dev', 'lo',
run_as_root=True, check_exit_code=False)
def ensure_vpn_forward(public_ip, port, private_ip):
"""Sets up forwarding rules for vlan."""
iptables_manager.ipv4['filter'].add_rule('FORWARD',
'-d %s -p udp '
'--dport 1194 '
'-j ACCEPT' % private_ip)
iptables_manager.ipv4['nat'].add_rule('PREROUTING',
'-d %s -p udp '
'--dport %s -j DNAT --to %s:1194' %
(public_ip, port, private_ip))
iptables_manager.ipv4['nat'].add_rule("OUTPUT",
"-d %s -p udp "
"--dport %s -j DNAT --to %s:1194" %
(public_ip, port, private_ip))
iptables_manager.apply()
def ensure_floating_forward(floating_ip, fixed_ip):
"""Ensure floating ip forwarding rule."""
for chain, rule in floating_forward_rules(floating_ip, fixed_ip):
iptables_manager.ipv4['nat'].add_rule(chain, rule)
iptables_manager.apply()
def remove_floating_forward(floating_ip, fixed_ip):
"""Remove forwarding for floating ip."""
for chain, rule in floating_forward_rules(floating_ip, fixed_ip):
iptables_manager.ipv4['nat'].remove_rule(chain, rule)
iptables_manager.apply()
def floating_forward_rules(floating_ip, fixed_ip):
return [('PREROUTING', '-d %s -j DNAT --to %s' % (floating_ip, fixed_ip)),
('OUTPUT', '-d %s -j DNAT --to %s' % (floating_ip, fixed_ip)),
('floating-snat',
'-s %s -j SNAT --to %s' % (fixed_ip, floating_ip))]
def initialize_gateway_device(dev, network_ref):
if not network_ref:
return
# NOTE(vish): The ip for dnsmasq has to be the first address on the
    #             bridge for it to respond to requests properly
full_ip = '%s/%s' % (network_ref['dhcp_server'],
network_ref['cidr'].rpartition('/')[2])
new_ip_params = [[full_ip, 'brd', network_ref['broadcast']]]
old_ip_params = []
out, err = _execute('ip', 'addr', 'show', 'dev', dev,
'scope', 'global', run_as_root=True)
for line in out.split('\n'):
fields = line.split()
if fields and fields[0] == 'inet':
ip_params = fields[1:-1]
old_ip_params.append(ip_params)
if ip_params[0] != full_ip:
new_ip_params.append(ip_params)
if not old_ip_params or old_ip_params[0][0] != full_ip:
for ip_params in old_ip_params:
_execute(*_ip_bridge_cmd('del', ip_params, dev),
run_as_root=True)
for ip_params in new_ip_params:
_execute(*_ip_bridge_cmd('add', ip_params, dev),
run_as_root=True)
if FLAGS.send_arp_for_ha:
_execute('arping', '-U', network_ref['dhcp_server'],
'-A', '-I', dev,
'-c', 1, run_as_root=True, check_exit_code=False)
if(FLAGS.use_ipv6):
_execute('ip', '-f', 'inet6', 'addr',
'change', network_ref['cidr_v6'],
'dev', dev, run_as_root=True)
# NOTE(vish): If the public interface is the same as the
# bridge, then the bridge has to be in promiscuous
# to forward packets properly.
if(FLAGS.public_interface == dev):
_execute('ip', 'link', 'set',
'dev', dev, 'promisc', 'on', run_as_root=True)
def get_dhcp_leases(context, network_ref):
"""Return a network's hosts config in dnsmasq leasefile format."""
hosts = []
for fixed_ref in db.network_get_associated_fixed_ips(context,
network_ref['id']):
host = fixed_ref['instance']['host']
if network_ref['multi_host'] and FLAGS.host != host:
continue
hosts.append(_host_lease(fixed_ref))
return '\n'.join(hosts)
def get_dhcp_hosts(context, network_ref):
"""Get network's hosts config in dhcp-host format."""
hosts = []
for fixed_ref in db.network_get_associated_fixed_ips(context,
network_ref['id']):
host = fixed_ref['instance']['host']
if network_ref['multi_host'] and FLAGS.host != host:
continue
hosts.append(_host_dhcp(fixed_ref))
return '\n'.join(hosts)
def get_dhcp_opts(context, network_ref):
"""Get network's hosts config in dhcp-opts format."""
hosts = []
ips_ref = db.network_get_associated_fixed_ips(context, network_ref['id'])
if ips_ref:
#set of instance ids
instance_set = set([fixed_ip_ref['instance_id']
for fixed_ip_ref in ips_ref])
default_gw_network_node = {}
for instance_id in instance_set:
vifs = db.virtual_interface_get_by_instance(context, instance_id)
if vifs:
#offer a default gateway to the first virtual interface
default_gw_network_node[instance_id] = vifs[0]['network_id']
for fixed_ip_ref in ips_ref:
instance_id = fixed_ip_ref['instance_id']
if instance_id in default_gw_network_node:
target_network_id = default_gw_network_node[instance_id]
# we don't want default gateway for this fixed ip
if target_network_id != fixed_ip_ref['network_id']:
hosts.append(_host_dhcp_opts(fixed_ip_ref))
return '\n'.join(hosts)
def release_dhcp(dev, address, mac_address):
utils.execute('dhcp_release', dev, address, mac_address, run_as_root=True)
def _add_dnsmasq_accept_rules(dev):
"""Allow DHCP and DNS traffic through to dnsmasq."""
table = iptables_manager.ipv4['filter']
for port in [67, 53]:
for proto in ['udp', 'tcp']:
args = {'dev': dev, 'port': port, 'proto': proto}
table.add_rule('INPUT',
'-i %(dev)s -p %(proto)s -m %(proto)s '
'--dport %(port)s -j ACCEPT' % args)
iptables_manager.apply()
# NOTE(ja): Sending a HUP only reloads the hostfile, so any configuration
#           options (like dhcp-range, vlan, ...) aren't reloaded.
@utils.synchronized('dnsmasq_start')
def update_dhcp(context, dev, network_ref):
"""(Re)starts a dnsmasq server for a given network.
If a dnsmasq instance is already running then send a HUP
signal causing it to reload, otherwise spawn a new instance.
"""
conffile = _dhcp_file(dev, 'conf')
with open(conffile, 'w') as f:
f.write(get_dhcp_hosts(context, network_ref))
if FLAGS.use_single_default_gateway:
optsfile = _dhcp_file(dev, 'opts')
with open(optsfile, 'w') as f:
f.write(get_dhcp_opts(context, network_ref))
os.chmod(optsfile, 0644)
# Make sure dnsmasq can actually read it (it setuid()s to "nobody")
os.chmod(conffile, 0644)
pid = _dnsmasq_pid_for(dev)
# if dnsmasq is already running, then tell it to reload
if pid:
out, _err = _execute('cat', '/proc/%d/cmdline' % pid,
check_exit_code=False)
if conffile in out:
try:
_execute('kill', '-HUP', pid, run_as_root=True)
return
except Exception as exc: # pylint: disable=W0703
LOG.debug(_('Hupping dnsmasq threw %s'), exc)
else:
LOG.debug(_('Pid %d is stale, relaunching dnsmasq'), pid)
cmd = ['FLAGFILE=%s' % FLAGS.dhcpbridge_flagfile,
'NETWORK_ID=%s' % str(network_ref['id']),
'dnsmasq',
'--strict-order',
'--bind-interfaces',
'--conf-file=%s' % FLAGS.dnsmasq_config_file,
'--domain=%s' % FLAGS.dhcp_domain,
'--pid-file=%s' % _dhcp_file(dev, 'pid'),
'--listen-address=%s' % network_ref['dhcp_server'],
'--except-interface=lo',
'--dhcp-range=%s,static,120s' % network_ref['dhcp_start'],
'--dhcp-lease-max=%s' % len(netaddr.IPNetwork(network_ref['cidr'])),
'--dhcp-hostsfile=%s' % _dhcp_file(dev, 'conf'),
'--dhcp-script=%s' % FLAGS.dhcpbridge,
'--leasefile-ro']
if FLAGS.dns_server:
cmd += ['-h', '-R', '--server=%s' % FLAGS.dns_server]
if FLAGS.use_single_default_gateway:
cmd += ['--dhcp-optsfile=%s' % _dhcp_file(dev, 'opts')]
_execute(*cmd, run_as_root=True)
_add_dnsmasq_accept_rules(dev)
@utils.synchronized('radvd_start')
def update_ra(context, dev, network_ref):
conffile = _ra_file(dev, 'conf')
with open(conffile, 'w') as f:
conf_str = """
interface %s
{
AdvSendAdvert on;
MinRtrAdvInterval 3;
MaxRtrAdvInterval 10;
prefix %s
{
AdvOnLink on;
AdvAutonomous on;
};
};
""" % (dev, network_ref['cidr_v6'])
f.write(conf_str)
# Make sure radvd can actually read it (it setuid()s to "nobody")
os.chmod(conffile, 0644)
pid = _ra_pid_for(dev)
# if radvd is already running, then tell it to reload
if pid:
out, _err = _execute('cat', '/proc/%d/cmdline'
% pid, check_exit_code=False)
if conffile in out:
try:
_execute('kill', pid, run_as_root=True)
except Exception as exc: # pylint: disable=W0703
LOG.debug(_('killing radvd threw %s'), exc)
else:
LOG.debug(_('Pid %d is stale, relaunching radvd'), pid)
cmd = ['radvd',
'-C', '%s' % _ra_file(dev, 'conf'),
'-p', '%s' % _ra_file(dev, 'pid')]
_execute(*cmd, run_as_root=True)
def _host_lease(fixed_ip_ref):
"""Return a host string for an address in leasefile format."""
instance_ref = fixed_ip_ref['instance']
if instance_ref['updated_at']:
timestamp = instance_ref['updated_at']
else:
timestamp = instance_ref['created_at']
seconds_since_epoch = calendar.timegm(timestamp.utctimetuple())
return '%d %s %s %s *' % (seconds_since_epoch + FLAGS.dhcp_lease_time,
fixed_ip_ref['virtual_interface']['address'],
fixed_ip_ref['address'],
instance_ref['hostname'] or '*')
def _host_dhcp_network(fixed_ip_ref):
instance_ref = fixed_ip_ref['instance']
return 'NW-i%08d-%s' % (instance_ref['id'],
fixed_ip_ref['network_id'])
def _host_dhcp(fixed_ip_ref):
"""Return a host string for an address in dhcp-host format."""
instance_ref = fixed_ip_ref['instance']
vif = fixed_ip_ref['virtual_interface']
if FLAGS.use_single_default_gateway:
return '%s,%s.%s,%s,%s' % (vif['address'],
instance_ref['hostname'],
FLAGS.dhcp_domain,
fixed_ip_ref['address'],
"net:" + _host_dhcp_network(fixed_ip_ref))
else:
return '%s,%s.%s,%s' % (vif['address'],
instance_ref['hostname'],
FLAGS.dhcp_domain,
fixed_ip_ref['address'])
def _host_dhcp_opts(fixed_ip_ref):
"""Return a host string for an address in dhcp-host format."""
return '%s,%s' % (_host_dhcp_network(fixed_ip_ref), 3)
def _execute(*cmd, **kwargs):
"""Wrapper around utils._execute for fake_network."""
if FLAGS.fake_network:
LOG.debug('FAKE NET: %s', ' '.join(map(str, cmd)))
return 'fake', 0
else:
return utils.execute(*cmd, **kwargs)
def _device_exists(device):
"""Check if ethernet device exists."""
(_out, err) = _execute('ip', 'link', 'show', 'dev', device,
check_exit_code=False)
return not err
def _stop_dnsmasq(dev):
"""Stops the dnsmasq instance for a given network."""
pid = _dnsmasq_pid_for(dev)
if pid:
try:
_execute('kill', '-TERM', pid, run_as_root=True)
except Exception as exc: # pylint: disable=W0703
LOG.debug(_('Killing dnsmasq threw %s'), exc)
def _dhcp_file(dev, kind):
"""Return path to a pid, leases or conf file for a bridge/device."""
if not os.path.exists(FLAGS.networks_path):
os.makedirs(FLAGS.networks_path)
return os.path.abspath('%s/nova-%s.%s' % (FLAGS.networks_path,
dev,
kind))
def _ra_file(dev, kind):
"""Return path to a pid or conf file for a bridge/device."""
if not os.path.exists(FLAGS.networks_path):
os.makedirs(FLAGS.networks_path)
return os.path.abspath('%s/nova-ra-%s.%s' % (FLAGS.networks_path,
dev,
kind))
def _dnsmasq_pid_for(dev):
"""Returns the pid for prior dnsmasq instance for a bridge/device.
Returns None if no pid file exists.
If machine has rebooted pid might be incorrect (caller should check).
"""
pid_file = _dhcp_file(dev, 'pid')
if os.path.exists(pid_file):
with open(pid_file, 'r') as f:
return int(f.read())
def _ra_pid_for(dev):
"""Returns the pid for prior radvd instance for a bridge/device.
Returns None if no pid file exists.
If machine has rebooted pid might be incorrect (caller should check).
"""
pid_file = _ra_file(dev, 'pid')
if os.path.exists(pid_file):
with open(pid_file, 'r') as f:
return int(f.read())
def _ip_bridge_cmd(action, params, device):
"""Build commands to add/del ips to bridges/devices."""
cmd = ['ip', 'addr', action]
cmd.extend(params)
cmd.extend(['dev', device])
return cmd
# Similar to compute virt layers, the Linux network node
# code uses a flexible driver model to support different ways
# of creating ethernet interfaces and attaching them to the network.
# In the case of a network host, these interfaces
# act as gateway/dhcp/vpn/etc. endpoints not VM interfaces.
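# Illustrative wiring (mirrors the module-level setup at the bottom of this
# file; the flag value shown is just one of the drivers defined below):
#
#     # --linuxnet_interface_driver=nova.network.linux_net.LinuxOVSInterfaceDriver
#     driver = utils.import_object(FLAGS.linuxnet_interface_driver)
#     dev = driver.plug(network, mac_address)   # returns the gateway device name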
def plug(network, mac_address):
return interface_driver.plug(network, mac_address)
def unplug(network):
return interface_driver.unplug(network)
def get_dev(network):
return interface_driver.get_dev(network)
class LinuxNetInterfaceDriver(object):
"""Abstract class that defines generic network host API"""
""" for for all Linux interface drivers."""
def plug(self, network, mac_address):
"""Create Linux device, return device name"""
raise NotImplementedError()
def unplug(self, network):
"""Destory Linux device, return device name"""
raise NotImplementedError()
def get_dev(self, network):
"""Get device name"""
raise NotImplementedError()
# plugs interfaces using Linux Bridge
class LinuxBridgeInterfaceDriver(LinuxNetInterfaceDriver):
def plug(self, network, mac_address):
if network.get('vlan', None) is not None:
LinuxBridgeInterfaceDriver.ensure_vlan_bridge(
network['vlan'],
network['bridge'],
network['bridge_interface'],
network,
mac_address)
else:
LinuxBridgeInterfaceDriver.ensure_bridge(
network['bridge'],
network['bridge_interface'],
network)
return network['bridge']
def unplug(self, network):
return self.get_dev(network)
def get_dev(self, network):
return network['bridge']
@classmethod
def ensure_vlan_bridge(_self, vlan_num, bridge, bridge_interface,
net_attrs=None, mac_address=None):
"""Create a vlan and bridge unless they already exist."""
interface = LinuxBridgeInterfaceDriver.ensure_vlan(vlan_num,
bridge_interface, mac_address)
LinuxBridgeInterfaceDriver.ensure_bridge(bridge, interface, net_attrs)
return interface
@classmethod
@utils.synchronized('ensure_vlan', external=True)
def ensure_vlan(_self, vlan_num, bridge_interface, mac_address=None):
"""Create a vlan unless it already exists."""
interface = 'vlan%s' % vlan_num
if not _device_exists(interface):
            LOG.debug(_('Starting VLAN interface %s'), interface)
_execute('vconfig', 'set_name_type',
'VLAN_PLUS_VID_NO_PAD', run_as_root=True)
_execute('vconfig', 'add', bridge_interface,
vlan_num, run_as_root=True)
# (danwent) the bridge will inherit this address, so we want to
# make sure it is the value set from the NetworkManager
if mac_address:
_execute('ip', 'link', 'set', interface, "address",
mac_address, run_as_root=True)
_execute('ip', 'link', 'set', interface, 'up', run_as_root=True)
return interface
@classmethod
@utils.synchronized('ensure_bridge', external=True)
def ensure_bridge(_self, bridge, interface, net_attrs=None):
"""Create a bridge unless it already exists.
:param interface: the interface to create the bridge on.
:param net_attrs: dictionary with attributes used to create bridge.
If net_attrs is set, it will add the net_attrs['gateway'] to the bridge
using net_attrs['broadcast'] and net_attrs['cidr']. It will also add
the ip_v6 address specified in net_attrs['cidr_v6'] if use_ipv6 is set.
The code will attempt to move any ips that already exist on the
interface onto the bridge and reset the default gateway if necessary.
"""
if not _device_exists(bridge):
LOG.debug(_('Starting Bridge interface for %s'), interface)
_execute('brctl', 'addbr', bridge, run_as_root=True)
_execute('brctl', 'setfd', bridge, 0, run_as_root=True)
# _execute('brctl setageing %s 10' % bridge, run_as_root=True)
_execute('brctl', 'stp', bridge, 'off', run_as_root=True)
# (danwent) bridge device MAC address can't be set directly.
# instead it inherits the MAC address of the first device on the
# bridge, which will either be the vlan interface, or a
# physical NIC.
_execute('ip', 'link', 'set', bridge, 'up', run_as_root=True)
if interface:
out, err = _execute('brctl', 'addif', bridge, interface,
check_exit_code=False, run_as_root=True)
# NOTE(vish): This will break if there is already an ip on the
# interface, so we move any ips to the bridge
gateway = None
out, err = _execute('route', '-n', run_as_root=True)
for line in out.split('\n'):
fields = line.split()
if fields and fields[0] == '0.0.0.0' and \
fields[-1] == interface:
gateway = fields[1]
_execute('route', 'del', 'default', 'gw', gateway,
'dev', interface, check_exit_code=False,
run_as_root=True)
out, err = _execute('ip', 'addr', 'show', 'dev', interface,
'scope', 'global', run_as_root=True)
for line in out.split('\n'):
fields = line.split()
if fields and fields[0] == 'inet':
params = fields[1:-1]
_execute(*_ip_bridge_cmd('del', params, fields[-1]),
run_as_root=True)
_execute(*_ip_bridge_cmd('add', params, bridge),
run_as_root=True)
if gateway:
_execute('route', 'add', 'default', 'gw', gateway,
run_as_root=True)
if (err and err != "device %s is already a member of a bridge;"
"can't enslave it to bridge %s.\n" % (interface, bridge)):
raise exception.Error('Failed to add interface: %s' % err)
iptables_manager.ipv4['filter'].add_rule('FORWARD',
'--in-interface %s -j ACCEPT' % \
bridge)
iptables_manager.ipv4['filter'].add_rule('FORWARD',
'--out-interface %s -j ACCEPT' % \
bridge)
# plugs interfaces using Open vSwitch
class LinuxOVSInterfaceDriver(LinuxNetInterfaceDriver):
def plug(self, network, mac_address):
dev = "gw-" + str(network['id'])
if not _device_exists(dev):
bridge = FLAGS.linuxnet_ovs_integration_bridge
_execute('ovs-vsctl',
'--', '--may-exist', 'add-port', bridge, dev,
'--', 'set', 'Interface', dev, "type=internal",
'--', 'set', 'Interface', dev,
"external-ids:iface-id=nova-%s" % dev,
'--', 'set', 'Interface', dev,
"external-ids:iface-status=active",
'--', 'set', 'Interface', dev,
"external-ids:attached-mac=%s" % mac_address,
run_as_root=True)
_execute('ip', 'link', 'set', dev, "address", mac_address,
run_as_root=True)
_execute('ip', 'link', 'set', dev, 'up', run_as_root=True)
return dev
def unplug(self, network):
return self.get_dev(network)
def get_dev(self, network):
dev = "gw-" + str(network['id'])
return dev
iptables_manager = IptablesManager()
interface_driver = utils.import_object(FLAGS.linuxnet_interface_driver)
| {
"content_hash": "6966c85c98818417180ac44280933905",
"timestamp": "",
"source": "github",
"line_count": 985,
"max_line_length": 79,
"avg_line_length": 38.4497461928934,
"alnum_prop": 0.537902991577113,
"repo_name": "xushiwei/nova",
"id": "712db32dbc8d8501678023ad22323996d09f57ed",
"size": "38650",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nova/network/linux_net.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "7412"
},
{
"name": "Python",
"bytes": "4170357"
},
{
"name": "Shell",
"bytes": "33002"
}
],
"symlink_target": ""
} |
import collections
import functools
import json
import os
import sys
import time
import traceback
import uuid
from django import http
from django.core.exceptions import PermissionDenied
from django.core.files.storage import default_storage as storage
from django.conf import settings
from django import forms as django_forms
from django.db import transaction
from django.db.models import Count
from django.shortcuts import get_object_or_404, redirect, render
from django.utils.http import urlquote
from django.views.decorators.cache import never_cache
from django.views.decorators.csrf import csrf_exempt
import commonware.log
from PIL import Image
from session_csrf import anonymous_csrf
from tower import ugettext_lazy as _lazy, ugettext as _
import waffle
from waffle.decorators import waffle_switch
from applications.models import Application, AppVersion
import amo
import amo.utils
from amo import messages
from amo.decorators import json_view, login_required, post_required
from amo.helpers import absolutify, loc, urlparams
from amo.utils import escape_all, HttpResponseSendFile, MenuItem
from amo.urlresolvers import reverse
from access import acl
from addons import forms as addon_forms
from addons.decorators import addon_view
from addons.models import Addon, AddonUser
from addons.views import BaseFilter
from devhub.decorators import dev_required
from devhub.forms import CheckCompatibilityForm
from devhub.models import ActivityLog, BlogPost, RssKey, SubmitStep
from devhub import perf
from editors.helpers import ReviewHelper, get_position
from files.models import File, FileUpload, Platform
from files.utils import parse_addon
from market.models import Refund
from paypal.check import Check
import paypal
from search.views import BaseAjaxSearch
from stats.models import Contribution
from translations.models import delete_translation
from users.models import UserProfile
from versions.models import Version
from mkt.webapps.models import Webapp
from zadmin.models import ValidationResult
from . import forms, tasks, feeds, signals
log = commonware.log.getLogger('z.devhub')
paypal_log = commonware.log.getLogger('z.paypal')
# We use a session cookie to make sure people see the dev agreement.
DEV_AGREEMENT_COOKIE = 'yes-I-read-the-dev-agreement'
class AddonFilter(BaseFilter):
opts = (('name', _lazy(u'Name')),
('updated', _lazy(u'Updated')),
('created', _lazy(u'Created')),
('popular', _lazy(u'Downloads')),
('rating', _lazy(u'Rating')))
class AppFilter(BaseFilter):
opts = (('name', _lazy(u'Name')),
('created', _lazy(u'Created')),
('downloads', _lazy(u'Weekly Downloads')),
('rating', _lazy(u'Rating')))
def addon_listing(request, default='name', webapp=False, theme=False):
"""Set up the queryset and filtering for addon listing for Dashboard."""
Filter = AppFilter if webapp else AddonFilter
if webapp:
qs = Webapp.objects.filter(
id__in=request.amo_user.addons.filter(type=amo.ADDON_WEBAPP))
model = Webapp
elif theme:
qs = request.amo_user.addons.filter(type=amo.ADDON_PERSONA)
model = Addon
else:
qs = request.amo_user.addons.exclude(type__in=[amo.ADDON_WEBAPP,
amo.ADDON_PERSONA])
model = Addon
filter = Filter(request, qs, 'sort', default, model=model)
return filter.qs, filter
def index(request):
if settings.APP_PREVIEW:
# This can be a permanent redirect when we finalize devhub for apps.
return redirect('devhub.apps')
ctx = {'blog_posts': _get_posts()}
if request.amo_user:
user_addons = request.amo_user.addons.exclude(type=amo.ADDON_WEBAPP)
recent_addons = user_addons.order_by('-modified')[:3]
ctx['recent_addons'] = []
for addon in recent_addons:
ctx['recent_addons'].append({'addon': addon,
'position': get_position(addon)})
return render(request, 'devhub/index.html', ctx)
@login_required
def dashboard(request, webapp=False, theme=False):
addon_items = _get_items(
None, request.amo_user.addons.exclude(type=amo.ADDON_WEBAPP))[:4]
data = dict(rss=_get_rss_feed(request), blog_posts=_get_posts(),
timestamp=int(time.time()), addon_tab=not webapp and not theme,
webapp=webapp, theme=theme, addon_items=addon_items)
if data['addon_tab']:
addons, data['filter'] = addon_listing(request, webapp=webapp)
data['addons'] = amo.utils.paginate(request, addons, per_page=10)
if theme:
themes, data['filter'] = addon_listing(request, theme=True)
data['themes'] = amo.utils.paginate(request, themes, per_page=10)
if 'filter' in data:
data['sorting'] = data['filter'].field
data['sort_opts'] = data['filter'].opts
return render(request, 'devhub/addons/dashboard.html', data)
@dev_required
def ajax_compat_status(request, addon_id, addon):
if not (addon.accepts_compatible_apps() and addon.current_version):
raise http.Http404()
return render(request, 'devhub/addons/ajax_compat_status.html',
dict(addon=addon))
@dev_required
def ajax_compat_error(request, addon_id, addon):
if not (addon.accepts_compatible_apps() and addon.current_version):
raise http.Http404()
return render(request, 'devhub/addons/ajax_compat_error.html',
dict(addon=addon))
@dev_required
def ajax_compat_update(request, addon_id, addon, version_id):
if not addon.accepts_compatible_apps():
raise http.Http404()
version = get_object_or_404(Version, pk=version_id, addon=addon)
compat_form = forms.CompatFormSet(request.POST or None,
queryset=version.apps.all())
if request.method == 'POST' and compat_form.is_valid():
for compat in compat_form.save(commit=False):
compat.version = version
compat.save()
for form in compat_form.forms:
if (isinstance(form, forms.CompatForm) and
'max' in form.changed_data):
_log_max_version_change(addon, version, form.instance)
return render(request, 'devhub/addons/ajax_compat_update.html',
dict(addon=addon, version=version, compat_form=compat_form))
def _get_addons(request, addons, addon_id, action):
"""Create a list of ``MenuItem``s for the activity feed."""
items = []
a = MenuItem()
a.selected = (not addon_id)
(a.text, a.url) = (_('All My Add-ons'), reverse('devhub.feed_all'))
if action:
a.url += '?action=' + action
items.append(a)
for addon in addons:
item = MenuItem()
try:
item.selected = (addon_id and addon.id == int(addon_id))
except ValueError:
pass # We won't get here... EVER
url = reverse('devhub.feed', args=[addon.slug])
if action:
url += '?action=' + action
item.text, item.url = addon.name, url
items.append(item)
return items
def _get_posts(limit=5):
return BlogPost.objects.order_by('-date_posted')[0:limit]
def _get_activities(request, action):
url = request.get_full_path()
choices = (None, 'updates', 'status', 'collections', 'reviews')
text = {None: _('All Activity'),
'updates': _('Add-on Updates'),
'status': _('Add-on Status'),
'collections': _('User Collections'),
'reviews': _('User Reviews'),
}
items = []
for c in choices:
i = MenuItem()
i.text = text[c]
i.url, i.selected = urlparams(url, page=None, action=c), (action == c)
items.append(i)
return items
def _get_items(action, addons):
filters = dict(updates=(amo.LOG.ADD_VERSION, amo.LOG.ADD_FILE_TO_VERSION),
status=(amo.LOG.USER_DISABLE, amo.LOG.USER_ENABLE,
amo.LOG.CHANGE_STATUS, amo.LOG.APPROVE_VERSION,),
collections=(amo.LOG.ADD_TO_COLLECTION,
amo.LOG.REMOVE_FROM_COLLECTION,),
reviews=(amo.LOG.ADD_REVIEW,))
filter = filters.get(action)
items = (ActivityLog.objects.for_addons(addons).filter()
.exclude(action__in=amo.LOG_HIDE_DEVELOPER))
if filter:
items = items.filter(action__in=[i.id for i in filter])
return items
def _get_rss_feed(request):
key, __ = RssKey.objects.get_or_create(user=request.amo_user)
return urlparams(reverse('devhub.feed_all'), privaterss=key.key)
def feed(request, addon_id=None):
if request.GET.get('privaterss'):
return feeds.ActivityFeedRSS()(request)
addon_selected = None
if not request.user.is_authenticated():
url = reverse('users.login')
p = urlquote(request.get_full_path())
return http.HttpResponseRedirect('%s?to=%s' % (url, p))
else:
# We exclude apps on AMO.
addons_all = request.amo_user.addons.exclude(type=amo.ADDON_WEBAPP)
if addon_id:
addon = get_object_or_404(Addon.objects.id_or_slug(addon_id))
addons = addon # common query set
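            # Each add-on gets its own private RSS key so its activity feed
            # can be polled without an authenticated session.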
try:
key = RssKey.objects.get(addon=addons)
except RssKey.DoesNotExist:
key = RssKey.objects.create(addon=addons)
addon_selected = addon.id
rssurl = urlparams(reverse('devhub.feed', args=[addon_id]),
privaterss=key.key)
if not acl.check_addon_ownership(request, addons, viewer=True,
ignore_disabled=True):
raise PermissionDenied
else:
rssurl = _get_rss_feed(request)
addon = None
addons = addons_all
action = request.GET.get('action')
items = _get_items(action, addons)
activities = _get_activities(request, action)
addon_items = _get_addons(request, addons_all, addon_selected, action)
pager = amo.utils.paginate(request, items, 20)
data = dict(addons=addon_items, pager=pager, activities=activities,
rss=rssurl, addon=addon)
return render(request, 'devhub/addons/activity.html', data)
@dev_required(webapp=True)
def edit(request, addon_id, addon, webapp=False):
url_prefix = 'apps' if webapp else 'addons'
data = {
'page': 'edit',
'addon': addon,
'webapp': webapp,
'url_prefix': url_prefix,
'valid_slug': addon.slug,
'tags': addon.tags.not_blacklisted().values_list('tag_text',
flat=True),
'previews': addon.previews.all(),
}
if (not webapp and
acl.action_allowed(request, 'Addons', 'Configure')):
data['admin_form'] = forms.AdminForm(instance=addon)
return render(request, 'devhub/addons/edit.html', data)
@dev_required(theme=True)
def edit_theme(request, addon_id, addon, theme=False):
form = addon_forms.EditThemeForm(data=request.POST or None,
request=request, instance=addon)
owner_form = addon_forms.EditThemeOwnerForm(data=request.POST or None,
instance=addon)
if request.method == 'POST':
if 'owner_submit' in request.POST:
if owner_form.is_valid():
owner_form.save()
messages.success(request, _('Changes successfully saved.'))
return redirect('devhub.themes.edit', addon.slug)
elif form.is_valid():
form.save()
messages.success(request, _('Changes successfully saved.'))
return redirect('devhub.themes.edit', addon.reload().slug)
else:
messages.error(request, _('Please check the form for errors.'))
return render(request, 'devhub/personas/edit.html', {
'addon': addon, 'persona': addon.persona, 'form': form,
'owner_form': owner_form})
@dev_required(owner_for_post=True, webapp=True, theme=True)
@post_required
def delete(request, addon_id, addon, webapp=False, theme=False):
# Database deletes only allowed for free or incomplete addons.
if not addon.can_be_deleted():
if webapp:
msg = loc('App cannot be deleted. Disable this app instead.')
else:
msg = _('Add-on cannot be deleted. Disable this add-on instead.')
messages.error(request, msg)
return redirect(addon.get_dev_url('versions'))
form = forms.DeleteForm(request)
if form.is_valid():
reason = form.cleaned_data.get('reason', '')
addon.delete(msg='Removed via devhub', reason=reason)
messages.success(request,
_('Theme deleted.') if theme else _('Add-on deleted.'))
return redirect('devhub.%s' % ('apps' if webapp else
'themes' if theme else 'addons'))
else:
if theme:
messages.error(request,
_('Password was incorrect. Theme was not deleted.'))
return redirect(addon.get_dev_url())
else:
messages.error(request,
_('Password was incorrect. Add-on was not deleted.'))
return redirect(addon.get_dev_url('versions'))
@dev_required
@post_required
def enable(request, addon_id, addon):
addon.update(disabled_by_user=False)
amo.log(amo.LOG.USER_ENABLE, addon)
return redirect(addon.get_dev_url('versions'))
@dev_required(owner_for_post=True)
@post_required
def cancel(request, addon_id, addon):
if addon.status in amo.STATUS_UNDER_REVIEW:
if addon.status == amo.STATUS_LITE_AND_NOMINATED:
addon.update(status=amo.STATUS_LITE)
else:
addon.update(status=amo.STATUS_NULL)
amo.log(amo.LOG.CHANGE_STATUS, addon.get_status_display(), addon)
return redirect(addon.get_dev_url('versions'))
@dev_required
@post_required
def disable(request, addon_id, addon):
addon.update(disabled_by_user=True)
amo.log(amo.LOG.USER_DISABLE, addon)
return redirect(addon.get_dev_url('versions'))
@dev_required(owner_for_post=True, webapp=True)
def ownership(request, addon_id, addon, webapp=False):
fs, ctx = [], {}
# Authors.
qs = AddonUser.objects.filter(addon=addon).order_by('position')
user_form = forms.AuthorFormSet(request.POST or None, queryset=qs)
fs.append(user_form)
# Versions.
license_form = forms.LicenseForm(request.POST or None, addon=addon)
if not addon.is_webapp():
ctx.update(license_form.get_context())
if ctx['license_form']: # if addon has a version
fs.append(ctx['license_form'])
# Policy.
policy_form = forms.PolicyForm(request.POST or None, addon=addon)
if not addon.is_webapp():
ctx.update(policy_form=policy_form)
fs.append(policy_form)
if request.method == 'POST' and all([form.is_valid() for form in fs]):
# Authors.
authors = user_form.save(commit=False)
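        # Log an add/change entry for each author whose user or role changed,
        # plus a removal entry when an existing row was re-pointed at a
        # different user.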
for author in authors:
action = None
if not author.id or author.user_id != author._original_user_id:
action = amo.LOG.ADD_USER_WITH_ROLE
author.addon = addon
elif author.role != author._original_role:
action = amo.LOG.CHANGE_USER_WITH_ROLE
author.save()
if action:
amo.log(action, author.user, author.get_role_display(), addon)
if (author._original_user_id and
author.user_id != author._original_user_id):
amo.log(amo.LOG.REMOVE_USER_WITH_ROLE,
(UserProfile, author._original_user_id),
author.get_role_display(), addon)
for author in user_form.deleted_objects:
amo.log(amo.LOG.REMOVE_USER_WITH_ROLE, author.user,
author.get_role_display(), addon)
if license_form in fs:
license_form.save()
if policy_form in fs:
policy_form.save()
messages.success(request, _('Changes successfully saved.'))
return redirect(addon.get_dev_url('owner'))
ctx.update(addon=addon, webapp=webapp, user_form=user_form)
return render(request, 'devhub/addons/owner.html', ctx)
@dev_required(owner_for_post=True, webapp=True)
def payments(request, addon_id, addon, webapp=False):
if addon.is_premium():
return _premium(request, addon_id, addon, webapp)
return _voluntary(request, addon_id, addon, webapp)
def _premium(request, addon_id, addon, webapp=False):
premium_form = forms.PremiumForm(request.POST or None,
request=request,
extra={'addon': addon,
'amo_user': request.amo_user,
'dest': 'payment'})
if request.method == 'POST' and premium_form.is_valid():
premium_form.save()
messages.success(request, _('Changes successfully saved.'))
return redirect(addon.get_dev_url('payments'))
return render(request, 'devhub/payments/premium.html',
dict(addon=addon, webapp=webapp, premium=addon.premium,
form=premium_form))
def _voluntary(request, addon_id, addon, webapp):
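    # Voluntary contributions: the developer keeps donations, sends them to
    # the Mozilla Foundation, or names another charity; a complete developer
    # profile is required before contributions can be turned on.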
charity = None if addon.charity_id == amo.FOUNDATION_ORG else addon.charity
charity_form = forms.CharityForm(request.POST or None, instance=charity,
prefix='charity')
contrib_form = forms.ContribForm(request.POST or None, instance=addon,
initial=forms.ContribForm.initial(addon))
profile_form = forms.ProfileForm(request.POST or None, instance=addon,
required=True)
if request.method == 'POST':
if contrib_form.is_valid():
addon = contrib_form.save(commit=False)
addon.wants_contributions = True
valid = _save_charity(addon, contrib_form, charity_form)
if not addon.has_full_profile():
valid &= profile_form.is_valid()
if valid:
profile_form.save()
if valid:
addon.save()
messages.success(request, _('Changes successfully saved.'))
amo.log(amo.LOG.EDIT_CONTRIBUTIONS, addon)
return redirect(addon.get_dev_url('payments'))
errors = charity_form.errors or contrib_form.errors or profile_form.errors
if errors:
messages.error(request, _('There were errors in your submission.'))
return render(request, 'devhub/payments/payments.html',
dict(addon=addon, webapp=webapp, errors=errors,
charity_form=charity_form, contrib_form=contrib_form,
profile_form=profile_form))
def _save_charity(addon, contrib_form, charity_form):
recipient = contrib_form.cleaned_data['recipient']
if recipient == 'dev':
addon.charity = None
elif recipient == 'moz':
addon.charity_id = amo.FOUNDATION_ORG
elif recipient == 'org':
if charity_form.is_valid():
addon.charity = charity_form.save()
else:
return False
return True
@waffle_switch('allow-refund')
@dev_required(webapp=True)
def issue_refund(request, addon_id, addon, webapp=False):
txn_id = request.REQUEST.get('transaction_id')
if not txn_id:
raise http.Http404
form_enabled = True
contribution = get_object_or_404(Contribution, transaction_id=txn_id,
type=amo.CONTRIB_PURCHASE)
if Refund.objects.filter(contribution=contribution).exists():
messages.error(request, _('Refund already processed.'))
form_enabled = False
elif request.method == 'POST':
if 'issue' in request.POST:
try:
results = paypal.refund(contribution.paykey)
except paypal.PaypalError, e:
messages.error(request, _('Refund failed. error: %s') % e)
contribution.record_failed_refund(e)
else:
for res in results:
if res['refundStatus'] == 'ALREADY_REVERSED_OR_REFUNDED':
paypal_log.debug(
'Refund attempt for already-refunded paykey: %s, '
'%s' % (contribution.paykey,
res['receiver.email']))
messages.error(request,
_('Refund was previously issued; '
'no action taken.'))
return redirect(addon.get_dev_url('refunds'))
contribution.mail_approved()
refund = contribution.enqueue_refund(amo.REFUND_APPROVED,
request.amo_user)
paypal_log.info('Refund %r issued for contribution %r' %
(refund.pk, contribution.pk))
messages.success(request, _('Refund issued.'))
else:
contribution.mail_declined()
# TODO: Consider requiring a rejection reason for declined refunds.
refund = contribution.enqueue_refund(amo.REFUND_DECLINED,
request.amo_user)
paypal_log.info('Refund %r declined for contribution %r' %
(refund.pk, contribution.pk))
messages.success(request, _('Refund declined.'))
return redirect(addon.get_dev_url('refunds'))
return render(request, 'devhub/payments/issue-refund.html',
{'enabled': form_enabled, 'contribution': contribution,
'addon': addon, 'webapp': webapp, 'transaction_id': txn_id})
@waffle_switch('allow-refund')
@dev_required(webapp=True)
# TODO: Make sure 'Support' staff can access this.
def refunds(request, addon_id, addon, webapp=False):
ctx = {'addon': addon, 'webapp': webapp}
queues = {
'pending': Refund.objects.pending(addon),
'approved': Refund.objects.approved(addon),
'instant': Refund.objects.instant(addon),
'declined': Refund.objects.declined(addon),
}
# For now set the limit to something stupid so this is stupid easy to QA.
for status, refunds in queues.iteritems():
ctx[status] = amo.utils.paginate(request, refunds, per_page=5)
return render(request, 'devhub/payments/refunds.html', ctx)
@dev_required
@post_required
def disable_payments(request, addon_id, addon):
addon.update(wants_contributions=False)
return redirect(addon.get_dev_url('payments'))
@dev_required(webapp=True)
@post_required
def remove_profile(request, addon_id, addon, webapp=False):
delete_translation(addon, 'the_reason')
delete_translation(addon, 'the_future')
if addon.wants_contributions:
addon.update(wants_contributions=False)
return redirect(addon.get_dev_url('profile'))
@dev_required(webapp=True)
def profile(request, addon_id, addon, webapp=False):
profile_form = forms.ProfileForm(request.POST or None, instance=addon)
if request.method == 'POST' and profile_form.is_valid():
profile_form.save()
amo.log(amo.LOG.EDIT_PROPERTIES, addon)
messages.success(request, _('Changes successfully saved.'))
return redirect(addon.get_dev_url('profile'))
return render(request, 'devhub/addons/profile.html',
dict(addon=addon, webapp=webapp, profile_form=profile_form))
@login_required
@post_required
@json_view
def compat_application_versions(request):
app_id = request.POST['application_id']
f = CheckCompatibilityForm()
return {'choices': f.version_choices_for_app_id(app_id)}
@login_required
def validate_addon(request):
return render(request, 'devhub/validate_addon.html',
{'title': _('Validate Add-on'),
'upload_url': reverse('devhub.standalone_upload')})
@login_required
def check_addon_compatibility(request):
form = CheckCompatibilityForm()
return render(request, 'devhub/validate_addon.html',
{'appversion_form': form,
'title': _('Check Add-on Compatibility'),
'upload_url': reverse('devhub.standalone_upload')})
@dev_required
@json_view
def file_perf_tests_start(request, addon_id, addon, file_id):
if not waffle.flag_is_active(request, 'perf-tests'):
raise PermissionDenied
file_ = get_object_or_404(File, pk=file_id)
plats = perf.PLATFORM_MAP.get(file_.platform.id, None)
if plats is None:
log.info('Unsupported performance platform %s for file %s'
% (file_.platform.id, file_))
# TODO(Kumar) provide a message about this
return {'success': False}
for app in perf.ALL_APPS:
for plat in plats:
tasks.start_perf_test_for_file.delay(file_.id, plat, app)
return {'success': True}
def packager_path(name):
return os.path.join(settings.PACKAGER_PATH, '%s.zip' % name)
@anonymous_csrf
def package_addon(request):
basic_form = forms.PackagerBasicForm(request.POST or None)
features_form = forms.PackagerFeaturesForm(request.POST or None)
compat_forms = forms.PackagerCompatFormSet(request.POST or None)
# Process requests, but also avoid short circuiting by using all().
if (request.method == 'POST' and
all([basic_form.is_valid(),
features_form.is_valid(),
compat_forms.is_valid()])):
basic_data = basic_form.cleaned_data
compat_data = compat_forms.cleaned_data
data = {'id': basic_data['id'],
'version': basic_data['version'],
'name': basic_data['name'],
'slug': basic_data['package_name'],
'description': basic_data['description'],
'author_name': basic_data['author_name'],
'contributors': basic_data['contributors'],
'targetapplications': [c for c in compat_data if c['enabled']]}
tasks.packager.delay(data, features_form.cleaned_data)
return redirect('devhub.package_addon_success',
basic_data['package_name'])
return render(request, 'devhub/package_addon.html',
{'basic_form': basic_form, 'compat_forms': compat_forms,
'features_form': features_form})
def package_addon_success(request, package_name):
"""Return the success page for the add-on packager."""
return render(request, 'devhub/package_addon_success.html',
{'package_name': package_name})
@json_view
def package_addon_json(request, package_name):
"""Return the URL of the packaged add-on."""
path_ = packager_path(package_name)
if storage.exists(path_):
url = reverse('devhub.package_addon_download', args=[package_name])
return {'download_url': url, 'filename': os.path.basename(path_),
'size': round(storage.open(path_).size / 1024, 1)}
def package_addon_download(request, package_name):
"""Serve a packaged add-on."""
path_ = packager_path(package_name)
if not storage.exists(path_):
raise http.Http404()
return HttpResponseSendFile(request, path_, content_type='application/zip')
@login_required
@post_required
def upload(request, addon_slug=None, is_standalone=False):
filedata = request.FILES['upload']
fu = FileUpload.from_post(filedata, filedata.name, filedata.size)
log.info('FileUpload created: %s' % fu.pk)
if request.user.is_authenticated():
fu.user = request.amo_user
fu.save()
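    # If both an application and a target version were posted, run the
    # compatibility checker; otherwise run the regular add-on validator.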
if request.POST.get('app_id') and request.POST.get('version_id'):
app = get_object_or_404(Application, pk=request.POST['app_id'])
ver = get_object_or_404(AppVersion, pk=request.POST['version_id'])
tasks.compatibility_check.delay(fu.pk, app.guid, ver.version)
else:
tasks.validator.delay(fu.pk)
if addon_slug:
return redirect('devhub.upload_detail_for_addon',
addon_slug, fu.pk)
elif is_standalone:
return redirect('devhub.standalone_upload_detail', fu.pk)
else:
return redirect('devhub.upload_detail', fu.pk, 'json')
@login_required
@post_required
@json_view
def upload_manifest(request):
form = forms.NewManifestForm(request.POST)
if form.is_valid():
upload = FileUpload.objects.create()
tasks.fetch_manifest.delay(form.cleaned_data['manifest'], upload.pk)
return redirect('devhub.upload_detail', upload.pk, 'json')
else:
error_text = _('There was an error with the submission.')
if 'manifest' in form.errors:
error_text = ' '.join(form.errors['manifest'])
error_message = {'type': 'error', 'message': error_text, 'tier': 1}
v = {'errors': 1, 'success': False, 'messages': [error_message]}
return make_validation_result(dict(validation=v, error=error_text))
@login_required
@post_required
def standalone_upload(request):
return upload(request, is_standalone=True)
@login_required
@json_view
def standalone_upload_detail(request, uuid):
upload = get_object_or_404(FileUpload, uuid=uuid)
url = reverse('devhub.standalone_upload_detail', args=[uuid])
return upload_validation_context(request, upload, url=url)
@post_required
@dev_required
def upload_for_addon(request, addon_id, addon):
return upload(request, addon_slug=addon.slug)
@dev_required
@json_view
def upload_detail_for_addon(request, addon_id, addon, uuid):
upload = get_object_or_404(FileUpload, uuid=uuid)
return json_upload_detail(request, upload, addon_slug=addon.slug)
def make_validation_result(data, is_compatibility=False):
"""Safe wrapper around JSON dict containing a validation result.
Keyword Arguments
**is_compatibility=False**
When True, errors will be summarized as if they were in a regular
validation result.
"""
if not settings.EXPOSE_VALIDATOR_TRACEBACKS:
if data['error']:
# Just expose the message, not the traceback
data['error'] = data['error'].strip().split('\n')[-1].strip()
if data['validation']:
lim = settings.VALIDATOR_MESSAGE_LIMIT
if lim:
del (data['validation']['messages']
[settings.VALIDATOR_MESSAGE_LIMIT:])
ending_tier = data['validation'].get('ending_tier', 0)
for msg in data['validation']['messages']:
if msg['tier'] > ending_tier:
ending_tier = msg['tier']
if msg['tier'] == 0:
# We can't display a message if it's on tier 0.
# Should get fixed soon in bug 617481
msg['tier'] = 1
for k, v in msg.items():
msg[k] = escape_all(v)
if lim:
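            # Estimate how many messages were dropped by the truncation above
            # so the user can be warned that more exist beyond the display
            # limit.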
compatibility_count = 0
if data['validation'].get('compatibility_summary'):
cs = data['validation']['compatibility_summary']
compatibility_count = (cs['errors']
+ cs['warnings']
+ cs['notices'])
else:
cs = {}
leftover_count = (data['validation'].get('errors', 0)
+ data['validation'].get('warnings', 0)
+ data['validation'].get('notices', 0)
+ compatibility_count
- lim)
if leftover_count > 0:
msgtype = 'notice'
if is_compatibility:
if cs.get('errors'):
msgtype = 'error'
elif cs.get('warnings'):
msgtype = 'warning'
else:
if data['validation']['errors']:
msgtype = 'error'
elif data['validation']['warnings']:
msgtype = 'warning'
data['validation']['messages'].append(
{'tier': 1,
'type': msgtype,
'message': (_('Validation generated too many errors/'
'warnings so %s messages were truncated. '
'After addressing the visible messages, '
"you'll be able to see the others.")
% (leftover_count,)),
'compatibility_type': None
})
if is_compatibility:
compat = data['validation']['compatibility_summary']
for k in ('errors', 'warnings', 'notices'):
data['validation'][k] = compat[k]
for msg in data['validation']['messages']:
if msg['compatibility_type']:
msg['type'] = msg['compatibility_type']
data['validation']['ending_tier'] = ending_tier
return data
@dev_required(allow_editors=True)
def file_validation(request, addon_id, addon, file_id):
file = get_object_or_404(File, id=file_id)
v = reverse('devhub.json_file_validation', args=[addon.slug, file.id])
return render(request, 'devhub/validation.html',
dict(validate_url=v, filename=file.filename,
timestamp=file.created, addon=addon))
@dev_required(allow_editors=True)
def bulk_compat_result(request, addon_id, addon, result_id):
qs = ValidationResult.objects.exclude(completed=None)
result = get_object_or_404(qs, pk=result_id)
job = result.validation_job
revalidate_url = reverse('devhub.json_bulk_compat_result',
args=[addon.slug, result.id])
return _compat_result(request, revalidate_url,
job.application, job.target_version,
for_addon=result.file.version.addon,
validated_filename=result.file.filename,
validated_ts=result.completed)
def _compat_result(request, revalidate_url, target_app, target_version,
validated_filename=None, validated_ts=None,
for_addon=None):
app_trans = dict((g, unicode(a.pretty)) for g, a in amo.APP_GUIDS.items())
ff_versions = (AppVersion.objects.filter(application=amo.FIREFOX.id,
version_int__gte=4000000000000)
.values_list('application', 'version')
.order_by('version_int'))
tpl = 'https://developer.mozilla.org/en/Firefox_%s_for_developers'
change_links = dict()
for app, ver in ff_versions:
major = ver.split('.')[0] # 4.0b3 -> 4
change_links['%s %s' % (amo.APP_IDS[app].guid, ver)] = tpl % major
return render(request, 'devhub/validation.html',
dict(validate_url=revalidate_url,
filename=validated_filename, timestamp=validated_ts,
target_app=target_app, target_version=target_version,
addon=for_addon, result_type='compat',
app_trans=app_trans, version_change_links=change_links))
@json_view
@csrf_exempt
@dev_required(allow_editors=True)
def json_file_validation(request, addon_id, addon, file_id):
file = get_object_or_404(File, id=file_id)
if not file.has_been_validated:
if request.method != 'POST':
return http.HttpResponseNotAllowed(['POST'])
try:
v_result = tasks.file_validator(file.id)
except Exception, exc:
log.error('file_validator(%s): %s' % (file.id, exc))
error = "\n".join(traceback.format_exception(*sys.exc_info()))
return make_validation_result({'validation': '',
'error': error})
else:
v_result = file.validation
validation = json.loads(v_result.validation)
return make_validation_result(dict(validation=validation,
error=None))
@json_view
@csrf_exempt
@post_required
@dev_required(allow_editors=True)
def json_bulk_compat_result(request, addon_id, addon, result_id):
qs = ValidationResult.objects.exclude(completed=None)
result = get_object_or_404(qs, pk=result_id)
if result.task_error:
return make_validation_result({'validation': '',
'error': result.task_error})
else:
validation = json.loads(result.validation)
return make_validation_result(dict(validation=validation, error=None))
@json_view
def json_upload_detail(request, upload, addon_slug=None):
addon = None
if addon_slug:
addon = get_object_or_404(Addon, slug=addon_slug)
result = upload_validation_context(request, upload, addon=addon)
plat_exclude = []
if result['validation']:
try:
pkg = parse_addon(upload, addon=addon)
except django_forms.ValidationError, exc:
errors_before = result['validation'].get('errors', 0)
# FIXME: This doesn't guard against client-side
# tinkering.
for i, msg in enumerate(exc.messages):
# Simulate a validation error so the UI displays
# it as such
result['validation']['messages'].insert(
i, {'type': 'error',
'message': msg, 'tier': 1,
'fatal': True})
result['validation']['errors'] += 1
if not errors_before:
return json_view.error(make_validation_result(result))
else:
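            # Derive the platforms this package can target from its declared
            # applications; everything else ends up in 'platforms_to_exclude'.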
app_ids = set([a.id for a in pkg.get('apps', [])])
supported_platforms = []
for app in (amo.MOBILE, amo.ANDROID):
if app.id in app_ids:
supported_platforms.extend(amo.MOBILE_PLATFORMS.keys())
app_ids.remove(app.id)
if len(app_ids):
# Targets any other non-mobile app:
supported_platforms.extend(amo.DESKTOP_PLATFORMS.keys())
s = amo.SUPPORTED_PLATFORMS.keys()
plat_exclude = set(s) - set(supported_platforms)
plat_exclude = [str(p) for p in plat_exclude]
result['platforms_to_exclude'] = plat_exclude
return result
def upload_validation_context(request, upload, addon_slug=None, addon=None,
url=None):
if addon_slug and not addon:
addon = get_object_or_404(Addon, slug=addon_slug)
if not settings.VALIDATE_ADDONS:
upload.task_error = ''
upload.validation = json.dumps({'errors': 0, 'messages': [],
'metadata': {}, 'notices': 0,
'warnings': 0})
upload.save()
validation = json.loads(upload.validation) if upload.validation else ""
if not url:
if addon:
url = reverse('devhub.upload_detail_for_addon',
args=[addon.slug, upload.uuid])
else:
url = reverse('devhub.upload_detail', args=[upload.uuid, 'json'])
full_report_url = reverse('devhub.upload_detail', args=[upload.uuid])
return make_validation_result(dict(upload=upload.uuid,
validation=validation,
error=upload.task_error, url=url,
full_report_url=full_report_url),
is_compatibility=upload.compat_with_app)
@login_required
def upload_detail(request, uuid, format='html'):
upload = get_object_or_404(FileUpload, uuid=uuid)
if format == 'json' or request.is_ajax():
return json_upload_detail(request, upload)
validate_url = reverse('devhub.standalone_upload_detail',
args=[upload.uuid])
if upload.compat_with_app:
return _compat_result(request, validate_url,
upload.compat_with_app,
upload.compat_with_appver)
return render(request, 'devhub/validation.html',
dict(validate_url=validate_url, filename=upload.name,
timestamp=upload.created))
class AddonDependencySearch(BaseAjaxSearch):
# No personas. No webapps.
types = [amo.ADDON_ANY, amo.ADDON_EXTENSION, amo.ADDON_THEME,
amo.ADDON_DICT, amo.ADDON_SEARCH, amo.ADDON_LPAPP]
class AppDependencySearch(BaseAjaxSearch):
# Only webapps.
types = [amo.ADDON_WEBAPP]
@dev_required
@json_view
def ajax_dependencies(request, addon_id, addon):
s = AppDependencySearch if addon.is_webapp() else AddonDependencySearch
return s(request, excluded_ids=[addon_id]).items
@dev_required(webapp=True)
def addons_section(request, addon_id, addon, section, editable=False,
webapp=False):
basic = addon_forms.AppFormBasic if webapp else addon_forms.AddonFormBasic
models = {'basic': basic,
'media': addon_forms.AddonFormMedia,
'details': addon_forms.AddonFormDetails,
'support': addon_forms.AddonFormSupport,
'technical': addon_forms.AddonFormTechnical,
'admin': forms.AdminForm}
if section not in models:
raise http.Http404()
tags, previews, restricted_tags = [], [], []
cat_form = dependency_form = None
if section == 'basic':
tags = addon.tags.not_blacklisted().values_list('tag_text', flat=True)
cat_form = addon_forms.CategoryFormSet(request.POST or None,
addon=addon, request=request)
restricted_tags = addon.tags.filter(restricted=True)
elif section == 'media':
previews = forms.PreviewFormSet(request.POST or None,
prefix='files', queryset=addon.previews.all())
elif section == 'technical':
if not webapp:
dependency_form = forms.DependencyFormSet(request.POST or None,
queryset=addon.addons_dependencies.all(), addon=addon,
prefix='dependencies')
    # Capture the slug before the form replaces it with the submitted data.
valid_slug = addon.slug
if editable:
if request.method == 'POST':
if section == 'license':
form = models[section](request.POST)
else:
form = models[section](request.POST, request.FILES,
instance=addon, request=request)
if form.is_valid() and (not previews or previews.is_valid()):
addon = form.save(addon)
if previews:
for preview in previews.forms:
preview.save(addon)
editable = False
if section == 'media':
amo.log(amo.LOG.CHANGE_ICON, addon)
else:
amo.log(amo.LOG.EDIT_PROPERTIES, addon)
valid_slug = addon.slug
if cat_form:
if cat_form.is_valid():
cat_form.save()
addon.save()
else:
editable = True
if dependency_form:
if dependency_form.is_valid():
dependency_form.save()
else:
editable = True
else:
if section == 'license':
form = models[section]()
else:
form = models[section](instance=addon, request=request)
else:
form = False
url_prefix = 'apps' if webapp else 'addons'
data = {'addon': addon,
'webapp': webapp,
'url_prefix': url_prefix,
'form': form,
'editable': editable,
'tags': tags,
'restricted_tags': restricted_tags,
'cat_form': cat_form,
'preview_form': previews,
'dependency_form': dependency_form,
'valid_slug': valid_slug}
return render(request, 'devhub/addons/edit/%s.html' % section, data)
@never_cache
@dev_required(theme=True)
@json_view
def image_status(request, addon_id, addon, theme=False):
# Default icon needs no checking.
if not addon.icon_type or addon.icon_type.split('/')[0] == 'icon':
icons = True
# Persona icon is handled differently.
elif addon.type == amo.ADDON_PERSONA:
icons = True
else:
icons = storage.exists(os.path.join(addon.get_icon_dir(),
'%s-32.png' % addon.id))
previews = all(storage.exists(p.thumbnail_path)
for p in addon.previews.all())
return {'overall': icons and previews,
'icons': icons,
'previews': previews}
@json_view
def ajax_upload_image(request, upload_type, addon_id=None):
errors = []
upload_hash = ''
if 'upload_image' in request.FILES:
upload_preview = request.FILES['upload_image']
upload_preview.seek(0)
upload_hash = uuid.uuid4().hex
loc = os.path.join(settings.TMP_PATH, upload_type, upload_hash)
with storage.open(loc, 'wb') as fd:
for chunk in upload_preview:
fd.write(chunk)
is_icon = upload_type == 'icon'
is_persona = upload_type.startswith('persona_')
check = amo.utils.ImageCheck(upload_preview)
if (not check.is_image() or
upload_preview.content_type not in amo.IMG_TYPES):
if is_icon:
errors.append(_('Icons must be either PNG or JPG.'))
else:
errors.append(_('Images must be either PNG or JPG.'))
if check.is_animated():
if is_icon:
errors.append(_('Icons cannot be animated.'))
else:
errors.append(_('Images cannot be animated.'))
max_size = None
if is_icon:
max_size = settings.MAX_ICON_UPLOAD_SIZE
if is_persona:
max_size = settings.MAX_PERSONA_UPLOAD_SIZE
if max_size and upload_preview.size > max_size:
if is_icon:
errors.append(_('Please use images smaller than %dMB.') % (
max_size / 1024 / 1024 - 1))
if is_persona:
errors.append(_('Images cannot be larger than %dKB.') % (
max_size / 1024))
if check.is_image() and is_persona:
persona, img_type = upload_type.split('_') # 'header' or 'footer'
expected_size = amo.PERSONA_IMAGE_SIZES.get(img_type)[1]
with storage.open(loc, 'rb') as fp:
actual_size = Image.open(fp).size
if actual_size != expected_size:
# L10n: {0} is an image width (in pixels), {1} is a height.
errors.append(_('Image must be exactly {0} pixels wide '
'and {1} pixels tall.')
.format(expected_size[0], expected_size[1]))
else:
errors.append(_('There was an error uploading your preview.'))
if errors:
upload_hash = ''
return {'upload_hash': upload_hash, 'errors': errors}
@dev_required
def upload_image(request, addon_id, addon, upload_type):
return ajax_upload_image(request, upload_type)
@dev_required
def version_edit(request, addon_id, addon, version_id):
version = get_object_or_404(Version, pk=version_id, addon=addon)
version_form = forms.VersionForm(request.POST or None, instance=version)
new_file_form = forms.NewFileForm(request.POST or None,
addon=addon, version=version,
request=request)
file_form = forms.FileFormSet(request.POST or None, prefix='files',
queryset=version.files.all())
file_history = _get_file_history(version)
data = {'version_form': version_form, 'file_form': file_form}
is_admin = acl.action_allowed(request, 'ReviewerAdminTools', 'View')
if addon.accepts_compatible_apps():
# We should be in no-caching land but this one stays cached for some
# reason.
qs = version.apps.all().no_cache()
compat_form = forms.CompatFormSet(request.POST or None, queryset=qs)
data['compat_form'] = compat_form
if (request.method == 'POST' and
all([form.is_valid() for form in data.values()])):
data['version_form'].save()
data['file_form'].save()
for deleted in data['file_form'].deleted_forms:
file = deleted.cleaned_data['id']
amo.log(amo.LOG.DELETE_FILE_FROM_VERSION,
file.filename, file.version, addon)
if 'compat_form' in data:
for compat in data['compat_form'].save(commit=False):
compat.version = version
compat.save()
for form in data['compat_form'].forms:
if (isinstance(form, forms.CompatForm) and
'max' in form.changed_data):
_log_max_version_change(addon, version, form.instance)
messages.success(request, _('Changes successfully saved.'))
return redirect('devhub.versions.edit', addon.slug, version_id)
data.update(addon=addon, version=version, new_file_form=new_file_form,
file_history=file_history, is_admin=is_admin)
return render(request, 'devhub/versions/edit.html', data)
def _log_max_version_change(addon, version, appversion):
details = {'version': version.version,
'target': appversion.version.version,
'application': appversion.application.pk}
amo.log(amo.LOG.MAX_APPVERSION_UPDATED,
addon, version, details=details)
def _get_file_history(version):
file_ids = [f.id for f in version.all_files]
addon = version.addon
file_history = (ActivityLog.objects.for_addons(addon)
.filter(action__in=amo.LOG_REVIEW_QUEUE))
files = dict([(fid, []) for fid in file_ids])
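    # Attach each review-queue log entry to every file it mentions, giving a
    # per-file review history for the version edit page.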
for log in file_history:
details = log.details
current_file_ids = details["files"] if 'files' in details else []
for fid in current_file_ids:
if fid in file_ids:
files[fid].append(log)
return files
@dev_required
@post_required
@transaction.commit_on_success
def version_delete(request, addon_id, addon):
version_id = request.POST.get('version_id')
version = get_object_or_404(Version, pk=version_id, addon=addon)
if 'disable_version' in request.POST:
messages.success(request, _('Version %s disabled.') % version.version)
version.files.update(status=amo.STATUS_DISABLED)
else:
messages.success(request, _('Version %s deleted.') % version.version)
version.delete()
return redirect(addon.get_dev_url('versions'))
def check_validation_override(request, form, addon, version):
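    # If an admin ticked the validation-override box, escalate the upload for
    # super-review with a note that validation results may be incomplete.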
if version and form.cleaned_data.get('admin_override_validation'):
helper = ReviewHelper(request=request, addon=addon, version=version)
helper.set_data(
dict(operating_systems='', applications='',
comments=_(u'This upload has failed validation, and may '
u'lack complete validation results. Please '
u'take due care when reviewing it.')))
helper.actions['super']['method']()
@json_view
@dev_required
@post_required
def version_add(request, addon_id, addon):
form = forms.NewVersionForm(request.POST, addon=addon, request=request)
if form.is_valid():
pl = (list(form.cleaned_data['desktop_platforms']) +
list(form.cleaned_data['mobile_platforms']))
v = Version.from_upload(form.cleaned_data['upload'], addon, pl)
log.info('Version created: %s for: %s' %
(v.pk, form.cleaned_data['upload']))
check_validation_override(request, form, addon, v)
if (addon.status == amo.STATUS_NULL and
form.cleaned_data['nomination_type']):
addon.update(status=form.cleaned_data['nomination_type'])
url = reverse('devhub.versions.edit', args=[addon.slug, str(v.id)])
return dict(url=url)
else:
return json_view.error(form.errors)
@json_view
@dev_required
@post_required
def version_add_file(request, addon_id, addon, version_id):
version = get_object_or_404(Version, pk=version_id, addon=addon)
form = forms.NewFileForm(request.POST, addon=addon, version=version,
request=request)
if not form.is_valid():
return json_view.error(form.errors)
upload = form.cleaned_data['upload']
new_file = File.from_upload(upload, version, form.cleaned_data['platform'],
parse_addon(upload, addon))
storage.delete(upload.path)
check_validation_override(request, form, addon, new_file.version)
file_form = forms.FileFormSet(prefix='files', queryset=version.files.all())
form = [f for f in file_form.forms if f.instance == new_file]
return render(request, 'devhub/includes/version_file.html',
{'form': form[0], 'addon': addon})
@dev_required(webapp=True)
def version_list(request, addon_id, addon, webapp=False):
qs = addon.versions.order_by('-created').transform(Version.transformer)
versions = amo.utils.paginate(request, qs)
new_file_form = forms.NewVersionForm(None, addon=addon, request=request)
is_admin = acl.action_allowed(request, 'ReviewerAdminTools', 'View')
data = {'addon': addon,
'webapp': webapp,
'versions': versions,
'new_file_form': new_file_form,
'position': get_position(addon),
'timestamp': int(time.time()),
'is_admin': is_admin}
return render(request, 'devhub/versions/list.html', data)
@dev_required
def version_bounce(request, addon_id, addon, version):
# Use filter since there could be dupes.
vs = (Version.objects.filter(version=version, addon=addon)
.order_by('-created'))
if vs:
return redirect('devhub.versions.edit', addon.slug, vs[0].id)
else:
raise http.Http404()
@json_view
@dev_required
def version_stats(request, addon_id, addon):
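    # Build {version id: {'id', 'version', 'reviews', 'files'}} with review
    # and file counts per version of this add-on.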
qs = Version.objects.filter(addon=addon)
reviews = (qs.annotate(reviews=Count('reviews'))
.values('id', 'version', 'reviews'))
d = dict((v['id'], v) for v in reviews)
files = qs.annotate(files=Count('files')).values_list('id', 'files')
for id, files in files:
d[id]['files'] = files
return d
Step = collections.namedtuple('Step', 'current max')
def submit_step(outer_step):
"""Wraps the function with a decorator that bounces to the right step."""
def decorator(f):
@functools.wraps(f)
def wrapper(request, *args, **kw):
step = outer_step
webapp = kw.get('webapp', False)
if webapp and step == 7:
# decorator calls this step 7, but it's step 5 for apps
step = 5
max_step = 5 if webapp else 7
# We only bounce on pages with an addon id.
if 'addon' in kw:
addon = kw['addon']
on_step = SubmitStep.objects.filter(addon=addon)
if on_step:
max_step = on_step[0].step
if max_step < step:
# The step was too high, so bounce to the saved step.
return redirect(_step_url(max_step, webapp),
addon.slug)
elif step != max_step:
# We couldn't find a step, so we must be done.
return redirect(_step_url(7, webapp), addon.slug)
kw['step'] = Step(step, max_step)
return f(request, *args, **kw)
# Tell @dev_required that this is a function in the submit flow so it
# doesn't try to redirect into the submit flow.
wrapper.submitting = True
return wrapper
return decorator
def _step_url(step, is_webapp):
url_base = 'devhub.submit%s' % ('_apps' if is_webapp else '')
if is_webapp and str(step).isdigit() and step > 5:
step = 5
return '%s.%s' % (url_base, step)
@login_required
@submit_step(1)
def submit(request, step, webapp=False):
if request.method == 'POST':
response = redirect(_step_url(2, webapp))
response.set_cookie(DEV_AGREEMENT_COOKIE)
return response
return render(request, 'devhub/addons/submit/start.html',
{'step': step, 'webapp': webapp})
@login_required
@submit_step(2)
def submit_addon(request, step, webapp=False):
if DEV_AGREEMENT_COOKIE not in request.COOKIES:
return redirect(_step_url(1, webapp))
NewItem = forms.NewWebappForm if webapp else forms.NewAddonForm
form = NewItem(request.POST or None, request=request)
if request.method == 'POST':
if form.is_valid():
data = form.cleaned_data
if webapp:
p = [Platform.objects.get(id=amo.PLATFORM_ALL.id)]
else:
p = (list(data.get('desktop_platforms', [])) +
list(data.get('mobile_platforms', [])))
addon = Addon.from_upload(data['upload'], p)
if webapp:
tasks.fetch_icon.delay(addon)
AddonUser(addon=addon, user=request.amo_user).save()
SubmitStep.objects.create(addon=addon, step=3)
check_validation_override(request, form, addon,
addon.current_version)
return redirect(_step_url(3, webapp), addon.slug)
template = 'upload_webapp.html' if webapp else 'upload.html'
is_admin = acl.action_allowed(request, 'ReviewerAdminTools', 'View')
return render(request, 'devhub/addons/submit/%s' % template,
{'step': step, 'webapp': webapp, 'new_addon_form': form,
'is_admin': is_admin})
@dev_required(webapp=True)
@submit_step(3)
def submit_describe(request, addon_id, addon, step, webapp=False):
form_cls = forms.Step3WebappForm if addon.is_webapp() else forms.Step3Form
form = form_cls(request.POST or None, instance=addon, request=request)
cat_form = addon_forms.CategoryFormSet(request.POST or None, addon=addon,
request=request)
if request.method == 'POST' and form.is_valid() and cat_form.is_valid():
addon = form.save(addon)
cat_form.save()
SubmitStep.objects.filter(addon=addon).update(step=4)
return redirect(_step_url(4, webapp), addon.slug)
return render(request, 'devhub/addons/submit/describe.html',
{'form': form, 'cat_form': cat_form, 'addon': addon,
'step': step, 'webapp': addon.is_webapp()})
@dev_required(webapp=True)
@submit_step(4)
def submit_media(request, addon_id, addon, step, webapp=False):
form_icon = addon_forms.AddonFormMedia(request.POST or None,
request.FILES or None, instance=addon, request=request)
form_previews = forms.PreviewFormSet(request.POST or None,
prefix='files', queryset=addon.previews.all())
if (request.method == 'POST' and
form_icon.is_valid() and form_previews.is_valid()):
addon = form_icon.save(addon)
for preview in form_previews.forms:
preview.save(addon)
SubmitStep.objects.filter(addon=addon).update(step=5)
        # Special handling for webapps: this jumps straight to the done step.
if addon.is_webapp():
addon.update(status=amo.WEBAPPS_UNREVIEWED_STATUS)
SubmitStep.objects.filter(addon=addon).delete()
signals.submission_done.send(sender=addon)
return redirect(_step_url(5, webapp), addon.slug)
return render(request, 'devhub/addons/submit/media.html',
{'form': form_icon, 'addon': addon, 'step': step,
'preview_form': form_previews, 'webapp': addon.is_webapp()})
@dev_required(webapp=True)
@submit_step(5)
def submit_license(request, addon_id, addon, step, webapp=False):
fs, ctx = [], {}
# Versions.
license_form = forms.LicenseForm(request.POST or None, addon=addon)
if not addon.is_webapp():
ctx.update(license_form.get_context())
fs.append(ctx['license_form'])
# Policy.
policy_form = forms.PolicyForm(request.POST or None, addon=addon)
fs.append(policy_form)
if request.method == 'POST' and all([form.is_valid() for form in fs]):
if license_form in fs:
license_form.save(log=False)
policy_form.save()
SubmitStep.objects.filter(addon=addon).update(step=6)
return redirect('devhub.submit.6', addon.slug)
ctx.update(addon=addon, policy_form=policy_form, step=step,
webapp=addon.is_webapp())
return render(request, 'devhub/addons/submit/license.html', ctx)
@dev_required
@submit_step(6)
def submit_select_review(request, addon_id, addon, step):
review_type_form = forms.ReviewTypeForm(request.POST or None)
updated_status = None
if request.method == 'POST' and review_type_form.is_valid():
updated_status = review_type_form.cleaned_data['review_type']
if updated_status:
addon.update(status=updated_status)
SubmitStep.objects.filter(addon=addon).delete()
signals.submission_done.send(sender=addon)
return redirect('devhub.submit.7', addon.slug)
return render(request, 'devhub/addons/submit/select-review.html',
{'addon': addon, 'review_type_form': review_type_form,
'step': step})
@dev_required(webapp=True)
@submit_step(7)
def submit_done(request, addon_id, addon, step, webapp=False):
# Bounce to the versions page if they don't have any versions.
if not addon.versions.exists():
return redirect(addon.get_dev_url('versions'))
sp = addon.current_version.supported_platforms
is_platform_specific = sp != [amo.PLATFORM_ALL]
try:
author = addon.authors.all()[0]
except IndexError:
# This should never happen.
author = None
if author:
submitted_addons = (author.addons
.exclude(type=amo.ADDON_WEBAPP)
.exclude(status=amo.STATUS_NULL).count())
if submitted_addons == 1:
# We can use locale-prefixed URLs because the submitter probably
# speaks the same language by the time he/she reads the email.
context = {
'app': unicode(request.APP.pretty),
'detail_url': absolutify(addon.get_url_path()),
'version_url': absolutify(addon.get_dev_url('versions')),
'edit_url': absolutify(addon.get_dev_url('edit')),
'full_review': addon.status == amo.STATUS_NOMINATED
}
tasks.send_welcome_email.delay(addon.id, [author.email], context)
return render(request, 'devhub/addons/submit/done.html',
{'addon': addon, 'step': step, 'webapp': addon.is_webapp(),
'is_platform_specific': is_platform_specific})
@dev_required
def submit_resume(request, addon_id, addon):
step = SubmitStep.objects.filter(addon=addon)
return _resume(addon, step)
def _resume(addon, step):
if step:
return redirect(_step_url(step[0].step, addon.is_webapp()), addon.slug)
return redirect(addon.get_dev_url('versions'))
@login_required
@dev_required
def submit_bump(request, addon_id, addon, webapp=False):
if not acl.action_allowed(request, 'Admin', 'EditSubmitStep'):
raise PermissionDenied
step = SubmitStep.objects.filter(addon=addon)
step = step[0] if step else None
if request.method == 'POST' and request.POST.get('step'):
new_step = request.POST['step']
if step:
step.step = new_step
else:
step = SubmitStep(addon=addon, step=new_step)
step.save()
return redirect(_step_url('bump', webapp), addon.slug)
return render(request, 'devhub/addons/submit/bump.html',
dict(addon=addon, step=step))
@login_required
def submit_theme(request):
data = {}
if request.method == 'POST':
data = request.POST.dict()
if 'unsaved_data' in request.session and data['unsaved_data'] == '{}':
            # Restore unsaved data on second invalid POST.
data['unsaved_data'] = request.session['unsaved_data']
form = addon_forms.ThemeForm(data=data or None,
files=request.FILES or None,
request=request)
if request.method == 'POST':
if form.is_valid():
addon = form.save()
return redirect('devhub.themes.submit.done', addon.slug)
else:
            # Store unsaved data in request.session since it gets lost on a
            # second invalid POST.
messages.error(request, _('Please check the form for errors.'))
request.session['unsaved_data'] = data['unsaved_data']
return render(request, 'devhub/personas/submit.html', dict(form=form))
@dev_required(theme=True)
def submit_theme_done(request, addon_id, addon, theme):
if addon.is_public():
return redirect(addon.get_url_path())
return render(request, 'devhub/personas/submit_done.html',
dict(addon=addon))
@dev_required(theme=True)
@post_required
def remove_locale(request, addon_id, addon, theme):
POST = request.POST
if 'locale' in POST and POST['locale'] != addon.default_locale:
addon.remove_locale(POST['locale'])
return http.HttpResponse()
return http.HttpResponseBadRequest()
# You can only request one of the new review tracks.
REQUEST_REVIEW = (amo.STATUS_PUBLIC, amo.STATUS_LITE)
@dev_required
@post_required
def request_review(request, addon_id, addon, status):
status_req = int(status)
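    # Map the requested review track onto the next status: a full-review
    # request nominates the add-on (keeping LITE if it already has it), while
    # a preliminary-review request moves it to LITE (if already reviewed) or
    # UNREVIEWED.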
if status_req not in addon.can_request_review():
return http.HttpResponseBadRequest()
elif status_req == amo.STATUS_PUBLIC:
if addon.status == amo.STATUS_LITE:
new_status = amo.STATUS_LITE_AND_NOMINATED
else:
new_status = amo.STATUS_NOMINATED
elif status_req == amo.STATUS_LITE:
if addon.status in (amo.STATUS_PUBLIC, amo.STATUS_LITE_AND_NOMINATED):
new_status = amo.STATUS_LITE
else:
new_status = amo.STATUS_UNREVIEWED
addon.update(status=new_status)
msg = {amo.STATUS_LITE: _('Preliminary Review Requested.'),
amo.STATUS_PUBLIC: _('Full Review Requested.')}
messages.success(request, msg[status_req])
amo.log(amo.LOG.CHANGE_STATUS, addon.get_status_display(), addon)
return redirect(addon.get_dev_url('versions'))
# TODO(kumar): Remove when the editor tools are in zamboni.
def validator_redirect(request, version_id):
v = get_object_or_404(Version, id=version_id)
return redirect('devhub.addons.versions', v.addon_id, permanent=True)
@post_required
@addon_view
def admin(request, addon):
if not acl.action_allowed(request, 'Addons', 'Configure'):
raise PermissionDenied
form = forms.AdminForm(request, request.POST or None, instance=addon)
if form.is_valid():
form.save()
return render(request, 'devhub/addons/edit/admin.html',
{'addon': addon, 'admin_form': form})
def docs(request, doc_name=None, doc_page=None):
filename = ''
all_docs = {'getting-started': [], 'reference': [],
'policies': ['submission', 'reviews', 'maintenance',
'recommended', 'agreement', 'contact'],
'case-studies': ['cooliris', 'stumbleupon',
'download-statusbar'],
'how-to': ['getting-started', 'extension-development',
'thunderbird-mobile', 'theme-development',
'other-addons'],
'themes': ['faq']}
if doc_name and doc_name in all_docs:
filename = '%s.html' % doc_name
if doc_page and doc_page in all_docs[doc_name]:
filename = '%s-%s.html' % (doc_name, doc_page)
if not filename:
return redirect('devhub.index')
return render(request, 'devhub/docs/%s' % filename)
def builder(request):
return render(request, 'devhub/builder.html')
@json_view
@post_required
def check_paypal(request):
if 'email' not in request.POST:
raise http.Http404()
check = Check(paypal_id=request.POST['email'])
check.all()
# TODO(andym): we will want to l10n these messages at some point and
# we'll need to change this to give more detail back to the user than
# a tooltip at a later date.
return {'valid': check.passed, 'message': ' '.join(check.errors)}
def search(request):
query = request.GET.get('q', '')
return render(request, 'devhub/devhub_search.html', {'query': query})
| {
"content_hash": "d08f05052f7eeda7ee9d8e9e7d2eaa7d",
"timestamp": "",
"source": "github",
"line_count": 1818,
"max_line_length": 79,
"avg_line_length": 38.200770077007704,
"alnum_prop": 0.5979207763970683,
"repo_name": "robhudson/zamboni",
"id": "731dfbee3e0473e99d9ed58f534b798f29b96250",
"size": "69449",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apps/devhub/views.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "4145"
},
{
"name": "CSS",
"bytes": "900136"
},
{
"name": "JavaScript",
"bytes": "1700376"
},
{
"name": "Puppet",
"bytes": "13808"
},
{
"name": "Python",
"bytes": "6317591"
},
{
"name": "Shell",
"bytes": "20633"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import re
from django.core.exceptions import FieldDoesNotExist
from django.db.models.constants import LOOKUP_SEP
from django.db.models.expressions import Col, Expression
from django.db.models.lookups import Lookup
from django.utils import six
gis_lookups = {}
class GISLookup(Lookup):
sql_template = None
transform_func = None
distance = False
def __init__(self, *args, **kwargs):
super(GISLookup, self).__init__(*args, **kwargs)
self.template_params = {}
@classmethod
def _check_geo_field(cls, opts, lookup):
"""
Utility for checking the given lookup with the given model options.
The lookup is a string either specifying the geographic field, e.g.
        'point', 'the_geom', or a related lookup on a geographic field like
'address__point'.
If a GeometryField exists according to the given lookup on the model
        options, it will be returned. Otherwise, returns False.
"""
from django.contrib.gis.db.models.fields import GeometryField
# This takes into account the situation where the lookup is a
# lookup to a related geographic field, e.g., 'address__point'.
field_list = lookup.split(LOOKUP_SEP)
# Reversing so list operates like a queue of related lookups,
# and popping the top lookup.
field_list.reverse()
fld_name = field_list.pop()
try:
geo_fld = opts.get_field(fld_name)
# If the field list is still around, then it means that the
# lookup was for a geometry field across a relationship --
# thus we keep on getting the related model options and the
# model field associated with the next field in the list
# until there's no more left.
while len(field_list):
opts = geo_fld.remote_field.model._meta
geo_fld = opts.get_field(field_list.pop())
except (FieldDoesNotExist, AttributeError):
return False
# Finally, make sure we got a Geographic field and return.
if isinstance(geo_fld, GeometryField):
return geo_fld
else:
return False
def get_db_prep_lookup(self, value, connection):
# get_db_prep_lookup is called by process_rhs from super class
if isinstance(value, (tuple, list)):
# First param is assumed to be the geometric object
params = [connection.ops.Adapter(value[0])] + list(value)[1:]
else:
params = [connection.ops.Adapter(value)]
return ('%s', params)
def process_rhs(self, compiler, connection):
rhs, rhs_params = super(GISLookup, self).process_rhs(compiler, connection)
if hasattr(self.rhs, '_as_sql'):
# If rhs is some QuerySet, don't touch it
return rhs, rhs_params
geom = self.rhs
if isinstance(self.rhs, Col):
# Make sure the F Expression destination field exists, and
# set an `srid` attribute with the same as that of the
# destination.
geo_fld = self.rhs.output_field
if not hasattr(geo_fld, 'srid'):
raise ValueError('No geographic field found in expression.')
self.rhs.srid = geo_fld.srid
elif isinstance(self.rhs, Expression):
raise ValueError('Complex expressions not supported for GeometryField')
elif isinstance(self.rhs, (list, tuple)):
geom = self.rhs[0]
rhs = connection.ops.get_geom_placeholder(self.lhs.output_field, geom, compiler)
return rhs, rhs_params
def get_rhs_op(self, connection, rhs):
# Unlike BuiltinLookup, the GIS get_rhs_op() implementation should return
# an object (SpatialOperator) with an as_sql() method to allow for more
# complex computations (where the lhs part can be mixed in).
return connection.ops.gis_operators[self.lookup_name]
def as_sql(self, compiler, connection):
lhs_sql, sql_params = self.process_lhs(compiler, connection)
rhs_sql, rhs_params = self.process_rhs(compiler, connection)
sql_params.extend(rhs_params)
template_params = {'lhs': lhs_sql, 'rhs': rhs_sql, 'value': '%s'}
template_params.update(self.template_params)
rhs_op = self.get_rhs_op(connection, rhs_sql)
return rhs_op.as_sql(connection, self, template_params, sql_params)
# ------------------
# Geometry operators
# ------------------
class OverlapsLeftLookup(GISLookup):
"""
The overlaps_left operator returns true if A's bounding box overlaps or is to the
left of B's bounding box.
"""
lookup_name = 'overlaps_left'
gis_lookups['overlaps_left'] = OverlapsLeftLookup
class OverlapsRightLookup(GISLookup):
"""
The 'overlaps_right' operator returns true if A's bounding box overlaps or is to the
right of B's bounding box.
"""
lookup_name = 'overlaps_right'
gis_lookups['overlaps_right'] = OverlapsRightLookup
class OverlapsBelowLookup(GISLookup):
"""
The 'overlaps_below' operator returns true if A's bounding box overlaps or is below
B's bounding box.
"""
lookup_name = 'overlaps_below'
gis_lookups['overlaps_below'] = OverlapsBelowLookup
class OverlapsAboveLookup(GISLookup):
"""
The 'overlaps_above' operator returns true if A's bounding box overlaps or is above
B's bounding box.
"""
lookup_name = 'overlaps_above'
gis_lookups['overlaps_above'] = OverlapsAboveLookup
class LeftLookup(GISLookup):
"""
The 'left' operator returns true if A's bounding box is strictly to the left
of B's bounding box.
"""
lookup_name = 'left'
gis_lookups['left'] = LeftLookup
class RightLookup(GISLookup):
"""
The 'right' operator returns true if A's bounding box is strictly to the right
of B's bounding box.
"""
lookup_name = 'right'
gis_lookups['right'] = RightLookup
class StrictlyBelowLookup(GISLookup):
"""
The 'strictly_below' operator returns true if A's bounding box is strictly below B's
bounding box.
"""
lookup_name = 'strictly_below'
gis_lookups['strictly_below'] = StrictlyBelowLookup
class StrictlyAboveLookup(GISLookup):
"""
The 'strictly_above' operator returns true if A's bounding box is strictly above B's
bounding box.
"""
lookup_name = 'strictly_above'
gis_lookups['strictly_above'] = StrictlyAboveLookup
class SameAsLookup(GISLookup):
"""
The "~=" operator is the "same as" operator. It tests actual geometric
equality of two features. So if A and B are the same feature,
vertex-by-vertex, the operator returns true.
"""
lookup_name = 'same_as'
gis_lookups['same_as'] = SameAsLookup
class ExactLookup(SameAsLookup):
# Alias of same_as
lookup_name = 'exact'
gis_lookups['exact'] = ExactLookup
class BBContainsLookup(GISLookup):
"""
The 'bbcontains' operator returns true if A's bounding box completely contains
    B's bounding box.
"""
lookup_name = 'bbcontains'
gis_lookups['bbcontains'] = BBContainsLookup
class BBOverlapsLookup(GISLookup):
"""
The 'bboverlaps' operator returns true if A's bounding box overlaps B's bounding box.
"""
lookup_name = 'bboverlaps'
gis_lookups['bboverlaps'] = BBOverlapsLookup
class ContainedLookup(GISLookup):
"""
The 'contained' operator returns true if A's bounding box is completely contained
by B's bounding box.
"""
lookup_name = 'contained'
gis_lookups['contained'] = ContainedLookup
# ------------------
# Geometry functions
# ------------------
class ContainsLookup(GISLookup):
lookup_name = 'contains'
gis_lookups['contains'] = ContainsLookup
class ContainsProperlyLookup(GISLookup):
lookup_name = 'contains_properly'
gis_lookups['contains_properly'] = ContainsProperlyLookup
class CoveredByLookup(GISLookup):
lookup_name = 'coveredby'
gis_lookups['coveredby'] = CoveredByLookup
class CoversLookup(GISLookup):
lookup_name = 'covers'
gis_lookups['covers'] = CoversLookup
class CrossesLookup(GISLookup):
lookup_name = 'crosses'
gis_lookups['crosses'] = CrossesLookup
class DisjointLookup(GISLookup):
lookup_name = 'disjoint'
gis_lookups['disjoint'] = DisjointLookup
class EqualsLookup(GISLookup):
lookup_name = 'equals'
gis_lookups['equals'] = EqualsLookup
class IntersectsLookup(GISLookup):
lookup_name = 'intersects'
gis_lookups['intersects'] = IntersectsLookup
class OverlapsLookup(GISLookup):
lookup_name = 'overlaps'
gis_lookups['overlaps'] = OverlapsLookup
class RelateLookup(GISLookup):
lookup_name = 'relate'
sql_template = '%(func)s(%(lhs)s, %(rhs)s, %%s)'
pattern_regex = re.compile(r'^[012TF\*]{9}$')
def get_db_prep_lookup(self, value, connection):
if len(value) != 2:
raise ValueError('relate must be passed a two-tuple')
# Check the pattern argument
backend_op = connection.ops.gis_operators[self.lookup_name]
if hasattr(backend_op, 'check_relate_argument'):
backend_op.check_relate_argument(value[1])
else:
pattern = value[1]
if not isinstance(pattern, six.string_types) or not self.pattern_regex.match(pattern):
raise ValueError('Invalid intersection matrix pattern "%s".' % pattern)
return super(RelateLookup, self).get_db_prep_lookup(value, connection)
gis_lookups['relate'] = RelateLookup
class TouchesLookup(GISLookup):
lookup_name = 'touches'
gis_lookups['touches'] = TouchesLookup
class WithinLookup(GISLookup):
lookup_name = 'within'
gis_lookups['within'] = WithinLookup
class DistanceLookupBase(GISLookup):
distance = True
sql_template = '%(func)s(%(lhs)s, %(rhs)s) %(op)s %(value)s'
def process_rhs(self, compiler, connection):
if not isinstance(self.rhs, (tuple, list)) or not 2 <= len(self.rhs) <= 3:
raise ValueError("2 or 3-element tuple required for '%s' lookup." % self.lookup_name)
params = [connection.ops.Adapter(self.rhs[0])]
# Getting the distance parameter in the units of the field.
dist_param = self.rhs[1]
if hasattr(dist_param, 'resolve_expression'):
dist_param = dist_param.resolve_expression(compiler.query)
sql, expr_params = compiler.compile(dist_param)
self.template_params['value'] = sql
params.extend(expr_params)
else:
params += connection.ops.get_distance(
self.lhs.output_field, (dist_param,) + self.rhs[2:],
self.lookup_name, handle_spheroid=False
)
rhs = connection.ops.get_geom_placeholder(self.lhs.output_field, params[0], compiler)
return (rhs, params)
class DWithinLookup(DistanceLookupBase):
lookup_name = 'dwithin'
sql_template = '%(func)s(%(lhs)s, %(rhs)s, %%s)'
gis_lookups['dwithin'] = DWithinLookup
class DistanceGTLookup(DistanceLookupBase):
lookup_name = 'distance_gt'
gis_lookups['distance_gt'] = DistanceGTLookup
class DistanceGTELookup(DistanceLookupBase):
lookup_name = 'distance_gte'
gis_lookups['distance_gte'] = DistanceGTELookup
class DistanceLTLookup(DistanceLookupBase):
lookup_name = 'distance_lt'
gis_lookups['distance_lt'] = DistanceLTLookup
class DistanceLTELookup(DistanceLookupBase):
lookup_name = 'distance_lte'
gis_lookups['distance_lte'] = DistanceLTELookup
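# Illustrative only (not part of this module, never called): how the lookups
# registered above are reached from the ORM. `City`, its `point` GeometryField
# and `some_geom` are hypothetical and supplied by the caller.
def _gis_lookup_demo(City, some_geom):
    from django.contrib.gis.measure import D
    return (
        City.objects.filter(point__bboverlaps=some_geom),
        City.objects.filter(point__dwithin=(some_geom, D(km=5))),
        City.objects.filter(point__relate=(some_geom, 'T*T***FF*')),
    )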
| {
"content_hash": "b435b3aa6496d1cc74121c24e947c2a6",
"timestamp": "",
"source": "github",
"line_count": 355,
"max_line_length": 98,
"avg_line_length": 33.42535211267606,
"alnum_prop": 0.6400640485420529,
"repo_name": "yephper/django",
"id": "d00d457629b4ca09dbdc67ea53b9435e9a8e03e9",
"size": "11866",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django/contrib/gis/db/models/lookups.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ASP",
"bytes": "1538"
},
{
"name": "CSS",
"bytes": "1697381"
},
{
"name": "HTML",
"bytes": "390772"
},
{
"name": "Java",
"bytes": "588"
},
{
"name": "JavaScript",
"bytes": "3172126"
},
{
"name": "Makefile",
"bytes": "134"
},
{
"name": "PHP",
"bytes": "19336"
},
{
"name": "Python",
"bytes": "13365273"
},
{
"name": "Shell",
"bytes": "837"
},
{
"name": "Smarty",
"bytes": "133"
}
],
"symlink_target": ""
} |
"""Tools for deserializing `Function`s."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import re
from tensorflow.core.framework import function_pb2
from tensorflow.python.eager import def_function
from tensorflow.python.eager import function as function_lib
from tensorflow.python.framework import func_graph as func_graph_lib
from tensorflow.python.framework import function_def_to_graph as function_def_lib
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_spec
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.saved_model import nested_structure_coder
from tensorflow.python.util import compat
from tensorflow.python.util import nest
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_inspect
def _is_tensor(t):
return isinstance(t, (ops.Tensor, resource_variable_ops.ResourceVariable))
def _call_concrete_function(function, inputs):
"""Calls a restored Function with structured inputs.
This differs from `function.__call__` in that inputs and outputs are
structured and that it casts inputs to tensors if needed.
  Note: this does not check that non-tensor inputs match. That should be
done before via `_concrete_function_callable_with`.
Args:
function: ConcreteFunction to call.
inputs: Structured inputs compatible with
`function.graph.structured_input_signature`.
Returns:
The structured function output.
"""
expected_structure = function.graph.structured_input_signature
flatten_inputs = nest.flatten_up_to(expected_structure, inputs)
tensor_inputs = []
for arg, expected in zip(flatten_inputs, nest.flatten(expected_structure)):
if isinstance(expected, tensor_spec.TensorSpec):
tensor_inputs.append(
ops.convert_to_tensor(arg, dtype_hint=expected.dtype))
result = function._call_flat(tensor_inputs, function._captured_inputs) # pylint: disable=protected-access
if isinstance(result, ops.Operation):
return None
return result
def _try_convert_to_tensor_spec(arg, dtype_hint):
"""Returns None or TensorSpec obtained if `arg` is converted to tensor."""
try:
    # Note: try conversion in a FuncGraph to avoid polluting the current context.
with func_graph_lib.FuncGraph(name="guess_conversion").as_default():
result = ops.convert_to_tensor(arg, dtype_hint=dtype_hint)
return tensor_spec.TensorSpec(shape=result.shape, dtype=result.dtype)
except (TypeError, ValueError):
return None
def _concrete_function_callable_with(function, inputs, allow_conversion):
"""Returns whether concrete `function` can be called with `inputs`."""
expected_structure = function.graph.structured_input_signature
try:
flatten_inputs = nest.flatten_up_to(expected_structure, inputs)
except (TypeError, ValueError):
return False
try:
# Verify that no input elements were dropped during flattening.
repacked = nest.pack_sequence_as(expected_structure, flatten_inputs)
# TODO(b/129422719): Namedtuple subclasses re-created through
# saved_model.load don't compare equal in type to the original in
# assert_same_structure. Fix that and we can take out check_types=False
# here.
nest.assert_same_structure(inputs, repacked, check_types=False)
except (TypeError, ValueError):
return False
for arg, expected in zip(flatten_inputs, nest.flatten(expected_structure)):
if isinstance(expected, tensor_spec.TensorSpec):
if allow_conversion:
arg = _try_convert_to_tensor_spec(arg, dtype_hint=expected.dtype)
if not _is_tensor(arg) and not isinstance(arg, tensor_spec.TensorSpec):
return False
if arg.dtype != expected.dtype:
return False
if not expected.shape.is_compatible_with(arg.shape):
return False
else:
if arg != expected:
return False
return True
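# Illustrative only (not part of the original module, never called): a small
# sketch of the dtype/shape compatibility rules applied above.
def _spec_compatibility_demo():
  expected = tensor_spec.TensorSpec(shape=[None, 3], dtype="float32")
  arg = ops.convert_to_tensor([[1.0, 2.0, 3.0]])
  # Mirrors the checks in _concrete_function_callable_with: same dtype and a
  # shape that fits the (possibly partially unknown) expected shape.
  return arg.dtype == expected.dtype and expected.shape.is_compatible_with(
      arg.shape)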
def _deserialize_function_spec_as_nonmethod(function_spec_proto, coder):
"""Deserialize a FunctionSpec object from its proto representation."""
typeless_fullargspec = coder.decode_proto(function_spec_proto.fullargspec)
# Convert a method function into a non method.
if function_spec_proto.is_method:
if not typeless_fullargspec.args:
raise NotImplementedError(
"Missing support to deserialize a method function without a named "
"'self' argument.")
args = typeless_fullargspec.args[1:]
else:
args = typeless_fullargspec.args
fullargspec = tf_inspect.FullArgSpec(
args=args,
varargs=typeless_fullargspec.varargs,
varkw=typeless_fullargspec.varkw,
defaults=typeless_fullargspec.defaults,
kwonlyargs=typeless_fullargspec.kwonlyargs,
kwonlydefaults=typeless_fullargspec.kwonlydefaults,
annotations=typeless_fullargspec.annotations)
input_signature = coder.decode_proto(function_spec_proto.input_signature)
return function_lib.FunctionSpec(fullargspec=fullargspec,
is_method=False,
args_to_prepend=[],
kwargs_to_include={},
input_signature=input_signature)
# TODO(allenl): The fact that we can't derive ConcreteFunction calling
# conventions from the serialized input spec right now is unfortunate. Merging
# these would be good, maybe by adding TensorSpec names to cache keys so renamed
# keyword arguments would yield different ConcreteFunctions.
def setup_bare_concrete_function(saved_bare_concrete_function,
concrete_functions):
"""Makes a restored bare concrete function callable."""
# Bare concrete functions accept only flat lists of Tensors with unique
# names.
concrete_function = concrete_functions[
saved_bare_concrete_function.concrete_function_name]
# pylint: disable=protected-access
concrete_function._arg_keywords = (
saved_bare_concrete_function.argument_keywords)
concrete_function._num_positional_args = (
saved_bare_concrete_function.allowed_positional_arguments)
# pylint: enable=protected-access
concrete_function.add_to_graph()
return concrete_function
class RestoredFunction(def_function.Function):
"""Wrapper class for a function that has been restored from saved state.
See `def_function.Function`.
"""
def __init__(self, python_function, name, function_spec, concrete_functions):
# TODO(mdan): We may enable autograph once exceptions are supported.
super(RestoredFunction, self).__init__(
python_function, name, autograph=False)
self.concrete_functions = concrete_functions
self._function_spec = function_spec
def _list_all_concrete_functions_for_serialization(self):
return self.concrete_functions
def _defun_with_scope(self, scope):
func = super(RestoredFunction, self)._defun_with_scope(scope)
func._function_spec = self._function_spec # pylint: disable=protected-access
return func
def recreate_function(saved_function, concrete_functions):
"""Creates a `Function` from a `SavedFunction`.
Args:
saved_function: `SavedFunction` proto.
concrete_functions: map from function name to `ConcreteFunction`.
Returns:
A `Function`.
"""
# TODO(andresp): Construct a `Function` with the cache populated
# instead of creating a new `Function` backed by a Python layer to
# glue things together. Current approach is nesting functions deeper for each
# serialization cycle.
coder = nested_structure_coder.StructureCoder()
  # Note: handling method functions is tricky since make_decorator does not
  # allow control of "ismethod". Additionally, since restored functions do
  # not behave as methods, i.e. they always use the same captured tensors
  # independent of the object they are bound to, there is little value in
  # propagating that correctly.
#
# Ideally this conversion should happen at serialization time. But since
# there are SavedModels which have "ismethod" populated and have an extra
# argument that they expect to be ignored, we do it at deserialization.
function_spec = _deserialize_function_spec_as_nonmethod(
saved_function.function_spec,
coder)
def restored_function_body(*args, **kwargs):
"""Calls a restored function."""
# This is the format of function.graph.structured_input_signature. At this
# point, the args and kwargs have already been canonicalized.
inputs = (args, kwargs)
# First try to find a concrete function that can be called without input
# conversions. This allows one to pick a more specific trace in case there
# was also a more expensive one that supported tensors.
for allow_conversion in [False, True]:
for function_name in saved_function.concrete_functions:
function = concrete_functions[function_name]
if _concrete_function_callable_with(function, inputs, allow_conversion):
return _call_concrete_function(function, inputs)
signature_descriptions = []
def _pretty_format_positional(positional):
return "Positional arguments ({} total):\n * {}".format(
len(positional),
"\n * ".join([str(a) for a in positional]))
for index, function_name in enumerate(saved_function.concrete_functions):
concrete_function = concrete_functions[function_name]
positional, keyword = concrete_function.structured_input_signature
signature_descriptions.append(
"Option {}:\n {}\n Keyword arguments: {}"
.format(index + 1, _pretty_format_positional(positional), keyword))
raise ValueError(
"Could not find matching function to call loaded from the SavedModel. "
"Got:\n {}\n Keyword arguments: {}\n\nExpected "
"these arguments to match one of the following {} option(s):\n\n{}"
.format(_pretty_format_positional(args), kwargs,
len(saved_function.concrete_functions),
"\n\n".join(signature_descriptions)))
concrete_function_objects = []
for concrete_function_name in saved_function.concrete_functions:
concrete_function_objects.append(concrete_functions[concrete_function_name])
restored_function = RestoredFunction(
restored_function_body,
restored_function_body.__name__,
function_spec,
concrete_function_objects)
return tf_decorator.make_decorator(
restored_function_body,
restored_function,
decorator_argspec=function_spec.fullargspec)
def load_function_def_library(library):
"""Load a set of functions as concrete functions without captured inputs.
Functions names are manipulated during load such that they do not overlap
with previously created ones.
Args:
library: FunctionDefLibrary proto message.
Returns:
Map of original function names in the library to instances of
`ConcreteFunction` without captured inputs.
Raises:
ValueError: if functions dependencies have a cycle.
"""
functions = {}
load_shared_name_suffix = "_load_{}".format(ops.uid())
for fdef in _sort_function_defs(library):
copy = _fix_fdef(fdef, functions, load_shared_name_suffix)
# There is no need to copy functions into the function def graph.
    # It leads to an O(n^2) increase in memory when importing functions,
    # and the extra function definitions are a no-op since they were already
    # imported as functions before (due to the topological-sort import order).
func_graph = function_def_lib.function_def_to_graph(
copy, copy_functions=False)
for dep in _list_function_deps(fdef):
functions[dep].add_to_graph(func_graph)
func = function_lib.ConcreteFunction(func_graph)
func.add_to_graph()
functions[fdef.signature.name] = func
# Also register the gradients in the current root context.
with ops.init_scope():
func._register_gradient() # pylint: disable=protected-access
return functions
def _sort_function_defs(library):
"""Return a topologic sort of FunctionDefs in a library."""
edges = collections.defaultdict(list)
in_count = collections.defaultdict(lambda: 0)
for fdef in library.function:
for dep in _list_function_deps(fdef):
edges[dep].append(fdef.signature.name)
in_count[fdef.signature.name] += 1
ready = [
fdef.signature.name
for fdef in library.function
if in_count[fdef.signature.name] == 0
]
output = []
while ready:
node = ready.pop()
output.append(node)
for dest in edges[node]:
in_count[dest] -= 1
if not in_count[dest]:
ready.append(dest)
if len(output) != len(library.function):
failed_to_resolve = sorted(set(in_count.keys()) - set(output))
raise ValueError("There is a cyclic-dependency between functions. ",
"Could not resolve %r." % (failed_to_resolve,))
reverse = {fdef.signature.name: fdef for fdef in library.function}
return [reverse[x] for x in output]
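# Illustrative only (not part of the original module, never called): the same
# Kahn-style topological sort as above, on a plain mapping from each node to
# the nodes it depends on. All names here are hypothetical.
def _toposort_demo(deps):
  in_count = {node: len(parents) for node, parents in deps.items()}
  edges = collections.defaultdict(list)
  for node, parents in deps.items():
    for parent in parents:
      edges[parent].append(node)
  ready = [node for node, count in in_count.items() if count == 0]
  output = []
  while ready:
    node = ready.pop()
    output.append(node)
    for dest in edges[node]:
      in_count[dest] -= 1
      if not in_count[dest]:
        ready.append(dest)
  # e.g. _toposort_demo({"a": [], "b": ["a"], "c": ["a", "b"]}) returns "a"
  # before "b" before "c".
  return output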
def _fix_fdef(orig_fdef, functions, shared_name_suffix):
"""Fixes a FunctionDef proto to be loaded in current context.
In particular, when loading a function library into an eager context, one
must rename the functions to avoid conflicts with existent functions.
Args:
orig_fdef: FunctionDef proto to fix. It is not modified.
functions: map from function name to a ConcreteFunction instance.
shared_name_suffix: A unique string for this load which helps to avoid
`shared_name` collisions across loads. Two functions from the same load
using the same `shared_name` still need to share, but functions from
different loads with the same `shared_name` should not.
Returns:
A fixed copy of the original FunctionDef.
"""
fdef = function_pb2.FunctionDef()
fdef.CopyFrom(orig_fdef)
for node_def in fdef.node_def:
if "_gradient_op_type" in node_def.attr:
if node_def.op in ["StatefulPartitionedCall", "PartitionedCall"]:
# TODO(andresp): This code assumes that the gradient registered for this
# function call is the default gradient for the function and not a
# custom one.
fname = node_def.attr["f"].func.name
node_def.attr["_gradient_op_type"].s = compat.as_bytes(
functions[fname]._gradient_name) # pylint: disable=protected-access
else:
logging.warning("Importing a function (%s) with ops with custom "
"gradients. Will likely fail if a gradient is "
"requested.", fdef.signature.name)
for _, attr_value in node_def.attr.items():
if attr_value.func.name:
attr_value.func.name = functions[attr_value.func.name].name
# TODO(b/124205571): Avoid accidental sharing and destruction of restored
# resources. For now uniquify "shared_name" when loading functions to avoid
# sharing.
if "shared_name" in node_def.attr:
node_def.attr["shared_name"].s += compat.as_bytes(shared_name_suffix)
fdef.signature.name = _clean_function_name(fdef.signature.name)
return fdef
def _list_function_deps(fdef):
# TODO(andresp): Recurse into list attributes and into NameAttrList attrs both
# when listing deps and when fixing them. `function_def_to_graph` also
# requires fixes.
deps = set()
for node_def in fdef.node_def:
for _, attr_value in node_def.attr.items():
if attr_value.WhichOneof("value") == "func":
deps.add(attr_value.func.name)
return deps
def _clean_function_name(name):
"""Vanity function to keep the function names comprehensible."""
# Note: each time a function is wrapped into `function_lib.ConcreteFunction`
# its name becomes "__inference_<orig>_xyz".
match = re.search(r"^__inference_(.*)_\d+$", name)
if match:
return match.group(1)
else:
return name
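# Illustrative only (not part of the original module, never called): the
# wrapping convention undone by _clean_function_name.
def _clean_function_name_demo():
  assert _clean_function_name("__inference_my_fn_1234") == "my_fn"
  assert _clean_function_name("my_fn") == "my_fn"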
| {
"content_hash": "404ca6de3984f751b3fe351c93471749",
"timestamp": "",
"source": "github",
"line_count": 402,
"max_line_length": 108,
"avg_line_length": 39.524875621890544,
"alnum_prop": 0.7107432815155139,
"repo_name": "alsrgv/tensorflow",
"id": "1993babfb3cec16f3bb5c38d05357ed8ede12a54",
"size": "16578",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/python/saved_model/function_deserialization.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "3568"
},
{
"name": "Batchfile",
"bytes": "15317"
},
{
"name": "C",
"bytes": "755360"
},
{
"name": "C#",
"bytes": "8446"
},
{
"name": "C++",
"bytes": "68001148"
},
{
"name": "CMake",
"bytes": "204596"
},
{
"name": "Dockerfile",
"bytes": "73602"
},
{
"name": "Go",
"bytes": "1627121"
},
{
"name": "HTML",
"bytes": "4680118"
},
{
"name": "Java",
"bytes": "842866"
},
{
"name": "Jupyter Notebook",
"bytes": "1665584"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "101157"
},
{
"name": "Objective-C",
"bytes": "104061"
},
{
"name": "Objective-C++",
"bytes": "175222"
},
{
"name": "PHP",
"bytes": "17570"
},
{
"name": "Pascal",
"bytes": "3239"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "Python",
"bytes": "48843099"
},
{
"name": "RobotFramework",
"bytes": "891"
},
{
"name": "Ruby",
"bytes": "4733"
},
{
"name": "Shell",
"bytes": "488241"
},
{
"name": "Smarty",
"bytes": "27495"
},
{
"name": "Swift",
"bytes": "56155"
},
{
"name": "TSQL",
"bytes": "921"
}
],
"symlink_target": ""
} |
"""
__ExitPoint.py_____________________________________________________
Automatically generated AToM3 syntactic object (DO NOT MODIFY DIRECTLY)
Author: gehan
Modified: Sat Aug 30 18:23:40 2014
___________________________________________________________________
"""
from ASGNode import *
from ATOM3Type import *
from ATOM3String import *
from graph_ExitPoint import *
class ExitPoint(ASGNode, ATOM3Type):
def __init__(self, parent = None):
ASGNode.__init__(self)
ATOM3Type.__init__(self)
self.superTypes = ['Vertex', 'MetaModelElement_S']
self.graphClass_ = graph_ExitPoint
self.isGraphObjectVisual = True
if(hasattr(self, '_setHierarchicalLink')):
self._setHierarchicalLink(False)
if(hasattr(self, '_setHierarchicalNode')):
self._setHierarchicalNode(False)
self.parent = parent
self.cardinality=ATOM3String('1', 20)
self.cardinality=ATOM3String('1', 20)
self.cardinality=ATOM3String('1', 20)
self.cardinality=ATOM3String('1', 20)
self.cardinality=ATOM3String('1', 20)
self.classtype=ATOM3String('t_', 20)
self.classtype=ATOM3String('t_', 20)
self.classtype=ATOM3String('t_', 20)
self.classtype=ATOM3String('t_', 20)
self.classtype=ATOM3String('t_', 20)
self.name=ATOM3String('s_', 20)
self.name=ATOM3String('s_', 20)
self.name=ATOM3String('s_', 20)
self.name=ATOM3String('s_', 20)
self.name=ATOM3String('s_', 20)
self.generatedAttributes = {'cardinality': ('ATOM3String', ),
'cardinality': ('ATOM3String', ),
'cardinality': ('ATOM3String', ),
'cardinality': ('ATOM3String', ),
'cardinality': ('ATOM3String', ),
'classtype': ('ATOM3String', ),
'classtype': ('ATOM3String', ),
'classtype': ('ATOM3String', ),
'classtype': ('ATOM3String', ),
'classtype': ('ATOM3String', ),
'name': ('ATOM3String', ),
'name': ('ATOM3String', ),
'name': ('ATOM3String', ),
'name': ('ATOM3String', ),
'name': ('ATOM3String', ) }
self.realOrder = ['cardinality','cardinality','cardinality','cardinality','cardinality','classtype','classtype','classtype','classtype','classtype','name','name','name','name','name']
self.directEditing = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
def clone(self):
cloneObject = ExitPoint( self.parent )
for atr in self.realOrder:
cloneObject.setAttrValue(atr, self.getAttrValue(atr).clone() )
ASGNode.cloneActions(self, cloneObject)
return cloneObject
def copy(self, other):
ATOM3Type.copy(self, other)
for atr in self.realOrder:
self.setAttrValue(atr, other.getAttrValue(atr) )
ASGNode.copy(self, other)
def preCondition (self, actionID, * params):
if self.graphObject_:
return self.graphObject_.preCondition(actionID, params)
else: return None
def postCondition (self, actionID, * params):
if self.graphObject_:
return self.graphObject_.postCondition(actionID, params)
else: return None
def preAction (self, actionID, * params):
if self.graphObject_:
return self.graphObject_.preAction(actionID, params)
else: return None
def postAction (self, actionID, * params):
if self.graphObject_:
return self.graphObject_.postAction(actionID, params)
else: return None
def QOCA(self, params):
"""
QOCA Constraint Template
NOTE: DO NOT select a POST/PRE action trigger
Constraints will be added/removed in a logical manner by other mechanisms.
"""
return # <---- Remove this to use QOCA
""" Get the high level constraint helper and solver """
from Qoca.atom3constraints.OffsetConstraints import OffsetConstraints
oc = OffsetConstraints(self.parent.qocaSolver)
"""
Example constraint, see Kernel/QOCA/atom3constraints/OffsetConstraints.py
For more types of constraints
"""
oc.fixedWidth(self.graphObject_, self.graphObject_.sizeX)
oc.fixedHeight(self.graphObject_, self.graphObject_.sizeY)
| {
"content_hash": "341e29414ac91da6260e619524752e0d",
"timestamp": "",
"source": "github",
"line_count": 111,
"max_line_length": 189,
"avg_line_length": 40.78378378378378,
"alnum_prop": 0.5681466755025403,
"repo_name": "levilucio/SyVOLT",
"id": "d814647bf71693aee93a5920d9f38f9d13775ee5",
"size": "4527",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "UMLRT2Kiltera_MM/ExitPoint.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "166159"
},
{
"name": "Python",
"bytes": "34207588"
},
{
"name": "Shell",
"bytes": "1118"
}
],
"symlink_target": ""
} |
import os, sys, re
from Bio import SeqIO
from Bio.SeqIO.QualityIO import PairedFastaQualIterator
from optparse import OptionParser
def read_midfile(midfile):
# Here we read in the mid tagfile. The expected format is
# mid-name in col1, mid in col2, the rest is not used
fh = open(midfile, "r")
mids = {}
for line in fh:
midid = line.split()[0]
mid = line.split()[1]
mids[mid] = midid
return mids
def doRecord(record, middict, search_for, maxsize, varsize):
    # These two are for reporting purposes, and will be returned with the record
reportFMid = "NoF"
reportRMid = "NoR"
    # If we do not have a forward mid, we discard the sequence and just return None
editedRecord = None
# Here we examine each record to find the forward and backward mids
# The way we do this is to have a regexp object (search_for) that
# is composed of all of the mids that we have taken in. We then
# search for these in both ends of the record.
match_objForward = search_for.search(str(record.seq[:maxsize]))
match_objReverse = search_for.search(str(record.reverse_complement().seq[:maxsize]))
if match_objForward is None:
return reportFMid, reportRMid, editedRecord
# Here, we have at least found a forward mid, and we get what it is, and
# where it is.
whichmidForward = match_objForward.group()
forwardMidSize = len(whichmidForward)
indexForward = match_objForward.start()
reportFMid = middict[whichmidForward]
if match_objReverse is None:
# If there is no reverse, I set it to the forward one since
# there is no logical difference between missing and the same
whichmidReverse=whichmidForward
reverseMidSize = 0
indexReverse=0
else:
# Here, we have a reverse mid, and we use those
whichmidReverse = match_objReverse.group()
reverseMidSize = len(whichmidReverse)
indexReverse = match_objReverse.start()
reportRMid = middict[whichmidReverse]
if indexForward <= varsize and indexReverse <= varsize:
if whichmidForward == whichmidReverse:
# Here we test first, if the mids are the same, second, that they
# are where we expect them to be.
firstcut = indexForward
lastcut = len(record.seq) - indexReverse
editedRecord = record[firstcut:lastcut]
editedRecord.id = editedRecord.id + "_" + reportFMid + "_" + reportRMid
else:
if indexForward > varsize:
reportFMid = "LongF"
        if indexReverse > varsize:
reportRMid = "LongR"
return reportFMid, reportRMid, editedRecord
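# Illustrative only (not part of the original script, never called): a small
# sketch of the regex-based MID detection used in doRecord. The sequences and
# MID names below are made up.
def _mid_match_demo():
    mids = {"ACGAGTGCGT": "MID1", "ACGCTCGACA": "MID2"}
    search_for = re.compile("|".join(mids.keys()))
    read = "TTACGAGTGCGTGGGCCC"
    match = search_for.search(read[:14])
    # -> ("MID1", 2): the tag that was found and how far into the read it starts.
    return mids[match.group()], match.start()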
def split_on_mid(middict, fastafile, qualfile, varsize, otag):
# Here we go through all of the records.
# First, I create a regexp object consisting of all of the mids
# I can then in doRecord search for them. I also figure out
# the max size of the region that I accept it to be in, by
# adding the length of the longest mid and the variable size
# that the user supplied.
mids = middict.keys()
search_for = re.compile("|".join(mids))
maxsize = max([len(x) for x in mids]) + varsize
output_handleF = open(otag + ".fsa", "w")
# Here we go through the file(s). It is done like this to allow
# for not having a qual file.
totalseqs = 0
noseqs = 0
reportDict = {}
if qualfile != "None":
output_handleQ = open(otag + ".qual", "w")
for record in PairedFastaQualIterator(open(fastafile), open(qualfile)):
totalseqs += 1
reportFMid, reportRMid, editedRecord = doRecord(record, middict, search_for, maxsize, varsize)
reportDict[(reportFMid,reportRMid)] = reportDict.get((reportFMid, reportRMid), 0) + 1
if editedRecord == None:
continue
SeqIO.write(editedRecord, output_handleF, "fasta")
SeqIO.write(editedRecord, output_handleQ, "qual")
noseqs += 1
output_handleQ.close()
else:
for record in SeqIO.parse(open(fastafile, "rU"), "fasta"):
totalseqs += 1
reportFMid, reportRMid, editedRecord = doRecord(record, middict, search_for, maxsize, varsize)
reportDict[(reportFMid,reportRMid)] = reportDict.get((reportFMid, reportRMid), 0) + 1
if editedRecord == None:
continue
SeqIO.write(editedRecord, output_handleF, "fasta")
noseqs += 1
output_handleF.close()
print "A total of %s sequences written, out of a start set of %s" %(noseqs, totalseqs)
reportFile = open(otag + ".log", "w")
reportFile.write("Forward\tReverse\tNumber\n" )
for key in reportDict:
category = "Ma"
if key[0] == "NoF":
category = "NF"
elif key[1] == "NoR":
category = "NR"
elif key[0] != key[1]:
category = "MM"
elif "Long" in key[0] or "Long" in key[1]:
category = "LM"
reportFile.write("%s\t%s\t%s\t%s\n" % (category, key[0],key[1], reportDict[key]))
reportFile.close()
if __name__ == '__main__':
parser = OptionParser()
parser.add_option("-f", "--fastafile", dest = "fastafile",
help = "fasta filename", metavar = "FILE")
parser.add_option("-q", "--qualfile", dest = "qualfile", default = "None",
help = "qual file, optional", metavar = "FILE")
parser.add_option("-m", "--midfile", dest = "midfile",
help = "midfile name", metavar = "FILE")
parser.add_option("-v", "--varsize", dest="varsize", type="int",
help = "variable region size", metavar = "INT")
parser.add_option("-o", "--outnametag", dest="otag",
help = "output filename tag", metavar="STRING")
(options, args) = parser.parse_args()
mids = read_midfile(options.midfile)
split_on_mid(mids, options.fastafile, options.qualfile, \
                 options.varsize, options.otag)
| {
"content_hash": "00678cfce39403e0736e29d09a1d14de",
"timestamp": "",
"source": "github",
"line_count": 155,
"max_line_length": 106,
"avg_line_length": 39.645161290322584,
"alnum_prop": 0.609113100081367,
"repo_name": "karinlag/karinlag_utils",
"id": "74bfec74d2fd99f1d3ed8983665acc3c6b5351dc",
"size": "7094",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "same_mid_quantitate.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "7094"
}
],
"symlink_target": ""
} |
'''
Extract _("...") strings for translation and convert to Qt stringdefs so that
they can be picked up by Qt linguist.
'''
from __future__ import division,print_function,unicode_literals
from subprocess import Popen, PIPE
import operator
import os
import sys
OUT_CPP="qt/trollcoinstrings.cpp"
EMPTY=['""']
def parse_po(text):
"""
Parse 'po' format produced by xgettext.
Return a list of (msgid,msgstr) tuples.
"""
messages = []
msgid = []
msgstr = []
in_msgid = False
in_msgstr = False
for line in text.split('\n'):
line = line.rstrip('\r')
if line.startswith('msgid '):
if in_msgstr:
messages.append((msgid, msgstr))
in_msgstr = False
# message start
in_msgid = True
msgid = [line[6:]]
elif line.startswith('msgstr '):
in_msgid = False
in_msgstr = True
msgstr = [line[7:]]
elif line.startswith('"'):
if in_msgid:
msgid.append(line)
if in_msgstr:
msgstr.append(line)
if in_msgstr:
messages.append((msgid, msgstr))
return messages
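# Illustrative only (not part of the original script, never called): the 'po'
# structure parse_po expects, including a msgid split over several lines.
def _parse_po_demo():
    sample = '\n'.join([
        'msgid "Hello"',
        'msgstr "Bonjour"',
        'msgid ""',
        '"split over lines"',
        'msgstr ""',
        '"sur plusieurs lignes"',
    ])
    # -> [(['"Hello"'], ['"Bonjour"']),
    #     (['""', '"split over lines"'], ['""', '"sur plusieurs lignes"'])]
    return parse_po(sample)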
files = sys.argv[1:]
# xgettext -n --keyword=_ $FILES
XGETTEXT=os.getenv('XGETTEXT', 'xgettext')
if not XGETTEXT:
print('Cannot extract strings: xgettext utility is not installed or not configured.',file=sys.stderr)
print('Please install package "gettext" and re-run \'./configure\'.',file=sys.stderr)
exit(1)
child = Popen([XGETTEXT,'--output=-','-n','--keyword=_'] + files, stdout=PIPE)
(out, err) = child.communicate()
messages = parse_po(out.decode('utf-8'))
f = open(OUT_CPP, 'w')
f.write("""
#include <QtGlobal>
// Automatically generated by extract_strings_qt.py
#ifdef __GNUC__
#define UNUSED __attribute__((unused))
#else
#define UNUSED
#endif
""")
f.write('static const char UNUSED *trollcoin_strings[] = {\n')
f.write('QT_TRANSLATE_NOOP("trollcoin-core", "%s"),\n' % (os.getenv('PACKAGE_NAME'),))
f.write('QT_TRANSLATE_NOOP("trollcoin-core", "%s"),\n' % (os.getenv('COPYRIGHT_HOLDERS'),))
if os.getenv('COPYRIGHT_HOLDERS_SUBSTITUTION') != os.getenv('PACKAGE_NAME'):
f.write('QT_TRANSLATE_NOOP("trollcoin-core", "%s"),\n' % (os.getenv('COPYRIGHT_HOLDERS_SUBSTITUTION'),))
messages.sort(key=operator.itemgetter(0))
for (msgid, msgstr) in messages:
if msgid != EMPTY:
f.write('QT_TRANSLATE_NOOP("trollcoin-core", %s),\n' % ('\n'.join(msgid)))
f.write('};\n')
f.close()
| {
"content_hash": "c54885e9860b10dcd8736112efa81ab3",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 108,
"avg_line_length": 29.50588235294118,
"alnum_prop": 0.6124401913875598,
"repo_name": "gautes/TrollCoinCore",
"id": "62e356dc14d51c1175ea07d8a259f9a743bcdd47",
"size": "2721",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "share/qt/extract_strings_qt.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "28456"
},
{
"name": "C",
"bytes": "693292"
},
{
"name": "C++",
"bytes": "5054321"
},
{
"name": "CSS",
"bytes": "1127"
},
{
"name": "HTML",
"bytes": "51512"
},
{
"name": "Java",
"bytes": "30306"
},
{
"name": "M4",
"bytes": "189880"
},
{
"name": "Makefile",
"bytes": "105024"
},
{
"name": "Objective-C",
"bytes": "3904"
},
{
"name": "Objective-C++",
"bytes": "7244"
},
{
"name": "Protocol Buffer",
"bytes": "2336"
},
{
"name": "Python",
"bytes": "1151221"
},
{
"name": "QMake",
"bytes": "758"
},
{
"name": "Shell",
"bytes": "53344"
}
],
"symlink_target": ""
} |
import _plotly_utils.basevalidators
class ColoraxisValidator(_plotly_utils.basevalidators.SubplotidValidator):
def __init__(
self, plotly_name="coloraxis", parent_name="scattergl.marker.line", **kwargs
):
super(ColoraxisValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
dflt=kwargs.pop("dflt", None),
edit_type=kwargs.pop("edit_type", "calc"),
regex=kwargs.pop("regex", "/^coloraxis([2-9]|[1-9][0-9]+)?$/"),
**kwargs,
)
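# Illustrative only (not part of the generated module, never called): what the
# subplot-id regex above accepts. The first color axis carries no numeric
# suffix, so "coloraxis1" is rejected.
def _coloraxis_pattern_demo():
    import re
    pattern = re.compile(r"^coloraxis([2-9]|[1-9][0-9]+)?$")
    names = ("coloraxis", "coloraxis2", "coloraxis10", "coloraxis1")
    # -> [True, True, True, False]
    return [bool(pattern.match(name)) for name in names]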
| {
"content_hash": "463d59e027ad20d520bee9ba6481f212",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 84,
"avg_line_length": 36.86666666666667,
"alnum_prop": 0.5840867992766727,
"repo_name": "plotly/plotly.py",
"id": "3b9952080bcd87b6e06c03f1cfd773f320cf6d12",
"size": "553",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/scattergl/marker/line/_coloraxis.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "545"
},
{
"name": "JavaScript",
"bytes": "2074"
},
{
"name": "PostScript",
"bytes": "565328"
},
{
"name": "Python",
"bytes": "31506317"
},
{
"name": "TypeScript",
"bytes": "71337"
}
],
"symlink_target": ""
} |
import numpy as np
import pytest
import umap
# Globals, used for all the tests
SEED = 189212 # 0b101110001100011100
np.random.seed(SEED)
try:
from umap import plot
IMPORT_PLOT = True
except ImportError:
IMPORT_PLOT = False
plot_only = pytest.mark.skipif(not IMPORT_PLOT, reason="umap plot not found.")
@pytest.fixture(scope="session")
def mapper(iris):
return umap.UMAP(n_epochs=100).fit(iris.data)
# These tests require revision: refactoring is
# needed as there is no assertion nor
# property verification.
@plot_only
def test_plot_runs_at_all(mapper, iris, iris_selection):
from umap import plot as umap_plot
umap_plot.points(mapper)
umap_plot.points(mapper, labels=iris.target)
umap_plot.points(mapper, values=iris.data[:, 0])
umap_plot.points(mapper, labels=iris.target, subset_points=iris_selection)
umap_plot.points(mapper, values=iris.data[:, 0], subset_points=iris_selection)
umap_plot.points(mapper, theme="fire")
umap_plot.diagnostic(mapper, diagnostic_type="all")
umap_plot.diagnostic(mapper, diagnostic_type="neighborhood")
umap_plot.connectivity(mapper)
umap_plot.connectivity(mapper, theme="fire")
umap_plot.connectivity(mapper, edge_bundling="hammer")
umap_plot.interactive(mapper)
umap_plot.interactive(mapper, labels=iris.target)
umap_plot.interactive(mapper, values=iris.data[:, 0])
umap_plot.interactive(mapper, labels=iris.target, subset_points=iris_selection)
umap_plot.interactive(mapper, values=iris.data[:, 0], subset_points=iris_selection)
umap_plot.interactive(mapper, theme="fire")
umap_plot._datashade_points(mapper.embedding_)
umap_plot._datashade_points(mapper.embedding_, labels=iris.target)
umap_plot._datashade_points(mapper.embedding_, values=iris.data[:, 0])
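# Illustrative only (not part of the test suite, never collected by pytest): a
# hypothetical minimal session matching what the smoke test above exercises.
# It assumes the optional plotting dependencies (matplotlib, datashader,
# bokeh, holoviews) are installed.
def _manual_plot_demo():
    from sklearn.datasets import load_iris
    from umap import plot as umap_plot
    data = load_iris()
    embedder = umap.UMAP(n_epochs=100).fit(data.data)
    umap_plot.points(embedder, labels=data.target, theme="fire")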
| {
"content_hash": "e73f22aca5ca20fea247afed99735e02",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 87,
"avg_line_length": 36.12,
"alnum_prop": 0.7347729789590255,
"repo_name": "lmcinnes/umap",
"id": "effcbc1054c9944eaa44ca4854db2ee74edc37ee",
"size": "1806",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "umap/tests/test_plot.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "428"
},
{
"name": "Python",
"bytes": "8520392"
},
{
"name": "Shell",
"bytes": "3613"
},
{
"name": "TeX",
"bytes": "690"
}
],
"symlink_target": ""
} |
from aiocache.plugins import BasePlugin
from collections import deque
class LRUPlugin(BasePlugin):
"""
Implements a Least Recently Used policy with max_keys. The policy does the following:
    - When a key is retrieved (get), it is moved to the beginning of the queue
- When a key is added (set), keys are added to the beginning of the queue. If
the queue is full, it will remove as many keys as needed to make space for the new
ones.
"""
def __init__(self, max_keys=None):
super().__init__()
if max_keys is not None:
assert max_keys >= 1, "Number of keys must be 1 or bigger"
self.deque = deque(maxlen=max_keys)
async def post_get(self, client, key, *args, took=0, **kwargs):
"""
Remove the key from its current position and set it at the beginning of the queue.
:param key: string key used in the get operation
:param client: :class:`aiocache.base.BaseCache` or child instance to use to interact with
the storage if needed
"""
if await client.exists(key):
self.deque.remove(key)
self.deque.appendleft(key)
async def post_set(self, client, key, value, *args, took=0, **kwargs):
"""
Set the given key at the beginning of the queue. If the queue is full, remove the last
item first.
:param key: string key used in the set operation
:param value: obj used in the set operation
:param client: :class:`aiocache.base.BaseCache` or child instance to use to interact with
the storage if needed
"""
if len(self.deque) == self.deque.maxlen:
await client.delete(self.deque.pop())
self.deque.appendleft(key)
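# Illustrative only (not part of the plugin, never scheduled): a hypothetical
# way to attach the policy to an in-memory cache; aiocache caches accept a
# `plugins` argument.
async def _lru_demo():
    from aiocache import SimpleMemoryCache
    cache = SimpleMemoryCache(plugins=[LRUPlugin(max_keys=2)])
    await cache.set("a", 1)
    await cache.set("b", 2)
    await cache.set("c", 3)  # evicts "a", the least recently used key
    assert await cache.get("a") is None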
| {
"content_hash": "c669f33d54762a7bda0d98d893142100",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 97,
"avg_line_length": 42.357142857142854,
"alnum_prop": 0.6278808319280494,
"repo_name": "dpanin/lesson-rest",
"id": "c1a5799e05b0eaad496e746f1aa6658d5d49a498",
"size": "1779",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lru_plugin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "5429"
}
],
"symlink_target": ""
} |