text (stringlengths 6-947k) | repo_name (stringlengths 5-100) | path (stringlengths 4-231) | language (stringclasses 1 value) | license (stringclasses 15 values) | size (int64 6-947k) | score (float64 0-0.34) |
---|---|---|---|---|---|---|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any
from azure.core.configuration import Configuration
from azure.core.pipeline import policies
VERSION = "unknown"
class KeyVaultClientConfiguration(Configuration):
"""Configuration for KeyVaultClient.
Note that all parameters used to create this instance are saved as instance
attributes.
:keyword api_version: Api Version. The default value is "7.2". Note that overriding this default value may result in unsupported behavior.
:paramtype api_version: str
"""
def __init__(
self,
**kwargs: Any
) -> None:
super(KeyVaultClientConfiguration, self).__init__(**kwargs)
api_version = kwargs.pop('api_version', "7.2") # type: str
self.api_version = api_version
kwargs.setdefault('sdk_moniker', 'keyvault/{}'.format(VERSION))
self._configure(**kwargs)
def _configure(
self,
**kwargs: Any
) -> None:
self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs)
self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs)
self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs)
self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs)
self.http_logging_policy = kwargs.get('http_logging_policy') or policies.HttpLoggingPolicy(**kwargs)
self.retry_policy = kwargs.get('retry_policy') or policies.AsyncRetryPolicy(**kwargs)
self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs)
self.redirect_policy = kwargs.get('redirect_policy') or policies.AsyncRedirectPolicy(**kwargs)
self.authentication_policy = kwargs.get('authentication_policy')
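# --- Illustrative usage sketch (not part of the generated SDK file) ---------
# A minimal check of what _configure() sets up, assuming `azure-core` is
# installed. No Key Vault service call is made; "7.2" is simply the documented
# default api_version used as an example value.
if __name__ == "__main__":
    _cfg = KeyVaultClientConfiguration(api_version="7.2")
    print(_cfg.api_version)                   # "7.2"
    print(type(_cfg.retry_policy).__name__)   # "AsyncRetryPolicy" (aio variant)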
| Azure/azure-sdk-for-python | sdk/keyvault/azure-keyvault-keys/azure/keyvault/keys/_generated/v7_2/aio/_configuration.py | Python | mit | 2,282 | 0.005697 |
"""Tests for the array padding functions.
"""
from __future__ import division, absolute_import, print_function
import numpy as np
from numpy.testing import (assert_array_equal, assert_raises, assert_allclose,
TestCase)
from numpy.lib import pad
class TestConditionalShortcuts(TestCase):
def test_zero_padding_shortcuts(self):
test = np.arange(120).reshape(4, 5, 6)
pad_amt = [(0, 0) for axis in test.shape]
modes = ['constant',
'edge',
'linear_ramp',
'maximum',
'mean',
'median',
'minimum',
'reflect',
'symmetric',
'wrap',
]
for mode in modes:
assert_array_equal(test, pad(test, pad_amt, mode=mode))
def test_shallow_statistic_range(self):
test = np.arange(120).reshape(4, 5, 6)
pad_amt = [(1, 1) for axis in test.shape]
modes = ['maximum',
'mean',
'median',
'minimum',
]
for mode in modes:
assert_array_equal(pad(test, pad_amt, mode='edge'),
pad(test, pad_amt, mode=mode, stat_length=1))
def test_clip_statistic_range(self):
test = np.arange(30).reshape(5, 6)
pad_amt = [(3, 3) for axis in test.shape]
modes = ['maximum',
'mean',
'median',
'minimum',
]
for mode in modes:
assert_array_equal(pad(test, pad_amt, mode=mode),
pad(test, pad_amt, mode=mode, stat_length=30))
class TestStatistic(TestCase):
def test_check_mean_stat_length(self):
a = np.arange(100).astype('f')
a = pad(a, ((25, 20), ), 'mean', stat_length=((2, 3), ))
b = np.array(
[0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5,
0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5,
0.5, 0.5, 0.5, 0.5, 0.5,
0., 1., 2., 3., 4., 5., 6., 7., 8., 9.,
10., 11., 12., 13., 14., 15., 16., 17., 18., 19.,
20., 21., 22., 23., 24., 25., 26., 27., 28., 29.,
30., 31., 32., 33., 34., 35., 36., 37., 38., 39.,
40., 41., 42., 43., 44., 45., 46., 47., 48., 49.,
50., 51., 52., 53., 54., 55., 56., 57., 58., 59.,
60., 61., 62., 63., 64., 65., 66., 67., 68., 69.,
70., 71., 72., 73., 74., 75., 76., 77., 78., 79.,
80., 81., 82., 83., 84., 85., 86., 87., 88., 89.,
90., 91., 92., 93., 94., 95., 96., 97., 98., 99.,
98., 98., 98., 98., 98., 98., 98., 98., 98., 98.,
98., 98., 98., 98., 98., 98., 98., 98., 98., 98.
])
assert_array_equal(a, b)
def test_check_maximum_1(self):
a = np.arange(100)
a = pad(a, (25, 20), 'maximum')
b = np.array(
[99, 99, 99, 99, 99, 99, 99, 99, 99, 99,
99, 99, 99, 99, 99, 99, 99, 99, 99, 99,
99, 99, 99, 99, 99,
0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
80, 81, 82, 83, 84, 85, 86, 87, 88, 89,
90, 91, 92, 93, 94, 95, 96, 97, 98, 99,
99, 99, 99, 99, 99, 99, 99, 99, 99, 99,
99, 99, 99, 99, 99, 99, 99, 99, 99, 99]
)
assert_array_equal(a, b)
def test_check_maximum_2(self):
a = np.arange(100) + 1
a = pad(a, (25, 20), 'maximum')
b = np.array(
[100, 100, 100, 100, 100, 100, 100, 100, 100, 100,
100, 100, 100, 100, 100, 100, 100, 100, 100, 100,
100, 100, 100, 100, 100,
1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
31, 32, 33, 34, 35, 36, 37, 38, 39, 40,
41, 42, 43, 44, 45, 46, 47, 48, 49, 50,
51, 52, 53, 54, 55, 56, 57, 58, 59, 60,
61, 62, 63, 64, 65, 66, 67, 68, 69, 70,
71, 72, 73, 74, 75, 76, 77, 78, 79, 80,
81, 82, 83, 84, 85, 86, 87, 88, 89, 90,
91, 92, 93, 94, 95, 96, 97, 98, 99, 100,
100, 100, 100, 100, 100, 100, 100, 100, 100, 100,
100, 100, 100, 100, 100, 100, 100, 100, 100, 100]
)
assert_array_equal(a, b)
def test_check_maximum_stat_length(self):
a = np.arange(100) + 1
a = pad(a, (25, 20), 'maximum', stat_length=10)
b = np.array(
[10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
10, 10, 10, 10, 10,
1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
31, 32, 33, 34, 35, 36, 37, 38, 39, 40,
41, 42, 43, 44, 45, 46, 47, 48, 49, 50,
51, 52, 53, 54, 55, 56, 57, 58, 59, 60,
61, 62, 63, 64, 65, 66, 67, 68, 69, 70,
71, 72, 73, 74, 75, 76, 77, 78, 79, 80,
81, 82, 83, 84, 85, 86, 87, 88, 89, 90,
91, 92, 93, 94, 95, 96, 97, 98, 99, 100,
100, 100, 100, 100, 100, 100, 100, 100, 100, 100,
100, 100, 100, 100, 100, 100, 100, 100, 100, 100]
)
assert_array_equal(a, b)
def test_check_minimum_1(self):
a = np.arange(100)
a = pad(a, (25, 20), 'minimum')
b = np.array(
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0,
0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
80, 81, 82, 83, 84, 85, 86, 87, 88, 89,
90, 91, 92, 93, 94, 95, 96, 97, 98, 99,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
)
assert_array_equal(a, b)
def test_check_minimum_2(self):
a = np.arange(100) + 2
a = pad(a, (25, 20), 'minimum')
b = np.array(
[2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2,
2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
32, 33, 34, 35, 36, 37, 38, 39, 40, 41,
42, 43, 44, 45, 46, 47, 48, 49, 50, 51,
52, 53, 54, 55, 56, 57, 58, 59, 60, 61,
62, 63, 64, 65, 66, 67, 68, 69, 70, 71,
72, 73, 74, 75, 76, 77, 78, 79, 80, 81,
82, 83, 84, 85, 86, 87, 88, 89, 90, 91,
92, 93, 94, 95, 96, 97, 98, 99, 100, 101,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2]
)
assert_array_equal(a, b)
def test_check_minimum_stat_length(self):
a = np.arange(100) + 1
a = pad(a, (25, 20), 'minimum', stat_length=10)
b = np.array(
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1,
1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
31, 32, 33, 34, 35, 36, 37, 38, 39, 40,
41, 42, 43, 44, 45, 46, 47, 48, 49, 50,
51, 52, 53, 54, 55, 56, 57, 58, 59, 60,
61, 62, 63, 64, 65, 66, 67, 68, 69, 70,
71, 72, 73, 74, 75, 76, 77, 78, 79, 80,
81, 82, 83, 84, 85, 86, 87, 88, 89, 90,
91, 92, 93, 94, 95, 96, 97, 98, 99, 100,
91, 91, 91, 91, 91, 91, 91, 91, 91, 91,
91, 91, 91, 91, 91, 91, 91, 91, 91, 91]
)
assert_array_equal(a, b)
def test_check_median(self):
a = np.arange(100).astype('f')
a = pad(a, (25, 20), 'median')
b = np.array(
[49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5,
49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5,
49.5, 49.5, 49.5, 49.5, 49.5,
0., 1., 2., 3., 4., 5., 6., 7., 8., 9.,
10., 11., 12., 13., 14., 15., 16., 17., 18., 19.,
20., 21., 22., 23., 24., 25., 26., 27., 28., 29.,
30., 31., 32., 33., 34., 35., 36., 37., 38., 39.,
40., 41., 42., 43., 44., 45., 46., 47., 48., 49.,
50., 51., 52., 53., 54., 55., 56., 57., 58., 59.,
60., 61., 62., 63., 64., 65., 66., 67., 68., 69.,
70., 71., 72., 73., 74., 75., 76., 77., 78., 79.,
80., 81., 82., 83., 84., 85., 86., 87., 88., 89.,
90., 91., 92., 93., 94., 95., 96., 97., 98., 99.,
49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5,
49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5]
)
assert_array_equal(a, b)
def test_check_median_01(self):
a = np.array([[3, 1, 4], [4, 5, 9], [9, 8, 2]])
a = pad(a, 1, 'median')
b = np.array(
[[4, 4, 5, 4, 4],
[3, 3, 1, 4, 3],
[5, 4, 5, 9, 5],
[8, 9, 8, 2, 8],
[4, 4, 5, 4, 4]]
)
assert_array_equal(a, b)
def test_check_median_02(self):
a = np.array([[3, 1, 4], [4, 5, 9], [9, 8, 2]])
a = pad(a.T, 1, 'median').T
b = np.array(
[[5, 4, 5, 4, 5],
[3, 3, 1, 4, 3],
[5, 4, 5, 9, 5],
[8, 9, 8, 2, 8],
[5, 4, 5, 4, 5]]
)
assert_array_equal(a, b)
def test_check_median_stat_length(self):
a = np.arange(100).astype('f')
a[1] = 2.
a[97] = 96.
a = pad(a, (25, 20), 'median', stat_length=(3, 5))
b = np.array(
[ 2., 2., 2., 2., 2., 2., 2., 2., 2., 2.,
2., 2., 2., 2., 2., 2., 2., 2., 2., 2.,
2., 2., 2., 2., 2.,
0., 2., 2., 3., 4., 5., 6., 7., 8., 9.,
10., 11., 12., 13., 14., 15., 16., 17., 18., 19.,
20., 21., 22., 23., 24., 25., 26., 27., 28., 29.,
30., 31., 32., 33., 34., 35., 36., 37., 38., 39.,
40., 41., 42., 43., 44., 45., 46., 47., 48., 49.,
50., 51., 52., 53., 54., 55., 56., 57., 58., 59.,
60., 61., 62., 63., 64., 65., 66., 67., 68., 69.,
70., 71., 72., 73., 74., 75., 76., 77., 78., 79.,
80., 81., 82., 83., 84., 85., 86., 87., 88., 89.,
90., 91., 92., 93., 94., 95., 96., 96., 98., 99.,
96., 96., 96., 96., 96., 96., 96., 96., 96., 96.,
96., 96., 96., 96., 96., 96., 96., 96., 96., 96.]
)
assert_array_equal(a, b)
def test_check_mean_shape_one(self):
a = [[4, 5, 6]]
a = pad(a, (5, 7), 'mean', stat_length=2)
b = np.array(
[[4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
[4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
[4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
[4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
[4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
[4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
[4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
[4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
[4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
[4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
[4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
[4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
[4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6]]
)
assert_array_equal(a, b)
def test_check_mean_2(self):
a = np.arange(100).astype('f')
a = pad(a, (25, 20), 'mean')
b = np.array(
[49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5,
49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5,
49.5, 49.5, 49.5, 49.5, 49.5,
0., 1., 2., 3., 4., 5., 6., 7., 8., 9.,
10., 11., 12., 13., 14., 15., 16., 17., 18., 19.,
20., 21., 22., 23., 24., 25., 26., 27., 28., 29.,
30., 31., 32., 33., 34., 35., 36., 37., 38., 39.,
40., 41., 42., 43., 44., 45., 46., 47., 48., 49.,
50., 51., 52., 53., 54., 55., 56., 57., 58., 59.,
60., 61., 62., 63., 64., 65., 66., 67., 68., 69.,
70., 71., 72., 73., 74., 75., 76., 77., 78., 79.,
80., 81., 82., 83., 84., 85., 86., 87., 88., 89.,
90., 91., 92., 93., 94., 95., 96., 97., 98., 99.,
49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5,
49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5]
)
assert_array_equal(a, b)
class TestConstant(TestCase):
def test_check_constant(self):
a = np.arange(100)
a = pad(a, (25, 20), 'constant', constant_values=(10, 20))
b = np.array(
[10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
10, 10, 10, 10, 10,
0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
80, 81, 82, 83, 84, 85, 86, 87, 88, 89,
90, 91, 92, 93, 94, 95, 96, 97, 98, 99,
20, 20, 20, 20, 20, 20, 20, 20, 20, 20,
20, 20, 20, 20, 20, 20, 20, 20, 20, 20]
)
assert_array_equal(a, b)
def test_check_constant_zeros(self):
a = np.arange(100)
a = pad(a, (25, 20), 'constant')
b = np.array(
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0,
0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
80, 81, 82, 83, 84, 85, 86, 87, 88, 89,
90, 91, 92, 93, 94, 95, 96, 97, 98, 99,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
)
assert_array_equal(a, b)
def test_check_constant_float(self):
# If input array is int, but constant_values are float, the dtype of
# the array to be padded is kept
arr = np.arange(30).reshape(5, 6)
test = pad(arr, (1, 2), mode='constant',
constant_values=1.1)
expected = np.array(
[[ 1, 1, 1, 1, 1, 1, 1, 1, 1],
[ 1, 0, 1, 2, 3, 4, 5, 1, 1],
[ 1, 6, 7, 8, 9, 10, 11, 1, 1],
[ 1, 12, 13, 14, 15, 16, 17, 1, 1],
[ 1, 18, 19, 20, 21, 22, 23, 1, 1],
[ 1, 24, 25, 26, 27, 28, 29, 1, 1],
[ 1, 1, 1, 1, 1, 1, 1, 1, 1],
[ 1, 1, 1, 1, 1, 1, 1, 1, 1]]
)
assert_allclose(test, expected)
def test_check_constant_float2(self):
# If input array is float, and constant_values are float, the dtype of
# the array to be padded is kept - here retaining the float constants
arr = np.arange(30).reshape(5, 6)
arr_float = arr.astype(np.float64)
test = pad(arr_float, ((1, 2), (1, 2)), mode='constant',
constant_values=1.1)
expected = np.array(
[[ 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1],
[ 1.1, 0. , 1. , 2. , 3. , 4. , 5. , 1.1, 1.1],
[ 1.1, 6. , 7. , 8. , 9. , 10. , 11. , 1.1, 1.1],
[ 1.1, 12. , 13. , 14. , 15. , 16. , 17. , 1.1, 1.1],
[ 1.1, 18. , 19. , 20. , 21. , 22. , 23. , 1.1, 1.1],
[ 1.1, 24. , 25. , 26. , 27. , 28. , 29. , 1.1, 1.1],
[ 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1],
[ 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1]]
)
assert_allclose(test, expected)
def test_check_constant_float3(self):
a = np.arange(100, dtype=float)
a = pad(a, (25, 20), 'constant', constant_values=(-1.1, -1.2))
b = np.array(
[-1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1,
-1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1,
-1.1, -1.1, -1.1, -1.1, -1.1,
0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
80, 81, 82, 83, 84, 85, 86, 87, 88, 89,
90, 91, 92, 93, 94, 95, 96, 97, 98, 99,
-1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2,
-1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2]
)
assert_allclose(a, b)
def test_check_constant_odd_pad_amount(self):
arr = np.arange(30).reshape(5, 6)
test = pad(arr, ((1,), (2,)), mode='constant',
constant_values=3)
expected = np.array(
[[ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3],
[ 3, 3, 0, 1, 2, 3, 4, 5, 3, 3],
[ 3, 3, 6, 7, 8, 9, 10, 11, 3, 3],
[ 3, 3, 12, 13, 14, 15, 16, 17, 3, 3],
[ 3, 3, 18, 19, 20, 21, 22, 23, 3, 3],
[ 3, 3, 24, 25, 26, 27, 28, 29, 3, 3],
[ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3]]
)
assert_allclose(test, expected)
def test_check_constant_pad_2d(self):
arr = np.arange(4).reshape(2, 2)
test = np.lib.pad(arr, ((1, 2), (1, 3)), mode='constant',
constant_values=((1, 2), (3, 4)))
expected = np.array(
[[3, 1, 1, 4, 4, 4],
[3, 0, 1, 4, 4, 4],
[3, 2, 3, 4, 4, 4],
[3, 2, 2, 4, 4, 4],
[3, 2, 2, 4, 4, 4]]
)
assert_allclose(test, expected)
class TestLinearRamp(TestCase):
def test_check_simple(self):
a = np.arange(100).astype('f')
a = pad(a, (25, 20), 'linear_ramp', end_values=(4, 5))
b = np.array(
[4.00, 3.84, 3.68, 3.52, 3.36, 3.20, 3.04, 2.88, 2.72, 2.56,
2.40, 2.24, 2.08, 1.92, 1.76, 1.60, 1.44, 1.28, 1.12, 0.96,
0.80, 0.64, 0.48, 0.32, 0.16,
0.00, 1.00, 2.00, 3.00, 4.00, 5.00, 6.00, 7.00, 8.00, 9.00,
10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0,
20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0,
30.0, 31.0, 32.0, 33.0, 34.0, 35.0, 36.0, 37.0, 38.0, 39.0,
40.0, 41.0, 42.0, 43.0, 44.0, 45.0, 46.0, 47.0, 48.0, 49.0,
50.0, 51.0, 52.0, 53.0, 54.0, 55.0, 56.0, 57.0, 58.0, 59.0,
60.0, 61.0, 62.0, 63.0, 64.0, 65.0, 66.0, 67.0, 68.0, 69.0,
70.0, 71.0, 72.0, 73.0, 74.0, 75.0, 76.0, 77.0, 78.0, 79.0,
80.0, 81.0, 82.0, 83.0, 84.0, 85.0, 86.0, 87.0, 88.0, 89.0,
90.0, 91.0, 92.0, 93.0, 94.0, 95.0, 96.0, 97.0, 98.0, 99.0,
94.3, 89.6, 84.9, 80.2, 75.5, 70.8, 66.1, 61.4, 56.7, 52.0,
47.3, 42.6, 37.9, 33.2, 28.5, 23.8, 19.1, 14.4, 9.7, 5.]
)
assert_allclose(a, b, rtol=1e-5, atol=1e-5)
def test_check_2d(self):
arr = np.arange(20).reshape(4, 5).astype(np.float64)
test = pad(arr, (2, 2), mode='linear_ramp', end_values=(0, 0))
expected = np.array(
[[0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0.5, 1., 1.5, 2., 1., 0.],
[0., 0., 0., 1., 2., 3., 4., 2., 0.],
[0., 2.5, 5., 6., 7., 8., 9., 4.5, 0.],
[0., 5., 10., 11., 12., 13., 14., 7., 0.],
[0., 7.5, 15., 16., 17., 18., 19., 9.5, 0.],
[0., 3.75, 7.5, 8., 8.5, 9., 9.5, 4.75, 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0.]])
assert_allclose(test, expected)
class TestReflect(TestCase):
def test_check_simple(self):
a = np.arange(100)
a = pad(a, (25, 20), 'reflect')
b = np.array(
[25, 24, 23, 22, 21, 20, 19, 18, 17, 16,
15, 14, 13, 12, 11, 10, 9, 8, 7, 6,
5, 4, 3, 2, 1,
0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
80, 81, 82, 83, 84, 85, 86, 87, 88, 89,
90, 91, 92, 93, 94, 95, 96, 97, 98, 99,
98, 97, 96, 95, 94, 93, 92, 91, 90, 89,
88, 87, 86, 85, 84, 83, 82, 81, 80, 79]
)
assert_array_equal(a, b)
def test_check_odd_method(self):
a = np.arange(100)
a = pad(a, (25, 20), 'reflect', reflect_type='odd')
b = np.array(
[-25, -24, -23, -22, -21, -20, -19, -18, -17, -16,
-15, -14, -13, -12, -11, -10, -9, -8, -7, -6,
-5, -4, -3, -2, -1,
0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
80, 81, 82, 83, 84, 85, 86, 87, 88, 89,
90, 91, 92, 93, 94, 95, 96, 97, 98, 99,
100, 101, 102, 103, 104, 105, 106, 107, 108, 109,
110, 111, 112, 113, 114, 115, 116, 117, 118, 119]
)
assert_array_equal(a, b)
def test_check_large_pad(self):
a = [[4, 5, 6], [6, 7, 8]]
a = pad(a, (5, 7), 'reflect')
b = np.array(
[[7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7],
[5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
[7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7],
[5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
[7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7],
[5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
[7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7],
[5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
[7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7],
[5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
[7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7],
[5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
[7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7],
[5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5]]
)
assert_array_equal(a, b)
def test_check_shape(self):
a = [[4, 5, 6]]
a = pad(a, (5, 7), 'reflect')
b = np.array(
[[5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
[5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
[5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
[5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
[5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
[5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
[5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
[5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
[5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
[5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
[5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
[5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
[5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5]]
)
assert_array_equal(a, b)
def test_check_01(self):
a = pad([1, 2, 3], 2, 'reflect')
b = np.array([3, 2, 1, 2, 3, 2, 1])
assert_array_equal(a, b)
def test_check_02(self):
a = pad([1, 2, 3], 3, 'reflect')
b = np.array([2, 3, 2, 1, 2, 3, 2, 1, 2])
assert_array_equal(a, b)
def test_check_03(self):
a = pad([1, 2, 3], 4, 'reflect')
b = np.array([1, 2, 3, 2, 1, 2, 3, 2, 1, 2, 3])
assert_array_equal(a, b)
class TestSymmetric(TestCase):
def test_check_simple(self):
a = np.arange(100)
a = pad(a, (25, 20), 'symmetric')
b = np.array(
[24, 23, 22, 21, 20, 19, 18, 17, 16, 15,
14, 13, 12, 11, 10, 9, 8, 7, 6, 5,
4, 3, 2, 1, 0,
0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
80, 81, 82, 83, 84, 85, 86, 87, 88, 89,
90, 91, 92, 93, 94, 95, 96, 97, 98, 99,
99, 98, 97, 96, 95, 94, 93, 92, 91, 90,
89, 88, 87, 86, 85, 84, 83, 82, 81, 80]
)
assert_array_equal(a, b)
def test_check_odd_method(self):
a = np.arange(100)
a = pad(a, (25, 20), 'symmetric', reflect_type='odd')
b = np.array(
[-24, -23, -22, -21, -20, -19, -18, -17, -16, -15,
-14, -13, -12, -11, -10, -9, -8, -7, -6, -5,
-4, -3, -2, -1, 0,
0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
80, 81, 82, 83, 84, 85, 86, 87, 88, 89,
90, 91, 92, 93, 94, 95, 96, 97, 98, 99,
99, 100, 101, 102, 103, 104, 105, 106, 107, 108,
109, 110, 111, 112, 113, 114, 115, 116, 117, 118]
)
assert_array_equal(a, b)
def test_check_large_pad(self):
a = [[4, 5, 6], [6, 7, 8]]
a = pad(a, (5, 7), 'symmetric')
b = np.array(
[[5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
[5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
[7, 8, 8, 7, 6, 6, 7, 8, 8, 7, 6, 6, 7, 8, 8],
[7, 8, 8, 7, 6, 6, 7, 8, 8, 7, 6, 6, 7, 8, 8],
[5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
[5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
[7, 8, 8, 7, 6, 6, 7, 8, 8, 7, 6, 6, 7, 8, 8],
[7, 8, 8, 7, 6, 6, 7, 8, 8, 7, 6, 6, 7, 8, 8],
[5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
[5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
[7, 8, 8, 7, 6, 6, 7, 8, 8, 7, 6, 6, 7, 8, 8],
[7, 8, 8, 7, 6, 6, 7, 8, 8, 7, 6, 6, 7, 8, 8],
[5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
[5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6]]
)
assert_array_equal(a, b)
def test_check_large_pad_odd(self):
a = [[4, 5, 6], [6, 7, 8]]
a = pad(a, (5, 7), 'symmetric', reflect_type='odd')
b = np.array(
[[-3, -2, -2, -1, 0, 0, 1, 2, 2, 3, 4, 4, 5, 6, 6],
[-3, -2, -2, -1, 0, 0, 1, 2, 2, 3, 4, 4, 5, 6, 6],
[-1, 0, 0, 1, 2, 2, 3, 4, 4, 5, 6, 6, 7, 8, 8],
[-1, 0, 0, 1, 2, 2, 3, 4, 4, 5, 6, 6, 7, 8, 8],
[ 1, 2, 2, 3, 4, 4, 5, 6, 6, 7, 8, 8, 9, 10, 10],
[ 1, 2, 2, 3, 4, 4, 5, 6, 6, 7, 8, 8, 9, 10, 10],
[ 3, 4, 4, 5, 6, 6, 7, 8, 8, 9, 10, 10, 11, 12, 12],
[ 3, 4, 4, 5, 6, 6, 7, 8, 8, 9, 10, 10, 11, 12, 12],
[ 5, 6, 6, 7, 8, 8, 9, 10, 10, 11, 12, 12, 13, 14, 14],
[ 5, 6, 6, 7, 8, 8, 9, 10, 10, 11, 12, 12, 13, 14, 14],
[ 7, 8, 8, 9, 10, 10, 11, 12, 12, 13, 14, 14, 15, 16, 16],
[ 7, 8, 8, 9, 10, 10, 11, 12, 12, 13, 14, 14, 15, 16, 16],
[ 9, 10, 10, 11, 12, 12, 13, 14, 14, 15, 16, 16, 17, 18, 18],
[ 9, 10, 10, 11, 12, 12, 13, 14, 14, 15, 16, 16, 17, 18, 18]]
)
assert_array_equal(a, b)
def test_check_shape(self):
a = [[4, 5, 6]]
a = pad(a, (5, 7), 'symmetric')
b = np.array(
[[5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
[5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
[5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
[5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
[5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
[5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
[5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
[5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
[5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
[5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
[5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
[5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
[5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6]]
)
assert_array_equal(a, b)
def test_check_01(self):
a = pad([1, 2, 3], 2, 'symmetric')
b = np.array([2, 1, 1, 2, 3, 3, 2])
assert_array_equal(a, b)
def test_check_02(self):
a = pad([1, 2, 3], 3, 'symmetric')
b = np.array([3, 2, 1, 1, 2, 3, 3, 2, 1])
assert_array_equal(a, b)
def test_check_03(self):
a = pad([1, 2, 3], 6, 'symmetric')
b = np.array([1, 2, 3, 3, 2, 1, 1, 2, 3, 3, 2, 1, 1, 2, 3])
assert_array_equal(a, b)
class TestWrap(TestCase):
def test_check_simple(self):
a = np.arange(100)
a = pad(a, (25, 20), 'wrap')
b = np.array(
[75, 76, 77, 78, 79, 80, 81, 82, 83, 84,
85, 86, 87, 88, 89, 90, 91, 92, 93, 94,
95, 96, 97, 98, 99,
0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
80, 81, 82, 83, 84, 85, 86, 87, 88, 89,
90, 91, 92, 93, 94, 95, 96, 97, 98, 99,
0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
10, 11, 12, 13, 14, 15, 16, 17, 18, 19]
)
assert_array_equal(a, b)
def test_check_large_pad(self):
a = np.arange(12)
a = np.reshape(a, (3, 4))
a = pad(a, (10, 12), 'wrap')
b = np.array(
[[10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10,
11, 8, 9, 10, 11, 8, 9, 10, 11],
[2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2,
3, 0, 1, 2, 3, 0, 1, 2, 3],
[6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6,
7, 4, 5, 6, 7, 4, 5, 6, 7],
[10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10,
11, 8, 9, 10, 11, 8, 9, 10, 11],
[2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2,
3, 0, 1, 2, 3, 0, 1, 2, 3],
[6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6,
7, 4, 5, 6, 7, 4, 5, 6, 7],
[10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10,
11, 8, 9, 10, 11, 8, 9, 10, 11],
[2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2,
3, 0, 1, 2, 3, 0, 1, 2, 3],
[6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6,
7, 4, 5, 6, 7, 4, 5, 6, 7],
[10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10,
11, 8, 9, 10, 11, 8, 9, 10, 11],
[2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2,
3, 0, 1, 2, 3, 0, 1, 2, 3],
[6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6,
7, 4, 5, 6, 7, 4, 5, 6, 7],
[10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10,
11, 8, 9, 10, 11, 8, 9, 10, 11],
[2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2,
3, 0, 1, 2, 3, 0, 1, 2, 3],
[6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6,
7, 4, 5, 6, 7, 4, 5, 6, 7],
[10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10,
11, 8, 9, 10, 11, 8, 9, 10, 11],
[2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2,
3, 0, 1, 2, 3, 0, 1, 2, 3],
[6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6,
7, 4, 5, 6, 7, 4, 5, 6, 7],
[10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10,
11, 8, 9, 10, 11, 8, 9, 10, 11],
[2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2,
3, 0, 1, 2, 3, 0, 1, 2, 3],
[6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6,
7, 4, 5, 6, 7, 4, 5, 6, 7],
[10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10,
11, 8, 9, 10, 11, 8, 9, 10, 11],
[2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2,
3, 0, 1, 2, 3, 0, 1, 2, 3],
[6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6,
7, 4, 5, 6, 7, 4, 5, 6, 7],
[10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10,
11, 8, 9, 10, 11, 8, 9, 10, 11]]
)
assert_array_equal(a, b)
def test_check_01(self):
a = pad([1, 2, 3], 3, 'wrap')
b = np.array([1, 2, 3, 1, 2, 3, 1, 2, 3])
assert_array_equal(a, b)
def test_check_02(self):
a = pad([1, 2, 3], 4, 'wrap')
b = np.array([3, 1, 2, 3, 1, 2, 3, 1, 2, 3, 1])
assert_array_equal(a, b)
class TestStatLen(TestCase):
def test_check_simple(self):
a = np.arange(30)
a = np.reshape(a, (6, 5))
a = pad(a, ((2, 3), (3, 2)), mode='mean', stat_length=(3,))
b = np.array(
[[6, 6, 6, 5, 6, 7, 8, 9, 8, 8],
[6, 6, 6, 5, 6, 7, 8, 9, 8, 8],
[1, 1, 1, 0, 1, 2, 3, 4, 3, 3],
[6, 6, 6, 5, 6, 7, 8, 9, 8, 8],
[11, 11, 11, 10, 11, 12, 13, 14, 13, 13],
[16, 16, 16, 15, 16, 17, 18, 19, 18, 18],
[21, 21, 21, 20, 21, 22, 23, 24, 23, 23],
[26, 26, 26, 25, 26, 27, 28, 29, 28, 28],
[21, 21, 21, 20, 21, 22, 23, 24, 23, 23],
[21, 21, 21, 20, 21, 22, 23, 24, 23, 23],
[21, 21, 21, 20, 21, 22, 23, 24, 23, 23]]
)
assert_array_equal(a, b)
class TestEdge(TestCase):
def test_check_simple(self):
a = np.arange(12)
a = np.reshape(a, (4, 3))
a = pad(a, ((2, 3), (3, 2)), 'edge')
b = np.array(
[[0, 0, 0, 0, 1, 2, 2, 2],
[0, 0, 0, 0, 1, 2, 2, 2],
[0, 0, 0, 0, 1, 2, 2, 2],
[3, 3, 3, 3, 4, 5, 5, 5],
[6, 6, 6, 6, 7, 8, 8, 8],
[9, 9, 9, 9, 10, 11, 11, 11],
[9, 9, 9, 9, 10, 11, 11, 11],
[9, 9, 9, 9, 10, 11, 11, 11],
[9, 9, 9, 9, 10, 11, 11, 11]]
)
assert_array_equal(a, b)
def test_check_width_shape_1_2(self):
# Check a pad_width of the form ((1, 2),).
# Regression test for issue gh-7808.
a = np.array([1, 2, 3])
padded = pad(a, ((1, 2),), 'edge')
expected = np.array([1, 1, 2, 3, 3, 3])
assert_array_equal(padded, expected)
a = np.array([[1, 2, 3], [4, 5, 6]])
padded = pad(a, ((1, 2),), 'edge')
expected = pad(a, ((1, 2), (1, 2)), 'edge')
assert_array_equal(padded, expected)
a = np.arange(24).reshape(2, 3, 4)
padded = pad(a, ((1, 2),), 'edge')
expected = pad(a, ((1, 2), (1, 2), (1, 2)), 'edge')
assert_array_equal(padded, expected)
class TestZeroPadWidth(TestCase):
def test_zero_pad_width(self):
arr = np.arange(30)
arr = np.reshape(arr, (6, 5))
for pad_width in (0, (0, 0), ((0, 0), (0, 0))):
assert_array_equal(arr, pad(arr, pad_width, mode='constant'))
class TestLegacyVectorFunction(TestCase):
def test_legacy_vector_functionality(self):
def _padwithtens(vector, pad_width, iaxis, kwargs):
vector[:pad_width[0]] = 10
vector[-pad_width[1]:] = 10
return vector
a = np.arange(6).reshape(2, 3)
a = pad(a, 2, _padwithtens)
b = np.array(
[[10, 10, 10, 10, 10, 10, 10],
[10, 10, 10, 10, 10, 10, 10],
[10, 10, 0, 1, 2, 10, 10],
[10, 10, 3, 4, 5, 10, 10],
[10, 10, 10, 10, 10, 10, 10],
[10, 10, 10, 10, 10, 10, 10]]
)
assert_array_equal(a, b)
class TestNdarrayPadWidth(TestCase):
def test_check_simple(self):
a = np.arange(12)
a = np.reshape(a, (4, 3))
a = pad(a, np.array(((2, 3), (3, 2))), 'edge')
b = np.array(
[[0, 0, 0, 0, 1, 2, 2, 2],
[0, 0, 0, 0, 1, 2, 2, 2],
[0, 0, 0, 0, 1, 2, 2, 2],
[3, 3, 3, 3, 4, 5, 5, 5],
[6, 6, 6, 6, 7, 8, 8, 8],
[9, 9, 9, 9, 10, 11, 11, 11],
[9, 9, 9, 9, 10, 11, 11, 11],
[9, 9, 9, 9, 10, 11, 11, 11],
[9, 9, 9, 9, 10, 11, 11, 11]]
)
assert_array_equal(a, b)
class TestUnicodeInput(TestCase):
def test_unicode_mode(self):
try:
constant_mode = unicode('constant')
except NameError:
constant_mode = 'constant'
a = np.pad([1], 2, mode=constant_mode)
b = np.array([0, 0, 1, 0, 0])
assert_array_equal(a, b)
class ValueError1(TestCase):
def test_check_simple(self):
arr = np.arange(30)
arr = np.reshape(arr, (6, 5))
kwargs = dict(mode='mean', stat_length=(3, ))
assert_raises(ValueError, pad, arr, ((2, 3), (3, 2), (4, 5)),
**kwargs)
def test_check_negative_stat_length(self):
arr = np.arange(30)
arr = np.reshape(arr, (6, 5))
kwargs = dict(mode='mean', stat_length=(-3, ))
assert_raises(ValueError, pad, arr, ((2, 3), (3, 2)),
**kwargs)
def test_check_negative_pad_width(self):
arr = np.arange(30)
arr = np.reshape(arr, (6, 5))
kwargs = dict(mode='mean', stat_length=(3, ))
assert_raises(ValueError, pad, arr, ((-2, 3), (3, 2)),
**kwargs)
class ValueError2(TestCase):
def test_check_negative_pad_amount(self):
arr = np.arange(30)
arr = np.reshape(arr, (6, 5))
kwargs = dict(mode='mean', stat_length=(3, ))
assert_raises(ValueError, pad, arr, ((-2, 3), (3, 2)),
**kwargs)
class ValueError3(TestCase):
def test_check_kwarg_not_allowed(self):
arr = np.arange(30).reshape(5, 6)
assert_raises(ValueError, pad, arr, 4, mode='mean',
reflect_type='odd')
def test_mode_not_set(self):
arr = np.arange(30).reshape(5, 6)
assert_raises(TypeError, pad, arr, 4)
def test_malformed_pad_amount(self):
arr = np.arange(30).reshape(5, 6)
assert_raises(ValueError, pad, arr, (4, 5, 6, 7), mode='constant')
def test_malformed_pad_amount2(self):
arr = np.arange(30).reshape(5, 6)
assert_raises(ValueError, pad, arr, ((3, 4, 5), (0, 1, 2)),
mode='constant')
def test_pad_too_many_axes(self):
arr = np.arange(30).reshape(5, 6)
# Attempt to pad using a 3D array equivalent
bad_shape = (((3,), (4,), (5,)), ((0,), (1,), (2,)))
assert_raises(ValueError, pad, arr, bad_shape,
mode='constant')
class TypeError1(TestCase):
def test_float(self):
arr = np.arange(30)
assert_raises(TypeError, pad, arr, ((-2.1, 3), (3, 2)))
assert_raises(TypeError, pad, arr, np.array(((-2.1, 3), (3, 2))))
def test_str(self):
arr = np.arange(30)
assert_raises(TypeError, pad, arr, 'foo')
assert_raises(TypeError, pad, arr, np.array('foo'))
def test_object(self):
class FooBar(object):
pass
arr = np.arange(30)
assert_raises(TypeError, pad, arr, FooBar())
def test_complex(self):
arr = np.arange(30)
assert_raises(TypeError, pad, arr, complex(1, -1))
assert_raises(TypeError, pad, arr, np.array(complex(1, -1)))
def test_check_wrong_pad_amount(self):
arr = np.arange(30)
arr = np.reshape(arr, (6, 5))
kwargs = dict(mode='mean', stat_length=(3, ))
assert_raises(TypeError, pad, arr, ((2, 3, 4), (3, 2)),
**kwargs)
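# --- Illustrative sketch (not part of the test suite) -----------------------
# A compact interactive illustration of the behaviours verified above, using
# only NumPy. The commented results hold for the exact inputs shown; the
# 'mean' case keeps the integer dtype of the input.
def _pad_demo():
    x = np.arange(6)
    print(pad(x, 2, mode='constant', constant_values=9))  # [9 9 0 1 2 3 4 5 9 9]
    print(pad(x, 2, mode='reflect'))                       # [2 1 0 1 2 3 4 5 4 3]
    print(pad(x, 2, mode='mean', stat_length=2))           # edge means of 2 values per side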
if __name__ == "__main__":
np.testing.run_module_suite()
| jjas0nn/solvem | tensorflow/lib/python2.7/site-packages/numpy/lib/tests/test_arraypad.py | Python | mit | 43,332 | 0.002285 |
#
# The Python Imaging Library.
# $Id$
#
# standard image operations
#
# History:
# 2001-10-20 fl Created
# 2001-10-23 fl Added autocontrast operator
# 2001-12-18 fl Added Kevin's fit operator
# 2004-03-14 fl Fixed potential division by zero in equalize
# 2005-05-05 fl Fixed equalize for low number of values
#
# Copyright (c) 2001-2004 by Secret Labs AB
# Copyright (c) 2001-2004 by Fredrik Lundh
#
# See the README file for information on usage and redistribution.
#
import Image
import operator
##
# (New in 1.1.3) The <b>ImageOps</b> module contains a number of
# 'ready-made' image processing operations. This module is somewhat
# experimental, and most operators only work on L and RGB images.
#
# @since 1.1.3
##
#
# helpers
def _border(border):
if type(border) is type(()):
if len(border) == 2:
left, top = right, bottom = border
elif len(border) == 4:
left, top, right, bottom = border
else:
left = top = right = bottom = border
return left, top, right, bottom
def _color(color, mode):
if Image.isStringType(color):
import ImageColor
color = ImageColor.getcolor(color, mode)
return color
def _lut(image, lut):
if image.mode == "P":
# FIXME: apply to lookup table, not image data
raise NotImplementedError("mode P support coming soon")
elif image.mode in ("L", "RGB"):
if image.mode == "RGB" and len(lut) == 256:
lut = lut + lut + lut
return image.point(lut)
else:
raise IOError, "not supported for this image mode"
#
# actions
##
# Maximize (normalize) image contrast. This function calculates a
# histogram of the input image, removes <i>cutoff</i> percent of the
# lightest and darkest pixels from the histogram, and remaps the image
# so that the darkest pixel becomes black (0), and the lightest
# becomes white (255).
#
# @param image The image to process.
# @param cutoff How many percent to cut off from the histogram.
# @param ignore The background pixel value (use None for no background).
# @return An image.
def autocontrast(image, cutoff=0, ignore=None):
"Maximize image contrast, based on histogram"
histogram = image.histogram()
lut = []
for layer in range(0, len(histogram), 256):
h = histogram[layer:layer+256]
if ignore is not None:
# get rid of outliers
try:
h[ignore] = 0
except TypeError:
# assume sequence
for ix in ignore:
h[ix] = 0
if cutoff:
# cut off pixels from both ends of the histogram
# get number of pixels
n = 0
for ix in range(256):
n = n + h[ix]
# remove cutoff% pixels from the low end
cut = n * cutoff / 100
for lo in range(256):
if cut > h[lo]:
cut = cut - h[lo]
h[lo] = 0
else:
h[lo] = h[lo] - cut
cut = 0
if cut <= 0:
break
# remove cutoff% samples from the hi end
cut = n * cutoff / 100
for hi in range(255, -1, -1):
if cut > h[hi]:
cut = cut - h[hi]
h[hi] = 0
else:
h[hi] = h[hi] - cut
cut = 0
if cut <= 0:
break
# find lowest/highest samples after preprocessing
for lo in range(256):
if h[lo]:
break
for hi in range(255, -1, -1):
if h[hi]:
break
if hi <= lo:
# don't bother
lut.extend(range(256))
else:
scale = 255.0 / (hi - lo)
offset = -lo * scale
for ix in range(256):
ix = int(ix * scale + offset)
if ix < 0:
ix = 0
elif ix > 255:
ix = 255
lut.append(ix)
return _lut(image, lut)
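# --- Illustrative usage (not part of the original module) -------------------
# A small sketch of the remapping described above, using the old "Image"
# namespace imported at the top of this module. A grey ramp that only occupies
# the 64..191 band is stretched out to (approximately) the full 0..255 range.
def _autocontrast_demo():
    ramp = Image.new("L", (128, 1))
    ramp.putdata([64 + i for i in range(128)])          # pixel values 64..191
    return ramp.getextrema(), autocontrast(ramp).getextrema()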
##
# Colorize grayscale image. The <i>black</i> and <i>white</i>
# arguments should be RGB tuples; this function calculates a colour
# wedge mapping all black pixels in the source image to the first
# colour, and all white pixels to the second colour.
#
# @param image The image to colourize.
# @param black The colour to use for black input pixels.
# @param white The colour to use for white input pixels.
# @return An image.
def colorize(image, black, white):
"Colorize a grayscale image"
assert image.mode == "L"
black = _color(black, "RGB")
white = _color(white, "RGB")
red = []; green = []; blue = []
for i in range(256):
red.append(black[0]+i*(white[0]-black[0])/255)
green.append(black[1]+i*(white[1]-black[1])/255)
blue.append(black[2]+i*(white[2]-black[2])/255)
image = image.convert("RGB")
return _lut(image, red + green + blue)
##
# Remove border from image. The same amount of pixels are removed
# from all four sides. This function works on all image modes.
#
# @param image The image to crop.
# @param border The number of pixels to remove.
# @return An image.
# @see Image#Image.crop
def crop(image, border=0):
"Crop border off image"
left, top, right, bottom = _border(border)
return image.crop(
(left, top, image.size[0]-right, image.size[1]-bottom)
)
##
# Deform the image.
#
# @param image The image to deform.
# @param deformer A deformer object. Any object that implements a
# <b>getmesh</b> method can be used.
# @param resample What resampling filter to use.
# @return An image.
def deform(image, deformer, resample=Image.BILINEAR):
"Deform image using the given deformer"
return image.transform(
image.size, Image.MESH, deformer.getmesh(image), resample
)
##
# Equalize the image histogram. This function applies a non-linear
# mapping to the input image, in order to create a uniform
# distribution of grayscale values in the output image.
#
# @param image The image to equalize.
# @param mask An optional mask. If given, only the pixels selected by
# the mask are included in the analysis.
# @return An image.
def equalize(image, mask=None):
"Equalize image histogram"
if image.mode == "P":
image = image.convert("RGB")
h = image.histogram(mask)
lut = []
for b in range(0, len(h), 256):
histo = filter(None, h[b:b+256])
if len(histo) <= 1:
lut.extend(range(256))
else:
step = (reduce(operator.add, histo) - histo[-1]) / 255
if not step:
lut.extend(range(256))
else:
n = step / 2
for i in range(256):
lut.append(n / step)
n = n + h[i+b]
return _lut(image, lut)
##
# Add border to the image
#
# @param image The image to expand.
# @param border Border width, in pixels.
# @param fill Pixel fill value (a colour value). Default is 0 (black).
# @return An image.
def expand(image, border=0, fill=0):
"Add border to image"
left, top, right, bottom = _border(border)
width = left + image.size[0] + right
height = top + image.size[1] + bottom
out = Image.new(image.mode, (width, height), _color(fill, image.mode))
out.paste(image, (left, top))
return out
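# --- Illustrative usage (not part of the original module) -------------------
# Sketch of the border handling above: an integer border is applied to all
# four sides (2- and 4-tuples are also accepted via _border()). Uses the old
# "Image" namespace imported at the top of this module.
def _expand_demo():
    im = Image.new("L", (4, 4), 255)
    framed = expand(im, border=2, fill=0)
    return framed.size  # (8, 8): 2 pixels of black border on every side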
##
# Returns a sized and cropped version of the image, cropped to the
# requested aspect ratio and size.
# <p>
# The <b>fit</b> function was contributed by Kevin Cazabon.
#
# @param size The requested output size in pixels, given as a
# (width, height) tuple.
# @param method What resampling method to use. Default is Image.NEAREST.
# @param bleed Remove a border around the outside of the image (from all
# four edges). The value is a decimal percentage (use 0.01 for one
# percent). The default value is 0 (no border).
# @param centering Control the cropping position. Use (0.5, 0.5) for
# center cropping (e.g. if cropping the width, take 50% off of the
# left side, and therefore 50% off the right side). (0.0, 0.0)
# will crop from the top left corner (i.e. if cropping the width,
# take all of the crop off of the right side, and if cropping the
# height, take all of it off the bottom). (1.0, 0.0) will crop
# from the bottom left corner, etc. (i.e. if cropping the width,
# take all of the crop off the left side, and if cropping the height
# take none from the top, and therefore all off the bottom).
# @return An image.
def fit(image, size, method=Image.NEAREST, bleed=0.0, centering=(0.5, 0.5)):
"""
This method returns a sized and cropped version of the image,
cropped to the aspect ratio and size that you request.
"""
# by Kevin Cazabon, Feb 17/2000
# [email protected]
# http://www.cazabon.com
# ensure inputs are valid
if type(centering) != type([]):
centering = [centering[0], centering[1]]
if centering[0] > 1.0 or centering[0] < 0.0:
        centering[0] = 0.50
if centering[1] > 1.0 or centering[1] < 0.0:
centering[1] = 0.50
if bleed > 0.49999 or bleed < 0.0:
bleed = 0.0
# calculate the area to use for resizing and cropping, subtracting
# the 'bleed' around the edges
# number of pixels to trim off on Top and Bottom, Left and Right
bleedPixels = (
int((float(bleed) * float(image.size[0])) + 0.5),
int((float(bleed) * float(image.size[1])) + 0.5)
)
liveArea = (
bleedPixels[0], bleedPixels[1], image.size[0] - bleedPixels[0] - 1,
image.size[1] - bleedPixels[1] - 1
)
liveSize = (liveArea[2] - liveArea[0], liveArea[3] - liveArea[1])
# calculate the aspect ratio of the liveArea
liveAreaAspectRatio = float(liveSize[0])/float(liveSize[1])
# calculate the aspect ratio of the output image
aspectRatio = float(size[0]) / float(size[1])
# figure out if the sides or top/bottom will be cropped off
if liveAreaAspectRatio >= aspectRatio:
# liveArea is wider than what's needed, crop the sides
cropWidth = int((aspectRatio * float(liveSize[1])) + 0.5)
cropHeight = liveSize[1]
else:
# liveArea is taller than what's needed, crop the top and bottom
cropWidth = liveSize[0]
cropHeight = int((float(liveSize[0])/aspectRatio) + 0.5)
# make the crop
leftSide = int(liveArea[0] + (float(liveSize[0]-cropWidth) * centering[0]))
if leftSide < 0:
leftSide = 0
topSide = int(liveArea[1] + (float(liveSize[1]-cropHeight) * centering[1]))
if topSide < 0:
topSide = 0
out = image.crop(
(leftSide, topSide, leftSide + cropWidth, topSide + cropHeight)
)
# resize the image and return it
return out.resize(size, method)
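# --- Illustrative usage (not part of the original module) -------------------
# Sketch of the centering behaviour documented above, using the old "Image"
# namespace imported at the top of this module. A 400x200 grey image fitted
# into a 100x100 box is first cropped to a centred, roughly square region and
# then resized, so the result has exactly the requested size.
def _fit_demo():
    wide = Image.new("RGB", (400, 200), (128, 128, 128))
    thumb = fit(wide, (100, 100), centering=(0.5, 0.5))
    return thumb.size  # (100, 100)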
##
# Flip the image vertically (top to bottom).
#
# @param image The image to flip.
# @return An image.
def flip(image):
"Flip image vertically"
return image.transpose(Image.FLIP_TOP_BOTTOM)
##
# Convert the image to grayscale.
#
# @param image The image to convert.
# @return An image.
def grayscale(image):
"Convert to grayscale"
return image.convert("L")
##
# Invert (negate) the image.
#
# @param image The image to invert.
# @return An image.
def invert(image):
"Invert image (negate)"
lut = []
for i in range(256):
lut.append(255-i)
return _lut(image, lut)
##
# Flip image horizontally (left to right).
#
# @param image The image to mirror.
# @return An image.
def mirror(image):
"Flip image horizontally"
return image.transpose(Image.FLIP_LEFT_RIGHT)
##
# Reduce the number of bits for each colour channel.
#
# @param image The image to posterize.
# @param bits The number of bits to keep for each channel (1-8).
# @return An image.
def posterize(image, bits):
"Reduce the number of bits per color channel"
lut = []
mask = ~(2**(8-bits)-1)
for i in range(256):
lut.append(i & mask)
return _lut(image, lut)
##
# Invert all pixel values above a threshold.
#
# @param image The image to posterize.
# @param threshold All pixels above this greyscale level are inverted.
# @return An image.
def solarize(image, threshold=128):
"Invert all values above threshold"
lut = []
for i in range(256):
if i < threshold:
lut.append(i)
else:
lut.append(255-i)
return _lut(image, lut)
# --------------------------------------------------------------------
# PIL USM components, from Kevin Cazabon.
def gaussian_blur(im, radius=None):
""" PIL_usm.gblur(im, [radius])"""
if radius is None:
radius = 5.0
im.load()
return im.im.gaussian_blur(radius)
gblur = gaussian_blur
def unsharp_mask(im, radius=None, percent=None, threshold=None):
""" PIL_usm.usm(im, [radius, percent, threshold])"""
if radius is None:
radius = 5.0
if percent is None:
percent = 150
if threshold is None:
threshold = 3
im.load()
return im.im.unsharp_mask(radius, percent, threshold)
usm = unsharp_mask
| ppizarror/Hero-of-Antair | data/images/pil/ImageOps.py | Python | gpl-2.0 | 13,229 | 0.00189 |
#!/usr/bin/env python3
import xml.etree.ElementTree as ET
from xml.etree.ElementTree import Element, SubElement
import gzip
import os
import requests
import time
from xml.dom import minidom
VALID_REFS = ['VENDOR', 'VERSION', 'CHANGE_LOG', 'PRODUCT', 'PROJECT', 'ADVISORY']
CPEDB_URL = "https://static.nvd.nist.gov/feeds/xml/cpe/dictionary/official-cpe-dictionary_v2.3.xml.gz"
ns = {
'': 'http://cpe.mitre.org/dictionary/2.0',
'cpe-23': 'http://scap.nist.gov/schema/cpe-extension/2.3',
'xml': 'http://www.w3.org/XML/1998/namespace'
}
class CPE:
def __init__(self, cpe_str, titles, refs):
self.cpe_str = cpe_str
self.titles = titles
self.references = refs
self.cpe_cur_ver = "".join(self.cpe_str.split(":")[5:6])
def update_xml_dict(self):
ET.register_namespace('', 'http://cpe.mitre.org/dictionary/2.0')
cpes = Element('cpe-list')
cpes.set('xmlns:cpe-23', "http://scap.nist.gov/schema/cpe-extension/2.3")
cpes.set('xmlns:ns6', "http://scap.nist.gov/schema/scap-core/0.1")
cpes.set('xmlns:scap-core', "http://scap.nist.gov/schema/scap-core/0.3")
cpes.set('xmlns:config', "http://scap.nist.gov/schema/configuration/0.1")
cpes.set('xmlns:xsi', "http://www.w3.org/2001/XMLSchema-instance")
cpes.set('xmlns:meta', "http://scap.nist.gov/schema/cpe-dictionary-metadata/0.2")
cpes.set('xsi:schemaLocation', " ".join(["http://scap.nist.gov/schema/cpe-extension/2.3",
"https://scap.nist.gov/schema/cpe/2.3/cpe-dictionary-extension_2.3.xsd",
"http://cpe.mitre.org/dictionary/2.0",
"https://scap.nist.gov/schema/cpe/2.3/cpe-dictionary_2.3.xsd",
"http://scap.nist.gov/schema/cpe-dictionary-metadata/0.2",
"https://scap.nist.gov/schema/cpe/2.1/cpe-dictionary-metadata_0.2.xsd",
"http://scap.nist.gov/schema/scap-core/0.3",
"https://scap.nist.gov/schema/nvd/scap-core_0.3.xsd",
"http://scap.nist.gov/schema/configuration/0.1",
"https://scap.nist.gov/schema/nvd/configuration_0.1.xsd",
"http://scap.nist.gov/schema/scap-core/0.1",
"https://scap.nist.gov/schema/nvd/scap-core_0.1.xsd"]))
item = SubElement(cpes, 'cpe-item')
cpe_short_name = CPE.short_name(self.cpe_str)
cpe_new_ver = CPE.version_update(self.cpe_str)
item.set('name', 'cpe:/' + cpe_short_name)
self.titles[0].text.replace(self.cpe_cur_ver, cpe_new_ver)
for title in self.titles:
item.append(title)
if self.references:
item.append(self.references)
cpe23item = SubElement(item, 'cpe-23:cpe23-item')
cpe23item.set('name', self.cpe_str)
# Generate the XML as a string
xmlstr = ET.tostring(cpes)
# And use minidom to pretty print the XML
return minidom.parseString(xmlstr).toprettyxml(encoding="utf-8").decode("utf-8")
@staticmethod
def version(cpe):
return cpe.split(":")[5]
@staticmethod
def product(cpe):
return cpe.split(":")[4]
@staticmethod
def short_name(cpe):
return ":".join(cpe.split(":")[2:6])
@staticmethod
def version_update(cpe):
return ":".join(cpe.split(":")[5:6])
@staticmethod
def no_version(cpe):
return ":".join(cpe.split(":")[:5])
class CPEDB:
def __init__(self, nvd_path):
self.all_cpes = dict()
self.all_cpes_no_version = dict()
self.nvd_path = nvd_path
def get_xml_dict(self):
print("CPE: Setting up NIST dictionary")
if not os.path.exists(os.path.join(self.nvd_path, "cpe")):
os.makedirs(os.path.join(self.nvd_path, "cpe"))
cpe_dict_local = os.path.join(self.nvd_path, "cpe", os.path.basename(CPEDB_URL))
if not os.path.exists(cpe_dict_local) or os.stat(cpe_dict_local).st_mtime < time.time() - 86400:
print("CPE: Fetching xml manifest from [" + CPEDB_URL + "]")
cpe_dict = requests.get(CPEDB_URL)
open(cpe_dict_local, "wb").write(cpe_dict.content)
print("CPE: Unzipping xml manifest...")
nist_cpe_file = gzip.GzipFile(fileobj=open(cpe_dict_local, 'rb'))
print("CPE: Converting xml manifest to dict...")
tree = ET.parse(nist_cpe_file)
all_cpedb = tree.getroot()
self.parse_dict(all_cpedb)
def parse_dict(self, all_cpedb):
        # Cycle through the dict and build two dicts to be used for custom
        # lookups of partial and complete CPE objects.
        # The objects are then used to create new proposed XML updates if it
        # is determined one is required.
# Out of the different language titles, select English
for cpe in all_cpedb.findall(".//{http://cpe.mitre.org/dictionary/2.0}cpe-item"):
cpe_titles = []
for title in cpe.findall('.//{http://cpe.mitre.org/dictionary/2.0}title[@xml:lang="en-US"]', ns):
title.tail = None
cpe_titles.append(title)
            # Some older CPEs don't include references; if they do, make
            # sure we handle the case of a single ref needing to be packed
            # into a list
cpe_ref = cpe.find(".//{http://cpe.mitre.org/dictionary/2.0}references")
if cpe_ref:
for ref in cpe_ref.findall(".//{http://cpe.mitre.org/dictionary/2.0}reference"):
ref.tail = None
ref.text = ref.text.upper()
if ref.text not in VALID_REFS:
ref.text = ref.text + "-- UPDATE this entry, here are some examples and just one word should be used -- " + ' '.join(VALID_REFS) # noqa E501
cpe_ref.tail = None
cpe_ref.text = None
cpe_str = cpe.find(".//{http://scap.nist.gov/schema/cpe-extension/2.3}cpe23-item").get('name')
item = CPE(cpe_str, cpe_titles, cpe_ref)
cpe_str_no_version = CPE.no_version(cpe_str)
# This dict must have a unique key for every CPE version
# which allows matching to the specific obj data of that
# NIST dict entry
self.all_cpes.update({cpe_str: item})
# This dict has one entry for every CPE (w/o version) to allow
# partial match (no valid version) check (the obj is saved and
# used as seed for suggested xml updates. By updating the same
# non-version'd entry, it assumes the last update here is the
# latest version in the NIST dict)
self.all_cpes_no_version.update({cpe_str_no_version: item})
def find_partial(self, cpe_str):
cpe_str_no_version = CPE.no_version(cpe_str)
if cpe_str_no_version in self.all_cpes_no_version:
return cpe_str_no_version
def find_partial_obj(self, cpe_str):
cpe_str_no_version = CPE.no_version(cpe_str)
if cpe_str_no_version in self.all_cpes_no_version:
return self.all_cpes_no_version[cpe_str_no_version]
def find_partial_latest_version(self, cpe_str_partial):
cpe_obj = self.find_partial_obj(cpe_str_partial)
return cpe_obj.cpe_cur_ver
def find(self, cpe_str):
if self.find_partial(cpe_str):
if cpe_str in self.all_cpes:
return cpe_str
def gen_update_xml(self, cpe_str):
cpe = self.find_partial_obj(cpe_str)
return cpe.update_xml_dict()
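# --- Illustrative usage sketch (not part of the original script) ------------
# How the two lookup tables built in parse_dict() are typically used: check
# whether a CPE string is known exactly, known only without its version, or
# unknown, and emit a proposed dictionary update in the middle case. The CPE
# string below is only an example; "./nvd" is an arbitrary cache directory,
# and get_xml_dict() downloads the (large) NIST dictionary on first use.
if __name__ == "__main__":
    db = CPEDB("./nvd")
    db.get_xml_dict()
    candidate = "cpe:2.3:a:busybox:busybox:1.36.0:*:*:*:*:*:*:*"
    if db.find(candidate):
        print("exact CPE match")
    elif db.find_partial(candidate):
        print("known product, unknown version; latest known is",
              db.find_partial_latest_version(candidate))
        print(db.gen_update_xml(candidate))
    else:
        print("CPE not found in the NIST dictionary")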
| masahir0y/buildroot-yamada | support/scripts/cpedb.py | Python | gpl-2.0 | 7,878 | 0.003427 |
# -*- coding: utf-8 -*-
from django.core.management.base import BaseCommand, CommandError
from annotations.models import Corpus
from annotations.exports import export_fragments
from core.utils import CSV, XLSX
class Command(BaseCommand):
help = 'Exports existing Fragments for the given Corpus and Languages'
def add_arguments(self, parser):
parser.add_argument('corpus', type=str)
parser.add_argument('languages', nargs='+', type=str)
parser.add_argument('--add_lemmata', action='store_true', dest='add_lemmata', default=False)
parser.add_argument('--add_indices', action='store_true', dest='add_indices', default=False)
parser.add_argument('--xlsx', action='store_true', dest='format_xlsx', default=False)
parser.add_argument('--doc', dest='document')
parser.add_argument('--formal_structure')
def handle(self, *args, **options):
# Retrieve the Corpus from the database
try:
corpus = Corpus.objects.get(title=options['corpus'])
except Corpus.DoesNotExist:
raise CommandError('Corpus with title {} does not exist'.format(options['corpus']))
format_ = XLSX if options['format_xlsx'] else CSV
for language in options['languages']:
if not corpus.languages.filter(iso=language):
raise CommandError('Language {} does not exist'.format(language))
filename = 'fragments_{lang}.{ext}'.format(lang=language, ext=format_)
export_fragments(filename, format_, corpus, language,
document=options['document'],
add_lemmata=options['add_lemmata'],
add_indices=options['add_indices'],
formal_structure=options['formal_structure'])
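# --- Illustrative invocation (not part of the original command) -------------
# Typical usage from a Django project root; the corpus title and language ISO
# codes below are examples only and must match objects in your database:
#
#   python manage.py export_fragments EuroParl en de --add_lemmata --xlsx
#
# This writes one file per language (e.g. fragments_en.xlsx) via
# export_fragments().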
| UUDigitalHumanitieslab/timealign | annotations/management/commands/export_fragments.py | Python | mit | 1,823 | 0.003291 |
# Python/NLTK implementation of the algorithm to detect similarity between
# short sentences described in the paper "Sentence Similarity based
# on Semantic Nets and Corpus Statistics" by Li, et al.
# Results achieved are NOT identical to those reported in the paper, but
# this is very likely due to differences between the way the algorithm was
# described in the paper and how I implemented it.
from __future__ import division
import nltk
from nltk.corpus import wordnet as wn
from nltk.corpus import brown
import math
import numpy as np
import sys
# Parameters to the algorithm. Currently set to the values reported
# in the paper to produce the "best" results.
ALPHA = 0.2
BETA = 0.45
ETA = 0.4
PHI = 0.2
DELTA = 0.85
brown_freqs = dict()
N = 0
######################### word similarity ##########################
def get_best_synset_pair(word_1, word_2):
"""
Choose the pair with highest path similarity among all pairs.
Mimics pattern-seeking behavior of humans.
"""
max_sim = -1.0
synsets_1 = wn.synsets(word_1)
synsets_2 = wn.synsets(word_2)
if len(synsets_1) == 0 or len(synsets_2) == 0:
return None, None
else:
max_sim = -1.0
best_pair = None, None
for synset_1 in synsets_1:
for synset_2 in synsets_2:
sim = wn.path_similarity(synset_1, synset_2)
if sim > max_sim:
max_sim = sim
best_pair = synset_1, synset_2
return best_pair
def length_dist(synset_1, synset_2):
"""
Return a measure of the length of the shortest path in the semantic
ontology (Wordnet in our case as well as the paper's) between two
synsets.
"""
l_dist = sys.maxint
if synset_1 is None or synset_2 is None:
return 0.0
if synset_1 == synset_2:
# if synset_1 and synset_2 are the same synset return 0
l_dist = 0.0
else:
wset_1 = set([str(x.name()) for x in synset_1.lemmas()])
wset_2 = set([str(x.name()) for x in synset_2.lemmas()])
if len(wset_1.intersection(wset_2)) > 0:
# if synset_1 != synset_2 but there is word overlap, return 1.0
l_dist = 1.0
else:
# just compute the shortest path between the two
l_dist = synset_1.shortest_path_distance(synset_2)
if l_dist is None:
l_dist = 0.0
# normalize path length to the range [0,1]
return math.exp(-ALPHA * l_dist)
def hierarchy_dist(synset_1, synset_2):
"""
Return a measure of depth in the ontology to model the fact that
nodes closer to the root are broader and have less semantic similarity
than nodes further away from the root.
"""
h_dist = sys.maxint
if synset_1 is None or synset_2 is None:
return h_dist
if synset_1 == synset_2:
# return the depth of one of synset_1 or synset_2
h_dist = max([x[1] for x in synset_1.hypernym_distances()])
else:
# find the max depth of least common subsumer
hypernyms_1 = {x[0]:x[1] for x in synset_1.hypernym_distances()}
hypernyms_2 = {x[0]:x[1] for x in synset_2.hypernym_distances()}
lcs_candidates = set(hypernyms_1.keys()).intersection(
set(hypernyms_2.keys()))
if len(lcs_candidates) > 0:
lcs_dists = []
for lcs_candidate in lcs_candidates:
                lcs_d1 = hypernyms_1.get(lcs_candidate, 0)
                lcs_d2 = hypernyms_2.get(lcs_candidate, 0)
lcs_dists.append(max([lcs_d1, lcs_d2]))
h_dist = max(lcs_dists)
else:
h_dist = 0
return ((math.exp(BETA * h_dist) - math.exp(-BETA * h_dist)) /
(math.exp(BETA * h_dist) + math.exp(-BETA * h_dist)))
def word_similarity(word_1, word_2):
synset_pair = get_best_synset_pair(word_1, word_2)
return (length_dist(synset_pair[0], synset_pair[1]) *
hierarchy_dist(synset_pair[0], synset_pair[1]))
######################### sentence similarity ##########################
def most_similar_word(word, word_set):
"""
Find the word in the joint word set that is most similar to the word
passed in. We use the algorithm above to compute word similarity between
the word and each word in the joint word set, and return the most similar
word and the actual similarity value.
"""
max_sim = -1.0
sim_word = ""
for ref_word in word_set:
sim = word_similarity(word, ref_word)
if sim > max_sim:
max_sim = sim
sim_word = ref_word
return sim_word, max_sim
def info_content(lookup_word):
"""
Uses the Brown corpus available in NLTK to calculate a Laplace
smoothed frequency distribution of words, then uses this information
to compute the information content of the lookup_word.
"""
global N
if N == 0:
# poor man's lazy evaluation
for sent in brown.sents():
for word in sent:
word = word.lower()
                brown_freqs[word] = brown_freqs.get(word, 0) + 1
N = N + 1
lookup_word = lookup_word.lower()
    n = brown_freqs.get(lookup_word, 0)
return 1.0 - (math.log(n + 1) / math.log(N + 1))
def semantic_vector(words, joint_words, info_content_norm):
"""
Computes the semantic vector of a sentence. The sentence is passed in as
a collection of words. The size of the semantic vector is the same as the
size of the joint word set. The elements are 1 if a word in the sentence
already exists in the joint word set, or the similarity of the word to the
most similar word in the joint word set if it doesn't. Both values are
further normalized by the word's (and similar word's) information content
if info_content_norm is True.
"""
sent_set = set(words)
semvec = np.zeros(len(joint_words))
i = 0
for joint_word in joint_words:
if joint_word in sent_set:
# if word in union exists in the sentence, s(i) = 1 (unnormalized)
semvec[i] = 1.0
if info_content_norm:
semvec[i] = semvec[i] * math.pow(info_content(joint_word), 2)
else:
# find the most similar word in the joint set and set the sim value
sim_word, max_sim = most_similar_word(joint_word, sent_set)
semvec[i] = PHI if max_sim > PHI else 0.0
if info_content_norm:
semvec[i] = semvec[i] * info_content(joint_word) * info_content(sim_word)
i = i + 1
return semvec
def semantic_similarity(sentence_1, sentence_2, info_content_norm):
"""
Computes the semantic similarity between two sentences as the cosine
similarity between the semantic vectors computed for each sentence.
"""
words_1 = nltk.word_tokenize(sentence_1)
words_2 = nltk.word_tokenize(sentence_2)
joint_words = set(words_1).union(set(words_2))
vec_1 = semantic_vector(words_1, joint_words, info_content_norm)
vec_2 = semantic_vector(words_2, joint_words, info_content_norm)
return np.dot(vec_1, vec_2.T) / (np.linalg.norm(vec_1) * np.linalg.norm(vec_2))
######################### word order similarity ##########################
def word_order_vector(words, joint_words, windex):
"""
Computes the word order vector for a sentence. The sentence is passed
in as a collection of words. The size of the word order vector is the
same as the size of the joint word set. The elements of the word order
vector are the position mapping (from the windex dictionary) of the
word in the joint set if the word exists in the sentence. If the word
does not exist in the sentence, then the value of the element is the
position of the most similar word in the sentence as long as the similarity
is above the threshold ETA.
"""
wovec = np.zeros(len(joint_words))
i = 0
wordset = set(words)
for joint_word in joint_words:
if joint_word in wordset:
# word in joint_words found in sentence, just populate the index
wovec[i] = windex[joint_word]
else:
# word not in joint_words, find most similar word and populate
# word_vector with the thresholded similarity
sim_word, max_sim = most_similar_word(joint_word, wordset)
if max_sim > ETA:
wovec[i] = windex[sim_word]
else:
wovec[i] = 0
i = i + 1
return wovec
def word_order_similarity(sentence_1, sentence_2):
"""
Computes the word-order similarity between two sentences as the normalized
difference of word order between the two sentences.
"""
words_1 = nltk.word_tokenize(sentence_1)
words_2 = nltk.word_tokenize(sentence_2)
joint_words = list(set(words_1).union(set(words_2)))
windex = {x[1]: x[0] for x in enumerate(joint_words)}
r1 = word_order_vector(words_1, joint_words, windex)
r2 = word_order_vector(words_2, joint_words, windex)
return 1.0 - (np.linalg.norm(r1 - r2) / np.linalg.norm(r1 + r2))
######################### overall similarity ##########################
def similarity(sentence_1, sentence_2):
"""
Calculate the semantic similarity between two sentences. The last
parameter is True or False depending on whether information content
normalization is desired or not.
"""
info_content_norm = True
return DELTA * semantic_similarity(sentence_1, sentence_2, info_content_norm) + \
(1.0 - DELTA) * word_order_similarity(sentence_1, sentence_2)
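# Minimal usage sketch (illustrative only): the sentence pair below is a
# made-up example, and NLTK's 'punkt', 'wordnet' and 'brown' data must be
# downloaded beforehand for these calls to work.
if __name__ == "__main__":
    s1 = "A quick brown dog jumps over the lazy fox."
    s2 = "A fast dark brown dog leaps over a sleepy fox."
    print("semantic:   %.3f" % semantic_similarity(s1, s2, True))
    print("word order: %.3f" % word_order_similarity(s1, s2))
    print("overall:    %.3f" % similarity(s1, s2))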
| joshtechnologygroup/dark-engine | engine/dark_matter/search_engine/similarity_checker.py | Python | gpl-3.0 | 9,843 | 0.003353 |
import unittest
import os
import shutil
import filecmp
import pytest
import marks
import options
import util
from controller.partition import PartitionController
class PartitionControllerTest(unittest.TestCase):
TEST_DATA_PATH = os.path.join(os.getcwd(), 'testdata')
def setUp(self):
self.controller = PartitionController()
self.projectOwner = 'victorclf'
self.projectName = 'jcc-web-persontest'
self.projectId = '%s/%s' % (self.projectOwner, self.projectName)
self.pullRequestId = 1
self.pullPath = os.path.join(options.PULL_REQUESTS_PATH, self.projectOwner, self.projectName, str(self.pullRequestId))
self.tearDown()
util.makedirsIfNotExists(options.PULL_REQUESTS_PATH)
shutil.copytree(os.path.join(self.TEST_DATA_PATH, self.projectOwner),
os.path.join(options.PULL_REQUESTS_PATH, self.projectOwner),
ignore=lambda root,files: [f for f in files if f.endswith('.old')])
def tearDown(self):
shutil.rmtree(options.PULL_REQUESTS_PATH, True)
shutil.rmtree(os.path.join(os.getcwd(), 'workspace'), True)
@marks.slow
def testDownloadPullRequest(self):
shutil.rmtree(options.PULL_REQUESTS_PATH, True)
self.assertFalse(os.path.exists(os.path.join(self.pullPath)))
self.assertTrue(self.controller._downloadPullRequestFromGitHub(self.projectId, self.pullRequestId))
self.assertTrue(os.path.exists(os.path.join(self.pullPath)))
self.assertFalse(self.controller._downloadPullRequestFromGitHub(self.projectId, self.pullRequestId))
@marks.slow
def testPartitionPullRequest(self):
self.controller._partitionPullRequest(self.projectId, self.pullRequestId)
self.assertTrue(os.path.exists(os.path.join(self.pullPath, options.PARTITION_RESULTS_FOLDER_NAME)))
self.assertTrue(os.path.exists(os.path.join(self.pullPath, options.PARTITION_RESULTS_FOLDER_NAME, 'partitions.csv')))
for root, dirs, files in os.walk(self.pullPath):
for f in files:
if f.endswith('java'):
oldF = os.path.join(root, f) + '.old'
self.assertTrue(os.path.exists(oldF))
expectedOldF = os.path.join(self.TEST_DATA_PATH, '..', os.path.relpath(oldF))
self.assertTrue(filecmp.cmp(oldF, expectedOldF, False))
self.assertFalse(filecmp.cmp(oldF, os.path.join(root, f), False))
def testGetPartitionJSON(self):
pJSON = self.controller.getPartitionJSON(self.projectOwner, self.projectName, self.pullRequestId)
self.assertTrue(pJSON)
| victorclf/jcc-web | server/test/controller/test_partition.py | Python | agpl-3.0 | 2,710 | 0.007749 |
import socket
import pytest
from urllib3.exceptions import (
NewConnectionError,
ProtocolError,
ProxyError,
)
from tests import mock, unittest
from botocore.awsrequest import (
AWSRequest,
AWSHTTPConnectionPool,
AWSHTTPSConnectionPool,
)
from botocore.httpsession import (
get_cert_path,
mask_proxy_url,
URLLib3Session,
ProxyConfiguration,
)
from botocore.exceptions import (
ConnectionClosedError,
EndpointConnectionError,
ProxyConnectionError,
)
class TestProxyConfiguration(unittest.TestCase):
def setUp(self):
self.url = 'http://localhost/'
self.auth_url = 'http://user:pass@localhost/'
self.proxy_config = ProxyConfiguration(
proxies={'http': 'http://localhost:8081/'}
)
def update_http_proxy(self, url):
self.proxy_config = ProxyConfiguration(
proxies={'http': url}
)
def test_construct_proxy_headers_with_auth(self):
headers = self.proxy_config.proxy_headers_for(self.auth_url)
proxy_auth = headers.get('Proxy-Authorization')
self.assertEqual('Basic dXNlcjpwYXNz', proxy_auth)
def test_construct_proxy_headers_without_auth(self):
headers = self.proxy_config.proxy_headers_for(self.url)
self.assertEqual({}, headers)
def test_proxy_for_url_no_slashes(self):
self.update_http_proxy('localhost:8081/')
proxy_url = self.proxy_config.proxy_url_for(self.url)
self.assertEqual('http://localhost:8081/', proxy_url)
def test_proxy_for_url_no_protocol(self):
self.update_http_proxy('//localhost:8081/')
proxy_url = self.proxy_config.proxy_url_for(self.url)
self.assertEqual('http://localhost:8081/', proxy_url)
def test_fix_proxy_url_has_protocol_http(self):
proxy_url = self.proxy_config.proxy_url_for(self.url)
self.assertEqual('http://localhost:8081/', proxy_url)
class TestHttpSessionUtils(unittest.TestCase):
def test_get_cert_path_path(self):
path = '/some/path'
cert_path = get_cert_path(path)
self.assertEqual(path, cert_path)
def test_get_cert_path_certifi_or_default(self):
with mock.patch('botocore.httpsession.where') as where:
path = '/bundle/path'
where.return_value = path
cert_path = get_cert_path(True)
self.assertEqual(path, cert_path)
@pytest.mark.parametrize(
'proxy_url, expected_mask_url',
(
(
'http://myproxy.amazonaws.com',
'http://myproxy.amazonaws.com'
),
(
'http://[email protected]',
'http://***@myproxy.amazonaws.com'
),
(
'http://user:[email protected]',
'http://***:***@myproxy.amazonaws.com'
),
(
'https://user:[email protected]',
'https://***:***@myproxy.amazonaws.com'
),
(
'http://user:pass@localhost',
'http://***:***@localhost'
),
(
'http://user:pass@localhost:80',
'http://***:***@localhost:80'
),
(
'http://user:[email protected]',
'http://***:***@userpass.com'
),
(
'http://user:[email protected]',
'http://***:***@192.168.1.1'
),
(
'http://user:pass@[::1]',
'http://***:***@[::1]'
),
(
'http://user:pass@[::1]:80',
'http://***:***@[::1]:80'
),
)
)
def test_mask_proxy_url(proxy_url, expected_mask_url):
assert mask_proxy_url(proxy_url) == expected_mask_url
class TestURLLib3Session(unittest.TestCase):
def setUp(self):
self.request = AWSRequest(
method='GET',
url='http://example.com/',
headers={},
data=b'',
)
self.response = mock.Mock()
self.response.headers = {}
self.response.stream.return_value = b''
self.pool_manager = mock.Mock()
self.connection = mock.Mock()
self.connection.urlopen.return_value = self.response
self.pool_manager.connection_from_url.return_value = self.connection
self.pool_patch = mock.patch('botocore.httpsession.PoolManager')
self.proxy_patch = mock.patch('botocore.httpsession.proxy_from_url')
self.pool_manager_cls = self.pool_patch.start()
self.proxy_manager_fun = self.proxy_patch.start()
self.pool_manager_cls.return_value = self.pool_manager
self.proxy_manager_fun.return_value = self.pool_manager
def tearDown(self):
self.pool_patch.stop()
self.proxy_patch.stop()
def assert_request_sent(self, headers=None, body=None, url='/', chunked=False):
if headers is None:
headers = {}
self.connection.urlopen.assert_called_once_with(
method=self.request.method,
url=url,
body=body,
headers=headers,
retries=mock.ANY,
assert_same_host=False,
preload_content=False,
decode_content=False,
chunked=chunked,
)
def _assert_manager_call(self, manager, *assert_args, **assert_kwargs):
call_kwargs = {
'strict': True,
'maxsize': mock.ANY,
'timeout': mock.ANY,
'ssl_context': mock.ANY,
'socket_options': [],
'cert_file': None,
'key_file': None,
}
call_kwargs.update(assert_kwargs)
manager.assert_called_with(*assert_args, **call_kwargs)
def assert_pool_manager_call(self, *args, **kwargs):
self._assert_manager_call(self.pool_manager_cls, *args, **kwargs)
def assert_proxy_manager_call(self, *args, **kwargs):
self._assert_manager_call(self.proxy_manager_fun, *args, **kwargs)
def test_forwards_max_pool_size(self):
URLLib3Session(max_pool_connections=22)
self.assert_pool_manager_call(maxsize=22)
def test_forwards_client_cert(self):
URLLib3Session(client_cert='/some/cert')
self.assert_pool_manager_call(cert_file='/some/cert', key_file=None)
def test_forwards_client_cert_and_key_tuple(self):
cert = ('/some/cert', '/some/key')
URLLib3Session(client_cert=cert)
self.assert_pool_manager_call(cert_file=cert[0], key_file=cert[1])
def test_proxies_config_settings(self):
proxies = {'http': 'http://proxy.com'}
proxies_config = {
'proxy_ca_bundle': 'path/to/bundle',
'proxy_client_cert': ('path/to/cert', 'path/to/key'),
'proxy_use_forwarding_for_https': False,
}
use_forwarding = proxies_config['proxy_use_forwarding_for_https']
with mock.patch('botocore.httpsession.create_urllib3_context'):
session = URLLib3Session(
proxies=proxies,
proxies_config=proxies_config
)
self.request.url = 'http://example.com/'
session.send(self.request.prepare())
self.assert_proxy_manager_call(
proxies['http'],
proxy_headers={},
proxy_ssl_context=mock.ANY,
use_forwarding_for_https=use_forwarding
)
self.assert_request_sent(url=self.request.url)
def test_proxies_config_settings_unknown_config(self):
proxies = {'http': 'http://proxy.com'}
proxies_config = {
'proxy_ca_bundle': None,
'proxy_client_cert': None,
'proxy_use_forwarding_for_https': True,
'proxy_not_a_real_arg': 'do not pass'
}
use_forwarding = proxies_config['proxy_use_forwarding_for_https']
session = URLLib3Session(
proxies=proxies,
proxies_config=proxies_config
)
self.request.url = 'http://example.com/'
session.send(self.request.prepare())
self.assert_proxy_manager_call(
proxies['http'],
proxy_headers={},
use_forwarding_for_https=use_forwarding
)
self.assertNotIn(
'proxy_not_a_real_arg',
self.proxy_manager_fun.call_args
)
self.assert_request_sent(url=self.request.url)
def test_http_proxy_scheme_with_http_url(self):
proxies = {'http': 'http://proxy.com'}
session = URLLib3Session(proxies=proxies)
self.request.url = 'http://example.com/'
session.send(self.request.prepare())
self.assert_proxy_manager_call(
proxies['http'],
proxy_headers={},
)
self.assert_request_sent(url=self.request.url)
def test_http_proxy_scheme_with_https_url(self):
proxies = {'https': 'http://proxy.com'}
session = URLLib3Session(proxies=proxies)
self.request.url = 'https://example.com/'
session.send(self.request.prepare())
self.assert_proxy_manager_call(
proxies['https'],
proxy_headers={},
)
self.assert_request_sent()
def test_https_proxy_scheme_with_http_url(self):
proxies = {'http': 'https://proxy.com'}
session = URLLib3Session(proxies=proxies)
self.request.url = 'http://example.com/'
session.send(self.request.prepare())
self.assert_proxy_manager_call(
proxies['http'],
proxy_headers={},
)
self.assert_request_sent(url=self.request.url)
def test_https_proxy_scheme_tls_in_tls(self):
proxies = {'https': 'https://proxy.com'}
session = URLLib3Session(proxies=proxies)
self.request.url = 'https://example.com/'
session.send(self.request.prepare())
self.assert_proxy_manager_call(
proxies['https'],
proxy_headers={},
)
self.assert_request_sent()
def test_https_proxy_scheme_forwarding_https_url(self):
proxies = {'https': 'https://proxy.com'}
proxies_config = {"proxy_use_forwarding_for_https": True}
session = URLLib3Session(proxies=proxies, proxies_config=proxies_config)
self.request.url = 'https://example.com/'
session.send(self.request.prepare())
self.assert_proxy_manager_call(
proxies['https'],
proxy_headers={},
use_forwarding_for_https=True,
)
self.assert_request_sent(url=self.request.url)
def test_basic_https_proxy_with_client_cert(self):
proxies = {'https': 'http://proxy.com'}
session = URLLib3Session(proxies=proxies, client_cert='/some/cert')
self.request.url = 'https://example.com/'
session.send(self.request.prepare())
self.assert_proxy_manager_call(
proxies['https'],
proxy_headers={},
cert_file='/some/cert',
key_file=None,
)
self.assert_request_sent()
def test_basic_https_proxy_with_client_cert_and_key(self):
cert = ('/some/cert', '/some/key')
proxies = {'https': 'http://proxy.com'}
session = URLLib3Session(proxies=proxies, client_cert=cert)
self.request.url = 'https://example.com/'
session.send(self.request.prepare())
self.assert_proxy_manager_call(
proxies['https'],
proxy_headers={},
cert_file=cert[0],
key_file=cert[1],
)
self.assert_request_sent()
def test_urllib3_proxies_kwargs_included(self):
cert = ('/some/cert', '/some/key')
proxies = {'https': 'https://proxy.com'}
proxies_config = {'proxy_client_cert': "path/to/cert"}
with mock.patch('botocore.httpsession.create_urllib3_context'):
session = URLLib3Session(
proxies=proxies, client_cert=cert,
proxies_config=proxies_config
)
self.request.url = 'https://example.com/'
session.send(self.request.prepare())
self.assert_proxy_manager_call(
proxies['https'],
proxy_headers={},
cert_file=cert[0],
key_file=cert[1],
proxy_ssl_context=mock.ANY
)
self.assert_request_sent()
def test_proxy_ssl_context_uses_check_hostname(self):
cert = ('/some/cert', '/some/key')
proxies = {'https': 'https://proxy.com'}
proxies_config = {'proxy_client_cert': "path/to/cert"}
with mock.patch('botocore.httpsession.create_urllib3_context'):
session = URLLib3Session(
proxies=proxies, client_cert=cert,
proxies_config=proxies_config
)
self.request.url = 'https://example.com/'
session.send(self.request.prepare())
last_call = self.proxy_manager_fun.call_args[-1]
self.assertIs(last_call['ssl_context'].check_hostname, True)
def test_basic_request(self):
session = URLLib3Session()
session.send(self.request.prepare())
self.assert_request_sent()
self.response.stream.assert_called_once_with()
def test_basic_streaming_request(self):
session = URLLib3Session()
self.request.stream_output = True
session.send(self.request.prepare())
self.assert_request_sent()
self.response.stream.assert_not_called()
def test_basic_https_request(self):
session = URLLib3Session()
self.request.url = 'https://example.com/'
session.send(self.request.prepare())
self.assert_request_sent()
def test_basic_https_proxy_request(self):
proxies = {'https': 'http://proxy.com'}
session = URLLib3Session(proxies=proxies)
self.request.url = 'https://example.com/'
session.send(self.request.prepare())
self.assert_proxy_manager_call(proxies['https'], proxy_headers={})
self.assert_request_sent()
def test_basic_proxy_request_caches_manager(self):
proxies = {'https': 'http://proxy.com'}
session = URLLib3Session(proxies=proxies)
self.request.url = 'https://example.com/'
session.send(self.request.prepare())
# assert we created the proxy manager
self.assert_proxy_manager_call(proxies['https'], proxy_headers={})
session.send(self.request.prepare())
# assert that we did not create another proxy manager
self.assertEqual(self.proxy_manager_fun.call_count, 1)
def test_basic_http_proxy_request(self):
proxies = {'http': 'http://proxy.com'}
session = URLLib3Session(proxies=proxies)
session.send(self.request.prepare())
self.assert_proxy_manager_call(proxies['http'], proxy_headers={})
self.assert_request_sent(url=self.request.url)
def test_ssl_context_is_explicit(self):
session = URLLib3Session()
session.send(self.request.prepare())
_, manager_kwargs = self.pool_manager_cls.call_args
self.assertIsNotNone(manager_kwargs.get('ssl_context'))
def test_proxy_request_ssl_context_is_explicit(self):
proxies = {'http': 'http://proxy.com'}
session = URLLib3Session(proxies=proxies)
session.send(self.request.prepare())
_, proxy_kwargs = self.proxy_manager_fun.call_args
self.assertIsNotNone(proxy_kwargs.get('ssl_context'))
def test_session_forwards_socket_options_to_pool_manager(self):
socket_options = [(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)]
URLLib3Session(socket_options=socket_options)
self.assert_pool_manager_call(socket_options=socket_options)
def test_session_forwards_socket_options_to_proxy_manager(self):
proxies = {'http': 'http://proxy.com'}
socket_options = [(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)]
session = URLLib3Session(
proxies=proxies,
socket_options=socket_options,
)
session.send(self.request.prepare())
self.assert_proxy_manager_call(
proxies['http'],
proxy_headers={},
socket_options=socket_options,
)
def make_request_with_error(self, error):
self.connection.urlopen.side_effect = error
session = URLLib3Session()
session.send(self.request.prepare())
def test_catches_new_connection_error(self):
error = NewConnectionError(None, None)
with pytest.raises(EndpointConnectionError):
self.make_request_with_error(error)
def test_catches_bad_status_line(self):
error = ProtocolError(None)
with pytest.raises(ConnectionClosedError):
self.make_request_with_error(error)
def test_catches_proxy_error(self):
self.connection.urlopen.side_effect = ProxyError('test', None)
session = URLLib3Session(proxies={'http': 'http://user:[email protected]'})
with pytest.raises(ProxyConnectionError) as e:
session.send(self.request.prepare())
assert 'user:pass' not in str(e.value)
assert 'http://***:***@proxy.com' in str(e.value)
def test_aws_connection_classes_are_used(self):
session = URLLib3Session() # noqa
# ensure the pool manager is using the correct classes
http_class = self.pool_manager.pool_classes_by_scheme.get('http')
self.assertIs(http_class, AWSHTTPConnectionPool)
https_class = self.pool_manager.pool_classes_by_scheme.get('https')
self.assertIs(https_class, AWSHTTPSConnectionPool)
def test_chunked_encoding_is_set_with_header(self):
session = URLLib3Session()
self.request.headers['Transfer-Encoding'] = 'chunked'
session.send(self.request.prepare())
self.assert_request_sent(
chunked=True,
headers={'Transfer-Encoding': 'chunked'},
)
def test_chunked_encoding_is_not_set_without_header(self):
session = URLLib3Session()
session.send(self.request.prepare())
self.assert_request_sent(chunked=False)
| boto/botocore | tests/unit/test_http_session.py | Python | apache-2.0 | 18,102 | 0.000221 |
"""Pipeline configuration parameters."""
from os.path import dirname, abspath, join
from sqlalchemy import create_engine
OS_TYPES_URL = ('https://raw.githubusercontent.com/'
'openspending/os-types/master/src/os-types.json')
PIPELINE_FILE = 'pipeline-spec.yaml'
SOURCE_DATAPACKAGE_FILE = 'source.datapackage.json'
SOURCE_FILE = 'source.description.yaml'
STATUS_FILE = 'pipeline-status.json'
SCRAPER_FILE = 'scraper.py'
SOURCE_ZIP = 'source.datapackage.zip'
FISCAL_ZIP_FILE = 'fiscal.datapackage.zip'
SOURCE_DB = 'source.db.xlsx'
DATAPACKAGE_FILE = 'datapackage.json'
ROOT_DIR = abspath(join(dirname(__file__), '..'))
DATA_DIR = join(ROOT_DIR, 'data')
SPECIFICATIONS_DIR = join(ROOT_DIR, 'specifications')
PROCESSORS_DIR = join(ROOT_DIR, 'common', 'processors')
CODELISTS_DIR = join(ROOT_DIR, 'codelists')
DROPBOX_DIR = join(ROOT_DIR, 'dropbox')
GEOCODES_FILE = join(ROOT_DIR, 'geography', 'geocodes.nuts.csv')
FISCAL_SCHEMA_FILE = join(SPECIFICATIONS_DIR, 'fiscal.schema.yaml')
FISCAL_MODEL_FILE = join(SPECIFICATIONS_DIR, 'fiscal.model.yaml')
FISCAL_METADATA_FILE = join(SPECIFICATIONS_DIR, 'fiscal.metadata.yaml')
DEFAULT_PIPELINE_FILE = join(SPECIFICATIONS_DIR, 'default-pipeline-spec.yaml')
TEMPLATE_SCRAPER_FILE = join(PROCESSORS_DIR, 'scraper_template.py')
DESCRIPTION_SCHEMA_FILE = join(SPECIFICATIONS_DIR, 'source.schema.json')
TEMPLATE_SOURCE_FILE = join(SPECIFICATIONS_DIR, SOURCE_FILE)
LOCAL_PATH_EXTRACTOR = 'ingest_local_file'
REMOTE_CSV_EXTRACTOR = 'simple_remote_source'
REMOTE_EXCEL_EXTRACTOR = 'stream_remote_excel'
DATAPACKAGE_MUTATOR = 'mutate_datapackage'
DB_URI = 'sqlite:///{}/metrics.sqlite'
DB_ENGINE = create_engine(DB_URI.format(ROOT_DIR))
VERBOSE = False
LOG_SAMPLE_SIZE = 15
JSON_FORMAT = dict(indent=4, ensure_ascii=False, default=repr)
SNIFFER_SAMPLE_SIZE = 5000
SNIFFER_MAX_FAILURE_RATIO = 0.01
IGNORED_FIELD_TAG = '_ignored'
UNKNOWN_FIELD_TAG = '_unknown'
WARNING_CUTOFF = 10
NUMBER_FORMATS = [
{'format': 'default', 'bareNumber': False, 'decimalChar': '.', 'groupChar': ','},
{'format': 'default', 'bareNumber': False, 'decimalChar': ',', 'groupChar': '.'},
{'format': 'default', 'bareNumber': False, 'decimalChar': '.', 'groupChar': ' '},
{'format': 'default', 'bareNumber': False, 'decimalChar': ',', 'groupChar': ' '},
{'format': 'default', 'bareNumber': False, 'decimalChar': '.', 'groupChar': ''},
{'format': 'default', 'bareNumber': False, 'decimalChar': '.', 'groupChar': '`'},
{'format': 'default', 'bareNumber': False, 'decimalChar': ',', 'groupChar': '\''},
{'format': 'default', 'bareNumber': False, 'decimalChar': ',', 'groupChar': ' '},
]
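# Each entry describes a Table Schema-style number format; for instance the
# decimalChar=',' / groupChar='.' variant is presumably meant to parse a
# string like "1.234,56" as 1234.56.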
DATE_FORMATS = [
{'format': '%Y'},
{'format': '%d/%m/%Y'},
{'format': '%d//%m/%Y'},
{'format': '%d-%b-%Y'}, # abbreviated month
{'format': '%d-%b-%y'}, # abbreviated month
{'format': '%d. %b %y'}, # abbreviated month
{'format': '%b %y'}, # abbreviated month
{'format': '%d/%m/%y'},
{'format': '%d-%m-%Y'},
{'format': '%Y-%m-%d'},
{'format': '%y-%m-%d'},
{'format': '%y.%m.%d'},
{'format': '%Y.%m.%d'},
{'format': '%d.%m.%Y'},
{'format': '%d.%m.%y'},
{'format': '%d.%m.%Y %H:%M'},
{'format': '%Y-%m-%d %H:%M:%S'},
{'format': '%Y-%m-%d %H:%M:%S.%f'},
{'format': '%Y-%m-%dT%H:%M:%SZ'},
{'format': '%m/%d/%Y'},
{'format': '%m/%Y'},
{'format': '%y'},
]
| Victordeleon/os-data-importers | eu-structural-funds/common/config.py | Python | mit | 3,382 | 0.002365 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim:set et sts=4 sw=4:
__all__ = (
"WeaselSession",
"WeaselService",
"service",
)
import logging
import logging.config
import os
import time
import threading
logfile = os.path.join(os.path.dirname(__file__), "logging.conf")
logging.config.fileConfig(logfile)
logger = logging.getLogger("weasel")
import ibus
from core import *
from engine import *
import storage
def add_text(actions, msg, field, text):
actions.add(u'ctx')
(s, attrs, cursor) = text
msg.append(u'ctx.%s=%s\n' % (field, s))
if attrs:
msg.append(u'ctx.%s.attr.length=%d\n' % (field, len(attrs)))
for i in range(len(attrs)):
(extent, type_) = attrs[i]
msg.append(u'ctx.%s.attr.%d.range=%d,%d\n' % (field, i, extent[0], extent[1]))
msg.append(u'ctx.%s.attr.%d.type=%s\n' % (field, i, type_))
if cursor:
msg.append(u'ctx.%s.cursor=%d,%d\n' % (field, cursor[0], cursor[1]))
def add_cand(actions, msg, cand_info):
actions.add(u'ctx')
(current_page, total_pages, cursor, cands) = cand_info
n = len(cands)
msg.append(u'ctx.cand.length=%d\n' % n)
for i in range(n):
msg.append(u'ctx.cand.%d=%s\n' % (i, cands[i][0]))
msg.append(u'ctx.cand.cursor=%d\n' % cursor)
msg.append(u'ctx.cand.page=%d/%d\n' % (current_page, total_pages))
#msg.append(u'ctx.cand.current_page=%d\n' % current_page)
#msg.append(u'ctx.cand.total_pages=%d\n' % total_pages)
class WeaselSession:
    '''Weasel (小狼毫) session.
    Handles the interaction between the Rime engine and the Weasel front end.
'''
def __init__(self, params=None):
logger.info("init weasel session: %s", params)
self.__page_size = storage.DB.read_setting(u'Option/PageSize') or 5
self.__lookup_table = ibus.LookupTable(self.__page_size)
self.__clear()
self.__backend = Engine(self, params)
def __clear(self):
self.__commit = None
self.__preedit = None
self.__aux = None
self.__cand = None
def process_key_event(self, keycode, mask):
        '''Process a key event.'''
logger.debug("process_key_event: '%s'(%x), %08x" % \
(keysyms.keycode_to_name(keycode), keycode, mask))
self.__clear()
taken = self.__backend.process_key_event(KeyEvent(keycode, mask))
return taken
def get_response(self):
        '''Build the response message.'''
actions = set()
msg = list()
if self.__commit:
actions.add(u'commit')
msg.append(u'commit=%s\n' % u''.join(self.__commit))
if self.__preedit:
add_text(actions, msg, u'preedit', self.__preedit)
if self.__aux:
add_text(actions, msg, u'aux', self.__aux)
if self.__cand:
add_cand(actions, msg, self.__cand)
#self.__clear()
if not actions:
return u'action=noop\n.\n'
else:
# starts with an action list
msg.insert(0, u'action=%s\n' % u','.join(sorted(actions)))
# ends with a single dot
msg.append(u'.\n')
return u''.join(msg)
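    # Example of a response built by get_response() (illustrative values):
    #
    #   action=commit,ctx
    #   commit=你好
    #   ctx.preedit=ni hao
    #   ctx.preedit.cursor=0,6
    #   ctx.cand.length=1
    #   ctx.cand.0=你好
    #   ctx.cand.cursor=0
    #   ctx.cand.page=0/1
    #   .
    #
    # Every response ends with a line holding a single dot; when there is
    # nothing to update, the response is just "action=noop" followed by that dot.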
# implement a frontend proxy for rime engine
def commit_string(self, s):
        '''Commit text to the client application.'''
logger.debug(u'commit: [%s]' % s)
if self.__commit:
self.__commit.append(s)
else:
self.__commit = [s]
def update_preedit(self, s, start=0, end=0):
        '''Update the preedit (composition) string.
        [start, end) defines the highlighted range within the string.
'''
if start < end:
logger.debug(u'preedit: [%s[%s]%s]' % (s[:start], s[start:end], s[end:]))
else:
logger.debug(u'preedit: [%s]' % s)
#attrs = [((start, end), u'HIGHLIGHTED')] if start < end else None
#self.__preedit = (s, attrs)
cursor = (start, end) if start < end else None
self.__preedit = (s, None, cursor)
def update_aux(self, s, start=0, end=0):
        '''Update the auxiliary string.
        [start, end) defines the highlighted range within the string.
'''
if start < end:
logger.debug(u'aux: [%s[%s]%s]' % (s[:start], s[start:end], s[end:]))
else:
logger.debug(u'aux: [%s]' % s)
cursor = (start, end) if start < end else None
self.__aux = (s, None, cursor)
def update_candidates(self, candidates):
        '''Update the candidate list.'''
self.__lookup_table.clean()
self.__lookup_table.show_cursor(False)
if not candidates:
self.__cand = (0, 0, 0, [])
else:
for c in candidates:
self.__lookup_table.append_candidate(ibus.Text(c[0]))
self.__update_page()
def __update_page(self):
candidates = self.__lookup_table.get_candidates_in_current_page()
n = self.__lookup_table.get_number_of_candidates()
c = self.__lookup_table.get_cursor_pos()
p = self.__lookup_table.get_page_size()
current_page = c / p
total_pages = (n + p - 1) / p
cands = [(x.get_text(), None) for x in candidates]
self.__cand = (current_page, total_pages, c % p, cands)
def page_up(self):
if self.__lookup_table.page_up():
#print u'page up.'
self.__update_page()
return True
return False
def page_down(self):
if self.__lookup_table.page_down():
#print u'page down.'
self.__update_page()
return True
return False
def cursor_up(self):
if self.__lookup_table.cursor_up():
#print u'cursor up.'
self.__update_page()
return True
return False
def cursor_down(self):
if self.__lookup_table.cursor_down():
#print u'cursor down.'
self.__update_page()
return True
return False
def get_candidate_index(self, number):
if number >= self.__page_size:
return -1
index = number + self.__lookup_table.get_current_page_start()
#print u'cand index = %d' % index
return index
def get_highlighted_candidate_index(self):
index = self.__lookup_table.get_cursor_pos()
#print u'highlighted cand index = %d' % index
return index
class WeaselService:
    '''Weasel (小狼毫) algorithm service.
    Manages a set of sessions.
    Each session object holds one engine instance and serves the input
    requests of one IME front end.
'''
SESSION_EXPIRE_TIME = 3 * 60 # 3 min.
def __init__(self):
self.__sessions = dict()
self.__timer = None
def cleanup(self):
        '''Remove all sessions.'''
logger.info("cleaning up %d remaining sessions." % len(self.__sessions))
self.cancel_check()
self.__sessions.clear()
def schedule_next_check(self):
self.cancel_check()
self.__timer = threading.Timer(WeaselService.SESSION_EXPIRE_TIME + 10, \
lambda: self.check_stale_sessions())
self.__timer.start()
def cancel_check(self):
if self.__timer:
self.__timer.cancel()
self.__timer = None
def check_stale_sessions(self):
        '''Check for expired sessions.'''
logger.info("check_stale_sessions...")
expire_time = time.time() - WeaselService.SESSION_EXPIRE_TIME
for sid in self.__sessions.keys():
if self.__sessions[sid].last_active_time < expire_time:
logger.info("removing stale session #%x." % sid)
self.destroy_session(sid)
        # There are still active sessions; schedule the next check.
self.__timer = None
if self.__sessions:
self.schedule_next_check()
def has_session(self, sid):
        '''Check whether the given session exists.'''
if sid in self.__sessions:
return True
else:
return False
def get_session(self, sid):
        '''Get a session object by its id,
        e.g. in order to forward key events to it.
'''
if sid in self.__sessions:
session = self.__sessions[sid]
session.last_active_time = time.time()
return session
else:
return None
def create_session(self):
        '''Create a session.
        Called when an IME front end activates the input method.
        Returns the session id (a positive integer).
'''
try:
session = WeaselSession()
session.last_active_time = time.time()
        except Exception as e:
logger.error("create_session: error creating session: %s" % e)
return None
sid = id(session)
self.__sessions[sid] = session
logger.info("create_session: session #%x, total %d active sessions." % \
(sid, len(self.__sessions)))
        # Start the stale-session check.
if self.__sessions and not self.__timer:
self.schedule_next_check()
return sid
def destroy_session(self, sid):
        '''Destroy the given session.
        Called when an IME front end deactivates the input method.
'''
if sid not in self.__sessions:
logger.warning("destroy_session: invalid session #%x." % sid)
return False
del self.__sessions[sid]
logger.info("destroy_session: session #%x, %d active sessions left." % \
(sid, len(self.__sessions)))
        # No sessions left; stop the stale-session check.
if not self.__sessions and self.__timer:
self.cancel_check()
return True
# a ready-to-use service instance
service = WeaselService()
| lotem/rime.py | weasel/weasel.py | Python | gpl-3.0 | 9,656 | 0.003598 |
import sublime
import sublime_plugin
MARKDOWN_LINK_SNIPPET = "[${{1:{}}}](${{2:{}}})"
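# The snippet expands with two tab-stops: field 1 is pre-filled with the page
# title and field 2 with the URL, e.g. "[${1:Example Domain}](${2:https://example.com/})".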
class InsertMarkdownLinkCommand(sublime_plugin.TextCommand):
def decode_page(self, page_bytes, potential_encoding=None):
if potential_encoding:
try:
text = page_bytes.decode(potential_encoding)
return text
except:
pass
encodings_to_try = ["utf-8", "iso-8859-1"]
for encoding in encodings_to_try:
if encoding == potential_encoding:
continue
try:
text = page_bytes.decode(encoding)
return text
except:
pass
        raise UnicodeDecodeError(
            "unknown", page_bytes, 0, len(page_bytes),
            "unable to decode page with the attempted encodings")
def run(self, edit):
import re
def on_done(link):
import urllib.request
request = urllib.request.Request(link, headers={'User-Agent' : 'Google Internal-Only Browser'})
with urllib.request.urlopen(request) as page:
encoding = page.headers.get_content_charset()
text = self.decode_page(page.read(), encoding)
match = re.search("<title>(.+?)</title>", text, re.IGNORECASE | re.DOTALL)
if match is None:
title = link
else:
title = match.group(1).strip()
markdown_link = MARKDOWN_LINK_SNIPPET.format(title, link)
self.view.run_command("insert_snippet", {"contents": markdown_link})
clipboard_text = sublime.get_clipboard(2000)
if re.match("https?://", clipboard_text, re.IGNORECASE) is not None:
initial_text = clipboard_text
else:
initial_text = ""
input_view = self.view.window().show_input_panel("Link", initial_text, on_done, None, None)
input_view.sel().clear()
input_view.sel().add(sublime.Region(0, input_view.size()))
| rahul-ramadas/BagOfTricks | InsertMarkdownLink.py | Python | unlicense | 1,987 | 0.003523 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""Tests for Keychain password database parser."""
import unittest
from plaso.formatters import mac_keychain as _ # pylint: disable=unused-import
from plaso.lib import eventdata
from plaso.lib import timelib
from plaso.parsers import mac_keychain
from tests.parsers import test_lib
class MacKeychainParserTest(test_lib.ParserTestCase):
"""Tests for keychain file parser."""
def setUp(self):
"""Sets up the needed objects used throughout the test."""
self._parser = mac_keychain.KeychainParser()
def testParse(self):
"""Tests the Parse function."""
test_file = self._GetTestFilePath([u'login.keychain'])
event_queue_consumer = self._ParseFile(self._parser, test_file)
event_objects = self._GetEventObjectsFromQueue(event_queue_consumer)
self.assertEqual(len(event_objects), 5)
event_object = event_objects[0]
expected_timestamp = timelib.Timestamp.CopyFromString(
u'2014-01-26 14:51:48')
self.assertEqual(event_object.timestamp, expected_timestamp)
self.assertEqual(
event_object.timestamp_desc,
eventdata.EventTimestamp.CREATION_TIME)
self.assertEqual(event_object.entry_name, u'Secret Application')
self.assertEqual(event_object.account_name, u'moxilo')
expected_ssgp = (
u'b8e44863af1cb0785b89681d22e2721997ccfb8adb8853e726aff94c8830b05a')
self.assertEqual(event_object.ssgp_hash, expected_ssgp)
self.assertEqual(event_object.text_description, u'N/A')
expected_msg = u'Name: Secret Application Account: moxilo'
expected_msg_short = u'Secret Application'
self._TestGetMessageStrings(event_object, expected_msg, expected_msg_short)
event_object = event_objects[1]
self.assertEqual(
event_object.timestamp_desc,
eventdata.EventTimestamp.MODIFICATION_TIME)
expected_timestamp = timelib.Timestamp.CopyFromString(
u'2014-01-26 14:52:29')
self.assertEqual(event_object.timestamp, expected_timestamp)
event_object = event_objects[2]
expected_timestamp = timelib.Timestamp.CopyFromString(
u'2014-01-26 14:53:29')
self.assertEqual(event_object.timestamp, expected_timestamp)
self.assertEqual(event_object.entry_name, u'Secret Note')
self.assertEqual(event_object.text_description, u'secure note')
self.assertEqual(len(event_object.ssgp_hash), 1696)
expected_msg = u'Name: Secret Note'
expected_msg_short = u'Secret Note'
self._TestGetMessageStrings(event_object, expected_msg, expected_msg_short)
event_object = event_objects[3]
expected_timestamp = timelib.Timestamp.CopyFromString(
u'2014-01-26 14:54:33')
self.assertEqual(event_object.timestamp, expected_timestamp)
self.assertEqual(event_object.entry_name, u'plaso.kiddaland.net')
self.assertEqual(event_object.account_name, u'MrMoreno')
expected_ssgp = (
u'83ccacf55a8cb656d340ec405e9d8b308fac54bb79c5c9b0219bd0d700c3c521')
self.assertEqual(event_object.ssgp_hash, expected_ssgp)
self.assertEqual(event_object.where, u'plaso.kiddaland.net')
self.assertEqual(event_object.protocol, u'http')
self.assertEqual(event_object.type_protocol, u'dflt')
self.assertEqual(event_object.text_description, u'N/A')
expected_msg = (
u'Name: plaso.kiddaland.net '
u'Account: MrMoreno '
u'Where: plaso.kiddaland.net '
u'Protocol: http (dflt)')
expected_msg_short = u'plaso.kiddaland.net'
self._TestGetMessageStrings(event_object, expected_msg, expected_msg_short)
if __name__ == '__main__':
unittest.main()
| ostree/plaso | tests/parsers/mac_keychain.py | Python | apache-2.0 | 3,604 | 0.00111 |
import unittest
from time import mktime, strptime
from knxmonitor.Knx.KnxParser import KnxParser
class TestKnxParser(unittest.TestCase):
def setUp(self):
self.parser = KnxParser("enheter.xml", "groupaddresses.csv", False, False,
{ "1/1/14" : "onoff",
"1/1/15" : "temp",
"1/1/16" : "time",
"1/1/17" : "%%"})
def test_init(self):
p = KnxParser("enheter.xml", "groupaddresses.csv", False, False,
{ "1/1/14" : "onoff",
"1/1/15" : "temp",
"1/1/16" : "time",
"1/1/17" : "%%"})
self.assertIsInstance(p, KnxParser)
def test_setTimeBase(self):
basetime = mktime(strptime("Fri Sep 4 06:15:03 2015",
"%a %b %d %H:%M:%S %Y"))
try:
self.parser.setTimeBase(basetime)
except:
self.fail("setTimeBase raised exception")
def test_parseVbusOutput(self):
self.parser.parseVbusOutput(0, "Fri Sep 4 06:15:03 2015", "Fri Sep 4 06:15:03 2015:LPDU: BC 11 03 12 00 E2 00 80 00 21 :L_Data low from 6.12.31 to 1/1/15 hops: 06 T_DATA_XXX_REQ A_GroupValue_Write 01 ff")
self.assertEqual(len(self.parser.knxAddrStream["1/1/15"].telegrams), 1)
self.parser.parseVbusOutput(1, "Fri Sep 4 06:15:03 2015", "Fri Sep 4 06:15:03 2015:LPDU: BC 11 03 12 00 E2 00 80 00 21 :L_Data low from 6.12.31 to 1/1/15 hops: 06 T_DATA_XXX_REQ A_GroupValue_Write 81 ff")
self.assertEqual(len(self.parser.knxAddrStream["1/1/15"].telegrams), 2)
self.parser.parseVbusOutput(2, "Fri Dec 10 14:08:59 2010", "Fri Dec 10 14:08:59 2010:LPDU: B0 FF FF 00 00 E3 00 C0 11 1B 66 :L_Data system from 15.15.255 to 0/0/0 hops: 06 T_DATA_XXX_REQ A_IndividualAddress_Write 1.1.27")
self.assertEqual(len(self.parser.knxAddrStream["1/1/15"].telegrams), 2)
self.parser.parseVbusOutput(3, "Fri Sep 4 06:15:03 2015", "Fri Sep 4 06:15:03 2015:LPDU: BC 11 03 12 00 E2 00 80 00 21 :L_Data low from 6.12.31 to 2/7/15 hops: 06 T_DATA_XXX_REQ A_GroupValue_Write 81 ff")
self.assertEqual(len(self.parser.knxAddrStream["1/1/15"].telegrams), 2)
@unittest.skip("Cache functionality not finished yet.")
def test_storeCachedInput(self):
pass
def test_getStreamMinMaxValues(self):
self.assertEqual(self.parser.getStreamMinMaxValues("1/1/15"), (None, None))
self.parser.parseVbusOutput(0, "Fri Sep 4 06:15:03 2015", "Fri Sep 4 06:15:03 2015:LPDU: BC 11 03 12 00 E2 00 80 00 21 :L_Data low from 6.12.31 to 1/1/15 hops: 06 T_DATA_XXX_REQ A_GroupValue_Write 01 ff")
self.parser.parseVbusOutput(1, "Fri Sep 4 06:15:03 2015", "Fri Sep 4 06:15:03 2015:LPDU: BC 11 03 12 00 E2 00 80 00 21 :L_Data low from 6.12.31 to 1/1/15 hops: 06 T_DATA_XXX_REQ A_GroupValue_Write 81 ff")
self.assertEqual(self.parser.getStreamMinMaxValues("1/1/15"), ("-15.37","5.11"))
self.assertEqual(self.parser.getStreamMinMaxValues("666/1/15"), (None, None))
def test_printStreams(self):
self.parser.parseVbusOutput(0, "Fri Sep 4 06:15:03 2015", "Fri Sep 4 06:15:03 2015:LPDU: BC 11 03 12 00 E2 00 80 00 21 :L_Data low from 6.12.31 to 1/1/15 hops: 06 T_DATA_XXX_REQ A_GroupValue_Write 01 ff")
self.parser.parseVbusOutput(1, "Fri Sep 4 06:15:03 2015", "Fri Sep 4 06:15:03 2015:LPDU: BC 11 03 12 00 E2 00 80 00 21 :L_Data low from 6.12.31 to 1/1/15 hops: 06 T_DATA_XXX_REQ A_GroupValue_Write 81 ff")
self.parser.printStreams(["1/1/15"])
@unittest.skip("Does not play well with Travis CI environment at the moment...")
def test_plotStreams(self):
basetime = mktime(strptime("Fri Sep 4 06:15:00 2015",
"%a %b %d %H:%M:%S %Y"))
self.parser.setTimeBase(basetime)
self.parser.parseVbusOutput(0, "Fri Sep 4 06:15:03 2015", "Fri Sep 4 06:15:03 2015:LPDU: BC 11 03 12 00 E2 00 80 00 21 :L_Data low from 6.12.31 to 1/1/15 hops: 06 T_DATA_XXX_REQ A_GroupValue_Write 01 ff")
self.parser.parseVbusOutput(1, "Fri Sep 4 06:15:06 2015", "Fri Sep 4 06:15:03 2015:LPDU: BC 11 03 12 00 E2 00 80 00 21 :L_Data low from 6.12.31 to 1/1/15 hops: 06 T_DATA_XXX_REQ A_GroupValue_Write 81 ff")
self.parser.plotStreams(["1/1/15"], "testimg.png", 0.0)
if __name__ == '__main__':
unittest.main()
| TrondKjeldas/knxmonitor | test/test_KnxParser.py | Python | gpl-2.0 | 4,428 | 0.007227 |
import glob
import os
from distutils.dep_util import newer
from distutils.core import Command
from distutils.spawn import find_executable
from distutils.util import change_root
class build_gschemas(Command):
"""build message catalog files
Build message catalog (.mo) files from .po files using xgettext
and intltool. These are placed directly in the build tree.
"""
description = "build gschemas used for dconf"
user_options = []
build_base = None
def initialize_options(self):
pass
def finalize_options(self):
self.gschemas_directory = self.distribution.gschemas
self.set_undefined_options('build', ('build_base', 'build_base'))
def run(self):
if find_executable("glib-compile-schemas") is None:
raise SystemExit("Error: 'glib-compile-schemas' not found.")
basepath = os.path.join(self.build_base, 'share', 'glib-2.0', 'schemas')
self.copy_tree(self.gschemas_directory, basepath)
class install_gschemas(Command):
"""install message catalog files
Copy compiled message catalog files into their installation
directory, $prefix/share/locale/$lang/LC_MESSAGES/$package.mo.
"""
description = "install message catalog files"
user_options = []
skip_build = None
build_base = None
install_base = None
root = None
def initialize_options(self):
pass
def finalize_options(self):
self.set_undefined_options('build', ('build_base', 'build_base'))
self.set_undefined_options(
'install',
('root', 'root'),
('install_base', 'install_base'),
('skip_build', 'skip_build'))
def run(self):
if not self.skip_build:
self.run_command('build_gschemas')
src = os.path.join(self.build_base, 'share', 'glib-2.0', 'schemas')
dest = os.path.join(self.install_base, 'share', 'glib-2.0', 'schemas')
        if self.root is not None:
dest = change_root(self.root, dest)
self.copy_tree(src, dest)
self.spawn(['glib-compile-schemas', dest])
__all__ = ["build_gschemas", "install_gschemas"]
| nagisa/Feeds | gdist/gschemas.py | Python | gpl-2.0 | 2,161 | 0.001851 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.conf.urls import url
from openstack_dashboard.dashboards.admin.volumes.snapshots import views
urlpatterns = [
url(r'^(?P<snapshot_id>[^/]+)$',
views.DetailView.as_view(),
name='detail'),
url(r'^(?P<snapshot_id>[^/]+)/update_status/$',
views.UpdateStatusView.as_view(),
name='update_status'),
]
| xuweiliang/Codelibrary | openstack_dashboard/dashboards/admin/volumes_back/snapshots/urls.py | Python | apache-2.0 | 922 | 0 |
from django.test import Client, TestCase
from django.core.urlresolvers import reverse
class ViewTests(TestCase):
def setUp(self):
self.client = Client()
def test_bmi_calculator_main_page_1(self):
response = self.client.get(reverse('calculators:main_bmi'))
self.assertEqual(response.status_code, 200)
def test_bmi_calculator_main_page_2(self):
response = self.client.get(reverse('calculators:main_bmi'))
self.assertEqual(response.context["sex"], ("kobieta", "mężczyzna"))
def test_bmi_calculator_main_page_3(self):
"""A list of Template instances used to render the final content,
in the order they were rendered. For each template in the list,
use template.name to get the template’s file name, if the template was loaded from a file. """
response = self.client.get(reverse('calculators:main_bmi'))
self.assertEqual(response.templates[0].name, "calculators/bmi.html")
# SimpleTestCase.assertContains(response, text, count=None, status_code=200, msg_prefix='', html=False)
# TODO a lot of tests
| PyLadiesPoznanAdvanced/django-introduction-bmi | calculators/tests_view.py | Python | mit | 1,112 | 0.002708 |
# -*- coding: utf-8 -*-
from django.shortcuts import get_object_or_404
from django_bootstrap3view.django_bootstrap3view_app.utils.render import render, render_string
from django_bootstrap3view.django_bootstrap3view_app.utils.python import convert_to_bool
class BaseService(object):
_repo = property(fget=lambda self: self.entity.objects)
_page_size = 10
default_query_params = {}
def __getattr__(self, name):
"""
Delegates automatically all undefined methods on the repository entity.
"""
def decorator(*args, **kwargs):
method = getattr(self._repo, name)
if method is None:
raise AttributeError("'%s' has no attribute '%s'" % (self.__class__.__name__, name))
if not kwargs.pop("without_filters", False):
for key, value in self.default_query_params.iteritems():
kwargs.setdefault(key, value)
return method(*args, **kwargs)
return decorator
def get_page(self, page=0, size=None, min_page=None, **kwargs):
if size is None:
size = self._page_size
page = int(page)
if min_page is not None:
min_page = int(min_page)
limit = (page + 1) * size
offset = min_page * size
else:
limit = (page + 1) * size
offset = size * page
return self._get_objects(self._get_page_query(offset, limit, **kwargs))
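    # Worked example: get_page(page=2, size=10) slices self.all()[20:30],
    # while get_page(page=2, size=10, min_page=0) widens the slice to
    # self.all()[0:30] (pages 0 through 2 in a single query).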
def _get_page_query(self, offset, limit, **kwargs):
return self.all()[offset:limit]
def list(self, start, size, **kwargs):
page = int(start / size)
return self.get_page(page=page, size=size, min_page=None, **kwargs)
def _get_objects(self, objects):
""" Override to add behaviour """
return objects
def get_one(self, *args, **kwargs):
objects = self.filter(*args, **kwargs)
return objects[0] if objects else None
def new(self, *args, **kwargs):
return self.entity(*args, **kwargs)
def _get_or_new(self, *args, **kwargs):
try:
obj, created = self.get_or_create(*args, **kwargs)
except:
obj, created = self.entity(*args, **kwargs), True
return obj, created
def get_or_new(self, *args, **kwargs):
obj, _ = self._get_or_new(*args, **kwargs)
return obj
def update_or_create(self, pre_create_function=None, pre_update_function=None, *args, **kwargs):
entity_id = kwargs.pop("id", None)
if entity_id:
if pre_update_function is not None:
pre_update_function(kwargs)
entity = self.get(id=entity_id)
for key, value in kwargs.iteritems():
setattr(entity, key, value)
else:
if pre_create_function is not None:
pre_create_function(kwargs)
entity = self.new(**kwargs)
entity.save()
return entity
def get_or_new_created(self, *args, **kwargs):
return self._get_or_new(*args, **kwargs)
def get_form(self):
return None
def _get_data(self, request, *args, **kwargs):
data = dict([(key, value) for key, value in request.POST.iteritems() if key != "csrfmiddlewaretoken"])
data.update(self._get_additional_data(request))
return data
def _get_additional_data(self, request, *args, **kwargs):
return {}
def _get_entity(self, request, *args, **kwargs):
return self.get_or_new(**self._get_data(request))
def _set_data(self, entity, request, *args, **kwargs):
data = self._get_data(request)
for key, value in data.iteritems():
setattr(entity, key, value)
return entity
def set_attrs(self, entity, attrs):
for key, value in attrs.iteritems():
setattr(entity, key, value)
def save_entity(self, entity, *args, **kwargs):
entity.save()
def save(self, request, *args, **kwargs):
entity = self._get_entity(request, *args, **kwargs)
self._set_data(entity, request, *args, **kwargs)
self.save_entity(entity, *args, **kwargs)
self._post_save(entity, request, *args, **kwargs)
return entity
def _post_save(self, entity, request, *args, **kwargs):
pass
def render(self, template, context):
return render(template, context)
def render_string(self, string, context):
return render_string(string, context)
def get_object_or_404(self, **kwargs):
return get_object_or_404(self.entity, **kwargs)
def delete(self, *args, **kwargs):
logical_delete = kwargs.pop("logical", False)
objs = self.filter(*args, **kwargs)
if not objs:
return False
for obj in objs:
if not logical_delete:
obj.delete()
else:
obj.active = False
obj.save()
return True
def get_formated_sum(self, value):
if value is None:
value = 0
return "%.2f" % value
def _render_row_value(self, row_data, render):
if isinstance(render, str):
if isinstance(row_data, dict):
return str(row_data[render])
else:
return str(getattr(row_data, render))
else:
return str(render(row_data))
def get_params(self, data, params):
dict_params = {}
for param in params:
dict_params[param] = data.get(param)
return dict_params
def convert_to_bool(self, data, params):
convert_to_bool(data, params)
def to_bool(self, param):
return bool(int(param))
def get_action_params(self, request, params_names, prefix="", bar_action=True):
complete_names = ["%s%s" % (prefix, param) for param in params_names]
params = self.get_params(request.POST, complete_names)
if bar_action:
boolean_params = ["%s%s" % (prefix, param) for param in ["is_main_action", "is_side_action"]]
self.convert_to_bool(params, boolean_params)
final_params = {}
for key, value in params.iteritems():
new_key = key.replace(prefix, "")
final_params[new_key] = value
return final_params
def check_nullables(self, data, params):
for param in params:
if not data.get(param):
data[param] = None | bossiernesto/django-bootstrap3-classview | django_bootstrap3view/django_bootstrap3view_app/services/base.py | Python | bsd-3-clause | 6,504 | 0.001538 |
# -*- coding: utf-8 -*-
# This file is part of Shuup.
#
# Copyright (c) 2012-2017, Shoop Commerce Ltd. All rights reserved.
#
# This source code is licensed under the OSL-3.0 license found in the
# LICENSE file in the root directory of this source tree.
from shuup.apps import AppConfig
class ShuupNotifyAppConfig(AppConfig):
name = "shuup.notify"
verbose_name = "Shuup Notification Framework"
label = "shuup_notify"
provides = {
"notify_condition": [
"shuup.notify.conditions:LanguageEqual",
"shuup.notify.conditions:BooleanEqual",
"shuup.notify.conditions:IntegerEqual",
"shuup.notify.conditions:TextEqual",
"shuup.notify.conditions:Empty",
"shuup.notify.conditions:NonEmpty",
],
"notify_action": [
"shuup.notify.actions:SetDebugFlag",
"shuup.notify.actions:AddOrderLogEntry",
"shuup.notify.actions:SendEmail",
"shuup.notify.actions:AddNotification",
],
"notify_event": [],
"admin_module": [
"shuup.notify.admin_module:NotifyAdminModule",
]
}
default_app_config = "shuup.notify.ShuupNotifyAppConfig"
| suutari-ai/shoop | shuup/notify/__init__.py | Python | agpl-3.0 | 1,215 | 0 |
import dragonfly
import dragonfly.pandahive
import bee
from bee import connect
import math, functools
from panda3d.core import NodePath
import dragonfly.scene.unbound
import dragonfly.std
import dragonfly.io
# ## random matrix generator
from random import random
def random_matrix_generator():
while 1:
a = NodePath("")
a.setHpr(360 * random(), 0, 0)
a.setPos(15 * random() - 7.5, 15 * random() - 7.5, 0)
yield dragonfly.scene.matrix(a, "NodePath")
def id_generator():
n = 0
while 1:
yield "spawnedpanda" + str(n)
class myscene(bee.frame):
pandaclassname_ = bee.get_parameter("pandaclassname")
pandaname_ = bee.get_parameter("pandaname")
c1 = bee.configure("scene")
c1.import_mesh_EGG("models/environment")
a = NodePath("")
a.setScale(0.25)
a.setPos(-8, 42, 0)
mat = a.getMat()
m = (mat.getRow3(3), mat.getRow3(0), mat.getRow3(1), mat.getRow3(2))
c1.add_model_MATRIX(matrix=m)
c2 = bee.configure("scene")
c2.import_mesh_EGG("models/panda-model")
a = NodePath("")
a.setScale(0.005)
mat = a.getMat()
m = (mat.getRow3(3), mat.getRow3(0), mat.getRow3(1), mat.getRow3(2))
c2.add_actor_MATRIX(matrix=m, entityname=pandaname_)
c2.import_mesh_EGG("models/panda-walk4")
c2.add_animation("walk")
c3 = bee.configure("scene")
c3.import_mesh_EGG("models/panda-model")
a = NodePath("")
a.setScale(0.005)
mat = a.getMat()
m = (mat.getRow3(3), mat.getRow3(0), mat.getRow3(1), mat.getRow3(2))
c3.add_actorclass_MATRIX(matrix=m, actorclassname=pandaclassname_)
c3.import_mesh_EGG("models/panda-walk4")
c3.add_animation("walk")
del a, m, mat
class myhive(dragonfly.pandahive.pandahive):
pandaname = "mypanda"
pandaname_ = bee.attribute("pandaname")
pandaclassname = "pandaclass"
pandaclassname_ = bee.attribute("pandaclassname")
raiser = bee.raiser()
connect("evexc", raiser)
animation = dragonfly.scene.unbound.animation()
pandaid = dragonfly.std.variable("id")(pandaname_)
walk = dragonfly.std.variable("str")("walk")
connect(pandaid, animation.actor)
connect(walk, animation.animation_name)
key_w = dragonfly.io.keyboardsensor_trigger("W")
connect(key_w, animation.loop)
key_s = dragonfly.io.keyboardsensor_trigger("S")
connect(key_s, animation.stop)
pandaspawn = dragonfly.scene.spawn_actor()
v_panda = dragonfly.std.variable("id")(pandaclassname_)
connect(v_panda, pandaspawn)
panda_id = dragonfly.std.generator("id", id_generator)()
random_matrix = dragonfly.std.generator(("object", "matrix"), random_matrix_generator)()
w_spawn = dragonfly.std.weaver(("id", ("object", "matrix")))()
connect(panda_id, w_spawn.inp1)
connect(random_matrix, w_spawn.inp2)
do_spawn = dragonfly.std.transistor(("id", ("object", "matrix")))()
connect(w_spawn, do_spawn)
connect(do_spawn, pandaspawn.spawn_matrix)
key_z = dragonfly.io.keyboardsensor_trigger("Z")
connect(key_z, do_spawn)
myscene = myscene(
scene="scene",
pandaname=pandaname_,
pandaclassname=pandaclassname_,
)
main = myhive().getinstance()
main.build("main")
main.place()
main.close()
main.init()
from direct.task import Task
def spinCameraTask(camera, task):
angleDegrees = task.time * 30.0
angleRadians = angleDegrees * (math.pi / 180.0)
camera.setPos(20 * math.sin(angleRadians), -20.0 * math.cos(angleRadians), 3)
camera.setHpr(angleDegrees, 0, 0)
return Task.cont
main.window.taskMgr.add(functools.partial(spinCameraTask, main.window.camera), "SpinCameraTask")
main.run()
| agoose77/hivesystem | manual/movingpanda/panda-5.py | Python | bsd-2-clause | 3,673 | 0.001361 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-09-02 14:39
from __future__ import unicode_literals
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('reference', '0004_educationinstitution'),
]
operations = [
migrations.CreateModel(
name='AssimilationCriteria',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('external_id', models.CharField(blank=True, max_length=100, null=True)),
('criteria', models.CharField(max_length=255, unique=True)),
('order', models.IntegerField(blank=True, null=True)),
],
),
migrations.CreateModel(
name='EducationType',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('external_id', models.CharField(blank=True, max_length=100, null=True)),
('type', models.CharField(choices=[('TRANSITION', 'Transition'), ('QUALIFICATION', 'Qualification'), ('ANOTHER', 'Autre')], max_length=20)),
('name', models.CharField(max_length=100)),
('adhoc', models.BooleanField(default=True)),
],
),
migrations.CreateModel(
name='ExternalOffer',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('external_id', models.CharField(blank=True, max_length=100, null=True)),
('changed', models.DateTimeField(null=True)),
('name', models.CharField(max_length=150, unique=True)),
('adhoc', models.BooleanField(default=True)),
('national', models.BooleanField(default=False)),
],
),
migrations.CreateModel(
name='GradeType',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('external_id', models.CharField(blank=True, max_length=100, null=True)),
('name', models.CharField(max_length=255)),
('coverage', models.CharField(choices=[('HIGH_EDUC_NOT_UNIVERSITY', 'HIGH_EDUC_NOT_UNIVERSITY'), ('UNIVERSITY', 'UNIVERSITY'), ('UNKNOWN', 'UNKNOWN')], default='UNKNOWN', max_length=30)),
('adhoc', models.BooleanField(default=True)),
('institutional', models.BooleanField(default=False)),
],
),
migrations.CreateModel(
name='InstitutionalGradeType',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('external_id', models.CharField(blank=True, max_length=100, null=True)),
('name', models.CharField(max_length=255)),
],
),
migrations.AddField(
model_name='domain',
name='adhoc',
field=models.BooleanField(default=True),
),
migrations.AddField(
model_name='domain',
name='national',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='domain',
name='reference',
field=models.CharField(blank=True, max_length=10, null=True),
),
migrations.AddField(
model_name='domain',
name='type',
field=models.CharField(choices=[('HIGH_EDUC_NOT_UNIVERSITY', 'HIGH_EDUC_NOT_UNIVERSITY'), ('UNIVERSITY', 'UNIVERSITY'), ('UNKNOWN', 'UNKNOWN')], default='UNKNOWN', max_length=50),
),
migrations.AddField(
model_name='language',
name='external_id',
field=models.CharField(blank=True, max_length=100, null=True),
),
migrations.AlterField(
model_name='domain',
name='decree',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='reference.Decree'),
),
migrations.AlterField(
model_name='educationinstitution',
name='adhoc',
field=models.BooleanField(default=True),
),
migrations.AlterField(
model_name='educationinstitution',
name='institution_type',
field=models.CharField(choices=[('SECONDARY', 'SECONDARY'), ('UNIVERSITY', 'UNIVERSITY'), ('HIGHER_NON_UNIVERSITY', 'HIGHER_NON_UNIVERSITY')], max_length=25),
),
migrations.AlterField(
model_name='educationinstitution',
name='national_community',
field=models.CharField(blank=True, choices=[('FRENCH', 'FRENCH'), ('GERMAN', 'GERMAN'), ('DUTCH', 'DUTCH')], max_length=20, null=True),
),
migrations.AddField(
model_name='gradetype',
name='institutional_grade_type',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='reference.InstitutionalGradeType'),
),
migrations.AddField(
model_name='externaloffer',
name='domain',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='reference.Domain'),
),
migrations.AddField(
model_name='externaloffer',
name='grade_type',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='reference.GradeType'),
),
migrations.AddField(
model_name='externaloffer',
name='offer_year',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='base.OfferYear'),
),
]
| uclouvain/osis | reference/migrations/0005_auto_20160902_1639.py | Python | agpl-3.0 | 5,965 | 0.003353 |
# -*- coding: utf-8 -*-
"""
Sphinx plugins for Django documentation.
"""
import os
import re
from docutils import nodes, transforms
try:
import json
except ImportError:
try:
import simplejson as json
except ImportError:
try:
from django.utils import simplejson as json
except ImportError:
json = None
from sphinx import addnodes, roles, __version__ as sphinx_ver
from sphinx.builders.html import StandaloneHTMLBuilder
from sphinx.writers.html import SmartyPantsHTMLTranslator
from sphinx.util.console import bold
from sphinx.util.compat import Directive
# RE for option descriptions without a '--' prefix
simple_option_desc_re = re.compile(
r'([-_a-zA-Z0-9]+)(\s*.*?)(?=,\s+(?:/|-|--)|$)')
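# For example (illustrative, not from the original source): for a signature
# like "-v VERBOSITY" this pattern yields one match with optname "-v" and
# args " VERBOSITY"; the "--"-prefixed forms are handled by Sphinx's own
# option_desc_re further below.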
def setup(app):
app.add_crossref_type(
directivename = "setting",
rolename = "setting",
indextemplate = "pair: %s; setting",
)
app.add_crossref_type(
directivename = "templatetag",
rolename = "ttag",
indextemplate = "pair: %s; template tag"
)
app.add_crossref_type(
directivename = "templatefilter",
rolename = "tfilter",
indextemplate = "pair: %s; template filter"
)
app.add_crossref_type(
directivename = "fieldlookup",
rolename = "lookup",
indextemplate = "pair: %s; field lookup type",
)
app.add_description_unit(
directivename = "django-admin",
rolename = "djadmin",
indextemplate = "pair: %s; django-admin command",
parse_node = parse_django_admin_node,
)
app.add_description_unit(
directivename = "django-admin-option",
rolename = "djadminopt",
indextemplate = "pair: %s; django-admin command-line option",
parse_node = parse_django_adminopt_node,
)
app.add_config_value('django_next_version', '0.0', True)
app.add_directive('versionadded', VersionDirective)
app.add_directive('versionchanged', VersionDirective)
app.add_builder(DjangoStandaloneHTMLBuilder)
class VersionDirective(Directive):
has_content = True
required_arguments = 1
optional_arguments = 1
final_argument_whitespace = True
option_spec = {}
def run(self):
env = self.state.document.settings.env
arg0 = self.arguments[0]
is_nextversion = env.config.django_next_version == arg0
ret = []
node = addnodes.versionmodified()
ret.append(node)
if not is_nextversion:
if len(self.arguments) == 1:
linktext = u'Пожалуйста, обратитесь к описанию релиза </releases/%s>' % (arg0)
xrefs = roles.XRefRole()('doc', linktext, linktext, self.lineno, self.state)
node.extend(xrefs[0])
node['version'] = arg0
else:
node['version'] = "Development version"
node['type'] = self.name
if len(self.arguments) == 2:
inodes, messages = self.state.inline_text(self.arguments[1], self.lineno+1)
node.extend(inodes)
if self.content:
self.state.nested_parse(self.content, self.content_offset, node)
ret = ret + messages
env.note_versionchange(node['type'], node['version'], node, self.lineno)
return ret
class DjangoHTMLTranslator(SmartyPantsHTMLTranslator):
"""
Django-specific reST to HTML tweaks.
"""
# Don't use border=1, which docutils does by default.
def visit_table(self, node):
self._table_row_index = 0 # Needed by Sphinx
self.body.append(self.starttag(node, 'table', CLASS='docutils'))
# <big>? Really?
def visit_desc_parameterlist(self, node):
self.body.append('(')
self.first_param = 1
self.param_separator = node.child_text_separator
def depart_desc_parameterlist(self, node):
self.body.append(')')
if sphinx_ver < '1.0.8':
#
# Don't apply smartypants to literal blocks
#
def visit_literal_block(self, node):
self.no_smarty += 1
SmartyPantsHTMLTranslator.visit_literal_block(self, node)
def depart_literal_block(self, node):
SmartyPantsHTMLTranslator.depart_literal_block(self, node)
self.no_smarty -= 1
#
# Turn the "new in version" stuff (versionadded/versionchanged) into a
# better callout -- the Sphinx default is just a little span,
    # which is a bit less obvious than I'd like.
#
# FIXME: these messages are all hardcoded in English. We need to change
    # that to accommodate other language docs, but I can't work out how to make
# that work.
#
version_text = {
'deprecated': u'Устарело в Django %s',
'versionchanged': u'Изменено в Django %s',
'versionadded': u'Добавлено в Django %s',
}
def visit_versionmodified(self, node):
self.body.append(
self.starttag(node, 'div', CLASS=node['type'])
)
title = "%s%s" % (
self.version_text[node['type']] % node['version'],
len(node) and ":" or "."
)
self.body.append('<span class="title">%s</span> ' % title)
def depart_versionmodified(self, node):
self.body.append("</div>\n")
# Give each section a unique ID -- nice for custom CSS hooks
def visit_section(self, node):
old_ids = node.get('ids', [])
node['ids'] = ['s-' + i for i in old_ids]
node['ids'].extend(old_ids)
SmartyPantsHTMLTranslator.visit_section(self, node)
node['ids'] = old_ids
def parse_django_admin_node(env, sig, signode):
command = sig.split(' ')[0]
env._django_curr_admin_command = command
title = "django-admin.py %s" % sig
signode += addnodes.desc_name(title, title)
return sig
def parse_django_adminopt_node(env, sig, signode):
"""A copy of sphinx.directives.CmdoptionDesc.parse_signature()"""
from sphinx.domains.std import option_desc_re
count = 0
firstname = ''
for m in option_desc_re.finditer(sig):
optname, args = m.groups()
if count:
signode += addnodes.desc_addname(', ', ', ')
signode += addnodes.desc_name(optname, optname)
signode += addnodes.desc_addname(args, args)
if not count:
firstname = optname
count += 1
if not count:
for m in simple_option_desc_re.finditer(sig):
optname, args = m.groups()
if count:
signode += addnodes.desc_addname(', ', ', ')
signode += addnodes.desc_name(optname, optname)
signode += addnodes.desc_addname(args, args)
if not count:
firstname = optname
count += 1
if not firstname:
raise ValueError
return firstname
class DjangoStandaloneHTMLBuilder(StandaloneHTMLBuilder):
"""
Subclass to add some extra things we need.
"""
name = 'djangohtml'
def finish(self):
super(DjangoStandaloneHTMLBuilder, self).finish()
if json is None:
self.warn("cannot create templatebuiltins.js due to missing simplejson dependency")
return
self.info(bold("writing templatebuiltins.js..."))
xrefs = self.env.domaindata["std"]["objects"]
templatebuiltins = {
"ttags": [n for ((t, n), (l, a)) in xrefs.items()
if t == "templatetag" and l == "ref/templates/builtins"],
"tfilters": [n for ((t, n), (l, a)) in xrefs.items()
if t == "templatefilter" and l == "ref/templates/builtins"],
}
outfilename = os.path.join(self.outdir, "templatebuiltins.js")
f = open(outfilename, 'wb')
f.write('var django_template_builtins = ')
json.dump(templatebuiltins, f)
f.write(';\n')
f.close()
| RaD/django-south | docs/djbook/_ext/djbookdocs.py | Python | apache-2.0 | 7,983 | 0.008081 |
#
# Validator for "idlebg" Test
#
from pscheduler import json_validate
MAX_SCHEMA = 1
def spec_is_valid(json):
schema = {
"type": "object",
"properties": {
"schema": { "$ref": "#/pScheduler/Cardinal" },
"duration": { "$ref": "#/pScheduler/Duration" },
"host": { "$ref": "#/pScheduler/Host" },
"host-node": { "$ref": "#/pScheduler/URLHostPort" },
"interval": { "$ref": "#/pScheduler/Duration" },
"parting-comment": { "$ref": "#/pScheduler/String" },
"starting-comment": { "$ref": "#/pScheduler/String" },
},
"required": [
"duration"
]
}
return json_validate(json, schema, max_schema=MAX_SCHEMA)
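# Illustrative use (not part of the original validator); this assumes
# json_validate returns a (passed, diagnostic) pair and that a pScheduler
# Duration is an ISO 8601 string:
#
#   ok, err = spec_is_valid({"schema": 1, "duration": "PT30S"})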
def result_is_valid(json):
schema = {
"type": "object",
"properties": {
"schema": { "$ref": "#/pScheduler/Cardinal" },
"succeeded": { "$ref": "#/pScheduler/Boolean" },
"error": { "$ref": "#/pScheduler/String" },
"diags": { "$ref": "#/pScheduler/String" },
"time-slept": { "$ref": "#/pScheduler/Duration" },
},
"required": [
"succeeded",
"time-slept",
]
}
return json_validate(json, schema)
def limit_is_valid(json):
schema = {
"type": "object",
"properties": {
"schema": { "$ref": "#/pScheduler/Cardinal" },
"duration": { "$ref": "#/pScheduler/Limit/Duration" },
"starting-comment": { "$ref": "#/pScheduler/Limit/String" },
"parting-comment": { "$ref": "#/pScheduler/Limit/String" }
},
"additionalProperties": False
}
return json_validate(json, schema)
| perfsonar/pscheduler | pscheduler-test-idlebgm/idlebgm/validate.py | Python | apache-2.0 | 1,872 | 0.017628 |
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import imp
def FindModule(name):
"""Gets the path of the named module.
This is useful for cases where we want to use subprocess.call on a module we
have imported, and safer than using __file__ since that can point to .pyc
files.
Args:
name: the string name of a module (e.g. 'dev_appserver')
Returns:
The path to the module.
"""
return imp.find_module(name)[1]
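# Example usage (illustrative only; 'dev_appserver' stands in for any
# importable module):
#
#   import subprocess
#   subprocess.call(['python', FindModule('dev_appserver'), '--help'])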
| modulexcite/catapult | catapult_build/module_finder.py | Python | bsd-3-clause | 551 | 0.005445 |
from setuptools import setup, Extension
from Cython.Build import cythonize
import numpy
setup(
name = 'SEGD',
version = '0.a1',
description = 'A Python3 reader for SEG-D rev3.1 binary data.',
url = 'https://github.com/drsudow/SEG-D.git',
author = 'Mattias Südow',
author_email = '[email protected]',
license = 'MIT',
classifiers = [
        'Development Status :: 3 - Alpha',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Topic :: Scientific/Engineering :: Information Analysis',
'Programming Language :: Python :: 3.5',
],
keywords = 'seismic SEGD',
packages = ['SEGD'],
install_requires = ['cython','numpy','datetime'],
ext_modules = cythonize([Extension('SEGD.read_traces',['SEGD/read_traces.pyx']
,include_dirs=[numpy.get_include()])])
)
| drsudow/SEG-D | setup.py | Python | mit | 1,008 | 0.029791 |
# maker.py
#
# A class representing vcdMaker specific test.
#
# Copyright (c) 2019 vcdMaker team
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import os
from flat import Flat
from test import Test
class Maker(Test):
"""A vcdMaker specific test class."""
def __init__(self, node, test_directory):
"""The Maker class constructor.
Arguments:
node - The XML node to be read.
test_directory - The test directory.
"""
Test.__init__(self, node, test_directory)
self.command = []
self.unique_params = {'input_file': ['', 'Missing input file'],
'time_unit': ['', 'Missing time unit'],
'line_counter': ['', ''],
'user_format': ['', '']}
for element in node.iter(tag='unique'):
self.unique = Flat(element, self.unique_params)
self.create_command(test_directory)
def create_command(self, test_directory):
"""Builds the vcdMaker specific command line."""
self.command.append('-t')
self.command.append(self.unique.get_parameter('time_unit'))
if self.unique.get_parameter('line_counter'):
self.command.append('-c')
self.command.append(self.unique.get_parameter('line_counter'))
if self.unique.get_parameter('user_format'):
self.command.append('-u')
self.command.append(self.unique.get_parameter('user_format'))
self.command.append('-o')
self.command.append(os.path.join(test_directory,
self.common.get_parameter('output_file')))
self.command.append(os.path.join(test_directory,
self.unique.get_parameter('input_file')))
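        # For example (illustrative values only): a time_unit of 'us', empty
        # line_counter/user_format, output_file 'out.vcd' and input_file
        # 'log.txt' produce roughly:
        #   ['-t', 'us', '-o', '<test_dir>/out.vcd', '<test_dir>/log.txt']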
| WojciechRynczuk/vcdMaker | test/functional/maker.py | Python | mit | 2,819 | 0.000709 |
##
## This file is part of the sigrok-meter project.
##
## Copyright (C) 2014 Jens Steinhauser <[email protected]>
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, see <http://www.gnu.org/licenses/>.
##
import itertools
import math
import qtcompat
import sigrok.core as sr
import util
try:
from itertools import izip
except ImportError:
izip = zip
QtCore = qtcompat.QtCore
QtGui = qtcompat.QtGui
class Trace(object):
'''Class to hold the measured samples.'''
def __init__(self):
self.samples = []
self.new = False
def append(self, sample):
self.samples.append(sample)
self.new = True
class MeasurementDataModel(QtGui.QStandardItemModel):
'''Model to hold the measured values.'''
'''Role used to identify and find the item.'''
idRole = QtCore.Qt.UserRole + 1
'''Role used to store the device vendor and model.'''
descRole = QtCore.Qt.UserRole + 2
'''Role used to store a dictionary with the traces.'''
tracesRole = QtCore.Qt.UserRole + 3
'''Role used to store the color to draw the graph of the channel.'''
colorRole = QtCore.Qt.UserRole + 4
def __init__(self, parent):
super(self.__class__, self).__init__(parent)
# Use the description text to sort the items for now, because the
# idRole holds tuples, and using them to sort doesn't work.
self.setSortRole(MeasurementDataModel.descRole)
# A generator for the colors of the channels.
self._colorgen = self._make_colorgen()
def _make_colorgen(self):
cols = [
QtGui.QColor(0x8F, 0x52, 0x02), # brown
QtGui.QColor(0x73, 0xD2, 0x16), # green
QtGui.QColor(0xCC, 0x00, 0x00), # red
QtGui.QColor(0x34, 0x65, 0xA4), # blue
QtGui.QColor(0xF5, 0x79, 0x00), # orange
QtGui.QColor(0xED, 0xD4, 0x00), # yellow
QtGui.QColor(0x75, 0x50, 0x7B) # violet
]
def myrepeat(g, n):
'''Repeats every element from 'g' 'n' times'.'''
for e in g:
for f in itertools.repeat(e, n):
yield f
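        # e.g. myrepeat(itertools.count(100, 10), len(cols)) yields 100 for
        # the first len(cols) channels, then 110, and so on -- each pass
        # through the colour cycle is drawn a little darker than the last.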
colorcycle = itertools.cycle(cols)
darkness = myrepeat(itertools.count(100, 10), len(cols))
for c, d in izip(colorcycle, darkness):
yield QtGui.QColor(c).darker(d)
def format_mqflags(self, mqflags):
if sr.QuantityFlag.AC in mqflags:
return 'AC'
elif sr.QuantityFlag.DC in mqflags:
return 'DC'
else:
return ''
def format_value(self, mag):
if math.isinf(mag):
return u'\u221E'
return '{:f}'.format(mag)
def getItem(self, device, channel):
'''Return the item for the device + channel combination from the
model, or create a new item if no existing one matches.'''
# Unique identifier for the device + channel.
# TODO: Isn't there something better?
uid = (
device.vendor,
device.model,
device.serial_number(),
device.connection_id(),
channel.index
)
# Find the correct item in the model.
for row in range(self.rowCount()):
item = self.item(row)
rid = item.data(MeasurementDataModel.idRole)
rid = tuple(rid) # PySide returns a list.
if uid == rid:
return item
# Nothing found, create a new item.
desc = '{} {}, {}'.format(
device.vendor, device.model, channel.name)
item = QtGui.QStandardItem()
item.setData(uid, MeasurementDataModel.idRole)
item.setData(desc, MeasurementDataModel.descRole)
item.setData({}, MeasurementDataModel.tracesRole)
item.setData(next(self._colorgen), MeasurementDataModel.colorRole)
self.appendRow(item)
self.sort(0)
return item
@QtCore.Slot(float, sr.classes.Device, sr.classes.Channel, tuple)
def update(self, timestamp, device, channel, data):
'''Update the data for the device (+channel) with the most recent
measurement from the given payload.'''
item = self.getItem(device, channel)
value, unit, mqflags = data
value_str = self.format_value(value)
unit_str = util.format_unit(unit)
mqflags_str = self.format_mqflags(mqflags)
# The display role is a tuple containing the value and the unit/flags.
disp = (value_str, ' '.join([unit_str, mqflags_str]))
item.setData(disp, QtCore.Qt.DisplayRole)
# The samples role is a dictionary that contains the old samples for each unit.
# Should be trimmed periodically, otherwise it grows larger and larger.
if not math.isinf(value) and not math.isnan(value):
sample = (timestamp, value)
traces = item.data(MeasurementDataModel.tracesRole)
# It's not possible to use 'collections.defaultdict' here, because
# PySide doesn't return the original type that was passed in.
if not (unit in traces):
traces[unit] = Trace()
traces[unit].append(sample)
item.setData(traces, MeasurementDataModel.tracesRole)
def clear_samples(self):
'''Removes all old samples from the model.'''
for row in range(self.rowCount()):
idx = self.index(row, 0)
self.setData(idx, {},
MeasurementDataModel.tracesRole)
class MultimeterDelegate(QtGui.QStyledItemDelegate):
'''Delegate to show the data items from a MeasurementDataModel.'''
def __init__(self, parent, font):
'''Initialize the delegate.
:param font: Font used for the text.
'''
super(self.__class__, self).__init__(parent)
self._nfont = font
fi = QtGui.QFontInfo(self._nfont)
self._nfontheight = fi.pixelSize()
fm = QtGui.QFontMetrics(self._nfont)
r = fm.boundingRect('-XX.XXXXXX X XX')
w = 1.4 * r.width() + 2 * self._nfontheight
h = 2.6 * self._nfontheight
self._size = QtCore.QSize(w, h)
def sizeHint(self, option=None, index=None):
return self._size
def _color_rect(self, outer):
'''Returns the dimensions of the clickable rectangle.'''
x1 = (outer.height() - self._nfontheight) / 2
r = QtCore.QRect(x1, x1, self._nfontheight, self._nfontheight)
r.translate(outer.topLeft())
return r
def paint(self, painter, options, index):
value, unit = index.data(QtCore.Qt.DisplayRole)
desc = index.data(MeasurementDataModel.descRole)
color = index.data(MeasurementDataModel.colorRole)
painter.setFont(self._nfont)
# Draw the clickable rectangle.
painter.fillRect(self._color_rect(options.rect), color)
# Draw the text
h = options.rect.height()
p = options.rect.topLeft()
p += QtCore.QPoint(h, (h + self._nfontheight) / 2 - 2)
painter.drawText(p, desc + ': ' + value + ' ' + unit)
def editorEvent(self, event, model, options, index):
if type(event) is QtGui.QMouseEvent:
if event.type() == QtCore.QEvent.MouseButtonPress:
rect = self._color_rect(options.rect)
if rect.contains(event.x(), event.y()):
c = index.data(MeasurementDataModel.colorRole)
c = QtGui.QColorDialog.getColor(c, None,
'Choose new color for channel')
if c.isValid():
# False if cancel is pressed (resulting in a black
# color).
item = model.itemFromIndex(index)
item.setData(c, MeasurementDataModel.colorRole)
return True
return False
| swegener/sigrok-meter | datamodel.py | Python | gpl-3.0 | 8,443 | 0.002961 |
# encoding: utf-8
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Author: Kyle Lahnakoski ([email protected])
#
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from __future__ import absolute_import
import HTMLParser
import StringIO
import ast
import base64
import cgi
from collections import Mapping
import datetime
from decimal import Decimal
import gzip
import hashlib
from io import BytesIO
import json
import re
from tempfile import TemporaryFile
from pyLibrary import strings
from pyLibrary.dot import wrap, wrap_leaves, unwrap, unwraplist, split_field, join_field, coalesce
from pyLibrary.collections.multiset import Multiset
from pyLibrary.debugs.logs import Log, Except
from pyLibrary.env.big_data import FileString, safe_size
from pyLibrary.jsons import quote
from pyLibrary.jsons.encoder import json_encoder, pypy_json_encode
from pyLibrary.strings import expand_template
from pyLibrary.times.dates import Date
"""
DUE TO MY POOR MEMORY, THIS IS A LIST OF ALL CONVERSION ROUTINES
IN <from_type> "2" <to_type> FORMAT
"""
def value2json(obj, pretty=False):
try:
json = json_encoder(obj, pretty=pretty)
if json == None:
Log.note(str(type(obj)) + " is not valid{{type}}JSON", type= " (pretty) " if pretty else " ")
Log.error("Not valid JSON: " + str(obj) + " of type " + str(type(obj)))
return json
except Exception, e:
e = Except.wrap(e)
try:
json = pypy_json_encode(obj)
return json
except Exception:
pass
Log.error("Can not encode into JSON: {{value}}", value=repr(obj), cause=e)
def remove_line_comment(line):
mode = 0 # 0=code, 1=inside_string, 2=escaping
for i, c in enumerate(line):
if c == '"':
if mode == 0:
mode = 1
elif mode == 1:
mode = 0
else:
mode = 1
elif c == '\\':
if mode == 0:
mode = 0
elif mode == 1:
mode = 2
else:
mode = 1
elif mode == 2:
mode = 1
elif c == "#" and mode == 0:
return line[0:i]
elif c == "/" and mode == 0 and line[i + 1] == "/":
return line[0:i]
return line
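# e.g. (illustrative) remove_line_comment('x = 1  # set x') -> 'x = 1  ';
# a "#" or "//" that appears inside a quoted string is left untouched.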
def json2value(json_string, params={}, flexible=False, leaves=False):
"""
:param json_string: THE JSON
:param params: STANDARD JSON PARAMS
:param flexible: REMOVE COMMENTS
:param leaves: ASSUME JSON KEYS ARE DOT-DELIMITED
:return: Python value
"""
if isinstance(json_string, str):
Log.error("only unicode json accepted")
try:
if flexible:
# REMOVE """COMMENTS""", # COMMENTS, //COMMENTS, AND \n \r
# DERIVED FROM https://github.com/jeads/datasource/blob/master/datasource/bases/BaseHub.py# L58
json_string = re.sub(r"\"\"\".*?\"\"\"", r"\n", json_string, flags=re.MULTILINE)
json_string = "\n".join(remove_line_comment(l) for l in json_string.split("\n"))
# ALLOW DICTIONARY'S NAME:VALUE LIST TO END WITH COMMA
json_string = re.sub(r",\s*\}", r"}", json_string)
# ALLOW LISTS TO END WITH COMMA
json_string = re.sub(r",\s*\]", r"]", json_string)
if params:
json_string = expand_template(json_string, params)
# LOOKUP REFERENCES
value = wrap(json_decoder(json_string))
if leaves:
value = wrap_leaves(value)
return value
except Exception, e:
e = Except.wrap(e)
if "Expecting '" in e and "' delimiter: line" in e:
line_index = int(strings.between(e.message, " line ", " column ")) - 1
column = int(strings.between(e.message, " column ", " ")) - 1
line = json_string.split("\n")[line_index].replace("\t", " ")
if column > 20:
sample = "..." + line[column - 20:]
pointer = " " + (" " * 20) + "^"
else:
sample = line
pointer = (" " * column) + "^"
if len(sample) > 43:
sample = sample[:43] + "..."
Log.error("Can not decode JSON at:\n\t" + sample + "\n\t" + pointer + "\n")
base_str = unicode2utf8(strings.limit(json_string, 1000))
hexx_str = bytes2hex(base_str, " ")
try:
char_str = " " + (" ".join(c.decode("latin1") if ord(c) >= 32 else ".") for c in base_str)
except Exception:
char_str = " "
Log.error("Can not decode JSON:\n" + char_str + "\n" + hexx_str + "\n", e)
def string2datetime(value, format=None):
return Date(value, format).value
def str2datetime(value, format=None):
return string2datetime(value, format)
def datetime2string(value, format="%Y-%m-%d %H:%M:%S"):
return Date(value).format(format=format)
def datetime2str(value, format="%Y-%m-%d %H:%M:%S"):
return Date(value).format(format=format)
def datetime2unix(d):
try:
if d == None:
return None
elif isinstance(d, datetime.datetime):
epoch = datetime.datetime(1970, 1, 1)
elif isinstance(d, datetime.date):
epoch = datetime.date(1970, 1, 1)
else:
Log.error("Can not convert {{value}} of type {{type}}", value= d, type= d.__class__)
diff = d - epoch
return Decimal(long(diff.total_seconds() * 1000000)) / 1000000
except Exception, e:
Log.error("Can not convert {{value}}", value= d, cause=e)
def datetime2milli(d):
return datetime2unix(d) * 1000
def timedelta2milli(v):
return v.total_seconds()
def unix2datetime(u):
try:
if u == None:
return None
if u == 9999999999: # PYPY BUG https://bugs.pypy.org/issue1697
return datetime.datetime(2286, 11, 20, 17, 46, 39)
return datetime.datetime.utcfromtimestamp(u)
except Exception, e:
Log.error("Can not convert {{value}} to datetime", value= u, cause=e)
def milli2datetime(u):
if u == None:
return None
return unix2datetime(u / 1000.0)
def dict2Multiset(dic):
if dic == None:
return None
output = Multiset()
output.dic = unwrap(dic).copy()
return output
def multiset2dict(value):
"""
CONVERT MULTISET TO dict THAT MAPS KEYS TO MAPS KEYS TO KEY-COUNT
"""
if value == None:
return None
return dict(value.dic)
def table2list(
column_names, # tuple of columns names
rows # list of tuples
):
return wrap([dict(zip(column_names, r)) for r in rows])
def table2tab(
column_names, # tuple of columns names
rows # list of tuples
):
def row(r):
return "\t".join(map(value2json, r))
return row(column_names)+"\n"+("\n".join(row(r) for r in rows))
def list2tab(rows):
columns = set()
for r in wrap(rows):
columns |= set(k for k, v in r.leaves())
keys = list(columns)
output = []
for r in wrap(rows):
output.append("\t".join(value2json(r[k]) for k in keys))
return "\t".join(keys) + "\n" + "\n".join(output)
def list2table(rows, column_names=None):
if column_names:
keys = list(set(column_names))
else:
columns = set()
for r in rows:
columns |= set(r.keys())
keys = list(columns)
output = [[unwraplist(r[k]) for k in keys] for r in rows]
return wrap({
"meta": {"format": "table"},
"header": keys,
"data": output
})
def list2cube(rows, column_names=None):
if column_names:
keys = column_names
else:
columns = set()
for r in rows:
columns |= set(r.keys())
keys = list(columns)
data = {k: [] for k in keys}
output = wrap({
"meta": {"format": "cube"},
"edges": [
{
"name": "rownum",
"domain": {"type": "rownum", "min": 0, "max": len(rows), "interval": 1}
}
],
"data": data
})
for r in rows:
for k in keys:
data[k].append(r[k])
return output
def value2string(value):
# PROPER NULL HANDLING
if value == None:
return None
return unicode(value)
def value2quote(value):
# RETURN PRETTY PYTHON CODE FOR THE SAME
if isinstance(value, basestring):
return string2quote(value)
else:
return repr(value)
def string2quote(value):
if value == None:
return "None"
return quote(value)
string2regexp = re.escape
def string2url(value):
if isinstance(value, unicode):
return "".join([_map2url[c] for c in unicode2latin1(value)])
elif isinstance(value, str):
return "".join([_map2url[c] for c in value])
else:
Log.error("Expecting a string")
def value2url(value):
if value == None:
Log.error("")
if isinstance(value, Mapping):
output = "&".join([value2url(k) + "=" + (value2url(v) if isinstance(v, basestring) else value2url(value2json(v))) for k,v in value.items()])
elif isinstance(value, unicode):
output = "".join([_map2url[c] for c in unicode2latin1(value)])
elif isinstance(value, str):
output = "".join([_map2url[c] for c in value])
elif hasattr(value, "__iter__"):
output = ",".join(value2url(v) for v in value)
else:
output = unicode(value)
return output
def url_param2value(param):
"""
CONVERT URL QUERY PARAMETERS INTO DICT
"""
if isinstance(param, unicode):
param = param.encode("ascii")
def _decode(v):
output = []
i = 0
while i < len(v):
c = v[i]
if c == "%":
d = hex2bytes(v[i + 1:i + 3])
output.append(d)
i += 3
else:
output.append(c)
i += 1
output = (b"".join(output)).decode("latin1")
try:
return json2value(output)
except Exception:
pass
return output
query = {}
for p in param.split(b'&'):
if not p:
continue
if p.find(b"=") == -1:
k = p
v = True
else:
k, v = p.split(b"=")
v = _decode(v)
u = query.get(k)
if u is None:
query[k] = v
elif isinstance(u, list):
u += [v]
else:
query[k] = [u, v]
return query
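# e.g. (illustrative) url_param2value(b"a=1&b=x%20y&b=z") returns
# {"a": 1, "b": ["x y", "z"]} -- repeated keys are collected into a list.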
def html2unicode(value):
# http://stackoverflow.com/questions/57708/convert-xml-html-entities-into-unicode-string-in-python
return HTMLParser.HTMLParser().unescape(value)
def unicode2html(value):
return cgi.escape(value)
def unicode2latin1(value):
output = value.encode("latin1")
return output
def quote2string(value):
try:
return ast.literal_eval(value)
except Exception:
pass
# RETURN PYTHON CODE FOR THE SAME
def value2code(value):
return repr(value)
def DataFrame2string(df, columns=None):
output = StringIO.StringIO()
try:
df.to_csv(output, sep="\t", header=True, cols=columns, engine='python')
return output.getvalue()
finally:
output.close()
def ascii2char(ascii):
return chr(ascii)
def char2ascii(char):
return ord(char)
def ascii2unicode(value):
return value.decode("latin1")
def latin12hex(value):
return value.encode("hex")
def int2hex(value, size):
return (("0" * size) + hex(value)[2:])[-size:]
def hex2bytes(value):
return value.decode("hex")
def bytes2hex(value, separator=" "):
return separator.join("%02X" % ord(x) for x in value)
def base642bytearray(value):
return bytearray(base64.b64decode(value))
def base642bytes(value):
return base64.b64decode(value)
def bytes2base64(value):
return base64.b64encode(value).decode("utf8")
def bytes2sha1(value):
if isinstance(value, unicode):
Log.error("can not convert unicode to sha1")
sha = hashlib.sha1(value)
return sha.hexdigest()
def value2intlist(value):
if value == None:
return None
elif hasattr(value, '__iter__'):
output = [int(d) for d in value if d != "" and d != None]
return output
elif value.strip() == "":
return None
else:
return [int(value)]
def value2int(value):
if value == None:
return None
else:
return int(value)
def value2number(v):
try:
if isinstance(v, float) and round(v, 0) != v:
return v
# IF LOOKS LIKE AN INT, RETURN AN INT
return int(v)
except Exception:
try:
return float(v)
except Exception, e:
Log.error("Not a number ({{value}})", value= v, cause=e)
def utf82unicode(value):
return value.decode('utf8')
def unicode2utf8(value):
return value.encode('utf8')
def latin12unicode(value):
if isinstance(value, unicode):
Log.error("can not convert unicode from latin1")
try:
return unicode(value.decode('iso-8859-1'))
except Exception, e:
Log.error("Can not convert {{value|quote}} to unicode", value=value)
def pipe2value(value):
type = value[0]
if type == '0':
return None
if type == 'n':
return value2number(value[1::])
if type != 's' and type != 'a':
Log.error("unknown pipe type ({{type}}) in {{value}}", type= type, value= value)
# EXPECTING MOST STRINGS TO NOT HAVE ESCAPED CHARS
output = _unPipe(value)
if type == 's':
return output
return [pipe2value(v) for v in output.split("|")]
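# e.g. (illustrative) pipe2value("n42") -> 42, pipe2value("shello") -> "hello",
# pipe2value("an1|n2") -> [1, 2]; see _unPipe below for the escape rules.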
def zip2bytes(compressed):
"""
UNZIP DATA
"""
if hasattr(compressed, "read"):
return gzip.GzipFile(fileobj=compressed, mode='r')
buff = BytesIO(compressed)
archive = gzip.GzipFile(fileobj=buff, mode='r')
return safe_size(archive)
def bytes2zip(bytes):
"""
RETURN COMPRESSED BYTES
"""
if hasattr(bytes, "read"):
buff = TemporaryFile()
archive = gzip.GzipFile(fileobj=buff, mode='w')
for b in bytes:
archive.write(b)
archive.close()
buff.seek(0)
return FileString(buff)
buff = BytesIO()
archive = gzip.GzipFile(fileobj=buff, mode='w')
archive.write(bytes)
archive.close()
return buff.getvalue()
def ini2value(ini_content):
"""
INI FILE CONTENT TO Dict
"""
from ConfigParser import ConfigParser
buff = StringIO.StringIO(ini_content)
config = ConfigParser()
config._read(buff, "dummy")
output = {}
for section in config.sections():
output[section]=s = {}
for k, v in config.items(section):
s[k]=v
return wrap(output)
_map2url = {chr(i): latin12unicode(chr(i)) for i in range(32, 256)}
for c in " {}<>;/?:@&=+$,":
_map2url[c] = "%" + int2hex(ord(c), 2)
def _unPipe(value):
s = value.find("\\", 1)
if s < 0:
return value[1::]
result = ""
e = 1
while True:
c = value[s + 1]
if c == 'p':
result = result + value[e:s] + '|'
s += 2
e = s
elif c == '\\':
result = result + value[e:s] + '\\'
s += 2
e = s
else:
s += 1
s = value.find("\\", s)
if s < 0:
break
return result + value[e::]
json_decoder = json.JSONDecoder().decode
def json_schema_to_markdown(schema):
from pyLibrary.queries import qb
def _md_code(code):
return "`"+code+"`"
def _md_italic(value):
return "*"+value+"*"
def _inner(schema, parent_name, indent):
more_lines = []
for k,v in schema.items():
full_name = join_field(split_field(parent_name)+[k])
details = indent+"* "+_md_code(full_name)
if v.type:
details += " - "+_md_italic(v.type)
else:
Log.error("{{full_name}} is missing type", full_name=full_name)
if v.description:
details += " " + v.description
more_lines.append(details)
if v.type in ["object", "array", "nested"]:
more_lines.extend(_inner(v.properties, full_name, indent+" "))
return more_lines
lines = []
if schema.title:
lines.append("#"+schema.title)
lines.append(schema.description)
lines.append("")
for k, v in qb.sort(schema.properties.items(), 0):
full_name = k
if v.type in ["object", "array", "nested"]:
lines.append("##"+_md_code(full_name)+" Property")
if v.description:
lines.append(v.description)
lines.append("")
if v.type in ["object", "array", "nested"]:
lines.extend(_inner(v.properties, full_name, " "))
else:
lines.append("##"+_md_code(full_name)+" ("+v.type+")")
if v.description:
lines.append(v.description)
return "\n".join(lines)
| klahnakoski/MoDevETL | pyLibrary/convert.py | Python | mpl-2.0 | 17,324 | 0.003002 |
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Copyright 2012 Jens Hoffmann (hoffmaje)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
from django.db import models
from django.contrib.auth.models import User
| hoffmaje/layla | layla/vocabularymanager/models.py | Python | agpl-3.0 | 240 | 0.004167 |
print ("I'm not a function")
def my_function():
print("Hey I'm a function!")
def brett(val):
for i in range(val):
print("I'm a function with args!")
my_function()
brett(5) | CiscoDevNet/coding-skills-sample-code | coding202-parsing-json/call-functions.py | Python | apache-2.0 | 217 | 0.041475 |
import sublime, sublime_plugin, os
class CopyFilenameCommand(sublime_plugin.TextCommand):
def run(self, edit):
if len(self.view.file_name()) > 0:
filename = os.path.split(self.view.file_name())[1]
sublime.set_clipboard(filename)
sublime.status_message("Copied file name: %s" % filename)
def is_enabled(self):
return self.view.file_name() and len(self.view.file_name()) > 0
| nwjlyons/copy-file-name | copy_filename.py | Python | mit | 435 | 0.004598 |
#!/usr/bin/env python
from os.path import join
from config import Config
from group import HostGroup, PortGroup
from parser import Parser
from renderers.junos import JunosRenderer
from renderers.ios import IOSRenderer
from deployers.junos import JunosDeployer
from deployers.ios import IOSDeployer
from deployers.iosscp import SCPDeployer
class Device(object):
def __init__(self):
self._name = None
self._vendor = None
self._transport = None
self._save_config = None
self._include_list = list()
self._rules = list()
self._hostgroups = list()
self._portgroups = list()
self._config = Config()
self._rendered_groups = list()
self._rendered_rules = dict()
self._rendered_config = ''
@property
def vendor(self):
return self._vendor
@vendor.setter
def vendor(self, value):
if value not in ['junos', 'ios', 'arista', 'asa']:
raise Exception("The only vendors currently supported are junos, arista, ios, asa")
self._vendor = value
@property
def name(self):
return self._name
@name.setter
def name(self, value):
self._name = value
@property
def transport(self):
return self._transport
@transport.setter
def transport(self, value):
if value not in ['ssh']:
raise Exception("The only transport supported currently is ssh")
self._transport = value
@property
def rendered_config(self):
return self._rendered_config
@rendered_config.setter
def rendered_config(self, value):
self._rendered_config = value
@property
def rendered_rules(self):
return self._rendered_rules
@property
def rendered_groups(self):
return self._rendered_groups
@property
def hostgroups(self):
return self._hostgroups
@property
def portgroups(self):
return self._portgroups
@property
def rules(self):
return self._rules
@property
def save_config(self):
return self._save_config
@save_config.setter
def save_config(self, value):
self._save_config = value
def add_include(self, value):
self._include_list.append(value)
def parse_file(self, name):
self.name = name
config = Config()
try:
f = open('{}/{}'.format(config.devices, name))
except Exception, e:
print('Could not open device file', e)
raise e
lines = f.readlines()
for line in lines:
if line.startswith('#'):
continue
if line.strip().startswith('vendor'):
self.vendor = line.strip().split(' ')[1]
if line.strip().startswith('transport'):
self.transport = line.strip().split(' ')[1]
if line.strip().startswith('save_config'):
self.save_config = line.strip().split(' ')[1]
if line.strip().startswith('include'):
self.add_include(line.strip().split(' ')[1])
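    # A device file is a plain-text list of "keyword value" lines, for example
    # (illustrative, not shipped with the project):
    #   vendor junos
    #   transport ssh
    #   save_config yes
    #   include border-policy.rules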
def print_stats(self):
for hg in self._hostgroups:
hg.print_stats()
for rule in self._rules:
rule.print_stats()
def render(self):
print('Rendering {}'.format(self._name))
for include in self._include_list:
parsed_ruleset = Parser()
parsed_ruleset.parse_file(join(self._config.policies, include))
self._rules.append(parsed_ruleset)
for ruleset in self._rules:
self.resolve_hostgroups(ruleset)
self.resolve_portgroups(ruleset)
if self._vendor == 'junos':
renderer = JunosRenderer(self)
renderer.render()
if self._vendor == 'ios':
renderer = IOSRenderer(self)
renderer.render()
def render_to_file_and_deploy(self):
self.render()
if self._vendor == 'junos':
deployer = JunosDeployer(self)
deployer.render_to_file_and_deploy()
if self._vendor == 'ios':
#deployer = IOSDeployer(self)
deployer = SCPDeployer(self)
deployer.render_to_file_and_deploy()
def print_rendered_config(self):
print self._rendered_config
def resolve_hostgroup(self, hgname):
hg = HostGroup(hgname)
hg.parse_file()
if hg.has_inline_groups:
for ihg in hg.inline_groups:
if ihg not in self._hostgroups:
self._hostgroups.append(ihg)
if hg not in self._hostgroups:
self._hostgroups.append(hg)
def resolve_hostgroups(self, ruleset):
for rule in ruleset.tcp_rules:
if type(rule.src) == str and rule.src_is_group:
self.resolve_hostgroup(str(rule.src)[1:])
if type(rule.dst) == str and rule.dst_is_group:
self.resolve_hostgroup(str(rule.dst)[1:])
def resolve_portgroup(self, pgname):
pg = PortGroup(pgname)
pg.parse_file()
if pg not in self._portgroups:
self._portgroups.append(pg)
def resolve_portgroups(self, ruleset):
for rule in ruleset.tcp_rules:
if type(rule.srcport) == str and rule.srcport_is_group:
self.resolve_portgroup(str(rule.srcport)[1:])
if type(rule.dstport) == str and rule.dstport_is_group:
self.resolve_portgroup(str(rule.dstport)[1:])
| favoretti/accessninja | accessninja/device.py | Python | mit | 5,511 | 0.000363 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Script to run PyTiger2C from the command line.
"""
import os
import sys
import optparse
import subprocess
# Add the directory containing the packages in the source distribution to the path.
# This should be removed when Tiger2C is installed.
PACKAGES_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir, 'packages'))
sys.path.insert(0, PACKAGES_DIR)
from pytiger2c import __version__, __authors__, tiger2c, tiger2dot
from pytiger2c.errors import PyTiger2CError
EXIT_SUCCESS, EXIT_FAILURE = 0, 1
def _parse_args(argv):
"""
    Parse the options specified as command-line arguments.
    @type argv: C{list}
    @param argv: List of the program's arguments.
    @rtype: C{tuple}
    @return: Returns a tuple whose first element is a structure holding the
        information about the specified options and whose second element is a
        list with the remaining arguments.
"""
usage = '%prog <tiger-file> --output <output-file> [--output-type <output-type>]'
version = '%%prog (PyTiger2C) %s\n' % __version__
authors = '\n'.join(['Copyright (C) 2009, 2010 %s' % a for a in __authors__])
desc = 'Translates a Tiger program received as argument into a C program ' \
'and then compiles the C program into an executable using a C compiler. ' \
'This behavior can be modified using the --output-type option.'
parser = optparse.OptionParser(usage=usage,
version=version + authors,
description=desc,
prog=os.path.basename(argv[0]))
parser.add_option('-o', '--output', action='store', dest='output', metavar='FILE',
help='write the output to FILE')
parser.add_option('-t', '--output-type', action='store', dest='output_type', metavar='TYPE',
type='choice', choices=('ast', 'c', 'binary'),
help="output type: 'ast', 'c' or 'binary' (default '%default')")
parser.set_default('output_type', 'binary')
options, args = parser.parse_args(args=argv[1:])
optparse.check_choice(parser.get_option('--output-type'), '--output-type', options.output_type)
if not options.output:
parser.error('missing required --output option')
elif len(args) != 1:
parser.error('invalid number of arguments')
else:
return options, args
def main(argv):
"""
    Main function of the script.
    @type argv: C{list}
    @param argv: List of the program's arguments.
    @rtype: C{int}
    @return: Returns 0 if no error occurred during the execution of the
        program and 1 otherwise.
"""
options, args = _parse_args(argv)
tiger_filename = os.path.abspath(args[0])
output_filename = os.path.abspath(options.output)
try:
if options.output_type == 'ast':
tiger2dot(tiger_filename, output_filename)
elif options.output_type == 'c':
tiger2c(tiger_filename, output_filename)
# Translation completed. Beautify the code using GNU Indent.
INDENT_CMD = ['indent', '-gnu', '-l100', '-o', output_filename, output_filename]
if subprocess.call(INDENT_CMD) != EXIT_SUCCESS:
# Leave the c file for debugging.
sys.exit(EXIT_FAILURE)
elif options.output_type == 'binary':
basename = os.path.basename(tiger_filename)
index = basename.rfind('.')
c_filename = '%s.c' % (basename[:index] if index > 0 else basename)
c_filename = os.path.join(os.path.dirname(tiger_filename), c_filename)
tiger2c(tiger_filename, c_filename)
# Translation completed. Compile using GCC.
GCC_CMD = ['gcc', c_filename, '-o', output_filename, '-std=c99', '-lgc']
if subprocess.call(GCC_CMD) != EXIT_SUCCESS:
# Leave the temporal c file for debugging.
sys.exit(EXIT_FAILURE)
os.unlink(c_filename)
except PyTiger2CError, error:
print >> sys.stderr, error
sys.exit(EXIT_FAILURE)
else:
sys.exit(EXIT_SUCCESS)
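# Typical invocations (illustrative):
#   pytiger2c.py program.tig --output program          # build an executable
#   pytiger2c.py program.tig --output program.c -t c   # emit C code only
#   pytiger2c.py program.tig --output ast.dot -t ast   # dump the AST as DOT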
if __name__ == '__main__':
main(sys.argv) | yasserglez/pytiger2c | scripts/pytiger2c.py | Python | mit | 4,333 | 0.006238 |
from yowsup.stacks import YowStack
from .layer import EchoLayer
from yowsup.layers import YowLayerEvent
from yowsup.layers.auth import YowCryptLayer, YowAuthenticationProtocolLayer, AuthError
from yowsup.layers.coder import YowCoderLayer
from yowsup.layers.network import YowNetworkLayer
from yowsup.layers.protocol_messages import YowMessagesProtocolLayer
from yowsup.layers.protocol_media import YowMediaProtocolLayer
from yowsup.layers.stanzaregulator import YowStanzaRegulator
from yowsup.layers.protocol_receipts import YowReceiptProtocolLayer
from yowsup.layers.protocol_acks import YowAckProtocolLayer
from yowsup.layers.logger import YowLoggerLayer
from yowsup.layers.protocol_iq import YowIqProtocolLayer
from yowsup.layers.protocol_calls import YowCallsProtocolLayer
from yowsup.layers import YowParallelLayer
class YowsupEchoStack(object):
def __init__(self, credentials, encryptionEnabled = False):
if encryptionEnabled:
from yowsup.layers.axolotl import YowAxolotlLayer
layers = (
EchoLayer,
YowParallelLayer([YowAuthenticationProtocolLayer, YowMessagesProtocolLayer, YowReceiptProtocolLayer, YowAckProtocolLayer, YowMediaProtocolLayer, YowIqProtocolLayer, YowCallsProtocolLayer]),
YowAxolotlLayer,
YowLoggerLayer,
YowCoderLayer,
YowCryptLayer,
YowStanzaRegulator,
YowNetworkLayer
)
else:
layers = (
EchoLayer,
YowParallelLayer([YowAuthenticationProtocolLayer, YowMessagesProtocolLayer, YowReceiptProtocolLayer, YowAckProtocolLayer, YowMediaProtocolLayer, YowIqProtocolLayer, YowCallsProtocolLayer]),
YowLoggerLayer,
YowCoderLayer,
YowCryptLayer,
YowStanzaRegulator,
YowNetworkLayer
)
self.stack = YowStack(layers)
self.stack.setCredentials(credentials)
def start(self):
self.stack.broadcastEvent(YowLayerEvent(YowNetworkLayer.EVENT_STATE_CONNECT))
try:
self.stack.loop()
except AuthError as e:
print("Authentication Error: %s" % e.message)
| bassijtsma/chatbot | yowsup/demos/echoclient/stack.py | Python | gpl-3.0 | 2,467 | 0.008512 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Ansible by Red Hat, inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = """
---
module: iosxr_user
version_added: "2.4"
author:
- "Trishna Guha (@trishnaguha)"
- "Sebastiaan van Doesselaar (@sebasdoes)"
- "Kedar Kekan (@kedarX)"
short_description: Manage the aggregate of local users on Cisco IOS XR device
description:
- This module provides declarative management of the local usernames
configured on network devices. It allows playbooks to manage
either individual usernames or the aggregate of usernames in the
current running config. It also supports purging usernames from the
configuration that are not explicitly defined.
extends_documentation_fragment: iosxr
notes:
- Tested against IOS XRv 6.1.2
options:
aggregate:
description:
- The set of username objects to be configured on the remote
Cisco IOS XR device. The list entries can either be the username
or a hash of username and properties. This argument is mutually
exclusive with the C(name) argument.
aliases: ['users', 'collection']
name:
description:
- The username to be configured on the Cisco IOS XR device.
This argument accepts a string value and is mutually exclusive
with the C(aggregate) argument.
Please note that this option is not same as C(provider username).
configured_password:
description:
- The password to be configured on the Cisco IOS XR device. The password
needs to be provided in clear text. Password is encrypted on the device
when used with I(cli) and by Ansible when used with I(netconf)
using the same MD5 hash technique with salt size of 3.
Please note that this option is not same as C(provider password).
update_password:
description:
- Since passwords are encrypted in the device running config, this
argument will instruct the module when to change the password. When
set to C(always), the password will always be updated in the device
and when set to C(on_create) the password will be updated only if
the username is created.
default: always
choices: ['on_create', 'always']
group:
description:
- Configures the group for the username in the
device running configuration. The argument accepts a string value
defining the group name. This argument does not check if the group
has been configured on the device.
aliases: ['role']
groups:
version_added: "2.5"
description:
- Configures the groups for the username in the device running
configuration. The argument accepts a list of group names.
This argument does not check if the group has been configured
        on the device. It is similar to the aggregate command for
usernames, but lets you configure multiple groups for the user(s).
purge:
description:
- Instructs the module to consider the
resource definition absolute. It will remove any previously
configured usernames on the device with the exception of the
        `admin` user and the currently defined set of users.
type: bool
default: false
admin:
description:
- Enters into administration configuration mode for making config
changes to the device.
- Applicable only when using network_cli transport
type: bool
default: false
version_added: "2.8"
state:
description:
- Configures the state of the username definition
as it relates to the device operational configuration. When set
to I(present), the username(s) should be configured in the device active
configuration and when set to I(absent) the username(s) should not be
in the device active configuration
default: present
choices: ['present', 'absent']
public_key:
version_added: "2.5"
description:
- Configures the contents of the public keyfile to upload to the IOS-XR node.
This enables users to login using the accompanying private key. IOS-XR
only accepts base64 decoded files, so this will be decoded and uploaded
to the node. Do note that this requires an OpenSSL public key file,
PuTTy generated files will not work! Mutually exclusive with
public_key_contents. If used with multiple users in aggregates, then the
same key file is used for all users.
public_key_contents:
version_added: "2.5"
description:
- Configures the contents of the public keyfile to upload to the IOS-XR node.
This enables users to login using the accompanying private key. IOS-XR
only accepts base64 decoded files, so this will be decoded and uploaded
to the node. Do note that this requires an OpenSSL public key file,
PuTTy generated files will not work! Mutually exclusive with
        public_key. If used with multiple users in aggregates, then the
same key file is used for all users.
requirements:
- base64 when using I(public_key_contents) or I(public_key)
- paramiko when using I(public_key_contents) or I(public_key)
"""
EXAMPLES = """
- name: create a new user
iosxr_user:
name: ansible
configured_password: mypassword
state: present
- name: create a new user in admin configuration mode
iosxr_user:
name: ansible
configured_password: mypassword
admin: True
state: present
- name: remove all users except admin
iosxr_user:
purge: True
- name: set multiple users to group sys-admin
iosxr_user:
aggregate:
- name: netop
- name: netend
group: sysadmin
state: present
- name: set multiple users to multiple groups
iosxr_user:
aggregate:
- name: netop
- name: netend
groups:
- sysadmin
- root-system
state: present
- name: Change Password for User netop
iosxr_user:
name: netop
configured_password: "{{ new_password }}"
update_password: always
state: present
- name: Add private key authentication for user netop
iosxr_user:
name: netop
state: present
public_key_contents: "{{ lookup('file', '/home/netop/.ssh/id_rsa.pub' }}"
"""
RETURN = """
commands:
description: The list of configuration mode commands to send to the device
returned: always
type: list
sample:
- username ansible secret password group sysadmin
- username admin secret admin
xml:
description: NetConf rpc xml sent to device with transport C(netconf)
returned: always (empty list when no xml rpc to send)
type: list
version_added: 2.5
sample:
- '<config xmlns:xc=\"urn:ietf:params:xml:ns:netconf:base:1.0\">
<aaa xmlns=\"http://cisco.com/ns/yang/Cisco-IOS-XR-aaa-lib-cfg\">
<usernames xmlns=\"http://cisco.com/ns/yang/Cisco-IOS-XR-aaa-locald-cfg\">
<username xc:operation=\"merge\">
<name>test7</name>
<usergroup-under-usernames>
<usergroup-under-username>
<name>sysadmin</name>
</usergroup-under-username>
</usergroup-under-usernames>
<secret>$1$ZsXC$zZ50wqhDC543ZWQkkAHLW0</secret>
</username>
</usernames>
</aaa>
</config>'
"""
import os
from functools import partial
from copy import deepcopy
import collections
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.common.utils import remove_default_spec
from ansible.module_utils.network.iosxr.iosxr import get_config, load_config, is_netconf, is_cliconf
from ansible.module_utils.network.iosxr.iosxr import iosxr_argument_spec, build_xml, etree_findall
try:
from base64 import b64decode
HAS_B64 = True
except ImportError:
HAS_B64 = False
try:
import paramiko
HAS_PARAMIKO = True
except ImportError:
HAS_PARAMIKO = False
class PublicKeyManager(object):
def __init__(self, module, result):
self._module = module
self._result = result
def convert_key_to_base64(self):
""" IOS-XR only accepts base64 decoded files, this converts the public key to a temp file.
"""
if self._module.params['aggregate']:
name = 'aggregate'
else:
name = self._module.params['name']
if self._module.params['public_key_contents']:
key = self._module.params['public_key_contents']
elif self._module.params['public_key']:
readfile = open(self._module.params['public_key'], 'r')
key = readfile.read()
splitfile = key.split()[1]
base64key = b64decode(splitfile)
base64file = open('/tmp/publickey_%s.b64' % (name), 'wb')
base64file.write(base64key)
base64file.close()
return '/tmp/publickey_%s.b64' % (name)
def copy_key_to_node(self, base64keyfile):
""" Copy key to IOS-XR node. We use SFTP because older IOS-XR versions don't handle SCP very well.
"""
if (self._module.params['host'] is None or self._module.params['provider']['host'] is None):
return False
if (self._module.params['username'] is None or self._module.params['provider']['username'] is None):
return False
if self._module.params['aggregate']:
name = 'aggregate'
else:
name = self._module.params['name']
src = base64keyfile
dst = '/harddisk:/publickey_%s.b64' % (name)
user = self._module.params['username'] or self._module.params['provider']['username']
node = self._module.params['host'] or self._module.params['provider']['host']
password = self._module.params['password'] or self._module.params['provider']['password']
ssh_keyfile = self._module.params['ssh_keyfile'] or self._module.params['provider']['ssh_keyfile']
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
if not ssh_keyfile:
ssh.connect(node, username=user, password=password)
else:
ssh.connect(node, username=user, allow_agent=True)
sftp = ssh.open_sftp()
sftp.put(src, dst)
sftp.close()
ssh.close()
def addremovekey(self, command):
""" Add or remove key based on command
"""
if (self._module.params['host'] is None or self._module.params['provider']['host'] is None):
return False
if (self._module.params['username'] is None or self._module.params['provider']['username'] is None):
return False
user = self._module.params['username'] or self._module.params['provider']['username']
node = self._module.params['host'] or self._module.params['provider']['host']
password = self._module.params['password'] or self._module.params['provider']['password']
ssh_keyfile = self._module.params['ssh_keyfile'] or self._module.params['provider']['ssh_keyfile']
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
if not ssh_keyfile:
ssh.connect(node, username=user, password=password)
else:
ssh.connect(node, username=user, allow_agent=True)
ssh_stdin, ssh_stdout, ssh_stderr = ssh.exec_command('%s \r' % (command))
readmsg = ssh_stdout.read(100) # We need to read a bit to actually apply for some reason
if ('already' in readmsg) or ('removed' in readmsg) or ('really' in readmsg):
ssh_stdin.write('yes\r')
ssh_stdout.read(1) # We need to read a bit to actually apply for some reason
ssh.close()
return readmsg
def run(self):
if self._module.params['state'] == 'present':
if not self._module.check_mode:
key = self.convert_key_to_base64()
copykeys = self.copy_key_to_node(key)
if copykeys is False:
self._result['warnings'].append('Please set up your provider before running this playbook')
if self._module.params['aggregate']:
for user in self._module.params['aggregate']:
cmdtodo = "admin crypto key import authentication rsa username %s harddisk:/publickey_aggregate.b64" % (user)
addremove = self.addremovekey(cmdtodo)
if addremove is False:
self._result['warnings'].append('Please set up your provider before running this playbook')
else:
cmdtodo = "admin crypto key import authentication rsa username %s harddisk:/publickey_%s.b64" % \
(self._module.params['name'], self._module.params['name'])
addremove = self.addremovekey(cmdtodo)
if addremove is False:
self._result['warnings'].append('Please set up your provider before running this playbook')
elif self._module.params['state'] == 'absent':
if not self._module.check_mode:
if self._module.params['aggregate']:
for user in self._module.params['aggregate']:
cmdtodo = "admin crypto key zeroize authentication rsa username %s" % (user)
addremove = self.addremovekey(cmdtodo)
if addremove is False:
self._result['warnings'].append('Please set up your provider before running this playbook')
else:
cmdtodo = "admin crypto key zeroize authentication rsa username %s" % (self._module.params['name'])
addremove = self.addremovekey(cmdtodo)
if addremove is False:
self._result['warnings'].append('Please set up your provider before running this playbook')
elif self._module.params['purge'] is True:
if not self._module.check_mode:
cmdtodo = "admin crypto key zeroize authentication rsa all"
addremove = self.addremovekey(cmdtodo)
if addremove is False:
self._result['warnings'].append('Please set up your provider before running this playbook')
return self._result
def search_obj_in_list(name, lst):
for o in lst:
if o['name'] == name:
return o
return None
class ConfigBase(object):
def __init__(self, module, result, flag=None):
self._module = module
self._result = result
self._want = list()
self._have = list()
def get_param_value(self, key, item):
# if key doesn't exist in the item, get it from module.params
if not item.get(key):
value = self._module.params[key]
# if key does exist, do a type check on it to validate it
else:
value_type = self._module.argument_spec[key].get('type', 'str')
type_checker = self._module._CHECK_ARGUMENT_TYPES_DISPATCHER[value_type]
type_checker(item[key])
value = item[key]
# validate the param value (if validator func exists)
validator = globals().get('validate_%s' % key)
if all((value, validator)):
validator(value, self._module)
return value
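    # Illustrative behaviour of get_param_value (no additional logic, just an
    # example): with module-level state='absent' and
    # aggregate=[{'name': 'alice'}, {'name': 'bob', 'state': 'present'}],
    # the call returns 'absent' for alice (falling back to module.params) and
    # 'present' for bob (taken from the aggregate entry itself, after the
    # type check and the optional validate_<key> hook above).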
def map_params_to_obj(self):
users = self._module.params['aggregate']
aggregate = list()
if not users:
if not self._module.params['name'] and self._module.params['purge']:
pass
elif not self._module.params['name']:
self._module.fail_json(msg='username is required')
else:
aggregate = [{'name': self._module.params['name']}]
else:
for item in users:
if not isinstance(item, dict):
aggregate.append({'name': item})
elif 'name' not in item:
self._module.fail_json(msg='name is required')
else:
aggregate.append(item)
for item in aggregate:
get_value = partial(self.get_param_value, item=item)
item['configured_password'] = get_value('configured_password')
item['group'] = get_value('group')
item['groups'] = get_value('groups')
item['state'] = get_value('state')
self._want.append(item)
class CliConfiguration(ConfigBase):
def __init__(self, module, result):
super(CliConfiguration, self).__init__(module, result)
def map_config_to_obj(self):
data = get_config(self._module, config_filter='username')
users = data.strip().rstrip('!').split('!')
for user in users:
user_config = user.strip().splitlines()
name = user_config[0].strip().split()[1]
group = None
if len(user_config) > 1:
group_or_secret = user_config[1].strip().split()
if group_or_secret[0] == 'group':
group = group_or_secret[1]
obj = {
'name': name,
'state': 'present',
'configured_password': None,
'group': group
}
self._have.append(obj)
def map_obj_to_commands(self):
commands = list()
for w in self._want:
name = w['name']
state = w['state']
obj_in_have = search_obj_in_list(name, self._have)
if state == 'absent' and obj_in_have:
commands.append('no username ' + name)
elif state == 'present' and not obj_in_have:
user_cmd = 'username ' + name
commands.append(user_cmd)
if w['configured_password']:
commands.append(user_cmd + ' secret ' + w['configured_password'])
if w['group']:
commands.append(user_cmd + ' group ' + w['group'])
elif w['groups']:
for group in w['groups']:
commands.append(user_cmd + ' group ' + group)
elif state == 'present' and obj_in_have:
user_cmd = 'username ' + name
if self._module.params['update_password'] == 'always' and w['configured_password']:
commands.append(user_cmd + ' secret ' + w['configured_password'])
if w['group'] and w['group'] != obj_in_have['group']:
commands.append(user_cmd + ' group ' + w['group'])
elif w['groups']:
for group in w['groups']:
commands.append(user_cmd + ' group ' + group)
if self._module.params['purge']:
want_users = [x['name'] for x in self._want]
have_users = [x['name'] for x in self._have]
for item in set(have_users).difference(set(want_users)):
if item != 'admin':
commands.append('no username %s' % item)
if 'no username admin' in commands:
self._module.fail_json(msg='cannot delete the `admin` account')
self._result['commands'] = []
if commands:
commit = not self._module.check_mode
admin = self._module.params['admin']
diff = load_config(self._module, commands, commit=commit, admin=admin)
if diff:
self._result['diff'] = dict(prepared=diff)
self._result['commands'] = commands
self._result['changed'] = True
def run(self):
self.map_params_to_obj()
self.map_config_to_obj()
self.map_obj_to_commands()
return self._result
class NCConfiguration(ConfigBase):
def __init__(self, module, result):
super(NCConfiguration, self).__init__(module, result)
self._locald_meta = collections.OrderedDict()
self._locald_group_meta = collections.OrderedDict()
def generate_md5_hash(self, arg):
'''
        Generate an MD5-crypt hash of ``arg`` using a randomly generated
        3-byte (base64-encoded) salt, by shelling out to openssl.
        :param arg: plain-text password to hash
        :return passwd: the MD5-crypt hash string, e.g. $1$<salt>$<digest>
'''
cmd = "openssl passwd -salt `openssl rand -base64 3` -1 "
return os.popen(cmd + arg).readlines()[0].strip()
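    # Note (illustrative alternative only, not used by this module): the same
    # MD5-crypt hash could be produced in-process, assuming the third-party
    # 'passlib' package were available:
    #
    #     from passlib.hash import md5_crypt
    #     hashed = md5_crypt.hash('my_password')   # -> '$1$<salt>$<digest>'
    #
    # Shelling out to openssl as above avoids adding that dependency.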
def map_obj_to_xml_rpc(self):
self._locald_meta.update([
('aaa_locald', {'xpath': 'aaa/usernames', 'tag': True, 'ns': True}),
('username', {'xpath': 'aaa/usernames/username', 'tag': True, 'attrib': "operation"}),
('a:name', {'xpath': 'aaa/usernames/username/name'}),
('a:configured_password', {'xpath': 'aaa/usernames/username/secret', 'operation': 'edit'}),
])
self._locald_group_meta.update([
('aaa_locald', {'xpath': 'aaa/usernames', 'tag': True, 'ns': True}),
('username', {'xpath': 'aaa/usernames/username', 'tag': True, 'attrib': "operation"}),
('a:name', {'xpath': 'aaa/usernames/username/name'}),
('usergroups', {'xpath': 'aaa/usernames/username/usergroup-under-usernames', 'tag': True, 'operation': 'edit'}),
('usergroup', {'xpath': 'aaa/usernames/username/usergroup-under-usernames/usergroup-under-username', 'tag': True, 'operation': 'edit'}),
('a:group', {'xpath': 'aaa/usernames/username/usergroup-under-usernames/usergroup-under-username/name', 'operation': 'edit'}),
])
state = self._module.params['state']
_get_filter = build_xml('aaa', opcode="filter")
running = get_config(self._module, source='running', config_filter=_get_filter)
elements = etree_findall(running, 'username')
users = list()
for element in elements:
name_list = etree_findall(element, 'name')
users.append(name_list[0].text)
list_size = len(name_list)
if list_size == 1:
self._have.append({'name': name_list[0].text, 'group': None, 'groups': None})
elif list_size == 2:
self._have.append({'name': name_list[0].text, 'group': name_list[1].text, 'groups': None})
elif list_size > 2:
name_iter = iter(name_list)
next(name_iter)
tmp_list = list()
for name in name_iter:
tmp_list.append(name.text)
self._have.append({'name': name_list[0].text, 'group': None, 'groups': tmp_list})
locald_params = list()
locald_group_params = list()
opcode = None
if state == 'absent':
opcode = "delete"
for want_item in self._want:
if want_item['name'] in users:
want_item['configured_password'] = None
locald_params.append(want_item)
elif state == 'present':
opcode = "merge"
for want_item in self._want:
if want_item['name'] not in users:
want_item['configured_password'] = self.generate_md5_hash(want_item['configured_password'])
locald_params.append(want_item)
if want_item['group'] is not None:
locald_group_params.append(want_item)
if want_item['groups'] is not None:
for group in want_item['groups']:
want_item['group'] = group
locald_group_params.append(want_item.copy())
else:
if self._module.params['update_password'] == 'always' and want_item['configured_password'] is not None:
want_item['configured_password'] = self.generate_md5_hash(want_item['configured_password'])
locald_params.append(want_item)
else:
want_item['configured_password'] = None
obj_in_have = search_obj_in_list(want_item['name'], self._have)
if want_item['group'] is not None and want_item['group'] != obj_in_have['group']:
locald_group_params.append(want_item)
elif want_item['groups'] is not None:
for group in want_item['groups']:
want_item['group'] = group
locald_group_params.append(want_item.copy())
purge_params = list()
if self._module.params['purge']:
want_users = [x['name'] for x in self._want]
have_users = [x['name'] for x in self._have]
for item in set(have_users).difference(set(want_users)):
if item != 'admin':
purge_params.append({'name': item})
self._result['xml'] = []
_edit_filter_list = list()
if opcode is not None:
if locald_params:
_edit_filter_list.append(build_xml('aaa', xmap=self._locald_meta,
params=locald_params, opcode=opcode))
if locald_group_params:
_edit_filter_list.append(build_xml('aaa', xmap=self._locald_group_meta,
params=locald_group_params, opcode=opcode))
if purge_params:
_edit_filter_list.append(build_xml('aaa', xmap=self._locald_meta,
params=purge_params, opcode="delete"))
diff = None
if _edit_filter_list:
commit = not self._module.check_mode
diff = load_config(self._module, _edit_filter_list, commit=commit, running=running,
nc_get_filter=_get_filter)
if diff:
if self._module._diff:
self._result['diff'] = dict(prepared=diff)
self._result['xml'] = _edit_filter_list
self._result['changed'] = True
def run(self):
self.map_params_to_obj()
self.map_obj_to_xml_rpc()
return self._result
def main():
""" main entry point for module execution
"""
element_spec = dict(
name=dict(),
configured_password=dict(no_log=True),
update_password=dict(default='always', choices=['on_create', 'always']),
admin=dict(type='bool', default=False),
public_key=dict(),
public_key_contents=dict(),
group=dict(aliases=['role']),
groups=dict(type='list', elements='dict'),
state=dict(default='present', choices=['present', 'absent'])
)
aggregate_spec = deepcopy(element_spec)
aggregate_spec['name'] = dict(required=True)
# remove default in aggregate spec, to handle common arguments
remove_default_spec(aggregate_spec)
mutually_exclusive = [('name', 'aggregate'), ('public_key', 'public_key_contents'), ('group', 'groups')]
argument_spec = dict(
aggregate=dict(type='list', elements='dict', options=aggregate_spec, aliases=['users', 'collection'],
mutually_exclusive=mutually_exclusive),
purge=dict(type='bool', default=False)
)
argument_spec.update(element_spec)
argument_spec.update(iosxr_argument_spec)
module = AnsibleModule(argument_spec=argument_spec,
mutually_exclusive=mutually_exclusive,
supports_check_mode=True)
if (module.params['public_key_contents'] or module.params['public_key']):
if not HAS_B64:
module.fail_json(
msg='library base64 is required but does not appear to be '
'installed. It can be installed using `pip install base64`'
)
if not HAS_PARAMIKO:
module.fail_json(
msg='library paramiko is required but does not appear to be '
'installed. It can be installed using `pip install paramiko`'
)
result = {'changed': False, 'warnings': []}
if module.params['password'] and not module.params['configured_password']:
result['warnings'].append(
'The "password" argument is used to authenticate the current connection. ' +
'To set a user password use "configured_password" instead.'
)
config_object = None
if is_cliconf(module):
module.deprecate(msg="cli support for 'iosxr_user' is deprecated. Use transport netconf instead",
version="2.9")
config_object = CliConfiguration(module, result)
elif is_netconf(module):
config_object = NCConfiguration(module, result)
if config_object:
result = config_object.run()
if module.params['public_key_contents'] or module.params['public_key']:
pubkey_object = PublicKeyManager(module, result)
result = pubkey_object.run()
module.exit_json(**result)
if __name__ == '__main__':
main()
| Jorge-Rodriguez/ansible | lib/ansible/modules/network/iosxr/iosxr_user.py | Python | gpl-3.0 | 29,071 | 0.002752 |
# Copyright 2014-2016 ARM Limited
#
# Licensed under the Apache License, Version 2.0
# See LICENSE file for details.
# standard library modules, , ,
import json
import os
from collections import OrderedDict
import tarfile
import re
import logging
import errno
import copy
import hashlib
# PyPi/standard library > 3.4
# it has to be PurePath
from pathlib import PurePath
# JSON Schema, pip install jsonschema, Verify JSON Schemas, MIT
import jsonschema
# Ordered JSON, , read & write json, internal
from yotta.lib import ordered_json
# fsutils, , misc filesystem utils, internal
from yotta.lib import fsutils
# Registry Access, , access packages in the registry, internal
from yotta.lib import registry_access
# These patterns are used in addition to any glob expressions defined by the
# .yotta_ignore file
Default_Publish_Ignore = [
'/upload.tar.[gb]z',
'/.git',
'/.hg',
'/.svn',
'/yotta_modules',
'/yotta_targets',
'/build',
'.DS_Store',
'*.sw[ponml]',
'*~',
'._.*',
'.yotta.json'
]
Readme_Regex = re.compile('^readme(?:\.md)?', re.IGNORECASE)
Ignore_List_Fname = '.yotta_ignore'
Shrinkwrap_Fname = 'yotta-shrinkwrap.json'
Shrinkwrap_Schema = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'schema', 'shrinkwrap.json')
Origin_Info_Fname = '.yotta_origin.json'
logger = logging.getLogger('components')
def tryTerminate(process):
try:
process.terminate()
except OSError as e:
# if the error is "no such process" then the process probably exited
# while we were waiting for it, so don't raise an exception
if e.errno != errno.ESRCH:
raise
class InvalidDescription(Exception):
pass
# OptionalFileWrapper provides a scope object that can wrap a none-existent file
class OptionalFileWrapper(object):
def __init__(self, fname=None, mode=None):
self.fname = fname
self.mode = mode
super(OptionalFileWrapper, self).__init__()
def __enter__(self):
if self.fname:
self.file = open(self.fname, self.mode)
else:
self.file = open(os.devnull)
return self
def __exit__(self, type, value, traceback):
self.file.close()
def contents(self):
if self.fname:
return self.file.read()
else:
return ''
def extension(self):
if self.fname:
return os.path.splitext(self.fname)[1]
else:
return ''
def __nonzero__(self):
return bool(self.fname)
# python 3 truthiness
def __bool__(self):
return bool(self.fname)
class DependencySpec(object):
def __init__(self, name, version_req, is_test_dependency=False, shrinkwrap_version_req=None, specifying_module=None):
self.name = name
self.version_req = version_req
self.specifying_module = specifying_module # for diagnostic info only, may not be present
self.is_test_dependency = is_test_dependency
self.shrinkwrap_version_req = shrinkwrap_version_req
def isShrinkwrapped(self):
return self.shrinkwrap_version_req is not None
def nonShrinkwrappedVersionReq(self):
''' return the dependency specification ignoring any shrinkwrap '''
return self.version_req
def versionReq(self):
''' return the dependency specification, which may be from a shrinkwrap file '''
return self.shrinkwrap_version_req or self.version_req
def __unicode__(self):
return u'%s at %s' % (self.name, self.version_req)
def __str__(self):
import sys
# in python 3 __str__ must return a string (i.e. unicode), in
# python 2, it must not return unicode, so:
if sys.version_info[0] >= 3:
return self.__unicode__()
else:
return self.__unicode__().encode('utf8')
def __repr__(self):
return self.__unicode__()
def tryReadJSON(filename, schemaname):
r = None
try:
with open(filename, 'r') as jsonfile:
r = ordered_json.load(filename)
if schemaname is not None:
with open(schemaname, 'r') as schema_file:
schema = json.load(schema_file)
validator = jsonschema.Draft4Validator(schema)
for error in validator.iter_errors(r):
logger.error(
'%s is not valid under the schema: %s value %s',
filename,
u'.'.join([str(x) for x in error.path]),
error.message
)
except IOError as e:
if e.errno != errno.ENOENT:
raise
return r
# Pack represents the common parts of Target and Component objects (versions,
# VCS, etc.)
class Pack(object):
schema_errors_displayed = set()
def __init__(
self,
path,
description_filename,
installed_linked,
schema_filename = None,
latest_suitable_version = None,
inherit_shrinkwrap = None
):
# version, , represent versions and specifications, internal
from yotta.lib import version
# vcs, , represent version controlled directories, internal
from yotta.lib import vcs
# resolve links at creation time, to minimise path lengths:
self.unresolved_path = path
self.path = fsutils.realpath(path)
self.installed_linked = installed_linked
self.vcs = None
self.error = None
self.latest_suitable_version = latest_suitable_version
self.version = None
self.description_filename = description_filename
self.ignore_list_fname = Ignore_List_Fname
self.ignore_patterns = copy.copy(Default_Publish_Ignore)
self.origin_info = None
description_file = os.path.join(path, description_filename)
if os.path.isfile(description_file):
try:
self.description = ordered_json.load(description_file)
if self.description:
if not 'name' in self.description:
raise Exception('missing "name"')
if 'version' in self.description:
self.version = version.Version(self.description['version'])
else:
raise Exception('missing "version"')
except Exception as e:
self.description = OrderedDict()
self.error = "Description invalid %s: %s" % (description_file, e);
logger.debug(self.error)
raise InvalidDescription(self.error)
else:
self.error = "No %s file." % description_filename
self.description = OrderedDict()
try:
with open(os.path.join(path, self.ignore_list_fname), 'r') as ignorefile:
self.ignore_patterns += self._parseIgnoreFile(ignorefile)
except IOError as e:
if e.errno != errno.ENOENT:
raise
# warn about invalid yotta versions before schema errors (as new yotta
# might introduce new schema)
yotta_version_spec = None
if self.description and self.description.get('yotta', None):
try:
yotta_version_spec = version.Spec(self.description['yotta'])
except ValueError as e:
logger.warning(
"could not parse yotta version spec '%s' from %s: it "+
"might require a newer version of yotta",
self.description['yotta'],
self.description['name']
)
if yotta_version_spec is not None:
import yotta
yotta_version = version.Version(yotta.__version__)
if not yotta_version_spec.match(yotta_version):
self.error = "requires yotta version %s (current version is %s). see http://docs.yottabuild.org for update instructions" % (
str(yotta_version_spec),
str(yotta_version)
)
if self.description and schema_filename and not self.path in self.schema_errors_displayed:
self.schema_errors_displayed.add(self.path)
have_errors = False
with open(schema_filename, 'r') as schema_file:
schema = json.load(schema_file)
validator = jsonschema.Draft4Validator(schema)
for error in validator.iter_errors(self.description):
if not have_errors:
logger.warning(u'%s has invalid %s:' % (
os.path.split(self.path.rstrip('/'))[1],
description_filename
))
have_errors = True
logger.warning(u" %s value %s" % (u'.'.join([str(x) for x in error.path]), error.message))
# for now schema validation errors aren't fatal... will be soon
# though!
#if have_errors:
# raise InvalidDescription('Invalid %s' % description_filename)
self.inherited_shrinkwrap = None
self.shrinkwrap = None
# we can only apply shrinkwraps to instances with valid descriptions:
# instances do not become valid after being invalid so this is safe
# (but it means you cannot trust the shrinkwrap of an invalid
# component)
# (note that it is unsafe to use the __bool__ operator on self here as
# we are not fully constructed)
if self.description:
self.inherited_shrinkwrap = inherit_shrinkwrap
self.shrinkwrap = tryReadJSON(os.path.join(path, Shrinkwrap_Fname), Shrinkwrap_Schema)
if self.shrinkwrap:
logger.warning('dependencies of %s are pegged by yotta-shrinkwrap.json', self.getName())
if self.inherited_shrinkwrap:
logger.warning('shrinkwrap in %s overrides inherited shrinkwrap', self.getName())
#logger.info('%s created with inherited_shrinkwrap %s', self.getName(), self.inherited_shrinkwrap)
self.vcs = vcs.getVCS(path)
def getShrinkwrap(self):
return self.shrinkwrap or self.inherited_shrinkwrap
def getShrinkwrapMapping(self, variant='modules'):
shrinkwrap = self.getShrinkwrap()
assert(variant in ['modules', 'targets'])
if shrinkwrap and variant in shrinkwrap:
return {
x['name']: x['version'] for x in shrinkwrap[variant]
}
else:
return {}
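    # For reference (hypothetical example data, shown only to illustrate the
    # returned shape): given a yotta-shrinkwrap.json containing
    #   {"modules": [{"name": "some-module", "version": "1.0.4"}]}
    # getShrinkwrapMapping('modules') would return {'some-module': '1.0.4'}
    # and getShrinkwrapMapping('targets') would return {}.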
def origin(self):
''' Read the .yotta_origin.json file (if present), and return the value
of the 'url' property '''
if self.origin_info is None:
self.origin_info = {}
try:
self.origin_info = ordered_json.load(os.path.join(self.path, Origin_Info_Fname))
except IOError:
pass
return self.origin_info.get('url', None)
def getRegistryNamespace(self):
raise NotImplementedError("must be implemented by subclass")
def exists(self):
return os.path.exists(self.description_filename)
def getError(self):
''' If this isn't a valid component/target, return some sort of
explanation about why that is. '''
return self.error
def setError(self, error):
''' Set an error: note that setting an error does not make the module
invalid if it would otherwise be valid.
'''
self.error = error
def getDescriptionFile(self):
return os.path.join(self.path, self.description_filename)
def installedLinked(self):
return self.installed_linked
def setLatestAvailable(self, version):
self.latest_suitable_version = version
def outdated(self):
''' Return a truthy object if a newer suitable version is available,
otherwise return None.
(in fact the object returned is a ComponentVersion that can be used
to get the newer version)
'''
if self.latest_suitable_version and self.latest_suitable_version > self.version:
return self.latest_suitable_version
else:
return None
def vcsIsClean(self):
''' Return true if the directory is not version controlled, or if it is
version controlled with a supported system and is in a clean state
'''
if not self.vcs:
return True
return self.vcs.isClean()
def commitVCS(self, tag=None):
''' Commit the current working directory state (or do nothing if the
working directory is not version controlled)
'''
if not self.vcs:
return
self.vcs.commit(message='version %s' % tag, tag=tag)
def getVersion(self):
''' Return the version as specified by the package file.
This will always be a real version: 1.2.3, not a hash or a URL.
Note that a component installed through a URL still provides a real
version - so if the first component to depend on some component C
depends on it via a URI, and a second component depends on a
specific version 1.2.3, dependency resolution will only succeed if
the version of C obtained from the URL happens to be 1.2.3
'''
return self.version
def getName(self):
if self.description:
return self.description['name']
else:
return None
def getKeywords(self):
if self.description:
return self.description.get('keywords', [])
else:
return []
def _parseIgnoreFile(self, f):
r = []
for l in f:
l = l.rstrip('\n\r')
if not l.startswith('#') and len(l):
r.append(l)
return r
def ignores(self, path):
''' Test if this module ignores the file at "path", which must be a
path relative to the root of the module.
If a file is within a directory that is ignored, the file is also
ignored.
'''
test_path = PurePath('/', path)
# also check any parent directories of this path against the ignore
# patterns:
test_paths = tuple([test_path] + list(test_path.parents))
for exp in self.ignore_patterns:
for tp in test_paths:
if tp.match(exp):
logger.debug('"%s" ignored ("%s" matched "%s")', path, tp, exp)
return True
return False
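    # Usage sketch (hypothetical module layout, for illustration only): with
    # the default ignore patterns plus a .yotta_ignore containing "*.log",
    # one would expect:
    #   pack.ignores('build/x/y.c')    -> True   ('/build' is ignored)
    #   pack.ignores('debug.log')      -> True   (matches the user pattern)
    #   pack.ignores('source/main.c')  -> False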
def setVersion(self, version):
self.version = version
self.description['version'] = str(self.version)
def setName(self, name):
self.description['name'] = name
def writeDescription(self):
''' Write the current (possibly modified) component description to a
package description file in the component directory.
'''
ordered_json.dump(os.path.join(self.path, self.description_filename), self.description)
if self.vcs:
self.vcs.markForCommit(self.description_filename)
def generateTarball(self, file_object):
''' Write a tarball of the current component/target to the file object
"file_object", which must already be open for writing at position 0
'''
archive_name = '%s-%s' % (self.getName(), self.getVersion())
def filterArchive(tarinfo):
if tarinfo.name.find(archive_name) == 0 :
unprefixed_name = tarinfo.name[len(archive_name)+1:]
tarinfo.mode &= 0o775
else:
unprefixed_name = tarinfo.name
if self.ignores(unprefixed_name):
return None
else:
return tarinfo
with tarfile.open(fileobj=file_object, mode='w:gz') as tf:
logger.info('generate archive extracting to "%s"' % archive_name)
tf.add(self.path, arcname=archive_name, filter=filterArchive)
def findAndOpenReadme(self):
files = os.listdir(self.path)
readme_files = [x for x in files if Readme_Regex.match(x)]
for f in readme_files:
if f.endswith('.md'):
return OptionalFileWrapper(f, 'r')
if len(readme_files):
# if we have multiple files and none of them end with .md, then we're
# in some hellish world where files have the same name with different
# casing. Just choose the first in the directory listing:
return OptionalFileWrapper(readme_files[0], 'r')
else:
# no readme files: return an empty file wrapper
return OptionalFileWrapper()
def publish(self, registry=None):
''' Publish to the appropriate registry, return a description of any
errors that occured, or None if successful.
No VCS tagging is performed.
'''
if (registry is None) or (registry == registry_access.Registry_Base_URL):
if 'private' in self.description and self.description['private']:
return "this %s is private and cannot be published" % (self.description_filename.split('.')[0])
upload_archive = os.path.join(self.path, 'upload.tar.gz')
fsutils.rmF(upload_archive)
fd = os.open(upload_archive, os.O_CREAT | os.O_EXCL | os.O_RDWR | getattr(os, "O_BINARY", 0))
with os.fdopen(fd, 'rb+') as tar_file:
tar_file.truncate()
self.generateTarball(tar_file)
logger.debug('generated tar file of length %s', tar_file.tell())
tar_file.seek(0)
# calculate the hash of the file before we upload it:
shasum = hashlib.sha256()
while True:
chunk = tar_file.read(1000)
if not chunk:
break
shasum.update(chunk)
logger.debug('generated tar file has hash %s', shasum.hexdigest())
tar_file.seek(0)
with self.findAndOpenReadme() as readme_file_wrapper:
if not readme_file_wrapper:
logger.warning("no readme.md file detected")
with open(self.getDescriptionFile(), 'r') as description_file:
return registry_access.publish(
self.getRegistryNamespace(),
self.getName(),
self.getVersion(),
description_file,
tar_file,
readme_file_wrapper.file,
readme_file_wrapper.extension().lower(),
registry=registry
)
def unpublish(self, registry=None):
''' Try to un-publish the current version. Return a description of any
errors that occured, or None if successful.
'''
return registry_access.unpublish(
self.getRegistryNamespace(),
self.getName(),
self.getVersion(),
registry=registry
)
def getScript(self, scriptname):
''' Return the specified script command. If the first part of the
command is a .py file, then the current python interpreter is
prepended.
If the script is a single string, rather than an array, it is
shlex-split.
'''
script = self.description.get('scripts', {}).get(scriptname, None)
if script is not None:
if isinstance(script, str) or isinstance(script, type(u'unicode string')):
import shlex
script = shlex.split(script)
# if the command is a python script, run it with the python
# interpreter being used to run yotta, also fetch the absolute path
# to the script relative to this module (so that the script can be
# distributed with the module, no matter what current working
# directory it will be executed in):
if len(script) and script[0].lower().endswith('.py'):
if not os.path.isabs(script[0]):
absscript = os.path.abspath(os.path.join(self.path, script[0]))
logger.debug('rewriting script %s to be absolute path %s', script[0], absscript)
script[0] = absscript
import sys
script = [sys.executable] + script
return script
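    # Example of the rewriting performed above (illustrative values): for a
    # module.json containing
    #   "scripts": {"testReporter": "report.py --filter basic"}
    # getScript('testReporter') returns roughly
    #   [sys.executable, '<abs module path>/report.py', '--filter', 'basic']
    # because string scripts are shlex-split and .py commands are run with
    # the same Python interpreter that is running yotta.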
@fsutils.dropRootPrivs
def runScript(self, scriptname, additional_environment=None):
''' Run the specified script from the scripts section of the
module.json file in the directory of this module.
'''
import subprocess
import shlex
command = self.getScript(scriptname)
if command is None:
logger.debug('%s has no script %s', self, scriptname)
return 0
if not len(command):
logger.error("script %s of %s is empty", scriptname, self.getName())
return 1
# define additional environment variables for scripts:
env = os.environ.copy()
if additional_environment is not None:
env.update(additional_environment)
errcode = 0
child = None
try:
logger.debug('running script: %s', command)
child = subprocess.Popen(
command, cwd = self.path, env = env
)
child.wait()
if child.returncode:
logger.error(
"script %s (from %s) exited with non-zero status %s",
scriptname,
self.getName(),
child.returncode
)
errcode = child.returncode
child = None
finally:
if child is not None:
tryTerminate(child)
return errcode
@classmethod
def ensureOrderedDict(cls, sequence=None):
# !!! NB: MUST return the same object if the object is already an
# ordered dictionary. we rely on spooky-action-at-a-distance to keep
# the "available components" dictionary synchronised between all users
if isinstance(sequence, OrderedDict):
return sequence
elif sequence:
return OrderedDict(sequence)
else:
return OrderedDict()
def __repr__(self):
if not self:
return "INVALID COMPONENT @ %s: %s" % (self.path, self.description)
return "%s %s at %s" % (self.description['name'], self.description['version'], self.path)
# provided for truthiness testing, we test true only if we successfully
# read a package file
def __nonzero__(self):
return bool(self.description)
# python 3 truthiness
def __bool__(self):
return bool(self.description)
| ARMmbed/yotta | yotta/lib/pack.py | Python | apache-2.0 | 23,065 | 0.002948 |
import os
sys = os.system
CC = 'g++ {} -std=gnu++0x -Wall'
FLAG_clear = ['/c', '-c']
FLAG_window = ['/w', '-w']
FLAG_exit = ['/e', '-e']
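# Usage note (illustrative): commands look like "run 3 -c -w" -- compile and
# run the third listed .cpp file, clearing the console first (-c or /c) and
# starting the program in a new window (-w or /w); "-e" (or "/e") skips the
# final "press enter to exit" prompt.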
def main():
print('List of existing <*.cpp> files:')
files = []
counter = 0
for file in os.listdir():
if file[-4:] == '.cpp':
counter += 1
files.append(file)
print('{:->3d}) {}'.format(counter, file[:-4]))
name = ''
flags = []
command, *ex = input('Enter your <command> [<name>] [<*flags>]: ').split()
if len(ex):
name = ex[0]
flags = list(ex[1:])
try:
name = files[int(name) - 1]
except:
if name[0] == '#':
try:
fileid = int(name[1:])
name = files[fileid - 1]
except:
pass
else:
flags = list(ex)
if command == 'open':
if len(list(set(FLAG_clear).intersection(set(flags)))) > 0:
sys('cls')
if len(list(set(FLAG_window).intersection(set(flags)))) > 0:
sys('start {}'.format(name))
else:
sys('{}'.format(name))
elif command == 'compile':
if len(list(set(FLAG_clear).intersection(set(flags)))) > 0:
sys('cls')
print('Compiling...')
err = sys((CC+' -o {}.exe').format(name, name[:-4]))
if err:
print('Error during compiling. <{}>'.format(err))
else:
print('Compiled succesfully.')
elif command == 'run':
if len(list(set(FLAG_clear).intersection(set(flags)))) > 0:
sys('cls')
print('Compiling...')
err = sys((CC+' -o {}.exe').format(name, name[:-4]))
if err:
print('Error during compiling. <{}>'.format(err))
else:
print('Compiled succesfully. Starting:\n' + '-' * 31)
if len(list(set(FLAG_window).intersection(set(flags)))) > 0:
err2 = sys('start {}.exe'.format(name[:-4]))
else:
err2 = sys('{}.exe'.format(name[:-4]))
if err2:
print('-' * 30 + '\nError during execution. <{}>'.format(err2))
else:
print('-' * 17 + '\nDone succesfully.')
elif command == 'list':
if name != '':
if len(list(set(FLAG_clear).intersection(set(flags)))) > 0:
sys('cls')
print('List of existing <*.{}> files:'.format(name))
l = len(name)
for file in os.listdir():
if file[-l:] == name:
print('{:>20}'.format(file[:-l - 1]))
else:
print('List of all existing files:')
for file in os.listdir():
print('{:>20}'.format(file))
if len(list(set(FLAG_exit).intersection(set(flags)))) == 0:
input('-' * 25 + '\nEnd. Press enter to exit: ')
main()
| Lipen/LipenDev | Azeroth/Pandaria/process.py | Python | gpl-3.0 | 2,341 | 0.032892 |
# -*- coding: utf-8 -*-
# main.py
import webapp2
from authomatic import Authomatic
from authomatic.adapters import Webapp2Adapter
from config import CONFIG
# Instantiate Authomatic.
authomatic = Authomatic(config=CONFIG, secret='some random secret string')
# Create a simple request handler for the login procedure.
class Login(webapp2.RequestHandler):
# The handler must accept GET and POST http methods and
# Accept any HTTP method and catch the "provider_name" URL variable.
def any(self, provider_name):
# It all begins with login.
result = authomatic.login(Webapp2Adapter(self), provider_name)
# Do not write anything to the response if there is no result!
if result:
# If there is result, the login procedure is over and we can write
# to response.
self.response.write('<a href="..">Home</a>')
if result.error:
# Login procedure finished with an error.
self.response.write(
u'<h2>Damn that error: {}</h2>'.format(result.error.message))
elif result.user:
# Hooray, we have the user!
                # OAuth 2.0 and OAuth 1.0a provide only limited user data on
                # login, so we need to update the user to get more info.
if not (result.user.name and result.user.id):
result.user.update()
# Welcome the user.
self.response.write(u'<h1>Hi {}</h1>'.format(result.user.name))
self.response.write(
u'<h2>Your id is: {}</h2>'.format(result.user.id))
self.response.write(
u'<h2>Your email is: {}</h2>'.format(result.user.email))
# Seems like we're done, but there's more we can do...
# If there are credentials (only by AuthorizationProvider),
                # we can access the user's protected resources.
if result.user.credentials:
                    # Each provider has its own specific API.
if result.provider.name == 'fb':
self.response.write(
                            'You are logged in with Facebook.<br />')
# We will access the user's 5 most recent statuses.
url = 'https://graph.facebook.com/{}?fields=feed.limit(5)'
url = url.format(result.user.id)
# Access user's protected resource.
response = result.provider.access(url)
if response.status == 200:
# Parse response.
statuses = response.data.get('feed').get('data')
error = response.data.get('error')
if error:
self.response.write(
u'Damn that error: {}!'.format(error))
elif statuses:
self.response.write(
'Your 5 most recent statuses:<br />')
for message in statuses:
text = message.get('message')
date = message.get('created_time')
self.response.write(
u'<h3>{}</h3>'.format(text))
self.response.write(
u'Posted on: {}'.format(date))
else:
self.response.write(
'Damn that unknown error!<br />')
self.response.write(
u'Status: {}'.format(response.status))
if result.provider.name == 'tw':
self.response.write(
                            'You are logged in with Twitter.<br />')
# We will get the user's 5 most recent tweets.
url = 'https://api.twitter.com/1.1/statuses/user_timeline.json'
# You can pass a dictionary of querystring parameters.
response = result.provider.access(url, {'count': 5})
# Parse response.
if response.status == 200:
if isinstance(response.data, list):
# Twitter returns the tweets as a JSON list.
self.response.write(
'Your 5 most recent tweets:')
for tweet in response.data:
text = tweet.get('text')
date = tweet.get('created_at')
self.response.write(
u'<h3>{}</h3>'.format(text.replace(u'\u2013', '[???]')))
self.response.write(
u'Tweeted on: {}'.format(date))
elif response.data.get('errors'):
self.response.write(u'Damn that error: {}!'.
format(response.data.get('errors')))
else:
self.response.write(
'Damn that unknown error!<br />')
self.response.write(
u'Status: {}'.format(response.status))
# Create a home request handler just that you don't have to enter the urls
# manually.
class Home(webapp2.RequestHandler):
def get(self):
# Create links to the Login handler.
self.response.write(
'Login with <a href="login/fb">Facebook</a>.<br />')
self.response.write('Login with <a href="login/tw">Twitter</a>.<br />')
# Create OpenID form where the user can specify their claimed identifier.
# The library by default extracts the identifier from the "id"
# parameter.
self.response.write('''
<form action="login/oi">
<input type="text" name="id" value="me.yahoo.com" />
<input type="submit" value="Authenticate With OpenID">
</form>
''')
# Create GAEOpenID form
self.response.write('''
<form action="login/gae_oi">
<input type="text" name="id" value="me.yahoo.com" />
<input type="submit" value="Authenticate With GAEOpenID">
</form>
''')
# Create routes.
ROUTES = [webapp2.Route(r'/login/<:.*>', Login, handler_method='any'),
webapp2.Route(r'/', Home)]
# Instantiate the webapp2 WSGI application.
app = webapp2.WSGIApplication(ROUTES, debug=True)
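# Deployment note (illustrative, not part of this example's code): on Google
# App Engine (python27) this WSGI app would typically be referenced from
# app.yaml with a catch-all handler such as:
#
#   handlers:
#   - url: /.*
#     script: main.app
#
# so that both "/" and "/login/<provider>" reach the routes defined above.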
| peterhudec/authomatic | examples/gae/simple/main.py | Python | mit | 6,966 | 0.001005 |
# -*- twisted.conch.test.test_mixin -*-
# Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
import time
from twisted.internet import reactor, protocol
from twisted.trial import unittest
from twisted.test.proto_helpers import StringTransport
from twisted.conch import mixin
class TestBufferingProto(mixin.BufferingMixin):
scheduled = False
rescheduled = 0
def schedule(self):
self.scheduled = True
return object()
def reschedule(self, token):
self.rescheduled += 1
class BufferingTest(unittest.TestCase):
def testBuffering(self):
p = TestBufferingProto()
t = p.transport = StringTransport()
self.failIf(p.scheduled)
L = ['foo', 'bar', 'baz', 'quux']
p.write('foo')
self.failUnless(p.scheduled)
self.failIf(p.rescheduled)
for s in L:
n = p.rescheduled
p.write(s)
self.assertEquals(p.rescheduled, n + 1)
self.assertEquals(t.value(), '')
p.flush()
self.assertEquals(t.value(), 'foo' + ''.join(L))
| sorenh/cc | vendor/Twisted-10.0.0/twisted/conch/test/test_mixin.py | Python | apache-2.0 | 1,110 | 0.001802 |
AZ09 = ["A","B","C","D"]
MorseAZ09 = [".-","-...","-.-.","-.."]
def str2morse(string):
string = string.upper()
ret = ""
for c in string:
ret += MorseAZ09[AZ09.index(c)] +" "
return ret
# alphanumeric to morse code dictionary
AN2Morse = {"A":".-",
"B":"-...",
"C":"-.-.",
"D":"-..",
"E":".",
"F":"..-.",
"G":"--.",
"H":"....",
"I":"..",
"J":".---",
"K":"-.-",
"L":".-..",
"M":"--",
"N":"-.",
"O":"---",
"P":".--.",
"Q":"--.-",
"R":".-.",
"S":"...",
"T":"-",
"U":"..-",
"V":"...-",
"W":".--",
"X":"-..-",
"Y":"-.--",
"Z":"--..",
"1":".----",
"2":"..---",
"3":"...--",
"4":"....-",
"5":".....",
"6":"-....",
"7":"--...",
"8":"---..",
"9":"----.",
"0":"-----",
" ":" ",
#"\\":"-.-.-",
"!":"-.-.--",
"@":".--.-.",
#"#":"--.-.",
"$":"...-..-",
#"%":"---.-",
#"^":"",
"&":".-...",
#"*":"...-.",
"(":"-.--.",
")":"-.--.-",
"-":"-....-",
":":"---...",
'"':".-..-.",
"'":".----.",
"+":".-.-.",
#"_":"",
"?":"..--..",
".":".......",#beacuse the morse code for '.' is the same as the stop!
"/":"-..-.",
#">":"-.---",
#"<":"-.--.",
#";":"",
",":"--..--",
#"~":".---..",
#"`":"-..-.-",
"=":"-...-",
#"|":"--.-.-",
"{":"-.--.",
"}":"-.--.-",
"[":"-.--.",
"]":"-.--.-", #all brackets and parentheses have the same code
#commented out keys with values are from here:
#http://www.tandemmaster.org/morse_code.html
}
Morse2AN = {v:k for (k,v) in AN2Morse.items()}
splitLetter = " "
def an2morse(string):
return [AN2Morse[c] for c in string.upper()]
def morse2bit(morseList):
bitList = []
for ch in morseList:
for elem in ch:
if elem == ".":
bitList.append("1")
elif elem == "-":
bitList += ["1", "1", "1"]
elif elem == " ":
bitList.append("0")
bitList.append("0") # end of dot or dash
bitList += ["0", "0"] # end of character
return bitList
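# Worked example of the encoding above (for reference):
#   an2morse("AB") == ['.-', '-...']
#   morse2bit(['.-']) == ['1', '0', '1', '1', '1', '0', '0', '0']
# A dot becomes a single '1' and a dash three '1's; every dot/dash is followed
# by one '0', and two further '0's are appended at the end of each character.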
def seq2tuple(onOffSeq):
tupleList = []
start0 = start1 = 0
while True:
try:
start1 = onOffSeq.index('1', start0)
tupleList.append(('0', start1-start0))
start0 = onOffSeq.index('0', start1)
tupleList.append(('1', start0-start1))
except:
if len(tupleList) > 0 and tupleList[0][0] == '0':
tupleList = tupleList[1:]
return tupleList
def tuple2bitData(tupleList):
bitDataList = [] # ex: [('1',1), ('0',3), ('1',3), ...]
lenOfDot = findLenOfDot(tupleList)
newList = removeNoise(tupleList,lenOfDot)
for e in newList:
ref = e[1] / lenOfDot
l = 7 if ref > 5 else 3 if ref > 2 else 1
bitDataList.append((e[0], l))
return bitDataList
def removeNoise(tupleList, lenOfDot):
tmp = []
for e in tupleList:
if e[1] / lenOfDot > 0.5:
tmp.append(e)
if len(tmp) < 2:
return tmp
ret = [tmp[0]]
for i in range(1, len(tmp)):
if ret[-1][0] == tmp[i][0]:
ret[-1] = (ret[-1][0], ret[-1][1] + tmp[i][1])
else:
ret.append(tmp[i])
return ret
def findLenOfDot(tupleList):
listOfOne = [e[1] for e in tupleList if e[0] == '1']
avg = sum(listOfOne) / len(listOfOne)
listOfDot = [e for e in listOfOne if e < avg]
return sum(listOfDot) / len(listOfDot)
def bitData2morse(bitDataList):
morseList = []
ch = ''
for e in bitDataList:
if e[0] == '0' or e[0] == False:
if e[1] != 1 and ch != '':
morseList.append(ch)
ch = ''
if e[1] >= 6:
morseList.append(" ")
elif e[0] == '1' or e[0] == True:
if e[1] == 1:
ch += '.'
elif e[1] == 3:
ch += '-'
if ch != '':
morseList.append(ch)
return morseList
def morse2an(morseList):
return "".join([Morse2AN[m] for m in morseList])
def an2bit(string):
return morse2bit(an2morse(string))
def seq2an(onOffSeq):
return morse2an(bitData2morse(tuple2bitData(seq2tuple(onOffSeq))))
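# End-to-end sketch (for reference): encoding and decoding are inverses for
# plain letters, e.g.
#   bits = an2bit("SOS")            # list of '1'/'0' strings
#   seq2an("".join(bits)) == "SOS"
# The decoder estimates the dot length from the runs of '1's, so it also
# tolerates moderately noisy timing in captured on/off sequences.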
| hgijeon/NetworkLayer | morse.py | Python | gpl-2.0 | 5,184 | 0.021991 |
#! /usr/bin/env python
"""Script to find missing GLUT entry points"""
from OpenGL import GLUT
import subprocess, re
func_finder = re.compile( 'FGAPIENTRY (\w+)\(' )
constant_finder = re.compile( '#define\W+([0-9a-zA-Z_]+)\W+((0x)?\d+)' )
INCLUDE_DIR = '/usr/include/GL'
def defined( ):
"""Grep FGAPIENTRY headers from /usr/include/GL"""
pipe = subprocess.Popen( 'grep -r FGAPIENTRY %(INCLUDE_DIR)s/*'%globals(), shell=True, stdout=subprocess.PIPE )
stdout,stderr = pipe.communicate()
return stdout
def constants():
pipe = subprocess.Popen( 'grep -r "#define" %(INCLUDE_DIR)s/*glut*'%globals(), shell=True, stdout=subprocess.PIPE )
stdout,stderr = pipe.communicate()
return stdout
def main():
headers = {}
for line in defined().splitlines():
match = func_finder.search( line )
if match:
headers[match.group(1)] = line.split(':',1)[0]
for key in headers.keys():
if hasattr( GLUT, key ):
del headers[key]
import pprint
pprint.pprint( headers )
missing = {}
for line in constants().splitlines():
match = constant_finder.search( line )
if match:
key,value=(match.group(1),match.group(2))
if not hasattr( GLUT, key ):
file = line.split(':',1)[0]
missing.setdefault(file,[]).append( (key,value))
for file,variables in missing.items():
print file
variables.sort()
for key,value in variables:
print '%s=%s'%(key,value)
if __name__ == "__main__":
main()
| frederica07/Dragon_Programming_Process | PyOpenGL-3.0.2/src/missingglut.py | Python | bsd-2-clause | 1,579 | 0.031032 |
# -*- coding: utf-8 -*-
""" Asset
@author: Michael Howden ([email protected])
@date-created: 2011-03-18
Asset Management Functionality
"""
prefix = request.controller
resourcename = request.function
#==============================================================================
response.menu_options = [
#[T("Home"), False, URL(r=request, c="asset", f="index")],
[T("Assets"), False, URL(r=request, c="asset", f="asset"),
[
[T("List"), False, URL(r=request, c="asset", f="asset")],
[T("Add"), False, URL(r=request, c="asset", f="asset", args="create")],
]],
[T("Catalog Items"), False, URL(r=request, c="supply", f="item"),
[
[T("List"), False, URL(r=request, c="supply", f="item")],
[T("Add"), False, URL(r=request, c="supply", f="item", args="create")],
]],
]
def index():
"""
"""
module_name = deployment_settings.modules[prefix].name_nice
response.title = module_name
return dict(module_name=module_name)
#==============================================================================
def shn_asset_rheader(r):
""" Resource Header for Items """
if r.representation == "html":
asset = r.record
if asset:
tabs = [
(T("Edit Details"), None),
(T("Assignments"), "assign"),
]
rheader_tabs = s3_rheader_tabs(r, tabs)
item = db.asset_asset.item_id.represent(asset.item_id)
rheader = DIV(TABLE(TR( TH("%s: " % T("Asset Number")),
asset.number,
TH("%s: " % T("Item")), item,
),
),
rheader_tabs
)
return rheader
return None
#==============================================================================
def asset():
""" RESTful CRUD controller """
tablename = "%s_%s" % (prefix, resourcename)
table = db[tablename]
return s3_rest_controller(prefix, resourcename, rheader=shn_asset_rheader)
# END ========================================================================= | sinsai/Sahana_eden | controllers/asset.py | Python | mit | 2,306 | 0.006071 |
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Simple, end-to-end, LeNet-5-like convolutional MNIST model example.
This should achieve a test error of 0.7%. Please keep this model as simple and
linear as possible, it is meant as a tutorial for simple convolutional models.
Run with --self_test on the command line to execute a short self-test.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gzip
import os
import sys
import time
import numpy
import tensorflow as tf
from six.moves import urllib
from six.moves import xrange # pylint: disable=redefined-builtin
SOURCE_URL = 'http://yann.lecun.com/exdb/mnist/'
WORK_DIRECTORY = 'data'
IMAGE_SIZE = 28
NUM_CHANNELS = 1
PIXEL_DEPTH = 255
NUM_LABELS = 10
VALIDATION_SIZE = 5000 # Size of the validation set.
SEED = 66478 # Set to None for random seed.
BATCH_SIZE = 64
NUM_EPOCHS = 10
EVAL_BATCH_SIZE = 64
EVAL_FREQUENCY = 100 # Number of steps between evaluations.
tf.app.flags.DEFINE_boolean("self_test", False, "True if running a self test.")
FLAGS = tf.app.flags.FLAGS
def maybe_download(filename):
"""Download the data from Yann's web, unless it's already here."""
if not tf.gfile.Exists(WORK_DIRECTORY):
tf.gfile.MakeDirs(WORK_DIRECTORY)
filepath = os.path.join(WORK_DIRECTORY, filename)
if not tf.gfile.Exists(filepath):
filepath, _ = urllib.request.urlretrieve(SOURCE_URL + filename, filepath)
with tf.gfile.GFile(filepath) as f:
size = f.Size()
print('Successfully downloaded', filename, size, 'bytes.')
return filepath
def extract_data(filename, num_images):
"""Extract the images into a 4D tensor [image index, y, x, channels].
Values are rescaled from [0, 255] down to [-0.5, 0.5].
"""
print('Extracting', filename)
with gzip.open(filename) as bytestream:
bytestream.read(16)
buf = bytestream.read(IMAGE_SIZE * IMAGE_SIZE * num_images)
data = numpy.frombuffer(buf, dtype=numpy.uint8).astype(numpy.float32)
data = (data - (PIXEL_DEPTH / 2.0)) / PIXEL_DEPTH
data = data.reshape(num_images, IMAGE_SIZE, IMAGE_SIZE, 1)
return data
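# Note for reference: with PIXEL_DEPTH = 255, the rescaling above maps a raw
# pixel value of 0 to -0.5 and 255 to 0.5 (i.e. (value - 127.5) / 255).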
def extract_labels(filename, num_images):
"""Extract the labels into a vector of int64 label IDs."""
print('Extracting', filename)
with gzip.open(filename) as bytestream:
bytestream.read(8)
buf = bytestream.read(1 * num_images)
labels = numpy.frombuffer(buf, dtype=numpy.uint8).astype(numpy.int64)
return labels
def fake_data(num_images):
"""Generate a fake dataset that matches the dimensions of MNIST."""
data = numpy.ndarray(
shape=(num_images, IMAGE_SIZE, IMAGE_SIZE, NUM_CHANNELS),
dtype=numpy.float32)
labels = numpy.zeros(shape=(num_images,), dtype=numpy.int64)
for image in xrange(num_images):
label = image % 2
data[image, :, :, 0] = label - 0.5
labels[image] = label
return data, labels
def error_rate(predictions, labels):
"""Return the error rate based on dense predictions and sparse labels."""
return 100.0 - (
100.0 *
numpy.sum(numpy.argmax(predictions, 1) == labels) /
predictions.shape[0])
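# Quick illustrative check of error_rate (not part of the tutorial): with
# predictions [[0.1, 0.9], [0.8, 0.2]] and labels [1, 0], the argmax matches
# both rows, so the error rate is 0.0; flipping one label would make it 50.0.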
def main(argv=None): # pylint: disable=unused-argument
if FLAGS.self_test:
print('Running self-test.')
train_data, train_labels = fake_data(256)
validation_data, validation_labels = fake_data(EVAL_BATCH_SIZE)
test_data, test_labels = fake_data(EVAL_BATCH_SIZE)
num_epochs = 1
else:
# Get the data.
train_data_filename = maybe_download('train-images-idx3-ubyte.gz')
train_labels_filename = maybe_download('train-labels-idx1-ubyte.gz')
test_data_filename = maybe_download('t10k-images-idx3-ubyte.gz')
test_labels_filename = maybe_download('t10k-labels-idx1-ubyte.gz')
# Extract it into numpy arrays.
train_data = extract_data(train_data_filename, 60000)
train_labels = extract_labels(train_labels_filename, 60000)
test_data = extract_data(test_data_filename, 10000)
test_labels = extract_labels(test_labels_filename, 10000)
# Generate a validation set.
validation_data = train_data[:VALIDATION_SIZE, ...]
validation_labels = train_labels[:VALIDATION_SIZE]
train_data = train_data[VALIDATION_SIZE:, ...]
train_labels = train_labels[VALIDATION_SIZE:]
num_epochs = NUM_EPOCHS
train_size = train_labels.shape[0]
# This is where training samples and labels are fed to the graph.
# These placeholder nodes will be fed a batch of training data at each
# training step using the {feed_dict} argument to the Run() call below.
train_data_node = tf.placeholder(
tf.float32,
shape=(BATCH_SIZE, IMAGE_SIZE, IMAGE_SIZE, NUM_CHANNELS))
train_labels_node = tf.placeholder(tf.int64, shape=(BATCH_SIZE,))
eval_data = tf.placeholder(
tf.float32,
shape=(EVAL_BATCH_SIZE, IMAGE_SIZE, IMAGE_SIZE, NUM_CHANNELS))
# The variables below hold all the trainable weights. They are passed an
  # initial value which will be assigned when we call:
# {tf.initialize_all_variables().run()}
conv1_weights = tf.Variable(
tf.truncated_normal([5, 5, NUM_CHANNELS, 32], # 5x5 filter, depth 32.
stddev=0.1,
seed=SEED))
conv1_biases = tf.Variable(tf.zeros([32]))
conv2_weights = tf.Variable(
tf.truncated_normal([5, 5, 32, 64],
stddev=0.1,
seed=SEED))
conv2_biases = tf.Variable(tf.constant(0.1, shape=[64]))
fc1_weights = tf.Variable( # fully connected, depth 512.
tf.truncated_normal(
[IMAGE_SIZE // 4 * IMAGE_SIZE // 4 * 64, 512],
stddev=0.1,
seed=SEED))
fc1_biases = tf.Variable(tf.constant(0.1, shape=[512]))
fc2_weights = tf.Variable(
tf.truncated_normal([512, NUM_LABELS],
stddev=0.1,
seed=SEED))
fc2_biases = tf.Variable(tf.constant(0.1, shape=[NUM_LABELS]))
# We will replicate the model structure for the training subgraph, as well
# as the evaluation subgraphs, while sharing the trainable parameters.
def model(data, train=False):
"""The Model definition."""
# 2D convolution, with 'SAME' padding (i.e. the output feature map has
# the same size as the input). Note that {strides} is a 4D array whose
# shape matches the data layout: [image index, y, x, depth].
conv = tf.nn.conv2d(data,
conv1_weights,
strides=[1, 1, 1, 1],
padding='SAME')
# Bias and rectified linear non-linearity.
relu = tf.nn.relu(tf.nn.bias_add(conv, conv1_biases))
# Max pooling. The kernel size spec {ksize} also follows the layout of
# the data. Here we have a pooling window of 2, and a stride of 2.
pool = tf.nn.max_pool(relu,
ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1],
padding='SAME')
conv = tf.nn.conv2d(pool,
conv2_weights,
strides=[1, 1, 1, 1],
padding='SAME')
relu = tf.nn.relu(tf.nn.bias_add(conv, conv2_biases))
pool = tf.nn.max_pool(relu,
ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1],
padding='SAME')
# Reshape the feature map cuboid into a 2D matrix to feed it to the
# fully connected layers.
pool_shape = pool.get_shape().as_list()
reshape = tf.reshape(
pool,
[pool_shape[0], pool_shape[1] * pool_shape[2] * pool_shape[3]])
# Fully connected layer. Note that the '+' operation automatically
# broadcasts the biases.
hidden = tf.nn.relu(tf.matmul(reshape, fc1_weights) + fc1_biases)
# Add a 50% dropout during training only. Dropout also scales
# activations such that no rescaling is needed at evaluation time.
if train:
hidden = tf.nn.dropout(hidden, 0.5, seed=SEED)
return tf.matmul(hidden, fc2_weights) + fc2_biases
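  # Shape walk-through for the default 28x28 inputs (for reference): each
  # 2x2, stride-2 max-pool halves the spatial dimensions, so 28 -> 14 -> 7,
  # and the flattened vector fed into fc1 has 7 * 7 * 64 = 3136 elements,
  # matching the [IMAGE_SIZE // 4 * IMAGE_SIZE // 4 * 64, 512] weight shape
  # declared above.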
# Training computation: logits + cross-entropy loss.
logits = model(train_data_node, True)
loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
logits, train_labels_node))
# L2 regularization for the fully connected parameters.
regularizers = (tf.nn.l2_loss(fc1_weights) + tf.nn.l2_loss(fc1_biases) +
tf.nn.l2_loss(fc2_weights) + tf.nn.l2_loss(fc2_biases))
# Add the regularization term to the loss.
loss += 5e-4 * regularizers
# Optimizer: set up a variable that's incremented once per batch and
# controls the learning rate decay.
batch = tf.Variable(0)
# Decay once per epoch, using an exponential schedule starting at 0.01.
learning_rate = tf.train.exponential_decay(
0.01, # Base learning rate.
batch * BATCH_SIZE, # Current index into the dataset.
train_size, # Decay step.
0.95, # Decay rate.
staircase=True)
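  # For reference: decay_steps equals the training-set size (55000 after the
  # validation split), so the rate is multiplied by 0.95 once per epoch and,
  # with staircase=True, stays constant within each epoch: 0.01 * 0.95**epoch.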
# Use simple momentum for the optimization.
optimizer = tf.train.MomentumOptimizer(learning_rate,
0.9).minimize(loss,
global_step=batch)
# Predictions for the current training minibatch.
train_prediction = tf.nn.softmax(logits)
# Predictions for the test and validation, which we'll compute less often.
eval_prediction = tf.nn.softmax(model(eval_data))
# Small utility function to evaluate a dataset by feeding batches of data to
# {eval_data} and pulling the results from {eval_predictions}.
# Saves memory and enables this to run on smaller GPUs.
def eval_in_batches(data, sess):
"""Get all predictions for a dataset by running it in small batches."""
size = data.shape[0]
if size < EVAL_BATCH_SIZE:
raise ValueError("batch size for evals larger than dataset: %d" % size)
predictions = numpy.ndarray(shape=(size, NUM_LABELS), dtype=numpy.float32)
for begin in xrange(0, size, EVAL_BATCH_SIZE):
end = begin + EVAL_BATCH_SIZE
if end <= size:
predictions[begin:end, :] = sess.run(
eval_prediction,
feed_dict={eval_data: data[begin:end, ...]})
else:
batch_predictions = sess.run(
eval_prediction,
feed_dict={eval_data: data[-EVAL_BATCH_SIZE:, ...]})
predictions[begin:, :] = batch_predictions[begin - size:, :]
return predictions
# Create a local session to run the training.
start_time = time.time()
with tf.Session() as sess:
# Run all the initializers to prepare the trainable parameters.
tf.initialize_all_variables().run()
print('Initialized!')
# Loop through training steps.
for step in xrange(int(num_epochs * train_size) // BATCH_SIZE):
# Compute the offset of the current minibatch in the data.
# Note that we could use better randomization across epochs.
offset = (step * BATCH_SIZE) % (train_size - BATCH_SIZE)
batch_data = train_data[offset:(offset + BATCH_SIZE), ...]
batch_labels = train_labels[offset:(offset + BATCH_SIZE)]
# This dictionary maps the batch data (as a numpy array) to the
      # node in the graph it should be fed to.
feed_dict = {train_data_node: batch_data,
train_labels_node: batch_labels}
# Run the graph and fetch some of the nodes.
_, l, lr, predictions = sess.run(
[optimizer, loss, learning_rate, train_prediction],
feed_dict=feed_dict)
if step % EVAL_FREQUENCY == 0:
elapsed_time = time.time() - start_time
start_time = time.time()
print('Step %d (epoch %.2f), %.1f ms' %
(step, float(step) * BATCH_SIZE / train_size,
1000 * elapsed_time / EVAL_FREQUENCY))
print('Minibatch loss: %.3f, learning rate: %.6f' % (l, lr))
print('Minibatch error: %.1f%%' % error_rate(predictions, batch_labels))
print('Validation error: %.1f%%' % error_rate(
eval_in_batches(validation_data, sess), validation_labels))
sys.stdout.flush()
# Finally print the result!
test_error = error_rate(eval_in_batches(test_data, sess), test_labels)
print('Test error: %.1f%%' % test_error)
if FLAGS.self_test:
print('test_error', test_error)
assert test_error == 0.0, 'expected 0.0 test_error, got %.2f' % (
test_error,)
if __name__ == '__main__':
tf.app.run()
| DailyActie/Surrogate-Model | 01-codes/tensorflow-master/tensorflow/models/image/mnist/convolutional.py | Python | mit | 13,852 | 0.000361 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import pytest
import simple_graph
@pytest.fixture(scope="function")
def create_graph():
new_graph = simple_graph.G()
return new_graph
@pytest.fixture(scope="function")
def build_graph(create_graph):
jerry = simple_graph.Node('Jerry', 5)
allen = simple_graph.Node('Allen', 8)
six = simple_graph.Node('6', 6)
# jerry2allen = simple_graph.Edge(jerry, allen)
# allen2six = simple_graph.Edge(allen, six)
create_graph.add_node(jerry)
create_graph.add_node(allen)
create_graph.add_node(six)
create_graph.add_edge(jerry, allen)
create_graph.add_edge(allen, six)
return create_graph
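# The build_graph fixture above produces a three-node path, Jerry - Allen - 6,
# with edges (Jerry, Allen) and (Allen, 6); the tests below rely on that shape.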
# g.nodes(): return a list of all nodes in the graph
def test_nodes(build_graph):
build_graph_node_names = [i.name for i in build_graph.nodes()]
assert set(build_graph_node_names) == set(['Jerry', 'Allen', '6'])
# g.edges(): return a list of all edges in the graph
def test_edges(build_graph):
build_graph_edge_names = [(i[0].name, i[1].name) for i in build_graph.edges()]
assert set(build_graph_edge_names) == set([('Jerry', 'Allen'), ('Allen', '6')])
# g.add_node(n): adds a new node 'n' to the graph
def test_add_node(build_graph):
new_node = simple_graph.Node('Jimmy', 0)
build_graph.add_node(new_node)
assert new_node in build_graph.nodes()
# g.add_edge(n1, n2): adds a new edge to the graph connecting 'n1' and 'n2', if
# either n1 or n2 are not already present in the graph, they should be added.
def test_add_edge(build_graph):
new_node1 = simple_graph.Node('new1', 1)
new_node2 = simple_graph.Node('new2', 2)
build_graph.add_node(new_node1)
build_graph.add_node(new_node2)
build_graph.add_edge(new_node1, new_node2)
    assert (new_node1, new_node2) in build_graph.edges()
def test_add_edge_from_new_nodes(build_graph):
    new_node1 = simple_graph.Node('new1', 1)
    new_node2 = simple_graph.Node('new2', 2)
    build_graph.add_edge(new_node1, new_node2)
    assert (new_node1, new_node2) in build_graph.edges()
# g.del_node(n): deletes the node 'n' from the graph, raises an error if no
# such node exists
def test_del_node(build_graph):
current_node = build_graph.nodes()[0]
build_graph.del_node(current_node)
assert current_node not in build_graph.nodes()
# we expect edges to be consistent and updated with nodes
    assert current_node not in [
        node for edge in build_graph.edges() for node in (edge[0], edge[1])
    ]
def test_del_nonexistent_node(build_graph):
new_node = simple_graph.Node('new', 1)
# not in build_graph
with pytest.raises(ValueError):
assert build_graph.del_node(new_node)
# g.del_edge(n1, n2): deletes the edge connecting 'n1' and 'n2' from the graph,
# raises an error if no such edge exists
def test_del_edge(build_graph):
current_edge = build_graph.edges()[0]
    build_graph.del_edge(current_edge[0], current_edge[1])
assert current_edge not in build_graph.edges()
def test_del_nonexistent_edge(build_graph):
new_node1 = simple_graph.Node('new1', 1)
new_node2 = simple_graph.Node('new2', 2)
    with pytest.raises(ValueError):
        assert build_graph.del_edge(new_node1, new_node2)
# g.has_node(n): True if node 'n' is contained in the graph, False if not.
def test_has_node(build_graph):
contained_node = build_graph.nodes()[0]
    assert build_graph.has_node(contained_node)
def test_node_not_contained(build_graph):
new_node = simple_graph.Node('new', 1)
    assert not build_graph.has_node(new_node)
# g.neighbors(n): returns the list of all nodes connected to 'n' by edges,
# raises an error if n is not in g
def test_neighbors(build_graph):
    edge = build_graph.edges()[0]
    assert edge[1] in build_graph.neighbors(edge[0])
# g.adjacent(n1, n2): returns True if there is an edge connecting n1 and n2,
# False if not, raises an error if either of the supplied nodes are not in g
def test_adjacent(build_graph):
    edge = build_graph.edges()[0]
    assert build_graph.adjacent(edge[0], edge[1])
| jesseklein406/data-structures | tests/test_simple_graph.py | Python | mit | 3,955 | 0.000759 |
import json
import re
from collections import defaultdict
from django.conf import settings
from django.db.models import Count
from django.http import HttpResponse
from django.shortcuts import get_object_or_404
from django.template import Context, loader
from django.contrib.contenttypes.models import ContentType
from taggit.models import TaggedItem
from catmaid.control.common import makeJSON_legacy_list
from catmaid.control.project import get_project_qs_for_user, extend_projects
from catmaid.models import DataView, DataViewType, Project, Stack, ProjectStack
def get_data_view_type_comment( request ):
""" Return the comment of a specific data view type.
"""
requested_id = request.REQUEST["data_view_type_id"]
if requested_id == "":
text = "Please select a valid data view type."
else:
try:
data_view_type_id = int(requested_id)
text = DataViewType.objects.get(pk=data_view_type_id).comment
except:
text = "Sorry, the configuration help text couldn't be retrieved."
result = { 'comment':text }
return HttpResponse(json.dumps(result), content_type="application/json")
def dataview_to_dict( dataview ):
""" Creates a dicitonary of the dataviews' properties.
"""
return {
'id': dataview.id,
'title': dataview.title,
'code_type': dataview.data_view_type.code_type,
'config': dataview.config,
'note': dataview.comment
}
def get_data_view_type( request, data_view_id ):
""" Returns the type of a particular data view.
"""
dv = get_object_or_404(DataView, pk=data_view_id)
code_type = dv.data_view_type.code_type
return HttpResponse(json.dumps({ 'type': code_type }))
def get_available_data_views( request ):
""" Returns a list of all available data views.
"""
all_views = DataView.objects.order_by("position")
dataviews = map(dataview_to_dict, all_views)
return HttpResponse(json.dumps(makeJSON_legacy_list(dataviews)), content_type="application/json")
def get_default_properties( request ):
""" Return the properies of the default data view.
"""
default = DataView.objects.filter(is_default=True)[0]
default = dataview_to_dict( default )
return HttpResponse(json.dumps(default), content_type="application/json")
def get_default_data_view( request ):
""" Return the data view that is marked as the default. If there
is more than one view marked as default, the first one is returned.
"""
default = DataView.objects.filter(is_default=True)[0]
return get_data_view( request, default.id )
def natural_sort(l, field):
""" Natural sorting of a list wrt. to its 'title' attribute.
Based on: http://stackoverflow.com/questions/4836710
"""
convert = lambda text: int(text) if text.isdigit() else text.lower()
alphanum_key = lambda key: [ convert(c) for c in re.split('([0-9]+)', getattr(key, field)) ]
return sorted(l, key = alphanum_key)
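# Illustrative behaviour of natural_sort (not part of the original module):
# plain string sorting would place "stack10" before "stack2", whereas
# natural_sort() compares the embedded integers numerically, e.g.
#
#   >>> class Item(object):
#   ...     def __init__(self, title):
#   ...         self.title = title
#   >>> [i.title for i in natural_sort([Item('stack10'), Item('stack2')], 'title')]
#   ['stack2', 'stack10']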
def get_data_view( request, data_view_id ):
""" Returns a rendered template for the given view.
"""
# Load the template
dv = get_object_or_404(DataView, pk=data_view_id)
code_type = dv.data_view_type.code_type
template = loader.get_template( "catmaid/" + code_type + ".html" )
# Get project information and pass all to the template context
config = json.loads( dv.config )
# Get all the projects that are visible for the current user
projects = get_project_qs_for_user(request.user).prefetch_related('stacks')
# If requested, filter projects by tags. Otherwise, get all.
if "filter_tags" in config:
filter_tags = config["filter_tags"]
# Only get projects that have all the filter tags set
projects = projects.filter( tags__name__in=filter_tags ).annotate(
repeat_count=Count("id") ).filter( repeat_count=len(filter_tags) )
# Build a stack index
stack_index = defaultdict(list)
stacks_of = defaultdict(list)
for p in projects:
for s in p.stacks.all():
stack_index[s.id] = s
stacks_of[p.id].append(s)
    # Extend the project list with additional information like editability
projects = extend_projects( request.user, projects )
# Sort by default
if "sort" not in config or config["sort"] == True:
projects = natural_sort( projects, "title" )
# Build project index
project_index = dict([(p.id, p) for p in projects])
project_ids = set(project_index.keys())
# Build tag index
ct = ContentType.objects.get_for_model(Project)
tag_links = TaggedItem.objects.filter(content_type=ct) \
.values_list('object_id', 'tag__name')
tag_index = defaultdict(set)
for pid, t in tag_links:
if pid in project_ids:
tag_index[t].add(pid)
context = Context({
'data_view': dv,
'projects': projects,
'config': config,
'settings': settings,
'tag_index': tag_index,
'project_index': project_index,
'stack_index': stack_index,
'stacks_of': stacks_of,
'STATIC_URL': settings.STATIC_URL,
})
return HttpResponse( template.render( context ) )
| aschampion/CATMAID | django/applications/catmaid/control/data_view.py | Python | gpl-3.0 | 5,209 | 0.011327 |
'''Handle configuration for zarkov.
We support full configuration on the command line with defaults supplied by
either an .ini-style config file or a yaml (and thus json) config file.
'''
import sys
import logging.config
from optparse import OptionParser
from ConfigParser import ConfigParser
import yaml
import colander
import ming
log = logging.getLogger(__name__)
re_zmq = colander.Regex(
r'(tcp|inproc)://(.+?)(:\d+)?',
'Invalid zeromq URI')
re_ip_port = colander.Regex(
r'(.+?)(:\d+)?',
'Invalid address')
re_mongodb = colander.Regex(
r'(mongodb|mim)://(.+?)(:\d+)?',
'Invalid mongodb URI')
class BackdoorSchema(colander.MappingSchema):
command = colander.SchemaNode(colander.String())
port=colander.SchemaNode(colander.Int())
class BackdoorsSchema(colander.SequenceSchema):
backdoor = BackdoorSchema()
class LogStreamPluginSchema(colander.MappingSchema):
entry_point = colander.SchemaNode(colander.String())
port = colander.SchemaNode(colander.Int())
class LogStreamSchema(colander.SequenceSchema):
plugin = LogStreamPluginSchema()
class ZeroMQURIs(colander.SequenceSchema):
uri = colander.SchemaNode(
colander.String(), validator=re_zmq)
class LoadBalanceSchema(colander.MappingSchema):
incoming_bind = colander.SchemaNode(
colander.String(), validator=re_zmq,
missing='tcp://0.0.0.0:6543')
outgoing_uris = ZeroMQURIs()
class WebEventSchema(colander.MappingSchema):
bind = colander.SchemaNode(
colander.String(), validator=re_ip_port)
class DBSchema(colander.MappingSchema):
name = colander.SchemaNode(colander.String())
master = colander.SchemaNode(
colander.String(), validator=re_mongodb,
missing='mongodb://localhost:27017')
database = colander.SchemaNode(colander.String())
use_gevent = colander.SchemaNode(colander.Bool(), missing=True)
class ExtraDBSchema(colander.SequenceSchema):
dbs = DBSchema()
class ZMRConfigSchema(colander.MappingSchema):
req_uri = colander.SchemaNode(
colander.String(), validator=re_zmq,
missing='tcp://127.0.0.1:5555')
req_bind = colander.SchemaNode(
colander.String(), validator=re_zmq,
missing='tcp://0.0.0.0:5555')
worker_uri = colander.SchemaNode(
colander.String(), validator=re_zmq,
missing='tcp://0.0.0.0')
reduce_count = colander.SchemaNode(
colander.Int(), missing=256)
event_limit = colander.SchemaNode(
colander.Int(), missing=100000)
job_root = colander.SchemaNode(
colander.String(), missing='/tmp/zmr')
map_chunk_size = colander.SchemaNode(
colander.Int(), missing=5e6)
map_chunks_per_page = colander.SchemaNode(
colander.Int(), missing=20)
outstanding_chunks = colander.SchemaNode(
colander.Int(), missing=4)
max_chunk_timeout = colander.SchemaNode(
colander.Int(), missing=600)
request_greenlets = colander.SchemaNode(
colander.Int(), missing=16)
compress = colander.SchemaNode(
colander.Int(), missing=0)
src_port = colander.SchemaNode(
colander.Int(), missing=0)
sink_port = colander.SchemaNode(
colander.Int(), missing=0)
processes_per_worker = colander.SchemaNode(
colander.Int(), missing=0)
requests_per_worker_process = colander.SchemaNode(
colander.Int(), missing=256)
suicide_level = colander.SchemaNode(
colander.Int(), missing=3 * 2**20)
class ConfigSchema(colander.MappingSchema):
bson_uri = colander.SchemaNode(
colander.String(), validator=re_zmq,
missing='tcp://127.0.0.1:6543')
json_uri = colander.SchemaNode(
colander.String(), validator=re_zmq,
        missing='tcp://127.0.0.1:6544')
bson_bind_address = colander.SchemaNode(
colander.String(), validator=re_zmq,
missing='tcp://0.0.0.0:6543')
json_bind_address = colander.SchemaNode(
colander.String(), validator=re_zmq,
missing='tcp://0.0.0.0:6544')
publish_bind_address = colander.SchemaNode(
colander.String(), validator=re_zmq,
missing='tcp://0.0.0.0:6545')
web_port = colander.SchemaNode(colander.Int(), missing=8081)
backdoor = BackdoorsSchema(missing=[])
mongo_uri = colander.SchemaNode(
colander.String(), validator=re_mongodb,
missing='mongodb://localhost:27017')
mongo_database = colander.SchemaNode(
colander.String(), missing='zarkov')
mongo_username = colander.SchemaNode(
colander.String(), missing=None)
mongo_password = colander.SchemaNode(
colander.String(), missing=None)
verbose = colander.SchemaNode(
colander.Bool(), missing=False)
incremental = colander.SchemaNode(
colander.Bool(), missing=True)
num_event_servers = colander.SchemaNode(
colander.Int(), missing=0)
num_event_logs = colander.SchemaNode(
colander.Int(), missing=4)
journal = colander.SchemaNode(
colander.String(), missing='journal')
journal_file_size = colander.SchemaNode(
colander.Int(), missing=2**18)
journal_min_files = colander.SchemaNode(
colander.Int(), missing=4)
zmr = ZMRConfigSchema(missing=None)
logstream = LogStreamSchema(missing=None)
loadbalance = LoadBalanceSchema(missing=None)
webevent = WebEventSchema(missing=None)
extra_dbs = ExtraDBSchema(missing=[])
extra = colander.SchemaNode(colander.Mapping(), missing={})
flush_mmap = colander.SchemaNode(
colander.Bool(), missing=False)
def configure(args=None):
'''Load the options and configure the system'''
if args is None: args = sys.argv
options, args = get_options(args)
if options.verbose:
log.info('Settings:')
for k,v in sorted(options.__dict__.items()):
log.info(' %s: %r', k, v)
ming_config = {
'ming.zarkov.master':options.mongo_uri,
'ming.zarkov.database':options.mongo_database,
'ming.zarkov.use_gevent':True}
for dbinfo in options.extra_dbs:
dbinfo = dict(dbinfo)
prefix = 'ming.%s.' % dbinfo.pop('name')
for k,v in dbinfo.items():
ming_config[prefix + k] = v
if options.mongo_username:
ming_config['ming.zarkov.authenticate.name'] = options.mongo_username
if options.mongo_username:
ming_config['ming.zarkov.authenticate.password'] = options.mongo_password
ming.configure(**ming_config)
if options.pdb:
sys.excepthook = postmortem_hook
return options, args
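# Minimal usage sketch for configure() (not part of the original module; the
# import path and the YAML file name are assumed for illustration):
#
#   from zarkov import config
#   options, args = config.configure(['zarkovd', '--config-yaml', 'zarkov.yaml'])
#   print options.mongo_uri, options.web_port
#
# Command-line flags override values loaded from the ini/yaml file, which in
# turn override the ConfigSchema defaults.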
def get_options(argv):
'''Load the options from argv and any config files specified'''
defaults=dict(
bind_address='tcp://0.0.0.0:6543',
backdoor=None,
password=None,
mongo_uri='mongodb://127.0.0.1:27017',
mongo_database='zarkov',
journal='journal',
verbose=False,
incremental=10)
optparser = get_parser(defaults)
options, args = optparser.parse_args(argv)
config_schema = ConfigSchema()
defaults.update(config_schema.deserialize({}))
if options.ini_file:
config = ConfigParser()
config.read(options.ini_file)
log.info('About to configure logging')
logging.config.fileConfig(options.ini_file, disable_existing_loggers=False)
log.info('Configured logging')
if config.has_section('zarkov'):
defaults.update(
(k, eval(v)) for k,v in config.items('zarkov'))
if options.yaml_file:
with open(options.yaml_file) as fp:
yaml_obj = yaml.load(fp.read())
yaml_obj = config_schema.deserialize(yaml_obj)
if yaml_obj:
defaults.update(yaml_obj)
else:
log.warning('No configuration found -- empty yaml file %r?',
options.yaml_file)
optparser = get_parser(defaults)
options, args = optparser.parse_args(argv)
return options, args
def get_parser(defaults):
'''Build a command line OptionParser based on the given defaults'''
optparser = OptionParser(
usage=('%prog [--options]'))
optparser.set_defaults(**defaults)
optparser.add_option(
'-i', '--config-ini', dest='ini_file',
help='Load options from config (.ini) file')
optparser.add_option(
'-y', '--config-yaml', dest='yaml_file',
help='Load options from config (.yaml) file')
optparser.add_option(
'-l', '--listen', dest='bind_address',
help='IP address on which to listen for connections')
optparser.add_option(
'-p', '--port', dest='port',
type='int',
help='Port to listen for connections')
optparser.add_option(
'--password', dest='password',
help='Password to require for connection')
optparser.add_option(
'--mongo-uri', dest='mongo_uri',
help='URI for MongoDB server in which to store data')
optparser.add_option(
'--mongo-database', dest='mongo_database',
help='MongoDB database in which to store data')
optparser.add_option(
'--journal', dest='journal',
help='Filename to use for journalling')
optparser.add_option(
'--pdb', dest='pdb', action='store_true',
help='Drop into pdb on unhandled exceptions')
optparser.add_option(
'--profile', dest='profile',
help='Profile the run into the given filename')
optparser.add_option(
'-v', '--verbose', dest='verbose',
action='store_true')
optparser.add_option(
'-b', '--backdoor', dest='backdoor')
optparser.add_option(
'--incremental', dest='incremental',
type='int',
help=('how many events to log before triggering an incremental aggregation'
' (0 to disable)'))
return optparser
def postmortem_hook(etype, value, tb): # pragma no cover
import pdb, traceback
try:
from IPython.core.debugger import Pdb
sys.stderr.write('Entering post-mortem IPDB shell\n')
p = Pdb(color_scheme='Linux')
p.reset()
p.setup(None, tb)
p.print_stack_trace()
sys.stderr.write('%s: %s\n' % ( etype, value))
p.cmdloop()
p.forget()
# p.interaction(None, tb)
except ImportError:
sys.stderr.write('Entering post-mortem PDB shell\n')
traceback.print_exception(etype, value, tb)
pdb.post_mortem(tb)
| joeywen/zarkov | zarkov/config.py | Python | apache-2.0 | 10,451 | 0.003158 |
import csv
from django.contrib import messages
from django.shortcuts import HttpResponseRedirect, resolve_url, HttpResponse
from django.views.generic import FormView, ListView
from lmgtfy.forms import MainForm
from lmgtfy.helpers import search_bing, check_valid_tld
from lmgtfy.models import Domain, DomainSearch, DomainSearchResult
class MainView(FormView):
template_name = 'main.html'
form_class = MainForm
success_url = '.'
def get_context_data(self, **kwargs):
context = super(MainView, self).get_context_data(**kwargs)
domains_and_latest_counts = []
for domain in Domain.objects.order_by("-id")[:50]:
domain_search_latest = domain.domainsearch_set.all().last()
if not domain_search_latest:
continue
count = domain_search_latest.domainsearchresult_set.count()
domains_and_latest_counts.append((domain.name, count))
context['table_data'] = domains_and_latest_counts
return context
def form_valid(self, form):
data = form.cleaned_data
domain = data['domain_base']
domain_is_whitelisted = check_valid_tld(domain)
if not domain_is_whitelisted:
messages.info(
self.request,
"Sorry, but to limit the cost of running this service, we have not enabled searching this domain name (%s)." % domain
)
return HttpResponseRedirect(resolve_url('home'))
search_done = search_bing(domain)
if not search_done:
messages.info(
self.request,
"This domain has already been requested today! Here is what we've gathered."
)
else:
messages.info(
self.request,
"Gathering results now. They will be displayed shortly."
)
return HttpResponseRedirect(
resolve_url('domain_result', domain)
)
main_view = MainView.as_view()
class SearchResultView(ListView):
template_name = 'result.html'
model = DomainSearchResult
success_url = '.'
def get_queryset(self):
qs = super(SearchResultView, self).get_queryset()
try:
domain = self.kwargs['domain']
fmt = self.kwargs.get('fmt')
except:
raise Exception('Invalid URL parameter has been passed.')
qs = qs.filter(
search_instance__domain__name=domain
).order_by('result')
if fmt:
qs = qs.filter(fmt=fmt)
return qs
def get_context_data(self, **kwargs):
context = super(SearchResultView, self).get_context_data(**kwargs)
domain_name = self.kwargs['domain']
context['domain_name'] = domain_name
context['format'] = self.kwargs.get('fmt')
self.kwargs['fmt'] = None # clear the format
# so that we get a list of all of the formats for the domain
qs = set(self.get_queryset().values_list('fmt', flat=True))
context['file_formats'] = list(qs)
domain = Domain.objects.filter(name=domain_name)
search_being_performed = len(DomainSearch.objects.filter(domain=domain, completed_at=None)) > 0
if search_being_performed:
messages.info(
self.request,
"We're gathering more results right now. This page will refresh in 10 seconds."
)
context['refresh_counter'] = 10
return context
search_result_view = SearchResultView.as_view()
def get_csv(request, domain):
response = HttpResponse(content_type='text/csv')
response['Content-Disposition'] = 'attachment; filename="%s.csv"' % domain
writer = csv.writer(response)
qs = DomainSearchResult.objects.filter(
search_instance__domain__name=domain
).order_by('result').distinct()
writer.writerow(["title", "format", "kilobyte_size", "url"])
for result in qs:
writer.writerow([result.title.encode("utf-8"), result.fmt.encode("utf-8"),
result.size, result.result.encode("utf-8")])
return response
| opendata/lmgtdfy | lmgtfy/views.py | Python | mit | 4,151 | 0.003854 |
#!/usr/bin/env python2.7
import os, sys
from Bio import SeqIO
import argparse
from fast5tools.fxclass import *
#################################################
## Argument Parser
#################################################
parser = argparse.ArgumentParser(description = """
Given path(s) to fasta/fastq file(s) and/or directories containing them,
return sequences filtered by any of the following:
1. read type (2d, template, complement)
2. length
3. mean quality score
4. channel number
5. read number
6. ASIC ID
7. Run ID
8. Device ID
9. Base-calling model
Return in same format as input or choose:
fasta, fastq, qual, intqual, oldfalcon, newfalcon
Note: newfalcon output is fasta with falcon-compatible headers.
For newer Falcon:
>asic_run_device_basecallingmodel/i/0_readlen OtherInfo
Where
i is order it was encountered in
OtherInfo will include readtype,mean quality score, read number, channel number
For oldfalcon output:
>000_000/i/0_readlen OriginalFastaHeader
Where i is number read is encountered in.
TODO: fastaqual, fastaintqual
NOTE:
Fasta can be converted to fastq or quals, BUT the quals will not be correct per se.
First, they will be related to the mean q-score (Q).
Second, they will be rounded to the nearest int.
Thus, the mean q-score in the header/seqname will not be consistent with the mean of the quality scores.
It is related by int(round(Q)).
For now, input files are fasta, fastq, or dirs with them.
TODO: Allow tar/tarlite approach. Allow gzipped. Allow FOFN.
TODO: falconizeFast5DerivedFastx.py for more options and more description/info.
John Urban (2015, 2016)
""", formatter_class = argparse.RawTextHelpFormatter)
parser.add_argument('fastx', metavar='fastx', nargs='+',
type= str,
help='''Paths to as many fasta and/or fastq files and/or directories filled with them as you want.
Assumes all fasta files have '.fasta', '.fa', and/or '.fna' extensions (only accepts these).
Assumes all fastq files have '.fastq' or '.fq' extensions (only accepts these).
Assumes that, given one of the above extensions, the internal formatting is consistent with fasta or fastq accordingly.
If inside dir of dirs with desired files, then can just do "*" to get all files from all dirs.''')
parser.add_argument('-r', '--readtype', default="all",
type= str,
help='''Choose type of fasta to get.
Choices: 'template', 'complement', '2d', 'molecule', 'all', 'MoleQual'.
Default: all.
There is no need to write full word for options - can do: t, c, 2, m, a, M.
CAUTION: for now, if the word given begins with "m" it is "molecule"; if "M", it is MoleQual. This means that 'molequal' will return 'molecule' results. etc.
Molecule returns single fastx for when there is more than 1 record for a given Channel#/Read#/Run_ID/ASIC_ID:
if 2d present, return 2d.
elif complement present with no 2d, return longer of template or complement.
elif only template present, return template.
'MoleQual' is similar to molecule.
It differs only in choosing between template and complement when a 2D is not present.
Instead of choosing the longer one, it chooses the one with a higher quality mean quality score.
NOTE: it is assumed that reads from same molecule (shared Channel#/Read#/Run_ID/ASIC_ID)
are clustered together (i.e. occur consecutively) in given input.
If not, then molecule and MoleQual protocols will not work as expected.''')
parser.add_argument('-i', '--intype', type=str, default='fasta',
help=''' Choices: fasta, fastq, input.
Default: fasta.
Note: input (one or both formats found in input).
Declare which input types are to be explored for filtering.
Since one can direct this script at directories that may contain both fasta and fastq,
this gives an extra level of awareness to explore only a given file type (or both).
One may also want to look at both fasta and fastq, but output only fasta (see -o).
In rare cases, one may want to read in both types, and return the same type (-i input, -o input).
For now all output is directed to stdout, so the latter case is not recommended.
In the future, if output from each given input file can automatically be directed to a similarly named
output file (with .filtered. added in, then it might make more sense.''')
parser.add_argument('-o', '--outtype', type=str, default='fasta',
help = '''Choices: input, fasta, fastq, qual, intqual, falcon.
Default: fasta.
Note: input (whatever format the file comes in as).
See -i for discussion on use cases.
falcon: returns fasta but with fasta headers compatible with FALCON assembler.
TODO:
fastaqual, fastaintqual''')
parser.add_argument('--minlen', type=int, default=0, help='''Only report reads >= minlen. Default: 0 bp.''')
parser.add_argument('--maxlen', type=int, default=int(3e9), help='''Only report reads <= maxlen. Default: 3 billion bp.''')
parser.add_argument('--minq', type=float, default=0, help='''Only report reads with mean quality scores >= Q. Default: 0.''')
parser.add_argument('--maxq', type=float, default=int(10e3), help='''Only report reads with mean quality scores <= Q.
Default: 10000 (this is orders of magnitude higher than normal max which are always < 20)''')
parser.add_argument('--channel', type=str, default=None, help='''Only report reads from given channel number. Default: reports from any/all channels present.''')
parser.add_argument('--readnum', type=str, default=None, help='''Only report reads with given read number. Default: reports from any/all read numbers.''')
parser.add_argument('--asic', type=str, default=None, help='''Only report reads with given asic ID. Default: reports from any/all ASIC IDs present.''')
parser.add_argument('--run', type=str, default=None, help='''Only report reads with given run ID. Default: reports from any/all Run IDs present.''')
parser.add_argument('--device', type=str, default=None, help='''Only report reads with given device ID. Default: reports from any/all Device IDs present.''')
parser.add_argument('--model', type=str, default=None, help='''Only report reads with given base-calling model ID. Default: reports from any/all basecalling IDs present.''')
parser.add_argument('--rule', type=str, default='and', help='''Require each sequence to pass ALL the filters (use 'and') or pass at least N filters (use 'or')''')
parser.add_argument('--minscore', type=int, default=1, help='''If requiring sequences only pass at least N filters (--rule 'or'), then specify minimum number of filters to pass. Default: 1.''')
##parser.add_argument('--tarlite', action='store_true', default=False, help=''' This method extracts 1 file from a given tarchive at a time, processes, and deletes it.
##The older still-default routine extracts the entirety of all given tarchives at once, then processes files.
##The default method will therefore require >2*tarchive amount of disk space (i.e. the tar.gz and its extracted contents).
##The tarlite method only requires the disk space already taken by the tarchive and enough for 1 additional file at a time.
##Tarlite may become the default method after some testing if it performs at similar speeds.''')
args = parser.parse_args()
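# Example invocations (illustrative only; file and directory names are
# hypothetical, flags correspond to the options defined above):
#   filterFast5DerivedFastx.py reads_dir/ -r 2d --minlen 1000 --minq 9 > filtered.fasta
#   filterFast5DerivedFastx.py run1.fastq run2.fastq -r molecule -o fastq --channel 112 > mol.fastq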
#################################################
## deal with some of the arguments
#################################################
assert args.outtype in ("fasta", "fastq", "qual", "intqual", "falcon", "oldfalcon", "newfalcon")
assert args.intype in ("input", "fasta", "fastq", "both")
assert args.readtype[0] in "tc2maM"
if args.readtype[0] == "t":
args.readtype = "template"
elif args.readtype[0] == "c":
args.readtype = "complement"
elif args.readtype[0] == "2":
args.readtype = "2d"
elif args.readtype[0] == "m":
args.readtype = "molecule"
elif args.readtype[0] == "a":
args.readtype = "all"
elif args.readtype[0] == "M":
args.readtype = "MoleQual"
if args.intype == 'input' or args.intype == "both":
intypes = ['fasta', 'fastq']
elif args.intype == 'fasta':
intypes = ['fasta']
elif args.intype == 'fastq':
intypes = ['fastq']
def filter_by_entry(readtype, minlen, maxlen, minq, maxq, channel, readnum, asic, runid, deviceid, modelid, minscore, filter_rule, intypes, outtype):
## if want all or specific read type, just filter by entry
rtype = readtype
falcon_i = 0
for fxfile in FastXFileList(args.fastx, intypes=intypes):
for fx in fxfile:
falcon_i += 1
if readtype == "all":
rtype = fx.get_read_type()
if fx.passes_filter(rtype, minlen, maxlen, minq, maxq, channel, readnum, asic, runid, deviceid, modelid, minscore, filter_rule):
print fx.get_fastx_entry(outtype, falcon_i)
def filter_by_molecule(readtype, minlen, maxlen, minq, maxq, channel, readnum, asic, runid, deviceid, modelid, minscore, filter_rule, intypes, outtype):
## if filtering by molecule or molequal; else just filter by entry
falcon_i = 0
for fxfile in FastXFileList(args.fastx, intypes=intypes):
fxmol = None
for fx in fxfile:
falcon_i += 1
            # If no molecule has been started yet, start one from this entry.
            ## If the new entry is from a new molecule, print the desired entry from
            ## the previous molecule and start a new molecule.
            ## If the new entry is from the current molecule, add it to the molecule info.
if fxmol == None:
fxmol = FastXMolecule(fx)
elif fx.get_molecule_name() != fxmol.get_molecule_name():
rtype = fxmol.interpret(readtype)
if fxmol.passes_filter(rtype, minlen, maxlen, minq, maxq, channel, readnum, asic, runid, deviceid, modelid, minscore, filter_rule):
print fxmol.get_fastx_entry(rtype, outtype, falcon_i)
fxmol = FastXMolecule(fx)
elif fx.get_molecule_name() == fxmol.get_molecule_name():
fxmol.add_fx(fx)
else:
quit("Error in code or file.....")
## process last molecule
rtype = fxmol.interpret(readtype)
if fxmol.passes_filter(rtype, minlen, maxlen, minq, maxq, channel, readnum, asic, runid, deviceid, modelid, minscore, filter_rule):
print fxmol.get_fastx_entry(rtype, outtype, falcon_i)
if args.readtype in ('template', '2d', 'complement', 'all'):
filter_by_entry(readtype=args.readtype, minlen=args.minlen, maxlen=args.maxlen, minq=args.minq, maxq=args.maxq, channel=args.channel, readnum=args.readnum, asic=args.asic, runid=args.run, deviceid=args.device, modelid=args.model, minscore=args.minscore, filter_rule=args.rule, intypes=intypes, outtype=args.outtype)
elif args.readtype in ('molecule', 'MoleQual'):
filter_by_molecule(readtype=args.readtype, minlen=args.minlen, maxlen=args.maxlen, minq=args.minq, maxq=args.maxq, channel=args.channel, readnum=args.readnum, asic=args.asic, runid=args.run, deviceid=args.device, modelid=args.model, minscore=args.minscore, filter_rule=args.rule, intypes=intypes, outtype=args.outtype)
| JohnUrban/fast5tools | bin/filterFast5DerivedFastx.py | Python | mit | 11,145 | 0.007447 |
#!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
import TestSCons
_python_ = TestSCons._python_
test = TestSCons.TestSCons()
test.write('myfc.py', r"""
import sys
fline = '#'+sys.argv[1]+'\n'
outfile = open(sys.argv[2], 'wb')
infile = open(sys.argv[3], 'rb')
for l in [l for l in infile.readlines() if l != fline]:
outfile.write(l)
sys.exit(0)
""")
if not TestSCons.case_sensitive_suffixes('.f','.F'):
f95pp = 'f95'
else:
f95pp = 'f95pp'
test.write('SConstruct', """
env = Environment(F95COM = r'%(_python_)s myfc.py f95 $TARGET $SOURCES',
F95COMSTR = 'Building f95 $TARGET from $SOURCES',
F95PPCOM = r'%(_python_)s myfc.py f95pp $TARGET $SOURCES',
F95PPCOMSTR = 'Building f95pp $TARGET from $SOURCES',
OBJSUFFIX='.obj')
env.Object(source = 'test01.f95')
env.Object(source = 'test02.F95')
""" % locals())
test.write('test01.f95', "A .f95 file.\n#f95\n")
test.write('test02.F95', "A .F95 file.\n#%s\n" % f95pp)
test.run(stdout = test.wrap_stdout("""\
Building f95 test01.obj from test01.f95
Building %(f95pp)s test02.obj from test02.F95
""" % locals()))
test.must_match('test01.obj', "A .f95 file.\n")
test.must_match('test02.obj', "A .F95 file.\n")
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| Distrotech/scons | test/Fortran/F95COMSTR.py | Python | mit | 2,501 | 0.001599 |
"""Support for Etekcity VeSync switches."""
import logging
from homeassistant.components.switch import SwitchEntity
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from .common import VeSyncDevice
from .const import DOMAIN, VS_DISCOVERY, VS_DISPATCHERS, VS_SWITCHES
_LOGGER = logging.getLogger(__name__)
DEV_TYPE_TO_HA = {
"wifi-switch-1.3": "outlet",
"ESW03-USA": "outlet",
"ESW01-EU": "outlet",
"ESW15-USA": "outlet",
"ESWL01": "switch",
"ESWL03": "switch",
"ESO15-TB": "outlet",
}
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up switches."""
async def async_discover(devices):
"""Add new devices to platform."""
_async_setup_entities(devices, async_add_entities)
disp = async_dispatcher_connect(
hass, VS_DISCOVERY.format(VS_SWITCHES), async_discover
)
hass.data[DOMAIN][VS_DISPATCHERS].append(disp)
_async_setup_entities(hass.data[DOMAIN][VS_SWITCHES], async_add_entities)
return True
@callback
def _async_setup_entities(devices, async_add_entities):
"""Check if device is online and add entity."""
dev_list = []
for dev in devices:
if DEV_TYPE_TO_HA.get(dev.device_type) == "outlet":
dev_list.append(VeSyncSwitchHA(dev))
elif DEV_TYPE_TO_HA.get(dev.device_type) == "switch":
dev_list.append(VeSyncLightSwitch(dev))
else:
_LOGGER.warning(
"%s - Unknown device type - %s", dev.device_name, dev.device_type
)
continue
async_add_entities(dev_list, update_before_add=True)
class VeSyncSwitchHA(VeSyncDevice, SwitchEntity):
"""Representation of a VeSync switch."""
def __init__(self, plug):
"""Initialize the VeSync switch device."""
super().__init__(plug)
self.smartplug = plug
@property
def device_state_attributes(self):
"""Return the state attributes of the device."""
attr = {}
if hasattr(self.smartplug, "weekly_energy_total"):
attr["voltage"] = self.smartplug.voltage
attr["weekly_energy_total"] = self.smartplug.weekly_energy_total
attr["monthly_energy_total"] = self.smartplug.monthly_energy_total
attr["yearly_energy_total"] = self.smartplug.yearly_energy_total
return attr
@property
def current_power_w(self):
"""Return the current power usage in W."""
return self.smartplug.power
@property
def today_energy_kwh(self):
"""Return the today total energy usage in kWh."""
return self.smartplug.energy_today
def update(self):
"""Update outlet details and energy usage."""
self.smartplug.update()
self.smartplug.update_energy()
class VeSyncLightSwitch(VeSyncDevice, SwitchEntity):
"""Handle representation of VeSync Light Switch."""
def __init__(self, switch):
"""Initialize Light Switch device class."""
super().__init__(switch)
self.switch = switch
| nkgilley/home-assistant | homeassistant/components/vesync/switch.py | Python | apache-2.0 | 3,095 | 0.000323 |
import numpy as np
from scipy._lib._util import check_random_state
def rvs_ratio_uniforms(pdf, umax, vmin, vmax, size=1, c=0, random_state=None):
"""
Generate random samples from a probability density function using the
ratio-of-uniforms method.
Parameters
----------
pdf : callable
A function with signature `pdf(x)` that is proportional to the
probability density function of the distribution.
umax : float
The upper bound of the bounding rectangle in the u-direction.
vmin : float
The lower bound of the bounding rectangle in the v-direction.
vmax : float
The upper bound of the bounding rectangle in the v-direction.
size : int or tuple of ints, optional
Defining number of random variates (default is 1).
c : float, optional.
Shift parameter of ratio-of-uniforms method, see Notes. Default is 0.
random_state : {None, int, `numpy.random.Generator`,
`numpy.random.RandomState`}, optional
If `seed` is None (or `np.random`), the `numpy.random.RandomState`
singleton is used.
If `seed` is an int, a new ``RandomState`` instance is used,
seeded with `seed`.
If `seed` is already a ``Generator`` or ``RandomState`` instance then
that instance is used.
Returns
-------
rvs : ndarray
The random variates distributed according to the probability
distribution defined by the pdf.
Notes
-----
Given a univariate probability density function `pdf` and a constant `c`,
define the set ``A = {(u, v) : 0 < u <= sqrt(pdf(v/u + c))}``.
If `(U, V)` is a random vector uniformly distributed over `A`,
then `V/U + c` follows a distribution according to `pdf`.
The above result (see [1]_, [2]_) can be used to sample random variables
using only the pdf, i.e. no inversion of the cdf is required. Typical
choices of `c` are zero or the mode of `pdf`. The set `A` is a subset of
the rectangle ``R = [0, umax] x [vmin, vmax]`` where
- ``umax = sup sqrt(pdf(x))``
- ``vmin = inf (x - c) sqrt(pdf(x))``
- ``vmax = sup (x - c) sqrt(pdf(x))``
In particular, these values are finite if `pdf` is bounded and
``x**2 * pdf(x)`` is bounded (i.e. subquadratic tails).
One can generate `(U, V)` uniformly on `R` and return
`V/U + c` if `(U, V)` are also in `A` which can be directly
verified.
The algorithm is not changed if one replaces `pdf` by k * `pdf` for any
constant k > 0. Thus, it is often convenient to work with a function
that is proportional to the probability density function by dropping
    unnecessary normalization factors.
Intuitively, the method works well if `A` fills up most of the
enclosing rectangle such that the probability is high that `(U, V)`
lies in `A` whenever it lies in `R` as the number of required
iterations becomes too large otherwise. To be more precise, note that
the expected number of iterations to draw `(U, V)` uniformly
distributed on `R` such that `(U, V)` is also in `A` is given by
the ratio ``area(R) / area(A) = 2 * umax * (vmax - vmin) / area(pdf)``,
where `area(pdf)` is the integral of `pdf` (which is equal to one if the
probability density function is used but can take on other values if a
function proportional to the density is used). The equality holds since
the area of `A` is equal to 0.5 * area(pdf) (Theorem 7.1 in [1]_).
If the sampling fails to generate a single random variate after 50000
iterations (i.e. not a single draw is in `A`), an exception is raised.
If the bounding rectangle is not correctly specified (i.e. if it does not
contain `A`), the algorithm samples from a distribution different from
the one given by `pdf`. It is therefore recommended to perform a
test such as `~scipy.stats.kstest` as a check.
References
----------
.. [1] L. Devroye, "Non-Uniform Random Variate Generation",
Springer-Verlag, 1986.
.. [2] W. Hoermann and J. Leydold, "Generating generalized inverse Gaussian
random variates", Statistics and Computing, 24(4), p. 547--557, 2014.
.. [3] A.J. Kinderman and J.F. Monahan, "Computer Generation of Random
Variables Using the Ratio of Uniform Deviates",
ACM Transactions on Mathematical Software, 3(3), p. 257--260, 1977.
Examples
--------
>>> from scipy import stats
>>> rng = np.random.default_rng()
Simulate normally distributed random variables. It is easy to compute the
bounding rectangle explicitly in that case. For simplicity, we drop the
normalization factor of the density.
>>> f = lambda x: np.exp(-x**2 / 2)
>>> v_bound = np.sqrt(f(np.sqrt(2))) * np.sqrt(2)
>>> umax, vmin, vmax = np.sqrt(f(0)), -v_bound, v_bound
>>> rvs = stats.rvs_ratio_uniforms(f, umax, vmin, vmax, size=2500,
... random_state=rng)
The K-S test confirms that the random variates are indeed normally
distributed (normality is not rejected at 5% significance level):
>>> stats.kstest(rvs, 'norm')[1]
0.250634764150542
The exponential distribution provides another example where the bounding
rectangle can be determined explicitly.
>>> rvs = stats.rvs_ratio_uniforms(lambda x: np.exp(-x), umax=1,
... vmin=0, vmax=2*np.exp(-1), size=1000,
... random_state=rng)
>>> stats.kstest(rvs, 'expon')[1]
0.21121052054580314
"""
if vmin >= vmax:
raise ValueError("vmin must be smaller than vmax.")
if umax <= 0:
raise ValueError("umax must be positive.")
size1d = tuple(np.atleast_1d(size))
N = np.prod(size1d) # number of rvs needed, reshape upon return
# start sampling using ratio of uniforms method
rng = check_random_state(random_state)
x = np.zeros(N)
simulated, i = 0, 1
# loop until N rvs have been generated: expected runtime is finite.
    # To avoid an infinite loop, raise an exception if not a single rv has
    # been generated after 50000 tries. Even if the expected number of
    # iterations is 1000, the probability of this event is (1-1/1000)**50000,
    # which is of order 1e-22.
while simulated < N:
k = N - simulated
# simulate uniform rvs on [0, umax] and [vmin, vmax]
u1 = umax * rng.uniform(size=k)
v1 = rng.uniform(vmin, vmax, size=k)
# apply rejection method
rvs = v1 / u1 + c
accept = (u1**2 <= pdf(rvs))
num_accept = np.sum(accept)
if num_accept > 0:
x[simulated:(simulated + num_accept)] = rvs[accept]
simulated += num_accept
if (simulated == 0) and (i*N >= 50000):
msg = ("Not a single random variate could be generated in {} "
"attempts. The ratio of uniforms method does not appear "
"to work for the provided parameters. Please check the "
"pdf and the bounds.".format(i*N))
raise RuntimeError(msg)
i += 1
return np.reshape(x, size1d)
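# A minimal, import-safe sketch (not part of the original module) showing how
# the bounding rectangle from the Notes can be estimated numerically on a grid
# when closed-form bounds are unavailable, here for the standard Laplace pdf.
if __name__ == "__main__":
    def laplace_pdf(x):
        return 0.5 * np.exp(-np.abs(x))
    grid = np.linspace(-40, 40, 100001)
    root_pdf = np.sqrt(laplace_pdf(grid))
    umax_est = root_pdf.max()
    vmin_est, vmax_est = (grid * root_pdf).min(), (grid * root_pdf).max()
    sample = rvs_ratio_uniforms(laplace_pdf, umax_est, vmin_est, vmax_est,
                                size=10000, random_state=12345)
    # The standard Laplace distribution has mean 0 and variance 2.
    print("empirical mean %.3f, empirical variance %.3f"
          % (sample.mean(), sample.var()))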
| WarrenWeckesser/scipy | scipy/stats/_rvs_sampling.py | Python | bsd-3-clause | 7,177 | 0 |
string = input()
string[0] = "a"
| LTKills/languages | python/data_structures/strings.py | Python | gpl-3.0 | 36 | 0.027778 |
#!/usr/bin/env python3
import re
a = [[0 for x in range(25)] for y in range(13)]
f=open("../distrib/spiral.txt","r")
s=f.readline().strip()
dx, dy = [0, 1, 0, -1], [1, 0, -1, 0]
x, y, c = 0, -1, 1
l=0
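# Walk the 13x25 grid in a clockwise inward spiral (right, down, left, up),
# writing one character of the input string into each visited cell; even i
# handles the horizontal runs (25-wide), odd i the vertical runs (13-tall).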
for i in range(13+13-1):
if i%2==0:
for j in range((25+25-i)//2):
x += dx[i % 4]
y += dy[i % 4]
#print(x,y,l)
a[x][y] = s[l]
l=l+1
c += 1
else:
for j in range((13+13-i)//2):
x += dx[i % 4]
y += dy[i % 4]
#print(x,y,l)
a[x][y] = s[l]
l=l+1
c += 1
for i in a:
for k in i:
k=re.sub(r"¦","█",k)
k=re.sub(r"¯","▀",k)
k=re.sub(r"_","▄",k)
print(k,end="")
print()
| DISMGryphons/GryphonCTF2017-Challenges | challenges/misc/Spirals/solution/solution.py | Python | gpl-3.0 | 812 | 0.032338 |
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2017 Georgi Georgiev
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Volume manager manages creating, attaching, detaching, and persistent storage.
Persistent storage volumes keep their state independent of instances. You can
attach to an instance, terminate the instance, spawn a new instance (even
one from a different image) and re-attach the volume with the same data
intact.
**Related Flags**
:volume_manager: The module name of a class derived from
:class:`manager.Manager` (default:
:class:`cinder.volume.manager.Manager`).
:volume_driver: Used by :class:`Manager`. Defaults to
:class:`cinder.volume.drivers.lvm.LVMVolumeDriver`.
:volume_group: Name of the group that will contain exported volumes (default:
`cinder-volumes`)
:num_shell_tries: Number of times to attempt to run commands (default: 3)
"""
#ge0rgi:added is_volume_trusted
import requests
import time
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging as messaging
from oslo_serialization import jsonutils
from oslo_service import periodic_task
from oslo_utils import excutils
from oslo_utils import importutils
from oslo_utils import timeutils
from oslo_utils import units
from oslo_utils import uuidutils
profiler = importutils.try_import('osprofiler.profiler')
import six
from taskflow import exceptions as tfe
from cinder.common import constants
from cinder import compute
from cinder import context
from cinder import coordination
from cinder import db
from cinder import exception
from cinder import flow_utils
from cinder import keymgr as key_manager
from cinder.i18n import _, _LE, _LI, _LW
from cinder.image import cache as image_cache
from cinder.image import glance
from cinder.image import image_utils
from cinder import manager
from cinder.message import api as message_api
from cinder.message import defined_messages
from cinder.message import resource_types
from cinder import objects
from cinder.objects import cgsnapshot
from cinder.objects import consistencygroup
from cinder.objects import fields
from cinder import quota
from cinder import utils
from cinder import volume as cinder_volume
from cinder.volume import configuration as config
from cinder.volume.flows.manager import create_volume
from cinder.volume.flows.manager import manage_existing
from cinder.volume.flows.manager import manage_existing_snapshot
from cinder.volume import group_types
from cinder.volume import rpcapi as volume_rpcapi
from cinder.volume import utils as vol_utils
from cinder.volume import volume_types
from cinder.scheduler.filters.asset_tag_filter import TrustAssertionFilter
LOG = logging.getLogger(__name__)
QUOTAS = quota.QUOTAS
CGQUOTAS = quota.CGQUOTAS
GROUP_QUOTAS = quota.GROUP_QUOTAS
VALID_REMOVE_VOL_FROM_CG_STATUS = (
'available',
'in-use',
'error',
'error_deleting')
VALID_REMOVE_VOL_FROM_GROUP_STATUS = (
'available',
'in-use',
'error',
'error_deleting')
VALID_ADD_VOL_TO_CG_STATUS = (
'available',
'in-use')
VALID_ADD_VOL_TO_GROUP_STATUS = (
'available',
'in-use')
VALID_CREATE_CG_SRC_SNAP_STATUS = (fields.SnapshotStatus.AVAILABLE,)
VALID_CREATE_GROUP_SRC_SNAP_STATUS = (fields.SnapshotStatus.AVAILABLE,)
VALID_CREATE_CG_SRC_CG_STATUS = ('available',)
VALID_CREATE_GROUP_SRC_GROUP_STATUS = ('available',)
VA_LIST = objects.VolumeAttachmentList
volume_manager_opts = [
cfg.StrOpt('volume_driver',
default='cinder.volume.drivers.lvm.LVMVolumeDriver',
help='Driver to use for volume creation'),
cfg.IntOpt('migration_create_volume_timeout_secs',
default=300,
help='Timeout for creating the volume to migrate to '
'when performing volume migration (seconds)'),
cfg.BoolOpt('volume_service_inithost_offload',
default=False,
help='Offload pending volume delete during '
'volume service startup'),
cfg.StrOpt('zoning_mode',
help='FC Zoning mode configured'),
cfg.StrOpt('extra_capabilities',
default='{}',
help='User defined capabilities, a JSON formatted string '
'specifying key/value pairs. The key/value pairs can '
'be used by the CapabilitiesFilter to select between '
'backends when requests specify volume types. For '
'example, specifying a service level or the geographical '
'location of a backend, then creating a volume type to '
'allow the user to select by these different '
'properties.'),
cfg.BoolOpt('suppress_requests_ssl_warnings',
default=False,
help='Suppress requests library SSL certificate warnings.'),
]
CONF = cfg.CONF
CONF.register_opts(volume_manager_opts)
MAPPING = {
'cinder.volume.drivers.hds.nfs.HDSNFSDriver':
'cinder.volume.drivers.hitachi.hnas_nfs.HNASNFSDriver',
'cinder.volume.drivers.hds.iscsi.HDSISCSIDriver':
'cinder.volume.drivers.hitachi.hnas_iscsi.HNASISCSIDriver',
'cinder.volume.drivers.hitachi.hnas_nfs.HDSNFSDriver':
'cinder.volume.drivers.hitachi.hnas_nfs.HNASNFSDriver',
'cinder.volume.drivers.hitachi.hnas_iscsi.HDSISCSIDriver':
'cinder.volume.drivers.hitachi.hnas_iscsi.HNASISCSIDriver',
'cinder.volume.drivers.ibm.xiv_ds8k':
'cinder.volume.drivers.ibm.ibm_storage',
'cinder.volume.drivers.emc.scaleio':
'cinder.volume.drivers.dell_emc.scaleio.driver',
'cinder.volume.drivers.emc.vnx.driver.EMCVNXDriver':
'cinder.volume.drivers.dell_emc.vnx.driver.VNXDriver',
'cinder.volume.drivers.emc.xtremio.XtremIOISCSIDriver':
'cinder.volume.drivers.dell_emc.xtremio.XtremIOISCSIDriver',
'cinder.volume.drivers.emc.xtremio.XtremIOFibreChannelDriver':
'cinder.volume.drivers.dell_emc.xtremio.XtremIOFCDriver',
'cinder.volume.drivers.datera.DateraDriver':
'cinder.volume.drivers.datera.datera_iscsi.DateraDriver',
'cinder.volume.drivers.emc.emc_vmax_iscsi.EMCVMAXISCSIDriver':
'cinder.volume.drivers.dell_emc.vmax.iscsi.VMAXISCSIDriver',
'cinder.volume.drivers.emc.emc_vmax_fc.EMCVMAXFCDriver':
'cinder.volume.drivers.dell_emc.vmax.fc.VMAXFCDriver',
'cinder.volume.drivers.eqlx.DellEQLSanISCSIDriver':
'cinder.volume.drivers.dell_emc.ps.PSSeriesISCSIDriver',
}
class VolumeManager(manager.CleanableManager,
manager.SchedulerDependentManager):
"""Manages attachable block storage devices."""
RPC_API_VERSION = volume_rpcapi.VolumeAPI.RPC_API_VERSION
target = messaging.Target(version=RPC_API_VERSION)
# On cloning a volume, we shouldn't copy volume_type, consistencygroup
# and volume_attachment, because the db sets that according to [field]_id,
# which we do copy. We also skip some other values that are set during
# creation of Volume object.
_VOLUME_CLONE_SKIP_PROPERTIES = {
'id', '_name_id', 'name_id', 'name', 'status',
'attach_status', 'migration_status', 'volume_type',
'consistencygroup', 'volume_attachment', 'group'}
def __init__(self, volume_driver=None, service_name=None,
*args, **kwargs):
"""Load the driver from the one specified in args, or from flags."""
# update_service_capabilities needs service_name to be volume
super(VolumeManager, self).__init__(service_name='volume',
*args, **kwargs)
self.configuration = config.Configuration(volume_manager_opts,
config_group=service_name)
self.stats = {}
if not volume_driver:
# Get from configuration, which will get the default
# if its not using the multi backend
volume_driver = self.configuration.volume_driver
if volume_driver in MAPPING:
LOG.warning(_LW("Driver path %s is deprecated, update your "
"configuration to the new path."), volume_driver)
volume_driver = MAPPING[volume_driver]
vol_db_empty = self._set_voldb_empty_at_startup_indicator(
context.get_admin_context())
LOG.debug("Cinder Volume DB check: vol_db_empty=%s", vol_db_empty)
# We pass the current setting for service.active_backend_id to
# the driver on init, in case there was a restart or something
curr_active_backend_id = None
svc_host = vol_utils.extract_host(self.host, 'backend')
try:
service = objects.Service.get_by_args(
context.get_admin_context(),
svc_host,
constants.VOLUME_BINARY)
except exception.ServiceNotFound:
# NOTE(jdg): This is to solve problems with unit tests
LOG.info(_LI("Service not found for updating "
"active_backend_id, assuming default "
"for driver init."))
else:
curr_active_backend_id = service.active_backend_id
if self.configuration.suppress_requests_ssl_warnings:
LOG.warning(_LW("Suppressing requests library SSL Warnings"))
requests.packages.urllib3.disable_warnings(
requests.packages.urllib3.exceptions.InsecureRequestWarning)
requests.packages.urllib3.disable_warnings(
requests.packages.urllib3.exceptions.InsecurePlatformWarning)
self.key_manager = key_manager.API(CONF)
self.driver = importutils.import_object(
volume_driver,
configuration=self.configuration,
db=self.db,
host=self.host,
cluster_name=self.cluster,
is_vol_db_empty=vol_db_empty,
active_backend_id=curr_active_backend_id)
if self.cluster and not self.driver.SUPPORTS_ACTIVE_ACTIVE:
msg = _LE('Active-Active configuration is not currently supported '
'by driver %s.') % volume_driver
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
self.message_api = message_api.API()
if CONF.profiler.enabled and profiler is not None:
self.driver = profiler.trace_cls("driver")(self.driver)
try:
self.extra_capabilities = jsonutils.loads(
self.driver.configuration.extra_capabilities)
except AttributeError:
self.extra_capabilities = {}
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Invalid JSON: %s"),
self.driver.configuration.extra_capabilities)
if self.driver.configuration.safe_get(
'image_volume_cache_enabled'):
max_cache_size = self.driver.configuration.safe_get(
'image_volume_cache_max_size_gb')
max_cache_entries = self.driver.configuration.safe_get(
'image_volume_cache_max_count')
self.image_volume_cache = image_cache.ImageVolumeCache(
self.db,
cinder_volume.API(),
max_cache_size,
max_cache_entries
)
LOG.info(_LI('Image-volume cache enabled for host %(host)s.'),
{'host': self.host})
else:
LOG.info(_LI('Image-volume cache disabled for host %(host)s.'),
{'host': self.host})
self.image_volume_cache = None
if CONF.trusted_computing:
self.asset_tag_filter = TrustAssertionFilter()
def _count_allocated_capacity(self, ctxt, volume):
pool = vol_utils.extract_host(volume['host'], 'pool')
if pool is None:
# No pool name encoded in host, so this is a legacy
# volume created before pool is introduced, ask
# driver to provide pool info if it has such
# knowledge and update the DB.
try:
pool = self.driver.get_pool(volume)
except Exception:
LOG.exception(_LE('Fetch volume pool name failed.'),
resource=volume)
return
if pool:
new_host = vol_utils.append_host(volume['host'],
pool)
self.db.volume_update(ctxt, volume['id'],
{'host': new_host})
else:
# Otherwise, put them into a special fixed pool with
# volume_backend_name being the pool name, if
# volume_backend_name is None, use default pool name.
# This is only for counting purpose, doesn't update DB.
pool = (self.driver.configuration.safe_get(
'volume_backend_name') or vol_utils.extract_host(
volume['host'], 'pool', True))
try:
pool_stat = self.stats['pools'][pool]
except KeyError:
# First volume in the pool
self.stats['pools'][pool] = dict(
allocated_capacity_gb=0)
pool_stat = self.stats['pools'][pool]
pool_sum = pool_stat['allocated_capacity_gb']
pool_sum += volume['size']
self.stats['pools'][pool]['allocated_capacity_gb'] = pool_sum
self.stats['allocated_capacity_gb'] += volume['size']
def _set_voldb_empty_at_startup_indicator(self, ctxt):
"""Determine if the Cinder volume DB is empty.
A check of the volume DB is done to determine whether it is empty or
not at this point.
:param ctxt: our working context
"""
vol_entries = self.db.volume_get_all(ctxt, None, 1, filters=None)
if len(vol_entries) == 0:
LOG.info(_LI("Determined volume DB was empty at startup."))
return True
else:
LOG.info(_LI("Determined volume DB was not empty at startup."))
return False
def _sync_provider_info(self, ctxt, volumes, snapshots):
# NOTE(jdg): For now this just updates provider_id, we can add more
# items to the update if they're relevant but we need to be safe in
# what we allow and add a list of allowed keys. Things that make sense
# are provider_*, replication_status etc
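        # The driver is expected to return two lists of dicts keyed by 'id',
        # e.g. updates = [{'id': <volume id>, 'provider_id': <backend ref>}]
        # (illustrative shape; the exact contents are driver specific).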
updates, snapshot_updates = self.driver.update_provider_info(
volumes, snapshots)
if updates:
for volume in volumes:
                # NOTE(JDG): Make sure returned item is in this host's volumes
update = (
[updt for updt in updates if updt['id'] ==
volume['id']])
if update:
update = update[0]
self.db.volume_update(
ctxt,
update['id'],
{'provider_id': update['provider_id']})
        # NOTE(jdg): snapshots are slightly harder, because
        # we do not have a host column and therefore no get-all-by-host,
        # so we use a get_all and match our response against it
if snapshot_updates:
cinder_snaps = self.db.snapshot_get_all(ctxt)
for snap in cinder_snaps:
# NOTE(jdg): For now we only update those that have no entry
if not snap.get('provider_id', None):
update = (
[updt for updt in snapshot_updates if updt['id'] ==
snap['id']][0])
if update:
self.db.snapshot_update(
ctxt,
update['id'],
{'provider_id': update['provider_id']})
def _include_resources_in_cluster(self, ctxt):
LOG.info(_LI('Including all resources from host %(host)s in cluster '
'%(cluster)s.'),
{'host': self.host, 'cluster': self.cluster})
num_vols = objects.VolumeList.include_in_cluster(
ctxt, self.cluster, host=self.host)
num_cgs = objects.ConsistencyGroupList.include_in_cluster(
ctxt, self.cluster, host=self.host)
num_cache = db.image_volume_cache_include_in_cluster(
ctxt, self.cluster, host=self.host)
LOG.info(_LI('%(num_vols)s volumes, %(num_cgs)s consistency groups, '
'and %(num_cache)s image volume caches from host '
'%(host)s have been included in cluster %(cluster)s.'),
{'num_vols': num_vols, 'num_cgs': num_cgs,
'host': self.host, 'cluster': self.cluster,
'num_cache': num_cache})
def init_host(self, added_to_cluster=None, **kwargs):
"""Perform any required initialization."""
ctxt = context.get_admin_context()
if not self.driver.supported:
utils.log_unsupported_driver_warning(self.driver)
if not self.configuration.enable_unsupported_driver:
LOG.error(_LE("Unsupported drivers are disabled."
" You can re-enable by adding "
"enable_unsupported_driver=True to the "
"driver section in cinder.conf"),
resource={'type': 'driver',
'id': self.__class__.__name__})
return
# If we have just added this host to a cluster we have to include all
# our resources in that cluster.
if added_to_cluster:
self._include_resources_in_cluster(ctxt)
LOG.info(_LI("Starting volume driver %(driver_name)s (%(version)s)"),
{'driver_name': self.driver.__class__.__name__,
'version': self.driver.get_version()})
try:
self.driver.do_setup(ctxt)
self.driver.check_for_setup_error()
except Exception:
LOG.exception(_LE("Failed to initialize driver."),
resource={'type': 'driver',
'id': self.__class__.__name__})
# we don't want to continue since we failed
# to initialize the driver correctly.
return
# Initialize backend capabilities list
self.driver.init_capabilities()
volumes = self._get_my_volumes(ctxt)
snapshots = self._get_my_snapshots(ctxt)
self._sync_provider_info(ctxt, volumes, snapshots)
# FIXME volume count for exporting is wrong
self.stats['pools'] = {}
self.stats.update({'allocated_capacity_gb': 0})
try:
for volume in volumes:
                # available volumes should also be counted as allocated
if volume['status'] in ['in-use', 'available']:
# calculate allocated capacity for driver
self._count_allocated_capacity(ctxt, volume)
try:
if volume['status'] in ['in-use']:
self.driver.ensure_export(ctxt, volume)
except Exception:
LOG.exception(_LE("Failed to re-export volume, "
"setting to ERROR."),
resource=volume)
volume.conditional_update({'status': 'error'},
{'status': 'in-use'})
# All other cleanups are processed by parent class CleanableManager
except Exception:
LOG.exception(_LE("Error during re-export on driver init."),
resource=volume)
return
self.driver.set_throttle()
# at this point the driver is considered initialized.
# NOTE(jdg): Careful though because that doesn't mean
# that an entry exists in the service table
self.driver.set_initialized()
        # Clean up leftover temporary image files during host init.
backend_name = vol_utils.extract_host(self.service_topic_queue)
image_utils.cleanup_temporary_file(backend_name)
# collect and publish service capabilities
self.publish_service_capabilities(ctxt)
LOG.info(_LI("Driver initialization completed successfully."),
resource={'type': 'driver',
'id': self.driver.__class__.__name__})
# Make sure to call CleanableManager to do the cleanup
super(VolumeManager, self).init_host(added_to_cluster=added_to_cluster,
**kwargs)
def init_host_with_rpc(self):
LOG.info(_LI("Initializing RPC dependent components of volume "
"driver %(driver_name)s (%(version)s)"),
{'driver_name': self.driver.__class__.__name__,
'version': self.driver.get_version()})
try:
# Make sure the driver is initialized first
utils.log_unsupported_driver_warning(self.driver)
utils.require_driver_initialized(self.driver)
except exception.DriverNotInitialized:
LOG.error(_LE("Cannot complete RPC initialization because "
"driver isn't initialized properly."),
resource={'type': 'driver',
'id': self.driver.__class__.__name__})
return
stats = self.driver.get_volume_stats(refresh=True)
svc_host = vol_utils.extract_host(self.host, 'backend')
try:
service = objects.Service.get_by_args(
context.get_admin_context(),
svc_host,
constants.VOLUME_BINARY)
except exception.ServiceNotFound:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Service not found for updating "
"replication_status."))
if service.replication_status != (
fields.ReplicationStatus.FAILED_OVER):
if stats and stats.get('replication_enabled', False):
service.replication_status = fields.ReplicationStatus.ENABLED
else:
service.replication_status = fields.ReplicationStatus.DISABLED
service.save()
LOG.info(_LI("Driver post RPC initialization completed successfully."),
resource={'type': 'driver',
'id': self.driver.__class__.__name__})
def _do_cleanup(self, ctxt, vo_resource):
if isinstance(vo_resource, objects.Volume):
if vo_resource.status == 'downloading':
self.driver.clear_download(ctxt, vo_resource)
elif vo_resource.status == 'uploading':
# Set volume status to available or in-use.
self.db.volume_update_status_based_on_attachment(
ctxt, vo_resource.id)
elif vo_resource.status == 'deleting':
if CONF.volume_service_inithost_offload:
# Offload all the pending volume delete operations to the
# threadpool to prevent the main volume service thread
# from being blocked.
self._add_to_threadpool(self.delete_volume, ctxt,
vo_resource, cascade=True)
else:
# By default, delete volumes sequentially
self.delete_volume(ctxt, vo_resource, cascade=True)
# We signal that we take care of cleaning the worker ourselves
# (with set_workers decorator in delete_volume method) so
# do_cleanup method doesn't need to remove it.
return True
        # For volumes in 'creating' or 'downloading' status, and for snapshots
        # in 'downloading' status, we have to set the status to 'error'.
if vo_resource.status in ('creating', 'downloading'):
vo_resource.status = 'error'
vo_resource.save()
def is_working(self):
"""Return if Manager is ready to accept requests.
This is to inform Service class that in case of volume driver
initialization failure the manager is actually down and not ready to
accept any requests.
"""
return self.driver.initialized
def _set_resource_host(self, resource):
"""Set the host field on the DB to our own when we are clustered."""
if (resource.is_clustered and
not vol_utils.hosts_are_equivalent(resource.host, self.host)):
pool = vol_utils.extract_host(resource.host, 'pool')
resource.host = vol_utils.append_host(self.host, pool)
resource.save()
@objects.Volume.set_workers
def create_volume(self, context, volume, request_spec=None,
filter_properties=None, allow_reschedule=True):
"""Creates the volume."""
# Log about unsupported drivers
utils.log_unsupported_driver_warning(self.driver)
# Make sure the host in the DB matches our own when clustered
self._set_resource_host(volume)
context_elevated = context.elevated()
if filter_properties is None:
filter_properties = {}
if request_spec is None:
request_spec = objects.RequestSpec()
try:
# NOTE(flaper87): Driver initialization is
# verified by the task itself.
flow_engine = create_volume.get_flow(
context_elevated,
self,
self.db,
self.driver,
self.scheduler_rpcapi,
self.host,
volume,
allow_reschedule,
context,
request_spec,
filter_properties,
image_volume_cache=self.image_volume_cache,
)
except Exception:
msg = _("Create manager volume flow failed.")
LOG.exception(msg, resource={'type': 'volume', 'id': volume.id})
raise exception.CinderException(msg)
snapshot_id = request_spec.get('snapshot_id')
source_volid = request_spec.get('source_volid')
source_replicaid = request_spec.get('source_replicaid')
if snapshot_id is not None:
# Make sure the snapshot is not deleted until we are done with it.
locked_action = "%s-%s" % (snapshot_id, 'delete_snapshot')
elif source_volid is not None:
# Make sure the volume is not deleted until we are done with it.
locked_action = "%s-%s" % (source_volid, 'delete_volume')
elif source_replicaid is not None:
# Make sure the volume is not deleted until we are done with it.
locked_action = "%s-%s" % (source_replicaid, 'delete_volume')
else:
locked_action = None
def _run_flow():
            # This code executes the create volume flow. If something goes
            # wrong, the flow reverts all jobs that were done and re-raises
            # the exception. Otherwise, all data generated by the flow becomes
            # available in the flow engine's storage.
with flow_utils.DynamicLogListener(flow_engine, logger=LOG):
flow_engine.run()
# NOTE(dulek): Flag to indicate if volume was rescheduled. Used to
# decide if allocated_capacity should be incremented.
rescheduled = False
try:
if locked_action is None:
_run_flow()
else:
with coordination.Lock(locked_action):
_run_flow()
finally:
try:
flow_engine.storage.fetch('refreshed')
except tfe.NotFound:
                # If there's no vol_ref, then the flow was reverted. Let's
                # check whether rescheduling occurred.
try:
rescheduled = flow_engine.storage.get_revert_result(
create_volume.OnFailureRescheduleTask.make_name(
[create_volume.ACTION]))
except tfe.NotFound:
pass
if not rescheduled:
# NOTE(dulek): Volume wasn't rescheduled so we need to update
# volume stats as these are decremented on delete.
self._update_allocated_capacity(volume)
LOG.info(_LI("Created volume successfully."), resource=volume)
return volume.id
def _check_is_our_resource(self, resource):
if resource.host:
res_backend = vol_utils.extract_host(resource.service_topic_queue)
backend = vol_utils.extract_host(self.service_topic_queue)
if res_backend != backend:
msg = (_('Invalid %(resource)s: %(resource)s %(id)s is not '
'local to %(backend)s.') %
                       {'resource': resource.obj_name(), 'id': resource.id,
'backend': backend})
raise exception.Invalid(msg)
@coordination.synchronized('{volume.id}-{f_name}')
@objects.Volume.set_workers
def delete_volume(self, context, volume, unmanage_only=False,
cascade=False):
"""Deletes and unexports volume.
1. Delete a volume(normal case)
Delete a volume and update quotas.
2. Delete a migration volume
If deleting the volume in a migration, we want to skip
quotas but we need database updates for the volume.
"""
context = context.elevated()
try:
volume.refresh()
except exception.VolumeNotFound:
# NOTE(thingee): It could be possible for a volume to
# be deleted when resuming deletes from init_host().
LOG.debug("Attempted delete of non-existent volume: %s", volume.id)
return
if context.project_id != volume.project_id:
project_id = volume.project_id
else:
project_id = context.project_id
if volume['attach_status'] == fields.VolumeAttachStatus.ATTACHED:
# Volume is still attached, need to detach first
raise exception.VolumeAttached(volume_id=volume.id)
self._check_is_our_resource(volume)
if unmanage_only and cascade:
# This could be done, but is ruled out for now just
# for simplicity.
raise exception.Invalid(
reason=_("Unmanage and cascade delete options "
"are mutually exclusive."))
# The status 'deleting' is not included, because it only applies to
# the source volume to be deleted after a migration. No quota
# needs to be handled for it.
is_migrating = volume.migration_status not in (None, 'error',
'success')
is_migrating_dest = (is_migrating and
volume.migration_status.startswith(
'target:'))
notification = "delete.start"
if unmanage_only:
notification = "unmanage.start"
self._notify_about_volume_usage(context, volume, notification)
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
self.driver.remove_export(context, volume)
if unmanage_only:
self.driver.unmanage(volume)
elif cascade:
LOG.debug('Performing cascade delete.')
snapshots = objects.SnapshotList.get_all_for_volume(context,
volume.id)
for s in snapshots:
if s.status != 'deleting':
self._clear_db(context, is_migrating_dest, volume,
'error_deleting')
msg = (_("Snapshot %(id)s was found in state "
"%(state)s rather than 'deleting' during "
"cascade delete.") % {'id': s.id,
'state': s.status})
raise exception.InvalidSnapshot(reason=msg)
self.delete_snapshot(context, s)
LOG.debug('Snapshots deleted, issuing volume delete')
self.driver.delete_volume(volume)
else:
self.driver.delete_volume(volume)
except exception.VolumeIsBusy:
LOG.error(_LE("Unable to delete busy volume."),
resource=volume)
# If this is a destination volume, we have to clear the database
# record to avoid user confusion.
self._clear_db(context, is_migrating_dest, volume,
'available')
return
except Exception:
with excutils.save_and_reraise_exception():
# If this is a destination volume, we have to clear the
# database record to avoid user confusion.
new_status = 'error_deleting'
if unmanage_only is True:
new_status = 'error_unmanaging'
self._clear_db(context, is_migrating_dest, volume,
new_status)
# If deleting source/destination volume in a migration, we should
# skip quotas.
if not is_migrating:
# Get reservations
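            # (The negative deltas below release the volume count and
            # gigabytes back to the project's quota once the reservation is
            # committed further down.)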
try:
reservations = None
if volume.status != 'error_managing_deleting':
reserve_opts = {'volumes': -1,
'gigabytes': -volume.size}
QUOTAS.add_volume_type_opts(context,
reserve_opts,
volume.volume_type_id)
reservations = QUOTAS.reserve(context,
project_id=project_id,
**reserve_opts)
except Exception:
LOG.exception(_LE("Failed to update usages deleting volume."),
resource=volume)
# Delete glance metadata if it exists
self.db.volume_glance_metadata_delete_by_volume(context, volume.id)
volume.destroy()
# If deleting source/destination volume in a migration, we should
# skip quotas.
if not is_migrating:
notification = "delete.end"
if unmanage_only:
notification = "unmanage.end"
self._notify_about_volume_usage(context, volume, notification)
# Commit the reservations
if reservations:
QUOTAS.commit(context, reservations, project_id=project_id)
pool = vol_utils.extract_host(volume.host, 'pool')
if pool is None:
            # Legacy volume, put it into the default pool
pool = self.driver.configuration.safe_get(
'volume_backend_name') or vol_utils.extract_host(
volume.host, 'pool', True)
size = volume.size
try:
self.stats['pools'][pool]['allocated_capacity_gb'] -= size
except KeyError:
self.stats['pools'][pool] = dict(
allocated_capacity_gb=-size)
self.publish_service_capabilities(context)
msg = _LI("Deleted volume successfully.")
if unmanage_only:
msg = _LI("Unmanaged volume successfully.")
LOG.info(msg, resource=volume)
def _clear_db(self, context, is_migrating_dest, volume_ref, status):
# This method is called when driver.unmanage() or
# driver.delete_volume() fails in delete_volume(), so it is already
# in the exception handling part.
if is_migrating_dest:
volume_ref.destroy()
LOG.error(_LE("Unable to delete the destination volume "
"during volume migration, (NOTE: database "
"record needs to be deleted)."), resource=volume_ref)
else:
volume_ref.status = status
volume_ref.save()
@objects.Snapshot.set_workers
def create_snapshot(self, context, snapshot):
"""Creates and exports the snapshot."""
context = context.elevated()
self._notify_about_snapshot_usage(
context, snapshot, "create.start")
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the snapshot status updated.
utils.require_driver_initialized(self.driver)
# Pass context so that drivers that want to use it, can,
# but it is not a requirement for all drivers.
snapshot.context = context
model_update = self.driver.create_snapshot(snapshot)
if model_update:
snapshot.update(model_update)
snapshot.save()
except Exception:
with excutils.save_and_reraise_exception():
snapshot.status = fields.SnapshotStatus.ERROR
snapshot.save()
vol_ref = self.db.volume_get(context, snapshot.volume_id)
if vol_ref.bootable:
try:
self.db.volume_glance_metadata_copy_to_snapshot(
context, snapshot.id, snapshot.volume_id)
except exception.GlanceMetadataNotFound:
# If volume is not created from image, No glance metadata
# would be available for that volume in
# volume glance metadata table
pass
except exception.CinderException as ex:
LOG.exception(_LE("Failed updating snapshot"
" metadata using the provided volumes"
" %(volume_id)s metadata"),
{'volume_id': snapshot.volume_id},
resource=snapshot)
snapshot.status = fields.SnapshotStatus.ERROR
snapshot.save()
raise exception.MetadataCopyFailure(reason=six.text_type(ex))
snapshot.status = fields.SnapshotStatus.AVAILABLE
snapshot.progress = '100%'
snapshot.save()
self._notify_about_snapshot_usage(context, snapshot, "create.end")
LOG.info(_LI("Create snapshot completed successfully"),
resource=snapshot)
return snapshot.id
@coordination.synchronized('{snapshot.id}-{f_name}')
def delete_snapshot(self, context, snapshot, unmanage_only=False):
"""Deletes and unexports snapshot."""
context = context.elevated()
snapshot._context = context
project_id = snapshot.project_id
self._notify_about_snapshot_usage(
context, snapshot, "delete.start")
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the snapshot status updated.
utils.require_driver_initialized(self.driver)
# Pass context so that drivers that want to use it, can,
# but it is not a requirement for all drivers.
snapshot.context = context
snapshot.save()
if unmanage_only:
self.driver.unmanage_snapshot(snapshot)
else:
self.driver.delete_snapshot(snapshot)
except exception.SnapshotIsBusy:
LOG.error(_LE("Delete snapshot failed, due to snapshot busy."),
resource=snapshot)
snapshot.status = fields.SnapshotStatus.AVAILABLE
snapshot.save()
return
except Exception:
with excutils.save_and_reraise_exception():
snapshot.status = fields.SnapshotStatus.ERROR_DELETING
snapshot.save()
# Get reservations
try:
if CONF.no_snapshot_gb_quota:
reserve_opts = {'snapshots': -1}
else:
reserve_opts = {
'snapshots': -1,
'gigabytes': -snapshot.volume_size,
}
volume_ref = self.db.volume_get(context, snapshot.volume_id)
QUOTAS.add_volume_type_opts(context,
reserve_opts,
volume_ref.get('volume_type_id'))
reservations = QUOTAS.reserve(context,
project_id=project_id,
**reserve_opts)
except Exception:
reservations = None
LOG.exception(_LE("Update snapshot usages failed."),
resource=snapshot)
self.db.volume_glance_metadata_delete_by_snapshot(context, snapshot.id)
snapshot.destroy()
self._notify_about_snapshot_usage(context, snapshot, "delete.end")
# Commit the reservations
if reservations:
QUOTAS.commit(context, reservations, project_id=project_id)
msg = _LI("Delete snapshot completed successfully.")
if unmanage_only:
msg = _LI("Unmanage snapshot completed successfully.")
LOG.info(msg, resource=snapshot)
@coordination.synchronized('{volume_id}')
def attach_volume(self, context, volume_id, instance_uuid, host_name,
mountpoint, mode, volume=None):
"""Updates db to show volume is attached."""
# FIXME(lixiaoy1): Remove this in v4.0 of RPC API.
if volume is None:
# For older clients, mimic the old behavior and look
# up the volume by its volume_id.
volume = objects.Volume.get_by_id(context, volume_id)
# Get admin_metadata. This needs admin context.
with volume.obj_as_admin():
volume_metadata = volume.admin_metadata
# check the volume status before attaching
if volume.status == 'attaching':
if (volume_metadata.get('attached_mode') and
volume_metadata.get('attached_mode') != mode):
raise exception.InvalidVolume(
reason=_("being attached by different mode"))
if (volume.status == 'in-use' and not volume.multiattach
and not volume.migration_status):
raise exception.InvalidVolume(
reason=_("volume is already attached"))
host_name_sanitized = utils.sanitize_hostname(
host_name) if host_name else None
if instance_uuid:
attachments = (
VA_LIST.get_all_by_instance_uuid(
context, instance_uuid))
else:
attachments = (
VA_LIST.get_all_by_host(
context, host_name_sanitized))
if attachments:
# check if volume<->instance mapping is already tracked in DB
for attachment in attachments:
if attachment['volume_id'] == volume_id:
volume.status = 'in-use'
volume.save()
return attachment
self._notify_about_volume_usage(context, volume,
"attach.start")
attachment = volume.begin_attach(mode)
if instance_uuid and not uuidutils.is_uuid_like(instance_uuid):
attachment.attach_status = (
fields.VolumeAttachStatus.ERROR_ATTACHING)
attachment.save()
raise exception.InvalidUUID(uuid=instance_uuid)
if volume_metadata.get('readonly') == 'True' and mode != 'ro':
attachment.attach_status = (
fields.VolumeAttachStatus.ERROR_ATTACHING)
attachment.save()
self.message_api.create(
context, defined_messages.EventIds.ATTACH_READONLY_VOLUME,
context.project_id, resource_type=resource_types.VOLUME,
resource_uuid=volume.id)
raise exception.InvalidVolumeAttachMode(mode=mode,
volume_id=volume.id)
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
LOG.info(_LI('Attaching volume %(volume_id)s to instance '
'%(instance)s at mountpoint %(mount)s on host '
'%(host)s.'),
{'volume_id': volume_id, 'instance': instance_uuid,
'mount': mountpoint, 'host': host_name_sanitized},
resource=volume)
self.driver.attach_volume(context,
volume,
instance_uuid,
host_name_sanitized,
mountpoint)
except Exception:
with excutils.save_and_reraise_exception():
attachment.attach_status = (
fields.VolumeAttachStatus.ERROR_ATTACHING)
attachment.save()
volume = attachment.finish_attach(
instance_uuid,
host_name_sanitized,
mountpoint,
mode)
self._notify_about_volume_usage(context, volume, "attach.end")
LOG.info(_LI("Attach volume completed successfully."),
resource=volume)
return attachment
@coordination.synchronized('{volume_id}-{f_name}')
def detach_volume(self, context, volume_id, attachment_id=None,
volume=None):
"""Updates db to show volume is detached."""
# TODO(vish): refactor this into a more general "unreserve"
# FIXME(lixiaoy1): Remove this in v4.0 of RPC API.
if volume is None:
# For older clients, mimic the old behavior and look up the volume
# by its volume_id.
volume = objects.Volume.get_by_id(context, volume_id)
if attachment_id:
try:
attachment = objects.VolumeAttachment.get_by_id(context,
attachment_id)
except exception.VolumeAttachmentNotFound:
LOG.info(_LI("Volume detach called, but volume not attached."),
resource=volume)
                # We need to make sure the volume status is correct. It could
                # be in 'detaching' status now, and we don't want to leave it
                # there.
volume.finish_detach(attachment_id)
return
else:
            # We can degrade gracefully here by detaching a volume without
            # the attachment_id if the volume only has one attachment. This
            # is for backwards compatibility.
attachments = volume.volume_attachment
if len(attachments) > 1:
                # There is more than one attachment for this volume,
                # so an attachment_id is required.
msg = _("Detach volume failed: More than one attachment, "
"but no attachment_id provided.")
LOG.error(msg, resource=volume)
raise exception.InvalidVolume(reason=msg)
elif len(attachments) == 1:
attachment = attachments[0]
else:
                # There aren't any attachments for this volume,
                # so set the status to available and move on.
LOG.info(_LI("Volume detach called, but volume not attached."),
resource=volume)
volume.status = 'available'
volume.attach_status = fields.VolumeAttachStatus.DETACHED
volume.save()
return
self._notify_about_volume_usage(context, volume, "detach.start")
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
LOG.info(_LI('Detaching volume %(volume_id)s from instance '
'%(instance)s.'),
{'volume_id': volume_id,
'instance': attachment.get('instance_uuid')},
resource=volume)
self.driver.detach_volume(context, volume, attachment)
except Exception:
with excutils.save_and_reraise_exception():
self.db.volume_attachment_update(
context, attachment.get('id'), {
'attach_status':
fields.VolumeAttachStatus.ERROR_DETACHING})
        # NOTE(jdg): We used to do an ensure export here to
        # catch upgrades while volumes were attached (E->F);
        # this was necessary to convert in-use volumes from
        # int IDs to UUIDs. That's no longer needed.
        # We're going to remove the export here
        # (delete the iSCSI target).
try:
utils.require_driver_initialized(self.driver)
self.driver.remove_export(context.elevated(), volume)
except exception.DriverNotInitialized:
with excutils.save_and_reraise_exception():
LOG.exception(_LE("Detach volume failed, due to "
"uninitialized driver."),
resource=volume)
except Exception as ex:
LOG.exception(_LE("Detach volume failed, due to "
"remove-export failure."),
resource=volume)
raise exception.RemoveExportException(volume=volume_id,
reason=six.text_type(ex))
volume.finish_detach(attachment.id)
self._notify_about_volume_usage(context, volume, "detach.end")
LOG.info(_LI("Detach volume completed successfully."), resource=volume)
def _create_image_cache_volume_entry(self, ctx, volume_ref,
image_id, image_meta):
"""Create a new image-volume and cache entry for it.
This assumes that the image has already been downloaded and stored
in the volume described by the volume_ref.
"""
image_volume = None
try:
if not self.image_volume_cache.ensure_space(ctx, volume_ref):
LOG.warning(_LW('Unable to ensure space for image-volume in'
' cache. Will skip creating entry for image'
' %(image)s on %(service)s.'),
{'image': image_id,
'service': volume_ref.service_topic_queue})
return
image_volume = self._clone_image_volume(ctx,
volume_ref,
image_meta)
if not image_volume:
LOG.warning(_LW('Unable to clone image_volume for image '
'%(image_id)s will not create cache entry.'),
{'image_id': image_id})
return
self.image_volume_cache.create_cache_entry(
ctx,
image_volume,
image_id,
image_meta
)
except exception.CinderException as e:
LOG.warning(_LW('Failed to create new image-volume cache entry.'
' Error: %(exception)s'), {'exception': e})
if image_volume:
self.delete_volume(ctx, image_volume)
def _clone_image_volume(self, ctx, volume, image_meta):
volume_type_id = volume.get('volume_type_id')
reserve_opts = {'volumes': 1, 'gigabytes': volume.size}
QUOTAS.add_volume_type_opts(ctx, reserve_opts, volume_type_id)
reservations = QUOTAS.reserve(ctx, **reserve_opts)
try:
new_vol_values = {k: volume[k] for k in set(volume.keys()) -
self._VOLUME_CLONE_SKIP_PROPERTIES}
new_vol_values['volume_type_id'] = volume_type_id
new_vol_values['attach_status'] = (
fields.VolumeAttachStatus.DETACHED)
new_vol_values['status'] = 'creating'
new_vol_values['project_id'] = ctx.project_id
new_vol_values['display_name'] = 'image-%s' % image_meta['id']
new_vol_values['source_volid'] = volume.id
LOG.debug('Creating image volume entry: %s.', new_vol_values)
image_volume = objects.Volume(context=ctx, **new_vol_values)
image_volume.create()
except Exception as ex:
            LOG.exception(_LE('Create clone_image_volume: %(volume_id)s '
'for image %(image_id)s, '
'failed (Exception: %(except)s)'),
{'volume_id': volume.id,
'image_id': image_meta['id'],
'except': ex})
QUOTAS.rollback(ctx, reservations)
return
QUOTAS.commit(ctx, reservations,
project_id=new_vol_values['project_id'])
try:
self.create_volume(ctx, image_volume, allow_reschedule=False)
image_volume = objects.Volume.get_by_id(ctx, image_volume.id)
if image_volume.status != 'available':
raise exception.InvalidVolume(_('Volume is not available.'))
self.db.volume_admin_metadata_update(ctx.elevated(),
image_volume.id,
{'readonly': 'True'},
False)
return image_volume
except exception.CinderException:
LOG.exception(_LE('Failed to clone volume %(volume_id)s for '
'image %(image_id)s.'),
{'volume_id': volume.id,
'image_id': image_meta['id']})
try:
self.delete_volume(ctx, image_volume)
except exception.CinderException:
LOG.exception(_LE('Could not delete the image volume %(id)s.'),
{'id': volume.id})
return
def _clone_image_volume_and_add_location(self, ctx, volume, image_service,
image_meta):
"""Create a cloned volume and register its location to the image."""
if (image_meta['disk_format'] != 'raw' or
image_meta['container_format'] != 'bare'):
return False
image_volume_context = ctx
if self.driver.configuration.image_upload_use_internal_tenant:
internal_ctx = context.get_internal_tenant_context()
if internal_ctx:
image_volume_context = internal_ctx
image_volume = self._clone_image_volume(image_volume_context,
volume,
image_meta)
if not image_volume:
return False
# The image_owner metadata should be set before uri is added to
# the image so glance cinder store can check its owner.
image_volume_meta = {'image_owner': ctx.project_id}
self.db.volume_metadata_update(image_volume_context,
image_volume.id,
image_volume_meta,
False)
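        # The Glance cinder store resolves locations of the form
        # 'cinder://<volume-id>' back to the Cinder volume that holds the
        # image data.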
uri = 'cinder://%s' % image_volume.id
image_registered = None
try:
image_registered = image_service.add_location(
ctx, image_meta['id'], uri, {})
except (exception.NotAuthorized, exception.Invalid,
exception.NotFound):
LOG.exception(_LE('Failed to register image volume location '
'%(uri)s.'), {'uri': uri})
if not image_registered:
LOG.warning(_LW('Registration of image volume URI %(uri)s '
'to image %(image_id)s failed.'),
{'uri': uri, 'image_id': image_meta['id']})
try:
self.delete_volume(image_volume_context, image_volume)
except exception.CinderException:
LOG.exception(_LE('Could not delete failed image volume '
'%(id)s.'), {'id': image_volume.id})
return False
image_volume_meta['glance_image_id'] = image_meta['id']
self.db.volume_metadata_update(image_volume_context,
image_volume.id,
image_volume_meta,
False)
return True
def copy_volume_to_image(self, context, volume_id, image_meta):
"""Uploads the specified volume to Glance.
image_meta is a dictionary containing the following keys:
'id', 'container_format', 'disk_format'
"""
payload = {'volume_id': volume_id, 'image_id': image_meta['id']}
image_service = None
try:
volume = objects.Volume.get_by_id(context, volume_id)
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
image_service, image_id = \
glance.get_remote_image_service(context, image_meta['id'])
if (self.driver.configuration.image_upload_use_cinder_backend
and self._clone_image_volume_and_add_location(
context, volume, image_service, image_meta)):
LOG.debug("Registered image volume location to glance "
"image-id: %(image_id)s.",
{'image_id': image_meta['id']},
resource=volume)
else:
self.driver.copy_volume_to_image(context, volume,
image_service, image_meta)
LOG.debug("Uploaded volume to glance image-id: %(image_id)s.",
{'image_id': image_meta['id']},
resource=volume)
except Exception as error:
LOG.error(_LE("Upload volume to image encountered an error "
"(image-id: %(image_id)s)."),
{'image_id': image_meta['id']},
resource=volume)
if image_service is not None:
# Deletes the image if it is in queued or saving state
self._delete_image(context, image_meta['id'], image_service)
with excutils.save_and_reraise_exception():
payload['message'] = six.text_type(error)
if isinstance(error, exception.ImageLimitExceeded):
self.message_api.create(
context,
defined_messages.EventIds.IMAGE_FROM_VOLUME_OVER_QUOTA,
context.project_id,
resource_type=resource_types.VOLUME,
resource_uuid=volume_id)
finally:
self.db.volume_update_status_based_on_attachment(context,
volume_id)
LOG.info(_LI("Copy volume to image completed successfully."),
resource=volume)
def _delete_image(self, context, image_id, image_service):
"""Deletes an image stuck in queued or saving state."""
try:
image_meta = image_service.show(context, image_id)
image_status = image_meta.get('status')
if image_status == 'queued' or image_status == 'saving':
LOG.warning(_LW("Deleting image in unexpected status: "
"%(image_status)s."),
{'image_status': image_status},
resource={'type': 'image', 'id': image_id})
image_service.delete(context, image_id)
except Exception:
LOG.warning(_LW("Image delete encountered an error."),
exc_info=True, resource={'type': 'image',
'id': image_id})
def _parse_connection_options(self, context, volume, conn_info):
# Add qos_specs to connection info
typeid = volume.volume_type_id
specs = None
if typeid:
res = volume_types.get_volume_type_qos_specs(typeid)
qos = res['qos_specs']
            # only pass qos_specs that are designated to be consumed by
# front-end, or both front-end and back-end.
if qos and qos.get('consumer') in ['front-end', 'both']:
specs = qos.get('specs')
qos_spec = dict(qos_specs=specs)
conn_info['data'].update(qos_spec)
# Add access_mode to connection info
volume_metadata = volume.admin_metadata
access_mode = volume_metadata.get('attached_mode')
if access_mode is None:
# NOTE(zhiyan): client didn't call 'os-attach' before
access_mode = ('ro'
if volume_metadata.get('readonly') == 'True'
else 'rw')
conn_info['data']['access_mode'] = access_mode
# Add encrypted flag to connection_info if not set in the driver.
if conn_info['data'].get('encrypted') is None:
encrypted = bool(volume.encryption_key_id)
conn_info['data']['encrypted'] = encrypted
# Add discard flag to connection_info if not set in the driver and
# configured to be reported.
if conn_info['data'].get('discard') is None:
discard_supported = (self.driver.configuration
.safe_get('report_discard_supported'))
if discard_supported:
conn_info['data']['discard'] = True
return conn_info
def initialize_connection(self, context, volume, connector):
"""Prepare volume for connection from host represented by connector.
This method calls the driver initialize_connection and returns
it to the caller. The connector parameter is a dictionary with
information about the host that will connect to the volume in the
following format::
{
'ip': ip,
'initiator': initiator,
}
ip: the ip address of the connecting machine
initiator: the iscsi initiator name of the connecting machine.
This can be None if the connecting machine does not support iscsi
connections.
driver is responsible for doing any necessary security setup and
returning a connection_info dictionary in the following format::
{
'driver_volume_type': driver_volume_type,
'data': data,
}
driver_volume_type: a string to identify the type of volume. This
can be used by the calling code to determine the
strategy for connecting to the volume. This could
be 'iscsi', 'rbd', 'sheepdog', etc.
data: this is the data that the calling code will use to connect
to the volume. Keep in mind that this will be serialized to
json in various places, so it should not contain any non-json
data types.
"""
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
# TODO(jdg): Add deprecation warning
utils.require_driver_initialized(self.driver)
try:
self.driver.validate_connector(connector)
except exception.InvalidConnectorException as err:
raise exception.InvalidInput(reason=six.text_type(err))
except Exception as err:
err_msg = (_("Validate volume connection failed "
"(error: %(err)s).") % {'err': six.text_type(err)})
LOG.exception(err_msg, resource=volume)
raise exception.VolumeBackendAPIException(data=err_msg)
try:
model_update = self.driver.create_export(context.elevated(),
volume, connector)
except exception.CinderException as ex:
msg = _("Create export of volume failed (%s)") % ex.msg
LOG.exception(msg, resource=volume)
raise exception.VolumeBackendAPIException(data=msg)
try:
if model_update:
volume.update(model_update)
volume.save()
except exception.CinderException as ex:
LOG.exception(_LE("Model update failed."), resource=volume)
raise exception.ExportFailure(reason=six.text_type(ex))
try:
conn_info = self.driver.initialize_connection(volume, connector)
except Exception as err:
err_msg = (_("Driver initialize connection failed "
"(error: %(err)s).") % {'err': six.text_type(err)})
LOG.exception(err_msg, resource=volume)
self.driver.remove_export(context.elevated(), volume)
raise exception.VolumeBackendAPIException(data=err_msg)
conn_info = self._parse_connection_options(context, volume, conn_info)
LOG.info(_LI("Initialize volume connection completed successfully."),
resource=volume)
return conn_info
def terminate_connection(self, context, volume_id, connector, force=False):
"""Cleanup connection from host represented by connector.
The format of connector is the same as for initialize_connection.
"""
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
volume_ref = self.db.volume_get(context, volume_id)
try:
self.driver.terminate_connection(volume_ref, connector,
force=force)
except Exception as err:
err_msg = (_('Terminate volume connection failed: %(err)s')
% {'err': six.text_type(err)})
LOG.exception(err_msg, resource=volume_ref)
raise exception.VolumeBackendAPIException(data=err_msg)
LOG.info(_LI("Terminate volume connection completed successfully."),
resource=volume_ref)
def remove_export(self, context, volume_id):
"""Removes an export for a volume."""
utils.require_driver_initialized(self.driver)
volume_ref = self.db.volume_get(context, volume_id)
try:
self.driver.remove_export(context, volume_ref)
except Exception:
msg = _("Remove volume export failed.")
LOG.exception(msg, resource=volume_ref)
raise exception.VolumeBackendAPIException(data=msg)
LOG.info(_LI("Remove volume export completed successfully."),
resource=volume_ref)
def accept_transfer(self, context, volume_id, new_user, new_project):
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
# NOTE(jdg): need elevated context as we haven't "given" the vol
# yet
volume_ref = self.db.volume_get(context.elevated(), volume_id)
        # NOTE(jdg): Some drivers tie provider info (CHAP) to the tenant;
        # for those that do, allow them to return updated model info
model_update = self.driver.accept_transfer(context,
volume_ref,
new_user,
new_project)
if model_update:
try:
self.db.volume_update(context.elevated(),
volume_id,
model_update)
except exception.CinderException:
with excutils.save_and_reraise_exception():
LOG.exception(_LE("Update volume model for "
"transfer operation failed."),
resource=volume_ref)
self.db.volume_update(context.elevated(),
volume_id,
{'status': 'error'})
LOG.info(_LI("Transfer volume completed successfully."),
resource=volume_ref)
return model_update
def _connect_device(self, conn):
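        # conn is the connection_info returned by initialize_connection,
        # e.g. {'driver_volume_type': 'iscsi', 'data': {...}} (illustrative);
        # os-brick selects a connector class based on driver_volume_type.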
use_multipath = self.configuration.use_multipath_for_image_xfer
device_scan_attempts = self.configuration.num_volume_device_scan_tries
protocol = conn['driver_volume_type']
connector = utils.brick_get_connector(
protocol,
use_multipath=use_multipath,
device_scan_attempts=device_scan_attempts,
conn=conn)
vol_handle = connector.connect_volume(conn['data'])
root_access = True
if not connector.check_valid_device(vol_handle['path'], root_access):
if isinstance(vol_handle['path'], six.string_types):
raise exception.DeviceUnavailable(
path=vol_handle['path'],
reason=(_("Unable to access the backend storage via the "
"path %(path)s.") %
{'path': vol_handle['path']}))
else:
raise exception.DeviceUnavailable(
path=None,
reason=(_("Unable to access the backend storage via file "
"handle.")))
return {'conn': conn, 'device': vol_handle, 'connector': connector}
def _attach_volume(self, ctxt, volume, properties, remote=False,
attach_encryptor=False):
status = volume['status']
if remote:
rpcapi = volume_rpcapi.VolumeAPI()
try:
conn = rpcapi.initialize_connection(ctxt, volume, properties)
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Failed to attach volume %(vol)s."),
{'vol': volume['id']})
self.db.volume_update(ctxt, volume['id'],
{'status': status})
else:
conn = self.initialize_connection(ctxt, volume, properties)
attach_info = self._connect_device(conn)
try:
if attach_encryptor and (
volume_types.is_encrypted(ctxt,
volume.volume_type_id)):
encryption = self.db.volume_encryption_metadata_get(
ctxt.elevated(), volume.id)
if encryption:
utils.brick_attach_volume_encryptor(ctxt,
attach_info,
encryption)
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Failed to attach volume encryptor"
" %(vol)s."), {'vol': volume['id']})
self._detach_volume(ctxt, attach_info, volume, properties)
return attach_info
def _detach_volume(self, ctxt, attach_info, volume, properties,
force=False, remote=False,
attach_encryptor=False):
connector = attach_info['connector']
if attach_encryptor and (
volume_types.is_encrypted(ctxt,
volume.volume_type_id)):
encryption = self.db.volume_encryption_metadata_get(
ctxt.elevated(), volume.id)
if encryption:
utils.brick_detach_volume_encryptor(attach_info, encryption)
connector.disconnect_volume(attach_info['conn']['data'],
attach_info['device'])
if remote:
rpcapi = volume_rpcapi.VolumeAPI()
rpcapi.terminate_connection(ctxt, volume, properties, force=force)
rpcapi.remove_export(ctxt, volume)
else:
try:
self.terminate_connection(ctxt, volume['id'], properties,
force=force)
self.remove_export(ctxt, volume['id'])
except Exception as err:
with excutils.save_and_reraise_exception():
LOG.error(_LE('Unable to terminate volume connection: '
'%(err)s.') % {'err': err})
def _copy_volume_data(self, ctxt, src_vol, dest_vol, remote=None):
"""Copy data from src_vol to dest_vol."""
LOG.debug('copy_data_between_volumes %(src)s -> %(dest)s.',
{'src': src_vol['name'], 'dest': dest_vol['name']})
attach_encryptor = False
# If the encryption method or key is changed, we have to
# copy data through dm-crypt.
if volume_types.volume_types_encryption_changed(
ctxt,
src_vol.volume_type_id,
dest_vol.volume_type_id):
attach_encryptor = True
properties = utils.brick_get_connector_properties()
dest_remote = remote in ['dest', 'both']
dest_attach_info = self._attach_volume(
ctxt, dest_vol, properties,
remote=dest_remote,
attach_encryptor=attach_encryptor)
try:
src_remote = remote in ['src', 'both']
src_attach_info = self._attach_volume(
ctxt, src_vol, properties,
remote=src_remote,
attach_encryptor=attach_encryptor)
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Failed to attach source volume for copy."))
self._detach_volume(ctxt, dest_attach_info, dest_vol,
properties, remote=dest_remote,
attach_encryptor=attach_encryptor)
# Check the backend capabilities of migration destination host.
rpcapi = volume_rpcapi.VolumeAPI()
capabilities = rpcapi.get_capabilities(ctxt,
dest_vol.service_topic_queue,
False)
sparse_copy_volume = bool(capabilities and
capabilities.get('sparse_copy_volume',
False))
copy_error = True
try:
size_in_mb = int(src_vol['size']) * units.Ki # vol size is in GB
vol_utils.copy_volume(src_attach_info['device']['path'],
dest_attach_info['device']['path'],
size_in_mb,
self.configuration.volume_dd_blocksize,
sparse=sparse_copy_volume)
copy_error = False
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Failed to copy volume %(src)s to %(dest)s."),
{'src': src_vol['id'], 'dest': dest_vol['id']})
finally:
try:
self._detach_volume(ctxt, dest_attach_info, dest_vol,
properties, force=copy_error,
remote=dest_remote,
attach_encryptor=attach_encryptor)
finally:
self._detach_volume(ctxt, src_attach_info, src_vol,
properties, force=copy_error,
remote=src_remote,
attach_encryptor=attach_encryptor)
def _migrate_volume_generic(self, ctxt, volume, backend, new_type_id):
rpcapi = volume_rpcapi.VolumeAPI()
# Create new volume on remote host
tmp_skip = {'snapshot_id', 'source_volid'}
skip = self._VOLUME_CLONE_SKIP_PROPERTIES | tmp_skip | {'host',
'cluster_name'}
new_vol_values = {k: volume[k] for k in set(volume.keys()) - skip}
if new_type_id:
new_vol_values['volume_type_id'] = new_type_id
if volume_types.volume_types_encryption_changed(
ctxt, volume.volume_type_id, new_type_id):
encryption_key_id = vol_utils.create_encryption_key(
ctxt, self.key_manager, new_type_id)
new_vol_values['encryption_key_id'] = encryption_key_id
new_volume = objects.Volume(
context=ctxt,
host=backend['host'],
cluster_name=backend.get('cluster_name'),
status='creating',
attach_status=fields.VolumeAttachStatus.DETACHED,
migration_status='target:%s' % volume['id'],
**new_vol_values
)
new_volume.create()
rpcapi.create_volume(ctxt, new_volume, None, None,
allow_reschedule=False)
# Wait for new_volume to become ready
starttime = time.time()
deadline = starttime + CONF.migration_create_volume_timeout_secs
# TODO(thangp): Replace get_by_id with refresh when it is available
new_volume = objects.Volume.get_by_id(ctxt, new_volume.id)
tries = 0
while new_volume.status != 'available':
tries += 1
now = time.time()
if new_volume.status == 'error':
msg = _("failed to create new_volume on destination")
self._clean_temporary_volume(ctxt, volume,
new_volume,
clean_db_only=True)
raise exception.VolumeMigrationFailed(reason=msg)
elif now > deadline:
msg = _("timeout creating new_volume on destination")
self._clean_temporary_volume(ctxt, volume,
new_volume,
clean_db_only=True)
raise exception.VolumeMigrationFailed(reason=msg)
else:
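                # Quadratic backoff between status polls: sleep 1, 4, 9, ...
                # seconds until the volume becomes available or the deadline
                # expires.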
time.sleep(tries ** 2)
# TODO(thangp): Replace get_by_id with refresh when it is
# available
new_volume = objects.Volume.get_by_id(ctxt, new_volume.id)
        # Restore the values skipped above (snapshot_id, source_volid) on the
        # new volume record; they were omitted at create time so that the
        # destination backend creates a plain (raw) volume.
tmp_skipped_values = {k: volume[k] for k in tmp_skip if volume.get(k)}
if tmp_skipped_values:
new_volume.update(tmp_skipped_values)
new_volume.save()
# Copy the source volume to the destination volume
try:
attachments = volume.volume_attachment
if not attachments:
# Pre- and post-copy driver-specific actions
self.driver.before_volume_copy(ctxt, volume, new_volume,
remote='dest')
self._copy_volume_data(ctxt, volume, new_volume, remote='dest')
self.driver.after_volume_copy(ctxt, volume, new_volume,
remote='dest')
# The above call is synchronous so we complete the migration
self.migrate_volume_completion(ctxt, volume, new_volume,
error=False)
else:
nova_api = compute.API()
# This is an async call to Nova, which will call the completion
# when it's done
for attachment in attachments:
instance_uuid = attachment['instance_uuid']
nova_api.update_server_volume(ctxt, instance_uuid,
volume.id,
new_volume.id)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_LE(
"Failed to copy volume %(vol1)s to %(vol2)s"), {
'vol1': volume.id, 'vol2': new_volume.id})
self._clean_temporary_volume(ctxt, volume,
new_volume)
def _clean_temporary_volume(self, ctxt, volume, new_volume,
clean_db_only=False):
        # If we're in the migrating phase, we need to clean up the
        # destination volume because the source volume remains in place.
if volume.migration_status == 'migrating':
try:
if clean_db_only:
                    # The temporary volume was not created on the backend;
                    # only the DB record exists
new_volume.destroy()
else:
# The temporary volume is already created
rpcapi = volume_rpcapi.VolumeAPI()
rpcapi.delete_volume(ctxt, new_volume)
except exception.VolumeNotFound:
LOG.info(_LI("Couldn't find the temporary volume "
"%(vol)s in the database. There is no need "
"to clean up this volume."),
{'vol': new_volume.id})
else:
            # If we're in the completing phase don't delete the
            # destination because we may have already deleted the
            # source! But the migration_status in the database should
            # be cleared so the volume can be handled after the
            # migration failure.
try:
new_volume.migration_status = None
new_volume.save()
except exception.VolumeNotFound:
LOG.info(_LI("Couldn't find destination volume "
"%(vol)s in the database. The entry might be "
"successfully deleted during migration "
"completion phase."),
{'vol': new_volume.id})
LOG.warning(_LW("Failed to migrate volume. The destination "
"volume %(vol)s is not deleted since the "
"source volume may have been deleted."),
{'vol': new_volume.id})
def migrate_volume_completion(self, ctxt, volume, new_volume, error=False):
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the migration status updated.
utils.require_driver_initialized(self.driver)
except exception.DriverNotInitialized:
with excutils.save_and_reraise_exception():
volume.migration_status = 'error'
volume.save()
LOG.debug("migrate_volume_completion: completing migration for "
"volume %(vol1)s (temporary volume %(vol2)s",
{'vol1': volume.id, 'vol2': new_volume.id})
rpcapi = volume_rpcapi.VolumeAPI()
orig_volume_status = volume.previous_status
if error:
LOG.info(_LI("migrate_volume_completion is cleaning up an error "
"for volume %(vol1)s (temporary volume %(vol2)s"),
{'vol1': volume['id'], 'vol2': new_volume.id})
rpcapi.delete_volume(ctxt, new_volume)
updates = {'migration_status': 'error',
'status': orig_volume_status}
volume.update(updates)
volume.save()
return volume.id
volume.migration_status = 'completing'
volume.save()
# Detach the source volume (if it fails, don't fail the migration)
        # After detach and refresh, volume_attachment will be None.
# We keep volume_attachment for later attach.
volume_attachments = []
if orig_volume_status == 'in-use':
for attachment in volume.volume_attachment:
# Save the attachments the volume currently have
volume_attachments.append(attachment)
try:
self.detach_volume(ctxt, volume.id, attachment.id)
except Exception as ex:
LOG.error(_LE("Detach migration source volume "
"%(volume.id)s from instance "
"%(instance_id)s failed: %(err)s"),
{'err': ex,
'volume.id': volume.id,
'instance_id': attachment.id},
resource=volume)
# Give driver (new_volume) a chance to update things as needed
# after a successful migration.
        # Note this needs to go through rpc to the host of the new volume;
        # the current host and driver object are for the "existing" volume.
rpcapi.update_migrated_volume(ctxt, volume, new_volume,
orig_volume_status)
volume.refresh()
new_volume.refresh()
# Swap src and dest DB records so we can continue using the src id and
# asynchronously delete the destination id
updated_new = volume.finish_volume_migration(new_volume)
updates = {'status': orig_volume_status,
'previous_status': volume.status,
'migration_status': 'success'}
        # Restore the attachments
if orig_volume_status == 'in-use':
for attachment in volume_attachments:
LOG.debug('Re-attaching: %s', attachment)
rpcapi.attach_volume(ctxt, volume,
attachment.instance_uuid,
attachment.attached_host,
attachment.mountpoint,
'rw')
volume.update(updates)
volume.save()
# Asynchronous deletion of the source volume in the back-end (now
# pointed by the target volume id)
try:
rpcapi.delete_volume(ctxt, updated_new)
except Exception as ex:
LOG.error(_LE('Failed to request async delete of migration source '
'vol %(vol)s: %(err)s'),
{'vol': volume.id, 'err': ex})
LOG.info(_LI("Complete-Migrate volume completed successfully."),
resource=volume)
return volume.id
def migrate_volume(self, ctxt, volume, host, force_host_copy=False,
new_type_id=None):
"""Migrate the volume to the specified host (called on source host)."""
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the migration status updated.
utils.require_driver_initialized(self.driver)
except exception.DriverNotInitialized:
with excutils.save_and_reraise_exception():
volume.migration_status = 'error'
volume.save()
model_update = None
moved = False
status_update = None
if volume.status in ('retyping', 'maintenance'):
status_update = {'status': volume.previous_status}
volume.migration_status = 'migrating'
volume.save()
if not force_host_copy and new_type_id is None:
try:
LOG.debug("Issue driver.migrate_volume.", resource=volume)
moved, model_update = self.driver.migrate_volume(ctxt,
volume,
host)
if moved:
updates = {'host': host['host'],
'cluster_name': host.get('cluster_name'),
'migration_status': 'success',
'previous_status': volume.status}
if status_update:
updates.update(status_update)
if model_update:
updates.update(model_update)
volume.update(updates)
volume.save()
except Exception:
with excutils.save_and_reraise_exception():
updates = {'migration_status': 'error'}
if status_update:
updates.update(status_update)
volume.update(updates)
volume.save()
if not moved:
try:
self._migrate_volume_generic(ctxt, volume, host, new_type_id)
except Exception:
with excutils.save_and_reraise_exception():
updates = {'migration_status': 'error'}
if status_update:
updates.update(status_update)
volume.update(updates)
volume.save()
LOG.info(_LI("Migrate volume completed successfully."),
resource=volume)
@periodic_task.periodic_task
def _report_driver_status(self, context):
if not self.driver.initialized:
if self.driver.configuration.config_group is None:
config_group = ''
else:
config_group = ('(config name %s)' %
self.driver.configuration.config_group)
LOG.warning(_LW("Update driver status failed: %(config_group)s "
"is uninitialized."),
{'config_group': config_group},
resource={'type': 'driver',
'id': self.driver.__class__.__name__})
else:
volume_stats = self.driver.get_volume_stats(refresh=True)
if self.extra_capabilities:
volume_stats.update(self.extra_capabilities)
if volume_stats:
# Append volume stats with 'allocated_capacity_gb'
self._append_volume_stats(volume_stats)
# Append filter and goodness function if needed
volume_stats = (
self._append_filter_goodness_functions(volume_stats))
# queue it to be sent to the Schedulers.
self.update_service_capabilities(volume_stats)
def _append_volume_stats(self, vol_stats):
pools = vol_stats.get('pools', None)
if pools and isinstance(pools, list):
for pool in pools:
pool_name = pool['pool_name']
try:
pool_stats = self.stats['pools'][pool_name]
except KeyError:
# Pool not found in volume manager
pool_stats = dict(allocated_capacity_gb=0)
pool.update(pool_stats)
def _append_filter_goodness_functions(self, volume_stats):
"""Returns volume_stats updated as needed."""
# Append filter_function if needed
if 'filter_function' not in volume_stats:
volume_stats['filter_function'] = (
self.driver.get_filter_function())
# Append goodness_function if needed
if 'goodness_function' not in volume_stats:
volume_stats['goodness_function'] = (
self.driver.get_goodness_function())
return volume_stats
def publish_service_capabilities(self, context):
"""Collect driver status and then publish."""
self._report_driver_status(context)
self._publish_service_capabilities(context)
def _notify_about_volume_usage(self,
context,
volume,
event_suffix,
extra_usage_info=None):
vol_utils.notify_about_volume_usage(
context, volume, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
def _notify_about_snapshot_usage(self,
context,
snapshot,
event_suffix,
extra_usage_info=None):
vol_utils.notify_about_snapshot_usage(
context, snapshot, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
def _notify_about_consistencygroup_usage(self,
context,
group,
event_suffix,
volumes=None,
extra_usage_info=None):
vol_utils.notify_about_consistencygroup_usage(
context, group, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
if not volumes:
volumes = self.db.volume_get_all_by_group(context, group.id)
if volumes:
for volume in volumes:
vol_utils.notify_about_volume_usage(
context, volume, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
def _notify_about_group_usage(self,
context,
group,
event_suffix,
volumes=None,
extra_usage_info=None):
vol_utils.notify_about_group_usage(
context, group, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
if not volumes:
volumes = self.db.volume_get_all_by_generic_group(
context, group.id)
if volumes:
for volume in volumes:
vol_utils.notify_about_volume_usage(
context, volume, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
def _notify_about_cgsnapshot_usage(self,
context,
cgsnapshot,
event_suffix,
snapshots=None,
extra_usage_info=None):
vol_utils.notify_about_cgsnapshot_usage(
context, cgsnapshot, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
if not snapshots:
snapshots = objects.SnapshotList.get_all_for_cgsnapshot(
context, cgsnapshot.id)
if snapshots:
for snapshot in snapshots:
vol_utils.notify_about_snapshot_usage(
context, snapshot, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
def _notify_about_group_snapshot_usage(self,
context,
group_snapshot,
event_suffix,
snapshots=None,
extra_usage_info=None):
vol_utils.notify_about_group_snapshot_usage(
context, group_snapshot, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
if not snapshots:
snapshots = objects.SnapshotList.get_all_for_group_snapshot(
context, group_snapshot.id)
if snapshots:
for snapshot in snapshots:
vol_utils.notify_about_snapshot_usage(
context, snapshot, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
def extend_volume(self, context, volume, new_size, reservations):
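        """Extend a volume to new_size, adjusting quota reservations."""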
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
except exception.DriverNotInitialized:
with excutils.save_and_reraise_exception():
volume.status = 'error_extending'
volume.save()
project_id = volume.project_id
size_increase = (int(new_size)) - volume.size
self._notify_about_volume_usage(context, volume, "resize.start")
try:
self.driver.extend_volume(volume, new_size)
except Exception:
LOG.exception(_LE("Extend volume failed."),
resource=volume)
try:
self.db.volume_update(context, volume.id,
{'status': 'error_extending'})
raise exception.CinderException(_("Volume %s: Error trying "
"to extend volume") %
volume.id)
finally:
QUOTAS.rollback(context, reservations, project_id=project_id)
return
QUOTAS.commit(context, reservations, project_id=project_id)
volume.update({'size': int(new_size), 'status': 'available'})
volume.save()
pool = vol_utils.extract_host(volume.host, 'pool')
if pool is None:
# Legacy volume, put them into default pool
pool = self.driver.configuration.safe_get(
'volume_backend_name') or vol_utils.extract_host(
volume.host, 'pool', True)
try:
self.stats['pools'][pool]['allocated_capacity_gb'] += size_increase
except KeyError:
self.stats['pools'][pool] = dict(
allocated_capacity_gb=size_increase)
self._notify_about_volume_usage(
context, volume, "resize.end",
extra_usage_info={'size': int(new_size)})
LOG.info(_LI("Extend volume completed successfully."),
resource=volume)
def _is_our_backend(self, host, cluster_name):
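        """Check whether the given host/cluster refers to this backend."""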
return ((not cluster_name and
vol_utils.hosts_are_equivalent(self.driver.host, host)) or
(cluster_name and
vol_utils.hosts_are_equivalent(self.driver.cluster_name,
cluster_name)))
def retype(self, context, volume, new_type_id, host,
migration_policy='never', reservations=None,
old_reservations=None):
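        """Change a volume's type, migrating the volume if required."""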
def _retype_error(context, volume, old_reservations,
new_reservations, status_update):
try:
volume.update(status_update)
volume.save()
finally:
QUOTAS.rollback(context, old_reservations)
QUOTAS.rollback(context, new_reservations)
status_update = {'status': volume.previous_status}
if context.project_id != volume.project_id:
project_id = volume.project_id
else:
project_id = context.project_id
try:
# NOTE(flaper87): Verify the driver is enabled
# before going forward. The exception will be caught
# and the volume status updated.
utils.require_driver_initialized(self.driver)
except exception.DriverNotInitialized:
with excutils.save_and_reraise_exception():
# NOTE(flaper87): Other exceptions in this method don't
# set the volume status to error. Should that be done
                # here? Setting the volume back to its original status
# for now.
volume.update(status_update)
volume.save()
# If old_reservations has been passed in from the API, we should
# skip quotas.
# TODO(ntpttr): These reservation checks are left in to be backwards
# compatible with Liberty and can be removed in N.
if not old_reservations:
# Get old reservations
try:
reserve_opts = {'volumes': -1, 'gigabytes': -volume.size}
QUOTAS.add_volume_type_opts(context,
reserve_opts,
volume.volume_type_id)
                # NOTE(wanghao): We don't need to reserve volumes and gigabytes
                # quota for the retype operation since they haven't changed;
                # just reserving the volume_type and type gigabytes is fine.
reserve_opts.pop('volumes')
reserve_opts.pop('gigabytes')
old_reservations = QUOTAS.reserve(context,
project_id=project_id,
**reserve_opts)
except Exception:
volume.update(status_update)
volume.save()
msg = _("Failed to update quota usage while retyping volume.")
LOG.exception(msg, resource=volume)
raise exception.CinderException(msg)
# We already got the new reservations
new_reservations = reservations
# If volume types have the same contents, no need to do anything.
        # Use the admin context to be able to access volume extra_specs
retyped = False
diff, all_equal = volume_types.volume_types_diff(
context.elevated(), volume.volume_type_id, new_type_id)
if all_equal:
retyped = True
# Call driver to try and change the type
retype_model_update = None
        # NOTE(jdg): Check whether the destination host or cluster (depending
        # on whether the volume is in a clustered backend or not) is the same
        # as the current one. If it is not, don't call the driver.retype
        # method; otherwise drivers that implement retype may report success,
        # but it's invalid in the case of a migration.
# We assume that those that support pools do this internally
# so we strip off the pools designation
if (not retyped and
not diff.get('encryption') and
self._is_our_backend(host['host'], host.get('cluster_name'))):
try:
new_type = volume_types.get_volume_type(context.elevated(),
new_type_id)
with volume.obj_as_admin():
ret = self.driver.retype(context,
volume,
new_type,
diff,
host)
# Check if the driver retype provided a model update or
# just a retype indication
                if isinstance(ret, tuple):
retyped, retype_model_update = ret
else:
retyped = ret
if retyped:
LOG.info(_LI("Volume %s: retyped successfully"), volume.id)
except Exception:
retyped = False
LOG.exception(_LE("Volume %s: driver error when trying to "
"retype, falling back to generic "
"mechanism."), volume.id)
# We could not change the type, so we need to migrate the volume, where
# the destination volume will be of the new type
if not retyped:
if migration_policy == 'never':
_retype_error(context, volume, old_reservations,
new_reservations, status_update)
msg = _("Retype requires migration but is not allowed.")
raise exception.VolumeMigrationFailed(reason=msg)
snaps = objects.SnapshotList.get_all_for_volume(context,
volume.id)
if snaps:
_retype_error(context, volume, old_reservations,
new_reservations, status_update)
msg = _("Volume must not have snapshots.")
LOG.error(msg)
raise exception.InvalidVolume(reason=msg)
# Don't allow volume with replicas to be migrated
rep_status = volume.replication_status
if rep_status is not None and rep_status != 'disabled':
_retype_error(context, volume, old_reservations,
new_reservations, status_update)
msg = _("Volume must not be replicated.")
LOG.error(msg)
raise exception.InvalidVolume(reason=msg)
volume.migration_status = 'starting'
volume.save()
try:
self.migrate_volume(context, volume, host,
new_type_id=new_type_id)
except Exception:
with excutils.save_and_reraise_exception():
_retype_error(context, volume, old_reservations,
new_reservations, status_update)
else:
model_update = {'volume_type_id': new_type_id,
'host': host['host'],
'cluster_name': host.get('cluster_name'),
'status': status_update['status']}
if retype_model_update:
model_update.update(retype_model_update)
self._set_replication_status(diff, model_update)
volume.update(model_update)
volume.save()
if old_reservations:
QUOTAS.commit(context, old_reservations, project_id=project_id)
if new_reservations:
QUOTAS.commit(context, new_reservations, project_id=project_id)
self._notify_about_volume_usage(
context, volume, "retype",
extra_usage_info={'volume_type': new_type_id})
self.publish_service_capabilities(context)
LOG.info(_LI("Retype volume completed successfully."),
resource=volume)
@staticmethod
def _set_replication_status(diff, model_update):
"""Update replication_status in model_update if it has changed."""
if not diff or model_update.get('replication_status'):
return
diff_specs = diff.get('extra_specs', {})
replication_diff = diff_specs.get('replication_enabled')
if replication_diff:
is_replicated = vol_utils.is_replicated_str(replication_diff[1])
if is_replicated:
replication_status = fields.ReplicationStatus.ENABLED
else:
replication_status = fields.ReplicationStatus.DISABLED
model_update['replication_status'] = replication_status
def manage_existing(self, ctxt, volume, ref=None):
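        """Manage an existing backend storage object as a Cinder volume."""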
vol_ref = self._run_manage_existing_flow_engine(
ctxt, volume, ref)
self._update_stats_for_managed(vol_ref)
LOG.info(_LI("Manage existing volume completed successfully."),
resource=vol_ref)
return vol_ref.id
def _update_stats_for_managed(self, volume_reference):
# Update volume stats
pool = vol_utils.extract_host(volume_reference.host, 'pool')
if pool is None:
# Legacy volume, put them into default pool
pool = self.driver.configuration.safe_get(
'volume_backend_name') or vol_utils.extract_host(
volume_reference.host, 'pool', True)
try:
self.stats['pools'][pool]['allocated_capacity_gb'] \
+= volume_reference.size
except KeyError:
self.stats['pools'][pool] = dict(
allocated_capacity_gb=volume_reference.size)
def _run_manage_existing_flow_engine(self, ctxt, volume, ref):
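        """Build and run the manage_existing flow and return the volume."""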
try:
flow_engine = manage_existing.get_flow(
ctxt,
self.db,
self.driver,
self.host,
volume,
ref,
)
except Exception:
msg = _("Failed to create manage_existing flow.")
LOG.exception(msg, resource={'type': 'volume', 'id': volume.id})
raise exception.CinderException(msg)
with flow_utils.DynamicLogListener(flow_engine, logger=LOG):
flow_engine.run()
# Fetch created volume from storage
vol_ref = flow_engine.storage.fetch('volume')
return vol_ref
def _get_my_resources(self, ctxt, ovo_class_list):
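        """Get all resources owned by this host (or cluster, if set)."""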
if self.cluster:
filters = {'cluster_name': self.cluster}
else:
filters = {'host': self.host}
        return ovo_class_list.get_all(ctxt, filters=filters)
def _get_my_volumes(self, ctxt):
return self._get_my_resources(ctxt, objects.VolumeList)
def _get_my_snapshots(self, ctxt):
return self._get_my_resources(ctxt, objects.SnapshotList)
def get_manageable_volumes(self, ctxt, marker, limit, offset, sort_keys,
sort_dirs, want_objects=False):
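        """List backend storage objects that can be managed by Cinder."""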
try:
utils.require_driver_initialized(self.driver)
except exception.DriverNotInitialized:
with excutils.save_and_reraise_exception():
LOG.exception(_LE("Listing manageable volumes failed, due "
"to uninitialized driver."))
cinder_volumes = self._get_my_volumes(ctxt)
try:
driver_entries = self.driver.get_manageable_volumes(
cinder_volumes, marker, limit, offset, sort_keys, sort_dirs)
if want_objects:
driver_entries = (objects.ManageableVolumeList.
from_primitives(ctxt, driver_entries))
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_LE("Listing manageable volumes failed, due "
"to driver error."))
return driver_entries
def create_consistencygroup(self, context, group):
"""Creates the consistency group."""
return self._create_group(context, group, False)
def create_group(self, context, group):
"""Creates the group."""
return self._create_group(context, group)
def _create_group(self, context, group, is_generic_group=True):
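        """Create a generic group or consistency group on the backend."""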
context = context.elevated()
# Make sure the host in the DB matches our own when clustered
self._set_resource_host(group)
status = fields.GroupStatus.AVAILABLE
model_update = None
if is_generic_group:
self._notify_about_group_usage(
context, group, "create.start")
else:
self._notify_about_consistencygroup_usage(
context, group, "create.start")
try:
utils.require_driver_initialized(self.driver)
LOG.info(_LI("Group %s: creating"), group.name)
if is_generic_group:
try:
model_update = self.driver.create_group(context,
group)
except NotImplementedError:
cgsnap_type = group_types.get_default_cgsnapshot_type()
if group.group_type_id != cgsnap_type['id']:
model_update = self._create_group_generic(context,
group)
else:
cg, __ = self._convert_group_to_cg(group, [])
model_update = self.driver.create_consistencygroup(
context, cg)
else:
model_update = self.driver.create_consistencygroup(context,
group)
if model_update:
if (model_update['status'] ==
fields.GroupStatus.ERROR):
msg = (_('Create group failed.'))
LOG.error(msg,
resource={'type': 'group',
'id': group.id})
raise exception.VolumeDriverException(message=msg)
else:
group.update(model_update)
group.save()
except Exception:
with excutils.save_and_reraise_exception():
group.status = fields.GroupStatus.ERROR
group.save()
LOG.error(_LE("Group %s: create failed"),
group.name)
group.status = status
group.created_at = timeutils.utcnow()
group.save()
LOG.info(_LI("Group %s: created successfully"),
group.name)
if is_generic_group:
self._notify_about_group_usage(
context, group, "create.end")
else:
self._notify_about_consistencygroup_usage(
context, group, "create.end")
LOG.info(_LI("Create group completed successfully."),
resource={'type': 'group',
'id': group.id})
return group
def create_consistencygroup_from_src(self, context, group,
cgsnapshot=None, source_cg=None):
"""Creates the consistency group from source.
The source can be a CG snapshot or a source CG.
"""
source_name = None
snapshots = None
source_vols = None
try:
volumes = self.db.volume_get_all_by_group(context, group.id)
if cgsnapshot:
try:
# Check if cgsnapshot still exists
cgsnapshot = objects.CGSnapshot.get_by_id(
context, cgsnapshot.id)
except exception.CgSnapshotNotFound:
LOG.error(_LE("Create consistency group "
"from snapshot-%(snap)s failed: "
"SnapshotNotFound."),
{'snap': cgsnapshot.id},
resource={'type': 'consistency_group',
'id': group.id})
raise
source_name = _("snapshot-%s") % cgsnapshot.id
snapshots = objects.SnapshotList.get_all_for_cgsnapshot(
context, cgsnapshot.id)
for snap in snapshots:
if (snap.status not in
VALID_CREATE_CG_SRC_SNAP_STATUS):
msg = (_("Cannot create consistency group "
"%(group)s because snapshot %(snap)s is "
"not in a valid state. Valid states are: "
"%(valid)s.") %
{'group': group.id,
'snap': snap['id'],
'valid': VALID_CREATE_CG_SRC_SNAP_STATUS})
raise exception.InvalidConsistencyGroup(reason=msg)
if source_cg:
try:
source_cg = objects.ConsistencyGroup.get_by_id(
context, source_cg.id)
except exception.ConsistencyGroupNotFound:
LOG.error(_LE("Create consistency group "
"from source cg-%(cg)s failed: "
"ConsistencyGroupNotFound."),
{'cg': source_cg.id},
resource={'type': 'consistency_group',
'id': group.id})
raise
source_name = _("cg-%s") % source_cg.id
source_vols = self.db.volume_get_all_by_group(
context, source_cg.id)
for source_vol in source_vols:
if (source_vol['status'] not in
VALID_CREATE_CG_SRC_CG_STATUS):
msg = (_("Cannot create consistency group "
"%(group)s because source volume "
"%(source_vol)s is not in a valid "
"state. Valid states are: "
"%(valid)s.") %
{'group': group.id,
'source_vol': source_vol['id'],
'valid': VALID_CREATE_CG_SRC_CG_STATUS})
raise exception.InvalidConsistencyGroup(reason=msg)
# Sort source snapshots so that they are in the same order as their
# corresponding target volumes.
sorted_snapshots = None
if cgsnapshot and snapshots:
sorted_snapshots = self._sort_snapshots(volumes, snapshots)
# Sort source volumes so that they are in the same order as their
# corresponding target volumes.
sorted_source_vols = None
if source_cg and source_vols:
sorted_source_vols = self._sort_source_vols(volumes,
source_vols)
self._notify_about_consistencygroup_usage(
context, group, "create.start")
utils.require_driver_initialized(self.driver)
model_update, volumes_model_update = (
self.driver.create_consistencygroup_from_src(
context, group, volumes, cgsnapshot,
sorted_snapshots, source_cg, sorted_source_vols))
if volumes_model_update:
for update in volumes_model_update:
self.db.volume_update(context, update['id'], update)
if model_update:
group.update(model_update)
group.save()
except Exception:
with excutils.save_and_reraise_exception():
group.status = 'error'
group.save()
LOG.error(_LE("Create consistency group "
"from source %(source)s failed."),
{'source': source_name},
resource={'type': 'consistency_group',
'id': group.id})
# Update volume status to 'error' as well.
for vol in volumes:
self.db.volume_update(
context, vol['id'], {'status': 'error'})
now = timeutils.utcnow()
status = 'available'
for vol in volumes:
update = {'status': status, 'created_at': now}
self._update_volume_from_src(context, vol, update, group=group)
self._update_allocated_capacity(vol)
group.status = status
group.created_at = now
group.save()
self._notify_about_consistencygroup_usage(
context, group, "create.end")
LOG.info(_LI("Create consistency group "
"from source-%(source)s completed successfully."),
{'source': source_name},
resource={'type': 'consistency_group',
'id': group.id})
return group
def create_group_from_src(self, context, group,
group_snapshot=None, source_group=None):
"""Creates the group from source.
The source can be a group snapshot or a source group.
"""
source_name = None
snapshots = None
source_vols = None
try:
volumes = objects.VolumeList.get_all_by_generic_group(context,
group.id)
if group_snapshot:
try:
# Check if group_snapshot still exists
group_snapshot = objects.GroupSnapshot.get_by_id(
context, group_snapshot.id)
except exception.GroupSnapshotNotFound:
LOG.error(_LE("Create group "
"from snapshot-%(snap)s failed: "
"SnapshotNotFound."),
{'snap': group_snapshot.id},
resource={'type': 'group',
'id': group.id})
raise
source_name = _("snapshot-%s") % group_snapshot.id
snapshots = objects.SnapshotList.get_all_for_group_snapshot(
context, group_snapshot.id)
for snap in snapshots:
if (snap.status not in
VALID_CREATE_GROUP_SRC_SNAP_STATUS):
msg = (_("Cannot create group "
"%(group)s because snapshot %(snap)s is "
"not in a valid state. Valid states are: "
"%(valid)s.") %
{'group': group.id,
'snap': snap['id'],
'valid': VALID_CREATE_GROUP_SRC_SNAP_STATUS})
raise exception.InvalidGroup(reason=msg)
if source_group:
try:
source_group = objects.Group.get_by_id(
context, source_group.id)
except exception.GroupNotFound:
LOG.error(_LE("Create group "
"from source group-%(group)s failed: "
"GroupNotFound."),
{'group': source_group.id},
resource={'type': 'group',
'id': group.id})
raise
source_name = _("group-%s") % source_group.id
source_vols = objects.VolumeList.get_all_by_generic_group(
context, source_group.id)
for source_vol in source_vols:
if (source_vol.status not in
VALID_CREATE_GROUP_SRC_GROUP_STATUS):
msg = (_("Cannot create group "
"%(group)s because source volume "
"%(source_vol)s is not in a valid "
"state. Valid states are: "
"%(valid)s.") %
{'group': group.id,
'source_vol': source_vol.id,
'valid': VALID_CREATE_GROUP_SRC_GROUP_STATUS})
raise exception.InvalidGroup(reason=msg)
# Sort source snapshots so that they are in the same order as their
# corresponding target volumes.
sorted_snapshots = None
if group_snapshot and snapshots:
sorted_snapshots = self._sort_snapshots(volumes, snapshots)
# Sort source volumes so that they are in the same order as their
# corresponding target volumes.
sorted_source_vols = None
if source_group and source_vols:
sorted_source_vols = self._sort_source_vols(volumes,
source_vols)
self._notify_about_group_usage(
context, group, "create.start")
utils.require_driver_initialized(self.driver)
try:
model_update, volumes_model_update = (
self.driver.create_group_from_src(
context, group, volumes, group_snapshot,
sorted_snapshots, source_group, sorted_source_vols))
except NotImplementedError:
cgsnap_type = group_types.get_default_cgsnapshot_type()
if group.group_type_id != cgsnap_type['id']:
model_update, volumes_model_update = (
self._create_group_from_src_generic(
context, group, volumes, group_snapshot,
sorted_snapshots, source_group,
sorted_source_vols))
else:
cg, volumes = self._convert_group_to_cg(
group, volumes)
cgsnapshot, sorted_snapshots = (
self._convert_group_snapshot_to_cgsnapshot(
group_snapshot, sorted_snapshots, context))
source_cg, sorted_source_vols = (
self._convert_group_to_cg(source_group,
sorted_source_vols))
model_update, volumes_model_update = (
self.driver.create_consistencygroup_from_src(
context, cg, volumes, cgsnapshot,
sorted_snapshots, source_cg, sorted_source_vols))
self._remove_cgsnapshot_id_from_snapshots(sorted_snapshots)
self._remove_consistencygroup_id_from_volumes(volumes)
self._remove_consistencygroup_id_from_volumes(
sorted_source_vols)
if volumes_model_update:
for update in volumes_model_update:
self.db.volume_update(context, update['id'], update)
if model_update:
group.update(model_update)
group.save()
except Exception:
with excutils.save_and_reraise_exception():
group.status = 'error'
group.save()
LOG.error(_LE("Create group "
"from source %(source)s failed."),
{'source': source_name},
resource={'type': 'group',
'id': group.id})
# Update volume status to 'error' as well.
self._remove_consistencygroup_id_from_volumes(volumes)
for vol in volumes:
vol.status = 'error'
vol.save()
now = timeutils.utcnow()
status = 'available'
for vol in volumes:
update = {'status': status, 'created_at': now}
self._update_volume_from_src(context, vol, update, group=group)
self._update_allocated_capacity(vol)
group.status = status
group.created_at = now
group.save()
self._notify_about_group_usage(
context, group, "create.end")
LOG.info(_LI("Create group "
"from source-%(source)s completed successfully."),
{'source': source_name},
resource={'type': 'group',
'id': group.id})
return group
def _create_group_from_src_generic(self, context, group, volumes,
group_snapshot=None, snapshots=None,
source_group=None, source_vols=None):
"""Creates a group from source.
:param context: the context of the caller.
:param group: the Group object to be created.
:param volumes: a list of volume objects in the group.
:param group_snapshot: the GroupSnapshot object as source.
:param snapshots: a list of snapshot objects in group_snapshot.
:param source_group: the Group object as source.
:param source_vols: a list of volume objects in the source_group.
:returns: model_update, volumes_model_update
"""
        for vol in volumes:
            if snapshots:
                for snapshot in snapshots:
                    if vol.snapshot_id == snapshot.id:
                        self.driver.create_volume_from_snapshot(
                            vol, snapshot)
                        break
            if source_vols:
                for source_vol in source_vols:
                    if vol.source_volid == source_vol.id:
                        self.driver.create_cloned_volume(vol, source_vol)
                        break
return None, None
def _sort_snapshots(self, volumes, snapshots):
# Sort source snapshots so that they are in the same order as their
# corresponding target volumes. Each source snapshot in the snapshots
# list should have a corresponding target volume in the volumes list.
if not volumes or not snapshots or len(volumes) != len(snapshots):
msg = _("Input volumes or snapshots are invalid.")
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
sorted_snapshots = []
for vol in volumes:
found_snaps = [snap for snap in snapshots
if snap['id'] == vol['snapshot_id']]
if not found_snaps:
LOG.error(_LE("Source snapshot cannot be found for target "
"volume %(volume_id)s."),
{'volume_id': vol['id']})
raise exception.SnapshotNotFound(
snapshot_id=vol['snapshot_id'])
sorted_snapshots.extend(found_snaps)
return sorted_snapshots
def _sort_source_vols(self, volumes, source_vols):
# Sort source volumes so that they are in the same order as their
# corresponding target volumes. Each source volume in the source_vols
# list should have a corresponding target volume in the volumes list.
if not volumes or not source_vols or len(volumes) != len(source_vols):
msg = _("Input volumes or source volumes are invalid.")
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
sorted_source_vols = []
for vol in volumes:
found_source_vols = [source_vol for source_vol in source_vols
if source_vol['id'] == vol['source_volid']]
if not found_source_vols:
LOG.error(_LE("Source volumes cannot be found for target "
"volume %(volume_id)s."),
{'volume_id': vol['id']})
raise exception.VolumeNotFound(
volume_id=vol['source_volid'])
sorted_source_vols.extend(found_source_vols)
return sorted_source_vols
def _update_volume_from_src(self, context, vol, update, group=None):
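        """Copy bootable/multiattach and image metadata from the source."""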
try:
snapshot_id = vol.get('snapshot_id')
source_volid = vol.get('source_volid')
if snapshot_id:
snapshot = objects.Snapshot.get_by_id(context, snapshot_id)
orig_vref = self.db.volume_get(context,
snapshot.volume_id)
if orig_vref.bootable:
update['bootable'] = True
self.db.volume_glance_metadata_copy_to_volume(
context, vol['id'], snapshot_id)
if source_volid:
source_vol = objects.Volume.get_by_id(context, source_volid)
if source_vol.bootable:
update['bootable'] = True
self.db.volume_glance_metadata_copy_from_volume_to_volume(
context, source_volid, vol['id'])
if source_vol.multiattach:
update['multiattach'] = True
except exception.SnapshotNotFound:
LOG.error(_LE("Source snapshot %(snapshot_id)s cannot be found."),
{'snapshot_id': vol['snapshot_id']})
self.db.volume_update(context, vol['id'],
{'status': 'error'})
if group:
group.status = 'error'
group.save()
raise
except exception.VolumeNotFound:
LOG.error(_LE("The source volume %(volume_id)s "
"cannot be found."),
{'volume_id': snapshot.volume_id})
self.db.volume_update(context, vol['id'],
{'status': 'error'})
if group:
group.status = 'error'
group.save()
raise
except exception.CinderException as ex:
LOG.error(_LE("Failed to update %(volume_id)s"
" metadata using the provided snapshot"
" %(snapshot_id)s metadata."),
{'volume_id': vol['id'],
'snapshot_id': vol['snapshot_id']})
self.db.volume_update(context, vol['id'],
{'status': 'error'})
if group:
group.status = 'error'
group.save()
raise exception.MetadataCopyFailure(reason=six.text_type(ex))
self.db.volume_update(context, vol['id'], update)
def _update_allocated_capacity(self, vol):
# Update allocated capacity in volume stats
pool = vol_utils.extract_host(vol['host'], 'pool')
if pool is None:
# Legacy volume, put them into default pool
pool = self.driver.configuration.safe_get(
'volume_backend_name') or vol_utils.extract_host(
vol['host'], 'pool', True)
try:
self.stats['pools'][pool]['allocated_capacity_gb'] += (
vol['size'])
except KeyError:
self.stats['pools'][pool] = dict(
allocated_capacity_gb=vol['size'])
def delete_consistencygroup(self, context, group):
"""Deletes consistency group and the volumes in the group."""
context = context.elevated()
project_id = group.project_id
if context.project_id != group.project_id:
project_id = group.project_id
else:
project_id = context.project_id
volumes = objects.VolumeList.get_all_by_group(context, group.id)
for volume in volumes:
if (volume.attach_status ==
fields.VolumeAttachStatus.ATTACHED):
# Volume is still attached, need to detach first
raise exception.VolumeAttached(volume_id=volume.id)
self._check_is_our_resource(volume)
self._notify_about_consistencygroup_usage(
context, group, "delete.start")
volumes_model_update = None
model_update = None
try:
utils.require_driver_initialized(self.driver)
model_update, volumes_model_update = (
self.driver.delete_consistencygroup(context, group, volumes))
if volumes_model_update:
for volume in volumes_model_update:
update = {'status': volume['status']}
self.db.volume_update(context, volume['id'],
update)
# If we failed to delete a volume, make sure the status
# for the cg is set to error as well
if (volume['status'] in ['error_deleting', 'error'] and
model_update['status'] not in
['error_deleting', 'error']):
model_update['status'] = volume['status']
if model_update:
if model_update['status'] in ['error_deleting', 'error']:
msg = (_('Delete consistency group failed.'))
LOG.error(msg,
resource={'type': 'consistency_group',
'id': group.id})
raise exception.VolumeDriverException(message=msg)
else:
group.update(model_update)
group.save()
except Exception:
with excutils.save_and_reraise_exception():
group.status = 'error'
group.save()
# Update volume status to 'error' if driver returns
# None for volumes_model_update.
if not volumes_model_update:
for vol in volumes:
vol.status = 'error'
vol.save()
# Get reservations for group
try:
reserve_opts = {'consistencygroups': -1}
cgreservations = CGQUOTAS.reserve(context,
project_id=project_id,
**reserve_opts)
except Exception:
cgreservations = None
LOG.exception(_LE("Delete consistency group "
"failed to update usages."),
resource={'type': 'consistency_group',
'id': group.id})
for volume in volumes:
# Get reservations for volume
try:
reserve_opts = {'volumes': -1,
'gigabytes': -volume.size}
QUOTAS.add_volume_type_opts(context,
reserve_opts,
volume.volume_type_id)
reservations = QUOTAS.reserve(context,
project_id=project_id,
**reserve_opts)
except Exception:
reservations = None
LOG.exception(_LE("Delete consistency group "
"failed to update usages."),
resource={'type': 'consistency_group',
'id': group.id})
# Delete glance metadata if it exists
self.db.volume_glance_metadata_delete_by_volume(context, volume.id)
self.db.volume_destroy(context, volume.id)
# Commit the reservations
if reservations:
QUOTAS.commit(context, reservations, project_id=project_id)
self.stats['allocated_capacity_gb'] -= volume.size
if cgreservations:
CGQUOTAS.commit(context, cgreservations,
project_id=project_id)
group.destroy()
self._notify_about_consistencygroup_usage(
context, group, "delete.end", volumes)
self.publish_service_capabilities(context)
LOG.info(_LI("Delete consistency group "
"completed successfully."),
resource={'type': 'consistency_group',
'id': group.id})
def delete_group(self, context, group):
"""Deletes group and the volumes in the group."""
context = context.elevated()
project_id = group.project_id
if context.project_id != group.project_id:
project_id = group.project_id
else:
project_id = context.project_id
volumes = objects.VolumeList.get_all_by_generic_group(
context, group.id)
for vol_obj in volumes:
            if vol_obj.attach_status == fields.VolumeAttachStatus.ATTACHED:
# Volume is still attached, need to detach first
raise exception.VolumeAttached(volume_id=vol_obj.id)
self._check_is_our_resource(vol_obj)
self._notify_about_group_usage(
context, group, "delete.start")
volumes_model_update = None
model_update = None
try:
utils.require_driver_initialized(self.driver)
try:
model_update, volumes_model_update = (
self.driver.delete_group(context, group, volumes))
except NotImplementedError:
cgsnap_type = group_types.get_default_cgsnapshot_type()
if group.group_type_id != cgsnap_type['id']:
model_update, volumes_model_update = (
self._delete_group_generic(context, group, volumes))
else:
cg, volumes = self._convert_group_to_cg(
group, volumes)
model_update, volumes_model_update = (
self.driver.delete_consistencygroup(context, cg,
volumes))
self._remove_consistencygroup_id_from_volumes(volumes)
if volumes_model_update:
for update in volumes_model_update:
# If we failed to delete a volume, make sure the
# status for the group is set to error as well
if (update['status'] in ['error_deleting', 'error']
and model_update['status'] not in
['error_deleting', 'error']):
model_update['status'] = update['status']
self.db.volumes_update(context, volumes_model_update)
if model_update:
if model_update['status'] in ['error_deleting', 'error']:
msg = (_('Delete group failed.'))
LOG.error(msg,
resource={'type': 'group',
'id': group.id})
raise exception.VolumeDriverException(message=msg)
else:
group.update(model_update)
group.save()
except Exception:
with excutils.save_and_reraise_exception():
group.status = 'error'
group.save()
# Update volume status to 'error' if driver returns
# None for volumes_model_update.
if not volumes_model_update:
self._remove_consistencygroup_id_from_volumes(volumes)
for vol_obj in volumes:
vol_obj.status = 'error'
vol_obj.save()
# Get reservations for group
try:
reserve_opts = {'groups': -1}
grpreservations = GROUP_QUOTAS.reserve(context,
project_id=project_id,
**reserve_opts)
except Exception:
grpreservations = None
LOG.exception(_LE("Delete group "
"failed to update usages."),
resource={'type': 'group',
'id': group.id})
for vol in volumes:
# Get reservations for volume
try:
reserve_opts = {'volumes': -1,
'gigabytes': -vol.size}
QUOTAS.add_volume_type_opts(context,
reserve_opts,
vol.volume_type_id)
reservations = QUOTAS.reserve(context,
project_id=project_id,
**reserve_opts)
except Exception:
reservations = None
LOG.exception(_LE("Delete group "
"failed to update usages."),
resource={'type': 'group',
'id': group.id})
# Delete glance metadata if it exists
self.db.volume_glance_metadata_delete_by_volume(context, vol.id)
vol.destroy()
# Commit the reservations
if reservations:
QUOTAS.commit(context, reservations, project_id=project_id)
self.stats['allocated_capacity_gb'] -= vol.size
if grpreservations:
GROUP_QUOTAS.commit(context, grpreservations,
project_id=project_id)
group.destroy()
self._notify_about_group_usage(
context, group, "delete.end")
self.publish_service_capabilities(context)
LOG.info(_LI("Delete group "
"completed successfully."),
resource={'type': 'group',
'id': group.id})
def _convert_group_to_cg(self, group, volumes):
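        """Build a ConsistencyGroup view of a generic group."""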
if not group:
return None, None
cg = consistencygroup.ConsistencyGroup()
cg.from_group(group)
for vol in volumes:
vol.consistencygroup_id = vol.group_id
vol.consistencygroup = cg
return cg, volumes
def _remove_consistencygroup_id_from_volumes(self, volumes):
if not volumes:
return
for vol in volumes:
vol.consistencygroup_id = None
vol.consistencygroup = None
def _convert_group_snapshot_to_cgsnapshot(self, group_snapshot, snapshots,
ctxt):
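        """Build a CGSnapshot view of a generic group snapshot."""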
if not group_snapshot:
return None, None
cgsnap = cgsnapshot.CGSnapshot()
cgsnap.from_group_snapshot(group_snapshot)
# Populate consistencygroup object
grp = objects.Group.get_by_id(ctxt, group_snapshot.group_id)
cg, __ = self._convert_group_to_cg(grp, [])
cgsnap.consistencygroup = cg
for snap in snapshots:
snap.cgsnapshot_id = snap.group_snapshot_id
snap.cgsnapshot = cgsnap
return cgsnap, snapshots
def _remove_cgsnapshot_id_from_snapshots(self, snapshots):
if not snapshots:
return
for snap in snapshots:
snap.cgsnapshot_id = None
snap.cgsnapshot = None
def _create_group_generic(self, context, group):
"""Creates a group."""
        # A group entry is already created in the db. Just return a status.
model_update = {'status': fields.GroupStatus.AVAILABLE,
'created_at': timeutils.utcnow()}
return model_update
def _delete_group_generic(self, context, group, volumes):
"""Deletes a group and volumes in the group."""
model_update = {'status': group.status}
volume_model_updates = []
for volume_ref in volumes:
volume_model_update = {'id': volume_ref.id}
try:
self.driver.remove_export(context, volume_ref)
self.driver.delete_volume(volume_ref)
volume_model_update['status'] = 'deleted'
except exception.VolumeIsBusy:
volume_model_update['status'] = 'available'
except Exception:
volume_model_update['status'] = 'error'
model_update['status'] = fields.GroupStatus.ERROR
volume_model_updates.append(volume_model_update)
return model_update, volume_model_updates
def _update_group_generic(self, context, group,
add_volumes=None, remove_volumes=None):
"""Updates a group."""
# NOTE(xyang): The volume manager adds/removes the volume to/from the
# group in the database. This default implementation does not do
# anything in the backend storage.
return None, None, None
def update_consistencygroup(self, context, group,
add_volumes=None, remove_volumes=None):
"""Updates consistency group.
Update consistency group by adding volumes to the group,
or removing volumes from the group.
"""
add_volumes_ref = []
remove_volumes_ref = []
add_volumes_list = []
remove_volumes_list = []
if add_volumes:
add_volumes_list = add_volumes.split(',')
if remove_volumes:
remove_volumes_list = remove_volumes.split(',')
for add_vol in add_volumes_list:
try:
add_vol_ovo = objects.Volume.get_by_id(context, add_vol)
except exception.VolumeNotFound:
LOG.error(_LE("Update consistency group "
"failed to add volume-%(volume_id)s: "
"VolumeNotFound."),
{'volume_id': add_vol},
resource={'type': 'consistency_group',
'id': group.id})
raise
if add_vol_ovo.status not in VALID_ADD_VOL_TO_CG_STATUS:
msg = (_("Cannot add volume %(volume_id)s to consistency "
"group %(group_id)s because volume is in an invalid "
"state: %(status)s. Valid states are: %(valid)s.") %
{'volume_id': add_vol_ovo.id,
'group_id': group.id,
'status': add_vol_ovo.status,
'valid': VALID_ADD_VOL_TO_CG_STATUS})
raise exception.InvalidVolume(reason=msg)
self._check_is_our_resource(add_vol_ovo)
add_volumes_ref.append(add_vol_ovo)
for remove_vol in remove_volumes_list:
try:
remove_vol_ref = self.db.volume_get(context, remove_vol)
except exception.VolumeNotFound:
LOG.error(_LE("Update consistency group "
"failed to remove volume-%(volume_id)s: "
"VolumeNotFound."),
{'volume_id': remove_vol},
resource={'type': 'consistency_group',
'id': group.id})
raise
if remove_vol_ref['status'] not in VALID_REMOVE_VOL_FROM_CG_STATUS:
msg = (_("Cannot remove volume %(volume_id)s from consistency "
"group %(group_id)s because volume is in an invalid "
"state: %(status)s. Valid states are: %(valid)s.") %
{'volume_id': remove_vol_ref['id'],
'group_id': group.id,
'status': remove_vol_ref['status'],
'valid': VALID_REMOVE_VOL_FROM_CG_STATUS})
raise exception.InvalidVolume(reason=msg)
remove_volumes_ref.append(remove_vol_ref)
self._notify_about_consistencygroup_usage(
context, group, "update.start")
try:
utils.require_driver_initialized(self.driver)
model_update, add_volumes_update, remove_volumes_update = (
self.driver.update_consistencygroup(
context, group,
add_volumes=add_volumes_ref,
remove_volumes=remove_volumes_ref))
if add_volumes_update:
for update in add_volumes_update:
self.db.volume_update(context, update['id'], update)
if remove_volumes_update:
for update in remove_volumes_update:
self.db.volume_update(context, update['id'], update)
if model_update:
if model_update['status'] in (
[fields.ConsistencyGroupStatus.ERROR]):
msg = (_('Error occurred when updating consistency group '
'%s.') % group.id)
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
group.update(model_update)
group.save()
except exception.VolumeDriverException:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Error occurred in the volume driver when "
"updating consistency group %(group_id)s."),
{'group_id': group.id})
group.status = 'error'
group.save()
for add_vol in add_volumes_ref:
self.db.volume_update(context, add_vol['id'],
{'status': 'error'})
for rem_vol in remove_volumes_ref:
self.db.volume_update(context, rem_vol['id'],
{'status': 'error'})
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Error occurred when updating consistency "
"group %(group_id)s."),
{'group_id': group.id})
group.status = 'error'
group.save()
for add_vol in add_volumes_ref:
self.db.volume_update(context, add_vol['id'],
{'status': 'error'})
for rem_vol in remove_volumes_ref:
self.db.volume_update(context, rem_vol['id'],
{'status': 'error'})
now = timeutils.utcnow()
group.status = 'available'
        group.updated_at = now
group.save()
for add_vol in add_volumes_ref:
self.db.volume_update(context, add_vol['id'],
{'consistencygroup_id': group.id,
'updated_at': now})
for rem_vol in remove_volumes_ref:
self.db.volume_update(context, rem_vol['id'],
{'consistencygroup_id': None,
'updated_at': now})
self._notify_about_consistencygroup_usage(
context, group, "update.end")
LOG.info(_LI("Update consistency group "
"completed successfully."),
resource={'type': 'consistency_group',
'id': group.id})
def update_group(self, context, group,
add_volumes=None, remove_volumes=None):
"""Updates group.
Update group by adding volumes to the group,
or removing volumes from the group.
"""
add_volumes_ref = []
remove_volumes_ref = []
add_volumes_list = []
remove_volumes_list = []
if add_volumes:
add_volumes_list = add_volumes.split(',')
if remove_volumes:
remove_volumes_list = remove_volumes.split(',')
for add_vol in add_volumes_list:
try:
add_vol_ref = objects.Volume.get_by_id(context, add_vol)
except exception.VolumeNotFound:
LOG.error(_LE("Update group "
"failed to add volume-%(volume_id)s: "
"VolumeNotFound."),
                          {'volume_id': add_vol},
resource={'type': 'group',
'id': group.id})
raise
if add_vol_ref.status not in VALID_ADD_VOL_TO_GROUP_STATUS:
msg = (_("Cannot add volume %(volume_id)s to "
"group %(group_id)s because volume is in an invalid "
"state: %(status)s. Valid states are: %(valid)s.") %
{'volume_id': add_vol_ref.id,
'group_id': group.id,
'status': add_vol_ref.status,
'valid': VALID_ADD_VOL_TO_GROUP_STATUS})
raise exception.InvalidVolume(reason=msg)
self._check_is_our_resource(add_vol_ref)
add_volumes_ref.append(add_vol_ref)
for remove_vol in remove_volumes_list:
try:
remove_vol_ref = objects.Volume.get_by_id(context, remove_vol)
except exception.VolumeNotFound:
LOG.error(_LE("Update group "
"failed to remove volume-%(volume_id)s: "
"VolumeNotFound."),
                          {'volume_id': remove_vol},
resource={'type': 'group',
'id': group.id})
raise
if (remove_vol_ref.status not in
VALID_REMOVE_VOL_FROM_GROUP_STATUS):
msg = (_("Cannot remove volume %(volume_id)s from "
"group %(group_id)s because volume is in an invalid "
"state: %(status)s. Valid states are: %(valid)s.") %
{'volume_id': remove_vol_ref.id,
'group_id': group.id,
'status': remove_vol_ref.status,
'valid': VALID_REMOVE_VOL_FROM_GROUP_STATUS})
raise exception.InvalidVolume(reason=msg)
remove_volumes_ref.append(remove_vol_ref)
self._notify_about_group_usage(
context, group, "update.start")
try:
utils.require_driver_initialized(self.driver)
try:
model_update, add_volumes_update, remove_volumes_update = (
self.driver.update_group(
context, group,
add_volumes=add_volumes_ref,
remove_volumes=remove_volumes_ref))
except NotImplementedError:
cgsnap_type = group_types.get_default_cgsnapshot_type()
if group.group_type_id != cgsnap_type['id']:
model_update, add_volumes_update, remove_volumes_update = (
self._update_group_generic(
context, group,
add_volumes=add_volumes_ref,
remove_volumes=remove_volumes_ref))
else:
cg, remove_volumes_ref = self._convert_group_to_cg(
group, remove_volumes_ref)
model_update, add_volumes_update, remove_volumes_update = (
self.driver.update_consistencygroup(
context, group,
add_volumes=add_volumes_ref,
remove_volumes=remove_volumes_ref))
self._remove_consistencygroup_id_from_volumes(
remove_volumes_ref)
if add_volumes_update:
self.db.volumes_update(context, add_volumes_update)
if remove_volumes_update:
self.db.volumes_update(context, remove_volumes_update)
if model_update:
if model_update['status'] in (
[fields.GroupStatus.ERROR]):
msg = (_('Error occurred when updating group '
'%s.') % group.id)
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
group.update(model_update)
group.save()
except exception.VolumeDriverException:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Error occurred in the volume driver when "
"updating group %(group_id)s."),
{'group_id': group.id})
group.status = 'error'
group.save()
for add_vol in add_volumes_ref:
add_vol.status = 'error'
add_vol.save()
self._remove_consistencygroup_id_from_volumes(
remove_volumes_ref)
for rem_vol in remove_volumes_ref:
rem_vol.status = 'error'
rem_vol.save()
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Error occurred when updating "
"group %(group_id)s."),
{'group_id': group.id})
group.status = 'error'
group.save()
for add_vol in add_volumes_ref:
add_vol.status = 'error'
add_vol.save()
for rem_vol in remove_volumes_ref:
rem_vol.status = 'error'
rem_vol.save()
group.status = 'available'
group.save()
for add_vol in add_volumes_ref:
add_vol.group_id = group.id
add_vol.save()
for rem_vol in remove_volumes_ref:
rem_vol.group_id = None
rem_vol.save()
self._notify_about_group_usage(
context, group, "update.end")
LOG.info(_LI("Update group completed successfully."),
resource={'type': 'group',
'id': group.id})
def create_cgsnapshot(self, context, cgsnapshot):
"""Creates the cgsnapshot."""
caller_context = context
context = context.elevated()
LOG.info(_LI("Cgsnapshot %s: creating."), cgsnapshot.id)
snapshots = objects.SnapshotList.get_all_for_cgsnapshot(
context, cgsnapshot.id)
self._notify_about_cgsnapshot_usage(
context, cgsnapshot, "create.start")
snapshots_model_update = None
model_update = None
try:
utils.require_driver_initialized(self.driver)
LOG.debug("Cgsnapshot %(cgsnap_id)s: creating.",
{'cgsnap_id': cgsnapshot.id})
# Pass context so that drivers that want to use it, can,
# but it is not a requirement for all drivers.
cgsnapshot.context = caller_context
for snapshot in snapshots:
snapshot.context = caller_context
model_update, snapshots_model_update = (
self.driver.create_cgsnapshot(context, cgsnapshot,
snapshots))
if snapshots_model_update:
for snap_model in snapshots_model_update:
# Update db for snapshot.
# NOTE(xyang): snapshots is a list of snapshot objects.
# snapshots_model_update should be a list of dicts.
self.db.snapshot_update(context,
snap_model['id'],
snap_model)
if (snap_model['status'] in [
fields.SnapshotStatus.ERROR_DELETING,
fields.SnapshotStatus.ERROR] and
model_update['status'] not in
['error_deleting', 'error']):
model_update['status'] = snap_model['status']
if model_update:
if model_update['status'] == 'error':
msg = (_('Error occurred when creating cgsnapshot '
'%s.') % cgsnapshot.id)
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
cgsnapshot.update(model_update)
cgsnapshot.save()
except exception.CinderException:
with excutils.save_and_reraise_exception():
cgsnapshot.status = 'error'
cgsnapshot.save()
# Update snapshot status to 'error' if driver returns
# None for snapshots_model_update.
if not snapshots_model_update:
for snapshot in snapshots:
snapshot.status = fields.SnapshotStatus.ERROR
snapshot.save()
for snapshot in snapshots:
volume_id = snapshot['volume_id']
snapshot_id = snapshot['id']
vol_ref = self.db.volume_get(context, volume_id)
if vol_ref.bootable:
try:
self.db.volume_glance_metadata_copy_to_snapshot(
context, snapshot_id, volume_id)
except exception.GlanceMetadataNotFound:
                    # If the volume is not created from an image, no glance
                    # metadata would be available for that volume in the
                    # volume glance metadata table.
pass
except exception.CinderException as ex:
LOG.error(_LE("Failed updating %(snapshot_id)s"
" metadata using the provided volumes"
" %(volume_id)s metadata"),
{'volume_id': volume_id,
'snapshot_id': snapshot_id})
# TODO(thangp): Switch over to use snapshot.update()
# after cgsnapshot-objects bugs are fixed
self.db.snapshot_update(
context, snapshot_id, {
'status': fields.SnapshotStatus.ERROR})
raise exception.MetadataCopyFailure(
reason=six.text_type(ex))
self.db.snapshot_update(context,
snapshot['id'],
{'status': fields.SnapshotStatus.AVAILABLE,
'progress': '100%'})
cgsnapshot.status = 'available'
cgsnapshot.save()
LOG.info(_LI("cgsnapshot %s: created successfully"),
cgsnapshot.id)
self._notify_about_cgsnapshot_usage(
context, cgsnapshot, "create.end")
return cgsnapshot
def create_group_snapshot(self, context, group_snapshot):
"""Creates the group_snapshot."""
caller_context = context
context = context.elevated()
LOG.info(_LI("GroupSnapshot %s: creating."), group_snapshot.id)
snapshots = objects.SnapshotList.get_all_for_group_snapshot(
context, group_snapshot.id)
self._notify_about_group_snapshot_usage(
context, group_snapshot, "create.start")
snapshots_model_update = None
model_update = None
try:
utils.require_driver_initialized(self.driver)
LOG.debug("Group snapshot %(grp_snap_id)s: creating.",
{'grp_snap_id': group_snapshot.id})
# Pass context so that drivers that want to use it, can,
# but it is not a requirement for all drivers.
group_snapshot.context = caller_context
for snapshot in snapshots:
snapshot.context = caller_context
try:
model_update, snapshots_model_update = (
self.driver.create_group_snapshot(context, group_snapshot,
snapshots))
except NotImplementedError:
cgsnap_type = group_types.get_default_cgsnapshot_type()
if group_snapshot.group_type_id != cgsnap_type['id']:
model_update, snapshots_model_update = (
self._create_group_snapshot_generic(
context, group_snapshot, snapshots))
else:
cgsnapshot, snapshots = (
self._convert_group_snapshot_to_cgsnapshot(
group_snapshot, snapshots, context))
model_update, snapshots_model_update = (
self.driver.create_cgsnapshot(context, cgsnapshot,
snapshots))
self._remove_cgsnapshot_id_from_snapshots(snapshots)
if snapshots_model_update:
for snap_model in snapshots_model_update:
# Update db for snapshot.
# NOTE(xyang): snapshots is a list of snapshot objects.
# snapshots_model_update should be a list of dicts.
snap_id = snap_model.pop('id')
snap_obj = objects.Snapshot.get_by_id(context, snap_id)
snap_obj.update(snap_model)
snap_obj.save()
if (snap_model['status'] in [
fields.SnapshotStatus.ERROR_DELETING,
fields.SnapshotStatus.ERROR] and
model_update['status'] not in
['error_deleting', 'error']):
model_update['status'] = snap_model['status']
if model_update:
if model_update['status'] == 'error':
msg = (_('Error occurred when creating group_snapshot '
'%s.') % group_snapshot.id)
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
group_snapshot.update(model_update)
group_snapshot.save()
except exception.CinderException:
with excutils.save_and_reraise_exception():
group_snapshot.status = 'error'
group_snapshot.save()
# Update snapshot status to 'error' if driver returns
# None for snapshots_model_update.
self._remove_cgsnapshot_id_from_snapshots(snapshots)
if not snapshots_model_update:
for snapshot in snapshots:
snapshot.status = fields.SnapshotStatus.ERROR
snapshot.save()
for snapshot in snapshots:
volume_id = snapshot.volume_id
snapshot_id = snapshot.id
vol_obj = objects.Volume.get_by_id(context, volume_id)
if vol_obj.bootable:
try:
self.db.volume_glance_metadata_copy_to_snapshot(
context, snapshot_id, volume_id)
except exception.GlanceMetadataNotFound:
                    # If the volume is not created from an image, no glance
                    # metadata would be available for that volume in the
                    # volume glance metadata table.
pass
except exception.CinderException as ex:
LOG.error(_LE("Failed updating %(snapshot_id)s"
" metadata using the provided volumes"
" %(volume_id)s metadata"),
{'volume_id': volume_id,
'snapshot_id': snapshot_id})
snapshot.status = fields.SnapshotStatus.ERROR
snapshot.save()
raise exception.MetadataCopyFailure(
reason=six.text_type(ex))
snapshot.status = fields.SnapshotStatus.AVAILABLE
snapshot.progress = '100%'
snapshot.save()
group_snapshot.status = 'available'
group_snapshot.save()
LOG.info(_LI("group_snapshot %s: created successfully"),
group_snapshot.id)
self._notify_about_group_snapshot_usage(
context, group_snapshot, "create.end")
return group_snapshot
def _create_group_snapshot_generic(self, context, group_snapshot,
snapshots):
"""Creates a group_snapshot."""
model_update = {'status': 'available'}
snapshot_model_updates = []
for snapshot in snapshots:
snapshot_model_update = {'id': snapshot.id}
try:
self.driver.create_snapshot(snapshot)
snapshot_model_update['status'] = 'available'
except Exception:
snapshot_model_update['status'] = 'error'
model_update['status'] = 'error'
snapshot_model_updates.append(snapshot_model_update)
return model_update, snapshot_model_updates
def _delete_group_snapshot_generic(self, context, group_snapshot,
snapshots):
"""Deletes a group_snapshot."""
model_update = {'status': group_snapshot.status}
snapshot_model_updates = []
for snapshot in snapshots:
snapshot_model_update = {'id': snapshot.id}
try:
self.driver.delete_snapshot(snapshot)
snapshot_model_update['status'] = 'deleted'
except exception.SnapshotIsBusy:
snapshot_model_update['status'] = 'available'
except Exception:
snapshot_model_update['status'] = 'error'
model_update['status'] = 'error'
snapshot_model_updates.append(snapshot_model_update)
return model_update, snapshot_model_updates
def delete_cgsnapshot(self, context, cgsnapshot):
"""Deletes cgsnapshot."""
caller_context = context
context = context.elevated()
project_id = cgsnapshot.project_id
LOG.info(_LI("cgsnapshot %s: deleting"), cgsnapshot.id)
snapshots = objects.SnapshotList.get_all_for_cgsnapshot(
context, cgsnapshot.id)
self._notify_about_cgsnapshot_usage(
context, cgsnapshot, "delete.start")
snapshots_model_update = None
model_update = None
try:
utils.require_driver_initialized(self.driver)
LOG.debug("cgsnapshot %(cgsnap_id)s: deleting",
{'cgsnap_id': cgsnapshot.id})
# Pass context so that drivers that want to use it, can,
# but it is not a requirement for all drivers.
cgsnapshot.context = caller_context
for snapshot in snapshots:
snapshot.context = caller_context
model_update, snapshots_model_update = (
self.driver.delete_cgsnapshot(context, cgsnapshot,
snapshots))
if snapshots_model_update:
for snap_model in snapshots_model_update:
# NOTE(xyang): snapshots is a list of snapshot objects.
# snapshots_model_update should be a list of dicts.
snap = next((item for item in snapshots if
item.id == snap_model['id']), None)
if snap:
snap.status = snap_model['status']
snap.save()
if (snap_model['status'] in
[fields.SnapshotStatus.ERROR_DELETING,
fields.SnapshotStatus.ERROR] and
model_update['status'] not in
['error_deleting', 'error']):
model_update['status'] = snap_model['status']
if model_update:
if model_update['status'] in ['error_deleting', 'error']:
msg = (_('Error occurred when deleting cgsnapshot '
'%s.') % cgsnapshot.id)
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
else:
cgsnapshot.update(model_update)
cgsnapshot.save()
except exception.CinderException:
with excutils.save_and_reraise_exception():
cgsnapshot.status = 'error'
cgsnapshot.save()
# Update snapshot status to 'error' if driver returns
# None for snapshots_model_update.
if not snapshots_model_update:
for snapshot in snapshots:
snapshot.status = fields.SnapshotStatus.ERROR
snapshot.save()
for snapshot in snapshots:
# Get reservations
try:
if CONF.no_snapshot_gb_quota:
reserve_opts = {'snapshots': -1}
else:
reserve_opts = {
'snapshots': -1,
'gigabytes': -snapshot['volume_size'],
}
volume_ref = self.db.volume_get(context, snapshot['volume_id'])
QUOTAS.add_volume_type_opts(context,
reserve_opts,
volume_ref.get('volume_type_id'))
reservations = QUOTAS.reserve(context,
project_id=project_id,
**reserve_opts)
except Exception:
reservations = None
LOG.exception(_LE("Failed to update usages deleting snapshot"))
self.db.volume_glance_metadata_delete_by_snapshot(context,
snapshot['id'])
# TODO(thangp): Switch over to use snapshot.destroy()
# after cgsnapshot-objects bugs are fixed
self.db.snapshot_destroy(context, snapshot['id'])
# Commit the reservations
if reservations:
QUOTAS.commit(context, reservations, project_id=project_id)
cgsnapshot.destroy()
LOG.info(_LI("cgsnapshot %s: deleted successfully"), cgsnapshot.id)
self._notify_about_cgsnapshot_usage(context, cgsnapshot, "delete.end",
snapshots)
def delete_group_snapshot(self, context, group_snapshot):
"""Deletes group_snapshot."""
caller_context = context
context = context.elevated()
project_id = group_snapshot.project_id
LOG.info(_LI("group_snapshot %s: deleting"), group_snapshot.id)
snapshots = objects.SnapshotList.get_all_for_group_snapshot(
context, group_snapshot.id)
self._notify_about_group_snapshot_usage(
context, group_snapshot, "delete.start")
snapshots_model_update = None
model_update = None
try:
utils.require_driver_initialized(self.driver)
LOG.debug("group_snapshot %(grp_snap_id)s: deleting",
{'grp_snap_id': group_snapshot.id})
# Pass context so that drivers that want to use it, can,
# but it is not a requirement for all drivers.
group_snapshot.context = caller_context
for snapshot in snapshots:
snapshot.context = caller_context
try:
model_update, snapshots_model_update = (
self.driver.delete_group_snapshot(context, group_snapshot,
snapshots))
except NotImplementedError:
cgsnap_type = group_types.get_default_cgsnapshot_type()
if group_snapshot.group_type_id != cgsnap_type['id']:
model_update, snapshots_model_update = (
self._delete_group_snapshot_generic(
context, group_snapshot, snapshots))
else:
cgsnapshot, snapshots = (
self._convert_group_snapshot_to_cgsnapshot(
group_snapshot, snapshots, context))
model_update, snapshots_model_update = (
self.driver.delete_cgsnapshot(context, cgsnapshot,
snapshots))
self._remove_cgsnapshot_id_from_snapshots(snapshots)
if snapshots_model_update:
for snap_model in snapshots_model_update:
# NOTE(xyang): snapshots is a list of snapshot objects.
# snapshots_model_update should be a list of dicts.
snap = next((item for item in snapshots if
item.id == snap_model['id']), None)
if snap:
snap_model.pop('id')
snap.update(snap_model)
snap.save()
if (snap_model['status'] in
[fields.SnapshotStatus.ERROR_DELETING,
fields.SnapshotStatus.ERROR] and
model_update['status'] not in
['error_deleting', 'error']):
model_update['status'] = snap_model['status']
if model_update:
if model_update['status'] in ['error_deleting', 'error']:
msg = (_('Error occurred when deleting group_snapshot '
'%s.') % group_snapshot.id)
LOG.error(msg)
raise exception.VolumeDriverException(message=msg)
else:
group_snapshot.update(model_update)
group_snapshot.save()
except exception.CinderException:
with excutils.save_and_reraise_exception():
group_snapshot.status = 'error'
group_snapshot.save()
# Update snapshot status to 'error' if driver returns
# None for snapshots_model_update.
if not snapshots_model_update:
self._remove_cgsnapshot_id_from_snapshots(snapshots)
for snapshot in snapshots:
snapshot.status = fields.SnapshotStatus.ERROR
snapshot.save()
for snapshot in snapshots:
# Get reservations
try:
if CONF.no_snapshot_gb_quota:
reserve_opts = {'snapshots': -1}
else:
reserve_opts = {
'snapshots': -1,
'gigabytes': -snapshot.volume_size,
}
volume_ref = objects.Volume.get_by_id(context,
snapshot.volume_id)
QUOTAS.add_volume_type_opts(context,
reserve_opts,
volume_ref.volume_type_id)
reservations = QUOTAS.reserve(context,
project_id=project_id,
**reserve_opts)
except Exception:
reservations = None
LOG.exception(_LE("Failed to update usages deleting snapshot"))
self.db.volume_glance_metadata_delete_by_snapshot(context,
snapshot.id)
snapshot.destroy()
# Commit the reservations
if reservations:
QUOTAS.commit(context, reservations, project_id=project_id)
group_snapshot.destroy()
LOG.info(_LI("group_snapshot %s: deleted successfully"),
group_snapshot.id)
self._notify_about_group_snapshot_usage(context, group_snapshot,
"delete.end",
snapshots)
def update_migrated_volume(self, ctxt, volume, new_volume, volume_status):
"""Finalize migration process on backend device."""
model_update = None
model_update_default = {'_name_id': new_volume.name_id,
'provider_location':
new_volume.provider_location}
try:
model_update = self.driver.update_migrated_volume(ctxt,
volume,
new_volume,
volume_status)
except NotImplementedError:
# If update_migrated_volume is not implemented for the driver,
# _name_id and provider_location will be set with the values
# from new_volume.
model_update = model_update_default
if model_update:
model_update_default.update(model_update)
# Swap keys that were changed in the source so we keep their values
# in the temporary volume's DB record.
# Need to convert 'metadata' and 'admin_metadata' since
# they are not keys of volume, their corresponding keys are
# 'volume_metadata' and 'volume_admin_metadata'.
model_update_new = dict()
for key in model_update:
if key == 'metadata':
if volume.get('volume_metadata'):
model_update_new[key] = {
metadata['key']: metadata['value']
for metadata in volume.volume_metadata}
elif key == 'admin_metadata':
model_update_new[key] = {
metadata['key']: metadata['value']
for metadata in volume.volume_admin_metadata}
else:
model_update_new[key] = volume[key]
with new_volume.obj_as_admin():
new_volume.update(model_update_new)
new_volume.save()
with volume.obj_as_admin():
volume.update(model_update_default)
volume.save()
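    # Illustrative sketch, not part of the original method: when the driver's
    # model_update contains a 'metadata' key, the loop above swaps in the
    # *source* volume's metadata flattened to a plain dict, e.g. (hypothetical
    # key/value):
    #
    #     model_update_new['metadata'] = {'billing_tag': 'gold'}
    #
    # so the temporary volume's DB record keeps the source volume's values.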
# Replication V2.1 and a/a method
def failover(self, context, secondary_backend_id=None):
"""Failover a backend to a secondary replication target.
        Instructs a replication capable/configured backend to failover
        to one of its secondary replication targets. host=None is
        an acceptable input, and leaves it to the driver to failover
        to the only configured target, or to choose a target on its
        own. All of the host's volumes will be passed on to the driver
        in order for it to determine the replicated volumes on the host,
        if needed.
:param context: security context
:param secondary_backend_id: Specifies backend_id to fail over to
"""
updates = {}
repl_status = fields.ReplicationStatus
svc_host = vol_utils.extract_host(self.host, 'backend')
service = objects.Service.get_by_args(context, svc_host,
constants.VOLUME_BINARY)
volumes = self._get_my_volumes(context)
exception_encountered = True
try:
# For non clustered we can call v2.1 failover_host, but for
# clustered we call a/a failover method. We know a/a method
# exists because BaseVD class wouldn't have started if it didn't.
failover = getattr(self.driver,
'failover' if service.is_clustered
else 'failover_host')
# expected form of volume_update_list:
# [{volume_id: <cinder-volid>, updates: {'provider_id': xxxx....}},
# {volume_id: <cinder-volid>, updates: {'provider_id': xxxx....}}]
active_backend_id, volume_update_list = failover(
context,
volumes,
secondary_id=secondary_backend_id)
exception_encountered = False
except exception.UnableToFailOver:
LOG.exception(_LE("Failed to perform replication failover"))
updates['replication_status'] = repl_status.FAILOVER_ERROR
except exception.InvalidReplicationTarget:
LOG.exception(_LE("Invalid replication target specified "
"for failover"))
# Preserve the replication_status: Status should be failed over if
# we were failing back or if we were failing over from one
# secondary to another secondary. In both cases active_backend_id
# will be set.
if service.active_backend_id:
updates['replication_status'] = repl_status.FAILED_OVER
else:
updates['replication_status'] = repl_status.ENABLED
except exception.VolumeDriverException:
            # NOTE(jdg): Drivers need to be aware that if they fail during
            # a failover sequence, we expect them to clean up and make sure
            # the driver state is such that the original backend is still
            # set as primary, as per driver memory.
LOG.error(_LE("Driver reported error during "
"replication failover."))
updates.update(disabled=True,
replication_status=repl_status.FAILOVER_ERROR)
if exception_encountered:
LOG.error(
_LE("Error encountered during failover on host: "
"%(host)s invalid target ID %(backend_id)s"),
{'host': self.host, 'backend_id':
secondary_backend_id})
self.finish_failover(context, service, updates)
return
if secondary_backend_id == "default":
updates['replication_status'] = repl_status.ENABLED
updates['active_backend_id'] = ''
updates['disabled'] = service.frozen
updates['disabled_reason'] = 'frozen' if service.frozen else ''
else:
updates['replication_status'] = repl_status.FAILED_OVER
updates['active_backend_id'] = active_backend_id
updates['disabled'] = True
updates['disabled_reason'] = 'failed-over'
self.finish_failover(context, service, updates)
for update in volume_update_list:
# Response must include an id key: {volume_id: <cinder-uuid>}
if not update.get('volume_id'):
raise exception.UnableToFailOver(
                    reason=_("Update list does not include volume_id"))
# Key things to consider (attaching failed-over volumes):
# provider_location
# provider_auth
# provider_id
# replication_status
vobj = objects.Volume.get_by_id(context, update['volume_id'])
vobj.update(update.get('updates', {}))
vobj.save()
LOG.info(_LI("Failed over to replication target successfully."))
# TODO(geguileo): In P - remove this
failover_host = failover
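    # Sketch of the expected driver return value for the failover call above
    # (ids and backend names below are made up for illustration):
    #
    #     active_backend_id = 'secondary_site_1'
    #     volume_update_list = [
    #         {'volume_id': '1e5177e7-95e5-4a0f-b170-e45f4b469f6a',
    #          'updates': {'replication_status': 'failed-over'}},
    #     ]
    #
    # Every entry must include 'volume_id'; the nested 'updates' dict is
    # applied to the Volume object in the loop at the end of failover().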
def finish_failover(self, context, service, updates):
"""Completion of the failover locally or via RPC."""
# If the service is clustered, broadcast the service changes to all
# volume services, including this one.
if service.is_clustered:
# We have to update the cluster with the same data, and we do it
# before broadcasting the failover_completed RPC call to prevent
            # races with services that may be starting.
for key, value in updates.items():
setattr(service.cluster, key, value)
service.cluster.save()
rpcapi = volume_rpcapi.VolumeAPI()
rpcapi.failover_completed(context, service, updates)
else:
service.update(updates)
service.save()
def failover_completed(self, context, updates):
"""Finalize failover of this backend.
When a service is clustered and replicated the failover has 2 stages,
one that does the failover of the volumes and another that finalizes
the failover of the services themselves.
        This method takes care of the last part and is called from the service
        doing the failover of the volumes after it has finished processing
        the volumes.
"""
svc_host = vol_utils.extract_host(self.host, 'backend')
service = objects.Service.get_by_args(context, svc_host,
constants.VOLUME_BINARY)
service.update(updates)
try:
self.driver.failover_completed(context, service.active_backend_id)
except Exception:
msg = _('Driver reported error during replication failover '
'completion.')
LOG.exception(msg)
service.disabled = True
service.disabled_reason = msg
service.replication_status = (
fields.ReplicationStatus.ERROR)
service.save()
def freeze_host(self, context):
"""Freeze management plane on this backend.
Basically puts the control/management plane into a
Read Only state. We should handle this in the scheduler,
however this is provided to let the driver know in case it
needs/wants to do something specific on the backend.
:param context: security context
"""
# TODO(jdg): Return from driver? or catch?
# Update status column in service entry
try:
self.driver.freeze_backend(context)
except exception.VolumeDriverException:
# NOTE(jdg): In the case of freeze, we don't really
# need the backend's consent or anything, we'll just
# disable the service, so we can just log this and
# go about our business
LOG.warning(_LW('Error encountered on Cinder backend during '
'freeze operation, service is frozen, however '
'notification to driver has failed.'))
svc_host = vol_utils.extract_host(self.host, 'backend')
service = objects.Service.get_by_args(
context,
svc_host,
constants.VOLUME_BINARY)
service.disabled = True
service.disabled_reason = "frozen"
service.save()
LOG.info(_LI("Set backend status to frozen successfully."))
return True
def thaw_host(self, context):
"""UnFreeze management plane on this backend.
Basically puts the control/management plane back into
a normal state. We should handle this in the scheduler,
however this is provided to let the driver know in case it
needs/wants to do something specific on the backend.
:param context: security context
"""
# TODO(jdg): Return from driver? or catch?
# Update status column in service entry
try:
self.driver.thaw_backend(context)
except exception.VolumeDriverException:
# NOTE(jdg): Thaw actually matters, if this call
# to the backend fails, we're stuck and can't re-enable
LOG.error(_LE('Error encountered on Cinder backend during '
'thaw operation, service will remain frozen.'))
return False
svc_host = vol_utils.extract_host(self.host, 'backend')
service = objects.Service.get_by_args(
context,
svc_host,
constants.VOLUME_BINARY)
service.disabled = False
service.disabled_reason = ""
service.save()
LOG.info(_LI("Thawed backend successfully."))
return True
def manage_existing_snapshot(self, ctxt, snapshot, ref=None):
LOG.debug('manage_existing_snapshot: managing %s.', ref)
try:
flow_engine = manage_existing_snapshot.get_flow(
ctxt,
self.db,
self.driver,
self.host,
snapshot.id,
ref)
except Exception:
msg = _LE("Failed to create manage_existing flow: "
"%(object_type)s %(object_id)s.")
LOG.exception(msg, {'object_type': 'snapshot',
'object_id': snapshot.id})
raise exception.CinderException(
_("Failed to create manage existing flow."))
with flow_utils.DynamicLogListener(flow_engine, logger=LOG):
flow_engine.run()
return snapshot.id
def get_manageable_snapshots(self, ctxt, marker, limit, offset,
sort_keys, sort_dirs, want_objects=False):
try:
utils.require_driver_initialized(self.driver)
except exception.DriverNotInitialized:
with excutils.save_and_reraise_exception():
LOG.exception(_LE("Listing manageable snapshots failed, due "
"to uninitialized driver."))
cinder_snapshots = self._get_my_snapshots(ctxt)
try:
driver_entries = self.driver.get_manageable_snapshots(
cinder_snapshots, marker, limit, offset, sort_keys, sort_dirs)
if want_objects:
driver_entries = (objects.ManageableSnapshotList.
from_primitives(ctxt, driver_entries))
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_LE("Listing manageable snapshots failed, due "
"to driver error."))
return driver_entries
def get_capabilities(self, context, discover):
"""Get capabilities of backend storage."""
if discover:
self.driver.init_capabilities()
capabilities = self.driver.capabilities
LOG.debug("Obtained capabilities list: %s.", capabilities)
return capabilities
def get_backup_device(self, ctxt, backup, want_objects=False):
(backup_device, is_snapshot) = (
self.driver.get_backup_device(ctxt, backup))
secure_enabled = self.driver.secure_file_operations_enabled()
backup_device_dict = {'backup_device': backup_device,
'secure_enabled': secure_enabled,
'is_snapshot': is_snapshot, }
# TODO(sborkows): from_primitive method will be removed in O, so there
# is a need to clean here then.
return (objects.BackupDeviceInfo.from_primitive(backup_device_dict,
ctxt)
if want_objects else backup_device_dict)
def secure_file_operations_enabled(self, ctxt, volume):
secure_enabled = self.driver.secure_file_operations_enabled()
return secure_enabled
def _connection_create(self, ctxt, volume, attachment, connector):
try:
self.driver.validate_connector(connector)
except exception.InvalidConnectorException as err:
raise exception.InvalidInput(reason=six.text_type(err))
except Exception as err:
err_msg = (_("Validate volume connection failed "
"(error: %(err)s).") % {'err': six.text_type(err)})
LOG.error(err_msg, resource=volume)
raise exception.VolumeBackendAPIException(data=err_msg)
try:
model_update = self.driver.create_export(ctxt.elevated(),
volume, connector)
except exception.CinderException as ex:
err_msg = (_("Create export for volume failed (%s).") % ex.msg)
LOG.exception(err_msg, resource=volume)
raise exception.VolumeBackendAPIException(data=err_msg)
try:
if model_update:
volume.update(model_update)
volume.save()
except exception.CinderException as ex:
LOG.exception(_LE("Model update failed."), resource=volume)
raise exception.ExportFailure(reason=six.text_type(ex))
try:
conn_info = self.driver.initialize_connection(volume, connector)
except Exception as err:
err_msg = (_("Driver initialize connection failed "
"(error: %(err)s).") % {'err': six.text_type(err)})
LOG.exception(err_msg, resource=volume)
self.driver.remove_export(ctxt.elevated(), volume)
raise exception.VolumeBackendAPIException(data=err_msg)
conn_info = self._parse_connection_options(ctxt, volume, conn_info)
# NOTE(jdg): Get rid of the nested dict (data key)
conn_data = conn_info.pop('data', {})
connection_info = conn_data.copy()
connection_info.update(conn_info)
values = {'volume_id': volume.id,
'attach_status': 'attaching', }
self.db.volume_attachment_update(ctxt, attachment.id, values)
self.db.attachment_specs_update_or_create(
ctxt,
attachment.id,
connector)
connection_info['attachment_id'] = attachment.id
return connection_info
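    # Sketch of the flattened connection_info returned above, assuming an
    # iSCSI driver (values are hypothetical; the exact keys depend on the
    # driver's initialize_connection() result):
    #
    #     {'driver_volume_type': 'iscsi',
    #      'target_portal': '192.168.1.10:3260',
    #      'target_iqn': 'iqn.2010-10.org.openstack:volume-...',
    #      'target_lun': 1,
    #      'attachment_id': attachment.id}
    #
    # i.e. the driver's nested 'data' dict is merged into the top level and
    # the attachment id is appended for the caller.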
def attachment_update(self,
context,
vref,
connector,
attachment_id):
"""Update/Finalize an attachment.
This call updates a valid attachment record to associate with a volume
        and provides the caller with the proper connection info. Note that
        this call requires an `attachment_id`. It's expected that, prior to
        this call, the volume and an attachment UUID have been reserved.
        :param vref: Volume object to create the attachment for
        :param connector: Connector object to use for attachment creation
        :param attachment_id: ID of the attachment record to update
"""
mode = connector.get('mode', 'rw')
self._notify_about_volume_usage(context, vref, 'attach.start')
attachment_ref = objects.VolumeAttachment.get_by_id(context,
attachment_id)
connection_info = self._connection_create(context,
vref,
attachment_ref,
connector)
# FIXME(jdg): get rid of this admin_meta option here, the only thing
# it does is enforce that a volume is R/O, that should be done via a
# type and not *more* metadata
volume_metadata = self.db.volume_admin_metadata_update(
context.elevated(),
attachment_ref.volume_id,
{'attached_mode': mode}, False)
if volume_metadata.get('readonly') == 'True' and mode != 'ro':
self.db.volume_update(context, vref.id,
{'status': 'error_attaching'})
self.message_api.create(
context, defined_messages.ATTACH_READONLY_VOLUME,
context.project_id, resource_type=resource_types.VOLUME,
resource_uuid=vref.id)
raise exception.InvalidVolumeAttachMode(mode=mode,
volume_id=vref.id)
try:
utils.require_driver_initialized(self.driver)
self.driver.attach_volume(context,
vref,
attachment_ref.instance_uuid,
connector.get('hostname', ''),
connector.get('mountpoint', 'na'))
except Exception:
with excutils.save_and_reraise_exception():
self.db.volume_attachment_update(
context, attachment_ref.id,
{'attach_status': 'error_attaching'})
self.db.volume_attached(context.elevated(),
attachment_ref.id,
attachment_ref.instance_uuid,
connector.get('hostname', ''),
connector.get('mountpoint', 'na'),
mode)
vref.refresh()
self._notify_about_volume_usage(context, vref, "attach.end")
LOG.info(_LI("Attach volume completed successfully."),
resource=vref)
attachment_ref = objects.VolumeAttachment.get_by_id(context,
attachment_id)
return connection_info
def _connection_terminate(self, context, volume,
attachment, force=False):
"""Remove a volume connection, but leave attachment."""
utils.require_driver_initialized(self.driver)
# TODO(jdg): Add an object method to cover this
connector = self.db.attachment_specs_get(
context,
attachment.id)
try:
shared_connections = self.driver.terminate_connection(volume,
connector,
force=force)
if not isinstance(shared_connections, bool):
shared_connections = False
except Exception as err:
err_msg = (_('Terminate volume connection failed: %(err)s')
% {'err': six.text_type(err)})
LOG.exception(err_msg, resource=volume)
raise exception.VolumeBackendAPIException(data=err_msg)
LOG.info(_LI("Terminate volume connection completed successfully."),
resource=volume)
        # NOTE(jdg): Return True/False if there are other outstanding
        # attachments that share this connection. If True, the caller should
        # preserve the actual host connection (work should be done in the
        # brick connector, as it has the knowledge of what's going on here).
return shared_connections
def attachment_delete(self, context, attachment_id, vref):
"""Delete/Detach the specified attachment.
Notifies the backend device that we're detaching the specified
attachment instance.
        :param attachment_id: ID of the attachment record to remove
        :param vref: Volume object associated with the attachment
        NOTE: if the attachment reference is None, we remove all existing
        attachments for the specified volume object.
"""
has_shared_connection = False
attachment_ref = objects.VolumeAttachment.get_by_id(context,
attachment_id)
if not attachment_ref:
for attachment in VA_LIST.get_all_by_volume_id(context, vref.id):
if self._do_attachment_delete(context, vref, attachment):
has_shared_connection = True
else:
has_shared_connection = (
self._do_attachment_delete(context, vref, attachment_ref))
return has_shared_connection
def _do_attachment_delete(self, context, vref, attachment):
utils.require_driver_initialized(self.driver)
self._notify_about_volume_usage(context, vref, "detach.start")
has_shared_connection = self._connection_terminate(context,
vref,
attachment)
try:
LOG.debug('Deleting attachment %(attachment_id)s.',
{'attachment_id': attachment.id},
resource=vref)
self.driver.detach_volume(context, vref, attachment)
self.driver.remove_export(context.elevated(), vref)
except Exception:
# FIXME(jdg): Obviously our volume object is going to need some
# changes to deal with multi-attach and figuring out how to
# represent a single failed attach out of multiple attachments
# TODO(jdg): object method here
self.db.volume_attachment_update(
context, attachment.get('id'),
{'attach_status': 'error_detaching'})
else:
self.db.volume_detached(context.elevated(), vref.id,
attachment.get('id'))
self.db.volume_admin_metadata_delete(context.elevated(),
vref.id,
'attached_mode')
self._notify_about_volume_usage(context, vref, "detach.end")
return has_shared_connection
def is_volume_trusted(self, ctxt, volume_id):
volume = self.db.api.volume_get(ctxt, volume_id)
verify_trust = False
asset_tags = 'None'
host = ''
for metadata in volume.volume_metadata:
if metadata.key == 'trust':
host = volume.host.split("@")[0]
verify_trust = True
elif metadata.key == 'asset_tags':
asset_tags = metadata.value
if verify_trust:
return self.asset_tag_filter.is_trusted(host, asset_tags)
return None | ge0rgi/cinder | cinder/volume/manager.py | Python | apache-2.0 | 211,218 | 0.000199 |
# Copyright 2016 Hewlett Packard Enterprise Development Company LP.
# Copyright 2016 IBM Corp
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_log import log
from oslo_utils import excutils
from oslo_utils import strutils
import tenacity
from ironic.common import cinder
from ironic.common import exception
from ironic.common.i18n import _
from ironic.common import states
from ironic.drivers import base
from ironic.drivers import utils
from ironic import objects
CONF = cfg.CONF
LOG = log.getLogger(__name__)
# NOTE(TheJulia): Sets containing known valid types that align with
# _generate_connector() and the volume connection information spec.
VALID_ISCSI_TYPES = ('iqn',)
# TODO(TheJulia): FCoE?
VALID_FC_TYPES = ('wwpn', 'wwnn')
class CinderStorage(base.StorageInterface):
"""A storage_interface driver supporting Cinder."""
def get_properties(self):
"""Return the properties of the interface.
:returns: dictionary of <property name>:<property description> entries.
"""
return {}
def _fail_validation(self, task, reason,
exception=exception.InvalidParameterValue):
msg = (_("Failed to validate cinder storage interface for node "
"%(node)s. %(reason)s") %
{'node': task.node.uuid, 'reason': reason})
LOG.error(msg)
raise exception(msg)
def _validate_connectors(self, task):
"""Validate connector information helper.
Enumerates through all connector objects, and identifies if
iSCSI or Fibre Channel connectors are present.
:param task: The task object.
:raises InvalidParameterValue: If iSCSI is identified and
iPXE is disabled.
:raises StorageError: If the number of wwpns is not equal to
the number of wwnns
:returns: Dictionary containing iscsi_found and fc_found
keys with boolean values representing if the
helper found that connector type configured
for the node.
"""
node = task.node
iscsi_uuids_found = []
wwpn_found = 0
wwnn_found = 0
ipxe_enabled = False
if 'ipxe_boot' in task.driver.boot.capabilities:
ipxe_enabled = True
for connector in task.volume_connectors:
if (connector.type in VALID_ISCSI_TYPES
and connector.connector_id is not None):
iscsi_uuids_found.append(connector.uuid)
if not ipxe_enabled:
msg = _("The [pxe]/ipxe_enabled option must "
"be set to True or the boot interface "
"must be set to ``ipxe`` to support network "
"booting to an iSCSI volume.")
self._fail_validation(task, msg)
if (connector.type in VALID_FC_TYPES
and connector.connector_id is not None):
# NOTE(TheJulia): Unlike iSCSI with cinder, we have no need
# to warn about multiple IQN entries, since we are able to
# submit multiple fibre channel WWPN entries.
if connector.type == 'wwpn':
wwpn_found += 1
if connector.type == 'wwnn':
wwnn_found += 1
if len(iscsi_uuids_found) > 1:
LOG.warning("Multiple possible iSCSI connectors, "
"%(iscsi_uuids_found)s found, for node %(node)s. "
"Only the first iSCSI connector, %(iscsi_uuid)s, "
"will be utilized.",
{'node': node.uuid,
'iscsi_uuids_found': iscsi_uuids_found,
'iscsi_uuid': iscsi_uuids_found[0]})
if wwpn_found != wwnn_found:
msg = _("Cinder requires both wwnn and wwpn entries for FCoE "
"connections. There must be a wwpn entry for every wwnn "
"entry. There are %(wwpn)d wwpn entries and %(wwnn)s wwnn "
"entries.") % {'wwpn': wwpn_found, 'wwnn': wwnn_found}
self._fail_validation(task, msg, exception.StorageError)
return {'fc_found': wwpn_found >= 1,
'iscsi_found': len(iscsi_uuids_found) >= 1}
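    # For example, a node with one valid 'iqn' connector and no Fibre Channel
    # connectors would get back (sketch only):
    #
    #     {'fc_found': False, 'iscsi_found': True}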
def _validate_targets(self, task, found_types, iscsi_boot, fc_boot):
"""Validate target information helper.
Enumerates through all target objects and identifies if
iSCSI or Fibre Channel targets are present, and matches the
connector capability of the node.
:param task: The task object.
:param found_types: Dictionary containing boolean values returned
from the _validate_connectors helper method.
:param iscsi_boot: Boolean value indicating if iSCSI boot operations
are available.
:param fc_boot: Boolean value indicating if Fibre Channel boot
operations are available.
:raises: InvalidParameterValue
"""
for volume in task.volume_targets:
if volume.volume_id is None:
msg = (_("volume_id missing from target %(id)s.") %
{'id': volume.uuid})
self._fail_validation(task, msg)
# NOTE(TheJulia): We should likely consider incorporation
# of the volume boot_index field, however it may not be
# relevant to the checks we perform here as in the end a
# FC volume "attached" to a node is a valid configuration
# as well.
# TODO(TheJulia): When we create support in nova to record
# that a volume attachment is going to take place, we will
# likely need to match the driver_volume_type field to
# our generic volume_type field. NB The LVM driver appears
# to not use that convention in cinder, as it is freeform.
if volume.volume_type == 'fibre_channel':
if not fc_boot and volume.boot_index == 0:
msg = (_("Volume target %(id)s is configured for "
"'fibre_channel', however the capability "
"'fibre_channel_boot' is not set on node.") %
{'id': volume.uuid})
self._fail_validation(task, msg)
if not found_types['fc_found']:
msg = (_("Volume target %(id)s is configured for "
"'fibre_channel', however no Fibre Channel "
"WWPNs are configured for the node volume "
"connectors.") %
{'id': volume.uuid})
self._fail_validation(task, msg)
elif volume.volume_type == 'iscsi':
if not iscsi_boot and volume.boot_index == 0:
msg = (_("Volume target %(id)s is configured for "
"'iscsi', however the capability 'iscsi_boot' "
"is not set for the node.") %
{'id': volume.uuid})
self._fail_validation(task, msg)
if not found_types['iscsi_found']:
msg = (_("Volume target %(id)s is configured for "
"'iscsi', however no iSCSI connectors are "
"configured for the node.") %
{'id': volume.uuid})
self._fail_validation(task, msg)
else:
                # NOTE(TheJulia): The note below needs to be updated
                # whenever support for additional volume types is added.
msg = (_("Volume target %(id)s is of an unknown type "
"'%(type)s'. Supported types: 'iscsi' or "
"'fibre_channel'") %
{'id': volume.uuid, 'type': volume.volume_type})
self._fail_validation(task, msg)
def validate(self, task):
"""Validate storage_interface configuration for Cinder usage.
In order to provide fail fast functionality prior to nodes being
requested to enter the active state, this method performs basic
checks of the volume connectors, volume targets, and operator
defined capabilities. These checks are to help ensure that we
should have a compatible configuration prior to activating the
node.
:param task: The task object.
        :raises: InvalidParameterValue If a misconfiguration or mismatch
                 exists that would prevent the cinder storage driver
                 from initializing attachments.
"""
found_types = self._validate_connectors(task)
node = task.node
iscsi_boot = strutils.bool_from_string(
utils.get_node_capability(node, 'iscsi_boot'))
fc_boot = strutils.bool_from_string(
utils.get_node_capability(node, 'fibre_channel_boot'))
# Validate capability configuration against configured volumes
# such that we raise errors for missing connectors if the
# boot capability is defined.
if iscsi_boot and not found_types['iscsi_found']:
valid_types = ', '.join(VALID_ISCSI_TYPES)
msg = (_("In order to enable the 'iscsi_boot' capability for "
"the node, an associated volume_connector type "
"must be valid for iSCSI (%(options)s).") %
{'options': valid_types})
self._fail_validation(task, msg)
if fc_boot and not found_types['fc_found']:
valid_types = ', '.join(VALID_FC_TYPES)
msg = (_("In order to enable the 'fibre_channel_boot' capability "
"for the node, an associated volume_connector type must "
"be valid for Fibre Channel (%(options)s).") %
{'options': valid_types})
self._fail_validation(task, msg)
self._validate_targets(task, found_types, iscsi_boot, fc_boot)
def attach_volumes(self, task):
"""Informs the storage subsystem to attach all volumes for the node.
:param task: The task object.
:raises: StorageError If an underlying exception or failure
is detected.
"""
node = task.node
targets = [target.volume_id for target in task.volume_targets]
# If there are no targets, then we have nothing to do.
if not targets:
return
connector = self._generate_connector(task)
try:
connected = cinder.attach_volumes(task, targets, connector)
except exception.StorageError as e:
with excutils.save_and_reraise_exception():
LOG.error("Error attaching volumes for node %(node)s: "
"%(err)s", {'node': node.uuid, 'err': e})
self.detach_volumes(task, connector=connector,
aborting_attach=True)
if len(targets) != len(connected):
LOG.error("The number of volumes defined for node %(node)s does "
"not match the number of attached volumes. Attempting "
"detach and abort operation.", {'node': node.uuid})
self.detach_volumes(task, connector=connector,
aborting_attach=True)
raise exception.StorageError(("Mismatch between the number of "
"configured volume targets for "
"node %(uuid)s and the number of "
"completed attachments.") %
{'uuid': node.uuid})
for volume in connected:
# Volumes that were already attached are
# skipped. Updating target volume properties
# for these volumes is nova's responsibility.
if not volume.get('already_attached'):
volume_uuid = volume['data']['ironic_volume_uuid']
targets = objects.VolumeTarget.list_by_volume_id(task.context,
volume_uuid)
for target in targets:
target.properties = volume['data']
target.save()
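    # Sketch of what each element of `connected` is assumed to look like,
    # based on the keys read above (ids are made up):
    #
    #     {'already_attached': False,
    #      'data': {'ironic_volume_uuid': '6a0c1b9c-93ea-4d9c-b670-...',
    #               'target_iqn': 'iqn.2010-10.org.openstack:volume-...'}}
    #
    # The whole 'data' payload is stored on the matching
    # VolumeTarget.properties field.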
def detach_volumes(self, task, connector=None, aborting_attach=False):
"""Informs the storage subsystem to detach all volumes for the node.
This action is retried in case of failure.
:param task: The task object.
:param connector: The dictionary representing a node's connectivity
as defined by _generate_connector(). Generated
if not passed.
:param aborting_attach: Boolean representing if this detachment
was requested to handle aborting a
failed attachment
:raises: StorageError If an underlying exception or failure
is detected.
"""
# TODO(TheJulia): Ideally we should query the cinder API and reconcile
# or add any missing volumes and initiate detachments.
node = task.node
targets = [target.volume_id for target in task.volume_targets]
# If there are no targets, then we have nothing to do.
if not targets:
return
if not connector:
connector = self._generate_connector(task)
@tenacity.retry(
retry=tenacity.retry_if_exception_type(exception.StorageError),
stop=tenacity.stop_after_attempt(CONF.cinder.action_retries + 1),
wait=tenacity.wait_fixed(CONF.cinder.action_retry_interval),
reraise=True)
def detach_volumes():
try:
# NOTE(TheJulia): If the node is in ACTIVE state, we can
# tolerate failures detaching as the node is likely being
# powered down to cause a detachment event.
allow_errors = (task.node.provision_state == states.ACTIVE
or aborting_attach and outer_args['attempt']
> 0)
cinder.detach_volumes(task, targets, connector,
allow_errors=allow_errors)
except exception.StorageError as e:
with excutils.save_and_reraise_exception():
# NOTE(TheJulia): In the event that the node is not in
# ACTIVE state, we need to fail hard as we need to ensure
# all attachments are removed.
if aborting_attach:
msg_format = ("Error on aborting volume detach for "
"node %(node)s: %(err)s.")
else:
msg_format = ("Error detaching volume for "
"node %(node)s: %(err)s.")
msg = (msg_format) % {'node': node.uuid,
'err': e}
if outer_args['attempt'] < CONF.cinder.action_retries:
outer_args['attempt'] += 1
msg += " Re-attempting detachment."
LOG.warning(msg)
else:
LOG.error(msg)
# NOTE(mjturek): This dict is used by detach_volumes to determine
# if this is the last attempt. This is a dict rather than an int
# so that it is mutable by the inner function. In python3 this is
# possible with the 'nonlocal' keyword which is unfortunately not
# available in python2.
outer_args = {'attempt': 0}
detach_volumes()
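    # Rough Python 3 equivalent of the outer_args trick above, using
    # `nonlocal` instead of a mutable dict (sketch only, not the shipped code):
    #
    #     attempt = 0
    #
    #     def detach_volumes():
    #         nonlocal attempt
    #         ...
    #         if attempt < CONF.cinder.action_retries:
    #             attempt += 1
    #
    # The dict form is kept solely for Python 2 compatibility.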
def should_write_image(self, task):
"""Determines if deploy should perform the image write-out.
:param task: The task object.
:returns: True if the deployment write-out process should be
executed.
"""
# NOTE(TheJulia): There is no reason to check if a root volume
# exists here because if the validation has already been passed
# then we know that there should be a volume. If there is an
# image_source, then we should expect to write it out.
instance_info = task.node.instance_info
if 'image_source' not in instance_info:
for volume in task.volume_targets:
if volume['boot_index'] == 0:
return False
return True
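    # Example of the connector dict _generate_connector() below might build
    # for a node with one 'iqn' and one 'ip' volume connector (identifiers
    # here are hypothetical):
    #
    #     {'initiator': 'iqn.2012-06.org.openstack.node-1',
    #      'ip': '192.168.1.50',
    #      'host': '1be26c0b-03f2-4d2e-ae87-c02d7f33c123'}
    #
    # FC nodes get 'wwpns'/'wwnns' lists instead, and 'multipath' is set to
    # True when more than one connector and connection property are present.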
def _generate_connector(self, task):
"""Generate cinder connector value based upon the node.
Generates cinder compatible connector information for the purpose of
attaching volumes. Translation: We need to tell the storage where and
possibly how we can connect.
Supports passing iSCSI information in the form of IP and IQN records,
as well as Fibre Channel information in the form of WWPN addresses.
Fibre Channel WWNN addresses are also sent, however at present in-tree
Cinder drivers do not utilize WWNN addresses.
If multiple connectors exist, the request will be filed with
MultiPath IO being enabled.
A warning is logged if an unsupported volume type is encountered.
:params task: The task object.
:returns: A dictionary data structure similar to:
{'ip': ip,
'initiator': iqn,
                   'multipath': True,
'wwpns': ['WWN1', 'WWN2']}
:raises: StorageError upon no valid connector record being identified.
"""
data = {}
valid = False
for connector in task.volume_connectors:
if 'iqn' in connector.type and 'initiator' not in data:
data['initiator'] = connector.connector_id
valid = True
elif 'ip' in connector.type and 'ip' not in data:
data['ip'] = connector.connector_id
# TODO(TheJulia): Translate to, or generate an IQN.
elif 'wwpn' in connector.type:
data.setdefault('wwpns', []).append(connector.connector_id)
valid = True
elif 'wwnn' in connector.type:
data.setdefault('wwnns', []).append(connector.connector_id)
valid = True
else:
# TODO(jtaryma): Add handling of type 'mac' with MAC to IP
# translation.
LOG.warning('Node %(node)s has a volume_connector (%(uuid)s) '
'defined with an unsupported type: %(type)s.',
{'node': task.node.uuid,
'uuid': connector.uuid,
'type': connector.type})
if not valid:
valid_types = ', '.join(VALID_FC_TYPES + VALID_ISCSI_TYPES)
msg = (_('Insufficient or incompatible volume connection '
'records for node %(uuid)s. Valid connector '
'types: %(types)s') %
{'uuid': task.node.uuid, 'types': valid_types})
LOG.error(msg)
raise exception.StorageError(msg)
# NOTE(TheJulia): Hostname appears to only be used for logging
# in cinder drivers, however that may not always be true, and
# may need to change over time.
data['host'] = task.node.uuid
if len(task.volume_connectors) > 1 and len(data) > 1:
data['multipath'] = True
return data
| openstack/ironic | ironic/drivers/modules/storage/cinder.py | Python | apache-2.0 | 20,389 | 0 |
from feature_extraction.pre_processing.filter_precedent import precendent_directory_cleaner
def run(command_list):
precendent_directory_cleaner.run(command_list)
| Cyberjusticelab/JusticeAI | src/ml_service/feature_extraction/pre_processing/pre_processing_driver.py | Python | mit | 168 | 0.005952 |
class AttrDict(dict):
def __init__(self, *args, **kwargs):
super(AttrDict, self).__init__(*args, **kwargs)
self.__dict__ = self
| jozefg/solar-system | src/attrdict.py | Python | mit | 148 | 0 |
from django.db.models.aggregates import StdDev
from django.db.models.expressions import Value
from django.db.utils import ProgrammingError
from django.utils.functional import cached_property
class BaseDatabaseFeatures(object):
gis_enabled = False
allows_group_by_pk = False
# True if django.db.backends.utils.typecast_timestamp is used on values
# returned from dates() calls.
needs_datetime_string_cast = True
empty_fetchmany_value = []
update_can_self_select = True
# Does the backend distinguish between '' and None?
interprets_empty_strings_as_nulls = False
# Does the backend allow inserting duplicate NULL rows in a nullable
# unique field? All core backends implement this correctly, but other
# databases such as SQL Server do not.
supports_nullable_unique_constraints = True
# Does the backend allow inserting duplicate rows when a unique_together
# constraint exists and some fields are nullable but not all of them?
supports_partially_nullable_unique_constraints = True
can_use_chunked_reads = True
can_return_id_from_insert = False
has_bulk_insert = False
uses_savepoints = False
can_release_savepoints = False
can_combine_inserts_with_and_without_auto_increment_pk = False
# If True, don't use integer foreign keys referring to, e.g., positive
# integer primary keys.
related_fields_match_type = False
allow_sliced_subqueries = True
has_select_for_update = False
has_select_for_update_nowait = False
supports_select_related = True
# Does the default test database allow multiple connections?
# Usually an indication that the test database is in-memory
test_db_allows_multiple_connections = True
# Can an object be saved without an explicit primary key?
supports_unspecified_pk = False
# Can a fixture contain forward references? i.e., are
# FK constraints checked at the end of transaction, or
# at the end of each save operation?
supports_forward_references = True
# Does the backend truncate names properly when they are too long?
truncates_names = False
# Is there a REAL datatype in addition to floats/doubles?
has_real_datatype = False
supports_subqueries_in_group_by = True
supports_bitwise_or = True
# Is there a true datatype for uuid?
has_native_uuid_field = False
# Is there a true datatype for timedeltas?
has_native_duration_field = False
# Does the database driver support timedeltas as arguments?
# This is only relevant when there is a native duration field.
# Specifically, there is a bug with cx_Oracle:
# https://bitbucket.org/anthony_tuininga/cx_oracle/issue/7/
driver_supports_timedelta_args = False
# Do time/datetime fields have microsecond precision?
supports_microsecond_precision = True
# Does the __regex lookup support backreferencing and grouping?
supports_regex_backreferencing = True
# Can date/datetime lookups be performed using a string?
supports_date_lookup_using_string = True
# Can datetimes with timezones be used?
supports_timezones = True
# Does the database have a copy of the zoneinfo database?
has_zoneinfo_database = True
# When performing a GROUP BY, is an ORDER BY NULL required
# to remove any ordering?
requires_explicit_null_ordering_when_grouping = False
# Does the backend order NULL values as largest or smallest?
nulls_order_largest = False
# Is there a 1000 item limit on query parameters?
supports_1000_query_parameters = True
# Can an object have an autoincrement primary key of 0? MySQL says No.
allows_auto_pk_0 = True
# Do we need to NULL a ForeignKey out, or can the constraint check be
# deferred
can_defer_constraint_checks = False
# date_interval_sql can properly handle mixed Date/DateTime fields and timedeltas
supports_mixed_date_datetime_comparisons = True
# Does the backend support tablespaces? Default to False because it isn't
# in the SQL standard.
supports_tablespaces = False
# Does the backend reset sequences between tests?
supports_sequence_reset = True
# Can the backend determine reliably the length of a CharField?
can_introspect_max_length = True
# Can the backend determine reliably if a field is nullable?
# Note that this is separate from interprets_empty_strings_as_nulls,
# although the latter feature, when true, interferes with correct
# setting (and introspection) of CharFields' nullability.
# This is True for all core backends.
can_introspect_null = True
# Can the backend introspect the default value of a column?
can_introspect_default = True
# Confirm support for introspected foreign keys
# Every database can do this reliably, except MySQL,
# which can't do it for MyISAM tables
can_introspect_foreign_keys = True
# Can the backend introspect an AutoField, instead of an IntegerField?
can_introspect_autofield = False
# Can the backend introspect a BigIntegerField, instead of an IntegerField?
can_introspect_big_integer_field = True
# Can the backend introspect an BinaryField, instead of an TextField?
can_introspect_binary_field = True
# Can the backend introspect an DecimalField, instead of an FloatField?
can_introspect_decimal_field = True
# Can the backend introspect an IPAddressField, instead of an CharField?
can_introspect_ip_address_field = False
# Can the backend introspect a PositiveIntegerField, instead of an IntegerField?
can_introspect_positive_integer_field = False
# Can the backend introspect a SmallIntegerField, instead of an IntegerField?
can_introspect_small_integer_field = False
# Can the backend introspect a TimeField, instead of a DateTimeField?
can_introspect_time_field = True
# Support for the DISTINCT ON clause
can_distinct_on_fields = False
# Does the backend decide to commit before SAVEPOINT statements
# when autocommit is disabled? http://bugs.python.org/issue8145#msg109965
autocommits_when_autocommit_is_off = False
# Does the backend prevent running SQL queries in broken transactions?
atomic_transactions = True
# Can we roll back DDL in a transaction?
can_rollback_ddl = False
# Can we issue more than one ALTER COLUMN clause in an ALTER TABLE?
supports_combined_alters = False
# Does it support foreign keys?
supports_foreign_keys = True
# Does it support CHECK constraints?
supports_column_check_constraints = True
# Does the backend support 'pyformat' style ("... %(name)s ...", {'name': value})
# parameter passing? Note this can be provided by the backend even if not
# supported by the Python driver
supports_paramstyle_pyformat = True
# Does the backend require literal defaults, rather than parameterized ones?
requires_literal_defaults = False
# Does the backend require a connection reset after each material schema change?
connection_persists_old_columns = False
# What kind of error does the backend throw when accessing closed cursor?
closed_cursor_error_class = ProgrammingError
# Does 'a' LIKE 'A' match?
has_case_insensitive_like = True
# Does the backend require the sqlparse library for splitting multi-line
# statements before executing them?
requires_sqlparse_for_splitting = True
# Suffix for backends that don't support "SELECT xxx;" queries.
bare_select_suffix = ''
# If NULL is implied on columns without needing to be explicitly specified
implied_column_null = False
uppercases_column_names = False
# Does the backend support "select for update" queries with limit (and offset)?
supports_select_for_update_with_limit = True
def __init__(self, connection):
self.connection = connection
@cached_property
def supports_transactions(self):
"""Confirm support for transactions."""
with self.connection.cursor() as cursor:
cursor.execute('CREATE TABLE ROLLBACK_TEST (X INT)')
self.connection.set_autocommit(False)
cursor.execute('INSERT INTO ROLLBACK_TEST (X) VALUES (8)')
self.connection.rollback()
self.connection.set_autocommit(True)
cursor.execute('SELECT COUNT(X) FROM ROLLBACK_TEST')
count, = cursor.fetchone()
cursor.execute('DROP TABLE ROLLBACK_TEST')
return count == 0
@cached_property
def supports_stddev(self):
"""Confirm support for STDDEV and related stats functions."""
try:
self.connection.ops.check_expression_support(StdDev(Value(1)))
return True
except NotImplementedError:
return False
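    # These flags are consumed through connection.features; for example
    # (sketch, not part of this module):
    #
    #     if connection.features.supports_select_for_update:
    #         qs = qs.select_for_update()
    #
    # Individual backends override the attributes above in their own
    # DatabaseFeatures subclasses.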
def introspected_boolean_field_type(self, field=None, created_separately=False):
"""
What is the type returned when the backend introspects a BooleanField?
The optional arguments may be used to give further details of the field to be
introspected; in particular, they are provided by Django's test suite:
field -- the field definition
created_separately -- True if the field was added via a SchemaEditor's AddField,
False if the field was created with the model
Note that return value from this function is compared by tests against actual
introspection results; it should provide expectations, not run an introspection
itself.
"""
if self.can_introspect_null and field and field.null:
return 'NullBooleanField'
return 'BooleanField'
| devops2014/djangosite | django/db/backends/base/features.py | Python | bsd-3-clause | 9,722 | 0.001234 |
from sentry.testutils.cases import RuleTestCase
from sentry.rules.conditions.level import LevelCondition, LevelMatchType
class LevelConditionTest(RuleTestCase):
rule_cls = LevelCondition
def get_event(self):
event = self.event
event.group.level = 20
return event
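    # The numeric levels below mirror the stdlib logging scale assumed to be
    # stored on the group: 10=DEBUG, 20=INFO, 30=WARNING, 40=ERROR.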
def test_equals(self):
event = self.get_event()
rule = self.get_rule({
'match': LevelMatchType.EQUAL,
'level': '20',
})
self.assertPasses(rule, event)
rule = self.get_rule({
'match': LevelMatchType.EQUAL,
'level': '30',
})
self.assertDoesNotPass(rule, event)
def test_greater_than(self):
event = self.get_event()
rule = self.get_rule({
'match': LevelMatchType.GREATER_OR_EQUAL,
'level': '40',
})
self.assertDoesNotPass(rule, event)
rule = self.get_rule({
'match': LevelMatchType.GREATER_OR_EQUAL,
'level': '20',
})
self.assertPasses(rule, event)
def test_less_than(self):
event = self.get_event()
rule = self.get_rule({
'match': LevelMatchType.LESS_OR_EQUAL,
'level': '10',
})
self.assertDoesNotPass(rule, event)
rule = self.get_rule({
'match': LevelMatchType.LESS_OR_EQUAL,
'level': '30',
})
self.assertPasses(rule, event)
| wujuguang/sentry | tests/sentry/rules/conditions/test_level_event.py | Python | bsd-3-clause | 1,452 | 0 |
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
import pytest
from ... import units as u
from .. import transformations as t
from ..builtin_frames import ICRS, FK5, FK4, FK4NoETerms, Galactic, AltAz
from .. import representation as r
from ..baseframe import frame_transform_graph
from ...tests.helper import (assert_quantity_allclose as assert_allclose,
quantity_allclose, catch_warnings)
from ...time import Time
# Coordinates just for these tests.
class TCoo1(ICRS):
pass
class TCoo2(ICRS):
pass
class TCoo3(ICRS):
pass
def test_transform_classes():
"""
Tests the class-based/OO syntax for creating transforms
"""
tfun = lambda c, f: f.__class__(ra=c.ra, dec=c.dec)
trans1 = t.FunctionTransform(tfun, TCoo1, TCoo2,
register_graph=frame_transform_graph)
c1 = TCoo1(ra=1*u.radian, dec=0.5*u.radian)
c2 = c1.transform_to(TCoo2)
assert_allclose(c2.ra.radian, 1)
assert_allclose(c2.dec.radian, 0.5)
def matfunc(coo, fr):
return [[1, 0, 0],
[0, coo.ra.degree, 0],
[0, 0, 1]]
trans2 = t.DynamicMatrixTransform(matfunc, TCoo1, TCoo2)
trans2.register(frame_transform_graph)
c3 = TCoo1(ra=1*u.deg, dec=2*u.deg)
c4 = c3.transform_to(TCoo2)
assert_allclose(c4.ra.degree, 1)
assert_allclose(c4.ra.degree, 1)
# be sure to unregister the second one - no need for trans1 because it
# already got unregistered when trans2 was created.
trans2.unregister(frame_transform_graph)
def test_transform_decos():
"""
Tests the decorator syntax for creating transforms
"""
c1 = TCoo1(ra=1*u.deg, dec=2*u.deg)
@frame_transform_graph.transform(t.FunctionTransform, TCoo1, TCoo2)
def trans(coo1, f):
return TCoo2(ra=coo1.ra, dec=coo1.dec * 2)
c2 = c1.transform_to(TCoo2)
assert_allclose(c2.ra.degree, 1)
assert_allclose(c2.dec.degree, 4)
c3 = TCoo1(r.CartesianRepresentation(x=1*u.pc, y=1*u.pc, z=2*u.pc))
@frame_transform_graph.transform(t.StaticMatrixTransform, TCoo1, TCoo2)
def matrix():
return [[2, 0, 0],
[0, 1, 0],
[0, 0, 1]]
c4 = c3.transform_to(TCoo2)
assert_allclose(c4.cartesian.x, 2*u.pc)
assert_allclose(c4.cartesian.y, 1*u.pc)
assert_allclose(c4.cartesian.z, 2*u.pc)
def test_shortest_path():
class FakeTransform:
def __init__(self, pri):
self.priority = pri
g = t.TransformGraph()
# cheating by adding graph elements directly that are not classes - the
# graphing algorithm still works fine with integers - it just isn't a valid
# TransformGraph
    # the graph is a down-going diamond graph with the lower-right slightly
# heavier and a cycle from the bottom to the top
# also, a pair of nodes isolated from 1
g._graph[1][2] = FakeTransform(1)
g._graph[1][3] = FakeTransform(1)
g._graph[2][4] = FakeTransform(1)
g._graph[3][4] = FakeTransform(2)
g._graph[4][1] = FakeTransform(5)
g._graph[5][6] = FakeTransform(1)
path, d = g.find_shortest_path(1, 2)
assert path == [1, 2]
assert d == 1
path, d = g.find_shortest_path(1, 3)
assert path == [1, 3]
assert d == 1
path, d = g.find_shortest_path(1, 4)
print('Cached paths:', g._shortestpaths)
assert path == [1, 2, 4]
assert d == 2
# unreachable
path, d = g.find_shortest_path(1, 5)
assert path is None
assert d == float('inf')
path, d = g.find_shortest_path(5, 6)
assert path == [5, 6]
assert d == 1
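# Worked example for the diamond above (not part of the original test): the
# two routes from 1 to 4 cost 1 + 1 = 2 via node 2 and 1 + 2 = 3 via node 3,
# so the cheaper [1, 2, 4] path is the one the assertions expect.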
def test_sphere_cart():
"""
Tests the spherical <-> cartesian transform functions
"""
from ...utils import NumpyRNGContext
from .. import spherical_to_cartesian, cartesian_to_spherical
x, y, z = spherical_to_cartesian(1, 0, 0)
assert_allclose(x, 1)
assert_allclose(y, 0)
assert_allclose(z, 0)
x, y, z = spherical_to_cartesian(0, 1, 1)
assert_allclose(x, 0)
assert_allclose(y, 0)
assert_allclose(z, 0)
x, y, z = spherical_to_cartesian(5, 0, np.arcsin(4. / 5.))
assert_allclose(x, 3)
assert_allclose(y, 4)
assert_allclose(z, 0)
r, lat, lon = cartesian_to_spherical(0, 1, 0)
assert_allclose(r, 1)
assert_allclose(lat, 0 * u.deg)
assert_allclose(lon, np.pi / 2 * u.rad)
# test round-tripping
with NumpyRNGContext(13579):
x, y, z = np.random.randn(3, 5)
r, lat, lon = cartesian_to_spherical(x, y, z)
x2, y2, z2 = spherical_to_cartesian(r, lat, lon)
assert_allclose(x, x2)
assert_allclose(y, y2)
assert_allclose(z, z2)
def test_transform_path_pri():
"""
This checks that the transformation path prioritization works by
making sure the ICRS -> Gal transformation always goes through FK5
and not FK4.
"""
frame_transform_graph.invalidate_cache()
tpath, td = frame_transform_graph.find_shortest_path(ICRS, Galactic)
assert tpath == [ICRS, FK5, Galactic]
assert td == 2
# but direct from FK4 to Galactic should still be possible
tpath, td = frame_transform_graph.find_shortest_path(FK4, Galactic)
assert tpath == [FK4, FK4NoETerms, Galactic]
assert td == 2
def test_obstime():
"""
Checks to make sure observation time is
accounted for at least in FK4 <-> ICRS transformations
"""
b1950 = Time('B1950', scale='utc')
j1975 = Time('J1975', scale='utc')
fk4_50 = FK4(ra=1*u.deg, dec=2*u.deg, obstime=b1950)
fk4_75 = FK4(ra=1*u.deg, dec=2*u.deg, obstime=j1975)
icrs_50 = fk4_50.transform_to(ICRS)
icrs_75 = fk4_75.transform_to(ICRS)
# now check that the resulting coordinates are *different* - they should be,
# because the obstime is different
assert icrs_50.ra.degree != icrs_75.ra.degree
assert icrs_50.dec.degree != icrs_75.dec.degree
# ------------------------------------------------------------------------------
# Affine transform tests and helpers:
# just acting as a namespace
class transfunc:
rep = r.CartesianRepresentation(np.arange(3)*u.pc)
dif = r.CartesianDifferential(*np.arange(3, 6)*u.pc/u.Myr)
rep0 = r.CartesianRepresentation(np.zeros(3)*u.pc)
@classmethod
def both(cls, coo, fr):
# exchange x <-> z and offset
M = np.array([[0., 0., 1.],
[0., 1., 0.],
[1., 0., 0.]])
return M, cls.rep.with_differentials(cls.dif)
@classmethod
def just_matrix(cls, coo, fr):
        # exchange x <-> z, no offset
M = np.array([[0., 0., 1.],
[0., 1., 0.],
[1., 0., 0.]])
return M, None
@classmethod
def no_matrix(cls, coo, fr):
return None, cls.rep.with_differentials(cls.dif)
@classmethod
def no_pos(cls, coo, fr):
return None, cls.rep0.with_differentials(cls.dif)
@classmethod
def no_vel(cls, coo, fr):
return None, cls.rep
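# The helpers above follow the AffineTransform contract: a transform function
# receives (coordinate, target frame) and returns a (matrix, offset) pair in
# which either element may be None. A minimal hand-written variant might look
# like this (sketch only, the name is made up):
#
#     def shift_one_pc(coo, fr):
#         return np.eye(3), r.CartesianRepresentation(1, 0, 0, unit=u.pc)
#
# The parametrized tests below drive such functions through t.AffineTransform.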
@pytest.mark.parametrize('transfunc', [transfunc.both, transfunc.no_matrix,
transfunc.no_pos, transfunc.no_vel,
transfunc.just_matrix])
@pytest.mark.parametrize('rep', [
r.CartesianRepresentation(5, 6, 7, unit=u.pc),
r.CartesianRepresentation(5, 6, 7, unit=u.pc,
differentials=r.CartesianDifferential(8, 9, 10,
unit=u.pc/u.Myr)),
r.CartesianRepresentation(5, 6, 7, unit=u.pc,
differentials=r.CartesianDifferential(8, 9, 10,
unit=u.pc/u.Myr))
.represent_as(r.CylindricalRepresentation, r.CylindricalDifferential)
])
def test_affine_transform_succeed(transfunc, rep):
c = TCoo1(rep)
# compute expected output
M, offset = transfunc(c, TCoo2)
_rep = rep.to_cartesian()
diffs = dict([(k, diff.represent_as(r.CartesianDifferential, rep))
for k, diff in rep.differentials.items()])
expected_rep = _rep.with_differentials(diffs)
if M is not None:
expected_rep = expected_rep.transform(M)
expected_pos = expected_rep.without_differentials()
if offset is not None:
expected_pos = expected_pos + offset.without_differentials()
expected_vel = None
if c.data.differentials:
expected_vel = expected_rep.differentials['s']
if offset and offset.differentials:
expected_vel = (expected_vel + offset.differentials['s'])
# register and do the transformation and check against expected
trans = t.AffineTransform(transfunc, TCoo1, TCoo2)
trans.register(frame_transform_graph)
c2 = c.transform_to(TCoo2)
assert quantity_allclose(c2.data.to_cartesian().xyz,
expected_pos.to_cartesian().xyz)
if expected_vel is not None:
diff = c2.data.differentials['s'].to_cartesian(base=c2.data)
assert quantity_allclose(diff.xyz, expected_vel.d_xyz)
trans.unregister(frame_transform_graph)
# these should fail
def transfunc_invalid_matrix(coo, fr):
return np.eye(4), None
# Leaving this open in case we want to add more functions to check for failures
@pytest.mark.parametrize('transfunc', [transfunc_invalid_matrix])
def test_affine_transform_fail(transfunc):
diff = r.CartesianDifferential(8, 9, 10, unit=u.pc/u.Myr)
rep = r.CartesianRepresentation(5, 6, 7, unit=u.pc, differentials=diff)
c = TCoo1(rep)
# register and do the transformation and check against expected
trans = t.AffineTransform(transfunc, TCoo1, TCoo2)
trans.register(frame_transform_graph)
with pytest.raises(ValueError):
c2 = c.transform_to(TCoo2)
trans.unregister(frame_transform_graph)
def test_too_many_differentials():
dif1 = r.CartesianDifferential(*np.arange(3, 6)*u.pc/u.Myr)
dif2 = r.CartesianDifferential(*np.arange(3, 6)*u.pc/u.Myr**2)
rep = r.CartesianRepresentation(np.arange(3)*u.pc,
differentials={'s': dif1, 's2': dif2})
with pytest.raises(ValueError):
c = TCoo1(rep)
# register and do the transformation and check against expected
trans = t.AffineTransform(transfunc.both, TCoo1, TCoo2)
trans.register(frame_transform_graph)
# Check that if frame somehow gets through to transformation, multiple
# differentials are caught
c = TCoo1(rep.without_differentials())
c._data = c._data.with_differentials({'s': dif1, 's2': dif2})
with pytest.raises(ValueError):
c2 = c.transform_to(TCoo2)
trans.unregister(frame_transform_graph)
# A matrix transform of a unit spherical with differentials should work
@pytest.mark.parametrize('rep', [
r.UnitSphericalRepresentation(lon=15*u.degree, lat=-11*u.degree,
differentials=r.SphericalDifferential(d_lon=15*u.mas/u.yr,
d_lat=11*u.mas/u.yr,
d_distance=-110*u.km/u.s)),
r.UnitSphericalRepresentation(lon=15*u.degree, lat=-11*u.degree,
differentials={'s': r.RadialDifferential(d_distance=-110*u.km/u.s)}),
r.SphericalRepresentation(lon=15*u.degree, lat=-11*u.degree,
distance=150*u.pc,
differentials={'s': r.RadialDifferential(d_distance=-110*u.km/u.s)})
])
def test_unit_spherical_with_differentials(rep):
c = TCoo1(rep)
# register and do the transformation and check against expected
trans = t.AffineTransform(transfunc.just_matrix, TCoo1, TCoo2)
trans.register(frame_transform_graph)
c2 = c.transform_to(TCoo2)
assert 's' in rep.differentials
assert isinstance(c2.data.differentials['s'],
rep.differentials['s'].__class__)
if isinstance(rep.differentials['s'], r.RadialDifferential):
assert c2.data.differentials['s'] is rep.differentials['s']
trans.unregister(frame_transform_graph)
# should fail if we have to do offsets
trans = t.AffineTransform(transfunc.both, TCoo1, TCoo2)
trans.register(frame_transform_graph)
with pytest.raises(TypeError):
c.transform_to(TCoo2)
trans.unregister(frame_transform_graph)
def test_vel_transformation_obstime_err():
# TODO: replace after a final decision on PR #6280
from ..sites import get_builtin_sites
diff = r.CartesianDifferential([.1, .2, .3]*u.km/u.s)
rep = r.CartesianRepresentation([1, 2, 3]*u.au, differentials=diff)
loc = get_builtin_sites()['example_site']
aaf = AltAz(obstime='J2010', location=loc)
aaf2 = AltAz(obstime=aaf.obstime + 3*u.day, location=loc)
aaf3 = AltAz(obstime=aaf.obstime + np.arange(3)*u.day, location=loc)
aaf4 = AltAz(obstime=aaf.obstime, location=loc)
aa = aaf.realize_frame(rep)
with pytest.raises(NotImplementedError) as exc:
aa.transform_to(aaf2)
assert 'cannot transform' in exc.value.args[0]
with pytest.raises(NotImplementedError) as exc:
aa.transform_to(aaf3)
assert 'cannot transform' in exc.value.args[0]
aa.transform_to(aaf4)
aa.transform_to(ICRS())
def test_function_transform_with_differentials():
tfun = lambda c, f: f.__class__(ra=c.ra, dec=c.dec)
ftrans = t.FunctionTransform(tfun, TCoo3, TCoo2,
register_graph=frame_transform_graph)
t3 = TCoo3(ra=1*u.deg, dec=2*u.deg, pm_ra_cosdec=1*u.marcsec/u.yr,
pm_dec=1*u.marcsec/u.yr,)
with catch_warnings() as w:
t2 = t3.transform_to(TCoo2)
assert len(w) == 1
assert 'they have been dropped' in str(w[0].message)
def test_frame_override_component_with_attribute():
"""
It was previously possible to define a frame with an attribute with the
same name as a component. We don't want to allow this!
"""
from ..baseframe import BaseCoordinateFrame
from ..attributes import Attribute
class BorkedFrame(BaseCoordinateFrame):
ra = Attribute(default=150)
dec = Attribute(default=150)
def trans_func(coo1, f):
pass
trans = t.FunctionTransform(trans_func, BorkedFrame, ICRS)
with pytest.raises(ValueError) as exc:
trans.register(frame_transform_graph)
assert ('BorkedFrame' in exc.value.args[0] and
"'ra'" in exc.value.args[0] and
"'dec'" in exc.value.args[0])
| funbaker/astropy | astropy/coordinates/tests/test_transformations.py | Python | bsd-3-clause | 14,498 | 0.00069 |
#!/usr/bin/env python
from __future__ import print_function
import glob
import os
import re
import sys
from argparse import ArgumentParser
from param import (Library, Parameter, Vehicle, known_group_fields,
known_param_fields, required_param_fields, known_units)
from htmlemit import HtmlEmit
from rstemit import RSTEmit
from xmlemit import XmlEmit
from mdemit import MDEmit
from jsonemit import JSONEmit
from xmlemit_mp import XmlEmitMP
parser = ArgumentParser(description="Parse ArduPilot parameters.")
parser.add_argument("-v", "--verbose", dest='verbose', action='store_true', default=False, help="show debugging output")
parser.add_argument("--vehicle", required=True, help="Vehicle type to generate for")
parser.add_argument("--no-emit",
dest='emit_params',
action='store_false',
default=True,
help="don't emit parameter documention, just validate")
parser.add_argument("--format",
dest='output_format',
action='store',
default='all',
choices=['all', 'html', 'rst', 'wiki', 'xml', 'json', 'edn', 'md', 'xml_mp'],
help="what output format to use")
parser.add_argument("--sitl",
dest='emit_sitl',
action='store_true',
default=False,
help="true to only emit sitl parameters, false to not emit sitl parameters")
args = parser.parse_args()
# Regular expressions for parsing the parameter metadata
prog_param = re.compile(r"@Param(?:{([^}]+)})?: (\w+).*((?:\n[ \t]*// @(\w+)(?:{([^}]+)})?: ?(.*))+)(?:\n[ \t\r]*\n|\n[ \t]+[A-Z])", re.MULTILINE)
# match e.g @Value: 0=Unity, 1=Koala, 17=Liability
prog_param_fields = re.compile(r"[ \t]*// @(\w+): ?([^\r\n]*)")
# match e.g @Value{Copter}: 0=Volcano, 1=Peppermint
prog_param_tagged_fields = re.compile(r"[ \t]*// @(\w+){([^}]+)}: ([^\r\n]*)")
prog_groups = re.compile(r"@Group: *(\w+).*((?:\n[ \t]*// @(Path): (\S+))+)", re.MULTILINE)
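# Illustrative sketch (not taken from any real source file) of the metadata the
# regexes above are written against: prog_param captures an optional vehicle tag,
# the parameter name and the run of "// @Field: value" lines that follows, while
# prog_param_fields and prog_param_tagged_fields pull individual fields out of
# that run.  Field names and values below are made up for illustration only:
#
#     // @Param: RATE_MAX
#     // @DisplayName: Maximum rate
#     // @Description: Limits the requested rate
#     // @Values{Copter}: 0:Disabled,1:Enabled
#     // @User: Advanced
#     AP_GROUPINFO("RATE_MAX", 1, ExampleClass, rate_max, 0),
#
# prog_param_fields.findall() over the matched block would yield pairs such as
# ('DisplayName', 'Maximum rate'), and prog_param_tagged_fields.findall() would
# yield ('Values', 'Copter', '0:Disabled,1:Enabled').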
apm_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../')
vehicle_paths = glob.glob(apm_path + "%s/Parameters.cpp" % args.vehicle)
apm_tools_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../Tools/')
vehicle_paths += glob.glob(apm_tools_path + "%s/Parameters.cpp" % args.vehicle)
vehicle_paths.sort(reverse=True)
vehicles = []
libraries = []
# AP_Vehicle also has parameters rooted at "", but isn't referenced
# from the vehicle in any way:
ap_vehicle_lib = Library("") # the "" is tacked onto the front of param name
setattr(ap_vehicle_lib, "Path", os.path.join('..', 'libraries', 'AP_Vehicle', 'AP_Vehicle.cpp'))
libraries.append(ap_vehicle_lib)
error_count = 0
current_param = None
current_file = None
def debug(str_to_print):
"""Debug output if verbose is set."""
if args.verbose:
print(str_to_print)
def error(str_to_print):
"""Show errors."""
global error_count
error_count += 1
if current_file is not None:
print("Error in %s" % current_file)
if current_param is not None:
print("At param %s" % current_param)
print(str_to_print)
truename_map = {
"Rover": "Rover",
"ArduSub": "Sub",
"ArduCopter": "Copter",
"ArduPlane": "Plane",
"AntennaTracker": "Tracker",
"AP_Periph": "AP_Periph",
}
valid_truenames = frozenset(truename_map.values())
for vehicle_path in vehicle_paths:
name = os.path.basename(os.path.dirname(vehicle_path))
path = os.path.normpath(os.path.dirname(vehicle_path))
vehicles.append(Vehicle(name, path, truename_map[name]))
debug('Found vehicle type %s' % name)
if len(vehicles) > 1 or len(vehicles) == 0:
print("Single vehicle only, please")
sys.exit(1)
for vehicle in vehicles:
debug("===\n\n\nProcessing %s" % vehicle.name)
current_file = vehicle.path+'/Parameters.cpp'
f = open(current_file)
p_text = f.read()
f.close()
group_matches = prog_groups.findall(p_text)
debug(group_matches)
for group_match in group_matches:
lib = Library(group_match[0])
fields = prog_param_fields.findall(group_match[1])
for field in fields:
if field[0] in known_group_fields:
setattr(lib, field[0], field[1])
else:
error("group: unknown parameter metadata field '%s'" % field[0])
if not any(lib.name == parsed_l.name for parsed_l in libraries):
libraries.append(lib)
param_matches = []
if not args.emit_sitl:
param_matches = prog_param.findall(p_text)
for param_match in param_matches:
(only_vehicles, param_name, field_text) = (param_match[0],
param_match[1],
param_match[2])
if len(only_vehicles):
only_vehicles_list = [x.strip() for x in only_vehicles.split(",")]
for only_vehicle in only_vehicles_list:
if only_vehicle not in valid_truenames:
raise ValueError("Invalid only_vehicle %s" % only_vehicle)
if vehicle.truename not in only_vehicles_list:
continue
p = Parameter(vehicle.name+":"+param_name, current_file)
debug(p.name + ' ')
current_param = p.name
fields = prog_param_fields.findall(field_text)
field_list = []
for field in fields:
field_list.append(field[0])
if field[0] in known_param_fields:
value = re.sub('@PREFIX@', "", field[1]).rstrip()
setattr(p, field[0], value)
else:
error("param: unknown parameter metadata field '%s'" % field[0])
for req_field in required_param_fields:
if req_field not in field_list:
error("missing parameter metadata field '%s' in %s" % (req_field, field_text))
vehicle.params.append(p)
current_file = None
debug("Processed %u params" % len(vehicle.params))
debug("Found %u documented libraries" % len(libraries))
if args.emit_sitl:
libraries = filter(lambda x : x.name == 'SIM_', libraries)
else:
libraries = filter(lambda x : x.name != 'SIM_', libraries)
libraries = list(libraries)
alllibs = libraries[:]
vehicle = vehicles[0]
def process_library(vehicle, library, pathprefix=None):
'''process one library'''
paths = library.Path.split(',')
for path in paths:
path = path.strip()
global current_file
current_file = path
debug("\n Processing file '%s'" % path)
if pathprefix is not None:
libraryfname = os.path.join(pathprefix, path)
elif path.find('/') == -1:
if len(vehicles) != 1:
print("Unable to handle multiple vehicles with .pde library")
continue
libraryfname = os.path.join(vehicles[0].path, path)
else:
libraryfname = os.path.normpath(os.path.join(apm_path + '/libraries/' + path))
if path and os.path.exists(libraryfname):
f = open(libraryfname)
p_text = f.read()
f.close()
else:
error("Path %s not found for library %s (fname=%s)" % (path, library.name, libraryfname))
continue
param_matches = prog_param.findall(p_text)
debug("Found %u documented parameters" % len(param_matches))
for param_match in param_matches:
(only_vehicles, param_name, field_text) = (param_match[0],
param_match[1],
param_match[2])
if len(only_vehicles):
only_vehicles_list = [x.strip() for x in only_vehicles.split(",")]
for only_vehicle in only_vehicles_list:
if only_vehicle not in valid_truenames:
raise ValueError("Invalid only_vehicle %s" % only_vehicle)
if vehicle.truename not in only_vehicles_list:
continue
p = Parameter(library.name+param_name, current_file)
debug(p.name + ' ')
global current_param
current_param = p.name
fields = prog_param_fields.findall(field_text)
non_vehicle_specific_values_seen = False
for field in fields:
if field[0] in known_param_fields:
value = re.sub('@PREFIX@', library.name, field[1])
setattr(p, field[0], value)
if field[0] == "Values":
non_vehicle_specific_values_seen = True
else:
error("param: unknown parameter metadata field %s" % field[0])
debug("matching %s" % field_text)
fields = prog_param_tagged_fields.findall(field_text)
this_vehicle_values_seen = False
this_vehicle_value = None
other_vehicle_values_seen = False
for field in fields:
only_for_vehicles = field[1].split(",")
only_for_vehicles = [x.rstrip().lstrip() for x in only_for_vehicles]
delta = set(only_for_vehicles) - set(truename_map.values())
if len(delta):
error("Unknown vehicles (%s)" % delta)
debug("field[0]=%s vehicle=%s truename=%s field[1]=%s only_for_vehicles=%s\n" %
(field[0], vehicle.name, vehicle.truename, field[1], str(only_for_vehicles)))
value = re.sub('@PREFIX@', library.name, field[2])
if field[0] == "Values":
if vehicle.truename in only_for_vehicles:
this_vehicle_values_seen = True
this_vehicle_value = value
if len(only_for_vehicles) > 1:
other_vehicle_values_seen = True
elif len(only_for_vehicles):
other_vehicle_values_seen = True
if field[0] in known_param_fields:
setattr(p, field[0], value)
else:
error("tagged param<: unknown parameter metadata field '%s'" % field[0])
if ((non_vehicle_specific_values_seen or not other_vehicle_values_seen) or this_vehicle_values_seen):
if this_vehicle_values_seen and field[0] == 'Values':
setattr(p, field[0], this_vehicle_value)
# debug("Appending (non_vehicle_specific_values_seen=%u "
# "other_vehicle_values_seen=%u this_vehicle_values_seen=%u)" %
# (non_vehicle_specific_values_seen, other_vehicle_values_seen, this_vehicle_values_seen))
p.path = path # Add path. Later deleted - only used for duplicates
library.params.append(p)
group_matches = prog_groups.findall(p_text)
debug("Found %u groups" % len(group_matches))
debug(group_matches)
done_groups = dict()
for group_match in group_matches:
group = group_match[0]
debug("Group: %s" % group)
do_append = True
if group in done_groups:
                # this handles cases like the RangeFinder parameters, where the
                # Wasp driver's parameters get tacked onto the same RNGFND1_ group
lib = done_groups[group]
do_append = False
else:
lib = Library(group)
done_groups[group] = lib
fields = prog_param_fields.findall(group_match[1])
for field in fields:
if field[0] in known_group_fields:
setattr(lib, field[0], field[1])
else:
error("unknown parameter metadata field '%s'" % field[0])
if not any(lib.name == parsed_l.name for parsed_l in libraries):
if do_append:
lib.name = library.name + lib.name
debug("Group name: %s" % lib.name)
process_library(vehicle, lib, os.path.dirname(libraryfname))
if do_append:
alllibs.append(lib)
current_file = None
for library in libraries:
debug("===\n\n\nProcessing library %s" % library.name)
if hasattr(library, 'Path'):
process_library(vehicle, library)
else:
error("Skipped: no Path found")
debug("Processed %u documented parameters" % len(library.params))
# sort libraries by name
alllibs = sorted(alllibs, key=lambda x: x.name)
libraries = alllibs
def is_number(numberString):
try:
float(numberString)
return True
except ValueError:
return False
def clean_param(param):
if (hasattr(param, "Values")):
valueList = param.Values.split(",")
new_valueList = []
for i in valueList:
(start, sep, end) = i.partition(":")
if sep != ":":
raise ValueError("Expected a colon seperator in (%s)" % (i,))
if len(end) == 0:
raise ValueError("Expected a colon-separated string, got (%s)" % i)
end = end.strip()
start = start.strip()
new_valueList.append(":".join([start, end]))
param.Values = ",".join(new_valueList)
def validate(param):
"""
Validates the parameter meta data.
"""
global current_file
current_file = param.real_path
global current_param
current_param = param.name
# Validate values
if (hasattr(param, "Range")):
rangeValues = param.__dict__["Range"].split(" ")
if (len(rangeValues) != 2):
error("Invalid Range values for %s (%s)" %
(param.name, param.__dict__["Range"]))
return
min_value = rangeValues[0]
max_value = rangeValues[1]
if not is_number(min_value):
error("Min value not number: %s %s" % (param.name, min_value))
return
if not is_number(max_value):
error("Max value not number: %s %s" % (param.name, max_value))
return
# Check for duplicate in @value field
if (hasattr(param, "Values")):
valueList = param.__dict__["Values"].split(",")
values = []
for i in valueList:
i = i.replace(" ","")
values.append(i.partition(":")[0])
if (len(values) != len(set(values))):
error("Duplicate values found")
# Validate units
if (hasattr(param, "Units")):
if (param.__dict__["Units"] != "") and (param.__dict__["Units"] not in known_units):
error("unknown units field '%s'" % param.__dict__["Units"])
# Validate User
if (hasattr(param, "User")):
if param.User.strip() not in ["Standard", "Advanced"]:
error("unknown user (%s)" % param.User.strip())
if (hasattr(param, "Description")):
if not param.Description or not param.Description.strip():
error("Empty Description (%s)" % param)
for vehicle in vehicles:
for param in vehicle.params:
clean_param(param)
for vehicle in vehicles:
for param in vehicle.params:
validate(param)
# Find duplicate names in library and fix up path
for library in libraries:
param_names_seen = set()
param_names_duplicate = set()
# Find duplicates:
for param in library.params:
if param.name in param_names_seen: # is duplicate
param_names_duplicate.add(param.name)
param_names_seen.add(param.name)
# Fix up path for duplicates
for param in library.params:
if param.name in param_names_duplicate:
param.path = param.path.rsplit('/')[-1].rsplit('.')[0]
else:
# not a duplicate, so delete attribute.
delattr(param, "path")
for library in libraries:
for param in library.params:
clean_param(param)
for library in libraries:
for param in library.params:
validate(param)
if not args.emit_params:
sys.exit(error_count)
all_emitters = {
'json': JSONEmit,
'xml': XmlEmit,
'html': HtmlEmit,
'rst': RSTEmit,
'md': MDEmit,
'xml_mp': XmlEmitMP,
}
try:
from ednemit import EDNEmit
all_emitters['edn'] = EDNEmit
except ImportError:
# if the user wanted edn only then don't hide any errors
if args.output_format == 'edn':
raise
if args.verbose:
print("Unable to emit EDN, install edn_format and pytz if edn is desired")
# filter to just the ones we want to emit:
emitters_to_use = []
for emitter_name in all_emitters.keys():
if args.output_format == 'all' or args.output_format == emitter_name:
emitters_to_use.append(emitter_name)
if args.emit_sitl:
# only generate rst for SITL for now:
emitters_to_use = ['rst']
# actually invoke each emitter:
for emitter_name in emitters_to_use:
emit = all_emitters[emitter_name](sitl=args.emit_sitl)
if not args.emit_sitl:
for vehicle in vehicles:
emit.emit(vehicle)
emit.start_libraries()
for library in libraries:
if library.params:
emit.emit(library)
emit.close()
sys.exit(error_count)
| phamelin/ardupilot | Tools/autotest/param_metadata/param_parse.py | Python | gpl-3.0 | 17,332 | 0.002192 |
import os
import os.path
import glob
import re
from turbogears import controllers, expose, redirect, identity, validators as v, validate, config
from turbogears.identity.exceptions import IdentityFailure
from hubspace.validators import *
from formencode import ForEach
from hubspace.utilities.templates import try_render
from hubspace.utilities.login import login_args, requestPassword, resetPassword
from hubspace.utilities.dicts import AttrDict
from hubspace.utilities.permissions import is_host, addUser2Group
from hubspace.utilities.object import modify_attribute, obj_of_type
from hubspace.model import Location, LocationMetaData, User, RUsage, Group, MicroSiteSpace, ObjectReference, ListItem, Page, MetaWrapper, PublicPlace, List
from sqlobject import AND, SQLObjectNotFound, IN, LIKE, func
from sqlobject.events import listen, RowUpdateSignal, RowCreatedSignal, RowDestroySignal
import os, re, unicodedata, md5, random, sys, datetime, traceback, hmac as create_hmac
from hashlib import sha1
import cherrypy
from kid import XML
from hubspace.feeds import get_local_profiles, get_local_future_events, get_local_past_events
from BeautifulSoup import BeautifulSoup
import sendmail
import hubspace.model
model = hubspace.model
from hubspace.utilities.cache import strongly_expire
from hubspace.utilities.uiutils import now
import hubspace.sync.core as sync
from turbogears import database
import urlparse
from urllib import quote, urlencode
from urllib2 import urlopen, Request, build_opener, install_opener, HTTPCookieProcessor, HTTPRedirectHandler, HTTPError
import cookielib
from hubspace import configuration
import vobject
import patches
import logging
applogger = logging.getLogger("hubspace")
gr_cache = {}
def place(obj):
if isinstance(obj, Location):
return obj
elif hasattr(obj, 'location'):
return obj.location
elif hasattr(obj, 'place'):
return obj.place
else:
raise AttributeError("object has not location")
def bs_preprocess(html):
"""remove distracting whitespaces and newline characters"""
html = re.sub('\n', ' ', html) # convert newlines to spaces
return html
def html2xhtml(value):
value = value.strip()
value = BeautifulSoup(value).prettify()
value = bs_preprocess(value)
try:
XML(value).expand()
except:
cherrypy.response.headers['X-JSON'] = 'error'
print "not good XML"
return value
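# Rough illustration (exact whitespace depends on the installed BeautifulSoup
# version): html2xhtml('<p>hello') closes the tag and collapses newlines,
# returning something like '<p> hello </p>'; input that still fails to parse as
# XML sets the 'X-JSON' response header to 'error' but the cleaned-up string is
# returned either way.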
def get_profiles(*args, **kwargs):
location = kwargs.get('location')
no_of_images = 9
only_with_images = True
profiles = get_local_profiles(location, only_with_images, no_of_images)
if len(args) >=1:
profiles.update(get_user(*args))
return profiles
def get_user(*args, **kwargs):
if len(args) >= 1:
user = User.by_user_name(args[0])
if user.public_field and user.active:
return {'user': user}
return {}
def get_public_place(*args, **kwargs):
if len(args) >= 1:
place = PublicPlace.select(AND(PublicPlace.q.name==args[0]))
if place.count():
return {'place': place[0]}
return {'place': None}
def get_events(*args, **kwargs):
no_of_events = 10
location = kwargs.get('location')
events = get_local_future_events(location, no_of_events)
events.update(get_local_past_events(location, no_of_events))
if len(args) >=1:
events.update(get_event(*args))
return events
def parseSubpageId(list_name):
if list_name.startswith('subpage'):
list_name,pageid=list_name.split('_')
else:
pageid = None
return (list_name,pageid)
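# Illustrative behaviour (made-up ids): parseSubpageId('subpages_12') returns
# ('subpages', '12'), whereas a plain list name such as 'people_list' comes back
# as ('people_list', None) because it does not start with 'subpage'.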
standard_kw = ['microsite', 'page', 'location']
class RedirectToClient(Exception):
def __init__(self, url):
self.url = url
class HTTPRedirectClient(HTTPRedirectHandler):
def redirect_request(self, req, fp, code, msg, headers, newurl):
m = req.get_method()
if (code in (301, 302, 303, 307) and m in ("GET", "HEAD")
or code in (301, 302, 303) and m == "POST"):
# Strictly (according to RFC 2616), 301 or 302 in response
# to a POST MUST NOT cause a redirection without confirmation
# from the user (of urllib2, in this case). In practice,
# essentially all clients do redirect in this case, so we
# do the same.
# be conciliant with URIs containing a space
newurl = newurl.replace(' ', '%20')
raise RedirectToClient(newurl)
#return Request(newurl,
# headers=req.headers,
# origin_req_host=req.get_origin_req_host(),
# unverifiable=True)
else:
raise HTTPError(req.get_full_url(), code, msg, headers, fp)
forwarded_request_headers = ['If-None-Match']
forwarded_response_headers = ['Etag', 'Last-Modified', 'X-Pingback', 'Cache-Control', 'Pragma', 'Expires']
class MediaContent(Exception):
def __init__(self, response):
self.response = response
class AjaxContent(Exception):
def __init__(self, html):
self.html = html
def get_blog(*args, **kwargs):
blog_url = kwargs['page'].blog_url.strip()
args = list(args)
args.insert(0, blog_url)
url = '/'.join(args)
url += '/'
kw_args = dict((key.replace('+', '-'), val) for key, val in kwargs.iteritems() if key not in standard_kw)
post_data = None
if kw_args:
if cherrypy.request.method == 'GET':
url += '?' + urlencode(kw_args)
if cherrypy.request.method == 'POST':
post_data = urlencode(kw_args)
if cherrypy.session.has_key('cj'):
cj = cherrypy.session['cj']
else:
cj = cherrypy.session['cj'] = cookielib.CookieJar()
opener = build_opener(HTTPCookieProcessor(cj), HTTPRedirectClient)
install_opener(opener)
headers = {}
for header in forwarded_request_headers:
if cherrypy.request.headers.get(header, 0):
headers[header] = cherrypy.request.headers[header]
try:
if post_data:
blog = Request(url, post_data, headers)
else:
blog = Request(url, headers=headers)
blog_handle = urlopen(blog)
except RedirectToClient, e:
redirect(e.url.replace(blog_url, cherrypy.request.base + '/public/' + kwargs['page'].path_name))
except IOError, e:
if hasattr(e, 'reason'):
blog_body = "Could not get blog from: " + url + " because " + e.reason
blog_head = ""
elif hasattr(e, 'code'):
cherrypy.response.headers['status'] = e.code
blog_body = "Could not get blog from: " + url + " because " + str(e.code)
blog_head = ""
except ValueError:
blog_body = ""
blog_head = ""
return {'blog': blog_body, 'blog_head': blog_head}
else:
content_type = blog_handle.headers.type
if content_type not in ['text/html', 'text/xhtml']:
raise redirect(url)
blog = blog_handle.read()
#replace any links to the blog_url current address
our_url = cherrypy.request.base + '/public/' + kwargs['page'].path_name
blog = blog.replace(blog_url, our_url)
blog = BeautifulSoup(blog)
#blog = bs_preprocess(blog)
for input in blog.body.findAll('input', attrs={'name':re.compile('.*\-.*')}):
input['name'] = input['name'].replace('-', '+') #hack around the awkwardness of submitting names with '-' from FormEncode
#change back anything ending in .js .css .png .gif, .jpg .swf
for link in blog.findAll('link', attrs={'href':re.compile('.*' + re.escape(our_url) + '.*')}):
link['href'] = link['href'].replace(our_url, blog_url)
for link in blog.findAll('img', attrs={'src':re.compile('.*' + re.escape(our_url) + '.*')}):
link['src'] = link['src'].replace(our_url, blog_url)
for link in blog.findAll('script', attrs={'src':re.compile('.*' + re.escape(our_url) + '.*')}):
link['src'] = link['src'].replace(our_url, blog_url)
for header in blog.body.findAll('div', attrs={'id':'header'}):
header.extract()
for css in blog.head.findAll('link', attrs={'href':re.compile('.*standalone\.css')}):
css.extract()
for link in blog.head.findAll('link', attrs={'rel':'canonical'}):
link.extract()
#link['href'] = link['src'].replace(our_url, blog_url)
blog_head = blog.head.renderContents()
blog_body = blog.body.renderContents()
for header in forwarded_response_headers:
if blog_handle.headers.get(header, 0):
cherrypy.response.headers[header] = blog_handle.headers[header]
return {'blog': blog_body, 'blog_head': blog_head}
def get_blog2(*args, **kwargs):
#import pdb; pdb.set_trace()
thispage = kwargs['page']
blog_url = thispage.blog_url.strip()
args = list(args)
args.insert(0, blog_url)
url = '/'.join(args)
    #add a / if it's not a .jpg or if it's just the domain name
if not url.endswith('/') and (not '.' in args[-1] or url.count('/') <3):
url += '/'
kw_args = dict((key.replace('+', '-'), val) for key, val in kwargs.iteritems() if key not in standard_kw)
post_data = None
if kw_args:
if cherrypy.request.method == 'GET':
url += '?' + urlencode(kw_args)
if cherrypy.request.method == 'POST':
post_data = urlencode(kw_args)
if cherrypy.session.has_key('cj'):
cj = cherrypy.session['cj']
else:
cj = cherrypy.session['cj'] = cookielib.CookieJar()
opener = build_opener(HTTPCookieProcessor(cj), HTTPRedirectClient)
install_opener(opener)
headers = {}
for header in forwarded_request_headers:
if cherrypy.request.headers.get(header, 0):
headers[header] = cherrypy.request.headers[header]
try:
if post_data:
blog = Request(url, data=post_data, headers=headers)
else:
blog = Request(url, headers=headers)
blog_handle = urlopen(blog)
except RedirectToClient, e:
redirect(e.url.replace(blog_url, cherrypy.request.base + '/public/' + kwargs['page'].path_name))
except IOError, e:
errorbody = e.read()
if '<body id="error-page">' in errorbody:
blog_body = "There was an error with wp"
blog_head = ""
elif hasattr(e, 'reason'):
blog_body = "Could not get blog from: " + url + " because " + e.reason
blog_head = ""
elif hasattr(e, 'code'):
cherrypy.response.headers['status'] = e.code
blog_body = "Could not get blog from: " + url + " because " + str(e.code)
blog_head = ""
except ValueError:
blog_body = ""
blog_head = ""
return {'blog': blog_body, 'blog_head': blog_head}
else:
content_type = blog_handle.headers.type
if content_type not in ['text/html', 'text/xhtml']:
raise MediaContent(blog_handle)
#import pdb; pdb.set_trace()
blog = blog_handle.read()
#replace any links to the blog_url current address
our_url = cherrypy.request.base + '/public/' + kwargs['page'].path_name
blog = blog.replace(blog_url, our_url)
blog = BeautifulSoup(blog)
#blog = bs_preprocess(blog)
#for input in blog.body.findAll('input', attrs={'name':re.compile('.*\-.*')}):
for input in blog.findAll('input', attrs={'name':re.compile('.*\-.*')}):
input['name'] = input['name'].replace('-', '+') #hack around the awkwardness of submitting names with '-' from FormEncode
#change back anything ending in .js .css .png .gif, .jpg .swf
#for link in blog.findAll('link', attrs={'href':re.compile('.*' + re.escape(our_url) + '.*')}):
# link['href'] = link['href'].replace(our_url, blog_url)
#for link in blog.findAll('img', attrs={'src':re.compile('.*' + re.escape(our_url) + '.*')}):
# link['src'] = link['src'].replace(our_url, blog_url)
#for link in blog.findAll('script', attrs={'src':re.compile('.*' + re.escape(our_url) + '.*')}):
# link['src'] = link['src'].replace(our_url, blog_url)
if hasattr(blog,'body') and blog.body:
#import pdb; pdb.set_trace()
for header in blog.body.findAll('div', attrs={'id':'header'}):
header.extract()
for css in blog.head.findAll('link', attrs={'href':re.compile('.*standalone\.css')}):
css.extract()
#for script in blog.findAll('script',attrs={'src':re.compile('.*' + re.escape(blog_url) + '.*jquery\.js.*')}):
# script.extract()
for script in blog.findAll('script',attrs={'src':re.compile('jquery\.js')}):
script.extract()
for script in blog.findAll('script',attrs={'src':re.compile('functions\.js')}):
script.extract()
for script in blog.findAll('script',attrs={'src':re.compile('jquery\.validate\.js')}):
script.extract()
sidebartext=''
#the sidebar get injected via deliverance
for sidebar in blog.findAll('div',id='sidebar'):
sidebartext = sidebar.renderContents().replace('text_small','text_real_small')
sidebar.extract()
for wphead in blog.findAll('div',id='wphead'):
wphead.extract()
pass
blog_head = blog.head.renderContents()
found = blog.findAll(id='content')
#if possible grep the content div
if blog.findAll('div',id='content') and iswpadminurl(url):
blog_body = blog.findAll('div',id='content')[0].renderContents()
else:
blog_body = blog.body.renderContents()
#for header in forwarded_response_headers:
# if blog_handle.headers.get(header, 0):
# cherrypy.response.headers[header] = blog_handle.headers[header]
else:
raise AjaxContent(blog.renderContents())
#blog_body = ''
#blog_head = ''
return {'blog': blog_body, 'blog_head': blog_head,'sidebartext':sidebartext}
def sitesearch(*args,**kwargs):
#title, description, url
s = str(kwargs.get('s',''))
s = s.lower().strip()
page = kwargs['page']
location = page.location
searchresults = []
if s:
access_tuple = patches.utils.parseDBAccessDirective()
con = patches.utils.getPostgreSQLConnection(*access_tuple)
cur = con.cursor()
sql = """select distinct page.id from page left join object_reference on page.id=object_reference.object_id left join meta_data on object_reference.id=meta_data.object_ref_id where page.location_id=%s and object_reference.object_type='Page' and (lower(page.content) like '%%' || %s || '%%' or lower(meta_data.attr_value) like '%%' || %s || '%%')"""
#XXX no sql injections please!!!!
id = str(page.location.id)
cur.execute(sql,(id,s,s,))
result = [r[0] for r in cur.fetchall()]
for id in result:
page=Page.get(id)
title = page.name and page.name or page.title
url = cherrypy.request.base + '/public/' + page.path_name
description = re.sub(r'<[^>]*>','',page.content)[:100]
searchresults.append(dict(url=url,title=title,description=description))
blogs = Page.selectBy(location=page.location.id,page_type='blog2').orderBy('id')
if blogs.count() > 0:
blog = blogs[0]
blog = MetaWrapper(blog)
search_url = blog.blog_url.strip()
if not search_url.endswith('/'):
search_url += '/'
search_url += "?s=%s" % s
blogresult = urlopen(search_url).read()
blogsoup = BeautifulSoup(blogresult)
for d in blogsoup.findAll("div", { "class" : re.compile('hentry') }):
title = d.h3.a.string
url = d.h3.a['href']
parts = urlparse.urlsplit(url)
url = cherrypy.request.base + '/public/' + blog.path_name + parts[2]
description = re.sub(r'<[^>]*>','',d.find('div', { "class" : "entry-content" }).p.string)
searchresults.append(dict(url=url,title=title,description=description))
return dict(searchresults=searchresults,
s=s)
def iswpadminurl(url):
if 'wp-admin' in url or 'wp-login' in url:
return True
else:
return False
def get_event(*args):
return {'event': RUsage.get(args[0])}
def experience_slideshow(*args, **kwargs):
return {'image_source_list': [top_image_src(page, kwargs['microsite']) for page in Page.select(AND(Page.q.locationID == kwargs['location'],
Page.q.image != None)) if page.active]}
def image_source(image_name, microsite, default=""):
try:
os.stat(microsite.upload_dir + image_name)
return microsite.upload_url + image_name
except:
return default
def top_image_src(page, microsite):
if page.image_name:
return microsite.upload_url + page.image_name
else:
try:
os.stat(os.getcwd() + '/hubspace/static/images/micro/main-images/' + page.path_name.split('.')[0]+'.jpg')
return '/static/images/micro/main-images/' + page.path_name.split('.')[0]+'.jpg'
except OSError:
return '/static/images/micro/main-images/index.jpg'
def page_image_source(page_name, **kwargs):
return image_source(page_name + '.png', kwargs['microsite'], "/static/images/micro/main-images/" + page_name + '.jpg')
def standard_page(*args, **kwargs):
return {}
valid_username_chars = r"[^0-9a-z._-]"
valid_phone_chars = r"[^0-9/.\(\)\+-]"
def create_inactive_user_host_email(**kwargs):
"""send a mail to the hosts with a link to create the user
"""
location = Location.get(kwargs['location'])
if not kwargs.get('user_name', None):
kwargs['user_name'] = kwargs['first_name'] + '.' + kwargs['last_name']
#normalize the user_name to ascii
kwargs['user_name'] = unicodedata.normalize('NFKD', kwargs['user_name']).encode('ascii', 'ignore').lower()
kwargs['user_name'] = re.sub(valid_username_chars, '', kwargs['user_name'])
kwargs['phone'] = re.sub(valid_phone_chars, '', kwargs['phone'])
#ensure username is unique
try:
User.by_user_name(kwargs['user_name'])
try:
kwargs['user_name'] = kwargs['user_name'][:-1] + str(int(kwargs['user_name'][-1]) + 1)
except ValueError:
kwargs['user_name'] += '2'
kwargs['user_name'] = kwargs['user_name'].decode('utf-8')
return create_inactive_user_host_email(**kwargs)
except SQLObjectNotFound:
pass
#if user with that email has already been created, don't create them again - but send a mail to hosts to inform them
attributes = {'user_name': kwargs['user_name'],
'first_name': kwargs['first_name'],
'last_name' : kwargs['last_name'],
'email_address': kwargs['email_address'],
'organisation': kwargs['organisation'],
'home': kwargs['phone'],
'location_url': location.url,
'location_path': location.url.split('://')[1].replace('.', '_').replace('-', '_'),
'location_name':location.name}
try:
User.by_email_address(kwargs['email_address'])
link = "A user with this email address already exists so you cannot create this user"
except SQLObjectNotFound:
link = location.url + '/sites/%(location_path)s/edit/create_inactive_user?user_name=%(user_name)s&first_name=%(first_name)s&last_name=%(last_name)s&email_address=%(email_address)s&organisation=%(organisation)s&home=%(home)s' %dict((attr, quote(val.encode('utf-8'))) for attr, val in attributes.items())
hmac_key = config.get('hmac_key', None)
if hmac_key:
auth_code = create_hmac.new(hmac_key, link, sha1)
link += '&hmac=' + auth_code.hexdigest()
link = "To create the user hit this %s" %(link)
attributes.update({'link': link})
mail_body = u"""Dear %(location_name)s Hosts,\n\n Someone has enquired about membership of your Hub at %(location_url)s. The details are as follows\n\nname: %(first_name)s %(last_name)s\n\norganisation: %(organisation)s\n\nemail: %(email_address)s\n\ntel: %(home)s\n\n\n%(link)s
""" % attributes
mail_body = mail_body.encode('utf-8')
enquiries_address = location.name.lower().replace(' ', '') + "[email protected]"
sendmail.sendmail(enquiries_address, enquiries_address, "The Hub | enquiries", mail_body)
return attributes
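# Sketch of the signing round trip used above (hmac_key comes from the deployment
# config and is not shown here):
#
#     auth_code = create_hmac.new(hmac_key, link, sha1)
#     signed_link = link + '&hmac=' + auth_code.hexdigest()
#
# MicroSiteEdit.create_inactive_user later recomputes the digest over everything
# before '&hmac=' in the requested URL and rejects the request if it differs.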
def create_enquiry(*args, **kwargs):
create_inactive_user_host_email(**kwargs)
return {'first_name':kwargs['first_name'],
'last_name': kwargs['last_name'],
'organisation': kwargs['organisation'],
'phone': kwargs['phone'],
'email_address': kwargs['email_address'],
'submit': True}
site_template_args = {'tg_css':[], 'tg_js_head':[], 'tg_js_bodytop':[], 'tg_js_bodybottom':[]}
list_types = {'spaces_list': {'object_types':['PublicSpace'], 'mode':'add_new'},
'left_tabs': {'object_types':['Page'], 'mode':'add_new'},
'right_tabs': {'object_types':['Page'], 'mode':'add_new'},
'places_list': {'object_types':['PublicPlace'], 'mode':'add_new'},
'people_list': {'object_types':['User'], 'mode':'add_existing'},
'featured_list': {'object_types':['Location', 'User', 'Page'], 'mode':'add_existing'}
}
#jhb: idea could be to have the list_types in the database, (as per Tom), add a 'context' to them, eg.
#the page on which they should appear.
def getList(list_name,location):
#list_name,pageid = parseSubpageId(list_name)
#if pageid:
# lists = List.selectBy(list_name=list_name,location=location,page=pageid)
#else:
# lists = List.selectBy(list_name=list_name,location=location)
lists = List.selectBy(list_name=list_name,location=location)
if lists.count() == 1:
return lists[0]
else:
return None
def last_item(list_name, location):
thelist = getList(list_name,location)
if thelist:
try:
return ListItem.select(AND(ListItem.q.nextID == None,
ListItem.q.listID == thelist))[0]
except IndexError:
return None
return None
# try:
# return ListItem.select(AND(ListItem.q.nextID == None,
# ListItem.q.list_name == list_name,
# ListItem.q.locationID == location))[0]
# except IndexError:
# return None
def append_existing_item(list_name, obj, **kwargs):
#import pdb; pdb.set_trace()
thelist = getList(list_name,kwargs['location'])
old_last = last_item(list_name, kwargs['location'])
object_ref = ObjectReference.select(AND(ObjectReference.q.object_type == obj.__class__.__name__,
ObjectReference.q.object_id == obj.id))[0]
new_last = ListItem(**{'list_name':list_name, 'location':kwargs['location'], 'active':kwargs['active'], 'object_ref':object_ref,'list':thelist})
if old_last:
old_last.next = new_last
def append_to_list(list_name, **kwargs):
#we could be adding to a subpages list. If foo.html gets a bar subpage,
#the name needs to be foo__bar.html
#import pdb; pdb.set_trace()
if kwargs['object_type'] == Page.__name__:
path_name = kwargs['name']
subpage_of=None
if list_name.startswith('subpages_'):
thelist = getList(list_name,kwargs['location'])
page = thelist.page
pagenamebase = page.path_name.split('.html')[0]
path_name = pagenamebase+'__'+kwargs['name']
subpage_of = list_name.split('subpages_')[1]
page_type = kwargs.get('page_type', 'standard')
new_obj = kwargs['site_types'][page_type].create_page(kwargs['name'], kwargs['location'],path_name=path_name,subpage_of=subpage_of)
else:
object_type = getattr(model, kwargs['object_type'])
new_obj = object_type(**{'name': kwargs['name']})
return append_existing_item(list_name, new_obj, **kwargs)
class PageType(object):
def __init__(self, name, template, view_func=None, can_be_tab=True, static=True, default_vals=None):
self.name = name
self.template = template
self.view_func = view_func
self.can_be_tab = can_be_tab
self.static = static
self.defaults_vals = default_vals
if default_vals:
self.default_vals = default_vals
else:
self.default_vals = {}
def create_page(self, page_name, location, initial_vals=None,path_name=None,subpage_of=None):
#import pdb; pdb.set_trace()
if not path_name:
path_name=page_name
if self.static:
path_name = path_name + '.html'
attr_vals = dict(**self.default_vals)
if initial_vals:
attr_vals.update(initial_vals)
page = Page(**{'page_type': self.name,
'name': page_name,
'path_name': path_name,
'location': location})
#pages can have subpages
List(list_name='subpages_%s' % page.id,
object_types='Page',
mode='add_new',
page=page,
location=page.location)
page_wrapper = MetaWrapper(page)
if subpage_of:
attr_vals['subpage_of'] = subpage_of
for attr, val in attr_vals.items():
if not getattr(page_wrapper, attr):
setattr(page_wrapper, attr, val)
return page
login_page_type = PageType('login', 'hubspace.templates.microSiteLogin', login_args, static=False, default_vals={'name':"member login", 'subtitle':"book spaces and find members "})
request_password_type = PageType('requestPassword', 'hubspace.templates.microSitePassword', requestPassword, static=False, can_be_tab=False)
reset_password_type = PageType('resetPassword', 'hubspace.templates.microSitePassword', resetPassword, static=False, can_be_tab=False)
# on starting the application, each type is added to the map
website_page_types = {
'home': PageType('home', 'hubspace.templates.webSiteHome', default_vals={'name':"home"}),
'people': PageType('people', 'hubspace.templates.webSitePeople', get_user, default_vals={'name':"people"}),
'standard': PageType('standard', 'hubspace.templates.webSiteStandard', default_vals={'name':"contact"}),
'places': PageType('places', 'hubspace.templates.webSitePlaces', get_public_place, default_vals={'name':"places"}),
'login': login_page_type,
'requestPassword':request_password_type,
'resetPassword':reset_password_type
}
website_pages = {
'index':'home',
'contact':'standard',
'people':'people',
'places':'places',
'ideas':'standard',
'about':'standard',
'invitation':'standard',
'contact':'standard',
'login':'login',
'requestPassword':'requestPassword',
'resetPassword':'resetPassword'
}
website_left_page_list = ['index', 'people', 'places', 'ideas', 'about', 'invitation'] #determines the order the pages appear in tabs which are autocreated
website_right_page_list = ['contact']
microsite_page_types = {
'home': PageType('home', 'hubspace.templates.microSiteHome', default_vals={'name':"King's Cross"}),
'experience': PageType('experience', 'hubspace.templates.microSiteExperience', experience_slideshow, default_vals={'name':"experience", 'subtitle':"the hub at king's cross"}),
'events': PageType('events', 'hubspace.templates.microSiteEvents', get_events, static=True, default_vals={'name':"events", 'subtitle':"upcoming events and activities"}),
'spaces': PageType('spaces', 'hubspace.templates.microSiteSpaces', None, static=True, default_vals={'name':"spaces", 'subtitle':"for working and much more"}),
'blog': PageType('blog', 'hubspace.templates.microSiteBlog', get_blog, static=False),
'members': PageType('members', 'hubspace.templates.microSiteMembers', get_profiles, default_vals={'name':"our members", 'subtitle':"meet people at the hub"}),
'join': PageType('join', 'hubspace.templates.microSiteJoinus', default_vals={'name':"join us", 'subtitle':"how to become a member"}),
'joinConfirm': PageType('joinConfirm', 'hubspace.templates.microSiteJoinus', create_enquiry, static=False, can_be_tab=False),
'contact': PageType('contact', 'hubspace.templates.microSiteContact', default_vals={'name':"contact", 'subtitle':"get in touch"}),
'login': login_page_type,
'requestPassword':request_password_type,
'resetPassword':reset_password_type,
'standard': PageType('standard', 'hubspace.templates.microSiteStandard', standard_page, default_vals={'name':"pagex", 'subtitle':"the Hub"}),
'blog2': PageType('blog2', 'hubspace.templates.microSiteBlog2', get_blog2, static=False),
'plain': PageType('plain', 'hubspace.templates.microSitePlain', standard_page, default_vals={'name':"pagex", 'subtitle':"the Hub"}),
'plain2Column': PageType('plain2Column', 'hubspace.templates.microSitePlain2Col', standard_page, default_vals={'name':"pagex", 'subtitle':"the Hub"}),
'search': PageType('search', 'hubspace.templates.microSiteSearch', sitesearch, static=False),
}
#these are added to the database when a microsite is created
microsite_pages = {
'index': 'home',
'experience': 'experience',
'events': 'events',
'spaces':'spaces',
'members':'members',
'joinus':'join',
'joinConfirm': 'joinConfirm',
'contact': 'contact',
'login':'login',
'requestPassword':'requestPassword',
'resetPassword':'resetPassword'
}
#determines the order the pages appear in tabs which are autocreated
microsite_left_page_list = ['index', 'experience', 'events', 'spaces', 'members', 'joinus']
microsite_right_page_list = ['login', 'contact']
def migrate_data():
"""
"""
page_metadata_map = {'index':['sub_title', 'find_us_header', 'find_us', 'parking_header', 'parking', 'opening_hours', 'opening_hours_header', 'map_location'],
'experience': ['features_header', 'features_body'],
'events': ['upcoming_events', 'past_events'],
'spaces': ['meeting_rooms', 'meeting_rooms_intro'],
'members': ['profiles_header'],
'joinus': ['joinus_enquiry', 'joinus_enquiry_intro'],
'joinConfirm': ['joinus_body', 'joinus_confirm_header', 'joinus_confirm_body'],
'login': [],
'requestPassword': [],
'resetPassword': []}
page_attributes = {'index': [('index_title', 'name'), ('site_title', 'title'), ('hub_description', 'content')],
'experience': [('experience_title','name'), ('experience_header', 'title'), ('experience_body', 'content'), ('footer_experience', 'subtitle')],
'events': [('events_title','name'), ('events_header', 'title'), ('events_body', 'content'), ('footer_events', 'subtitle')],
'spaces': [('spaces_title','name'), ('spaces_header', 'title'), ('spaces_body', 'content'), ('footer_spaces', 'subtitle')],
'members': [('members_title','name'), ('members_header', 'title'), ('members_body', 'content'), ('footer_members', 'subtitle')],
'joinus': [('joinus_title','name'), ('joinus_header', 'title'), ('joinus_body', 'content'), ('footer_joinus', 'subtitle')],
'contact': [('contact_title','name'), ('contact_header', 'title'), ('contact_body', 'content'), ('footer_contact', 'subtitle')],
'login':[('memberlogin_title', 'name')]}
for loc in Location.select():
if loc.id == 16:
continue
try:
metadata = LocationMetaData.select(AND(LocationMetaData.q.location == loc,
LocationMetaData.q.attr_name == 'map_location'))[0]
db_val = metadata.attr_value
metadata.destroySelf()
except IndexError:
db_val = ""
if db_val:
MetaWrapper(loc).geo_address = db_val
if not loc.url:
continue
for page in Page.select(AND(Page.q.locationID==loc.id)):
page = MetaWrapper(page)
fake_microsite = AttrDict({'upload_dir': os.getcwd() + '/hubspace/static/' + loc.url.split('://')[1].replace('.', '_').replace('-', '_') + '/uploads/',
'upload_url': '/static/' + loc.url.split('://')[1].replace('.', '_').replace('-', '_') + '/uploads/'})
img_source = image_source(page.path_name.split('.')[0]+'.png', fake_microsite)
if img_source:
from file_store import LocationFiles
img_obj = LocationFiles(location=loc, attr_name=page.path_name.split('.')[0]+'.png', mime_type='image/png')
setattr(page, 'image', img_obj)
for property in page_metadata_map.get(page.path_name.split('.')[0], []):
try:
metadata = LocationMetaData.select(AND(LocationMetaData.q.location == loc,
LocationMetaData.q.attr_name == property))[0]
db_val = metadata.attr_value
metadata.destroySelf()
except IndexError:
continue
setattr(page, property, db_val)
for old_property, new_property in page_attributes.get(page.path_name.split('.')[0], []):
try:
metadata = LocationMetaData.select(AND(LocationMetaData.q.location == loc,
LocationMetaData.q.attr_name == old_property))[0]
db_val = metadata.attr_value
metadata.destroySelf()
except IndexError:
continue
setattr(page, new_property, db_val)
def relative_folder(site_url):
"""determine the position of the request compared to the root of the site
This is used to ensure that relative links in the master template are correct independent of whether we are lower down the url hierarchy
e.g. when we are at "/sites/kingscross_the_hub_net/members/tom.salfield" nav links should be prepended with "../"
Overall I think I have learned that it would be better to use absolute urls and then rewrite links on the way out (if as in this case) we want to have dynamically generated templates which need to be viewed as static files at a different location
"""
extra_path = cherrypy.request.path.replace('/' + site_url + '/', '')
if 'edit' in extra_path or extra_path.endswith('.html'):
return './' # this only works because all our staticly written files are at the first level in url hierarchy
steps_back = extra_path.count('/')
if steps_back:
return '../' * steps_back
else:
return './'
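# Worked example (paths are illustrative): with site_url 'sites/kingscross_the_hub_net',
# a request for '/sites/kingscross_the_hub_net/members/tom.salfield' leaves
# extra_path == 'members/tom.salfield', which contains one '/', so the function
# returns '../'; a request for the site root or for a static '.html' page returns './'.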
from hubspace.file_store import save_file
#override with site_folders / functions (if necessary)
class SiteList(controllers.Controller):
def __init__(self, site):
super(SiteList, self).__init__()
self.site = site
def get_list(self, list_name, page=None):
"""This should return a list of objects which can be rendered by the appropriate templates
e.g. in the case of spaces this should return a list of objects with the attributes 'name', 'description' and 'image'
        e.g. in the case of the site tabs, this should return a list of site_page 'objects' with the relevant metadata fields as attributes
"""
#if page:
# lists = List.selectBy(location=self.site.location,list_name=list_name,page=page)
#else:
# lists = List.selectBy(location=self.site.location,list_name=list_name)
lists = List.selectBy(location=self.site.location,list_name=list_name)
if lists.count() == 1:
thelist = lists[0]
return {'list_items':self.iterator(list_name), 'list_name':list_name, 'list_types':thelist.object_types.split(','), 'list_mode':thelist.mode}
#return {'list_items':self.iterator(list_name), 'list_name':list_name, 'list_types':list_types[list_name]['object_types'], 'list_mode':list_types[list_name]['mode']}
@expose(template='hubspace.templates.listEditor', fragment=True)
def render_as_table(self, list_name):
relative_path = relative_folder(self.site.site_url)
#list_name,pageid = parseSubpageId(list_name)
#import pdb; pdb.set_trace()
#hack
#if pageid :
# page = Page.get(pageid)
# template_args = self.get_list(list_name,page)
# #relative_path = relative_path[3:] #remove the first ../
# template_args.update({'pageid':pageid})
#else:
# template_args = self.get_list(list_name)
# template_args.update({'pageid':None})
template_args = self.get_list(list_name)
template_args.update({'page_types_dict':self.site.site_types})
template_args.update({'page_types':[type[0] for type in self.site.site_types.iteritems() if type[1].can_be_tab]})
template_args.update({'relative_path': relative_path})
#template_args.update({'orig_name':orig_name})
return template_args
@expose()
@validate(validators={'list_name':v.UnicodeString(), "order":ForEach(v.Int())})
def reorder(self, list_name, order=None):
"""iterate new list and check if each item is in the right place, if a next of an object doesn't correspond to the next in the list, change .next on the object. On the last object in the list, set next = None.
We should put some safe guards in place to stop any possibility of cycling next references - this will block the application if it occurs
"""
if not is_host(identity.current.user, Location.get(self.site.location)):
raise IdentityFailure('what about not hacking the system')
first = self.first(list_name)
new_first = ListItem.get(order[0])
if new_first != first:
try:
new_first.previous.next = None
except AttributeError:
pass
previous = new_first
for id in order[1:]:
current = previous.next
new_current = ListItem.get(id)
if current != new_current:
previous.next = new_current
previous = new_current
new_last = ListItem.get(id)
if new_last != self.last(list_name):
new_last.next = None
return "success"
def last(self, list_name):
return last_item(list_name, self.site.location)
def first(self, list_name):
lists = List.selectBy(list_name=list_name,location=self.site.location)
if lists.count() == 1:
thelist = lists[0]
for item in ListItem.selectBy(list=thelist):
if item.previous == None:
return item
        #the commented-out query below doesn't work because .previous only exists where the item is referenced by another ListItem's .next
#try:
# return MicroSiteSpace.select(AND(MicroSiteSpace.q.previous == None,
# MicroSiteSpace.q.locationID == self.site.location))[0]
#except IndexError:
# return None
def iterator(self, list_name):
current = self.first(list_name)
if current:
yield current
while current.next:
yield current.next
current = current.next
@expose()
@validate(validators={'list_name':v.UnicodeString(), 'object_type':v.UnicodeString(), 'object_id':v.Int(), 'active':v.Int(if_empty=0),'pageid':v.Int(if_missing=1)})
def append_existing(self, list_name, **kwargs):
#import pdb; pdb.set_trace()
if not is_host(identity.current.user, Location.get(self.site.location)):
raise IdentityFailure('what about not hacking the system')
kwargs['location'] = self.site.location
kwargs['site_types'] = self.site.site_types
try:
obj = getattr(model, kwargs['object_type']).get(kwargs['object_id'])
append_existing_item(list_name, obj, **kwargs)
except:
pass
return self.render_as_table(list_name)
@expose()
@validate(validators={'list_name':v.UnicodeString(), 'object_type':v.UnicodeString(), 'page_type':v.UnicodeString(), 'name':v.UnicodeString(), 'active':v.Int(if_empty=0),'pageid':v.Int(if_missing=1)})
def append(self, list_name, **kwargs):
if not is_host(identity.current.user, Location.get(self.site.location)):
raise IdentityFailure('what about not hacking the system')
kwargs['location'] = self.site.location
kwargs['site_types'] = self.site.site_types
append_to_list(list_name, **kwargs)
return self.render_as_table(list_name)
@expose()
@validate(validators={'list_name':v.UnicodeString(), 'item_id':v.Int(if_empty=None)})
def remove(self, list_name, item_id):
if not is_host(identity.current.user, Location.get(self.site.location)):
raise IdentityFailure('what about not hacking the system')
self._remove(list_name,item_id)
return self.render_as_table(list_name)
def _remove(self,list_name,item_id): #XXX is this secured against web attacks?
item = ListItem.get(item_id)
if item.previous:
if item.next:
item.previous.next = item.next
else:
item.previous.next = None
#pages have subpages - lets destroy them first, which could have subpages...
if item.object.__class__ == Page:
for sublist in item.object.lists:
for subitem in sublist.listitems:
self._remove(sublist.list_name,subitem.id)
#item.object.destroySelf()
#item.destroySelf()
sublist.destroySelf()
item.object.destroySelf()
item.destroySelf()
@expose()
@validate(validators={'list_name':v.UnicodeString(), 'item_id':v.Int(if_empty=None)})
def remove_existing(self, list_name, item_id):
if not is_host(identity.current.user, Location.get(self.site.location)):
raise IdentityFailure('what about not hacking the system')
item = ListItem.get(item_id)
if item.previous:
if item.next:
item.previous.next = item.next
else:
item.previous.next = None
item.destroySelf()
return self.render_as_table(list_name)
@expose()
@validate(validators={'list_name':v.UnicodeString(), 'object_id':v.UnicodeString(), 'active':v.Int(if_empty=0)})
def toggle_active(self, list_name, object_id, active=0):
if not is_host(identity.current.user, Location.get(self.site.location)):
raise IdentityFailure('what about not hacking the system')
item = ListItem.get(object_id)
item.active = active
return self.render_as_table(list_name)
class MicroSiteEdit(controllers.Controller):
def __init__(self, site):
super(MicroSiteEdit, self).__init__()
self.site = site
@expose(template='hubspace.templates.uploadImage')
def uploadImageIframe(self, id, type, attr, height=0, width=0, page_name=""):
return {'id':id, 'type':type, 'attr':attr, 'relative_path':'../../../../' + relative_folder(self.site.site_url)+'edit/', 'height': height, 'width': width, 'page_name':page_name}
@expose()
@validate(validators={'object_id':real_int, 'object_type':v.UnicodeString(), 'property':v.UnicodeString(), 'height':v.Int(), 'width': v.Int(), 'page_name':v.UnicodeString()})
def uploadImage(self, object_id, object_type, property, image, height=None, width=None, page_name='index.html', tg_errors=None):
# for some very strange reason height and width come through as unicode
if tg_errors:
for tg_error in tg_errors:
print `tg_error`, str(tg_errors[tg_error])
return "error uploading"
if height:
height = int(height)
if width:
width = int(width)
obj = MetaWrapper(obj_of_type(object_type, object_id))
location = Location.get(self.site.location)
if not is_host(identity.current.user, location):
raise IdentityFailure('what about not hacking the system')
elif object_type in ['PublicSpace', 'Page', 'PublicPlace', 'Location']:
file_object = save_file(location.id, image, height=height, width=width, upload_dir=self.site.upload_dir)
print `obj` + `property` + str(int(file_object.id))
setattr(obj, property, str(int(file_object.id)))
if page_name.endswith('.html'):
self.site.render_page(page_name)
return "<div id='new_image_src'>" + self.site.upload_url + file_object.attr_name + "</div>"
@expose()
@validate(validators={'user_name':username, 'first_name':no_ws_ne_cp, 'last_name':no_ws_ne_cp, 'email_address':email_address, 'organisation':no_ws, 'home':phone})
def create_inactive_user(self, tg_errors=None, user_name=None, first_name=None, last_name=None, email_address=None, organisation=None, home=None, hmac=''):
#check hmac
hmac_key = config.get('hmac_key', None)
if hmac_key:
link, auth_code = cherrypy.request.browser_url.split('&hmac=')
if create_hmac.new(hmac_key, link, sha1).hexdigest() != auth_code:
raise "Invalid key"
else:
raise "please add an hmac_key='random_key' to the config file"
if tg_errors:
for tg_error in tg_errors:
print `tg_error`, str(tg_errors[tg_error])
return "Insufficient information to create a user - Please create this one manually"
location = Location.get(self.site.location)
user_args = {'user_name': user_name,
'first_name': first_name,
'last_name' : last_name,
'email_address': email_address,
'organisation': organisation,
'home': home,
'active': 0,
'homeplace': location,
'password': md5.new(str(random.random())).hexdigest()[:8]}
#create the user
user = User(**user_args)
#make user as a member of the homeplace
home_group = Group.selectBy(level='member', place=location)[0]
addUser2Group(user, home_group)
#tag the user
#redirect to hubspace
redirect(cherrypy.request.base)
@expose()
@validate(validators={'object_type':v.UnicodeString(), 'object_id':real_int, 'property':v.UnicodeString(), 'default':v.UnicodeString()})
def attribute_load(self, object_type, object_id, property, default=''):
"""load the attribute of any object type
"""
obj = obj_of_type(object_type, object_id)
obj = MetaWrapper(obj)
try:
val = getattr(obj, property)
except AttributeError:
val = default
return val and val or default
@expose()
@strongly_expire
@identity.require(identity.not_anonymous())
@validate(validators={'object_type':v.UnicodeString(), 'property':v.UnicodeString(), 'q':v.UnicodeString()})
def auto_complete(self, object_type, property, q, timestamp, **kwargs):
q = '% '.join(q.split(' ')) + '%'
type = getattr(model, object_type)
magic_prop = getattr(type.q, property)
members = list(type.select(AND(LIKE(func.lower(magic_prop), q.lower()),
User.q.public_field == 1))[:10])
if len(members) < 10:
q = '%' +q
members = set(members).union(set(type.select(AND(LIKE(func.lower(magic_prop), q.lower()),
User.q.public_field == 1))[:10-len(members)]))
members = [getattr(member, property) +'|'+ str(member.id) for member in members]
return '\n'.join(members)
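    # Example of the LIKE pattern built above (names are made up): a query of
    # q='tom sal' first becomes 'tom% sal%', matching names that start with the
    # typed words; if that yields fewer than 10 rows the pattern is widened to
    # '%tom% sal%' so the words may also appear mid-string.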
@expose()
@validate(validators={'object_type':v.UnicodeString(), 'object_id':real_int, 'property':v.UnicodeString(), 'value': v.UnicodeString(), 'page_name': v.UnicodeString()})
def attribute_edit(self, object_type=None, object_id=None, property=None, value="", page_name="index.html", tg_errors=None):
"""edit the attribute of any object type
"""
if tg_errors:
cherrypy.response.headers['X-JSON'] = 'error'
for tg_error in tg_errors:
error = `tg_error` + " " + str(tg_errors[tg_error])
return error
page_name = page_name.split('#')[0]
value = html2xhtml(value)
obj = obj_of_type(object_type, object_id)
obj = MetaWrapper(obj)
if not is_host(identity.current.user, Location.get(self.site.location)):
raise IdentityFailure('what about not hacking the system')
if value != getattr(obj, property):
setattr(obj, property, value)
self.site.attr_changed(property, page_name)
cherrypy.response.headers['X-JSON'] = 'success'
return value
class MicroSite(controllers.Controller):
def __init__(self, location, site_dir, site_url, static_url, site_types):
super(MicroSite, self).__init__()
self.location = location.id
self.site_dir = site_dir
self.site_url = site_url
self.static_url = static_url
self.upload_dir = self.site_dir + '/uploads/'
self.upload_url = self.static_url + '/uploads/'
self.initialized = False
self.site_types = site_types
self.add_edit_controllers()
self.regenerate_lck = False
def _cpOnError(self):
try:
raise # http://www.cherrypy.org/wiki/ErrorsAndExceptions#a2.2
#use this for getting 404s...
#except Exception, err:
# if isinstance(err, IndexError):
#...or this for getting logging
#except:
# if 0:
except IndexError,err:
applogger.exception("microsite request %s 404:" % cherrypy.request.path)
cherrypy.response.status = 404
cherrypy.response.body = "404"
except Exception,err:
"""log the error and give the user a trac page to submit the bug
We should give the error a UID so that we can find the error associated more easily
"""
            # If the syncer transaction fails, the syncer daemon takes care of rolling back the changes;
            # syncerclient then raises SyncerError, which effectively stops the TG transaction from committing
            # changes to the native database.
            # For all other errors we should call syncer rollback here.
            # And finally, if there is no error, we send a transaction-complete signal to the syncer, which is
            # handled by the TransactionCompleter filter.
if config.get('server.testing', False):
cherrypy.response.status = 500
else:
cherrypy.response.status = 200
e_info = sys.exc_info()
e_id = str(datetime.datetime.now())
e_path = cherrypy.request.path
_v = lambda v: str(v)[:20]
e_params = dict([(k, _v(v)) for (k, v) in cherrypy.request.paramMap.items()])
e_hdr = cherrypy.request.headerMap
applogger.error("%(e_id)s: Path:%(e_path)s" % locals())
applogger.error("%(e_id)s: Params:%(e_params)s" % locals())
applogger.exception("%(e_id)s:" % locals())
if isinstance(e_info[1], sync.SyncerError):
applogger.error("%(e_id)s: LDAP sync error" % locals())
else:
sync.sendRollbackSignal()
tb = sys.exc_info()[2]
e_str = traceback.format_exc(tb)
if isinstance(e_info[1], hubspace.errors.ErrorWithHint):
e_hint = e_info[1].hint
else:
e_hint = ""
d = dict(e_id=e_id, e_path=e_path, e_str=e_str, e_hint=e_hint)
cherrypy.response.body = try_render(d, template='hubspace.templates.issue', format='xhtml', headers={'content-type':'text/html'}, fragment=True)
def attr_changed(self, property, page_name):
if property in ('name', 'subtitle'):
self.regenerate_all()
elif self.site_types[Page.select(AND(Page.q.path_name == page_name,
Page.q.locationID == self.location))[0].page_type].static:
self.render_page(page_name)
def add_edit_controllers(self):
setattr(self, 'edit', MicroSiteEdit(self))
setattr(self, 'lists', SiteList(self))
def regenerate_all(self):
if self.regenerate_lck:
applogger.warn("[%s] regenerate_all: request ignored as lock is detected" % str(self.location))
return
self.regenerate_lck = True
try:
for page in Page.select(AND(Page.q.location==self.location)):
if self.site_types[page.page_type].static == True:
try:
self.render_page(page.path_name, relative_path='./')
applogger.debug("regenerate_all: calling render_page with (%s, %s) location [%s]" % (page.path_name, './',self.location))
except:
applogger.exception("failed to render page with name " + page.name + ", location " + `self.location` + " and id " + `page.id` )
finally:
self.regenerate_lck = False
def construct_args(self, page_name, *args, **kwargs):
template_args = dict(site_template_args)
try:
page = MetaWrapper(Page.select(AND(Page.q.location==self.location, Page.q.path_name==page_name))[0])
except (KeyError, IndexError):
try:
page = MetaWrapper(Page.select(AND(Page.q.location==self.location, Page.q.path_name==page_name + '.html'))[0])
except:
applogger.error("microsite: not found page for with location [%s] and page_name [%s]" % (self.location, page_name))
applogger.debug("debug info: args [%s] kwargs [%s]" % (str(args), str(kwargs)))
raise
func = self.site_types[page.page_type].view_func
if func:
kwargs['location'] = self.location
kwargs['microsite'] = self
kwargs['page'] = page
args_dict = func(*args, **kwargs)
else:
args_dict = {}
if kwargs.get('relative_path', None):
template_args.update({'relative_path': kwargs['relative_path']})
else:
template_args.update({'relative_path': relative_folder(self.site_url)})
template_args.update({'static_files_path': self.upload_url})
template_args.update({'lists': self.lists.iterator, 'upload_url': self.upload_url})
template_args.update({'top_image_src': top_image_src(page, microsite=self)})
template_args.update({'upload_url': self.upload_url})
template_args.update(args_dict)
location = MetaWrapper(Location.get(self.location))
template_args.update({'page':page, 'location':location, 'site_url': self.site_url})
return template_args
def get_sidebar(self,location,page):
blogs = Page.selectBy(location=location,page_type='blog2').orderBy('id')
if blogs.count() > 0:
sidebarblog = blogs[0]
sidebarblog = MetaWrapper(sidebarblog)
parts = get_blog(location=location,page=sidebarblog,microsite=self)
out = dict(blog_head=parts['blog_head'],blog=parts['sidebartext'])
else:
out = dict(blog_head='',blog='')
return out
@expose()
def jhb(self, *args, **kwargs):
        raise Exception('foo2')
return 'foo bar'
@expose()
def icalfeed_ics(self,eventid=None,*args,**kwargs):
#import pdb; pdb.set_trace()
location = Location.get(self.location)
if eventid:
events = [RUsage.get(eventid)]
else:
events = get_local_past_events(location=self.location)['past_events']
events += get_local_future_events(location=self.location)['future_events']
cal = vobject.iCalendar()
cal.add('X-WR-CALNAME').value = "%s (%s) events" % (location.name,location.city)
cal.add('X-WR-TIMEZONE').value = location.timezone
length = 0
for event in events:
length += 1
ve = cal.add('vevent')
ve.add('summary').value = event.meeting_name
ve.add('description').value = event.meeting_description
ve.add('dtstart').value = event.start
ve.add('dtend').value = event.end_time
url = cherrypy.request.base + '/public/events/' + str(event.id)
ve.add('uid').value = url
ve.add('url').value = url
cherrypy.response.headers['Content-Type'] = 'text/calendar; charset=utf-8'
cherrypy.response.headers['Content-Disposition'] = 'attachment; filename="icalfeed.ics"'
return cal.serialize()
@expose()
def default(self, *args, **kwargs):
"""This needs securing a little bit
"""
global gr_cache
if Location.get(self.location).hidden:
raise redirect('http://www.the-hub.net')
if kwargs.get('tg_errors', None):
return str(kwargs['tg_errors'])
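        # 'getready' holding-page assets are read straight from disk and memoised in the
        # module-level gr_cache; requesting .../getready/refresh empties that cache.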
gr_prefix = 'getready'
gr_dir = os.path.abspath('getready')
gr_pat = '/sites/.*_hub_net/getready(.*)'
gr_refresh = 'getready/refresh'
if cherrypy.request.path.endswith(gr_prefix):
raise redirect('http://www.the-hub.net/getready/')
elif cherrypy.request.path.endswith(gr_refresh):
applogger.info("gr_cache refresh: %s" % str(gr_cache.keys()))
gr_cache = {}
applogger.info("gr_cache refresh: %s" % str(gr_cache.keys()))
return "done"
gr_match = re.match(gr_pat, cherrypy.request.path)
if gr_match:
gr_req_path = gr_match.groups()[0]
if gr_req_path in gr_cache:
return gr_cache[gr_req_path]
if gr_req_path == '/':
gr_req_path = '/index.html'
gr_path = os.path.abspath(gr_prefix + gr_req_path)
if gr_path.startswith(gr_dir) and os.path.exists(gr_path):
out = file(gr_path).read()
gr_cache[gr_req_path] = out
else:
applogger.error('requested path: %s' % gr_path)
out = '404'
return out
if cherrypy.request.path.split('/')[-1] == self.site_dir.split('/')[-1]:
redirect(cherrypy.request.path + '/')
if not args or args[0]=='':
path_name = configuration.site_index_page[Location.get(self.location).url]
else:
path_name = args[0]
args = args[1:]
if not self.initialized:
self.regenerate_all()
self.initialized = True
try:
html = self.render_page(path_name, *args, **kwargs)
#page = Page.select(AND(Page.q.location==self.location,
# Page.q.path_name==path_name))[0]
#sidebar_content = self.get_sidebar(location=self.location, page=page)
#html = html.replace('<!-- sidebar headers -->',sidebar_content['blog_head'])
#html = html.replace('<!-- sidebar content -->',sidebar_content['blog'])
return html
except MediaContent, e:
cherrypy.response.headers['Content-Type'] = e.response.headers['Content-Type']
cherrypy.response.headers['Content-Length'] = e.response.headers['Content-Length']
return e.response.read()
except AjaxContent, e:
return e.html
@expose()
    def blog_media_content(self, *args, **kwargs):
        path_name = args[0]
        subpath = '/'.join(args[1:])
        try:
            page = MetaWrapper(Page.select(AND(Page.q.location==self.location, Page.q.path_name==path_name))[0])
        except (KeyError, IndexError):
            page = MetaWrapper(Page.select(AND(Page.q.location==self.location, Page.q.path_name==path_name + '.html'))[0])
        if not page.page_type == 'blog': #should always be the case (in normal use)
            raise ValueError('blog_media_content called for a non-blog page')
if subpath.endswith('/'):
subpath = subpath[:-1]
def render_page(self, path_name, *args, **kwargs):
#applogger.debug("render_page: request [%s %s %s]" % (path_name, str(args), str(kwargs)))
path_name = path_name.split('#')[0]
try:
template_args = self.construct_args(path_name, *args, **kwargs)
except Exception, err:
applogger.error("render_page: failed for path_name [%s], args [%s], kwargs [%s]" % (path_name, str(args), str(kwargs)))
raise
if path_name:
page = Page.select(AND(Page.q.location==self.location, IN(Page.q.path_name, [path_name, path_name + '.html'])))[0]
template = self.site_types[page.page_type].template
else:
page = None
template = 'hubspace.templates.microSiteHome'
path = self.site_dir + '/' + page.path_name
out = try_render(template_args, template=template, format='xhtml', headers={'content-type':'text/xhtml'})
if page and self.site_types[page.page_type].static and path_name.endswith('.html'):
            # the same method serves both events.html and events/<event-id>, so if the URL path
            # does not end with .html we do not need to generate an html page. Or we don't support that yet.
applogger.info("render_page: generating [%s] for location [%s]" % (path, self.location))
#applogger.info("render_page: generating [%s] template_args [%s]" % (path, str(template_args)))
template_args['render_static'] = True # TODO: what difference does render_static make?
public_html = try_render(template_args, template=template, format='xhtml', headers={'content-type':'text/xhtml'})
new_html = open(path, 'w')
new_html.write(public_html)
new_html.close()
return out
class Sites(controllers.Controller):
def __init__(self):
super(Sites, self).__init__()
for loc in Location.select(AND(Location.q.url!=None)):
self.add_site(loc)
def add_site(self, loc):
"""This should be called when first setting the url of location
"""
if not loc.url:
return
site_path = loc.url.split('://')[1].replace('.', '_').replace('-', '_')
static_dir = os.getcwd() + '/hubspace/static/'
static_url = '/static/' + site_path
site_dir = static_dir + site_path
site_url = 'sites/' + site_path
if loc.url not in ["http://the-hub.net", "http://new.the-hub.net"]:
site_types = microsite_page_types
site_pages = microsite_pages
site_left_tabs = microsite_left_page_list
site_right_tabs = microsite_right_page_list
else:
site_types = website_page_types
site_pages = website_pages
site_left_tabs = website_left_page_list
site_right_tabs = website_right_page_list
#create the static directory if it doesn't exist
try:
os.stat(site_dir)
except OSError:
os.mkdir(site_dir)
os.mkdir(site_dir + '/uploads')
try:
Page.select(AND(Page.q.location==loc))[0]
except IndexError:
index_pages = self.create_pages(loc, site_types, site_pages)
self.create_page_lists(loc, site_left_tabs, site_right_tabs, index_pages[loc])
setattr(self.__class__, site_path, MicroSite(loc, site_dir, site_url, static_url, site_types))
def create_pages(self, loc, site_types, site_pages):
index_pages = {}
for page, type in site_pages.items():
p = site_types[type].create_page(page, loc, {})
if page == 'index':
index_pages[loc] = p
return index_pages
def create_page_lists(self, loc, left_tabs, right_tabs, index_page):
for list_name, data in list_types.items():
object_types = ','.join(data['object_types'])
List(list_name=list_name,
object_types=object_types,
mode=data['mode'],
page=index_page,
location=loc,)
kwargs = {'location':loc, 'object_type': Page, 'active': 1}
for page in left_tabs:
kwargs.update({'name':page})
try:
page = Page.select(AND(Page.q.location == loc,
IN(Page.q.path_name, [page, page + '.html'])))[0]
append_existing_item('left_tabs', page, **kwargs)
except IndexError:
pass
for page in right_tabs:
kwargs.update({'name':page})
try:
page = Page.select(AND(Page.q.location == loc,
IN(Page.q.path_name, [page, page + '.html'])))[0]
append_existing_item('right_tabs', page, **kwargs)
except IndexError:
pass
def move_site(self, loc, new_url):
"""this should be called on changing the url of a location
"""
pass
def refresh_all_static_pages():
# Problems with this code
    # - If it is called from a scheduled job, at points it tries to use the TG identity framework, which is not possible outside an HTTP request
sites = dict((site.location, site) for site in cherrypy.root.sites.__class__.__dict__.values() if isinstance(site, MicroSite))
for page in Page.select():
site = sites[page.locationID]
if site.site_types[page.page_type].static:
            path = site.site_dir + '/' + page.path_name
if not path.endswith('.html'): continue
if os.path.isfile(path):
mtime = datetime.datetime.fromtimestamp(os.stat(path).st_mtime)
                if not now(site.location).day == mtime.day:
try:
site.render_page(page.path_name, relative_path='./')
except Exception, err:
applogger.exception("refresh_static_pages: failed to regenerate %s with error '%s'" % (path, err))
def regenerate_page(location_id, page_type, check_mtime=False):
sites = (site for site in cherrypy.root.sites.__class__.__dict__.values() if isinstance(site, MicroSite))
for site in sites:
if site.location == location_id: break
else:
applogger.warning("could not find microsite instance for location [%s] page_type [%s]" % (location_id, page_type))
return
if page_type in site.site_types and site.site_types[page_type].static:
pages = Page.select(AND(Page.q.location==location_id, Page.q.page_type==page_type))
for page in pages:
if page.path_name.endswith('.html'):
applogger.debug("regenerating page: location_id [%s] path_name [%s]" % (location_id, page.path_name))
site.render_page(page.path_name, relative_path='./')
def on_add_rusage(kwargs, post_funcs):
rusage = kwargs['class'].get(kwargs['id'])
if rusage.public_field:
applogger.info("microsite.on_add_rusage: added %(id)s" % kwargs)
location = rusage.resource.place.id
regenerate_page(location, "events")
def on_del_rusage(rusage, post_funcs):
if rusage.public_field:
applogger.info("microsite.on_del_rusage: removing %s" % rusage.id)
location = rusage.resource.placeID
regenerate_page(location, "events")
def on_updt_rusage(instance, kwargs):
if 'public_field' in kwargs or instance.public_field: # not precise logic
applogger.info("microsite.on_updt_rusage: updating %s" % instance.id)
location = instance.resource.placeID
regenerate_page(location, "events")
def on_add_user(kwargs, post_funcs):
user = kwargs['class'].get(kwargs['id'])
if user.public_field:
applogger.info("microsite.on_add_user: updating %s" % user.id)
location = user.homeplaceID
regenerate_page(location, "members")
def on_updt_user(instance, kwargs):
if 'public_field' in kwargs or instance.public_field: # not precise logic
applogger.info("microsite.on_updt_user: updating %s" % instance.id)
location = instance.homeplaceID
regenerate_page(location, "members")
listen(on_add_rusage, RUsage, RowCreatedSignal)
listen(on_updt_rusage, RUsage, RowUpdateSignal)
listen(on_del_rusage, RUsage, RowDestroySignal)
listen(on_add_user, User, RowCreatedSignal)
listen(on_updt_user, User, RowUpdateSignal)
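# The SQLObject row signals registered above keep the statically rendered 'events' and
# 'members' pages in step with database changes (via regenerate_page).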
| thehub/hubspace | hubspace/microSite.py | Python | gpl-2.0 | 70,144 | 0.011533 |
'''
Copyright 2009, 2010 Anthony John Machin. All rights reserved.
Supplied subject to The GNU General Public License v3.0
Created on 28 Jan 2009
Last Updated on 10 July 2010
As test20 with tests of:
rules instantiation and query inference
Related:
single dict TS recursion rule plus generic rule + minimal data:
test20simple-001d - unmerged recursive rule EQ order correct QL order correct
test20simple-002d - unmerged recursive rule EQ order correct QL order incorrect
test20simple-003d - merged recursive rule EQ order correct QL order correct variables consistent
test20simple-004d - merged recursive rule EQ order correct QL order correct variables inconsistent (03)
test20simple-005d - merged recursive rule EQ order correct QL order incorrect variables consistent
test20simple-006d - merged recursive rule EQ order correct QL order incorrect variables inconsistent (04)
test20simple-007d - unmerged recursive rule EQ order incorrect QL order correct (05)
test20simple-008d - unmerged recursive rule EQ order incorrect QL order incorrect (06)
test20simple-009d - merged recursive rule EQ order incorrect QL order correct variables consistent
test20simple-010d - merged recursive rule EQ order incorrect QL order correct variables inconsistent (07)
test20simple-011d - merged recursive rule EQ order incorrect QL order incorrect variables consistent
test20simple-012d - merged recursive rule EQ order incorrect QL order incorrect variables inconsistent (08)
single rbtree TS recursion rule plus generic rule + minimal data:
test20simple-001r - unmerged recursive rule EQ order correct QL order correct
test20simple-002r - unmerged recursive rule EQ order correct QL order incorrect
test20simple-003r - merged recursive rule EQ order correct QL order correct variables consistent
test20simple-004r - merged recursive rule EQ order correct QL order correct variables inconsistent (03)
test20simple-005r - merged recursive rule EQ order correct QL order incorrect variables consistent
test20simple-006r - merged recursive rule EQ order correct QL order incorrect variables inconsistent (04)
test20simple-007r - unmerged recursive rule EQ order incorrect QL order correct (05)
test20simple-008r - unmerged recursive rule EQ order incorrect QL order incorrect (06)
test20simple-009r - merged recursive rule EQ order incorrect QL order correct variables consistent
test20simple-010r - merged recursive rule EQ order incorrect QL order correct variables inconsistent (07)
test20simple-011r - merged recursive rule EQ order incorrect QL order incorrect variables consistent
test20simple-012r - merged recursive rule EQ order incorrect QL order incorrect variables inconsistent (08)
@author: Administrator
'''
import metabulate.stores.stores as mtstores
import metabulate.facades.facade as mtfacade
import metabulate.utils.utils as mtutils
import metabulate.utils.debug as mtdebug
import metabulate.renderers.render as mtrender
import metabulate.rules.rules as mtrules
import metabulate.singletons.singleton as mtsingleton
if __name__ == "__main__":
# get default file paths and types
mtconfig = mtsingleton.Settings()._getItem('config')
debug_path = mtconfig._getItem('debugfile_path','%configfilesbase%Debug\\',mtconfig)
debug_type = mtconfig._getItem('debugfile_type','txt',mtconfig)
result_path = mtconfig._getItem('resultsfile_path','%configfilesbase%Results\\',mtconfig)
result_type = mtconfig._getItem('resultsfile_type','txt',mtconfig)
unload_path = mtconfig._getItem('stores_unloadfile_path','%configfilesbase%Unloads\\',mtconfig)
unload_type = mtconfig._getItem('stores_unloadfile_type','pyo',mtconfig)
# set debug criteria
dc22f = mtdebug.Criteria(methods=['_actionPredicate','_actionTriple','_processTriple','_addTriple'],
targets=[mtutils.Flatfile(path=debug_path,
name='DebugOutput_dc22',
type=debug_type)])
dc28 = mtdebug.Criteria(classes=['Query'],methods=['_solve'],notes=['trace'])
# set debug
# d = mtdebug.Debug()
# assign it the criteria
# d._update(criteria=[dc8f,dc12f,dc7f,dc13f,dc10f,dc14f,dc15f])
# d._update(criteria=[dc6,dc20f_dup,dc20f_ok])
# d._update(criteria=[dc11f])
# d._update(criteria=[dc21f])
# d._update(criteria=[dc6,dc20f])
# files
fu = mtutils.Flatfile(path=unload_path,
name='test20r-30_unload_s1',
type=unload_type)
f1 = mtutils.Flatfile(path=result_path,
name='genealogy_test1',
type=result_type)
f3 = mtutils.Flatfile(path=result_path,
name='test20r-30_triples',
type=result_type)
f4 = mtutils.Flatfile(path=result_path,
name='test20r-30_rules',
type=result_type)
f5 = mtutils.Flatfile(path=result_path,
name='test20r-30_queries',
type=result_type)
f6 = mtutils.Flatfile(path=result_path,
name='test20r-30_results',
type=result_type)
# stores
sa = mtstores.TripleStore(structure='dict') # TS sa dict
sr = mtstores.TripleStore() # TS sr
s2 = mtstores.TripleStore()
s3 = mtstores.TripleStore()
s4 = mtstores.TripleStore()
# add namespaces in source stores
sa._addNamespace('mytriples', 'http://www.semanticle.org/triples/')
sa._addNamespace('comtriples', 'http://www.semanticle.com/triples/')
# triples for recursion test
sa._actionTriple("add [('mytriples#bob', 'child_of', 'alice'),('http://www.semanticle.com/triples/#dan', 'child_of', 'cev')]")
sa._actionTriple("add", [('cev', 'child_of', 'http://www.semanticle.org/triples/#bob'),"('http://www.semanticle.com/triples/#dan', 'child_of', 'cev')"])
sa._actionTriple("add", 'eve', 'child_of', 'comtriples#dan')
# sa._actionTriple("add",{('?desc', 'desc_of', '?ancs'):
# [
# [[('?child', 'child_of', '?ancs')],[('?desc', 'desc_of', '?child')]]
# [[('?desc', 'desc_of', '?child')],[('?child', 'child_of', '?ancs')]]
# ,[[('?desc', 'child_of', '?ancs')]]
# ]})
sa._actionTriple("add","{('?desc', 'desc_of', '?ancs'):[[[('?child', 'child_of', '?ancs')],[('?desc', 'desc_of', '?child')]]]}") # add rule clause 2 to DTS._queryStore b (or change to DTS s1)
# sa._actionTriple("add","{('?desc1', 'desc_of', '?ancs'):[[[('?desc1', 'desc_of', '?child')],[('?child', 'child_of', '?ancs')]]]}") # add rule clause 2 to DTS._queryStore b (or change to DTS s1)
sa._actionTriple("add",{('?obj', '?inv', '?sub'):
[
[[('?inv', 'rev_of', '?forw'),('?forw', 'rev_of', '?inv')]
,[('?sub', "?forw", '?obj')]]
,[[('?inv', 'syn_of', '?inv1'),('?inv1', 'syn_of', '?inv')]
,[('?obj', "?inv1", '?sub')]]
]}) # add rule to DTS._queryStore a (or change to DTS s1)
sa._actionTriple("add ('?desc', 'desc_of', '?ancs') :- [[[('?desc', 'child_of', '?ancs')]]]") # add rule clause 1 to DTS._queryStore b (or change to DTS s1)
sa._actionTriple("add", 'ancs_of', 'rev_of', 'desc_of') # ant
# s1._actionTriple("add", 'desc_of', 'rev_of', 'ancsr_of') # rev ant
sa._actionTriple("add", 'des_of', 'syn_of', 'desc_of') # syn
# s1._actionTriple("add", 'desc_of', 'syn_of', 'descr_of') # rev syn
sa._actionTriple("add", 'anc_of', 'rev_of', 'des_of') # ant of syn
# s1._actionTriple("add", 'ancestor1_of', 'syn_of', 'ancs_of') # syn of ant
sa._actionTriple("add", 'ancestor2_of', 'syn_of', 'anc_of') # syn of ant of syn
# s1._actionTriple("add", 'ancestor3_of', 'syn_of', 'ancestor2_of') # syn of syn of ant of syn
# triples for nested rules test
# s1._actionTriple("add", 'bob', 'is_sex', 'male')
# s1._actionTriple("add", 'cev', 'is_sex', 'male')
# s1._actionTriple("add", 'dan', 'is_sex', 'male')
# s1._actionTriple("add", 'eve', 'is_sex', 'female')
# s1._actionTriple("add", 'nancy', 'mother_of', 'mike')
# s1._actionTriple("add", 'niel', 'father_of', 'mike')
# s1._actionTriple("add", 'mike', 'is_sex', 'male')
# s1._actionPredicate(action="add",
# fact=('?child', 'son_of', '?parent'),
# rule=[[[('?child', 'child_of', '?parent')],
# [('?child', "'is_sex'", "'male'")]]])
# s1._actionPredicate(action="add",
# fact=('?child', 'child_of', '?parent'),
# rule=[[[('?parent', 'father_of', '?child')]],
# [[('?parent', "'mother_of'", '?child')]]])
# Test Load/Unload
# s1._unload(file=f1)
# s0 = s1._load(file=f1)
# print (s0._toString())
# print
print (sa._toString())
# print
# print ('unloading DSTS s1 to fu')
# sa._unload()
# print ('reloading DSTS from fu as sr')
# sr = sr._load()
# print
# print (sr._toString())
# print
# print (s0._toString())
# d = mtdebug.Debug()
# assign it the criteria
# d._update(criteria=[dc19f])
# set Result requests
# rlt04 = mtrules.Result(request=[[('?sub=eve','?pred=child_of','?obj=dan')]]) # pass
# rlt04 = mtrules.Result(request=[[("?sub='*'","?pred='*'","?obj='*'")]]) # pass
# rlt04 = mtrules.Result(request=[[('?sub="*"','?pred="*"','?obj="*"')]]) # pass
# rlt04 = mtrules.Result(request=[[('?sub="?"','?pred','?obj="?"')]]) # FAIL - NO RULES RETURNED (MAYBE OK?)
# rlt04 = mtrules.Result(request=[[("?sub='?'","?pred","?obj='?'")]]) # FAIL - NO RULES RETURNED (MAYBE OK?)
# rlt04 = mtrules.Result(request=[[('?sub=eve', "?pred=desc_of", '?obj=alice')]]) # pass
# rlt04 = mtrules.Result(request=[[('?sub=eve', "?pred=des_of", '?obj=alice')]]) # pass
# rlt04 = mtrules.Result(request=[[('?sub', "?pred=desc_of", '?obj')]]) # pass
# rlt04 = mtrules.Result(request=[[('?sub', "?pred=ancs_of", '?obj')]]) # pass
# rlt04 = mtrules.Result(request=[[('?sub', "?pred=des_of", '?obj')]]) # pass
# rlt04 = mtrules.Result(request=[[('?sub=?','?pred','?obj')
# ,('?sub','?pred=?','?obj')
# ,('?sub','?pred','?obj=?')]]) # pass - all inferences
# rlt04 = mtrules.Result(request=[[('?sub == ?','?pred','?obj')
# ,('?sub','?pred = =?','?obj')
# ,('?sub','?pred','?obj==?')]]) # pass - all rules
rlt04 = mtrules.Result(request=[[('?sub','?pred','?obj')]]) # pass
# rlt04 = mtrules.Result(request=[[('?sub','?pred','?obj')],[('?sub','child_of','dan')]]) # FAIL
# rlt04 = mtrules.Result(request=[[('?sub','?pred','?obj')],['not',('?sub','child_of','dan')]]) # pass
# rlt04 = mtrules.Result(request=[['not',('?sub','child_of','comtriples#dan')],[('?sub','?pred','?obj')]]) # pass
# rlt04 = mtrules.Result(request=[[('?sub','?pred','?obj')],['not',('?sub','child_of','dan')]
# ,['not',('?sub','from','London')]]) # pass
# rlt04 = mtrules.Result(request=[[('?sub','?pred=ancestor3_of','?obj')]]) # pass
rlt05 = mtrules.Result(request=[[("?s","?r=?r1='child_of'","?o")]]) # pass
# rlt02 = mtrules.Result(request=[[('eve', "desc_of", '?person2')]]) # pass
# rlt02 = mtrules.Result(request=[[(('ender', 'from', 'Truro'), "desc_of", '?person2')]]) # pass
# rlt02 = mtrules.Result(request=[[(('ender|eddy', 'from', 'Truro'), "desc_of", '?person2')]]) # pass
# rlt02 = mtrules.Result(request=[[(('?person1', 'from', 'Truro'), "desc_of", '?person2')]]) # pass
# rlt02 = mtrules.Result(request=[[('eve', "desc_of", '?person2')]
# ,[('?person2', "desc_of", 'alice')]]) # pass
# rlt02 = mtrules.Result(request=[[('eve', "des_of", '?person2')]
# ,[('?person2', "des_of", 'alice')]]) # pass - syn of recursed rule
# rlt02 = mtrules.Result(request=[[('eve', "descr_of", '?person2')]
# ,[('?person2', "descr_of", 'alice')]]) # pass - reversed syn of recursed rule
# rlt02 = mtrules.Result(request=[[('alice', "ancs_of", '?person2')]
# ,[('?person2', "ancs_of", 'eve')]]) # pass - ant of recursed rule
# rlt02 = mtrules.Result(request=[[('alice', "ancsr_of", '?person2')]
# ,[('?person2', "ancsr_of", 'eve')]]) # pass - reversed ant of recursed rule
# rlt02 = mtrules.Result(request=[[('alice', "anc_of", '?person2')]
# ,[('?person2', "anc_of", 'eve')]]) # pass - ant of syn of recursed rule
# rlt02 = mtrules.Result(request=[[('alice', "ancestor1_of", '?person2')]
# ,[('?person2', "ancestor1_of", 'eve')]]) # pass - syn of ant of recursed rule
rlt02 = mtrules.Result(request=[[('alice', "ancestor2_of", '?person2')]
,[('?person2', "ancestor2_of", 'eve')]]) # pass - syn of ant of syn of recursed rule
# rlt02 = mtrules.Result(request=[[('alice', "ancestor3_of", '?person2')]
# ,[('?person2', "ancestor3_of", 'eve')]]) # pass - syn of syn of ant of syn of recursed rule
print ('queries defined')
# rendering submission
p0t = mtrender.Sequence(pattern=['?!triples'], # via variable notation
targets=[s3,f3],
render='py')
p0r = mtrender.Sequence(pattern=['?!rules'], # via variable notation
targets=[s4,f4],
render='py')
p0q = mtrender.Sequence(pattern=['?!queries'], # via variable notation
targets=[f5],
render='py')
    p1 = mtrender.Sequence(pattern=[('?son', 'son_of', '?person')], # triple propagation
targets=[s2,'display'],
render='csv')
    p2 = mtrender.Sequence(pattern=[('?person1', 'desc_of', '?person2')], # triple propagation
targets=[s2,'display'],
render='csv')
p3 = mtrender.Sequence(pattern=['?person2'],
targets=['display'],
render='csv')
p4 = mtrender.Sequence(pattern=[('?sub', '?pred', '?obj')],
targets=[s2,'display'],
render='csv',
URImode='nativealias')
p4a = mtrender.Sequence(pattern=[('?sub', '?pred', '?obj'),('results', 'contain', ('?sub', '?pred', '?obj'))],
targets=[s2,'display'],
render='csv',
URImode='nativealias')
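    # p6 below is a regex Transformation: '/^(.)(.*?)(.)$/$3$2$1/' swaps the first and
    # last characters of the bound value (illustrative reading of the pattern).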
p6 = mtrender.Transformation(pattern=['!!og!!','/^(.)(.*?)(.)$/$3$2$1/'],id='?p6')
#p5 = mtrender.Sequence(pattern=[({('np2',p2):{'og':'?o'}},'is known by','?s')])
p5 = mtrender.Sequence(pattern=[({('np6','!p6'):{'?og':'?o'}},'is known by','?s')],
targets=['display'],
render='csv')
print ('Renders defined')
# d = mtdebug.Debug()
# assign it the criteria
# d._update(criteria=[dc16f])
# set query
rlt02._update(outputs=[p3])
face02 = mtfacade.Facade(store=sa,
results=[rlt02])
# rlt04._update(outputs=[p4,p0t,p0r,p0q])
rlt04._update(outputs=[p4])
face04 = mtfacade.Facade(store=sa,
results=[rlt04])
rlt05._update(outputs=[p5])
face05 = mtfacade.Facade(store=sa,
results=[rlt05])
print ('results and facades defined')
    # reset debug criteria
# execute the query
# s1._update(infer=False)
face04._generate()
print
# check output channelled to a store
print ('results instantiated')
print (s2._toString())
print ('should be 60 results')
print
print ('contributory triples instantiated')
print (s3._toString())
print ('contributory rules instantiated')
print (s4._toString())
# print ('source Store again')
# print (sr._toString())
| Semanticle/Semanticle | sm-mt-devel/src/metabulate/tests/test26case-009d.py | Python | gpl-2.0 | 18,357 | 0.011113 |
__all__ = ("settings", "urls", "wsgi")
__version__ = "0.159.0"
| lopopolo/hyperbola | hyperbola/__init__.py | Python | mit | 63 | 0 |
##
# Copyright 2013-2020 Ghent University
#
# This file is triple-licensed under GPLv2 (see below), MIT, and
# BSD three-clause licenses.
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# https://github.com/easybuilders/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for Clang + GCC compiler toolchain. Clang uses libstdc++. GFortran is used for Fortran code.
:author: Dmitri Gribenko (National Technical University of Ukraine "KPI")
"""
from easybuild.toolchains.compiler.clang import Clang
from easybuild.toolchains.compiler.gcc import Gcc
from easybuild.tools.toolchain.toolchain import SYSTEM_TOOLCHAIN_NAME
TC_CONSTANT_CLANGGCC = "ClangGCC"
class ClangGcc(Clang, Gcc):
"""Compiler toolchain with Clang and GFortran compilers."""
NAME = 'ClangGCC'
COMPILER_MODULE_NAME = ['Clang', 'GCC']
COMPILER_FAMILY = TC_CONSTANT_CLANGGCC
SUBTOOLCHAIN = SYSTEM_TOOLCHAIN_NAME
| pescobar/easybuild-framework | easybuild/toolchains/clanggcc.py | Python | gpl-2.0 | 1,795 | 0.001671 |
# pylint: disable=E1101,E1103,W0232
from collections import OrderedDict
import datetime
from sys import getsizeof
import warnings
import numpy as np
from pandas._libs import (
Timestamp, algos as libalgos, index as libindex, lib, tslibs)
import pandas.compat as compat
from pandas.compat import lrange, lzip, map, range, zip
from pandas.compat.numpy import function as nv
from pandas.errors import PerformanceWarning, UnsortedIndexError
from pandas.util._decorators import Appender, cache_readonly, deprecate_kwarg
from pandas.core.dtypes.common import (
ensure_int64, ensure_platform_int, is_categorical_dtype, is_hashable,
is_integer, is_iterator, is_list_like, is_object_dtype, is_scalar,
pandas_dtype)
from pandas.core.dtypes.dtypes import ExtensionDtype, PandasExtensionDtype
from pandas.core.dtypes.generic import ABCDataFrame
from pandas.core.dtypes.missing import array_equivalent, isna
import pandas.core.algorithms as algos
import pandas.core.common as com
from pandas.core.config import get_option
import pandas.core.indexes.base as ibase
from pandas.core.indexes.base import (
Index, InvalidIndexError, _index_shared_docs, ensure_index)
from pandas.core.indexes.frozen import FrozenList, _ensure_frozen
import pandas.core.missing as missing
from pandas.io.formats.printing import pprint_thing
_index_doc_kwargs = dict(ibase._index_doc_kwargs)
_index_doc_kwargs.update(
dict(klass='MultiIndex',
target_klass='MultiIndex or list of tuples'))
class MultiIndexUIntEngine(libindex.BaseMultiIndexCodesEngine,
libindex.UInt64Engine):
"""
This class manages a MultiIndex by mapping label combinations to positive
integers.
"""
_base = libindex.UInt64Engine
def _codes_to_ints(self, codes):
"""
Transform combination(s) of uint64 in one uint64 (each), in a strictly
monotonic way (i.e. respecting the lexicographic order of integer
combinations): see BaseMultiIndexCodesEngine documentation.
Parameters
----------
codes : 1- or 2-dimensional array of dtype uint64
Combinations of integers (one per row)
Returns
        -------
int_keys : scalar or 1-dimensional array, of dtype uint64
Integer(s) representing one combination (each)
"""
# Shift the representation of each level by the pre-calculated number
# of bits:
codes <<= self.offsets
# Now sum and OR are in fact interchangeable. This is a simple
# composition of the (disjunct) significant bits of each level (i.e.
# each column in "codes") in a single positive integer:
if codes.ndim == 1:
# Single key
return np.bitwise_or.reduce(codes)
# Multiple keys
return np.bitwise_or.reduce(codes, axis=1)
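    # Illustrative sketch (not part of the original source): assuming two levels whose
    # pre-computed bit offsets are [32, 0], the code combination (1, 2) is packed as
    # (1 << 32) | (2 << 0) == 0x100000002, which preserves the lexicographic order
    # of the original code combinations.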
class MultiIndexPyIntEngine(libindex.BaseMultiIndexCodesEngine,
libindex.ObjectEngine):
"""
This class manages those (extreme) cases in which the number of possible
label combinations overflows the 64 bits integers, and uses an ObjectEngine
containing Python integers.
"""
_base = libindex.ObjectEngine
def _codes_to_ints(self, codes):
"""
Transform combination(s) of uint64 in one Python integer (each), in a
strictly monotonic way (i.e. respecting the lexicographic order of
integer combinations): see BaseMultiIndexCodesEngine documentation.
Parameters
----------
codes : 1- or 2-dimensional array of dtype uint64
Combinations of integers (one per row)
Returns
        -------
int_keys : int, or 1-dimensional array of dtype object
Integer(s) representing one combination (each)
"""
# Shift the representation of each level by the pre-calculated number
# of bits. Since this can overflow uint64, first make sure we are
# working with Python integers:
codes = codes.astype('object') << self.offsets
# Now sum and OR are in fact interchangeable. This is a simple
# composition of the (disjunct) significant bits of each level (i.e.
# each column in "codes") in a single positive integer (per row):
if codes.ndim == 1:
# Single key
return np.bitwise_or.reduce(codes)
# Multiple keys
return np.bitwise_or.reduce(codes, axis=1)
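    # Illustrative note: here the combined per-level shifts can exceed 64 bits, so the
    # codes are first cast to object dtype and combined as arbitrary-precision Python ints.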
class MultiIndex(Index):
"""
A multi-level, or hierarchical, index object for pandas objects.
Parameters
----------
levels : sequence of arrays
The unique labels for each level.
codes : sequence of arrays
Integers for each level designating which label at each location.
.. versionadded:: 0.24.0
labels : sequence of arrays
Integers for each level designating which label at each location.
.. deprecated:: 0.24.0
Use ``codes`` instead
sortorder : optional int
Level of sortedness (must be lexicographically sorted by that
level).
names : optional sequence of objects
Names for each of the index levels. (name is accepted for compat).
copy : bool, default False
Copy the meta-data.
verify_integrity : bool, default True
Check that the levels/codes are consistent and valid.
Attributes
----------
names
levels
codes
nlevels
levshape
Methods
-------
from_arrays
from_tuples
from_product
from_frame
set_levels
set_codes
to_frame
to_flat_index
is_lexsorted
sortlevel
droplevel
swaplevel
reorder_levels
remove_unused_levels
See Also
--------
MultiIndex.from_arrays : Convert list of arrays to MultiIndex.
MultiIndex.from_product : Create a MultiIndex from the cartesian product
of iterables.
MultiIndex.from_tuples : Convert list of tuples to a MultiIndex.
MultiIndex.from_frame : Make a MultiIndex from a DataFrame.
Index : The base pandas Index type.
Examples
---------
A new ``MultiIndex`` is typically constructed using one of the helper
methods :meth:`MultiIndex.from_arrays`, :meth:`MultiIndex.from_product`
and :meth:`MultiIndex.from_tuples`. For example (using ``.from_arrays``):
>>> arrays = [[1, 1, 2, 2], ['red', 'blue', 'red', 'blue']]
>>> pd.MultiIndex.from_arrays(arrays, names=('number', 'color'))
MultiIndex(levels=[[1, 2], ['blue', 'red']],
codes=[[0, 0, 1, 1], [1, 0, 1, 0]],
names=['number', 'color'])
See further examples for how to construct a MultiIndex in the doc strings
of the mentioned helper methods.
Notes
-----
See the `user guide
<http://pandas.pydata.org/pandas-docs/stable/advanced.html>`_ for more.
"""
# initialize to zero-length tuples to make everything work
_typ = 'multiindex'
_names = FrozenList()
_levels = FrozenList()
_codes = FrozenList()
_comparables = ['names']
rename = Index.set_names
# --------------------------------------------------------------------
# Constructors
@deprecate_kwarg(old_arg_name='labels', new_arg_name='codes')
def __new__(cls, levels=None, codes=None, sortorder=None, names=None,
dtype=None, copy=False, name=None,
verify_integrity=True, _set_identity=True):
# compat with Index
if name is not None:
names = name
if levels is None or codes is None:
raise TypeError("Must pass both levels and codes")
if len(levels) != len(codes):
raise ValueError('Length of levels and codes must be the same.')
if len(levels) == 0:
raise ValueError('Must pass non-zero number of levels/codes')
result = object.__new__(MultiIndex)
# we've already validated levels and codes, so shortcut here
result._set_levels(levels, copy=copy, validate=False)
result._set_codes(codes, copy=copy, validate=False)
if names is not None:
# handles name validation
result._set_names(names)
if sortorder is not None:
result.sortorder = int(sortorder)
else:
result.sortorder = sortorder
if verify_integrity:
result._verify_integrity()
if _set_identity:
result._reset_identity()
return result
def _verify_integrity(self, codes=None, levels=None):
"""
Parameters
----------
codes : optional list
Codes to check for validity. Defaults to current codes.
levels : optional list
Levels to check for validity. Defaults to current levels.
Raises
------
ValueError
If length of levels and codes don't match, if the codes for any
level would exceed level bounds, or there are any duplicate levels.
"""
        # NOTE: Currently does not check, among other things, that the cached
        # nlevels matches, nor that sortorder matches the actual sort order.
codes = codes or self.codes
levels = levels or self.levels
if len(levels) != len(codes):
raise ValueError("Length of levels and codes must match. NOTE:"
" this index is in an inconsistent state.")
codes_length = len(self.codes[0])
for i, (level, level_codes) in enumerate(zip(levels, codes)):
if len(level_codes) != codes_length:
raise ValueError("Unequal code lengths: %s" %
([len(code_) for code_ in codes]))
if len(level_codes) and level_codes.max() >= len(level):
raise ValueError("On level %d, code max (%d) >= length of"
" level (%d). NOTE: this index is in an"
" inconsistent state" % (i, level_codes.max(),
len(level)))
if not level.is_unique:
raise ValueError("Level values must be unique: {values} on "
"level {level}".format(
values=[value for value in level],
level=i))
@classmethod
def from_arrays(cls, arrays, sortorder=None, names=None):
"""
Convert arrays to MultiIndex.
Parameters
----------
arrays : list / sequence of array-likes
Each array-like gives one level's value for each data point.
len(arrays) is the number of levels.
sortorder : int or None
Level of sortedness (must be lexicographically sorted by that
level).
names : list / sequence of str, optional
Names for the levels in the index.
Returns
-------
index : MultiIndex
See Also
--------
MultiIndex.from_tuples : Convert list of tuples to MultiIndex.
MultiIndex.from_product : Make a MultiIndex from cartesian product
of iterables.
MultiIndex.from_frame : Make a MultiIndex from a DataFrame.
Examples
--------
>>> arrays = [[1, 1, 2, 2], ['red', 'blue', 'red', 'blue']]
>>> pd.MultiIndex.from_arrays(arrays, names=('number', 'color'))
MultiIndex(levels=[[1, 2], ['blue', 'red']],
codes=[[0, 0, 1, 1], [1, 0, 1, 0]],
names=['number', 'color'])
"""
if not is_list_like(arrays):
raise TypeError("Input must be a list / sequence of array-likes.")
elif is_iterator(arrays):
arrays = list(arrays)
# Check if lengths of all arrays are equal or not,
# raise ValueError, if not
for i in range(1, len(arrays)):
if len(arrays[i]) != len(arrays[i - 1]):
raise ValueError('all arrays must be same length')
from pandas.core.arrays.categorical import _factorize_from_iterables
codes, levels = _factorize_from_iterables(arrays)
if names is None:
names = [getattr(arr, "name", None) for arr in arrays]
return MultiIndex(levels=levels, codes=codes, sortorder=sortorder,
names=names, verify_integrity=False)
@classmethod
def from_tuples(cls, tuples, sortorder=None, names=None):
"""
Convert list of tuples to MultiIndex.
Parameters
----------
tuples : list / sequence of tuple-likes
Each tuple is the index of one row/column.
sortorder : int or None
Level of sortedness (must be lexicographically sorted by that
level).
names : list / sequence of str, optional
Names for the levels in the index.
Returns
-------
index : MultiIndex
See Also
--------
MultiIndex.from_arrays : Convert list of arrays to MultiIndex.
MultiIndex.from_product : Make a MultiIndex from cartesian product
of iterables.
MultiIndex.from_frame : Make a MultiIndex from a DataFrame.
Examples
--------
>>> tuples = [(1, u'red'), (1, u'blue'),
... (2, u'red'), (2, u'blue')]
>>> pd.MultiIndex.from_tuples(tuples, names=('number', 'color'))
MultiIndex(levels=[[1, 2], ['blue', 'red']],
codes=[[0, 0, 1, 1], [1, 0, 1, 0]],
names=['number', 'color'])
"""
if not is_list_like(tuples):
raise TypeError('Input must be a list / sequence of tuple-likes.')
elif is_iterator(tuples):
tuples = list(tuples)
if len(tuples) == 0:
if names is None:
msg = 'Cannot infer number of levels from empty list'
raise TypeError(msg)
arrays = [[]] * len(names)
elif isinstance(tuples, (np.ndarray, Index)):
if isinstance(tuples, Index):
tuples = tuples._values
arrays = list(lib.tuples_to_object_array(tuples).T)
elif isinstance(tuples, list):
arrays = list(lib.to_object_array_tuples(tuples).T)
else:
arrays = lzip(*tuples)
return MultiIndex.from_arrays(arrays, sortorder=sortorder, names=names)
@classmethod
def from_product(cls, iterables, sortorder=None, names=None):
"""
Make a MultiIndex from the cartesian product of multiple iterables.
Parameters
----------
iterables : list / sequence of iterables
Each iterable has unique labels for each level of the index.
sortorder : int or None
Level of sortedness (must be lexicographically sorted by that
level).
names : list / sequence of str, optional
Names for the levels in the index.
Returns
-------
index : MultiIndex
See Also
--------
MultiIndex.from_arrays : Convert list of arrays to MultiIndex.
MultiIndex.from_tuples : Convert list of tuples to MultiIndex.
MultiIndex.from_frame : Make a MultiIndex from a DataFrame.
Examples
--------
>>> numbers = [0, 1, 2]
>>> colors = ['green', 'purple']
>>> pd.MultiIndex.from_product([numbers, colors],
... names=['number', 'color'])
MultiIndex(levels=[[0, 1, 2], ['green', 'purple']],
codes=[[0, 0, 1, 1, 2, 2], [0, 1, 0, 1, 0, 1]],
names=['number', 'color'])
"""
from pandas.core.arrays.categorical import _factorize_from_iterables
from pandas.core.reshape.util import cartesian_product
if not is_list_like(iterables):
raise TypeError("Input must be a list / sequence of iterables.")
elif is_iterator(iterables):
iterables = list(iterables)
codes, levels = _factorize_from_iterables(iterables)
codes = cartesian_product(codes)
return MultiIndex(levels, codes, sortorder=sortorder, names=names)
@classmethod
def from_frame(cls, df, sortorder=None, names=None):
"""
Make a MultiIndex from a DataFrame.
.. versionadded:: 0.24.0
Parameters
----------
df : DataFrame
DataFrame to be converted to MultiIndex.
sortorder : int, optional
Level of sortedness (must be lexicographically sorted by that
level).
names : list-like, optional
If no names are provided, use the column names, or tuple of column
names if the columns is a MultiIndex. If a sequence, overwrite
names with the given sequence.
Returns
-------
MultiIndex
The MultiIndex representation of the given DataFrame.
See Also
--------
MultiIndex.from_arrays : Convert list of arrays to MultiIndex.
MultiIndex.from_tuples : Convert list of tuples to MultiIndex.
MultiIndex.from_product : Make a MultiIndex from cartesian product
of iterables.
Examples
--------
>>> df = pd.DataFrame([['HI', 'Temp'], ['HI', 'Precip'],
... ['NJ', 'Temp'], ['NJ', 'Precip']],
... columns=['a', 'b'])
>>> df
a b
0 HI Temp
1 HI Precip
2 NJ Temp
3 NJ Precip
>>> pd.MultiIndex.from_frame(df)
MultiIndex(levels=[['HI', 'NJ'], ['Precip', 'Temp']],
codes=[[0, 0, 1, 1], [1, 0, 1, 0]],
names=['a', 'b'])
Using explicit names, instead of the column names
>>> pd.MultiIndex.from_frame(df, names=['state', 'observation'])
MultiIndex(levels=[['HI', 'NJ'], ['Precip', 'Temp']],
codes=[[0, 0, 1, 1], [1, 0, 1, 0]],
names=['state', 'observation'])
"""
if not isinstance(df, ABCDataFrame):
raise TypeError("Input must be a DataFrame")
column_names, columns = lzip(*df.iteritems())
names = column_names if names is None else names
return cls.from_arrays(columns, sortorder=sortorder, names=names)
# --------------------------------------------------------------------
@property
def levels(self):
return self._levels
@property
def _values(self):
        # We override here, since our parent uses _data, which we don't use.
return self.values
@property
def array(self):
"""
Raises a ValueError for `MultiIndex` because there's no single
array backing a MultiIndex.
Raises
------
ValueError
"""
msg = ("MultiIndex has no single backing array. Use "
"'MultiIndex.to_numpy()' to get a NumPy array of tuples.")
raise ValueError(msg)
@property
def _is_homogeneous_type(self):
"""Whether the levels of a MultiIndex all have the same dtype.
This looks at the dtypes of the levels.
See Also
--------
Index._is_homogeneous_type
DataFrame._is_homogeneous_type
Examples
--------
>>> MultiIndex.from_tuples([
... ('a', 'b'), ('a', 'c')])._is_homogeneous_type
True
>>> MultiIndex.from_tuples([
... ('a', 1), ('a', 2)])._is_homogeneous_type
False
"""
return len({x.dtype for x in self.levels}) <= 1
def _set_levels(self, levels, level=None, copy=False, validate=True,
verify_integrity=False):
# This is NOT part of the levels property because it should be
# externally not allowed to set levels. User beware if you change
# _levels directly
if validate and len(levels) == 0:
raise ValueError('Must set non-zero number of levels.')
if validate and level is None and len(levels) != self.nlevels:
raise ValueError('Length of levels must match number of levels.')
if validate and level is not None and len(levels) != len(level):
raise ValueError('Length of levels must match length of level.')
if level is None:
new_levels = FrozenList(
ensure_index(lev, copy=copy)._shallow_copy()
for lev in levels)
else:
level = [self._get_level_number(l) for l in level]
new_levels = list(self._levels)
for l, v in zip(level, levels):
new_levels[l] = ensure_index(v, copy=copy)._shallow_copy()
new_levels = FrozenList(new_levels)
if verify_integrity:
self._verify_integrity(levels=new_levels)
names = self.names
self._levels = new_levels
if any(names):
self._set_names(names)
self._tuples = None
self._reset_cache()
def set_levels(self, levels, level=None, inplace=False,
verify_integrity=True):
"""
Set new levels on MultiIndex. Defaults to returning
new index.
Parameters
----------
levels : sequence or list of sequence
new level(s) to apply
level : int, level name, or sequence of int/level names (default None)
level(s) to set (None for all levels)
inplace : bool
if True, mutates in place
verify_integrity : bool (default True)
if True, checks that levels and codes are compatible
Returns
-------
new index (of same type and class...etc)
Examples
--------
>>> idx = pd.MultiIndex.from_tuples([(1, u'one'), (1, u'two'),
(2, u'one'), (2, u'two')],
names=['foo', 'bar'])
>>> idx.set_levels([['a','b'], [1,2]])
MultiIndex(levels=[[u'a', u'b'], [1, 2]],
codes=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=[u'foo', u'bar'])
>>> idx.set_levels(['a','b'], level=0)
MultiIndex(levels=[[u'a', u'b'], [u'one', u'two']],
codes=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=[u'foo', u'bar'])
>>> idx.set_levels(['a','b'], level='bar')
MultiIndex(levels=[[1, 2], [u'a', u'b']],
codes=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=[u'foo', u'bar'])
>>> idx.set_levels([['a','b'], [1,2]], level=[0,1])
MultiIndex(levels=[[u'a', u'b'], [1, 2]],
codes=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=[u'foo', u'bar'])
"""
if is_list_like(levels) and not isinstance(levels, Index):
levels = list(levels)
if level is not None and not is_list_like(level):
if not is_list_like(levels):
raise TypeError("Levels must be list-like")
if is_list_like(levels[0]):
raise TypeError("Levels must be list-like")
level = [level]
levels = [levels]
elif level is None or is_list_like(level):
if not is_list_like(levels) or not is_list_like(levels[0]):
raise TypeError("Levels must be list of lists-like")
if inplace:
idx = self
else:
idx = self._shallow_copy()
idx._reset_identity()
idx._set_levels(levels, level=level, validate=True,
verify_integrity=verify_integrity)
if not inplace:
return idx
@property
def codes(self):
return self._codes
@property
def labels(self):
warnings.warn((".labels was deprecated in version 0.24.0. "
"Use .codes instead."),
FutureWarning, stacklevel=2)
return self.codes
def _set_codes(self, codes, level=None, copy=False, validate=True,
verify_integrity=False):
if validate and level is None and len(codes) != self.nlevels:
raise ValueError("Length of codes must match number of levels")
if validate and level is not None and len(codes) != len(level):
raise ValueError('Length of codes must match length of levels.')
if level is None:
new_codes = FrozenList(
_ensure_frozen(level_codes, lev, copy=copy)._shallow_copy()
for lev, level_codes in zip(self.levels, codes))
else:
level = [self._get_level_number(l) for l in level]
new_codes = list(self._codes)
for lev_idx, level_codes in zip(level, codes):
lev = self.levels[lev_idx]
new_codes[lev_idx] = _ensure_frozen(
level_codes, lev, copy=copy)._shallow_copy()
new_codes = FrozenList(new_codes)
if verify_integrity:
self._verify_integrity(codes=new_codes)
self._codes = new_codes
self._tuples = None
self._reset_cache()
def set_labels(self, labels, level=None, inplace=False,
verify_integrity=True):
warnings.warn((".set_labels was deprecated in version 0.24.0. "
"Use .set_codes instead."),
FutureWarning, stacklevel=2)
return self.set_codes(codes=labels, level=level, inplace=inplace,
verify_integrity=verify_integrity)
@deprecate_kwarg(old_arg_name='labels', new_arg_name='codes')
def set_codes(self, codes, level=None, inplace=False,
verify_integrity=True):
"""
Set new codes on MultiIndex. Defaults to returning
new index.
.. versionadded:: 0.24.0
New name for deprecated method `set_labels`.
Parameters
----------
codes : sequence or list of sequence
new codes to apply
level : int, level name, or sequence of int/level names (default None)
level(s) to set (None for all levels)
inplace : bool
if True, mutates in place
verify_integrity : bool (default True)
if True, checks that levels and codes are compatible
Returns
-------
new index (of same type and class...etc)
Examples
--------
>>> idx = pd.MultiIndex.from_tuples([(1, u'one'), (1, u'two'),
(2, u'one'), (2, u'two')],
names=['foo', 'bar'])
>>> idx.set_codes([[1,0,1,0], [0,0,1,1]])
MultiIndex(levels=[[1, 2], [u'one', u'two']],
codes=[[1, 0, 1, 0], [0, 0, 1, 1]],
names=[u'foo', u'bar'])
>>> idx.set_codes([1,0,1,0], level=0)
MultiIndex(levels=[[1, 2], [u'one', u'two']],
codes=[[1, 0, 1, 0], [0, 1, 0, 1]],
names=[u'foo', u'bar'])
>>> idx.set_codes([0,0,1,1], level='bar')
MultiIndex(levels=[[1, 2], [u'one', u'two']],
codes=[[0, 0, 1, 1], [0, 0, 1, 1]],
names=[u'foo', u'bar'])
>>> idx.set_codes([[1,0,1,0], [0,0,1,1]], level=[0,1])
MultiIndex(levels=[[1, 2], [u'one', u'two']],
codes=[[1, 0, 1, 0], [0, 0, 1, 1]],
names=[u'foo', u'bar'])
"""
if level is not None and not is_list_like(level):
if not is_list_like(codes):
raise TypeError("Codes must be list-like")
if is_list_like(codes[0]):
raise TypeError("Codes must be list-like")
level = [level]
codes = [codes]
elif level is None or is_list_like(level):
if not is_list_like(codes) or not is_list_like(codes[0]):
raise TypeError("Codes must be list of lists-like")
if inplace:
idx = self
else:
idx = self._shallow_copy()
idx._reset_identity()
idx._set_codes(codes, level=level, verify_integrity=verify_integrity)
if not inplace:
return idx
@deprecate_kwarg(old_arg_name='labels', new_arg_name='codes')
def copy(self, names=None, dtype=None, levels=None, codes=None,
deep=False, _set_identity=False, **kwargs):
"""
Make a copy of this object. Names, dtype, levels and codes can be
passed and will be set on new copy.
Parameters
----------
names : sequence, optional
dtype : numpy dtype or pandas type, optional
levels : sequence, optional
codes : sequence, optional
Returns
-------
copy : MultiIndex
Notes
-----
In most cases, there should be no functional difference from using
``deep``, but if ``deep`` is passed it will attempt to deepcopy.
This could be potentially expensive on large MultiIndex objects.
"""
name = kwargs.get('name')
names = self._validate_names(name=name, names=names, deep=deep)
if deep:
from copy import deepcopy
if levels is None:
levels = deepcopy(self.levels)
if codes is None:
codes = deepcopy(self.codes)
else:
if levels is None:
levels = self.levels
if codes is None:
codes = self.codes
return MultiIndex(levels=levels, codes=codes, names=names,
sortorder=self.sortorder, verify_integrity=False,
_set_identity=_set_identity)
def __array__(self, dtype=None):
""" the array interface, return my values """
return self.values
def view(self, cls=None):
""" this is defined as a copy with the same identity """
result = self.copy()
result._id = self._id
return result
def _shallow_copy_with_infer(self, values, **kwargs):
# On equal MultiIndexes the difference is empty.
# Therefore, an empty MultiIndex is returned GH13490
if len(values) == 0:
return MultiIndex(levels=[[] for _ in range(self.nlevels)],
codes=[[] for _ in range(self.nlevels)],
**kwargs)
return self._shallow_copy(values, **kwargs)
@Appender(_index_shared_docs['contains'] % _index_doc_kwargs)
def __contains__(self, key):
hash(key)
try:
self.get_loc(key)
return True
except (LookupError, TypeError):
return False
contains = __contains__
@Appender(_index_shared_docs['_shallow_copy'])
def _shallow_copy(self, values=None, **kwargs):
if values is not None:
names = kwargs.pop('names', kwargs.pop('name', self.names))
# discards freq
kwargs.pop('freq', None)
return MultiIndex.from_tuples(values, names=names, **kwargs)
return self.view()
@cache_readonly
def dtype(self):
return np.dtype('O')
def _is_memory_usage_qualified(self):
""" return a boolean if we need a qualified .info display """
def f(l):
return 'mixed' in l or 'string' in l or 'unicode' in l
return any(f(l) for l in self._inferred_type_levels)
@Appender(Index.memory_usage.__doc__)
def memory_usage(self, deep=False):
# we are overwriting our base class to avoid
# computing .values here which could materialize
        # a tuple representation unnecessarily
return self._nbytes(deep)
@cache_readonly
def nbytes(self):
""" return the number of bytes in the underlying data """
return self._nbytes(False)
def _nbytes(self, deep=False):
"""
return the number of bytes in the underlying data
deeply introspect the level data if deep=True
include the engine hashtable
        *this is an internal routine*
"""
# for implementations with no useful getsizeof (PyPy)
objsize = 24
level_nbytes = sum(i.memory_usage(deep=deep) for i in self.levels)
label_nbytes = sum(i.nbytes for i in self.codes)
names_nbytes = sum(getsizeof(i, objsize) for i in self.names)
result = level_nbytes + label_nbytes + names_nbytes
# include our engine hashtable
result += self._engine.sizeof(deep=deep)
return result
# --------------------------------------------------------------------
# Rendering Methods
def _format_attrs(self):
"""
Return a list of tuples of the (attr,formatted_value)
"""
attrs = [
('levels', ibase.default_pprint(self._levels,
max_seq_items=False)),
('codes', ibase.default_pprint(self._codes,
max_seq_items=False))]
if com._any_not_none(*self.names):
attrs.append(('names', ibase.default_pprint(self.names)))
if self.sortorder is not None:
attrs.append(('sortorder', ibase.default_pprint(self.sortorder)))
return attrs
def _format_space(self):
return "\n%s" % (' ' * (len(self.__class__.__name__) + 1))
def _format_data(self, name=None):
# we are formatting thru the attributes
return None
def _format_native_types(self, na_rep='nan', **kwargs):
new_levels = []
new_codes = []
# go through the levels and format them
for level, level_codes in zip(self.levels, self.codes):
level = level._format_native_types(na_rep=na_rep, **kwargs)
# add nan values, if there are any
mask = (level_codes == -1)
if mask.any():
nan_index = len(level)
level = np.append(level, na_rep)
level_codes = level_codes.values()
level_codes[mask] = nan_index
new_levels.append(level)
new_codes.append(level_codes)
if len(new_levels) == 1:
return Index(new_levels[0])._format_native_types()
else:
# reconstruct the multi-index
mi = MultiIndex(levels=new_levels, codes=new_codes,
names=self.names, sortorder=self.sortorder,
verify_integrity=False)
return mi.values
def format(self, space=2, sparsify=None, adjoin=True, names=False,
na_rep=None, formatter=None):
if len(self) == 0:
return []
stringified_levels = []
for lev, level_codes in zip(self.levels, self.codes):
na = na_rep if na_rep is not None else _get_na_rep(lev.dtype.type)
if len(lev) > 0:
formatted = lev.take(level_codes).format(formatter=formatter)
# we have some NA
mask = level_codes == -1
if mask.any():
formatted = np.array(formatted, dtype=object)
formatted[mask] = na
formatted = formatted.tolist()
else:
# weird all NA case
formatted = [pprint_thing(na if isna(x) else x,
escape_chars=('\t', '\r', '\n'))
for x in algos.take_1d(lev._values, level_codes)]
stringified_levels.append(formatted)
result_levels = []
for lev, name in zip(stringified_levels, self.names):
level = []
if names:
level.append(pprint_thing(name,
escape_chars=('\t', '\r', '\n'))
if name is not None else '')
level.extend(np.array(lev, dtype=object))
result_levels.append(level)
if sparsify is None:
sparsify = get_option("display.multi_sparse")
if sparsify:
sentinel = ''
# GH3547
# use value of sparsify as sentinel, unless it's an obvious
# "Truthey" value
if sparsify not in [True, 1]:
sentinel = sparsify
# little bit of a kludge job for #1217
result_levels = _sparsify(result_levels, start=int(names),
sentinel=sentinel)
if adjoin:
from pandas.io.formats.format import _get_adjustment
adj = _get_adjustment()
return adj.adjoin(space, *result_levels).split('\n')
else:
return result_levels
# --------------------------------------------------------------------
def __len__(self):
return len(self.codes[0])
def _get_names(self):
return FrozenList(level.name for level in self.levels)
def _set_names(self, names, level=None, validate=True):
"""
Set new names on index. Each name has to be a hashable type.
Parameters
----------
        names : str or sequence
name(s) to set
level : int, level name, or sequence of int/level names (default None)
If the index is a MultiIndex (hierarchical), level(s) to set (None
for all levels). Otherwise level must be None
validate : boolean, default True
validate that the names match level lengths
Raises
------
TypeError if each name is not hashable.
Notes
-----
sets names on levels. WARNING: mutates!
Note that you generally want to set this *after* changing levels, so
that it only acts on copies
"""
# GH 15110
# Don't allow a single string for names in a MultiIndex
if names is not None and not is_list_like(names):
raise ValueError('Names should be list-like for a MultiIndex')
names = list(names)
if validate and level is not None and len(names) != len(level):
raise ValueError('Length of names must match length of level.')
if validate and level is None and len(names) != self.nlevels:
raise ValueError('Length of names must match number of levels in '
'MultiIndex.')
if level is None:
level = range(self.nlevels)
else:
level = [self._get_level_number(l) for l in level]
# set the name
for l, name in zip(level, names):
if name is not None:
# GH 20527
# All items in 'names' need to be hashable:
if not is_hashable(name):
raise TypeError('{}.name must be a hashable type'
.format(self.__class__.__name__))
self.levels[l].rename(name, inplace=True)
names = property(fset=_set_names, fget=_get_names,
doc="Names of levels in MultiIndex")
@Appender(_index_shared_docs['_get_grouper_for_level'])
def _get_grouper_for_level(self, mapper, level):
indexer = self.codes[level]
level_index = self.levels[level]
if mapper is not None:
# Handle group mapping function and return
level_values = self.levels[level].take(indexer)
grouper = level_values.map(mapper)
return grouper, None, None
codes, uniques = algos.factorize(indexer, sort=True)
if len(uniques) > 0 and uniques[0] == -1:
# Handle NAs
mask = indexer != -1
ok_codes, uniques = algos.factorize(indexer[mask], sort=True)
codes = np.empty(len(indexer), dtype=indexer.dtype)
codes[mask] = ok_codes
codes[~mask] = -1
if len(uniques) < len(level_index):
# Remove unobserved levels from level_index
level_index = level_index.take(uniques)
grouper = level_index.take(codes)
return grouper, codes, level_index
@property
def _constructor(self):
return MultiIndex.from_tuples
@cache_readonly
def inferred_type(self):
return 'mixed'
def _get_level_number(self, level):
count = self.names.count(level)
if (count > 1) and not is_integer(level):
raise ValueError('The name %s occurs multiple times, use a '
'level number' % level)
try:
level = self.names.index(level)
except ValueError:
if not is_integer(level):
raise KeyError('Level %s not found' % str(level))
elif level < 0:
level += self.nlevels
if level < 0:
orig_level = level - self.nlevels
raise IndexError('Too many levels: Index has only %d '
'levels, %d is not a valid level number' %
(self.nlevels, orig_level))
# Note: levels are zero-based
elif level >= self.nlevels:
raise IndexError('Too many levels: Index has only %d levels, '
'not %d' % (self.nlevels, level + 1))
return level
_tuples = None
@cache_readonly
def _engine(self):
# Calculate the number of bits needed to represent labels in each
# level, as log2 of their sizes (including -1 for NaN):
sizes = np.ceil(np.log2([len(l) + 1 for l in self.levels]))
# Sum bit counts, starting from the _right_....
lev_bits = np.cumsum(sizes[::-1])[::-1]
# ... in order to obtain offsets such that sorting the combination of
# shifted codes (one for each level, resulting in a unique integer) is
# equivalent to sorting lexicographically the codes themselves. Notice
# that each level needs to be shifted by the number of bits needed to
# represent the _previous_ ones:
offsets = np.concatenate([lev_bits[1:], [0]]).astype('uint64')
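        # Illustrative sketch (toy sizes, not taken from this index): with two
        # levels of lengths 4 and 2, sizes is [3., 2.], lev_bits is [5., 2.]
        # and offsets is [2, 0]; a row's codes (c0, c1), once shifted to be
        # non-negative, are packed roughly as (c0 << 2) | c1, a single integer
        # that sorts the same way as the original pair of codes.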
# Check the total number of bits needed for our representation:
if lev_bits[0] > 64:
# The levels would overflow a 64 bit uint - use Python integers:
return MultiIndexPyIntEngine(self.levels, self.codes, offsets)
return MultiIndexUIntEngine(self.levels, self.codes, offsets)
@property
def values(self):
if self._tuples is not None:
return self._tuples
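        # build one array per level (converted to object where needed), then
        # zip the per-level columns row-wise into tuples and cache the result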
values = []
for i in range(self.nlevels):
vals = self._get_level_values(i)
if is_categorical_dtype(vals):
vals = vals.get_values()
if (isinstance(vals.dtype, (PandasExtensionDtype, ExtensionDtype))
or hasattr(vals, '_box_values')):
vals = vals.astype(object)
vals = np.array(vals, copy=False)
values.append(vals)
self._tuples = lib.fast_zip(values)
return self._tuples
@property
def _has_complex_internals(self):
# to disable groupby tricks
return True
@cache_readonly
def is_monotonic_increasing(self):
"""
return if the index is monotonic increasing (only equal or
increasing) values.
"""
# reversed() because lexsort() wants the most significant key last.
values = [self._get_level_values(i).values
for i in reversed(range(len(self.levels)))]
try:
sort_order = np.lexsort(values)
return Index(sort_order).is_monotonic
except TypeError:
# we have mixed types and np.lexsort is not happy
return Index(self.values).is_monotonic
@cache_readonly
def is_monotonic_decreasing(self):
"""
return if the index is monotonic decreasing (only equal or
decreasing) values.
"""
# monotonic decreasing if and only if reverse is monotonic increasing
return self[::-1].is_monotonic_increasing
@cache_readonly
def _have_mixed_levels(self):
""" return a boolean list indicated if we have mixed levels """
return ['mixed' in l for l in self._inferred_type_levels]
@cache_readonly
def _inferred_type_levels(self):
""" return a list of the inferred types, one for each level """
return [i.inferred_type for i in self.levels]
@cache_readonly
def _hashed_values(self):
""" return a uint64 ndarray of my hashed values """
from pandas.core.util.hashing import hash_tuples
return hash_tuples(self)
def _hashed_indexing_key(self, key):
"""
validate and return the hash for the provided key
        *this is internal, for use by the cython routines*
Parameters
----------
key : string or tuple
Returns
-------
np.uint64
Notes
-----
we need to stringify if we have mixed levels
"""
from pandas.core.util.hashing import hash_tuples, hash_tuple
if not isinstance(key, tuple):
return hash_tuples(key)
if not len(key) == self.nlevels:
raise KeyError
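        # stringify components that belong to mixed-type levels so that their
        # hashes are consistent with how the index's own mixed values are
        # hashed (see _have_mixed_levels / _hashed_values)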
def f(k, stringify):
if stringify and not isinstance(k, compat.string_types):
k = str(k)
return k
key = tuple(f(k, stringify)
for k, stringify in zip(key, self._have_mixed_levels))
return hash_tuple(key)
@Appender(Index.duplicated.__doc__)
def duplicated(self, keep='first'):
from pandas.core.sorting import get_group_index
from pandas._libs.hashtable import duplicated_int64
shape = map(len, self.levels)
ids = get_group_index(self.codes, shape, sort=False, xnull=False)
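        # get_group_index encodes each row's tuple of level codes as a single
        # integer id, so duplicate detection reduces to duplicated_int64 on
        # those ids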
return duplicated_int64(ids, keep)
def fillna(self, value=None, downcast=None):
"""
fillna is not implemented for MultiIndex
"""
        raise NotImplementedError('fillna is not defined for MultiIndex')
@Appender(_index_shared_docs['dropna'])
def dropna(self, how='any'):
nans = [level_codes == -1 for level_codes in self.codes]
if how == 'any':
indexer = np.any(nans, axis=0)
elif how == 'all':
indexer = np.all(nans, axis=0)
else:
raise ValueError("invalid how option: {0}".format(how))
new_codes = [level_codes[~indexer] for level_codes in self.codes]
return self.copy(codes=new_codes, deep=True)
def get_value(self, series, key):
# somewhat broken encapsulation
from pandas.core.indexing import maybe_droplevels
# Label-based
s = com.values_from_object(series)
k = com.values_from_object(key)
def _try_mi(k):
# TODO: what if a level contains tuples??
loc = self.get_loc(k)
new_values = series._values[loc]
new_index = self[loc]
new_index = maybe_droplevels(new_index, k)
return series._constructor(new_values, index=new_index,
name=series.name).__finalize__(self)
try:
return self._engine.get_value(s, k)
except KeyError as e1:
try:
return _try_mi(key)
except KeyError:
pass
try:
return libindex.get_value_at(s, k)
except IndexError:
raise
except TypeError:
# generator/iterator-like
if is_iterator(key):
raise InvalidIndexError(key)
else:
raise e1
except Exception: # pragma: no cover
raise e1
except TypeError:
# a Timestamp will raise a TypeError in a multi-index
# rather than a KeyError, try it here
# note that a string that 'looks' like a Timestamp will raise
# a KeyError! (GH5725)
if (isinstance(key, (datetime.datetime, np.datetime64)) or
(compat.PY3 and isinstance(key, compat.string_types))):
try:
return _try_mi(key)
except KeyError:
raise
except (IndexError, ValueError, TypeError):
pass
try:
return _try_mi(Timestamp(key))
except (KeyError, TypeError,
IndexError, ValueError, tslibs.OutOfBoundsDatetime):
pass
raise InvalidIndexError(key)
def _get_level_values(self, level, unique=False):
"""
Return vector of label values for requested level,
equal to the length of the index
**this is an internal method**
Parameters
----------
level : int level
unique : bool, default False
if True, drop duplicated values
Returns
-------
values : ndarray
"""
values = self.levels[level]
level_codes = self.codes[level]
if unique:
level_codes = algos.unique(level_codes)
filled = algos.take_1d(values._values, level_codes,
fill_value=values._na_value)
values = values._shallow_copy(filled)
return values
def get_level_values(self, level):
"""
Return vector of label values for requested level,
equal to the length of the index.
Parameters
----------
level : int or str
``level`` is either the integer position of the level in the
MultiIndex, or the name of the level.
Returns
-------
values : Index
``values`` is a level of this MultiIndex converted to
a single :class:`Index` (or subclass thereof).
Examples
        --------
Create a MultiIndex:
>>> mi = pd.MultiIndex.from_arrays((list('abc'), list('def')))
>>> mi.names = ['level_1', 'level_2']
Get level values by supplying level as either integer or name:
>>> mi.get_level_values(0)
Index(['a', 'b', 'c'], dtype='object', name='level_1')
>>> mi.get_level_values('level_2')
Index(['d', 'e', 'f'], dtype='object', name='level_2')
"""
level = self._get_level_number(level)
values = self._get_level_values(level)
return values
@Appender(_index_shared_docs['index_unique'] % _index_doc_kwargs)
def unique(self, level=None):
if level is None:
return super(MultiIndex, self).unique()
else:
level = self._get_level_number(level)
return self._get_level_values(level=level, unique=True)
def _to_safe_for_reshape(self):
""" convert to object if we are a categorical """
return self.set_levels([i._to_safe_for_reshape() for i in self.levels])
def to_frame(self, index=True, name=None):
"""
Create a DataFrame with the levels of the MultiIndex as columns.
Column ordering is determined by the DataFrame constructor with data as
a dict.
.. versionadded:: 0.24.0
Parameters
----------
index : boolean, default True
Set the index of the returned DataFrame as the original MultiIndex.
name : list / sequence of strings, optional
The passed names should substitute index level names.
Returns
-------
DataFrame : a DataFrame containing the original MultiIndex data.
See Also
--------
DataFrame
"""
from pandas import DataFrame
if name is not None:
if not is_list_like(name):
raise TypeError("'name' must be a list / sequence "
"of column names.")
if len(name) != len(self.levels):
raise ValueError("'name' should have same length as "
"number of levels on index.")
idx_names = name
else:
idx_names = self.names
# Guarantee resulting column order
result = DataFrame(
OrderedDict([
((level if lvlname is None else lvlname),
self._get_level_values(level))
for lvlname, level in zip(idx_names, range(len(self.levels)))
]),
copy=False
)
if index:
result.index = self
return result
def to_hierarchical(self, n_repeat, n_shuffle=1):
"""
Return a MultiIndex reshaped to conform to the
shapes given by n_repeat and n_shuffle.
.. deprecated:: 0.24.0
Useful to replicate and rearrange a MultiIndex for combination
with another Index with n_repeat items.
Parameters
----------
n_repeat : int
Number of times to repeat the labels on self
n_shuffle : int
Controls the reordering of the labels. If the result is going
to be an inner level in a MultiIndex, n_shuffle will need to be
            greater than one. The size of each label must be divisible by
n_shuffle.
Returns
-------
MultiIndex
Examples
--------
>>> idx = pd.MultiIndex.from_tuples([(1, u'one'), (1, u'two'),
(2, u'one'), (2, u'two')])
>>> idx.to_hierarchical(3)
MultiIndex(levels=[[1, 2], [u'one', u'two']],
codes=[[0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1],
[0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1]])
"""
levels = self.levels
codes = [np.repeat(level_codes, n_repeat) for
level_codes in self.codes]
# Assumes that each level_codes is divisible by n_shuffle
codes = [x.reshape(n_shuffle, -1).ravel(order='F') for x in codes]
names = self.names
warnings.warn("Method .to_hierarchical is deprecated and will "
"be removed in a future version",
FutureWarning, stacklevel=2)
return MultiIndex(levels=levels, codes=codes, names=names)
def to_flat_index(self):
"""
Convert a MultiIndex to an Index of Tuples containing the level values.
.. versionadded:: 0.24.0
Returns
-------
pd.Index
Index with the MultiIndex data represented in Tuples.
Notes
-----
This method will simply return the caller if called by anything other
than a MultiIndex.
Examples
--------
>>> index = pd.MultiIndex.from_product(
... [['foo', 'bar'], ['baz', 'qux']],
... names=['a', 'b'])
>>> index.to_flat_index()
Index([('foo', 'baz'), ('foo', 'qux'),
('bar', 'baz'), ('bar', 'qux')],
dtype='object')
"""
return Index(self.values, tupleize_cols=False)
@property
def is_all_dates(self):
return False
def is_lexsorted(self):
"""
Return True if the codes are lexicographically sorted
"""
return self.lexsort_depth == self.nlevels
@cache_readonly
def lexsort_depth(self):
if self.sortorder is not None:
if self.sortorder == 0:
return self.nlevels
else:
return 0
int64_codes = [ensure_int64(level_codes) for level_codes in self.codes]
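        # find the largest k such that the codes of the first k levels are
        # lexicographically sorted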
for k in range(self.nlevels, 0, -1):
if libalgos.is_lexsorted(int64_codes[:k]):
return k
return 0
def _sort_levels_monotonic(self):
"""
.. versionadded:: 0.20.0
This is an *internal* function.
        Create a new MultiIndex from the current one in which the items IN
        the levels are sorted monotonically. This does not actually make the
        entire MultiIndex monotonic, JUST the levels.
The resulting MultiIndex will have the same outward
appearance, meaning the same .values and ordering. It will also
be .equals() to the original.
Returns
-------
MultiIndex
Examples
--------
>>> i = pd.MultiIndex(levels=[['a', 'b'], ['bb', 'aa']],
codes=[[0, 0, 1, 1], [0, 1, 0, 1]])
>>> i
MultiIndex(levels=[['a', 'b'], ['bb', 'aa']],
codes=[[0, 0, 1, 1], [0, 1, 0, 1]])
        >>> i._sort_levels_monotonic()
MultiIndex(levels=[['a', 'b'], ['aa', 'bb']],
codes=[[0, 0, 1, 1], [1, 0, 1, 0]])
"""
if self.is_lexsorted() and self.is_monotonic:
return self
new_levels = []
new_codes = []
for lev, level_codes in zip(self.levels, self.codes):
if not lev.is_monotonic:
try:
# indexer to reorder the levels
indexer = lev.argsort()
except TypeError:
pass
else:
lev = lev.take(indexer)
# indexer to reorder the level codes
indexer = ensure_int64(indexer)
ri = lib.get_reverse_indexer(indexer, len(indexer))
level_codes = algos.take_1d(ri, level_codes)
new_levels.append(lev)
new_codes.append(level_codes)
return MultiIndex(new_levels, new_codes,
names=self.names, sortorder=self.sortorder,
verify_integrity=False)
def remove_unused_levels(self):
"""
Create a new MultiIndex from the current that removes
unused levels, meaning that they are not expressed in the labels.
The resulting MultiIndex will have the same outward
appearance, meaning the same .values and ordering. It will also
be .equals() to the original.
.. versionadded:: 0.20.0
Returns
-------
MultiIndex
Examples
--------
        >>> i = pd.MultiIndex.from_product([range(2), list('ab')])
        >>> i
        MultiIndex(levels=[[0, 1], ['a', 'b']],
                   codes=[[0, 0, 1, 1], [0, 1, 0, 1]])
>>> i[2:]
MultiIndex(levels=[[0, 1], ['a', 'b']],
codes=[[1, 1], [0, 1]])
The 0 from the first level is not represented
and can be removed
>>> i[2:].remove_unused_levels()
MultiIndex(levels=[[1], ['a', 'b']],
codes=[[0, 0], [0, 1]])
"""
new_levels = []
new_codes = []
changed = False
for lev, level_codes in zip(self.levels, self.codes):
# Since few levels are typically unused, bincount() is more
# efficient than unique() - however it only accepts positive values
# (and drops order):
uniques = np.where(np.bincount(level_codes + 1) > 0)[0] - 1
has_na = int(len(uniques) and (uniques[0] == -1))
if len(uniques) != len(lev) + has_na:
# We have unused levels
changed = True
# Recalculate uniques, now preserving order.
# Can easily be cythonized by exploiting the already existing
# "uniques" and stop parsing "level_codes" when all items
# are found:
uniques = algos.unique(level_codes)
if has_na:
na_idx = np.where(uniques == -1)[0]
# Just ensure that -1 is in first position:
uniques[[0, na_idx[0]]] = uniques[[na_idx[0], 0]]
# codes get mapped from uniques to 0:len(uniques)
# -1 (if present) is mapped to last position
code_mapping = np.zeros(len(lev) + has_na)
# ... and reassigned value -1:
code_mapping[uniques] = np.arange(len(uniques)) - has_na
level_codes = code_mapping[level_codes]
# new levels are simple
lev = lev.take(uniques[has_na:])
new_levels.append(lev)
new_codes.append(level_codes)
result = self._shallow_copy()
if changed:
result._reset_identity()
result._set_levels(new_levels, validate=False)
result._set_codes(new_codes, validate=False)
return result
@property
def nlevels(self):
"""Integer number of levels in this MultiIndex."""
return len(self.levels)
@property
def levshape(self):
"""A tuple with the length of each level."""
return tuple(len(x) for x in self.levels)
def __reduce__(self):
"""Necessary for making this object picklable"""
d = dict(levels=[lev for lev in self.levels],
codes=[level_codes for level_codes in self.codes],
sortorder=self.sortorder, names=list(self.names))
return ibase._new_Index, (self.__class__, d), None
def __setstate__(self, state):
"""Necessary for making this object picklable"""
if isinstance(state, dict):
levels = state.get('levels')
codes = state.get('codes')
sortorder = state.get('sortorder')
names = state.get('names')
elif isinstance(state, tuple):
nd_state, own_state = state
levels, codes, sortorder, names = own_state
self._set_levels([Index(x) for x in levels], validate=False)
self._set_codes(codes)
self._set_names(names)
self.sortorder = sortorder
self._verify_integrity()
self._reset_identity()
def __getitem__(self, key):
if is_scalar(key):
key = com.cast_scalar_indexer(key)
retval = []
for lev, level_codes in zip(self.levels, self.codes):
if level_codes[key] == -1:
retval.append(np.nan)
else:
retval.append(lev[level_codes[key]])
return tuple(retval)
else:
if com.is_bool_indexer(key):
key = np.asarray(key, dtype=bool)
sortorder = self.sortorder
else:
# cannot be sure whether the result will be sorted
sortorder = None
if isinstance(key, Index):
key = np.asarray(key)
new_codes = [level_codes[key] for level_codes in self.codes]
return MultiIndex(levels=self.levels, codes=new_codes,
names=self.names, sortorder=sortorder,
verify_integrity=False)
@Appender(_index_shared_docs['take'] % _index_doc_kwargs)
def take(self, indices, axis=0, allow_fill=True,
fill_value=None, **kwargs):
nv.validate_take(tuple(), kwargs)
indices = ensure_platform_int(indices)
taken = self._assert_take_fillable(self.codes, indices,
allow_fill=allow_fill,
fill_value=fill_value,
na_value=-1)
return MultiIndex(levels=self.levels, codes=taken,
names=self.names, verify_integrity=False)
def _assert_take_fillable(self, values, indices, allow_fill=True,
fill_value=None, na_value=None):
""" Internal method to handle NA filling of take """
# only fill if we are passing a non-None fill_value
if allow_fill and fill_value is not None:
if (indices < -1).any():
msg = ('When allow_fill=True and fill_value is not None, '
'all indices must be >= -1')
raise ValueError(msg)
taken = [lab.take(indices) for lab in self.codes]
mask = indices == -1
if mask.any():
masked = []
for new_label in taken:
label_values = new_label.values()
label_values[mask] = na_value
masked.append(np.asarray(label_values))
taken = masked
else:
taken = [lab.take(indices) for lab in self.codes]
return taken
def append(self, other):
"""
        Append a collection of Index objects together
Parameters
----------
other : Index or list/tuple of indices
Returns
-------
appended : Index
"""
if not isinstance(other, (list, tuple)):
other = [other]
if all((isinstance(o, MultiIndex) and o.nlevels >= self.nlevels)
for o in other):
arrays = []
for i in range(self.nlevels):
label = self._get_level_values(i)
appended = [o._get_level_values(i) for o in other]
arrays.append(label.append(appended))
return MultiIndex.from_arrays(arrays, names=self.names)
to_concat = (self.values, ) + tuple(k._values for k in other)
new_tuples = np.concatenate(to_concat)
# if all(isinstance(x, MultiIndex) for x in other):
try:
return MultiIndex.from_tuples(new_tuples, names=self.names)
except (TypeError, IndexError):
return Index(new_tuples)
def argsort(self, *args, **kwargs):
return self.values.argsort(*args, **kwargs)
@Appender(_index_shared_docs['repeat'] % _index_doc_kwargs)
def repeat(self, repeats, axis=None):
nv.validate_repeat(tuple(), dict(axis=axis))
return MultiIndex(levels=self.levels,
codes=[level_codes.view(np.ndarray).repeat(repeats)
for level_codes in self.codes],
names=self.names, sortorder=self.sortorder,
verify_integrity=False)
def where(self, cond, other=None):
raise NotImplementedError(".where is not supported for "
"MultiIndex operations")
@deprecate_kwarg(old_arg_name='labels', new_arg_name='codes')
def drop(self, codes, level=None, errors='raise'):
"""
Make new MultiIndex with passed list of codes deleted
Parameters
----------
codes : array-like
Must be a list of tuples
level : int or level name, default None
Returns
-------
dropped : MultiIndex
"""
if level is not None:
return self._drop_from_level(codes, level)
try:
if not isinstance(codes, (np.ndarray, Index)):
codes = com.index_labels_to_array(codes)
indexer = self.get_indexer(codes)
mask = indexer == -1
if mask.any():
if errors != 'ignore':
raise ValueError('codes %s not contained in axis' %
codes[mask])
except Exception:
pass
inds = []
for level_codes in codes:
try:
loc = self.get_loc(level_codes)
# get_loc returns either an integer, a slice, or a boolean
# mask
if isinstance(loc, int):
inds.append(loc)
elif isinstance(loc, slice):
inds.extend(lrange(loc.start, loc.stop))
elif com.is_bool_indexer(loc):
if self.lexsort_depth == 0:
warnings.warn('dropping on a non-lexsorted multi-index'
' without a level parameter may impact '
'performance.',
PerformanceWarning,
stacklevel=3)
loc = loc.nonzero()[0]
inds.extend(loc)
else:
msg = 'unsupported indexer of type {}'.format(type(loc))
raise AssertionError(msg)
except KeyError:
if errors != 'ignore':
raise
return self.delete(inds)
def _drop_from_level(self, codes, level):
codes = com.index_labels_to_array(codes)
i = self._get_level_number(level)
index = self.levels[i]
values = index.get_indexer(codes)
mask = ~algos.isin(self.codes[i], values)
return self[mask]
def swaplevel(self, i=-2, j=-1):
"""
Swap level i with level j.
Calling this method does not change the ordering of the values.
Parameters
----------
i : int, str, default -2
First level of index to be swapped. Can pass level name as string.
Type of parameters can be mixed.
j : int, str, default -1
Second level of index to be swapped. Can pass level name as string.
Type of parameters can be mixed.
Returns
-------
MultiIndex
A new MultiIndex
.. versionchanged:: 0.18.1
The indexes ``i`` and ``j`` are now optional, and default to
the two innermost levels of the index.
See Also
--------
Series.swaplevel : Swap levels i and j in a MultiIndex.
        DataFrame.swaplevel : Swap levels i and j in a MultiIndex on a
particular axis.
Examples
--------
>>> mi = pd.MultiIndex(levels=[['a', 'b'], ['bb', 'aa']],
... codes=[[0, 0, 1, 1], [0, 1, 0, 1]])
>>> mi
MultiIndex(levels=[['a', 'b'], ['bb', 'aa']],
codes=[[0, 0, 1, 1], [0, 1, 0, 1]])
>>> mi.swaplevel(0, 1)
MultiIndex(levels=[['bb', 'aa'], ['a', 'b']],
codes=[[0, 1, 0, 1], [0, 0, 1, 1]])
"""
new_levels = list(self.levels)
new_codes = list(self.codes)
new_names = list(self.names)
i = self._get_level_number(i)
j = self._get_level_number(j)
new_levels[i], new_levels[j] = new_levels[j], new_levels[i]
new_codes[i], new_codes[j] = new_codes[j], new_codes[i]
new_names[i], new_names[j] = new_names[j], new_names[i]
return MultiIndex(levels=new_levels, codes=new_codes,
names=new_names, verify_integrity=False)
def reorder_levels(self, order):
"""
        Rearrange levels using input order. May not drop or duplicate levels.
        Parameters
        ----------
        order : list of int or list of str
            List representing new level order. Reference level by number
            (position) or by key (label).
        """
order = [self._get_level_number(i) for i in order]
if len(order) != self.nlevels:
raise AssertionError('Length of order must be same as '
'number of levels (%d), got %d' %
(self.nlevels, len(order)))
new_levels = [self.levels[i] for i in order]
new_codes = [self.codes[i] for i in order]
new_names = [self.names[i] for i in order]
return MultiIndex(levels=new_levels, codes=new_codes,
names=new_names, verify_integrity=False)
def __getslice__(self, i, j):
return self.__getitem__(slice(i, j))
def _get_codes_for_sorting(self):
"""
        we categorize our codes by using the
        available categories (all, not just observed),
        excluding any missing ones (-1); this is in preparation
        for sorting, where we need to disambiguate that -1 is not
        a valid value
"""
from pandas.core.arrays import Categorical
def cats(level_codes):
return np.arange(np.array(level_codes).max() + 1 if
len(level_codes) else 0,
dtype=level_codes.dtype)
return [Categorical.from_codes(level_codes, cats(level_codes),
ordered=True)
for level_codes in self.codes]
def sortlevel(self, level=0, ascending=True, sort_remaining=True):
"""
Sort MultiIndex at the requested level. The result will respect the
original ordering of the associated factor at that level.
Parameters
----------
level : list-like, int or str, default 0
            If a string is given, must be a name of the level.
            If list-like, must be names or ints of levels.
        ascending : boolean, default True
            False to sort in descending order.
            Can also be a list to specify a directed ordering.
        sort_remaining : bool, default True
            Sort by the remaining levels after `level`.
Returns
-------
sorted_index : pd.MultiIndex
Resulting index
indexer : np.ndarray
Indices of output values in original index
"""
from pandas.core.sorting import indexer_from_factorized
if isinstance(level, (compat.string_types, int)):
level = [level]
level = [self._get_level_number(lev) for lev in level]
sortorder = None
# we have a directed ordering via ascending
if isinstance(ascending, list):
if not len(level) == len(ascending):
raise ValueError("level must have same length as ascending")
from pandas.core.sorting import lexsort_indexer
indexer = lexsort_indexer([self.codes[lev] for lev in level],
orders=ascending)
# level ordering
else:
codes = list(self.codes)
shape = list(self.levshape)
# partition codes and shape
primary = tuple(codes.pop(lev - i) for i, lev in enumerate(level))
primshp = tuple(shape.pop(lev - i) for i, lev in enumerate(level))
if sort_remaining:
primary += primary + tuple(codes)
primshp += primshp + tuple(shape)
else:
sortorder = level[0]
indexer = indexer_from_factorized(primary, primshp,
compress=False)
if not ascending:
indexer = indexer[::-1]
indexer = ensure_platform_int(indexer)
new_codes = [level_codes.take(indexer) for level_codes in self.codes]
new_index = MultiIndex(codes=new_codes, levels=self.levels,
names=self.names, sortorder=sortorder,
verify_integrity=False)
return new_index, indexer
def _convert_listlike_indexer(self, keyarr, kind=None):
"""
Parameters
----------
keyarr : list-like
Indexer to convert.
Returns
-------
tuple (indexer, keyarr)
            indexer is an ndarray or None if it cannot be converted
keyarr are tuple-safe keys
"""
indexer, keyarr = super(MultiIndex, self)._convert_listlike_indexer(
keyarr, kind=kind)
# are we indexing a specific level
if indexer is None and len(keyarr) and not isinstance(keyarr[0],
tuple):
level = 0
_, indexer = self.reindex(keyarr, level=level)
# take all
if indexer is None:
indexer = np.arange(len(self))
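                # even though we take everything, verify that each requested
                # key actually exists in the first level and raise otherwise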
check = self.levels[0].get_indexer(keyarr)
mask = check == -1
if mask.any():
raise KeyError('%s not in index' % keyarr[mask])
return indexer, keyarr
@Appender(_index_shared_docs['get_indexer'] % _index_doc_kwargs)
def get_indexer(self, target, method=None, limit=None, tolerance=None):
method = missing.clean_reindex_fill_method(method)
target = ensure_index(target)
# empty indexer
if is_list_like(target) and not len(target):
return ensure_platform_int(np.array([]))
if not isinstance(target, MultiIndex):
try:
target = MultiIndex.from_tuples(target)
except (TypeError, ValueError):
# let's instead try with a straight Index
if method is None:
return Index(self.values).get_indexer(target,
method=method,
limit=limit,
tolerance=tolerance)
if not self.is_unique:
raise ValueError('Reindexing only valid with uniquely valued '
'Index objects')
if method == 'pad' or method == 'backfill':
if tolerance is not None:
raise NotImplementedError("tolerance not implemented yet "
'for MultiIndex')
indexer = self._engine.get_indexer(target, method, limit)
elif method == 'nearest':
raise NotImplementedError("method='nearest' not implemented yet "
'for MultiIndex; see GitHub issue 9365')
else:
indexer = self._engine.get_indexer(target)
return ensure_platform_int(indexer)
@Appender(_index_shared_docs['get_indexer_non_unique'] % _index_doc_kwargs)
def get_indexer_non_unique(self, target):
return super(MultiIndex, self).get_indexer_non_unique(target)
def reindex(self, target, method=None, level=None, limit=None,
tolerance=None):
"""
Create index with target's values (move/add/delete values as necessary)
Returns
-------
new_index : pd.MultiIndex
Resulting index
indexer : np.ndarray or None
Indices of output values in original index
"""
# GH6552: preserve names when reindexing to non-named target
# (i.e. neither Index nor Series).
preserve_names = not hasattr(target, 'names')
if level is not None:
if method is not None:
raise TypeError('Fill method not supported if level passed')
# GH7774: preserve dtype/tz if target is empty and not an Index.
# target may be an iterator
target = ibase._ensure_has_len(target)
if len(target) == 0 and not isinstance(target, Index):
idx = self.levels[level]
attrs = idx._get_attributes_dict()
attrs.pop('freq', None) # don't preserve freq
target = type(idx)._simple_new(np.empty(0, dtype=idx.dtype),
**attrs)
else:
target = ensure_index(target)
target, indexer, _ = self._join_level(target, level, how='right',
return_indexers=True,
keep_order=False)
else:
target = ensure_index(target)
if self.equals(target):
indexer = None
else:
if self.is_unique:
indexer = self.get_indexer(target, method=method,
limit=limit,
tolerance=tolerance)
else:
raise ValueError("cannot handle a non-unique multi-index!")
if not isinstance(target, MultiIndex):
if indexer is None:
target = self
elif (indexer >= 0).all():
target = self.take(indexer)
else:
# hopefully?
target = MultiIndex.from_tuples(target)
if (preserve_names and target.nlevels == self.nlevels and
target.names != self.names):
target = target.copy(deep=False)
target.names = self.names
return target, indexer
def get_slice_bound(self, label, side, kind):
if not isinstance(label, tuple):
label = label,
return self._partial_tup_index(label, side=side)
def slice_locs(self, start=None, end=None, step=None, kind=None):
"""
For an ordered MultiIndex, compute the slice locations for input
labels.
The input labels can be tuples representing partial levels, e.g. for a
MultiIndex with 3 levels, you can pass a single value (corresponding to
the first level), or a 1-, 2-, or 3-tuple.
Parameters
----------
start : label or tuple, default None
If None, defaults to the beginning
end : label or tuple
If None, defaults to the end
step : int or None
Slice step
        kind : string, optional, defaults to None
Returns
-------
(start, end) : (int, int)
Notes
-----
This method only works if the MultiIndex is properly lexsorted. So,
if only the first 2 levels of a 3-level MultiIndex are lexsorted,
you can only pass two levels to ``.slice_locs``.
Examples
--------
>>> mi = pd.MultiIndex.from_arrays([list('abbd'), list('deff')],
... names=['A', 'B'])
Get the slice locations from the beginning of 'b' in the first level
until the end of the multiindex:
>>> mi.slice_locs(start='b')
(1, 4)
Like above, but stop at the end of 'b' in the first level and 'f' in
the second level:
>>> mi.slice_locs(start='b', end=('b', 'f'))
(1, 3)
See Also
--------
MultiIndex.get_loc : Get location for a label or a tuple of labels.
MultiIndex.get_locs : Get location for a label/slice/list/mask or a
sequence of such.
"""
# This function adds nothing to its parent implementation (the magic
# happens in get_slice_bound method), but it adds meaningful doc.
return super(MultiIndex, self).slice_locs(start, end, step, kind=kind)
def _partial_tup_index(self, tup, side='left'):
if len(tup) > self.lexsort_depth:
raise UnsortedIndexError(
'Key length (%d) was greater than MultiIndex'
' lexsort depth (%d)' %
(len(tup), self.lexsort_depth))
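        # walk the key one level at a time, narrowing [start, end) to the
        # block of rows matching the partial key seen so far; the last
        # provided component is resolved with a side-aware searchsorted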
n = len(tup)
start, end = 0, len(self)
zipped = zip(tup, self.levels, self.codes)
for k, (lab, lev, labs) in enumerate(zipped):
section = labs[start:end]
if lab not in lev:
if not lev.is_type_compatible(lib.infer_dtype([lab],
skipna=False)):
raise TypeError('Level type mismatch: %s' % lab)
# short circuit
loc = lev.searchsorted(lab, side=side)
if side == 'right' and loc >= 0:
loc -= 1
return start + section.searchsorted(loc, side=side)
idx = lev.get_loc(lab)
if k < n - 1:
end = start + section.searchsorted(idx, side='right')
start = start + section.searchsorted(idx, side='left')
else:
return start + section.searchsorted(idx, side=side)
def get_loc(self, key, method=None):
"""
Get location for a label or a tuple of labels as an integer, slice or
boolean mask.
Parameters
----------
key : label or tuple of labels (one for each level)
method : None
Returns
-------
loc : int, slice object or boolean mask
If the key is past the lexsort depth, the return may be a
boolean mask array, otherwise it is always a slice or int.
Examples
        --------
>>> mi = pd.MultiIndex.from_arrays([list('abb'), list('def')])
>>> mi.get_loc('b')
slice(1, 3, None)
>>> mi.get_loc(('b', 'e'))
1
Notes
        -----
The key cannot be a slice, list of same-level labels, a boolean mask,
or a sequence of such. If you want to use those, use
:meth:`MultiIndex.get_locs` instead.
See Also
--------
Index.get_loc : The get_loc method for (single-level) index.
MultiIndex.slice_locs : Get slice location given start label(s) and
end label(s).
MultiIndex.get_locs : Get location for a label/slice/list/mask or a
sequence of such.
"""
if method is not None:
raise NotImplementedError('only the default get_loc method is '
'currently supported for MultiIndex')
def _maybe_to_slice(loc):
"""convert integer indexer to boolean mask or slice if possible"""
if not isinstance(loc, np.ndarray) or loc.dtype != 'int64':
return loc
loc = lib.maybe_indices_to_slice(loc, len(self))
if isinstance(loc, slice):
return loc
mask = np.empty(len(self), dtype='bool')
mask.fill(False)
mask[loc] = True
return mask
if not isinstance(key, tuple):
loc = self._get_level_indexer(key, level=0)
return _maybe_to_slice(loc)
keylen = len(key)
if self.nlevels < keylen:
raise KeyError('Key length ({0}) exceeds index depth ({1})'
''.format(keylen, self.nlevels))
if keylen == self.nlevels and self.is_unique:
return self._engine.get_loc(key)
# -- partial selection or non-unique index
# break the key into 2 parts based on the lexsort_depth of the index;
# the first part returns a continuous slice of the index; the 2nd part
# needs linear search within the slice
i = self.lexsort_depth
lead_key, follow_key = key[:i], key[i:]
start, stop = (self.slice_locs(lead_key, lead_key)
if lead_key else (0, len(self)))
if start == stop:
raise KeyError(key)
if not follow_key:
return slice(start, stop)
warnings.warn('indexing past lexsort depth may impact performance.',
PerformanceWarning, stacklevel=10)
loc = np.arange(start, stop, dtype='int64')
for i, k in enumerate(follow_key, len(lead_key)):
mask = self.codes[i][loc] == self.levels[i].get_loc(k)
if not mask.all():
loc = loc[mask]
if not len(loc):
raise KeyError(key)
return (_maybe_to_slice(loc) if len(loc) != stop - start else
slice(start, stop))
def get_loc_level(self, key, level=0, drop_level=True):
"""
Get both the location for the requested label(s) and the
resulting sliced index.
Parameters
----------
key : label or sequence of labels
level : int/level name or list thereof, optional
drop_level : bool, default True
if ``False``, the resulting index will not drop any level.
Returns
-------
loc : A 2-tuple where the elements are:
Element 0: int, slice object or boolean array
Element 1: The resulting sliced multiindex/index. If the key
contains all levels, this will be ``None``.
Examples
--------
>>> mi = pd.MultiIndex.from_arrays([list('abb'), list('def')],
... names=['A', 'B'])
>>> mi.get_loc_level('b')
(slice(1, 3, None), Index(['e', 'f'], dtype='object', name='B'))
>>> mi.get_loc_level('e', level='B')
(array([False, True, False], dtype=bool),
Index(['b'], dtype='object', name='A'))
>>> mi.get_loc_level(['b', 'e'])
(1, None)
See Also
        --------
MultiIndex.get_loc : Get location for a label or a tuple of labels.
MultiIndex.get_locs : Get location for a label/slice/list/mask or a
sequence of such.
"""
def maybe_droplevels(indexer, levels, drop_level):
if not drop_level:
return self[indexer]
            # kludge around
orig_index = new_index = self[indexer]
levels = [self._get_level_number(i) for i in levels]
for i in sorted(levels, reverse=True):
try:
new_index = new_index.droplevel(i)
except ValueError:
# no dropping here
return orig_index
return new_index
if isinstance(level, (tuple, list)):
if len(key) != len(level):
raise AssertionError('Key for location must have same '
'length as number of levels')
result = None
for lev, k in zip(level, key):
loc, new_index = self.get_loc_level(k, level=lev)
if isinstance(loc, slice):
mask = np.zeros(len(self), dtype=bool)
mask[loc] = True
loc = mask
result = loc if result is None else result & loc
return result, maybe_droplevels(result, level, drop_level)
level = self._get_level_number(level)
# kludge for #1796
if isinstance(key, list):
key = tuple(key)
if isinstance(key, tuple) and level == 0:
try:
if key in self.levels[0]:
indexer = self._get_level_indexer(key, level=level)
new_index = maybe_droplevels(indexer, [0], drop_level)
return indexer, new_index
except TypeError:
pass
if not any(isinstance(k, slice) for k in key):
# partial selection
# optionally get indexer to avoid re-calculation
def partial_selection(key, indexer=None):
if indexer is None:
indexer = self.get_loc(key)
ilevels = [i for i in range(len(key))
if key[i] != slice(None, None)]
return indexer, maybe_droplevels(indexer, ilevels,
drop_level)
if len(key) == self.nlevels and self.is_unique:
# Complete key in unique index -> standard get_loc
return (self._engine.get_loc(key), None)
else:
return partial_selection(key)
else:
indexer = None
for i, k in enumerate(key):
if not isinstance(k, slice):
k = self._get_level_indexer(k, level=i)
if isinstance(k, slice):
# everything
if k.start == 0 and k.stop == len(self):
k = slice(None, None)
else:
k_index = k
if isinstance(k, slice):
if k == slice(None, None):
continue
else:
raise TypeError(key)
if indexer is None:
indexer = k_index
else: # pragma: no cover
indexer &= k_index
if indexer is None:
indexer = slice(None, None)
ilevels = [i for i in range(len(key))
if key[i] != slice(None, None)]
return indexer, maybe_droplevels(indexer, ilevels, drop_level)
else:
indexer = self._get_level_indexer(key, level=level)
return indexer, maybe_droplevels(indexer, [level], drop_level)
def _get_level_indexer(self, key, level=0, indexer=None):
# return an indexer, boolean array or a slice showing where the key is
# in the totality of values
# if the indexer is provided, then use this
level_index = self.levels[level]
level_codes = self.codes[level]
def convert_indexer(start, stop, step, indexer=indexer,
codes=level_codes):
# given the inputs and the codes/indexer, compute an indexer set
# if we have a provided indexer, then this need not consider
# the entire labels set
r = np.arange(start, stop, step)
if indexer is not None and len(indexer) != len(codes):
                # we have an indexer which maps only the locations in the
                # codes that we have already selected (it is not an indexer
                # for the entire set); examining every location would be
                # wasteful, so we only look at locations in this set. The
                # positions found within that selection are then mapped back
                # to locations in the full index via the mapper
from pandas import Series
mapper = Series(indexer)
indexer = codes.take(ensure_platform_int(indexer))
result = Series(Index(indexer).isin(r).nonzero()[0])
m = result.map(mapper)._ndarray_values
else:
m = np.zeros(len(codes), dtype=bool)
m[np.in1d(codes, r,
assume_unique=Index(codes).is_unique)] = True
return m
if isinstance(key, slice):
            # handle a slice, returning a slice if we can
# otherwise a boolean indexer
try:
if key.start is not None:
start = level_index.get_loc(key.start)
else:
start = 0
if key.stop is not None:
stop = level_index.get_loc(key.stop)
else:
stop = len(level_index) - 1
step = key.step
except KeyError:
# we have a partial slice (like looking up a partial date
# string)
start = stop = level_index.slice_indexer(key.start, key.stop,
key.step, kind='loc')
step = start.step
if isinstance(start, slice) or isinstance(stop, slice):
# we have a slice for start and/or stop
# a partial date slicer on a DatetimeIndex generates a slice
# note that the stop ALREADY includes the stopped point (if
# it was a string sliced)
return convert_indexer(start.start, stop.stop, step)
elif level > 0 or self.lexsort_depth == 0 or step is not None:
                # need right-inclusive search semantics here, like when we
                # are using a slice, so include stop + 1 (so that stop itself
                # is included)
return convert_indexer(start, stop + 1, step)
else:
# sorted, so can return slice object -> view
i = level_codes.searchsorted(start, side='left')
j = level_codes.searchsorted(stop, side='right')
return slice(i, j, step)
else:
code = level_index.get_loc(key)
if level > 0 or self.lexsort_depth == 0:
# Desired level is not sorted
locs = np.array(level_codes == code, dtype=bool, copy=False)
if not locs.any():
# The label is present in self.levels[level] but unused:
raise KeyError(key)
return locs
i = level_codes.searchsorted(code, side='left')
j = level_codes.searchsorted(code, side='right')
if i == j:
# The label is present in self.levels[level] but unused:
raise KeyError(key)
return slice(i, j)
def get_locs(self, seq):
"""
Get location for a given label/slice/list/mask or a sequence of such as
an array of integers.
Parameters
----------
seq : label/slice/list/mask or a sequence of such
You should use one of the above for each level.
If a level should not be used, set it to ``slice(None)``.
Returns
-------
locs : array of integers suitable for passing to iloc
Examples
        --------
>>> mi = pd.MultiIndex.from_arrays([list('abb'), list('def')])
>>> mi.get_locs('b')
array([1, 2], dtype=int64)
>>> mi.get_locs([slice(None), ['e', 'f']])
array([1, 2], dtype=int64)
>>> mi.get_locs([[True, False, True], slice('e', 'f')])
array([2], dtype=int64)
See Also
--------
MultiIndex.get_loc : Get location for a label or a tuple of labels.
MultiIndex.slice_locs : Get slice location given start label(s) and
end label(s).
"""
from .numeric import Int64Index
# must be lexsorted to at least as many levels
true_slices = [i for (i, s) in enumerate(com.is_true_slices(seq)) if s]
if true_slices and true_slices[-1] >= self.lexsort_depth:
raise UnsortedIndexError('MultiIndex slicing requires the index '
'to be lexsorted: slicing on levels {0}, '
'lexsort depth {1}'
.format(true_slices, self.lexsort_depth))
# indexer
# this is the list of all values that we want to select
n = len(self)
indexer = None
def _convert_to_indexer(r):
# return an indexer
if isinstance(r, slice):
m = np.zeros(n, dtype=bool)
m[r] = True
r = m.nonzero()[0]
elif com.is_bool_indexer(r):
if len(r) != n:
raise ValueError("cannot index with a boolean indexer "
"that is not the same length as the "
"index")
r = r.nonzero()[0]
return Int64Index(r)
def _update_indexer(idxr, indexer=indexer):
if indexer is None:
indexer = Index(np.arange(n))
if idxr is None:
return indexer
return indexer & idxr
for i, k in enumerate(seq):
if com.is_bool_indexer(k):
# a boolean indexer, must be the same length!
k = np.asarray(k)
indexer = _update_indexer(_convert_to_indexer(k),
indexer=indexer)
elif is_list_like(k):
# a collection of labels to include from this level (these
# are or'd)
indexers = None
for x in k:
try:
idxrs = _convert_to_indexer(
self._get_level_indexer(x, level=i,
indexer=indexer))
indexers = (idxrs if indexers is None
else indexers | idxrs)
except KeyError:
# ignore not founds
continue
if indexers is not None:
indexer = _update_indexer(indexers, indexer=indexer)
else:
# no matches we are done
return Int64Index([])._ndarray_values
elif com.is_null_slice(k):
# empty slice
indexer = _update_indexer(None, indexer=indexer)
elif isinstance(k, slice):
# a slice, include BOTH of the labels
indexer = _update_indexer(_convert_to_indexer(
self._get_level_indexer(k, level=i, indexer=indexer)),
indexer=indexer)
else:
# a single label
indexer = _update_indexer(_convert_to_indexer(
self.get_loc_level(k, level=i, drop_level=False)[0]),
indexer=indexer)
# empty indexer
if indexer is None:
return Int64Index([])._ndarray_values
return indexer._ndarray_values
def truncate(self, before=None, after=None):
"""
Slice index between two labels / tuples, return new MultiIndex
Parameters
----------
before : label or tuple, can be partial. Default None
None defaults to start
after : label or tuple, can be partial. Default None
None defaults to end
Returns
-------
truncated : MultiIndex
"""
if after and before and after < before:
raise ValueError('after < before')
i, j = self.levels[0].slice_locs(before, after)
left, right = self.slice_locs(before, after)
new_levels = list(self.levels)
new_levels[0] = new_levels[0][i:j]
new_codes = [level_codes[left:right] for level_codes in self.codes]
new_codes[0] = new_codes[0] - i
return MultiIndex(levels=new_levels, codes=new_codes,
verify_integrity=False)
def equals(self, other):
"""
Determines if two MultiIndex objects have the same labeling information
(the levels themselves do not necessarily have to be the same)
See Also
--------
equal_levels
"""
if self.is_(other):
return True
if not isinstance(other, Index):
return False
if not isinstance(other, MultiIndex):
other_vals = com.values_from_object(ensure_index(other))
return array_equivalent(self._ndarray_values, other_vals)
if self.nlevels != other.nlevels:
return False
if len(self) != len(other):
return False
for i in range(self.nlevels):
self_codes = self.codes[i]
self_codes = self_codes[self_codes != -1]
self_values = algos.take_nd(np.asarray(self.levels[i]._values),
self_codes, allow_fill=False)
other_codes = other.codes[i]
other_codes = other_codes[other_codes != -1]
other_values = algos.take_nd(
np.asarray(other.levels[i]._values),
other_codes, allow_fill=False)
            # since we use NaT for both datetime64 and timedelta64, we can
            # have a situation where a level is typed, say, timedelta64 in
            # self (IOW it has values other than NaT) but typed datetime64
            # in other (where it is all NaT); these are still equivalent
if len(self_values) == 0 and len(other_values) == 0:
continue
if not array_equivalent(self_values, other_values):
return False
return True
def equal_levels(self, other):
"""
Return True if the levels of both MultiIndex objects are the same
"""
if self.nlevels != other.nlevels:
return False
for i in range(self.nlevels):
if not self.levels[i].equals(other.levels[i]):
return False
return True
def union(self, other, sort=None):
"""
Form the union of two MultiIndex objects
Parameters
----------
other : MultiIndex or array / Index of tuples
sort : False or None, default None
Whether to sort the resulting Index.
* None : Sort the result, except when
1. `self` and `other` are equal.
2. `self` has length 0.
3. Some values in `self` or `other` cannot be compared.
A RuntimeWarning is issued in this case.
* False : do not sort the result.
.. versionadded:: 0.24.0
.. versionchanged:: 0.24.1
Changed the default value from ``True`` to ``None``
(without change in behaviour).
Returns
-------
Index
        Examples
        --------
        >>> index.union(index2)
"""
self._validate_sort_keyword(sort)
self._assert_can_do_setop(other)
other, result_names = self._convert_can_do_setop(other)
if len(other) == 0 or self.equals(other):
return self
# TODO: Index.union returns other when `len(self)` is 0.
uniq_tuples = lib.fast_unique_multiple([self._ndarray_values,
other._ndarray_values],
sort=sort)
return MultiIndex.from_arrays(lzip(*uniq_tuples), sortorder=0,
names=result_names)
def intersection(self, other, sort=False):
"""
Form the intersection of two MultiIndex objects.
Parameters
----------
other : MultiIndex or array / Index of tuples
sort : False or None, default False
Sort the resulting MultiIndex if possible
.. versionadded:: 0.24.0
.. versionchanged:: 0.24.1
Changed the default from ``True`` to ``False``, to match
behaviour from before 0.24.0
Returns
-------
Index
"""
self._validate_sort_keyword(sort)
self._assert_can_do_setop(other)
other, result_names = self._convert_can_do_setop(other)
if self.equals(other):
return self
self_tuples = self._ndarray_values
other_tuples = other._ndarray_values
uniq_tuples = set(self_tuples) & set(other_tuples)
if sort is None:
uniq_tuples = sorted(uniq_tuples)
if len(uniq_tuples) == 0:
return MultiIndex(levels=self.levels,
codes=[[]] * self.nlevels,
names=result_names, verify_integrity=False)
else:
return MultiIndex.from_arrays(lzip(*uniq_tuples), sortorder=0,
names=result_names)
def difference(self, other, sort=None):
"""
Compute set difference of two MultiIndex objects
Parameters
----------
other : MultiIndex
sort : False or None, default None
Sort the resulting MultiIndex if possible
.. versionadded:: 0.24.0
.. versionchanged:: 0.24.1
Changed the default value from ``True`` to ``None``
(without change in behaviour).
Returns
-------
diff : MultiIndex
"""
self._validate_sort_keyword(sort)
self._assert_can_do_setop(other)
other, result_names = self._convert_can_do_setop(other)
if len(other) == 0:
return self
if self.equals(other):
return MultiIndex(levels=self.levels,
codes=[[]] * self.nlevels,
names=result_names, verify_integrity=False)
this = self._get_unique_index()
indexer = this.get_indexer(other)
indexer = indexer.take((indexer != -1).nonzero()[0])
label_diff = np.setdiff1d(np.arange(this.size), indexer,
assume_unique=True)
difference = this.values.take(label_diff)
if sort is None:
difference = sorted(difference)
if len(difference) == 0:
return MultiIndex(levels=[[]] * self.nlevels,
codes=[[]] * self.nlevels,
names=result_names, verify_integrity=False)
else:
return MultiIndex.from_tuples(difference, sortorder=0,
names=result_names)
@Appender(_index_shared_docs['astype'])
def astype(self, dtype, copy=True):
dtype = pandas_dtype(dtype)
if is_categorical_dtype(dtype):
msg = '> 1 ndim Categorical are not supported at this time'
raise NotImplementedError(msg)
elif not is_object_dtype(dtype):
msg = ('Setting {cls} dtype to anything other than object '
'is not supported').format(cls=self.__class__)
raise TypeError(msg)
elif copy is True:
return self._shallow_copy()
return self
def _convert_can_do_setop(self, other):
result_names = self.names
if not hasattr(other, 'names'):
if len(other) == 0:
other = MultiIndex(levels=[[]] * self.nlevels,
codes=[[]] * self.nlevels,
verify_integrity=False)
else:
msg = 'other must be a MultiIndex or a list of tuples'
try:
other = MultiIndex.from_tuples(other)
except TypeError:
raise TypeError(msg)
else:
result_names = self.names if self.names == other.names else None
return other, result_names
def insert(self, loc, item):
"""
Make new MultiIndex inserting new item at location
Parameters
----------
loc : int
item : tuple
Must be same length as number of levels in the MultiIndex
Returns
-------
new_index : Index
"""
# Pad the key with empty strings if lower levels of the key
# aren't specified:
if not isinstance(item, tuple):
item = (item, ) + ('', ) * (self.nlevels - 1)
elif len(item) != self.nlevels:
raise ValueError('Item must have length equal to number of '
'levels.')
new_levels = []
new_codes = []
for k, level, level_codes in zip(item, self.levels, self.codes):
if k not in level:
# have to insert into level
# must insert at end otherwise you have to recompute all the
# other codes
lev_loc = len(level)
level = level.insert(lev_loc, k)
else:
lev_loc = level.get_loc(k)
new_levels.append(level)
new_codes.append(np.insert(
ensure_int64(level_codes), loc, lev_loc))
return MultiIndex(levels=new_levels, codes=new_codes,
names=self.names, verify_integrity=False)
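    # A minimal usage sketch for ``insert``, assuming a hypothetical two-level
    # index; a scalar item is padded with empty strings for the missing lower
    # levels, while a full-length tuple is inserted as given.
    #
    # >>> idx = MultiIndex.from_tuples([('a', 1), ('b', 2)])
    # >>> idx.insert(0, 'new')       # inserted as ('new', '')
    # >>> idx.insert(1, ('c', 3))    # inserted as ('c', 3)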
def delete(self, loc):
"""
Make new index with passed location deleted
Returns
-------
new_index : MultiIndex
"""
new_codes = [np.delete(level_codes, loc) for level_codes in self.codes]
return MultiIndex(levels=self.levels, codes=new_codes,
names=self.names, verify_integrity=False)
def _wrap_joined_index(self, joined, other):
names = self.names if self.names == other.names else None
return MultiIndex.from_tuples(joined, names=names)
@Appender(Index.isin.__doc__)
def isin(self, values, level=None):
if level is None:
values = MultiIndex.from_tuples(values,
names=self.names).values
return algos.isin(self.values, values)
else:
num = self._get_level_number(level)
levs = self.levels[num]
level_codes = self.codes[num]
sought_labels = levs.isin(values).nonzero()[0]
if levs.size == 0:
return np.zeros(len(level_codes), dtype=np.bool_)
else:
return np.lib.arraysetops.in1d(level_codes, sought_labels)
MultiIndex._add_numeric_methods_disabled()
MultiIndex._add_numeric_methods_add_sub_disabled()
MultiIndex._add_logical_methods_disabled()
def _sparsify(label_list, start=0, sentinel=''):
pivoted = lzip(*label_list)
k = len(label_list)
result = pivoted[:start + 1]
prev = pivoted[start]
for cur in pivoted[start + 1:]:
sparse_cur = []
for i, (p, t) in enumerate(zip(prev, cur)):
if i == k - 1:
sparse_cur.append(t)
result.append(sparse_cur)
break
if p == t:
sparse_cur.append(sentinel)
else:
sparse_cur.extend(cur[i:])
result.append(sparse_cur)
break
prev = cur
return lzip(*result)
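# A schematic sketch of what _sparsify does with a small hypothetical label
# list: within each run of repeated leading labels, only the first occurrence
# is kept and the rest are replaced with the sentinel, so a rendered
# MultiIndex shows a group label only on its first row.
#
#   _sparsify([['a', 'a', 'b'], ['x', 'y', 'x']])
#     -> [('a', '', 'b'), ('x', 'y', 'x')]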
def _get_na_rep(dtype):
return {np.datetime64: 'NaT', np.timedelta64: 'NaT'}.get(dtype, 'NaN')
| GuessWhoSamFoo/pandas | pandas/core/indexes/multi.py | Python | bsd-3-clause | 113,443 | 0.000079 |
"""This is the Bokeh charts interface. It gives you a high level API to build
complex plots in a simple way.
This is the TimeSeries class which lets you build your TimeSeries charts just
by passing the arguments to the Chart class and calling the proper functions.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import absolute_import
from six import string_types
try:
import pandas as pd
except ImportError:
pd = None
from ..utils import chunk, cycle_colors
from .._builder import Builder, create_and_build
from ...models import ColumnDataSource, DataRange1d, GlyphRenderer, Range1d
from ...models.glyphs import Line
from ...properties import Any
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
def TimeSeries(values, index=None, xscale='datetime', **kws):
""" Create a timeseries chart using
:class:`TimeSeriesBuilder <bokeh.charts.builder.timeseries_builder.TimeSeriesBuilder>`
to render the lines from values and index.
Args:
        values (iterable): a 2d iterable containing the values. Can be anything that
            can be converted to a 2d array; the x (time) axis is determined by
            ``index``, while the other columns are interpreted as y values.
index (str|1d iterable, optional): can be used to specify a common custom
            index for all data series as a **1d iterable** of any sort that will be
            used as the common index for the series, or a **string** that corresponds
            to the key of the mapping to be used as the index (and not as a data
            series) if ``values`` is a mapping (like a dict, an OrderedDict
or a pandas DataFrame)
    In addition to the parameters specific to this chart,
:ref:`userguide_charts_generic_arguments` are also accepted as keyword parameters.
Returns:
a new :class:`Chart <bokeh.charts.Chart>`
Examples:
.. bokeh-plot::
:source-position: above
from collections import OrderedDict
import datetime
from bokeh.charts import TimeSeries, output_file, show
# (dict, OrderedDict, lists, arrays and DataFrames are valid inputs)
now = datetime.datetime.now()
delta = datetime.timedelta(minutes=1)
dts = [now + delta*i for i in range(5)]
xyvalues = OrderedDict({'Date': dts})
y_python = xyvalues['python'] = [2, 3, 7, 5, 26]
y_pypy = xyvalues['pypy'] = [12, 33, 47, 15, 126]
y_jython = xyvalues['jython'] = [22, 43, 10, 25, 26]
ts = TimeSeries(xyvalues, index='Date', title="TimeSeries", legend="top_left",
ylabel='Languages')
output_file('timeseries.html')
show(ts)
"""
return create_and_build(
TimeSeriesBuilder, values, index=index, xscale=xscale, **kws
)
class TimeSeriesBuilder(Builder):
"""This is the TimeSeries class and it is in charge of plotting
TimeSeries charts in an easy and intuitive way.
Essentially, we provide a way to ingest the data, make the proper
calculations and push the references into a source object.
We additionally make calculations for the ranges.
And finally add the needed lines taking the references from the source.
"""
index = Any(help="""
An index to be used for all data series as follows:
- A 1d iterable of any sort that will be used as
series common index
    - A string that corresponds to the key of the
        mapping to be used as index (and not as data
        series) if ``values`` is a mapping (like a dict,
an OrderedDict or a pandas DataFrame)
""")
def _process_data(self):
"""Take the x/y data from the timeseries values.
It calculates the chart properties accordingly. Then build a dict
containing references to all the points to be used by
the line glyph inside the ``_yield_renderers`` method.
"""
self._data = dict()
# list to save all the attributes we are going to create
self._attr = []
        # necessary to make all formats and encoders happy with array, blaze, ...
        xs = list(self._values_index)
for col, values in self._values.items():
if isinstance(self.index, string_types) \
and col == self.index:
continue
            # save all the groups available in the incoming input
self._groups.append(col)
self.set_and_get("x_", col, xs)
self.set_and_get("y_", col, values)
def _set_sources(self):
"""Push the TimeSeries data into the ColumnDataSource and
calculate the proper ranges.
"""
self._source = ColumnDataSource(self._data)
self.x_range = DataRange1d()
y_names = self._attr[1::2]
endy = max(max(self._data[i]) for i in y_names)
starty = min(min(self._data[i]) for i in y_names)
self.y_range = Range1d(
start=starty - 0.1 * (endy - starty),
end=endy + 0.1 * (endy - starty)
)
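    # Worked numbers for the 10% padding above, assuming hypothetical series
    # whose y values span starty=2 and endy=126 (so endy - starty == 124):
    #
    #   y_range == Range1d(start=2 - 0.1 * 124, end=126 + 0.1 * 124)
    #           == Range1d(start=-10.4, end=138.4)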
def _yield_renderers(self):
"""Use the line glyphs to connect the xy points in the time series.
Takes reference points from the data loaded at the ColumnDataSource.
"""
self._duplet = list(chunk(self._attr, 2))
colors = cycle_colors(self._duplet, self.palette)
for i, (x, y) in enumerate(self._duplet, start=1):
glyph = Line(x=x, y=y, line_color=colors[i - 1])
renderer = GlyphRenderer(data_source=self._source, glyph=glyph)
self._legends.append((self._groups[i-1], [renderer]))
yield renderer
| carlvlewis/bokeh | bokeh/charts/builder/timeseries_builder.py | Python | bsd-3-clause | 6,252 | 0.002879 |
# MIT License
# Copyright (c) 2017 MassChallenge, Inc.
from impact.tests.api_test_case import APITestCase
from impact.tests.factories import JudgingRoundFactory
class TestJudgingRound(APITestCase):
def test_str(self):
judging_round = JudgingRoundFactory()
judging_round_string = str(judging_round)
assert judging_round.name in judging_round_string
assert str(judging_round.program) in judging_round_string
| masschallenge/impact-api | web/impact/impact/tests/test_judging_round.py | Python | mit | 445 | 0 |
from robber import expect
from robber.explanation import Explanation
from robber.matchers.base import Base
class RespondTo(Base):
"""
expect(obj).to.respond_to('method')
"""
def matches(self):
return hasattr(self.actual, self.expected) and callable(getattr(self.actual, self.expected))
@property
def explanation(self):
return Explanation(self.actual, self.is_negative, 'respond to', self.expected)
expect.register('respond_to', RespondTo)
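# A minimal, hedged usage sketch of the matcher registered above; the Greeter
# class below is hypothetical and only illustrates the documented form
# ``expect(obj).to.respond_to('method')``.
if __name__ == '__main__':
    class Greeter(object):
        def greet(self):
            return 'hello'
    # A callable attribute satisfies the matcher; a missing or non-callable
    # attribute would make it fail.
    expect(Greeter()).to.respond_to('greet')
    print('respond_to sketch: assertion passed')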
| vesln/robber.py | robber/matchers/respond_to.py | Python | mit | 485 | 0.004124 |
from gettext import gettext as _
import logging
from pulp.plugins.util.publish_step import PublishStep, UnitPublishStep
from pulp_openstack.common import constants
from pulp_openstack.common import openstack_utils
_logger = logging.getLogger(__name__)
class GlancePublisher(PublishStep):
"""
Openstack Image Web publisher class that pushes images into Glance.
"""
def __init__(self, repo, publish_conduit, config):
"""
        :param repo: Pulp managed repository
:type repo: pulp.plugins.model.Repository
:param publish_conduit: Conduit providing access to relative Pulp functionality
:type publish_conduit: pulp.plugins.conduits.repo_publish.RepoPublishConduit
:param config: Pulp configuration for the distributor
:type config: pulp.plugins.config.PluginCallConfiguration
"""
super(GlancePublisher, self).__init__(constants.PUBLISH_STEP_GLANCE_PUBLISHER,
repo, publish_conduit, config)
publish_step = PublishStep(constants.PUBLISH_STEP_OVER_GLANCE_REST)
publish_step.description = _('Pushing files to Glance.')
self.add_child(PublishImagesStep())
class PublishImagesStep(UnitPublishStep):
"""
Publish Images
"""
def __init__(self):
"""
Initialize publisher.
"""
super(PublishImagesStep, self).__init__(constants.PUBLISH_STEP_IMAGES,
constants.IMAGE_TYPE_ID)
self.context = None
self.description = _('Publishing Image Files.')
def initialize(self):
"""
Initialize publisher (second phase).
"""
_logger.info("initizialing, setting up connection to OpenStack")
keystone_conf = {'username': self.get_config().get('keystone-username'),
'password': self.get_config().get('keystone-password'),
'tenant_name': self.get_config().get('keystone-tenant'),
'auth_url': self.get_config().get('keystone-url')}
self.ou = openstack_utils.OpenstackUtils(**keystone_conf)
# this is to keep track of images we touched during process_unit(). At
# the end, anything untouched in glance that has the correct repo
# metadata is deleted.
self.images_processed = []
def process_unit(self, unit):
"""
        Push the unit's image into Glance if it is not already present.
:param unit: The unit to process
:type unit: pulp_openstack.common.models.OpenstackImage
"""
        # we need to add the image checksum to our processed list ASAP,
        # otherwise it will be deleted via finalize()
self.images_processed.append(unit.unit_key['image_checksum'])
_logger.debug("pushing unit %s from repo %s to glance" % (unit, self.get_repo().id))
images = list(self.ou.find_image(self.get_repo().id, unit.unit_key['image_checksum']))
_logger.debug("found existing image in glance: %s" % images)
if len(images) > 1:
raise RuntimeError("more than one image found with same checksum for repo %s!" %
self.get_repo().id)
if not images:
self.ou.create_image(unit.storage_path, self.get_repo().id,
name=unit.metadata['image_name'],
checksum=unit.unit_key['image_checksum'],
size=unit.metadata['image_size'])
else:
_logger.debug("image already exists, skipping publish")
def finalize(self):
"""
Finalize publish.
This examines self.images_processed and performs any deletions.
"""
# this could be more elegant
glance_image_by_checksum = {}
glance_images = self.ou.find_repo_images(self.get_repo().id)
for glance_image in glance_images:
glance_image_by_checksum[glance_image.checksum] = glance_image
_logger.debug("images in glance associated with repo: %s" % glance_image_by_checksum.keys())
pulp_image_checksums = self.images_processed
_logger.debug("images in pulp associated with repo: %s" % pulp_image_checksums)
for pulp_image_checksum in pulp_image_checksums:
if pulp_image_checksum not in glance_image_by_checksum.keys():
raise RuntimeError("Images found in pulp repo that were not published to glance. "
"Please consult error log for more details.")
for glance_image_checksum in glance_image_by_checksum:
if glance_image_checksum not in pulp_image_checksums:
_logger.info("deleting image with checksum %s from glance" % glance_image_checksum)
self.ou.delete_image(glance_image_by_checksum[glance_image_checksum])
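# A schematic sketch of the reconciliation performed in finalize() above,
# assuming hypothetical checksum values: images touched in process_unit() are
# kept, while repo-tagged images left over in Glance are deleted.
#
#   pulp_checksums = ['aaa', 'bbb']            # touched in process_unit()
#   glance_checksums = {'aaa', 'bbb', 'ccc'}   # currently tagged with repo id
#   to_delete = glance_checksums - set(pulp_checksums)   # -> {'ccc'}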
| rbarlow/pulp_openstack | plugins/pulp_openstack/plugins/distributors/glance_publish_steps.py | Python | gpl-2.0 | 4,901 | 0.003265 |
#!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
__license__ = 'GPL v3'
__copyright__ = '2010, Kovid Goyal <[email protected]>'
__docformat__ = 'restructuredtext en'
import json, traceback
from PyQt4.Qt import QDialogButtonBox
from calibre.gui2 import error_dialog, warning_dialog
from calibre.gui2.preferences import ConfigWidgetBase, test_widget
from calibre.gui2.preferences.template_functions_ui import Ui_Form
from calibre.gui2.widgets import PythonHighlighter
from calibre.utils.formatter_functions import (formatter_functions,
compile_user_function, load_user_template_functions)
class ConfigWidget(ConfigWidgetBase, Ui_Form):
def genesis(self, gui):
self.gui = gui
self.db = gui.library_view.model().db
help_text = _('''
<p>Here you can add and remove functions used in template processing. A
template function is written in python. It takes information from the
book, processes it in some way, then returns a string result. Functions
defined here are usable in templates in the same way that builtin
functions are usable. The function must be named <b>evaluate</b>, and
must have the signature shown below.</p>
<p><code>evaluate(self, formatter, kwargs, mi, locals, your parameters)
→ returning a unicode string</code></p>
<p>The parameters of the evaluate function are:
<ul>
<li><b>formatter</b>: the instance of the formatter being used to
evaluate the current template. You can use this to do recursive
template evaluation.</li>
<li><b>kwargs</b>: a dictionary of metadata. Field values are in this
dictionary.
<li><b>mi</b>: a Metadata instance. Used to get field information.
This parameter can be None in some cases, such as when evaluating
non-book templates.</li>
<li><b>locals</b>: the local variables assigned to by the current
template program.</li>
<li><b>your parameters</b>: You must supply one or more formal
parameters. The number must match the arg count box, unless arg count is
        -1 (variable number of arguments), in which case the last argument must
be *args. At least one argument is required, and is usually the value of
the field being operated upon. Note that when writing in basic template
mode, the user does not provide this first argument. Instead it is
supplied by the formatter.</li>
</ul></p>
<p>
The following example function checks the value of the field. If the
field is not empty, the field's value is returned, otherwise the value
EMPTY is returned.
<pre>
name: my_ifempty
arg count: 1
doc: my_ifempty(val) -- return val if it is not empty, otherwise the string 'EMPTY'
program code:
def evaluate(self, formatter, kwargs, mi, locals, val):
if val:
return val
else:
return 'EMPTY'</pre>
This function can be called in any of the three template program modes:
<ul>
<li>single-function mode: {tags:my_ifempty()}</li>
<li>template program mode: {tags:'my_ifempty($)'}</li>
<li>general program mode: program: my_ifempty(field('tags'))</li>
        </ul></p>
''')
self.textBrowser.setHtml(help_text)
def initialize(self):
try:
self.builtin_source_dict = json.loads(P('template-functions.json', data=True,
allow_user_override=False).decode('utf-8'))
except:
traceback.print_exc()
self.builtin_source_dict = {}
self.funcs = formatter_functions().get_functions()
self.builtins = formatter_functions().get_builtins_and_aliases()
self.build_function_names_box()
self.function_name.currentIndexChanged[str].connect(self.function_index_changed)
self.function_name.editTextChanged.connect(self.function_name_edited)
self.argument_count.valueChanged.connect(self.enable_replace_button)
self.documentation.textChanged.connect(self.enable_replace_button)
self.program.textChanged.connect(self.enable_replace_button)
self.create_button.clicked.connect(self.create_button_clicked)
self.delete_button.clicked.connect(self.delete_button_clicked)
self.create_button.setEnabled(False)
self.delete_button.setEnabled(False)
self.replace_button.setEnabled(False)
self.clear_button.clicked.connect(self.clear_button_clicked)
self.replace_button.clicked.connect(self.replace_button_clicked)
self.program.setTabStopWidth(20)
self.highlighter = PythonHighlighter(self.program.document())
def enable_replace_button(self):
self.replace_button.setEnabled(self.delete_button.isEnabled())
def clear_button_clicked(self):
self.build_function_names_box()
self.program.clear()
self.documentation.clear()
self.argument_count.clear()
self.create_button.setEnabled(False)
self.delete_button.setEnabled(False)
def build_function_names_box(self, scroll_to='', set_to=''):
self.function_name.blockSignals(True)
func_names = sorted(self.funcs)
self.function_name.clear()
self.function_name.addItem('')
self.function_name.addItems(func_names)
self.function_name.setCurrentIndex(0)
if set_to:
self.function_name.setEditText(set_to)
self.create_button.setEnabled(True)
self.function_name.blockSignals(False)
if scroll_to:
idx = self.function_name.findText(scroll_to)
if idx >= 0:
self.function_name.setCurrentIndex(idx)
if scroll_to not in self.builtins:
self.delete_button.setEnabled(True)
def delete_button_clicked(self):
name = unicode(self.function_name.currentText())
if name in self.builtins:
error_dialog(self.gui, _('Template functions'),
_('You cannot delete a built-in function'), show=True)
if name in self.funcs:
del self.funcs[name]
self.changed_signal.emit()
self.create_button.setEnabled(True)
self.delete_button.setEnabled(False)
self.build_function_names_box(set_to=name)
self.program.setReadOnly(False)
else:
error_dialog(self.gui, _('Template functions'),
_('Function not defined'), show=True)
def create_button_clicked(self):
self.changed_signal.emit()
name = unicode(self.function_name.currentText())
if name in self.funcs:
error_dialog(self.gui, _('Template functions'),
_('Name %s already used')%(name,), show=True)
return
if self.argument_count.value() == 0:
box = warning_dialog(self.gui, _('Template functions'),
_('Argument count should be -1 or greater than zero. '
'Setting it to zero means that this function cannot '
'be used in single function mode.'), det_msg = '',
show=False)
box.bb.setStandardButtons(box.bb.standardButtons() | QDialogButtonBox.Cancel)
box.det_msg_toggle.setVisible(False)
if not box.exec_():
return
try:
prog = unicode(self.program.toPlainText())
cls = compile_user_function(name, unicode(self.documentation.toPlainText()),
self.argument_count.value(), prog)
self.funcs[name] = cls
self.build_function_names_box(scroll_to=name)
except:
error_dialog(self.gui, _('Template functions'),
_('Exception while compiling function'), show=True,
det_msg=traceback.format_exc())
def function_name_edited(self, txt):
self.documentation.setReadOnly(False)
self.argument_count.setReadOnly(False)
self.create_button.setEnabled(True)
self.replace_button.setEnabled(False)
self.program.setReadOnly(False)
def function_index_changed(self, txt):
txt = unicode(txt)
self.create_button.setEnabled(False)
if not txt:
self.argument_count.clear()
self.documentation.clear()
self.documentation.setReadOnly(False)
self.argument_count.setReadOnly(False)
return
func = self.funcs[txt]
self.argument_count.setValue(func.arg_count)
self.documentation.setText(func.doc)
if txt in self.builtins:
if hasattr(func, 'program_text') and func.program_text:
self.program.setPlainText(func.program_text)
elif txt in self.builtin_source_dict:
self.program.setPlainText(self.builtin_source_dict[txt])
else:
self.program.setPlainText(_('function source code not available'))
self.documentation.setReadOnly(True)
self.argument_count.setReadOnly(True)
self.program.setReadOnly(True)
self.delete_button.setEnabled(False)
else:
self.program.setPlainText(func.program_text)
self.delete_button.setEnabled(True)
self.program.setReadOnly(False)
self.replace_button.setEnabled(False)
def replace_button_clicked(self):
self.delete_button_clicked()
self.create_button_clicked()
def refresh_gui(self, gui):
pass
def commit(self):
# formatter_functions().reset_to_builtins()
pref_value = []
for name, cls in self.funcs.iteritems():
if name not in self.builtins:
pref_value.append((cls.name, cls.doc, cls.arg_count, cls.program_text))
self.db.prefs.set('user_template_functions', pref_value)
load_user_template_functions(self.db.library_id, pref_value)
return False
if __name__ == '__main__':
from PyQt4.Qt import QApplication
app = QApplication([])
test_widget('Advanced', 'TemplateFunctions')
| insomnia-lab/calibre | src/calibre/gui2/preferences/template_functions.py | Python | gpl-3.0 | 10,331 | 0.002807 |
import urwid
from pipeinspector.widgets import BlobWidget, BlobBrowser
from pipeinspector.settings import UI
__author__ = "Tamas Gal"
__copyright__ = "Copyright 2016, Tamas Gal and the KM3NeT collaboration."
__credits__ = []
__license__ = "MIT"
__maintainer__ = "Tamas Gal"
__email__ = "[email protected]"
__status__ = "Development"
class MainFrame(urwid.Frame):
"""
Represents the main GUI
"""
def __init__(self, pump):
self.header = urwid.AttrWrap(
urwid.Text("PipeInspector", align="center"), "header"
)
self.blob_browser = BlobBrowser()
self.info_area = urwid.Text("")
self.blobs = BlobWidget()
self.footer = urwid.Columns([self.info_area, self.blobs])
self.frame = urwid.AttrWrap(
urwid.Frame(self.blob_browser, header=self.header, footer=self.footer),
"default",
)
urwid.Frame.__init__(self, self.frame)
self.overlay = None
self.pump = pump
urwid.connect_signal(self.blobs, "blob_selected", self.blob_selected)
self.blobs.goto_blob(0)
def blob_selected(self, index):
self.info_area.set_text("Blob: {0}".format(index))
blob = self.pump.get_blob(index)
self.blob_browser.load(blob)
def keypress(self, size, key):
input = urwid.Frame.keypress(self, size, key)
if input is None:
return
if input in UI.keys["left"]:
self.blobs.previous_blob()
elif input in UI.keys["right"]:
self.blobs.next_blob()
elif input in [key.upper() for key in UI.keys["left"]]:
self.blobs.previous_blob(step=10)
elif input in [key.upper() for key in UI.keys["right"]]:
self.blobs.next_blob(step=10)
elif input in UI.keys["home"]:
self.blobs.goto_blob(0)
else:
return self.body.keypress(size, input)
| tamasgal/km3pipe | pipeinspector/gui.py | Python | mit | 1,923 | 0.00052 |
import requests
from cStringIO import StringIO
from PIL import Image
from django.conf import settings
from django.core.files.storage import get_storage_class
from django.core.files.base import ContentFile
from tempfile import NamedTemporaryFile
from onadata.libs.utils.viewer_tools import get_path
def flat(*nums):
'''Build a tuple of ints from float or integer arguments.
Useful because PIL crop and resize require integer points.
source: https://gist.github.com/16a01455
'''
return tuple(int(round(n)) for n in nums)
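# Doctest-style sketch of flat(): plain rounding to ints, e.g.
#   flat(1.2, 3.7, 5) -> (1, 4, 5)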
def get_dimensions((width, height), longest_side):
if width > height:
width = longest_side
height = (height / width) * longest_side
elif height > width:
height = longest_side
width = (width / height) * longest_side
else:
height = longest_side
width = longest_side
return flat(width, height)
def _save_thumbnails(image, path, size, suffix):
nm = NamedTemporaryFile(suffix='.%s' % settings.IMG_FILE_TYPE)
default_storage = get_storage_class()()
try:
# Ensure conversion to float in operations
image.thumbnail(
get_dimensions(image.size, float(size)), Image.ANTIALIAS)
except ZeroDivisionError:
pass
image.save(nm.name)
default_storage.save(
get_path(path, suffix), ContentFile(nm.read()))
nm.close()
def resize(filename):
default_storage = get_storage_class()()
path = default_storage.url(filename)
req = requests.get(path)
if req.status_code == 200:
im = StringIO(req.content)
image = Image.open(im)
conf = settings.THUMB_CONF
[_save_thumbnails(
image, filename,
conf[key]['size'],
conf[key]['suffix']) for key in settings.THUMB_ORDER]
def resize_local_env(filename):
default_storage = get_storage_class()()
path = default_storage.path(filename)
image = Image.open(path)
conf = settings.THUMB_CONF
[_save_thumbnails(
image, path, conf[key]['size'],
conf[key]['suffix']) for key in settings.THUMB_ORDER]
def image_url(attachment, suffix):
    '''Return the URL of an image for the given size (``suffix``),
    e.g. large, medium, small, or generate the required thumbnail
'''
url = attachment.media_file.url
if suffix == 'original':
return url
else:
default_storage = get_storage_class()()
fs = get_storage_class('django.core.files.storage.FileSystemStorage')()
if suffix in settings.THUMB_CONF:
size = settings.THUMB_CONF[suffix]['suffix']
filename = attachment.media_file.name
if default_storage.exists(filename):
if default_storage.exists(get_path(filename, size)) and\
default_storage.size(get_path(filename, size)) > 0:
url = default_storage.url(
get_path(filename, size))
else:
if default_storage.__class__ != fs.__class__:
resize(filename)
else:
resize_local_env(filename)
return image_url(attachment, suffix)
else:
return None
return url
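# A minimal, hedged usage sketch for image_url(); ``attachment`` stands for a
# hypothetical Attachment model instance whose media_file is already stored.
#
#   image_url(attachment, 'original')   # returns the stored file's URL as-is
#   image_url(attachment, 'small')      # generates the thumbnail on first
#                                       # request, then returns its URL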
| piqoni/onadata | onadata/libs/utils/image_tools.py | Python | bsd-2-clause | 3,265 | 0 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# encoding=UTF8
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Unit tests for the DB API."""
import copy
import datetime
import iso8601
import types
import uuid as stdlib_uuid
import mox
import netaddr
from oslo.config import cfg
from sqlalchemy.dialects import sqlite
from sqlalchemy import exc
from sqlalchemy.exc import IntegrityError
from sqlalchemy import MetaData
from sqlalchemy.orm import exc as sqlalchemy_orm_exc
from sqlalchemy.orm import query
from sqlalchemy.sql.expression import select
from nova import block_device
from nova.compute import vm_states
from nova import context
from nova import db
from nova.db.sqlalchemy import api as sqlalchemy_api
from nova.db.sqlalchemy import models
from nova.db.sqlalchemy import utils as db_utils
from nova import exception
from nova.openstack.common.db import exception as db_exc
from nova.openstack.common.db.sqlalchemy import session as db_session
from nova.openstack.common import timeutils
from nova.openstack.common import uuidutils
from nova import quota
from nova import test
from nova.tests import matchers
from nova import utils
CONF = cfg.CONF
CONF.import_opt('reserved_host_memory_mb', 'nova.compute.resource_tracker')
CONF.import_opt('reserved_host_disk_mb', 'nova.compute.resource_tracker')
get_engine = db_session.get_engine
get_session = db_session.get_session
def _quota_reserve(context, project_id, user_id):
"""Create sample Quota, QuotaUsage and Reservation objects.
There is no method db.quota_usage_create(), so we have to use
db.quota_reserve() for creating QuotaUsage objects.
Returns reservations uuids.
"""
def get_sync(resource, usage):
def sync(elevated, project_id, user_id, session):
return {resource: usage}
return sync
quotas = {}
user_quotas = {}
resources = {}
deltas = {}
for i in range(3):
resource = 'resource%d' % i
if i == 2:
# test for project level resources
resource = 'fixed_ips'
quotas[resource] = db.quota_create(context,
project_id, resource, i)
user_quotas[resource] = quotas[resource]
else:
quotas[resource] = db.quota_create(context,
project_id, resource, i)
user_quotas[resource] = db.quota_create(context, project_id,
resource, i,
user_id=user_id)
sync_name = '_sync_%s' % resource
resources[resource] = quota.ReservableResource(
resource, sync_name, 'quota_res_%d' % i)
deltas[resource] = i
setattr(sqlalchemy_api, sync_name, get_sync(resource, i))
sqlalchemy_api.QUOTA_SYNC_FUNCTIONS[sync_name] = getattr(
sqlalchemy_api, sync_name)
return db.quota_reserve(context, resources, quotas, user_quotas, deltas,
timeutils.utcnow(), CONF.until_refresh,
datetime.timedelta(days=1), project_id, user_id)
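# A schematic sketch of what _quota_reserve() sets up, assuming the admin
# context used by the tests below: quotas of 0, 1 and 2 for 'resource0',
# 'resource1' and 'fixed_ips', reservations with the same deltas, and the
# returned list of reservation uuids is what the tests later hand to
# db.reservation_commit() / db.reservation_rollback().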
class DbTestCase(test.TestCase):
def setUp(self):
super(DbTestCase, self).setUp()
self.user_id = 'fake'
self.project_id = 'fake'
self.context = context.RequestContext(self.user_id, self.project_id)
def create_instance_with_args(self, **kwargs):
args = {'reservation_id': 'a', 'image_ref': 1, 'host': 'host1',
'node': 'node1', 'project_id': self.project_id,
'vm_state': 'fake'}
if 'context' in kwargs:
ctxt = kwargs.pop('context')
args['project_id'] = ctxt.project_id
else:
ctxt = self.context
args.update(kwargs)
return db.instance_create(ctxt, args)
def fake_metadata(self, content):
meta = {}
for i in range(0, 10):
meta["foo%i" % i] = "this is %s item %i" % (content, i)
return meta
def create_metadata_for_instance(self, instance_uuid):
meta = self.fake_metadata('metadata')
db.instance_metadata_update(self.context, instance_uuid, meta, False)
sys_meta = self.fake_metadata('system_metadata')
db.instance_system_metadata_update(self.context, instance_uuid,
sys_meta, False)
return meta, sys_meta
class DecoratorTestCase(test.TestCase):
def _test_decorator_wraps_helper(self, decorator):
def test_func():
"""Test docstring."""
decorated_func = decorator(test_func)
self.assertEquals(test_func.func_name, decorated_func.func_name)
self.assertEquals(test_func.__doc__, decorated_func.__doc__)
self.assertEquals(test_func.__module__, decorated_func.__module__)
def test_require_context_decorator_wraps_functions_properly(self):
self._test_decorator_wraps_helper(sqlalchemy_api.require_context)
def test_require_admin_context_decorator_wraps_functions_properly(self):
self._test_decorator_wraps_helper(sqlalchemy_api.require_admin_context)
def _get_fake_aggr_values():
return {'name': 'fake_aggregate'}
def _get_fake_aggr_metadata():
return {'fake_key1': 'fake_value1',
'fake_key2': 'fake_value2',
'availability_zone': 'fake_avail_zone'}
def _get_fake_aggr_hosts():
return ['foo.openstack.org']
def _create_aggregate(context=context.get_admin_context(),
values=_get_fake_aggr_values(),
metadata=_get_fake_aggr_metadata()):
return db.aggregate_create(context, values, metadata)
def _create_aggregate_with_hosts(context=context.get_admin_context(),
values=_get_fake_aggr_values(),
metadata=_get_fake_aggr_metadata(),
hosts=_get_fake_aggr_hosts()):
result = _create_aggregate(context=context,
values=values, metadata=metadata)
for host in hosts:
db.aggregate_host_add(context, result['id'], host)
return result
class NotDbApiTestCase(DbTestCase):
def setUp(self):
super(NotDbApiTestCase, self).setUp()
self.flags(connection='notdb://', group='database')
def test_instance_get_all_by_filters_regex_unsupported_db(self):
# Ensure that the 'LIKE' operator is used for unsupported dbs.
self.create_instance_with_args(display_name='test1')
self.create_instance_with_args(display_name='test.*')
self.create_instance_with_args(display_name='diff')
result = db.instance_get_all_by_filters(self.context,
{'display_name': 'test.*'})
self.assertEqual(1, len(result))
result = db.instance_get_all_by_filters(self.context,
{'display_name': '%test%'})
self.assertEqual(2, len(result))
def test_instance_get_all_by_filters_paginate(self):
test1 = self.create_instance_with_args(display_name='test1')
test2 = self.create_instance_with_args(display_name='test2')
test3 = self.create_instance_with_args(display_name='test3')
result = db.instance_get_all_by_filters(self.context,
{'display_name': '%test%'},
marker=None)
self.assertEqual(3, len(result))
result = db.instance_get_all_by_filters(self.context,
{'display_name': '%test%'},
sort_dir="asc",
marker=test1['uuid'])
self.assertEqual(2, len(result))
result = db.instance_get_all_by_filters(self.context,
{'display_name': '%test%'},
sort_dir="asc",
marker=test2['uuid'])
self.assertEqual(1, len(result))
result = db.instance_get_all_by_filters(self.context,
{'display_name': '%test%'},
sort_dir="asc",
marker=test3['uuid'])
self.assertEqual(0, len(result))
self.assertRaises(exception.MarkerNotFound,
db.instance_get_all_by_filters,
self.context, {'display_name': '%test%'},
marker=str(stdlib_uuid.uuid4()))
class AggregateDBApiTestCase(test.TestCase):
def setUp(self):
super(AggregateDBApiTestCase, self).setUp()
self.user_id = 'fake'
self.project_id = 'fake'
self.context = context.RequestContext(self.user_id, self.project_id)
def test_aggregate_create_no_metadata(self):
result = _create_aggregate(metadata=None)
self.assertEquals(result['name'], 'fake_aggregate')
def test_aggregate_create_avoid_name_conflict(self):
r1 = _create_aggregate(metadata=None)
db.aggregate_delete(context.get_admin_context(), r1['id'])
values = {'name': r1['name']}
metadata = {'availability_zone': 'new_zone'}
r2 = _create_aggregate(values=values, metadata=metadata)
self.assertEqual(r2['name'], values['name'])
self.assertEqual(r2['availability_zone'],
metadata['availability_zone'])
def test_aggregate_create_raise_exist_exc(self):
_create_aggregate(metadata=None)
self.assertRaises(exception.AggregateNameExists,
_create_aggregate, metadata=None)
def test_aggregate_get_raise_not_found(self):
ctxt = context.get_admin_context()
# this does not exist!
aggregate_id = 1
self.assertRaises(exception.AggregateNotFound,
db.aggregate_get,
ctxt, aggregate_id)
def test_aggregate_metadata_get_raise_not_found(self):
ctxt = context.get_admin_context()
# this does not exist!
aggregate_id = 1
self.assertRaises(exception.AggregateNotFound,
db.aggregate_metadata_get,
ctxt, aggregate_id)
def test_aggregate_create_with_metadata(self):
ctxt = context.get_admin_context()
result = _create_aggregate(context=ctxt)
expected_metadata = db.aggregate_metadata_get(ctxt, result['id'])
self.assertThat(expected_metadata,
matchers.DictMatches(_get_fake_aggr_metadata()))
def test_aggregate_create_delete_create_with_metadata(self):
#test for bug 1052479
ctxt = context.get_admin_context()
result = _create_aggregate(context=ctxt)
expected_metadata = db.aggregate_metadata_get(ctxt, result['id'])
self.assertThat(expected_metadata,
matchers.DictMatches(_get_fake_aggr_metadata()))
db.aggregate_delete(ctxt, result['id'])
result = _create_aggregate(metadata={'availability_zone':
'fake_avail_zone'})
expected_metadata = db.aggregate_metadata_get(ctxt, result['id'])
self.assertEqual(expected_metadata, {'availability_zone':
'fake_avail_zone'})
def test_aggregate_create_low_privi_context(self):
self.assertRaises(exception.AdminRequired,
db.aggregate_create,
self.context, _get_fake_aggr_values())
def test_aggregate_get(self):
ctxt = context.get_admin_context()
result = _create_aggregate_with_hosts(context=ctxt)
expected = db.aggregate_get(ctxt, result['id'])
self.assertEqual(_get_fake_aggr_hosts(), expected['hosts'])
self.assertEqual(_get_fake_aggr_metadata(), expected['metadetails'])
def test_aggregate_get_by_host(self):
ctxt = context.get_admin_context()
values2 = {'name': 'fake_aggregate2'}
values3 = {'name': 'fake_aggregate3'}
values4 = {'name': 'fake_aggregate4'}
values5 = {'name': 'fake_aggregate5'}
a1 = _create_aggregate_with_hosts(context=ctxt)
a2 = _create_aggregate_with_hosts(context=ctxt, values=values2)
# a3 has no hosts and should not be in the results.
a3 = _create_aggregate(context=ctxt, values=values3)
# a4 has no matching hosts.
a4 = _create_aggregate_with_hosts(context=ctxt, values=values4,
hosts=['foo4.openstack.org'])
# a5 has no matching hosts after deleting the only matching host.
a5 = _create_aggregate_with_hosts(context=ctxt, values=values5,
hosts=['foo5.openstack.org', 'foo.openstack.org'])
db.aggregate_host_delete(ctxt, a5['id'],
'foo.openstack.org')
r1 = db.aggregate_get_by_host(ctxt, 'foo.openstack.org')
self.assertEqual([a1['id'], a2['id']], [x['id'] for x in r1])
def test_aggregate_get_by_host_with_key(self):
ctxt = context.get_admin_context()
values2 = {'name': 'fake_aggregate2'}
values3 = {'name': 'fake_aggregate3'}
values4 = {'name': 'fake_aggregate4'}
a1 = _create_aggregate_with_hosts(context=ctxt,
metadata={'goodkey': 'good'})
_create_aggregate_with_hosts(context=ctxt, values=values2)
_create_aggregate(context=ctxt, values=values3)
_create_aggregate_with_hosts(context=ctxt, values=values4,
hosts=['foo4.openstack.org'], metadata={'goodkey': 'bad'})
# filter result by key
r1 = db.aggregate_get_by_host(ctxt, 'foo.openstack.org', key='goodkey')
self.assertEqual([a1['id']], [x['id'] for x in r1])
def test_aggregate_metadata_get_by_host(self):
ctxt = context.get_admin_context()
values = {'name': 'fake_aggregate2'}
values2 = {'name': 'fake_aggregate3'}
_create_aggregate_with_hosts(context=ctxt)
_create_aggregate_with_hosts(context=ctxt, values=values)
_create_aggregate_with_hosts(context=ctxt, values=values2,
hosts=['bar.openstack.org'], metadata={'badkey': 'bad'})
r1 = db.aggregate_metadata_get_by_host(ctxt, 'foo.openstack.org')
self.assertEqual(r1['fake_key1'], set(['fake_value1']))
self.assertFalse('badkey' in r1)
def test_aggregate_metadata_get_by_metadata_key(self):
ctxt = context.get_admin_context()
values = {'aggregate_id': 'fake_id',
'name': 'fake_aggregate'}
aggr = _create_aggregate_with_hosts(context=ctxt, values=values,
hosts=['bar.openstack.org'],
metadata={'availability_zone':
'az1'})
r1 = db.aggregate_metadata_get_by_metadata_key(ctxt, aggr['id'],
'availability_zone')
self.assertEqual(r1['availability_zone'], set(['az1']))
self.assertTrue('availability_zone' in r1)
self.assertFalse('name' in r1)
def test_aggregate_metadata_get_by_host_with_key(self):
ctxt = context.get_admin_context()
values2 = {'name': 'fake_aggregate12'}
values3 = {'name': 'fake_aggregate23'}
a2_hosts = ['foo1.openstack.org', 'foo2.openstack.org']
a2_metadata = {'good': 'value12', 'bad': 'badvalue12'}
a3_hosts = ['foo2.openstack.org', 'foo3.openstack.org']
a3_metadata = {'good': 'value23', 'bad': 'badvalue23'}
a1 = _create_aggregate_with_hosts(context=ctxt)
a2 = _create_aggregate_with_hosts(context=ctxt, values=values2,
hosts=a2_hosts, metadata=a2_metadata)
a3 = _create_aggregate_with_hosts(context=ctxt, values=values3,
hosts=a3_hosts, metadata=a3_metadata)
r1 = db.aggregate_metadata_get_by_host(ctxt, 'foo2.openstack.org',
key='good')
self.assertEqual(r1['good'], set(['value12', 'value23']))
self.assertFalse('fake_key1' in r1)
self.assertFalse('bad' in r1)
# Delete metadata
db.aggregate_metadata_delete(ctxt, a3['id'], 'good')
r2 = db.aggregate_metadata_get_by_host(ctxt, 'foo.openstack.org',
key='good')
self.assertFalse('good' in r2)
def test_aggregate_host_get_by_metadata_key(self):
ctxt = context.get_admin_context()
values2 = {'name': 'fake_aggregate12'}
values3 = {'name': 'fake_aggregate23'}
a2_hosts = ['foo1.openstack.org', 'foo2.openstack.org']
a2_metadata = {'good': 'value12', 'bad': 'badvalue12'}
a3_hosts = ['foo2.openstack.org', 'foo3.openstack.org']
a3_metadata = {'good': 'value23', 'bad': 'badvalue23'}
a1 = _create_aggregate_with_hosts(context=ctxt)
a2 = _create_aggregate_with_hosts(context=ctxt, values=values2,
hosts=a2_hosts, metadata=a2_metadata)
a3 = _create_aggregate_with_hosts(context=ctxt, values=values3,
hosts=a3_hosts, metadata=a3_metadata)
r1 = db.aggregate_host_get_by_metadata_key(ctxt, key='good')
self.assertEqual({
'foo1.openstack.org': set(['value12']),
'foo2.openstack.org': set(['value12', 'value23']),
'foo3.openstack.org': set(['value23']),
}, r1)
self.assertFalse('fake_key1' in r1)
def test_aggregate_get_by_host_not_found(self):
ctxt = context.get_admin_context()
_create_aggregate_with_hosts(context=ctxt)
self.assertEqual([], db.aggregate_get_by_host(ctxt, 'unknown_host'))
def test_aggregate_delete_raise_not_found(self):
ctxt = context.get_admin_context()
# this does not exist!
aggregate_id = 1
self.assertRaises(exception.AggregateNotFound,
db.aggregate_delete,
ctxt, aggregate_id)
def test_aggregate_delete(self):
ctxt = context.get_admin_context()
result = _create_aggregate(context=ctxt, metadata=None)
db.aggregate_delete(ctxt, result['id'])
expected = db.aggregate_get_all(ctxt)
self.assertEqual(0, len(expected))
aggregate = db.aggregate_get(ctxt.elevated(read_deleted='yes'),
result['id'])
self.assertEqual(aggregate['deleted'], result['id'])
def test_aggregate_update(self):
ctxt = context.get_admin_context()
result = _create_aggregate(context=ctxt, metadata={'availability_zone':
'fake_avail_zone'})
self.assertEqual(result['availability_zone'], 'fake_avail_zone')
new_values = _get_fake_aggr_values()
new_values['availability_zone'] = 'different_avail_zone'
updated = db.aggregate_update(ctxt, result['id'], new_values)
self.assertNotEqual(result['availability_zone'],
updated['availability_zone'])
def test_aggregate_update_with_metadata(self):
ctxt = context.get_admin_context()
result = _create_aggregate(context=ctxt, metadata=None)
values = _get_fake_aggr_values()
values['metadata'] = _get_fake_aggr_metadata()
values['availability_zone'] = 'different_avail_zone'
db.aggregate_update(ctxt, result['id'], values)
expected = db.aggregate_metadata_get(ctxt, result['id'])
updated = db.aggregate_get(ctxt, result['id'])
self.assertThat(values['metadata'],
matchers.DictMatches(expected))
self.assertNotEqual(result['availability_zone'],
updated['availability_zone'])
def test_aggregate_update_with_existing_metadata(self):
ctxt = context.get_admin_context()
result = _create_aggregate(context=ctxt)
values = _get_fake_aggr_values()
values['metadata'] = _get_fake_aggr_metadata()
values['metadata']['fake_key1'] = 'foo'
db.aggregate_update(ctxt, result['id'], values)
expected = db.aggregate_metadata_get(ctxt, result['id'])
self.assertThat(values['metadata'], matchers.DictMatches(expected))
def test_aggregate_update_zone_with_existing_metadata(self):
ctxt = context.get_admin_context()
result = _create_aggregate(context=ctxt)
new_zone = {'availability_zone': 'fake_avail_zone_2'}
metadata = _get_fake_aggr_metadata()
metadata.update(new_zone)
db.aggregate_update(ctxt, result['id'], new_zone)
expected = db.aggregate_metadata_get(ctxt, result['id'])
self.assertThat(metadata, matchers.DictMatches(expected))
def test_aggregate_update_raise_not_found(self):
ctxt = context.get_admin_context()
# this does not exist!
aggregate_id = 1
new_values = _get_fake_aggr_values()
self.assertRaises(exception.AggregateNotFound,
db.aggregate_update, ctxt, aggregate_id, new_values)
def test_aggregate_get_all(self):
ctxt = context.get_admin_context()
counter = 3
for c in range(counter):
_create_aggregate(context=ctxt,
values={'name': 'fake_aggregate_%d' % c},
metadata=None)
results = db.aggregate_get_all(ctxt)
self.assertEqual(len(results), counter)
def test_aggregate_get_all_non_deleted(self):
ctxt = context.get_admin_context()
add_counter = 5
remove_counter = 2
aggregates = []
for c in range(1, add_counter):
values = {'name': 'fake_aggregate_%d' % c}
aggregates.append(_create_aggregate(context=ctxt,
values=values, metadata=None))
for c in range(1, remove_counter):
db.aggregate_delete(ctxt, aggregates[c - 1]['id'])
results = db.aggregate_get_all(ctxt)
self.assertEqual(len(results), add_counter - remove_counter)
def test_aggregate_metadata_add(self):
ctxt = context.get_admin_context()
result = _create_aggregate(context=ctxt, metadata=None)
metadata = _get_fake_aggr_metadata()
db.aggregate_metadata_add(ctxt, result['id'], metadata)
expected = db.aggregate_metadata_get(ctxt, result['id'])
self.assertThat(metadata, matchers.DictMatches(expected))
def test_aggregate_metadata_add_retry(self):
ctxt = context.get_admin_context()
result = _create_aggregate(context=ctxt, metadata=None)
def counted():
def get_query(context, id, session, read_deleted):
get_query.counter += 1
raise db_exc.DBDuplicateEntry
get_query.counter = 0
return get_query
get_query = counted()
self.stubs.Set(sqlalchemy_api,
'_aggregate_metadata_get_query', get_query)
self.assertRaises(db_exc.DBDuplicateEntry, sqlalchemy_api.
aggregate_metadata_add, ctxt, result['id'], {},
max_retries=5)
self.assertEqual(get_query.counter, 5)
def test_aggregate_metadata_update(self):
ctxt = context.get_admin_context()
result = _create_aggregate(context=ctxt)
metadata = _get_fake_aggr_metadata()
key = metadata.keys()[0]
db.aggregate_metadata_delete(ctxt, result['id'], key)
new_metadata = {key: 'foo'}
db.aggregate_metadata_add(ctxt, result['id'], new_metadata)
expected = db.aggregate_metadata_get(ctxt, result['id'])
metadata[key] = 'foo'
self.assertThat(metadata, matchers.DictMatches(expected))
def test_aggregate_metadata_delete(self):
ctxt = context.get_admin_context()
result = _create_aggregate(context=ctxt, metadata=None)
metadata = _get_fake_aggr_metadata()
db.aggregate_metadata_add(ctxt, result['id'], metadata)
db.aggregate_metadata_delete(ctxt, result['id'], metadata.keys()[0])
expected = db.aggregate_metadata_get(ctxt, result['id'])
del metadata[metadata.keys()[0]]
self.assertThat(metadata, matchers.DictMatches(expected))
def test_aggregate_remove_availability_zone(self):
ctxt = context.get_admin_context()
result = _create_aggregate(context=ctxt, metadata={'availability_zone':
'fake_avail_zone'})
db.aggregate_metadata_delete(ctxt, result['id'], 'availability_zone')
expected = db.aggregate_metadata_get(ctxt, result['id'])
aggregate = db.aggregate_get(ctxt, result['id'])
self.assertEquals(aggregate['availability_zone'], None)
self.assertThat({}, matchers.DictMatches(expected))
def test_aggregate_metadata_delete_raise_not_found(self):
ctxt = context.get_admin_context()
result = _create_aggregate(context=ctxt)
self.assertRaises(exception.AggregateMetadataNotFound,
db.aggregate_metadata_delete,
ctxt, result['id'], 'foo_key')
def test_aggregate_host_add(self):
ctxt = context.get_admin_context()
result = _create_aggregate_with_hosts(context=ctxt, metadata=None)
expected = db.aggregate_host_get_all(ctxt, result['id'])
self.assertEqual(_get_fake_aggr_hosts(), expected)
def test_aggregate_host_re_add(self):
ctxt = context.get_admin_context()
result = _create_aggregate_with_hosts(context=ctxt, metadata=None)
host = _get_fake_aggr_hosts()[0]
db.aggregate_host_delete(ctxt, result['id'], host)
db.aggregate_host_add(ctxt, result['id'], host)
expected = db.aggregate_host_get_all(ctxt, result['id'])
self.assertEqual(len(expected), 1)
def test_aggregate_host_add_duplicate_works(self):
ctxt = context.get_admin_context()
r1 = _create_aggregate_with_hosts(context=ctxt, metadata=None)
r2 = _create_aggregate_with_hosts(ctxt,
values={'name': 'fake_aggregate2'},
metadata={'availability_zone': 'fake_avail_zone2'})
h1 = db.aggregate_host_get_all(ctxt, r1['id'])
h2 = db.aggregate_host_get_all(ctxt, r2['id'])
self.assertEqual(h1, h2)
def test_aggregate_host_add_duplicate_raise_exist_exc(self):
ctxt = context.get_admin_context()
result = _create_aggregate_with_hosts(context=ctxt, metadata=None)
self.assertRaises(exception.AggregateHostExists,
db.aggregate_host_add,
ctxt, result['id'], _get_fake_aggr_hosts()[0])
def test_aggregate_host_add_raise_not_found(self):
ctxt = context.get_admin_context()
# this does not exist!
aggregate_id = 1
host = _get_fake_aggr_hosts()[0]
self.assertRaises(exception.AggregateNotFound,
db.aggregate_host_add,
ctxt, aggregate_id, host)
def test_aggregate_host_delete(self):
ctxt = context.get_admin_context()
result = _create_aggregate_with_hosts(context=ctxt, metadata=None)
db.aggregate_host_delete(ctxt, result['id'],
_get_fake_aggr_hosts()[0])
expected = db.aggregate_host_get_all(ctxt, result['id'])
self.assertEqual(0, len(expected))
def test_aggregate_host_delete_raise_not_found(self):
ctxt = context.get_admin_context()
result = _create_aggregate(context=ctxt)
self.assertRaises(exception.AggregateHostNotFound,
db.aggregate_host_delete,
ctxt, result['id'], _get_fake_aggr_hosts()[0])
class SqlAlchemyDbApiTestCase(DbTestCase):
def test_instance_get_all_by_host(self):
ctxt = context.get_admin_context()
self.create_instance_with_args()
self.create_instance_with_args()
self.create_instance_with_args(host='host2')
result = sqlalchemy_api._instance_get_all_uuids_by_host(ctxt, 'host1')
self.assertEqual(2, len(result))
def test_instance_get_all_uuids_by_host(self):
ctxt = context.get_admin_context()
self.create_instance_with_args()
self.create_instance_with_args()
self.create_instance_with_args(host='host2')
result = sqlalchemy_api._instance_get_all_uuids_by_host(ctxt, 'host1')
self.assertEqual(2, len(result))
self.assertEqual(types.UnicodeType, type(result[0]))
class MigrationTestCase(test.TestCase):
def setUp(self):
super(MigrationTestCase, self).setUp()
self.ctxt = context.get_admin_context()
self._create()
self._create()
self._create(status='reverted')
self._create(status='confirmed')
self._create(source_compute='host2', source_node='b',
dest_compute='host1', dest_node='a')
self._create(source_compute='host2', dest_compute='host3')
self._create(source_compute='host3', dest_compute='host4')
def _create(self, status='migrating', source_compute='host1',
source_node='a', dest_compute='host2', dest_node='b',
system_metadata=None):
values = {'host': source_compute}
instance = db.instance_create(self.ctxt, values)
if system_metadata:
db.instance_system_metadata_update(self.ctxt, instance['uuid'],
system_metadata, False)
values = {'status': status, 'source_compute': source_compute,
'source_node': source_node, 'dest_compute': dest_compute,
'dest_node': dest_node, 'instance_uuid': instance['uuid']}
db.migration_create(self.ctxt, values)
def _assert_in_progress(self, migrations):
for migration in migrations:
self.assertNotEqual('confirmed', migration['status'])
self.assertNotEqual('reverted', migration['status'])
def test_migration_get_in_progress_joins(self):
self._create(source_compute='foo', system_metadata={'foo': 'bar'})
migrations = db.migration_get_in_progress_by_host_and_node(self.ctxt,
'foo', 'a')
system_metadata = migrations[0]['instance']['system_metadata'][0]
self.assertEqual(system_metadata['key'], 'foo')
self.assertEqual(system_metadata['value'], 'bar')
def test_in_progress_host1_nodea(self):
migrations = db.migration_get_in_progress_by_host_and_node(self.ctxt,
'host1', 'a')
# 2 as source + 1 as dest
self.assertEqual(3, len(migrations))
self._assert_in_progress(migrations)
def test_in_progress_host1_nodeb(self):
migrations = db.migration_get_in_progress_by_host_and_node(self.ctxt,
'host1', 'b')
# some migrations are to/from host1, but none with a node 'b'
self.assertEqual(0, len(migrations))
def test_in_progress_host2_nodeb(self):
migrations = db.migration_get_in_progress_by_host_and_node(self.ctxt,
'host2', 'b')
# 2 as dest, 1 as source
self.assertEqual(3, len(migrations))
self._assert_in_progress(migrations)
def test_instance_join(self):
migrations = db.migration_get_in_progress_by_host_and_node(self.ctxt,
'host2', 'b')
for migration in migrations:
instance = migration['instance']
self.assertEqual(migration['instance_uuid'], instance['uuid'])
def test_get_migrations_by_filters(self):
filters = {"status": "migrating", "host": "host3"}
migrations = db.migration_get_all_by_filters(self.ctxt, filters)
self.assertEqual(2, len(migrations))
for migration in migrations:
self.assertEqual(filters["status"], migration['status'])
hosts = [migration['source_compute'], migration['dest_compute']]
self.assertIn(filters["host"], hosts)
def test_only_admin_can_get_all_migrations_by_filters(self):
user_ctxt = context.RequestContext(user_id=None, project_id=None,
is_admin=False, read_deleted="no",
overwrite=False)
self.assertRaises(exception.AdminRequired,
db.migration_get_all_by_filters, user_ctxt, {})
def test_migration_get_unconfirmed_by_dest_compute(self):
# Ensure no migrations are returned.
results = db.migration_get_unconfirmed_by_dest_compute(self.ctxt, 10,
'fake_host')
self.assertEqual(0, len(results))
# Ensure no migrations are returned.
results = db.migration_get_unconfirmed_by_dest_compute(self.ctxt, 10,
'fake_host2')
self.assertEqual(0, len(results))
updated_at = datetime.datetime(2000, 1, 1, 12, 0, 0)
values = {"status": "finished", "updated_at": updated_at,
"dest_compute": "fake_host2"}
migration = db.migration_create(self.ctxt, values)
# Ensure different host is not returned
results = db.migration_get_unconfirmed_by_dest_compute(self.ctxt, 10,
'fake_host')
self.assertEqual(0, len(results))
# Ensure one migration older than 10 seconds is returned.
results = db.migration_get_unconfirmed_by_dest_compute(self.ctxt, 10,
'fake_host2')
self.assertEqual(1, len(results))
db.migration_update(self.ctxt, migration['id'],
{"status": "CONFIRMED"})
# Ensure the new migration is not returned.
updated_at = timeutils.utcnow()
values = {"status": "finished", "updated_at": updated_at,
"dest_compute": "fake_host2"}
migration = db.migration_create(self.ctxt, values)
results = db.migration_get_unconfirmed_by_dest_compute(self.ctxt, 10,
"fake_host2")
self.assertEqual(0, len(results))
db.migration_update(self.ctxt, migration['id'],
{"status": "CONFIRMED"})
def test_migration_update_not_found(self):
self.assertRaises(exception.MigrationNotFound,
db.migration_update, self.ctxt, 42, {})
class ModelsObjectComparatorMixin(object):
def _dict_from_object(self, obj, ignored_keys):
if ignored_keys is None:
ignored_keys = []
return dict([(k, v) for k, v in obj.iteritems()
if k not in ignored_keys])
def _assertEqualObjects(self, obj1, obj2, ignored_keys=None):
obj1 = self._dict_from_object(obj1, ignored_keys)
obj2 = self._dict_from_object(obj2, ignored_keys)
self.assertEqual(len(obj1),
len(obj2),
"Keys mismatch: %s" %
str(set(obj1.keys()) ^ set(obj2.keys())))
for key, value in obj1.iteritems():
self.assertEqual(value, obj2[key])
def _assertEqualListsOfObjects(self, objs1, objs2, ignored_keys=None):
obj_to_dict = lambda o: self._dict_from_object(o, ignored_keys)
sort_key = lambda d: [d[k] for k in sorted(d)]
conv_and_sort = lambda obj: sorted(map(obj_to_dict, obj), key=sort_key)
self.assertEqual(conv_and_sort(objs1), conv_and_sort(objs2))
def _assertEqualListsOfPrimitivesAsSets(self, primitives1, primitives2):
self.assertEqual(len(primitives1), len(primitives2))
for primitive in primitives1:
self.assertIn(primitive, primitives2)
for primitive in primitives2:
self.assertIn(primitive, primitives1)
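# A minimal sketch of how the comparator mixin above is used, with
# hypothetical dicts standing in for DB objects:
#
#   self._assertEqualObjects({'id': 1, 'name': 'x', 'updated_at': t1},
#                            {'id': 1, 'name': 'x', 'updated_at': t2},
#                            ignored_keys=['updated_at'])
#
# passes because the objects agree on every non-ignored key.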
class InstanceSystemMetadataTestCase(test.TestCase):
"""Tests for db.api.instance_system_metadata_* methods."""
def setUp(self):
super(InstanceSystemMetadataTestCase, self).setUp()
values = {'host': 'h1', 'project_id': 'p1',
'system_metadata': {'key': 'value'}}
self.ctxt = context.get_admin_context()
self.instance = db.instance_create(self.ctxt, values)
def test_instance_system_metadata_get(self):
metadata = db.instance_system_metadata_get(self.ctxt,
self.instance['uuid'])
self.assertEqual(metadata, {'key': 'value'})
def test_instance_system_metadata_update_new_pair(self):
db.instance_system_metadata_update(
self.ctxt, self.instance['uuid'],
{'new_key': 'new_value'}, False)
metadata = db.instance_system_metadata_get(self.ctxt,
self.instance['uuid'])
self.assertEqual(metadata, {'key': 'value', 'new_key': 'new_value'})
def test_instance_system_metadata_update_existent_pair(self):
db.instance_system_metadata_update(
self.ctxt, self.instance['uuid'],
{'key': 'new_value'}, True)
metadata = db.instance_system_metadata_get(self.ctxt,
self.instance['uuid'])
self.assertEqual(metadata, {'key': 'new_value'})
def test_instance_system_metadata_update_delete_true(self):
db.instance_system_metadata_update(
self.ctxt, self.instance['uuid'],
{'new_key': 'new_value'}, True)
metadata = db.instance_system_metadata_get(self.ctxt,
self.instance['uuid'])
self.assertEqual(metadata, {'new_key': 'new_value'})
@test.testtools.skip("bug 1189462")
def test_instance_system_metadata_update_nonexistent(self):
self.assertRaises(exception.InstanceNotFound,
db.instance_system_metadata_update,
self.ctxt, 'nonexistent-uuid',
{'key': 'value'}, True)
class ReservationTestCase(test.TestCase, ModelsObjectComparatorMixin):
"""Tests for db.api.reservation_* methods."""
def setUp(self):
super(ReservationTestCase, self).setUp()
self.ctxt = context.get_admin_context()
self.values = {'uuid': 'sample-uuid',
'project_id': 'project1',
'user_id': 'user1',
'resource': 'resource',
'delta': 42,
'expire': timeutils.utcnow() + datetime.timedelta(days=1),
'usage': {'id': 1}}
def test_reservation_create(self):
reservation = db.reservation_create(self.ctxt, **self.values)
self._assertEqualObjects(self.values, reservation, ignored_keys=(
'deleted', 'updated_at',
'deleted_at', 'id',
'created_at', 'usage',
'usage_id'))
self.assertEqual(reservation['usage_id'], self.values['usage']['id'])
def test_reservation_get(self):
reservation = db.reservation_create(self.ctxt, **self.values)
reservation_db = db.reservation_get(self.ctxt, self.values['uuid'])
self._assertEqualObjects(reservation, reservation_db)
def test_reservation_get_nonexistent(self):
self.assertRaises(exception.ReservationNotFound, db.reservation_get,
                          self.ctxt, 'non-existent-reservation-uuid')
def test_reservation_commit(self):
reservations = _quota_reserve(self.ctxt, 'project1', 'user1')
expected = {'project_id': 'project1', 'user_id': 'user1',
'resource0': {'reserved': 0, 'in_use': 0},
'resource1': {'reserved': 1, 'in_use': 1},
'fixed_ips': {'reserved': 2, 'in_use': 2}}
self.assertEqual(expected, db.quota_usage_get_all_by_project_and_user(
self.ctxt, 'project1', 'user1'))
db.reservation_get(self.ctxt, reservations[0])
db.reservation_commit(self.ctxt, reservations, 'project1', 'user1')
self.assertRaises(exception.ReservationNotFound,
db.reservation_get, self.ctxt, reservations[0])
expected = {'project_id': 'project1', 'user_id': 'user1',
'resource0': {'reserved': 0, 'in_use': 0},
'resource1': {'reserved': 0, 'in_use': 2},
'fixed_ips': {'reserved': 0, 'in_use': 4}}
self.assertEqual(expected, db.quota_usage_get_all_by_project_and_user(
self.ctxt, 'project1', 'user1'))
def test_reservation_rollback(self):
reservations = _quota_reserve(self.ctxt, 'project1', 'user1')
expected = {'project_id': 'project1', 'user_id': 'user1',
'resource0': {'reserved': 0, 'in_use': 0},
'resource1': {'reserved': 1, 'in_use': 1},
'fixed_ips': {'reserved': 2, 'in_use': 2}}
self.assertEqual(expected, db.quota_usage_get_all_by_project_and_user(
self.ctxt, 'project1', 'user1'))
db.reservation_get(self.ctxt, reservations[0])
db.reservation_rollback(self.ctxt, reservations, 'project1', 'user1')
self.assertRaises(exception.ReservationNotFound,
db.reservation_get, self.ctxt, reservations[0])
expected = {'project_id': 'project1', 'user_id': 'user1',
'resource0': {'reserved': 0, 'in_use': 0},
'resource1': {'reserved': 0, 'in_use': 1},
'fixed_ips': {'reserved': 0, 'in_use': 2}}
self.assertEqual(expected, db.quota_usage_get_all_by_project_and_user(
self.ctxt, 'project1', 'user1'))
def test_reservation_expire(self):
self.values['expire'] = timeutils.utcnow() + datetime.timedelta(days=1)
_quota_reserve(self.ctxt, 'project1', 'user1')
db.reservation_expire(self.ctxt)
expected = {'project_id': 'project1', 'user_id': 'user1',
'resource0': {'reserved': 0, 'in_use': 0},
'resource1': {'reserved': 0, 'in_use': 1},
'fixed_ips': {'reserved': 0, 'in_use': 2}}
self.assertEqual(expected, db.quota_usage_get_all_by_project_and_user(
self.ctxt, 'project1', 'user1'))
class SecurityGroupRuleTestCase(test.TestCase, ModelsObjectComparatorMixin):
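    """Tests for db.api.security_group_rule_* methods."""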
def setUp(self):
super(SecurityGroupRuleTestCase, self).setUp()
self.ctxt = context.get_admin_context()
def _get_base_values(self):
return {
'name': 'fake_sec_group',
'description': 'fake_sec_group_descr',
'user_id': 'fake',
'project_id': 'fake',
'instances': []
}
def _get_base_rule_values(self):
return {
'protocol': "tcp",
'from_port': 80,
'to_port': 8080,
'cidr': None,
'deleted': 0,
'deleted_at': None,
'grantee_group': None,
'updated_at': None
}
def _create_security_group(self, values):
v = self._get_base_values()
v.update(values)
return db.security_group_create(self.ctxt, v)
def _create_security_group_rule(self, values):
v = self._get_base_rule_values()
v.update(values)
return db.security_group_rule_create(self.ctxt, v)
def test_security_group_rule_create(self):
security_group_rule = self._create_security_group_rule({})
self.assertIsNotNone(security_group_rule['id'])
for key, value in self._get_base_rule_values().items():
self.assertEqual(value, security_group_rule[key])
def test_security_group_rule_get_by_security_group(self):
security_group = self._create_security_group({})
security_group_rule = self._create_security_group_rule(
{'parent_group': security_group})
security_group_rule1 = self._create_security_group_rule(
{'parent_group': security_group})
found_rules = db.security_group_rule_get_by_security_group(self.ctxt,
security_group['id'])
self.assertEqual(len(found_rules), 2)
rules_ids = [security_group_rule['id'], security_group_rule1['id']]
for rule in found_rules:
self.assertIn(rule['id'], rules_ids)
def test_security_group_rule_get_by_security_group_grantee(self):
security_group = self._create_security_group({})
security_group_rule = self._create_security_group_rule(
{'grantee_group': security_group})
rules = db.security_group_rule_get_by_security_group_grantee(self.ctxt,
security_group['id'])
self.assertEqual(len(rules), 1)
self.assertEqual(rules[0]['id'], security_group_rule['id'])
def test_security_group_rule_destroy(self):
security_group1 = self._create_security_group({'name': 'fake1'})
security_group2 = self._create_security_group({'name': 'fake2'})
security_group_rule1 = self._create_security_group_rule({})
security_group_rule2 = self._create_security_group_rule({})
db.security_group_rule_destroy(self.ctxt, security_group_rule1['id'])
self.assertRaises(exception.SecurityGroupNotFound,
db.security_group_rule_get,
self.ctxt, security_group_rule1['id'])
self._assertEqualObjects(db.security_group_rule_get(self.ctxt,
security_group_rule2['id']),
security_group_rule2, ['grantee_group'])
def test_security_group_rule_destroy_not_found_exception(self):
self.assertRaises(exception.SecurityGroupNotFound,
db.security_group_rule_destroy, self.ctxt, 100500)
def test_security_group_rule_get(self):
security_group_rule1 = (
self._create_security_group_rule({}))
security_group_rule2 = self._create_security_group_rule({})
real_security_group_rule = db.security_group_rule_get(self.ctxt,
security_group_rule1['id'])
self._assertEqualObjects(security_group_rule1,
real_security_group_rule, ['grantee_group'])
def test_security_group_rule_get_not_found_exception(self):
self.assertRaises(exception.SecurityGroupNotFound,
db.security_group_rule_get, self.ctxt, 100500)
def test_security_group_rule_count_by_group(self):
sg1 = self._create_security_group({'name': 'fake1'})
sg2 = self._create_security_group({'name': 'fake2'})
rules_by_group = {sg1: [], sg2: []}
for group in rules_by_group:
rules = rules_by_group[group]
for i in range(0, 10):
rules.append(
self._create_security_group_rule({'parent_group_id':
group['id']}))
db.security_group_rule_destroy(self.ctxt,
rules_by_group[sg1][0]['id'])
counted_groups = [db.security_group_rule_count_by_group(self.ctxt,
group['id'])
for group in [sg1, sg2]]
expected = [9, 10]
self.assertEqual(counted_groups, expected)
class SecurityGroupTestCase(test.TestCase, ModelsObjectComparatorMixin):
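    """Tests for db.api.security_group_* methods."""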
def setUp(self):
super(SecurityGroupTestCase, self).setUp()
self.ctxt = context.get_admin_context()
def _get_base_values(self):
return {
'name': 'fake_sec_group',
'description': 'fake_sec_group_descr',
'user_id': 'fake',
'project_id': 'fake',
'instances': []
}
def _create_security_group(self, values):
v = self._get_base_values()
v.update(values)
return db.security_group_create(self.ctxt, v)
def test_security_group_create(self):
security_group = self._create_security_group({})
        self.assertIsNotNone(security_group['id'])
for key, value in self._get_base_values().iteritems():
self.assertEqual(value, security_group[key])
def test_security_group_destroy(self):
security_group1 = self._create_security_group({})
security_group2 = \
self._create_security_group({'name': 'fake_sec_group2'})
db.security_group_destroy(self.ctxt, security_group1['id'])
self.assertRaises(exception.SecurityGroupNotFound,
db.security_group_get,
self.ctxt, security_group1['id'])
self._assertEqualObjects(db.security_group_get(
self.ctxt, security_group2['id'],
columns_to_join=['instances']), security_group2)
def test_security_group_get(self):
security_group1 = self._create_security_group({})
self._create_security_group({'name': 'fake_sec_group2'})
real_security_group = db.security_group_get(self.ctxt,
security_group1['id'],
columns_to_join=['instances'])
self._assertEqualObjects(security_group1,
real_security_group)
def test_security_group_get_no_instances(self):
instance = db.instance_create(self.ctxt, {})
sid = self._create_security_group({'instances': [instance]})['id']
session = get_session()
self.mox.StubOutWithMock(sqlalchemy_api, 'get_session')
sqlalchemy_api.get_session().AndReturn(session)
sqlalchemy_api.get_session().AndReturn(session)
self.mox.ReplayAll()
security_group = db.security_group_get(self.ctxt, sid,
columns_to_join=['instances'])
session.expunge(security_group)
self.assertEqual(1, len(security_group['instances']))
security_group = db.security_group_get(self.ctxt, sid)
session.expunge(security_group)
self.assertRaises(sqlalchemy_orm_exc.DetachedInstanceError,
getattr, security_group, 'instances')
def test_security_group_get_not_found_exception(self):
self.assertRaises(exception.SecurityGroupNotFound,
db.security_group_get, self.ctxt, 100500)
def test_security_group_get_by_name(self):
security_group1 = self._create_security_group({'name': 'fake1'})
security_group2 = self._create_security_group({'name': 'fake2'})
real_security_group1 = db.security_group_get_by_name(
self.ctxt,
security_group1['project_id'],
security_group1['name'])
real_security_group2 = db.security_group_get_by_name(
self.ctxt,
security_group2['project_id'],
security_group2['name'])
self._assertEqualObjects(security_group1, real_security_group1)
self._assertEqualObjects(security_group2, real_security_group2)
def test_security_group_get_by_project(self):
security_group1 = self._create_security_group(
{'name': 'fake1', 'project_id': 'fake_proj1'})
security_group2 = self._create_security_group(
{'name': 'fake2', 'project_id': 'fake_proj2'})
real1 = db.security_group_get_by_project(
self.ctxt,
security_group1['project_id'])
real2 = db.security_group_get_by_project(
self.ctxt,
security_group2['project_id'])
expected1, expected2 = [security_group1], [security_group2]
self._assertEqualListsOfObjects(expected1, real1,
ignored_keys=['instances'])
self._assertEqualListsOfObjects(expected2, real2,
ignored_keys=['instances'])
def test_security_group_get_by_instance(self):
instance = db.instance_create(self.ctxt, dict(host='foo'))
values = [
{'name': 'fake1', 'instances': [instance]},
{'name': 'fake2', 'instances': [instance]},
{'name': 'fake3', 'instances': []},
]
security_groups = [self._create_security_group(vals)
for vals in values]
real = db.security_group_get_by_instance(self.ctxt,
instance['uuid'])
expected = security_groups[:2]
self._assertEqualListsOfObjects(expected, real,
ignored_keys=['instances'])
def test_security_group_get_all(self):
values = [
{'name': 'fake1', 'project_id': 'fake_proj1'},
{'name': 'fake2', 'project_id': 'fake_proj2'},
]
security_groups = [self._create_security_group(vals)
for vals in values]
real = db.security_group_get_all(self.ctxt)
self._assertEqualListsOfObjects(security_groups, real,
ignored_keys=['instances'])
def test_security_group_in_use(self):
instance = db.instance_create(self.ctxt, dict(host='foo'))
values = [
{'instances': [instance],
'name': 'fake_in_use'},
{'instances': []},
]
security_groups = [self._create_security_group(vals)
for vals in values]
real = []
for security_group in security_groups:
in_use = db.security_group_in_use(self.ctxt,
security_group['id'])
real.append(in_use)
expected = [True, False]
        self.assertEqual(expected, real)
def test_security_group_ensure_default(self):
        self.assertEqual(0, len(db.security_group_get_by_project(
self.ctxt,
self.ctxt.project_id)))
db.security_group_ensure_default(self.ctxt)
security_groups = db.security_group_get_by_project(
self.ctxt,
self.ctxt.project_id)
        self.assertEqual(1, len(security_groups))
        self.assertEqual("default", security_groups[0]["name"])
def test_security_group_update(self):
security_group = self._create_security_group({})
new_values = {
'name': 'sec_group1',
'description': 'sec_group_descr1',
'user_id': 'fake_user1',
'project_id': 'fake_proj1',
}
updated_group = db.security_group_update(self.ctxt,
security_group['id'],
new_values,
columns_to_join=['rules.grantee_group'])
for key, value in new_values.iteritems():
self.assertEqual(updated_group[key], value)
self.assertEqual(updated_group['rules'], [])
def test_security_group_update_to_duplicate(self):
security_group1 = self._create_security_group(
{'name': 'fake1', 'project_id': 'fake_proj1'})
security_group2 = self._create_security_group(
{'name': 'fake1', 'project_id': 'fake_proj2'})
self.assertRaises(exception.SecurityGroupExists,
db.security_group_update,
self.ctxt, security_group2['id'],
{'project_id': 'fake_proj1'})
class InstanceTestCase(test.TestCase, ModelsObjectComparatorMixin):
"""Tests for db.api.instance_* methods."""
sample_data = {
'project_id': 'project1',
'hostname': 'example.com',
'host': 'h1',
'node': 'n1',
'metadata': {'mkey1': 'mval1', 'mkey2': 'mval2'},
'system_metadata': {'smkey1': 'smval1', 'smkey2': 'smval2'},
'info_cache': {'ckey': 'cvalue'},
}
def setUp(self):
super(InstanceTestCase, self).setUp()
self.ctxt = context.get_admin_context()
def _assertEqualInstances(self, instance1, instance2):
self._assertEqualObjects(instance1, instance2,
ignored_keys=['metadata', 'system_metadata', 'info_cache'])
def _assertEqualListsOfInstances(self, list1, list2):
self._assertEqualListsOfObjects(list1, list2,
ignored_keys=['metadata', 'system_metadata', 'info_cache'])
def create_instance_with_args(self, **kwargs):
if 'context' in kwargs:
context = kwargs.pop('context')
else:
context = self.ctxt
args = self.sample_data.copy()
args.update(kwargs)
return db.instance_create(context, args)
def test_instance_create(self):
instance = self.create_instance_with_args()
self.assertTrue(uuidutils.is_uuid_like(instance['uuid']))
def test_instance_create_with_object_values(self):
values = {
'access_ip_v4': netaddr.IPAddress('1.2.3.4'),
'access_ip_v6': netaddr.IPAddress('::1'),
}
dt_keys = ('created_at', 'deleted_at', 'updated_at',
'launched_at', 'terminated_at', 'scheduled_at')
dt = timeutils.utcnow()
dt_utc = dt.replace(tzinfo=iso8601.iso8601.Utc())
for key in dt_keys:
values[key] = dt_utc
inst = db.instance_create(self.ctxt, values)
self.assertEqual(inst['access_ip_v4'], '1.2.3.4')
self.assertEqual(inst['access_ip_v6'], '::1')
for key in dt_keys:
self.assertEqual(inst[key], dt)
def test_instance_update_with_object_values(self):
values = {
'access_ip_v4': netaddr.IPAddress('1.2.3.4'),
'access_ip_v6': netaddr.IPAddress('::1'),
}
dt_keys = ('created_at', 'deleted_at', 'updated_at',
'launched_at', 'terminated_at', 'scheduled_at')
dt = timeutils.utcnow()
dt_utc = dt.replace(tzinfo=iso8601.iso8601.Utc())
for key in dt_keys:
values[key] = dt_utc
inst = db.instance_create(self.ctxt, {})
inst = db.instance_update(self.ctxt, inst['uuid'], values)
self.assertEqual(inst['access_ip_v4'], '1.2.3.4')
self.assertEqual(inst['access_ip_v6'], '::1')
for key in dt_keys:
self.assertEqual(inst[key], dt)
def test_instance_update_no_metadata_clobber(self):
meta = {'foo': 'bar'}
sys_meta = {'sfoo': 'sbar'}
values = {
'metadata': meta,
'system_metadata': sys_meta,
}
inst = db.instance_create(self.ctxt, {})
inst = db.instance_update(self.ctxt, inst['uuid'], values)
self.assertEqual({'foo': 'bar'}, meta)
self.assertEqual({'sfoo': 'sbar'}, sys_meta)
def test_instance_get_all_with_meta(self):
inst = self.create_instance_with_args()
for inst in db.instance_get_all(self.ctxt):
meta = utils.metadata_to_dict(inst['metadata'])
self.assertEqual(meta, self.sample_data['metadata'])
sys_meta = utils.metadata_to_dict(inst['system_metadata'])
self.assertEqual(sys_meta, self.sample_data['system_metadata'])
def test_instance_update(self):
instance = self.create_instance_with_args()
metadata = {'host': 'bar', 'key2': 'wuff'}
system_metadata = {'original_image_ref': 'baz'}
# Update the metadata
db.instance_update(self.ctxt, instance['uuid'], {'metadata': metadata,
'system_metadata': system_metadata})
# Retrieve the user-provided metadata to ensure it was successfully
# updated
self.assertEqual(metadata,
db.instance_metadata_get(self.ctxt, instance['uuid']))
self.assertEqual(system_metadata,
db.instance_system_metadata_get(self.ctxt, instance['uuid']))
def test_instance_update_bad_str_dates(self):
instance = self.create_instance_with_args()
values = {'created_at': '123'}
self.assertRaises(ValueError,
db.instance_update,
self.ctxt, instance['uuid'], values)
def test_instance_update_good_str_dates(self):
instance = self.create_instance_with_args()
values = {'created_at': '2011-01-31T00:00:00.0'}
actual = db.instance_update(self.ctxt, instance['uuid'], values)
expected = datetime.datetime(2011, 1, 31)
self.assertEquals(expected, actual["created_at"])
def test_create_instance_unique_hostname(self):
context1 = context.RequestContext('user1', 'p1')
context2 = context.RequestContext('user2', 'p2')
self.create_instance_with_args(hostname='h1', project_id='p1')
# With scope 'global' any duplicate should fail, be it this project:
self.flags(osapi_compute_unique_server_name_scope='global')
self.assertRaises(exception.InstanceExists,
self.create_instance_with_args,
context=context1,
hostname='h1', project_id='p3')
# or another:
self.assertRaises(exception.InstanceExists,
self.create_instance_with_args,
context=context2,
hostname='h1', project_id='p2')
# With scope 'project' a duplicate in the project should fail:
self.flags(osapi_compute_unique_server_name_scope='project')
self.assertRaises(exception.InstanceExists,
self.create_instance_with_args,
context=context1,
hostname='h1', project_id='p1')
# With scope 'project' a duplicate in a different project should work:
self.flags(osapi_compute_unique_server_name_scope='project')
self.create_instance_with_args(context=context2, hostname='h2')
self.flags(osapi_compute_unique_server_name_scope=None)
def test_instance_get_all_by_filters_with_meta(self):
inst = self.create_instance_with_args()
for inst in db.instance_get_all_by_filters(self.ctxt, {}):
meta = utils.metadata_to_dict(inst['metadata'])
self.assertEqual(meta, self.sample_data['metadata'])
sys_meta = utils.metadata_to_dict(inst['system_metadata'])
self.assertEqual(sys_meta, self.sample_data['system_metadata'])
def test_instance_get_all_by_filters_without_meta(self):
inst = self.create_instance_with_args()
result = db.instance_get_all_by_filters(self.ctxt, {},
columns_to_join=[])
for inst in result:
meta = utils.metadata_to_dict(inst['metadata'])
self.assertEqual(meta, {})
sys_meta = utils.metadata_to_dict(inst['system_metadata'])
self.assertEqual(sys_meta, {})
def test_instance_get_all_by_filters(self):
instances = [self.create_instance_with_args() for i in range(3)]
filtered_instances = db.instance_get_all_by_filters(self.ctxt, {})
self._assertEqualListsOfInstances(instances, filtered_instances)
def test_instance_metadata_get_multi(self):
uuids = [self.create_instance_with_args()['uuid'] for i in range(3)]
meta = sqlalchemy_api._instance_metadata_get_multi(self.ctxt, uuids)
for row in meta:
            self.assertIn(row['instance_uuid'], uuids)
def test_instance_metadata_get_multi_no_uuids(self):
self.mox.StubOutWithMock(query.Query, 'filter')
self.mox.ReplayAll()
sqlalchemy_api._instance_metadata_get_multi(self.ctxt, [])
    def test_instance_system_metadata_get_multi(self):
uuids = [self.create_instance_with_args()['uuid'] for i in range(3)]
sys_meta = sqlalchemy_api._instance_system_metadata_get_multi(
self.ctxt, uuids)
for row in sys_meta:
            self.assertIn(row['instance_uuid'], uuids)
def test_instance_system_metadata_get_multi_no_uuids(self):
self.mox.StubOutWithMock(query.Query, 'filter')
self.mox.ReplayAll()
sqlalchemy_api._instance_system_metadata_get_multi(self.ctxt, [])
def test_instance_get_all_by_filters_regex(self):
i1 = self.create_instance_with_args(display_name='test1')
i2 = self.create_instance_with_args(display_name='teeeest2')
self.create_instance_with_args(display_name='diff')
result = db.instance_get_all_by_filters(self.ctxt,
{'display_name': 't.*st.'})
self._assertEqualListsOfInstances(result, [i1, i2])
def test_instance_get_all_by_filters_exact_match(self):
instance = self.create_instance_with_args(host='host1')
self.create_instance_with_args(host='host12')
result = db.instance_get_all_by_filters(self.ctxt,
{'host': 'host1'})
self._assertEqualListsOfInstances([instance], result)
def test_instance_get_all_by_filters_metadata(self):
instance = self.create_instance_with_args(metadata={'foo': 'bar'})
self.create_instance_with_args()
result = db.instance_get_all_by_filters(self.ctxt,
{'metadata': {'foo': 'bar'}})
self._assertEqualListsOfInstances([instance], result)
def test_instance_get_all_by_filters_system_metadata(self):
instance = self.create_instance_with_args(
system_metadata={'foo': 'bar'})
self.create_instance_with_args()
result = db.instance_get_all_by_filters(self.ctxt,
{'system_metadata': {'foo': 'bar'}})
self._assertEqualListsOfInstances([instance], result)
def test_instance_get_all_by_filters_unicode_value(self):
instance = self.create_instance_with_args(display_name=u'test♥')
result = db.instance_get_all_by_filters(self.ctxt,
{'display_name': u'test'})
self._assertEqualListsOfInstances([instance], result)
def test_instance_get_all_by_filters_tags(self):
instance = self.create_instance_with_args(
metadata={'foo': 'bar'})
self.create_instance_with_args()
        # For format 'tag-'
result = db.instance_get_all_by_filters(
self.ctxt, {'filter': [
{'name': 'tag-key', 'value': 'foo'},
{'name': 'tag-value', 'value': 'bar'},
]})
self._assertEqualListsOfInstances([instance], result)
        # For format 'tag:'
result = db.instance_get_all_by_filters(
self.ctxt, {'filter': [
{'name': 'tag:foo', 'value': 'bar'},
]})
self._assertEqualListsOfInstances([instance], result)
        # For non-existent tag
result = db.instance_get_all_by_filters(
self.ctxt, {'filter': [
{'name': 'tag:foo', 'value': 'barred'},
]})
self.assertEqual([], result)
        # Confirm with deleted tags
db.instance_metadata_delete(self.ctxt, instance['uuid'], 'foo')
        # For format 'tag-'
result = db.instance_get_all_by_filters(
self.ctxt, {'filter': [
{'name': 'tag-key', 'value': 'foo'},
]})
self.assertEqual([], result)
result = db.instance_get_all_by_filters(
self.ctxt, {'filter': [
{'name': 'tag-value', 'value': 'bar'}
]})
self.assertEqual([], result)
        # For format 'tag:'
result = db.instance_get_all_by_filters(
self.ctxt, {'filter': [
{'name': 'tag:foo', 'value': 'bar'},
]})
self.assertEqual([], result)
def test_instance_get_by_uuid(self):
inst = self.create_instance_with_args()
result = db.instance_get_by_uuid(self.ctxt, inst['uuid'])
self._assertEqualInstances(inst, result)
def test_instance_get_by_uuid_join_empty(self):
inst = self.create_instance_with_args()
result = db.instance_get_by_uuid(self.ctxt, inst['uuid'],
columns_to_join=[])
meta = utils.metadata_to_dict(result['metadata'])
self.assertEqual(meta, {})
sys_meta = utils.metadata_to_dict(result['system_metadata'])
self.assertEqual(sys_meta, {})
def test_instance_get_by_uuid_join_meta(self):
inst = self.create_instance_with_args()
result = db.instance_get_by_uuid(self.ctxt, inst['uuid'],
columns_to_join=['metadata'])
meta = utils.metadata_to_dict(result['metadata'])
self.assertEqual(meta, self.sample_data['metadata'])
sys_meta = utils.metadata_to_dict(result['system_metadata'])
self.assertEqual(sys_meta, {})
def test_instance_get_by_uuid_join_sys_meta(self):
inst = self.create_instance_with_args()
result = db.instance_get_by_uuid(self.ctxt, inst['uuid'],
columns_to_join=['system_metadata'])
meta = utils.metadata_to_dict(result['metadata'])
self.assertEqual(meta, {})
sys_meta = utils.metadata_to_dict(result['system_metadata'])
self.assertEqual(sys_meta, self.sample_data['system_metadata'])
def test_instance_get_all_by_filters_deleted(self):
inst1 = self.create_instance_with_args()
inst2 = self.create_instance_with_args(reservation_id='b')
db.instance_destroy(self.ctxt, inst1['uuid'])
result = db.instance_get_all_by_filters(self.ctxt, {})
self._assertEqualListsOfObjects([inst1, inst2], result,
ignored_keys=['metadata', 'system_metadata',
'deleted', 'deleted_at', 'info_cache',
'pci_devices'])
def test_instance_get_all_by_filters_deleted_and_soft_deleted(self):
inst1 = self.create_instance_with_args()
inst2 = self.create_instance_with_args(vm_state=vm_states.SOFT_DELETED)
inst3 = self.create_instance_with_args()
db.instance_destroy(self.ctxt, inst1['uuid'])
result = db.instance_get_all_by_filters(self.ctxt,
{'deleted': True})
self._assertEqualListsOfObjects([inst1, inst2], result,
ignored_keys=['metadata', 'system_metadata',
'deleted', 'deleted_at', 'info_cache',
'pci_devices'])
def test_instance_get_all_by_filters_deleted_no_soft_deleted(self):
inst1 = self.create_instance_with_args()
inst2 = self.create_instance_with_args(vm_state=vm_states.SOFT_DELETED)
inst3 = self.create_instance_with_args()
db.instance_destroy(self.ctxt, inst1['uuid'])
result = db.instance_get_all_by_filters(self.ctxt,
{'deleted': True,
'soft_deleted': False})
self._assertEqualListsOfObjects([inst1], result,
ignored_keys=['deleted', 'deleted_at', 'metadata',
'system_metadata', 'info_cache', 'pci_devices'])
def test_instance_get_all_by_filters_alive_and_soft_deleted(self):
inst1 = self.create_instance_with_args()
inst2 = self.create_instance_with_args(vm_state=vm_states.SOFT_DELETED)
inst3 = self.create_instance_with_args()
db.instance_destroy(self.ctxt, inst1['uuid'])
result = db.instance_get_all_by_filters(self.ctxt,
{'deleted': False,
'soft_deleted': True})
self._assertEqualListsOfInstances([inst2, inst3], result)
def test_instance_get_all_by_filters_cleaned(self):
inst1 = self.create_instance_with_args()
inst2 = self.create_instance_with_args(reservation_id='b')
db.instance_update(self.ctxt, inst1['uuid'], {'cleaned': 1})
result = db.instance_get_all_by_filters(self.ctxt, {})
self.assertEqual(2, len(result))
self.assertIn(inst1['uuid'], [result[0]['uuid'], result[1]['uuid']])
self.assertIn(inst2['uuid'], [result[0]['uuid'], result[1]['uuid']])
if inst1['uuid'] == result[0]['uuid']:
self.assertTrue(result[0]['cleaned'])
self.assertFalse(result[1]['cleaned'])
else:
self.assertTrue(result[1]['cleaned'])
self.assertFalse(result[0]['cleaned'])
def test_instance_get_all_by_host_and_node_no_join(self):
instance = self.create_instance_with_args()
result = db.instance_get_all_by_host_and_node(self.ctxt, 'h1', 'n1')
self.assertEqual(result[0]['uuid'], instance['uuid'])
self.assertEqual(result[0]['system_metadata'], [])
def test_instance_get_all_hung_in_rebooting(self):
# Ensure no instances are returned.
results = db.instance_get_all_hung_in_rebooting(self.ctxt, 10)
self.assertEqual([], results)
# Ensure one rebooting instance with updated_at older than 10 seconds
# is returned.
instance = self.create_instance_with_args(task_state="rebooting",
updated_at=datetime.datetime(2000, 1, 1, 12, 0, 0))
results = db.instance_get_all_hung_in_rebooting(self.ctxt, 10)
self._assertEqualListsOfObjects([instance], results,
ignored_keys=['task_state', 'info_cache', 'security_groups',
'metadata', 'system_metadata', 'pci_devices'])
db.instance_update(self.ctxt, instance['uuid'], {"task_state": None})
# Ensure the newly rebooted instance is not returned.
instance = self.create_instance_with_args(task_state="rebooting",
updated_at=timeutils.utcnow())
results = db.instance_get_all_hung_in_rebooting(self.ctxt, 10)
self.assertEqual([], results)
def test_instance_update_with_expected_vm_state(self):
instance = self.create_instance_with_args(vm_state='foo')
db.instance_update(self.ctxt, instance['uuid'], {'host': 'h1',
'expected_vm_state': ('foo', 'bar')})
def test_instance_update_with_unexpected_vm_state(self):
instance = self.create_instance_with_args(vm_state='foo')
self.assertRaises(exception.UnexpectedVMStateError,
db.instance_update, self.ctxt, instance['uuid'],
{'host': 'h1', 'expected_vm_state': ('spam', 'bar')})
def test_instance_update_with_instance_uuid(self):
# test instance_update() works when an instance UUID is passed.
ctxt = context.get_admin_context()
# Create an instance with some metadata
values = {'metadata': {'host': 'foo', 'key1': 'meow'},
'system_metadata': {'original_image_ref': 'blah'}}
instance = db.instance_create(ctxt, values)
# Update the metadata
values = {'metadata': {'host': 'bar', 'key2': 'wuff'},
'system_metadata': {'original_image_ref': 'baz'}}
db.instance_update(ctxt, instance['uuid'], values)
# Retrieve the user-provided metadata to ensure it was successfully
# updated
instance_meta = db.instance_metadata_get(ctxt, instance['uuid'])
self.assertEqual('bar', instance_meta['host'])
self.assertEqual('wuff', instance_meta['key2'])
self.assertNotIn('key1', instance_meta)
# Retrieve the system metadata to ensure it was successfully updated
system_meta = db.instance_system_metadata_get(ctxt, instance['uuid'])
self.assertEqual('baz', system_meta['original_image_ref'])
def test_delete_instance_metadata_on_instance_destroy(self):
ctxt = context.get_admin_context()
# Create an instance with some metadata
values = {'metadata': {'host': 'foo', 'key1': 'meow'},
'system_metadata': {'original_image_ref': 'blah'}}
instance = db.instance_create(ctxt, values)
instance_meta = db.instance_metadata_get(ctxt, instance['uuid'])
self.assertEqual('foo', instance_meta['host'])
self.assertEqual('meow', instance_meta['key1'])
db.instance_destroy(ctxt, instance['uuid'])
instance_meta = db.instance_metadata_get(ctxt, instance['uuid'])
# Make sure instance metadata is deleted as well
self.assertEqual({}, instance_meta)
def test_instance_update_with_and_get_original(self):
instance = self.create_instance_with_args(vm_state='building')
(old_ref, new_ref) = db.instance_update_and_get_original(self.ctxt,
instance['uuid'], {'vm_state': 'needscoffee'})
self.assertEqual('building', old_ref['vm_state'])
self.assertEqual('needscoffee', new_ref['vm_state'])
def test_instance_update_and_get_original_metadata(self):
instance = self.create_instance_with_args()
columns_to_join = ['metadata']
(old_ref, new_ref) = db.instance_update_and_get_original(
self.ctxt, instance['uuid'], {'vm_state': 'needscoffee'},
columns_to_join=columns_to_join)
meta = utils.metadata_to_dict(new_ref['metadata'])
self.assertEqual(meta, self.sample_data['metadata'])
sys_meta = utils.metadata_to_dict(new_ref['system_metadata'])
self.assertEqual(sys_meta, {})
def test_instance_update_and_get_original_metadata_none_join(self):
instance = self.create_instance_with_args()
(old_ref, new_ref) = db.instance_update_and_get_original(
self.ctxt, instance['uuid'], {'metadata': {'mk1': 'mv3'}})
meta = utils.metadata_to_dict(new_ref['metadata'])
self.assertEqual(meta, {'mk1': 'mv3'})
def test_instance_update_unique_name(self):
context1 = context.RequestContext('user1', 'p1')
context2 = context.RequestContext('user2', 'p2')
inst1 = self.create_instance_with_args(context=context1,
project_id='p1',
hostname='fake_name1')
inst2 = self.create_instance_with_args(context=context1,
project_id='p1',
hostname='fake_name2')
inst3 = self.create_instance_with_args(context=context2,
project_id='p2',
hostname='fake_name3')
# osapi_compute_unique_server_name_scope is unset so this should work:
db.instance_update(context1, inst1['uuid'], {'hostname': 'fake_name2'})
db.instance_update(context1, inst1['uuid'], {'hostname': 'fake_name1'})
# With scope 'global' any duplicate should fail.
self.flags(osapi_compute_unique_server_name_scope='global')
self.assertRaises(exception.InstanceExists,
db.instance_update,
context1,
inst2['uuid'],
{'hostname': 'fake_name1'})
self.assertRaises(exception.InstanceExists,
db.instance_update,
context2,
inst3['uuid'],
{'hostname': 'fake_name1'})
# But we should definitely be able to update our name if we aren't
# really changing it.
db.instance_update(context1, inst1['uuid'], {'hostname': 'fake_NAME'})
# With scope 'project' a duplicate in the project should fail:
self.flags(osapi_compute_unique_server_name_scope='project')
self.assertRaises(exception.InstanceExists, db.instance_update,
context1, inst2['uuid'], {'hostname': 'fake_NAME'})
# With scope 'project' a duplicate in a different project should work:
self.flags(osapi_compute_unique_server_name_scope='project')
db.instance_update(context2, inst3['uuid'], {'hostname': 'fake_NAME'})
def _test_instance_update_updates_metadata(self, metadata_type):
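        """Set, extend and shrink metadata of the given type via instance_update."""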
instance = self.create_instance_with_args()
def set_and_check(meta):
inst = db.instance_update(self.ctxt, instance['uuid'],
{metadata_type: dict(meta)})
_meta = utils.metadata_to_dict(inst[metadata_type])
self.assertEqual(meta, _meta)
meta = {'speed': '88', 'units': 'MPH'}
set_and_check(meta)
meta['gigawatts'] = '1.21'
set_and_check(meta)
del meta['gigawatts']
set_and_check(meta)
def test_security_group_in_use(self):
instance = db.instance_create(self.ctxt, dict(host='foo'))
values = [
{'instances': [instance]},
{'instances': []},
]
def test_instance_update_updates_system_metadata(self):
# Ensure that system_metadata is updated during instance_update
self._test_instance_update_updates_metadata('system_metadata')
def test_instance_update_updates_metadata(self):
# Ensure that metadata is updated during instance_update
self._test_instance_update_updates_metadata('metadata')
def test_instance_floating_address_get_all(self):
ctxt = context.get_admin_context()
instance1 = db.instance_create(ctxt, {'host': 'h1', 'hostname': 'n1'})
instance2 = db.instance_create(ctxt, {'host': 'h2', 'hostname': 'n2'})
fixed_addresses = ['1.1.1.1', '1.1.1.2', '1.1.1.3']
float_addresses = ['2.1.1.1', '2.1.1.2', '2.1.1.3']
instance_uuids = [instance1['uuid'], instance1['uuid'],
instance2['uuid']]
for fixed_addr, float_addr, instance_uuid in zip(fixed_addresses,
float_addresses,
instance_uuids):
db.fixed_ip_create(ctxt, {'address': fixed_addr,
'instance_uuid': instance_uuid})
fixed_id = db.fixed_ip_get_by_address(ctxt, fixed_addr)['id']
db.floating_ip_create(ctxt,
{'address': float_addr,
'fixed_ip_id': fixed_id})
real_float_addresses = \
db.instance_floating_address_get_all(ctxt, instance_uuids[0])
self.assertEqual(set(float_addresses[:2]), set(real_float_addresses))
real_float_addresses = \
db.instance_floating_address_get_all(ctxt, instance_uuids[2])
self.assertEqual(set([float_addresses[2]]), set(real_float_addresses))
def test_instance_stringified_ips(self):
instance = self.create_instance_with_args()
instance = db.instance_update(
self.ctxt, instance['uuid'],
{'access_ip_v4': netaddr.IPAddress('1.2.3.4'),
'access_ip_v6': netaddr.IPAddress('::1')})
        self.assertIsInstance(instance['access_ip_v4'], basestring)
        self.assertIsInstance(instance['access_ip_v6'], basestring)
instance = db.instance_get_by_uuid(self.ctxt, instance['uuid'])
        self.assertIsInstance(instance['access_ip_v4'], basestring)
        self.assertIsInstance(instance['access_ip_v6'], basestring)
class InstanceMetadataTestCase(test.TestCase):
"""Tests for db.api.instance_metadata_* methods."""
def setUp(self):
super(InstanceMetadataTestCase, self).setUp()
self.ctxt = context.get_admin_context()
def test_instance_metadata_get(self):
instance = db.instance_create(self.ctxt, {'metadata':
{'key': 'value'}})
self.assertEqual({'key': 'value'}, db.instance_metadata_get(
self.ctxt, instance['uuid']))
def test_instance_metadata_delete(self):
instance = db.instance_create(self.ctxt,
{'metadata': {'key': 'val',
'key1': 'val1'}})
db.instance_metadata_delete(self.ctxt, instance['uuid'], 'key1')
self.assertEqual({'key': 'val'}, db.instance_metadata_get(
self.ctxt, instance['uuid']))
def test_instance_metadata_update(self):
instance = db.instance_create(self.ctxt, {'host': 'h1',
'project_id': 'p1', 'metadata': {'key': 'value'}})
        # This should add a new key/value pair
metadata = db.instance_metadata_update(
self.ctxt, instance['uuid'],
{'new_key': 'new_value'}, False)
metadata = db.instance_metadata_get(self.ctxt, instance['uuid'])
self.assertEqual(metadata, {'key': 'value', 'new_key': 'new_value'})
# This should leave only one key/value pair
metadata = db.instance_metadata_update(
self.ctxt, instance['uuid'],
{'new_key': 'new_value'}, True)
metadata = db.instance_metadata_get(self.ctxt, instance['uuid'])
self.assertEqual(metadata, {'new_key': 'new_value'})
class ServiceTestCase(test.TestCase, ModelsObjectComparatorMixin):
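    """Tests for db.api.service_* methods."""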
def setUp(self):
super(ServiceTestCase, self).setUp()
self.ctxt = context.get_admin_context()
def _get_base_values(self):
return {
'host': 'fake_host',
'binary': 'fake_binary',
'topic': 'fake_topic',
'report_count': 3,
'disabled': False
}
def _create_service(self, values):
v = self._get_base_values()
v.update(values)
return db.service_create(self.ctxt, v)
def test_service_create(self):
service = self._create_service({})
        self.assertIsNotNone(service['id'])
for key, value in self._get_base_values().iteritems():
self.assertEqual(value, service[key])
def test_service_destroy(self):
service1 = self._create_service({})
service2 = self._create_service({'host': 'fake_host2'})
db.service_destroy(self.ctxt, service1['id'])
self.assertRaises(exception.ServiceNotFound,
db.service_get, self.ctxt, service1['id'])
self._assertEqualObjects(db.service_get(self.ctxt, service2['id']),
service2, ignored_keys=['compute_node'])
def test_service_update(self):
service = self._create_service({})
new_values = {
'host': 'fake_host1',
'binary': 'fake_binary1',
'topic': 'fake_topic1',
'report_count': 4,
'disabled': True
}
db.service_update(self.ctxt, service['id'], new_values)
updated_service = db.service_get(self.ctxt, service['id'])
for key, value in new_values.iteritems():
self.assertEqual(value, updated_service[key])
def test_service_update_not_found_exception(self):
self.assertRaises(exception.ServiceNotFound,
db.service_update, self.ctxt, 100500, {})
def test_service_get(self):
service1 = self._create_service({})
self._create_service({'host': 'some_other_fake_host'})
real_service1 = db.service_get(self.ctxt, service1['id'])
self._assertEqualObjects(service1, real_service1,
ignored_keys=['compute_node'])
def test_service_get_with_compute_node(self):
service = self._create_service({})
compute_values = dict(vcpus=2, memory_mb=1024, local_gb=2048,
vcpus_used=0, memory_mb_used=0,
local_gb_used=0, free_ram_mb=1024,
free_disk_gb=2048, hypervisor_type="xen",
hypervisor_version=1, cpu_info="",
running_vms=0, current_workload=0,
service_id=service['id'])
compute = db.compute_node_create(self.ctxt, compute_values)
real_service = db.service_get(self.ctxt, service['id'])
real_compute = real_service['compute_node'][0]
self.assertEqual(compute['id'], real_compute['id'])
def test_service_get_not_found_exception(self):
self.assertRaises(exception.ServiceNotFound,
db.service_get, self.ctxt, 100500)
def test_service_get_by_host_and_topic(self):
service1 = self._create_service({'host': 'host1', 'topic': 'topic1'})
self._create_service({'host': 'host2', 'topic': 'topic2'})
real_service1 = db.service_get_by_host_and_topic(self.ctxt,
host='host1',
topic='topic1')
self._assertEqualObjects(service1, real_service1)
def test_service_get_all(self):
values = [
{'host': 'host1', 'topic': 'topic1'},
{'host': 'host2', 'topic': 'topic2'},
{'disabled': True}
]
services = [self._create_service(vals) for vals in values]
disabled_services = [services[-1]]
non_disabled_services = services[:-1]
compares = [
(services, db.service_get_all(self.ctxt)),
(disabled_services, db.service_get_all(self.ctxt, True)),
(non_disabled_services, db.service_get_all(self.ctxt, False))
]
for comp in compares:
self._assertEqualListsOfObjects(*comp)
def test_service_get_all_by_topic(self):
values = [
{'host': 'host1', 'topic': 't1'},
{'host': 'host2', 'topic': 't1'},
{'disabled': True, 'topic': 't1'},
{'host': 'host3', 'topic': 't2'}
]
services = [self._create_service(vals) for vals in values]
expected = services[:2]
real = db.service_get_all_by_topic(self.ctxt, 't1')
self._assertEqualListsOfObjects(expected, real)
def test_service_get_all_by_host(self):
values = [
{'host': 'host1', 'topic': 't11', 'binary': 'b11'},
{'host': 'host1', 'topic': 't12', 'binary': 'b12'},
{'host': 'host2', 'topic': 't1'},
{'host': 'host3', 'topic': 't1'}
]
services = [self._create_service(vals) for vals in values]
expected = services[:2]
real = db.service_get_all_by_host(self.ctxt, 'host1')
self._assertEqualListsOfObjects(expected, real)
def test_service_get_by_compute_host(self):
values = [
{'host': 'host1', 'topic': CONF.compute_topic},
{'host': 'host2', 'topic': 't1'},
{'host': 'host3', 'topic': CONF.compute_topic}
]
services = [self._create_service(vals) for vals in values]
real_service = db.service_get_by_compute_host(self.ctxt, 'host1')
self._assertEqualObjects(services[0], real_service,
ignored_keys=['compute_node'])
self.assertRaises(exception.ComputeHostNotFound,
db.service_get_by_compute_host,
self.ctxt, 'non-exists-host')
def test_service_get_by_compute_host_not_found(self):
self.assertRaises(exception.ComputeHostNotFound,
db.service_get_by_compute_host,
self.ctxt, 'non-exists-host')
def test_service_get_by_args(self):
values = [
{'host': 'host1', 'binary': 'a'},
{'host': 'host2', 'binary': 'b'}
]
services = [self._create_service(vals) for vals in values]
service1 = db.service_get_by_args(self.ctxt, 'host1', 'a')
self._assertEqualObjects(services[0], service1)
service2 = db.service_get_by_args(self.ctxt, 'host2', 'b')
self._assertEqualObjects(services[1], service2)
def test_service_get_by_args_not_found_exception(self):
self.assertRaises(exception.HostBinaryNotFound,
db.service_get_by_args,
self.ctxt, 'non-exists-host', 'a')
def test_service_binary_exists_exception(self):
db.service_create(self.ctxt, self._get_base_values())
values = self._get_base_values()
values.update({'topic': 'top1'})
self.assertRaises(exception.ServiceBinaryExists, db.service_create,
self.ctxt, values)
def test_service_topic_exists_exceptions(self):
db.service_create(self.ctxt, self._get_base_values())
values = self._get_base_values()
values.update({'binary': 'bin1'})
self.assertRaises(exception.ServiceTopicExists, db.service_create,
self.ctxt, values)
class BaseInstanceTypeTestCase(test.TestCase, ModelsObjectComparatorMixin):
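    """Base test case with helpers shared by the flavor (instance type) tests."""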
def setUp(self):
super(BaseInstanceTypeTestCase, self).setUp()
self.ctxt = context.get_admin_context()
self.user_ctxt = context.RequestContext('user', 'user')
def _get_base_values(self):
return {
'name': 'fake_name',
'memory_mb': 512,
'vcpus': 1,
'root_gb': 10,
'ephemeral_gb': 10,
'flavorid': 'fake_flavor',
'swap': 0,
'rxtx_factor': 0.5,
'vcpu_weight': 1,
'disabled': False,
'is_public': True
}
def _create_inst_type(self, values):
v = self._get_base_values()
v.update(values)
return db.flavor_create(self.ctxt, v)
class InstanceActionTestCase(test.TestCase, ModelsObjectComparatorMixin):
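    """Tests for db.api.action_* and db.api.action_event_* methods."""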
IGNORED_FIELDS = [
'id',
'created_at',
'updated_at',
'deleted_at',
'deleted'
]
def setUp(self):
super(InstanceActionTestCase, self).setUp()
self.ctxt = context.get_admin_context()
def _create_action_values(self, uuid, action='run_instance', ctxt=None):
if ctxt is None:
ctxt = self.ctxt
return {
'action': action,
'instance_uuid': uuid,
'request_id': ctxt.request_id,
'user_id': ctxt.user_id,
'project_id': ctxt.project_id,
'start_time': timeutils.utcnow(),
'message': 'action-message'
}
def _create_event_values(self, uuid, event='schedule',
ctxt=None, extra=None):
if ctxt is None:
ctxt = self.ctxt
values = {
'event': event,
'instance_uuid': uuid,
'request_id': ctxt.request_id,
'start_time': timeutils.utcnow()
}
if extra is not None:
values.update(extra)
return values
def _assertActionSaved(self, action, uuid):
"""Retrieve the action to ensure it was successfully added."""
actions = db.actions_get(self.ctxt, uuid)
self.assertEqual(1, len(actions))
self._assertEqualObjects(action, actions[0])
def _assertActionEventSaved(self, event, action_id):
# Retrieve the event to ensure it was successfully added
events = db.action_events_get(self.ctxt, action_id)
self.assertEqual(1, len(events))
self._assertEqualObjects(event, events[0],
['instance_uuid', 'request_id'])
def test_instance_action_start(self):
"""Create an instance action."""
uuid = str(stdlib_uuid.uuid4())
action_values = self._create_action_values(uuid)
action = db.action_start(self.ctxt, action_values)
ignored_keys = self.IGNORED_FIELDS + ['finish_time']
self._assertEqualObjects(action_values, action, ignored_keys)
self._assertActionSaved(action, uuid)
def test_instance_action_finish(self):
"""Create an instance action."""
uuid = str(stdlib_uuid.uuid4())
action_values = self._create_action_values(uuid)
db.action_start(self.ctxt, action_values)
action_values['finish_time'] = timeutils.utcnow()
action = db.action_finish(self.ctxt, action_values)
self._assertEqualObjects(action_values, action, self.IGNORED_FIELDS)
self._assertActionSaved(action, uuid)
def test_instance_action_finish_without_started_event(self):
"""Create an instance finish action."""
uuid = str(stdlib_uuid.uuid4())
action_values = self._create_action_values(uuid)
action_values['finish_time'] = timeutils.utcnow()
self.assertRaises(exception.InstanceActionNotFound, db.action_finish,
self.ctxt, action_values)
def test_instance_actions_get_by_instance(self):
"""Ensure we can get actions by UUID."""
uuid1 = str(stdlib_uuid.uuid4())
expected = []
action_values = self._create_action_values(uuid1)
action = db.action_start(self.ctxt, action_values)
expected.append(action)
action_values['action'] = 'resize'
action = db.action_start(self.ctxt, action_values)
expected.append(action)
# Create some extra actions
uuid2 = str(stdlib_uuid.uuid4())
ctxt2 = context.get_admin_context()
action_values = self._create_action_values(uuid2, 'reboot', ctxt2)
db.action_start(ctxt2, action_values)
db.action_start(ctxt2, action_values)
# Retrieve the action to ensure it was successfully added
actions = db.actions_get(self.ctxt, uuid1)
self._assertEqualListsOfObjects(expected, actions)
def test_instance_action_get_by_instance_and_action(self):
"""Ensure we can get an action by instance UUID and action id."""
ctxt2 = context.get_admin_context()
uuid1 = str(stdlib_uuid.uuid4())
uuid2 = str(stdlib_uuid.uuid4())
action_values = self._create_action_values(uuid1)
db.action_start(self.ctxt, action_values)
action_values['action'] = 'resize'
db.action_start(self.ctxt, action_values)
action_values = self._create_action_values(uuid2, 'reboot', ctxt2)
db.action_start(ctxt2, action_values)
db.action_start(ctxt2, action_values)
actions = db.actions_get(self.ctxt, uuid1)
request_id = actions[0]['request_id']
action = db.action_get_by_request_id(self.ctxt, uuid1, request_id)
self.assertEqual('run_instance', action['action'])
self.assertEqual(self.ctxt.request_id, action['request_id'])
def test_instance_action_event_start(self):
"""Create an instance action event."""
uuid = str(stdlib_uuid.uuid4())
action_values = self._create_action_values(uuid)
action = db.action_start(self.ctxt, action_values)
event_values = self._create_event_values(uuid)
event = db.action_event_start(self.ctxt, event_values)
event_values['action_id'] = action['id']
ignored = self.IGNORED_FIELDS + ['finish_time', 'traceback', 'result']
self._assertEqualObjects(event_values, event, ignored)
self._assertActionEventSaved(event, action['id'])
def test_instance_action_event_start_without_action(self):
"""Create an instance action event."""
uuid = str(stdlib_uuid.uuid4())
event_values = self._create_event_values(uuid)
self.assertRaises(exception.InstanceActionNotFound,
db.action_event_start, self.ctxt, event_values)
def test_instance_action_event_finish_without_started_event(self):
"""Finish an instance action event."""
uuid = str(stdlib_uuid.uuid4())
db.action_start(self.ctxt, self._create_action_values(uuid))
event_values = {
'finish_time': timeutils.utcnow() + datetime.timedelta(seconds=5),
'result': 'Success'
}
event_values = self._create_event_values(uuid, extra=event_values)
self.assertRaises(exception.InstanceActionEventNotFound,
db.action_event_finish, self.ctxt, event_values)
def test_instance_action_event_finish_without_action(self):
"""Finish an instance action event."""
uuid = str(stdlib_uuid.uuid4())
event_values = {
'finish_time': timeutils.utcnow() + datetime.timedelta(seconds=5),
'result': 'Success'
}
event_values = self._create_event_values(uuid, extra=event_values)
self.assertRaises(exception.InstanceActionNotFound,
db.action_event_finish, self.ctxt, event_values)
def test_instance_action_event_finish_success(self):
"""Finish an instance action event."""
uuid = str(stdlib_uuid.uuid4())
action = db.action_start(self.ctxt, self._create_action_values(uuid))
db.action_event_start(self.ctxt, self._create_event_values(uuid))
event_values = {
'finish_time': timeutils.utcnow() + datetime.timedelta(seconds=5),
'result': 'Success'
}
event_values = self._create_event_values(uuid, extra=event_values)
event = db.action_event_finish(self.ctxt, event_values)
self._assertActionEventSaved(event, action['id'])
action = db.action_get_by_request_id(self.ctxt, uuid,
self.ctxt.request_id)
self.assertNotEqual('Error', action['message'])
def test_instance_action_event_finish_error(self):
"""Finish an instance action event with an error."""
uuid = str(stdlib_uuid.uuid4())
action = db.action_start(self.ctxt, self._create_action_values(uuid))
db.action_event_start(self.ctxt, self._create_event_values(uuid))
event_values = {
'finish_time': timeutils.utcnow() + datetime.timedelta(seconds=5),
'result': 'Error'
}
event_values = self._create_event_values(uuid, extra=event_values)
event = db.action_event_finish(self.ctxt, event_values)
self._assertActionEventSaved(event, action['id'])
action = db.action_get_by_request_id(self.ctxt, uuid,
self.ctxt.request_id)
self.assertEqual('Error', action['message'])
def test_instance_action_and_event_start_string_time(self):
"""Create an instance action and event with a string start_time."""
uuid = str(stdlib_uuid.uuid4())
action = db.action_start(self.ctxt, self._create_action_values(uuid))
event_values = {'start_time': timeutils.strtime(timeutils.utcnow())}
event_values = self._create_event_values(uuid, extra=event_values)
event = db.action_event_start(self.ctxt, event_values)
self._assertActionEventSaved(event, action['id'])
def test_instance_action_event_get_by_id(self):
"""Get a specific instance action event."""
ctxt2 = context.get_admin_context()
uuid1 = str(stdlib_uuid.uuid4())
uuid2 = str(stdlib_uuid.uuid4())
action = db.action_start(self.ctxt,
self._create_action_values(uuid1))
db.action_start(ctxt2,
self._create_action_values(uuid2, 'reboot', ctxt2))
event = db.action_event_start(self.ctxt,
self._create_event_values(uuid1))
event_values = self._create_event_values(uuid2, 'reboot', ctxt2)
db.action_event_start(ctxt2, event_values)
# Retrieve the event to ensure it was successfully added
saved_event = db.action_event_get_by_id(self.ctxt,
action['id'],
event['id'])
self._assertEqualObjects(event, saved_event,
['instance_uuid', 'request_id'])
class InstanceFaultTestCase(test.TestCase, ModelsObjectComparatorMixin):
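    """Tests for db.api.instance_fault_* methods."""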
def setUp(self):
super(InstanceFaultTestCase, self).setUp()
self.ctxt = context.get_admin_context()
def _create_fault_values(self, uuid, code=404):
return {
'message': 'message',
'details': 'detail',
'instance_uuid': uuid,
'code': code,
'host': 'localhost'
}
def test_instance_fault_create(self):
"""Ensure we can create an instance fault."""
uuid = str(stdlib_uuid.uuid4())
        # Ensure no faults are registered for this instance
faults = db.instance_fault_get_by_instance_uuids(self.ctxt, [uuid])
self.assertEqual(0, len(faults[uuid]))
# Create a fault
fault_values = self._create_fault_values(uuid)
fault = db.instance_fault_create(self.ctxt, fault_values)
ignored_keys = ['deleted', 'created_at', 'updated_at',
'deleted_at', 'id']
self._assertEqualObjects(fault_values, fault, ignored_keys)
# Retrieve the fault to ensure it was successfully added
faults = db.instance_fault_get_by_instance_uuids(self.ctxt, [uuid])
self.assertEqual(1, len(faults[uuid]))
self._assertEqualObjects(fault, faults[uuid][0])
def test_instance_fault_get_by_instance(self):
"""Ensure we can retrieve faults for instance."""
uuids = [str(stdlib_uuid.uuid4()), str(stdlib_uuid.uuid4())]
fault_codes = [404, 500]
expected = {}
# Create faults
for uuid in uuids:
expected[uuid] = []
for code in fault_codes:
fault_values = self._create_fault_values(uuid, code)
fault = db.instance_fault_create(self.ctxt, fault_values)
expected[uuid].append(fault)
# Ensure faults are saved
faults = db.instance_fault_get_by_instance_uuids(self.ctxt, uuids)
self.assertEqual(len(expected), len(faults))
for uuid in uuids:
self._assertEqualListsOfObjects(expected[uuid], faults[uuid])
def test_instance_faults_get_by_instance_uuids_no_faults(self):
uuid = str(stdlib_uuid.uuid4())
        # An empty list should be returned when no faults exist.
faults = db.instance_fault_get_by_instance_uuids(self.ctxt, [uuid])
expected = {uuid: []}
self.assertEqual(expected, faults)
def test_instance_faults_get_by_instance_uuids_no_uuids(self):
self.mox.StubOutWithMock(query.Query, 'filter')
self.mox.ReplayAll()
faults = db.instance_fault_get_by_instance_uuids(self.ctxt, [])
self.assertEqual({}, faults)
class InstanceTypeTestCase(BaseInstanceTypeTestCase):
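    """Tests for db.api.flavor_* methods."""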
def test_flavor_create(self):
inst_type = self._create_inst_type({})
ignored_keys = ['id', 'deleted', 'deleted_at', 'updated_at',
'created_at', 'extra_specs']
        self.assertIsNotNone(inst_type['id'])
self._assertEqualObjects(inst_type, self._get_base_values(),
ignored_keys)
def test_instance_type_destroy(self):
specs1 = {'a': '1', 'b': '2'}
inst_type1 = self._create_inst_type({'name': 'name1', 'flavorid': 'a1',
'extra_specs': specs1})
specs2 = {'c': '4', 'd': '3'}
inst_type2 = self._create_inst_type({'name': 'name2', 'flavorid': 'a2',
'extra_specs': specs2})
db.flavor_destroy(self.ctxt, 'name1')
self.assertRaises(exception.InstanceTypeNotFound,
db.flavor_get, self.ctxt, inst_type1['id'])
real_specs1 = db.flavor_extra_specs_get(self.ctxt,
inst_type1['flavorid'])
self._assertEqualObjects(real_specs1, {})
r_inst_type2 = db.flavor_get(self.ctxt, inst_type2['id'])
self._assertEqualObjects(inst_type2, r_inst_type2, 'extra_specs')
def test_instance_type_destroy_not_found(self):
self.assertRaises(exception.InstanceTypeNotFound,
db.flavor_destroy, self.ctxt, 'nonexists')
def test_flavor_create_duplicate_name(self):
self._create_inst_type({})
self.assertRaises(exception.InstanceTypeExists,
self._create_inst_type,
{'flavorid': 'some_random_flavor'})
def test_flavor_create_duplicate_flavorid(self):
self._create_inst_type({})
self.assertRaises(exception.InstanceTypeIdExists,
self._create_inst_type,
{'name': 'some_random_name'})
def test_flavor_create_with_extra_specs(self):
extra_specs = dict(a='abc', b='def', c='ghi')
inst_type = self._create_inst_type({'extra_specs': extra_specs})
ignored_keys = ['id', 'deleted', 'deleted_at', 'updated_at',
'created_at', 'extra_specs']
self._assertEqualObjects(inst_type, self._get_base_values(),
ignored_keys)
self._assertEqualObjects(extra_specs, inst_type['extra_specs'])
def test_instance_type_get_all(self):
# NOTE(boris-42): Remove base instance types
for it in db.flavor_get_all(self.ctxt):
db.flavor_destroy(self.ctxt, it['name'])
instance_types = [
{'root_gb': 600, 'memory_mb': 100, 'disabled': True,
'is_public': True, 'name': 'a1', 'flavorid': 'f1'},
{'root_gb': 500, 'memory_mb': 200, 'disabled': True,
'is_public': True, 'name': 'a2', 'flavorid': 'f2'},
{'root_gb': 400, 'memory_mb': 300, 'disabled': False,
'is_public': True, 'name': 'a3', 'flavorid': 'f3'},
{'root_gb': 300, 'memory_mb': 400, 'disabled': False,
'is_public': False, 'name': 'a4', 'flavorid': 'f4'},
{'root_gb': 200, 'memory_mb': 500, 'disabled': True,
'is_public': False, 'name': 'a5', 'flavorid': 'f5'},
{'root_gb': 100, 'memory_mb': 600, 'disabled': True,
'is_public': False, 'name': 'a6', 'flavorid': 'f6'}
]
instance_types = [self._create_inst_type(it) for it in instance_types]
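        # Python-side predicates mirroring the DB API filter semantics.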
lambda_filters = {
'min_memory_mb': lambda it, v: it['memory_mb'] >= v,
'min_root_gb': lambda it, v: it['root_gb'] >= v,
'disabled': lambda it, v: it['disabled'] == v,
'is_public': lambda it, v: (v is None or it['is_public'] == v)
}
mem_filts = [{'min_memory_mb': x} for x in [100, 350, 550, 650]]
root_filts = [{'min_root_gb': x} for x in [100, 350, 550, 650]]
disabled_filts = [{'disabled': x} for x in [True, False]]
is_public_filts = [{'is_public': x} for x in [True, False, None]]
def assert_multi_filter_instance_type_get(filters=None):
if filters is None:
filters = {}
expected_it = instance_types
for name, value in filters.iteritems():
filt = lambda it: lambda_filters[name](it, value)
expected_it = filter(filt, expected_it)
real_it = db.flavor_get_all(self.ctxt, filters=filters)
self._assertEqualListsOfObjects(expected_it, real_it)
        # No filter
assert_multi_filter_instance_type_get()
        # Test with a single filter at a time
for filt in mem_filts:
assert_multi_filter_instance_type_get(filt)
for filt in root_filts:
assert_multi_filter_instance_type_get(filt)
for filt in disabled_filts:
assert_multi_filter_instance_type_get(filt)
for filt in is_public_filts:
assert_multi_filter_instance_type_get(filt)
        # Test all filters together
for mem in mem_filts:
for root in root_filts:
for disabled in disabled_filts:
for is_public in is_public_filts:
filts = [f.items() for f in
[mem, root, disabled, is_public]]
filts = dict(reduce(lambda x, y: x + y, filts, []))
assert_multi_filter_instance_type_get(filts)
def test_flavor_get_all_limit_sort(self):
def assert_sorted_by_key_dir(sort_key, asc=True):
sort_dir = 'asc' if asc else 'desc'
            results = db.flavor_get_all(self.ctxt, sort_key=sort_key,
sort_dir=sort_dir)
# Manually sort the results as we would expect them
expected_results = sorted(results,
                                      key=lambda item: item[sort_key],
reverse=(not asc))
self.assertEqual(expected_results, results)
def assert_sorted_by_key_both_dir(sort_key):
assert_sorted_by_key_dir(sort_key, True)
assert_sorted_by_key_dir(sort_key, False)
for attr in ['memory_mb', 'root_gb', 'deleted_at', 'name', 'deleted',
'created_at', 'ephemeral_gb', 'updated_at', 'disabled',
'vcpus', 'swap', 'rxtx_factor', 'is_public', 'flavorid',
'vcpu_weight', 'id']:
assert_sorted_by_key_both_dir(attr)
def test_flavor_get_all_limit(self):
limited_flavors = db.flavor_get_all(self.ctxt, limit=2)
self.assertEqual(2, len(limited_flavors))
def test_flavor_get_all_list_marker(self):
all_flavors = db.flavor_get_all(self.ctxt)
# Set the 3rd result as the marker
marker_flavorid = all_flavors[2]['flavorid']
marked_flavors = db.flavor_get_all(self.ctxt, marker=marker_flavorid)
# We expect everything /after/ the 3rd result
expected_results = all_flavors[3:]
self.assertEqual(expected_results, marked_flavors)
def test_instance_type_get(self):
inst_types = [{'name': 'abc', 'flavorid': '123'},
{'name': 'def', 'flavorid': '456'},
{'name': 'ghi', 'flavorid': '789'}]
inst_types = [self._create_inst_type(t) for t in inst_types]
for inst_type in inst_types:
inst_type_by_id = db.flavor_get(self.ctxt, inst_type['id'])
self._assertEqualObjects(inst_type, inst_type_by_id)
def test_instance_type_get_non_public(self):
inst_type = self._create_inst_type({'name': 'abc', 'flavorid': '123',
'is_public': False})
# Admin can see it
inst_type_by_id = db.flavor_get(self.ctxt, inst_type['id'])
self._assertEqualObjects(inst_type, inst_type_by_id)
# Regular user can not
self.assertRaises(exception.InstanceTypeNotFound, db.flavor_get,
self.user_ctxt, inst_type['id'])
# Regular user can see it after being granted access
db.flavor_access_add(self.ctxt, inst_type['flavorid'],
self.user_ctxt.project_id)
inst_type_by_id = db.flavor_get(self.user_ctxt, inst_type['id'])
self._assertEqualObjects(inst_type, inst_type_by_id)
def test_instance_type_get_by_name(self):
inst_types = [{'name': 'abc', 'flavorid': '123'},
{'name': 'def', 'flavorid': '456'},
{'name': 'ghi', 'flavorid': '789'}]
inst_types = [self._create_inst_type(t) for t in inst_types]
for inst_type in inst_types:
inst_type_by_name = db.flavor_get_by_name(self.ctxt,
inst_type['name'])
self._assertEqualObjects(inst_type, inst_type_by_name)
def test_instance_type_get_by_name_not_found(self):
self._create_inst_type({})
self.assertRaises(exception.InstanceTypeNotFoundByName,
db.flavor_get_by_name, self.ctxt, 'nonexists')
def test_instance_type_get_by_name_non_public(self):
inst_type = self._create_inst_type({'name': 'abc', 'flavorid': '123',
'is_public': False})
# Admin can see it
inst_type_by_name = db.flavor_get_by_name(self.ctxt,
inst_type['name'])
self._assertEqualObjects(inst_type, inst_type_by_name)
# Regular user can not
self.assertRaises(exception.InstanceTypeNotFoundByName,
db.flavor_get_by_name, self.user_ctxt,
inst_type['name'])
# Regular user can see it after being granted access
db.flavor_access_add(self.ctxt, inst_type['flavorid'],
self.user_ctxt.project_id)
inst_type_by_name = db.flavor_get_by_name(self.user_ctxt,
inst_type['name'])
self._assertEqualObjects(inst_type, inst_type_by_name)
def test_instance_type_get_by_flavor_id(self):
inst_types = [{'name': 'abc', 'flavorid': '123'},
{'name': 'def', 'flavorid': '456'},
{'name': 'ghi', 'flavorid': '789'}]
inst_types = [self._create_inst_type(t) for t in inst_types]
for inst_type in inst_types:
params = (self.ctxt, inst_type['flavorid'])
inst_type_by_flavorid = db.flavor_get_by_flavor_id(*params)
self._assertEqualObjects(inst_type, inst_type_by_flavorid)
def test_instance_type_get_by_flavor_not_found(self):
self._create_inst_type({})
self.assertRaises(exception.FlavorNotFound,
db.flavor_get_by_flavor_id,
self.ctxt, 'nonexists')
def test_instance_type_get_by_flavor_id_non_public(self):
inst_type = self._create_inst_type({'name': 'abc', 'flavorid': '123',
'is_public': False})
# Admin can see it
inst_type_by_fid = db.flavor_get_by_flavor_id(self.ctxt,
inst_type['flavorid'])
self._assertEqualObjects(inst_type, inst_type_by_fid)
# Regular user can not
self.assertRaises(exception.FlavorNotFound,
db.flavor_get_by_flavor_id, self.user_ctxt,
inst_type['flavorid'])
# Regular user can see it after being granted access
db.flavor_access_add(self.ctxt, inst_type['flavorid'],
self.user_ctxt.project_id)
inst_type_by_fid = db.flavor_get_by_flavor_id(self.user_ctxt,
inst_type['flavorid'])
self._assertEqualObjects(inst_type, inst_type_by_fid)
def test_instance_type_get_by_flavor_id_deleted(self):
inst_type = self._create_inst_type({'name': 'abc', 'flavorid': '123'})
db.flavor_destroy(self.ctxt, 'abc')
inst_type_by_fid = db.flavor_get_by_flavor_id(self.ctxt,
inst_type['flavorid'], read_deleted='yes')
self.assertEqual(inst_type['id'], inst_type_by_fid['id'])
class InstanceTypeExtraSpecsTestCase(BaseInstanceTypeTestCase):
def setUp(self):
super(InstanceTypeExtraSpecsTestCase, self).setUp()
values = ({'name': 'n1', 'flavorid': 'f1',
'extra_specs': dict(a='a', b='b', c='c')},
{'name': 'n2', 'flavorid': 'f2',
'extra_specs': dict(d='d', e='e', f='f')})
# NOTE(boris-42): We have already tested flavor_create method
# with extra_specs in InstanceTypeTestCase.
self.inst_types = [self._create_inst_type(v) for v in values]
def test_instance_type_extra_specs_get(self):
for it in self.inst_types:
real_specs = db.flavor_extra_specs_get(self.ctxt,
it['flavorid'])
self._assertEqualObjects(it['extra_specs'], real_specs)
def test_instance_type_extra_specs_get_item(self):
expected = dict(f1=dict(a='a', b='b', c='c'),
f2=dict(d='d', e='e', f='f'))
for flavor, specs in expected.iteritems():
for key, val in specs.iteritems():
spec = db.flavor_extra_specs_get_item(self.ctxt, flavor,
key)
self.assertEqual(spec[key], val)
def test_instance_type_extra_specs_delete(self):
for it in self.inst_types:
specs = it['extra_specs']
key = specs.keys()[0]
del specs[key]
db.flavor_extra_specs_delete(self.ctxt, it['flavorid'], key)
real_specs = db.flavor_extra_specs_get(self.ctxt,
it['flavorid'])
self._assertEqualObjects(it['extra_specs'], real_specs)
def test_instance_type_extra_specs_delete_failed(self):
for it in self.inst_types:
self.assertRaises(exception.InstanceTypeExtraSpecsNotFound,
db.flavor_extra_specs_delete,
self.ctxt, it['flavorid'], 'dummy')
def test_instance_type_extra_specs_update_or_create(self):
for it in self.inst_types:
current_specs = it['extra_specs']
current_specs.update(dict(b='b1', c='c1', d='d1', e='e1'))
params = (self.ctxt, it['flavorid'], current_specs)
db.flavor_extra_specs_update_or_create(*params)
real_specs = db.flavor_extra_specs_get(self.ctxt,
it['flavorid'])
self._assertEqualObjects(current_specs, real_specs)
def test_instance_type_extra_specs_update_or_create_flavor_not_found(self):
self.assertRaises(exception.FlavorNotFound,
db.flavor_extra_specs_update_or_create,
self.ctxt, 'nonexists', {})
def test_instance_type_extra_specs_update_or_create_retry(self):
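        # Stub out _instance_type_get_id_from_flavor so it always raises
        # DBDuplicateEntry and counts its calls; update_or_create should then
        # retry the requested number of times before giving up.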
def counted():
def get_id(context, flavorid, session):
get_id.counter += 1
raise db_exc.DBDuplicateEntry
get_id.counter = 0
return get_id
get_id = counted()
self.stubs.Set(sqlalchemy_api,
'_instance_type_get_id_from_flavor', get_id)
        self.assertRaises(db_exc.DBDuplicateEntry,
                          sqlalchemy_api.flavor_extra_specs_update_or_create,
                          self.ctxt, 1, {}, 5)
self.assertEqual(get_id.counter, 5)
class InstanceTypeAccessTestCase(BaseInstanceTypeTestCase):
def _create_inst_type_access(self, instance_type_id, project_id):
return db.flavor_access_add(self.ctxt, instance_type_id,
project_id)
def test_instance_type_access_get_by_flavor_id(self):
inst_types = ({'name': 'n1', 'flavorid': 'f1'},
{'name': 'n2', 'flavorid': 'f2'})
it1, it2 = tuple((self._create_inst_type(v) for v in inst_types))
access_it1 = [self._create_inst_type_access(it1['flavorid'], 'pr1'),
self._create_inst_type_access(it1['flavorid'], 'pr2')]
access_it2 = [self._create_inst_type_access(it2['flavorid'], 'pr1')]
for it, access_it in zip((it1, it2), (access_it1, access_it2)):
params = (self.ctxt, it['flavorid'])
real_access_it = db.flavor_access_get_by_flavor_id(*params)
self._assertEqualListsOfObjects(access_it, real_access_it)
def test_instance_type_access_get_by_flavor_id_flavor_not_found(self):
self.assertRaises(exception.FlavorNotFound,
db.flavor_get_by_flavor_id,
self.ctxt, 'nonexists')
def test_instance_type_access_add(self):
inst_type = self._create_inst_type({'flavorid': 'f1'})
project_id = 'p1'
access = self._create_inst_type_access(inst_type['flavorid'],
project_id)
# NOTE(boris-42): Check that instance_type_access_add doesn't fail and
# returns correct value. This is enough because other
# logic is checked by other methods.
        self.assertIsNotNone(access['id'])
self.assertEqual(access['instance_type_id'], inst_type['id'])
self.assertEqual(access['project_id'], project_id)
def test_instance_type_access_add_to_non_existing_flavor(self):
self.assertRaises(exception.FlavorNotFound,
self._create_inst_type_access,
'nonexists', 'does_not_matter')
def test_instance_type_access_add_duplicate_project_id_flavor(self):
inst_type = self._create_inst_type({'flavorid': 'f1'})
params = (inst_type['flavorid'], 'p1')
self._create_inst_type_access(*params)
self.assertRaises(exception.FlavorAccessExists,
self._create_inst_type_access, *params)
def test_instance_type_access_remove(self):
inst_types = ({'name': 'n1', 'flavorid': 'f1'},
{'name': 'n2', 'flavorid': 'f2'})
it1, it2 = tuple((self._create_inst_type(v) for v in inst_types))
access_it1 = [self._create_inst_type_access(it1['flavorid'], 'pr1'),
self._create_inst_type_access(it1['flavorid'], 'pr2')]
access_it2 = [self._create_inst_type_access(it2['flavorid'], 'pr1')]
db.flavor_access_remove(self.ctxt, it1['flavorid'],
access_it1[1]['project_id'])
for it, access_it in zip((it1, it2), (access_it1[:1], access_it2)):
params = (self.ctxt, it['flavorid'])
real_access_it = db.flavor_access_get_by_flavor_id(*params)
self._assertEqualListsOfObjects(access_it, real_access_it)
def test_instance_type_access_remove_flavor_not_found(self):
self.assertRaises(exception.FlavorNotFound,
db.flavor_access_remove,
self.ctxt, 'nonexists', 'does_not_matter')
def test_instance_type_access_remove_access_not_found(self):
inst_type = self._create_inst_type({'flavorid': 'f1'})
params = (inst_type['flavorid'], 'p1')
self._create_inst_type_access(*params)
self.assertRaises(exception.FlavorAccessNotFound,
db.flavor_access_remove,
self.ctxt, inst_type['flavorid'], 'p2')
def test_instance_type_access_removed_after_instance_type_destroy(self):
inst_type1 = self._create_inst_type({'flavorid': 'f1', 'name': 'n1'})
inst_type2 = self._create_inst_type({'flavorid': 'f2', 'name': 'n2'})
values = [
(inst_type1['flavorid'], 'p1'),
(inst_type1['flavorid'], 'p2'),
(inst_type2['flavorid'], 'p3')
]
for v in values:
self._create_inst_type_access(*v)
db.flavor_destroy(self.ctxt, inst_type1['name'])
p = (self.ctxt, inst_type1['flavorid'])
self.assertEqual(0, len(db.flavor_access_get_by_flavor_id(*p)))
p = (self.ctxt, inst_type2['flavorid'])
self.assertEqual(1, len(db.flavor_access_get_by_flavor_id(*p)))
db.flavor_destroy(self.ctxt, inst_type2['name'])
self.assertEqual(0, len(db.flavor_access_get_by_flavor_id(*p)))
class FixedIPTestCase(BaseInstanceTypeTestCase):
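    # Create an instance/network pair plus fixed IPs in several states
    # (old and unallocated, still allocated, wrong network, updated too
    # recently) so the disassociate-by-timeout tests can check how many
    # rows get released.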
def _timeout_test(self, ctxt, timeout, multi_host):
instance = db.instance_create(ctxt, dict(host='foo'))
net = db.network_create_safe(ctxt, dict(multi_host=multi_host,
host='bar'))
old = timeout - datetime.timedelta(seconds=5)
new = timeout + datetime.timedelta(seconds=5)
# should deallocate
db.fixed_ip_create(ctxt, dict(allocated=False,
instance_uuid=instance['uuid'],
network_id=net['id'],
updated_at=old))
# still allocated
db.fixed_ip_create(ctxt, dict(allocated=True,
instance_uuid=instance['uuid'],
network_id=net['id'],
updated_at=old))
# wrong network
db.fixed_ip_create(ctxt, dict(allocated=False,
instance_uuid=instance['uuid'],
network_id=None,
updated_at=old))
# too new
db.fixed_ip_create(ctxt, dict(allocated=False,
instance_uuid=instance['uuid'],
network_id=None,
updated_at=new))
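    # Make the next query.Query.first() call raise DataError so the
    # invalid-address/invalid-id error paths can be exercised.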
def mock_db_query_first_to_raise_data_error_exception(self):
self.mox.StubOutWithMock(query.Query, 'first')
query.Query.first().AndRaise(exc.DataError(mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg()))
self.mox.ReplayAll()
def test_fixed_ip_disassociate_all_by_timeout_single_host(self):
now = timeutils.utcnow()
self._timeout_test(self.ctxt, now, False)
result = db.fixed_ip_disassociate_all_by_timeout(self.ctxt, 'foo', now)
self.assertEqual(result, 0)
result = db.fixed_ip_disassociate_all_by_timeout(self.ctxt, 'bar', now)
self.assertEqual(result, 1)
def test_fixed_ip_disassociate_all_by_timeout_multi_host(self):
now = timeutils.utcnow()
self._timeout_test(self.ctxt, now, True)
result = db.fixed_ip_disassociate_all_by_timeout(self.ctxt, 'foo', now)
self.assertEqual(result, 1)
result = db.fixed_ip_disassociate_all_by_timeout(self.ctxt, 'bar', now)
self.assertEqual(result, 0)
def test_fixed_ip_get_by_floating_address(self):
fixed_ip = db.fixed_ip_create(self.ctxt, {'address': '192.168.0.2'})
values = {'address': '8.7.6.5',
'fixed_ip_id': fixed_ip['id']}
floating = db.floating_ip_create(self.ctxt, values)['address']
fixed_ip_ref = db.fixed_ip_get_by_floating_address(self.ctxt, floating)
self._assertEqualObjects(fixed_ip, fixed_ip_ref)
def test_fixed_ip_get_by_host(self):
host_ips = {
'host1': ['1.1.1.1', '1.1.1.2', '1.1.1.3'],
'host2': ['1.1.1.4', '1.1.1.5'],
'host3': ['1.1.1.6']
}
for host, ips in host_ips.iteritems():
for ip in ips:
instance_uuid = self._create_instance(host=host)
db.fixed_ip_create(self.ctxt, {'address': ip})
db.fixed_ip_associate(self.ctxt, ip, instance_uuid)
for host, ips in host_ips.iteritems():
ips_on_host = map(lambda x: x['address'],
db.fixed_ip_get_by_host(self.ctxt, host))
self._assertEqualListsOfPrimitivesAsSets(ips_on_host, ips)
def test_fixed_ip_get_by_network_host_not_found_exception(self):
self.assertRaises(
exception.FixedIpNotFoundForNetworkHost,
db.fixed_ip_get_by_network_host,
self.ctxt, 1, 'ignore')
def test_fixed_ip_get_by_network_host_fixed_ip_found(self):
db.fixed_ip_create(self.ctxt, dict(network_id=1, host='host'))
fip = db.fixed_ip_get_by_network_host(self.ctxt, 1, 'host')
        self.assertEqual(1, fip['network_id'])
        self.assertEqual('host', fip['host'])
def _create_instance(self, **kwargs):
instance = db.instance_create(self.ctxt, kwargs)
return instance['uuid']
def test_fixed_ip_get_by_instance_fixed_ip_found(self):
instance_uuid = self._create_instance()
FIXED_IP_ADDRESS = '192.168.1.5'
db.fixed_ip_create(self.ctxt, dict(
instance_uuid=instance_uuid, address=FIXED_IP_ADDRESS))
ips_list = db.fixed_ip_get_by_instance(self.ctxt, instance_uuid)
self._assertEqualListsOfPrimitivesAsSets([FIXED_IP_ADDRESS],
[ips_list[0].address])
def test_fixed_ip_get_by_instance_multiple_fixed_ips_found(self):
instance_uuid = self._create_instance()
FIXED_IP_ADDRESS_1 = '192.168.1.5'
db.fixed_ip_create(self.ctxt, dict(
instance_uuid=instance_uuid, address=FIXED_IP_ADDRESS_1))
FIXED_IP_ADDRESS_2 = '192.168.1.6'
db.fixed_ip_create(self.ctxt, dict(
instance_uuid=instance_uuid, address=FIXED_IP_ADDRESS_2))
ips_list = db.fixed_ip_get_by_instance(self.ctxt, instance_uuid)
self._assertEqualListsOfPrimitivesAsSets(
[FIXED_IP_ADDRESS_1, FIXED_IP_ADDRESS_2],
[ips_list[0].address, ips_list[1].address])
def test_fixed_ip_get_by_instance_inappropriate_ignored(self):
instance_uuid = self._create_instance()
FIXED_IP_ADDRESS_1 = '192.168.1.5'
db.fixed_ip_create(self.ctxt, dict(
instance_uuid=instance_uuid, address=FIXED_IP_ADDRESS_1))
FIXED_IP_ADDRESS_2 = '192.168.1.6'
db.fixed_ip_create(self.ctxt, dict(
instance_uuid=instance_uuid, address=FIXED_IP_ADDRESS_2))
another_instance = db.instance_create(self.ctxt, {})
db.fixed_ip_create(self.ctxt, dict(
instance_uuid=another_instance['uuid'], address="192.168.1.7"))
ips_list = db.fixed_ip_get_by_instance(self.ctxt, instance_uuid)
self._assertEqualListsOfPrimitivesAsSets(
[FIXED_IP_ADDRESS_1, FIXED_IP_ADDRESS_2],
[ips_list[0].address, ips_list[1].address])
def test_fixed_ip_get_by_instance_not_found_exception(self):
instance_uuid = self._create_instance()
self.assertRaises(exception.FixedIpNotFoundForInstance,
db.fixed_ip_get_by_instance,
self.ctxt, instance_uuid)
def test_fixed_ips_by_virtual_interface_fixed_ip_found(self):
instance_uuid = self._create_instance()
vif = db.virtual_interface_create(
self.ctxt, dict(instance_uuid=instance_uuid))
FIXED_IP_ADDRESS = '192.168.1.5'
db.fixed_ip_create(self.ctxt, dict(
virtual_interface_id=vif.id, address=FIXED_IP_ADDRESS))
ips_list = db.fixed_ips_by_virtual_interface(self.ctxt, vif.id)
self._assertEqualListsOfPrimitivesAsSets([FIXED_IP_ADDRESS],
[ips_list[0].address])
def test_fixed_ips_by_virtual_interface_multiple_fixed_ips_found(self):
instance_uuid = self._create_instance()
vif = db.virtual_interface_create(
self.ctxt, dict(instance_uuid=instance_uuid))
FIXED_IP_ADDRESS_1 = '192.168.1.5'
db.fixed_ip_create(self.ctxt, dict(
virtual_interface_id=vif.id, address=FIXED_IP_ADDRESS_1))
FIXED_IP_ADDRESS_2 = '192.168.1.6'
db.fixed_ip_create(self.ctxt, dict(
virtual_interface_id=vif.id, address=FIXED_IP_ADDRESS_2))
ips_list = db.fixed_ips_by_virtual_interface(self.ctxt, vif.id)
self._assertEqualListsOfPrimitivesAsSets(
[FIXED_IP_ADDRESS_1, FIXED_IP_ADDRESS_2],
[ips_list[0].address, ips_list[1].address])
def test_fixed_ips_by_virtual_interface_inappropriate_ignored(self):
instance_uuid = self._create_instance()
vif = db.virtual_interface_create(
self.ctxt, dict(instance_uuid=instance_uuid))
FIXED_IP_ADDRESS_1 = '192.168.1.5'
db.fixed_ip_create(self.ctxt, dict(
virtual_interface_id=vif.id, address=FIXED_IP_ADDRESS_1))
FIXED_IP_ADDRESS_2 = '192.168.1.6'
db.fixed_ip_create(self.ctxt, dict(
virtual_interface_id=vif.id, address=FIXED_IP_ADDRESS_2))
another_vif = db.virtual_interface_create(
self.ctxt, dict(instance_uuid=instance_uuid))
db.fixed_ip_create(self.ctxt, dict(
virtual_interface_id=another_vif.id, address="192.168.1.7"))
ips_list = db.fixed_ips_by_virtual_interface(self.ctxt, vif.id)
self._assertEqualListsOfPrimitivesAsSets(
[FIXED_IP_ADDRESS_1, FIXED_IP_ADDRESS_2],
[ips_list[0].address, ips_list[1].address])
def test_fixed_ips_by_virtual_interface_no_ip_found(self):
instance_uuid = self._create_instance()
vif = db.virtual_interface_create(
self.ctxt, dict(instance_uuid=instance_uuid))
ips_list = db.fixed_ips_by_virtual_interface(self.ctxt, vif.id)
        self.assertEqual(0, len(ips_list))
def create_fixed_ip(self, **params):
default_params = {'address': '192.168.0.1'}
default_params.update(params)
return db.fixed_ip_create(self.ctxt, default_params)['address']
def test_fixed_ip_associate_fails_if_ip_not_in_network(self):
instance_uuid = self._create_instance()
self.assertRaises(exception.FixedIpNotFoundForNetwork,
db.fixed_ip_associate,
self.ctxt, None, instance_uuid)
def test_fixed_ip_associate_fails_if_ip_in_use(self):
instance_uuid = self._create_instance()
address = self.create_fixed_ip(instance_uuid=instance_uuid)
self.assertRaises(exception.FixedIpAlreadyInUse,
db.fixed_ip_associate,
self.ctxt, address, instance_uuid)
def test_fixed_ip_associate_succeeds(self):
instance_uuid = self._create_instance()
network = db.network_create_safe(self.ctxt, {})
address = self.create_fixed_ip(network_id=network['id'])
db.fixed_ip_associate(self.ctxt, address, instance_uuid,
network_id=network['id'])
fixed_ip = db.fixed_ip_get_by_address(self.ctxt, address)
self.assertEqual(fixed_ip['instance_uuid'], instance_uuid)
def test_fixed_ip_associate_succeeds_and_sets_network(self):
instance_uuid = self._create_instance()
network = db.network_create_safe(self.ctxt, {})
address = self.create_fixed_ip()
db.fixed_ip_associate(self.ctxt, address, instance_uuid,
network_id=network['id'])
fixed_ip = db.fixed_ip_get_by_address(self.ctxt, address)
self.assertEqual(fixed_ip['instance_uuid'], instance_uuid)
self.assertEqual(fixed_ip['network_id'], network['id'])
def test_fixed_ip_associate_pool_invalid_uuid(self):
instance_uuid = '123'
self.assertRaises(exception.InvalidUUID, db.fixed_ip_associate_pool,
self.ctxt, None, instance_uuid)
def test_fixed_ip_associate_pool_no_more_fixed_ips(self):
instance_uuid = self._create_instance()
self.assertRaises(exception.NoMoreFixedIps, db.fixed_ip_associate_pool,
self.ctxt, None, instance_uuid)
def test_fixed_ip_associate_pool_succeeds(self):
instance_uuid = self._create_instance()
network = db.network_create_safe(self.ctxt, {})
address = self.create_fixed_ip(network_id=network['id'])
db.fixed_ip_associate_pool(self.ctxt, network['id'], instance_uuid)
fixed_ip = db.fixed_ip_get_by_address(self.ctxt, address)
self.assertEqual(fixed_ip['instance_uuid'], instance_uuid)
def test_fixed_ip_create_same_address(self):
address = '192.168.1.5'
params = {'address': address}
db.fixed_ip_create(self.ctxt, params)
self.assertRaises(exception.FixedIpExists, db.fixed_ip_create,
self.ctxt, params)
def test_fixed_ip_create_success(self):
instance_uuid = self._create_instance()
network_id = db.network_create_safe(self.ctxt, {})['id']
param = {
'reserved': False,
'deleted': 0,
'leased': False,
'host': '127.0.0.1',
'address': '192.168.1.5',
'allocated': False,
'instance_uuid': instance_uuid,
'network_id': network_id,
'virtual_interface_id': None
}
ignored_keys = ['created_at', 'id', 'deleted_at', 'updated_at']
fixed_ip_data = db.fixed_ip_create(self.ctxt, param)
self._assertEqualObjects(param, fixed_ip_data, ignored_keys)
def test_fixed_ip_bulk_create_same_address(self):
address_1 = '192.168.1.5'
address_2 = '192.168.1.6'
instance_uuid = self._create_instance()
network_id_1 = db.network_create_safe(self.ctxt, {})['id']
network_id_2 = db.network_create_safe(self.ctxt, {})['id']
params = [
{'reserved': False, 'deleted': 0, 'leased': False,
'host': '127.0.0.1', 'address': address_2, 'allocated': False,
'instance_uuid': instance_uuid, 'network_id': network_id_1,
'virtual_interface_id': None},
{'reserved': False, 'deleted': 0, 'leased': False,
'host': '127.0.0.1', 'address': address_1, 'allocated': False,
'instance_uuid': instance_uuid, 'network_id': network_id_1,
'virtual_interface_id': None},
{'reserved': False, 'deleted': 0, 'leased': False,
'host': 'localhost', 'address': address_2, 'allocated': True,
'instance_uuid': instance_uuid, 'network_id': network_id_2,
'virtual_interface_id': None},
]
self.assertRaises(exception.FixedIpExists, db.fixed_ip_bulk_create,
self.ctxt, params)
# In this case the transaction will be rolled back and none of the ips
# will make it to the database.
self.assertRaises(exception.FixedIpNotFoundForAddress,
db.fixed_ip_get_by_address, self.ctxt, address_1)
self.assertRaises(exception.FixedIpNotFoundForAddress,
db.fixed_ip_get_by_address, self.ctxt, address_2)
def test_fixed_ip_bulk_create_success(self):
address_1 = '192.168.1.5'
address_2 = '192.168.1.6'
instance_uuid = self._create_instance()
network_id_1 = db.network_create_safe(self.ctxt, {})['id']
network_id_2 = db.network_create_safe(self.ctxt, {})['id']
params = [
{'reserved': False, 'deleted': 0, 'leased': False,
'host': '127.0.0.1', 'address': address_1, 'allocated': False,
'instance_uuid': instance_uuid, 'network_id': network_id_1,
'virtual_interface_id': None},
{'reserved': False, 'deleted': 0, 'leased': False,
'host': 'localhost', 'address': address_2, 'allocated': True,
'instance_uuid': instance_uuid, 'network_id': network_id_2,
'virtual_interface_id': None}
]
db.fixed_ip_bulk_create(self.ctxt, params)
ignored_keys = ['created_at', 'id', 'deleted_at', 'updated_at']
fixed_ip_data = db.fixed_ip_get_by_instance(self.ctxt, instance_uuid)
        # We have no `id` in the incoming data, so we cannot use
        # _assertEqualListsOfObjects to compare it with the received objects.
fixed_ip_data = sorted(fixed_ip_data, key=lambda i: i['network_id'])
params = sorted(params, key=lambda i: i['network_id'])
for param, ip in zip(params, fixed_ip_data):
self._assertEqualObjects(param, ip, ignored_keys)
def test_fixed_ip_disassociate(self):
address = '192.168.1.5'
instance_uuid = self._create_instance()
network_id = db.network_create_safe(self.ctxt, {})['id']
param = {
'reserved': False,
'deleted': 0,
'leased': False,
'host': '127.0.0.1',
'address': address,
'allocated': False,
'instance_uuid': instance_uuid,
'network_id': network_id,
'virtual_interface_id': None
}
db.fixed_ip_create(self.ctxt, param)
db.fixed_ip_disassociate(self.ctxt, address)
fixed_ip_data = db.fixed_ip_get_by_address(self.ctxt, address)
ignored_keys = ['created_at', 'id', 'deleted_at',
'updated_at', 'instance_uuid']
self._assertEqualObjects(param, fixed_ip_data, ignored_keys)
self.assertIsNone(fixed_ip_data['instance_uuid'])
def test_fixed_ip_get_not_found_exception(self):
self.assertRaises(exception.FixedIpNotFound,
db.fixed_ip_get, self.ctxt, 0)
def test_fixed_ip_get_success2(self):
address = '192.168.1.5'
instance_uuid = self._create_instance()
network_id = db.network_create_safe(self.ctxt, {})['id']
param = {
'reserved': False,
'deleted': 0,
'leased': False,
'host': '127.0.0.1',
'address': address,
'allocated': False,
'instance_uuid': instance_uuid,
'network_id': network_id,
'virtual_interface_id': None
}
        fixed_ip_id = db.fixed_ip_create(self.ctxt, param)['id']
self.ctxt.is_admin = False
self.assertRaises(exception.NotAuthorized, db.fixed_ip_get,
self.ctxt, fixed_ip_id)
def test_fixed_ip_get_success(self):
address = '192.168.1.5'
instance_uuid = self._create_instance()
network_id = db.network_create_safe(self.ctxt, {})['id']
param = {
'reserved': False,
'deleted': 0,
'leased': False,
'host': '127.0.0.1',
'address': address,
'allocated': False,
'instance_uuid': instance_uuid,
'network_id': network_id,
'virtual_interface_id': None
}
db.fixed_ip_create(self.ctxt, param)
fixed_ip_id = db.fixed_ip_get_by_address(self.ctxt, address)['id']
fixed_ip_data = db.fixed_ip_get(self.ctxt, fixed_ip_id)
ignored_keys = ['created_at', 'id', 'deleted_at', 'updated_at']
self._assertEqualObjects(param, fixed_ip_data, ignored_keys)
def test_fixed_ip_get_by_address_detailed_not_found_exception(self):
self.assertRaises(exception.FixedIpNotFoundForAddress,
db.fixed_ip_get_by_address_detailed, self.ctxt,
'192.168.1.5')
def test_fixed_ip_get_by_address_with_data_error_exception(self):
self.mock_db_query_first_to_raise_data_error_exception()
self.assertRaises(exception.FixedIpInvalid,
db.fixed_ip_get_by_address_detailed, self.ctxt,
'192.168.1.6')
    def test_fixed_ip_get_by_address_detailed_success(self):
address = '192.168.1.5'
instance_uuid = self._create_instance()
network_id = db.network_create_safe(self.ctxt, {})['id']
param = {
'reserved': False,
'deleted': 0,
'leased': False,
'host': '127.0.0.1',
'address': address,
'allocated': False,
'instance_uuid': instance_uuid,
'network_id': network_id,
'virtual_interface_id': None
}
db.fixed_ip_create(self.ctxt, param)
fixed_ip_data = db.fixed_ip_get_by_address_detailed(self.ctxt, address)
# fixed ip check here
ignored_keys = ['created_at', 'id', 'deleted_at', 'updated_at']
self._assertEqualObjects(param, fixed_ip_data[0], ignored_keys)
# network model check here
network_data = db.network_get(self.ctxt, network_id)
self._assertEqualObjects(network_data, fixed_ip_data[1])
# Instance check here
instance_data = db.instance_get_by_uuid(self.ctxt, instance_uuid)
ignored_keys = ['info_cache', 'system_metadata',
'security_groups', 'metadata',
'pci_devices'] # HOW ????
self._assertEqualObjects(instance_data, fixed_ip_data[2], ignored_keys)
def test_fixed_ip_update_not_found_for_address(self):
self.assertRaises(exception.FixedIpNotFoundForAddress,
db.fixed_ip_update, self.ctxt,
'192.168.1.5', {})
def test_fixed_ip_update(self):
instance_uuid_1 = self._create_instance()
instance_uuid_2 = self._create_instance()
network_id_1 = db.network_create_safe(self.ctxt, {})['id']
network_id_2 = db.network_create_safe(self.ctxt, {})['id']
param_1 = {
'reserved': True, 'deleted': 0, 'leased': True,
'host': '192.168.133.1', 'address': '10.0.0.2',
'allocated': True, 'instance_uuid': instance_uuid_1,
'network_id': network_id_1, 'virtual_interface_id': '123',
}
param_2 = {
'reserved': False, 'deleted': 0, 'leased': False,
'host': '127.0.0.1', 'address': '10.0.0.3', 'allocated': False,
'instance_uuid': instance_uuid_2, 'network_id': network_id_2,
'virtual_interface_id': None
}
ignored_keys = ['created_at', 'id', 'deleted_at', 'updated_at']
fixed_ip_addr = db.fixed_ip_create(self.ctxt, param_1)['address']
db.fixed_ip_update(self.ctxt, fixed_ip_addr, param_2)
fixed_ip_after_update = db.fixed_ip_get_by_address(self.ctxt,
param_2['address'])
self._assertEqualObjects(param_2, fixed_ip_after_update, ignored_keys)
class FloatingIpTestCase(test.TestCase, ModelsObjectComparatorMixin):
def setUp(self):
super(FloatingIpTestCase, self).setUp()
self.ctxt = context.get_admin_context()
def _get_base_values(self):
return {
'address': '1.1.1.1',
'fixed_ip_id': None,
'project_id': 'fake_project',
'host': 'fake_host',
'auto_assigned': False,
'pool': 'fake_pool',
'interface': 'fake_interface',
}
def mock_db_query_first_to_raise_data_error_exception(self):
self.mox.StubOutWithMock(query.Query, 'first')
query.Query.first().AndRaise(exc.DataError(mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg()))
self.mox.ReplayAll()
def _create_floating_ip(self, values):
if not values:
values = {}
vals = self._get_base_values()
vals.update(values)
return db.floating_ip_create(self.ctxt, vals)
def test_floating_ip_get(self):
values = [{'address': '0.0.0.0'}, {'address': '1.1.1.1'}]
floating_ips = [self._create_floating_ip(val) for val in values]
for floating_ip in floating_ips:
real_floating_ip = db.floating_ip_get(self.ctxt, floating_ip['id'])
self._assertEqualObjects(floating_ip, real_floating_ip,
ignored_keys=['fixed_ip'])
def test_floating_ip_get_not_found(self):
self.assertRaises(exception.FloatingIpNotFound,
db.floating_ip_get, self.ctxt, 100500)
def test_floating_ip_get_with_long_id_not_found(self):
self.mock_db_query_first_to_raise_data_error_exception()
self.assertRaises(exception.InvalidID,
db.floating_ip_get, self.ctxt, 123456789101112)
def test_floating_ip_get_pools(self):
values = [
{'address': '0.0.0.0', 'pool': 'abc'},
{'address': '1.1.1.1', 'pool': 'abc'},
{'address': '2.2.2.2', 'pool': 'def'},
{'address': '3.3.3.3', 'pool': 'ghi'},
]
for val in values:
self._create_floating_ip(val)
expected_pools = [{'name': x}
for x in set(map(lambda x: x['pool'], values))]
real_pools = db.floating_ip_get_pools(self.ctxt)
self._assertEqualListsOfPrimitivesAsSets(real_pools, expected_pools)
def test_floating_ip_allocate_address(self):
pools = {
'pool1': ['0.0.0.0', '1.1.1.1'],
'pool2': ['2.2.2.2'],
'pool3': ['3.3.3.3', '4.4.4.4', '5.5.5.5']
}
for pool, addresses in pools.iteritems():
for address in addresses:
vals = {'pool': pool, 'address': address, 'project_id': None}
self._create_floating_ip(vals)
project_id = self._get_base_values()['project_id']
for pool, addresses in pools.iteritems():
alloc_addrs = []
for i in addresses:
float_addr = db.floating_ip_allocate_address(self.ctxt,
project_id, pool)
alloc_addrs.append(float_addr)
self._assertEqualListsOfPrimitivesAsSets(alloc_addrs, addresses)
def test_floating_ip_allocate_auto_assigned(self):
addresses = ['1.1.1.1', '1.1.1.2', '1.1.1.3', '1.1.1.4']
float_ips = []
for i in range(0, 2):
float_ips.append(self._create_floating_ip(
{"address": addresses[i]}))
for i in range(2, 4):
float_ips.append(self._create_floating_ip({"address": addresses[i],
"auto_assigned": True}))
for i in range(0, 2):
float_ip = db.floating_ip_get(self.ctxt, float_ips[i].id)
self.assertFalse(float_ip.auto_assigned)
for i in range(2, 4):
float_ip = db.floating_ip_get(self.ctxt, float_ips[i].id)
self.assertTrue(float_ip.auto_assigned)
def test_floating_ip_allocate_address_no_more_floating_ips(self):
self.assertRaises(exception.NoMoreFloatingIps,
db.floating_ip_allocate_address,
self.ctxt, 'any_project_id', 'no_such_pool')
def test_floating_ip_allocate_not_authorized(self):
ctxt = context.RequestContext(user_id='a', project_id='abc',
is_admin=False)
self.assertRaises(exception.NotAuthorized,
db.floating_ip_allocate_address,
ctxt, 'other_project_id', 'any_pool')
def _get_existing_ips(self):
return [ip['address'] for ip in db.floating_ip_get_all(self.ctxt)]
def test_floating_ip_bulk_create(self):
expected_ips = ['1.1.1.1', '1.1.1.2', '1.1.1.3', '1.1.1.4']
db.floating_ip_bulk_create(self.ctxt,
map(lambda x: {'address': x}, expected_ips))
self._assertEqualListsOfPrimitivesAsSets(self._get_existing_ips(),
expected_ips)
def test_floating_ip_bulk_create_duplicate(self):
ips = ['1.1.1.1', '1.1.1.2', '1.1.1.3', '1.1.1.4']
prepare_ips = lambda x: {'address': x}
db.floating_ip_bulk_create(self.ctxt, map(prepare_ips, ips))
self.assertRaises(exception.FloatingIpExists,
db.floating_ip_bulk_create,
self.ctxt, map(prepare_ips, ['1.1.1.5', '1.1.1.4']))
self.assertRaises(exception.FloatingIpNotFoundForAddress,
db.floating_ip_get_by_address,
self.ctxt, '1.1.1.5')
def test_floating_ip_bulk_destroy(self):
ips_for_delete = []
ips_for_non_delete = []
def create_ips(i):
return [{'address': '1.1.%s.%s' % (i, k)} for k in range(1, 256)]
        # NOTE(boris-42): Create more than 256 ips to check that
# _ip_range_splitter works properly.
for i in range(1, 3):
ips_for_delete.extend(create_ips(i))
ips_for_non_delete.extend(create_ips(3))
db.floating_ip_bulk_create(self.ctxt,
ips_for_delete + ips_for_non_delete)
db.floating_ip_bulk_destroy(self.ctxt, ips_for_delete)
expected_addresses = map(lambda x: x['address'], ips_for_non_delete)
self._assertEqualListsOfPrimitivesAsSets(self._get_existing_ips(),
expected_addresses)
def test_floating_ip_create(self):
floating_ip = self._create_floating_ip({})
ignored_keys = ['id', 'deleted', 'deleted_at', 'updated_at',
'created_at']
        self.assertIsNotNone(floating_ip['id'])
self._assertEqualObjects(floating_ip, self._get_base_values(),
ignored_keys)
def test_floating_ip_create_duplicate(self):
self._create_floating_ip({})
self.assertRaises(exception.FloatingIpExists,
self._create_floating_ip, {})
def _create_fixed_ip(self, params):
default_params = {'address': '192.168.0.1'}
default_params.update(params)
return db.fixed_ip_create(self.ctxt, default_params)['address']
def test_floating_ip_fixed_ip_associate(self):
float_addresses = ['1.1.1.1', '1.1.1.2', '1.1.1.3']
fixed_addresses = ['2.2.2.1', '2.2.2.2', '2.2.2.3']
float_ips = [self._create_floating_ip({'address': address})
for address in float_addresses]
fixed_addrs = [self._create_fixed_ip({'address': address})
for address in fixed_addresses]
for float_ip, fixed_addr in zip(float_ips, fixed_addrs):
fixed_ip = db.floating_ip_fixed_ip_associate(self.ctxt,
float_ip.address,
fixed_addr, 'host')
self.assertEqual(fixed_ip.address, fixed_addr)
updated_float_ip = db.floating_ip_get(self.ctxt, float_ip.id)
self.assertEqual(fixed_ip.id, updated_float_ip.fixed_ip_id)
self.assertEqual('host', updated_float_ip.host)
        # Associating an already-associated float_ip returns None
result = db.floating_ip_fixed_ip_associate(self.ctxt,
float_addresses[0],
fixed_addresses[0], 'host')
        self.assertIsNone(result)
def test_floating_ip_fixed_ip_associate_float_ip_not_found(self):
self.assertRaises(exception.FloatingIpNotFoundForAddress,
db.floating_ip_fixed_ip_associate,
self.ctxt, '10.10.10.10', 'some', 'some')
def test_floating_ip_deallocate(self):
values = {'address': '1.1.1.1', 'project_id': 'fake', 'host': 'fake'}
float_ip = self._create_floating_ip(values)
db.floating_ip_deallocate(self.ctxt, float_ip.address)
updated_float_ip = db.floating_ip_get(self.ctxt, float_ip.id)
        self.assertIsNone(updated_float_ip.project_id)
        self.assertIsNone(updated_float_ip.host)
self.assertFalse(updated_float_ip.auto_assigned)
def test_floating_ip_destroy(self):
addresses = ['1.1.1.1', '1.1.1.2', '1.1.1.3']
float_ips = [self._create_floating_ip({'address': addr})
for addr in addresses]
expected_len = len(addresses)
for float_ip in float_ips:
db.floating_ip_destroy(self.ctxt, float_ip.address)
self.assertRaises(exception.FloatingIpNotFound,
db.floating_ip_get, self.ctxt, float_ip.id)
expected_len -= 1
if expected_len > 0:
self.assertEqual(expected_len,
len(db.floating_ip_get_all(self.ctxt)))
else:
self.assertRaises(exception.NoFloatingIpsDefined,
db.floating_ip_get_all, self.ctxt)
def test_floating_ip_disassociate(self):
float_addresses = ['1.1.1.1', '1.1.1.2', '1.1.1.3']
fixed_addresses = ['2.2.2.1', '2.2.2.2', '2.2.2.3']
float_ips = [self._create_floating_ip({'address': address})
for address in float_addresses]
fixed_addrs = [self._create_fixed_ip({'address': address})
for address in fixed_addresses]
for float_ip, fixed_addr in zip(float_ips, fixed_addrs):
db.floating_ip_fixed_ip_associate(self.ctxt,
float_ip.address,
fixed_addr, 'host')
for float_ip, fixed_addr in zip(float_ips, fixed_addrs):
fixed = db.floating_ip_disassociate(self.ctxt, float_ip.address)
self.assertEqual(fixed.address, fixed_addr)
updated_float_ip = db.floating_ip_get(self.ctxt, float_ip.id)
            self.assertIsNone(updated_float_ip.fixed_ip_id)
            self.assertIsNone(updated_float_ip.host)
def test_floating_ip_disassociate_not_found(self):
self.assertRaises(exception.FloatingIpNotFoundForAddress,
db.floating_ip_disassociate, self.ctxt,
'11.11.11.11')
def test_floating_ip_set_auto_assigned(self):
addresses = ['1.1.1.1', '1.1.1.2', '1.1.1.3']
float_ips = [self._create_floating_ip({'address': addr,
'auto_assigned': False})
for addr in addresses]
for i in range(2):
db.floating_ip_set_auto_assigned(self.ctxt, float_ips[i].address)
for i in range(2):
float_ip = db.floating_ip_get(self.ctxt, float_ips[i].id)
self.assertTrue(float_ip.auto_assigned)
float_ip = db.floating_ip_get(self.ctxt, float_ips[2].id)
self.assertFalse(float_ip.auto_assigned)
def test_floating_ip_get_all(self):
addresses = ['1.1.1.1', '1.1.1.2', '1.1.1.3']
float_ips = [self._create_floating_ip({'address': addr})
for addr in addresses]
self._assertEqualListsOfObjects(float_ips,
db.floating_ip_get_all(self.ctxt))
def test_floating_ip_get_all_not_found(self):
self.assertRaises(exception.NoFloatingIpsDefined,
db.floating_ip_get_all, self.ctxt)
def test_floating_ip_get_all_by_host(self):
hosts = {
'host1': ['1.1.1.1', '1.1.1.2'],
'host2': ['2.1.1.1', '2.1.1.2'],
'host3': ['3.1.1.1', '3.1.1.2', '3.1.1.3']
}
hosts_with_float_ips = {}
for host, addresses in hosts.iteritems():
hosts_with_float_ips[host] = []
for address in addresses:
float_ip = self._create_floating_ip({'host': host,
'address': address})
hosts_with_float_ips[host].append(float_ip)
for host, float_ips in hosts_with_float_ips.iteritems():
real_float_ips = db.floating_ip_get_all_by_host(self.ctxt, host)
self._assertEqualListsOfObjects(float_ips, real_float_ips)
def test_floating_ip_get_all_by_host_not_found(self):
self.assertRaises(exception.FloatingIpNotFoundForHost,
db.floating_ip_get_all_by_host,
self.ctxt, 'non_exists_host')
def test_floating_ip_get_all_by_project(self):
projects = {
'pr1': ['1.1.1.1', '1.1.1.2'],
'pr2': ['2.1.1.1', '2.1.1.2'],
'pr3': ['3.1.1.1', '3.1.1.2', '3.1.1.3']
}
projects_with_float_ips = {}
for project_id, addresses in projects.iteritems():
projects_with_float_ips[project_id] = []
for address in addresses:
float_ip = self._create_floating_ip({'project_id': project_id,
'address': address})
projects_with_float_ips[project_id].append(float_ip)
for project_id, float_ips in projects_with_float_ips.iteritems():
real_float_ips = db.floating_ip_get_all_by_project(self.ctxt,
project_id)
self._assertEqualListsOfObjects(float_ips, real_float_ips,
ignored_keys='fixed_ip')
def test_floating_ip_get_all_by_project_not_authorized(self):
ctxt = context.RequestContext(user_id='a', project_id='abc',
is_admin=False)
self.assertRaises(exception.NotAuthorized,
db.floating_ip_get_all_by_project,
ctxt, 'other_project')
def test_floating_ip_get_by_address(self):
addresses = ['1.1.1.1', '1.1.1.2', '1.1.1.3']
float_ips = [self._create_floating_ip({'address': addr})
for addr in addresses]
for float_ip in float_ips:
real_float_ip = db.floating_ip_get_by_address(self.ctxt,
float_ip.address)
self._assertEqualObjects(float_ip, real_float_ip,
ignored_keys='fixed_ip')
def test_floating_ip_get_by_address_not_found(self):
self.assertRaises(exception.FloatingIpNotFoundForAddress,
db.floating_ip_get_by_address,
self.ctxt, '20.20.20.20')
def test_floating_ip_get_by_invalid_address(self):
self.mock_db_query_first_to_raise_data_error_exception()
self.assertRaises(exception.InvalidIpAddressError,
db.floating_ip_get_by_address,
self.ctxt, 'non_exists_host')
def test_floating_ip_get_by_fixed_address(self):
fixed_float = [
('1.1.1.1', '2.2.2.1'),
('1.1.1.2', '2.2.2.2'),
('1.1.1.3', '2.2.2.3')
]
for fixed_addr, float_addr in fixed_float:
self._create_floating_ip({'address': float_addr})
self._create_fixed_ip({'address': fixed_addr})
db.floating_ip_fixed_ip_associate(self.ctxt, float_addr,
fixed_addr, 'some_host')
for fixed_addr, float_addr in fixed_float:
float_ip = db.floating_ip_get_by_fixed_address(self.ctxt,
fixed_addr)
self.assertEqual(float_addr, float_ip[0]['address'])
def test_floating_ip_get_by_fixed_ip_id(self):
fixed_float = [
('1.1.1.1', '2.2.2.1'),
('1.1.1.2', '2.2.2.2'),
('1.1.1.3', '2.2.2.3')
]
for fixed_addr, float_addr in fixed_float:
self._create_floating_ip({'address': float_addr})
self._create_fixed_ip({'address': fixed_addr})
db.floating_ip_fixed_ip_associate(self.ctxt, float_addr,
fixed_addr, 'some_host')
for fixed_addr, float_addr in fixed_float:
fixed_ip = db.fixed_ip_get_by_address(self.ctxt, fixed_addr)
float_ip = db.floating_ip_get_by_fixed_ip_id(self.ctxt,
fixed_ip['id'])
self.assertEqual(float_addr, float_ip[0]['address'])
def test_floating_ip_update(self):
float_ip = self._create_floating_ip({})
values = {
'project_id': 'some_pr',
'host': 'some_host',
'auto_assigned': True,
'interface': 'some_interface',
'pool': 'some_pool'
}
db.floating_ip_update(self.ctxt, float_ip['address'], values)
updated_float_ip = db.floating_ip_get(self.ctxt, float_ip['id'])
self._assertEqualObjects(updated_float_ip, values,
ignored_keys=['id', 'address', 'updated_at',
'deleted_at', 'created_at',
'deleted', 'fixed_ip_id',
'fixed_ip'])
def test_floating_ip_update_to_duplicate(self):
float_ip1 = self._create_floating_ip({'address': '1.1.1.1'})
float_ip2 = self._create_floating_ip({'address': '1.1.1.2'})
self.assertRaises(exception.FloatingIpExists,
db.floating_ip_update,
self.ctxt, float_ip2['address'],
{'address': float_ip1['address']})
class InstanceDestroyConstraints(test.TestCase):
def test_destroy_with_equal_any_constraint_met(self):
ctx = context.get_admin_context()
instance = db.instance_create(ctx, {'task_state': 'deleting'})
constraint = db.constraint(task_state=db.equal_any('deleting'))
db.instance_destroy(ctx, instance['uuid'], constraint)
self.assertRaises(exception.InstanceNotFound, db.instance_get_by_uuid,
ctx, instance['uuid'])
def test_destroy_with_equal_any_constraint_not_met(self):
ctx = context.get_admin_context()
instance = db.instance_create(ctx, {'vm_state': 'resize'})
constraint = db.constraint(vm_state=db.equal_any('active', 'error'))
self.assertRaises(exception.ConstraintNotMet, db.instance_destroy,
ctx, instance['uuid'], constraint)
instance = db.instance_get_by_uuid(ctx, instance['uuid'])
self.assertFalse(instance['deleted'])
def test_destroy_with_not_equal_constraint_met(self):
ctx = context.get_admin_context()
instance = db.instance_create(ctx, {'task_state': 'deleting'})
constraint = db.constraint(task_state=db.not_equal('error', 'resize'))
db.instance_destroy(ctx, instance['uuid'], constraint)
self.assertRaises(exception.InstanceNotFound, db.instance_get_by_uuid,
ctx, instance['uuid'])
def test_destroy_with_not_equal_constraint_not_met(self):
ctx = context.get_admin_context()
instance = db.instance_create(ctx, {'vm_state': 'active'})
constraint = db.constraint(vm_state=db.not_equal('active', 'error'))
self.assertRaises(exception.ConstraintNotMet, db.instance_destroy,
ctx, instance['uuid'], constraint)
instance = db.instance_get_by_uuid(ctx, instance['uuid'])
self.assertFalse(instance['deleted'])
class VolumeUsageDBApiTestCase(test.TestCase):
def setUp(self):
super(VolumeUsageDBApiTestCase, self).setUp()
self.user_id = 'fake'
self.project_id = 'fake'
self.context = context.RequestContext(self.user_id, self.project_id)
self.useFixture(test.TimeOverride())
def test_vol_usage_update_no_totals_update(self):
ctxt = context.get_admin_context()
now = timeutils.utcnow()
start_time = now - datetime.timedelta(seconds=10)
self.mox.StubOutWithMock(timeutils, 'utcnow')
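        # Queue one utcnow() return value for each of the three
        # vol_usage_update() calls made below.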
timeutils.utcnow().AndReturn(now)
timeutils.utcnow().AndReturn(now)
timeutils.utcnow().AndReturn(now)
self.mox.ReplayAll()
expected_vol_usages = [{'volume_id': u'1',
'instance_uuid': 'fake-instance-uuid1',
'project_id': 'fake-project-uuid1',
'user_id': 'fake-user-uuid1',
'curr_reads': 1000,
'curr_read_bytes': 2000,
'curr_writes': 3000,
'curr_write_bytes': 4000,
'curr_last_refreshed': now,
'tot_reads': 0,
'tot_read_bytes': 0,
'tot_writes': 0,
'tot_write_bytes': 0,
'tot_last_refreshed': None},
{'volume_id': u'2',
'instance_uuid': 'fake-instance-uuid2',
'project_id': 'fake-project-uuid2',
'user_id': 'fake-user-uuid2',
'curr_reads': 100,
'curr_read_bytes': 200,
'curr_writes': 300,
'curr_write_bytes': 400,
'tot_reads': 0,
'tot_read_bytes': 0,
'tot_writes': 0,
'tot_write_bytes': 0,
'tot_last_refreshed': None}]
def _compare(vol_usage, expected):
for key, value in expected.items():
self.assertEqual(vol_usage[key], value)
vol_usages = db.vol_get_usage_by_time(ctxt, start_time)
self.assertEqual(len(vol_usages), 0)
db.vol_usage_update(ctxt, u'1', rd_req=10, rd_bytes=20,
wr_req=30, wr_bytes=40,
instance_id='fake-instance-uuid1',
project_id='fake-project-uuid1',
user_id='fake-user-uuid1',
availability_zone='fake-az')
db.vol_usage_update(ctxt, u'2', rd_req=100, rd_bytes=200,
wr_req=300, wr_bytes=400,
instance_id='fake-instance-uuid2',
project_id='fake-project-uuid2',
user_id='fake-user-uuid2',
availability_zone='fake-az')
db.vol_usage_update(ctxt, u'1', rd_req=1000, rd_bytes=2000,
wr_req=3000, wr_bytes=4000,
instance_id='fake-instance-uuid1',
project_id='fake-project-uuid1',
user_id='fake-user-uuid1',
availability_zone='fake-az')
vol_usages = db.vol_get_usage_by_time(ctxt, start_time)
self.assertEqual(len(vol_usages), 2)
_compare(vol_usages[0], expected_vol_usages[0])
_compare(vol_usages[1], expected_vol_usages[1])
def test_vol_usage_update_totals_update(self):
ctxt = context.get_admin_context()
now = datetime.datetime(1, 1, 1, 1, 0, 0)
start_time = now - datetime.timedelta(seconds=10)
self.mox.StubOutWithMock(timeutils, 'utcnow')
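        # Stub utcnow() so each of the four updates below sees the clock
        # advance by one minute, making the *_last_refreshed timestamps
        # predictable.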
timeutils.utcnow().AndReturn(now)
now1 = now + datetime.timedelta(minutes=1)
timeutils.utcnow().AndReturn(now1)
now2 = now + datetime.timedelta(minutes=2)
timeutils.utcnow().AndReturn(now2)
now3 = now + datetime.timedelta(minutes=3)
timeutils.utcnow().AndReturn(now3)
self.mox.ReplayAll()
db.vol_usage_update(ctxt, u'1', rd_req=100, rd_bytes=200,
wr_req=300, wr_bytes=400,
instance_id='fake-instance-uuid',
project_id='fake-project-uuid',
user_id='fake-user-uuid',
availability_zone='fake-az')
current_usage = db.vol_get_usage_by_time(ctxt, start_time)[0]
self.assertEqual(current_usage['tot_reads'], 0)
self.assertEqual(current_usage['curr_reads'], 100)
db.vol_usage_update(ctxt, u'1', rd_req=200, rd_bytes=300,
wr_req=400, wr_bytes=500,
instance_id='fake-instance-uuid',
project_id='fake-project-uuid',
user_id='fake-user-uuid',
availability_zone='fake-az',
update_totals=True)
current_usage = db.vol_get_usage_by_time(ctxt, start_time)[0]
self.assertEqual(current_usage['tot_reads'], 200)
self.assertEqual(current_usage['curr_reads'], 0)
db.vol_usage_update(ctxt, u'1', rd_req=300, rd_bytes=400,
wr_req=500, wr_bytes=600,
instance_id='fake-instance-uuid',
project_id='fake-project-uuid',
availability_zone='fake-az',
user_id='fake-user-uuid')
current_usage = db.vol_get_usage_by_time(ctxt, start_time)[0]
self.assertEqual(current_usage['tot_reads'], 200)
self.assertEqual(current_usage['curr_reads'], 300)
db.vol_usage_update(ctxt, u'1', rd_req=400, rd_bytes=500,
wr_req=600, wr_bytes=700,
instance_id='fake-instance-uuid',
project_id='fake-project-uuid',
user_id='fake-user-uuid',
availability_zone='fake-az',
update_totals=True)
vol_usages = db.vol_get_usage_by_time(ctxt, start_time)
expected_vol_usages = {'volume_id': u'1',
'project_id': 'fake-project-uuid',
'user_id': 'fake-user-uuid',
'instance_uuid': 'fake-instance-uuid',
'availability_zone': 'fake-az',
'tot_reads': 600,
'tot_read_bytes': 800,
'tot_writes': 1000,
'tot_write_bytes': 1200,
'tot_last_refreshed': now3,
'curr_reads': 0,
'curr_read_bytes': 0,
'curr_writes': 0,
'curr_write_bytes': 0,
'curr_last_refreshed': now2}
        self.assertEqual(1, len(vol_usages))
for key, value in expected_vol_usages.items():
self.assertEqual(vol_usages[0][key], value, key)
def test_vol_usage_update_when_blockdevicestats_reset(self):
ctxt = context.get_admin_context()
now = timeutils.utcnow()
start_time = now - datetime.timedelta(seconds=10)
vol_usages = db.vol_get_usage_by_time(ctxt, start_time)
self.assertEqual(len(vol_usages), 0)
db.vol_usage_update(ctxt, u'1',
rd_req=10000, rd_bytes=20000,
wr_req=30000, wr_bytes=40000,
instance_id='fake-instance-uuid1',
project_id='fake-project-uuid1',
availability_zone='fake-az',
user_id='fake-user-uuid1')
        # Instance rebooted or crashed. Block device stats were reset and are
        # less than the previous values.
db.vol_usage_update(ctxt, u'1',
rd_req=100, rd_bytes=200,
wr_req=300, wr_bytes=400,
instance_id='fake-instance-uuid1',
project_id='fake-project-uuid1',
availability_zone='fake-az',
user_id='fake-user-uuid1')
db.vol_usage_update(ctxt, u'1',
rd_req=200, rd_bytes=300,
wr_req=400, wr_bytes=500,
instance_id='fake-instance-uuid1',
project_id='fake-project-uuid1',
availability_zone='fake-az',
user_id='fake-user-uuid1')
vol_usage = db.vol_get_usage_by_time(ctxt, start_time)[0]
expected_vol_usage = {'volume_id': u'1',
'instance_uuid': 'fake-instance-uuid1',
'project_id': 'fake-project-uuid1',
'availability_zone': 'fake-az',
'user_id': 'fake-user-uuid1',
'curr_reads': 200,
'curr_read_bytes': 300,
'curr_writes': 400,
'curr_write_bytes': 500,
'tot_reads': 10000,
'tot_read_bytes': 20000,
'tot_writes': 30000,
'tot_write_bytes': 40000}
for key, value in expected_vol_usage.items():
self.assertEqual(vol_usage[key], value, key)
def test_vol_usage_update_totals_update_when_blockdevicestats_reset(self):
        # This is unlikely to happen, but could occur when a volume is
        # detached right after an instance has rebooted / recovered and
        # before the system polled and updated the volume usage cache table.
ctxt = context.get_admin_context()
now = timeutils.utcnow()
start_time = now - datetime.timedelta(seconds=10)
vol_usages = db.vol_get_usage_by_time(ctxt, start_time)
self.assertEqual(len(vol_usages), 0)
db.vol_usage_update(ctxt, u'1',
rd_req=10000, rd_bytes=20000,
wr_req=30000, wr_bytes=40000,
instance_id='fake-instance-uuid1',
project_id='fake-project-uuid1',
availability_zone='fake-az',
user_id='fake-user-uuid1')
        # Instance rebooted or crashed. Block device stats were reset and are
        # less than the previous values.
db.vol_usage_update(ctxt, u'1',
rd_req=100, rd_bytes=200,
wr_req=300, wr_bytes=400,
instance_id='fake-instance-uuid1',
project_id='fake-project-uuid1',
availability_zone='fake-az',
user_id='fake-user-uuid1',
update_totals=True)
vol_usage = db.vol_get_usage_by_time(ctxt, start_time)[0]
expected_vol_usage = {'volume_id': u'1',
'instance_uuid': 'fake-instance-uuid1',
'project_id': 'fake-project-uuid1',
'availability_zone': 'fake-az',
'user_id': 'fake-user-uuid1',
'curr_reads': 0,
'curr_read_bytes': 0,
'curr_writes': 0,
'curr_write_bytes': 0,
'tot_reads': 10100,
'tot_read_bytes': 20200,
'tot_writes': 30300,
'tot_write_bytes': 40400}
for key, value in expected_vol_usage.items():
self.assertEqual(vol_usage[key], value, key)
class TaskLogTestCase(test.TestCase):
def setUp(self):
super(TaskLogTestCase, self).setUp()
self.context = context.get_admin_context()
now = timeutils.utcnow()
self.begin = now - datetime.timedelta(seconds=10)
self.end = now - datetime.timedelta(seconds=5)
self.task_name = 'fake-task-name'
self.host = 'fake-host'
self.message = 'Fake task message'
db.task_log_begin_task(self.context, self.task_name, self.begin,
self.end, self.host, message=self.message)
def test_task_log_get(self):
result = db.task_log_get(self.context, self.task_name, self.begin,
self.end, self.host)
self.assertEqual(result['task_name'], self.task_name)
self.assertEqual(result['period_beginning'], self.begin)
self.assertEqual(result['period_ending'], self.end)
self.assertEqual(result['host'], self.host)
self.assertEqual(result['message'], self.message)
def test_task_log_get_all(self):
result = db.task_log_get_all(self.context, self.task_name, self.begin,
self.end, host=self.host)
self.assertEqual(len(result), 1)
result = db.task_log_get_all(self.context, self.task_name, self.begin,
self.end, host=self.host, state='')
self.assertEqual(len(result), 0)
def test_task_log_begin_task(self):
db.task_log_begin_task(self.context, 'fake', self.begin,
self.end, self.host, task_items=42,
message=self.message)
result = db.task_log_get(self.context, 'fake', self.begin,
self.end, self.host)
self.assertEqual(result['task_name'], 'fake')
def test_task_log_begin_task_duplicate(self):
params = (self.context, 'fake', self.begin, self.end, self.host)
db.task_log_begin_task(*params, message=self.message)
self.assertRaises(exception.TaskAlreadyRunning,
db.task_log_begin_task,
*params, message=self.message)
def test_task_log_end_task(self):
errors = 1
db.task_log_end_task(self.context, self.task_name, self.begin,
self.end, self.host, errors, message=self.message)
result = db.task_log_get(self.context, self.task_name, self.begin,
self.end, self.host)
self.assertEqual(result['errors'], 1)
def test_task_log_end_task_task_not_running(self):
self.assertRaises(exception.TaskNotRunning,
db.task_log_end_task, self.context, 'nonexistent',
self.begin, self.end, self.host, 42,
message=self.message)
class BlockDeviceMappingTestCase(test.TestCase):
def setUp(self):
super(BlockDeviceMappingTestCase, self).setUp()
self.ctxt = context.get_admin_context()
self.instance = db.instance_create(self.ctxt, {})
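    # Create a block device mapping with volume-backed defaults for
    # self.instance and return it as read back from the database.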
def _create_bdm(self, values):
values.setdefault('instance_uuid', self.instance['uuid'])
values.setdefault('device_name', 'fake_device')
values.setdefault('source_type', 'volume')
values.setdefault('destination_type', 'volume')
block_dev = block_device.BlockDeviceDict(values)
db.block_device_mapping_create(self.ctxt, block_dev, legacy=False)
uuid = block_dev['instance_uuid']
bdms = db.block_device_mapping_get_all_by_instance(self.ctxt, uuid)
for bdm in bdms:
if bdm['device_name'] == values['device_name']:
return bdm
def test_scrub_empty_str_values_no_effect(self):
values = {'volume_size': 5}
expected = copy.copy(values)
sqlalchemy_api._scrub_empty_str_values(values, ['volume_size'])
self.assertEqual(values, expected)
def test_scrub_empty_str_values_empty_string(self):
values = {'volume_size': ''}
sqlalchemy_api._scrub_empty_str_values(values, ['volume_size'])
self.assertEqual(values, {})
def test_scrub_empty_str_values_empty_unicode(self):
values = {'volume_size': u''}
sqlalchemy_api._scrub_empty_str_values(values, ['volume_size'])
self.assertEqual(values, {})
def test_block_device_mapping_create(self):
bdm = self._create_bdm({})
        self.assertIsNotNone(bdm)
def test_block_device_mapping_update(self):
bdm = self._create_bdm({})
result = db.block_device_mapping_update(
self.ctxt, bdm['id'], {'destination_type': 'moon'},
legacy=False)
uuid = bdm['instance_uuid']
bdm_real = db.block_device_mapping_get_all_by_instance(self.ctxt, uuid)
self.assertEqual(bdm_real[0]['destination_type'], 'moon')
# Also make sure the update call returned correct data
self.assertEqual(dict(bdm_real[0].iteritems()),
dict(result.iteritems()))
def test_block_device_mapping_update_or_create(self):
values = {
'instance_uuid': self.instance['uuid'],
'device_name': 'fake_name',
'source_type': 'volume',
'destination_type': 'volume'
}
# check create
db.block_device_mapping_update_or_create(self.ctxt, values,
legacy=False)
uuid = values['instance_uuid']
bdm_real = db.block_device_mapping_get_all_by_instance(self.ctxt, uuid)
self.assertEqual(len(bdm_real), 1)
self.assertEqual(bdm_real[0]['device_name'], 'fake_name')
# check update
values['destination_type'] = 'camelot'
db.block_device_mapping_update_or_create(self.ctxt, values,
legacy=False)
bdm_real = db.block_device_mapping_get_all_by_instance(self.ctxt, uuid)
self.assertEqual(len(bdm_real), 1)
bdm_real = bdm_real[0]
self.assertEqual(bdm_real['device_name'], 'fake_name')
self.assertEqual(bdm_real['destination_type'], 'camelot')
# check create without device_name
bdm1 = dict(values)
bdm1['device_name'] = None
db.block_device_mapping_update_or_create(self.ctxt, bdm1, legacy=False)
bdm_real = db.block_device_mapping_get_all_by_instance(self.ctxt, uuid)
self.assertEqual(len(bdm_real), 2)
bdm_real = bdm_real[1]
        self.assertIsNone(bdm_real['device_name'])
# check create multiple devices without device_name
bdm2 = dict(values)
bdm2['device_name'] = None
db.block_device_mapping_update_or_create(self.ctxt, bdm2, legacy=False)
bdm_real = db.block_device_mapping_get_all_by_instance(self.ctxt, uuid)
self.assertEqual(len(bdm_real), 3)
bdm_real = bdm_real[2]
        self.assertIsNone(bdm_real['device_name'])
def test_block_device_mapping_update_or_create_multiple_ephemeral(self):
uuid = self.instance['uuid']
values = {
'instance_uuid': uuid,
'source_type': 'blank',
'guest_format': 'myformat',
}
bdm1 = dict(values)
bdm1['device_name'] = '/dev/sdb'
db.block_device_mapping_update_or_create(self.ctxt, bdm1, legacy=False)
bdm2 = dict(values)
bdm2['device_name'] = '/dev/sdc'
db.block_device_mapping_update_or_create(self.ctxt, bdm2, legacy=False)
bdm_real = db.block_device_mapping_get_all_by_instance(self.ctxt, uuid)
self.assertEqual(len(bdm_real), 2)
for bdm, device_name in zip(bdm_real, ['/dev/sdb', '/dev/sdc']):
self.assertEqual(bdm['device_name'], device_name)
self.assertEqual(bdm['guest_format'], 'myformat')
def test_block_device_mapping_update_or_create_check_remove_virt(self):
uuid = self.instance['uuid']
values = {
'instance_uuid': uuid,
'source_type': 'blank',
'destination_type': 'local',
'guest_format': 'swap',
}
# check that old swap bdms are deleted on create
val1 = dict(values)
val1['device_name'] = 'device1'
db.block_device_mapping_create(self.ctxt, val1, legacy=False)
val2 = dict(values)
val2['device_name'] = 'device2'
db.block_device_mapping_update_or_create(self.ctxt, val2, legacy=False)
bdm_real = db.block_device_mapping_get_all_by_instance(self.ctxt, uuid)
self.assertEqual(len(bdm_real), 1)
bdm_real = bdm_real[0]
self.assertEqual(bdm_real['device_name'], 'device2')
self.assertEqual(bdm_real['source_type'], 'blank')
self.assertEqual(bdm_real['guest_format'], 'swap')
db.block_device_mapping_destroy(self.ctxt, bdm_real['id'])
def test_block_device_mapping_get_all_by_instance(self):
uuid1 = self.instance['uuid']
uuid2 = db.instance_create(self.ctxt, {})['uuid']
        bdms_values = [{'instance_uuid': uuid1,
                        'device_name': 'first'},
                       {'instance_uuid': uuid2,
                        'device_name': 'second'},
                       {'instance_uuid': uuid2,
                        'device_name': 'third'}]
        for bdm in bdms_values:
            self._create_bdm(bdm)
        bdms = db.block_device_mapping_get_all_by_instance(self.ctxt, uuid1)
        self.assertEqual(len(bdms), 1)
        self.assertEqual(bdms[0]['device_name'], 'first')
        bdms = db.block_device_mapping_get_all_by_instance(self.ctxt, uuid2)
        self.assertEqual(len(bdms), 2)
def test_block_device_mapping_destroy(self):
bdm = self._create_bdm({})
db.block_device_mapping_destroy(self.ctxt, bdm['id'])
        bdms = db.block_device_mapping_get_all_by_instance(
            self.ctxt, bdm['instance_uuid'])
        self.assertEqual(len(bdms), 0)
    def test_block_device_mapping_destroy_by_instance_and_volume(self):
vol_id1 = '69f5c254-1a5b-4fff-acf7-cb369904f58f'
vol_id2 = '69f5c254-1a5b-4fff-acf7-cb369904f59f'
self._create_bdm({'device_name': 'fake1', 'volume_id': vol_id1})
self._create_bdm({'device_name': 'fake2', 'volume_id': vol_id2})
uuid = self.instance['uuid']
db.block_device_mapping_destroy_by_instance_and_volume(self.ctxt, uuid,
vol_id1)
bdms = db.block_device_mapping_get_all_by_instance(self.ctxt, uuid)
self.assertEqual(len(bdms), 1)
self.assertEqual(bdms[0]['device_name'], 'fake2')
def test_block_device_mapping_destroy_by_instance_and_device(self):
self._create_bdm({'device_name': 'fake1'})
self._create_bdm({'device_name': 'fake2'})
uuid = self.instance['uuid']
params = (self.ctxt, uuid, 'fake1')
db.block_device_mapping_destroy_by_instance_and_device(*params)
bdms = db.block_device_mapping_get_all_by_instance(self.ctxt, uuid)
self.assertEqual(len(bdms), 1)
self.assertEqual(bdms[0]['device_name'], 'fake2')
def test_block_device_mapping_get_by_volume_id(self):
self._create_bdm({'volume_id': 'fake_id'})
bdm = db.block_device_mapping_get_by_volume_id(self.ctxt, 'fake_id')
self.assertEqual(bdm['volume_id'], 'fake_id')
def test_block_device_mapping_get_by_volume_id_join_instance(self):
self._create_bdm({'volume_id': 'fake_id'})
bdm = db.block_device_mapping_get_by_volume_id(self.ctxt, 'fake_id',
['instance'])
self.assertEqual(bdm['volume_id'], 'fake_id')
self.assertEqual(bdm['instance']['uuid'], self.instance['uuid'])
class AgentBuildTestCase(test.TestCase, ModelsObjectComparatorMixin):
"""Tests for db.api.agent_build_* methods."""
def setUp(self):
super(AgentBuildTestCase, self).setUp()
self.ctxt = context.get_admin_context()
def test_agent_build_create_and_get_all(self):
self.assertEqual(0, len(db.agent_build_get_all(self.ctxt)))
agent_build = db.agent_build_create(self.ctxt, {'os': 'GNU/HURD'})
all_agent_builds = db.agent_build_get_all(self.ctxt)
self.assertEqual(1, len(all_agent_builds))
self._assertEqualObjects(agent_build, all_agent_builds[0])
def test_agent_build_get_by_triple(self):
agent_build = db.agent_build_create(self.ctxt, {'hypervisor': 'kvm',
'os': 'FreeBSD', 'architecture': 'x86_64'})
self.assertIsNone(db.agent_build_get_by_triple(self.ctxt, 'kvm',
'FreeBSD', 'i386'))
self._assertEqualObjects(agent_build, db.agent_build_get_by_triple(
self.ctxt, 'kvm', 'FreeBSD', 'x86_64'))
def test_agent_build_destroy(self):
agent_build = db.agent_build_create(self.ctxt, {})
self.assertEqual(1, len(db.agent_build_get_all(self.ctxt)))
db.agent_build_destroy(self.ctxt, agent_build.id)
self.assertEqual(0, len(db.agent_build_get_all(self.ctxt)))
def test_agent_build_update(self):
agent_build = db.agent_build_create(self.ctxt, {'os': 'HaikuOS'})
db.agent_build_update(self.ctxt, agent_build.id, {'os': 'ReactOS'})
self.assertEqual('ReactOS', db.agent_build_get_all(self.ctxt)[0].os)
def test_agent_build_destroy_destroyed(self):
agent_build = db.agent_build_create(self.ctxt, {})
db.agent_build_destroy(self.ctxt, agent_build.id)
self.assertRaises(exception.AgentBuildNotFound,
db.agent_build_destroy, self.ctxt, agent_build.id)
def test_agent_build_update_destroyed(self):
agent_build = db.agent_build_create(self.ctxt, {'os': 'HaikuOS'})
db.agent_build_destroy(self.ctxt, agent_build.id)
self.assertRaises(exception.AgentBuildNotFound,
db.agent_build_update, self.ctxt, agent_build.id, {'os': 'OS/2'})
def test_agent_build_exists(self):
values = {'hypervisor': 'kvm', 'os': 'FreeBSD',
'architecture': 'x86_64'}
db.agent_build_create(self.ctxt, values)
self.assertRaises(exception.AgentBuildExists, db.agent_build_create,
self.ctxt, values)
def test_agent_build_get_all_by_hypervisor(self):
values = {'hypervisor': 'kvm', 'os': 'FreeBSD',
'architecture': 'x86_64'}
created = db.agent_build_create(self.ctxt, values)
actual = db.agent_build_get_all(self.ctxt, hypervisor='kvm')
self._assertEqualListsOfObjects([created], actual)
class VirtualInterfaceTestCase(test.TestCase, ModelsObjectComparatorMixin):
def setUp(self):
super(VirtualInterfaceTestCase, self).setUp()
self.ctxt = context.get_admin_context()
self.instance_uuid = db.instance_create(self.ctxt, {})['uuid']
values = {'host': 'localhost', 'project_id': 'project1'}
self.network = db.network_create_safe(self.ctxt, values)
def _get_base_values(self):
return {
'instance_uuid': self.instance_uuid,
'address': 'fake_address',
'network_id': self.network['id'],
'uuid': str(stdlib_uuid.uuid4())
}
def mock_db_query_first_to_raise_data_error_exception(self):
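        # Stub sqlalchemy's Query.first() (via mox) to raise DataError so a
        # later lookup by a malformed address exercises the error-translation
        # path in the DB API.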
self.mox.StubOutWithMock(query.Query, 'first')
query.Query.first().AndRaise(exc.DataError(mox.IgnoreArg(),
mox.IgnoreArg(),
mox.IgnoreArg()))
self.mox.ReplayAll()
def _create_virt_interface(self, values):
v = self._get_base_values()
v.update(values)
return db.virtual_interface_create(self.ctxt, v)
def test_virtual_interface_create(self):
vif = self._create_virt_interface({})
        self.assertIsNotNone(vif['id'])
ignored_keys = ['id', 'deleted', 'deleted_at', 'updated_at',
'created_at', 'uuid']
self._assertEqualObjects(vif, self._get_base_values(), ignored_keys)
def test_virtual_interface_create_with_duplicate_address(self):
vif = self._create_virt_interface({})
self.assertRaises(exception.VirtualInterfaceCreateException,
self._create_virt_interface, {"uuid": vif['uuid']})
def test_virtual_interface_get(self):
vifs = [self._create_virt_interface({'address': 'a'}),
self._create_virt_interface({'address': 'b'})]
for vif in vifs:
real_vif = db.virtual_interface_get(self.ctxt, vif['id'])
self._assertEqualObjects(vif, real_vif)
def test_virtual_interface_get_by_address(self):
vifs = [self._create_virt_interface({'address': 'first'}),
self._create_virt_interface({'address': 'second'})]
for vif in vifs:
real_vif = db.virtual_interface_get_by_address(self.ctxt,
vif['address'])
self._assertEqualObjects(vif, real_vif)
def test_virtual_interface_get_by_address_not_found(self):
self.assertIsNone(db.virtual_interface_get_by_address(self.ctxt,
"i.nv.ali.ip"))
def test_virtual_interface_get_by_address_data_error_exception(self):
self.mock_db_query_first_to_raise_data_error_exception()
self.assertRaises(exception.InvalidIpAddressError,
db.virtual_interface_get_by_address,
self.ctxt,
"i.nv.ali.ip")
def test_virtual_interface_get_by_uuid(self):
vifs = [self._create_virt_interface({"address": "address_1"}),
self._create_virt_interface({"address": "address_2"})]
for vif in vifs:
real_vif = db.virtual_interface_get_by_uuid(self.ctxt, vif['uuid'])
self._assertEqualObjects(vif, real_vif)
def test_virtual_interface_get_by_instance(self):
inst_uuid2 = db.instance_create(self.ctxt, {})['uuid']
vifs1 = [self._create_virt_interface({'address': 'fake1'}),
self._create_virt_interface({'address': 'fake2'})]
vifs2 = [self._create_virt_interface({'address': 'fake3',
'instance_uuid': inst_uuid2})]
vifs1_real = db.virtual_interface_get_by_instance(self.ctxt,
self.instance_uuid)
vifs2_real = db.virtual_interface_get_by_instance(self.ctxt,
inst_uuid2)
self._assertEqualListsOfObjects(vifs1, vifs1_real)
self._assertEqualListsOfObjects(vifs2, vifs2_real)
def test_virtual_interface_get_by_instance_and_network(self):
inst_uuid2 = db.instance_create(self.ctxt, {})['uuid']
values = {'host': 'localhost', 'project_id': 'project2'}
network_id = db.network_create_safe(self.ctxt, values)['id']
vifs = [self._create_virt_interface({'address': 'fake1'}),
self._create_virt_interface({'address': 'fake2',
'network_id': network_id,
'instance_uuid': inst_uuid2}),
self._create_virt_interface({'address': 'fake3',
'instance_uuid': inst_uuid2})]
for vif in vifs:
params = (self.ctxt, vif['instance_uuid'], vif['network_id'])
r_vif = db.virtual_interface_get_by_instance_and_network(*params)
self._assertEqualObjects(r_vif, vif)
def test_virtual_interface_delete_by_instance(self):
inst_uuid2 = db.instance_create(self.ctxt, {})['uuid']
values = [dict(address='fake1'), dict(address='fake2'),
dict(address='fake3', instance_uuid=inst_uuid2)]
for vals in values:
self._create_virt_interface(vals)
db.virtual_interface_delete_by_instance(self.ctxt, self.instance_uuid)
real_vifs1 = db.virtual_interface_get_by_instance(self.ctxt,
self.instance_uuid)
real_vifs2 = db.virtual_interface_get_by_instance(self.ctxt,
inst_uuid2)
self.assertEqual(len(real_vifs1), 0)
self.assertEqual(len(real_vifs2), 1)
def test_virtual_interface_get_all(self):
inst_uuid2 = db.instance_create(self.ctxt, {})['uuid']
values = [dict(address='fake1'), dict(address='fake2'),
dict(address='fake3', instance_uuid=inst_uuid2)]
vifs = [self._create_virt_interface(val) for val in values]
real_vifs = db.virtual_interface_get_all(self.ctxt)
self._assertEqualListsOfObjects(vifs, real_vifs)
class NetworkTestCase(test.TestCase, ModelsObjectComparatorMixin):
"""Tests for db.api.network_* methods."""
def setUp(self):
super(NetworkTestCase, self).setUp()
self.ctxt = context.get_admin_context()
def _get_associated_fixed_ip(self, host, cidr, ip):
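        # Helper: build the full network -> instance -> vif -> fixed IP chain
        # on the given host and associate the address, returning the network
        # and instance for the caller's assertions.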
network = db.network_create_safe(self.ctxt,
{'project_id': 'project1', 'cidr': cidr})
self.assertFalse(db.network_in_use_on_host(self.ctxt, network.id,
host))
instance = db.instance_create(self.ctxt,
{'project_id': 'project1', 'host': host})
virtual_interface = db.virtual_interface_create(self.ctxt,
{'instance_uuid': instance.uuid, 'network_id': network.id,
'address': ip})
db.fixed_ip_create(self.ctxt, {'address': ip,
'network_id': network.id, 'allocated': True,
'virtual_interface_id': virtual_interface.id})
db.fixed_ip_associate(self.ctxt, ip, instance.uuid,
network.id)
return network, instance
def test_network_get_associated_fixed_ips(self):
network, instance = self._get_associated_fixed_ip('host.net',
'192.0.2.0/30', '192.0.2.1')
data = db.network_get_associated_fixed_ips(self.ctxt, network.id)
self.assertEqual(1, len(data))
self.assertEqual('192.0.2.1', data[0]['address'])
self.assertEqual('192.0.2.1', data[0]['vif_address'])
self.assertEqual(instance.uuid, data[0]['instance_uuid'])
self.assertTrue(data[0]['allocated'])
def test_network_create_safe(self):
values = {'host': 'localhost', 'project_id': 'project1'}
network = db.network_create_safe(self.ctxt, values)
self.assertEqual(36, len(network['uuid']))
db_network = db.network_get(self.ctxt, network['id'])
self._assertEqualObjects(network, db_network)
def test_network_create_with_duplicate_vlan(self):
values1 = {'host': 'localhost', 'project_id': 'project1', 'vlan': 1}
values2 = {'host': 'something', 'project_id': 'project1', 'vlan': 1}
db.network_create_safe(self.ctxt, values1)
self.assertRaises(exception.DuplicateVlan,
db.network_create_safe, self.ctxt, values2)
def test_network_delete_safe(self):
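        # A network with an allocated fixed IP must refuse deletion; once the
        # IP is deallocated the delete should succeed and soft-delete the
        # network's fixed IPs.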
values = {'host': 'localhost', 'project_id': 'project1'}
network = db.network_create_safe(self.ctxt, values)
db_network = db.network_get(self.ctxt, network['id'])
values = {'network_id': network['id'], 'address': '192.168.1.5'}
address1 = db.fixed_ip_create(self.ctxt, values)['address']
values = {'network_id': network['id'],
'address': '192.168.1.6',
'allocated': True}
address2 = db.fixed_ip_create(self.ctxt, values)['address']
self.assertRaises(exception.NetworkInUse,
db.network_delete_safe, self.ctxt, network['id'])
db.fixed_ip_update(self.ctxt, address2, {'allocated': False})
network = db.network_delete_safe(self.ctxt, network['id'])
self.assertRaises(exception.FixedIpNotFoundForAddress,
db.fixed_ip_get_by_address, self.ctxt, address1)
ctxt = self.ctxt.elevated(read_deleted='yes')
fixed_ip = db.fixed_ip_get_by_address(ctxt, address1)
self.assertTrue(fixed_ip['deleted'])
def test_network_in_use_on_host(self):
values = {'host': 'foo', 'hostname': 'myname'}
instance = db.instance_create(self.ctxt, values)
values = {'address': '192.168.1.5', 'instance_uuid': instance['uuid']}
vif = db.virtual_interface_create(self.ctxt, values)
values = {'address': '192.168.1.6',
'network_id': 1,
'allocated': True,
'instance_uuid': instance['uuid'],
'virtual_interface_id': vif['id']}
db.fixed_ip_create(self.ctxt, values)
        self.assertTrue(db.network_in_use_on_host(self.ctxt, 1, 'foo'))
        self.assertFalse(db.network_in_use_on_host(self.ctxt, 1, 'bar'))
def test_network_update_nonexistent(self):
self.assertRaises(exception.NetworkNotFound,
db.network_update, self.ctxt, 'nonexistent', {})
def test_network_update_with_duplicate_vlan(self):
values1 = {'host': 'localhost', 'project_id': 'project1', 'vlan': 1}
values2 = {'host': 'something', 'project_id': 'project1', 'vlan': 2}
network_ref = db.network_create_safe(self.ctxt, values1)
db.network_create_safe(self.ctxt, values2)
self.assertRaises(exception.DuplicateVlan,
db.network_update, self.ctxt,
network_ref["id"], values2)
def test_network_update(self):
network = db.network_create_safe(self.ctxt, {'project_id': 'project1',
'vlan': 1, 'host': 'test.com'})
db.network_update(self.ctxt, network.id, {'vlan': 2})
network_new = db.network_get(self.ctxt, network.id)
self.assertEqual(2, network_new.vlan)
def test_network_set_host_nonexistent_network(self):
self.assertRaises(exception.NetworkNotFound,
db.network_set_host, self.ctxt, 'nonexistent', 'nonexistent')
def test_network_set_host_with_initially_no_host(self):
values = {'host': 'example.com', 'project_id': 'project1'}
network = db.network_create_safe(self.ctxt, values)
self.assertEqual(
db.network_set_host(self.ctxt, network.id, 'new.example.com'),
'example.com')
def test_network_set_host(self):
values = {'project_id': 'project1'}
network = db.network_create_safe(self.ctxt, values)
self.assertEqual(
db.network_set_host(self.ctxt, network.id, 'example.com'),
'example.com')
self.assertEqual('example.com',
db.network_get(self.ctxt, network.id).host)
def test_network_get_all_by_host(self):
self.assertEqual([],
db.network_get_all_by_host(self.ctxt, 'example.com'))
host = 'h1.example.com'
# network with host set
net1 = db.network_create_safe(self.ctxt, {'host': host})
self._assertEqualListsOfObjects([net1],
db.network_get_all_by_host(self.ctxt, host))
# network with fixed ip with host set
net2 = db.network_create_safe(self.ctxt, {})
db.fixed_ip_create(self.ctxt, {'host': host, 'network_id': net2.id})
        data = db.network_get_all_by_host(self.ctxt, host)
        self._assertEqualListsOfObjects([net1, net2], data)
# network with instance with host set
net3 = db.network_create_safe(self.ctxt, {})
instance = db.instance_create(self.ctxt, {'host': host})
vif = db.virtual_interface_create(self.ctxt,
{'instance_uuid': instance.uuid})
db.fixed_ip_create(self.ctxt, {'network_id': net3.id,
'virtual_interface_id': vif.id})
self._assertEqualListsOfObjects([net1, net2, net3],
db.network_get_all_by_host(self.ctxt, host))
def test_network_get_by_cidr(self):
cidr = '192.0.2.0/30'
cidr_v6 = '2001:db8:1::/64'
network = db.network_create_safe(self.ctxt,
{'project_id': 'project1', 'cidr': cidr, 'cidr_v6': cidr_v6})
self._assertEqualObjects(network,
db.network_get_by_cidr(self.ctxt, cidr))
self._assertEqualObjects(network,
db.network_get_by_cidr(self.ctxt, cidr_v6))
def test_network_get_by_cidr_nonexistent(self):
self.assertRaises(exception.NetworkNotFoundForCidr,
db.network_get_by_cidr, self.ctxt, '192.0.2.0/30')
def test_network_get_by_uuid(self):
network = db.network_create_safe(self.ctxt,
{'project_id': 'project_1'})
self._assertEqualObjects(network,
db.network_get_by_uuid(self.ctxt, network.uuid))
def test_network_get_by_uuid_nonexistent(self):
self.assertRaises(exception.NetworkNotFoundForUUID,
db.network_get_by_uuid, self.ctxt, 'non-existent-uuid')
def test_network_get_all_by_uuids_no_networks(self):
self.assertRaises(exception.NoNetworksFound,
db.network_get_all_by_uuids, self.ctxt, ['non-existent-uuid'])
def test_network_get_all_by_uuids(self):
net1 = db.network_create_safe(self.ctxt, {})
net2 = db.network_create_safe(self.ctxt, {})
self._assertEqualListsOfObjects([net1, net2],
db.network_get_all_by_uuids(self.ctxt, [net1.uuid, net2.uuid]))
def test_network_get_all_no_networks(self):
self.assertRaises(exception.NoNetworksFound,
db.network_get_all, self.ctxt)
def test_network_get_all(self):
network = db.network_create_safe(self.ctxt, {})
network_db = db.network_get_all(self.ctxt)
self.assertEqual(1, len(network_db))
self._assertEqualObjects(network, network_db[0])
def test_network_get_all_admin_user(self):
network1 = db.network_create_safe(self.ctxt, {})
network2 = db.network_create_safe(self.ctxt,
{'project_id': 'project1'})
self._assertEqualListsOfObjects([network1, network2],
db.network_get_all(self.ctxt,
project_only=True))
def test_network_get_all_normal_user(self):
normal_ctxt = context.RequestContext('fake', 'fake')
db.network_create_safe(self.ctxt, {})
db.network_create_safe(self.ctxt, {'project_id': 'project1'})
network1 = db.network_create_safe(self.ctxt,
{'project_id': 'fake'})
network_db = db.network_get_all(normal_ctxt, project_only=True)
self.assertEqual(1, len(network_db))
self._assertEqualObjects(network1, network_db[0])
def test_network_get(self):
network = db.network_create_safe(self.ctxt, {})
self._assertEqualObjects(db.network_get(self.ctxt, network.id),
network)
db.network_delete_safe(self.ctxt, network.id)
self.assertRaises(exception.NetworkNotFound,
db.network_get, self.ctxt, network.id)
def test_network_associate(self):
network = db.network_create_safe(self.ctxt, {})
self.assertIsNone(network.project_id)
db.network_associate(self.ctxt, "project1", network.id)
self.assertEqual("project1", db.network_get(self.ctxt,
network.id).project_id)
    def test_network_disassociate(self):
network = db.network_create_safe(self.ctxt,
{'project_id': 'project1', 'host': 'test.net'})
# disassociate project
db.network_disassociate(self.ctxt, network.id, False, True)
self.assertIsNone(db.network_get(self.ctxt, network.id).project_id)
# disassociate host
db.network_disassociate(self.ctxt, network.id, True, False)
self.assertIsNone(db.network_get(self.ctxt, network.id).host)
def test_network_count_reserved_ips(self):
net = db.network_create_safe(self.ctxt, {})
self.assertEqual(0, db.network_count_reserved_ips(self.ctxt, net.id))
db.fixed_ip_create(self.ctxt, {'network_id': net.id,
'reserved': True})
self.assertEqual(1, db.network_count_reserved_ips(self.ctxt, net.id))
class KeyPairTestCase(test.TestCase, ModelsObjectComparatorMixin):
def setUp(self):
super(KeyPairTestCase, self).setUp()
self.ctxt = context.get_admin_context()
def _create_key_pair(self, values):
return db.key_pair_create(self.ctxt, values)
def test_key_pair_create(self):
param = {
'name': 'test_1',
'user_id': 'test_user_id_1',
'public_key': 'test_public_key_1',
'fingerprint': 'test_fingerprint_1'
}
key_pair = self._create_key_pair(param)
        self.assertIsNotNone(key_pair['id'])
ignored_keys = ['deleted', 'created_at', 'updated_at',
'deleted_at', 'id']
self._assertEqualObjects(key_pair, param, ignored_keys)
def test_key_pair_create_with_duplicate_name(self):
params = {'name': 'test_name', 'user_id': 'test_user_id'}
self._create_key_pair(params)
self.assertRaises(exception.KeyPairExists, self._create_key_pair,
params)
def test_key_pair_get(self):
params = [
{'name': 'test_1', 'user_id': 'test_user_id_1'},
{'name': 'test_2', 'user_id': 'test_user_id_2'},
{'name': 'test_3', 'user_id': 'test_user_id_3'}
]
key_pairs = [self._create_key_pair(p) for p in params]
for key in key_pairs:
real_key = db.key_pair_get(self.ctxt, key['user_id'], key['name'])
self._assertEqualObjects(key, real_key)
def test_key_pair_get_no_results(self):
param = {'name': 'test_1', 'user_id': 'test_user_id_1'}
self.assertRaises(exception.KeypairNotFound, db.key_pair_get,
self.ctxt, param['user_id'], param['name'])
def test_key_pair_get_deleted(self):
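        # Destroyed keypairs are hidden from normal reads but still visible
        # with read_deleted='yes'; the soft-delete marker is set to the row id.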
param = {'name': 'test_1', 'user_id': 'test_user_id_1'}
key_pair_created = self._create_key_pair(param)
db.key_pair_destroy(self.ctxt, param['user_id'], param['name'])
self.assertRaises(exception.KeypairNotFound, db.key_pair_get,
self.ctxt, param['user_id'], param['name'])
ctxt = self.ctxt.elevated(read_deleted='yes')
key_pair_deleted = db.key_pair_get(ctxt, param['user_id'],
param['name'])
ignored_keys = ['deleted', 'created_at', 'updated_at', 'deleted_at']
self._assertEqualObjects(key_pair_deleted, key_pair_created,
ignored_keys)
self.assertEqual(key_pair_deleted['deleted'], key_pair_deleted['id'])
def test_key_pair_get_all_by_user(self):
params = [
{'name': 'test_1', 'user_id': 'test_user_id_1'},
{'name': 'test_2', 'user_id': 'test_user_id_1'},
{'name': 'test_3', 'user_id': 'test_user_id_2'}
]
key_pairs_user_1 = [self._create_key_pair(p) for p in params
if p['user_id'] == 'test_user_id_1']
key_pairs_user_2 = [self._create_key_pair(p) for p in params
if p['user_id'] == 'test_user_id_2']
real_keys_1 = db.key_pair_get_all_by_user(self.ctxt, 'test_user_id_1')
real_keys_2 = db.key_pair_get_all_by_user(self.ctxt, 'test_user_id_2')
self._assertEqualListsOfObjects(key_pairs_user_1, real_keys_1)
self._assertEqualListsOfObjects(key_pairs_user_2, real_keys_2)
def test_key_pair_count_by_user(self):
params = [
{'name': 'test_1', 'user_id': 'test_user_id_1'},
{'name': 'test_2', 'user_id': 'test_user_id_1'},
{'name': 'test_3', 'user_id': 'test_user_id_2'}
]
for p in params:
self._create_key_pair(p)
count_1 = db.key_pair_count_by_user(self.ctxt, 'test_user_id_1')
self.assertEqual(count_1, 2)
count_2 = db.key_pair_count_by_user(self.ctxt, 'test_user_id_2')
self.assertEqual(count_2, 1)
def test_key_pair_destroy(self):
param = {'name': 'test_1', 'user_id': 'test_user_id_1'}
self._create_key_pair(param)
db.key_pair_destroy(self.ctxt, param['user_id'], param['name'])
self.assertRaises(exception.KeypairNotFound, db.key_pair_get,
self.ctxt, param['user_id'], param['name'])
def test_key_pair_destroy_no_such_key(self):
param = {'name': 'test_1', 'user_id': 'test_user_id_1'}
self.assertRaises(exception.KeypairNotFound,
db.key_pair_destroy, self.ctxt,
param['user_id'], param['name'])
class QuotaTestCase(test.TestCase, ModelsObjectComparatorMixin):
"""Tests for db.api.quota_* methods."""
def setUp(self):
super(QuotaTestCase, self).setUp()
self.ctxt = context.get_admin_context()
def test_quota_create(self):
quota = db.quota_create(self.ctxt, 'project1', 'resource', 99)
self.assertEqual(quota.resource, 'resource')
self.assertEqual(quota.hard_limit, 99)
self.assertEqual(quota.project_id, 'project1')
def test_quota_get(self):
quota = db.quota_create(self.ctxt, 'project1', 'resource', 99)
quota_db = db.quota_get(self.ctxt, 'project1', 'resource')
self._assertEqualObjects(quota, quota_db)
def test_quota_get_all_by_project(self):
for i in range(3):
for j in range(3):
db.quota_create(self.ctxt, 'proj%d' % i, 'resource%d' % j, j)
for i in range(3):
quotas_db = db.quota_get_all_by_project(self.ctxt, 'proj%d' % i)
self.assertEqual(quotas_db, {'project_id': 'proj%d' % i,
'resource0': 0,
'resource1': 1,
'resource2': 2})
def test_quota_get_all_by_project_and_user(self):
for i in range(3):
for j in range(3):
db.quota_create(self.ctxt, 'proj%d' % i, 'resource%d' % j,
j - 1, user_id='user%d' % i)
for i in range(3):
quotas_db = db.quota_get_all_by_project_and_user(self.ctxt,
'proj%d' % i,
'user%d' % i)
self.assertEqual(quotas_db, {'project_id': 'proj%d' % i,
'user_id': 'user%d' % i,
'resource0': -1,
'resource1': 0,
'resource2': 1})
def test_quota_update(self):
db.quota_create(self.ctxt, 'project1', 'resource1', 41)
db.quota_update(self.ctxt, 'project1', 'resource1', 42)
quota = db.quota_get(self.ctxt, 'project1', 'resource1')
self.assertEqual(quota.hard_limit, 42)
self.assertEqual(quota.resource, 'resource1')
self.assertEqual(quota.project_id, 'project1')
def test_quota_update_nonexistent(self):
self.assertRaises(exception.ProjectQuotaNotFound,
db.quota_update, self.ctxt, 'project1', 'resource1', 42)
def test_quota_get_nonexistent(self):
self.assertRaises(exception.ProjectQuotaNotFound,
db.quota_get, self.ctxt, 'project1', 'resource1')
def test_quota_reserve_all_resources(self):
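        # Create a quota and a delta for every reservable resource, seed real
        # usage (instances, fixed/floating IPs, security groups), then verify
        # each reservation records the expected in_use and reserved counts.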
quotas = {}
deltas = {}
reservable_resources = {}
for i, resource in enumerate(quota.resources):
if isinstance(resource, quota.ReservableResource):
quotas[resource.name] = db.quota_create(self.ctxt, 'project1',
resource.name, 100)
deltas[resource.name] = i
reservable_resources[resource.name] = resource
usages = {'instances': 3, 'cores': 6, 'ram': 9}
instances = []
for i in range(3):
instances.append(db.instance_create(self.ctxt,
{'vcpus': 2, 'memory_mb': 3,
'project_id': 'project1'}))
usages['fixed_ips'] = 2
network = db.network_create_safe(self.ctxt, {})
for i in range(2):
address = '192.168.0.%d' % i
ip = db.fixed_ip_create(self.ctxt, {'project_id': 'project1',
'address': address,
'network_id': network['id']})
db.fixed_ip_associate(self.ctxt, address,
instances[0].uuid, network['id'])
usages['floating_ips'] = 5
for i in range(5):
db.floating_ip_create(self.ctxt, {'project_id': 'project1'})
usages['security_groups'] = 3
for i in range(3):
db.security_group_create(self.ctxt, {'project_id': 'project1'})
reservations_uuids = db.quota_reserve(self.ctxt, reservable_resources,
quotas, quotas, deltas, None,
None, None, 'project1')
resources_names = reservable_resources.keys()
for reservation_uuid in reservations_uuids:
reservation = db.reservation_get(self.ctxt, reservation_uuid)
usage = db.quota_usage_get(self.ctxt, 'project1',
reservation.resource)
self.assertEqual(usage.in_use, usages[reservation.resource],
'Resource: %s' % reservation.resource)
self.assertEqual(usage.reserved, deltas[reservation.resource])
self.assertIn(reservation.resource, resources_names)
resources_names.remove(reservation.resource)
self.assertEqual(len(resources_names), 0)
def test_quota_destroy_all_by_project(self):
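        # _quota_reserve is a helper defined elsewhere in this module that
        # seeds quotas, usages and reservations (resource0, resource1 and
        # fixed_ips) for the given project and user.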
reservations = _quota_reserve(self.ctxt, 'project1', 'user1')
db.quota_destroy_all_by_project(self.ctxt, 'project1')
self.assertEqual(db.quota_get_all_by_project(self.ctxt, 'project1'),
{'project_id': 'project1'})
self.assertEqual(db.quota_get_all_by_project_and_user(self.ctxt,
'project1', 'user1'),
{'project_id': 'project1', 'user_id': 'user1'})
self.assertEqual(db.quota_usage_get_all_by_project(
self.ctxt, 'project1'),
{'project_id': 'project1'})
for r in reservations:
self.assertRaises(exception.ReservationNotFound,
db.reservation_get, self.ctxt, r)
def test_quota_destroy_all_by_project_and_user(self):
reservations = _quota_reserve(self.ctxt, 'project1', 'user1')
db.quota_destroy_all_by_project_and_user(self.ctxt, 'project1',
'user1')
self.assertEqual(db.quota_get_all_by_project_and_user(self.ctxt,
'project1', 'user1'),
{'project_id': 'project1',
'user_id': 'user1'})
self.assertEqual(db.quota_usage_get_all_by_project_and_user(
self.ctxt, 'project1', 'user1'),
{'project_id': 'project1',
'user_id': 'user1',
'fixed_ips': {'in_use': 2, 'reserved': 2}})
for r in reservations:
self.assertRaises(exception.ReservationNotFound,
db.reservation_get, self.ctxt, r)
def test_quota_usage_get_nonexistent(self):
self.assertRaises(exception.QuotaUsageNotFound, db.quota_usage_get,
                          self.ctxt, 'p1', 'nonexistent_resource')
def test_quota_usage_get(self):
_quota_reserve(self.ctxt, 'p1', 'u1')
quota_usage = db.quota_usage_get(self.ctxt, 'p1', 'resource0')
expected = {'resource': 'resource0', 'project_id': 'p1',
'in_use': 0, 'reserved': 0, 'total': 0}
for key, value in expected.iteritems():
self.assertEqual(value, quota_usage[key])
def test_quota_usage_get_all_by_project(self):
_quota_reserve(self.ctxt, 'p1', 'u1')
expected = {'project_id': 'p1',
'resource0': {'in_use': 0, 'reserved': 0},
'resource1': {'in_use': 1, 'reserved': 1},
'fixed_ips': {'in_use': 2, 'reserved': 2}}
self.assertEqual(expected, db.quota_usage_get_all_by_project(
self.ctxt, 'p1'))
def test_quota_usage_get_all_by_project_and_user(self):
_quota_reserve(self.ctxt, 'p1', 'u1')
expected = {'project_id': 'p1',
'user_id': 'u1',
'resource0': {'in_use': 0, 'reserved': 0},
'resource1': {'in_use': 1, 'reserved': 1},
'fixed_ips': {'in_use': 2, 'reserved': 2}}
self.assertEqual(expected, db.quota_usage_get_all_by_project_and_user(
self.ctxt, 'p1', 'u1'))
def test_quota_usage_update_nonexistent(self):
self.assertRaises(exception.QuotaUsageNotFound, db.quota_usage_update,
self.ctxt, 'p1', 'u1', 'resource', in_use=42)
def test_quota_usage_update(self):
_quota_reserve(self.ctxt, 'p1', 'u1')
db.quota_usage_update(self.ctxt, 'p1', 'u1', 'resource0', in_use=42,
reserved=43)
quota_usage = db.quota_usage_get(self.ctxt, 'p1', 'resource0', 'u1')
expected = {'resource': 'resource0', 'project_id': 'p1',
'user_id': 'u1', 'in_use': 42, 'reserved': 43, 'total': 85}
for key, value in expected.iteritems():
self.assertEqual(value, quota_usage[key])
def test_quota_create_exists(self):
db.quota_create(self.ctxt, 'project1', 'resource1', 41)
self.assertRaises(exception.QuotaExists, db.quota_create, self.ctxt,
'project1', 'resource1', 42)
class QuotaClassTestCase(test.TestCase, ModelsObjectComparatorMixin):
def setUp(self):
super(QuotaClassTestCase, self).setUp()
self.ctxt = context.get_admin_context()
def test_quota_class_get_default(self):
params = {
'test_resource1': '10',
'test_resource2': '20',
'test_resource3': '30',
}
for res, limit in params.items():
db.quota_class_create(self.ctxt, 'default', res, limit)
defaults = db.quota_class_get_default(self.ctxt)
self.assertEqual(defaults, dict(class_name='default',
test_resource1=10,
test_resource2=20,
test_resource3=30))
def test_quota_class_create(self):
qc = db.quota_class_create(self.ctxt, 'class name', 'resource', 42)
self.assertEqual(qc.class_name, 'class name')
self.assertEqual(qc.resource, 'resource')
self.assertEqual(qc.hard_limit, 42)
def test_quota_class_get(self):
qc = db.quota_class_create(self.ctxt, 'class name', 'resource', 42)
qc_db = db.quota_class_get(self.ctxt, 'class name', 'resource')
self._assertEqualObjects(qc, qc_db)
def test_quota_class_get_nonexistent(self):
self.assertRaises(exception.QuotaClassNotFound, db.quota_class_get,
self.ctxt, 'nonexistent', 'resource')
def test_quota_class_get_all_by_name(self):
for i in range(3):
for j in range(3):
db.quota_class_create(self.ctxt, 'class%d' % i,
'resource%d' % j, j)
for i in range(3):
classes = db.quota_class_get_all_by_name(self.ctxt, 'class%d' % i)
self.assertEqual(classes, {'class_name': 'class%d' % i,
'resource0': 0, 'resource1': 1, 'resource2': 2})
def test_quota_class_update(self):
db.quota_class_create(self.ctxt, 'class name', 'resource', 42)
db.quota_class_update(self.ctxt, 'class name', 'resource', 43)
self.assertEqual(db.quota_class_get(self.ctxt, 'class name',
'resource').hard_limit, 43)
def test_quota_class_update_nonexistent(self):
self.assertRaises(exception.QuotaClassNotFound, db.quota_class_update,
self.ctxt, 'class name', 'resource', 42)
class S3ImageTestCase(test.TestCase):
def setUp(self):
super(S3ImageTestCase, self).setUp()
self.ctxt = context.get_admin_context()
self.values = [uuidutils.generate_uuid() for i in xrange(3)]
self.images = [db.s3_image_create(self.ctxt, uuid)
for uuid in self.values]
def test_s3_image_create(self):
for ref in self.images:
self.assertTrue(uuidutils.is_uuid_like(ref.uuid))
self.assertEqual(sorted(self.values),
sorted([ref.uuid for ref in self.images]))
def test_s3_image_get_by_uuid(self):
for uuid in self.values:
ref = db.s3_image_get_by_uuid(self.ctxt, uuid)
self.assertTrue(uuidutils.is_uuid_like(ref.uuid))
self.assertEqual(uuid, ref.uuid)
def test_s3_image_get(self):
self.assertEqual(sorted(self.values),
sorted([db.s3_image_get(self.ctxt, ref.id).uuid
for ref in self.images]))
def test_s3_image_get_not_found(self):
self.assertRaises(exception.ImageNotFound, db.s3_image_get, self.ctxt,
100500)
def test_s3_image_get_by_uuid_not_found(self):
self.assertRaises(exception.ImageNotFound, db.s3_image_get_by_uuid,
self.ctxt, uuidutils.generate_uuid())
class ComputeNodeTestCase(test.TestCase, ModelsObjectComparatorMixin):
_ignored_keys = ['id', 'deleted', 'deleted_at', 'created_at', 'updated_at']
def setUp(self):
super(ComputeNodeTestCase, self).setUp()
self.ctxt = context.get_admin_context()
self.service_dict = dict(host='host1', binary='nova-compute',
topic=CONF.compute_topic, report_count=1,
disabled=False)
self.service = db.service_create(self.ctxt, self.service_dict)
self.compute_node_dict = dict(vcpus=2, memory_mb=1024, local_gb=2048,
vcpus_used=0, memory_mb_used=0,
local_gb_used=0, free_ram_mb=1024,
free_disk_gb=2048, hypervisor_type="xen",
hypervisor_version=1, cpu_info="",
running_vms=0, current_workload=0,
service_id=self.service['id'],
disk_available_least=100,
hypervisor_hostname='abracadabra104',
host_ip='127.0.0.1',
supported_instances='',
pci_stats='')
# add some random stats
self.stats = dict(num_instances=3, num_proj_12345=2,
num_proj_23456=2, num_vm_building=3)
self.compute_node_dict['stats'] = self.stats
self.flags(reserved_host_memory_mb=0)
self.flags(reserved_host_disk_mb=0)
self.item = db.compute_node_create(self.ctxt, self.compute_node_dict)
def _stats_as_dict(self, stats):
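        # Compute node stats come back as a list of {'key': ..., 'value': ...}
        # rows; flatten them into a plain dict for easier comparison.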
d = {}
for s in stats:
key = s['key']
d[key] = s['value']
return d
def _stats_equal(self, stats, new_stats):
for k, v in stats.iteritems():
self.assertEqual(v, int(new_stats[k]))
def test_compute_node_create(self):
self._assertEqualObjects(self.compute_node_dict, self.item,
ignored_keys=self._ignored_keys + ['stats'])
new_stats = self._stats_as_dict(self.item['stats'])
self._stats_equal(self.stats, new_stats)
def test_compute_node_get_all(self):
date_fields = set(['created_at', 'updated_at',
'deleted_at', 'deleted'])
for no_date_fields in [False, True]:
nodes = db.compute_node_get_all(self.ctxt, no_date_fields)
self.assertEqual(1, len(nodes))
node = nodes[0]
self._assertEqualObjects(self.compute_node_dict, node,
ignored_keys=self._ignored_keys +
['stats', 'service'])
node_fields = set(node.keys())
if no_date_fields:
self.assertFalse(date_fields & node_fields)
else:
self.assertTrue(date_fields <= node_fields)
new_stats = self._stats_as_dict(node['stats'])
self._stats_equal(self.stats, new_stats)
def test_compute_node_get_all_deleted_compute_node(self):
# Create a service and compute node and ensure we can find its stats;
# delete the service and compute node when done and loop again
for x in range(2, 5):
# Create a service
service_data = self.service_dict.copy()
service_data['host'] = 'host-%s' % x
service = db.service_create(self.ctxt, service_data)
# Create a compute node
compute_node_data = self.compute_node_dict.copy()
compute_node_data['service_id'] = service['id']
compute_node_data['stats'] = self.stats.copy()
compute_node_data['hypervisor_hostname'] = 'hypervisor-%s' % x
node = db.compute_node_create(self.ctxt, compute_node_data)
# Ensure the "new" compute node is found
nodes = db.compute_node_get_all(self.ctxt, False)
self.assertEqual(2, len(nodes))
found = None
for n in nodes:
if n['id'] == node['id']:
found = n
break
            self.assertIsNotNone(found)
# Now ensure the match has stats!
self.assertNotEqual(self._stats_as_dict(found['stats']), {})
# Now delete the newly-created compute node to ensure the related
# compute node stats are wiped in a cascaded fashion
db.compute_node_delete(self.ctxt, node['id'])
# Clean up the service
db.service_destroy(self.ctxt, service['id'])
def test_compute_node_get_all_mult_compute_nodes_one_service_entry(self):
service_data = self.service_dict.copy()
service_data['host'] = 'host2'
service = db.service_create(self.ctxt, service_data)
existing_node = dict(self.item.iteritems())
existing_node['service'] = dict(self.service.iteritems())
expected = [existing_node]
for name in ['bm_node1', 'bm_node2']:
compute_node_data = self.compute_node_dict.copy()
compute_node_data['service_id'] = service['id']
compute_node_data['stats'] = self.stats
compute_node_data['hypervisor_hostname'] = 'bm_node_1'
node = db.compute_node_create(self.ctxt, compute_node_data)
node = dict(node.iteritems())
node['service'] = dict(service.iteritems())
expected.append(node)
result = sorted(db.compute_node_get_all(self.ctxt, False),
key=lambda n: n['hypervisor_hostname'])
self._assertEqualListsOfObjects(expected, result,
ignored_keys=['stats'])
def test_compute_node_get(self):
compute_node_id = self.item['id']
node = db.compute_node_get(self.ctxt, compute_node_id)
self._assertEqualObjects(self.compute_node_dict, node,
ignored_keys=self._ignored_keys + ['stats', 'service'])
new_stats = self._stats_as_dict(node['stats'])
self._stats_equal(self.stats, new_stats)
def test_compute_node_update(self):
compute_node_id = self.item['id']
stats = self._stats_as_dict(self.item['stats'])
# change some values:
stats['num_instances'] = 8
stats['num_tribbles'] = 1
values = {
'vcpus': 4,
'stats': stats,
}
item_updated = db.compute_node_update(self.ctxt, compute_node_id,
values)
self.assertEqual(4, item_updated['vcpus'])
new_stats = self._stats_as_dict(item_updated['stats'])
self._stats_equal(stats, new_stats)
def test_compute_node_delete(self):
compute_node_id = self.item['id']
db.compute_node_delete(self.ctxt, compute_node_id)
nodes = db.compute_node_get_all(self.ctxt)
self.assertEqual(len(nodes), 0)
def test_compute_node_search_by_hypervisor(self):
nodes_created = []
new_service = copy.copy(self.service_dict)
for i in xrange(3):
new_service['binary'] += str(i)
new_service['topic'] += str(i)
service = db.service_create(self.ctxt, new_service)
self.compute_node_dict['service_id'] = service['id']
self.compute_node_dict['hypervisor_hostname'] = 'testhost' + str(i)
self.compute_node_dict['stats'] = self.stats
node = db.compute_node_create(self.ctxt, self.compute_node_dict)
nodes_created.append(node)
nodes = db.compute_node_search_by_hypervisor(self.ctxt, 'host')
self.assertEqual(3, len(nodes))
self._assertEqualListsOfObjects(nodes_created, nodes,
ignored_keys=self._ignored_keys + ['stats', 'service'])
def test_compute_node_statistics(self):
stats = db.compute_node_statistics(self.ctxt)
self.assertEqual(stats.pop('count'), 1)
for k, v in stats.iteritems():
self.assertEqual(v, self.item[k])
def test_compute_node_not_found(self):
self.assertRaises(exception.ComputeHostNotFound, db.compute_node_get,
self.ctxt, 100500)
def test_compute_node_update_always_updates_updated_at(self):
item_updated = db.compute_node_update(self.ctxt,
self.item['id'], {})
self.assertNotEqual(self.item['updated_at'],
item_updated['updated_at'])
def test_compute_node_update_override_updated_at(self):
# Update the record once so updated_at is set.
first = db.compute_node_update(self.ctxt, self.item['id'],
{'free_ram_mb': '12'})
self.assertIsNotNone(first['updated_at'])
# Update a second time. Make sure that the updated_at value we send
# is overridden.
second = db.compute_node_update(self.ctxt, self.item['id'],
{'updated_at': first.updated_at,
'free_ram_mb': '13'})
self.assertNotEqual(first['updated_at'], second['updated_at'])
def test_compute_node_stat_unchanged(self):
# don't update unchanged stat values:
stats = self.item['stats']
stats_updated_at = dict([(stat['key'], stat['updated_at'])
for stat in stats])
stats_values = self._stats_as_dict(stats)
new_values = {'stats': stats_values}
compute_node_id = self.item['id']
db.compute_node_update(self.ctxt, compute_node_id, new_values)
updated_node = db.compute_node_get(self.ctxt, compute_node_id)
updated_stats = updated_node['stats']
for stat in updated_stats:
self.assertEqual(stat['updated_at'], stats_updated_at[stat['key']])
def test_compute_node_stat_prune(self):
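        # Updating with prune_stats=True should drop every stat row that is
        # not present in the new stats dict, leaving only num_instances.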
for stat in self.item['stats']:
if stat['key'] == 'num_instances':
num_instance_stat = stat
break
values = {
'stats': dict(num_instances=1)
}
db.compute_node_update(self.ctxt, self.item['id'], values,
prune_stats=True)
item_updated = db.compute_node_get_all(self.ctxt)[0]
self.assertEqual(1, len(item_updated['stats']))
stat = item_updated['stats'][0]
self.assertEqual(num_instance_stat['id'], stat['id'])
self.assertEqual(num_instance_stat['key'], stat['key'])
self.assertEqual(1, int(stat['value']))
class ProviderFwRuleTestCase(test.TestCase, ModelsObjectComparatorMixin):
def setUp(self):
super(ProviderFwRuleTestCase, self).setUp()
self.ctxt = context.get_admin_context()
self.values = self._get_rule_values()
self.rules = [db.provider_fw_rule_create(self.ctxt, rule)
for rule in self.values]
def _get_rule_values(self):
cidr_samples = ['192.168.0.0/24', '10.1.2.3/32',
'2001:4f8:3:ba::/64',
'2001:4f8:3:ba:2e0:81ff:fe22:d1f1/128']
values = []
for i in xrange(len(cidr_samples)):
rule = {}
rule['protocol'] = 'foo' + str(i)
rule['from_port'] = 9999 + i
rule['to_port'] = 9898 + i
rule['cidr'] = cidr_samples[i]
values.append(rule)
return values
def test_provider_fw_rule_create(self):
ignored_keys = ['id', 'deleted', 'deleted_at', 'created_at',
'updated_at']
for i, rule in enumerate(self.values):
self._assertEqualObjects(self.rules[i], rule,
ignored_keys=ignored_keys)
def test_provider_fw_rule_get_all(self):
self._assertEqualListsOfObjects(self.rules,
db.provider_fw_rule_get_all(self.ctxt))
def test_provider_fw_rule_destroy(self):
for rule in self.rules:
db.provider_fw_rule_destroy(self.ctxt, rule.id)
self.assertEqual([], db.provider_fw_rule_get_all(self.ctxt))
class CertificateTestCase(test.TestCase, ModelsObjectComparatorMixin):
def setUp(self):
super(CertificateTestCase, self).setUp()
self.ctxt = context.get_admin_context()
self.created = self._certificates_create()
def _get_certs_values(self):
base_values = {
'user_id': 'user',
'project_id': 'project',
'file_name': 'filename'
}
return [dict((k, v + str(x)) for k, v in base_values.iteritems())
for x in xrange(1, 4)]
def _certificates_create(self):
return [db.certificate_create(self.ctxt, cert)
for cert in self._get_certs_values()]
def test_certificate_create(self):
ignored_keys = ['id', 'deleted', 'deleted_at', 'created_at',
'updated_at']
for i, cert in enumerate(self._get_certs_values()):
self._assertEqualObjects(self.created[i], cert,
ignored_keys=ignored_keys)
def test_certificate_get_all_by_project(self):
cert = db.certificate_get_all_by_project(self.ctxt,
self.created[1].project_id)
self._assertEqualObjects(self.created[1], cert[0])
def test_certificate_get_all_by_user(self):
cert = db.certificate_get_all_by_user(self.ctxt,
self.created[1].user_id)
self._assertEqualObjects(self.created[1], cert[0])
def test_certificate_get_all_by_user_and_project(self):
cert = db.certificate_get_all_by_user_and_project(self.ctxt,
self.created[1].user_id, self.created[1].project_id)
self._assertEqualObjects(self.created[1], cert[0])
class ConsoleTestCase(test.TestCase, ModelsObjectComparatorMixin):
def setUp(self):
super(ConsoleTestCase, self).setUp()
self.ctxt = context.get_admin_context()
pools_data = [
{'address': '192.168.10.10',
'username': 'user1',
'password': 'passwd1',
'console_type': 'type1',
'public_hostname': 'public_host1',
'host': 'host1',
'compute_host': 'compute_host1',
},
{'address': '192.168.10.11',
'username': 'user2',
'password': 'passwd2',
'console_type': 'type2',
'public_hostname': 'public_host2',
'host': 'host2',
'compute_host': 'compute_host2',
},
]
self.console_pools = [db.console_pool_create(self.ctxt, val)
for val in pools_data]
instance_uuid = uuidutils.generate_uuid()
db.instance_create(self.ctxt, {'uuid': instance_uuid})
self.console_data = [dict([('instance_name', 'name' + str(x)),
('instance_uuid', instance_uuid),
('password', 'pass' + str(x)),
('port', 7878 + x),
('pool_id', self.console_pools[x]['id'])])
for x in xrange(len(pools_data))]
self.consoles = [db.console_create(self.ctxt, val)
for val in self.console_data]
def test_console_create(self):
ignored_keys = ['id', 'deleted', 'deleted_at', 'created_at',
'updated_at']
for console in self.consoles:
self.assertIsNotNone(console['id'])
self._assertEqualListsOfObjects(self.console_data, self.consoles,
ignored_keys=ignored_keys)
def test_console_get_by_id(self):
console = self.consoles[0]
console_get = db.console_get(self.ctxt, console['id'])
self._assertEqualObjects(console, console_get,
ignored_keys=['pool'])
def test_console_get_by_id_uuid(self):
console = self.consoles[0]
console_get = db.console_get(self.ctxt, console['id'],
console['instance_uuid'])
self._assertEqualObjects(console, console_get,
ignored_keys=['pool'])
def test_console_get_by_pool_instance(self):
console = self.consoles[0]
console_get = db.console_get_by_pool_instance(self.ctxt,
console['pool_id'], console['instance_uuid'])
self._assertEqualObjects(console, console_get,
ignored_keys=['pool'])
def test_console_get_all_by_instance(self):
instance_uuid = self.consoles[0]['instance_uuid']
consoles_get = db.console_get_all_by_instance(self.ctxt, instance_uuid)
self._assertEqualListsOfObjects(self.consoles, consoles_get)
def test_console_get_all_by_instance_with_pool(self):
instance_uuid = self.consoles[0]['instance_uuid']
consoles_get = db.console_get_all_by_instance(self.ctxt, instance_uuid,
columns_to_join=['pool'])
self._assertEqualListsOfObjects(self.consoles, consoles_get,
ignored_keys=['pool'])
self._assertEqualListsOfObjects([pool for pool in self.console_pools],
[c['pool'] for c in consoles_get])
def test_console_get_all_by_instance_empty(self):
consoles_get = db.console_get_all_by_instance(self.ctxt,
uuidutils.generate_uuid())
self.assertEqual(consoles_get, [])
def test_console_delete(self):
console_id = self.consoles[0]['id']
db.console_delete(self.ctxt, console_id)
self.assertRaises(exception.ConsoleNotFound, db.console_get,
self.ctxt, console_id)
def test_console_get_by_pool_instance_not_found(self):
self.assertRaises(exception.ConsoleNotFoundInPoolForInstance,
db.console_get_by_pool_instance, self.ctxt,
self.consoles[0]['pool_id'],
uuidutils.generate_uuid())
def test_console_get_not_found(self):
self.assertRaises(exception.ConsoleNotFound, db.console_get,
self.ctxt, 100500)
def test_console_get_not_found_instance(self):
self.assertRaises(exception.ConsoleNotFoundForInstance, db.console_get,
self.ctxt, self.consoles[0]['id'],
uuidutils.generate_uuid())
class CellTestCase(test.TestCase, ModelsObjectComparatorMixin):
_ignored_keys = ['id', 'deleted', 'deleted_at', 'created_at', 'updated_at']
def setUp(self):
super(CellTestCase, self).setUp()
self.ctxt = context.get_admin_context()
def _get_cell_base_values(self):
return {
'name': 'myname',
'api_url': 'apiurl',
'transport_url': 'transporturl',
'weight_offset': 0.5,
'weight_scale': 1.5,
'is_parent': True,
}
def _cell_value_modify(self, value, step):
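        # Perturb each base value according to its type so every generated
        # cell in _create_cells() is distinct.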
if isinstance(value, str):
return value + str(step)
elif isinstance(value, float):
return value + step + 0.6
elif isinstance(value, bool):
return bool(step % 2)
elif isinstance(value, int):
return value + step
def _create_cells(self):
test_values = []
for x in xrange(1, 4):
modified_val = dict([(k, self._cell_value_modify(v, x))
for k, v in self._get_cell_base_values().iteritems()])
db.cell_create(self.ctxt, modified_val)
test_values.append(modified_val)
return test_values
def test_cell_create(self):
cell = db.cell_create(self.ctxt, self._get_cell_base_values())
        self.assertIsNotNone(cell['id'])
self._assertEqualObjects(cell, self._get_cell_base_values(),
ignored_keys=self._ignored_keys)
def test_cell_update(self):
db.cell_create(self.ctxt, self._get_cell_base_values())
new_values = {
'api_url': 'apiurl1',
'transport_url': 'transporturl1',
'weight_offset': 0.6,
'weight_scale': 1.6,
'is_parent': False,
}
test_cellname = self._get_cell_base_values()['name']
updated_cell = db.cell_update(self.ctxt, test_cellname, new_values)
self._assertEqualObjects(updated_cell, new_values,
ignored_keys=self._ignored_keys + ['name'])
def test_cell_delete(self):
new_cells = self._create_cells()
for cell in new_cells:
test_cellname = cell['name']
db.cell_delete(self.ctxt, test_cellname)
self.assertRaises(exception.CellNotFound, db.cell_get, self.ctxt,
test_cellname)
def test_cell_get(self):
new_cells = self._create_cells()
for cell in new_cells:
cell_get = db.cell_get(self.ctxt, cell['name'])
self._assertEqualObjects(cell_get, cell,
ignored_keys=self._ignored_keys)
def test_cell_get_all(self):
new_cells = self._create_cells()
cells = db.cell_get_all(self.ctxt)
self.assertEqual(len(new_cells), len(cells))
cells_byname = dict([(newcell['name'],
newcell) for newcell in new_cells])
for cell in cells:
self._assertEqualObjects(cell, cells_byname[cell['name']],
self._ignored_keys)
def test_cell_get_not_found(self):
self._create_cells()
self.assertRaises(exception.CellNotFound, db.cell_get, self.ctxt,
'cellnotinbase')
def test_cell_update_not_found(self):
self._create_cells()
self.assertRaises(exception.CellNotFound, db.cell_update, self.ctxt,
'cellnotinbase', self._get_cell_base_values())
def test_cell_create_exists(self):
db.cell_create(self.ctxt, self._get_cell_base_values())
self.assertRaises(exception.CellExists, db.cell_create,
self.ctxt, self._get_cell_base_values())
class ConsolePoolTestCase(test.TestCase, ModelsObjectComparatorMixin):
def setUp(self):
super(ConsolePoolTestCase, self).setUp()
self.ctxt = context.get_admin_context()
self.test_console_pool_1 = {
'address': '192.168.2.10',
'username': 'user_1',
'password': 'secret_123',
'console_type': 'type_1',
'public_hostname': 'public_hostname_123',
'host': 'localhost',
'compute_host': '127.0.0.1',
}
self.test_console_pool_2 = {
'address': '192.168.2.11',
'username': 'user_2',
'password': 'secret_1234',
'console_type': 'type_2',
'public_hostname': 'public_hostname_1234',
'host': '127.0.0.1',
'compute_host': 'localhost',
}
self.test_console_pool_3 = {
'address': '192.168.2.12',
'username': 'user_3',
'password': 'secret_12345',
'console_type': 'type_2',
'public_hostname': 'public_hostname_12345',
'host': '127.0.0.1',
'compute_host': '192.168.1.1',
}
def test_console_pool_create(self):
console_pool = db.console_pool_create(
self.ctxt, self.test_console_pool_1)
        self.assertIsNotNone(console_pool.get('id'))
ignored_keys = ['deleted', 'created_at', 'updated_at',
'deleted_at', 'id']
self._assertEqualObjects(
console_pool, self.test_console_pool_1, ignored_keys)
def test_console_pool_create_duplicate(self):
db.console_pool_create(self.ctxt, self.test_console_pool_1)
self.assertRaises(exception.ConsolePoolExists, db.console_pool_create,
self.ctxt, self.test_console_pool_1)
def test_console_pool_get_by_host_type(self):
params = [
self.test_console_pool_1,
self.test_console_pool_2,
]
for p in params:
db.console_pool_create(self.ctxt, p)
ignored_keys = ['deleted', 'created_at', 'updated_at',
'deleted_at', 'id', 'consoles']
cp = self.test_console_pool_1
db_cp = db.console_pool_get_by_host_type(
self.ctxt, cp['compute_host'], cp['host'], cp['console_type']
)
self._assertEqualObjects(cp, db_cp, ignored_keys)
    def test_console_pool_get_by_host_type_no_results(self):
self.assertRaises(
exception.ConsolePoolNotFoundForHostType,
db.console_pool_get_by_host_type, self.ctxt, 'compute_host',
'host', 'console_type')
def test_console_pool_get_all_by_host_type(self):
params = [
self.test_console_pool_1,
self.test_console_pool_2,
self.test_console_pool_3,
]
for p in params:
db.console_pool_create(self.ctxt, p)
ignored_keys = ['deleted', 'created_at', 'updated_at',
'deleted_at', 'id', 'consoles']
cp = self.test_console_pool_2
db_cp = db.console_pool_get_all_by_host_type(
self.ctxt, cp['host'], cp['console_type'])
self._assertEqualListsOfObjects(
db_cp, [self.test_console_pool_2, self.test_console_pool_3],
ignored_keys)
def test_console_pool_get_all_by_host_type_no_results(self):
res = db.console_pool_get_all_by_host_type(
self.ctxt, 'cp_host', 'cp_console_type')
self.assertEqual([], res)
class DnsdomainTestCase(test.TestCase):
def setUp(self):
super(DnsdomainTestCase, self).setUp()
self.ctxt = context.get_admin_context()
self.domain = 'test.domain'
self.testzone = 'testzone'
self.project = 'fake'
def test_dnsdomain_register_for_zone(self):
db.dnsdomain_register_for_zone(self.ctxt, self.domain, self.testzone)
domain = db.dnsdomain_get(self.ctxt, self.domain)
self.assertEqual(domain['domain'], self.domain)
self.assertEqual(domain['availability_zone'], self.testzone)
self.assertEqual(domain['scope'], 'private')
def test_dnsdomain_register_for_project(self):
db.dnsdomain_register_for_project(self.ctxt, self.domain, self.project)
domain = db.dnsdomain_get(self.ctxt, self.domain)
self.assertEqual(domain['domain'], self.domain)
self.assertEqual(domain['project_id'], self.project)
self.assertEqual(domain['scope'], 'public')
def test_dnsdomain_list(self):
d_list = ['test.domain.one', 'test.domain.two']
db.dnsdomain_register_for_zone(self.ctxt, d_list[0], self.testzone)
db.dnsdomain_register_for_project(self.ctxt, d_list[1], self.project)
db_list = db.dnsdomain_list(self.ctxt)
self.assertEqual(sorted(d_list), sorted(db_list))
def test_dnsdomain_unregister(self):
db.dnsdomain_register_for_zone(self.ctxt, self.domain, self.testzone)
db.dnsdomain_unregister(self.ctxt, self.domain)
domain = db.dnsdomain_get(self.ctxt, self.domain)
self.assertIsNone(domain)
class BwUsageTestCase(test.TestCase, ModelsObjectComparatorMixin):
_ignored_keys = ['id', 'deleted', 'deleted_at', 'created_at', 'updated_at']
def setUp(self):
super(BwUsageTestCase, self).setUp()
self.ctxt = context.get_admin_context()
self.useFixture(test.TimeOverride())
def test_bw_usage_get_by_uuids(self):
now = timeutils.utcnow()
start_period = now - datetime.timedelta(seconds=10)
uuid3_refreshed = now - datetime.timedelta(seconds=5)
expected_bw_usages = [{'uuid': 'fake_uuid1',
'mac': 'fake_mac1',
'start_period': start_period,
'bw_in': 100,
'bw_out': 200,
'last_ctr_in': 12345,
'last_ctr_out': 67890,
'last_refreshed': now},
{'uuid': 'fake_uuid2',
'mac': 'fake_mac2',
'start_period': start_period,
'bw_in': 200,
'bw_out': 300,
'last_ctr_in': 22345,
'last_ctr_out': 77890,
'last_refreshed': now},
{'uuid': 'fake_uuid3',
'mac': 'fake_mac3',
'start_period': start_period,
'bw_in': 400,
'bw_out': 500,
'last_ctr_in': 32345,
'last_ctr_out': 87890,
'last_refreshed': uuid3_refreshed}]
bw_usages = db.bw_usage_get_by_uuids(self.ctxt,
['fake_uuid1', 'fake_uuid2'], start_period)
# No matches
self.assertEqual(len(bw_usages), 0)
# Add 3 entries
db.bw_usage_update(self.ctxt, 'fake_uuid1',
'fake_mac1', start_period,
100, 200, 12345, 67890)
db.bw_usage_update(self.ctxt, 'fake_uuid2',
'fake_mac2', start_period,
100, 200, 42, 42)
# Test explicit refreshed time
db.bw_usage_update(self.ctxt, 'fake_uuid3',
'fake_mac3', start_period,
400, 500, 32345, 87890,
last_refreshed=uuid3_refreshed)
# Update 2nd entry
db.bw_usage_update(self.ctxt, 'fake_uuid2',
'fake_mac2', start_period,
200, 300, 22345, 77890)
bw_usages = db.bw_usage_get_by_uuids(self.ctxt,
['fake_uuid1', 'fake_uuid2', 'fake_uuid3'], start_period)
self.assertEqual(len(bw_usages), 3)
for i, expected in enumerate(expected_bw_usages):
self._assertEqualObjects(bw_usages[i], expected,
ignored_keys=self._ignored_keys)
def test_bw_usage_get(self):
now = timeutils.utcnow()
start_period = now - datetime.timedelta(seconds=10)
expected_bw_usage = {'uuid': 'fake_uuid1',
'mac': 'fake_mac1',
'start_period': start_period,
'bw_in': 100,
'bw_out': 200,
'last_ctr_in': 12345,
'last_ctr_out': 67890,
'last_refreshed': now}
bw_usage = db.bw_usage_get(self.ctxt, 'fake_uuid1', start_period,
'fake_mac1')
self.assertIsNone(bw_usage)
db.bw_usage_update(self.ctxt, 'fake_uuid1',
'fake_mac1', start_period,
100, 200, 12345, 67890)
bw_usage = db.bw_usage_get(self.ctxt, 'fake_uuid1', start_period,
'fake_mac1')
self._assertEqualObjects(bw_usage, expected_bw_usage,
ignored_keys=self._ignored_keys)
class Ec2TestCase(test.TestCase):
def setUp(self):
super(Ec2TestCase, self).setUp()
self.ctxt = context.RequestContext('fake_user', 'fake_project')
def test_ec2_ids_not_found_are_printable(self):
def check_exc_format(method, value):
try:
method(self.ctxt, value)
except exception.NotFound as exc:
self.assertTrue(unicode(value) in unicode(exc))
check_exc_format(db.get_ec2_volume_id_by_uuid, 'fake')
check_exc_format(db.get_volume_uuid_by_ec2_id, 123456)
check_exc_format(db.get_ec2_snapshot_id_by_uuid, 'fake')
check_exc_format(db.get_snapshot_uuid_by_ec2_id, 123456)
check_exc_format(db.get_ec2_instance_id_by_uuid, 'fake')
check_exc_format(db.get_instance_uuid_by_ec2_id, 123456)
def test_ec2_volume_create(self):
vol = db.ec2_volume_create(self.ctxt, 'fake-uuid')
self.assertIsNotNone(vol['id'])
self.assertEqual(vol['uuid'], 'fake-uuid')
def test_get_ec2_volume_id_by_uuid(self):
vol = db.ec2_volume_create(self.ctxt, 'fake-uuid')
vol_id = db.get_ec2_volume_id_by_uuid(self.ctxt, 'fake-uuid')
self.assertEqual(vol['id'], vol_id)
def test_get_volume_uuid_by_ec2_id(self):
vol = db.ec2_volume_create(self.ctxt, 'fake-uuid')
vol_uuid = db.get_volume_uuid_by_ec2_id(self.ctxt, vol['id'])
self.assertEqual(vol_uuid, 'fake-uuid')
def test_get_ec2_volume_id_by_uuid_not_found(self):
self.assertRaises(exception.VolumeNotFound,
db.get_ec2_volume_id_by_uuid,
self.ctxt, 'uuid-not-present')
def test_get_volume_uuid_by_ec2_id_not_found(self):
self.assertRaises(exception.VolumeNotFound,
db.get_volume_uuid_by_ec2_id,
self.ctxt, 100500)
def test_ec2_snapshot_create(self):
snap = db.ec2_snapshot_create(self.ctxt, 'fake-uuid')
self.assertIsNotNone(snap['id'])
self.assertEqual(snap['uuid'], 'fake-uuid')
def test_get_ec2_snapshot_id_by_uuid(self):
snap = db.ec2_snapshot_create(self.ctxt, 'fake-uuid')
snap_id = db.get_ec2_snapshot_id_by_uuid(self.ctxt, 'fake-uuid')
self.assertEqual(snap['id'], snap_id)
def test_get_snapshot_uuid_by_ec2_id(self):
snap = db.ec2_snapshot_create(self.ctxt, 'fake-uuid')
snap_uuid = db.get_snapshot_uuid_by_ec2_id(self.ctxt, snap['id'])
self.assertEqual(snap_uuid, 'fake-uuid')
def test_get_ec2_snapshot_id_by_uuid_not_found(self):
self.assertRaises(exception.SnapshotNotFound,
db.get_ec2_snapshot_id_by_uuid,
self.ctxt, 'uuid-not-present')
def test_get_snapshot_uuid_by_ec2_id_not_found(self):
self.assertRaises(exception.SnapshotNotFound,
db.get_snapshot_uuid_by_ec2_id,
self.ctxt, 100500)
def test_ec2_instance_create(self):
inst = db.ec2_instance_create(self.ctxt, 'fake-uuid')
self.assertIsNotNone(inst['id'])
self.assertEqual(inst['uuid'], 'fake-uuid')
def test_get_ec2_instance_id_by_uuid(self):
inst = db.ec2_instance_create(self.ctxt, 'fake-uuid')
inst_id = db.get_ec2_instance_id_by_uuid(self.ctxt, 'fake-uuid')
self.assertEqual(inst['id'], inst_id)
def test_get_instance_uuid_by_ec2_id(self):
inst = db.ec2_instance_create(self.ctxt, 'fake-uuid')
inst_uuid = db.get_instance_uuid_by_ec2_id(self.ctxt, inst['id'])
self.assertEqual(inst_uuid, 'fake-uuid')
def test_get_ec2_instance_id_by_uuid_not_found(self):
self.assertRaises(exception.InstanceNotFound,
db.get_ec2_instance_id_by_uuid,
self.ctxt, 'uuid-not-present')
def test_get_instance_uuid_by_ec2_id_not_found(self):
self.assertRaises(exception.InstanceNotFound,
db.get_instance_uuid_by_ec2_id,
self.ctxt, 100500)
class ArchiveTestCase(test.TestCase):
def setUp(self):
super(ArchiveTestCase, self).setUp()
self.context = context.get_admin_context()
self.engine = get_engine()
self.conn = self.engine.connect()
self.instance_id_mappings = db_utils.get_table(self.engine,
"instance_id_mappings")
self.shadow_instance_id_mappings = db_utils.get_table(self.engine,
"shadow_instance_id_mappings")
self.dns_domains = db_utils.get_table(self.engine, "dns_domains")
self.shadow_dns_domains = db_utils.get_table(self.engine,
"shadow_dns_domains")
self.consoles = db_utils.get_table(self.engine, "consoles")
self.console_pools = db_utils.get_table(self.engine, "console_pools")
self.shadow_consoles = db_utils.get_table(self.engine,
"shadow_consoles")
self.shadow_console_pools = db_utils.get_table(self.engine,
"shadow_console_pools")
self.instances = db_utils.get_table(self.engine, "instances")
self.shadow_instances = db_utils.get_table(self.engine,
"shadow_instances")
self.uuidstrs = []
for unused in range(6):
self.uuidstrs.append(stdlib_uuid.uuid4().hex)
self.ids = []
self.id_tablenames_to_cleanup = set(["console_pools", "consoles"])
self.uuid_tablenames_to_cleanup = set(["instance_id_mappings",
"instances"])
self.domain_tablenames_to_cleanup = set(["dns_domains"])
def tearDown(self):
super(ArchiveTestCase, self).tearDown()
for tablename in self.id_tablenames_to_cleanup:
for name in [tablename, "shadow_" + tablename]:
table = db_utils.get_table(self.engine, name)
del_statement = table.delete(table.c.id.in_(self.ids))
self.conn.execute(del_statement)
for tablename in self.uuid_tablenames_to_cleanup:
for name in [tablename, "shadow_" + tablename]:
table = db_utils.get_table(self.engine, name)
del_statement = table.delete(table.c.uuid.in_(self.uuidstrs))
self.conn.execute(del_statement)
for tablename in self.domain_tablenames_to_cleanup:
for name in [tablename, "shadow_" + tablename]:
table = db_utils.get_table(self.engine, name)
del_statement = table.delete(table.c.domain.in_(self.uuidstrs))
self.conn.execute(del_statement)
def test_shadow_tables(self):
metadata = MetaData(bind=self.engine)
metadata.reflect()
for table_name in metadata.tables:
if table_name.startswith("shadow_"):
self.assertIn(table_name[7:], metadata.tables)
continue
self.assertTrue(db_utils.check_shadow_table(self.engine,
table_name))
def test_archive_deleted_rows(self):
# Add 6 rows to table
for uuidstr in self.uuidstrs:
ins_stmt = self.instance_id_mappings.insert().values(uuid=uuidstr)
self.conn.execute(ins_stmt)
# Set 4 to deleted
update_statement = self.instance_id_mappings.update().\
where(self.instance_id_mappings.c.uuid.in_(self.uuidstrs[:4]))\
.values(deleted=1)
self.conn.execute(update_statement)
        qiim = select([self.instance_id_mappings]).where(
            self.instance_id_mappings.c.uuid.in_(self.uuidstrs))
rows = self.conn.execute(qiim).fetchall()
# Verify we have 6 in main
self.assertEqual(len(rows), 6)
qsiim = select([self.shadow_instance_id_mappings]).\
where(self.shadow_instance_id_mappings.c.uuid.in_(
self.uuidstrs))
rows = self.conn.execute(qsiim).fetchall()
# Verify we have 0 in shadow
self.assertEqual(len(rows), 0)
# Archive 2 rows
db.archive_deleted_rows(self.context, max_rows=2)
rows = self.conn.execute(qiim).fetchall()
# Verify we have 4 left in main
self.assertEqual(len(rows), 4)
rows = self.conn.execute(qsiim).fetchall()
# Verify we have 2 in shadow
self.assertEqual(len(rows), 2)
# Archive 2 more rows
db.archive_deleted_rows(self.context, max_rows=2)
rows = self.conn.execute(qiim).fetchall()
# Verify we have 2 left in main
self.assertEqual(len(rows), 2)
rows = self.conn.execute(qsiim).fetchall()
# Verify we have 4 in shadow
self.assertEqual(len(rows), 4)
# Try to archive more, but there are no deleted rows left.
db.archive_deleted_rows(self.context, max_rows=2)
rows = self.conn.execute(qiim).fetchall()
# Verify we still have 2 left in main
self.assertEqual(len(rows), 2)
rows = self.conn.execute(qsiim).fetchall()
# Verify we still have 4 in shadow
self.assertEqual(len(rows), 4)
def test_archive_deleted_rows_for_every_uuid_table(self):
tablenames = []
for model_class in models.__dict__.itervalues():
if hasattr(model_class, "__tablename__"):
tablenames.append(model_class.__tablename__)
tablenames.sort()
for tablename in tablenames:
ret = self._test_archive_deleted_rows_for_one_uuid_table(tablename)
if ret == 0:
self.uuid_tablenames_to_cleanup.add(tablename)
def _test_archive_deleted_rows_for_one_uuid_table(self, tablename):
"""
:returns: 0 on success, 1 if no uuid column, 2 if insert failed
"""
main_table = db_utils.get_table(self.engine, tablename)
if not hasattr(main_table.c, "uuid"):
# Not a uuid table, so skip it.
return 1
shadow_table = db_utils.get_table(self.engine, "shadow_" + tablename)
# Add 6 rows to table
for uuidstr in self.uuidstrs:
ins_stmt = main_table.insert().values(uuid=uuidstr)
try:
self.conn.execute(ins_stmt)
except IntegrityError:
# This table has constraints that require a table-specific
# insert, so skip it.
return 2
# Set 4 to deleted
update_statement = main_table.update().\
where(main_table.c.uuid.in_(self.uuidstrs[:4]))\
.values(deleted=1)
self.conn.execute(update_statement)
qmt = select([main_table]).where(main_table.c.uuid.in_(
self.uuidstrs))
rows = self.conn.execute(qmt).fetchall()
# Verify we have 6 in main
self.assertEqual(len(rows), 6)
qst = select([shadow_table]).\
where(shadow_table.c.uuid.in_(self.uuidstrs))
rows = self.conn.execute(qst).fetchall()
# Verify we have 0 in shadow
self.assertEqual(len(rows), 0)
# Archive 2 rows
db.archive_deleted_rows_for_table(self.context, tablename, max_rows=2)
# Verify we have 4 left in main
rows = self.conn.execute(qmt).fetchall()
self.assertEqual(len(rows), 4)
# Verify we have 2 in shadow
rows = self.conn.execute(qst).fetchall()
self.assertEqual(len(rows), 2)
# Archive 2 more rows
db.archive_deleted_rows_for_table(self.context, tablename, max_rows=2)
# Verify we have 2 left in main
rows = self.conn.execute(qmt).fetchall()
self.assertEqual(len(rows), 2)
# Verify we have 4 in shadow
rows = self.conn.execute(qst).fetchall()
self.assertEqual(len(rows), 4)
# Try to archive more, but there are no deleted rows left.
db.archive_deleted_rows_for_table(self.context, tablename, max_rows=2)
# Verify we still have 2 left in main
rows = self.conn.execute(qmt).fetchall()
self.assertEqual(len(rows), 2)
# Verify we still have 4 in shadow
rows = self.conn.execute(qst).fetchall()
self.assertEqual(len(rows), 4)
return 0
def test_archive_deleted_rows_no_id_column(self):
uuidstr0 = self.uuidstrs[0]
ins_stmt = self.dns_domains.insert().values(domain=uuidstr0)
self.conn.execute(ins_stmt)
update_statement = self.dns_domains.update().\
where(self.dns_domains.c.domain == uuidstr0).\
values(deleted=True)
self.conn.execute(update_statement)
qdd = select([self.dns_domains], self.dns_domains.c.domain ==
uuidstr0)
rows = self.conn.execute(qdd).fetchall()
self.assertEqual(len(rows), 1)
qsdd = select([self.shadow_dns_domains],
self.shadow_dns_domains.c.domain == uuidstr0)
rows = self.conn.execute(qsdd).fetchall()
self.assertEqual(len(rows), 0)
db.archive_deleted_rows(self.context, max_rows=1)
rows = self.conn.execute(qdd).fetchall()
self.assertEqual(len(rows), 0)
rows = self.conn.execute(qsdd).fetchall()
self.assertEqual(len(rows), 1)
def test_archive_deleted_rows_fk_constraint(self):
# consoles.pool_id depends on console_pools.id
# SQLite doesn't enforce foreign key constraints without a pragma.
dialect = self.engine.url.get_dialect()
if dialect == sqlite.dialect:
# We're seeing issues with foreign key support in SQLite 3.6.20
# SQLAlchemy doesn't support it at all with < SQLite 3.6.19
# It works fine in SQLite 3.7.
# So return early to skip this test if running SQLite < 3.7
import sqlite3
tup = sqlite3.sqlite_version_info
if tup[0] < 3 or (tup[0] == 3 and tup[1] < 7):
self.skipTest(
'sqlite version too old for reliable SQLA foreign_keys')
self.conn.execute("PRAGMA foreign_keys = ON")
ins_stmt = self.console_pools.insert().values(deleted=1)
result = self.conn.execute(ins_stmt)
id1 = result.inserted_primary_key[0]
self.ids.append(id1)
ins_stmt = self.consoles.insert().values(deleted=1,
pool_id=id1)
result = self.conn.execute(ins_stmt)
id2 = result.inserted_primary_key[0]
self.ids.append(id2)
# The first try to archive console_pools should fail, due to FK.
num = db.archive_deleted_rows_for_table(self.context, "console_pools")
self.assertEqual(num, 0)
# Then archiving consoles should work.
num = db.archive_deleted_rows_for_table(self.context, "consoles")
self.assertEqual(num, 1)
# Then archiving console_pools should work.
num = db.archive_deleted_rows_for_table(self.context, "console_pools")
self.assertEqual(num, 1)
def test_archive_deleted_rows_2_tables(self):
# Add 6 rows to each table
for uuidstr in self.uuidstrs:
ins_stmt = self.instance_id_mappings.insert().values(uuid=uuidstr)
self.conn.execute(ins_stmt)
ins_stmt2 = self.instances.insert().values(uuid=uuidstr)
self.conn.execute(ins_stmt2)
# Set 4 of each to deleted
update_statement = self.instance_id_mappings.update().\
where(self.instance_id_mappings.c.uuid.in_(self.uuidstrs[:4]))\
.values(deleted=1)
self.conn.execute(update_statement)
update_statement2 = self.instances.update().\
where(self.instances.c.uuid.in_(self.uuidstrs[:4]))\
.values(deleted=1)
self.conn.execute(update_statement2)
# Verify we have 6 in each main table
qiim = select([self.instance_id_mappings]).where(
self.instance_id_mappings.c.uuid.in_(self.uuidstrs))
rows = self.conn.execute(qiim).fetchall()
self.assertEqual(len(rows), 6)
qi = select([self.instances]).where(self.instances.c.uuid.in_(
self.uuidstrs))
rows = self.conn.execute(qi).fetchall()
self.assertEqual(len(rows), 6)
# Verify we have 0 in each shadow table
qsiim = select([self.shadow_instance_id_mappings]).\
where(self.shadow_instance_id_mappings.c.uuid.in_(
self.uuidstrs))
rows = self.conn.execute(qsiim).fetchall()
self.assertEqual(len(rows), 0)
qsi = select([self.shadow_instances]).\
where(self.shadow_instances.c.uuid.in_(self.uuidstrs))
rows = self.conn.execute(qsi).fetchall()
self.assertEqual(len(rows), 0)
# Archive 7 rows, which should be 4 in one table and 3 in the other.
db.archive_deleted_rows(self.context, max_rows=7)
# Verify we have 5 left in the two main tables combined
iim_rows = self.conn.execute(qiim).fetchall()
i_rows = self.conn.execute(qi).fetchall()
self.assertEqual(len(iim_rows) + len(i_rows), 5)
# Verify we have 7 in the two shadow tables combined.
siim_rows = self.conn.execute(qsiim).fetchall()
si_rows = self.conn.execute(qsi).fetchall()
self.assertEqual(len(siim_rows) + len(si_rows), 7)
# Archive the remaining deleted rows.
db.archive_deleted_rows(self.context, max_rows=1)
# Verify we have 4 total left in both main tables.
iim_rows = self.conn.execute(qiim).fetchall()
i_rows = self.conn.execute(qi).fetchall()
self.assertEqual(len(iim_rows) + len(i_rows), 4)
# Verify we have 8 in shadow
siim_rows = self.conn.execute(qsiim).fetchall()
si_rows = self.conn.execute(qsi).fetchall()
self.assertEqual(len(siim_rows) + len(si_rows), 8)
# Try to archive more, but there are no deleted rows left.
db.archive_deleted_rows(self.context, max_rows=500)
# Verify we have 4 total left in both main tables.
iim_rows = self.conn.execute(qiim).fetchall()
i_rows = self.conn.execute(qi).fetchall()
self.assertEqual(len(iim_rows) + len(i_rows), 4)
# Verify we have 8 in shadow
siim_rows = self.conn.execute(qsiim).fetchall()
si_rows = self.conn.execute(qsi).fetchall()
self.assertEqual(len(siim_rows) + len(si_rows), 8)
class InstanceGroupDBApiTestCase(test.TestCase, ModelsObjectComparatorMixin):
def setUp(self):
super(InstanceGroupDBApiTestCase, self).setUp()
self.user_id = 'fake_user'
self.project_id = 'fake_project'
self.context = context.RequestContext(self.user_id, self.project_id)
def _get_default_values(self):
return {'name': 'fake_name',
'user_id': self.user_id,
'project_id': self.project_id}
def _create_instance_group(self, context, values, policies=None,
metadata=None, members=None):
return db.instance_group_create(context, values, policies=policies,
metadata=metadata, members=members)
def test_instance_group_create_no_key(self):
values = self._get_default_values()
result = self._create_instance_group(self.context, values)
ignored_keys = ['id', 'uuid', 'deleted', 'deleted_at', 'updated_at',
'created_at']
self._assertEqualObjects(result, values, ignored_keys)
self.assertTrue(uuidutils.is_uuid_like(result['uuid']))
def test_instance_group_create_with_key(self):
values = self._get_default_values()
values['uuid'] = 'fake_id'
result = self._create_instance_group(self.context, values)
ignored_keys = ['id', 'deleted', 'deleted_at', 'updated_at',
'created_at']
self._assertEqualObjects(result, values, ignored_keys)
def test_instance_group_create_with_same_key(self):
values = self._get_default_values()
values['uuid'] = 'fake_id'
result = self._create_instance_group(self.context, values)
self.assertRaises(exception.InstanceGroupIdExists,
self._create_instance_group, self.context, values)
def test_instance_group_get(self):
values = self._get_default_values()
result1 = self._create_instance_group(self.context, values)
result2 = db.instance_group_get(self.context, result1['uuid'])
self._assertEqualObjects(result1, result2)
def test_instance_group_update_simple(self):
values = self._get_default_values()
result1 = self._create_instance_group(self.context, values)
values = {'name': 'new_name', 'user_id': 'new_user',
'project_id': 'new_project'}
db.instance_group_update(self.context, result1['uuid'],
values)
result2 = db.instance_group_get(self.context, result1['uuid'])
self.assertEquals(result1['uuid'], result2['uuid'])
ignored_keys = ['id', 'uuid', 'deleted', 'deleted_at', 'updated_at',
'created_at']
self._assertEqualObjects(result2, values, ignored_keys)
def test_instance_group_delete(self):
values = self._get_default_values()
result = self._create_instance_group(self.context, values)
db.instance_group_delete(self.context, result['uuid'])
self.assertRaises(exception.InstanceGroupNotFound,
db.instance_group_delete, self.context,
result['uuid'])
def test_instance_group_get_nonexistent(self):
self.assertRaises(exception.InstanceGroupNotFound,
db.instance_group_get,
self.context,
'nonexistent')
def test_instance_group_delete_nonexistent(self):
self.assertRaises(exception.InstanceGroupNotFound,
db.instance_group_delete,
self.context,
'nonexistent')
def test_instance_group_get_all(self):
groups = db.instance_group_get_all(self.context)
self.assertEquals(0, len(groups))
value = self._get_default_values()
result1 = self._create_instance_group(self.context, value)
groups = db.instance_group_get_all(self.context)
self.assertEquals(1, len(groups))
value = self._get_default_values()
result2 = self._create_instance_group(self.context, value)
groups = db.instance_group_get_all(self.context)
results = [result1, result2]
self._assertEqualListsOfObjects(results, groups)
def test_instance_group_get_all_by_project_id(self):
groups = db.instance_group_get_all_by_project_id(self.context,
'invalid_project_id')
self.assertEquals(0, len(groups))
values = self._get_default_values()
result1 = self._create_instance_group(self.context, values)
groups = db.instance_group_get_all_by_project_id(self.context,
'fake_project')
self.assertEquals(1, len(groups))
values = self._get_default_values()
values['project_id'] = 'new_project_id'
result2 = self._create_instance_group(self.context, values)
groups = db.instance_group_get_all(self.context)
results = [result1, result2]
self._assertEqualListsOfObjects(results, groups)
projects = [{'name': 'fake_project', 'value': [result1]},
{'name': 'new_project_id', 'value': [result2]}]
for project in projects:
groups = db.instance_group_get_all_by_project_id(self.context,
project['name'])
self._assertEqualListsOfObjects(project['value'], groups)
def test_instance_group_update(self):
values = self._get_default_values()
result = self._create_instance_group(self.context, values)
ignored_keys = ['id', 'uuid', 'deleted', 'deleted_at', 'updated_at',
'created_at']
self._assertEqualObjects(result, values, ignored_keys)
self.assertTrue(uuidutils.is_uuid_like(result['uuid']))
id = result['uuid']
values = self._get_default_values()
values['name'] = 'new_fake_name'
db.instance_group_update(self.context, id, values)
result = db.instance_group_get(self.context, id)
self.assertEquals(result['name'], 'new_fake_name')
# update metadata
values = self._get_default_values()
metadataInput = {'key11': 'value1',
'key12': 'value2'}
values['metadata'] = metadataInput
db.instance_group_update(self.context, id, values)
result = db.instance_group_get(self.context, id)
metadata = result['metadetails']
self._assertEqualObjects(metadata, metadataInput)
        # update members
values = self._get_default_values()
members = ['instance_id1', 'instance_id2']
values['members'] = members
db.instance_group_update(self.context, id, values)
result = db.instance_group_get(self.context, id)
self._assertEqualListsOfPrimitivesAsSets(result['members'], members)
        # update policies
values = self._get_default_values()
policies = ['policy1', 'policy2']
values['policies'] = policies
db.instance_group_update(self.context, id, values)
result = db.instance_group_get(self.context, id)
self._assertEqualListsOfPrimitivesAsSets(result['policies'], policies)
# test invalid ID
self.assertRaises(exception.InstanceGroupNotFound,
db.instance_group_update, self.context,
'invalid_id', values)
class InstanceGroupMetadataDBApiTestCase(InstanceGroupDBApiTestCase):
def test_instance_group_metadata_on_create(self):
values = self._get_default_values()
values['uuid'] = 'fake_id'
metadata = {'key11': 'value1',
'key12': 'value2'}
result = self._create_instance_group(self.context, values,
metadata=metadata)
ignored_keys = ['id', 'deleted', 'deleted_at', 'updated_at',
'created_at']
self._assertEqualObjects(result, values, ignored_keys)
self._assertEqualObjects(metadata, result['metadetails'])
def test_instance_group_metadata_add(self):
values = self._get_default_values()
values['uuid'] = 'fake_id'
result = self._create_instance_group(self.context, values)
id = result['uuid']
metadata = db.instance_group_metadata_get(self.context, id)
self._assertEqualObjects(metadata, {})
metadata = {'key1': 'value1',
'key2': 'value2'}
db.instance_group_metadata_add(self.context, id, metadata)
metadata2 = db.instance_group_metadata_get(self.context, id)
self._assertEqualObjects(metadata, metadata2)
def test_instance_group_update(self):
values = self._get_default_values()
values['uuid'] = 'fake_id'
result = self._create_instance_group(self.context, values)
id = result['uuid']
metadata = {'key1': 'value1',
'key2': 'value2'}
db.instance_group_metadata_add(self.context, id, metadata)
metadata2 = db.instance_group_metadata_get(self.context, id)
self._assertEqualObjects(metadata, metadata2)
# check add with existing keys
metadata = {'key1': 'value1',
'key2': 'value2',
'key3': 'value3'}
db.instance_group_metadata_add(self.context, id, metadata)
metadata3 = db.instance_group_metadata_get(self.context, id)
self._assertEqualObjects(metadata, metadata3)
def test_instance_group_delete(self):
values = self._get_default_values()
values['uuid'] = 'fake_id'
result = self._create_instance_group(self.context, values)
id = result['uuid']
metadata = {'key1': 'value1',
'key2': 'value2',
'key3': 'value3'}
db.instance_group_metadata_add(self.context, id, metadata)
metadata3 = db.instance_group_metadata_get(self.context, id)
self._assertEqualObjects(metadata, metadata3)
db.instance_group_metadata_delete(self.context, id, 'key1')
metadata = db.instance_group_metadata_get(self.context, id)
self.assertTrue('key1' not in metadata)
db.instance_group_metadata_delete(self.context, id, 'key2')
metadata = db.instance_group_metadata_get(self.context, id)
self.assertTrue('key2' not in metadata)
def test_instance_group_metadata_invalid_ids(self):
values = self._get_default_values()
result = self._create_instance_group(self.context, values)
id = result['uuid']
self.assertRaises(exception.InstanceGroupNotFound,
db.instance_group_metadata_get,
self.context, 'invalid')
self.assertRaises(exception.InstanceGroupNotFound,
db.instance_group_metadata_delete, self.context,
'invalidid', 'key1')
metadata = {'key1': 'value1',
'key2': 'value2'}
db.instance_group_metadata_add(self.context, id, metadata)
self.assertRaises(exception.InstanceGroupMetadataNotFound,
db.instance_group_metadata_delete,
self.context, id, 'invalidkey')
class InstanceGroupMembersDBApiTestCase(InstanceGroupDBApiTestCase):
def test_instance_group_members_on_create(self):
values = self._get_default_values()
values['uuid'] = 'fake_id'
members = ['instance_id1', 'instance_id2']
result = self._create_instance_group(self.context, values,
members=members)
ignored_keys = ['id', 'deleted', 'deleted_at', 'updated_at',
'created_at']
self._assertEqualObjects(result, values, ignored_keys)
self._assertEqualListsOfPrimitivesAsSets(result['members'], members)
def test_instance_group_members_add(self):
values = self._get_default_values()
values['uuid'] = 'fake_id'
result = self._create_instance_group(self.context, values)
id = result['uuid']
members = db.instance_group_members_get(self.context, id)
self.assertEquals(members, [])
members2 = ['instance_id1', 'instance_id2']
db.instance_group_members_add(self.context, id, members2)
members = db.instance_group_members_get(self.context, id)
self._assertEqualListsOfPrimitivesAsSets(members, members2)
def test_instance_group_members_update(self):
values = self._get_default_values()
values['uuid'] = 'fake_id'
result = self._create_instance_group(self.context, values)
id = result['uuid']
members2 = ['instance_id1', 'instance_id2']
db.instance_group_members_add(self.context, id, members2)
members = db.instance_group_members_get(self.context, id)
self._assertEqualListsOfPrimitivesAsSets(members, members2)
# check add with existing keys
members3 = ['instance_id1', 'instance_id2', 'instance_id3']
db.instance_group_members_add(self.context, id, members3)
members = db.instance_group_members_get(self.context, id)
self._assertEqualListsOfPrimitivesAsSets(members, members3)
def test_instance_group_members_delete(self):
values = self._get_default_values()
values['uuid'] = 'fake_id'
result = self._create_instance_group(self.context, values)
id = result['uuid']
members3 = ['instance_id1', 'instance_id2', 'instance_id3']
db.instance_group_members_add(self.context, id, members3)
members = db.instance_group_members_get(self.context, id)
self._assertEqualListsOfPrimitivesAsSets(members, members3)
for instance_id in members3[:]:
db.instance_group_member_delete(self.context, id, instance_id)
members3.remove(instance_id)
members = db.instance_group_members_get(self.context, id)
self._assertEqualListsOfPrimitivesAsSets(members, members3)
def test_instance_group_members_invalid_ids(self):
values = self._get_default_values()
result = self._create_instance_group(self.context, values)
id = result['uuid']
self.assertRaises(exception.InstanceGroupNotFound,
db.instance_group_members_get,
self.context, 'invalid')
self.assertRaises(exception.InstanceGroupNotFound,
db.instance_group_member_delete, self.context,
'invalidid', 'instance_id1')
members = ['instance_id1', 'instance_id2']
db.instance_group_members_add(self.context, id, members)
self.assertRaises(exception.InstanceGroupMemberNotFound,
db.instance_group_member_delete,
self.context, id, 'invalid_id')
class InstanceGroupPoliciesDBApiTestCase(InstanceGroupDBApiTestCase):
def test_instance_group_policies_on_create(self):
values = self._get_default_values()
values['uuid'] = 'fake_id'
policies = ['policy1', 'policy2']
result = self._create_instance_group(self.context, values,
policies=policies)
ignored_keys = ['id', 'deleted', 'deleted_at', 'updated_at',
'created_at']
self._assertEqualObjects(result, values, ignored_keys)
self._assertEqualListsOfPrimitivesAsSets(result['policies'], policies)
def test_instance_group_policies_add(self):
values = self._get_default_values()
values['uuid'] = 'fake_id'
result = self._create_instance_group(self.context, values)
id = result['uuid']
policies = db.instance_group_policies_get(self.context, id)
self.assertEquals(policies, [])
policies2 = ['policy1', 'policy2']
db.instance_group_policies_add(self.context, id, policies2)
policies = db.instance_group_policies_get(self.context, id)
self._assertEqualListsOfPrimitivesAsSets(policies, policies2)
def test_instance_group_policies_update(self):
values = self._get_default_values()
values['uuid'] = 'fake_id'
result = self._create_instance_group(self.context, values)
id = result['uuid']
policies2 = ['policy1', 'policy2']
db.instance_group_policies_add(self.context, id, policies2)
policies = db.instance_group_policies_get(self.context, id)
self._assertEqualListsOfPrimitivesAsSets(policies, policies2)
policies3 = ['policy1', 'policy2', 'policy3']
db.instance_group_policies_add(self.context, id, policies3)
policies = db.instance_group_policies_get(self.context, id)
self._assertEqualListsOfPrimitivesAsSets(policies, policies3)
def test_instance_group_policies_delete(self):
values = self._get_default_values()
values['uuid'] = 'fake_id'
result = self._create_instance_group(self.context, values)
id = result['uuid']
policies3 = ['policy1', 'policy2', 'policy3']
db.instance_group_policies_add(self.context, id, policies3)
policies = db.instance_group_policies_get(self.context, id)
self._assertEqualListsOfPrimitivesAsSets(policies, policies3)
for policy in policies3[:]:
db.instance_group_policy_delete(self.context, id, policy)
policies3.remove(policy)
policies = db.instance_group_policies_get(self.context, id)
self._assertEqualListsOfPrimitivesAsSets(policies, policies3)
def test_instance_group_policies_invalid_ids(self):
values = self._get_default_values()
result = self._create_instance_group(self.context, values)
id = result['uuid']
self.assertRaises(exception.InstanceGroupNotFound,
db.instance_group_policies_get,
self.context, 'invalid')
self.assertRaises(exception.InstanceGroupNotFound,
db.instance_group_policy_delete, self.context,
'invalidid', 'policy1')
policies = ['policy1', 'policy2']
db.instance_group_policies_add(self.context, id, policies)
self.assertRaises(exception.InstanceGroupPolicyNotFound,
db.instance_group_policy_delete,
self.context, id, 'invalid_policy')
class PciDeviceDBApiTestCase(test.TestCase, ModelsObjectComparatorMixin):
def setUp(self):
super(PciDeviceDBApiTestCase, self).setUp()
self.user_id = 'fake_user'
self.project_id = 'fake_project'
self.context = context.RequestContext(self.user_id, self.project_id)
self.admin_context = context.get_admin_context()
self.ignored_keys = ['id', 'deleted', 'deleted_at', 'updated_at',
'created_at']
def _get_fake_pci_devs(self):
return {'id': 3353,
'compute_node_id': 1,
'address': '0000:0f:08:07',
'vendor_id': '8086',
'product_id': '1520',
'dev_type': 'type-VF',
'dev_id': 'pci_0000:0f:08:07',
'extra_info': None,
'label': 'label_8086_1520',
'status': 'available',
'instance_uuid': '00000000-0000-0000-0000-000000000010',
}, {'id': 3356,
'compute_node_id': 1,
'address': '0000:0f:03:07',
'vendor_id': '8083',
'product_id': '1523',
'dev_type': 'type-VF',
'dev_id': 'pci_0000:0f:08:07',
'extra_info': None,
'label': 'label_8086_1520',
'status': 'available',
'instance_uuid': '00000000-0000-0000-0000-000000000010',
}
def _create_fake_pci_devs(self):
v1, v2 = self._get_fake_pci_devs()
db.pci_device_update(self.admin_context, v1['compute_node_id'],
v1['address'], v1)
db.pci_device_update(self.admin_context, v2['compute_node_id'],
v2['address'], v2)
return (v1, v2)
def test_pci_device_get_by_addr(self):
v1, v2 = self._create_fake_pci_devs()
result = db.pci_device_get_by_addr(self.admin_context, 1,
'0000:0f:08:07')
self._assertEqualObjects(v1, result, self.ignored_keys)
def test_pci_device_get_by_addr_not_found(self):
self._create_fake_pci_devs()
self.assertRaises(exception.PciDeviceNotFound,
db.pci_device_get_by_addr, self.admin_context,
1, '0000:0f:08:09')
def test_pci_device_get_by_addr_low_priv(self):
self._create_fake_pci_devs()
self.assertRaises(exception.AdminRequired,
db.pci_device_get_by_addr,
self.context, 1, '0000:0f:08:07')
def test_pci_device_get_by_id(self):
v1, v2 = self._create_fake_pci_devs()
result = db.pci_device_get_by_id(self.admin_context, 3353)
ignored_keys = ['id', 'deleted', 'deleted_at', 'updated_at',
'created_at']
self._assertEqualObjects(v1, result, self.ignored_keys)
def test_pci_device_get_by_id_not_found(self):
self._create_fake_pci_devs()
self.assertRaises(exception.PciDeviceNotFoundById,
db.pci_device_get_by_id,
self.admin_context, 3354)
def test_pci_device_get_by_id_low_priv(self):
self._create_fake_pci_devs()
self.assertRaises(exception.AdminRequired,
db.pci_device_get_by_id,
self.context, 3553)
def test_pci_device_get_all_by_node(self):
v1, v2 = self._create_fake_pci_devs()
results = db.pci_device_get_all_by_node(self.admin_context, 1)
self._assertEqualListsOfObjects(results, [v1, v2], self.ignored_keys)
def test_pci_device_get_all_by_node_empty(self):
v1, v2 = self._get_fake_pci_devs()
results = db.pci_device_get_all_by_node(self.admin_context, 9)
self.assertEqual(len(results), 0)
def test_pci_device_get_all_by_node_low_priv(self):
self._create_fake_pci_devs()
self.assertRaises(exception.AdminRequired,
db.pci_device_get_all_by_node,
self.context, 1)
def test_pci_device_get_by_instance_uuid(self):
v1, v2 = self._get_fake_pci_devs()
v1['status'] = 'allocated'
v2['status'] = 'allocated'
db.pci_device_update(self.admin_context, v1['compute_node_id'],
v1['address'], v1)
db.pci_device_update(self.admin_context, v2['compute_node_id'],
v2['address'], v2)
results = db.pci_device_get_all_by_instance_uuid(
self.context,
'00000000-0000-0000-0000-000000000010')
self._assertEqualListsOfObjects(results, [v1, v2], self.ignored_keys)
def test_pci_device_get_by_instance_uuid_check_status(self):
v1, v2 = self._get_fake_pci_devs()
v1['status'] = 'allocated'
v2['status'] = 'claimed'
db.pci_device_update(self.admin_context, v1['compute_node_id'],
v1['address'], v1)
db.pci_device_update(self.admin_context, v2['compute_node_id'],
v2['address'], v2)
results = db.pci_device_get_all_by_instance_uuid(
self.context,
'00000000-0000-0000-0000-000000000010')
self._assertEqualListsOfObjects(results, [v1], self.ignored_keys)
def test_pci_device_update(self):
v1, v2 = self._get_fake_pci_devs()
v1['status'] = 'allocated'
db.pci_device_update(self.admin_context, v1['compute_node_id'],
v1['address'], v1)
result = db.pci_device_get_by_addr(
self.admin_context, 1, '0000:0f:08:07')
self._assertEqualObjects(v1, result, self.ignored_keys)
v1['status'] = 'claimed'
db.pci_device_update(self.admin_context, v1['compute_node_id'],
v1['address'], v1)
result = db.pci_device_get_by_addr(
self.admin_context, 1, '0000:0f:08:07')
self._assertEqualObjects(v1, result, self.ignored_keys)
def test_pci_device_update_low_priv(self):
v1, v2 = self._get_fake_pci_devs()
self.assertRaises(exception.AdminRequired,
db.pci_device_update, self.context,
v1['compute_node_id'], v1['address'], v1)
def test_pci_device_destroy(self):
v1, v2 = self._create_fake_pci_devs()
results = db.pci_device_get_all_by_node(self.admin_context, 1)
self._assertEqualListsOfObjects(results, [v1, v2], self.ignored_keys)
db.pci_device_destroy(self.admin_context, v1['compute_node_id'],
v1['address'])
results = db.pci_device_get_all_by_node(self.admin_context, 1)
self._assertEqualListsOfObjects(results, [v2], self.ignored_keys)
def test_pci_device_destroy_exception(self):
v1, v2 = self._get_fake_pci_devs()
db.pci_device_update(self.admin_context, v1['compute_node_id'],
v1['address'], v1)
results = db.pci_device_get_all_by_node(self.admin_context, 1)
self._assertEqualListsOfObjects(results, [v1], self.ignored_keys)
self.assertRaises(exception.PciDeviceNotFound,
db.pci_device_destroy,
self.admin_context,
v2['compute_node_id'],
v2['address'])
| TieWei/nova | nova/tests/db/test_db_api.py | Python | apache-2.0 | 311,863 | 0.000895 |
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Libtermkey(Package):
"""Easy keyboard entry processing for terminal programs"""
homepage = "http://www.leonerd.org.uk/code/libtermkey/"
url = "http://www.leonerd.org.uk/code/libtermkey/libtermkey-0.18.tar.gz"
version('0.18', '3be2e3e5a851a49cc5e8567ac108b520')
version('0.17', '20edb99e0d95ec1690fe90e6a555ae6d')
version('0.16', '7a24b675aaeb142d30db28e7554987d4')
version('0.15b', '27689756e6c86c56ae454f2ac259bc3d')
version('0.14', 'e08ce30f440f9715c459060e0e048978')
depends_on('libtool', type='build')
depends_on('ncurses')
def install(self, spec, prefix):
make()
make("install", "PREFIX=" + prefix)
| skosukhin/spack | var/spack/repos/builtin/packages/libtermkey/package.py | Python | lgpl-2.1 | 1,935 | 0.001034 |
# Copyright 2016, 2017 Peter Zybrick and others.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
ProcessSimHumidityToMister
:author: Pete Zybrick
:contact: [email protected]
:version: 1.0.0
"""
import logging
import time
import uuid
import sys
from iote2epyclient.launch.clientutils import ClientUtils
from iote2epyclient.schema.iote2erequest import Iote2eRequest
logger = logging.getLogger(__name__)
class ProcessSimHumidityToMister(object):
'''
Simulate Humidity Sensor and Mister
'''
def __init__(self, loginVo, sensorName):
self.loginVo = loginVo
self.sensorName = sensorName
self.humidityDirectionIncrease = True
self.HUMIDITY_MIN = 82.0
self.HUMIDITY_MAX = 93.0
self.HUMIDITY_INCR = .5
self.humidityNow = 90.0
def createIote2eRequest(self ):
time.sleep(2)
logger.info('ProcessSimHumidityToMister createIote2eRequest:')
if self.humidityDirectionIncrease and self.humidityNow < self.HUMIDITY_MAX:
self.humidityNow += self.HUMIDITY_INCR
elif (not self.humidityDirectionIncrease) and self.humidityNow > self.HUMIDITY_MIN:
self.humidityNow -= self.HUMIDITY_INCR;
logger.info( "humidityNow: {}".format(self.humidityNow))
if self.humidityNow <= self.HUMIDITY_MIN or self.humidityNow >= self.HUMIDITY_MAX:
logger.error("Humidity exceeded: {}".format(self.humidityNow))
# TODO: need to throw an exception or something so the calling thread exits
sys.exit(8)
# TODO: read humidity from sensor here
pairs = { self.sensorName: str(self.humidityNow)}
iote2eRequest = Iote2eRequest( login_name=self.loginVo.loginName,source_name=self.loginVo.sourceName, source_type='humidity',
request_uuid=str(uuid.uuid4()),
request_timestamp=ClientUtils.nowIso8601(),
pairs=pairs, operation='SENSORS_VALUES')
return iote2eRequest
def handleIote2eResult(self, iote2eResult ):
# TODO: turn on/off actuator (fan) here
logger.info('ProcessSimHumidityToMister handleIote2eResult: ' + str(iote2eResult))
actuatorValue = iote2eResult.pairs['actuatorValue'];
logger.info('actuatorValue {}'.format(actuatorValue))
if 'off' == actuatorValue:
self.humidityDirectionIncrease = False;
elif 'on' == actuatorValue:
self.humidityDirectionIncrease = True;
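# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): one way this simulator
# could be exercised without the full iote2e client stack.  _DemoLoginVo and
# _DemoResult are hypothetical stand-ins; in the real system the login VO comes
# from the client configuration and results are produced by the server-side
# rules engine.
# ---------------------------------------------------------------------------
class _DemoLoginVo(object):
    loginName = 'demo_login'
    sourceName = 'demo_source'
class _DemoResult(object):
    def __init__(self, actuatorValue):
        # handleIote2eResult() only reads pairs['actuatorValue']
        self.pairs = {'actuatorValue': actuatorValue}
def _demo_run_once():
    """Produce one simulated reading, then feed back a fake 'on' command."""
    process = ProcessSimHumidityToMister(_DemoLoginVo(), 'humidity1')
    request = process.createIote2eRequest()    # sleeps ~2s and logs the value
    logger.info('demo request: {}'.format(request))
    process.handleIote2eResult(_DemoResult('on'))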
| petezybrick/iote2e | iote2e-pyclient/src/iote2epyclient/processsim/processsimhumiditytomister.py | Python | apache-2.0 | 3,105 | 0.009662 |
#! /usr/bin/env python
import os
import os.path
import sys
import string
import getopt
import re
import socket
import time
import threading
import traceback
import types
import exceptions
import linecache
from code import InteractiveInterpreter
try:
from Tkinter import *
except ImportError:
print>>sys.__stderr__, "** IDLE can't import Tkinter. " \
"Your Python may not be configured for Tk. **"
sys.exit(1)
import tkMessageBox
from EditorWindow import EditorWindow, fixwordbreaks
from FileList import FileList
from ColorDelegator import ColorDelegator
from UndoDelegator import UndoDelegator
from OutputWindow import OutputWindow
from configHandler import idleConf
import idlever
import rpc
import Debugger
import RemoteDebugger
IDENTCHARS = string.ascii_letters + string.digits + "_"
LOCALHOST = '127.0.0.1'
try:
from signal import SIGTERM
except ImportError:
SIGTERM = 15
# Override warnings module to write to warning_stream. Initialize to send IDLE
# internal warnings to the console. ScriptBinding.check_syntax() will
# temporarily redirect the stream to the shell window to display warnings when
# checking user's code.
global warning_stream
warning_stream = sys.__stderr__
try:
import warnings
except ImportError:
pass
else:
def idle_showwarning(message, category, filename, lineno):
file = warning_stream
try:
file.write(warnings.formatwarning(message, category, filename, lineno))
except IOError:
pass ## file (probably __stderr__) is invalid, warning dropped.
warnings.showwarning = idle_showwarning
def idle_formatwarning(message, category, filename, lineno):
"""Format warnings the IDLE way"""
s = "\nWarning (from warnings module):\n"
s += ' File \"%s\", line %s\n' % (filename, lineno)
line = linecache.getline(filename, lineno).strip()
if line:
s += " %s\n" % line
s += "%s: %s\n>>> " % (category.__name__, message)
return s
warnings.formatwarning = idle_formatwarning
def extended_linecache_checkcache(filename=None,
orig_checkcache=linecache.checkcache):
"""Extend linecache.checkcache to preserve the <pyshell#...> entries
Rather than repeating the linecache code, patch it to save the
<pyshell#...> entries, call the original linecache.checkcache()
(which destroys them), and then restore the saved entries.
orig_checkcache is bound at definition time to the original
method, allowing it to be patched.
"""
cache = linecache.cache
save = {}
for filename in cache.keys():
if filename[:1] + filename[-1:] == '<>':
save[filename] = cache[filename]
orig_checkcache()
cache.update(save)
# Patch linecache.checkcache():
linecache.checkcache = extended_linecache_checkcache
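# Illustrative sketch (not part of the original module): with the patch above,
# pseudo-file entries survive a cache check.  The entry format mirrors the one
# written by ModifiedInterpreter.stuffsource() further below.
def _demo_checkcache_preserves_pyshell_entries():
    source = "print 'demo'"
    fake_name = "<pyshell#999>"
    linecache.cache[fake_name] = len(source) + 1, 0, source.split("\n"), fake_name
    linecache.checkcache()      # the unpatched version would drop the entry
    return fake_name in linecache.cache     # True with the patch applied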
class PyShellEditorWindow(EditorWindow):
"Regular text edit window in IDLE, supports breakpoints"
def __init__(self, *args):
self.breakpoints = []
EditorWindow.__init__(self, *args)
self.text.bind("<<set-breakpoint-here>>", self.set_breakpoint_here)
self.text.bind("<<clear-breakpoint-here>>", self.clear_breakpoint_here)
self.text.bind("<<open-python-shell>>", self.flist.open_shell)
self.breakpointPath = os.path.join(idleConf.GetUserCfgDir(),
'breakpoints.lst')
# whenever a file is changed, restore breakpoints
if self.io.filename: self.restore_file_breaks()
def filename_changed_hook(old_hook=self.io.filename_change_hook,
self=self):
self.restore_file_breaks()
old_hook()
self.io.set_filename_change_hook(filename_changed_hook)
rmenu_specs = [("Set Breakpoint", "<<set-breakpoint-here>>"),
("Clear Breakpoint", "<<clear-breakpoint-here>>")]
def set_breakpoint(self, lineno):
text = self.text
filename = self.io.filename
text.tag_add("BREAK", "%d.0" % lineno, "%d.0" % (lineno+1))
try:
i = self.breakpoints.index(lineno)
except ValueError: # only add if missing, i.e. do once
self.breakpoints.append(lineno)
try: # update the subprocess debugger
debug = self.flist.pyshell.interp.debugger
debug.set_breakpoint_here(filename, lineno)
except: # but debugger may not be active right now....
pass
def set_breakpoint_here(self, event=None):
text = self.text
filename = self.io.filename
if not filename:
text.bell()
return
lineno = int(float(text.index("insert")))
self.set_breakpoint(lineno)
def clear_breakpoint_here(self, event=None):
text = self.text
filename = self.io.filename
if not filename:
text.bell()
return
lineno = int(float(text.index("insert")))
try:
self.breakpoints.remove(lineno)
except:
pass
text.tag_remove("BREAK", "insert linestart",\
"insert lineend +1char")
try:
debug = self.flist.pyshell.interp.debugger
debug.clear_breakpoint_here(filename, lineno)
except:
pass
def clear_file_breaks(self):
if self.breakpoints:
text = self.text
filename = self.io.filename
if not filename:
text.bell()
return
self.breakpoints = []
text.tag_remove("BREAK", "1.0", END)
try:
debug = self.flist.pyshell.interp.debugger
debug.clear_file_breaks(filename)
except:
pass
def store_file_breaks(self):
"Save breakpoints when file is saved"
# XXX 13 Dec 2002 KBK Currently the file must be saved before it can
# be run. The breaks are saved at that time. If we introduce
# a temporary file save feature the save breaks functionality
# needs to be re-verified, since the breaks at the time the
# temp file is created may differ from the breaks at the last
# permanent save of the file. Currently, a break introduced
# after a save will be effective, but not persistent.
# This is necessary to keep the saved breaks synched with the
# saved file.
#
# Breakpoints are set as tagged ranges in the text. Certain
# kinds of edits cause these ranges to be deleted: Inserting
# or deleting a line just before a breakpoint, and certain
# deletions prior to a breakpoint. These issues need to be
# investigated and understood. It's not clear if they are
# Tk issues or IDLE issues, or whether they can actually
# be fixed. Since a modified file has to be saved before it is
# run, and since self.breakpoints (from which the subprocess
# debugger is loaded) is updated during the save, the visible
# breaks stay synched with the subprocess even if one of these
# unexpected breakpoint deletions occurs.
breaks = self.breakpoints
filename = self.io.filename
try:
lines = open(self.breakpointPath,"r").readlines()
except IOError:
lines = []
new_file = open(self.breakpointPath,"w")
for line in lines:
if not line.startswith(filename + '='):
new_file.write(line)
self.update_breakpoints()
breaks = self.breakpoints
if breaks:
new_file.write(filename + '=' + str(breaks) + '\n')
new_file.close()
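    # Illustrative note (not part of the original source): each line written to
    # breakpoints.lst pairs a file path with the Python list literal saved above,
    # e.g. a stored entry might look like
    #     /home/user/spam.py=[3, 17, 42]
    # restore_file_breaks() below eval()s the text after '=' to recover the list.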
def restore_file_breaks(self):
self.text.update() # this enables setting "BREAK" tags to be visible
filename = self.io.filename
if filename is None:
return
if os.path.isfile(self.breakpointPath):
lines = open(self.breakpointPath,"r").readlines()
for line in lines:
if line.startswith(filename + '='):
breakpoint_linenumbers = eval(line[len(filename)+1:])
for breakpoint_linenumber in breakpoint_linenumbers:
self.set_breakpoint(breakpoint_linenumber)
def update_breakpoints(self):
"Retrieves all the breakpoints in the current window"
text = self.text
ranges = text.tag_ranges("BREAK")
linenumber_list = self.ranges_to_linenumbers(ranges)
self.breakpoints = linenumber_list
def ranges_to_linenumbers(self, ranges):
lines = []
for index in range(0, len(ranges), 2):
lineno = int(float(ranges[index]))
end = int(float(ranges[index+1]))
while lineno < end:
lines.append(lineno)
lineno += 1
return lines
# XXX 13 Dec 2002 KBK Not used currently
# def saved_change_hook(self):
# "Extend base method - clear breaks if module is modified"
# if not self.get_saved():
# self.clear_file_breaks()
# EditorWindow.saved_change_hook(self)
def _close(self):
"Extend base method - clear breaks when module is closed"
self.clear_file_breaks()
EditorWindow._close(self)
class PyShellFileList(FileList):
"Extend base class: IDLE supports a shell and breakpoints"
# override FileList's class variable, instances return PyShellEditorWindow
# instead of EditorWindow when new edit windows are created.
EditorWindow = PyShellEditorWindow
pyshell = None
def open_shell(self, event=None):
if self.pyshell:
self.pyshell.top.wakeup()
else:
self.pyshell = PyShell(self)
if self.pyshell:
if not self.pyshell.begin():
return None
return self.pyshell
class ModifiedColorDelegator(ColorDelegator):
"Extend base class: colorizer for the shell window itself"
def __init__(self):
ColorDelegator.__init__(self)
self.LoadTagDefs()
def recolorize_main(self):
self.tag_remove("TODO", "1.0", "iomark")
self.tag_add("SYNC", "1.0", "iomark")
ColorDelegator.recolorize_main(self)
def LoadTagDefs(self):
ColorDelegator.LoadTagDefs(self)
theme = idleConf.GetOption('main','Theme','name')
self.tagdefs.update({
"stdin": {'background':None,'foreground':None},
"stdout": idleConf.GetHighlight(theme, "stdout"),
"stderr": idleConf.GetHighlight(theme, "stderr"),
"console": idleConf.GetHighlight(theme, "console"),
None: idleConf.GetHighlight(theme, "normal"),
})
class ModifiedUndoDelegator(UndoDelegator):
"Extend base class: forbid insert/delete before the I/O mark"
def insert(self, index, chars, tags=None):
try:
if self.delegate.compare(index, "<", "iomark"):
self.delegate.bell()
return
except TclError:
pass
UndoDelegator.insert(self, index, chars, tags)
def delete(self, index1, index2=None):
try:
if self.delegate.compare(index1, "<", "iomark"):
self.delegate.bell()
return
except TclError:
pass
UndoDelegator.delete(self, index1, index2)
class MyRPCClient(rpc.RPCClient):
def handle_EOF(self):
"Override the base class - just re-raise EOFError"
raise EOFError
class ModifiedInterpreter(InteractiveInterpreter):
def __init__(self, tkconsole):
self.tkconsole = tkconsole
locals = sys.modules['__main__'].__dict__
InteractiveInterpreter.__init__(self, locals=locals)
self.save_warnings_filters = None
self.restarting = False
self.subprocess_arglist = self.build_subprocess_arglist()
port = 8833
rpcclt = None
rpcpid = None
def spawn_subprocess(self):
args = self.subprocess_arglist
self.rpcpid = os.spawnv(os.P_NOWAIT, sys.executable, args)
def build_subprocess_arglist(self):
w = ['-W' + s for s in sys.warnoptions]
# Maybe IDLE is installed and is being accessed via sys.path,
# or maybe it's not installed and the idle.py script is being
# run from the IDLE source directory.
del_exitf = idleConf.GetOption('main', 'General', 'delete-exitfunc',
default=False, type='bool')
if __name__ == 'idlelib.PyShell':
command = "__import__('idlelib.run').run.main(%r)" % (del_exitf,)
else:
command = "__import__('run').main(%r)" % (del_exitf,)
if sys.platform[:3] == 'win' and ' ' in sys.executable:
# handle embedded space in path by quoting the argument
decorated_exec = '"%s"' % sys.executable
else:
decorated_exec = sys.executable
return [decorated_exec] + w + ["-c", command, str(self.port)]
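    # Illustrative note (not part of the original source): assuming the default
    # port above, no -W options, and delete-exitfunc resolving to False, the
    # list built here comes out to something like
    #     ['/usr/bin/python', '-c',
    #      "__import__('idlelib.run').run.main(False)", '8833']
    # i.e. the same interpreter is re-launched running idlelib.run as the
    # execution server.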
def start_subprocess(self):
# spawning first avoids passing a listening socket to the subprocess
self.spawn_subprocess()
#time.sleep(20) # test to simulate GUI not accepting connection
addr = (LOCALHOST, self.port)
# Idle starts listening for connection on localhost
for i in range(3):
time.sleep(i)
try:
self.rpcclt = MyRPCClient(addr)
break
except socket.error, err:
pass
else:
self.display_port_binding_error()
return None
# Accept the connection from the Python execution server
self.rpcclt.listening_sock.settimeout(10)
try:
self.rpcclt.accept()
except socket.timeout, err:
self.display_no_subprocess_error()
return None
self.rpcclt.register("stdin", self.tkconsole)
self.rpcclt.register("stdout", self.tkconsole.stdout)
self.rpcclt.register("stderr", self.tkconsole.stderr)
self.rpcclt.register("flist", self.tkconsole.flist)
self.rpcclt.register("linecache", linecache)
self.rpcclt.register("interp", self)
self.transfer_path()
self.poll_subprocess()
return self.rpcclt
def restart_subprocess(self):
if self.restarting:
return self.rpcclt
self.restarting = True
# close only the subprocess debugger
debug = self.getdebugger()
if debug:
try:
# Only close subprocess debugger, don't unregister gui_adap!
RemoteDebugger.close_subprocess_debugger(self.rpcclt)
except:
pass
# Kill subprocess, spawn a new one, accept connection.
self.rpcclt.close()
self.unix_terminate()
console = self.tkconsole
was_executing = console.executing
console.executing = False
self.spawn_subprocess()
try:
self.rpcclt.accept()
except socket.timeout, err:
self.display_no_subprocess_error()
return None
self.transfer_path()
# annotate restart in shell window and mark it
console.text.delete("iomark", "end-1c")
if was_executing:
console.write('\n')
console.showprompt()
halfbar = ((int(console.width) - 16) // 2) * '='
console.write(halfbar + ' RESTART ' + halfbar)
console.text.mark_set("restart", "end-1c")
console.text.mark_gravity("restart", "left")
console.showprompt()
# restart subprocess debugger
if debug:
# Restarted debugger connects to current instance of debug GUI
gui = RemoteDebugger.restart_subprocess_debugger(self.rpcclt)
# reload remote debugger breakpoints for all PyShellEditWindows
debug.load_breakpoints()
self.restarting = False
return self.rpcclt
def __request_interrupt(self):
self.rpcclt.remotecall("exec", "interrupt_the_server", (), {})
def interrupt_subprocess(self):
threading.Thread(target=self.__request_interrupt).start()
def kill_subprocess(self):
try:
self.rpcclt.close()
except AttributeError: # no socket
pass
self.unix_terminate()
self.tkconsole.executing = False
self.rpcclt = None
def unix_terminate(self):
"UNIX: make sure subprocess is terminated and collect status"
if hasattr(os, 'kill'):
try:
os.kill(self.rpcpid, SIGTERM)
except OSError:
# process already terminated:
return
else:
try:
os.waitpid(self.rpcpid, 0)
except OSError:
return
def transfer_path(self):
self.runcommand("""if 1:
import sys as _sys
_sys.path = %r
del _sys
_msg = 'Use File/Exit or your end-of-file key to quit IDLE'
__builtins__.quit = __builtins__.exit = _msg
del _msg
\n""" % (sys.path,))
active_seq = None
def poll_subprocess(self):
clt = self.rpcclt
if clt is None:
return
try:
response = clt.pollresponse(self.active_seq, wait=0.05)
except (EOFError, IOError, KeyboardInterrupt):
# lost connection or subprocess terminated itself, restart
# [the KBI is from rpc.SocketIO.handle_EOF()]
if self.tkconsole.closing:
return
response = None
self.restart_subprocess()
if response:
self.tkconsole.resetoutput()
self.active_seq = None
how, what = response
console = self.tkconsole.console
if how == "OK":
if what is not None:
print >>console, repr(what)
elif how == "EXCEPTION":
if self.tkconsole.getvar("<<toggle-jit-stack-viewer>>"):
self.remote_stack_viewer()
elif how == "ERROR":
errmsg = "PyShell.ModifiedInterpreter: Subprocess ERROR:\n"
print >>sys.__stderr__, errmsg, what
print >>console, errmsg, what
# we received a response to the currently active seq number:
self.tkconsole.endexecuting()
# Reschedule myself
if not self.tkconsole.closing:
self.tkconsole.text.after(self.tkconsole.pollinterval,
self.poll_subprocess)
debugger = None
def setdebugger(self, debugger):
self.debugger = debugger
def getdebugger(self):
return self.debugger
def open_remote_stack_viewer(self):
"""Initiate the remote stack viewer from a separate thread.
This method is called from the subprocess, and by returning from this
method we allow the subprocess to unblock. After a bit the shell
requests the subprocess to open the remote stack viewer which returns a
        static object looking at the last exception. It is queried through
the RPC mechanism.
"""
self.tkconsole.text.after(300, self.remote_stack_viewer)
return
def remote_stack_viewer(self):
import RemoteObjectBrowser
oid = self.rpcclt.remotequeue("exec", "stackviewer", ("flist",), {})
if oid is None:
self.tkconsole.root.bell()
return
item = RemoteObjectBrowser.StubObjectTreeItem(self.rpcclt, oid)
from TreeWidget import ScrolledCanvas, TreeNode
top = Toplevel(self.tkconsole.root)
theme = idleConf.GetOption('main','Theme','name')
background = idleConf.GetHighlight(theme, 'normal')['background']
sc = ScrolledCanvas(top, bg=background, highlightthickness=0)
sc.frame.pack(expand=1, fill="both")
node = TreeNode(sc.canvas, None, item)
node.expand()
# XXX Should GC the remote tree when closing the window
gid = 0
def execsource(self, source):
"Like runsource() but assumes complete exec source"
filename = self.stuffsource(source)
self.execfile(filename, source)
def execfile(self, filename, source=None):
"Execute an existing file"
if source is None:
source = open(filename, "r").read()
try:
code = compile(source, filename, "exec")
except (OverflowError, SyntaxError):
self.tkconsole.resetoutput()
tkerr = self.tkconsole.stderr
print>>tkerr, '*** Error in script or command!\n'
print>>tkerr, 'Traceback (most recent call last):'
InteractiveInterpreter.showsyntaxerror(self, filename)
self.tkconsole.showprompt()
else:
self.runcode(code)
def runsource(self, source):
"Extend base class method: Stuff the source in the line cache first"
filename = self.stuffsource(source)
self.more = 0
self.save_warnings_filters = warnings.filters[:]
warnings.filterwarnings(action="error", category=SyntaxWarning)
if isinstance(source, types.UnicodeType):
import IOBinding
try:
source = source.encode(IOBinding.encoding)
except UnicodeError:
self.tkconsole.resetoutput()
self.write("Unsupported characters in input")
return
try:
return InteractiveInterpreter.runsource(self, source, filename)
finally:
if self.save_warnings_filters is not None:
warnings.filters[:] = self.save_warnings_filters
self.save_warnings_filters = None
def stuffsource(self, source):
"Stuff source in the filename cache"
filename = "<pyshell#%d>" % self.gid
self.gid = self.gid + 1
lines = source.split("\n")
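        # A linecache entry is the tuple (size, mtime, lines, fullname);
        # registering one for the pseudo-filename lets tracebacks display
        # the source that was typed into the shell.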
linecache.cache[filename] = len(source)+1, 0, lines, filename
return filename
def prepend_syspath(self, filename):
"Prepend sys.path with file's directory if not already included"
self.runcommand("""if 1:
_filename = %r
import sys as _sys
from os.path import dirname as _dirname
_dir = _dirname(_filename)
if not _dir in _sys.path:
_sys.path.insert(0, _dir)
del _filename, _sys, _dirname, _dir
\n""" % (filename,))
def showsyntaxerror(self, filename=None):
"""Extend base class method: Add Colorizing
Color the offending position instead of printing it and pointing at it
with a caret.
"""
text = self.tkconsole.text
stuff = self.unpackerror()
if stuff:
msg, lineno, offset, line = stuff
if lineno == 1:
pos = "iomark + %d chars" % (offset-1)
else:
pos = "iomark linestart + %d lines + %d chars" % \
(lineno-1, offset-1)
text.tag_add("ERROR", pos)
text.see(pos)
char = text.get(pos)
if char and char in IDENTCHARS:
text.tag_add("ERROR", pos + " wordstart", pos)
self.tkconsole.resetoutput()
self.write("SyntaxError: %s\n" % str(msg))
else:
self.tkconsole.resetoutput()
InteractiveInterpreter.showsyntaxerror(self, filename)
self.tkconsole.showprompt()
def unpackerror(self):
type, value, tb = sys.exc_info()
ok = type is SyntaxError
if ok:
try:
msg, (dummy_filename, lineno, offset, line) = value
if not offset:
offset = 0
except:
ok = 0
if ok:
return msg, lineno, offset, line
else:
return None
def showtraceback(self):
"Extend base class method to reset output properly"
self.tkconsole.resetoutput()
self.checklinecache()
InteractiveInterpreter.showtraceback(self)
if self.tkconsole.getvar("<<toggle-jit-stack-viewer>>"):
self.tkconsole.open_stack_viewer()
def checklinecache(self):
c = linecache.cache
for key in c.keys():
if key[:1] + key[-1:] != "<>":
del c[key]
def runcommand(self, code):
"Run the code without invoking the debugger"
# The code better not raise an exception!
if self.tkconsole.executing:
self.display_executing_dialog()
return 0
if self.rpcclt:
self.rpcclt.remotequeue("exec", "runcode", (code,), {})
else:
exec code in self.locals
return 1
def runcode(self, code):
"Override base class method"
if self.tkconsole.executing:
self.interp.restart_subprocess()
self.checklinecache()
if self.save_warnings_filters is not None:
warnings.filters[:] = self.save_warnings_filters
self.save_warnings_filters = None
debugger = self.debugger
try:
self.tkconsole.beginexecuting()
try:
if not debugger and self.rpcclt is not None:
self.active_seq = self.rpcclt.asyncqueue("exec", "runcode",
(code,), {})
elif debugger:
debugger.run(code, self.locals)
else:
exec code in self.locals
except SystemExit:
if tkMessageBox.askyesno(
"Exit?",
"Do you want to exit altogether?",
default="yes",
master=self.tkconsole.text):
raise
else:
self.showtraceback()
except:
self.showtraceback()
finally:
if not use_subprocess:
self.tkconsole.endexecuting()
def write(self, s):
"Override base class method"
self.tkconsole.stderr.write(s)
def display_port_binding_error(self):
tkMessageBox.showerror(
"Port Binding Error",
"IDLE can't bind TCP/IP port 8833, which is necessary to "
"communicate with its Python execution server. Either "
"no networking is installed on this computer or another "
"process (another IDLE?) is using the port. Run IDLE with the -n "
"command line switch to start without a subprocess and refer to "
"Help/IDLE Help 'Running without a subprocess' for further "
"details.",
master=self.tkconsole.text)
def display_no_subprocess_error(self):
tkMessageBox.showerror(
"Subprocess Startup Error",
"IDLE's subprocess didn't make connection. Either IDLE can't "
"start a subprocess or personal firewall software is blocking "
"the connection.",
master=self.tkconsole.text)
def display_executing_dialog(self):
tkMessageBox.showerror(
"Already executing",
"The Python Shell window is already executing a command; "
"please wait until it is finished.",
master=self.tkconsole.text)
class PyShell(OutputWindow):
shell_title = "Python Shell"
# Override classes
ColorDelegator = ModifiedColorDelegator
UndoDelegator = ModifiedUndoDelegator
# Override menus
menu_specs = [
("file", "_File"),
("edit", "_Edit"),
("debug", "_Debug"),
("options", "_Options"),
("windows", "_Windows"),
("help", "_Help"),
]
# New classes
from IdleHistory import History
def __init__(self, flist=None):
if use_subprocess:
ms = self.menu_specs
if ms[2][0] != "shell":
ms.insert(2, ("shell", "_Shell"))
self.interp = ModifiedInterpreter(self)
if flist is None:
root = Tk()
fixwordbreaks(root)
root.withdraw()
flist = PyShellFileList(root)
#
OutputWindow.__init__(self, flist, None, None)
#
import __builtin__
__builtin__.quit = __builtin__.exit = "To exit, type Ctrl-D."
#
self.config(usetabs=1, indentwidth=8, context_use_ps1=1)
#
text = self.text
text.configure(wrap="char")
text.bind("<<newline-and-indent>>", self.enter_callback)
text.bind("<<plain-newline-and-indent>>", self.linefeed_callback)
text.bind("<<interrupt-execution>>", self.cancel_callback)
text.bind("<<beginning-of-line>>", self.home_callback)
text.bind("<<end-of-file>>", self.eof_callback)
text.bind("<<open-stack-viewer>>", self.open_stack_viewer)
text.bind("<<toggle-debugger>>", self.toggle_debugger)
text.bind("<<toggle-jit-stack-viewer>>", self.toggle_jit_stack_viewer)
if use_subprocess:
text.bind("<<view-restart>>", self.view_restart_mark)
text.bind("<<restart-shell>>", self.restart_shell)
#
self.save_stdout = sys.stdout
self.save_stderr = sys.stderr
self.save_stdin = sys.stdin
import IOBinding
self.stdout = PseudoFile(self, "stdout", IOBinding.encoding)
self.stderr = PseudoFile(self, "stderr", IOBinding.encoding)
self.console = PseudoFile(self, "console", IOBinding.encoding)
if not use_subprocess:
sys.stdout = self.stdout
sys.stderr = self.stderr
sys.stdin = self
#
self.history = self.History(self.text)
#
self.pollinterval = 50 # millisec
def get_standard_extension_names(self):
return idleConf.GetExtensions(shell_only=True)
reading = False
executing = False
canceled = False
endoffile = False
closing = False
def set_warning_stream(self, stream):
global warning_stream
warning_stream = stream
def get_warning_stream(self):
return warning_stream
def toggle_debugger(self, event=None):
if self.executing:
tkMessageBox.showerror("Don't debug now",
"You can only toggle the debugger when idle",
master=self.text)
self.set_debugger_indicator()
return "break"
else:
db = self.interp.getdebugger()
if db:
self.close_debugger()
else:
self.open_debugger()
def set_debugger_indicator(self):
db = self.interp.getdebugger()
self.setvar("<<toggle-debugger>>", not not db)
def toggle_jit_stack_viewer(self, event=None):
pass # All we need is the variable
def close_debugger(self):
db = self.interp.getdebugger()
if db:
self.interp.setdebugger(None)
db.close()
if self.interp.rpcclt:
RemoteDebugger.close_remote_debugger(self.interp.rpcclt)
self.resetoutput()
self.console.write("[DEBUG OFF]\n")
sys.ps1 = ">>> "
self.showprompt()
self.set_debugger_indicator()
def open_debugger(self):
if self.interp.rpcclt:
dbg_gui = RemoteDebugger.start_remote_debugger(self.interp.rpcclt,
self)
else:
dbg_gui = Debugger.Debugger(self)
self.interp.setdebugger(dbg_gui)
dbg_gui.load_breakpoints()
sys.ps1 = "[DEBUG ON]\n>>> "
self.showprompt()
self.set_debugger_indicator()
def beginexecuting(self):
"Helper for ModifiedInterpreter"
self.resetoutput()
self.executing = 1
def endexecuting(self):
"Helper for ModifiedInterpreter"
self.executing = 0
self.canceled = 0
self.showprompt()
def close(self):
"Extend EditorWindow.close()"
if self.executing:
response = tkMessageBox.askokcancel(
"Kill?",
"The program is still running!\n Do you want to kill it?",
default="ok",
parent=self.text)
if response == False:
return "cancel"
if self.reading:
self.top.quit()
self.canceled = True
self.closing = True
# Wait for poll_subprocess() rescheduling to stop
self.text.after(2 * self.pollinterval, self.close2)
def close2(self):
return EditorWindow.close(self)
def _close(self):
"Extend EditorWindow._close(), shut down debugger and execution server"
self.close_debugger()
if use_subprocess:
self.interp.kill_subprocess()
# Restore std streams
sys.stdout = self.save_stdout
sys.stderr = self.save_stderr
sys.stdin = self.save_stdin
# Break cycles
self.interp = None
self.console = None
self.flist.pyshell = None
self.history = None
EditorWindow._close(self)
def ispythonsource(self, filename):
"Override EditorWindow method: never remove the colorizer"
return True
def short_title(self):
return self.shell_title
COPYRIGHT = \
'Type "copyright", "credits" or "license()" for more information.'
firewallmessage = """
****************************************************************
Personal firewall software may warn about the connection IDLE
makes to its subprocess using this computer's internal loopback
interface. This connection is not visible on any external
interface and no data is sent to or received from the Internet.
****************************************************************
"""
def begin(self):
self.resetoutput()
if use_subprocess:
nosub = ''
client = self.interp.start_subprocess()
if not client:
self.close()
return False
else:
nosub = "==== No Subprocess ===="
self.write("Python %s on %s\n%s\n%s\nIDLE %s %s\n" %
(sys.version, sys.platform, self.COPYRIGHT,
self.firewallmessage, idlever.IDLE_VERSION, nosub))
self.showprompt()
import Tkinter
Tkinter._default_root = None # 03Jan04 KBK What's this?
return True
def readline(self):
save = self.reading
try:
self.reading = 1
self.top.mainloop() # nested mainloop()
finally:
self.reading = save
line = self.text.get("iomark", "end-1c")
if len(line) == 0: # may be EOF if we quit our mainloop with Ctrl-C
line = "\n"
if isinstance(line, unicode):
import IOBinding
try:
line = line.encode(IOBinding.encoding)
except UnicodeError:
pass
self.resetoutput()
if self.canceled:
self.canceled = 0
if not use_subprocess:
raise KeyboardInterrupt
if self.endoffile:
self.endoffile = 0
line = ""
return line
def isatty(self):
return True
def cancel_callback(self, event=None):
try:
if self.text.compare("sel.first", "!=", "sel.last"):
return # Active selection -- always use default binding
except:
pass
if not (self.executing or self.reading):
self.resetoutput()
self.interp.write("KeyboardInterrupt\n")
self.showprompt()
return "break"
self.endoffile = 0
self.canceled = 1
if (self.executing and self.interp.rpcclt):
if self.interp.getdebugger():
self.interp.restart_subprocess()
else:
self.interp.interrupt_subprocess()
if self.reading:
self.top.quit() # exit the nested mainloop() in readline()
return "break"
def eof_callback(self, event):
if self.executing and not self.reading:
return # Let the default binding (delete next char) take over
if not (self.text.compare("iomark", "==", "insert") and
self.text.compare("insert", "==", "end-1c")):
return # Let the default binding (delete next char) take over
if not self.executing:
self.resetoutput()
self.close()
else:
self.canceled = 0
self.endoffile = 1
self.top.quit()
return "break"
def home_callback(self, event):
if event.state != 0 and event.keysym == "Home":
return # <Modifier-Home>; fall back to class binding
if self.text.compare("iomark", "<=", "insert") and \
self.text.compare("insert linestart", "<=", "iomark"):
self.text.mark_set("insert", "iomark")
self.text.tag_remove("sel", "1.0", "end")
self.text.see("insert")
return "break"
def linefeed_callback(self, event):
# Insert a linefeed without entering anything (still autoindented)
if self.reading:
self.text.insert("insert", "\n")
self.text.see("insert")
else:
self.newline_and_indent_event(event)
return "break"
def enter_callback(self, event):
if self.executing and not self.reading:
return # Let the default binding (insert '\n') take over
# If some text is selected, recall the selection
        # (but only if this is before the I/O mark)
try:
sel = self.text.get("sel.first", "sel.last")
if sel:
if self.text.compare("sel.last", "<=", "iomark"):
self.recall(sel)
return "break"
except:
pass
# If we're strictly before the line containing iomark, recall
# the current line, less a leading prompt, less leading or
# trailing whitespace
if self.text.compare("insert", "<", "iomark linestart"):
# Check if there's a relevant stdin range -- if so, use it
prev = self.text.tag_prevrange("stdin", "insert")
if prev and self.text.compare("insert", "<", prev[1]):
self.recall(self.text.get(prev[0], prev[1]))
return "break"
next = self.text.tag_nextrange("stdin", "insert")
if next and self.text.compare("insert lineend", ">=", next[0]):
self.recall(self.text.get(next[0], next[1]))
return "break"
# No stdin mark -- just get the current line, less any prompt
line = self.text.get("insert linestart", "insert lineend")
last_line_of_prompt = sys.ps1.split('\n')[-1]
if line.startswith(last_line_of_prompt):
line = line[len(last_line_of_prompt):]
self.recall(line)
return "break"
# If we're between the beginning of the line and the iomark, i.e.
# in the prompt area, move to the end of the prompt
if self.text.compare("insert", "<", "iomark"):
self.text.mark_set("insert", "iomark")
# If we're in the current input and there's only whitespace
# beyond the cursor, erase that whitespace first
s = self.text.get("insert", "end-1c")
if s and not s.strip():
self.text.delete("insert", "end-1c")
# If we're in the current input before its last line,
# insert a newline right at the insert point
if self.text.compare("insert", "<", "end-1c linestart"):
self.newline_and_indent_event(event)
return "break"
# We're in the last line; append a newline and submit it
self.text.mark_set("insert", "end-1c")
if self.reading:
self.text.insert("insert", "\n")
self.text.see("insert")
else:
self.newline_and_indent_event(event)
self.text.tag_add("stdin", "iomark", "end-1c")
self.text.update_idletasks()
if self.reading:
self.top.quit() # Break out of recursive mainloop() in raw_input()
else:
self.runit()
return "break"
def recall(self, s):
if self.history:
self.history.recall(s)
def runit(self):
line = self.text.get("iomark", "end-1c")
# Strip off last newline and surrounding whitespace.
# (To allow you to hit return twice to end a statement.)
i = len(line)
while i > 0 and line[i-1] in " \t":
i = i-1
if i > 0 and line[i-1] == "\n":
i = i-1
while i > 0 and line[i-1] in " \t":
i = i-1
line = line[:i]
more = self.interp.runsource(line)
def open_stack_viewer(self, event=None):
if self.interp.rpcclt:
return self.interp.remote_stack_viewer()
try:
sys.last_traceback
except:
tkMessageBox.showerror("No stack trace",
"There is no stack trace yet.\n"
"(sys.last_traceback is not defined)",
master=self.text)
return
from StackViewer import StackBrowser
sv = StackBrowser(self.root, self.flist)
def view_restart_mark(self, event=None):
self.text.see("iomark")
self.text.see("restart")
def restart_shell(self, event=None):
self.interp.restart_subprocess()
def showprompt(self):
self.resetoutput()
try:
s = str(sys.ps1)
except:
s = ""
self.console.write(s)
self.text.mark_set("insert", "end-1c")
self.set_line_and_column()
self.io.reset_undo()
def resetoutput(self):
source = self.text.get("iomark", "end-1c")
if self.history:
self.history.history_store(source)
if self.text.get("end-2c") != "\n":
self.text.insert("end-1c", "\n")
self.text.mark_set("iomark", "end-1c")
self.set_line_and_column()
sys.stdout.softspace = 0
def write(self, s, tags=()):
try:
self.text.mark_gravity("iomark", "right")
OutputWindow.write(self, s, tags, "iomark")
self.text.mark_gravity("iomark", "left")
except:
pass
if self.canceled:
self.canceled = 0
if not use_subprocess:
raise KeyboardInterrupt
class PseudoFile:
def __init__(self, shell, tags, encoding=None):
self.shell = shell
self.tags = tags
self.softspace = 0
self.encoding = encoding
def write(self, s):
self.shell.write(s, self.tags)
def writelines(self, l):
map(self.write, l)
def flush(self):
pass
def isatty(self):
return True
usage_msg = """\
USAGE: idle [-deins] [-t title] [file]*
idle [-dns] [-t title] (-c cmd | -r file) [arg]*
idle [-dns] [-t title] - [arg]*
-h print this help message and exit
-n run IDLE without a subprocess (see Help/IDLE Help for details)
The following options will override the IDLE 'settings' configuration:
-e open an edit window
-i open a shell window
The following options imply -i and will open a shell:
-c cmd run the command in a shell, or
-r file run script from file
-d enable the debugger
-s run $IDLESTARTUP or $PYTHONSTARTUP before anything else
-t title set title of shell window
A default edit window will be bypassed when -c, -r, or - are used.
[arg]* are passed to the command (-c) or script (-r) in sys.argv[1:].
Examples:
idle
Open an edit window or shell depending on IDLE's configuration.
idle foo.py foobar.py
Edit the files, also open a shell if configured to start with shell.
idle -est "Baz" foo.py
Run $IDLESTARTUP or $PYTHONSTARTUP, edit foo.py, and open a shell
window with the title "Baz".
idle -c "import sys; print sys.argv" "foo"
Open a shell window and run the command, passing "-c" in sys.argv[0]
and "foo" in sys.argv[1].
idle -d -s -r foo.py "Hello World"
Open a shell window, run a startup script, enable the debugger, and
run foo.py, passing "foo.py" in sys.argv[0] and "Hello World" in
sys.argv[1].
echo "import sys; print sys.argv" | idle - "foobar"
Open a shell window, run the script piped in, passing '' in sys.argv[0]
and "foobar" in sys.argv[1].
"""
def main():
global flist, root, use_subprocess
use_subprocess = True
enable_shell = False
enable_edit = False
debug = False
cmd = None
script = None
startup = False
try:
sys.ps1
except AttributeError:
sys.ps1 = '>>> '
try:
opts, args = getopt.getopt(sys.argv[1:], "c:deihnr:st:")
except getopt.error, msg:
sys.stderr.write("Error: %s\n" % str(msg))
sys.stderr.write(usage_msg)
sys.exit(2)
for o, a in opts:
if o == '-c':
cmd = a
enable_shell = True
if o == '-d':
debug = True
enable_shell = True
if o == '-e':
enable_edit = True
if o == '-h':
sys.stdout.write(usage_msg)
sys.exit()
if o == '-i':
enable_shell = True
if o == '-n':
use_subprocess = False
if o == '-r':
script = a
if os.path.isfile(script):
pass
else:
print "No script file: ", script
sys.exit()
enable_shell = True
if o == '-s':
startup = True
enable_shell = True
if o == '-t':
PyShell.shell_title = a
enable_shell = True
if args and args[0] == '-':
cmd = sys.stdin.read()
enable_shell = True
# process sys.argv and sys.path:
for i in range(len(sys.path)):
sys.path[i] = os.path.abspath(sys.path[i])
if args and args[0] == '-':
sys.argv = [''] + args[1:]
elif cmd:
sys.argv = ['-c'] + args
elif script:
sys.argv = [script] + args
elif args:
enable_edit = True
pathx = []
for filename in args:
pathx.append(os.path.dirname(filename))
for dir in pathx:
dir = os.path.abspath(dir)
if not dir in sys.path:
sys.path.insert(0, dir)
else:
dir = os.getcwd()
if not dir in sys.path:
sys.path.insert(0, dir)
# check the IDLE settings configuration (but command line overrides)
edit_start = idleConf.GetOption('main', 'General',
'editor-on-startup', type='bool')
enable_edit = enable_edit or edit_start
enable_shell = enable_shell or not edit_start
# start editor and/or shell windows:
root = Tk(className="Idle")
fixwordbreaks(root)
root.withdraw()
flist = PyShellFileList(root)
if enable_edit:
if not (cmd or script):
for filename in args:
flist.open(filename)
if not args:
flist.new()
if enable_shell:
if not flist.open_shell():
return # couldn't open shell
shell = flist.pyshell
# handle remaining options:
if debug:
shell.open_debugger()
if startup:
filename = os.environ.get("IDLESTARTUP") or \
os.environ.get("PYTHONSTARTUP")
if filename and os.path.isfile(filename):
shell.interp.execfile(filename)
if shell and cmd or script:
shell.interp.runcommand("""if 1:
import sys as _sys
_sys.argv = %r
del _sys
\n""" % (sys.argv,))
if cmd:
shell.interp.execsource(cmd)
elif script:
shell.interp.prepend_syspath(script)
shell.interp.execfile(script)
root.mainloop()
root.destroy()
if __name__ == "__main__":
sys.modules['PyShell'] = sys.modules['__main__']
main()
| xbmc/atv2 | xbmc/lib/libPython/Python/Lib/idlelib/PyShell.py | Python | gpl-2.0 | 48,715 | 0.00115 |
# copies.py - copy detection for Mercurial
#
# Copyright 2008 Matt Mackall <[email protected]>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from __future__ import absolute_import
import heapq
from . import (
node,
pathutil,
scmutil,
util,
)
def _findlimit(repo, a, b):
"""
Find the last revision that needs to be checked to ensure that a full
transitive closure for file copies can be properly calculated.
Generally, this means finding the earliest revision number that's an
ancestor of a or b but not both, except when a or b is a direct descendent
of the other, in which case we can return the minimum revnum of a and b.
None if no such revision exists.
"""
# basic idea:
# - mark a and b with different sides
# - if a parent's children are all on the same side, the parent is
# on that side, otherwise it is on no side
# - walk the graph in topological order with the help of a heap;
# - add unseen parents to side map
# - clear side of any parent that has children on different sides
# - track number of interesting revs that might still be on a side
# - track the lowest interesting rev seen
# - quit when interesting revs is zero
cl = repo.changelog
working = len(cl) # pseudo rev for the working directory
if a is None:
a = working
if b is None:
b = working
side = {a: -1, b: 1}
visit = [-a, -b]
heapq.heapify(visit)
interesting = len(visit)
hascommonancestor = False
limit = working
while interesting:
r = -heapq.heappop(visit)
if r == working:
parents = [cl.rev(p) for p in repo.dirstate.parents()]
else:
parents = cl.parentrevs(r)
for p in parents:
if p < 0:
continue
if p not in side:
# first time we see p; add it to visit
side[p] = side[r]
if side[p]:
interesting += 1
heapq.heappush(visit, -p)
elif side[p] and side[p] != side[r]:
# p was interesting but now we know better
side[p] = 0
interesting -= 1
hascommonancestor = True
if side[r]:
limit = r # lowest rev visited
interesting -= 1
if not hascommonancestor:
return None
# Consider the following flow (see test-commit-amend.t under issue4405):
# 1/ File 'a0' committed
# 2/ File renamed from 'a0' to 'a1' in a new commit (call it 'a1')
# 3/ Move back to first commit
# 4/ Create a new commit via revert to contents of 'a1' (call it 'a1-amend')
# 5/ Rename file from 'a1' to 'a2' and commit --amend 'a1-msg'
#
# During the amend in step five, we will be in this state:
#
# @ 3 temporary amend commit for a1-amend
# |
# o 2 a1-amend
# |
# | o 1 a1
# |/
# o 0 a0
#
# When _findlimit is called, a and b are revs 3 and 0, so limit will be 2,
# yet the filelog has the copy information in rev 1 and we will not look
# back far enough unless we also look at the a and b as candidates.
    # This only occurs when a is a descendant of b or vice versa.
return min(limit, a, b)
def _chain(src, dst, a, b):
'''chain two sets of copies a->b'''
t = a.copy()
for k, v in b.iteritems():
if v in t:
# found a chain
if t[v] != k:
# file wasn't renamed back to itself
t[k] = t[v]
if v not in dst:
# chain was a rename, not a copy
del t[v]
if v in src:
# file is a copy of an existing file
t[k] = v
# remove criss-crossed copies
for k, v in t.items():
if k in src and v in dst:
del t[k]
return t
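# A minimal worked example of _chain (the values are hypothetical and only
# illustrate the rules above; real callers pass changectx-like objects, but any
# containers supporting `in` behave the same way here):
#
#     >>> src = {'x': 1}          # 'x' exists in the source context
#     >>> dst = {'z': 1}          # only 'z' survives in the destination
#     >>> a = {'y': 'x'}          # first leg copied/renamed x -> y
#     >>> b = {'z': 'y'}          # second leg renamed y -> z
#     >>> _chain(src, dst, a, b)
#     {'z': 'x'}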
def _tracefile(fctx, am, limit=-1):
'''return file context that is the ancestor of fctx present in ancestor
manifest am, stopping after the first ancestor lower than limit'''
for f in fctx.ancestors():
if am.get(f.path(), None) == f.filenode():
return f
if limit >= 0 and f.linkrev() < limit and f.rev() < limit:
return None
def _dirstatecopies(d):
ds = d._repo.dirstate
c = ds.copies().copy()
for k in c.keys():
if ds[k] not in 'anm':
del c[k]
return c
def _computeforwardmissing(a, b, match=None):
"""Computes which files are in b but not a.
This is its own function so extensions can easily wrap this call to see what
files _forwardcopies is about to process.
"""
ma = a.manifest()
mb = b.manifest()
if match:
ma = ma.matches(match)
mb = mb.matches(match)
return mb.filesnotin(ma)
def _forwardcopies(a, b, match=None):
'''find {dst@b: src@a} copy mapping where a is an ancestor of b'''
# check for working copy
w = None
if b.rev() is None:
w = b
b = w.p1()
if a == b:
# short-circuit to avoid issues with merge states
return _dirstatecopies(w)
# files might have to be traced back to the fctx parent of the last
# one-side-only changeset, but not further back than that
limit = _findlimit(a._repo, a.rev(), b.rev())
if limit is None:
limit = -1
am = a.manifest()
# find where new files came from
# we currently don't try to find where old files went, too expensive
# this means we can miss a case like 'hg rm b; hg cp a b'
cm = {}
# Computing the forward missing is quite expensive on large manifests, since
# it compares the entire manifests. We can optimize it in the common use
# case of computing what copies are in a commit versus its parent (like
# during a rebase or histedit). Note, we exclude merge commits from this
# optimization, since the ctx.files() for a merge commit is not correct for
# this comparison.
forwardmissingmatch = match
if not match and b.p1() == a and b.p2().node() == node.nullid:
forwardmissingmatch = scmutil.matchfiles(a._repo, b.files())
missing = _computeforwardmissing(a, b, match=forwardmissingmatch)
ancestrycontext = a._repo.changelog.ancestors([b.rev()], inclusive=True)
for f in missing:
fctx = b[f]
fctx._ancestrycontext = ancestrycontext
ofctx = _tracefile(fctx, am, limit)
if ofctx:
cm[f] = ofctx.path()
# combine copies from dirstate if necessary
if w is not None:
cm = _chain(a, w, cm, _dirstatecopies(w))
return cm
def _backwardrenames(a, b):
if a._repo.ui.configbool('experimental', 'disablecopytrace'):
return {}
# Even though we're not taking copies into account, 1:n rename situations
# can still exist (e.g. hg cp a b; hg mv a c). In those cases we
# arbitrarily pick one of the renames.
f = _forwardcopies(b, a)
r = {}
for k, v in sorted(f.iteritems()):
# remove copies
if v in a:
continue
r[v] = k
return r
def pathcopies(x, y, match=None):
'''find {dst@y: src@x} copy mapping for directed compare'''
if x == y or not x or not y:
return {}
a = y.ancestor(x)
if a == x:
return _forwardcopies(x, y, match=match)
if a == y:
return _backwardrenames(x, y)
return _chain(x, y, _backwardrenames(x, a),
_forwardcopies(a, y, match=match))
def _computenonoverlap(repo, c1, c2, addedinm1, addedinm2):
"""Computes, based on addedinm1 and addedinm2, the files exclusive to c1
and c2. This is its own function so extensions can easily wrap this call
to see what files mergecopies is about to process.
Even though c1 and c2 are not used in this function, they are useful in
other extensions for being able to read the file nodes of the changed files.
"""
u1 = sorted(addedinm1 - addedinm2)
u2 = sorted(addedinm2 - addedinm1)
if u1:
repo.ui.debug(" unmatched files in local:\n %s\n"
% "\n ".join(u1))
if u2:
repo.ui.debug(" unmatched files in other:\n %s\n"
% "\n ".join(u2))
return u1, u2
def _makegetfctx(ctx):
"""return a 'getfctx' function suitable for checkcopies usage
We have to re-setup the function building 'filectx' for each
'checkcopies' to ensure the linkrev adjustment is properly setup for
each. Linkrev adjustment is important to avoid bug in rename
detection. Moreover, having a proper '_ancestrycontext' setup ensures
the performance impact of this adjustment is kept limited. Without it,
each file could do a full dag traversal making the time complexity of
the operation explode (see issue4537).
This function exists here mostly to limit the impact on stable. Feel
free to refactor on default.
"""
rev = ctx.rev()
repo = ctx._repo
ac = getattr(ctx, '_ancestrycontext', None)
if ac is None:
revs = [rev]
if rev is None:
revs = [p.rev() for p in ctx.parents()]
ac = repo.changelog.ancestors(revs, inclusive=True)
ctx._ancestrycontext = ac
def makectx(f, n):
if len(n) != 20: # in a working context?
if ctx.rev() is None:
return ctx.filectx(f)
return repo[None][f]
fctx = repo.filectx(f, fileid=n)
# setup only needed for filectx not create from a changectx
fctx._ancestrycontext = ac
fctx._descendantrev = rev
return fctx
return util.lrucachefunc(makectx)
def mergecopies(repo, c1, c2, ca):
"""
Find moves and copies between context c1 and c2 that are relevant
for merging.
Returns four dicts: "copy", "movewithdir", "diverge", and
"renamedelete".
"copy" is a mapping from destination name -> source name,
where source is in c1 and destination is in c2 or vice-versa.
"movewithdir" is a mapping from source name -> destination name,
    where the file at source, present in one context but not the other,
needs to be moved to destination by the merge process, because the
other context moved the directory it is in.
"diverge" is a mapping of source name -> list of destination names
for divergent renames.
"renamedelete" is a mapping of source name -> list of destination
names for files deleted in c1 that were renamed in c2 or vice-versa.
"""
# avoid silly behavior for update from empty dir
if not c1 or not c2 or c1 == c2:
return {}, {}, {}, {}
# avoid silly behavior for parent -> working dir
if c2.node() is None and c1.node() == repo.dirstate.p1():
return repo.dirstate.copies(), {}, {}, {}
# Copy trace disabling is explicitly below the node == p1 logic above
# because the logic above is required for a simple copy to be kept across a
# rebase.
if repo.ui.configbool('experimental', 'disablecopytrace'):
return {}, {}, {}, {}
limit = _findlimit(repo, c1.rev(), c2.rev())
if limit is None:
# no common ancestor, no copies
return {}, {}, {}, {}
repo.ui.debug(" searching for copies back to rev %d\n" % limit)
m1 = c1.manifest()
m2 = c2.manifest()
ma = ca.manifest()
copy1, copy2, = {}, {}
movewithdir1, movewithdir2 = {}, {}
fullcopy1, fullcopy2 = {}, {}
diverge = {}
# find interesting file sets from manifests
addedinm1 = m1.filesnotin(ma)
addedinm2 = m2.filesnotin(ma)
u1, u2 = _computenonoverlap(repo, c1, c2, addedinm1, addedinm2)
bothnew = sorted(addedinm1 & addedinm2)
for f in u1:
checkcopies(c1, f, m1, m2, ca, limit, diverge, copy1, fullcopy1)
for f in u2:
checkcopies(c2, f, m2, m1, ca, limit, diverge, copy2, fullcopy2)
copy = dict(copy1.items() + copy2.items())
movewithdir = dict(movewithdir1.items() + movewithdir2.items())
fullcopy = dict(fullcopy1.items() + fullcopy2.items())
renamedelete = {}
renamedeleteset = set()
divergeset = set()
for of, fl in diverge.items():
if len(fl) == 1 or of in c1 or of in c2:
del diverge[of] # not actually divergent, or not a rename
if of not in c1 and of not in c2:
# renamed on one side, deleted on the other side, but filter
# out files that have been renamed and then deleted
renamedelete[of] = [f for f in fl if f in c1 or f in c2]
renamedeleteset.update(fl) # reverse map for below
else:
divergeset.update(fl) # reverse map for below
if bothnew:
repo.ui.debug(" unmatched files new in both:\n %s\n"
% "\n ".join(bothnew))
bothdiverge, _copy, _fullcopy = {}, {}, {}
for f in bothnew:
checkcopies(c1, f, m1, m2, ca, limit, bothdiverge, _copy, _fullcopy)
checkcopies(c2, f, m2, m1, ca, limit, bothdiverge, _copy, _fullcopy)
for of, fl in bothdiverge.items():
if len(fl) == 2 and fl[0] == fl[1]:
copy[fl[0]] = of # not actually divergent, just matching renames
if fullcopy and repo.ui.debugflag:
repo.ui.debug(" all copies found (* = to merge, ! = divergent, "
"% = renamed and deleted):\n")
for f in sorted(fullcopy):
note = ""
if f in copy:
note += "*"
if f in divergeset:
note += "!"
if f in renamedeleteset:
note += "%"
repo.ui.debug(" src: '%s' -> dst: '%s' %s\n" % (fullcopy[f], f,
note))
del divergeset
if not fullcopy:
return copy, movewithdir, diverge, renamedelete
repo.ui.debug(" checking for directory renames\n")
# generate a directory move map
d1, d2 = c1.dirs(), c2.dirs()
# Hack for adding '', which is not otherwise added, to d1 and d2
d1.addpath('/')
d2.addpath('/')
invalid = set()
dirmove = {}
# examine each file copy for a potential directory move, which is
# when all the files in a directory are moved to a new directory
for dst, src in fullcopy.iteritems():
dsrc, ddst = pathutil.dirname(src), pathutil.dirname(dst)
if dsrc in invalid:
# already seen to be uninteresting
continue
elif dsrc in d1 and ddst in d1:
# directory wasn't entirely moved locally
invalid.add(dsrc + "/")
elif dsrc in d2 and ddst in d2:
# directory wasn't entirely moved remotely
invalid.add(dsrc + "/")
elif dsrc + "/" in dirmove and dirmove[dsrc + "/"] != ddst + "/":
# files from the same directory moved to two different places
invalid.add(dsrc + "/")
else:
# looks good so far
dirmove[dsrc + "/"] = ddst + "/"
for i in invalid:
if i in dirmove:
del dirmove[i]
del d1, d2, invalid
if not dirmove:
return copy, movewithdir, diverge, renamedelete
for d in dirmove:
repo.ui.debug(" discovered dir src: '%s' -> dst: '%s'\n" %
(d, dirmove[d]))
# check unaccounted nonoverlapping files against directory moves
for f in u1 + u2:
if f not in fullcopy:
for d in dirmove:
if f.startswith(d):
# new file added in a directory that was moved, move it
df = dirmove[d] + f[len(d):]
if df not in copy:
movewithdir[f] = df
repo.ui.debug((" pending file src: '%s' -> "
"dst: '%s'\n") % (f, df))
break
return copy, movewithdir, diverge, renamedelete
def checkcopies(ctx, f, m1, m2, ca, limit, diverge, copy, fullcopy):
"""
check possible copies of f from m1 to m2
ctx = starting context for f in m1
f = the filename to check
m1 = the source manifest
m2 = the destination manifest
ca = the changectx of the common ancestor
limit = the rev number to not search beyond
diverge = record all diverges in this dict
copy = record all non-divergent copies in this dict
fullcopy = record all copies in this dict
"""
ma = ca.manifest()
getfctx = _makegetfctx(ctx)
def _related(f1, f2, limit):
# Walk back to common ancestor to see if the two files originate
# from the same file. Since workingfilectx's rev() is None it messes
# up the integer comparison logic, hence the pre-step check for
# None (f1 and f2 can only be workingfilectx's initially).
if f1 == f2:
return f1 # a match
g1, g2 = f1.ancestors(), f2.ancestors()
try:
f1r, f2r = f1.linkrev(), f2.linkrev()
if f1r is None:
f1 = next(g1)
if f2r is None:
f2 = next(g2)
while True:
f1r, f2r = f1.linkrev(), f2.linkrev()
if f1r > f2r:
f1 = next(g1)
elif f2r > f1r:
f2 = next(g2)
elif f1 == f2:
return f1 # a match
elif f1r == f2r or f1r < limit or f2r < limit:
return False # copy no longer relevant
except StopIteration:
return False
of = None
seen = set([f])
for oc in getfctx(f, m1[f]).ancestors():
ocr = oc.linkrev()
of = oc.path()
if of in seen:
# check limit late - grab last rename before
if ocr < limit:
break
continue
seen.add(of)
fullcopy[f] = of # remember for dir rename detection
if of not in m2:
continue # no match, keep looking
if m2[of] == ma.get(of):
break # no merge needed, quit early
c2 = getfctx(of, m2[of])
cr = _related(oc, c2, ca.rev())
if cr and (of == f or of == c2.path()): # non-divergent
copy[f] = of
of = None
break
if of in ma:
diverge.setdefault(of, []).append(f)
def duplicatecopies(repo, rev, fromrev, skiprev=None):
'''reproduce copies from fromrev to rev in the dirstate
If skiprev is specified, it's a revision that should be used to
filter copy records. Any copies that occur between fromrev and
skiprev will not be duplicated, even if they appear in the set of
copies between fromrev and rev.
'''
exclude = {}
if (skiprev is not None and
not repo.ui.configbool('experimental', 'disablecopytrace')):
# disablecopytrace skips this line, but not the entire function because
# the line below is O(size of the repo) during a rebase, while the rest
# of the function is much faster (and is required for carrying copy
# metadata across the rebase anyway).
exclude = pathcopies(repo[fromrev], repo[skiprev])
for dst, src in pathcopies(repo[fromrev], repo[rev]).iteritems():
# copies.pathcopies returns backward renames, so dst might not
# actually be in the dirstate
if dst in exclude:
continue
if repo.dirstate[dst] in "nma":
repo.dirstate.copy(src, dst)
| dscho/hg | mercurial/copies.py | Python | gpl-2.0 | 19,523 | 0.001639 |
from django.db import transaction
from waldur_core.logging import tasks
def process_hook(sender, instance, created=False, **kwargs):
transaction.on_commit(lambda: tasks.process_event.delay(instance.pk))
| opennode/nodeconductor-assembly-waldur | src/waldur_core/logging/handlers.py | Python | mit | 210 | 0 |
"""
WSGI config for the push_and_pull project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
#import sys
#import site
#import subprocess
PROJECT_ROOT = os.path.abspath(os.path.dirname(__file__) + "../../")
# Add the virtualenv packages to the site directory. This uses the technique
# described at http://code.google.com/p/modwsgi/wiki/VirtualEnvironments
# Remember original sys.path.
#prev_sys_path = list(sys.path)
# Get the path to the env's site-packages directory
#site_packages = subprocess.check_output([
# os.path.join(PROJECT_ROOT, '.virtualenv/bin/python'),
# '-c',
# 'from distutils.sysconfig import get_python_lib;'
# 'print get_python_lib(),'
#]).strip()
# Add the virtualenv site-packages to the site packages
#site.addsitedir(site_packages)
# Reorder sys.path so the new directories are at the front.
#new_sys_path = []
#for item in list(sys.path):
# if item not in prev_sys_path:
# new_sys_path.append(item)
# sys.path.remove(item)
#sys.path[:0] = new_sys_path
# Add the app code to the path
#sys.path.append(PROJECT_ROOT)
os.environ['CELERY_LOADER'] = 'django'
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "push_and_pull.settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| sivaprakashniet/push_pull | push_and_pull/wsgi.py | Python | bsd-3-clause | 2,240 | 0.005357 |
"""
Django settings for gettingstarted project, on Heroku. For more info, see:
https://github.com/heroku/heroku-django-template
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
import os
import dj_database_url
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
PROJECT_ROOT = os.path.dirname(os.path.abspath(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: change this before deploying to production!
SECRET_KEY = 'i+acxn5(akgsn!sr4^qgf(^m&*@+g1@u^t@=8s@axc41ml*f=s'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'puzzler'
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'gettingstarted.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'debug': True,
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'gettingstarted.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Update database configuration with $DATABASE_URL.
db_from_env = dj_database_url.config(conn_max_age=500)
DATABASES['default'].update(db_from_env)
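# For reference, dj_database_url reads a URL-style variable, e.g. (hypothetical
# credentials): DATABASE_URL=postgres://user:password@host:5432/dbname
# If the variable is unset it returns an empty dict, so the SQLite default
# above is kept unchanged.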
# Honor the 'X-Forwarded-Proto' header for request.is_secure()
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# Allow all host headers
ALLOWED_HOSTS = ['*']
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_ROOT = os.path.join(PROJECT_ROOT, 'staticfiles')
STATIC_URL = '/static/'
# Extra places for collectstatic to find static files.
STATICFILES_DIRS = (
os.path.join(PROJECT_ROOT, 'static'),
)
# Simplified static file serving.
# https://warehouse.python.org/project/whitenoise/
STATICFILES_STORAGE = 'whitenoise.django.GzipManifestStaticFilesStorage'
AUTHENTICATION_BACKENDS = (
# ... your other backends
'django.contrib.auth.backends.ModelBackend',
)
try:
from .local_settings import *
except ImportError:
PASSWORD = os.environ.get('CHESS_PASSWORD', None)
USERNAME = os.environ.get('CHESS_USERNAME', None)
ALLOWED_HOSTS = [os.environ.get('HOST_URL', None), 'chess.com']
CURRENT_HOST = os.environ.get('HOST_URL', None)
DEBUG = False
SECRET_KEY = os.environ.get('SECRET_KEY', None)
SECURE_CONTENT_TYPE_NOSNIFF = True
SECURE_BROWSER_XSS_FILTER = True
X_FRAME_OPTIONS = 'DENY'
CSRF_COOKIE_SECURE = False
CSRF_COOKIE_HTTPONLY = True | TomWerner/BlunderPuzzler | gettingstarted/settings.py | Python | mit | 4,580 | 0.001092 |
import os
import sys
import warnings
from setuptools import setup
version_contents = {}
here = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(here, "shippo", "version.py"), encoding="utf-8") as f:
exec(f.read(), version_contents)
setup(
name='shippo',
version=version_contents['VERSION'],
description='Shipping API Python library (USPS, FedEx, UPS and more)',
author='Shippo',
author_email='[email protected]',
url='https://goshippo.com/',
packages=['shippo', 'shippo.test', 'shippo.test.integration'],
package_data={'shippo': ['../VERSION']},
install_requires=[
'requests >= 2.21.0, <= 2.27.1',
'simplejson >= 3.16.0, <= 3.17.2',
],
test_suite='shippo.test.all',
tests_require=['unittest2', 'mock', 'vcrpy'],
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3 :: Only",
"Programming Language :: Python :: Implementation :: PyPy",
"Topic :: Software Development :: Libraries :: Python Modules",
]
)
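# A typical local workflow for this package (the commands are assumptions for
# illustration, not taken from the upstream docs):
#
#     pip install -e .          # editable install with the pinned dependencies
#     python setup.py test      # runs the 'shippo.test.all' suite declared above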
| goshippo/shippo-python-client | setup.py | Python | mit | 1,408 | 0 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'Shinichi Nakagawa'
from house import views
from django.conf.urls import patterns, url, include
from rest_framework.routers import DefaultRouter
router = DefaultRouter()
router.register(r'metrics', views.MetricsViewSet)
urlpatterns = patterns('',
url(r'^', include(router.urls)),
)
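# For reference: assuming MetricsViewSet exposes the usual model actions,
# DefaultRouter generates routes such as /metrics/ (list/create) and
# /metrics/{pk}/ (retrieve/update/destroy), plus a browsable API root at /.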
| Shinichi-Nakagawa/country-house-server | server/house/urls.py | Python | mit | 349 | 0.002865 |
#! /usr/bin/python
# -*- coding: utf-8 -*-
import ast
import pickle
import numpy as np
import os
import socket
import sys
import importlib
from datetime import datetime
import theano as th
import theanet.neuralnet as nn
################################ HELPER FUNCTIONS ############################
def share(data, dtype=th.config.floatX, borrow=True):
return th.shared(np.asarray(data, dtype), borrow=borrow)
def fixdim(arr):
if arr.ndim == 2:
side = int(arr.shape[-1] ** .5)
assert side**2 == arr.shape[-1], "Need a perfect square"
return arr.reshape((arr.shape[0], 1, side, side))
if arr.ndim == 3:
return np.expand_dims(arr, axis=1)
if arr.ndim == 4:
return arr
raise ValueError("Image data arrays must have 2,3 or 4 dimensions only")
class WrapOut:
def __init__(self, use_file, name=''):
self.name = name
self.use_file = use_file
if use_file:
self.stream = open(name, 'w', 1)
else:
self.stream = sys.stdout
def write(self, data):
self.stream.write(data)
def forceflush(self):
if self.use_file:
self.stream.close()
self.stream = open(self.name, 'a', 1)
def __getattr__(self, attr):
return getattr(self.stream, attr)
################################### MAIN CODE ################################
if len(sys.argv) < 3:
print('Usage:', sys.argv[0],
''' <dataset> <params_file(s)> [redirect=0]
dataset:
Should be the name of a module in the data folder.
Like "mnist", "telugu_ocr", "numbers" etc.
params_file(s) :
Parameters for the NeuralNet
- name.prms : contains the initialization code
- name.pkl : pickled file from a previous run (has wts too).
redirect:
1 - redirect stdout to a params_<SEED>.txt file
''')
sys.exit()
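# Example invocation (the file names are placeholders for illustration only):
#   python train.py mnist params/cnn.prms 1
# i.e. train on the data.mnist module using the settings in params/cnn.prms and
# redirect stdout to a cnn_<SEED>.txt log in the current directory.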
dataset_name = sys.argv[1]
prms_file_name = sys.argv[2]
########################################## Import Parameters
if prms_file_name.endswith('.pkl'):
with open(prms_file_name, 'rb') as f:
params = pickle.load(f)
else:
with open(prms_file_name, 'r') as f:
params = ast.literal_eval(f.read())
layers = params['layers']
tr_prms = params['training_params']
try:
allwts = params['allwts']
except KeyError:
allwts = None
## Init SEED
if (not 'SEED' in tr_prms) or (tr_prms['SEED'] is None):
tr_prms['SEED'] = np.random.randint(0, 1e6)
out_file_head = os.path.basename(prms_file_name,).replace(
os.path.splitext(prms_file_name)[1], "_{:06d}".format(tr_prms['SEED']))
if sys.argv[-1] == '1':
print("Printing output to {}.txt".format(out_file_head), file=sys.stderr)
sys.stdout = WrapOut(True, out_file_head + '.txt')
else:
sys.stdout = WrapOut(False)
########################################## Print Parameters
print(' '.join(sys.argv), file=sys.stderr)
print(' '.join(sys.argv))
print('Time :' + datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
print('Device : {} ({})'.format(th.config.device, th.config.floatX))
print('Host :', socket.gethostname())
print(nn.get_layers_info(layers))
print(nn.get_training_params_info(tr_prms))
########################################## Load Data
data = importlib.import_module("data." + dataset_name)
tr_corpus_sz, n_maps, _, layers[0][1]['img_sz'] = data.training_x.shape
te_corpus_sz = data.testing_x.shape[0]
data.training_x = fixdim(data.training_x)
data.testing_x = fixdim(data.testing_x)
trin_x = share(data.training_x)
test_x = share(data.testing_x)
trin_y = share(data.training_y, 'int32')
test_y = share(data.testing_y, 'int32')
try:
trin_aux = share(data.training_aux)
test_aux = share(data.testing_aux)
except AttributeError:
trin_aux, test_aux = None, None
print("\nInitializing the net ... ")
net = nn.NeuralNet(layers, tr_prms, allwts)
print(net)
print(net.get_wts_info(detailed=True).replace("\n\t", ""))
print("\nCompiling ... ")
training_fn = net.get_trin_model(trin_x, trin_y, trin_aux)
test_fn_tr = net.get_test_model(trin_x, trin_y, trin_aux)
test_fn_te = net.get_test_model(test_x, test_y, test_aux)
batch_sz = tr_prms['BATCH_SZ']
nEpochs = tr_prms['NUM_EPOCHS']
nTrBatches = tr_corpus_sz // batch_sz
nTeBatches = te_corpus_sz // batch_sz
############################################## MORE HELPERS
def test_wrapper(nylist):
sym_err, bit_err, n = 0., 0., 0
for symdiff, bitdiff in nylist:
sym_err += symdiff
bit_err += bitdiff
n += 1
return 100 * sym_err / n, 100 * bit_err / n
if net.tr_layers[-1].kind == 'LOGIT':
aux_err_name = 'BitErr'
else:
aux_err_name = 'P(MLE)'
def get_test_indices(tot_samps, bth_samps=tr_prms['TEST_SAMP_SZ']):
n_bths_each = int(bth_samps / batch_sz)
n_bths_all = int(tot_samps / batch_sz)
cur = 0
while True:
yield [i % n_bths_all for i in range(cur, cur + n_bths_each)]
cur = (cur + n_bths_each) % n_bths_all
test_indices = get_test_indices(te_corpus_sz)
trin_indices = get_test_indices(tr_corpus_sz)
pickle_file_name = out_file_head + '_{:02.0f}.pkl'
saved_file_name = None
def do_test():
global saved_file_name
test_err, aux_test_err = test_wrapper(test_fn_te(i)
for i in next(test_indices))
trin_err, aux_trin_err = test_wrapper(test_fn_tr(i)
for i in next(trin_indices))
print("{:5.2f}% ({:5.2f}%) {:5.2f}% ({:5.2f}%)".format(
trin_err, aux_trin_err, test_err, aux_test_err))
sys.stdout.forceflush()
if saved_file_name:
os.remove(saved_file_name)
saved_file_name = pickle_file_name.format(test_err)
with open(saved_file_name, 'wb') as pkl_file:
pickle.dump(net.get_init_params(), pkl_file, -1)
############################################ Training Loop
np.set_printoptions(precision=2)
print("Training ...")
print("Epoch Cost Tr_Error Tr_{0} Te_Error Te_{0}".format(aux_err_name))
for epoch in range(nEpochs):
total_cost = 0
for ibatch in range(nTrBatches):
cost, features, logprobs = training_fn(ibatch)
total_cost += cost
labels = data.training_y[ibatch*batch_sz:(ibatch+1)*batch_sz]
true_features = features[np.arange(batch_sz), labels]
if np.min(true_features) < -6 and layers[-1][0][:3] == "Exp":
print("Epoch:{} Iteration:{}".format(epoch, ibatch))
print(labels)
print(true_features)
print(net.get_wts_info(detailed=True))
if np.isnan(total_cost):
print("Epoch:{} Iteration:{}".format(epoch, ibatch))
print(net.get_wts_info(detailed=True))
raise ZeroDivisionError("Nan cost at Epoch:{} Iteration:{}"
"".format(epoch, ibatch))
if epoch % tr_prms['EPOCHS_TO_TEST'] == 0:
print("{:3d} {:>8.2f}".format(net.get_epoch(), total_cost), end=' ')
do_test()
if total_cost > 1e6:
print(net.get_wts_info(detailed=True))
net.inc_epoch_set_rate()
########################################## Final Error Rates
test_err, aux_test_err = test_wrapper(test_fn_te(i)
for i in range(te_corpus_sz//batch_sz))
trin_err, aux_trin_err = test_wrapper(test_fn_tr(i)
for i in range(tr_corpus_sz//batch_sz))
print("{:3d} {:>8.2f}".format(net.get_epoch(), 0), end=' ')
print("{:5.2f}% ({:5.2f}%) {:5.2f}% ({:5.2f}%)".format(
trin_err, aux_trin_err, test_err, aux_test_err))
| rakeshvar/theanet | train.py | Python | apache-2.0 | 7,597 | 0.001843 |
# Copyright 2012 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Refer to the README and COPYING files for full details of the license
#
import uuid
from testlib import VdsmTestCase as TestCaseBase
from storage import blockSD
SDBLKSZ = 512
class FakeBlockStorageDomain(blockSD.BlockStorageDomain):
DOMAIN_VERSION = 3
def __init__(self, sdUUID, occupiedMetadataSlots=None):
self._sdUUID = sdUUID
self._logBlkSize = SDBLKSZ
self.occupiedMetadataSlots = occupiedMetadataSlots
@property
def sdUUID(self):
return self._sdUUID
@property
def logBlkSize(self):
return self._logBlkSize
@property
def stat(self):
return None
def getVersion(self):
return self.DOMAIN_VERSION
def _getOccupiedMetadataSlots(self):
return self.occupiedMetadataSlots
class BlockDomainMetadataSlotTests(TestCaseBase):
OCCUPIED_METADATA_SLOTS = [(4, 1), (7, 1)]
EXPECTED_METADATA_SLOT = 5
def setUp(self):
self.blksd = FakeBlockStorageDomain(str(uuid.uuid4()),
self.OCCUPIED_METADATA_SLOTS)
def testMetaSlotSelection(self):
with self.blksd.acquireVolumeMetadataSlot(None, 1) as mdSlot:
self.assertEqual(mdSlot, self.EXPECTED_METADATA_SLOT)
def testMetaSlotLock(self):
with self.blksd.acquireVolumeMetadataSlot(None, 1):
acquired = self.blksd._lvTagMetaSlotLock.acquire(False)
self.assertEqual(acquired, False)
| kvaps/vdsm | tests/volumeTests.py | Python | gpl-2.0 | 2,197 | 0 |
#!/bin/python3.5
# Program taken from HackerRank: it receives a list of 0s and 1s representing doors, 0 for an open door and 1 for a closed one.
# Our goal is to open all the doors.
# When a door is opened, the adjacent ones open as well if they are not already open.
# For a list of 0s and 1s, the program returns the minimum and maximum number of doors to open following this pattern.
import sys
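# Quick sanity check of the function as implemented below (illustrative only):
#   puertas([1, 1, 1, 1, 0]) returns [1, 2]  # one closed-door triple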
def puertas( doors ):
min = 0
max = 0
i = 1
while i < len( doors) -2 :
        # Cases where a reduction applies
if(doors[i]) == 1:
if doors[ i-1 : i+2] == [1,1,1]:
min += 1
max += 2
i += 2
elif doors[ i] == 1:
min += 1
max += 1
i += 1
else:
min += 1
max += 1
i += 1
return [ min , max]
def prueba ( ):
for i in range (10):
print (i )
i += i
if __name__ == "__main__":
doors = list ( map( int, input().strip().split(' ')))
print ("La puerta creada: " , doors)
result = puertas (doors)
print( " ".join( map(str , result )))
prueba();
| BlancaCC/cultutrilla | python_aprendizaje/ejemplos_básicos/puertas.py | Python | gpl-3.0 | 1,203 | 0.030075 |
# -*- coding: utf-8 -*-
# Copyright (c) 2016-2017, Zhijiang Yao, Jie Dong and Dongsheng Cao
# All rights reserved.
# This file is part of the PyBioMed.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the PyBioMed source tree.
"""
This module is used for obtaining the properties of amino acids or their pairs
from the aaindex database. You can freely use and distribute it. If you have
any problems, please feel free to contact us.
Authors: Zhijiang Yao and Dongsheng Cao.
Date: 2016.06.04
Email: [email protected]
"""
# Core Library modules
import os
import string
import sys
AALetter = [
"A",
"R",
"N",
"D",
"C",
"E",
"Q",
"G",
"H",
"I",
"L",
"K",
"M",
"F",
"P",
"S",
"T",
"W",
"Y",
"V",
]
_aaindex = dict()
#####################################################################################################
class Record:
"""
Amino acid index (AAindex) Record
"""
aakeys = "ARNDCQEGHILKMFPSTWYV"
def __init__(self):
self.key = None
self.desc = ""
self.ref = ""
self.authors = ""
self.title = ""
self.journal = ""
self.correlated = dict()
self.index = dict()
self.comment = ""
def extend(self, row):
i = len(self.index)
for x in row:
self.index[self.aakeys[i]] = x
i += 1
def get(self, aai, aaj=None, d=None):
assert aaj is None
return self.index.get(aai, d)
def __getitem__(self, aai):
return self.get(aai)
def median(self):
x = sorted(filter(None, self.index.values()))
        half = len(x) // 2
if len(x) % 2 == 1:
return x[half]
return (x[half - 1] + x[half]) / 2.0
def __str__(self):
desc = self.desc.replace("\n", " ").strip()
return "%s(%s: %s)" % (self.__class__.__name__, self.key, desc)
#####################################################################################################
class MatrixRecord(Record):
"""
Matrix record for mutation matrices or pair-wise contact potentials
"""
def __init__(self):
Record.__init__(self)
self.index = []
self.rows = dict()
self.cols = dict()
def extend(self, row):
self.index.append(row)
def _get(self, aai, aaj):
i = self.rows[aai]
j = self.cols[aaj]
return self.index[i][j]
def get(self, aai, aaj, d=None):
try:
return self._get(aai, aaj)
except:
pass
try:
return self._get(aaj, aai)
except:
return d
def __getitem__(self, aaij):
return self.get(aaij[0], aaij[1])
def median(self):
x = []
for y in self.index:
x.extend(filter(None, y))
x.sort()
if len(x) % 2 == 1:
            return x[len(x) // 2]
        return sum(x[len(x) // 2 - 1 : len(x) // 2 + 1]) / 2.0
#####################################################################################################
def search(pattern, searchtitle=True, casesensitive=False):
"""
Search for pattern in description and title (optional) of all records and
return matched records as list. By default search case insensitive.
"""
whatcase = lambda i: i
if not casesensitive:
pattern = pattern.lower()
whatcase = lambda i: i.lower()
matches = []
    for record in _aaindex.values():
if (
pattern in whatcase(record.desc)
or searchtitle
and pattern in whatcase(record.title)
):
matches.append(record)
return matches
#####################################################################################################
def grep(pattern):
"""
Search for pattern in title and description of all records (case
insensitive) and print results on standard output.
"""
for record in search(pattern):
print(record)
#####################################################################################################
def get(key):
"""
Get record for key
"""
if len(_aaindex) == 0:
init()
return _aaindex[key]
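# Illustrative usage (mirrors the demo at the bottom of this file): once init()
# has parsed the aaindex files, a record behaves like a per-residue mapping,
# e.g. get('KRIW790103')['A'] returns that property's value for alanine.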
#####################################################################################################
def _float_or_None(x):
if x == "NA" or x == "-":
return None
return float(x)
#####################################################################################################
def init(path=None, index="123"):
"""
Read in the aaindex files. You need to run this (once) before you can
access any records. If the files are not within the current directory,
you need to specify the correct directory path. By default all three
aaindex files are read in.
"""
index = str(index)
if path is None:
for path in [os.path.split(__file__)[0], "."]:
if os.path.exists(os.path.join(path, "aaindex" + index[0])):
break
print("path =", path, file=sys.stderr)
if "1" in index:
_parse(path + "/aaindex1", Record)
if "2" in index:
_parse(path + "/aaindex2", MatrixRecord)
if "3" in index:
_parse(path + "/aaindex3", MatrixRecord)
#####################################################################################################
def init_from_file(filename, type=Record):
_parse(filename, type)
#####################################################################################################
def _parse(filename, rec, quiet=True):
"""
Parse aaindex input file. `rec` must be `Record` for aaindex1 and
    `MatrixRecord` for aaindex2 and aaindex3.
"""
if not os.path.exists(filename):
        import urllib.request
url = (
"ftp://ftp.genome.jp/pub/db/community/aaindex/" + os.path.split(filename)[1]
)
# print 'Downloading "%s"' % (url)
        filename = urllib.request.urlretrieve(url, filename)[0]
# print 'Saved to "%s"' % (filename)
f = open(filename)
current = rec()
lastkey = None
for line in f:
key = line[0:2]
if key[0] == " ":
key = lastkey
if key == "//":
_aaindex[current.key] = current
current = rec()
elif key == "H ":
current.key = line[2:].strip()
elif key == "R ":
current.ref += line[2:]
elif key == "D ":
current.desc += line[2:]
elif key == "A ":
current.authors += line[2:]
elif key == "T ":
current.title += line[2:]
elif key == "J ":
current.journal += line[2:]
elif key == "* ":
current.comment += line[2:]
elif key == "C ":
a = line[2:].split()
for i in range(0, len(a), 2):
current.correlated[a[i]] = float(a[i + 1])
elif key == "I ":
a = line[1:].split()
if a[0] != "A/L":
current.extend(map(_float_or_None, a))
elif list(Record.aakeys) != [i[0] for i in a] + [i[-1] for i in a]:
print("Warning: wrong amino acid sequence for", current.key)
else:
try:
assert list(Record.aakeys[:10]) == [i[0] for i in a]
assert list(Record.aakeys[10:]) == [i[2] for i in a]
except:
print("Warning: wrong amino acid sequence for", current.key)
elif key == "M ":
a = line[2:].split()
if a[0] == "rows":
if a[4] == "rows":
a.pop(4)
assert a[3] == "cols" and len(a) == 6
i = 0
for aa in a[2]:
current.rows[aa] = i
i += 1
i = 0
for aa in a[5]:
current.cols[aa] = i
i += 1
else:
                current.extend(list(map(_float_or_None, a)))
elif not quiet:
print('Warning: line starts with "%s"' % (key))
lastkey = key
f.close()
#####################################################################################################
def GetAAIndex1(name, path="."):
"""
Get the amino acid property values from aaindex1
Usage:
result=GetAAIndex1(name)
Input: name is the name of amino acid property (e.g., KRIW790103)
Output: result is a dict form containing the properties of 20 amino acids
"""
init(path=path)
name = str(name)
temp = get(name.strip())
res = {}
for i in AALetter:
res[i] = temp.get(i)
return res
#####################################################################################################
def GetAAIndex23(name, path="."):
"""
Get the amino acid property values from aaindex2 and aaindex3
Usage:
result=GetAAIndex23(name)
Input: name is the name of amino acid property (e.g.,TANS760101,GRAR740104)
Output: result is a dict form containing the properties of 400 amino acid pairs
"""
init(path=path)
name = str(name)
temp = get(name.strip())
res = {}
for i in AALetter:
for j in AALetter:
res[i + j] = temp.get(i, j)
return res
#####################################################################################################
if __name__ == "__main__":
temp1 = GetAAIndex1("KRIW790103")
print(len(temp1))
temp2 = GetAAIndex23("TANS760101")
print(len(temp2))
temp2 = GetAAIndex23("GRAR740104")
print(len(temp2))
| gadsbyfly/PyBioMed | PyBioMed/PyProtein/PyProteinAAIndex.py | Python | bsd-3-clause | 9,731 | 0.002158 |
import falcon
import json
class LoginController:
def on_post(self, req, resp):
body = req.stream.read()
loginInfo = json.loads(body)
print 'user: ' + loginInfo['userName']
print 'pass: ' + loginInfo['password']
resp.status = falcon.HTTP_200
resp.body = 'ok'
| yanggujun/meagram | login.py | Python | mit | 312 | 0.00641 |
import re
text = open('khalifa_tarikh.txt', mode="r", encoding="utf-8").read()
text = re.sub(r"َ|ً|ُ|ٌ|ِ|ٍ|ْ|ّ|ـ", "", text)
def search_words(checklist):
search_words = open(checklist, mode='r', encoding='utf-8').read().splitlines()
return search_words
def index_generator(word, text):
juz = 'الجزء:'
safha = 'الصفحة:'
page_regex = juz + r' \d+ ¦ ' + safha + r' \d+'
search_regex = word + r'.+?(' + page_regex + ')'
pagination = re.findall(search_regex, text, re.DOTALL)
return pagination
region = r"[وفبل]{0,2}"+r"[اأإآ]" +"فريقي" +r"[اةه]"
def context_search(region, checklist):
gov_words = search_words(checklist)
regex = "(?:\S+\s+){0,8}"+region+"(?:\s+\S+){0,8}"
contexts = re.findall(regex, text, re.DOTALL)
outcomes = []
for passage in contexts:
for word in gov_words:
pre_all = r"(?:و|ف|ب|ل|ك|ال|أ|س|ت|ي|ن|ا){0,6}"
su_all = r"(?:و|ن|ه|ى|ا|تما|ها|نا|ت|تم|هم|كم|ة|كما|تمو|كن|هما|ي|وا|ني|ات|هن|تن|ك|تا){0,4}"
regex_w = r"\b" + pre_all + word + su_all + r"\b"
if len(re.findall(regex_w, passage)) > 0:
passage_page = index_generator(passage, text)
passage = re.sub(r"\n", " ", passage)
outcomes.append((passage, passage_page))
break
return outcomes
governors = context_search(region, 'governors_checklist.txt')
print(governors)
| jedlitools/find-for-me | ex28_context_search.py | Python | mit | 1,510 | 0.009187 |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python wrappers for indexed datasets."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.util import nest
from tensorflow.python.data.util import sparse
from tensorflow.python.data.util import structure
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import gen_experimental_dataset_ops as ged_ops
class MaterializedIndexedDataset(object):
"""MaterializedIndexedDataset is highly experimental!
"""
def __init__(self, materialized_resource, materializer, output_classes,
output_types, output_shapes):
self._materialized_resource = materialized_resource
self._materializer = materializer
self._output_classes = output_classes
self._output_types = output_types
self._output_shapes = output_shapes
@property
def initializer(self):
if self._materializer is not None:
return self._materializer
raise ValueError("MaterializedDataset does not have a materializer")
def get(self, index):
"""Get retrieves a value (or set of values) from the IndexedDataset.
Args:
index: A uint64 scalar or vector tensor with the indices to retrieve.
Returns:
A tensor containing the values corresponding to `index`.
"""
# TODO(saeta): nest.pack_sequence_as(...)
return ged_ops.experimental_indexed_dataset_get(
self._materialized_resource,
index,
output_types=nest.flatten(
sparse.as_dense_types(self._output_types, self._output_classes)),
output_shapes=nest.flatten(
sparse.as_dense_types(self._output_shapes, self._output_classes)))
# TODO(saeta): Add a `DatasetV1` wrapper if this is exposed via the public API.
class IndexedDataset(dataset_ops.Dataset):
"""IndexedDataset is highly experimental!
"""
def __init__(self):
pass
def materialize(self, shared_name=None, container=None):
"""Materialize creates a MaterializedIndexedDataset.
IndexedDatasets can be combined through operations such as TBD. Therefore,
they are only materialized when absolutely required.
Args:
shared_name: a string for the shared name to use for the resource.
container: a string for the container to store the resource.
Returns:
A MaterializedIndexedDataset.
"""
if container is None:
container = ""
if shared_name is None:
shared_name = ""
materialized_resource = (
ged_ops.experimental_materialized_index_dataset_handle(
container=container,
shared_name=shared_name,
**dataset_ops.flat_structure(self)))
with ops.colocate_with(materialized_resource):
materializer = ged_ops.experimental_indexed_dataset_materialize(
self._as_variant_tensor(), materialized_resource)
return MaterializedIndexedDataset(materialized_resource, materializer,
self.output_classes, self.output_types,
self.output_shapes)
@abc.abstractmethod
def _as_variant_tensor(self):
"""Creates a `tf.variant` `tf.Tensor` representing this IndexedDataset.
Returns:
A scalar `tf.Tensor` of `tf.variant` type, which represents this
IndexedDataset.
"""
raise NotImplementedError("IndexedDataset._as_variant_tensor")
# TODO(saeta): Add a `DatasetV1` wrapper if this is exposed via the public API.
class IdentityIndexedDataset(IndexedDataset):
"""IdentityIndexedDataset is a trivial indexed dataset used for testing.
"""
def __init__(self, size):
super(IdentityIndexedDataset, self).__init__()
# TODO(saeta): Verify _size is a scalar!
self._size = ops.convert_to_tensor(size, dtype=dtypes.uint64, name="size")
@property
def _element_structure(self):
return structure.TensorStructure(dtypes.uint64, [])
def _as_variant_tensor(self):
return ged_ops.experimental_identity_indexed_dataset(self._size)
def _inputs(self):
return []
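# Minimal usage sketch (illustrative only, not part of the library; the exact
# invocation may differ across TensorFlow versions since this API is
# experimental):
#
#   ds = IdentityIndexedDataset(10)
#   materialized = ds.materialize()
#   # After running materialized.initializer, materialized.get(index) returns
#   # the element(s) stored at `index` as a tensor.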
| kevin-coder/tensorflow-fork | tensorflow/python/data/experimental/ops/indexed_dataset_ops.py | Python | apache-2.0 | 4,824 | 0.004146 |
#!/usr/bin/env python3
"""
Rename and organize Horos QC exported data in <BIDS Root>/incoming and place in <BIDS Root>/sourcedata
AUTHOR
----
Mike Tyszka, Ph.D.
MIT License
Copyright (c) 2019 Mike Tyszka
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import os
import sys
from glob import glob
import argparse
from pathlib import Path
import pydicom
from shutil import rmtree
def main():
parser = argparse.ArgumentParser(description='Fix subject and session directory naming in Horos output')
parser.add_argument('-d', '--dataset', default='.',
help='BIDS dataset directory containing sourcedata subdirectory')
# Parse command line arguments
args = parser.parse_args()
dataset_dir = os.path.realpath(args.dataset)
incoming_dir = os.path.join(dataset_dir, 'incoming')
sourcedata_dir = os.path.join(dataset_dir, 'sourcedata')
qc_dir = os.path.join(sourcedata_dir, 'QC')
# Create single QC subject
print("Checking that QC subject exists in sourcedata")
if os.path.isdir(qc_dir):
print(" It does - continuning")
else:
print(" QC subject does not exist - creating QC subject in sourcedata")
os.makedirs(qc_dir, exist_ok=True)
# Loop over all Qc study directories in sourcedata
# Expect subject/session directory names in the form "Qc_<session ID>_*/<session dir>/"
# Move session subdirectories from Qc_*/<session dir> to Qc/<ScanDate>
print("Scanning for incoming QC studies")
for inc_qc_dir in glob(os.path.join(incoming_dir, 'Qc*')):
print("")
print(" Processing {}".format(inc_qc_dir))
# There should be only one session subdirectory
dlist = list(glob(os.path.join(inc_qc_dir, '*')))
if len(dlist) > 0:
ses_dir = dlist[0]
# Get first DICOM file in ses_dir at any level
first_dcm = str(list(Path(ses_dir).rglob("*.dcm"))[0])
# Get acquisition date from DICOM header
acq_date = acquisition_date(first_dcm)
# Destination session directory name in QC subject folder
dest_dir = os.path.join(qc_dir, acq_date)
# Move and rename session subdirectory
print(' Moving %s to %s' % (ses_dir, dest_dir))
os.rename(ses_dir, dest_dir)
# Delete incoming Qc_* directory
print(' Deleting %s' % inc_qc_dir)
rmtree(inc_qc_dir)
def acquisition_date(dcm_fname):
"""
Extract acquisition date from DICOM header
:param dcm_fname: DICOM filename
:return acq_date: str, acquisition date (YYYYMMDD)
"""
# Default return date
acq_date = '19010101'
if not os.path.isfile(dcm_fname):
print('* File not found - %s' % dcm_fname)
try:
ds = pydicom.read_file(dcm_fname, force=True)
except IOError:
print("* Problem opening %s" % dcm_fname)
raise
except AttributeError:
print("* Problem opening %s" % dcm_fname)
raise
if ds:
acq_date = ds.AcquisitionDate
else:
print('* DICOM header problem - returning %s' % acq_date)
return acq_date
if 'main' in __name__:
    main()
| jmtyszka/CBICQA | bin/cbicqc_incoming.py | Python | mit | 4,170 | 0.001439
# -*- coding: utf-8 -*-
# Taboot - Client utility for performing deployments with Func.
# Copyright © 2009-2011, Red Hat, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
The Taboot task library.
Taboot was created as a framework to do code deployments which
require a repetitive set of tasks to be run in a certain order against
certain groups of hosts.
"""
__docformat__ = 'restructuredtext'
__author__ = "John Eckersberg"
__license__ = 'GPLv3+'
__version__ = '0.4.0'
__url__ = 'https://fedorahosted.org/Taboot/'
edit_header = '/usr/share/taboot/edit-header'
| tbielawa/Taboot | taboot/__init__.py | Python | gpl-3.0 | 1,161 | 0 |
# Copyright 2018 ACSONE SA/NV
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
import ast
from odoo import api, exceptions, models, _
class MailComposeMessage(models.TransientModel):
_inherit = 'mail.compose.message'
@api.model
def _get_priorities(self):
"""
Load priorities from parameters.
:return: dict
"""
key = 'mail.sending.job.priorities'
try:
priorities = ast.literal_eval(
self.env['ir.config_parameter'].sudo().get_param(
key, default='{}'))
# Catch exception to have a understandable error message
except (ValueError, SyntaxError):
raise exceptions.UserError(
_("Error to load the system parameter (%s) "
"of priorities") % key)
# As literal_eval can transform str into any format, check if we
# have a real dict
if not isinstance(priorities, dict):
raise exceptions.UserError(
_("Error to load the system parameter (%s) of priorities.\n"
"Invalid dictionary") % key)
return priorities
@api.multi
def send_mail(self, auto_commit=False):
"""
Set a priority on subsequent generated mail.mail, using priorities
set into the configuration.
:return: dict/action
"""
active_ids = self.env.context.get('active_ids')
default_priority = self.env.context.get('default_mail_job_priority')
if active_ids and not default_priority:
priorities = self._get_priorities()
size = len(active_ids)
limits = [lim for lim in priorities if lim <= size]
if limits:
prio = priorities.get(max(limits))
self = self.with_context(default_mail_job_priority=prio)
return super().send_mail(auto_commit=auto_commit)
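    # Illustrative note (threshold/priority values are made up): the
    # 'mail.sending.job.priorities' parameter is expected to hold a literal
    # dict mapping a recipient-count threshold to a job priority, e.g.
    # "{10: 20, 100: 50}". With 150 active records the applicable thresholds
    # are [10, 100], so the generated mail.mail records get priority 50,
    # the value stored for max(limits).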
| mozaik-association/mozaik | mail_job_priority/wizards/mail_compose_message.py | Python | agpl-3.0 | 1,920 | 0 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
try:
from django.contrib.auth import get_user_model
except ImportError: # django < 1.5
from django.contrib.auth.models import User
else:
User = get_user_model()
user_orm_label = '%s.%s' % (User._meta.app_label, User._meta.object_name)
user_model_label = '%s.%s' % (User._meta.app_label, User._meta.module_name)
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Attachment'
db.create_table('attachments_attachment', (
('reusableplugin_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['wiki.ReusablePlugin'], unique=True, primary_key=True)),
('current_revision', self.gf('django.db.models.fields.related.OneToOneField')(blank=True, related_name='current_set', unique=True, null=True, to=orm['attachments.AttachmentRevision'])),
('original_filename', self.gf('django.db.models.fields.CharField')(max_length=256, null=True, blank=True)),
))
db.send_create_signal('attachments', ['Attachment'])
# Adding model 'AttachmentRevision'
db.create_table('attachments_attachmentrevision', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('revision_number', self.gf('django.db.models.fields.IntegerField')()),
('user_message', self.gf('django.db.models.fields.TextField')(blank=True)),
('automatic_log', self.gf('django.db.models.fields.TextField')(blank=True)),
('ip_address', self.gf('django.db.models.fields.IPAddressField')(max_length=15, null=True, blank=True)),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm[user_orm_label], null=True, blank=True)),
('modified', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
('created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('previous_revision', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['attachments.AttachmentRevision'], null=True, blank=True)),
('deleted', self.gf('django.db.models.fields.BooleanField')(default=False)),
('locked', self.gf('django.db.models.fields.BooleanField')(default=False)),
('attachment', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['attachments.Attachment'])),
('file', self.gf('django.db.models.fields.files.FileField')(max_length=100)),
('description', self.gf('django.db.models.fields.TextField')(blank=True)),
))
db.send_create_signal('attachments', ['AttachmentRevision'])
def backwards(self, orm):
# Deleting model 'Attachment'
db.delete_table('attachments_attachment')
# Deleting model 'AttachmentRevision'
db.delete_table('attachments_attachmentrevision')
models = {
'attachments.attachment': {
'Meta': {'object_name': 'Attachment', '_ormbases': ['wiki.ReusablePlugin']},
'current_revision': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'current_set'", 'unique': 'True', 'null': 'True', 'to': "orm['attachments.AttachmentRevision']"}),
'original_filename': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'reusableplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['wiki.ReusablePlugin']", 'unique': 'True', 'primary_key': 'True'})
},
'attachments.attachmentrevision': {
'Meta': {'ordering': "('created',)", 'object_name': 'AttachmentRevision'},
'attachment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['attachments.Attachment']"}),
'automatic_log': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'file': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip_address': ('django.db.models.fields.IPAddressField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}),
'locked': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'previous_revision': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['attachments.AttachmentRevision']", 'null': 'True', 'blank': 'True'}),
'revision_number': ('django.db.models.fields.IntegerField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['%s']" % user_orm_label, 'null': 'True', 'blank': 'True'}),
'user_message': ('django.db.models.fields.TextField', [], {'blank': 'True'})
},
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
user_model_label: {
'Meta': {'object_name': User.__name__, 'db_table': "'%s'" % User._meta.db_table},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'wiki.article': {
'Meta': {'object_name': 'Article'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'current_revision': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'current_set'", 'unique': 'True', 'null': 'True', 'to': "orm['wiki.ArticleRevision']"}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.Group']", 'null': 'True', 'blank': 'True'}),
'group_read': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'group_write': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'other_read': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'other_write': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['%s']" % user_orm_label, 'null': 'True', 'blank': 'True'})
},
'wiki.articleplugin': {
'Meta': {'object_name': 'ArticlePlugin'},
'article': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['wiki.Article']"}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'wiki.articlerevision': {
'Meta': {'ordering': "('created',)", 'unique_together': "(('article', 'revision_number'),)", 'object_name': 'ArticleRevision'},
'article': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['wiki.Article']"}),
'automatic_log': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'content': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip_address': ('django.db.models.fields.IPAddressField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}),
'locked': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'previous_revision': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['wiki.ArticleRevision']", 'null': 'True', 'blank': 'True'}),
'redirect': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'redirect_set'", 'null': 'True', 'to': "orm['wiki.Article']"}),
'revision_number': ('django.db.models.fields.IntegerField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['%s']" % user_orm_label, 'null': 'True', 'blank': 'True'}),
'user_message': ('django.db.models.fields.TextField', [], {'blank': 'True'})
},
'wiki.reusableplugin': {
'Meta': {'object_name': 'ReusablePlugin', '_ormbases': ['wiki.ArticlePlugin']},
'articleplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['wiki.ArticlePlugin']", 'unique': 'True', 'primary_key': 'True'}),
'articles': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'shared_plugins_set'", 'symmetrical': 'False', 'to': "orm['wiki.Article']"})
}
}
    complete_apps = ['attachments']
| habibmasuro/django-wiki | wiki/plugins/attachments/migrations/0001_initial.py | Python | gpl-3.0 | 12,406 | 0.008061
# Copyright (C) 2013-2014 Fox Wilson, Peter Foley, Srijay Kasturi, Samuel Damashek, James Forcier and Reed Koser
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import time
from helpers.command import Command
@Command(['time', 'date'])
def cmd(send, msg, args):
"""Tells the time.
Syntax: {command}
"""
bold = '\x02'
if not msg:
msg = bold + "Date: " + bold + "%A, %m/%d/%Y" + bold + " Time: " + bold + "%H:%M:%S"
send(time.strftime(msg))
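    # Example (illustrative): with no argument, the default format above renders
    # something like "Date: Monday, 01/01/2001 Time: 12:00:00", where '\x02'
    # toggles bold in IRC clients.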
| sckasturi/saltlake | commands/time.py | Python | gpl-2.0 | 1,137 | 0.003518 |
from ..remote import RemoteModel
from infoblox_netmri.utils.utils import check_api_availability
class DeviceObjectObjectRemote(RemoteModel):
"""
Network Objects cross usage
| ``DeviceObjectObjectID:`` The internal NetMRI identifier of this usage relationship between network objects.
| ``attribute type:`` number
| ``DeviceID:`` The internal NetMRI identifier for the device to which belongs this network objects.
| ``attribute type:`` number
| ``ParentDeviceObjectID:`` The internal NetMRI identifier of the parent network object (the user).
| ``attribute type:`` number
| ``ChildDeviceObjectID:`` The internal NetMRI identifier of the child network object (the used service).
| ``attribute type:`` number
| ``OoFirstSeenTime:`` The timestamp of when NetMRI saw for the first time this relationship.
| ``attribute type:`` datetime
| ``OoProvisionData:`` Internal data - do not modify, may change without warning.
| ``attribute type:`` string
| ``DataSourceID:`` The internal NetMRI identifier for the collector NetMRI that collected this data record.
| ``attribute type:`` number
| ``OoStartTime:`` The starting effective time of this record.
| ``attribute type:`` datetime
| ``OoEndTime:`` The ending effective time of this record, or empty if still in effect.
| ``attribute type:`` datetime
| ``OoTimestamp:`` The date and time this record was collected or calculated.
| ``attribute type:`` datetime
| ``OoChangedCols:`` The fields that changed between this revision of the record and the previous revision.
| ``attribute type:`` string
"""
properties = ("DeviceObjectObjectID",
"DeviceID",
"ParentDeviceObjectID",
"ChildDeviceObjectID",
"OoFirstSeenTime",
"OoProvisionData",
"DataSourceID",
"OoStartTime",
"OoEndTime",
"OoTimestamp",
"OoChangedCols",
)
@property
@check_api_availability
def parent_device_object(self):
"""
The parent network object of this relationship.
``attribute type:`` model
"""
return self.broker.parent_device_object(**{"DeviceObjectObjectID": self.DeviceObjectObjectID})
@property
@check_api_availability
def child_device_object(self):
"""
The child network object of this relationship.
``attribute type:`` model
"""
return self.broker.child_device_object(**{"DeviceObjectObjectID": self.DeviceObjectObjectID})
@property
@check_api_availability
def data_source(self):
"""
The collector NetMRI that collected this data record.
``attribute type:`` model
"""
return self.broker.data_source(**{"DeviceObjectObjectID": self.DeviceObjectObjectID})
@property
@check_api_availability
def device(self):
"""
The device from which this data was collected.
``attribute type:`` model
"""
return self.broker.device(**{"DeviceObjectObjectID": self.DeviceObjectObjectID})
| infobloxopen/infoblox-netmri | infoblox_netmri/api/remote/models/device_object_object_remote.py | Python | apache-2.0 | 3,242 | 0.004318 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Meta level spider components. Each communicates with one or more servers via
sub-components.
"""
from .pagegetter import PageGetter
from .worker import Worker
from .interface import Interface
from .jobgetter import JobGetter
from .jobscheduler import JobScheduler
from .identityscheduler import IdentityScheduler
from .testing import Testing
from .deltatesting import DeltaTesting
from .identitygetter import IdentityGetter
from .identityworker import IdentityWorker
from .identityinterface import IdentityInterface
from .base import MetaComponent
__all__ = ['PageGetter', 'Worker', 'JobGetter', 'Interface', "JobScheduler",
"IdentityScheduler", "Testing", "DeltaTesting", "MetaComponent",
"IdentityGetter", "IdentityWorker", "IdentityInterface"]
| hiidef/hiispider | hiispider/metacomponents/__init__.py | Python | mit | 805 | 0.008696 |
import json
import pkg_resources
import pylons
pylons.c = pylons.tmpl_context
pylons.g = pylons.app_globals
from pylons import c
from ming.orm import ThreadLocalORMSession
from datadiff.tools import assert_equal
from allura.lib import helpers as h
from allura.tests import decorators as td
from allura import model as M
from alluratest.controller import TestController
class TestRootController(TestController):
def setUp(self):
TestController.setUp(self)
self.setup_with_tools()
@td.with_hg
def setup_with_tools(self):
h.set_context('test', 'src-hg', neighborhood='Projects')
repo_dir = pkg_resources.resource_filename(
'forgehg', 'tests/data')
c.app.repo.fs_path = repo_dir
c.app.repo.status = 'ready'
c.app.repo.name = 'testrepo.hg'
c.app.repo.refresh()
ThreadLocalORMSession.flush_all()
ThreadLocalORMSession.close_all()
h.set_context('test', 'src-hg', neighborhood='Projects')
c.app.repo.refresh()
def test_fork(self):
to_project = M.Project.query.get(shortname='test2', neighborhood_id=c.project.neighborhood_id)
r = self.app.post('/src-hg/fork', params=dict(
project_id=str(to_project._id),
mount_point='code'))
assert "{status: 'error'}" not in str(r.follow())
cloned_from = c.app.repo
with h.push_context('test2', 'code', neighborhood='Projects'):
c.app.repo.init_as_clone(
cloned_from.full_fs_path,
cloned_from.app.config.script_name(),
cloned_from.full_fs_path)
r = self.app.get('/p/test2/code').follow().follow().follow()
assert 'Clone of' in r
r = self.app.get('/src-hg/').follow().follow()
assert 'Forks' in r
def test_merge_request(self):
to_project = M.Project.query.get(shortname='test2', neighborhood_id=c.project.neighborhood_id)
r = self.app.post('/src-hg/fork', params=dict(
project_id=str(to_project._id),
mount_point='code'))
assert "{status: 'error'}" not in str(r.follow())
cloned_from = c.app.repo
with h.push_context('test2', 'code', neighborhood='Projects'):
c.app.repo.init_as_clone(
cloned_from.full_fs_path,
cloned_from.app.config.script_name(),
cloned_from.full_fs_path)
r = self.app.get('/p/test2/code/').follow().follow()
assert 'Request Merge' in r
# Request Merge button only visible to repo admins
kw = dict(extra_environ=dict(username='test-user'))
r = self.app.get('/p/test2/code/', **kw).follow(**kw).follow(**kw)
assert 'Request Merge' not in r, r
# Request merge controller action only permitted for repo admins
r = self.app.get('/p/test2/code/request_merge', status=403, **kw)
r = self.app.get('/p/test2/code/request_merge')
assert 'Request merge' in r
# Merge request detail view
r = r.forms[0].submit().follow()
assert 'would like you to merge' in r
mr_num = r.request.url.split('/')[-2]
# Merge request list view
r = self.app.get('/p/test/src-hg/merge-requests/')
assert 'href="%s/"' % mr_num in r
# Merge request status update
r = self.app.post('/p/test/src-hg/merge-requests/%s/save' % mr_num,
params=dict(status='rejected')).follow()
assert 'Merge Request #%s: (rejected)' % mr_num in r, r
def test_status(self):
resp = self.app.get('/src-hg/status')
d = json.loads(resp.body)
assert d == dict(status='ready')
def test_status_html(self):
resp = self.app.get('/src-hg/').follow().follow()
# repo status not displayed if 'ready'
assert None == resp.html.find('div', dict(id='repo_status'))
h.set_context('test', 'src-hg', neighborhood='Projects')
c.app.repo.status = 'analyzing'
ThreadLocalORMSession.flush_all()
ThreadLocalORMSession.close_all()
# repo status displayed if not 'ready'
resp = self.app.get('/src-hg/').follow().follow()
div = resp.html.find('div', dict(id='repo_status'))
assert div.span.text == 'analyzing'
def test_index(self):
resp = self.app.get('/src-hg/').follow().follow()
assert 'hg clone http://' in resp, resp
def test_index_empty(self):
self.app.get('/test-app-hg/')
def test_commit_browser(self):
resp = self.app.get('/src-hg/commit_browser')
def test_commit_browser_data(self):
resp = self.app.get('/src-hg/commit_browser_data')
        data = json.loads(resp.body)
assert data['max_row'] == 5
assert data['next_column'] == 1
assert_equal(data['built_tree']['e5a0b44437be783c41084e7bf0740f9b58b96ecf'],
{u'url': u'/p/test/src-hg/ci/e5a0b44437be783c41084e7bf0740f9b58b96ecf/',
u'oid': u'e5a0b44437be783c41084e7bf0740f9b58b96ecf',
u'column': 0,
u'parents': [u'773d2f8e3a94d0d5872988b16533d67e1a7f5462'],
u'message': u'Modify README', u'row': 4})
def _get_ci(self):
resp = self.app.get('/src-hg/').follow().follow()
for tag in resp.html.findAll('a'):
if tag['href'].startswith('/p/test/src-hg/ci/'):
return tag['href']
return None
def test_commit(self):
ci = self._get_ci()
resp = self.app.get(ci)
assert 'Rick Copeland' in resp, resp.showbrowser()
def test_tree(self):
ci = self._get_ci()
resp = self.app.get(ci + 'tree/')
assert len(resp.html.findAll('tr')) == 4, resp.showbrowser()
assert 'README' in resp, resp.showbrowser()
def test_file(self):
ci = self._get_ci()
resp = self.app.get(ci + 'tree/README')
assert 'README' in resp.html.find('h2', {'class':'dark title'}).contents[2]
content = str(resp.html.find('div', {'class':'clip grid-19'}))
assert 'This is readme' in content, content
assert '<span id="l1" class="code_block">' in resp
assert 'var hash = window.location.hash.substring(1);' in resp
resp = self.app.get(ci + 'tree/test.jpg')
def test_invalid_file(self):
ci = self._get_ci()
resp = self.app.get(ci + 'tree/READMEz', status=404)
def test_diff(self):
ci = '/p/test/src-hg/ci/e5a0b44437be783c41084e7bf0740f9b58b96ecf/'
parent = '773d2f8e3a94d0d5872988b16533d67e1a7f5462'
resp = self.app.get(ci + 'tree/README?barediff=' + parent,
validate_chunk=True)
assert 'readme' in resp, resp.showbrowser()
assert '+++' in resp, resp.showbrowser()
assert '+Another line' in resp, resp.showbrowser()
def test_binary_diff(self):
ci = '/p/test/src-hg/ci/5a0a993efa9bce7d1983344261393e841fcfd65d/'
parent = '4a7f7ec0dcf5f005eb5d177b3d8c00bfc8159843'
resp = self.app.get(ci + 'tree/bin_file?barediff=' + parent,
validate_chunk=True)
assert 'Cannot display: file marked as a binary type.' in resp
class TestLogPagination(TestController):
def setUp(self):
TestController.setUp(self)
self.setup_with_tools()
@td.with_hg
def setup_with_tools(self):
h.set_context('test', 'src-hg', neighborhood='Projects')
repo_dir = pkg_resources.resource_filename(
'forgehg', 'tests/data')
c.app.repo.fs_path = repo_dir
c.app.repo.status = 'ready'
c.app.repo.name = 'paginationtest.hg'
c.app.repo.refresh()
ThreadLocalORMSession.flush_all()
ThreadLocalORMSession.close_all()
h.set_context('test', 'src-hg', neighborhood='Projects')
c.app.repo.refresh()
def _get_ci(self):
resp = self.app.get('/src-hg/').follow().follow()
for tag in resp.html.findAll('a'):
if tag['href'].startswith('/p/test/src-hg/ci/'):
return tag['href']
return None
def test_show_pagination(self):
resp = self.app.get(self._get_ci() + 'log/')
assert "pager_curpage" in resp
resp = self.app.get(self._get_ci() + 'log/?limit=50')
assert "pager_curpage" not in resp
resp = self.app.get(self._get_ci() + 'log/?page=2')
assert "pager_curpage" not in resp
def test_log_messages(self):
resp = self.app.get(self._get_ci() + 'log/')
# first commit is on the first page
assert "[0debe4]" in resp
# 25th commit is on the first page too
assert "[ab7517]" in resp
# 26th commit is not on the first page
assert "[dc406e]" not in resp
resp = self.app.get(self._get_ci() + 'log/?page=1')
assert "[0debe4]" not in resp
# 26th commit is on the second page
assert "[dc406e]" in resp
# test with greater limit
resp = self.app.get(self._get_ci() + 'log/?limit=50')
assert "[0debe4]" in resp
assert "[dc406e]" in resp
| pombredanne/SourceForge-Allura | ForgeHg/forgehg/tests/functional/test_controllers.py | Python | apache-2.0 | 9,128 | 0.002082 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2017 TickSmith Corp.
#
# Licensed under the MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
'''
Provides reusable query structure
'''
import sys
from tickvaultpythonapi.parsing.operation import Operation, BaseOperation
class Predicate(object):
key = ""
operation = ""
value = ""
opClass = Operation() # Defaults to operation, which allows no operations
def __init__(self, key, op, val):
"""
Assign key, operation and value
"""
self.key = key
self.operation = self.get_valid_op(op)
self.value = val
def get_valid_op(self, op):
"""
Uses opClass (subtypes of Operation) to determine whether the
given operation is allowed. If it is, it returns the string that
will be appended to the key name (ex. '>' results in 'Gte', so that the
query will be 'keyGte')
"""
try:
return self.opClass.get_str(op)
except Exception as e:
sys.exit(e)
def get_as_kv_pair(self):
"""
Get as key-value pair
(ex. key = 'price', operation = '!=', value = '50',
result= {"priceNeq" : "50"})
"""
return {self.key + self.operation : str(self.value)}
def get_as_tuple(self):
"""
Get as tuple
(ex. key = 'price', operation = '!=', value = '50',
result= ("priceNeq","50")
"""
return (self.key + self.operation, str(self.value))
def __str__(self):
"""
@Overrride of __str__()
"""
return self.key + self.operation + "=" + str(self.value)
class BasePredicate(Predicate):
# Replace opClass with BaseOperation
opClass = BaseOperation()
# Getter for opClass
@classmethod
def get_op_class(self):
return self.opClass
if __name__ == '__main__':
params = {"param1":"value1"}
bp = BasePredicate("line_type", "=", "T,E")
print(bp.opClass.op_to_str)
p = bp.get_as_kv_pair()
params = {**params, **p}
print(params)
print(BasePredicate("price", ">", 7).get_as_kv_pair())
print(BasePredicate("price", ">=", "a"))
print(BasePredicate("price", "<=", "7").get_as_kv_pair())
print(BasePredicate("price", "!=", "7"))
| TickSmith/tickvault-python-api | tickvaultpythonapi/parsing/predicate.py | Python | mit | 3,342 | 0.002095 |
# -*- coding: utf-8 -*-
# Copyright 2015-2016 Pavel_M <[email protected]>,
# released under the GNU GPL version 3.
# This plugin is for Zim program by Jaap Karssenberg <[email protected]>.
#
# This plugin uses an icon from Tango Desktop Project (http://tango.freedesktop.org/)
# (the Tango base icon theme is released to the Public Domain).
import gobject
import gtk
import pango
from zim.actions import toggle_action, action
from zim.plugins import PluginClass, extends, WindowExtension
from zim.notebook import Path
from zim.gui.widgets import TOP, TOP_PANE
from zim.signals import ConnectorMixin
from zim.gui.pathbar import ScrolledHBox
from zim.gui.clipboard import Clipboard
import logging
logger = logging.getLogger('zim.plugins.bookmarksbar')
# Keyboard shortcut constants.
BM_TOGGLE_BAR_KEY = 'F4'
BM_ADD_BOOKMARK_KEY = '<alt>0'
class BookmarksBarPlugin(PluginClass):
plugin_info = {
'name': _('BookmarksBar'), # T: plugin name
'description': _('''\
This plugin provides a bar for bookmarks.
'''), # T: plugin description
'author': 'Pavel_M',
'help': 'Plugins:BookmarksBar', }
plugin_preferences = (
# key, type, label, default
('max_bookmarks', 'int', _('Maximum number of bookmarks'), 15, (5, 20)), # T: plugin preference
('save', 'bool', _('Save bookmarks'), True), # T: preferences option
('add_bookmarks_to_beginning', 'bool', _('Add new bookmarks to the beginning of the bar'), False), # T: preferences option
)
@extends('MainWindow')
class MainWindowExtension(WindowExtension):
uimanager_xml = '''
<ui>
<menubar name='menubar'>
<menu action='view_menu'>
<placeholder name='plugin_items'>
<menuitem action='toggle_show_bookmarks'/>
</placeholder>
</menu>
<menu action='tools_menu'>
<placeholder name='plugin_items'>
<menuitem action='add_bookmark'/>
</placeholder>
</menu>
<menu action='go_menu'>
<placeholder name='plugin_items'>
<menu action='go_bookmarks_menu'>
<menuitem action='bookmark_1'/>
<menuitem action='bookmark_2'/>
<menuitem action='bookmark_3'/>
<menuitem action='bookmark_4'/>
<menuitem action='bookmark_5'/>
<menuitem action='bookmark_6'/>
<menuitem action='bookmark_7'/>
<menuitem action='bookmark_8'/>
<menuitem action='bookmark_9'/>
</menu>
</placeholder>
</menu>
</menubar>
<toolbar name='toolbar'>
<placeholder name='tools'>
<toolitem action='toggle_show_bookmarks'/>
</placeholder>
</toolbar>
</ui>
'''
uimanager_menu_labels = {
'go_bookmarks_menu': _('Book_marks'), # T: Menu title
}
def __init__(self, plugin, window):
WindowExtension.__init__(self, plugin, window)
self.widget = BookmarkBar(self.window.ui, self.uistate,
self.window.pageview.get_page)
self.widget.show_all()
# Add a new option to the Index popup menu.
try:
self.widget.connectto(self.window.pageindex.treeview,
'populate-popup', self.on_populate_popup)
except AttributeError:
logger.error('BookmarksBar: popup menu not initialized.')
# Show/hide bookmarks.
self.uistate.setdefault('show_bar', True)
self.toggle_show_bookmarks(self.uistate['show_bar'])
# Init preferences in self.widget.
self.widget.on_preferences_changed(plugin.preferences)
self.widget.connectto(plugin.preferences, 'changed',
lambda o: self.widget.on_preferences_changed(plugin.preferences))
def teardown(self):
if self.widget:
try:
self.window.remove(self.widget)
except ValueError:
pass
self.widget.disconnect_all()
self.widget = None
def hide_widget(self):
'''Hide Bar.'''
self.window.remove(self.widget)
def show_widget(self):
'''Show Bar.'''
self.window.add_widget(self.widget, (TOP_PANE, TOP))
def on_populate_popup(self, treeview, menu):
'''Add 'Add Bookmark' option to the Index popup menu.'''
path = treeview.get_selected_path()
if path:
item = gtk.SeparatorMenuItem()
menu.prepend(item)
item = gtk.MenuItem(_('Add Bookmark')) # T: menu item bookmark plugin
page = self.window.ui.notebook.get_page(path)
item.connect('activate', lambda o: self.widget.add_new_page(page))
menu.prepend(item)
menu.show_all()
@action(_('_Run bookmark'), accelerator='<alt>1')
def bookmark_1(self):
self._open_bookmark(1)
@action(_('_Run bookmark'), accelerator='<alt>2')
def bookmark_2(self):
self._open_bookmark(2)
@action(_('_Run bookmark'), accelerator='<alt>3')
def bookmark_3(self):
self._open_bookmark(3)
@action(_('_Run bookmark'), accelerator='<alt>4')
def bookmark_4(self):
self._open_bookmark(4)
@action(_('_Run bookmark'), accelerator='<alt>5')
def bookmark_5(self):
self._open_bookmark(5)
@action(_('_Run bookmark'), accelerator='<alt>6')
def bookmark_6(self):
self._open_bookmark(6)
@action(_('_Run bookmark'), accelerator='<alt>7')
def bookmark_7(self):
self._open_bookmark(7)
@action(_('_Run bookmark'), accelerator='<alt>8')
def bookmark_8(self):
self._open_bookmark(8)
@action(_('_Run bookmark'), accelerator='<alt>9')
def bookmark_9(self):
self._open_bookmark(9)
def _open_bookmark(self, number):
number -= 1
try:
self.window.ui.open_page(Path(self.widget.paths[number]))
except IndexError:
pass
@toggle_action(_('Bookmarks'), stock='zim-add-bookmark',
tooltip = 'Show/Hide Bookmarks', accelerator = BM_TOGGLE_BAR_KEY) # T: menu item bookmark plugin
def toggle_show_bookmarks(self, active):
'''
Show/hide the bar with bookmarks.
'''
if active:
self.show_widget()
else:
self.hide_widget()
self.uistate['show_bar'] = active
@action(_('Add Bookmark'), accelerator = BM_ADD_BOOKMARK_KEY) # T: menu item bookmark plugin
def add_bookmark(self):
'''
Function to add new bookmarks to the bar.
Introduced to be used via keyboard shortcut.
'''
self.widget.add_new_page()
class BookmarkBar(gtk.HBox, ConnectorMixin):
def __init__(self, ui, uistate, get_page_func):
gtk.HBox.__init__(self)
self.ui = ui
self.uistate = uistate
self.save_flag = False # if True save bookmarks in config
self.add_bookmarks_to_beginning = False # add new bookmarks to the end of the bar
self.max_bookmarks = False # maximum number of bookmarks
self._get_page = get_page_func # function to get current page
# Create button to add new bookmarks.
self.plus_button = IconsButton(gtk.STOCK_ADD, gtk.STOCK_REMOVE, relief = False)
self.plus_button.set_tooltip_text(_('Add bookmark/Show settings'))
self.plus_button.connect('clicked', lambda o: self.add_new_page())
self.plus_button.connect('button-release-event', self.do_plus_button_popup_menu)
self.pack_start(self.plus_button, expand = False)
# Create widget for bookmarks.
self.container = ScrolledHBox()
self.pack_start(self.container, expand = True)
# Toggle between full/short page names.
self.uistate.setdefault('show_full_page_name', False)
# Save path to use later in Copy/Paste menu.
self._saved_bookmark = None
self.paths = [] # list of bookmarks as string objects
self.uistate.setdefault('bookmarks', [])
# Add pages from config to the bar.
for path in self.uistate['bookmarks']:
page = self.ui.notebook.get_page(Path(path))
if page.exists() and (page.name not in self.paths):
self.paths.append(page.name)
self.paths_names = {} # dict of changed names of bookmarks
self.uistate.setdefault('bookmarks_names', {})
# Function to transform random string to paths_names format.
self._convert_path_name = lambda a: ' '.join(a[:25].split())
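		# (Illustrative: a name such as '  My   very long bookmark title here'
		# is first cut to its leading 25 characters and then has its whitespace
		# collapsed to single spaces.)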
# Add alternative bookmark names from config.
for path, name in self.uistate['bookmarks_names'].iteritems():
if path in self.paths:
try:
name = self._convert_path_name(name)
self.paths_names[path] = name
except:
logger.error('BookmarksBar: Error while loading path_names.')
# Look for new pages to mark corresponding bookmarks in the bar.
self.connectto(self.ui, 'open-page', self.on_open_page)
# Delete a bookmark if a page is deleted.
self.connectto(self.ui.notebook, 'deleted-page',
lambda obj, path: self.delete(path.name))
def on_open_page(self, ui, page, path):
'''If a page is present as a bookmark than select it.'''
pagename = page.name
for button in self.container.get_children()[2:]:
if button.zim_path == pagename:
button.set_active(True)
else:
button.set_active(False)
def add_new_page(self, page = None):
'''
Add new page as bookmark to the bar.
:param page: L{Page}, if None takes currently opened page,
'''
if not page:
page = self._get_page()
if page.exists():
return self._add_new(page.name, self.add_bookmarks_to_beginning)
def _add_new(self, path, add_bookmarks_to_beginning = False):
'''Add bookmark to the bar.
:param path: path as a string object
:param add_bookmarks_to_beginning: bool,
add new bookmarks to the beginning of the bar,
'''
if path in self.paths:
logger.debug('BookmarksBar: path is already in the bar.')
self.plus_button.blink()
return False
# Limit max number of bookmarks.
if self.max_bookmarks and (len(self.paths) >= self.max_bookmarks):
logger.debug('BookmarksBar: max number of bookmarks is achieved.')
return False
# Add a new bookmark to the end or to the beginning.
if add_bookmarks_to_beginning:
self.paths.insert(0, path)
else:
self.paths.append(path)
self._reload_bar()
def delete(self, path):
'''
Remove one button from the bar.
:param path: string corresponding to Path.name.
'''
if path in self.paths:
self.paths.remove(path)
self.paths_names.pop(path, None)
self._reload_bar()
def delete_all(self, ask_confirmation = False):
'''
Remove all bookmarks.
:param ask_confirmation: to confirm deleting.
'''
def _delete_all():
self.paths = []
self.paths_names = {}
self._reload_bar()
if ask_confirmation:
# Prevent accidental deleting of all bookmarks.
menu = gtk.Menu()
item = gtk.MenuItem(_('Do you want to delete all bookmarks?')) # T: message for bookmark plugin
item.connect('activate', lambda o: _delete_all())
menu.append(item)
menu.show_all()
menu.popup(None, None, None, 3, 0)
else:
_delete_all()
def change_bookmark(self, old_path, new_path = None):
'''
Change path in bookmark from 'old_path' to 'new_path'.
:param new_path, old_path: strings corresponding to Path.name.
If 'new_path' == None takes currently opened page.
'''
if not new_path:
page = self._get_page()
if page.exists():
new_path = page.name
if new_path and (new_path not in self.paths) and (new_path != old_path):
self.paths[self.paths.index(old_path)] = new_path
name = self.paths_names.pop(old_path, None)
if name:
self.paths_names[new_path] = name
self._reload_bar()
else:
self.plus_button.blink()
def move_bookmark(self, first, second, direction):
'''
Move bookmark 'first' to the place near the bookmark 'second'.
:param first, second: strings corresponding to Path.name.
:param direction: move 'first' bookmark to the 'left' or 'right' of the 'second'.
'''
if (first == second) or (direction not in ('left', 'right')):
return False
if (first in self.paths) and (second in self.paths):
self.paths.remove(first)
ind = self.paths.index(second)
if direction == 'left':
self.paths.insert(ind, first)
else: # direction == 'right'
self.paths.insert(ind + 1, first)
self._reload_bar()
def rename_bookmark(self, button):
'''
		Change the label of the button.
		The new name is taken from the clipboard.
		If the button's name has been changed before,
		change it back to its initial state.
'''
_full, _short = button.zim_path, self._get_short_page_name(button.zim_path)
if button.get_label() in (_short, _full):
# Change the button to new name.
new_name = None
try:
# Take from clipboard.
new_name = self._convert_path_name(Clipboard.get_text())
			except Exception:
				logger.error('BookmarksBar: Error while converting text from the clipboard.')
if new_name:
self.paths_names[_full] = new_name
button.set_label(new_name)
else:
# Change the button back to its initial state.
new_name = _full if self.uistate['show_full_page_name'] else _short
button.set_label(new_name)
self.paths_names.pop(_full, None)
if self.save_flag:
self.uistate['bookmarks_names'] = self.paths_names
def do_plus_button_popup_menu(self, button, event):
'''Handler for button-release-event, triggers popup menu for plus button.'''
if event.button == 3:
menu = gtk.Menu()
item = gtk.CheckMenuItem(_('Show full Page Name')) # T: menu item for context menu
item.set_active(self.uistate['show_full_page_name'])
item.connect('activate', lambda o: self.toggle_show_full_page_name())
menu.append(item)
menu.show_all()
menu.popup(None, None, None, 3, 0)
return True
def do_bookmarks_popup_menu(self, button, event):
'''Handler for button-release-event, triggers popup menu for bookmarks.'''
if event.button != 3:
return False
path = button.zim_path
_button_width = button.size_request()[0]
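		# Paste to the left or right depending on which half of the button was clicked.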
direction = 'left' if (int(event.x) <= _button_width / 2) else 'right'
def set_save_bookmark(path):
self._saved_bookmark = path
if button.get_label() in (path, self._get_short_page_name(path)):
rename_button_text = _('Set New Name') # T: button label
else:
rename_button_text = _('Back to Original Name') # T: button label
# main popup menu
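		# 'gtk-copy' remembers this bookmark; 'gtk-paste' moves the remembered bookmark next to this one.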
main_menu = gtk.Menu()
main_menu_items = (
(_('Remove'), lambda o: self.delete(path)), # T: menu item
(_('Remove All'), lambda o: self.delete_all(True)), # T: menu item
('separator', ''),
('gtk-copy', lambda o: set_save_bookmark(path)),
('gtk-paste', lambda o: self.move_bookmark(self._saved_bookmark, path, direction)),
('separator', ''),
(_('Open in New Window'), lambda o: self.ui.open_new_window(Path(path))), # T: menu item
('separator', ''),
(rename_button_text, lambda o: self.rename_bookmark(button)),
(_('Set to Current Page'), lambda o: self.change_bookmark(path))) # T: menu item
for name, func in main_menu_items:
if name == 'separator':
item = gtk.SeparatorMenuItem()
else:
if 'gtk-' in name:
item = gtk.ImageMenuItem(name)
else:
item = gtk.MenuItem(name)
item.connect('activate', func)
main_menu.append(item)
main_menu.show_all()
main_menu.popup(None, None, None, 3, 0)
return True
def on_bookmark_clicked(self, button):
'''Open page if a bookmark is clicked.'''
self.ui.open_page(Path(button.zim_path))
def on_preferences_changed(self, preferences):
'''Plugin preferences were changed.'''
self.save_flag = preferences['save']
self.add_bookmarks_to_beginning = preferences['add_bookmarks_to_beginning']
if self.save_flag:
self.uistate['bookmarks'] = self.paths
self.uistate['bookmarks_names'] = self.paths_names
else:
self.uistate['bookmarks'] = []
self.uistate['bookmarks_names'] = {}
if self.max_bookmarks != preferences['max_bookmarks']:
self.max_bookmarks = preferences['max_bookmarks']
self._reload_bar() # to update plus_button
def _get_short_page_name(self, name):
'''
		Return the short name for the page.
		Used to set short names for bookmarks.
'''
path = Path(name)
return path.basename
def toggle_show_full_page_name(self):
'''Change page name from short to full and vice versa.'''
self.uistate['show_full_page_name'] = not self.uistate['show_full_page_name']
self._reload_bar()
def _reload_bar(self):
'''Reload bar with bookmarks.'''
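		# Remove the existing bookmark buttons and rebuild them from self.paths.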
for button in self.container.get_children()[2:]:
self.container.remove(button)
page = self._get_page()
if page:
pagename = page.name
else:
pagename = None
for path in self.paths:
if path in self.paths_names:
name = self.paths_names[path]
elif not self.uistate['show_full_page_name']:
name = self._get_short_page_name(path)
else:
name = path
button = gtk.ToggleButton(label = name, use_underline = False)
button.set_tooltip_text(path)
button.zim_path = path
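			# Highlight the bookmark of the currently opened page.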
if path == pagename:
button.set_active(True)
button.connect('clicked', self.on_bookmark_clicked)
button.connect('button-release-event', self.do_bookmarks_popup_menu)
button.show()
self.container.add(button)
# 'Disable' plus_button if max bookmarks is reached.
if self.max_bookmarks and (len(self.paths) >= self.max_bookmarks):
self.plus_button.change_state(False)
else:
self.plus_button.change_state(True)
# Update config files.
if self.save_flag:
self.uistate['bookmarks'] = self.paths
self.uistate['bookmarks_names'] = self.paths_names
class IconsButton(gtk.Button):
'''
	A button which can switch between two icons.
	Used instead of set_sensitive() to show an 'enabled'/'disabled' state,
	because the button still needs to emit signals for the popup menu.
	For a single icon use the standard IconButton from widgets.py.
'''
def __init__(self, stock_enabled, stock_disabled, relief=True, size=gtk.ICON_SIZE_BUTTON):
'''
:param stock_enabled: the stock item for enabled state,
:param stock_disabled: the stock item for disabled state,
		:param relief: when C{False} the button has no visible raised
		edge and will be flat against the background,
:param size: the icons size
'''
gtk.Button.__init__(self)
self.stock_enabled = gtk.image_new_from_stock(stock_enabled, size)
self.stock_disabled = gtk.image_new_from_stock(stock_disabled, size)
self.add(self.stock_enabled)
self._enabled_state = True
self.set_alignment(0.5, 0.5)
if not relief:
self.set_relief(gtk.RELIEF_NONE)
def change_state(self, active = 'default'):
'''
		Change the icon in the button.
		:param active: if True show the 'enabled' icon, if False the 'disabled' one,
		if 'default' toggle the current state.
'''
if active == 'default':
active = not self._enabled_state
if active != self._enabled_state:
self.remove(self.get_child())
if active:
self.add(self.stock_enabled)
else:
self.add(self.stock_disabled)
self._enabled_state = not self._enabled_state
self.show_all()
def blink(self):
		'''Briefly toggle the icon to show
		that a bookmark can't be added/changed.'''
def change_icon():
			'''Switch the icon back; return False so the timeout runs only once.'''
self.change_state()
return False
self.change_state()
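		# Switch the icon back after 300 ms.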
gobject.timeout_add(300, change_icon)
| Osndok/zim-desktop-wiki | zim/plugins/bookmarksbar.py | Python | gpl-2.0 | 18,469 | 0.030646 |
"""
:synopsis: Setup
:copyright: 2014 Nathan Lewis, See LICENSE.txt
.. moduleauthor:: Nathan Lewis <[email protected]>
"""
__version__ = '0.1'
__author__ = 'Nathan Lewis'
__email__ = '[email protected]'
__license__ = 'GPL Version 2'
try:
import twisted
except ImportError:
raise SystemExit("twisted not found. Make sure you "
"have installed the Twisted core package.")
# Runtime dependencies: python-sqlalchemy, python-twisted
from setuptools import setup
setup(
name = "MythTVArchiveServer",
version = __version__,
author = __author__,
author_email = __email__,
license = __license__,
packages=['MythTVArchiveServer', 'MythTVArchiveServer.controllers', 'MythTVArchiveServer.lib',
'MythTVArchiveServer.models', 'MythTVArchiveServer.util', 'MythTVArchiveServer.resource',
'twisted.plugins',],
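    # Ship the Twisted plugin dropin files as package data so they are installed into twisted/plugins.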
package_data={
'twisted': ['plugins/mythtvarchiveserver_plugin.py',
'plugins/mythtvarchiveserver_media_plugin.py'],
},
) | natewlew/mythtvarchiveserver | setup.py | Python | gpl-2.0 | 1,038 | 0.014451 |