repo_name: string, length 7-94
repo_path: string, length 4-237
repo_head_hexsha: string, length 40
content: string, length 10-680k
apis: string, length 2-840k
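Each record below carries these five fields in order. As a rough sketch only (field values abbreviated from the first sample; the dict form and the name "record" are illustrative, not part of the dump), one row can be read as:

record = {
    "repo_name": "Manny27nyc/oci-python-sdk",                                   # GitHub repository
    "repo_path": "src/oci/apm_traces/models/query_result_row_type_summary.py",  # file path inside the repo
    "repo_head_hexsha": "de60b04e07a99826254f7255e992f41772902df7",             # commit hash of the repo head
    "content": "# coding: utf-8 ...",                                           # full source text of the file (abbreviated here)
    "apis": "[((187, 15, 187, 40), 'oci.util.formatted_flat_dict', ...)]",      # extracted API call sites: line/column spans, qualified name, import
}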
Manny27nyc/oci-python-sdk
src/oci/apm_traces/models/query_result_row_type_summary.py
de60b04e07a99826254f7255e992f41772902df7
# coding: utf-8 # Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved. # This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license. from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401 from oci.decorators import init_model_state_from_kwargs @init_model_state_from_kwargs class QueryResultRowTypeSummary(object): """ A summary of the datatype, unit and related metadata of an individual row element of a query result row that is returned. """ def __init__(self, **kwargs): """ Initializes a new QueryResultRowTypeSummary object with values from keyword arguments. The following keyword arguments are supported (corresponding to the getters/setters of this class): :param data_type: The value to assign to the data_type property of this QueryResultRowTypeSummary. :type data_type: str :param unit: The value to assign to the unit property of this QueryResultRowTypeSummary. :type unit: str :param display_name: The value to assign to the display_name property of this QueryResultRowTypeSummary. :type display_name: str :param expression: The value to assign to the expression property of this QueryResultRowTypeSummary. :type expression: str :param query_result_row_type_summaries: The value to assign to the query_result_row_type_summaries property of this QueryResultRowTypeSummary. :type query_result_row_type_summaries: list[oci.apm_traces.models.QueryResultRowTypeSummary] """ self.swagger_types = { 'data_type': 'str', 'unit': 'str', 'display_name': 'str', 'expression': 'str', 'query_result_row_type_summaries': 'list[QueryResultRowTypeSummary]' } self.attribute_map = { 'data_type': 'dataType', 'unit': 'unit', 'display_name': 'displayName', 'expression': 'expression', 'query_result_row_type_summaries': 'queryResultRowTypeSummaries' } self._data_type = None self._unit = None self._display_name = None self._expression = None self._query_result_row_type_summaries = None @property def data_type(self): """ Gets the data_type of this QueryResultRowTypeSummary. Datatype of the query result row element. :return: The data_type of this QueryResultRowTypeSummary. :rtype: str """ return self._data_type @data_type.setter def data_type(self, data_type): """ Sets the data_type of this QueryResultRowTypeSummary. Datatype of the query result row element. :param data_type: The data_type of this QueryResultRowTypeSummary. :type: str """ self._data_type = data_type @property def unit(self): """ Gets the unit of this QueryResultRowTypeSummary. Granular unit in which the query result row element's data is represented. :return: The unit of this QueryResultRowTypeSummary. :rtype: str """ return self._unit @unit.setter def unit(self, unit): """ Sets the unit of this QueryResultRowTypeSummary. Granular unit in which the query result row element's data is represented. :param unit: The unit of this QueryResultRowTypeSummary. :type: str """ self._unit = unit @property def display_name(self): """ Gets the display_name of this QueryResultRowTypeSummary. Alias name if an alias is used for the query result row element or an assigned display name from the query language in some default cases. :return: The display_name of this QueryResultRowTypeSummary. 
:rtype: str """ return self._display_name @display_name.setter def display_name(self, display_name): """ Sets the display_name of this QueryResultRowTypeSummary. Alias name if an alias is used for the query result row element or an assigned display name from the query language in some default cases. :param display_name: The display_name of this QueryResultRowTypeSummary. :type: str """ self._display_name = display_name @property def expression(self): """ Gets the expression of this QueryResultRowTypeSummary. Actual show expression in the user typed query that produced this column. :return: The expression of this QueryResultRowTypeSummary. :rtype: str """ return self._expression @expression.setter def expression(self, expression): """ Sets the expression of this QueryResultRowTypeSummary. Actual show expression in the user typed query that produced this column. :param expression: The expression of this QueryResultRowTypeSummary. :type: str """ self._expression = expression @property def query_result_row_type_summaries(self): """ Gets the query_result_row_type_summaries of this QueryResultRowTypeSummary. A query result row type summary object that represents a nested table structure. :return: The query_result_row_type_summaries of this QueryResultRowTypeSummary. :rtype: list[oci.apm_traces.models.QueryResultRowTypeSummary] """ return self._query_result_row_type_summaries @query_result_row_type_summaries.setter def query_result_row_type_summaries(self, query_result_row_type_summaries): """ Sets the query_result_row_type_summaries of this QueryResultRowTypeSummary. A query result row type summary object that represents a nested table structure. :param query_result_row_type_summaries: The query_result_row_type_summaries of this QueryResultRowTypeSummary. :type: list[oci.apm_traces.models.QueryResultRowTypeSummary] """ self._query_result_row_type_summaries = query_result_row_type_summaries def __repr__(self): return formatted_flat_dict(self) def __eq__(self, other): if other is None: return False return self.__dict__ == other.__dict__ def __ne__(self, other): return not self == other
[((187, 15, 187, 40), 'oci.util.formatted_flat_dict', 'formatted_flat_dict', ({(187, 35, 187, 39): 'self'}, {}), '(self)', False, 'from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel\n')]
salesforce/CodeGen
jaxformer/hf/sample.py
2ca076874ca2d26c2437df2968f6c43df92748bc
# Copyright (c) 2022, salesforce.com, inc. # All rights reserved. # SPDX-License-Identifier: BSD-3-Clause # For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause import os import re import time import random import argparse import torch from transformers import GPT2TokenizerFast from jaxformer.hf.codegen.modeling_codegen import CodeGenForCausalLM ######################################################################## # util class print_time: def __init__(self, desc): self.desc = desc def __enter__(self): print(self.desc) self.t = time.time() def __exit__(self, type, value, traceback): print(f'{self.desc} took {time.time()-self.t:.02f}s') def set_env(): os.environ['TOKENIZERS_PARALLELISM'] = 'false' def set_seed(seed, deterministic=True): random.seed(seed) os.environ['PYTHONHASHSEED'] = str(seed) torch.manual_seed(seed) if torch.cuda.is_available(): torch.cuda.manual_seed(seed) torch.backends.cudnn.deterministic = deterministic torch.backends.cudnn.benchmark = not deterministic # torch.use_deterministic_algorithms(deterministic) def cast(model, fp16=True): if fp16: model.half() return model ######################################################################## # model def create_model(ckpt, fp16=True): if fp16: return CodeGenForCausalLM.from_pretrained(ckpt, revision='float16', torch_dtype=torch.float16, low_cpu_mem_usage=True) else: return CodeGenForCausalLM.from_pretrained(ckpt) def create_tokenizer(): t = GPT2TokenizerFast.from_pretrained('gpt2') t.max_model_input_sizes['gpt2'] = 1e20 return t def include_whitespace(t, n_min=2, n_max=20, as_special_tokens=False): t.add_tokens([' ' * n for n in reversed(range(n_min, n_max))], special_tokens=as_special_tokens) return t def include_tabs(t, n_min=2, n_max=20, as_special_tokens=False): t.add_tokens(['\t' * n for n in reversed(range(n_min, n_max))], special_tokens=as_special_tokens) return t def create_custom_gpt2_tokenizer(): t = create_tokenizer() t = include_whitespace(t=t, n_min=2, n_max=32, as_special_tokens=False) t = include_tabs(t=t, n_min=2, n_max=10, as_special_tokens=False) return t ######################################################################## # sample def sample( device, model, tokenizer, context, pad_token_id, num_return_sequences=1, temp=0.2, top_p=0.95, max_length_sample=128, max_length=2048 ): input_ids = tokenizer( context, truncation=True, padding=True, max_length=max_length, return_tensors='pt', ).input_ids input_ids_len = input_ids.shape[1] assert input_ids_len < max_length with torch.no_grad(): input_ids = input_ids.to(device) tokens = model.generate( input_ids, do_sample=True, num_return_sequences=num_return_sequences, temperature=temp, max_length=input_ids_len + max_length_sample, top_p=top_p, pad_token_id=pad_token_id, use_cache=True, ) text = tokenizer.batch_decode(tokens[:, input_ids_len:, ...]) return text def truncate(completion): def find_re(string, pattern, start_pos): m = pattern.search(string, start_pos) return m.start() if m else -1 terminals = [ re.compile(r, re.MULTILINE) for r in [ '^#', re.escape('<|endoftext|>'), "^'''", '^"""', '\n\n\n' ] ] prints = list(re.finditer('^print', completion, re.MULTILINE)) if len(prints) > 1: completion = completion[:prints[1].start()] defs = list(re.finditer('^def', completion, re.MULTILINE)) if len(defs) > 1: completion = completion[:defs[1].start()] start_pos = 0 terminals_pos = [pos for pos in [find_re(completion, terminal, start_pos) for terminal in terminals] if pos != -1] if len(terminals_pos) > 0: 
return completion[:min(terminals_pos)] else: return completion def test_truncate(): assert truncate('\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#') == '\nif len_a > len_b:\n result = a\nelse:\n result = b' ######################################################################## # main def main(): # (0) constants models_nl = ['codegen-350M-nl', 'codegen-2B-nl', 'codegen-6B-nl', 'codegen-16B-nl'] models_pl = ['codegen-350M-multi', 'codegen-2B-multi', 'codegen-6B-multi', 'codegen-16B-multi', 'codegen-350M-mono', 'codegen-2B-mono', 'codegen-6B-mono', 'codegen-16B-mono'] models = models_nl + models_pl # (1) params parser = argparse.ArgumentParser() parser.add_argument('--model', type=str, choices=models, default='codegen-350M-mono') parser.add_argument('--device', type=str, default='cuda:0') parser.add_argument('--rng-seed', type=int, default=42) parser.add_argument('--rng-deterministic', type=bool, default=True) parser.add_argument('--p', type=float, default=0.95) parser.add_argument('--t', type=float, default=0.2) parser.add_argument('--max-length', type=int, default=128) parser.add_argument('--batch-size', type=int, default=1) parser.add_argument('--no-fp16', action="store_false") parser.add_argument('--pad', type=int, default=50256) parser.add_argument('--context', type=str, default='def helloworld():') args = parser.parse_args() # (2) preamble set_env() set_seed(args.rng_seed, deterministic=args.rng_deterministic) device = torch.device(args.device) if device.type == "cpu": args.no_fp16 = False if args.model.startswith("codegen-16B"): args.no_fp16 = True ckpt = f'./checkpoints/{args.model}' # (3) load with print_time('loading parameters'): model = create_model(ckpt=ckpt, fp16=args.no_fp16).to(device) with print_time('loading tokenizer'): if args.model in models_pl: tokenizer = create_custom_gpt2_tokenizer() else: tokenizer = create_tokenizer() tokenizer.padding_side = 'left' tokenizer.pad_token = args.pad # (4) sample with print_time('sampling'): completion = sample(device=device, model=model, tokenizer=tokenizer, context=args.context, pad_token_id=args.pad, num_return_sequences=args.batch_size, temp=args.t, top_p=args.p, max_length_sample=args.max_length)[0] truncation = truncate(completion) print('=' * 100) print(completion) print('=' * 100) print(args.context+truncation) print('=' * 100) if __name__ == '__main__': test_truncate() main() print('done.')
[((40, 4, 40, 21), 'random.seed', 'random.seed', ({(40, 16, 40, 20): 'seed'}, {}), '(seed)', False, 'import random\n'), ((42, 4, 42, 27), 'torch.manual_seed', 'torch.manual_seed', ({(42, 22, 42, 26): 'seed'}, {}), '(seed)', False, 'import torch\n'), ((43, 7, 43, 32), 'torch.cuda.is_available', 'torch.cuda.is_available', ({}, {}), '()', False, 'import torch\n'), ((69, 8, 69, 49), 'transformers.GPT2TokenizerFast.from_pretrained', 'GPT2TokenizerFast.from_pretrained', ({(69, 42, 69, 48): '"""gpt2"""'}, {}), "('gpt2')", False, 'from transformers import GPT2TokenizerFast\n'), ((191, 13, 191, 38), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ({}, {}), '()', False, 'import argparse\n'), ((211, 13, 211, 38), 'torch.device', 'torch.device', ({(211, 26, 211, 37): 'args.device'}, {}), '(args.device)', False, 'import torch\n'), ((29, 17, 29, 28), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((44, 8, 44, 36), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', ({(44, 31, 44, 35): 'seed'}, {}), '(seed)', False, 'import torch\n'), ((63, 15, 63, 126), 'jaxformer.hf.codegen.modeling_codegen.CodeGenForCausalLM.from_pretrained', 'CodeGenForCausalLM.from_pretrained', (), '', False, 'from jaxformer.hf.codegen.modeling_codegen import CodeGenForCausalLM\n'), ((65, 15, 65, 55), 'jaxformer.hf.codegen.modeling_codegen.CodeGenForCausalLM.from_pretrained', 'CodeGenForCausalLM.from_pretrained', ({(65, 50, 65, 54): 'ckpt'}, {}), '(ckpt)', False, 'from jaxformer.hf.codegen.modeling_codegen import CodeGenForCausalLM\n'), ((118, 9, 118, 24), 'torch.no_grad', 'torch.no_grad', ({}, {}), '()', False, 'import torch\n'), ((142, 8, 142, 35), 're.compile', 're.compile', ({(142, 19, 142, 20): 'r', (142, 22, 142, 34): 're.MULTILINE'}, {}), '(r, re.MULTILINE)', False, 'import re\n'), ((153, 18, 153, 65), 're.finditer', 're.finditer', ({(153, 30, 153, 38): '"""^print"""', (153, 40, 153, 50): 'completion', (153, 52, 153, 64): 're.MULTILINE'}, {}), "('^print', completion, re.MULTILINE)", False, 'import re\n'), ((157, 16, 157, 61), 're.finditer', 're.finditer', ({(157, 28, 157, 34): '"""^def"""', (157, 36, 157, 46): 'completion', (157, 48, 157, 60): 're.MULTILINE'}, {}), "('^def', completion, re.MULTILINE)", False, 'import re\n'), ((146, 12, 146, 38), 're.escape', 're.escape', ({(146, 22, 146, 37): '"""<|endoftext|>"""'}, {}), "('<|endoftext|>')", False, 'import re\n'), ((32, 15, 32, 26), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n')]
dev-11/mars-rover-challenge
tests/services/test_rover_runner_service.py
67569fcc4b93e5ec4cbe466d7a2fd5b3e9a316b0
import unittest

from services import RoverRunnerService
from tests.test_environment.marses import small_mars_with_one_rover_empty_commands
from tests.test_environment import mocks as m
from data_objects import Rover


class TestRoverRunnerService(unittest.TestCase):
    def test_rover_runner_moves_rover_forward(self):
        grid = small_mars_with_one_rover_empty_commands.grid
        rover = small_mars_with_one_rover_empty_commands.rover_setups[0].rover
        tss = m.get_mocked_turn_command_selector_turn_left_from_north_command_only()
        mss = m.get_mocked_move_command_selector_north_command_only()
        rrs = RoverRunnerService(grid, rover, mss, tss)
        final_pos = rrs.run(['M'])
        self.assertEqual(Rover(0, 1, 'N'), final_pos)

    def test_rover_runner_turns_rover_left(self):
        grid = small_mars_with_one_rover_empty_commands.grid
        rover = small_mars_with_one_rover_empty_commands.rover_setups[0].rover
        tss = m.get_mocked_turn_command_selector_turn_left_from_north_command_only()
        mss = m.get_mocked_move_command_selector_north_command_only()
        rrs = RoverRunnerService(grid, rover, mss, tss)
        final_pos = rrs.run(['L'])
        self.assertEqual(Rover(0, 0, 'W'), final_pos)

    def test_rover_runner_turns_rover_right(self):
        grid = small_mars_with_one_rover_empty_commands.grid
        rover = small_mars_with_one_rover_empty_commands.rover_setups[0].rover
        tss = m.get_mocked_turn_command_selector_turn_right_from_north_command_only()
        mss = m.get_mocked_move_command_selector_north_command_only()
        rrs = RoverRunnerService(grid, rover, mss, tss)
        final_pos = rrs.run(['R'])
        self.assertEqual(Rover(0, 0, 'E'), final_pos)

    def test_rover_runner_goes_off_gird_east(self):
        grid = small_mars_with_one_rover_empty_commands.grid
        rover = Rover(1, 1, "E")
        tss = m.get_mocked_turn_command_selector_turn_right_from_north_command_only()
        mss = m.get_mocked_move_command_selector_east_command_only()
        rrs = RoverRunnerService(grid, rover, mss, tss)
        self.assertRaises(ValueError, rrs.run, ['M'])

    def test_rover_runner_goes_off_gird_north(self):
        grid = small_mars_with_one_rover_empty_commands.grid
        rover = Rover(1, 1, "N")
        tss = m.get_mocked_turn_command_selector_turn_right_from_north_command_only()
        mss = m.get_mocked_move_command_selector_north_command_only()
        rrs = RoverRunnerService(grid, rover, mss, tss)
        self.assertRaises(ValueError, rrs.run, ['M'])

    def test_rover_runner_goes_off_gird_west(self):
        grid = small_mars_with_one_rover_empty_commands.grid
        rover = Rover(0, 1, "W")
        tss = m.get_mocked_turn_command_selector_turn_right_from_north_command_only()
        mss = m.get_mocked_move_command_selector_west_command_only()
        rrs = RoverRunnerService(grid, rover, mss, tss)
        self.assertRaises(ValueError, rrs.run, ['M'])

    def test_rover_runner_goes_off_gird_south(self):
        grid = small_mars_with_one_rover_empty_commands.grid
        rover = Rover(0, 0, "S")
        tss = m.get_mocked_turn_command_selector_turn_right_from_north_command_only()
        mss = m.get_mocked_move_command_selector_south_command_only()
        rrs = RoverRunnerService(grid, rover, mss, tss)
        self.assertRaises(ValueError, rrs.run, ['M'])

    def test_rover_runner_does_nothing_empty_command(self):
        grid = small_mars_with_one_rover_empty_commands.grid
        rover = small_mars_with_one_rover_empty_commands.rover_setups[0].rover
        tss = m.get_mocked_turn_command_selector_turn_left_from_north_command_only()
        mss = m.get_mocked_move_command_selector_north_command_only()
        rrs = RoverRunnerService(grid, rover, mss, tss)
        final_pos = rrs.run([])
        self.assertEqual(rover, final_pos)

    def test_rover_runner_raises_error_for_None_command(self):
        grid = small_mars_with_one_rover_empty_commands.grid
        rover = small_mars_with_one_rover_empty_commands.rover_setups[0].rover
        tss = m.get_mocked_turn_command_selector_turn_left_from_north_command_only()
        mss = m.get_mocked_move_command_selector_north_command_only()
        rrs = RoverRunnerService(grid, rover, mss, tss)
        self.assertRaises(TypeError, rrs.run, None)
[((13, 14, 13, 84), 'tests.test_environment.mocks.get_mocked_turn_command_selector_turn_left_from_north_command_only', 'm.get_mocked_turn_command_selector_turn_left_from_north_command_only', ({}, {}), '()', True, 'from tests.test_environment import mocks as m\n'), ((14, 14, 14, 69), 'tests.test_environment.mocks.get_mocked_move_command_selector_north_command_only', 'm.get_mocked_move_command_selector_north_command_only', ({}, {}), '()', True, 'from tests.test_environment import mocks as m\n'), ((15, 14, 15, 55), 'services.RoverRunnerService', 'RoverRunnerService', ({(15, 33, 15, 37): 'grid', (15, 39, 15, 44): 'rover', (15, 46, 15, 49): 'mss', (15, 51, 15, 54): 'tss'}, {}), '(grid, rover, mss, tss)', False, 'from services import RoverRunnerService\n'), ((22, 14, 22, 84), 'tests.test_environment.mocks.get_mocked_turn_command_selector_turn_left_from_north_command_only', 'm.get_mocked_turn_command_selector_turn_left_from_north_command_only', ({}, {}), '()', True, 'from tests.test_environment import mocks as m\n'), ((23, 14, 23, 69), 'tests.test_environment.mocks.get_mocked_move_command_selector_north_command_only', 'm.get_mocked_move_command_selector_north_command_only', ({}, {}), '()', True, 'from tests.test_environment import mocks as m\n'), ((24, 14, 24, 55), 'services.RoverRunnerService', 'RoverRunnerService', ({(24, 33, 24, 37): 'grid', (24, 39, 24, 44): 'rover', (24, 46, 24, 49): 'mss', (24, 51, 24, 54): 'tss'}, {}), '(grid, rover, mss, tss)', False, 'from services import RoverRunnerService\n'), ((31, 14, 31, 85), 'tests.test_environment.mocks.get_mocked_turn_command_selector_turn_right_from_north_command_only', 'm.get_mocked_turn_command_selector_turn_right_from_north_command_only', ({}, {}), '()', True, 'from tests.test_environment import mocks as m\n'), ((32, 14, 32, 69), 'tests.test_environment.mocks.get_mocked_move_command_selector_north_command_only', 'm.get_mocked_move_command_selector_north_command_only', ({}, {}), '()', True, 'from tests.test_environment import mocks as m\n'), ((33, 14, 33, 55), 'services.RoverRunnerService', 'RoverRunnerService', ({(33, 33, 33, 37): 'grid', (33, 39, 33, 44): 'rover', (33, 46, 33, 49): 'mss', (33, 51, 33, 54): 'tss'}, {}), '(grid, rover, mss, tss)', False, 'from services import RoverRunnerService\n'), ((39, 16, 39, 32), 'data_objects.Rover', 'Rover', ({(39, 22, 39, 23): '1', (39, 25, 39, 26): '1', (39, 28, 39, 31): '"""E"""'}, {}), "(1, 1, 'E')", False, 'from data_objects import Rover\n'), ((40, 14, 40, 85), 'tests.test_environment.mocks.get_mocked_turn_command_selector_turn_right_from_north_command_only', 'm.get_mocked_turn_command_selector_turn_right_from_north_command_only', ({}, {}), '()', True, 'from tests.test_environment import mocks as m\n'), ((41, 14, 41, 68), 'tests.test_environment.mocks.get_mocked_move_command_selector_east_command_only', 'm.get_mocked_move_command_selector_east_command_only', ({}, {}), '()', True, 'from tests.test_environment import mocks as m\n'), ((42, 14, 42, 55), 'services.RoverRunnerService', 'RoverRunnerService', ({(42, 33, 42, 37): 'grid', (42, 39, 42, 44): 'rover', (42, 46, 42, 49): 'mss', (42, 51, 42, 54): 'tss'}, {}), '(grid, rover, mss, tss)', False, 'from services import RoverRunnerService\n'), ((47, 16, 47, 32), 'data_objects.Rover', 'Rover', ({(47, 22, 47, 23): '1', (47, 25, 47, 26): '1', (47, 28, 47, 31): '"""N"""'}, {}), "(1, 1, 'N')", False, 'from data_objects import Rover\n'), ((48, 14, 48, 85), 'tests.test_environment.mocks.get_mocked_turn_command_selector_turn_right_from_north_command_only', 
'm.get_mocked_turn_command_selector_turn_right_from_north_command_only', ({}, {}), '()', True, 'from tests.test_environment import mocks as m\n'), ((49, 14, 49, 69), 'tests.test_environment.mocks.get_mocked_move_command_selector_north_command_only', 'm.get_mocked_move_command_selector_north_command_only', ({}, {}), '()', True, 'from tests.test_environment import mocks as m\n'), ((50, 14, 50, 55), 'services.RoverRunnerService', 'RoverRunnerService', ({(50, 33, 50, 37): 'grid', (50, 39, 50, 44): 'rover', (50, 46, 50, 49): 'mss', (50, 51, 50, 54): 'tss'}, {}), '(grid, rover, mss, tss)', False, 'from services import RoverRunnerService\n'), ((55, 16, 55, 32), 'data_objects.Rover', 'Rover', ({(55, 22, 55, 23): '0', (55, 25, 55, 26): '1', (55, 28, 55, 31): '"""W"""'}, {}), "(0, 1, 'W')", False, 'from data_objects import Rover\n'), ((56, 14, 56, 85), 'tests.test_environment.mocks.get_mocked_turn_command_selector_turn_right_from_north_command_only', 'm.get_mocked_turn_command_selector_turn_right_from_north_command_only', ({}, {}), '()', True, 'from tests.test_environment import mocks as m\n'), ((57, 14, 57, 68), 'tests.test_environment.mocks.get_mocked_move_command_selector_west_command_only', 'm.get_mocked_move_command_selector_west_command_only', ({}, {}), '()', True, 'from tests.test_environment import mocks as m\n'), ((58, 14, 58, 55), 'services.RoverRunnerService', 'RoverRunnerService', ({(58, 33, 58, 37): 'grid', (58, 39, 58, 44): 'rover', (58, 46, 58, 49): 'mss', (58, 51, 58, 54): 'tss'}, {}), '(grid, rover, mss, tss)', False, 'from services import RoverRunnerService\n'), ((63, 16, 63, 32), 'data_objects.Rover', 'Rover', ({(63, 22, 63, 23): '0', (63, 25, 63, 26): '0', (63, 28, 63, 31): '"""S"""'}, {}), "(0, 0, 'S')", False, 'from data_objects import Rover\n'), ((64, 14, 64, 85), 'tests.test_environment.mocks.get_mocked_turn_command_selector_turn_right_from_north_command_only', 'm.get_mocked_turn_command_selector_turn_right_from_north_command_only', ({}, {}), '()', True, 'from tests.test_environment import mocks as m\n'), ((65, 14, 65, 69), 'tests.test_environment.mocks.get_mocked_move_command_selector_south_command_only', 'm.get_mocked_move_command_selector_south_command_only', ({}, {}), '()', True, 'from tests.test_environment import mocks as m\n'), ((66, 14, 66, 55), 'services.RoverRunnerService', 'RoverRunnerService', ({(66, 33, 66, 37): 'grid', (66, 39, 66, 44): 'rover', (66, 46, 66, 49): 'mss', (66, 51, 66, 54): 'tss'}, {}), '(grid, rover, mss, tss)', False, 'from services import RoverRunnerService\n'), ((72, 14, 72, 84), 'tests.test_environment.mocks.get_mocked_turn_command_selector_turn_left_from_north_command_only', 'm.get_mocked_turn_command_selector_turn_left_from_north_command_only', ({}, {}), '()', True, 'from tests.test_environment import mocks as m\n'), ((73, 14, 73, 69), 'tests.test_environment.mocks.get_mocked_move_command_selector_north_command_only', 'm.get_mocked_move_command_selector_north_command_only', ({}, {}), '()', True, 'from tests.test_environment import mocks as m\n'), ((74, 14, 74, 55), 'services.RoverRunnerService', 'RoverRunnerService', ({(74, 33, 74, 37): 'grid', (74, 39, 74, 44): 'rover', (74, 46, 74, 49): 'mss', (74, 51, 74, 54): 'tss'}, {}), '(grid, rover, mss, tss)', False, 'from services import RoverRunnerService\n'), ((81, 14, 81, 84), 'tests.test_environment.mocks.get_mocked_turn_command_selector_turn_left_from_north_command_only', 'm.get_mocked_turn_command_selector_turn_left_from_north_command_only', ({}, {}), '()', True, 'from tests.test_environment 
import mocks as m\n'), ((82, 14, 82, 69), 'tests.test_environment.mocks.get_mocked_move_command_selector_north_command_only', 'm.get_mocked_move_command_selector_north_command_only', ({}, {}), '()', True, 'from tests.test_environment import mocks as m\n'), ((83, 14, 83, 55), 'services.RoverRunnerService', 'RoverRunnerService', ({(83, 33, 83, 37): 'grid', (83, 39, 83, 44): 'rover', (83, 46, 83, 49): 'mss', (83, 51, 83, 54): 'tss'}, {}), '(grid, rover, mss, tss)', False, 'from services import RoverRunnerService\n'), ((17, 25, 17, 41), 'data_objects.Rover', 'Rover', ({(17, 31, 17, 32): '(0)', (17, 34, 17, 35): '(1)', (17, 37, 17, 40): '"""N"""'}, {}), "(0, 1, 'N')", False, 'from data_objects import Rover\n'), ((26, 25, 26, 41), 'data_objects.Rover', 'Rover', ({(26, 31, 26, 32): '(0)', (26, 34, 26, 35): '(0)', (26, 37, 26, 40): '"""W"""'}, {}), "(0, 0, 'W')", False, 'from data_objects import Rover\n'), ((35, 25, 35, 41), 'data_objects.Rover', 'Rover', ({(35, 31, 35, 32): '(0)', (35, 34, 35, 35): '(0)', (35, 37, 35, 40): '"""E"""'}, {}), "(0, 0, 'E')", False, 'from data_objects import Rover\n')]
ericdaat/self-label
retrain_with_rotnet.py
7c12f834c7b6bd5bee2f7f165aab33d4c4e50b51
import argparse import warnings warnings.simplefilter("ignore", UserWarning) import files from tensorboardX import SummaryWriter import os import numpy as np import time import torch import torch.optim import torch.nn as nn import torch.utils.data import torchvision import torchvision.transforms as tfs from data import DataSet,return_model_loader from util import weight_init, write_conv, setup_runtime, AverageMeter, MovingAverage def RotationDataLoader(image_dir, is_validation=False, batch_size=256, crop_size=224, num_workers=4,shuffle=True): normalize = tfs.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) transforms = tfs.Compose([ tfs.RandomResizedCrop(crop_size), tfs.RandomGrayscale(p=0.2), tfs.ColorJitter(0.4, 0.4, 0.4, 0.4), tfs.RandomHorizontalFlip(), tfs.Lambda(lambda img: torch.stack([normalize(tfs.ToTensor()( tfs.functional.rotate(img, angle))) for angle in [0, 90, 180, 270]] )) ]) if is_validation: dataset = DataSet(torchvision.datasets.ImageFolder(image_dir + '/val', transforms)) else: dataset = DataSet(torchvision.datasets.ImageFolder(image_dir + '/train', transforms)) loader = torch.utils.data.DataLoader( dataset, batch_size=batch_size, shuffle=shuffle, num_workers=num_workers, pin_memory=True, drop_last=False ) return loader class Optimizer: def __init__(self): self.num_epochs = 30 self.lr = 0.05 self.lr_schedule = lambda epoch: (self.lr * (0.1 ** (epoch//args.lrdrop)))*(epoch<80) + (epoch>=80)*self.lr*(0.1**3) self.momentum = 0.9 self.weight_decay = 10**(-5) self.resume = True self.checkpoint_dir = None self.writer = None self.K = args.ncl self.dev = torch.device("cuda" if torch.cuda.is_available() else "cpu") self.val_loader = RotationDataLoader(args.imagenet_path, is_validation=True, batch_size=args.batch_size, num_workers=args.workers,shuffle=True) def optimize_epoch(self, model, optimizer, loader, epoch, validation=False): print(f"Starting epoch {epoch}, validation: {validation} " + "="*30) loss_value = AverageMeter() rotacc_value = AverageMeter() # house keeping if not validation: model.train() lr = self.lr_schedule(epoch) for pg in optimizer.param_groups: pg['lr'] = lr else: model.eval() XE = torch.nn.CrossEntropyLoss().to(self.dev) l_dl = 0 # len(loader) now = time.time() batch_time = MovingAverage(intertia=0.9) for iter, (data, label, selected) in enumerate(loader): now = time.time() if not validation: niter = epoch * len(loader.dataset) + iter*args.batch_size data = data.to(self.dev) mass = data.size(0) where = np.arange(mass,dtype=int) * 4 data = data.view(mass * 4, 3, data.size(3), data.size(4)) rotlabel = torch.tensor(range(4)).view(-1, 1).repeat(mass, 1).view(-1).to(self.dev) #################### train CNN ########################################### if not validation: final = model(data) if args.onlyrot: loss = torch.Tensor([0]).to(self.dev) else: if args.hc == 1: loss = XE(final[0][where], self.L[selected]) else: loss = torch.mean(torch.stack([XE(final[k][where], self.L[k, selected]) for k in range(args.hc)])) rotloss = XE(final[-1], rotlabel) pred = torch.argmax(final[-1], 1) total_loss = loss + rotloss optimizer.zero_grad() total_loss.backward() optimizer.step() correct = (pred == rotlabel).to(torch.float) rotacc = correct.sum() / float(mass) else: final = model(data) pred = torch.argmax(final[-1], 1) correct = (pred == rotlabel.cuda()).to(torch.float) rotacc = correct.sum() / float(mass) total_loss = torch.Tensor([0]) loss = torch.Tensor([0]) rotloss = torch.Tensor([0]) rotacc_value.update(rotacc.item(), mass) 
loss_value.update(total_loss.item(), mass) batch_time.update(time.time() - now) now = time.time() print( f"Loss: {loss_value.avg:03.3f}, RotAcc: {rotacc_value.avg:03.3f} | {epoch: 3}/{iter:05}/{l_dl:05} Freq: {mass / batch_time.avg:04.1f}Hz:", end='\r', flush=True) # every few iter logging if (iter % args.logiter == 0): if not validation: print(niter, " Loss: {0:.3f}".format(loss.item()), flush=True) with torch.no_grad(): if not args.onlyrot: pred = torch.argmax(final[0][where], dim=1) pseudoloss = XE(final[0][where], pred) if not args.onlyrot: self.writer.add_scalar('Pseudoloss', pseudoloss.item(), niter) self.writer.add_scalar('lr', self.lr_schedule(epoch), niter) self.writer.add_scalar('Loss', loss.item(), niter) self.writer.add_scalar('RotLoss', rotloss.item(), niter) self.writer.add_scalar('RotAcc', rotacc.item(), niter) if iter > 0: self.writer.add_scalar('Freq(Hz)', mass/(time.time() - now), niter) # end of epoch logging if self.writer and (epoch % self.log_interval == 0): write_conv(self.writer, model, epoch) if validation: print('val Rot-Acc: ', rotacc_value.avg) self.writer.add_scalar('val Rot-Acc', rotacc_value.avg, epoch) files.save_checkpoint_all(self.checkpoint_dir, model, args.arch, optimizer, self.L, epoch,lowest=False) return {'loss': loss_value.avg} def optimize(self, model, train_loader): """Perform full optimization.""" first_epoch = 0 model = model.to(self.dev) self.optimize_times = [0] optimizer = torch.optim.SGD(filter(lambda p: p.requires_grad, model.parameters()), weight_decay=self.weight_decay, momentum=self.momentum, lr=self.lr) if self.checkpoint_dir is not None and self.resume: self.L, first_epoch = files.load_checkpoint_all(self.checkpoint_dir, model=None, opt=None) print('loaded from: ', self.checkpoint_dir,flush=True) print('first five entries of L: ', self.L[:5], flush=True) print('found first epoch to be', first_epoch, flush=True) first_epoch = 0 self.optimize_times = [0] self.L = self.L.cuda() print("model.headcount ", model.headcount, flush=True) ##################################################################################### # Perform optmization ############################################################### lowest_loss = 1e9 epoch = first_epoch while epoch < (self.num_epochs+1): if not args.val_only: m = self.optimize_epoch(model, optimizer, train_loader, epoch, validation=False) if m['loss'] < lowest_loss: lowest_loss = m['loss'] files.save_checkpoint_all(self.checkpoint_dir, model, args.arch, optimizer, self.L, epoch, lowest=True) else: print('='*30 +' doing only validation ' + "="*30) epoch = self.num_epochs m = self.optimize_epoch(model, optimizer, self.val_loader, epoch, validation=True) epoch += 1 print(f"Model optimization completed. 
Saving final model to {os.path.join(self.checkpoint_dir, 'model_final.pth.tar')}") torch.save(model, os.path.join(self.checkpoint_dir, 'model_final.pth.tar')) return model def get_parser(): parser = argparse.ArgumentParser(description='Retrain with given labels combined with RotNet loss') # optimizer parser.add_argument('--epochs', default=90, type=int, metavar='N', help='number of epochs') parser.add_argument('--batch-size', default=64, type=int, metavar='BS', help='batch size') parser.add_argument('--lr', default=0.05, type=float, metavar='FLOAT', help='initial learning rate') parser.add_argument('--lrdrop', default=30, type=int, metavar='INT', help='multiply LR by 0.1 every') # architecture parser.add_argument('--arch', default='alexnet', type=str, help='alexnet or resnet') parser.add_argument('--archspec', default='big', type=str, help='big or small for alexnet ') parser.add_argument('--ncl', default=1000, type=int, metavar='INT', help='number of clusters') parser.add_argument('--hc', default=1, type=int, metavar='INT', help='number of heads') parser.add_argument('--init', default=False, action='store_true', help='initialization of network to PyTorch 0.4') # what we do in this code parser.add_argument('--val-only', default=False, action='store_true', help='if we run only validation set') parser.add_argument('--onlyrot', default=False, action='store_true', help='if train only RotNet') # housekeeping parser.add_argument('--data', default="Imagenet", type=str) parser.add_argument('--device', default="0", type=str, metavar='N', help='GPU device') parser.add_argument('--exp', default='./rot-retrain', metavar='DIR', help='path to result dirs') parser.add_argument('--workers', default=6, type=int, metavar='N', help='number workers (default: 6)') parser.add_argument('--imagenet-path', default='/home/ubuntu/data/imagenet', type=str, help='') parser.add_argument('--comment', default='rot-retrain', type=str, help='comment for tensorboardX') parser.add_argument('--log-interval', default=1, type=int, metavar='INT', help='save stuff every x epochs') parser.add_argument('--logiter', default=200, type=int, metavar='INT', help='log every x-th batch') return parser if __name__ == "__main__": args = get_parser().parse_args() name = "%s" % args.comment.replace('/', '_') try: args.device = [int(item) for item in args.device.split(',')] except AttributeError: args.device = [int(args.device)] setup_runtime(seed=42, cuda_dev_id=args.device) print(args, flush=True) print() print(name,flush=True) writer = SummaryWriter('./runs/%s/%s'%(args.data,name)) writer.add_text('args', " \n".join(['%s %s' % (arg, getattr(args, arg)) for arg in vars(args)])) # Setup model and train_loader print('Commencing!', flush=True) model, train_loader = return_model_loader(args) train_loader = RotationDataLoader(args.imagenet_path, is_validation=False, crop_size=224, batch_size=args.batch_size, num_workers=args.workers, shuffle=True) # add additional head to the network for RotNet loss. 
if args.arch == 'alexnet': if args.hc == 1: model.__setattr__("top_layer0", nn.Linear(4096, args.ncl)) model.top_layer = None model.headcount = args.hc+1 model.__setattr__("top_layer%s" % args.hc, nn.Linear(4096, 4)) else: if args.hc == 1: model.__setattr__("top_layer0", nn.Linear(2048*int(args.archspec), args.ncl)) model.top_layer = None model.headcount = args.hc+1 model.__setattr__("top_layer%s" % args.hc, nn.Linear(2048*int(args.archspec), 4)) if args.init: for mod in model.modules(): mod.apply(weight_init) # Setup optimizer o = Optimizer() o.writer = writer o.lr = args.lr o.num_epochs = args.epochs o.resume = True o.log_interval = args.log_interval o.checkpoint_dir = os.path.join(args.exp, 'checkpoints') # Optimize o.optimize(model, train_loader)
[((3, 0, 3, 44), 'warnings.simplefilter', 'warnings.simplefilter', ({(3, 22, 3, 30): '"""ignore"""', (3, 32, 3, 43): 'UserWarning'}, {}), "('ignore', UserWarning)", False, 'import warnings\n'), ((24, 16, 24, 84), 'torchvision.transforms.Normalize', 'tfs.Normalize', (), '', True, 'import torchvision.transforms as tfs\n'), ((38, 13, 45, 5), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (), '', False, 'import torch\n'), ((204, 13, 204, 103), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (), '', False, 'import argparse\n'), ((241, 4, 241, 51), 'util.setup_runtime', 'setup_runtime', (), '', False, 'from util import weight_init, write_conv, setup_runtime, AverageMeter, MovingAverage\n'), ((246, 13, 246, 59), 'tensorboardX.SummaryWriter', 'SummaryWriter', ({(246, 27, 246, 58): "'./runs/%s/%s' % (args.data, name)"}, {}), "('./runs/%s/%s' % (args.data, name))", False, 'from tensorboardX import SummaryWriter\n'), ((252, 26, 252, 51), 'data.return_model_loader', 'return_model_loader', ({(252, 46, 252, 50): 'args'}, {}), '(args)', False, 'from data import DataSet, return_model_loader\n'), ((282, 23, 282, 60), 'os.path.join', 'os.path.join', ({(282, 36, 282, 44): 'args.exp', (282, 46, 282, 59): '"""checkpoints"""'}, {}), "(args.exp, 'checkpoints')", False, 'import os\n'), ((69, 21, 69, 35), 'util.AverageMeter', 'AverageMeter', ({}, {}), '()', False, 'from util import weight_init, write_conv, setup_runtime, AverageMeter, MovingAverage\n'), ((70, 23, 70, 37), 'util.AverageMeter', 'AverageMeter', ({}, {}), '()', False, 'from util import weight_init, write_conv, setup_runtime, AverageMeter, MovingAverage\n'), ((83, 14, 83, 25), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((84, 21, 84, 48), 'util.MovingAverage', 'MovingAverage', (), '', False, 'from util import weight_init, write_conv, setup_runtime, AverageMeter, MovingAverage\n'), ((156, 8, 157, 75), 'files.save_checkpoint_all', 'files.save_checkpoint_all', (), '', False, 'import files\n'), ((26, 32, 26, 64), 'torchvision.transforms.RandomResizedCrop', 'tfs.RandomResizedCrop', ({(26, 54, 26, 63): 'crop_size'}, {}), '(crop_size)', True, 'import torchvision.transforms as tfs\n'), ((27, 32, 27, 58), 'torchvision.transforms.RandomGrayscale', 'tfs.RandomGrayscale', (), '', True, 'import torchvision.transforms as tfs\n'), ((28, 32, 28, 67), 'torchvision.transforms.ColorJitter', 'tfs.ColorJitter', ({(28, 48, 28, 51): '0.4', (28, 53, 28, 56): '0.4', (28, 58, 28, 61): '0.4', (28, 63, 28, 66): '0.4'}, {}), '(0.4, 0.4, 0.4, 0.4)', True, 'import torchvision.transforms as tfs\n'), ((29, 32, 29, 58), 'torchvision.transforms.RandomHorizontalFlip', 'tfs.RandomHorizontalFlip', ({}, {}), '()', True, 'import torchvision.transforms as tfs\n'), ((35, 26, 35, 90), 'torchvision.datasets.ImageFolder', 'torchvision.datasets.ImageFolder', ({(35, 59, 35, 77): "image_dir + '/val'", (35, 79, 35, 89): 'transforms'}, {}), "(image_dir + '/val', transforms)", False, 'import torchvision\n'), ((37, 26, 37, 92), 'torchvision.datasets.ImageFolder', 'torchvision.datasets.ImageFolder', ({(37, 59, 37, 79): "image_dir + '/train'", (37, 81, 37, 91): 'transforms'}, {}), "(image_dir + '/train', transforms)", False, 'import torchvision\n'), ((86, 18, 86, 29), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((126, 18, 126, 29), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((151, 12, 151, 49), 'util.write_conv', 'write_conv', ({(151, 23, 151, 34): 'self.writer', (151, 36, 151, 41): 'model', (151, 43, 151, 48): 
'epoch'}, {}), '(self.writer, model, epoch)', False, 'from util import weight_init, write_conv, setup_runtime, AverageMeter, MovingAverage\n'), ((170, 34, 170, 102), 'files.load_checkpoint_all', 'files.load_checkpoint_all', (), '', False, 'import files\n'), ((197, 26, 197, 82), 'os.path.join', 'os.path.join', ({(197, 39, 197, 58): 'self.checkpoint_dir', (197, 60, 197, 81): '"""model_final.pth.tar"""'}, {}), "(self.checkpoint_dir, 'model_final.pth.tar')", False, 'import os\n'), ((264, 51, 264, 69), 'torch.nn.Linear', 'nn.Linear', ({(264, 61, 264, 65): '(4096)', (264, 67, 264, 68): '(4)'}, {}), '(4096, 4)', True, 'import torch.nn as nn\n'), ((61, 42, 61, 67), 'torch.cuda.is_available', 'torch.cuda.is_available', ({}, {}), '()', False, 'import torch\n'), ((81, 13, 81, 40), 'torch.nn.CrossEntropyLoss', 'torch.nn.CrossEntropyLoss', ({}, {}), '()', False, 'import torch\n'), ((92, 20, 92, 45), 'numpy.arange', 'np.arange', (), '', True, 'import numpy as np\n'), ((106, 23, 106, 49), 'torch.argmax', 'torch.argmax', ({(106, 36, 106, 45): 'final[-1]', (106, 47, 106, 48): '1'}, {}), '(final[-1], 1)', False, 'import torch\n'), ((116, 23, 116, 49), 'torch.argmax', 'torch.argmax', ({(116, 36, 116, 45): 'final[-1]', (116, 47, 116, 48): '1'}, {}), '(final[-1], 1)', False, 'import torch\n'), ((119, 29, 119, 46), 'torch.Tensor', 'torch.Tensor', ({(119, 42, 119, 45): '[0]'}, {}), '([0])', False, 'import torch\n'), ((120, 23, 120, 40), 'torch.Tensor', 'torch.Tensor', ({(120, 36, 120, 39): '[0]'}, {}), '([0])', False, 'import torch\n'), ((121, 26, 121, 43), 'torch.Tensor', 'torch.Tensor', ({(121, 39, 121, 42): '[0]'}, {}), '([0])', False, 'import torch\n'), ((261, 44, 261, 69), 'torch.nn.Linear', 'nn.Linear', ({(261, 54, 261, 58): '(4096)', (261, 60, 261, 68): 'args.ncl'}, {}), '(4096, args.ncl)', True, 'import torch.nn as nn\n'), ((125, 30, 125, 41), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((189, 20, 190, 84), 'files.save_checkpoint_all', 'files.save_checkpoint_all', (), '', False, 'import files\n'), ((196, 69, 196, 125), 'os.path.join', 'os.path.join', ({(196, 82, 196, 101): 'self.checkpoint_dir', (196, 103, 196, 124): '"""model_final.pth.tar"""'}, {}), "(self.checkpoint_dir, 'model_final.pth.tar')", False, 'import os\n'), ((135, 25, 135, 40), 'torch.no_grad', 'torch.no_grad', ({}, {}), '()', False, 'import torch\n'), ((99, 27, 99, 44), 'torch.Tensor', 'torch.Tensor', ({(99, 40, 99, 43): '[0]'}, {}), '([0])', False, 'import torch\n'), ((137, 35, 137, 71), 'torch.argmax', 'torch.argmax', (), '', False, 'import torch\n'), ((30, 78, 30, 92), 'torchvision.transforms.ToTensor', 'tfs.ToTensor', ({}, {}), '()', True, 'import torchvision.transforms as tfs\n'), ((31, 36, 31, 69), 'torchvision.transforms.functional.rotate', 'tfs.functional.rotate', ({(31, 58, 31, 61): 'img', (31, 63, 31, 68): 'angle'}, {}), '(img, angle)', True, 'import torchvision.transforms as tfs\n'), ((147, 65, 147, 76), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n')]
Jinwithyoo/han
tests/vie.py
931a271e56134dcc35029bf75260513b60884f6c
# -*- coding: utf-8 -*-
from tests import HangulizeTestCase
from hangulize.langs.vie import Vietnamese


class VietnameseTestCase(HangulizeTestCase):
    """ http://korean.go.kr/09_new/dic/rule/rule_foreign_0218.jsp """

    lang = Vietnamese()

    def test_1st(self):
        """제1항
        nh는 이어지는 모음과 합쳐서 한 음절로 적는다. 어말이나 자음 앞에서는
        받침 ‘ㄴ' 으로 적되, 그 앞의 모음이 a인 경우에는 a와 합쳐 ‘아인'으로 적는다.
        """
        self.assert_examples({
            # u'Nha Trang': u'냐짱',
            # u'Hô Chi Minh': u'호찌민',
            # u'Thanh Hoa': u'타인호아',
            # u'Đông Khanh': u'동카인',
        })

    def test_2nd(self):
        """제2항
        qu는 이어지는 모음이 a일 경우에는 합쳐서 ‘꽈'로 적는다.
        """
        self.assert_examples({
            'Quang': '꽝',
            # u'hat quan ho': u'핫꽌호',
            'Quôc': '꾸옥',
            'Quyên': '꾸옌',
        })

    def test_3rd(self):
        """제3항
        y는 뒤따르는 모음과 합쳐서 한 음절로 적는다.
        """
        self.assert_examples({
            'yên': '옌',
            'Nguyên': '응우옌',
        })

    def test_4th(self):
        """제4항
        어중의 l이 모음 앞에 올 때에는 ‘ㄹㄹ'로 적는다. 다만, 인명의 성과 이름은
        별개의 단어로 보아 이 규칙을 적용하지 않는다.
        """
        self.assert_examples({
            # u'klông put': u'끌롱쁫',
            'Pleiku': '쁠래이꾸',
            # u'Ha Long': u'할롱',
            # u'My Lay': u'밀라이',
        })
[((9, 11, 9, 23), 'hangulize.langs.vie.Vietnamese', 'Vietnamese', ({}, {}), '()', False, 'from hangulize.langs.vie import Vietnamese\n')]
jackromo/mathLibPy
tests/test_functions/test_trig.py
b80badd293b93da85aaf122c3d3da022f6dab361
from mathlibpy.functions import *
import unittest


class SinTester(unittest.TestCase):

    def setUp(self):
        self.sin = Sin()

    def test_call(self):
        self.assertEqual(self.sin(0), 0)

    def test_eq(self):
        self.assertEqual(self.sin, Sin())

    def test_get_derivative_call(self):
        self.assertEqual(self.sin.get_derivative()(0), 1)


class CosTester(unittest.TestCase):

    def setUp(self):
        self.cos = Cos()

    def test_call(self):
        self.assertEqual(self.cos(0), 1)

    def test_eq(self):
        self.assertEqual(self.cos, Cos())

    def test_get_derivative_call(self):
        self.assertEqual(self.cos.get_derivative()(math.pi/2), -1)


class TanTester(unittest.TestCase):

    def setUp(self):
        self.tan = Tan()

    def test_call(self):
        self.assertEqual(self.tan(0), 0)

    def test_eq(self):
        self.assertEqual(self.tan, Tan())

    def test_get_derivative(self):
        self.assertEqual(self.tan.get_derivative()(0), 1)


if __name__ == "__main__":
    unittest.main()
[((51, 4, 51, 19), 'unittest.main', 'unittest.main', ({}, {}), '()', False, 'import unittest\n')]
rajitbanerjee/leetcode
src/smallestLetter/target.py
720fcdd88d371e2d6592ceec8370a6760a77bb89
class Solution:
    def nextGreatestLetter(self, letters: list, target: str) -> str:
        if target < letters[0] or target >= letters[-1]:
            return letters[0]
        left, right = 0, len(letters) - 1
        while left < right:
            mid = left + (right - left) // 2
            if letters[mid] > target:
                right = mid
            else:
                left = mid + 1
        return letters[right]


if __name__ == '__main__':
    letters = ["c", "f", "j"]
    target = "a"
    print(f"Input: letters = {letters}, target = {target}")
    print(f"Output: {Solution().nextGreatestLetter(letters, target)}")
[]
hyx0329/nonebot_plugin_anti_cpdaily
anti_cpdaily/command.py
5868626fb95876f9638aaa1edd9a2f914ea7bed1
import nonebot
from nonebot import on_command
from nonebot.rule import to_me
from nonebot.typing import T_State
from nonebot.adapters import Bot, Event
from nonebot.log import logger

from .config import global_config
from .schedule import anti_cpdaily_check_routine


cpdaily = on_command('cpdaily')
scheduler = nonebot.require("nonebot_plugin_apscheduler").scheduler


async def one_shot_routine():
    scheduler.remove_job('anti_cpdaily_oneshot')
    await anti_cpdaily_check_routine()


@cpdaily.handle()
async def handle_command(bot: Bot, event: Event, state: T_State):
    """
    Manually activate the routine in 1 min
    """
    if event.get_user_id() in bot.config.superusers:
        logger.debug('manually activate the cpdaily routine')
        # await anti_cpdaily_check_routine()
        scheduler.add_job(one_shot_routine, trigger='interval', minutes=1, id='anti_cpdaily_oneshot', replace_existing=True)
        logger.debug('manual process end')
        await cpdaily.finish('启动今日校园打卡程序ing')
[((12, 10, 12, 31), 'nonebot.on_command', 'on_command', ({(12, 21, 12, 30): '"""cpdaily"""'}, {}), "('cpdaily')", False, 'from nonebot import on_command\n'), ((13, 12, 13, 57), 'nonebot.require', 'nonebot.require', ({(13, 28, 13, 56): '"""nonebot_plugin_apscheduler"""'}, {}), "('nonebot_plugin_apscheduler')", False, 'import nonebot\n'), ((26, 8, 26, 61), 'nonebot.log.logger.debug', 'logger.debug', ({(26, 21, 26, 60): '"""manually activate the cpdaily routine"""'}, {}), "('manually activate the cpdaily routine')", False, 'from nonebot.log import logger\n'), ((29, 8, 29, 42), 'nonebot.log.logger.debug', 'logger.debug', ({(29, 21, 29, 41): '"""manual process end"""'}, {}), "('manual process end')", False, 'from nonebot.log import logger\n')]
gottaegbert/penter
matplotlib/gallery_python/pyplots/dollar_ticks.py
8cbb6be3c4bf67c7c69fa70e597bfbc3be4f0a2d
""" ============ Dollar Ticks ============ Use a `~.ticker.FormatStrFormatter` to prepend dollar signs on y axis labels. """ import numpy as np import matplotlib.pyplot as plt import matplotlib.ticker as ticker # Fixing random state for reproducibility np.random.seed(19680801) fig, ax = plt.subplots() ax.plot(100*np.random.rand(20)) formatter = ticker.FormatStrFormatter('$%1.2f') ax.yaxis.set_major_formatter(formatter) for tick in ax.yaxis.get_major_ticks(): tick.label1.set_visible(False) tick.label2.set_visible(True) tick.label2.set_color('green') plt.show() ############################################################################# # # ------------ # # References # """""""""" # # The use of the following functions, methods, classes and modules is shown # in this example: import matplotlib matplotlib.ticker matplotlib.ticker.FormatStrFormatter matplotlib.axis.Axis.set_major_formatter matplotlib.axis.Axis.get_major_ticks matplotlib.axis.Tick
[((13, 0, 13, 24), 'numpy.random.seed', 'np.random.seed', ({(13, 15, 13, 23): '(19680801)'}, {}), '(19680801)', True, 'import numpy as np\n'), ((15, 10, 15, 24), 'matplotlib.pyplot.subplots', 'plt.subplots', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((18, 12, 18, 47), 'matplotlib.ticker.FormatStrFormatter', 'ticker.FormatStrFormatter', ({(18, 38, 18, 46): '"""$%1.2f"""'}, {}), "('$%1.2f')", True, 'import matplotlib.ticker as ticker\n'), ((26, 0, 26, 10), 'matplotlib.pyplot.show', 'plt.show', ({}, {}), '()', True, 'import matplotlib.pyplot as plt\n'), ((16, 12, 16, 30), 'numpy.random.rand', 'np.random.rand', ({(16, 27, 16, 29): '(20)'}, {}), '(20)', True, 'import numpy as np\n')]
chiro2001/chibrary
Chibrary/utils.py
da31ef80df394cfb260fbe2c1e675f28717fea3e
import json import re from flask import request, abort, jsonify from Chibrary import config from Chibrary.config import logger from Chibrary.exceptions import * from functools import wraps from urllib import parse from Chibrary.server import db def parse_url_query(url: str) -> dict: if not url.lower().startswith('http://') \ and not url.lower().startswith('https://'): return {} query = url[url.rindex('/') + 1:] if '?' not in query: return {} query = query[query.index('?') + 1:] lines = query.split('&') result = {} for line in lines: if line.count('=') != 1: continue key, val = line.split('=') # 注意这里的类型转化处理 if val == 'undefined': val = None else: try: val = int(val) except ValueError: try: val = float(val) except ValueError: pass if val is not None: if type(val) is str: result[key] = parse.unquote(val) else: result[key] = val return result def form_url_query(url: str, data: dict): # if not url.lower().startswith('http://') \ # and not url.lower().startswith('https://'): # logger.warning('Provided wrong url %s !' % url) # return url # if len(data) == 0: # return url # query = '?' # for key in data: # # 特事特办(?) # if type(data[key]) is str and '/' in data[key]: # query = query + parse.urlencode({key: data[key]}) + '&' # else: # query = query + key + '=' + parse.quote(str(data[key])) + '&' # query = query[:-1] # return url + query # 这里是+和%20的坑 return url + '?' + parse.urlencode(data).replace('+', '%20') def remove_ids_dfs(data: dict): if '_id' in data: del data['_id'] for key in data: if type(data[key]) is dict: data[key] = remove_ids_dfs(data[key]) return data """ 返回值格式: { code: ..., message: ..., data: ..., } """ def make_result(code: int, message=None, data=None): result = { 'code': code, } # 根据code选message if message is None: try: result['message'] = config.code[str(code)] except ValueError: logger.warning('Error code %s not found!' 
% code) result['message'] = config.code['0'] else: result['message'] = message if data is not None: # 一定要删除所有_id元素 data = remove_ids_dfs(data) result['data'] = data return result def make_error_result(error): return make_result(1, message=str(error)) def dump(data): return json.dumps(data) def check_args(args: dict, requirements: list): for r in requirements: if r not in args: return False return True def format_file_size(size_by_bytes: int) -> str: units = ['B', 'KB', 'MB', 'GB', 'TB'] # 最终数值应该在1~999之间 index = 0 unit = units[index] while size_by_bytes > 1000: index = index + 1 unit = units[index] size_by_bytes = size_by_bytes / 1000 if index == len(units): break if size_by_bytes > 20: return "%.0f%s" % (size_by_bytes, unit) return "%.2f%s" % (size_by_bytes, unit) # 用户在header里面加上Authorization: {token} def login_check(f): @wraps(f) def decorated(*args, **kwargs): headers = dict(request.headers) if 'Authorization' not in headers: return make_result(3) # login error token = headers['Authorization'] if db.token_find_by_token(token) is None: return make_result(3) # login error return f(*args, **kwargs) return decorated # 用户在header里面加上Authorization: {token} def admin_check(f): @wraps(f) def decorated(*args, **kwargs): headers = dict(request.headers) if 'Authorization' not in headers: return make_result(3) # login error token = headers['Authorization'] token_data = db.token_find_by_token(token) if token_data is None: return make_result(3) # login error # 用户level大于等于10表示有管理员效力 user = db.user_find(username=token_data['username']) if user is None: return make_result(3) # login error,不会有效 if user['info']['level'] < 10: return make_result(10) # No permission return f(*args, **kwargs) return decorated # 必须在request过程中调用,获取不到直接打断 def get_user_from_headers(): headers = dict(request.headers) if 'Authorization' not in headers: abort(jsonify(make_result(3))) # login error token = headers['Authorization'] token_data = db.token_find_by_token(token) if token_data is None: abort(jsonify(make_result(3))) # login error # 用户level大于等于10表示有管理员效力 user = db.user_find(username=token_data['username']) if user is None: abort(jsonify(make_result(3))) # login error,不会有效 return user def check_admin_abort(): headers = dict(request.headers) if 'Authorization' not in headers: abort(jsonify(make_result(3))) # login error token = headers['Authorization'] token_data = db.token_find_by_token(token) if token_data is None: abort(jsonify(make_result(3))) # login error # 用户level大于等于10表示有管理员效力 user = db.user_find(username=token_data['username']) if user is None: abort(jsonify(make_result(3))) # login error,不会有效 if user['info']['level'] < 10: abort(jsonify(make_result(10))) # No permission def is_number(s): try: float(s) return True except ValueError: pass # try: # import unicodedata # unicodedata.numeric(s) # return True # except (TypeError, ValueError): # pass return False # def url_check(url: str): # url = url.lower() # reg = "^(https|http|ftp|rtsp|mms)\\://?([a-zA-Z0-9\\.\\-]+(\\:[a-zA-Z0-9\\.&%\\$\\-]+)*@)?((25[0-5]|2" \ # "[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[1-9])\\.(25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]" \ # "{1}[0-9]{1}|[1-9]|0)\\.(25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[1-9]|0)\\.(25[0-5]|" \ # "2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[0-9])|([a-zA-Z0-9\\-]+\\.)*[a-zA-Z0-9\\-]+\\.[a-zA-Z]" \ # "{2,4})(\\:[0-9]+)?(/[^/][a-zA-Z0-9\\.\\,\\?\\'\\\\/\\+&%\\$\\=~_\\-@]*)*$" # print(re.search(url, reg)) if __name__ == '__main__': 
print(parse_url_query('http://blog.com/sss/ssss/s?wd=dsfa&a=fdsa&a=1&b=1.1&a=s')) print(format_file_size(20250000)) # print(url_check('http://www.bilibili.com/'))
[((110, 11, 110, 27), 'json.dumps', 'json.dumps', ({(110, 22, 110, 26): 'data'}, {}), '(data)', False, 'import json\n'), ((138, 5, 138, 13), 'functools.wraps', 'wraps', ({(138, 11, 138, 12): 'f'}, {}), '(f)', False, 'from functools import wraps\n'), ((153, 5, 153, 13), 'functools.wraps', 'wraps', ({(153, 11, 153, 12): 'f'}, {}), '(f)', False, 'from functools import wraps\n'), ((179, 17, 179, 46), 'Chibrary.server.db.token_find_by_token', 'db.token_find_by_token', ({(179, 40, 179, 45): 'token'}, {}), '(token)', False, 'from Chibrary.server import db\n'), ((183, 11, 183, 56), 'Chibrary.server.db.user_find', 'db.user_find', (), '', False, 'from Chibrary.server import db\n'), ((194, 17, 194, 46), 'Chibrary.server.db.token_find_by_token', 'db.token_find_by_token', ({(194, 40, 194, 45): 'token'}, {}), '(token)', False, 'from Chibrary.server import db\n'), ((198, 11, 198, 56), 'Chibrary.server.db.user_find', 'db.user_find', (), '', False, 'from Chibrary.server import db\n'), ((159, 21, 159, 50), 'Chibrary.server.db.token_find_by_token', 'db.token_find_by_token', ({(159, 44, 159, 49): 'token'}, {}), '(token)', False, 'from Chibrary.server import db\n'), ((163, 15, 163, 60), 'Chibrary.server.db.user_find', 'db.user_find', (), '', False, 'from Chibrary.server import db\n'), ((144, 11, 144, 40), 'Chibrary.server.db.token_find_by_token', 'db.token_find_by_token', ({(144, 34, 144, 39): 'token'}, {}), '(token)', False, 'from Chibrary.server import db\n'), ((39, 30, 39, 48), 'urllib.parse.unquote', 'parse.unquote', ({(39, 44, 39, 47): 'val'}, {}), '(val)', False, 'from urllib import parse\n'), ((63, 23, 63, 44), 'urllib.parse.urlencode', 'parse.urlencode', ({(63, 39, 63, 43): 'data'}, {}), '(data)', False, 'from urllib import parse\n'), ((94, 12, 94, 61), 'Chibrary.config.logger.warning', 'logger.warning', ({(94, 27, 94, 60): "('Error code %s not found!' % code)"}, {}), "('Error code %s not found!' % code)", False, 'from Chibrary.config import logger\n')]
helq/pytropos
tests/inputs/loops/51-arrays-in-loop.py
497ed5902e6e4912249ca0a46b477f9bfa6ae80a
import numpy as np from something import Top i = 0 while i < 10: a = np.ndarray((10,4)) b = np.ones((10, Top)) i += 1 del Top # show_store()
[((6, 8, 6, 26), 'numpy.ndarray', 'np.ndarray', ({(6, 19, 6, 25): '(10, 4)'}, {}), '((10, 4))', True, 'import numpy as np\n'), ((7, 8, 7, 26), 'numpy.ones', 'np.ones', ({(7, 16, 7, 25): '(10, Top)'}, {}), '((10, Top))', True, 'import numpy as np\n')]
Arfey/aiohttp_admin2
tests/mappers/fields/test_float_field.py
2b3782389ec9e25809635811b76ef8111b27d8ba
from aiohttp_admin2.mappers import Mapper from aiohttp_admin2.mappers import fields class FloatMapper(Mapper): field = fields.FloatField() def test_correct_float_type(): """ In this test we check success convert to float type. """ mapper = FloatMapper({"field": 1}) mapper.is_valid() assert mapper.data["field"] == 1.0 mapper = FloatMapper({"field": 2}) mapper.is_valid() assert mapper.data["field"] == 2.0 mapper = FloatMapper({"field": -3}) mapper.is_valid() assert mapper.data["field"] == -3.0 mapper = FloatMapper({"field": 0}) mapper.is_valid() assert mapper.data["field"] == 0.0 def test_wrong_float_type(): """ In this test we check error when we received wrong float type. """ assert FloatMapper({"field": "string"}).is_valid() is False assert FloatMapper({"field": []}).is_valid() is False
[((6, 12, 6, 31), 'aiohttp_admin2.mappers.fields.FloatField', 'fields.FloatField', ({}, {}), '()', False, 'from aiohttp_admin2.mappers import fields\n')]
jdlarsen-UA/flopy
autotest/t038_test.py
bf2c59aaa689de186bd4c80685532802ac7149cd
""" Try to load all of the MODFLOW-USG examples in ../examples/data/mfusg_test. These are the examples that are distributed with MODFLOW-USG. """ import os import flopy # make the working directory tpth = os.path.join("temp", "t038") if not os.path.isdir(tpth): os.makedirs(tpth) # build list of name files to try and load usgpth = os.path.join("..", "examples", "data", "mfusg_test") usg_files = [] for path, subdirs, files in os.walk(usgpth): for name in files: if name.endswith(".nam"): usg_files.append(os.path.join(path, name)) # def test_load_usg(): for fusg in usg_files: d, f = os.path.split(fusg) yield load_model, f, d # function to load a MODFLOW-USG model and then write it back out def load_model(namfile, model_ws): m = flopy.modflow.Modflow.load( namfile, model_ws=model_ws, version="mfusg", verbose=True, check=False ) assert m, f"Could not load namefile {namfile}" assert m.load_fail is False m.change_model_ws(tpth) m.write_input() return if __name__ == "__main__": for fusg in usg_files: d, f = os.path.split(fusg) load_model(f, d)
[((10, 7, 10, 35), 'os.path.join', 'os.path.join', ({(10, 20, 10, 26): '"""temp"""', (10, 28, 10, 34): '"""t038"""'}, {}), "('temp', 't038')", False, 'import os\n'), ((15, 9, 15, 61), 'os.path.join', 'os.path.join', ({(15, 22, 15, 26): '""".."""', (15, 28, 15, 38): '"""examples"""', (15, 40, 15, 46): '"""data"""', (15, 48, 15, 60): '"""mfusg_test"""'}, {}), "('..', 'examples', 'data', 'mfusg_test')", False, 'import os\n'), ((17, 28, 17, 43), 'os.walk', 'os.walk', ({(17, 36, 17, 42): 'usgpth'}, {}), '(usgpth)', False, 'import os\n'), ((11, 7, 11, 26), 'os.path.isdir', 'os.path.isdir', ({(11, 21, 11, 25): 'tpth'}, {}), '(tpth)', False, 'import os\n'), ((12, 4, 12, 21), 'os.makedirs', 'os.makedirs', ({(12, 16, 12, 20): 'tpth'}, {}), '(tpth)', False, 'import os\n'), ((31, 8, 33, 5), 'flopy.modflow.Modflow.load', 'flopy.modflow.Modflow.load', (), '', False, 'import flopy\n'), ((25, 15, 25, 34), 'os.path.split', 'os.path.split', ({(25, 29, 25, 33): 'fusg'}, {}), '(fusg)', False, 'import os\n'), ((43, 15, 43, 34), 'os.path.split', 'os.path.split', ({(43, 29, 43, 33): 'fusg'}, {}), '(fusg)', False, 'import os\n'), ((20, 29, 20, 53), 'os.path.join', 'os.path.join', ({(20, 42, 20, 46): 'path', (20, 48, 20, 52): 'name'}, {}), '(path, name)', False, 'import os\n')]
relikd/botlib
botlib/cli.py
d0c5072d27db1aa3fad432457c90c9e3f23f22cc
#!/usr/bin/env python3 import os from argparse import ArgumentParser, ArgumentTypeError, FileType, Namespace from typing import Any def DirType(string: str) -> str: if os.path.isdir(string): return string raise ArgumentTypeError( 'Directory does not exist: "{}"'.format(os.path.abspath(string))) class Cli(ArgumentParser): def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) def arg(self, *args: Any, **kwargs: Any) -> None: self.add_argument(*args, **kwargs) def arg_bool(self, *args: Any, **kwargs: Any) -> None: self.add_argument(*args, **kwargs, action='store_true') def arg_dir(self, *args: Any, **kwargs: Any) -> None: self.add_argument(*args, **kwargs, type=DirType) def arg_file(self, *args: Any, mode: str = 'r', **kwargs: Any) -> None: self.add_argument(*args, **kwargs, type=FileType(mode)) def parse(self) -> Namespace: return self.parse_args()
[((8, 7, 8, 28), 'os.path.isdir', 'os.path.isdir', ({(8, 21, 8, 27): 'string'}, {}), '(string)', False, 'import os\n'), ((11, 48, 11, 71), 'os.path.abspath', 'os.path.abspath', ({(11, 64, 11, 70): 'string'}, {}), '(string)', False, 'import os\n'), ((28, 48, 28, 62), 'argparse.FileType', 'FileType', ({(28, 57, 28, 61): 'mode'}, {}), '(mode)', False, 'from argparse import ArgumentParser, ArgumentTypeError, FileType, Namespace\n')]
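A short usage sketch for the Cli wrapper above; the import path is inferred from the repo layout and the argument names are made up.

from botlib.cli import Cli  # assumed module path

cli = Cli(description='demo tool')
cli.arg('name')                         # positional argument
cli.arg_bool('--verbose')               # flag stored as True when present
cli.arg_dir('--workdir', default='.')   # validated by DirType
cli.arg_file('--config', mode='r')      # opened via argparse.FileType
args = cli.parse()
print(args.name, args.verbose, args.workdir)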
MatthiasValvekens/certvalidator
pyhanko_certvalidator/asn1_types.py
246c5075ecdb6d50b14c93fdc97a9d0470f84821
from typing import Optional from asn1crypto import core, x509, cms __all__ = [ 'Target', 'TargetCert', 'Targets', 'SequenceOfTargets', 'AttrSpec', 'AAControls' ] class TargetCert(core.Sequence): _fields = [ ('target_certificate', cms.IssuerSerial), ('target_name', x509.GeneralName, {'optional': True}), ('cert_digest_info', cms.ObjectDigestInfo, {'optional': True}) ] class Target(core.Choice): _alternatives = [ ('target_name', x509.GeneralName, {'explicit': 0}), ('target_group', x509.GeneralName, {'explicit': 1}), ('target_cert', TargetCert, {'explicit': 2}) ] class Targets(core.SequenceOf): _child_spec = Target # Blame X.509... class SequenceOfTargets(core.SequenceOf): _child_spec = Targets class AttrSpec(core.SequenceOf): _child_spec = cms.AttCertAttributeType class AAControls(core.Sequence): _fields = [ ('path_len_constraint', core.Integer, {'optional': True}), ('permitted_attrs', AttrSpec, {'optional': True, 'implicit': 0}), ('excluded_attrs', AttrSpec, {'optional': True, 'implicit': 1}), ('permit_unspecified', core.Boolean, {'default': True}) ] def accept(self, attr_id: cms.AttCertAttributeType) -> bool: attr_id_str = attr_id.native excluded = self['excluded_attrs'].native if excluded is not None: excluded = frozenset(excluded) if excluded is not None and attr_id_str in excluded: return False permitted = self['permitted_attrs'].native if permitted is not None: permitted = frozenset(permitted) if permitted is not None and attr_id_str in permitted: return True return bool(self['permit_unspecified']) @classmethod def read_extension_value(cls, cert: x509.Certificate) \ -> Optional['AAControls']: # handle AA controls (not natively supported by asn1crypto, so # not available as an attribute). try: return next( ext['extn_value'].parsed for ext in cert['tbs_certificate']['extensions'] if ext['extn_id'].native == 'aa_controls' ) except StopIteration: return None def _make_tag_explicit(field_decl): tag_dict = field_decl[2] if 'explicit' in tag_dict: return tag_dict['explicit'] = tag_dict['implicit'] del tag_dict['implicit'] def _make_tag_implicit(field_decl): tag_dict = field_decl[2] if 'implicit' in tag_dict: return tag_dict['implicit'] = tag_dict['explicit'] del tag_dict['explicit'] # Deal with wbond/asn1crypto#218 _make_tag_explicit(cms.RoleSyntax._fields[1]) _make_tag_explicit(cms.SecurityCategory._fields[1]) # Deal with wbond/asn1crypto#220 _make_tag_implicit(cms.AttCertIssuer._alternatives[1]) # patch in attribute certificate extensions # Note: unlike in Certomancer, we don't do this one conditionally, since # we need the actual Python types to agree with what we export ext_map = x509.ExtensionId._map ext_specs = x509.Extension._oid_specs ext_map['2.5.29.55'] = 'target_information' ext_specs['target_information'] = SequenceOfTargets ext_map['2.5.29.56'] = 'no_rev_avail' ext_specs['no_rev_avail'] = core.Null ext_map['1.3.6.1.5.5.7.1.6'] = 'aa_controls' ext_specs['aa_controls'] = AAControls ext_map['1.3.6.1.5.5.7.1.4'] = 'audit_identity' ext_specs['audit_identity'] = core.OctetString
[]
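A small sketch of how the AAControls helper above would typically be consumed; it assumes a DER-encoded attribute-authority certificate on disk (the file name is made up) and the import path is inferred from the record above.

from asn1crypto import cms, x509
from pyhanko_certvalidator.asn1_types import AAControls

with open('aa_certificate.der', 'rb') as f:   # hypothetical path
    cert = x509.Certificate.load(f.read())

controls = AAControls.read_extension_value(cert)
if controls is None:
    print('certificate carries no AAControls extension')
else:
    # check whether the 'role' attribute certificate attribute is permitted
    print('role attribute permitted:', controls.accept(cms.AttCertAttributeType('role')))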
ruslankl9/ironpython_training
test/test_delete_group.py
51eaad4da24fdce60fbafee556160a9e847c08cf
from model.group import Group import random def test_delete_some_group(app): if len(app.group.get_group_list()) <= 1: app.group.add_new_group(Group(name='test')) old_list = app.group.get_group_list() index = random.randrange(len(old_list)) app.group.delete_group_by_index(index) new_list = app.group.get_group_list() assert len(old_list) - 1 == len(new_list) del old_list[index] assert old_list == new_list
[((7, 32, 7, 50), 'model.group.Group', 'Group', (), '', False, 'from model.group import Group\n')]
gurkirt/actNet-inAct
Evaluation/batch_detection.py
1930bcb41553e50ddd83985a497a9d5ce4f1fcf2
'''
Author: Gurkirt Singh
Start date: 15th May 2016

The purpose of this file is to read frame-level predictions and
process them to produce a label per video.
'''

from sklearn.svm import LinearSVC
from sklearn.ensemble import RandomForestClassifier
import numpy as np
import pickle
import os
import time,json
import pylab as plt
from eval_detection import ANETdetection
import scipy.io as sio

#######baseDir = "/mnt/sun-alpha/actnet/";
baseDir = "/data/shared/solar-machines/actnet/";
#baseDir = "/mnt/solar-machines/actnet/";
########imgDir = "/mnt/sun-alpha/actnet/rgb-images/";
######## imgDir = "/mnt/DATADISK2/ss-workspace/actnet/rgb-images/";
annotPklFile = "../Evaluation/data/actNet200-V1-3.pkl"


def getscore(ground_truth_filename, prediction_filename,
             tiou_thr=0.5, subset='validation', verbose=True, check_status=True):
    anet_detection = ANETdetection(ground_truth_filename, prediction_filename,
                                   subset=subset, tiou_thr=tiou_thr,
                                   verbose=verbose, check_status=check_status)
    ap = anet_detection.evaluate()
    return ap


def saveAPs():
    K = 5
    subset = 'validation'  # ,'testing']:
    featType = 'IMS-MBH'
    # savename = '{}data/predictions-{}-{}.pkl'.format(baseDir,subset,featType)
    # with open(savename,'r') as f:
    #     data = pickle.load(f)
    outfilename = '{}results/classification/{}-{}-{}.json'.format(baseDir, subset, featType, str(K).zfill(3))
    gtfiile = 'data/activity_net.v1-3.min.json'
    ap = getscore(gtfiile, outfilename)
    print ap
    print np.mean(ap)
    savename = '{}data/weightAP-{}.pkl'.format(baseDir, featType)
    print 'Results saved in ', savename
    with open(savename, 'w') as f:
        pickle.dump(ap, f)


def plotAPs():
    K = 1
    subset = 'validation'  # ,'testing']:
    aps = []
    count = 0
    colors = ['red', 'green', 'blue']
    for featType in ['IMS-MBH', 'IMS', 'MBH']:
        savename = '{}data/weightAP-{}.pkl'.format(baseDir, featType)
        print 'Results saved in ', savename
        with open(savename, 'r') as f:
            ap = pickle.load(f)
        ind = np.arange(count, 600 + count, 3)
        plt.bar(ind, ap, width=0.5, color=colors[count])
        count += 1
    plt.show()


def evalAll():
    K = 10
    subset = 'validation'  # ,'testing']:
    gtfiile = 'data/activity_net.v1-3.min.json'
    result = []
    count = 0
    featType = 'C3D-BIN-BOOST-LONG'
    # outfilename = '{}results/detection/{}-{}-K-{}-{}.json'.format(baseDir,subset,featType,str(K).zfill(3),'alpha-001')
    for alpha in [1, 3, 5]:
        outfilename = '{}results/detection/{}-{}-K-{}-{}.json'.format(
            baseDir, subset, featType, str(K).zfill(3),
            'alpha-{}'.format(str(int(alpha * 10)).zfill(3)))
        print 'Evaluating results from ', outfilename
        for tioth in [0.5, 0.4, 0.3, 0.2, 0.1]:
            ap = getscore(gtfiile, outfilename, tiou_thr=tioth)
            result.append([alpha, tioth, np.mean(ap)])
    result = np.array(result)
    sio.savemat('result-{}.mat'.format(featType), mdict={'ap': result})


if __name__ == "__main__":
    # processOnePredictions()
    # saveAPs()
    # plotAPs()
    evalAll()
[]
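Appended to the script above (which already imports numpy as np and defines getscore), a standalone call would look like this; the prediction file path is a placeholder and the ActivityNet v1.3 ground-truth JSON must be available locally.

ap = getscore('data/activity_net.v1-3.min.json',
              'results/detection/validation-C3D-BIN-BOOST-LONG-K-010-alpha-010.json',
              tiou_thr=0.5)
print 'mean AP @ tIoU 0.5:', np.mean(ap)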
y2ghost/study
python/csv/csv_dict_writer.py
c5278611b0a732fe19e3d805c0c079e530b1d3b2
import csv def csv_dict_writer(path, headers, data): with open(path, 'w', newline='') as csvfile: writer = csv.DictWriter(csvfile, delimiter=',', fieldnames=headers) writer.writeheader() for record in data: writer.writerow(record) if __name__ == '__main__': data = '''book_title,author,publisher,pub_date,isbn Python 101,Mike Driscoll, Mike Driscoll,2020,123456789 wxPython Recipes,Mike Driscoll,Apress,2018,978-1-4842-3237-8 Python Interviews,Mike Driscoll,Packt Publishing,2018,9781788399081''' records = [] for line in data.splitlines(): records.append(line.strip().split(',')) headers = records.pop(0) list_of_dicts = [] for row in records: my_dict = dict(zip(headers, row)) list_of_dicts.append(my_dict) csv_dict_writer('output_dict.csv', headers, list_of_dicts)
[((6, 17, 7, 51), 'csv.DictWriter', 'csv.DictWriter', (), '', False, 'import csv\n')]
moibenko/decisionengine
src/decisionengine/framework/modules/tests/test_module_decorators.py
4c458e0c225ec2ce1e82d56e752724983331b7d1
# SPDX-FileCopyrightText: 2017 Fermi Research Alliance, LLC # SPDX-License-Identifier: Apache-2.0 import pytest from decisionengine.framework.modules import Publisher, Source from decisionengine.framework.modules.Module import verify_products from decisionengine.framework.modules.Source import Parameter def test_multiple_consumes_declarations(): with pytest.raises(Exception, match="@consumes has already been called"): @Publisher.consumes(a=int) @Publisher.consumes(b=float) class _(Publisher.Publisher): pass def test_multiple_produces_declarations(): with pytest.raises(Exception, match="@produces has already been called"): @Source.produces(c=str) @Source.produces(d=bool) class _(Source.Source): pass def test_wrong_product_names(): @Source.produces(a=str) class BMaker(Source.Source): def __init__(self, config): super().__init__(config) def acquire(self): return {"b": ""} maker = BMaker({"channel_name": "test"}) expected_err_msg = ( "The following products were not produced:\n" + " - 'a' of type 'str'\n\n" + "The following products were not declared:\n" + " - 'b' of type 'str'" ) with pytest.raises(Exception, match=expected_err_msg): verify_products(maker, maker.acquire()) def test_wrong_product_types(): @Source.produces(a=str, b=int) class AMaker(Source.Source): def __init__(self, config): super().__init__(config) def acquire(self): return {"a": 42, "b": 17} maker = AMaker({"channel_name": "test"}) expected_err_msg = "The following products have the wrong types:\n" + r" - 'a' \(expected 'str', got 'int'\)" with pytest.raises(Exception, match=expected_err_msg): verify_products(maker, maker.acquire()) def test_supports_config(): expected_err_msg = ( "An error occurred while processing the parameter 'conflicting_types':\n" + "The specified type 'int' conflicts with the type of the default value " + r"'hello' \(type 'str'\)" ) with pytest.raises(Exception, match=expected_err_msg): @Source.supports_config(Parameter("conflicting_types", type=int, default="hello")) class _(Source.Source): pass
[((30, 5, 30, 27), 'decisionengine.framework.modules.Source.produces', 'Source.produces', (), '', False, 'from decisionengine.framework.modules import Publisher, Source\n'), ((50, 5, 50, 34), 'decisionengine.framework.modules.Source.produces', 'Source.produces', (), '', False, 'from decisionengine.framework.modules import Publisher, Source\n'), ((12, 9, 12, 76), 'pytest.raises', 'pytest.raises', (), '', False, 'import pytest\n'), ((14, 9, 14, 34), 'decisionengine.framework.modules.Publisher.consumes', 'Publisher.consumes', (), '', False, 'from decisionengine.framework.modules import Publisher, Source\n'), ((15, 9, 15, 36), 'decisionengine.framework.modules.Publisher.consumes', 'Publisher.consumes', (), '', False, 'from decisionengine.framework.modules import Publisher, Source\n'), ((21, 9, 21, 76), 'pytest.raises', 'pytest.raises', (), '', False, 'import pytest\n'), ((23, 9, 23, 31), 'decisionengine.framework.modules.Source.produces', 'Source.produces', (), '', False, 'from decisionengine.framework.modules import Publisher, Source\n'), ((24, 9, 24, 32), 'decisionengine.framework.modules.Source.produces', 'Source.produces', (), '', False, 'from decisionengine.framework.modules import Publisher, Source\n'), ((45, 9, 45, 57), 'pytest.raises', 'pytest.raises', (), '', False, 'import pytest\n'), ((60, 9, 60, 57), 'pytest.raises', 'pytest.raises', (), '', False, 'import pytest\n'), ((70, 9, 70, 57), 'pytest.raises', 'pytest.raises', (), '', False, 'import pytest\n'), ((72, 32, 72, 89), 'decisionengine.framework.modules.Source.Parameter', 'Parameter', (), '', False, 'from decisionengine.framework.modules.Source import Parameter\n')]
RobinRojowiec/intent-recognition-in-doctor-patient-interviews
models/cnn_layer.py
b91c7a9f3ad70edd0f39b56e3219f48d1fcf2078
import torch import torch.nn as nn from torch.nn.functional import max_pool1d from utility.model_parameter import Configuration, ModelParameter class CNNLayer(nn.Module): def __init__(self, config: Configuration, vocab_size=30000, use_embeddings=True, embed_dim=-1, **kwargs): super(CNNLayer, self).__init__() # set parameters self.max_seq_length = config.get_int(ModelParameter.MAX_LENGTH) self.use_gpu = torch.cuda.is_available() if embed_dim == -1: self.embedding_dim = config.get_int(ModelParameter.EMBEDDING_SIZE) else: self.embedding_dim = embed_dim self.max_length = config.get_int(ModelParameter.MAX_LENGTH) self.use_embeddings = use_embeddings self.conv_out_channels = config.get_int(ModelParameter.CHANNELS) self.filter_sizes = [2] # create and initialize layers self.embedding = nn.Embedding(vocab_size, self.embedding_dim) self.relu = nn.ReLU() self.convolutions = nn.ModuleList( [nn.Conv2d(1, self.conv_out_channels, (K, self.embedding_dim)) for K in self.filter_sizes]) self.dropout = nn.Dropout(0.3) def get_output_length(self): return len(self.filter_sizes) * self.conv_out_channels def forward(self, samples, **kwargs): encoded_samples = self.encode(samples) return encoded_samples def encode(self, samples): x = self.embedding(samples) x = x.unsqueeze(1) x = [self.relu(conv(x)).squeeze(3) for conv in self.convolutions] x = [max_pool1d(i, i.size(2)).squeeze(2) for i in x] x = self.dropout(torch.cat(x, 1)) return x
[((14, 23, 14, 48), 'torch.cuda.is_available', 'torch.cuda.is_available', ({}, {}), '()', False, 'import torch\n'), ((25, 25, 25, 69), 'torch.nn.Embedding', 'nn.Embedding', ({(25, 38, 25, 48): 'vocab_size', (25, 50, 25, 68): 'self.embedding_dim'}, {}), '(vocab_size, self.embedding_dim)', True, 'import torch.nn as nn\n'), ((26, 20, 26, 29), 'torch.nn.ReLU', 'nn.ReLU', ({}, {}), '()', True, 'import torch.nn as nn\n'), ((29, 23, 29, 38), 'torch.nn.Dropout', 'nn.Dropout', ({(29, 34, 29, 37): '0.3'}, {}), '(0.3)', True, 'import torch.nn as nn\n'), ((43, 25, 43, 40), 'torch.cat', 'torch.cat', ({(43, 35, 43, 36): 'x', (43, 38, 43, 39): '1'}, {}), '(x, 1)', False, 'import torch\n'), ((28, 13, 28, 74), 'torch.nn.Conv2d', 'nn.Conv2d', ({(28, 23, 28, 24): '1', (28, 26, 28, 48): 'self.conv_out_channels', (28, 50, 28, 73): '(K, self.embedding_dim)'}, {}), '(1, self.conv_out_channels, (K, self.embedding_dim))', True, 'import torch.nn as nn\n')]
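The layer above follows the usual convolution-over-token-embeddings pattern (embed, 2-D convolve per filter size, max-pool over time, concatenate). A self-contained sketch of that pattern with made-up dimensions, independent of the repo's Configuration class:

import torch
import torch.nn as nn
from torch.nn.functional import max_pool1d

emb = nn.Embedding(1000, 64)             # vocabulary of 1000, 64-dim embeddings
conv = nn.Conv2d(1, 16, (2, 64))          # one filter size (2) with 16 output channels
tokens = torch.randint(0, 1000, (8, 20))  # batch of 8 sequences, length 20

x = emb(tokens).unsqueeze(1)              # (8, 1, 20, 64)
x = torch.relu(conv(x)).squeeze(3)        # (8, 16, 19)
features = max_pool1d(x, x.size(2)).squeeze(2)  # (8, 16), one vector per sequence
print(features.shape)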
alexgorji/music_score
musicscore/musicxml/types/complextypes/backup.py
b4176da52295361f3436826903485c5cb8054c5e
''' <xs:complexType name="backup"> <xs:annotation> <xs:documentation></xs:documentation> </xs:annotation> <xs:sequence> <xs:group ref="duration"/> <xs:group ref="editorial"/> </xs:sequence> </xs:complexType> ''' from musicscore.dtd.dtd import Sequence, GroupReference, Element from musicscore.musicxml.groups.common import Editorial from musicscore.musicxml.elements.note import Duration from musicscore.musicxml.types.complextypes.complextype import ComplexType class ComplexTypeBackup(ComplexType): """ The backup and forward elements are required to coordinate multiple voices in one part, including music on multiple staves. The backup type is generally used to move between voices and staves. Thus the backup element does not include voice or staff elements. Duration values should always be positive, and should not cross measure boundaries or mid-measure changes in the divisions value. """ _DTD = Sequence( Element(Duration), GroupReference(Editorial) ) def __init__(self, tag, *args, **kwargs): super().__init__(tag=tag, *args, **kwargs)
[((26, 8, 26, 25), 'musicscore.dtd.dtd.Element', 'Element', ({(26, 16, 26, 24): 'Duration'}, {}), '(Duration)', False, 'from musicscore.dtd.dtd import Sequence, GroupReference, Element\n'), ((27, 8, 27, 33), 'musicscore.dtd.dtd.GroupReference', 'GroupReference', ({(27, 23, 27, 32): 'Editorial'}, {}), '(Editorial)', False, 'from musicscore.dtd.dtd import Sequence, GroupReference, Element\n')]
AlexandrosPlessias/NLP-Greek-Presentations
NLP programmes in Python/9.Text Clustering/kmeans.py
4ae9d635a777f24bae5238b9f195bd17d00040ea
import nltk import re import csv import string import collections import numpy as np from nltk.corpus import wordnet from nltk.corpus import stopwords from nltk.stem import WordNetLemmatizer from nltk.tokenize import WordPunctTokenizer from sklearn.metrics import classification_report from sklearn.metrics import confusion_matrix """"Pre - Processing: tokenization, stopwords removal, remove words(with size 1), lower capitalization & lemmatization""" def preprocessing(text): # text = text.decode("utf8") # remove punctuation text = punctuation(text) # remove extra spaces text = re.sub(' +', ' ', text) # tokenize into words tokens = text.split(" ") # remove number tokens = [word for word in tokens if word.isalpha()] # remove stopwords stop = stopwords.words('english') tokens = [token for token in tokens if token not in stop] # remove words less than three letters tokens = [word for word in tokens if len(word) >= 3] # lower capitalization tokens = [word.lower() for word in tokens] # keep only real words tokens = KeepRealWords(tokens) # lemmatize lmtzr = WordNetLemmatizer() tokens = [lmtzr.lemmatize(word) for word in tokens] # return only tokens with size over 1 if len(tokens) > 0: preprocessed_text = " ".join(tokens) return preprocessed_text return None def KeepRealWords(text): wpt = WordPunctTokenizer() only_recognized_words = [] for s in text: tokens = wpt.tokenize(s) if tokens: # check if empty string for t in tokens: if wordnet.synsets(t): only_recognized_words.append(t) # only keep recognized words return only_recognized_words def punctuation(text): translator = str.maketrans(string.punctuation, ' '*len(string.punctuation)) # map punctuation to space return (text.translate(translator)) """"Read Data""" # Open sms corpus. sms_file = open('SMSSpamCollection.txt', encoding="utf8") # Check the structure of this file! sms_data = [] sms_labels = [] # CSV Reader LABEL & DATA are separated by TAB. csv_reader = csv.reader(sms_file,delimiter='\t') # Store labels and data. for line in csv_reader: sms_text = preprocessing(line[1]) if ( sms_text != None): # adding the sms_id sms_labels.append( line[0]) # adding the cleaned text We are calling preprocessing method sms_data.append(sms_text) sms_file.close() """Sampling steps (70:30)""" trainset_size = int(round(len(sms_data)*0.70)) # I chose this threshold for 70:30 train and test split. print('The training set size for this classifier is ' + str(trainset_size) + '\n') x_train = np.array([''.join(el) for el in sms_data[0:trainset_size]]) # train sms_data (70%). y_train = np.array([el for el in sms_labels[0:trainset_size]]) # train sms_labels (70%). x_test = np.array([''.join(el) for el in sms_data[trainset_size+1:len(sms_data)]]) # test sms_data (30%). y_test = np.array([el for el in sms_labels[trainset_size+1:len(sms_labels)]]) # test sms_labels (30%). 
"""We are building a TFIDF vectorizer here""" from sklearn.feature_extraction.text import TfidfVectorizer vectorizer = TfidfVectorizer(min_df=2, ngram_range=(1, 2), stop_words='english', strip_accents='unicode', norm='l2') X_train = vectorizer.fit_transform(x_train) X_test = vectorizer.transform(x_test) """Text Clustering - K Means""" from sklearn.cluster import KMeans, MiniBatchKMeans print('--> Text Clustering - K Means') true_k = 5 km = KMeans(n_clusters=true_k, init='k-means++', max_iter=100, n_init=1) kmini = MiniBatchKMeans(n_clusters=true_k, init='k-means++', n_init=1, init_size=1000, batch_size=1000, verbose=False) #verbose=opts.verbose # we are using the same test,train data in TFIDF form as we did in text classification km_model = km.fit(X_train) print("For K-mean clustering ") clustering = collections.defaultdict(list) for idx, label in enumerate(km_model.labels_): clustering[label].append(idx) print(clustering) kmini_model = kmini.fit(X_train) print("For K-mean Mini batch clustering ") clustering = collections.defaultdict(list) for idx, label in enumerate(kmini_model.labels_): clustering[label].append(idx) print(clustering)
[((82, 13, 82, 48), 'csv.reader', 'csv.reader', (), '', False, 'import csv\n'), ((99, 10, 99, 62), 'numpy.array', 'np.array', ({(99, 19, 99, 61): '[el for el in sms_labels[0:trainset_size]]'}, {}), '([el for el in sms_labels[0:trainset_size]])', True, 'import numpy as np\n'), ((105, 13, 105, 119), 'sklearn.feature_extraction.text.TfidfVectorizer', 'TfidfVectorizer', (), '', False, 'from sklearn.feature_extraction.text import TfidfVectorizer\n'), ((113, 5, 113, 72), 'sklearn.cluster.KMeans', 'KMeans', (), '', False, 'from sklearn.cluster import KMeans, MiniBatchKMeans\n'), ((114, 8, 114, 118), 'sklearn.cluster.MiniBatchKMeans', 'MiniBatchKMeans', (), '', False, 'from sklearn.cluster import KMeans, MiniBatchKMeans\n'), ((119, 13, 119, 42), 'collections.defaultdict', 'collections.defaultdict', ({(119, 37, 119, 41): 'list'}, {}), '(list)', False, 'import collections\n'), ((126, 13, 126, 42), 'collections.defaultdict', 'collections.defaultdict', ({(126, 37, 126, 41): 'list'}, {}), '(list)', False, 'import collections\n'), ((24, 11, 24, 34), 're.sub', 're.sub', ({(24, 18, 24, 22): '""" +"""', (24, 24, 24, 27): '""" """', (24, 29, 24, 33): 'text'}, {}), "(' +', ' ', text)", False, 'import re\n'), ((33, 11, 33, 37), 'nltk.corpus.stopwords.words', 'stopwords.words', ({(33, 27, 33, 36): '"""english"""'}, {}), "('english')", False, 'from nltk.corpus import stopwords\n'), ((46, 12, 46, 31), 'nltk.stem.WordNetLemmatizer', 'WordNetLemmatizer', ({}, {}), '()', False, 'from nltk.stem import WordNetLemmatizer\n'), ((58, 10, 58, 30), 'nltk.tokenize.WordPunctTokenizer', 'WordPunctTokenizer', ({}, {}), '()', False, 'from nltk.tokenize import WordPunctTokenizer\n'), ((65, 19, 65, 37), 'nltk.corpus.wordnet.synsets', 'wordnet.synsets', ({(65, 35, 65, 36): 't'}, {}), '(t)', False, 'from nltk.corpus import wordnet\n')]
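A possible continuation of the clustering script above: printing the highest-weight TF-IDF terms per K-means centroid, reusing the vectorizer, km_model and true_k objects it already defines (on scikit-learn older than 1.0, get_feature_names() would be needed instead of get_feature_names_out()).

terms = vectorizer.get_feature_names_out()
order = km_model.cluster_centers_.argsort()[:, ::-1]   # terms sorted by centroid weight
for label in range(true_k):
    top_terms = [terms[i] for i in order[label, :8]]
    print('cluster %d: %s' % (label, ', '.join(top_terms)))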
paTRICK-swk/P-STMO
common/utils.py
def1bff3fcc4f1e3b1dd69c8d3c2d77f412e3b75
import torch import numpy as np import hashlib from torch.autograd import Variable import os def deterministic_random(min_value, max_value, data): digest = hashlib.sha256(data.encode()).digest() raw_value = int.from_bytes(digest[:4], byteorder='little', signed=False) return int(raw_value / (2 ** 32 - 1) * (max_value - min_value)) + min_value def mpjpe_cal(predicted, target): assert predicted.shape == target.shape return torch.mean(torch.norm(predicted - target, dim=len(target.shape) - 1)) def test_calculation(predicted, target, action, error_sum, data_type, subject, MAE=False): error_sum = mpjpe_by_action_p1(predicted, target, action, error_sum) if not MAE: error_sum = mpjpe_by_action_p2(predicted, target, action, error_sum) return error_sum def mpjpe_by_action_p1(predicted, target, action, action_error_sum): assert predicted.shape == target.shape batch_num = predicted.size(0) frame_num = predicted.size(1) dist = torch.mean(torch.norm(predicted - target, dim=len(target.shape) - 1), dim=len(target.shape) - 2) if len(set(list(action))) == 1: end_index = action[0].find(' ') if end_index != -1: action_name = action[0][:end_index] else: action_name = action[0] action_error_sum[action_name]['p1'].update(torch.mean(dist).item()*batch_num*frame_num, batch_num*frame_num) else: for i in range(batch_num): end_index = action[i].find(' ') if end_index != -1: action_name = action[i][:end_index] else: action_name = action[i] action_error_sum[action_name]['p1'].update(torch.mean(dist[i]).item()*frame_num, frame_num) return action_error_sum def mpjpe_by_action_p2(predicted, target, action, action_error_sum): assert predicted.shape == target.shape num = predicted.size(0) pred = predicted.detach().cpu().numpy().reshape(-1, predicted.shape[-2], predicted.shape[-1]) gt = target.detach().cpu().numpy().reshape(-1, target.shape[-2], target.shape[-1]) dist = p_mpjpe(pred, gt) if len(set(list(action))) == 1: end_index = action[0].find(' ') if end_index != -1: action_name = action[0][:end_index] else: action_name = action[0] action_error_sum[action_name]['p2'].update(np.mean(dist) * num, num) else: for i in range(num): end_index = action[i].find(' ') if end_index != -1: action_name = action[i][:end_index] else: action_name = action[i] action_error_sum[action_name]['p2'].update(np.mean(dist), 1) return action_error_sum def p_mpjpe(predicted, target): assert predicted.shape == target.shape muX = np.mean(target, axis=1, keepdims=True) muY = np.mean(predicted, axis=1, keepdims=True) X0 = target - muX Y0 = predicted - muY normX = np.sqrt(np.sum(X0 ** 2, axis=(1, 2), keepdims=True)) normY = np.sqrt(np.sum(Y0 ** 2, axis=(1, 2), keepdims=True)) X0 /= normX Y0 /= normY H = np.matmul(X0.transpose(0, 2, 1), Y0) U, s, Vt = np.linalg.svd(H) V = Vt.transpose(0, 2, 1) R = np.matmul(V, U.transpose(0, 2, 1)) sign_detR = np.sign(np.expand_dims(np.linalg.det(R), axis=1)) V[:, :, -1] *= sign_detR s[:, -1] *= sign_detR.flatten() R = np.matmul(V, U.transpose(0, 2, 1)) tr = np.expand_dims(np.sum(s, axis=1, keepdims=True), axis=2) a = tr * normX / normY t = muX - a * np.matmul(muY, R) predicted_aligned = a * np.matmul(predicted, R) + t return np.mean(np.linalg.norm(predicted_aligned - target, axis=len(target.shape) - 1), axis=len(target.shape) - 2) def define_actions( action ): actions = ["Directions","Discussion","Eating","Greeting", "Phoning","Photo","Posing","Purchases", "Sitting","SittingDown","Smoking","Waiting", "WalkDog","Walking","WalkTogether"] if action == "All" or action == "all" or action == '*': return actions if not action in 
actions: raise( ValueError, "Unrecognized action: %s" % action ) return [action] def define_error_list(actions): error_sum = {} error_sum.update({actions[i]: {'p1':AccumLoss(), 'p2':AccumLoss()} for i in range(len(actions))}) return error_sum class AccumLoss(object): def __init__(self): self.val = 0 self.avg = 0 self.sum = 0 self.count = 0 def update(self, val, n=1): self.val = val self.sum += val self.count += n self.avg = self.sum / self.count def get_varialbe(split, target): num = len(target) var = [] if split == 'train': for i in range(num): temp = Variable(target[i], requires_grad=False).contiguous().type(torch.cuda.FloatTensor) var.append(temp) else: for i in range(num): temp = Variable(target[i]).contiguous().cuda().type(torch.cuda.FloatTensor) var.append(temp) return var def print_error(data_type, action_error_sum, is_train): mean_error_p1, mean_error_p2 = print_error_action(action_error_sum, is_train) return mean_error_p1, mean_error_p2 def print_error_action(action_error_sum, is_train): mean_error_each = {'p1': 0.0, 'p2': 0.0} mean_error_all = {'p1': AccumLoss(), 'p2': AccumLoss()} if is_train == 0: print("{0:=^12} {1:=^10} {2:=^8}".format("Action", "p#1 mm", "p#2 mm")) for action, value in action_error_sum.items(): if is_train == 0: print("{0:<12} ".format(action), end="") mean_error_each['p1'] = action_error_sum[action]['p1'].avg * 1000.0 mean_error_all['p1'].update(mean_error_each['p1'], 1) mean_error_each['p2'] = action_error_sum[action]['p2'].avg * 1000.0 mean_error_all['p2'].update(mean_error_each['p2'], 1) if is_train == 0: print("{0:>6.2f} {1:>10.2f}".format(mean_error_each['p1'], mean_error_each['p2'])) if is_train == 0: print("{0:<12} {1:>6.2f} {2:>10.2f}".format("Average", mean_error_all['p1'].avg, \ mean_error_all['p2'].avg)) return mean_error_all['p1'].avg, mean_error_all['p2'].avg def save_model(previous_name, save_dir,epoch, data_threshold, model, model_name): # if os.path.exists(previous_name): # os.remove(previous_name) torch.save(model.state_dict(), '%s/%s_%d_%d.pth' % (save_dir, model_name, epoch, data_threshold * 100)) previous_name = '%s/%s_%d_%d.pth' % (save_dir, model_name, epoch, data_threshold * 100) return previous_name def save_model_new(save_dir,epoch, data_threshold, lr, optimizer, model, model_name): # if os.path.exists(previous_name): # os.remove(previous_name) # torch.save(model.state_dict(), # '%s/%s_%d_%d.pth' % (save_dir, model_name, epoch, data_threshold * 100)) torch.save({ 'epoch': epoch, 'lr': lr, 'optimizer': optimizer.state_dict(), 'model_pos': model.state_dict(), }, '%s/%s_%d_%d.pth' % (save_dir, model_name, epoch, data_threshold * 100))
[((81, 10, 81, 48), 'numpy.mean', 'np.mean', (), '', True, 'import numpy as np\n'), ((82, 10, 82, 51), 'numpy.mean', 'np.mean', (), '', True, 'import numpy as np\n'), ((94, 15, 94, 31), 'numpy.linalg.svd', 'np.linalg.svd', ({(94, 29, 94, 30): 'H'}, {}), '(H)', True, 'import numpy as np\n'), ((87, 20, 87, 63), 'numpy.sum', 'np.sum', (), '', True, 'import numpy as np\n'), ((88, 20, 88, 63), 'numpy.sum', 'np.sum', (), '', True, 'import numpy as np\n'), ((103, 24, 103, 56), 'numpy.sum', 'np.sum', (), '', True, 'import numpy as np\n'), ((98, 39, 98, 55), 'numpy.linalg.det', 'np.linalg.det', ({(98, 53, 98, 54): 'R'}, {}), '(R)', True, 'import numpy as np\n'), ((106, 18, 106, 35), 'numpy.matmul', 'np.matmul', ({(106, 28, 106, 31): 'muY', (106, 33, 106, 34): 'R'}, {}), '(muY, R)', True, 'import numpy as np\n'), ((108, 28, 108, 51), 'numpy.matmul', 'np.matmul', ({(108, 38, 108, 47): 'predicted', (108, 49, 108, 50): 'R'}, {}), '(predicted, R)', True, 'import numpy as np\n'), ((65, 51, 65, 64), 'numpy.mean', 'np.mean', ({(65, 59, 65, 63): 'dist'}, {}), '(dist)', True, 'import numpy as np\n'), ((73, 55, 73, 68), 'numpy.mean', 'np.mean', ({(73, 63, 73, 67): 'dist'}, {}), '(dist)', True, 'import numpy as np\n'), ((39, 51, 39, 67), 'torch.mean', 'torch.mean', ({(39, 62, 39, 66): 'dist'}, {}), '(dist)', False, 'import torch\n'), ((48, 55, 48, 74), 'torch.mean', 'torch.mean', ({(48, 66, 48, 73): 'dist[i]'}, {}), '(dist[i])', False, 'import torch\n'), ((154, 19, 154, 59), 'torch.autograd.Variable', 'Variable', (), '', False, 'from torch.autograd import Variable\n'), ((158, 19, 158, 38), 'torch.autograd.Variable', 'Variable', ({(158, 28, 158, 37): 'target[i]'}, {}), '(target[i])', False, 'from torch.autograd import Variable\n')]
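A minimal synthetic check of the error metrics defined above; random tensors stand in for real 3D poses, so the printed numbers are meaningless and only demonstrate the expected shapes.

import torch

pred = torch.rand(2, 9, 17, 3)   # batch, frames, joints, xyz
gt = torch.rand(2, 9, 17, 3)

print('MPJPE:', mpjpe_cal(pred, gt).item())
print('P-MPJPE:', p_mpjpe(pred.numpy().reshape(-1, 17, 3),
                          gt.numpy().reshape(-1, 17, 3)).mean())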
Sailer43/CSE5914Project
personal_ad/advice/converter.py
ebb47bff9a6101fac5173b5520e6002563da67d5
from ibm_watson import TextToSpeechV1, SpeechToTextV1, DetailedResponse from os import system from json import loads class Converter: k_s2t_api_key = "0pxCnJQ_r5Yy3SZDRhYS4XshrTMJyZEsuc45SbBcfGgf" k_t2s_api_key = "euoR7ZdLMOBd29wP1fNaZFJsqwKt45TUmwcVwpzbQBcA" k_s2t_url = "https://stream.watsonplatform.net/speech-to-text/api" k_t2s_url = "https://gateway-wdc.watsonplatform.net/text-to-speech/api" k_t2s_voice = "en-US_AllisonVoice" k_t2s_format = "audio/webm" k_st2_model = "en-US_NarrowbandModel" def __init__(self): self.s2t = SpeechToTextV1(iam_apikey=self.k_s2t_api_key, url=self.k_s2t_url) self.t2s = TextToSpeechV1(iam_apikey=self.k_t2s_api_key, url=self.k_t2s_url) def read(self, string: str): return self.t2s.synthesize( string, voice=self.k_t2s_voice, accept=self.k_t2s_format ).get_result().content def listen(self, audio_input): try: result = self.s2t.recognize(audio_input, model=self.k_st2_model) result = loads(str(result)) result = result["result"]["results"][0]["alternatives"][0]['transcript'] except Exception: return False, "I don't understand what you are saying." return True, str(result) def main(): pass if __name__ == '__main__': main()
[((18, 19, 18, 84), 'ibm_watson.SpeechToTextV1', 'SpeechToTextV1', (), '', False, 'from ibm_watson import TextToSpeechV1, SpeechToTextV1, DetailedResponse\n'), ((19, 19, 19, 84), 'ibm_watson.TextToSpeechV1', 'TextToSpeechV1', (), '', False, 'from ibm_watson import TextToSpeechV1, SpeechToTextV1, DetailedResponse\n')]
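A hypothetical session with the Converter class above; it needs valid IBM Watson credentials and network access, and the audio file names are made up.

conv = Converter()

# text-to-speech: returns webm audio bytes
audio = conv.read('Hello, how can I help you today?')
with open('greeting.webm', 'wb') as f:
    f.write(audio)

# speech-to-text: returns (success, transcript or error message)
with open('question.webm', 'rb') as f:
    ok, text = conv.listen(f)
print(ok, text)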
neel4os/warg-client
warg_client/client/apis/controller/attack_controller.py
4d97904977a6f6865610afd04ca00ddfbad38ff9
from subprocess import run def perform_shutdown(body): arg = "" if body["reboot"]: _is_reboot = arg + "-r" else: _is_reboot = arg + "-h" time_to_shutdown = str(body['timeToShutdown']) result = run(["/sbin/shutdown", _is_reboot, time_to_shutdown]) return body
[((11, 13, 11, 66), 'subprocess.run', 'run', ({(11, 17, 11, 65): "['/sbin/shutdown', _is_reboot, time_to_shutdown]"}, {}), "(['/sbin/shutdown', _is_reboot, time_to_shutdown])", False, 'from subprocess import run\n')]
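An illustrative call to the handler above; it assumes the process has privileges to run /sbin/shutdown, and the '+5' time string (five minutes from now) is an assumption about the expected payload format.

perform_shutdown({'reboot': True, 'timeToShutdown': '+5'})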
2600box/harvest
torrents/migrations/0011_auto_20190223_2345.py
57264c15a3fba693b4b58d0b6d4fbf4bd5453bbd
# Generated by Django 2.1.7 on 2019-02-23 23:45 from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('torrents', '0010_auto_20190223_0326'), ] operations = [ migrations.AlterModelOptions( name='realm', options={'ordering': ('name',)}, ), ]
[((13, 8, 16, 9), 'django.db.migrations.AlterModelOptions', 'migrations.AlterModelOptions', (), '', False, 'from django.db import migrations\n')]
whyh/FavourDemo
common/__init__.py
1b19882fb2e79dee9c3332594bf45c91e7476eaa
from . import (emoji as emj, keyboards as kb, telegram as tg, phrases as phr, finance as fin, utils, glossary, bots, gcp, sed, db)
[]
aneumeier/questions
questions/serializers.py
fe5451b70d85cd5203b4cb624103c1eb154587d9
#!/usr/bin/env python # -*- coding: utf-8 """ :mod:`question.serializers` -- serializers """ from rest_framework import serializers from .models import Question, PossibleAnswer from category.models import Category class PossibleAnswerSerializer(serializers.ModelSerializer): class Meta: model = PossibleAnswer fields = ( 'id', 'possible_answer', ) class QuestionSerializer(serializers.ModelSerializer): category = serializers.StringRelatedField() possible_answer = serializers.StringRelatedField(many=True) class Meta: model = Question fields = ( 'id', 'question', 'category', 'possible_answer', 'male_answer_count', 'female_answer_count', 'all_answer_count', ) class CategorySerializer(serializers.ModelSerializer): def count(self): """ {{ category.question_set.count }} """ return self.question_set.count() class Meta: model = Category fields = ( 'id', 'title', )
[((23, 15, 23, 47), 'rest_framework.serializers.StringRelatedField', 'serializers.StringRelatedField', ({}, {}), '()', False, 'from rest_framework import serializers\n'), ((24, 22, 24, 63), 'rest_framework.serializers.StringRelatedField', 'serializers.StringRelatedField', (), '', False, 'from rest_framework import serializers\n')]
JaySon-Huang/SecertPhotos
widgets/ui_ShowResultDialog.py
e741cc26c19a5b249d45cc70959ac6817196cb8a
# -*- coding: utf-8 -*- # Form implementation generated from reading ui file 'src/ui_ShowResultDialog.ui' # # Created: Sat May 16 17:05:43 2015 # by: PyQt5 UI code generator 5.4 # # WARNING! All changes made in this file will be lost! from PyQt5 import QtCore, QtGui, QtWidgets class Ui_Dialog(object): def setupUi(self, Dialog): Dialog.setObjectName("Dialog") Dialog.resize(400, 300) self.verticalLayout = QtWidgets.QVBoxLayout(Dialog) self.verticalLayout.setObjectName("verticalLayout") self.lb_image = ImageLabel(Dialog) sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding) sizePolicy.setHorizontalStretch(0) sizePolicy.setVerticalStretch(0) sizePolicy.setHeightForWidth(self.lb_image.sizePolicy().hasHeightForWidth()) self.lb_image.setSizePolicy(sizePolicy) self.lb_image.setMinimumSize(QtCore.QSize(100, 100)) self.lb_image.setAlignment(QtCore.Qt.AlignCenter) self.lb_image.setObjectName("lb_image") self.verticalLayout.addWidget(self.lb_image) self.hLayout = QtWidgets.QHBoxLayout() self.hLayout.setObjectName("hLayout") spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum) self.hLayout.addItem(spacerItem) self.btn_save = QtWidgets.QPushButton(Dialog) self.btn_save.setObjectName("btn_save") self.hLayout.addWidget(self.btn_save) spacerItem1 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum) self.hLayout.addItem(spacerItem1) self.verticalLayout.addLayout(self.hLayout) self.retranslateUi(Dialog) QtCore.QMetaObject.connectSlotsByName(Dialog) def retranslateUi(self, Dialog): _translate = QtCore.QCoreApplication.translate Dialog.setWindowTitle(_translate("Dialog", "Dialog")) self.lb_image.setText(_translate("Dialog", "Image Label")) self.btn_save.setText(_translate("Dialog", "Save it")) from widgets.ImageLabel import ImageLabel
[((16, 30, 16, 59), 'PyQt5.QtWidgets.QVBoxLayout', 'QtWidgets.QVBoxLayout', ({(16, 52, 16, 58): 'Dialog'}, {}), '(Dialog)', False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((18, 24, 18, 42), 'widgets.ImageLabel.ImageLabel', 'ImageLabel', ({(18, 35, 18, 41): 'Dialog'}, {}), '(Dialog)', False, 'from widgets.ImageLabel import ImageLabel\n'), ((19, 21, 19, 108), 'PyQt5.QtWidgets.QSizePolicy', 'QtWidgets.QSizePolicy', ({(19, 43, 19, 74): 'QtWidgets.QSizePolicy.Expanding', (19, 76, 19, 107): 'QtWidgets.QSizePolicy.Expanding'}, {}), '(QtWidgets.QSizePolicy.Expanding, QtWidgets.\n QSizePolicy.Expanding)', False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((28, 23, 28, 46), 'PyQt5.QtWidgets.QHBoxLayout', 'QtWidgets.QHBoxLayout', ({}, {}), '()', False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((30, 21, 30, 114), 'PyQt5.QtWidgets.QSpacerItem', 'QtWidgets.QSpacerItem', ({(30, 43, 30, 45): '40', (30, 47, 30, 49): '20', (30, 51, 30, 82): 'QtWidgets.QSizePolicy.Expanding', (30, 84, 30, 113): 'QtWidgets.QSizePolicy.Minimum'}, {}), '(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.\n QSizePolicy.Minimum)', False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((32, 24, 32, 53), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', ({(32, 46, 32, 52): 'Dialog'}, {}), '(Dialog)', False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((35, 22, 35, 115), 'PyQt5.QtWidgets.QSpacerItem', 'QtWidgets.QSpacerItem', ({(35, 44, 35, 46): '40', (35, 48, 35, 50): '20', (35, 52, 35, 83): 'QtWidgets.QSizePolicy.Expanding', (35, 85, 35, 114): 'QtWidgets.QSizePolicy.Minimum'}, {}), '(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.\n QSizePolicy.Minimum)', False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((40, 8, 40, 53), 'PyQt5.QtCore.QMetaObject.connectSlotsByName', 'QtCore.QMetaObject.connectSlotsByName', ({(40, 46, 40, 52): 'Dialog'}, {}), '(Dialog)', False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((24, 37, 24, 59), 'PyQt5.QtCore.QSize', 'QtCore.QSize', ({(24, 50, 24, 53): '(100)', (24, 55, 24, 58): '(100)'}, {}), '(100, 100)', False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n')]
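Generated Ui_* classes like the one above are normally attached to a live dialog by a thin wrapper. A conventional sketch, assuming it runs inside the same repo so the widgets package (including ImageLabel) is importable:

import sys
from PyQt5 import QtWidgets
from widgets.ui_ShowResultDialog import Ui_Dialog  # path inferred from the record above

app = QtWidgets.QApplication(sys.argv)
dialog = QtWidgets.QDialog()
ui = Ui_Dialog()
ui.setupUi(dialog)   # builds the ImageLabel and Save button onto the dialog
dialog.show()
sys.exit(app.exec_())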
zomGreg/mixcoatl
mixcoatl/admin/api_key.py
dd8d7e206682955b251d7f858fffee56b11df8c6
""" mixcoatl.admin.api_key ---------------------- Implements access to the DCM ApiKey API """ from mixcoatl.resource import Resource from mixcoatl.decorators.lazy import lazy_property from mixcoatl.decorators.validations import required_attrs from mixcoatl.utils import uncamel, camelize, camel_keys, uncamel_keys import json class ApiKey(Resource): """An API key is an access key and secret key that provide API access into DCM.""" PATH = 'admin/ApiKey' COLLECTION_NAME = 'apiKeys' PRIMARY_KEY = 'access_key' def __init__(self, access_key=None, endpoint=None, *args, **kwargs): Resource.__init__(self, endpoint=endpoint) self.__access_key = access_key @property def access_key(self): """The primary identifier of the `ApiKey`. Same as `DCM_ACCESS_KEY`""" return self.__access_key @lazy_property def account(self): """`dict` - The account with which this API key is associated.""" return self.__account @lazy_property def activation(self): """`str` - The date and time when this key was activated.""" return self.__activation @lazy_property def expiration(self): """`str` - The date and time when this API key should automatically be made inactivate.""" return self.__expiration @expiration.setter def expiration(self, e): self.__expiration = e @lazy_property def customer(self): """`dict` - The customer to whom this API key belongs.""" return self.__customer @lazy_property def customer_management_key(self): """`bool` - Identifies whether or not this key can be used across all customer accounts.""" return self.__customer_management_key @lazy_property def description(self): """`str` - A user-friendly description of this API key.""" return self.__description @description.setter def description(self, d): self.__description = d @lazy_property def name(self): """`str` - The user-friendly name used to identify the key.""" return self.__name @name.setter def name(self, n): self.__name = n @lazy_property def secret_key(self): """`str` - The secret part of this API key.""" return self.__secret_key @lazy_property def state(self): """`str` - The status of the key *(i.e. `ACTIVE`)*""" return self.__state @lazy_property def system_management_key(self): """`bool` - Identifies if the key can be used for DCM system management functions""" return self.__system_management_key @lazy_property def user(self): """`dict` - The user associated with this API key. Account-level keys return `{'user_id': -1}`""" return self.__user @required_attrs(['description', 'name']) def create(self): """Call the API to generate an API key from the current instance of `ApiKey`""" payload = { 'generateApiKey': [{'description': self.description, 'name': self.name}]} s = self.post(data=json.dumps(payload)) if self.last_error is None: self.__access_key = s['apiKeys'][0]['accessKey'] self.load() else: raise ApiKeyGenerationException(self.last_error) def invalidate(self, reason='key deleted via mixcoatl'): """Call the API to invalidate the current instance of `ApiKey` This is the same as deleting the api key :param reason: the reason for invalidating the key :type reason: str. :returns: True :raises: :class:`ApiKeyInvalidationException` """ params = {'reason': reason} self.delete(params=params) if self.last_error is None: return True else: raise ApiKeyInvalidationException(self.last_error) @classmethod def generate_api_key(cls, key_name, description, expiration=None): """Generates a new API key >>> ApiKey.generate_api_key('my-api-key', 'this is my api key') {'access_key':'ABCDEFGHIJKL':....} :param key_name: the name for the key :type key_name: str. 
:param description: the description for the key :type description: str. :param expiration: *unused for now* :type expiration: str. :returns: :class:`ApiKey` :raises: :class:`ApiKeyGenerationException` """ a = cls() a.name = key_name a.description = description a.create() return a @classmethod def all(cls, keys_only=False, endpoint=None, **kwargs): """Get all api keys .. note:: The keys used to make the request determine results visibility :param keys_only: Only return `access_key` instead of `ApiKey` objects :type keys_only: bool. :param detail: The level of detail to return - `basic` or `extended` :type detail: str. :param account_id: Display all system keys belonging to `account_id` :type account_id: int. :param user_id: Display all keys belonging to `user_id` :type user_id: int. :returns: `list` - of :class:`ApiKey` or :attr:`access_key` """ if 'access_key' in kwargs: r = Resource(cls.PATH + "/" + kwargs['access_key'], endpoint=endpoint) params = {} else: r = Resource(cls.PATH, endpoint=endpoint) if 'detail' in kwargs: r.request_details = kwargs['detail'] else: r.request_details = 'basic' if 'account_id' in kwargs: params = {'accountId': kwargs['account_id']} elif 'user_id' in kwargs: params = {'userId': kwargs['user_id']} else: params = {} x = r.get(params=params) if r.last_error is None: if keys_only is True: return [i[camelize(cls.PRIMARY_KEY)] for i in x[cls.COLLECTION_NAME]] else: return [type(cls.__name__, (object,), i) for i in uncamel_keys(x)[uncamel(cls.COLLECTION_NAME)]] else: raise ApiKeyException(r.last_error) class ApiKeyException(BaseException): pass class ApiKeyGenerationException(ApiKeyException): pass class ApiKeyInvalidationException(ApiKeyException): pass
[((97, 5, 97, 44), 'mixcoatl.decorators.validations.required_attrs', 'required_attrs', ({(97, 20, 97, 43): "['description', 'name']"}, {}), "(['description', 'name'])", False, 'from mixcoatl.decorators.validations import required_attrs\n'), ((22, 8, 22, 50), 'mixcoatl.resource.Resource.__init__', 'Resource.__init__', (), '', False, 'from mixcoatl.resource import Resource\n'), ((169, 16, 169, 82), 'mixcoatl.resource.Resource', 'Resource', (), '', False, 'from mixcoatl.resource import Resource\n'), ((172, 16, 172, 53), 'mixcoatl.resource.Resource', 'Resource', (), '', False, 'from mixcoatl.resource import Resource\n'), ((103, 27, 103, 46), 'json.dumps', 'json.dumps', ({(103, 38, 103, 45): 'payload'}, {}), '(payload)', False, 'import json\n'), ((189, 26, 189, 51), 'mixcoatl.utils.camelize', 'camelize', ({(189, 35, 189, 50): 'cls.PRIMARY_KEY'}, {}), '(cls.PRIMARY_KEY)', False, 'from mixcoatl.utils import uncamel, camelize, camel_keys, uncamel_keys\n'), ((193, 33, 193, 48), 'mixcoatl.utils.uncamel_keys', 'uncamel_keys', ({(193, 46, 193, 47): 'x'}, {}), '(x)', False, 'from mixcoatl.utils import uncamel, camelize, camel_keys, uncamel_keys\n'), ((193, 49, 193, 77), 'mixcoatl.utils.uncamel', 'uncamel', ({(193, 57, 193, 76): 'cls.COLLECTION_NAME'}, {}), '(cls.COLLECTION_NAME)', False, 'from mixcoatl.utils import uncamel, camelize, camel_keys, uncamel_keys\n')]
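A hypothetical mixcoatl session using the class above; it requires DCM API credentials to be configured for mixcoatl, and the key name is arbitrary.

key = ApiKey.generate_api_key('my-api-key', 'created from mixcoatl')
print(key.access_key, key.secret_key)

# list just the access keys visible to the calling credentials
for access_key in ApiKey.all(keys_only=True):
    print(access_key)

# invalidate (delete) the key created above
key.invalidate(reason='cleanup after demo')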
Johnny-QA/Python_training
Python tests/dictionaries.py
a15de68195eb155c99731db3e4ff1d9d75681752
my_set = {1, 3, 5} my_dict = {'name': 'Jose', 'age': 90} another_dict = {1: 15, 2: 75, 3: 150} lottery_players = [ { 'name': 'Rolf', 'numbers': (13, 45, 66, 23, 22) }, { 'name': 'John', 'numbers': (14, 56, 80, 23, 22) } ] universities = [ { 'name': 'Oxford', 'location': 'UK' }, { 'name': 'MIT', 'location': 'US' } ]
[]
ZhenghengLi/lcls2
psdaq/psdaq/control_gui/QWTable.py
94e75c6536954a58c8937595dcac295163aa1cdf
"""Class :py:class:`QWTable` is a QTableView->QWidget for tree model ====================================================================== Usage :: # Run test: python lcls2/psdaq/psdaq/control_gui/QWTable.py from psdaq.control_gui.QWTable import QWTable w = QWTable() Created on 2019-03-28 by Mikhail Dubrovin Re-designed after copy psana/graphqt/QWTable.py -> psdaq/control_gui/ """ import logging logger = logging.getLogger(__name__) from PyQt5.QtWidgets import QTableView, QVBoxLayout, QAbstractItemView, QSizePolicy from PyQt5.QtGui import QStandardItemModel, QStandardItem from PyQt5.QtCore import Qt, QModelIndex from psdaq.control_gui.QWIcons import icon class QWTable(QTableView): def __init__(self, **kwargs): parent = kwargs.get('parent', None) QTableView.__init__(self, parent) self._name = self.__class__.__name__ icon.set_icons() self.is_connected_item_changed = False self._si_model = QStandardItemModel() self.set_selection_mode() self.fill_table_model(**kwargs) # defines self._si_model self.setModel(self._si_model) self.connect_control() self.set_style() def connect_control(self): self.connect_item_selected_to(self.on_item_selected) self.clicked.connect(self.on_click) self.doubleClicked.connect(self.on_double_click) self.connect_item_changed_to(self.on_item_changed) #def __del__(self): # QTableView.__del__(self) - it does not have __del__ def set_selection_mode(self, smode=QAbstractItemView.ExtendedSelection): logger.debug('Set selection mode: %s'%smode) self.setSelectionMode(smode) def connect_item_changed_to(self, recipient): self._si_model.itemChanged.connect(recipient) self.is_connected_item_changed = True def disconnect_item_changed_from(self, recipient): if self.is_connected_item_changed: self._si_model.itemChanged.disconnect(recipient) self.is_connected_item_changed = False def connect_item_selected_to(self, recipient): self.selectionModel().currentChanged[QModelIndex, QModelIndex].connect(recipient) def disconnect_item_selected_from(self, recipient): #self.selectionModel().selectionChanged[QModelIndex, QModelIndex].disconnect(recipient) self.selectionModel().currentChanged[QModelIndex, QModelIndex].disconnect(recipient) def set_style(self): self.setStyleSheet("QTableView::item:hover{background-color:#00FFAA;}") #self.setSizePolicy(QSizePolicy::Preferred,QSizePolicy::Fixed) self.set_exact_widget_size() def set_exact_widget_size(self): """set window size exactly matching actual size of QTableView. 
""" self.setSizePolicy(QSizePolicy.Minimum, QSizePolicy.Minimum) self.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff) self.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff) self.resizeColumnsToContents() self.setFixedSize(self.horizontalHeader().length()+self.verticalHeader().width(),\ self.verticalHeader().length()+self.horizontalHeader().height()) def fill_table_model(self, **kwargs): self.clear_model() self._si_model.setHorizontalHeaderLabels(['col0', 'col1', 'col2', 'col3', 'col4']) self._si_model.setVerticalHeaderLabels(['row0', 'row1', 'row2', 'row3']) for row in range(0, 4): for col in range(0, 6): item = QStandardItem("itemA %d %d"%(row,col)) item.setIcon(icon.icon_table) item.setCheckable(True) self._si_model.setItem(row,col,item) if col==2: item.setIcon(icon.icon_folder_closed) if col==3: item.setText('Some text') #self._si_model.appendRow(item) def clear_model(self): rows,cols = self._si_model.rowCount(), self._si_model.columnCount() self._si_model.removeRows(0, rows) self._si_model.removeColumns(0, cols) def selected_indexes(self): return self.selectedIndexes() def selected_items(self): indexes = self.selectedIndexes() return [self._si_model.itemFromIndex(i) for i in self.selectedIndexes()] def getFullNameFromItem(self, item): #item = self._si_model.itemFromIndex(ind) ind = self._si_model.indexFromItem(item) return self.getFullNameFromIndex(ind) def getFullNameFromIndex(self, ind): item = self._si_model.itemFromIndex(ind) if item is None: return None self._full_name = item.text() self._getFullName(ind) return self._full_name def _getFullName(self, ind): ind_par = self._si_model.parent(ind) if(ind_par.column() == -1): item = self._si_model.itemFromIndex(ind) self.full_name = '/' + self._full_name #logger.debug('Item full name:' + self._full_name) return self._full_name else: item_par = self._si_model.itemFromIndex(ind_par) self._full_name = item_par.text() + '/' + self._full_name self._getFullName(ind_par) # def resizeEvent(self, e): # logger.debug('resizeEvent') # QTableView.resizeEvent(self, e) def closeEvent(self, event): # if the x is clicked logger.debug('closeEvent') QTableView.closeEvent(self, event) def on_click(self, index): item = self._si_model.itemFromIndex(index) msg = 'on_click: item in row:%02d text: %s' % (index.row(), item.text()) logger.debug(msg) def on_double_click(self, index): item = self._si_model.itemFromIndex(index) msg = 'on_double_click: item in row:%02d text: %s' % (index.row(), item.text()) logger.debug(msg) def on_item_selected(self, ind_sel, ind_desel): #logger.debug("ind selected: ", ind_sel.row(), ind_sel.column()) #logger.debug("ind deselected: ", ind_desel.row(),ind_desel.column()) item = self._si_model.itemFromIndex(ind_sel) logger.debug('on_item_selected: "%s" is selected' % (item.text() if item is not None else None)) #logger.debug('on_item_selected: %s' % self.getFullNameFromItem(item)) def on_item_changed(self, item): state = ['UNCHECKED', 'TRISTATE', 'CHECKED'][item.checkState()] logger.debug('abstract on_item_changed: "%s" at state %s' % (self.getFullNameFromItem(item), state)) def process_selected_items(self): selitems = self.selected_items() msg = '%d Selected items:' % len(selitems) for i in selitems: msg += '\n %s' % i.text() logger.info(msg) if __name__ == '__main__': def keyPressEvent(self, e): logger.info('keyPressEvent, key=%s' % e.key()) if e.key() == Qt.Key_Escape: self.close() elif e.key() == Qt.Key_S: self.process_selected_items() else: logger.info('Keys:'\ '\n ESC - exit'\ '\n S - show selected items'\ '\n') if __name__ 
== '__main__': import sys from PyQt5.QtWidgets import QApplication logging.basicConfig(format='%(asctime)s %(name)s %(levelname)s: %(message)s', datefmt='%H:%M:%S', level=logging.DEBUG) app = QApplication(sys.argv) w = QWTable() #w.setGeometry(100, 100, 700, 300) w.setWindowTitle('QWTable') w.move(100,50) w.show() app.exec_() del w del app # EOF
[((17, 9, 17, 36), 'logging.getLogger', 'logging.getLogger', ({(17, 27, 17, 35): '__name__'}, {}), '(__name__)', False, 'import logging\n'), ((221, 4, 221, 122), 'logging.basicConfig', 'logging.basicConfig', (), '', False, 'import logging\n'), ((222, 10, 222, 32), 'PyQt5.QtWidgets.QApplication', 'QApplication', ({(222, 23, 222, 31): 'sys.argv'}, {}), '(sys.argv)', False, 'from PyQt5.QtWidgets import QApplication\n'), ((32, 8, 32, 41), 'PyQt5.QtWidgets.QTableView.__init__', 'QTableView.__init__', ({(32, 28, 32, 32): 'self', (32, 34, 32, 40): 'parent'}, {}), '(self, parent)', False, 'from PyQt5.QtWidgets import QTableView, QVBoxLayout, QAbstractItemView, QSizePolicy\n'), ((35, 8, 35, 24), 'psdaq.control_gui.QWIcons.icon.set_icons', 'icon.set_icons', ({}, {}), '()', False, 'from psdaq.control_gui.QWIcons import icon\n'), ((39, 25, 39, 45), 'PyQt5.QtGui.QStandardItemModel', 'QStandardItemModel', ({}, {}), '()', False, 'from PyQt5.QtGui import QStandardItemModel, QStandardItem\n'), ((164, 8, 164, 42), 'PyQt5.QtWidgets.QTableView.closeEvent', 'QTableView.closeEvent', ({(164, 30, 164, 34): 'self', (164, 36, 164, 41): 'event'}, {}), '(self, event)', False, 'from PyQt5.QtWidgets import QTableView, QVBoxLayout, QAbstractItemView, QSizePolicy\n'), ((106, 23, 106, 61), 'PyQt5.QtGui.QStandardItem', 'QStandardItem', ({(106, 37, 106, 60): "'itemA %d %d' % (row, col)"}, {}), "('itemA %d %d' % (row, col))", False, 'from PyQt5.QtGui import QStandardItemModel, QStandardItem\n')]
vadmium/grailbrowser
src/grailbase/mtloader.py
ca94e6db2359bcb16c0da256771550d1327c6d33
"""Extension loader for filetype handlers. The extension objects provided by MIMEExtensionLoader objects have four attributes: parse, embed, add_options, and update_options. The first two are used as handlers for supporting the MIME type as primary and embeded resources. The last two are (currently) only used for printing. """ __version__ = '$Revision: 2.4 $' from . import extloader import string class MIMEExtensionLoader(extloader.ExtensionLoader): def find(self, name): new_name = string.replace(name, "-", "_") major, minor = tuple(string.split(new_name, "/")) if minor: modname = "%s_%s" % (major, minor) else: modname = major mod = self.find_module(modname) ext = None if not mod and modname != major: ext = self.get(major + "/") elif mod: ext = MIMETypeExtension(name, mod, modname) return ext class MIMETypeExtension: def __init__(self, type, mod, modname): self.type = type self.__load_attr(mod, "parse_" + modname, "parse") self.__load_attr(mod, "embed_" + modname, "embed") self.__load_attr(mod, "add_options") self.__load_attr(mod, "update_settings") def __repr__(self): classname = self.__class__.__name__ modulename = self.__class__.__module__ if self.parse and self.embed: flags = " [displayable, embeddable]" elif self.embed: flags = " [embeddable]" elif self.parse: flags = " [displayable]" else: # not very useful, now is it? flags = "" return "<%s.%s for %s%s>" % (modulename, classname, self.type, flags) def __load_attr(self, mod, name, load_as=None): load_as = load_as or name if hasattr(mod, name): v = getattr(mod, name) else: v = None setattr(self, load_as, v)
[((17, 19, 17, 49), 'string.replace', 'string.replace', ({(17, 34, 17, 38): 'name', (17, 40, 17, 43): '"""-"""', (17, 45, 17, 48): '"""_"""'}, {}), "(name, '-', '_')", False, 'import string\n'), ((18, 29, 18, 56), 'string.split', 'string.split', ({(18, 42, 18, 50): 'new_name', (18, 52, 18, 55): '"""/"""'}, {}), "(new_name, '/')", False, 'import string\n')]
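For context, a sketch of the kind of handler module this loader is built to discover (hypothetical, not part of the repository): for a MIME type such as text/plain, find() derives the module name text_plain, and MIMETypeExtension then looks up parse_text_plain and embed_text_plain as the extension's parse and embed handlers.

# text_plain.py -- hypothetical handler module discovered by MIMEExtensionLoader
def parse_text_plain(*args, **kw):
    """Handler used when text/plain is the primary resource."""

def embed_text_plain(*args, **kw):
    """Handler used when text/plain is an embedded resource."""

def add_options(*args, **kw):
    """Optional hook; currently only used for printing."""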
IBM/eventstreams-python-sdk
eventstreams_sdk/adminrest_v1.py
cc898e6901c35d1b43e2be7d152c6d770d967b23
# coding: utf-8 # (C) Copyright IBM Corp. 2021. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # IBM OpenAPI SDK Code Generator Version: 3.25.0-2b3f843a-20210115-164628 """ The administration REST API for IBM Event Streams on Cloud. """ from typing import Dict, List import json from ibm_cloud_sdk_core import BaseService, DetailedResponse from ibm_cloud_sdk_core.authenticators.authenticator import Authenticator from ibm_cloud_sdk_core.get_authenticator import get_authenticator_from_environment from ibm_cloud_sdk_core.utils import convert_model from .common import get_sdk_headers ############################################################################## # Service ############################################################################## class AdminrestV1(BaseService): """The adminrest V1 service.""" DEFAULT_SERVICE_URL = 'https://adminrest.cloud.ibm.com' DEFAULT_SERVICE_NAME = 'adminrest' @classmethod def new_instance(cls, service_name: str = DEFAULT_SERVICE_NAME, ) -> 'AdminrestV1': """ Return a new client for the adminrest service using the specified parameters and external configuration. """ authenticator = get_authenticator_from_environment(service_name) service = cls( authenticator ) service.configure_service(service_name) return service def __init__(self, authenticator: Authenticator = None, ) -> None: """ Construct a new client for the adminrest service. :param Authenticator authenticator: The authenticator specifies the authentication mechanism. Get up to date information from https://github.com/IBM/python-sdk-core/blob/master/README.md about initializing the authenticator of your choice. """ BaseService.__init__(self, service_url=self.DEFAULT_SERVICE_URL, authenticator=authenticator) ######################### # default ######################### def create_topic(self, *, name: str = None, partitions: int = None, partition_count: int = None, configs: List['ConfigCreate'] = None, **kwargs ) -> DetailedResponse: """ Create a new topic. Create a new topic. :param str name: (optional) The name of topic to be created. :param int partitions: (optional) The number of partitions. :param int partition_count: (optional) The number of partitions, this field takes precedence over 'partitions'. Default value is 1 if not specified. :param List[ConfigCreate] configs: (optional) The config properties to be set for the new topic. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. 
:rtype: DetailedResponse """ if configs is not None: configs = [convert_model(x) for x in configs] headers = {} sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME, service_version='V1', operation_id='create_topic') headers.update(sdk_headers) data = { 'name': name, 'partitions': partitions, 'partition_count': partition_count, 'configs': configs } data = {k: v for (k, v) in data.items() if v is not None} data = json.dumps(data) headers['content-type'] = 'application/json' if 'headers' in kwargs: headers.update(kwargs.get('headers')) headers['Accept'] = 'application/json' url = '/admin/topics' request = self.prepare_request(method='POST', url=url, headers=headers, data=data) response = self.send(request) return response def list_topics(self, *, topic_filter: str = None, per_page: int = None, page: int = None, **kwargs ) -> DetailedResponse: """ Get a list of topics. Returns a list containing information about all of the Kafka topics that are defined for an instance of the Event Streams service. If there are currently no topics defined then an empty list is returned. :param str topic_filter: (optional) A filter to be applied to the topic names. A simple filter can be specified as a string with asterisk (`*`) wildcards representing 0 or more characters, e.g. `topic-name*` will filter all topic names that begin with the string `topic-name` followed by any character sequence. A more complex filter pattern can be used by surrounding a regular expression in forward slash (`/`) delimiters, e.g. `/topic-name.* /`. :param int per_page: (optional) The number of topic names to be returns. :param int page: (optional) The page number to be returned. The number 1 represents the first page. The default value is 1. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. :rtype: DetailedResponse with `List[TopicDetail]` result """ headers = {} sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME, service_version='V1', operation_id='list_topics') headers.update(sdk_headers) params = { 'topic_filter': topic_filter, 'per_page': per_page, 'page': page } if 'headers' in kwargs: headers.update(kwargs.get('headers')) headers['Accept'] = 'application/json' url = '/admin/topics' request = self.prepare_request(method='GET', url=url, headers=headers, params=params) response = self.send(request) return response def get_topic(self, topic_name: str, **kwargs ) -> DetailedResponse: """ Get detailed information on a topic. Get detailed information on a topic. :param str topic_name: The topic name for the topic to be listed. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. 
:rtype: DetailedResponse with `dict` result representing a `TopicDetail` object """ if topic_name is None: raise ValueError('topic_name must be provided') headers = {} sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME, service_version='V1', operation_id='get_topic') headers.update(sdk_headers) if 'headers' in kwargs: headers.update(kwargs.get('headers')) headers['Accept'] = 'application/json' path_param_keys = ['topic_name'] path_param_values = self.encode_path_vars(topic_name) path_param_dict = dict(zip(path_param_keys, path_param_values)) url = '/admin/topics/{topic_name}'.format(**path_param_dict) request = self.prepare_request(method='GET', url=url, headers=headers) response = self.send(request) return response def delete_topic(self, topic_name: str, **kwargs ) -> DetailedResponse: """ Delete a topic. Delete a topic. :param str topic_name: The topic name for the topic to be listed. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. :rtype: DetailedResponse """ if topic_name is None: raise ValueError('topic_name must be provided') headers = {} sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME, service_version='V1', operation_id='delete_topic') headers.update(sdk_headers) if 'headers' in kwargs: headers.update(kwargs.get('headers')) headers['Accept'] = 'application/json' path_param_keys = ['topic_name'] path_param_values = self.encode_path_vars(topic_name) path_param_dict = dict(zip(path_param_keys, path_param_values)) url = '/admin/topics/{topic_name}'.format(**path_param_dict) request = self.prepare_request(method='DELETE', url=url, headers=headers) response = self.send(request) return response def update_topic(self, topic_name: str, *, new_total_partition_count: int = None, configs: List['ConfigUpdate'] = None, **kwargs ) -> DetailedResponse: """ Increase the number of partitions and/or update one or more topic configuration parameters. Increase the number of partitions and/or update one or more topic configuration parameters. :param str topic_name: The topic name for the topic to be listed. :param int new_total_partition_count: (optional) The new partition number to be increased. :param List[ConfigUpdate] configs: (optional) The config properties to be updated for the topic. Valid config keys are 'cleanup.policy', 'retention.ms', 'retention.bytes', 'segment.bytes', 'segment.ms', 'segment.index.bytes'. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. 
:rtype: DetailedResponse """ if topic_name is None: raise ValueError('topic_name must be provided') if configs is not None: configs = [convert_model(x) for x in configs] headers = {} sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME, service_version='V1', operation_id='update_topic') headers.update(sdk_headers) data = { 'new_total_partition_count': new_total_partition_count, 'configs': configs } data = {k: v for (k, v) in data.items() if v is not None} data = json.dumps(data) headers['content-type'] = 'application/json' if 'headers' in kwargs: headers.update(kwargs.get('headers')) headers['Accept'] = 'application/json' path_param_keys = ['topic_name'] path_param_values = self.encode_path_vars(topic_name) path_param_dict = dict(zip(path_param_keys, path_param_values)) url = '/admin/topics/{topic_name}'.format(**path_param_dict) request = self.prepare_request(method='PATCH', url=url, headers=headers, data=data) response = self.send(request) return response def get_mirroring_topic_selection(self, **kwargs ) -> DetailedResponse: """ Get current topic selection for mirroring. Get current topic selection for mirroring. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. :rtype: DetailedResponse with `dict` result representing a `MirroringTopicSelection` object """ headers = {} sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME, service_version='V1', operation_id='get_mirroring_topic_selection') headers.update(sdk_headers) if 'headers' in kwargs: headers.update(kwargs.get('headers')) headers['Accept'] = 'application/json' url = '/admin/mirroring/topic-selection' request = self.prepare_request(method='GET', url=url, headers=headers) response = self.send(request) return response def replace_mirroring_topic_selection(self, *, includes: List[str] = None, **kwargs ) -> DetailedResponse: """ Replace topic selection for mirroring. Replace topic selection for mirroring. This operation replaces the complete set of mirroring topic selections. :param List[str] includes: (optional) :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. :rtype: DetailedResponse with `dict` result representing a `MirroringTopicSelection` object """ headers = {} sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME, service_version='V1', operation_id='replace_mirroring_topic_selection') headers.update(sdk_headers) data = { 'includes': includes } data = {k: v for (k, v) in data.items() if v is not None} data = json.dumps(data) headers['content-type'] = 'application/json' if 'headers' in kwargs: headers.update(kwargs.get('headers')) headers['Accept'] = 'application/json' url = '/admin/mirroring/topic-selection' request = self.prepare_request(method='POST', url=url, headers=headers, data=data) response = self.send(request) return response def get_mirroring_active_topics(self, **kwargs ) -> DetailedResponse: """ Get topics that are being actively mirrored. Get topics that are being actively mirrored. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. 
:rtype: DetailedResponse with `dict` result representing a `MirroringActiveTopics` object """ headers = {} sdk_headers = get_sdk_headers(service_name=self.DEFAULT_SERVICE_NAME, service_version='V1', operation_id='get_mirroring_active_topics') headers.update(sdk_headers) if 'headers' in kwargs: headers.update(kwargs.get('headers')) headers['Accept'] = 'application/json' url = '/admin/mirroring/active-topics' request = self.prepare_request(method='GET', url=url, headers=headers) response = self.send(request) return response ############################################################################## # Models ############################################################################## class ReplicaAssignmentBrokers(): """ ReplicaAssignmentBrokers. :attr List[int] replicas: (optional) """ def __init__(self, *, replicas: List[int] = None) -> None: """ Initialize a ReplicaAssignmentBrokers object. :param List[int] replicas: (optional) """ self.replicas = replicas @classmethod def from_dict(cls, _dict: Dict) -> 'ReplicaAssignmentBrokers': """Initialize a ReplicaAssignmentBrokers object from a json dictionary.""" args = {} if 'replicas' in _dict: args['replicas'] = _dict.get('replicas') return cls(**args) @classmethod def _from_dict(cls, _dict): """Initialize a ReplicaAssignmentBrokers object from a json dictionary.""" return cls.from_dict(_dict) def to_dict(self) -> Dict: """Return a json dictionary representing this model.""" _dict = {} if hasattr(self, 'replicas') and self.replicas is not None: _dict['replicas'] = self.replicas return _dict def _to_dict(self): """Return a json dictionary representing this model.""" return self.to_dict() def __str__(self) -> str: """Return a `str` version of this ReplicaAssignmentBrokers object.""" return json.dumps(self.to_dict(), indent=2) def __eq__(self, other: 'ReplicaAssignmentBrokers') -> bool: """Return `true` when self and other are equal, false otherwise.""" if not isinstance(other, self.__class__): return False return self.__dict__ == other.__dict__ def __ne__(self, other: 'ReplicaAssignmentBrokers') -> bool: """Return `true` when self and other are not equal, false otherwise.""" return not self == other class ConfigCreate(): """ ConfigCreate. :attr str name: (optional) The name of the config property. :attr str value: (optional) The value for a config property. """ def __init__(self, *, name: str = None, value: str = None) -> None: """ Initialize a ConfigCreate object. :param str name: (optional) The name of the config property. :param str value: (optional) The value for a config property. 
""" self.name = name self.value = value @classmethod def from_dict(cls, _dict: Dict) -> 'ConfigCreate': """Initialize a ConfigCreate object from a json dictionary.""" args = {} if 'name' in _dict: args['name'] = _dict.get('name') if 'value' in _dict: args['value'] = _dict.get('value') return cls(**args) @classmethod def _from_dict(cls, _dict): """Initialize a ConfigCreate object from a json dictionary.""" return cls.from_dict(_dict) def to_dict(self) -> Dict: """Return a json dictionary representing this model.""" _dict = {} if hasattr(self, 'name') and self.name is not None: _dict['name'] = self.name if hasattr(self, 'value') and self.value is not None: _dict['value'] = self.value return _dict def _to_dict(self): """Return a json dictionary representing this model.""" return self.to_dict() def __str__(self) -> str: """Return a `str` version of this ConfigCreate object.""" return json.dumps(self.to_dict(), indent=2) def __eq__(self, other: 'ConfigCreate') -> bool: """Return `true` when self and other are equal, false otherwise.""" if not isinstance(other, self.__class__): return False return self.__dict__ == other.__dict__ def __ne__(self, other: 'ConfigCreate') -> bool: """Return `true` when self and other are not equal, false otherwise.""" return not self == other class ConfigUpdate(): """ ConfigUpdate. :attr str name: (optional) The name of the config property. :attr str value: (optional) The value for a config property. :attr bool reset_to_default: (optional) When true, the value of the config property is reset to its default value. """ def __init__(self, *, name: str = None, value: str = None, reset_to_default: bool = None) -> None: """ Initialize a ConfigUpdate object. :param str name: (optional) The name of the config property. :param str value: (optional) The value for a config property. :param bool reset_to_default: (optional) When true, the value of the config property is reset to its default value. 
""" self.name = name self.value = value self.reset_to_default = reset_to_default @classmethod def from_dict(cls, _dict: Dict) -> 'ConfigUpdate': """Initialize a ConfigUpdate object from a json dictionary.""" args = {} if 'name' in _dict: args['name'] = _dict.get('name') if 'value' in _dict: args['value'] = _dict.get('value') if 'reset_to_default' in _dict: args['reset_to_default'] = _dict.get('reset_to_default') return cls(**args) @classmethod def _from_dict(cls, _dict): """Initialize a ConfigUpdate object from a json dictionary.""" return cls.from_dict(_dict) def to_dict(self) -> Dict: """Return a json dictionary representing this model.""" _dict = {} if hasattr(self, 'name') and self.name is not None: _dict['name'] = self.name if hasattr(self, 'value') and self.value is not None: _dict['value'] = self.value if hasattr(self, 'reset_to_default') and self.reset_to_default is not None: _dict['reset_to_default'] = self.reset_to_default return _dict def _to_dict(self): """Return a json dictionary representing this model.""" return self.to_dict() def __str__(self) -> str: """Return a `str` version of this ConfigUpdate object.""" return json.dumps(self.to_dict(), indent=2) def __eq__(self, other: 'ConfigUpdate') -> bool: """Return `true` when self and other are equal, false otherwise.""" if not isinstance(other, self.__class__): return False return self.__dict__ == other.__dict__ def __ne__(self, other: 'ConfigUpdate') -> bool: """Return `true` when self and other are not equal, false otherwise.""" return not self == other class MirroringActiveTopics(): """ Topics that are being actively mirrored. :attr List[str] active_topics: (optional) """ def __init__(self, *, active_topics: List[str] = None) -> None: """ Initialize a MirroringActiveTopics object. :param List[str] active_topics: (optional) """ self.active_topics = active_topics @classmethod def from_dict(cls, _dict: Dict) -> 'MirroringActiveTopics': """Initialize a MirroringActiveTopics object from a json dictionary.""" args = {} if 'active_topics' in _dict: args['active_topics'] = _dict.get('active_topics') return cls(**args) @classmethod def _from_dict(cls, _dict): """Initialize a MirroringActiveTopics object from a json dictionary.""" return cls.from_dict(_dict) def to_dict(self) -> Dict: """Return a json dictionary representing this model.""" _dict = {} if hasattr(self, 'active_topics') and self.active_topics is not None: _dict['active_topics'] = self.active_topics return _dict def _to_dict(self): """Return a json dictionary representing this model.""" return self.to_dict() def __str__(self) -> str: """Return a `str` version of this MirroringActiveTopics object.""" return json.dumps(self.to_dict(), indent=2) def __eq__(self, other: 'MirroringActiveTopics') -> bool: """Return `true` when self and other are equal, false otherwise.""" if not isinstance(other, self.__class__): return False return self.__dict__ == other.__dict__ def __ne__(self, other: 'MirroringActiveTopics') -> bool: """Return `true` when self and other are not equal, false otherwise.""" return not self == other class MirroringTopicSelection(): """ Mirroring topic selection payload. :attr List[str] includes: (optional) """ def __init__(self, *, includes: List[str] = None) -> None: """ Initialize a MirroringTopicSelection object. 
:param List[str] includes: (optional) """ self.includes = includes @classmethod def from_dict(cls, _dict: Dict) -> 'MirroringTopicSelection': """Initialize a MirroringTopicSelection object from a json dictionary.""" args = {} if 'includes' in _dict: args['includes'] = _dict.get('includes') return cls(**args) @classmethod def _from_dict(cls, _dict): """Initialize a MirroringTopicSelection object from a json dictionary.""" return cls.from_dict(_dict) def to_dict(self) -> Dict: """Return a json dictionary representing this model.""" _dict = {} if hasattr(self, 'includes') and self.includes is not None: _dict['includes'] = self.includes return _dict def _to_dict(self): """Return a json dictionary representing this model.""" return self.to_dict() def __str__(self) -> str: """Return a `str` version of this MirroringTopicSelection object.""" return json.dumps(self.to_dict(), indent=2) def __eq__(self, other: 'MirroringTopicSelection') -> bool: """Return `true` when self and other are equal, false otherwise.""" if not isinstance(other, self.__class__): return False return self.__dict__ == other.__dict__ def __ne__(self, other: 'MirroringTopicSelection') -> bool: """Return `true` when self and other are not equal, false otherwise.""" return not self == other class ReplicaAssignment(): """ ReplicaAssignment. :attr int id: (optional) The ID of the partition. :attr ReplicaAssignmentBrokers brokers: (optional) """ def __init__(self, *, id: int = None, brokers: 'ReplicaAssignmentBrokers' = None) -> None: """ Initialize a ReplicaAssignment object. :param int id: (optional) The ID of the partition. :param ReplicaAssignmentBrokers brokers: (optional) """ self.id = id self.brokers = brokers @classmethod def from_dict(cls, _dict: Dict) -> 'ReplicaAssignment': """Initialize a ReplicaAssignment object from a json dictionary.""" args = {} if 'id' in _dict: args['id'] = _dict.get('id') if 'brokers' in _dict: args['brokers'] = ReplicaAssignmentBrokers.from_dict(_dict.get('brokers')) return cls(**args) @classmethod def _from_dict(cls, _dict): """Initialize a ReplicaAssignment object from a json dictionary.""" return cls.from_dict(_dict) def to_dict(self) -> Dict: """Return a json dictionary representing this model.""" _dict = {} if hasattr(self, 'id') and self.id is not None: _dict['id'] = self.id if hasattr(self, 'brokers') and self.brokers is not None: _dict['brokers'] = self.brokers.to_dict() return _dict def _to_dict(self): """Return a json dictionary representing this model.""" return self.to_dict() def __str__(self) -> str: """Return a `str` version of this ReplicaAssignment object.""" return json.dumps(self.to_dict(), indent=2) def __eq__(self, other: 'ReplicaAssignment') -> bool: """Return `true` when self and other are equal, false otherwise.""" if not isinstance(other, self.__class__): return False return self.__dict__ == other.__dict__ def __ne__(self, other: 'ReplicaAssignment') -> bool: """Return `true` when self and other are not equal, false otherwise.""" return not self == other class TopicConfigs(): """ TopicConfigs. :attr str cleanup_policy: (optional) The value of config property 'cleanup.policy'. :attr str min_insync_replicas: (optional) The value of config property 'min.insync.replicas'. :attr str retention_bytes: (optional) The value of config property 'retention.bytes'. :attr str retention_ms: (optional) The value of config property 'retention.ms'. :attr str segment_bytes: (optional) The value of config property 'segment.bytes'. 
:attr str segment_index_bytes: (optional) The value of config property 'segment.index.bytes'. :attr str segment_ms: (optional) The value of config property 'segment.ms'. """ def __init__(self, *, cleanup_policy: str = None, min_insync_replicas: str = None, retention_bytes: str = None, retention_ms: str = None, segment_bytes: str = None, segment_index_bytes: str = None, segment_ms: str = None) -> None: """ Initialize a TopicConfigs object. :param str cleanup_policy: (optional) The value of config property 'cleanup.policy'. :param str min_insync_replicas: (optional) The value of config property 'min.insync.replicas'. :param str retention_bytes: (optional) The value of config property 'retention.bytes'. :param str retention_ms: (optional) The value of config property 'retention.ms'. :param str segment_bytes: (optional) The value of config property 'segment.bytes'. :param str segment_index_bytes: (optional) The value of config property 'segment.index.bytes'. :param str segment_ms: (optional) The value of config property 'segment.ms'. """ self.cleanup_policy = cleanup_policy self.min_insync_replicas = min_insync_replicas self.retention_bytes = retention_bytes self.retention_ms = retention_ms self.segment_bytes = segment_bytes self.segment_index_bytes = segment_index_bytes self.segment_ms = segment_ms @classmethod def from_dict(cls, _dict: Dict) -> 'TopicConfigs': """Initialize a TopicConfigs object from a json dictionary.""" args = {} if 'cleanup.policy' in _dict: args['cleanup_policy'] = _dict.get('cleanup.policy') if 'min.insync.replicas' in _dict: args['min_insync_replicas'] = _dict.get('min.insync.replicas') if 'retention.bytes' in _dict: args['retention_bytes'] = _dict.get('retention.bytes') if 'retention.ms' in _dict: args['retention_ms'] = _dict.get('retention.ms') if 'segment.bytes' in _dict: args['segment_bytes'] = _dict.get('segment.bytes') if 'segment.index.bytes' in _dict: args['segment_index_bytes'] = _dict.get('segment.index.bytes') if 'segment.ms' in _dict: args['segment_ms'] = _dict.get('segment.ms') return cls(**args) @classmethod def _from_dict(cls, _dict): """Initialize a TopicConfigs object from a json dictionary.""" return cls.from_dict(_dict) def to_dict(self) -> Dict: """Return a json dictionary representing this model.""" _dict = {} if hasattr(self, 'cleanup_policy') and self.cleanup_policy is not None: _dict['cleanup.policy'] = self.cleanup_policy if hasattr(self, 'min_insync_replicas') and self.min_insync_replicas is not None: _dict['min.insync.replicas'] = self.min_insync_replicas if hasattr(self, 'retention_bytes') and self.retention_bytes is not None: _dict['retention.bytes'] = self.retention_bytes if hasattr(self, 'retention_ms') and self.retention_ms is not None: _dict['retention.ms'] = self.retention_ms if hasattr(self, 'segment_bytes') and self.segment_bytes is not None: _dict['segment.bytes'] = self.segment_bytes if hasattr(self, 'segment_index_bytes') and self.segment_index_bytes is not None: _dict['segment.index.bytes'] = self.segment_index_bytes if hasattr(self, 'segment_ms') and self.segment_ms is not None: _dict['segment.ms'] = self.segment_ms return _dict def _to_dict(self): """Return a json dictionary representing this model.""" return self.to_dict() def __str__(self) -> str: """Return a `str` version of this TopicConfigs object.""" return json.dumps(self.to_dict(), indent=2) def __eq__(self, other: 'TopicConfigs') -> bool: """Return `true` when self and other are equal, false otherwise.""" if not isinstance(other, self.__class__): return False return 
self.__dict__ == other.__dict__ def __ne__(self, other: 'TopicConfigs') -> bool: """Return `true` when self and other are not equal, false otherwise.""" return not self == other class TopicDetail(): """ TopicDetail. :attr str name: (optional) The name of the topic. :attr int partitions: (optional) The number of partitions. :attr int replication_factor: (optional) The number of replication factor. :attr int retention_ms: (optional) The value of config property 'retention.ms'. :attr str cleanup_policy: (optional) The value of config property 'cleanup.policy'. :attr TopicConfigs configs: (optional) :attr List[ReplicaAssignment] replica_assignments: (optional) The replia assignment of the topic. """ def __init__(self, *, name: str = None, partitions: int = None, replication_factor: int = None, retention_ms: int = None, cleanup_policy: str = None, configs: 'TopicConfigs' = None, replica_assignments: List['ReplicaAssignment'] = None) -> None: """ Initialize a TopicDetail object. :param str name: (optional) The name of the topic. :param int partitions: (optional) The number of partitions. :param int replication_factor: (optional) The number of replication factor. :param int retention_ms: (optional) The value of config property 'retention.ms'. :param str cleanup_policy: (optional) The value of config property 'cleanup.policy'. :param TopicConfigs configs: (optional) :param List[ReplicaAssignment] replica_assignments: (optional) The replia assignment of the topic. """ self.name = name self.partitions = partitions self.replication_factor = replication_factor self.retention_ms = retention_ms self.cleanup_policy = cleanup_policy self.configs = configs self.replica_assignments = replica_assignments @classmethod def from_dict(cls, _dict: Dict) -> 'TopicDetail': """Initialize a TopicDetail object from a json dictionary.""" args = {} if 'name' in _dict: args['name'] = _dict.get('name') if 'partitions' in _dict: args['partitions'] = _dict.get('partitions') if 'replicationFactor' in _dict: args['replication_factor'] = _dict.get('replicationFactor') if 'retentionMs' in _dict: args['retention_ms'] = _dict.get('retentionMs') if 'cleanupPolicy' in _dict: args['cleanup_policy'] = _dict.get('cleanupPolicy') if 'configs' in _dict: args['configs'] = TopicConfigs.from_dict(_dict.get('configs')) if 'replicaAssignments' in _dict: args['replica_assignments'] = [ReplicaAssignment.from_dict(x) for x in _dict.get('replicaAssignments')] return cls(**args) @classmethod def _from_dict(cls, _dict): """Initialize a TopicDetail object from a json dictionary.""" return cls.from_dict(_dict) def to_dict(self) -> Dict: """Return a json dictionary representing this model.""" _dict = {} if hasattr(self, 'name') and self.name is not None: _dict['name'] = self.name if hasattr(self, 'partitions') and self.partitions is not None: _dict['partitions'] = self.partitions if hasattr(self, 'replication_factor') and self.replication_factor is not None: _dict['replicationFactor'] = self.replication_factor if hasattr(self, 'retention_ms') and self.retention_ms is not None: _dict['retentionMs'] = self.retention_ms if hasattr(self, 'cleanup_policy') and self.cleanup_policy is not None: _dict['cleanupPolicy'] = self.cleanup_policy if hasattr(self, 'configs') and self.configs is not None: _dict['configs'] = self.configs.to_dict() if hasattr(self, 'replica_assignments') and self.replica_assignments is not None: _dict['replicaAssignments'] = [x.to_dict() for x in self.replica_assignments] return _dict def _to_dict(self): """Return a json dictionary 
representing this model.""" return self.to_dict() def __str__(self) -> str: """Return a `str` version of this TopicDetail object.""" return json.dumps(self.to_dict(), indent=2) def __eq__(self, other: 'TopicDetail') -> bool: """Return `true` when self and other are equal, false otherwise.""" if not isinstance(other, self.__class__): return False return self.__dict__ == other.__dict__ def __ne__(self, other: 'TopicDetail') -> bool: """Return `true` when self and other are not equal, false otherwise.""" return not self == other
[((51, 24, 51, 72), 'ibm_cloud_sdk_core.get_authenticator.get_authenticator_from_environment', 'get_authenticator_from_environment', ({(51, 59, 51, 71): 'service_name'}, {}), '(service_name)', False, 'from ibm_cloud_sdk_core.get_authenticator import get_authenticator_from_environment\n'), ((68, 8, 70, 57), 'ibm_cloud_sdk_core.BaseService.__init__', 'BaseService.__init__', (), '', False, 'from ibm_cloud_sdk_core import BaseService, DetailedResponse\n'), ((117, 15, 117, 31), 'json.dumps', 'json.dumps', ({(117, 26, 117, 30): 'data'}, {}), '(data)', False, 'import json\n'), ((307, 15, 307, 31), 'json.dumps', 'json.dumps', ({(307, 26, 307, 30): 'data'}, {}), '(data)', False, 'import json\n'), ((386, 15, 386, 31), 'json.dumps', 'json.dumps', ({(386, 26, 386, 30): 'data'}, {}), '(data)', False, 'import json\n'), ((103, 23, 103, 39), 'ibm_cloud_sdk_core.utils.convert_model', 'convert_model', ({(103, 37, 103, 38): 'x'}, {}), '(x)', False, 'from ibm_cloud_sdk_core.utils import convert_model\n'), ((295, 23, 295, 39), 'ibm_cloud_sdk_core.utils.convert_model', 'convert_model', ({(295, 37, 295, 38): 'x'}, {}), '(x)', False, 'from ibm_cloud_sdk_core.utils import convert_model\n')]
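A brief usage sketch for the client defined above. The endpoint and API key are placeholders, and IAMAuthenticator is simply one of the standard ibm_cloud_sdk_core authenticators rather than anything this module mandates.

from ibm_cloud_sdk_core.authenticators import IAMAuthenticator
from eventstreams_sdk.adminrest_v1 import AdminrestV1

authenticator = IAMAuthenticator('<api-key>')            # placeholder credentials
client = AdminrestV1(authenticator=authenticator)
client.set_service_url('https://<admin-endpoint>')       # placeholder endpoint

client.create_topic(name='greetings', partitions=3)
for topic in client.list_topics().get_result():
    print(topic['name'])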
BaseCampCoding/python-fundamentals
3-functions/pytest-exercises/test_functions.py
3804c07841d6604b1e5a1c15126b3301aa8ae306
import functions from pytest import approx from bcca.test import should_print def test_add_em_up(): assert functions.add_em_up(1, 2, 3) == 6 assert functions.add_em_up(4, 5, 6) == 15 def test_sub_sub_hubbub(): assert functions.sub_sub_hubbub(1, 2, 3) == -4 def test_square_area(): assert functions.square_area(5, 5) == 25 assert functions.square_area(3, 5) == 15 assert functions.square_area(2, 2) == 4 def test_circle_area(): assert functions.circle_area(1) == approx(3.14) assert functions.circle_area(5) == approx(78.5) def test_kilometers_to_miles(): assert functions.kilometers_to_miles(1) == approx(0.6214) assert functions.kilometers_to_miles(.5) == approx(0.3107) assert functions.kilometers_to_miles(0) == approx(0.0) assert functions.kilometers_to_miles(40) == approx(24.855999999999998) @should_print def test_sales_tax_1(output): functions.sales_tax(1) assert output == """ Purchase Amount: 1 State Sales Tax: 0.04 County Sales Tax: 0.02 Total Sales Tax: 0.06 Total Cost: 1.06 """ @should_print def test_sales_tax_99_99(output): functions.sales_tax(99.99) assert output == """ Purchase Amount: 99.99 State Sales Tax: 3.9996 County Sales Tax: 1.9998 Total Sales Tax: 5.9994 Total Cost: 105.98939999999999 """ @should_print def test_sales_tax_5_95(output): functions.sales_tax(5.95) assert output == """ Purchase Amount: 5.95 State Sales Tax: 0.23800000000000002 County Sales Tax: 0.11900000000000001 Total Sales Tax: 0.35700000000000004 Total Cost: 6.307 """ def test_min_insurance(): assert functions.min_insurance(100000) == approx(80000.0) assert functions.min_insurance(123456789) == approx(98765431.2) assert functions.min_insurance(0) == approx(0.0) assert functions.min_insurance(-54317890) == approx(-43454312.0) @should_print def test_property_tax_10000(output): functions.property_tax(10000) assert output == ''' Assessment Value: 6000.0 Property Tax: 38.4 ''' @should_print def test_property_tax_99999_95(output): functions.property_tax(99999.95) assert output == ''' Assessment Value: 59999.969999999994 Property Tax: 383.999808 ''' def test_bmi(): assert functions.bmi(160, 67) == approx(25.05680552) assert functions.bmi(200, 72) == approx(27.12191358) assert functions.bmi(120, 60) == approx(23.43333333) def test_calories(): assert functions.calories(5, 20) == 125 assert functions.calories(1, 1) == 13 def test_earnings(): assert functions.earnings(100, 100, 100) == 3600 assert functions.earnings(50, 75, 100) == 2550 assert functions.earnings(0, 1000, 79) == 12711 @should_print def test_paint_job_estimator(output): functions.paint_job_estimator(50, 10) assert output == ''' Gallons of paint required: 0.43478260869565216 Hours of labor required: 3.4782608695652173 Cost of paint: 4.3478260869565215 Cost of labor: 69.56521739130434 Total Cost: 73.91304347826086 ''' @should_print def test_paint_job_estimator_2(output): functions.paint_job_estimator(750, 15.95) assert output == ''' Gallons of paint required: 6.521739130434782 Hours of labor required: 52.17391304347826 Cost of paint: 104.02173913043477 Cost of labor: 1043.4782608695652 Total Cost: 1147.5 ''' @should_print def test_monthly_sales_tax(output): functions.monthly_sales_tax(123456.79) assert output == ''' Monthly sales: 123456.79 State sales tax: 4938.2716 County sales tax: 2469.1358 Total sales tax: 7407.4074 ''' @should_print def test_monthly_sales_tax_2(output): functions.monthly_sales_tax(4321567.21) assert output == ''' Monthly sales: 4321567.21 State sales tax: 172862.6884 County sales tax: 86431.3442 Total sales tax: 259294.03260000004 '''
[((35, 4, 35, 26), 'functions.sales_tax', 'functions.sales_tax', ({(35, 24, 35, 25): '(1)'}, {}), '(1)', False, 'import functions\n'), ((48, 4, 48, 30), 'functions.sales_tax', 'functions.sales_tax', ({(48, 24, 48, 29): '(99.99)'}, {}), '(99.99)', False, 'import functions\n'), ((61, 4, 61, 29), 'functions.sales_tax', 'functions.sales_tax', ({(61, 24, 61, 28): '(5.95)'}, {}), '(5.95)', False, 'import functions\n'), ((81, 4, 81, 33), 'functions.property_tax', 'functions.property_tax', ({(81, 27, 81, 32): '(10000)'}, {}), '(10000)', False, 'import functions\n'), ((91, 4, 91, 36), 'functions.property_tax', 'functions.property_tax', ({(91, 27, 91, 35): '(99999.95)'}, {}), '(99999.95)', False, 'import functions\n'), ((118, 4, 118, 41), 'functions.paint_job_estimator', 'functions.paint_job_estimator', ({(118, 34, 118, 36): '(50)', (118, 38, 118, 40): '(10)'}, {}), '(50, 10)', False, 'import functions\n'), ((131, 4, 131, 45), 'functions.paint_job_estimator', 'functions.paint_job_estimator', ({(131, 34, 131, 37): '(750)', (131, 39, 131, 44): '(15.95)'}, {}), '(750, 15.95)', False, 'import functions\n'), ((144, 4, 144, 42), 'functions.monthly_sales_tax', 'functions.monthly_sales_tax', ({(144, 32, 144, 41): '(123456.79)'}, {}), '(123456.79)', False, 'import functions\n'), ((156, 4, 156, 43), 'functions.monthly_sales_tax', 'functions.monthly_sales_tax', ({(156, 32, 156, 42): '(4321567.21)'}, {}), '(4321567.21)', False, 'import functions\n'), ((7, 11, 7, 39), 'functions.add_em_up', 'functions.add_em_up', ({(7, 31, 7, 32): '(1)', (7, 34, 7, 35): '(2)', (7, 37, 7, 38): '(3)'}, {}), '(1, 2, 3)', False, 'import functions\n'), ((8, 11, 8, 39), 'functions.add_em_up', 'functions.add_em_up', ({(8, 31, 8, 32): '(4)', (8, 34, 8, 35): '(5)', (8, 37, 8, 38): '(6)'}, {}), '(4, 5, 6)', False, 'import functions\n'), ((12, 11, 12, 44), 'functions.sub_sub_hubbub', 'functions.sub_sub_hubbub', ({(12, 36, 12, 37): '(1)', (12, 39, 12, 40): '(2)', (12, 42, 12, 43): '(3)'}, {}), '(1, 2, 3)', False, 'import functions\n'), ((16, 11, 16, 38), 'functions.square_area', 'functions.square_area', ({(16, 33, 16, 34): '(5)', (16, 36, 16, 37): '(5)'}, {}), '(5, 5)', False, 'import functions\n'), ((17, 11, 17, 38), 'functions.square_area', 'functions.square_area', ({(17, 33, 17, 34): '(3)', (17, 36, 17, 37): '(5)'}, {}), '(3, 5)', False, 'import functions\n'), ((18, 11, 18, 38), 'functions.square_area', 'functions.square_area', ({(18, 33, 18, 34): '(2)', (18, 36, 18, 37): '(2)'}, {}), '(2, 2)', False, 'import functions\n'), ((22, 11, 22, 35), 'functions.circle_area', 'functions.circle_area', ({(22, 33, 22, 34): '(1)'}, {}), '(1)', False, 'import functions\n'), ((22, 39, 22, 51), 'pytest.approx', 'approx', ({(22, 46, 22, 50): '(3.14)'}, {}), '(3.14)', False, 'from pytest import approx\n'), ((23, 11, 23, 35), 'functions.circle_area', 'functions.circle_area', ({(23, 33, 23, 34): '(5)'}, {}), '(5)', False, 'import functions\n'), ((23, 39, 23, 51), 'pytest.approx', 'approx', ({(23, 46, 23, 50): '(78.5)'}, {}), '(78.5)', False, 'from pytest import approx\n'), ((27, 11, 27, 43), 'functions.kilometers_to_miles', 'functions.kilometers_to_miles', ({(27, 41, 27, 42): '(1)'}, {}), '(1)', False, 'import functions\n'), ((27, 47, 27, 61), 'pytest.approx', 'approx', ({(27, 54, 27, 60): '(0.6214)'}, {}), '(0.6214)', False, 'from pytest import approx\n'), ((28, 11, 28, 44), 'functions.kilometers_to_miles', 'functions.kilometers_to_miles', ({(28, 41, 28, 43): '(0.5)'}, {}), '(0.5)', False, 'import functions\n'), ((28, 48, 28, 62), 'pytest.approx', 
'approx', ({(28, 55, 28, 61): '(0.3107)'}, {}), '(0.3107)', False, 'from pytest import approx\n'), ((29, 11, 29, 43), 'functions.kilometers_to_miles', 'functions.kilometers_to_miles', ({(29, 41, 29, 42): '(0)'}, {}), '(0)', False, 'import functions\n'), ((29, 47, 29, 58), 'pytest.approx', 'approx', ({(29, 54, 29, 57): '(0.0)'}, {}), '(0.0)', False, 'from pytest import approx\n'), ((30, 11, 30, 44), 'functions.kilometers_to_miles', 'functions.kilometers_to_miles', ({(30, 41, 30, 43): '(40)'}, {}), '(40)', False, 'import functions\n'), ((30, 48, 30, 74), 'pytest.approx', 'approx', ({(30, 55, 30, 73): '(24.855999999999998)'}, {}), '(24.855999999999998)', False, 'from pytest import approx\n'), ((73, 11, 73, 42), 'functions.min_insurance', 'functions.min_insurance', ({(73, 35, 73, 41): '(100000)'}, {}), '(100000)', False, 'import functions\n'), ((73, 46, 73, 61), 'pytest.approx', 'approx', ({(73, 53, 73, 60): '(80000.0)'}, {}), '(80000.0)', False, 'from pytest import approx\n'), ((74, 11, 74, 45), 'functions.min_insurance', 'functions.min_insurance', ({(74, 35, 74, 44): '(123456789)'}, {}), '(123456789)', False, 'import functions\n'), ((74, 49, 74, 67), 'pytest.approx', 'approx', ({(74, 56, 74, 66): '(98765431.2)'}, {}), '(98765431.2)', False, 'from pytest import approx\n'), ((75, 11, 75, 37), 'functions.min_insurance', 'functions.min_insurance', ({(75, 35, 75, 36): '(0)'}, {}), '(0)', False, 'import functions\n'), ((75, 41, 75, 52), 'pytest.approx', 'approx', ({(75, 48, 75, 51): '(0.0)'}, {}), '(0.0)', False, 'from pytest import approx\n'), ((76, 11, 76, 45), 'functions.min_insurance', 'functions.min_insurance', ({(76, 35, 76, 44): '(-54317890)'}, {}), '(-54317890)', False, 'import functions\n'), ((76, 49, 76, 68), 'pytest.approx', 'approx', ({(76, 56, 76, 67): '(-43454312.0)'}, {}), '(-43454312.0)', False, 'from pytest import approx\n'), ((100, 11, 100, 33), 'functions.bmi', 'functions.bmi', ({(100, 25, 100, 28): '(160)', (100, 30, 100, 32): '(67)'}, {}), '(160, 67)', False, 'import functions\n'), ((100, 37, 100, 56), 'pytest.approx', 'approx', ({(100, 44, 100, 55): '(25.05680552)'}, {}), '(25.05680552)', False, 'from pytest import approx\n'), ((101, 11, 101, 33), 'functions.bmi', 'functions.bmi', ({(101, 25, 101, 28): '(200)', (101, 30, 101, 32): '(72)'}, {}), '(200, 72)', False, 'import functions\n'), ((101, 37, 101, 56), 'pytest.approx', 'approx', ({(101, 44, 101, 55): '(27.12191358)'}, {}), '(27.12191358)', False, 'from pytest import approx\n'), ((102, 11, 102, 33), 'functions.bmi', 'functions.bmi', ({(102, 25, 102, 28): '(120)', (102, 30, 102, 32): '(60)'}, {}), '(120, 60)', False, 'import functions\n'), ((102, 37, 102, 56), 'pytest.approx', 'approx', ({(102, 44, 102, 55): '(23.43333333)'}, {}), '(23.43333333)', False, 'from pytest import approx\n'), ((106, 11, 106, 36), 'functions.calories', 'functions.calories', ({(106, 30, 106, 31): '(5)', (106, 33, 106, 35): '(20)'}, {}), '(5, 20)', False, 'import functions\n'), ((107, 11, 107, 35), 'functions.calories', 'functions.calories', ({(107, 30, 107, 31): '(1)', (107, 33, 107, 34): '(1)'}, {}), '(1, 1)', False, 'import functions\n'), ((111, 11, 111, 44), 'functions.earnings', 'functions.earnings', ({(111, 30, 111, 33): '(100)', (111, 35, 111, 38): '(100)', (111, 40, 111, 43): '(100)'}, {}), '(100, 100, 100)', False, 'import functions\n'), ((112, 11, 112, 42), 'functions.earnings', 'functions.earnings', ({(112, 30, 112, 32): '(50)', (112, 34, 112, 36): '(75)', (112, 38, 112, 41): '(100)'}, {}), '(50, 75, 100)', False, 'import functions\n'), 
((113, 11, 113, 42), 'functions.earnings', 'functions.earnings', ({(113, 30, 113, 31): '(0)', (113, 33, 113, 37): '(1000)', (113, 39, 113, 41): '(79)'}, {}), '(0, 1000, 79)', False, 'import functions\n')]
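The functions module exercised above is not part of this record; the sketch below gives implementations consistent with the assertions (the constants 3.14, 0.6214 and 0.8 are inferred from the expected values, not taken from the real source).

# functions.py -- inferred from the tests, not the original implementation
def add_em_up(a, b, c):
    return a + b + c

def square_area(width, height):
    return width * height

def circle_area(radius):
    return 3.14 * radius ** 2

def kilometers_to_miles(km):
    return km * 0.6214

def min_insurance(replacement_cost):
    return replacement_cost * 0.8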
apabaad/django_ecommerce
src/products/admin.py
ca04143477b306413158e5311062563f7418700c
from django.contrib import admin from .models import Product admin.site.register(Product)
[((4, 0, 4, 28), 'django.contrib.admin.site.register', 'admin.site.register', ({(4, 20, 4, 27): 'Product'}, {}), '(Product)', False, 'from django.contrib import admin\n')]
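An alternative to the plain register call above, should list columns or search ever be needed; the field names are assumptions, not taken from the Product model.

from django.contrib import admin
from .models import Product

@admin.register(Product)
class ProductAdmin(admin.ModelAdmin):
    list_display = ('title', 'price')   # assumed field names
    search_fields = ('title',)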
beshrkayali/content-io
cio/plugins/txt.py
ae44aa4c4eba2234f940ca9d7a4bb310e25075b3
# coding=utf-8 from __future__ import unicode_literals from .base import BasePlugin class TextPlugin(BasePlugin): ext = 'txt'
[]
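The plugin above is essentially a file-extension declaration on top of BasePlugin; a hypothetical sibling plugin would follow the same pattern.

# Hypothetical plugin following the same pattern, e.g. for Markdown sources.
from .base import BasePlugin

class MarkdownPlugin(BasePlugin):
    ext = 'md'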
thejoeejoee/SUI-MIT-VUT-2020-2021
ml-scripts/dump-data-to-learn.py
aee307aa772c5a0e97578da5ebedd3e2cd39ab91
#!/usr/bin/env python3
# Project: VUT FIT SUI Project - Dice Wars
# Authors:
#     - Josef Kolář <[email protected]>
#     - Dominik Harmim <[email protected]>
#     - Petr Kapoun <[email protected]>
#     - Jindřich Šesták <[email protected]>
# Year: 2020
# Description: Generates game configurations.

import random
import sys
from argparse import ArgumentParser
import time
from signal import signal, SIGCHLD

from utils import run_ai_only_game, BoardDefinition


parser = ArgumentParser(prog='Dice_Wars')
parser.add_argument('-p', '--port', help="Server port", type=int, default=5005)
parser.add_argument('-a', '--address', help="Server address", default='127.0.0.1')

procs = []


def signal_handler(signum, frame):
    """ Handler for SIGCHLD signal that terminates server and clients. """
    for p in procs:
        try:
            p.kill()
        except ProcessLookupError:
            pass


PLAYING_AIs = [
    'xkolar71_orig',
    'xkolar71_2',
    'xkolar71_3',
    'xkolar71_4',
]


def board_definitions():
    while True:
        random.seed(int(time.time()))
        yield BoardDefinition(random.randint(1, 10 ** 10), random.randint(1, 10 ** 10), random.randint(1, 10 ** 10))


def main():
    args = parser.parse_args()

    signal(SIGCHLD, signal_handler)

    boards_played = 0

    try:
        for board_definition in board_definitions():
            boards_played += 1

            run_ai_only_game(
                args.port, args.address, procs, PLAYING_AIs, board_definition,
                fixed=random.randint(1, 10 ** 10),
                client_seed=random.randint(1, 10 ** 10),
                debug=True, logdir='logs',
            )

            print(f'Played {boards_played} games.', file=sys.stderr)
    except (Exception, KeyboardInterrupt) as e:
        sys.stderr.write("Breaking the tournament because of {}\n".format(repr(e)))
        for p in procs:
            p.kill()
        raise


if __name__ == '__main__':
    main()
[((20, 9, 20, 41), 'argparse.ArgumentParser', 'ArgumentParser', (), '', False, 'from argparse import ArgumentParser\n'), ((53, 4, 53, 35), 'signal.signal', 'signal', ({(53, 11, 53, 18): 'SIGCHLD', (53, 20, 53, 34): 'signal_handler'}, {}), '(SIGCHLD, signal_handler)', False, 'from signal import signal, SIGCHLD\n'), ((46, 24, 46, 35), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((47, 30, 47, 57), 'random.randint', 'random.randint', ({(47, 45, 47, 46): '(1)', (47, 48, 47, 56): '(10 ** 10)'}, {}), '(1, 10 ** 10)', False, 'import random\n'), ((47, 59, 47, 86), 'random.randint', 'random.randint', ({(47, 74, 47, 75): '(1)', (47, 77, 47, 85): '(10 ** 10)'}, {}), '(1, 10 ** 10)', False, 'import random\n'), ((47, 88, 47, 115), 'random.randint', 'random.randint', ({(47, 103, 47, 104): '(1)', (47, 106, 47, 114): '(10 ** 10)'}, {}), '(1, 10 ** 10)', False, 'import random\n'), ((63, 22, 63, 49), 'random.randint', 'random.randint', ({(63, 37, 63, 38): '(1)', (63, 40, 63, 48): '(10 ** 10)'}, {}), '(1, 10 ** 10)', False, 'import random\n'), ((64, 28, 64, 55), 'random.randint', 'random.randint', ({(64, 43, 64, 44): '(1)', (64, 46, 64, 54): '(10 ** 10)'}, {}), '(1, 10 ** 10)', False, 'import random\n')]
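The script above seeds every board from the clock; a single reproducible game can be run by pinning the seeds instead. This mirrors the call made in main(), with the script's default port and address filled in literally.

board = BoardDefinition(42, 42, 42)              # fixed seeds instead of time-based ones
run_ai_only_game(
    5005, '127.0.0.1', procs, PLAYING_AIs, board,
    fixed=42, client_seed=42,
    debug=True, logdir='logs',
)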
davidszotten/pdbpp
testing/conftest.py
3d90d83902e1d19840d0419362a41c654f93251e
import functools import sys from contextlib import contextmanager import pytest _orig_trace = None def pytest_configure(): global _orig_trace _orig_trace = sys.gettrace() @pytest.fixture(scope="session", autouse=True) def term(): """Configure TERM for predictable output from Pygments.""" from _pytest.monkeypatch import MonkeyPatch m = MonkeyPatch() m.setenv("TERM", "xterm-256color") yield m m.undo() # if _orig_trace and not hasattr(sys, "pypy_version_info"): # Fails with PyPy2 (https://travis-ci.org/antocuni/pdb/jobs/509624590)?! @pytest.fixture(autouse=True) def restore_settrace(monkeypatch): """(Re)store sys.gettrace after test run. This is required to re-enable coverage tracking. """ assert sys.gettrace() is _orig_trace orig_settrace = sys.settrace # Wrap sys.settrace to restore original tracing function (coverage) # with `sys.settrace(None)`. def settrace(func): if func is None: orig_settrace(_orig_trace) else: orig_settrace(func) monkeypatch.setattr("sys.settrace", settrace) yield newtrace = sys.gettrace() if newtrace is not _orig_trace: sys.settrace(_orig_trace) assert newtrace is None @pytest.fixture(scope="session") def _tmphome_path(tmpdir_factory): return tmpdir_factory.mktemp("tmphome") @pytest.fixture(autouse=sys.version_info < (3, 6)) def tmphome(request, monkeypatch): """Set up HOME in a temporary directory. This ignores any real ~/.pdbrc.py then, and seems to be required also with linecache on py27, where it would read contents from ~/.pdbrc?!. """ # Use tmpdir from testdir, if it is used. if "testdir" in request.fixturenames: tmpdir = request.getfixturevalue("testdir").tmpdir else: tmpdir = request.getfixturevalue("_tmphome_path") monkeypatch.setenv("HOME", str(tmpdir)) monkeypatch.setenv("USERPROFILE", str(tmpdir)) with tmpdir.as_cwd(): yield tmpdir @pytest.fixture(params=("pyrepl", "readline"), scope="session") def readline_param(request): from _pytest.monkeypatch import MonkeyPatch m = MonkeyPatch() if request.param == "pyrepl": try: import pyrepl.readline # noqa: F401 except ImportError as exc: pytest.skip(msg="pyrepl not available: {}".format(exc)) m.setattr("fancycompleter.DefaultConfig.prefer_pyrepl", True) else: m.setattr("fancycompleter.DefaultConfig.prefer_pyrepl", False) return request.param @pytest.fixture def monkeypatch_readline(request, monkeypatch, readline_param): """Patch readline to return given results.""" def inner(line, begidx, endidx): if readline_param == "pyrepl": readline = "pyrepl.readline" else: assert readline_param == "readline" readline = "readline" monkeypatch.setattr("%s.get_line_buffer" % readline, lambda: line) monkeypatch.setattr("%s.get_begidx" % readline, lambda: begidx) monkeypatch.setattr("%s.get_endidx" % readline, lambda: endidx) return inner @pytest.fixture def monkeypatch_pdb_methods(monkeypatch): def mock(method, *args, **kwargs): print("=== %s(%s, %s)" % (method, args, kwargs)) for mock_method in ("set_trace", "set_continue"): monkeypatch.setattr( "pdb.pdb.Pdb.%s" % mock_method, functools.partial(mock, mock_method) ) @pytest.fixture def monkeypatch_importerror(monkeypatch): @contextmanager def cm(mocked_imports): orig_import = __import__ def import_mock(name, *args): if name in mocked_imports: raise ImportError return orig_import(name, *args) with monkeypatch.context() as m: if sys.version_info >= (3,): m.setattr('builtins.__import__', import_mock) else: m.setattr('__builtin__.__import__', import_mock) yield m return cm
[((15, 1, 15, 46), 'pytest.fixture', 'pytest.fixture', (), '', False, 'import pytest\n'), ((28, 1, 28, 29), 'pytest.fixture', 'pytest.fixture', (), '', False, 'import pytest\n'), ((55, 1, 55, 32), 'pytest.fixture', 'pytest.fixture', (), '', False, 'import pytest\n'), ((60, 1, 60, 50), 'pytest.fixture', 'pytest.fixture', (), '', False, 'import pytest\n'), ((81, 1, 81, 63), 'pytest.fixture', 'pytest.fixture', (), '', False, 'import pytest\n'), ((12, 18, 12, 32), 'sys.gettrace', 'sys.gettrace', ({}, {}), '()', False, 'import sys\n'), ((20, 8, 20, 21), '_pytest.monkeypatch.MonkeyPatch', 'MonkeyPatch', ({}, {}), '()', False, 'from _pytest.monkeypatch import MonkeyPatch\n'), ((49, 15, 49, 29), 'sys.gettrace', 'sys.gettrace', ({}, {}), '()', False, 'import sys\n'), ((85, 8, 85, 21), '_pytest.monkeypatch.MonkeyPatch', 'MonkeyPatch', ({}, {}), '()', False, 'from _pytest.monkeypatch import MonkeyPatch\n'), ((34, 11, 34, 25), 'sys.gettrace', 'sys.gettrace', ({}, {}), '()', False, 'import sys\n'), ((51, 8, 51, 33), 'sys.settrace', 'sys.settrace', ({(51, 21, 51, 32): '_orig_trace'}, {}), '(_orig_trace)', False, 'import sys\n'), ((122, 44, 122, 80), 'functools.partial', 'functools.partial', ({(122, 62, 122, 66): 'mock', (122, 68, 122, 79): 'mock_method'}, {}), '(mock, mock_method)', False, 'import functools\n')]
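A sketch of how a test might consume the monkeypatch_readline fixture defined above; it patches whichever readline implementation is active so that completion code sees a fixed input line. The test name is hypothetical.

def test_line_buffer_is_patched(monkeypatch_readline, readline_param):
    # Pretend the user typed "sys.path" with the cursor spanning the whole word.
    monkeypatch_readline("sys.path", 0, 8)
    if readline_param == "pyrepl":
        from pyrepl import readline
    else:
        import readline
    assert readline.get_line_buffer() == "sys.path"
    assert readline.get_begidx() == 0
    assert readline.get_endidx() == 8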
utiasSTARS/thing-gym-ros
thing_gym_ros/envs/utils.py
6e8a034ac0d1686f29bd29e2aaa63f39a5b188d4
""" Various generic env utilties. """ def center_crop_img(img, crop_zoom): """ crop_zoom is amount to "zoom" into the image. E.g. 2.0 would cut out half of the width, half of the height, and only give the center. """ raw_height, raw_width = img.shape[:2] center = raw_height // 2, raw_width // 2 crop_size = raw_height // crop_zoom, raw_width // crop_zoom min_y, max_y = int(center[0] - crop_size[0] // 2), int(center[0] + crop_size[0] // 2) min_x, max_x = int(center[1] - crop_size[1] // 2), int(center[1] + crop_size[1] // 2) img_cropped = img[min_y:max_y, min_x:max_x] return img_cropped def crop_img(img, relative_corners): """ relative_corners are floats between 0 and 1 designating where the corners of a crop box should be ([[top_left_x, top_left_y], [bottom_right_x, bottom_right_y]]). e.g. [[0, 0], [1, 1]] would be the full image, [[0.5, 0.5], [1, 1]] would be bottom right.""" rc = relative_corners raw_height, raw_width = img.shape[:2] top_left_pix = [int(rc[0][0] * raw_width), int(rc[0][1] * raw_height)] bottom_right_pix = [int(rc[1][0] * raw_width), int(rc[1][1] * raw_height)] img_cropped = img[top_left_pix[1]:bottom_right_pix[1], top_left_pix[0]:bottom_right_pix[0]] return img_cropped
[]
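A quick usage sketch with a dummy image; the import path follows the repository layout shown above, and the shapes in the comments are what the arithmetic works out to.

import numpy as np
from thing_gym_ros.envs.utils import center_crop_img, crop_img

img = np.zeros((480, 640, 3), dtype=np.uint8)
center = center_crop_img(img, crop_zoom=2.0)        # -> shape (240, 320, 3)
corner = crop_img(img, [[0.5, 0.5], [1.0, 1.0]])    # bottom-right quarter -> (240, 320, 3)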
arya-s/sentry
tests/sentry/utils/http/tests.py
959ffbd37cb4a7821f7a2676c137be54cad171a8
# -*- coding: utf-8 -*- from __future__ import absolute_import import mock from exam import fixture from sentry import options from sentry.models import Project from sentry.testutils import TestCase from sentry.utils.http import ( is_same_domain, is_valid_origin, get_origins, absolute_uri, is_valid_ip, ) class AbsoluteUriTest(TestCase): def test_without_path(self): assert absolute_uri() == options.get('system.url-prefix') def test_with_path(self): assert absolute_uri('/foo/bar') == '%s/foo/bar' % (options.get('system.url-prefix'),) class SameDomainTestCase(TestCase): def test_is_same_domain(self): url1 = 'http://example.com/foo/bar' url2 = 'http://example.com/biz/baz' self.assertTrue(is_same_domain(url1, url2)) def test_is_same_domain_diff_scheme(self): url1 = 'https://example.com/foo/bar' url2 = 'http://example.com/biz/baz' self.assertTrue(is_same_domain(url1, url2)) def test_is_same_domain_diff_port(self): url1 = 'http://example.com:80/foo/bar' url2 = 'http://example.com:13/biz/baz' self.assertFalse(is_same_domain(url1, url2)) class GetOriginsTestCase(TestCase): def test_project_default(self): project = Project.objects.get() with self.settings(SENTRY_ALLOW_ORIGIN=None): result = get_origins(project) self.assertEquals(result, frozenset(['*'])) def test_project(self): project = Project.objects.get() project.update_option('sentry:origins', [u'http://foo.example']) with self.settings(SENTRY_ALLOW_ORIGIN=None): result = get_origins(project) self.assertEquals(result, frozenset(['http://foo.example'])) def test_project_and_setting(self): project = Project.objects.get() project.update_option('sentry:origins', [u'http://foo.example']) with self.settings(SENTRY_ALLOW_ORIGIN='http://example.com'): result = get_origins(project) self.assertEquals(result, frozenset(['http://foo.example', 'http://example.com'])) def test_setting_empty(self): with self.settings(SENTRY_ALLOW_ORIGIN=None): result = get_origins(None) self.assertEquals(result, frozenset([])) def test_setting_all(self): with self.settings(SENTRY_ALLOW_ORIGIN='*'): result = get_origins(None) self.assertEquals(result, frozenset(['*'])) def test_setting_uri(self): with self.settings(SENTRY_ALLOW_ORIGIN='http://example.com'): result = get_origins(None) self.assertEquals(result, frozenset(['http://example.com'])) class IsValidOriginTestCase(TestCase): @fixture def project(self): return mock.Mock() def isValidOrigin(self, origin, inputs): with mock.patch('sentry.utils.http.get_origins') as get_origins: get_origins.return_value = inputs result = is_valid_origin(origin, self.project) get_origins.assert_called_once_with(self.project) return result def test_global_wildcard_matches_domain(self): result = self.isValidOrigin('http://example.com', ['*']) self.assertEquals(result, True) def test_domain_wildcard_matches_domain(self): result = self.isValidOrigin('http://example.com', ['*.example.com']) self.assertEquals(result, True) def test_domain_wildcard_matches_domain_with_port(self): result = self.isValidOrigin('http://example.com:80', ['*.example.com']) self.assertEquals(result, True) def test_domain_wildcard_matches_subdomain(self): result = self.isValidOrigin('http://foo.example.com', ['*.example.com']) self.assertEquals(result, True) def test_domain_wildcard_matches_subdomain_with_port(self): result = self.isValidOrigin('http://foo.example.com:80', ['*.example.com']) self.assertEquals(result, True) def test_domain_wildcard_does_not_match_others(self): result = self.isValidOrigin('http://foo.com', ['*.example.com']) self.assertEquals(result, False) 
def test_domain_wildcard_matches_domain_with_path(self): result = self.isValidOrigin('http://foo.example.com/foo/bar', ['*.example.com']) self.assertEquals(result, True) def test_base_domain_matches_domain(self): result = self.isValidOrigin('http://example.com', ['example.com']) self.assertEquals(result, True) def test_base_domain_matches_domain_with_path(self): result = self.isValidOrigin('http://example.com/foo/bar', ['example.com']) self.assertEquals(result, True) def test_base_domain_matches_domain_with_port(self): result = self.isValidOrigin('http://example.com:80', ['example.com']) self.assertEquals(result, True) def test_base_domain_matches_domain_with_explicit_port(self): result = self.isValidOrigin('http://example.com:80', ['example.com:80']) assert result is True def test_base_domain_does_not_match_domain_with_invalid_port(self): result = self.isValidOrigin('http://example.com:80', ['example.com:443']) assert result is False def test_base_domain_does_not_match_subdomain(self): result = self.isValidOrigin('http://example.com', ['foo.example.com']) self.assertEquals(result, False) def test_full_uri_match(self): result = self.isValidOrigin('http://example.com', ['http://example.com']) self.assertEquals(result, True) def test_full_uri_match_requires_scheme(self): result = self.isValidOrigin('https://example.com', ['http://example.com']) self.assertEquals(result, False) def test_full_uri_match_does_not_require_port(self): result = self.isValidOrigin('http://example.com:80', ['http://example.com']) self.assertEquals(result, True) def test_partial_uri_match(self): result = self.isValidOrigin('http://example.com/foo/bar', ['http://example.com']) self.assertEquals(result, True) def test_null_valid_with_global(self): result = self.isValidOrigin('null', ['*']) self.assertEquals(result, True) def test_null_invalid_graceful_with_domains(self): result = self.isValidOrigin('null', ['http://example.com']) self.assertEquals(result, False) def test_custom_protocol_with_location(self): result = self.isValidOrigin('sp://custom-thing/foo/bar', ['sp://custom-thing']) assert result is True result = self.isValidOrigin('sp://custom-thing-two/foo/bar', ['sp://custom-thing']) assert result is False def test_custom_protocol_without_location(self): result = self.isValidOrigin('sp://custom-thing/foo/bar', ['sp://*']) assert result is True result = self.isValidOrigin('dp://custom-thing/foo/bar', ['sp://']) assert result is False def test_custom_protocol_with_domainish_match(self): result = self.isValidOrigin('sp://custom-thing.foobar/foo/bar', ['sp://*.foobar']) assert result is True result = self.isValidOrigin('sp://custom-thing.bizbaz/foo/bar', ['sp://*.foobar']) assert result is False def test_unicode(self): result = self.isValidOrigin(u'http://l\xf8calhost', [u'*.l\xf8calhost']) assert result is True def test_punycode(self): result = self.isValidOrigin('http://xn--lcalhost-54a', [u'*.l\xf8calhost']) assert result is True result = self.isValidOrigin('http://xn--lcalhost-54a', [u'*.xn--lcalhost-54a']) assert result is True result = self.isValidOrigin(u'http://l\xf8calhost', [u'*.xn--lcalhost-54a']) assert result is True result = self.isValidOrigin('http://l\xc3\xb8calhost', [u'*.xn--lcalhost-54a']) assert result is True result = self.isValidOrigin('http://xn--lcalhost-54a', [u'l\xf8calhost']) assert result is True result = self.isValidOrigin('http://xn--lcalhost-54a:80', [u'l\xf8calhost:80']) assert result is True def test_unparseable_uri(self): result = self.isValidOrigin('http://example.com', ['.']) assert 
result is False class IsValidIPTestCase(TestCase): def is_valid_ip(self, ip, inputs): self.project.update_option('sentry:blacklisted_ips', inputs) return is_valid_ip(ip, self.project) def test_not_in_blacklist(self): assert self.is_valid_ip('127.0.0.1', []) assert self.is_valid_ip('127.0.0.1', ['0.0.0.0', '192.168.1.1', '10.0.0.0/8']) def test_match_blacklist(self): assert not self.is_valid_ip('127.0.0.1', ['127.0.0.1']) assert not self.is_valid_ip('127.0.0.1', ['0.0.0.0', '127.0.0.1', '192.168.1.1']) def test_match_blacklist_range(self): assert not self.is_valid_ip('127.0.0.1', ['127.0.0.0/8']) assert not self.is_valid_ip('127.0.0.1', ['0.0.0.0', '127.0.0.0/8', '192.168.1.0/8'])
[((47, 18, 47, 39), 'sentry.models.Project.objects.get', 'Project.objects.get', ({}, {}), '()', False, 'from sentry.models import Project\n'), ((54, 18, 54, 39), 'sentry.models.Project.objects.get', 'Project.objects.get', ({}, {}), '()', False, 'from sentry.models import Project\n'), ((62, 18, 62, 39), 'sentry.models.Project.objects.get', 'Project.objects.get', ({}, {}), '()', False, 'from sentry.models import Project\n'), ((88, 15, 88, 26), 'mock.Mock', 'mock.Mock', ({}, {}), '()', False, 'import mock\n'), ((220, 15, 220, 44), 'sentry.utils.http.is_valid_ip', 'is_valid_ip', ({(220, 27, 220, 29): 'ip', (220, 31, 220, 43): 'self.project'}, {}), '(ip, self.project)', False, 'from sentry.utils.http import is_same_domain, is_valid_origin, get_origins, absolute_uri, is_valid_ip\n'), ((19, 15, 19, 29), 'sentry.utils.http.absolute_uri', 'absolute_uri', ({}, {}), '()', False, 'from sentry.utils.http import is_same_domain, is_valid_origin, get_origins, absolute_uri, is_valid_ip\n'), ((19, 33, 19, 65), 'sentry.options.get', 'options.get', ({(19, 45, 19, 64): '"""system.url-prefix"""'}, {}), "('system.url-prefix')", False, 'from sentry import options\n'), ((22, 15, 22, 39), 'sentry.utils.http.absolute_uri', 'absolute_uri', ({(22, 28, 22, 38): '"""/foo/bar"""'}, {}), "('/foo/bar')", False, 'from sentry.utils.http import is_same_domain, is_valid_origin, get_origins, absolute_uri, is_valid_ip\n'), ((30, 24, 30, 50), 'sentry.utils.http.is_same_domain', 'is_same_domain', ({(30, 39, 30, 43): 'url1', (30, 45, 30, 49): 'url2'}, {}), '(url1, url2)', False, 'from sentry.utils.http import is_same_domain, is_valid_origin, get_origins, absolute_uri, is_valid_ip\n'), ((36, 24, 36, 50), 'sentry.utils.http.is_same_domain', 'is_same_domain', ({(36, 39, 36, 43): 'url1', (36, 45, 36, 49): 'url2'}, {}), '(url1, url2)', False, 'from sentry.utils.http import is_same_domain, is_valid_origin, get_origins, absolute_uri, is_valid_ip\n'), ((42, 25, 42, 51), 'sentry.utils.http.is_same_domain', 'is_same_domain', ({(42, 40, 42, 44): 'url1', (42, 46, 42, 50): 'url2'}, {}), '(url1, url2)', False, 'from sentry.utils.http import is_same_domain, is_valid_origin, get_origins, absolute_uri, is_valid_ip\n'), ((50, 21, 50, 41), 'sentry.utils.http.get_origins', 'get_origins', ({(50, 33, 50, 40): 'project'}, {}), '(project)', False, 'from sentry.utils.http import is_same_domain, is_valid_origin, get_origins, absolute_uri, is_valid_ip\n'), ((58, 21, 58, 41), 'sentry.utils.http.get_origins', 'get_origins', ({(58, 33, 58, 40): 'project'}, {}), '(project)', False, 'from sentry.utils.http import is_same_domain, is_valid_origin, get_origins, absolute_uri, is_valid_ip\n'), ((66, 21, 66, 41), 'sentry.utils.http.get_origins', 'get_origins', ({(66, 33, 66, 40): 'project'}, {}), '(project)', False, 'from sentry.utils.http import is_same_domain, is_valid_origin, get_origins, absolute_uri, is_valid_ip\n'), ((71, 21, 71, 38), 'sentry.utils.http.get_origins', 'get_origins', ({(71, 33, 71, 37): 'None'}, {}), '(None)', False, 'from sentry.utils.http import is_same_domain, is_valid_origin, get_origins, absolute_uri, is_valid_ip\n'), ((76, 21, 76, 38), 'sentry.utils.http.get_origins', 'get_origins', ({(76, 33, 76, 37): 'None'}, {}), '(None)', False, 'from sentry.utils.http import is_same_domain, is_valid_origin, get_origins, absolute_uri, is_valid_ip\n'), ((81, 21, 81, 38), 'sentry.utils.http.get_origins', 'get_origins', ({(81, 33, 81, 37): 'None'}, {}), '(None)', False, 'from sentry.utils.http import is_same_domain, is_valid_origin, get_origins, 
absolute_uri, is_valid_ip\n'), ((91, 13, 91, 56), 'mock.patch', 'mock.patch', ({(91, 24, 91, 55): '"""sentry.utils.http.get_origins"""'}, {}), "('sentry.utils.http.get_origins')", False, 'import mock\n'), ((93, 21, 93, 58), 'sentry.utils.http.is_valid_origin', 'is_valid_origin', ({(93, 37, 93, 43): 'origin', (93, 45, 93, 57): 'self.project'}, {}), '(origin, self.project)', False, 'from sentry.utils.http import is_same_domain, is_valid_origin, get_origins, absolute_uri, is_valid_ip\n'), ((94, 12, 94, 61), 'sentry.utils.http.get_origins.assert_called_once_with', 'get_origins.assert_called_once_with', ({(94, 48, 94, 60): 'self.project'}, {}), '(self.project)', False, 'from sentry.utils.http import is_same_domain, is_valid_origin, get_origins, absolute_uri, is_valid_ip\n'), ((22, 59, 22, 91), 'sentry.options.get', 'options.get', ({(22, 71, 22, 90): '"""system.url-prefix"""'}, {}), "('system.url-prefix')", False, 'from sentry import options\n')]
tongpa/bantak_program
comcenterproject/project/helpers.py
66edfe225e8018f65c9c5a6cd7745c17ba557bd5
# -*- coding: utf-8 -*-
"""WebHelpers used in project."""

#from webhelpers import date, feedgenerator, html, number, misc, text

from markupsafe import Markup

def bold(text):
    return Markup('<strong>%s</strong>' % text)
[((9, 11, 9, 47), 'markupsafe.Markup', 'Markup', ({(9, 18, 9, 46): "('<strong>%s</strong>' % text)"}, {}), "('<strong>%s</strong>' % text)", False, 'from markupsafe import Markup\n')]
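A side note on the helper in the record above, with a minimal sketch (the `unsafe` value is an invented example, not from the repository): because the `%` interpolation in `bold()` happens before the string is wrapped in `Markup`, the argument ends up in the markup unescaped. Letting `Markup` itself perform the interpolation escapes the operand instead.

from markupsafe import Markup

unsafe = '<script>alert(1)</script>'

# As written in the record: interpolate first, then wrap -- no escaping occurs.
as_written = Markup('<strong>%s</strong>' % unsafe)
assert '<script>' in str(as_written)

# Variant in which Markup performs the interpolation and escapes the operand.
escaping = Markup('<strong>%s</strong>') % unsafe
assert '&lt;script&gt;' in str(escaping)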
arnaudsjs/YCSB-1
Thesis/load/runRiakLoads.py
dc557d209244df72d68c9cb0a048d54e7bd72637
import sys;

from Thesis.load.loadBenchmark import runLoadBenchmarkAsBatch;
from Thesis.cluster.RiakCluster import RiakCluster;

NORMAL_BINDING = 'riak';
CONSISTENCY_BINDING = 'riak_consistency';
IPS_IN_CLUSTER = ['172.16.33.14', '172.16.33.15', '172.16.33.16', '172.16.33.17', '172.16.33.18'];

def main():
    if len(sys.argv) < 7:
        printUsageAndExit();
    pathToWorkloadFile = sys.argv[1];
    dirToWriteResultTo = sys.argv[2];
    runtimeBenchmarkInMinutes = int(sys.argv[3]);
    listOfOpsPerSec = sys.argv[4].split(',');
    listOfAmountThreads = sys.argv[5].split(',');
    listOfAmountOfMachines = sys.argv[6].split(',');
    if len(sys.argv) >= 8:
        remoteYcsbNodes = sys.argv[7].split(',');
    else:
        remoteYcsbNodes = [];
    cluster = RiakCluster(NORMAL_BINDING, CONSISTENCY_BINDING, IPS_IN_CLUSTER);
    runLoadBenchmarkAsBatch(cluster, remoteYcsbNodes, pathToWorkloadFile,
                            runtimeBenchmarkInMinutes, dirToWriteResultTo,
                            listOfOpsPerSec, listOfAmountThreads, listOfAmountOfMachines);

def printUsageAndExit():
    print('usage: binary <path workload file> <result dir> <runtime benchmark> <list of #ops> <list of #threads> <list of #machines> [<list remote ycsb nodes>]');
    exit();

cluster = RiakCluster(NORMAL_BINDING, CONSISTENCY_BINDING, IPS_IN_CLUSTER);
runLoadBenchmarkAsBatch(cluster, ['172.16.33.10'], '/root/YCSB/workloads/workload_load', 3,
                        '/root/YCSB/loads/riak', ['1000000000'], ['1'], ['1']);
# main();
[]
Mozilla-GitHub-Standards/f9c78643f5862cda82001d4471255ac29ef0c6b2c6171e2c1cbecab3d2fef4dd
auto_nag/tests/test_round_robin.py
28d999fcba9ad47d1dd0b2222880b71726ddd47c
# coding: utf-8 # This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this file, # You can obtain one at http://mozilla.org/MPL/2.0/. import unittest from mock import patch from auto_nag.people import People from auto_nag.round_robin import BadFallback, RoundRobin class TestRoundRobin(unittest.TestCase): config = { 'doc': 'The triagers need to have a \'Fallback\' entry.', 'triagers': { 'A B': {'bzmail': '[email protected]'}, 'C D': {'bzmail': '[email protected]'}, 'E F': {'bzmail': '[email protected]'}, 'Fallback': {'bzmail': '[email protected]'}, }, 'components': {'P1::C1': 'default', 'P2::C2': 'default', 'P3::C3': 'special'}, 'default': { 'doc': 'All the dates are the duty end dates.', '2019-02-21': 'A B', '2019-02-28': 'C D', '2019-03-07': 'E F', }, 'special': { 'doc': 'All the dates are the duty end dates.', '2019-02-21': 'E F', '2019-02-28': 'A B', '2019-03-07': 'C D', }, } people = People( [ { 'mail': '[email protected]', 'cn': 'G H', 'ismanager': 'FALSE', 'title': 'nothing', } ] ) def mk_bug(self, pc): p, c = pc.split('::') return { 'product': p, 'component': c, 'triage_owner': '[email protected]', 'triage_owner_detail': {'nick': 'ij'}, } @staticmethod def _get_nick(x, bzmail): return bzmail.split('@')[0] def test_get(self): with patch.object(RoundRobin, 'get_nick', new=TestRoundRobin._get_nick): rr = RoundRobin( rr={'team': TestRoundRobin.config}, people=TestRoundRobin.people ) assert rr.get(self.mk_bug('P1::C1'), '2019-02-17') == ( '[email protected]', 'ab', ) assert rr.get(self.mk_bug('P2::C2'), '2019-02-17') == ( '[email protected]', 'ab', ) assert rr.get(self.mk_bug('P3::C3'), '2019-02-17') == ( '[email protected]', 'ef', ) assert rr.get(self.mk_bug('P1::C1'), '2019-02-24') == ( '[email protected]', 'cd', ) assert rr.get(self.mk_bug('P2::C2'), '2019-02-24') == ( '[email protected]', 'cd', ) assert rr.get(self.mk_bug('P3::C3'), '2019-02-24') == ( '[email protected]', 'ab', ) assert rr.get(self.mk_bug('P1::C1'), '2019-02-28') == ( '[email protected]', 'cd', ) assert rr.get(self.mk_bug('P2::C2'), '2019-02-28') == ( '[email protected]', 'cd', ) assert rr.get(self.mk_bug('P3::C3'), '2019-02-28') == ( '[email protected]', 'ab', ) assert rr.get(self.mk_bug('P1::C1'), '2019-03-05') == ( '[email protected]', 'ef', ) assert rr.get(self.mk_bug('P2::C2'), '2019-03-05') == ( '[email protected]', 'ef', ) assert rr.get(self.mk_bug('P3::C3'), '2019-03-05') == ( '[email protected]', 'cd', ) assert rr.get(self.mk_bug('P1::C1'), '2019-03-08') == ( '[email protected]', 'gh', ) assert rr.get(self.mk_bug('P2::C2'), '2019-03-08') == ( '[email protected]', 'gh', ) assert rr.get(self.mk_bug('P3::C3'), '2019-03-08') == ( '[email protected]', 'gh', ) assert rr.get(self.mk_bug('Foo::Bar'), '2019-03-01') == ( '[email protected]', 'ij', ) def test_get_who_to_nag(self): rr = RoundRobin( rr={'team': TestRoundRobin.config}, people=TestRoundRobin.people ) assert rr.get_who_to_nag('2019-02-25') == {} assert rr.get_who_to_nag('2019-02-28') == {'[email protected]': ['']} assert rr.get_who_to_nag('2019-03-05') == {'[email protected]': ['']} assert rr.get_who_to_nag('2019-03-07') == {'[email protected]': ['']} assert rr.get_who_to_nag('2019-03-10') == {'[email protected]': ['']} with patch.object(RoundRobin, 'is_mozilla', return_value=False): rr = RoundRobin( rr={'team': TestRoundRobin.config}, people=TestRoundRobin.people ) self.assertRaises(BadFallback, rr.get_who_to_nag, '2019-03-01')
[((39, 13, 48, 5), 'auto_nag.people.People', 'People', ({(40, 8, 47, 9): "[{'mail': '[email protected]', 'cn': 'G H', 'ismanager': 'FALSE', 'title':\n 'nothing'}]"}, {}), "([{'mail': '[email protected]', 'cn': 'G H', 'ismanager': 'FALSE',\n 'title': 'nothing'}])", False, 'from auto_nag.people import People\n'), ((140, 13, 142, 9), 'auto_nag.round_robin.RoundRobin', 'RoundRobin', (), '', False, 'from auto_nag.round_robin import BadFallback, RoundRobin\n'), ((64, 13, 64, 79), 'mock.patch.object', 'patch.object', (), '', False, 'from mock import patch\n'), ((65, 17, 67, 13), 'auto_nag.round_robin.RoundRobin', 'RoundRobin', (), '', False, 'from auto_nag.round_robin import BadFallback, RoundRobin\n'), ((150, 13, 150, 71), 'mock.patch.object', 'patch.object', (), '', False, 'from mock import patch\n'), ((151, 17, 153, 13), 'auto_nag.round_robin.RoundRobin', 'RoundRobin', (), '', False, 'from auto_nag.round_robin import BadFallback, RoundRobin\n')]
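The calendars in the test record above map duty-end dates to triagers, and the expected values imply that a date on or before an end date selects that entry while later dates fall through to the fallback. A small illustrative helper, written for this note and not part of auto_nag, shows that rotation rule:

from datetime import date

def on_duty(calendar, day):
    # Pick the triager whose duty window (ending on the key date) covers `day`.
    for end_str in sorted(k for k in calendar if k != 'doc'):
        if day <= date.fromisoformat(end_str):
            return calendar[end_str]
    return 'Fallback'

default_calendar = {
    'doc': 'All the dates are the duty end dates.',
    '2019-02-21': 'A B',
    '2019-02-28': 'C D',
    '2019-03-07': 'E F',
}

assert on_duty(default_calendar, date(2019, 2, 17)) == 'A B'
assert on_duty(default_calendar, date(2019, 2, 28)) == 'C D'
assert on_duty(default_calendar, date(2019, 3, 8)) == 'Fallback'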
tacaswell/scipy
scipy/weave/inline_tools.py
4d7e924a319299e39c9a9514e021fbfdfceb854e
# should re-write compiled functions to take a local and global dict # as input. from __future__ import absolute_import, print_function import sys import os from . import ext_tools from . import catalog from . import common_info from numpy.core.multiarray import _get_ndarray_c_version ndarray_api_version = '/* NDARRAY API VERSION %x */' % (_get_ndarray_c_version(),) # not an easy way for the user_path_list to come in here. # the PYTHONCOMPILED environment variable offers the most hope. function_catalog = catalog.catalog() class inline_ext_function(ext_tools.ext_function): # Some specialization is needed for inline extension functions def function_declaration_code(self): code = 'static PyObject* %s(PyObject*self, PyObject* args)\n{\n' return code % self.name def template_declaration_code(self): code = 'template<class T>\n' \ 'static PyObject* %s(PyObject*self, PyObject* args)\n{\n' return code % self.name def parse_tuple_code(self): """ Create code block for PyArg_ParseTuple. Variable declarations for all PyObjects are done also. This code got a lot uglier when I added local_dict... """ declare_return = 'py::object return_val;\n' \ 'int exception_occurred = 0;\n' \ 'PyObject *py__locals = NULL;\n' \ 'PyObject *py__globals = NULL;\n' py_objects = ', '.join(self.arg_specs.py_pointers()) if py_objects: declare_py_objects = 'PyObject ' + py_objects + ';\n' else: declare_py_objects = '' py_vars = ' = '.join(self.arg_specs.py_variables()) if py_vars: init_values = py_vars + ' = NULL;\n\n' else: init_values = '' parse_tuple = 'if(!PyArg_ParseTuple(args,"OO:compiled_func",'\ '&py__locals,'\ '&py__globals))\n'\ ' return NULL;\n' return declare_return + declare_py_objects + \ init_values + parse_tuple def arg_declaration_code(self): """Return the declaration code as a string.""" arg_strings = [arg.declaration_code(inline=1) for arg in self.arg_specs] return "".join(arg_strings) def arg_cleanup_code(self): """Return the cleanup code as a string.""" arg_strings = [arg.cleanup_code() for arg in self.arg_specs] return "".join(arg_strings) def arg_local_dict_code(self): """Return the code to create the local dict as a string.""" arg_strings = [arg.local_dict_code() for arg in self.arg_specs] return "".join(arg_strings) def function_code(self): from .ext_tools import indent decl_code = indent(self.arg_declaration_code(),4) cleanup_code = indent(self.arg_cleanup_code(),4) function_code = indent(self.code_block,4) # local_dict_code = indent(self.arg_local_dict_code(),4) try_code = \ ' try \n' \ ' { \n' \ '#if defined(__GNUC__) || defined(__ICC)\n' \ ' PyObject* raw_locals __attribute__ ((unused));\n' \ ' PyObject* raw_globals __attribute__ ((unused));\n' \ '#else\n' \ ' PyObject* raw_locals;\n' \ ' PyObject* raw_globals;\n' \ '#endif\n' \ ' raw_locals = py_to_raw_dict(py__locals,"_locals");\n' \ ' raw_globals = py_to_raw_dict(py__globals,"_globals");\n' \ ' /* argument conversion code */ \n' \ + decl_code + \ ' /* inline code */ \n' \ + function_code + \ ' /*I would like to fill in changed locals and globals here...*/ \n' \ ' }\n' catch_code = "catch(...) 
\n" \ "{ \n" + \ " return_val = py::object(); \n" \ " exception_occurred = 1; \n" \ "} \n" return_code = " /* cleanup code */ \n" + \ cleanup_code + \ " if(!(PyObject*)return_val && !exception_occurred)\n" \ " {\n \n" \ " return_val = Py_None; \n" \ " }\n \n" \ " return return_val.disown(); \n" \ "} \n" all_code = self.function_declaration_code() + \ indent(self.parse_tuple_code(),4) + \ try_code + \ indent(catch_code,4) + \ return_code return all_code def python_function_definition_code(self): args = (self.name, self.name) function_decls = '{"%s",(PyCFunction)%s , METH_VARARGS},\n' % args return function_decls class inline_ext_module(ext_tools.ext_module): def __init__(self,name,compiler=''): ext_tools.ext_module.__init__(self,name,compiler) self._build_information.append(common_info.inline_info()) function_cache = {} def inline(code,arg_names=[],local_dict=None, global_dict=None, force=0, compiler='', verbose=0, support_code=None, headers=[], customize=None, type_converters=None, auto_downcast=1, newarr_converter=0, **kw): """ Inline C/C++ code within Python scripts. ``inline()`` compiles and executes C/C++ code on the fly. Variables in the local and global Python scope are also available in the C/C++ code. Values are passed to the C/C++ code by assignment much like variables passed are passed into a standard Python function. Values are returned from the C/C++ code through a special argument called return_val. Also, the contents of mutable objects can be changed within the C/C++ code and the changes remain after the C code exits and returns to Python. inline has quite a few options as listed below. Also, the keyword arguments for distutils extension modules are accepted to specify extra information needed for compiling. Parameters ---------- code : string A string of valid C++ code. It should not specify a return statement. Instead it should assign results that need to be returned to Python in the `return_val`. arg_names : [str], optional A list of Python variable names that should be transferred from Python into the C/C++ code. It defaults to an empty string. local_dict : dict, optional If specified, it is a dictionary of values that should be used as the local scope for the C/C++ code. If local_dict is not specified the local dictionary of the calling function is used. global_dict : dict, optional If specified, it is a dictionary of values that should be used as the global scope for the C/C++ code. If `global_dict` is not specified, the global dictionary of the calling function is used. force : {0, 1}, optional If 1, the C++ code is compiled every time inline is called. This is really only useful for debugging, and probably only useful if your editing `support_code` a lot. compiler : str, optional The name of compiler to use when compiling. On windows, it understands 'msvc' and 'gcc' as well as all the compiler names understood by distutils. On Unix, it'll only understand the values understood by distutils. (I should add 'gcc' though to this). On windows, the compiler defaults to the Microsoft C++ compiler. If this isn't available, it looks for mingw32 (the gcc compiler). On Unix, it'll probably use the same compiler that was used when compiling Python. Cygwin's behavior should be similar. verbose : {0,1,2}, optional Specifies how much information is printed during the compile phase of inlining code. 0 is silent (except on windows with msvc where it still prints some garbage). 1 informs you when compiling starts, finishes, and how long it took. 
2 prints out the command lines for the compilation process and can be useful if your having problems getting code to work. Its handy for finding the name of the .cpp file if you need to examine it. verbose has no effect if the compilation isn't necessary. support_code : str, optional A string of valid C++ code declaring extra code that might be needed by your compiled function. This could be declarations of functions, classes, or structures. headers : [str], optional A list of strings specifying header files to use when compiling the code. The list might look like ``["<vector>","'my_header'"]``. Note that the header strings need to be in a form than can be pasted at the end of a ``#include`` statement in the C++ code. customize : base_info.custom_info, optional An alternative way to specify `support_code`, `headers`, etc. needed by the function. See :mod:`scipy.weave.base_info` for more details. (not sure this'll be used much). type_converters : [type converters], optional These guys are what convert Python data types to C/C++ data types. If you'd like to use a different set of type conversions than the default, specify them here. Look in the type conversions section of the main documentation for examples. auto_downcast : {1,0}, optional This only affects functions that have numpy arrays as input variables. Setting this to 1 will cause all floating point values to be cast as float instead of double if all the Numeric arrays are of type float. If even one of the arrays has type double or double complex, all variables maintain their standard types. newarr_converter : int, optional Unused. Other Parameters ---------------- Relevant :mod:`distutils` keywords. These are duplicated from Greg Ward's :class:`distutils.extension.Extension` class for convenience: sources : [string] List of source filenames, relative to the distribution root (where the setup script lives), in Unix form (slash-separated) for portability. Source files may be C, C++, SWIG (.i), platform-specific resource files, or whatever else is recognized by the "build_ext" command as source for a Python extension. .. note:: The `module_path` file is always appended to the front of this list include_dirs : [string] List of directories to search for C/C++ header files (in Unix form for portability). define_macros : [(name : string, value : string|None)] List of macros to define; each macro is defined using a 2-tuple, where 'value' is either the string to define it to or None to define it without a particular value (equivalent of "#define FOO" in source or -DFOO on Unix C compiler command line). undef_macros : [string] List of macros to undefine explicitly. library_dirs : [string] List of directories to search for C/C++ libraries at link time. libraries : [string] List of library names (not filenames or paths) to link against. runtime_library_dirs : [string] List of directories to search for C/C++ libraries at run time (for shared extensions, this is when the extension is loaded). extra_objects : [string] List of extra files to link with (e.g. object files not implied by 'sources', static libraries that must be explicitly specified, binary resource files, etc.) extra_compile_args : [string] Any extra platform- and compiler-specific information to use when compiling the source files in 'sources'. For platforms and compilers where "command line" makes sense, this is typically a list of command-line arguments, but for other platforms it could be anything. 
extra_link_args : [string] Any extra platform- and compiler-specific information to use when linking object files together to create the extension (or to create a new static Python interpreter). Similar interpretation as for 'extra_compile_args'. export_symbols : [string] List of symbols to be exported from a shared extension. Not used on all platforms, and not generally necessary for Python extensions, which typically export exactly one symbol: "init" + extension_name. swig_opts : [string] Any extra options to pass to SWIG if a source file has the .i extension. depends : [string] List of files that the extension depends on. language : string Extension language (i.e. "c", "c++", "objc"). Will be detected from the source extensions if not provided. See Also -------- distutils.extension.Extension : Describes additional parameters. """ # this grabs the local variables from the *previous* call # frame -- that is the locals from the function that called # inline. global function_catalog call_frame = sys._getframe().f_back if local_dict is None: local_dict = call_frame.f_locals if global_dict is None: global_dict = call_frame.f_globals if force: module_dir = global_dict.get('__file__',None) func = compile_function(code,arg_names,local_dict, global_dict,module_dir, compiler=compiler, verbose=verbose, support_code=support_code, headers=headers, customize=customize, type_converters=type_converters, auto_downcast=auto_downcast, **kw) function_catalog.add_function(code,func,module_dir) results = attempt_function_call(code,local_dict,global_dict) else: # 1. try local cache try: results = apply(function_cache[code],(local_dict,global_dict)) return results except TypeError as msg: msg = str(msg).strip() if msg[:16] == "Conversion Error": pass else: raise TypeError(msg) except NameError as msg: msg = str(msg).strip() if msg[:16] == "Conversion Error": pass else: raise NameError(msg) except KeyError: pass # 2. try function catalog try: results = attempt_function_call(code,local_dict,global_dict) # 3. build the function except ValueError: # compile the library module_dir = global_dict.get('__file__',None) func = compile_function(code,arg_names,local_dict, global_dict,module_dir, compiler=compiler, verbose=verbose, support_code=support_code, headers=headers, customize=customize, type_converters=type_converters, auto_downcast=auto_downcast, **kw) function_catalog.add_function(code,func,module_dir) results = attempt_function_call(code,local_dict,global_dict) return results def attempt_function_call(code,local_dict,global_dict): # we try 3 levels here -- a local cache first, then the # catalog cache, and then persistent catalog. # global function_catalog # 1. try local cache try: results = apply(function_cache[code],(local_dict,global_dict)) return results except TypeError as msg: msg = str(msg).strip() if msg[:16] == "Conversion Error": pass else: raise TypeError(msg) except NameError as msg: msg = str(msg).strip() if msg[:16] == "Conversion Error": pass else: raise NameError(msg) except KeyError: pass # 2. try catalog cache. function_list = function_catalog.get_functions_fast(code) for func in function_list: try: results = apply(func,(local_dict,global_dict)) function_catalog.fast_cache(code,func) function_cache[code] = func return results except TypeError as msg: # should specify argument types here. # This should really have its own error type, instead of # checking the beginning of the message, but I don't know # how to define that yet. 
msg = str(msg) if msg[:16] == "Conversion Error": pass else: raise TypeError(msg) except NameError as msg: msg = str(msg).strip() if msg[:16] == "Conversion Error": pass else: raise NameError(msg) # 3. try persistent catalog module_dir = global_dict.get('__file__',None) function_list = function_catalog.get_functions(code,module_dir) for func in function_list: try: results = apply(func,(local_dict,global_dict)) function_catalog.fast_cache(code,func) function_cache[code] = func return results except: # should specify argument types here. pass # if we get here, the function wasn't found raise ValueError('function with correct signature not found') def inline_function_code(code,arg_names,local_dict=None, global_dict=None,auto_downcast=1, type_converters=None,compiler=''): call_frame = sys._getframe().f_back if local_dict is None: local_dict = call_frame.f_locals if global_dict is None: global_dict = call_frame.f_globals ext_func = inline_ext_function('compiled_func',code,arg_names, local_dict,global_dict,auto_downcast, type_converters=type_converters) from . import build_tools compiler = build_tools.choose_compiler(compiler) ext_func.set_compiler(compiler) return ext_func.function_code() def compile_function(code,arg_names,local_dict,global_dict, module_dir, compiler='', verbose=1, support_code=None, headers=[], customize=None, type_converters=None, auto_downcast=1, **kw): # figure out where to store and what to name the extension module # that will contain the function. # storage_dir = catalog.intermediate_dir() code = ndarray_api_version + '\n' + code module_path = function_catalog.unique_module_name(code, module_dir) storage_dir, module_name = os.path.split(module_path) mod = inline_ext_module(module_name,compiler) # create the function. This relies on the auto_downcast and # type factories setting ext_func = inline_ext_function('compiled_func',code,arg_names, local_dict,global_dict,auto_downcast, type_converters=type_converters) mod.add_function(ext_func) # if customize (a custom_info object), then set the module customization. if customize: mod.customize = customize # add the extra "support code" needed by the function to the module. if support_code: mod.customize.add_support_code(support_code) # add the extra headers needed by the function to the module. for header in headers: mod.customize.add_header(header) # it's nice to let the users know when anything gets compiled, as the # slowdown is very noticeable. if verbose > 0: print('<weave: compiling>') # compile code in correct location, with the given compiler and verbosity # setting. All input keywords are passed through to distutils mod.compile(location=storage_dir,compiler=compiler, verbose=verbose, **kw) # import the module and return the function. Make sure # the directory where it lives is in the python path. try: sys.path.insert(0,storage_dir) exec('import ' + module_name) func = eval(module_name+'.compiled_func') finally: del sys.path[0] return func
[((461, 31, 461, 57), 'os.path.split', 'os.path.split', ({(461, 45, 461, 56): 'module_path'}, {}), '(module_path)', False, 'import os\n'), ((12, 56, 12, 80), 'numpy.core.multiarray._get_ndarray_c_version', '_get_ndarray_c_version', ({}, {}), '()', False, 'from numpy.core.multiarray import _get_ndarray_c_version\n'), ((306, 17, 306, 32), 'sys._getframe', 'sys._getframe', ({}, {}), '()', False, 'import sys\n'), ((432, 17, 432, 32), 'sys._getframe', 'sys._getframe', ({}, {}), '()', False, 'import sys\n'), ((496, 8, 496, 38), 'sys.path.insert', 'sys.path.insert', ({(496, 24, 496, 25): '(0)', (496, 26, 496, 37): 'storage_dir'}, {}), '(0, storage_dir)', False, 'import sys\n')]
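The `inline()` docstring in the record above describes how C/C++ code is compiled against variables taken from the calling scope and hands its result back through `return_val`. A minimal historical usage sketch follows; `weave` is a Python 2-era package absent from modern SciPy, and the variable names here are illustrative rather than taken from the source:

from scipy import weave  # available only in old, Python 2-era SciPy releases

a = 3
b = 4
code = """
    // C++ body: values of `a` and `b` are pulled from the calling frame,
    // and the product is handed back through return_val.
    return_val = a * b;
"""
result = weave.inline(code, ['a', 'b'], verbose=0)
assert result == 12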
sapcc/trove
trove/guestagent/common/configuration.py
c03ec0827687fba202f72f4d264ab70158604857
# Copyright 2015 Tesora Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc import os import re import six from trove.guestagent.common import guestagent_utils from trove.guestagent.common import operating_system from trove.guestagent.common.operating_system import FileMode class ConfigurationManager(object): """ ConfigurationManager is responsible for management of datastore configuration. Its base functionality includes reading and writing configuration files. It is responsible for validating user inputs and requests. When supplied an override strategy it allows the user to manage configuration overrides as well. """ # Configuration group names. The names determine the order in which the # groups get applied. System groups are divided into two camps; pre-user # and post-user. In general system overrides will get applied over the # user group, unless specified otherwise (i.e. SYSTEM_POST_USER_GROUP # will be used). SYSTEM_PRE_USER_GROUP = '10-system' USER_GROUP = '20-user' SYSTEM_POST_USER_GROUP = '50-system' DEFAULT_STRATEGY_OVERRIDES_SUB_DIR = 'overrides' DEFAULT_CHANGE_ID = 'common' def __init__(self, base_config_path, owner, group, codec, requires_root=False, override_strategy=None): """ :param base_config_path Path to the configuration file. :type base_config_path string :param owner Owner of the configuration files. :type owner string :param group Group of the configuration files. :type group string :param codec Codec for reading/writing of the particular configuration format. :type codec StreamCodec :param requires_root Whether the manager requires superuser privileges. :type requires_root boolean :param override_strategy Strategy used to manage configuration overrides (e.g. ImportOverrideStrategy). Defaults to OneFileOverrideStrategy if None. This strategy should be compatible with very much any datastore. It is recommended each datastore defines its strategy explicitly to avoid upgrade compatibility issues in case the default implementation changes in the future. :type override_strategy ConfigurationOverrideStrategy """ self._base_config_path = base_config_path self._owner = owner self._group = group self._codec = codec self._requires_root = requires_root self._value_cache = None if not override_strategy: # Use OneFile strategy by default. Store the revisions in a # sub-directory at the location of the configuration file. revision_dir = guestagent_utils.build_file_path( os.path.dirname(base_config_path), self.DEFAULT_STRATEGY_OVERRIDES_SUB_DIR) self._override_strategy = OneFileOverrideStrategy(revision_dir) else: self._override_strategy = override_strategy self._override_strategy.configure( base_config_path, owner, group, codec, requires_root) def get_value(self, key, default=None): """Return the current value at a given key or 'default'. """ if self._value_cache is None: self.refresh_cache() return self._value_cache.get(key, default) def parse_configuration(self): """Read contents of the configuration file (applying overrides if any) and parse it into a dict. 
:returns: Configuration file as a Python dict. """ base_options = operating_system.read_file( self._base_config_path, codec=self._codec, as_root=self._requires_root) updates = self._override_strategy.parse_updates() guestagent_utils.update_dict(updates, base_options) return base_options def save_configuration(self, options): """Write given contents to the base configuration file. Remove all existing overrides (both system and user). :param contents Contents of the configuration file. :type contents string or dict """ if isinstance(options, dict): # Serialize a dict of options for writing. self.save_configuration(self._codec.serialize(options)) else: self._override_strategy.remove(self.USER_GROUP) self._override_strategy.remove(self.SYSTEM_PRE_USER_GROUP) self._override_strategy.remove(self.SYSTEM_POST_USER_GROUP) operating_system.write_file( self._base_config_path, options, as_root=self._requires_root) operating_system.chown( self._base_config_path, self._owner, self._group, as_root=self._requires_root) operating_system.chmod( self._base_config_path, FileMode.ADD_READ_ALL, as_root=self._requires_root) self.refresh_cache() def has_system_override(self, change_id): """Return whether a given 'system' change exists. """ return (self._override_strategy.exists(self.SYSTEM_POST_USER_GROUP, change_id) or self._override_strategy.exists(self.SYSTEM_PRE_USER_GROUP, change_id)) def apply_system_override(self, options, change_id=DEFAULT_CHANGE_ID, pre_user=False): """Apply a 'system' change to the configuration. System overrides are always applied after all user changes so that they override any user-defined setting. :param options Configuration changes. :type options string or dict """ group_name = ( self.SYSTEM_PRE_USER_GROUP if pre_user else self.SYSTEM_POST_USER_GROUP) self._apply_override(group_name, change_id, options) def apply_user_override(self, options, change_id=DEFAULT_CHANGE_ID): """Apply a 'user' change to the configuration. The 'system' values will be re-applied over this override. :param options Configuration changes. :type options string or dict """ self._apply_override(self.USER_GROUP, change_id, options) def get_user_override(self, change_id=DEFAULT_CHANGE_ID): """Get the user overrides""" return self._override_strategy.get(self.USER_GROUP, change_id) def _apply_override(self, group_name, change_id, options): if not isinstance(options, dict): # Deserialize the options into a dict if not already. self._apply_override( group_name, change_id, self._codec.deserialize(options)) else: self._override_strategy.apply(group_name, change_id, options) self.refresh_cache() def remove_system_override(self, change_id=DEFAULT_CHANGE_ID): """Revert a 'system' configuration change. """ self._remove_override(self.SYSTEM_POST_USER_GROUP, change_id) self._remove_override(self.SYSTEM_PRE_USER_GROUP, change_id) def remove_user_override(self, change_id=DEFAULT_CHANGE_ID): """Revert a 'user' configuration change. """ self._remove_override(self.USER_GROUP, change_id) def _remove_override(self, group_name, change_id): self._override_strategy.remove(group_name, change_id) self.refresh_cache() def refresh_cache(self): self._value_cache = self.parse_configuration() @six.add_metaclass(abc.ABCMeta) class ConfigurationOverrideStrategy(object): """ConfigurationOverrideStrategy handles configuration files. The strategy provides functionality to enumerate, apply and remove configuration overrides. """ @abc.abstractmethod def configure(self, *args, **kwargs): """Configure this strategy. 
A strategy needs to be configured before it can be used. It would typically be configured by the ConfigurationManager. """ @abc.abstractmethod def exists(self, group_name, change_id): """Return whether a given revision exists. """ @abc.abstractmethod def apply(self, group_name, change_id, options): """Apply given options on the most current configuration revision. Update if a file with the same id already exists. :param group_name The group the override belongs to. :type group_name string :param change_id The name of the override within the group. :type change_id string :param options Configuration changes. :type options dict """ @abc.abstractmethod def remove(self, group_name, change_id=None): """Rollback a given configuration override. Remove the whole group if 'change_id' is None. :param group_name The group the override belongs to. :type group_name string :param change_id The name of the override within the group. :type change_id string """ @abc.abstractmethod def get(self, group_name, change_id=None): """Return the contents of a given configuration override :param group_name The group the override belongs to. :type group_name string :param change_id The name of the override within the group. :type change_id string """ def parse_updates(self): """Return all updates applied to the base revision as a single dict. Return an empty dict if the base file is always the most current version of configuration. :returns: Updates to the base revision as a Python dict. """ return {} class ImportOverrideStrategy(ConfigurationOverrideStrategy): """Import strategy keeps overrides in separate files that get imported into the base configuration file which never changes itself. An override file is simply deleted when the override is removed. We keep two sets of override files in a separate directory. - User overrides - configuration overrides applied by the user via the Trove API. - System overrides - 'internal' configuration changes applied by the guestagent. The name format of override files is: '<set prefix>-<n>-<group name>.<ext>' where 'set prefix' is to used to order user/system sets, 'n' is an index used to keep track of the order in which overrides within their set got applied. """ FILE_NAME_PATTERN = r'%s-([0-9]+)-%s\.%s$' def __init__(self, revision_dir, revision_ext): """ :param revision_dir Path to the directory for import files. :type revision_dir string :param revision_ext Extension of revision files. :type revision_ext string """ self._revision_dir = revision_dir self._revision_ext = revision_ext def configure(self, base_config_path, owner, group, codec, requires_root): """ :param base_config_path Path to the configuration file. :type base_config_path string :param owner Owner of the configuration and revision files. :type owner string :param group Group of the configuration and revision files. :type group string :param codec Codec for reading/writing of the particular configuration format. :type codec StreamCodec :param requires_root Whether the strategy requires superuser privileges. :type requires_root boolean """ self._base_config_path = base_config_path self._owner = owner self._group = group self._codec = codec self._requires_root = requires_root def exists(self, group_name, change_id): return self._find_revision_file(group_name, change_id) is not None def apply(self, group_name, change_id, options): self._initialize_import_directory() revision_file = self._find_revision_file(group_name, change_id) if revision_file is None: # Create a new file. 
last_revision_index = self._get_last_file_index(group_name) revision_file = guestagent_utils.build_file_path( self._revision_dir, '%s-%03d-%s' % (group_name, last_revision_index + 1, change_id), self._revision_ext) else: # Update the existing file. current = operating_system.read_file( revision_file, codec=self._codec, as_root=self._requires_root) options = guestagent_utils.update_dict(options, current) operating_system.write_file( revision_file, options, codec=self._codec, as_root=self._requires_root) operating_system.chown( revision_file, self._owner, self._group, as_root=self._requires_root) operating_system.chmod( revision_file, FileMode.ADD_READ_ALL, as_root=self._requires_root) def _initialize_import_directory(self): """Lazy-initialize the directory for imported revision files. """ if not os.path.exists(self._revision_dir): operating_system.create_directory( self._revision_dir, user=self._owner, group=self._group, force=True, as_root=self._requires_root) def remove(self, group_name, change_id=None): removed = set() if change_id: # Remove a given file. revision_file = self._find_revision_file(group_name, change_id) if revision_file: removed.add(revision_file) else: # Remove the entire group. removed = self._collect_revision_files(group_name) for path in removed: operating_system.remove(path, force=True, as_root=self._requires_root) def get(self, group_name, change_id): revision_file = self._find_revision_file(group_name, change_id) return operating_system.read_file(revision_file, codec=self._codec, as_root=self._requires_root) def parse_updates(self): parsed_options = {} for path in self._collect_revision_files(): options = operating_system.read_file(path, codec=self._codec, as_root=self._requires_root) guestagent_utils.update_dict(options, parsed_options) return parsed_options @property def has_revisions(self): """Return True if there currently are any revision files. """ return (operating_system.exists( self._revision_dir, is_directory=True, as_root=self._requires_root) and (len(self._collect_revision_files()) > 0)) def _get_last_file_index(self, group_name): """Get the index of the most current file in a given group. """ current_files = self._collect_revision_files(group_name) if current_files: name_pattern = self._build_rev_name_pattern(group_name=group_name) last_file_name = os.path.basename(current_files[-1]) last_index_match = re.match(name_pattern, last_file_name) if last_index_match: return int(last_index_match.group(1)) return 0 def _collect_revision_files(self, group_name='.+'): """Collect and return a sorted list of paths to existing revision files. The files should be sorted in the same order in which they were applied. """ name_pattern = self._build_rev_name_pattern(group_name=group_name) return sorted(operating_system.list_files_in_directory( self._revision_dir, recursive=True, pattern=name_pattern, as_root=self._requires_root)) def _find_revision_file(self, group_name, change_id): name_pattern = self._build_rev_name_pattern(group_name, change_id) found = operating_system.list_files_in_directory( self._revision_dir, recursive=True, pattern=name_pattern, as_root=self._requires_root) return next(iter(found), None) def _build_rev_name_pattern(self, group_name='.+', change_id='.+'): return self.FILE_NAME_PATTERN % (group_name, change_id, self._revision_ext) class OneFileOverrideStrategy(ConfigurationOverrideStrategy): """This is a strategy for datastores that do not support multiple configuration files. It uses the Import Strategy to keep the overrides internally. 
When an override is applied or removed a new configuration file is generated by applying all changes on a saved-off base revision. """ BASE_REVISION_NAME = 'base' REVISION_EXT = 'rev' def __init__(self, revision_dir): """ :param revision_dir Path to the directory for import files. :type revision_dir string """ self._revision_dir = revision_dir self._import_strategy = ImportOverrideStrategy(revision_dir, self.REVISION_EXT) def configure(self, base_config_path, owner, group, codec, requires_root): """ :param base_config_path Path to the configuration file. :type base_config_path string :param owner Owner of the configuration and revision files. :type owner string :param group Group of the configuration and revision files. :type group string :param codec Codec for reading/writing of the particular configuration format. :type codec StreamCodec :param requires_root Whether the strategy requires superuser privileges. :type requires_root boolean """ self._base_config_path = base_config_path self._owner = owner self._group = group self._codec = codec self._requires_root = requires_root self._base_revision_file = guestagent_utils.build_file_path( self._revision_dir, self.BASE_REVISION_NAME, self.REVISION_EXT) self._import_strategy.configure( base_config_path, owner, group, codec, requires_root) def exists(self, group_name, change_id): return self._import_strategy.exists(group_name, change_id) def apply(self, group_name, change_id, options): self._import_strategy.apply(group_name, change_id, options) self._regenerate_base_configuration() def remove(self, group_name, change_id=None): if self._import_strategy.has_revisions: self._import_strategy.remove(group_name, change_id=change_id) self._regenerate_base_configuration() if not self._import_strategy.has_revisions: # The base revision file is no longer needed if there are no # overrides. It will be regenerated based on the current # configuration file on the first 'apply()'. operating_system.remove(self._base_revision_file, force=True, as_root=self._requires_root) def get(self, group_name, change_id): return self._import_strategy.get(group_name, change_id) def _regenerate_base_configuration(self): """Gather all configuration changes and apply them in order on the base revision. Write the results to the configuration file. """ if not os.path.exists(self._base_revision_file): # Initialize the file with the current configuration contents if it # does not exist. operating_system.copy( self._base_config_path, self._base_revision_file, force=True, preserve=True, as_root=self._requires_root) base_revision = operating_system.read_file( self._base_revision_file, codec=self._codec, as_root=self._requires_root) changes = self._import_strategy.parse_updates() updated_revision = guestagent_utils.update_dict(changes, base_revision) operating_system.write_file( self._base_config_path, updated_revision, codec=self._codec, as_root=self._requires_root)
[((214, 1, 214, 31), 'six.add_metaclass', 'six.add_metaclass', ({(214, 19, 214, 30): 'abc.ABCMeta'}, {}), '(abc.ABCMeta)', False, 'import six\n'), ((114, 23, 116, 40), 'trove.guestagent.common.operating_system.read_file', 'operating_system.read_file', (), '', False, 'from trove.guestagent.common import operating_system\n'), ((119, 8, 119, 59), 'trove.guestagent.common.guestagent_utils.update_dict', 'guestagent_utils.update_dict', ({(119, 37, 119, 44): 'updates', (119, 46, 119, 58): 'base_options'}, {}), '(updates, base_options)', False, 'from trove.guestagent.common import guestagent_utils\n'), ((358, 8, 360, 40), 'trove.guestagent.common.operating_system.write_file', 'operating_system.write_file', (), '', False, 'from trove.guestagent.common import operating_system\n'), ((361, 8, 363, 40), 'trove.guestagent.common.operating_system.chown', 'operating_system.chown', (), '', False, 'from trove.guestagent.common import operating_system\n'), ((364, 8, 365, 78), 'trove.guestagent.common.operating_system.chmod', 'operating_system.chmod', (), '', False, 'from trove.guestagent.common import operating_system\n'), ((393, 15, 395, 70), 'trove.guestagent.common.operating_system.read_file', 'operating_system.read_file', (), '', False, 'from trove.guestagent.common import operating_system\n'), ((440, 16, 442, 40), 'trove.guestagent.common.operating_system.list_files_in_directory', 'operating_system.list_files_in_directory', (), '', False, 'from trove.guestagent.common import operating_system\n'), ((496, 35, 497, 75), 'trove.guestagent.common.guestagent_utils.build_file_path', 'guestagent_utils.build_file_path', ({(497, 12, 497, 30): 'self._revision_dir', (497, 32, 497, 55): 'self.BASE_REVISION_NAME', (497, 57, 497, 74): 'self.REVISION_EXT'}, {}), '(self._revision_dir, self.\n BASE_REVISION_NAME, self.REVISION_EXT)', False, 'from trove.guestagent.common import guestagent_utils\n'), ((535, 24, 537, 40), 'trove.guestagent.common.operating_system.read_file', 'operating_system.read_file', (), '', False, 'from trove.guestagent.common import operating_system\n'), ((539, 27, 539, 79), 'trove.guestagent.common.guestagent_utils.update_dict', 'guestagent_utils.update_dict', ({(539, 56, 539, 63): 'changes', (539, 65, 539, 78): 'base_revision'}, {}), '(changes, base_revision)', False, 'from trove.guestagent.common import guestagent_utils\n'), ((540, 8, 542, 40), 'trove.guestagent.common.operating_system.write_file', 'operating_system.write_file', (), '', False, 'from trove.guestagent.common import operating_system\n'), ((138, 12, 139, 77), 'trove.guestagent.common.operating_system.write_file', 'operating_system.write_file', (), '', False, 'from trove.guestagent.common import operating_system\n'), ((140, 12, 142, 44), 'trove.guestagent.common.operating_system.chown', 'operating_system.chown', (), '', False, 'from trove.guestagent.common import operating_system\n'), ((143, 12, 145, 44), 'trove.guestagent.common.operating_system.chmod', 'operating_system.chmod', (), '', False, 'from trove.guestagent.common import operating_system\n'), ((347, 28, 351, 35), 'trove.guestagent.common.guestagent_utils.build_file_path', 'guestagent_utils.build_file_path', ({(348, 16, 348, 34): 'self._revision_dir', (349, 16, 350, 42): "'%s-%03d-%s' % (group_name, last_revision_index + 1, change_id)", (351, 16, 351, 34): 'self._revision_ext'}, {}), "(self._revision_dir, '%s-%03d-%s' % (\n group_name, last_revision_index + 1, change_id), self._revision_ext)", False, 'from trove.guestagent.common import guestagent_utils\n'), ((354, 22, 355, 
78), 'trove.guestagent.common.operating_system.read_file', 'operating_system.read_file', (), '', False, 'from trove.guestagent.common import operating_system\n'), ((356, 22, 356, 68), 'trove.guestagent.common.guestagent_utils.update_dict', 'guestagent_utils.update_dict', ({(356, 51, 356, 58): 'options', (356, 60, 356, 67): 'current'}, {}), '(options, current)', False, 'from trove.guestagent.common import guestagent_utils\n'), ((370, 15, 370, 49), 'os.path.exists', 'os.path.exists', ({(370, 30, 370, 48): 'self._revision_dir'}, {}), '(self._revision_dir)', False, 'import os\n'), ((371, 12, 373, 56), 'trove.guestagent.common.operating_system.create_directory', 'operating_system.create_directory', (), '', False, 'from trove.guestagent.common import operating_system\n'), ((387, 12, 388, 64), 'trove.guestagent.common.operating_system.remove', 'operating_system.remove', (), '', False, 'from trove.guestagent.common import operating_system\n'), ((400, 22, 401, 77), 'trove.guestagent.common.operating_system.read_file', 'operating_system.read_file', (), '', False, 'from trove.guestagent.common import operating_system\n'), ((402, 12, 402, 65), 'trove.guestagent.common.guestagent_utils.update_dict', 'guestagent_utils.update_dict', ({(402, 41, 402, 48): 'options', (402, 50, 402, 64): 'parsed_options'}, {}), '(options, parsed_options)', False, 'from trove.guestagent.common import guestagent_utils\n'), ((410, 16, 412, 40), 'trove.guestagent.common.operating_system.exists', 'operating_system.exists', (), '', False, 'from trove.guestagent.common import operating_system\n'), ((421, 29, 421, 64), 'os.path.basename', 'os.path.basename', ({(421, 46, 421, 63): 'current_files[-1]'}, {}), '(current_files[-1])', False, 'import os\n'), ((422, 31, 422, 69), 're.match', 're.match', ({(422, 40, 422, 52): 'name_pattern', (422, 54, 422, 68): 'last_file_name'}, {}), '(name_pattern, last_file_name)', False, 'import re\n'), ((434, 22, 436, 40), 'trove.guestagent.common.operating_system.list_files_in_directory', 'operating_system.list_files_in_directory', (), '', False, 'from trove.guestagent.common import operating_system\n'), ((528, 15, 528, 55), 'os.path.exists', 'os.path.exists', ({(528, 30, 528, 54): 'self._base_revision_file'}, {}), '(self._base_revision_file)', False, 'import os\n'), ((531, 12, 533, 71), 'trove.guestagent.common.operating_system.copy', 'operating_system.copy', (), '', False, 'from trove.guestagent.common import operating_system\n'), ((90, 16, 90, 49), 'os.path.dirname', 'os.path.dirname', ({(90, 32, 90, 48): 'base_config_path'}, {}), '(base_config_path)', False, 'import os\n'), ((517, 16, 518, 68), 'trove.guestagent.common.operating_system.remove', 'operating_system.remove', (), '', False, 'from trove.guestagent.common import operating_system\n')]
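To make the override layering in the record above concrete, here is a rough usage sketch. The file path, owner/group, and the `IniCodec` stream codec are assumptions for illustration; a real guestagent would supply its own datastore-specific values and codec:

from trove.guestagent.common import stream_codecs
from trove.guestagent.common.configuration import ConfigurationManager

# Assumed MySQL-style configuration file; path, owner and codec are illustrative.
manager = ConfigurationManager(
    '/etc/mysql/my.cnf', 'mysql', 'mysql',
    stream_codecs.IniCodec(), requires_root=True)

# A user override; any 'system' overrides applied afterwards take precedence.
manager.apply_user_override({'mysqld': {'max_connections': '200'}})
mysqld_section = manager.get_value('mysqld', {})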
sawyercade/Documentation
API-Reference-Code-Generator.py
257b68c8ca2928e8a730ea44196297a400587437
import pathlib import yaml documentations = {"Our Platform": "QuantConnect-Platform-2.0.0.yaml", "Alpha Streams": "QuantConnect-Alpha-0.8.yaml"} def RequestTable(api_call, params): writeUp = '<table class="table qc-table">\n<thead>\n<tr>\n' writeUp += f'<th colspan="2"><code>{api_call}</code> Method</th>\n</tr>\n</thead>' example = '<tr>\n<td width="20%">Example</td>\n<td>\n<div class="cli section-example-container"><pre>\n{\n' for item in params: example_ = "/" description_ = "Optional. " if "required" not in item or not item["required"] else "" description_ += item["description"] if description_[-1] != ".": description_ += "." if "type" in item["schema"]: type_ = item["schema"]["type"] else: type_ = item["schema"]["$ref"].split("/")[-1] if "minimum" in item["schema"]: description_ += f' Minimum: {item["schema"]["minimum"]}' example_ = item["schema"]["minimum"] elif "maximum" in item["schema"]: description_ += f' Maximum: {item["schema"]["maximum"]}' example_ = item["schema"]["maximum"] elif "default" in item["schema"]: description_ += f' Default: {item["schema"]["default"]}' example_ = item["schema"]["default"] if type_ == "array": array_obj = item["schema"]["items"] if "$ref" in array_obj: type_ = array_obj["$ref"].split("/")[-1] + " Array" ref = array_obj["$ref"].split("/")[1:] type_ = ref[-1] + " Array" request_object_ = doc for path in ref: request_object_ = request_object_[path] if "properties" in request_object_: request_object_properties_ = request_object_["properties"] example_, __, __ = ExampleWriting(request_object_properties_, [], 1) if "type" in array_obj: type_ = array_obj["type"] + " Array" if "enum" in array_obj: type_ = type_ + " Enum" description_ += f' Options: {str(array_obj["enum"])}' example_ = f'"{array_obj["enum"][0]}"' if "Enum" not in type_: if "string" in type_: example_ = '"string"' elif "number" in type_ or "integer" in type_: example_ = '0' elif "boolean" in type_: example_ = 'true' writeUp += f'\n<tr>\n<td width="20%">{item["name"]}</td> <td> <code>{type_}</code><br/>{description_}</td>\n</tr>' example += f' "{item["name"]}": {example_},\n' return writeUp + example + "\b}</pre>\n</div>\n</td>\n</tr>\n</table>" def ResponseTable(requestBody): writeUp = "" array = False order = 0 if "content" in requestBody: component = requestBody["content"]["application/json"]["schema"] if "$ref" in component: component = component["$ref"].split("/")[1:] elif "items" in component and "$ref" in component["items"]: component = component["items"]["$ref"].split("/")[1:] array = True order += 1 else: writeUp += '<table class="table qc-table">\n<thead>\n<tr>\n' writeUp += f'<th colspan="2">{requestBody["description"]}</th>\n' writeUp += '</tr>\n</thead>\n' writeUp += f'<tr>\n<td width="20%">value</td> <td> <code>{component["items"]["type"]}</code> <br/>/</td>\n</tr>\n' writeUp += '<tr>\n<td width="20%">Example</td>\n<td>\n<div class="cli section-example-container"><pre>\n' writeUp += f'[\n "{component["items"]["example"]}"\n]' writeUp += '</pre>\n</div>\n</td>\n</tr>\n</table>' return writeUp else: component = requestBody["$ref"].split("/")[1:] item_list = [component] i = 0 while i < len(item_list): request_object = doc for item in item_list[i]: request_object = request_object[item] if "items" in request_object and "oneOf" in request_object["items"]: prop = request_object["items"]["oneOf"] example = '<tr>\n<td width="20%">Example</td>\n<td>\n<div class="cli section-example-container"><pre>\n[\n [' writeUp += '<table class="table qc-table">\n<thead>\n<tr>\n' writeUp += f'<th 
colspan="2"><code>{item}</code> Model - {request_object["description"]}</th>\n' writeUp += '</tr>\n</thead>' for y in prop: path = y["$ref"].split("/")[1:] name = path[-1] enum = "" item_list.append(path) request_object = doc for item in path: request_object = request_object[item] if "enum" in request_object: enum = " Options: " + str(request_object["enum"]) description_ = request_object["description"] if description_[-1] != ".": description_ += "." writeUp += f'\n<tr>\n<td width="20%">{name}</td> <td> <code>{request_object["type"]}</code> <br/> {description_ + enum}</td>\n</tr>\n' if "example" in request_object: text = request_object["example"] elif "enum" in request_object: text = '"' + request_object["enum"][0] + '"' example += f'\n {text},' example += '\b\n ]\n]' writeUp += example writeUp += '</pre>\n</div>\n</td>\n</tr>\n</table>' i += 1 continue elif "oneOf" in request_object: for y in request_object["oneOf"]: item_list.append(y["$ref"].split("/")[1:]) i += 1 continue elif "properties" in request_object: request_object_properties = request_object["properties"] elif "content" in request_object: item_list.append(request_object["content"]["application/json"]["schema"]["$ref"].split("/")[1:]) i += 1 continue elif "type" in request_object and "properties" not in request_object: request_object_properties = {item: request_object} writeUp += '<table class="table qc-table">\n<thead>\n<tr>\n' if "description" in request_object: writeUp += f'<th colspan="2"><code>{item_list[i][-1]}</code> Model - {request_object["description"]}</th>\n' else: writeUp += f'<th colspan="2"><code>{item_list[i][-1]}</code> Model</th>\n' writeUp += '</tr>\n</thead>\n' example, html_property, item_list = ExampleWriting(request_object_properties, item_list, array, order) if array: array = False order -= 1 for line in html_property: writeUp += line writeUp += '<tr>\n<td width="20%">Example</td>\n<td>\n<div class="cli section-example-container"><pre>\n' writeUp += example writeUp += '</pre>\n</div>\n</td>\n</tr>\n</table>' i += 1 return writeUp def ExampleWriting(request_object_properties, item_list, array=False, order=0): tab = " " * order if array: example = "[\n {\n" else: example = "{\n" line = [] for name, properties in request_object_properties.items(): type_ = properties["type"] if "type" in properties else "object" description_ = properties["description"] if "description" in properties else "/" if (example != "{\n" and not array) or (example != "[\n {\n" and array): example += ",\n" example_ = tab + f' "{name}": ' if type_ == "array": example_ += '[\n' if "type" in properties["items"]: type_ = properties["items"]["type"] + " Array" example_ += tab + f' "{properties["items"]["type"]}"' elif "$ref" in properties["items"]: ref = properties["items"]["$ref"].split("/")[1:] type_ = ref[-1] + " Array" if ref not in item_list: item_list.append(ref) request_object_ = doc for item in ref: request_object_ = request_object_[item] if "properties" in request_object_: request_object_properties_ = request_object_["properties"] write_up, __, item_list = ExampleWriting(request_object_properties_, item_list, order=order+2) example_ += tab + " " * 2 + write_up elif type_ == "object": if "additionalProperties" in properties: add_prop = properties["additionalProperties"] if "type" in add_prop: prop_type = add_prop["type"] if "format" in prop_type: type_ = prop_type + f'$({prop_type["format"]})' + " object" if prop_type["format"] == "date-time": example_ += "2021-11-26T15:18:27.693Z" else: example_ += "0" else: type_ = prop_type + " 
object" example_ += f'"{prop_type}"' elif "$ref" in add_prop: ref = add_prop["$ref"].split("/")[1:] type_ = ref[-1] + " object" if ref not in item_list: item_list.append(ref) request_object_ = doc for item in ref: request_object_ = request_object_[item] if "properties" in request_object_: request_object_properties_ = request_object_["properties"] write_up, __, item_list = ExampleWriting(request_object_properties_, item_list, order=order+1) example_ += write_up elif "$ref" in properties: ref = properties["$ref"].split("/")[1:] type_ = ref[-1] + " object" if ref not in item_list: item_list.append(ref) request_object_ = doc for item in ref: request_object_ = request_object_[item] if "properties" in request_object_: request_object_properties_ = request_object_["properties"] description_ = request_object_["description"] if "description" in request_object_ else "/" write_up, __, item_list = ExampleWriting(request_object_properties_, item_list, order=order+1) example_ += write_up elif "type" in request_object_: properties = request_object_properties_ = request_object_ type_ = request_object_["type"] description_ = request_object_["description"] if "description" in request_object_ else "/" elif type_ == "integer" or type_ == "number": example_ += "0" elif type_ == "boolean": example_ += "true" elif type_ == "string": if "format" in properties: type_ += f'(${properties["format"]})' example_ += "2021-11-26T15:18:27.693Z" else: example_ += '"string"' if description_[-1] != ".": description_ += "." if "enum" in properties: type_ += " Enum" description_ += f' Options : {properties["enum"]}' if "string" in type_: example_ = tab + f' "{name}": "{properties["enum"][0]}"' else: example_ = tab + f' "{name}": {properties["enum"][0]}' if "example" in properties: eg = properties["example"] type_ += f'<br/><i><sub>example: {eg}</sub></i>' if isinstance(eg, str): eg = '"' + eg + '"' example_ = tab + f' "{name}": {eg}' if "Array" in type_: example_ += "\n" + tab + " ]" if order == 0 or array: line.append(f'<tr>\n<td width="20%">{name}</td> <td> <code>{type_}</code> <br/> {description_}</td>\n</tr>\n') example += example_ if not array: return example + "\n" + tab + "}", line, item_list return example + "\n" + tab + "}\n" + " " * (order-1) + "]", line, item_list for section, source in documentations.items(): yaml_file = open(source) doc = yaml.load(yaml_file, Loader=yaml.Loader) paths = doc["paths"] for api_call, result in paths.items(): j = 1 content = result["post"] if "post" in result else result["get"] # Create path if not exist destination_folder = pathlib.Path("/".join(content["tags"])) destination_folder.mkdir(parents=True, exist_ok=True) # Create Introduction part with open(destination_folder / f'{j:02} Introduction.html', "w") as html_file: html_file.write("<p>\n") html_file.write(f"{content['summary']}\n") html_file.write("</p>\n") j += 1 # Create Description part if having one if "description" in content: with open(destination_folder / f'{j:02} Description.html', "w") as html_file: html_file.write('<p>\n') html_file.write(f'{content["description"]}\n') html_file.write('</p>\n') j += 1 # Create Request part with open(destination_folder / f'{j:02} Request.html', "w") as html_file: description_ = "" if "parameters" in content: writeUp = RequestTable(api_call, content["parameters"]) elif "requestBody" in content: if "description" in content["requestBody"]: description_ = str(content["requestBody"]["description"]) if description_[-1] != ".": description_ += "." 
description_ += " " writeUp = ResponseTable(content["requestBody"]) else: writeUp = '<table class="table qc-table">\n<thead>\n<tr>\n' writeUp += f'<th colspan="1"><code>{api_call}</code> Method</th>\n</tr>\n</thead>\n' writeUp += f'</tr>\n<td><code>{api_call}</code> method takes no parameters.</td>\n</tr>\n</table>' description_ += f'The <code>{api_call}</code> API accepts requests in the following format:\n' html_file.write("<p>\n" + description_ + "</p>\n") html_file.write(writeUp) j += 1 # Create Response part with open(destination_folder / f'{j:02} Responses.html', "w") as html_file: html_file.write('<p>\n') html_file.write(f'The <code>{api_call}</code> API provides a response in the following format:\n') html_file.write('</p>\n') request_body = content["responses"] for code, properties in request_body.items(): if code == "200": html_file.write('<h4>200 Success</h4>\n') elif code == "401": html_file.write('<h4>401 Authentication Error</h4>\n<table class="table qc-table">\n<thead>\n<tr>\n') html_file.write('<th colspan="2"><code>UnauthorizedError</code> Model - Unauthorized response from the API. Key is missing, invalid, or timestamp is too old for hash.</th>\n') html_file.write('</tr>\n</thead>\n<tr>\n<td width="20%">www_authenticate</td> <td> <code>string</code> <br/> Header</td>\n</tr>\n</table>\n') continue elif code == "404": html_file.write('<h4>404 Not Found Error</h4>\n') html_file.write('<p>The requested item, index, page was not found.</p>\n') continue elif code == "default": html_file.write('<h4>Default Generic Error</h4>\n') writeUp = ResponseTable(properties) html_file.write(writeUp) print(f"Documentation of {section} is generated and inplace!")
[((353, 10, 353, 50), 'yaml.load', 'yaml.load', (), '', False, 'import yaml\n')]
dmh126/forge-python-data-management-api
forge_api_client/hubs.py
9c33f220021251a0340346065e3dd1998fc49a12
from .utils import get_request, authorized


class Hubs:

    @authorized
    def getHubs(self):
        url = self.api_url + '/project/v1/hubs'

        headers = {
            'Authorization': '%s %s' % (self.token_type, self.access_token)
        }

        return get_request(url, headers)

    @authorized
    def getHub(self, hub_id):
        url = self.api_url + '/project/v1/hubs/%s' % hub_id

        headers = {
            'Authorization': '%s %s' % (self.token_type, self.access_token)
        }

        return get_request(url, headers)
[]
munisisazade/create-django-app
tlp/django_app/app/urls.py
f62395af2adaacacc4d3a3857c6570c9647d13a1
from django.conf.urls import url

# from .views import BaseIndexView

urlpatterns = [
    # url(r'^$', BaseIndexView.as_view(), name="index"),
]
[]
madelinemccombe/iron-skillet
tools/archive/create_loadable_configs.py
f7bb805ac5ed0f2b44e4b438f8c021eaf2f5c66b
# Copyright (c) 2018, Palo Alto Networks # # Permission to use, copy, modify, and/or distribute this software for any # purpose with or without fee is hereby granted, provided that the above # copyright notice and this permission notice appear in all copies. # # THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES # WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF # MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR # ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES # WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN # ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF # OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. # Author: Scott Shoaf <[email protected]> ''' Palo Alto Networks create_loadable_configs.py Provides rendering of configuration templates with user defined values Output is a set of loadable full configurations and set commands for Panos and Panorama Edit the config_variables.yaml values and then run the script This software is provided without support, warranty, or guarantee. Use at your own risk. ''' import datetime import os import shutil import sys import time import getpass import oyaml from jinja2 import Environment, FileSystemLoader from passlib.hash import des_crypt from passlib.hash import md5_crypt from passlib.hash import sha256_crypt from passlib.hash import sha512_crypt defined_filters = ['md5_hash', 'des_hash', 'sha512_hash'] def myconfig_newdir(myconfigdir_name, foldertime): ''' create a new main loadable_configs folder if required then new subdirectories for configs :param myconfigdir_name: prefix folder name from the my_variables.py file :param foldertime: datetime when script run; to be used as suffix of folder name :return: the myconfigdir full path name ''' # get the full path to the config directory we want (panos / panorama) myconfigpath = os.path.abspath(os.path.join('..', 'loadable_configs')) if os.path.isdir(myconfigpath) is False: os.mkdir(myconfigpath, mode=0o755) print('created new loadable config directory') # check that configs folder exists and if not create a new one # then create snippets and full sub-directories myconfigdir = '{0}/{1}-{2}'.format(myconfigpath, myconfigdir_name, foldertime) if os.path.isdir(myconfigdir) is False: os.mkdir(myconfigdir, mode=0o755) print('\ncreated new archive folder {0}-{1}'.format(myconfigdir_name, foldertime)) if os.path.isdir('{0}/{1}'.format(myconfigdir, config_type)) is False: os.mkdir('{0}/{1}'.format(myconfigdir, config_type)) print('created new subdirectories for {0}'.format(config_type)) return myconfigdir def create_context(config_var_file): # read the metafile to get variables and values try: with open(config_var_file, 'r') as var_metadata: variables = oyaml.safe_load(var_metadata.read()) except IOError as ioe: print(f'Could not open metadata file {config_var_file}') print(ioe) sys.exit() # grab the metadata values and convert to key-based dictionary jinja_context = dict() for snippet_var in variables['variables']: jinja_context[snippet_var['name']] = snippet_var['value'] return jinja_context def template_render(filename, template_path, render_type, context): ''' render the jinja template using the context value from config_variables.yaml :param filename: name of the template file :param template_path: path for the template file :param render_type: type if full or set commands; aligns with folder name :param context: dict of variables to render :return: return the 
rendered xml file and set conf file ''' print('..creating template for {0}'.format(filename)) env = Environment(loader=FileSystemLoader('{0}/{1}'.format(template_path, render_type))) # load our custom jinja filters here, see the function defs below for reference env.filters['md5_hash'] = md5_hash env.filters['des_hash'] = des_hash env.filters['sha512_hash'] = sha512_hash template = env.get_template(filename) rendered_template = template.render(context) return rendered_template def template_save(snippet_name, myconfigdir, config_type, element): ''' after rendering the template save to the myconfig directory each run saves with a unique prefix name + datetime :param snippet_name: name of the output file :param myconfigdir: path to the my_config directory :param config_type: based on initial run list; eg. panos or panorama :param element: xml element rendered based on input variables; used as folder name :param render_type: type eg. if full or snippets; aligns with folder name :return: no value returned (future could be success code) ''' print('..saving template for {0}'.format(snippet_name)) filename = snippet_name with open('{0}/{1}/{2}'.format(myconfigdir, config_type, filename), 'w') as configfile: configfile.write(element) # copy the variables file used for the render into the my_template folder var_file = 'loadable_config_vars/config_variables.yaml' if os.path.isfile('{0}/{1}'.format(myconfigdir, var_file)) is False: vfilesrc = var_file vfiledst = '{0}/{1}'.format(myconfigdir, var_file) shutil.copy(vfilesrc, vfiledst) return # define functions for custom jinja filters def md5_hash(txt): ''' Returns the MD5 Hashed secret for use as a password hash in the PanOS configuration :param txt: text to be hashed :return: password hash of the string with salt and configuration information. Suitable to place in the phash field in the configurations ''' return md5_crypt.hash(txt) def des_hash(txt): ''' Returns the DES Hashed secret for use as a password hash in the PanOS configuration :param txt: text to be hashed :return: password hash of the string with salt and configuration information. Suitable to place in the phash field in the configurations ''' return des_crypt.hash(txt) def sha256_hash(txt): ''' Returns the SHA256 Hashed secret for use as a password hash in the PanOS configuration :param txt: text to be hashed :return: password hash of the string with salt and configuration information. Suitable to place in the phash field in the configurations ''' return sha256_crypt.hash(txt) def sha512_hash(txt): ''' Returns the SHA512 Hashed secret for use as a password hash in the PanOS configuration :param txt: text to be hashed :return: password hash of the string with salt and configuration information. 
Suitable to place in the phash field in the configurations ''' return sha512_crypt.hash(txt) def replace_variables(config_type, render_type, input_var): ''' get the input variables and render the output configs with jinja2 inputs are read from the template directory and output to my_config :param config_type: panos or panorama to read/write to the respective directories :param archivetime: datetimestamp used for the output my_config folder naming ''' config_variables = 'config_variables.yaml' # create dict of values for the jinja template render context = create_context(config_variables) # update context dict with variables from user input for snippet_var in input_var: context[snippet_var] = input_var[snippet_var] # get the full path to the output directory we want (panos / panorama) template_path = os.path.abspath(os.path.join('..', 'templates', config_type)) # append to the sys path for module lookup sys.path.append(template_path) # output subdir located in loadable_configs dir myconfig_path = myconfig_newdir(input_var['output_dir'], input_var['archive_time']) # render full and set conf files print('\nworking with {0} config template'.format(render_type)) if render_type == 'full': filename = 'iron_skillet_{0}_full.xml'.format(config_type) if render_type == 'set_commands': filename = 'iron_skillet_{0}_full.conf'.format(config_type) element = template_render(filename, template_path, render_type, context) template_save(filename, myconfig_path, config_type, element) print('\nconfigs have been created and can be found in {0}'.format(myconfig_path)) print('along with the metadata values used to render the configs\n') return if __name__ == '__main__': # Use the timestamp to create a unique folder name print('=' * 80) print(' ') print('Welcome to Iron-Skillet'.center(80)) print(' ') print('=' * 80) input_var = {} # archive_time used as part of the my_config directory name input_var['archive_time'] = datetime.datetime.fromtimestamp(time.time()).strftime('%Y%m%d_%H%M%S') print('\ndatetime used for folder creation: {0}\n'.format(input_var['archive_time'])) # this prompts for the prefix name of the output directory input_var['output_dir'] = input('Enter the name of the output directory: ') # this prompts for the superuser username to be added into the configuration; no default admin/admin used input_var['ADMINISTRATOR_USERNAME'] = input('Enter the superuser administrator account username: ') print('\na phash will be created for superuser {0} and added to the config file\n'.format( input_var['ADMINISTRATOR_USERNAME'])) passwordmatch = False # prompt for the superuser password to create a phash and store in the my_config files; no default admin/admin while passwordmatch is False: password1 = getpass.getpass("Enter the superuser administrator account password: ") password2 = getpass.getpass("Enter password again to verify: ") if password1 == password2: input_var['ADMINISTRATOR_PASSWORD'] = password1 passwordmatch = True else: print('\nPasswords do not match. Please try again.\n') # loop through all config types that have their respective template folders for config_type in ['panos', 'panorama']: for render_type in ['full', 'set_commands']: replace_variables(config_type, render_type, input_var)
[((156, 11, 156, 30), 'passlib.hash.md5_crypt.hash', 'md5_crypt.hash', ({(156, 26, 156, 29): 'txt'}, {}), '(txt)', False, 'from passlib.hash import md5_crypt\n'), ((166, 11, 166, 30), 'passlib.hash.des_crypt.hash', 'des_crypt.hash', ({(166, 26, 166, 29): 'txt'}, {}), '(txt)', False, 'from passlib.hash import des_crypt\n'), ((176, 11, 176, 33), 'passlib.hash.sha256_crypt.hash', 'sha256_crypt.hash', ({(176, 29, 176, 32): 'txt'}, {}), '(txt)', False, 'from passlib.hash import sha256_crypt\n'), ((186, 11, 186, 33), 'passlib.hash.sha512_crypt.hash', 'sha512_crypt.hash', ({(186, 29, 186, 32): 'txt'}, {}), '(txt)', False, 'from passlib.hash import sha512_crypt\n'), ((211, 4, 211, 34), 'sys.path.append', 'sys.path.append', ({(211, 20, 211, 33): 'template_path'}, {}), '(template_path)', False, 'import sys\n'), ((55, 35, 55, 73), 'os.path.join', 'os.path.join', ({(55, 48, 55, 52): '""".."""', (55, 54, 55, 72): '"""loadable_configs"""'}, {}), "('..', 'loadable_configs')", False, 'import os\n'), ((56, 7, 56, 34), 'os.path.isdir', 'os.path.isdir', ({(56, 21, 56, 33): 'myconfigpath'}, {}), '(myconfigpath)', False, 'import os\n'), ((57, 8, 57, 42), 'os.mkdir', 'os.mkdir', (), '', False, 'import os\n'), ((63, 7, 63, 33), 'os.path.isdir', 'os.path.isdir', ({(63, 21, 63, 32): 'myconfigdir'}, {}), '(myconfigdir)', False, 'import os\n'), ((64, 8, 64, 41), 'os.mkdir', 'os.mkdir', (), '', False, 'import os\n'), ((143, 8, 143, 39), 'shutil.copy', 'shutil.copy', ({(143, 20, 143, 28): 'vfilesrc', (143, 30, 143, 38): 'vfiledst'}, {}), '(vfilesrc, vfiledst)', False, 'import shutil\n'), ((207, 36, 208, 74), 'os.path.join', 'os.path.join', ({(207, 49, 207, 53): '""".."""', (208, 49, 208, 60): '"""templates"""', (208, 62, 208, 73): 'config_type'}, {}), "('..', 'templates', config_type)", False, 'import os\n'), ((258, 20, 258, 91), 'getpass.getpass', 'getpass.getpass', ({(258, 36, 258, 90): '"""Enter the superuser administrator account password: """'}, {}), "('Enter the superuser administrator account password: ')", False, 'import getpass\n'), ((259, 20, 259, 71), 'getpass.getpass', 'getpass.getpass', ({(259, 36, 259, 70): '"""Enter password again to verify: """'}, {}), "('Enter password again to verify: ')", False, 'import getpass\n'), ((83, 8, 83, 18), 'sys.exit', 'sys.exit', ({}, {}), '()', False, 'import sys\n'), ((243, 64, 243, 75), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n')]
piotrantosz/pactman
pactman/verifier/pytest_plugin.py
2838e273d79831721da9c1b658b8f9d249efc789
import glob import logging import os import warnings import pytest from _pytest.outcomes import Failed from _pytest.reports import TestReport from .broker_pact import BrokerPact, BrokerPacts, PactBrokerConfig from .result import PytestResult, log def pytest_addoption(parser): group = parser.getgroup("pact specific options (pactman)") group.addoption( "--pact-files", default=None, help="pact JSON files to verify (wildcards allowed)" ) group.addoption("--pact-broker-url", default="", help="pact broker URL") group.addoption("--pact-broker-token", default="", help="pact broker bearer token") group.addoption( "--pact-provider-name", default=None, help="pact name of provider being verified" ) group.addoption( "--pact-consumer-name", default=None, help="consumer name to limit pact verification to - " "DEPRECATED, use --pact-verify-consumer instead", ) group.addoption( "--pact-verify-consumer", default=None, help="consumer name to limit pact verification to" ) group.addoption( "--pact-verify-consumer-tag", metavar="TAG", action="append", help="limit broker pacts verified to those matching the tag. May be " "specified multiple times in which case pacts matching any of these " "tags will be verified.", ) group.addoption( "--pact-publish-results", action="store_true", default=False, help="report pact verification results to pact broker", ) group.addoption( "--pact-provider-version", default=None, help="provider version to use when reporting pact results to pact broker", ) group.addoption( "--pact-allow-fail", default=False, action="store_true", help="do not fail the pytest run if any pacts fail verification", ) # Future options to be implemented. Listing them here so naming consistency can be a thing. # group.addoption("--pact-publish-pacts", action="store_true", default=False, # help="publish pacts to pact broker") # group.addoption("--pact-consumer-version", default=None, # help="consumer version to use when publishing pacts to the broker") # group.addoption("--pact-consumer-version-source", default=None, # help="generate consumer version from source 'git-tag' or 'git-hash'") # group.addoption("--pact-consumer-version-tag", metavar='TAG', action="append", # help="tag(s) that should be applied to the consumer version when pacts " # "are uploaded to the broker; multiple tags may be supplied") def get_broker_url(config): return config.getoption("pact_broker_url") or os.environ.get("PACT_BROKER_URL") def get_provider_name(config): return config.getoption("pact_provider_name") or os.environ.get("PACT_PROVIDER_NAME") # add the pact broker URL to the pytest output if running verbose def pytest_report_header(config): if config.getoption("verbose") > 0: location = get_broker_url(config) or config.getoption("pact_files") return [f"Loading pacts from {location}"] def pytest_configure(config): logging.getLogger("pactman").handlers = [] logging.basicConfig(format="%(message)s") verbosity = config.getoption("verbose") if verbosity > 0: log.setLevel(logging.DEBUG) class PytestPactVerifier: def __init__(self, publish_results, provider_version, interaction, consumer): self.publish_results = publish_results self.provider_version = provider_version self.interaction = interaction self.consumer = consumer def verify(self, provider_url, provider_setup, extra_provider_headers={}): try: self.interaction.verify_with_callable_setup(provider_url, provider_setup, extra_provider_headers) except (Failed, AssertionError) as e: raise Failed(str(e)) from None def finish(self): if self.consumer and self.publish_results and 
self.provider_version: self.consumer.publish_result(self.provider_version) def flatten_pacts(pacts): for consumer in pacts: last = consumer.interactions[-1] for interaction in consumer.interactions: if interaction is last: yield (interaction, consumer) else: yield (interaction, None) def load_pact_files(file_location): for filename in glob.glob(file_location, recursive=True): yield BrokerPact.load_file(filename, result_factory=PytestResult) def test_id(identifier): interaction, _ = identifier return str(interaction) def pytest_generate_tests(metafunc): if "pact_verifier" in metafunc.fixturenames: broker_url = get_broker_url(metafunc.config) if not broker_url: pact_files_location = metafunc.config.getoption("pact_files") if not pact_files_location: raise ValueError("need a --pact-broker-url or --pact-files option") pact_files = load_pact_files(pact_files_location) metafunc.parametrize( "pact_verifier", flatten_pacts(pact_files), ids=test_id, indirect=True ) else: provider_name = get_provider_name(metafunc.config) if not provider_name: raise ValueError("--pact-broker-url requires the --pact-provider-name option") broker = PactBrokerConfig( broker_url, metafunc.config.getoption("pact_broker_token"), metafunc.config.getoption("pact_verify_consumer_tag", []), ) broker_pacts = BrokerPacts( provider_name, pact_broker=broker, result_factory=PytestResult ) pacts = broker_pacts.consumers() filter_consumer_name = metafunc.config.getoption("pact_verify_consumer") if not filter_consumer_name: filter_consumer_name = metafunc.config.getoption("pact_consumer_name") if filter_consumer_name: warnings.warn( "The --pact-consumer-name command-line option is deprecated " "and will be removed in the 3.0.0 release.", DeprecationWarning, ) if filter_consumer_name: pacts = [pact for pact in pacts if pact.consumer == filter_consumer_name] metafunc.parametrize("pact_verifier", flatten_pacts(pacts), ids=test_id, indirect=True) class PactTestReport(TestReport): """Custom TestReport that allows us to attach an interaction to the result, and then display the interaction's verification result ouput as well as the traceback of the failure. 
""" @classmethod def from_item_and_call(cls, item, call, interaction): report = super().from_item_and_call(item, call) report.pact_interaction = interaction # the toterminal() call can't reasonably get at this config, so we store it here report.verbosity = item.config.option.verbose return report def toterminal(self, out): out.line("Pact failure details:", bold=True) for text, kw in self.pact_interaction.result.results_for_terminal(): out.line(text, **kw) if self.verbosity > 0: out.line("Traceback:", bold=True) return super().toterminal(out) else: out.line("Traceback not shown, use pytest -v to show it") def pytest_runtest_makereport(item, call): if call.when != "call" or "pact_verifier" not in getattr(item, "fixturenames", []): return # use our custom TestReport subclass if we're reporting on a pact verification call interaction = item.funcargs["pact_verifier"].interaction report = PactTestReport.from_item_and_call(item, call, interaction) if report.failed and item.config.getoption("pact_allow_fail"): # convert the fail into an "expected" fail, which allows the run to pass report.wasxfail = True report.outcome = "passed" return report def pytest_report_teststatus(report, config): if not hasattr(report, "pact_interaction"): return if hasattr(report, "wasxfail"): # wasxfail usually displays an "X" but since it's not *expected* to fail an "f" is a little clearer return "ignore fail", "f", "IGNORE_FAIL" @pytest.fixture() def pact_verifier(pytestconfig, request): interaction, consumer = request.param p = PytestPactVerifier( pytestconfig.getoption("pact_publish_results"), pytestconfig.getoption("pact_provider_version"), interaction, consumer, ) yield p p.finish()
[((217, 1, 217, 17), 'pytest.fixture', 'pytest.fixture', ({}, {}), '()', False, 'import pytest\n'), ((89, 4, 89, 45), 'logging.basicConfig', 'logging.basicConfig', (), '', False, 'import logging\n'), ((124, 20, 124, 61), 'glob.glob', 'glob.glob', (), '', False, 'import glob\n'), ((73, 50, 73, 83), 'os.environ.get', 'os.environ.get', ({(73, 65, 73, 82): '"""PACT_BROKER_URL"""'}, {}), "('PACT_BROKER_URL')", False, 'import os\n'), ((77, 53, 77, 89), 'os.environ.get', 'os.environ.get', ({(77, 68, 77, 88): '"""PACT_PROVIDER_NAME"""'}, {}), "('PACT_PROVIDER_NAME')", False, 'import os\n'), ((88, 4, 88, 32), 'logging.getLogger', 'logging.getLogger', ({(88, 22, 88, 31): '"""pactman"""'}, {}), "('pactman')", False, 'import logging\n'), ((161, 20, 165, 21), 'warnings.warn', 'warnings.warn', ({(162, 24, 163, 67): '"""The --pact-consumer-name command-line option is deprecated and will be removed in the 3.0.0 release."""', (164, 24, 164, 42): 'DeprecationWarning'}, {}), "(\n 'The --pact-consumer-name command-line option is deprecated and will be removed in the 3.0.0 release.'\n , DeprecationWarning)", False, 'import warnings\n')]
karttur/geoimagine02-grass
interface/docstring.py
09c207707ddd0dae04a871e006e184409aa87d99
# -*- coding: utf-8 -*-


def docstring_property(class_doc):
    """Property attribute for docstrings.
    Took from: https://gist.github.com/bfroehle/4041015

    >>> class A(object):
    ...     '''Main docstring'''
    ...     def __init__(self, x):
    ...         self.x = x
    ...     @docstring_property(__doc__)
    ...     def __doc__(self):
    ...         return "My value of x is %s." % self.x
    >>> A.__doc__
    'Main docstring'
    >>> a = A(10)
    >>> a.__doc__
    'My value of x is 10.'
    """

    def wrapper(fget):
        return DocstringProperty(class_doc, fget)

    return wrapper


class DocstringProperty(object):
    """Property for the `__doc__` attribute.

    Different than `property` in the following two ways:

    * When the attribute is accessed from the main class, it returns the
      value of `class_doc`, *not* the property itself. This is necessary
      so Sphinx and other documentation tools can access the class docstring.

    * Only supports getting the attribute; setting and deleting raise an
      `AttributeError`.
    """

    def __init__(self, class_doc, fget):
        self.class_doc = class_doc
        self.fget = fget

    def __get__(self, obj, type=None):
        if obj is None:
            return self.class_doc
        else:
            return self.fget(obj)

    def __set__(self, obj, value):
        raise AttributeError("can't set attribute")

    def __delete__(self, obj):
        raise AttributeError("can't delete attribute")
[]
gsn9/autocnet
autocnet/matcher/cuda_matcher.py
ddcca3ce3a6b59f720804bb3da03857efa4ff534
import warnings

try:
    import cudasift as cs
except:
    cs = None

import numpy as np
import pandas as pd


def match(edge, aidx=None, bidx=None, **kwargs):
    """
    Apply a composite CUDA matcher and ratio check.  If this method is used,
    no additional ratio check is necessary and no symmetry check is required.
    The ratio check is embedded on the cuda side and returned as an
    ambiguity value.  In testing symmetry is not required as it is expensive
    without significant gain in accuracy when using this implementation.
    """

    source_kps = edge.source.get_keypoints(index=aidx)
    source_des = edge.source.descriptors[aidx]
    source_map = {k:v for k, v in enumerate(source_kps.index)}

    destin_kps = edge.destination.get_keypoints(index=bidx)
    destin_des = edge.destination.descriptors[bidx]
    destin_map = {k:v for k, v in enumerate(destin_kps.index)}

    s_siftdata = cs.PySiftData.from_data_frame(source_kps, source_des)
    d_siftdata = cs.PySiftData.from_data_frame(destin_kps, destin_des)


    cs.PyMatchSiftData(s_siftdata, d_siftdata)
    matches, _ = s_siftdata.to_data_frame()

    # Matches are reindexed 0-n, but need to be remapped to the source_kps,
    # destin_kps indices.  This is the mismatch)
    source = np.empty(len(matches))
    source[:] = edge.source['node_id']
    destination = np.empty(len(matches))
    destination[:] = edge.destination['node_id']
    df = pd.concat([pd.Series(source), pd.Series(matches.index),
            pd.Series(destination),
            matches.match, matches.score, matches.ambiguity], axis=1)
    df.columns = ['source_image', 'source_idx', 'destination_image',
                  'destination_idx', 'score', 'ambiguity']

    df.source_idx = df.source_idx.map(source_map)
    df.destination_idx = df.destination_idx.map(destin_map)

    # Set the matches and set the 'ratio' (ambiguity) mask
    edge.matches = df
[((29, 17, 29, 70), 'cudasift.PySiftData.from_data_frame', 'cs.PySiftData.from_data_frame', ({(29, 47, 29, 57): 'source_kps', (29, 59, 29, 69): 'source_des'}, {}), '(source_kps, source_des)', True, 'import cudasift as cs\n'), ((30, 17, 30, 70), 'cudasift.PySiftData.from_data_frame', 'cs.PySiftData.from_data_frame', ({(30, 47, 30, 57): 'destin_kps', (30, 59, 30, 69): 'destin_des'}, {}), '(destin_kps, destin_des)', True, 'import cudasift as cs\n'), ((33, 4, 33, 46), 'cudasift.PyMatchSiftData', 'cs.PyMatchSiftData', ({(33, 23, 33, 33): 's_siftdata', (33, 35, 33, 45): 'd_siftdata'}, {}), '(s_siftdata, d_siftdata)', True, 'import cudasift as cs\n'), ((42, 20, 42, 37), 'pandas.Series', 'pd.Series', ({(42, 30, 42, 36): 'source'}, {}), '(source)', True, 'import pandas as pd\n'), ((42, 39, 42, 63), 'pandas.Series', 'pd.Series', ({(42, 49, 42, 62): 'matches.index'}, {}), '(matches.index)', True, 'import pandas as pd\n'), ((43, 12, 43, 34), 'pandas.Series', 'pd.Series', ({(43, 22, 43, 33): 'destination'}, {}), '(destination)', True, 'import pandas as pd\n')]
FabienArcellier/blueprint-webapp-flask-restx
app/apis/__init__.py
84bc9dbe697c4b0f6667d2a2d8144a3f934a307a
from flask_restx import Api

from app.apis.hello import api as hello

api = Api(
    title='api',
    version='1.0',
    description='',
    prefix='/api',
    doc='/api'
)

api.add_namespace(hello)
[((5, 6, 11, 1), 'flask_restx.Api', 'Api', (), '', False, 'from flask_restx import Api\n')]
Kantouzin/brainfuck
tests/test_core.py
812834320b080e2317d3fac377db64782057c8f4
# coding: utf-8

import unittest
from test.support import captured_stdout

from brainfuck import BrainFuck


class TestCore(unittest.TestCase):
    def test_hello_world(self):
        bf = BrainFuck()

        with captured_stdout() as stdout:
            bf.run()

        self.assertEqual(stdout.getvalue(), "Hello, world!\n")

    def test_fizzbuzz(self):
        bf = BrainFuck()

        bf.load_file("./tests/fizz_buzz.txt")

        with captured_stdout() as stdout:
            bf.run()

        fizzbuzz_list = list()
        for i in range(1, 101):
            if i % 15 == 0:
                fizzbuzz_list.append("FizzBuzz")
            elif i % 3 == 0:
                fizzbuzz_list.append("Fizz")
            elif i % 5 == 0:
                fizzbuzz_list.append("Buzz")
            else:
                fizzbuzz_list.append(str(i))
            fizzbuzz_list.append("\n")

        self.assertEqual(stdout.getvalue(), " ".join(fizzbuzz_list))

    def test_set_command(self):
        bf = BrainFuck()

        bf.set_command("にゃにゃ", "にゃー", "にゃっ", "にゃん", "にゃ。", "にゃ、", "「", "」")

        bf.load_file("./tests/hello_world_nya.txt")

        with captured_stdout() as stdout:
            bf.run()

        self.assertEqual(stdout.getvalue(), "Hello, world!\n")


if __name__ == "__main__":
    unittest.main()
[((54, 4, 54, 19), 'unittest.main', 'unittest.main', ({}, {}), '()', False, 'import unittest\n'), ((11, 13, 11, 24), 'brainfuck.BrainFuck', 'BrainFuck', ({}, {}), '()', False, 'from brainfuck import BrainFuck\n'), ((19, 13, 19, 24), 'brainfuck.BrainFuck', 'BrainFuck', ({}, {}), '()', False, 'from brainfuck import BrainFuck\n'), ((41, 13, 41, 24), 'brainfuck.BrainFuck', 'BrainFuck', ({}, {}), '()', False, 'from brainfuck import BrainFuck\n'), ((13, 13, 13, 30), 'test.support.captured_stdout', 'captured_stdout', ({}, {}), '()', False, 'from test.support import captured_stdout\n'), ((23, 13, 23, 30), 'test.support.captured_stdout', 'captured_stdout', ({}, {}), '()', False, 'from test.support import captured_stdout\n'), ((47, 13, 47, 30), 'test.support.captured_stdout', 'captured_stdout', ({}, {}), '()', False, 'from test.support import captured_stdout\n')]
poltavski/social-network-frontend
main.py
ccc3410e23e42cfc65efd811aba262ec88163481
from fastapi import FastAPI, Request, Response
from fastapi.responses import HTMLResponse
from fastapi.staticfiles import StaticFiles
from fastapi.templating import Jinja2Templates
from utils import get_page_data, process_initial
import uvicorn

app = FastAPI()
templates = Jinja2Templates(directory="templates")
app.mount("/static", StaticFiles(directory="static"), name="static")


@app.get("/", response_class=HTMLResponse)
async def home(request: Request):
    # Expect requests with cookies
    return process_initial(request)


@app.get("/page", response_class=HTMLResponse)
async def home(request: Request):
    # Expect requests with cookies
    return get_page_data(request)


if __name__ == "__main__":
    uvicorn.run("main:app", host="127.0.0.1", port=8050, log_level="info")
[((8, 6, 8, 15), 'fastapi.FastAPI', 'FastAPI', ({}, {}), '()', False, 'from fastapi import FastAPI, Request, Response\n'), ((9, 12, 9, 50), 'fastapi.templating.Jinja2Templates', 'Jinja2Templates', (), '', False, 'from fastapi.templating import Jinja2Templates\n'), ((10, 21, 10, 52), 'fastapi.staticfiles.StaticFiles', 'StaticFiles', (), '', False, 'from fastapi.staticfiles import StaticFiles\n'), ((16, 11, 16, 35), 'utils.process_initial', 'process_initial', ({(16, 27, 16, 34): 'request'}, {}), '(request)', False, 'from utils import get_page_data, process_initial\n'), ((22, 11, 22, 33), 'utils.get_page_data', 'get_page_data', ({(22, 25, 22, 32): 'request'}, {}), '(request)', False, 'from utils import get_page_data, process_initial\n'), ((26, 4, 26, 74), 'uvicorn.run', 'uvicorn.run', (), '', False, 'import uvicorn\n')]
Ku-Al/OpenManage-Enterprise
Core/Python/create_static_group.py
5cc67435d7cedb091edb07311ed9dceeda43277f
# # Python script using OME API to create a new static group # # _author_ = Raajeev Kalyanaraman <[email protected]> # _version_ = 0.1 # # Copyright (c) 2020 Dell EMC Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """ SYNOPSIS: Script to create a new static group DESCRIPTION: This script exercises the OME REST API to create a new static group. The user is responsible for adding devices to the group once the group has been successfully created. For authentication X-Auth is used over Basic Authentication Note that the credentials entered are not stored to disk. EXAMPLE: python create_static_group.py --ip <xx> --user <username> --password <pwd> --groupname "Random Test Group" """ import json import argparse from argparse import RawTextHelpFormatter import urllib3 import requests def create_static_group(ip_address, user_name, password, group_name): """ Authenticate with OME and enumerate groups """ try: session_url = 'https://%s/api/SessionService/Sessions' % ip_address group_url = "https://%s/api/GroupService/Groups?$filter=Name eq 'Static Groups'" % ip_address headers = {'content-type': 'application/json'} user_details = {'UserName': user_name, 'Password': password, 'SessionType': 'API'} session_info = requests.post(session_url, verify=False, data=json.dumps(user_details), headers=headers) if session_info.status_code == 201: headers['X-Auth-Token'] = session_info.headers['X-Auth-Token'] response = requests.get(group_url, headers=headers, verify=False) if response.status_code == 200: json_data = response.json() if json_data['@odata.count'] > 0: # Technically there should be only one result in the filter group_id = json_data['value'][0]['Id'] group_payload = {"GroupModel": { "Name": group_name, "Description": "", "MembershipTypeId": 12, "ParentId": int(group_id)} } create_url = 'https://%s/api/GroupService/Actions/GroupService.CreateGroup' % ip_address create_resp = requests.post(create_url, headers=headers, verify=False, data=json.dumps(group_payload)) if create_resp.status_code == 200: print("New group created : ID =", create_resp.text) elif create_resp.status_code == 400: print("Failed group creation ...See error info below") print(json.dumps(create_resp.json(), indent=4, sort_keys=False)) else: print("Unable to retrieve group list from %s" % ip_address) else: print("Unable to create a session with appliance %s" % ip_address) except Exception as error: print("Unexpected error:", str(error)) if __name__ == '__main__': urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning) parser = argparse.ArgumentParser(description=__doc__, formatter_class=RawTextHelpFormatter) parser.add_argument("--ip", "-i", required=True, help="OME Appliance IP") parser.add_argument("--user", "-u", required=False, help="Username for OME Appliance", default="admin") parser.add_argument("--password", "-p", required=True, help="Password for OME Appliance") parser.add_argument("--groupname", "-g", required=True, help="A valid name for the group") args = parser.parse_args() create_static_group(args.ip, args.user, 
args.password, args.groupname)
[((89, 4, 89, 71), 'urllib3.disable_warnings', 'urllib3.disable_warnings', ({(89, 29, 89, 70): 'urllib3.exceptions.InsecureRequestWarning'}, {}), '(urllib3.exceptions.InsecureRequestWarning)', False, 'import urllib3\n'), ((91, 13, 91, 95), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (), '', False, 'import argparse\n'), ((58, 23, 58, 77), 'requests.get', 'requests.get', (), '', False, 'import requests\n'), ((54, 42, 54, 66), 'json.dumps', 'json.dumps', ({(54, 53, 54, 65): 'user_details'}, {}), '(user_details)', False, 'import json\n'), ((73, 53, 73, 78), 'json.dumps', 'json.dumps', ({(73, 64, 73, 77): 'group_payload'}, {}), '(group_payload)', False, 'import json\n')]
kagrze/ignite
examples/references/segmentation/pascal_voc2012/code/dataflow/dataloaders.py
18708a76f86623545311d35bc48673eac9e55591
from typing import Callable, Optional, Tuple, Union import numpy as np from torch.utils.data import DataLoader, Sampler from torch.utils.data.dataset import Subset, ConcatDataset import torch.utils.data.distributed as data_dist from dataflow.datasets import get_train_dataset, get_val_dataset, TransformedDataset, get_train_noval_sbdataset def get_train_val_loaders(root_path: str, train_transforms: Callable, val_transforms: Callable, batch_size: int = 16, num_workers: int = 8, val_batch_size: Optional[int] = None, pin_memory: bool = True, random_seed: Optional[int] = None, train_sampler: Optional[Union[Sampler, str]] = None, val_sampler: Optional[Union[Sampler, str]] = None, with_sbd: Optional[str] = None, limit_train_num_samples: Optional[int] = None, limit_val_num_samples: Optional[int] = None) -> Tuple[DataLoader, DataLoader, DataLoader]: train_ds = get_train_dataset(root_path) val_ds = get_val_dataset(root_path) if with_sbd is not None: sbd_train_ds = get_train_noval_sbdataset(with_sbd) train_ds = ConcatDataset([train_ds, sbd_train_ds]) if random_seed is not None: np.random.seed(random_seed) if limit_train_num_samples is not None: train_indices = np.random.permutation(len(train_ds))[:limit_train_num_samples] train_ds = Subset(train_ds, train_indices) if limit_val_num_samples is not None: val_indices = np.random.permutation(len(val_ds))[:limit_val_num_samples] val_ds = Subset(val_ds, val_indices) # random samples for evaluation on training dataset if len(val_ds) < len(train_ds): train_eval_indices = np.random.permutation(len(train_ds))[:len(val_ds)] train_eval_ds = Subset(train_ds, train_eval_indices) else: train_eval_ds = train_ds train_ds = TransformedDataset(train_ds, transform_fn=train_transforms) val_ds = TransformedDataset(val_ds, transform_fn=val_transforms) train_eval_ds = TransformedDataset(train_eval_ds, transform_fn=val_transforms) if isinstance(train_sampler, str): assert train_sampler == 'distributed' train_sampler = data_dist.DistributedSampler(train_ds) if isinstance(val_sampler, str): assert val_sampler == 'distributed' val_sampler = data_dist.DistributedSampler(val_ds, shuffle=False) train_loader = DataLoader(train_ds, shuffle=train_sampler is None, batch_size=batch_size, num_workers=num_workers, sampler=train_sampler, pin_memory=pin_memory, drop_last=True) val_batch_size = batch_size * 4 if val_batch_size is None else val_batch_size val_loader = DataLoader(val_ds, shuffle=False, sampler=val_sampler, batch_size=val_batch_size, num_workers=num_workers, pin_memory=pin_memory, drop_last=False) train_eval_loader = DataLoader(train_eval_ds, shuffle=False, sampler=val_sampler, batch_size=val_batch_size, num_workers=num_workers, pin_memory=pin_memory, drop_last=False) return train_loader, val_loader, train_eval_loader def get_inference_dataloader(root_path: str, mode: str, transforms: Callable, batch_size: int = 16, num_workers: int = 8, pin_memory: bool = True, limit_num_samples: Optional[int] = None) -> DataLoader: assert mode in ('train', 'test'), "Mode should be 'train' or 'test'" get_dataset_fn = get_train_dataset if mode == "train" else get_val_dataset dataset = get_dataset_fn(root_path, return_meta=True) if limit_num_samples is not None: indices = np.random.permutation(len(dataset))[:limit_num_samples] dataset = Subset(dataset, indices) dataset = TransformedDataset(dataset, transform_fn=transforms) loader = DataLoader(dataset, shuffle=False, batch_size=batch_size, num_workers=num_workers, pin_memory=pin_memory, drop_last=False) return loader
[((26, 15, 26, 43), 'dataflow.datasets.get_train_dataset', 'get_train_dataset', ({(26, 33, 26, 42): 'root_path'}, {}), '(root_path)', False, 'from dataflow.datasets import get_train_dataset, get_val_dataset, TransformedDataset, get_train_noval_sbdataset\n'), ((27, 13, 27, 39), 'dataflow.datasets.get_val_dataset', 'get_val_dataset', ({(27, 29, 27, 38): 'root_path'}, {}), '(root_path)', False, 'from dataflow.datasets import get_train_dataset, get_val_dataset, TransformedDataset, get_train_noval_sbdataset\n'), ((51, 15, 51, 74), 'dataflow.datasets.TransformedDataset', 'TransformedDataset', (), '', False, 'from dataflow.datasets import get_train_dataset, get_val_dataset, TransformedDataset, get_train_noval_sbdataset\n'), ((52, 13, 52, 68), 'dataflow.datasets.TransformedDataset', 'TransformedDataset', (), '', False, 'from dataflow.datasets import get_train_dataset, get_val_dataset, TransformedDataset, get_train_noval_sbdataset\n'), ((53, 20, 53, 82), 'dataflow.datasets.TransformedDataset', 'TransformedDataset', (), '', False, 'from dataflow.datasets import get_train_dataset, get_val_dataset, TransformedDataset, get_train_noval_sbdataset\n'), ((63, 19, 66, 68), 'torch.utils.data.DataLoader', 'DataLoader', (), '', False, 'from torch.utils.data import DataLoader, Sampler\n'), ((69, 17, 71, 67), 'torch.utils.data.DataLoader', 'DataLoader', (), '', False, 'from torch.utils.data import DataLoader, Sampler\n'), ((73, 24, 75, 74), 'torch.utils.data.DataLoader', 'DataLoader', (), '', False, 'from torch.utils.data import DataLoader, Sampler\n'), ((97, 14, 97, 66), 'dataflow.datasets.TransformedDataset', 'TransformedDataset', (), '', False, 'from dataflow.datasets import get_train_dataset, get_val_dataset, TransformedDataset, get_train_noval_sbdataset\n'), ((99, 13, 101, 63), 'torch.utils.data.DataLoader', 'DataLoader', (), '', False, 'from torch.utils.data import DataLoader, Sampler\n'), ((30, 23, 30, 58), 'dataflow.datasets.get_train_noval_sbdataset', 'get_train_noval_sbdataset', ({(30, 49, 30, 57): 'with_sbd'}, {}), '(with_sbd)', False, 'from dataflow.datasets import get_train_dataset, get_val_dataset, TransformedDataset, get_train_noval_sbdataset\n'), ((31, 19, 31, 58), 'torch.utils.data.dataset.ConcatDataset', 'ConcatDataset', ({(31, 33, 31, 57): '[train_ds, sbd_train_ds]'}, {}), '([train_ds, sbd_train_ds])', False, 'from torch.utils.data.dataset import Subset, ConcatDataset\n'), ((34, 8, 34, 35), 'numpy.random.seed', 'np.random.seed', ({(34, 23, 34, 34): 'random_seed'}, {}), '(random_seed)', True, 'import numpy as np\n'), ((38, 19, 38, 50), 'torch.utils.data.dataset.Subset', 'Subset', ({(38, 26, 38, 34): 'train_ds', (38, 36, 38, 49): 'train_indices'}, {}), '(train_ds, train_indices)', False, 'from torch.utils.data.dataset import Subset, ConcatDataset\n'), ((42, 17, 42, 44), 'torch.utils.data.dataset.Subset', 'Subset', ({(42, 24, 42, 30): 'val_ds', (42, 32, 42, 43): 'val_indices'}, {}), '(val_ds, val_indices)', False, 'from torch.utils.data.dataset import Subset, ConcatDataset\n'), ((47, 24, 47, 60), 'torch.utils.data.dataset.Subset', 'Subset', ({(47, 31, 47, 39): 'train_ds', (47, 41, 47, 59): 'train_eval_indices'}, {}), '(train_ds, train_eval_indices)', False, 'from torch.utils.data.dataset import Subset, ConcatDataset\n'), ((57, 24, 57, 62), 'torch.utils.data.distributed.DistributedSampler', 'data_dist.DistributedSampler', ({(57, 53, 57, 61): 'train_ds'}, {}), '(train_ds)', True, 'import torch.utils.data.distributed as data_dist\n'), ((61, 22, 61, 73), 
'torch.utils.data.distributed.DistributedSampler', 'data_dist.DistributedSampler', (), '', True, 'import torch.utils.data.distributed as data_dist\n'), ((95, 18, 95, 42), 'torch.utils.data.dataset.Subset', 'Subset', ({(95, 25, 95, 32): 'dataset', (95, 34, 95, 41): 'indices'}, {}), '(dataset, indices)', False, 'from torch.utils.data.dataset import Subset, ConcatDataset\n')]
autobotasia/saleor
saleor/core/jwt.py
e03e9f6ab1bddac308a6609d6b576a87e90ae655
from datetime import datetime, timedelta from typing import Any, Dict, Optional import graphene import jwt from django.conf import settings from django.core.handlers.wsgi import WSGIRequest from ..account.models import User from ..app.models import App from .permissions import ( get_permission_names, get_permissions_from_codenames, get_permissions_from_names, ) JWT_ALGORITHM = "HS256" SALEOR_AUTH_HEADER = "HTTP_AUTHORIZATION_BEARER" DEFAULT_AUTH_HEADER = "HTTP_AUTHORIZATION" AUTH_HEADER_PREFIXES = ["JWT", "BEARER"] JWT_ACCESS_TYPE = "access" JWT_REFRESH_TYPE = "refresh" JWT_THIRDPARTY_ACCESS_TYPE = "thirdparty" JWT_REFRESH_TOKEN_COOKIE_NAME = "refreshToken" PERMISSIONS_FIELD = "permissions" JWT_SALEOR_OWNER_NAME = "saleor" JWT_OWNER_FIELD = "owner" def jwt_base_payload( exp_delta: Optional[timedelta], token_owner: str ) -> Dict[str, Any]: utc_now = datetime.utcnow() payload = {"iat": utc_now, JWT_OWNER_FIELD: token_owner} if exp_delta: payload["exp"] = utc_now + exp_delta return payload def jwt_user_payload( user: User, token_type: str, exp_delta: Optional[timedelta], additional_payload: Optional[Dict[str, Any]] = None, token_owner: str = JWT_SALEOR_OWNER_NAME, ) -> Dict[str, Any]: payload = jwt_base_payload(exp_delta, token_owner) payload.update( { "token": user.jwt_token_key, "email": user.email, "type": token_type, "user_id": graphene.Node.to_global_id("User", user.id), "is_staff": user.is_staff, "is_supplier": user.is_supplier, } ) if additional_payload: payload.update(additional_payload) return payload def jwt_encode(payload: Dict[str, Any]) -> str: return jwt.encode( payload, settings.SECRET_KEY, # type: ignore JWT_ALGORITHM, ) def jwt_decode_with_exception_handler( token: str, verify_expiration=settings.JWT_EXPIRE ) -> Optional[Dict[str, Any]]: try: return jwt_decode(token, verify_expiration=verify_expiration) except jwt.PyJWTError: return None def jwt_decode(token: str, verify_expiration=settings.JWT_EXPIRE) -> Dict[str, Any]: return jwt.decode( token, settings.SECRET_KEY, # type: ignore algorithms=[JWT_ALGORITHM], options={"verify_exp": verify_expiration}, ) def create_token(payload: Dict[str, Any], exp_delta: timedelta) -> str: payload.update(jwt_base_payload(exp_delta, token_owner=JWT_SALEOR_OWNER_NAME)) return jwt_encode(payload) def create_access_token( user: User, additional_payload: Optional[Dict[str, Any]] = None ) -> str: payload = jwt_user_payload( user, JWT_ACCESS_TYPE, settings.JWT_TTL_ACCESS, additional_payload ) return jwt_encode(payload) def create_refresh_token( user: User, additional_payload: Optional[Dict[str, Any]] = None ) -> str: payload = jwt_user_payload( user, JWT_REFRESH_TYPE, settings.JWT_TTL_REFRESH, additional_payload, ) return jwt_encode(payload) def get_token_from_request(request: WSGIRequest) -> Optional[str]: auth_token = request.META.get(SALEOR_AUTH_HEADER) if not auth_token: auth = request.META.get(DEFAULT_AUTH_HEADER, "").split(maxsplit=1) if len(auth) == 2 and auth[0].upper() in AUTH_HEADER_PREFIXES: auth_token = auth[1] return auth_token def get_user_from_payload(payload: Dict[str, Any]) -> Optional[User]: user = User.objects.filter(email=payload["email"], is_active=True).first() user_jwt_token = payload.get("token") if not user_jwt_token or not user: raise jwt.InvalidTokenError( "Invalid token. Create new one by using tokenCreate mutation." ) if user.jwt_token_key != user_jwt_token: raise jwt.InvalidTokenError( "Invalid token. Create new one by using tokenCreate mutation." 
) return user def is_saleor_token(token: str) -> bool: """Confirm that token was generated by Saleor not by plugin.""" try: payload = jwt.decode(token, options={"verify_signature": False}) except jwt.PyJWTError: return False owner = payload.get(JWT_OWNER_FIELD) if not owner or owner != JWT_SALEOR_OWNER_NAME: return False return True def get_user_from_access_token(token: str) -> Optional[User]: if not is_saleor_token(token): return None payload = jwt_decode(token) return get_user_from_access_payload(payload) def get_user_from_access_payload(payload: dict) -> Optional[User]: jwt_type = payload.get("type") if jwt_type not in [JWT_ACCESS_TYPE, JWT_THIRDPARTY_ACCESS_TYPE]: raise jwt.InvalidTokenError( "Invalid token. Create new one by using tokenCreate mutation." ) permissions = payload.get(PERMISSIONS_FIELD, None) user = get_user_from_payload(payload) if user and permissions is not None: token_permissions = get_permissions_from_names(permissions) token_codenames = [perm.codename for perm in token_permissions] user.effective_permissions = get_permissions_from_codenames(token_codenames) user.is_staff = True if user.effective_permissions else False return user def create_access_token_for_app(app: "App", user: "User"): """Create access token for app. App can use user jwt token to proceed given operation on the Saleor side. The token which can be used by App has additional field defining the permissions assigned to it. The permissions set is the intersection of user permissions and app permissions. """ app_permissions = app.permissions.all() app_permission_enums = get_permission_names(app_permissions) permissions = user.effective_permissions user_permission_enums = get_permission_names(permissions) app_id = graphene.Node.to_global_id("App", app.id) additional_payload = { "app": app_id, PERMISSIONS_FIELD: list(app_permission_enums & user_permission_enums), } payload = jwt_user_payload( user, JWT_THIRDPARTY_ACCESS_TYPE, exp_delta=settings.JWT_TTL_APP_ACCESS, additional_payload=additional_payload, ) return jwt_encode(payload)
[((36, 14, 36, 31), 'datetime.datetime.utcnow', 'datetime.utcnow', ({}, {}), '()', False, 'from datetime import datetime, timedelta\n'), ((68, 11, 72, 5), 'jwt.encode', 'jwt.encode', ({(69, 8, 69, 15): 'payload', (70, 8, 70, 27): 'settings.SECRET_KEY', (71, 8, 71, 21): 'JWT_ALGORITHM'}, {}), '(payload, settings.SECRET_KEY, JWT_ALGORITHM)', False, 'import jwt\n'), ((85, 11, 90, 5), 'jwt.decode', 'jwt.decode', (), '', False, 'import jwt\n'), ((192, 13, 192, 54), 'graphene.Node.to_global_id', 'graphene.Node.to_global_id', ({(192, 40, 192, 45): '"""App"""', (192, 47, 192, 53): 'app.id'}, {}), "('App', app.id)", False, 'import graphene\n'), ((134, 14, 136, 9), 'jwt.InvalidTokenError', 'jwt.InvalidTokenError', ({(135, 12, 135, 74): '"""Invalid token. Create new one by using tokenCreate mutation."""'}, {}), "(\n 'Invalid token. Create new one by using tokenCreate mutation.')", False, 'import jwt\n'), ((138, 14, 140, 9), 'jwt.InvalidTokenError', 'jwt.InvalidTokenError', ({(139, 12, 139, 74): '"""Invalid token. Create new one by using tokenCreate mutation."""'}, {}), "(\n 'Invalid token. Create new one by using tokenCreate mutation.')", False, 'import jwt\n'), ((147, 18, 147, 72), 'jwt.decode', 'jwt.decode', (), '', False, 'import jwt\n'), ((166, 14, 168, 9), 'jwt.InvalidTokenError', 'jwt.InvalidTokenError', ({(167, 12, 167, 74): '"""Invalid token. Create new one by using tokenCreate mutation."""'}, {}), "(\n 'Invalid token. Create new one by using tokenCreate mutation.')", False, 'import jwt\n'), ((57, 23, 57, 66), 'graphene.Node.to_global_id', 'graphene.Node.to_global_id', ({(57, 50, 57, 56): '"""User"""', (57, 58, 57, 65): 'user.id'}, {}), "('User', user.id)", False, 'import graphene\n')]
pancaprima/locust
locust/configuration.py
dba803fcdd13ff2fada4e8b8ee37a163aa519a48
import os, json, logging, jsonpath_rw_ext, jsonpath_rw from jsonpath_rw import jsonpath, parse from . import events from ast import literal_eval from flask import make_response logger = logging.getLogger(__name__) CONFIG_PATH = '/tests/settings/config.json' class ClientConfiguration: """ This class is a handler for data configuration with JSON data structure. """ def __init__(self): self.config_data = None def read_json(self, path=None): """ Will get the data of configuration as JSON. It reads configuration file once. """ if self.config_data is None: if path is None: path = CONFIG_PATH else : if path.startswith('./') : path = path[1:] elif not path.startswith('/'): path = '/%s' % (path) try: with open((os.environ['PYTHONPATH'].split(os.pathsep))[-1] + path, "r") as data_file: self.config_data = json.load(data_file) except Exception as err: logger.info(err) self.config_data = json.load({}) return self.config_data def update_json_config(self, json_added, json_path, options, list_column, config_text): """ Write JSON file configuration """ data = literal_eval(config_text) if(options != "replace"): json_target = jsonpath_rw_ext.match(json_path, data) if isinstance(json_target[0], dict): if len(list_column)==1: json_target[0][list_column[0]] = json_added json_final = json_target[0] else: return False, json.dumps(data, indent=4) else: for json_target_value in json_target[0]: json_added.append(json_target_value) json_final = json_added else: json_final = json_added jsonpath_expr = parse(json_path) matches = jsonpath_expr.find(data) if len(matches)==0: return make_response(json.dumps({'success':False, 'message':'JSON path not found.'})) for match in matches: data = ClientConfiguration.update_json(data, ClientConfiguration.get_path(match), json_final) return make_response(json.dumps({'success':True, 'data':json.dumps(data, indent=4)})) @classmethod def get_path(self, match): """ Return an iterator based upon MATCH.PATH. Each item is a path component, start from outer most item. """ if match.context is not None: for path_element in ClientConfiguration.get_path(match.context): yield path_element yield str(match.path) @classmethod def update_json(self, json, path, value): """ Update JSON dictionary PATH with VALUE. Return updated JSON """ try: first = next(path) # check if item is an array if (first.startswith('[') and first.endswith(']')) or (first.startswith('{') and first.endswith('}')): try: first = int(first[1:-1]) except ValueError: pass json[first] = ClientConfiguration.update_json(json[first], path, value) return json except StopIteration: return value
[((7, 9, 7, 36), 'logging.getLogger', 'logging.getLogger', ({(7, 27, 7, 35): '__name__'}, {}), '(__name__)', False, 'import os, json, logging, jsonpath_rw_ext, jsonpath_rw\n'), ((43, 15, 43, 40), 'ast.literal_eval', 'literal_eval', ({(43, 28, 43, 39): 'config_text'}, {}), '(config_text)', False, 'from ast import literal_eval\n'), ((59, 24, 59, 40), 'jsonpath_rw.parse', 'parse', ({(59, 30, 59, 39): 'json_path'}, {}), '(json_path)', False, 'from jsonpath_rw import jsonpath, parse\n'), ((46, 26, 46, 64), 'jsonpath_rw_ext.match', 'jsonpath_rw_ext.match', ({(46, 48, 46, 57): 'json_path', (46, 59, 46, 63): 'data'}, {}), '(json_path, data)', False, 'import os, json, logging, jsonpath_rw_ext, jsonpath_rw\n'), ((64, 33, 64, 96), 'json.dumps', 'json.dumps', ({(64, 44, 64, 95): "{'success': False, 'message': 'JSON path not found.'}"}, {}), "({'success': False, 'message': 'JSON path not found.'})", False, 'import os, json, logging, jsonpath_rw_ext, jsonpath_rw\n'), ((33, 39, 33, 59), 'json.load', 'json.load', ({(33, 49, 33, 58): 'data_file'}, {}), '(data_file)', False, 'import os, json, logging, jsonpath_rw_ext, jsonpath_rw\n'), ((36, 35, 36, 48), 'json.load', 'json.load', ({(36, 45, 36, 47): '{}'}, {}), '({})', False, 'import os, json, logging, jsonpath_rw_ext, jsonpath_rw\n'), ((69, 64, 69, 90), 'json.dumps', 'json.dumps', (), '', False, 'import os, json, logging, jsonpath_rw_ext, jsonpath_rw\n'), ((52, 34, 52, 60), 'json.dumps', 'json.dumps', (), '', False, 'import os, json, logging, jsonpath_rw_ext, jsonpath_rw\n')]
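A small sketch of the path-based JSON update helpers in locust/configuration.py above; it assumes only that the patched locust package (and its module-level imports such as flask and jsonpath_rw_ext) is installed, and the path components are written out by hand in the same shape that get_path() yields.

# Assumed import path, matching the repo layout shown above.
from locust.configuration import ClientConfiguration

data = {"users": [{"name": "alice"}, {"name": "bob"}]}

# update_json() consumes an iterator of path components: plain keys for
# dicts and "[index]" strings for list items.
components = iter(["users", "[1]", "name"])
data = ClientConfiguration.update_json(data, components, "carol")

print(data)  # {'users': [{'name': 'alice'}, {'name': 'carol'}]}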
SIXMON/peps
data/migrations/0023_discardaction_answers.py
48c09a951a0193ada7b91c8bb6efc4b1232c3520
# Generated by Django 2.2.4 on 2019-11-14 16:48 import django.contrib.postgres.fields.jsonb from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('data', '0022_discardaction'), ] operations = [ migrations.AddField( model_name='discardaction', name='answers', field=django.contrib.postgres.fields.jsonb.JSONField(blank=True, null=True), ), ]
[]
juanitodread/pitaya-falcon
app/models.py
f4b889f9fa39072aeb9f1c71fe5f3bb259082e93
from json import JSONEncoder from time import time class Jsonable: """Abstract class to standardize the toJson method to be implemented by any class that wants to be serialized to JSON""" def toJson(self): """Abstract method""" raise NotImplementedError('You should implement this method in your classes.') class CommonMessage(Jsonable): def __init__(self): self.client = Client() self.emitter = Emitter() self.type = "" self.body = "" self.tags = ["music", "culture", "food"] def toJson(self): return dict(client=self.client, emitter=self.emitter, type=self.type, body=self.body, tags=self.tags) class Client(Jsonable): def __init__(self): self.id = "" self.name = "" self.time = int(round(time() * 1000)) def toJson(self): return dict(id=self.id, name=self.name, time=self.time) class Emitter(Jsonable): def __init__(self): self.id = "" def toJson(self): return dict(id=self.id) class ComplexJsonEncoder(JSONEncoder): """Basic JSON encoder for 'complex (nested)' Python objects.""" def default(self, o): if hasattr(o, 'toJson'): return o.toJson() else: return JSONEncoder.default(self, o)
[((46, 19, 46, 47), 'json.JSONEncoder.default', 'JSONEncoder.default', ({(46, 39, 46, 43): 'self', (46, 45, 46, 46): 'o'}, {}), '(self, o)', False, 'from json import JSONEncoder\n'), ((27, 30, 27, 36), 'time.time', 'time', ({}, {}), '()', False, 'from time import time\n')]
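A short sketch of serializing the nested message classes above through ComplexJsonEncoder, assuming the file is importable as app.models (the path in the row above).

from json import dumps

from app.models import CommonMessage, ComplexJsonEncoder  # assumed import path

msg = CommonMessage()
msg.client.id = "42"
msg.client.name = "mobile-client"
msg.emitter.id = "emitter-1"
msg.type = "event"
msg.body = "hello"

# The encoder falls back to each object's toJson(), so the nested
# Client and Emitter instances serialize without extra plumbing.
print(dumps(msg, cls=ComplexJsonEncoder))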
jakobkogler/pi_memorize
compute_pi.py
c82c24f26407f1728ad1e73851b72dea9bf779f6
"""Compute pi.""" from decimal import Decimal, getcontext import argparse import itertools class ComputePi: """Compute pi to a specific precision using multiple algorithms.""" @staticmethod def BBP(precision): """Compute pi using the Bailey-Borwein-Plouffe formula.""" getcontext().prec = precision + 20 pi = Decimal(0) for k in itertools.count(): term = (Decimal(4)/(8*k+1) - Decimal(2)/(8*k+4) - Decimal(1)/(8*k+5) - Decimal(1)/(8*k+6)) term /= Decimal(16)**k pi += term if term < Decimal(10)**(-precision-10): break pi = str(pi)[:-19] return pi @staticmethod def arctan_euler(x, one=1000000): """Calculate arctan(1/x) using euler's accelerated formula. Based on http://www.craig-wood.com/nick/articles/pi-machin/""" x_squared = x * x x_squared_plus_1 = x_squared + 1 term = (x * one) // x_squared_plus_1 total = term two_n = 2 while 1: divisor = (two_n+1) * x_squared_plus_1 term *= two_n term += divisor // 2 # round the division term = term // divisor if term == 0: break total += term two_n += 2 return total @staticmethod def machin_euler(digits): """Compute pi using Machin's formula. Based on http://www.craig-wood.com/nick/articles/pi-machin/""" one = 10**(digits + 20) pi = 4*(4*ComputePi.arctan_euler(5, one) - ComputePi.arctan_euler(239, one)) pi //= 10**20 return '3.{}'.format(str(pi)[1:]) if __name__ == '__main__': parser = argparse.ArgumentParser(description='Calculates pi.') parser.add_argument('--precision', type=int, default=100, help='The desired precision of pi (default: 100 digits)') args = parser.parse_args() pi_computer = ComputePi() print(pi_computer.machin_euler(args.precision))
[((61, 13, 61, 66), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (), '', False, 'import argparse\n'), ((15, 13, 15, 23), 'decimal.Decimal', 'Decimal', ({(15, 21, 15, 22): '0'}, {}), '(0)', False, 'from decimal import Decimal, getcontext\n'), ((16, 17, 16, 34), 'itertools.count', 'itertools.count', ({}, {}), '()', False, 'import itertools\n'), ((13, 8, 13, 20), 'decimal.getcontext', 'getcontext', ({}, {}), '()', False, 'from decimal import Decimal, getcontext\n'), ((18, 20, 18, 31), 'decimal.Decimal', 'Decimal', ({(18, 28, 18, 30): '(16)'}, {}), '(16)', False, 'from decimal import Decimal, getcontext\n'), ((17, 83, 17, 93), 'decimal.Decimal', 'Decimal', ({(17, 91, 17, 92): '(1)'}, {}), '(1)', False, 'from decimal import Decimal, getcontext\n'), ((21, 22, 21, 33), 'decimal.Decimal', 'Decimal', ({(21, 30, 21, 32): '(10)'}, {}), '(10)', False, 'from decimal import Decimal, getcontext\n'), ((17, 62, 17, 72), 'decimal.Decimal', 'Decimal', ({(17, 70, 17, 71): '(1)'}, {}), '(1)', False, 'from decimal import Decimal, getcontext\n'), ((17, 20, 17, 30), 'decimal.Decimal', 'Decimal', ({(17, 28, 17, 29): '(4)'}, {}), '(4)', False, 'from decimal import Decimal, getcontext\n'), ((17, 41, 17, 51), 'decimal.Decimal', 'Decimal', ({(17, 49, 17, 50): '(2)'}, {}), '(2)', False, 'from decimal import Decimal, getcontext\n')]
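A quick cross-check of the two algorithms above, assuming the file is importable as compute_pi (it sits at the repository root).

from compute_pi import ComputePi  # assumed to be on the import path

digits = 50
bbp = ComputePi.BBP(digits)
machin = ComputePi.machin_euler(digits)

print(bbp)
print(machin)
# Loose sanity check: both expansions share the leading digits of pi.
assert bbp[:40] == machin[:40]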
LaMemeBete/nodys-smart-contract
scripts/01_deploy_data_types.py
f67b88d98ebf7063b72f46cb2b014d5de96eb56d
#!/usr/bin/python3 import time from brownie import ( DataTypes, TransparentUpgradeableProxy, ProxyAdmin, config, network, Contract, ) from scripts.helpful_scripts import get_account, encode_function_data def main(): account = get_account() print(config["networks"][network.show_active()]) print(f"Deploying to {network.show_active()}") data_types = DataTypes.deploy( {"from": account}, publish_source=config["networks"][network.show_active()]["verify"], ) # Optional, deploy the ProxyAdmin and use that as the admin contract proxy_admin = ProxyAdmin.deploy( {"from": account}, publish_source=config["networks"][network.show_active()]["verify"], ) # If we want an intializer function we can add # `initializer=box.store, 1` # to simulate the initializer being the `store` function # with a `newValue` of 1 # data_types_encoded_initializer_function = encode_function_data(data_types.setDataTypes) data_types_encoded_initializer_function = encode_function_data( data_types.setDataTypes, 10 ) proxy = TransparentUpgradeableProxy.deploy( data_types.address, proxy_admin.address, data_types_encoded_initializer_function, # gas limit removed fort an issue not very clear # {"from": account, "gas_limit": 100000000000}, {"from": account}, publish_source=config["networks"][network.show_active()]["verify"], ) print(f"Proxy deployed to {proxy} ! You can now upgrade it to dataTypesV2!") proxy_data_types = Contract.from_abi("DataTypes", proxy.address, DataTypes.abi)
[((15, 14, 15, 27), 'scripts.helpful_scripts.get_account', 'get_account', ({}, {}), '()', False, 'from scripts.helpful_scripts import get_account, encode_function_data\n'), ((33, 46, 35, 5), 'scripts.helpful_scripts.encode_function_data', 'encode_function_data', ({(34, 8, 34, 31): 'data_types.setDataTypes', (34, 33, 34, 35): '10'}, {}), '(data_types.setDataTypes, 10)', False, 'from scripts.helpful_scripts import get_account, encode_function_data\n'), ((46, 23, 46, 83), 'brownie.Contract.from_abi', 'Contract.from_abi', ({(46, 41, 46, 52): '"""DataTypes"""', (46, 54, 46, 67): 'proxy.address', (46, 69, 46, 82): 'DataTypes.abi'}, {}), "('DataTypes', proxy.address, DataTypes.abi)", False, 'from brownie import DataTypes, TransparentUpgradeableProxy, ProxyAdmin, config, network, Contract\n'), ((16, 29, 16, 50), 'brownie.network.show_active', 'network.show_active', ({}, {}), '()', False, 'from brownie import DataTypes, TransparentUpgradeableProxy, ProxyAdmin, config, network, Contract\n'), ((17, 26, 17, 47), 'brownie.network.show_active', 'network.show_active', ({}, {}), '()', False, 'from brownie import DataTypes, TransparentUpgradeableProxy, ProxyAdmin, config, network, Contract\n'), ((20, 42, 20, 63), 'brownie.network.show_active', 'network.show_active', ({}, {}), '()', False, 'from brownie import DataTypes, TransparentUpgradeableProxy, ProxyAdmin, config, network, Contract\n'), ((25, 42, 25, 63), 'brownie.network.show_active', 'network.show_active', ({}, {}), '()', False, 'from brownie import DataTypes, TransparentUpgradeableProxy, ProxyAdmin, config, network, Contract\n'), ((43, 42, 43, 63), 'brownie.network.show_active', 'network.show_active', ({}, {}), '()', False, 'from brownie import DataTypes, TransparentUpgradeableProxy, ProxyAdmin, config, network, Contract\n')]
omni-us/pytorch-retinanet
modules/BidirectionalLSTM.py
8d3ee38d50df0afec2ab4dfa0eabb8219eb399f5
import torch.nn as nn class BidirectionalLSTM(nn.Module): # Module to extract BLSTM features from convolutional feature map def __init__(self, nIn, nHidden, nOut): super(BidirectionalLSTM, self).__init__() self.rnn = nn.LSTM(nIn, nHidden, bidirectional=True) self.embedding = nn.Linear(nHidden * 2, nOut) self.rnn.cuda() self.embedding.cuda() def forward(self, input): recurrent, _ = self.rnn(input) T, b, h = recurrent.size() t_rec = recurrent.view(T * b, h) output = self.embedding(t_rec) # [T * b, nOut] output = output.view(T, b, -1) return output
[((9, 19, 9, 60), 'torch.nn.LSTM', 'nn.LSTM', (), '', True, 'import torch.nn as nn\n'), ((10, 25, 10, 53), 'torch.nn.Linear', 'nn.Linear', ({(10, 35, 10, 46): 'nHidden * 2', (10, 48, 10, 52): 'nOut'}, {}), '(nHidden * 2, nOut)', True, 'import torch.nn as nn\n')]
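A shape-level sketch of running the feature extractor above; the module moves itself to the GPU in __init__, so this assumes a CUDA device is available and that the file is importable via the (assumed) path modules.BidirectionalLSTM from the row above.

import torch

from modules.BidirectionalLSTM import BidirectionalLSTM  # assumed import path

T, b, nIn, nHidden, nOut = 26, 4, 512, 256, 37  # illustrative sizes only

blstm = BidirectionalLSTM(nIn, nHidden, nOut)

# nn.LSTM expects (seq_len, batch, features); the input must live on the GPU
# because the module was moved there in its constructor.
features = torch.randn(T, b, nIn).cuda()
out = blstm(features)
print(out.shape)  # torch.Size([26, 4, 37])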
tranconbv/ironpython-stubs
release/stubs.min/System/Windows/Forms/__init___parts/PaintEventArgs.py
a601759e6c6819beff8e6b639d18a24b7e351851
class PaintEventArgs(EventArgs,IDisposable): """ Provides data for the System.Windows.Forms.Control.Paint event. PaintEventArgs(graphics: Graphics,clipRect: Rectangle) """ def Instance(self): """ This function has been arbitrarily put into the stubs""" return PaintEventArgs() def Dispose(self): """ Dispose(self: PaintEventArgs) Releases all resources used by the System.Windows.Forms.PaintEventArgs. """ pass def __enter__(self,*args): """ __enter__(self: IDisposable) -> object """ pass def __exit__(self,*args): """ __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) """ pass def __init__(self,*args): """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """ pass @staticmethod def __new__(self,graphics,clipRect): """ __new__(cls: type,graphics: Graphics,clipRect: Rectangle) """ pass ClipRectangle=property(lambda self: object(),lambda self,v: None,lambda self: None) """Gets the rectangle in which to paint. Get: ClipRectangle(self: PaintEventArgs) -> Rectangle """ Graphics=property(lambda self: object(),lambda self,v: None,lambda self: None) """Gets the graphics used to paint. Get: Graphics(self: PaintEventArgs) -> Graphics """
[]
JaekwangCha/my_pytorch_templet
main.py
7b6b67116e9d69abd64631d90b38fedc79be6c8c
# written by Jaekwang Cha # version 0.1 # ================== IMPORT CUSTOM LEARNING LIBRARIES ===================== # from customs.train import train, test from customs.dataset import load_dataset from customs.model import load_model # ================== TRAINING SETTINGS ================== # import argparse import os parser = argparse.ArgumentParser() parser.add_argument('--train_method', default='supervised', type=str, help='type of training: supervised(default), unsupervised, reinforce') parser.add_argument('--task', default='classification', type=str, help='task of training: classification(default), regression') parser.add_argument('--dataset', default='mnist', type=str, help='dataset to use') parser.add_argument('--model', default='CNN', type=str, help='model to use') parser.add_argument('--seed', default=42, type=int, help='random seed (default: 42)') parser.add_argument('--num_worker', default=1, type=int, help='number of dataloader worker') parser.add_argument('--no_cuda', action='store_true', default=False, help='disables CUDA training') parser.add_argument('--gpu', default=0, type=str, help='GPU-id for GPU to use') parser.add_argument('--multi_gpu', default=0, type=str, help='GPU-ids for multi-GPU usage') parser.add_argument('--pin_memory', default=True, type=bool, help='pin memory option selector') parser.add_argument('--save_model', action='store_true', default=False, help='For Saving the current Model') parser.add_argument('--save_path', default=os.getcwd()+'/weights', type=str, help='Where to save weights') parser.add_argument('--log_path', default=os.getcwd()+'/Logs', type=str, help='Where to save Logs') # data setting parser.add_argument('--val_rate', default=0.2, type=float, help='split rate for the validation data') parser.add_argument('--transform', default='default', type=str, help='choose the data transform type') # training parameter setting parser.add_argument('--n_epoch', default=10, type=int, help='number of total training iteration') parser.add_argument('--batch_size', default=32, type=int, help='size of minibatch') parser.add_argument('--test_batch_size', default=32, type=int, help='size of test-minibatch') # optimizer & scheduler setting parser.add_argument('--lr', default=0.03, type=float, help='training learning rate') parser.add_argument('--optimizer', default='adam', type=str, help='optimizer select') parser.add_argument('--scheduler', default='steplr', type=str, help='scheduler select') opt = parser.parse_args() # ===================== IMPORT PYTORCH LIBRARIES ================== # import torch from torch.utils.data import DataLoader torch.manual_seed(opt.seed) # ================== GPU SETTINGS ================== # def gpu_setup(opt): use_cuda = not opt.no_cuda and torch.cuda.is_available() os.environ["CUDA_DEVICE_ORDER"] ="PCI_BUS_ID" if opt.multi_gpu != 0: print() print('Activating multi-gpu training mode') print(opt.multi_gpu) os.environ['CUDA_VISIBLE_DEVICES'] = str(opt.multi_gpu) opt.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') else: print() print('Activating single-gpu training mode') os.environ['CUDA_VISIBLE_DEVICES'] = str(opt.gpu) opt.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') print('Using gpu number ' + str(opt.gpu)) return use_cuda # ======================= MAIN SCRIPT ============================= # def main(opt): use_cuda = gpu_setup(opt) dataset_train, dataset_validation = load_dataset(opt, train=True) print('training data size: {}'.format(len(dataset_train))) print('validation data size: 
{}'.format(len(dataset_validation))) dataset_test = load_dataset(opt, train=False) print('test data size: {}'.format(len(dataset_test))) print() kwargs = {'num_workers': opt.num_worker, 'pin_memory': opt.pin_memory} if use_cuda else {} train_dataloader = DataLoader(dataset_train, batch_size=opt.batch_size, shuffle=True, **kwargs) validation_dataloader = DataLoader(dataset_validation, batch_size=opt.batch_size, shuffle=True, **kwargs) test_dataloader = DataLoader(dataset_test, batch_size=opt.test_batch_size, shuffle=True, **kwargs) model = load_model(opt) if opt.multi_gpu != 0: model = torch.nn.DataParallel(model) model.to(opt.device) train(opt, model, train_dataloader, validation_dataloader) test(opt, model, test_dataloader) if __name__ == '__main__': main(opt)
[((13, 9, 13, 34), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ({}, {}), '()', False, 'import argparse\n'), ((51, 0, 51, 27), 'torch.manual_seed', 'torch.manual_seed', ({(51, 18, 51, 26): 'opt.seed'}, {}), '(opt.seed)', False, 'import torch\n'), ((75, 40, 75, 69), 'customs.dataset.load_dataset', 'load_dataset', (), '', False, 'from customs.dataset import load_dataset\n'), ((79, 19, 79, 49), 'customs.dataset.load_dataset', 'load_dataset', (), '', False, 'from customs.dataset import load_dataset\n'), ((84, 23, 84, 99), 'torch.utils.data.DataLoader', 'DataLoader', (), '', False, 'from torch.utils.data import DataLoader\n'), ((85, 28, 85, 109), 'torch.utils.data.DataLoader', 'DataLoader', (), '', False, 'from torch.utils.data import DataLoader\n'), ((86, 22, 86, 102), 'torch.utils.data.DataLoader', 'DataLoader', (), '', False, 'from torch.utils.data import DataLoader\n'), ((88, 12, 88, 27), 'customs.model.load_model', 'load_model', ({(88, 23, 88, 26): 'opt'}, {}), '(opt)', False, 'from customs.model import load_model\n'), ((93, 4, 93, 62), 'customs.train.train', 'train', ({(93, 10, 93, 13): 'opt', (93, 15, 93, 20): 'model', (93, 22, 93, 38): 'train_dataloader', (93, 40, 93, 61): 'validation_dataloader'}, {}), '(opt, model, train_dataloader, validation_dataloader)', False, 'from customs.train import train, test\n'), ((94, 4, 94, 37), 'customs.train.test', 'test', ({(94, 9, 94, 12): 'opt', (94, 14, 94, 19): 'model', (94, 21, 94, 36): 'test_dataloader'}, {}), '(opt, model, test_dataloader)', False, 'from customs.train import train, test\n'), ((55, 35, 55, 60), 'torch.cuda.is_available', 'torch.cuda.is_available', ({}, {}), '()', False, 'import torch\n'), ((90, 16, 90, 44), 'torch.nn.DataParallel', 'torch.nn.DataParallel', ({(90, 38, 90, 43): 'model'}, {}), '(model)', False, 'import torch\n'), ((25, 49, 25, 60), 'os.getcwd', 'os.getcwd', ({}, {}), '()', False, 'import os\n'), ((26, 49, 26, 60), 'os.getcwd', 'os.getcwd', ({}, {}), '()', False, 'import os\n'), ((63, 44, 63, 69), 'torch.cuda.is_available', 'torch.cuda.is_available', ({}, {}), '()', False, 'import torch\n'), ((68, 44, 68, 69), 'torch.cuda.is_available', 'torch.cuda.is_available', ({}, {}), '()', False, 'import torch\n')]
ahnitz/pegasus
test/core/024-sc4-gridftp-http/Rosetta.py
e269b460f4d87eb3f3a7e91cd82e2c28fdb55573
#!/usr/bin/env python3 import logging import sys import subprocess from pathlib import Path from datetime import datetime from Pegasus.api import * logging.basicConfig(level=logging.DEBUG) # --- Work Dir Setup ----------------------------------------------------------- RUN_ID = "024-sc4-gridftp-http-" + datetime.now().strftime("%s") TOP_DIR = Path.cwd() WORK_DIR = TOP_DIR / "work" try: Path.mkdir(WORK_DIR) except FileExistsError: pass # --- Configuration ------------------------------------------------------------ print("Generating pegasus.properties at: {}".format(TOP_DIR / "pegasus.properties")) props = Properties() props["pegasus.dir.useTimestamp"] = "true" props["pegasus.dir.storage.deep"] = "false" props["pegasus.data.configuration"] = "nonsharedfs" with (TOP_DIR / "pegasus.properties").open(mode="w") as f: props.write(f) # --- Sites -------------------------------------------------------------------- print("Generating site catalog at: sites.yml") LOCAL = "local" CONDOR_POOL = "condorpool" STAGING_SITE = "staging_site" try: pegasus_config = subprocess.run( ["pegasus-config", "--bin"], stdout=subprocess.PIPE, stderr=subprocess.PIPE ) except FileNotFoundError as e: print("Unable to find pegasus-config") assert pegasus_config.returncode == 0 PEGASUS_BIN_DIR = pegasus_config.stdout.decode().strip() sites = """ pegasus: "5.0" sites: - name: "condor_pool" arch: "x86_64" os.type: "linux" profiles: condor: universe: "vanilla" pegasus: style: "condor" - name: "staging_site" arch: "x86_64" os.type: "linux" directories: - type: "sharedScratch" path: "/lizard/scratch-90-days/http-scratch/ptesting" fileServers: - operation: "get" url: "http://workflow.isi.edu/shared-scratch/ptesting" - operation: "put" url: "gsiftp://workflow.isi.edu/lizard/scratch-90-days/http-scratch/ptesting" - name: "local" arch: "x86_64" os.type: "linux" os.release: "rhel" os.version: "7" directories: - type: "sharedScratch" path: "{work_dir}/scratch" fileServers: - operation: "all" url: "file://{work_dir}/scratch" - type: "localStorage" path: "{work_dir}/outputs" fileServers: - operation: "all" url: "file://{work_dir}/outputs" profiles: env: PEGASUS_BIN_DIR: "{pegasus_bin_dir}" """.format( work_dir=str(WORK_DIR), pegasus_bin_dir=PEGASUS_BIN_DIR ) with (TOP_DIR / "sites.yml").open(mode="w") as f: f.write(sites) # --- Transformations ---------------------------------------------------------- rosetta_exe = Transformation( "rosetta.exe", arch=Arch.X86_64, os_type=OS.LINUX, site="local", pfn="file://" + str(TOP_DIR / "rosetta.exe"), is_stageable=True, ).add_pegasus_profile(clusters_size=3) tc = TransformationCatalog().add_transformations(rosetta_exe) # --- Replicas & Workflow ------------------------------------------------------ rc = ReplicaCatalog() # add all files in minirosetta_database inputs = list() def get_files(d: Path) -> None: for p in d.iterdir(): if p.is_file(): f = File(str(p)) inputs.append(f) rc.add_replica(LOCAL, str(p), str(p.resolve())) else: get_files(p) get_files(Path("minirosetta_database")) f1 = File("design.resfile") inputs.append(f1) rc.add_replica(LOCAL, f1, str(Path("design.resfile").resolve())) f2 = File("repack.resfile") inputs.append(f2) rc.add_replica(LOCAL, f2, str(Path("repack.resfile").resolve())) wf = Workflow("rosetta") pdb_files = list(Path("pdbs").iterdir()) for i in range(10): current_file = pdb_files[i] if current_file.is_file(): job = ( Job(rosetta_exe, _id=current_file.name.replace(".pdb", "")) .add_inputs(File(current_file.name), *inputs) .add_outputs(File(current_file.name + 
".score.sc"), register_replica=True) .add_args( "-in:file:s", current_file.name, "-out:prefix " + current_file.name + ".", "-database ./minirosetta_database", "-linmem_ig 10", "-nstruct 1", "-pert_num 2", "-inner_num 1", "-jd2::ntrials 1", ) ) rc.add_replica("local", current_file.name, str(current_file.resolve())) wf.add_jobs(job) # write rc to separate file for registration jobs with (TOP_DIR / "replicas.yml").open("w") as f: rc.write(f) wf.add_transformation_catalog(tc) try: wf.plan( dir=str(WORK_DIR), verbose=5, sites=[CONDOR_POOL], staging_sites={CONDOR_POOL: STAGING_SITE}, ) except PegasusClientError as e: print(e.output)
[((11, 0, 11, 40), 'logging.basicConfig', 'logging.basicConfig', (), '', False, 'import logging\n'), ((15, 10, 15, 20), 'pathlib.Path.cwd', 'Path.cwd', ({}, {}), '()', False, 'from pathlib import Path\n'), ((19, 4, 19, 24), 'pathlib.Path.mkdir', 'Path.mkdir', ({(19, 15, 19, 23): 'WORK_DIR'}, {}), '(WORK_DIR)', False, 'from pathlib import Path\n'), ((43, 21, 45, 5), 'subprocess.run', 'subprocess.run', (), '', False, 'import subprocess\n'), ((140, 10, 140, 38), 'pathlib.Path', 'Path', ({(140, 15, 140, 37): '"""minirosetta_database"""'}, {}), "('minirosetta_database')", False, 'from pathlib import Path\n'), ((14, 35, 14, 49), 'datetime.datetime.now', 'datetime.now', ({}, {}), '()', False, 'from datetime import datetime\n'), ((152, 17, 152, 29), 'pathlib.Path', 'Path', ({(152, 22, 152, 28): '"""pdbs"""'}, {}), "('pdbs')", False, 'from pathlib import Path\n'), ((144, 30, 144, 52), 'pathlib.Path', 'Path', ({(144, 35, 144, 51): '"""design.resfile"""'}, {}), "('design.resfile')", False, 'from pathlib import Path\n'), ((148, 30, 148, 52), 'pathlib.Path', 'Path', ({(148, 35, 148, 51): '"""repack.resfile"""'}, {}), "('repack.resfile')", False, 'from pathlib import Path\n')]
sisl/CEEM
tests/nls_smoother_test.py
6154587fe3cdb92e8b7f70eedb1262caa1553cc8
import torch from ceem.opt_criteria import * from ceem.systems import LorenzAttractor from ceem.dynamics import * from ceem.smoother import * from ceem import utils def test_smoother(): utils.set_rng_seed(1) torch.set_default_dtype(torch.float64) sigma = torch.tensor([10.]) rho = torch.tensor([28.]) beta = torch.tensor([8. / 3.]) C = torch.randn(2, 3) dt = 0.04 sys = LorenzAttractor(sigma, rho, beta, C, dt, method='midpoint') B = 1 T = 200 xs = [torch.randn(B, 1, 3)] for t in range(T - 1): xs.append(sys.step(torch.tensor([0.] * B), xs[-1])) x = torch.cat(xs, dim=1).detach() x.requires_grad = True y = sys.observe(0., x).detach() # y += torch.rand_like(y) * 0.01 t = torch.stack([torch.arange(T), torch.arange(T)]).to(torch.get_default_dtype()) x0 = torch.zeros_like(x) obscrit = GaussianObservationCriterion(torch.ones(2), t, y) dyncrit = GaussianDynamicsCriterion(torch.ones(3), t) # Test GroupSOSCriterion crit = GroupSOSCriterion([obscrit, dyncrit]) xsm, metrics = NLSsmoother(x0, crit, sys, solver_kwargs={'verbose': 2, 'tr_rho': 0.}) err = float((xsm - x).norm()) assert err < 1e-8, 'Smoothing Error: %.3e' % err print('Passed.') # Test BlockSparseGroupSOSCriterion crit = BlockSparseGroupSOSCriterion([obscrit, dyncrit]) xsm, metrics = NLSsmoother(torch.zeros_like(x), crit, sys) err = float((xsm - x).norm()) assert err < 1e-8, 'Smoothing Error: %.3e' % err print('Passed.') if __name__ == '__main__': test_smoother()
[((12, 4, 12, 25), 'ceem.utils.set_rng_seed', 'utils.set_rng_seed', ({(12, 23, 12, 24): '(1)'}, {}), '(1)', False, 'from ceem import utils\n'), ((14, 4, 14, 42), 'torch.set_default_dtype', 'torch.set_default_dtype', ({(14, 28, 14, 41): 'torch.float64'}, {}), '(torch.float64)', False, 'import torch\n'), ((16, 12, 16, 31), 'torch.tensor', 'torch.tensor', ({(16, 25, 16, 30): '[10.0]'}, {}), '([10.0])', False, 'import torch\n'), ((17, 10, 17, 29), 'torch.tensor', 'torch.tensor', ({(17, 23, 17, 28): '[28.0]'}, {}), '([28.0])', False, 'import torch\n'), ((18, 11, 18, 34), 'torch.tensor', 'torch.tensor', ({(18, 24, 18, 33): '[8.0 / 3.0]'}, {}), '([8.0 / 3.0])', False, 'import torch\n'), ((20, 8, 20, 25), 'torch.randn', 'torch.randn', ({(20, 20, 20, 21): '2', (20, 23, 20, 24): '3'}, {}), '(2, 3)', False, 'import torch\n'), ((24, 10, 24, 69), 'ceem.systems.LorenzAttractor', 'LorenzAttractor', (), '', False, 'from ceem.systems import LorenzAttractor\n'), ((39, 9, 39, 28), 'torch.zeros_like', 'torch.zeros_like', ({(39, 26, 39, 27): 'x'}, {}), '(x)', False, 'import torch\n'), ((28, 10, 28, 30), 'torch.randn', 'torch.randn', ({(28, 22, 28, 23): 'B', (28, 25, 28, 26): '(1)', (28, 28, 28, 29): '(3)'}, {}), '(B, 1, 3)', False, 'import torch\n'), ((37, 59, 37, 84), 'torch.get_default_dtype', 'torch.get_default_dtype', ({}, {}), '()', False, 'import torch\n'), ((41, 43, 41, 56), 'torch.ones', 'torch.ones', ({(41, 54, 41, 55): '2'}, {}), '(2)', False, 'import torch\n'), ((43, 40, 43, 53), 'torch.ones', 'torch.ones', ({(43, 51, 43, 52): '3'}, {}), '(3)', False, 'import torch\n'), ((58, 31, 58, 50), 'torch.zeros_like', 'torch.zeros_like', ({(58, 48, 58, 49): 'x'}, {}), '(x)', False, 'import torch\n'), ((32, 8, 32, 28), 'torch.cat', 'torch.cat', (), '', False, 'import torch\n'), ((30, 27, 30, 49), 'torch.tensor', 'torch.tensor', ({(30, 40, 30, 48): '([0.0] * B)'}, {}), '([0.0] * B)', False, 'import torch\n'), ((37, 21, 37, 36), 'torch.arange', 'torch.arange', ({(37, 34, 37, 35): 'T'}, {}), '(T)', False, 'import torch\n'), ((37, 38, 37, 53), 'torch.arange', 'torch.arange', ({(37, 51, 37, 52): 'T'}, {}), '(T)', False, 'import torch\n')]
godspeed5/qiskit-terra
qiskit/visualization/pulse_v2/device_info.py
a5d87c3e4a663ab962704585fba0caef15061246
# This code is part of Qiskit. # # (C) Copyright IBM 2020. # # This code is licensed under the Apache License, Version 2.0. You may # obtain a copy of this license in the LICENSE.txt file in the root directory # of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. # # Any modifications or derivative works of this code must retain this # copyright notice, and modified files need to carry a notice indicating # that they have been altered from the originals. # pylint: disable=invalid-name """A collection of backend information formatted to generate drawing data. This instance will be provided to generator functions. The module provides an abstract class :py:class:``DrawerBackendInfo`` with necessary methods to generate drawing objects. Because the data structure of backend class may depend on providers, this abstract class has an abstract factory method `create_from_backend`. Each subclass should provide the factory method which conforms to the associated provider. By default we provide :py:class:``OpenPulseBackendInfo`` class that has the factory method taking backends satisfying OpenPulse specification [1]. This class can be also initialized without the factory method by manually specifying required information. This may be convenient for visualizing a pulse program for simulator backend that only has a device Hamiltonian information. This requires two mapping objects for channel/qubit and channel/frequency along with the system cycle time. If those information are not provided, this class will be initialized with a set of empty data and the drawer illustrates a pulse program without any specific information. Reference: - [1] Qiskit Backend Specifications for OpenQASM and OpenPulse Experiments, https://arxiv.org/abs/1809.03452 """ from abc import ABC, abstractmethod from collections import defaultdict from typing import Dict, List, Union, Optional from qiskit import pulse from qiskit.providers import BaseBackend, BackendConfigurationError class DrawerBackendInfo(ABC): """Backend information to be used for the drawing data generation.""" def __init__(self, name: Optional[str] = None, dt: Optional[float] = None, channel_frequency_map: Optional[Dict[pulse.channels.Channel, float]] = None, qubit_channel_map: Optional[Dict[int, List[pulse.channels.Channel]]] = None): """Create new backend information. Args: name: Name of the backend. dt: System cycle time. channel_frequency_map: Mapping of channel and associated frequency. qubit_channel_map: Mapping of qubit and associated channels. """ self.backend_name = name or 'no-backend' self._dt = dt self._chan_freq_map = channel_frequency_map or dict() self._qubit_channel_map = qubit_channel_map or dict() @classmethod @abstractmethod def create_from_backend(cls, backend: BaseBackend): """Initialize a class with backend information provided by provider. Args: backend: Backend object. 
""" raise NotImplementedError @property def dt(self): """Return cycle time.""" return self._dt def get_qubit_index(self, chan: pulse.channels.Channel) -> Union[int, None]: """Get associated qubit index of given channel object.""" for qind, chans in self._qubit_channel_map.items(): if chan in chans: return qind return chan.index def get_channel_frequency(self, chan: pulse.channels.Channel) -> Union[float, None]: """Get frequency of given channel object.""" return self._chan_freq_map.get(chan, None) class OpenPulseBackendInfo(DrawerBackendInfo): """Drawing information of backend that conforms to OpenPulse specification.""" @classmethod def create_from_backend(cls, backend: BaseBackend): """Initialize a class with backend information provided by provider. Args: backend: Backend object. Returns: OpenPulseBackendInfo: New configured instance. """ configuration = backend.configuration() defaults = backend.defaults() # load name name = backend.name() # load cycle time dt = configuration.dt # load frequencies chan_freqs = dict() chan_freqs.update({pulse.DriveChannel(qind): freq for qind, freq in enumerate(defaults.qubit_freq_est)}) chan_freqs.update({pulse.MeasureChannel(qind): freq for qind, freq in enumerate(defaults.meas_freq_est)}) for qind, u_lo_mappers in enumerate(configuration.u_channel_lo): temp_val = .0 + .0j for u_lo_mapper in u_lo_mappers: temp_val += defaults.qubit_freq_est[u_lo_mapper.q] * complex(*u_lo_mapper.scale) chan_freqs[pulse.ControlChannel(qind)] = temp_val.real # load qubit channel mapping qubit_channel_map = defaultdict(list) for qind in range(configuration.n_qubits): qubit_channel_map[qind].append(configuration.drive(qubit=qind)) qubit_channel_map[qind].append(configuration.measure(qubit=qind)) for tind in range(configuration.n_qubits): try: qubit_channel_map[qind].extend(configuration.control(qubits=(qind, tind))) except BackendConfigurationError: pass return OpenPulseBackendInfo(name=name, dt=dt, channel_frequency_map=chan_freqs, qubit_channel_map=qubit_channel_map)
[((131, 28, 131, 45), 'collections.defaultdict', 'defaultdict', ({(131, 40, 131, 44): 'list'}, {}), '(list)', False, 'from collections import defaultdict\n'), ((120, 27, 120, 51), 'qiskit.pulse.DriveChannel', 'pulse.DriveChannel', ({(120, 46, 120, 50): 'qind'}, {}), '(qind)', False, 'from qiskit import pulse\n'), ((122, 27, 122, 53), 'qiskit.pulse.MeasureChannel', 'pulse.MeasureChannel', ({(122, 48, 122, 52): 'qind'}, {}), '(qind)', False, 'from qiskit import pulse\n'), ((128, 23, 128, 49), 'qiskit.pulse.ControlChannel', 'pulse.ControlChannel', ({(128, 44, 128, 48): 'qind'}, {}), '(qind)', False, 'from qiskit import pulse\n')]
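The module docstring above notes that OpenPulseBackendInfo can also be built by hand for simulator backends; a small sketch under that assumption, using illustrative values and the module path from the row above (a qiskit-terra checkout with OpenPulse support).

from qiskit import pulse
from qiskit.visualization.pulse_v2.device_info import OpenPulseBackendInfo  # path from the row above

# Hypothetical single-qubit device description; dt and frequencies are illustrative.
info = OpenPulseBackendInfo(
    name="fake_1q_device",
    dt=2.2222e-10,
    channel_frequency_map={pulse.DriveChannel(0): 5.0e9,
                           pulse.MeasureChannel(0): 6.8e9},
    qubit_channel_map={0: [pulse.DriveChannel(0), pulse.MeasureChannel(0)]},
)

print(info.get_qubit_index(pulse.MeasureChannel(0)))      # 0
print(info.get_channel_frequency(pulse.DriveChannel(0)))  # 5000000000.0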
ParikhKadam/gotolong
django_gotolong/mfund/views.py
839beb8aa37055a2078eaa289b8ae05b62e8905e
# Create your views here. from .models import Mfund import plotly.graph_objects as go from plotly.offline import plot from plotly.tools import make_subplots from django.db.models import Q from django.conf import settings from django.shortcuts import redirect from django.contrib.auth.decorators import login_required from django.utils.decorators import method_decorator from django.views.generic.list import ListView from django.views import View from django.db.models import OuterRef, Subquery, Count, Sum, Max, Min from django.db.models.functions import Trim, Lower, Round import pandas as pd import csv, io import openpyxl from django.contrib import messages from django.urls import reverse from django.http import HttpResponseRedirect from django_gotolong.lastrefd.models import Lastrefd, lastrefd_update from django_gotolong.broker.icidir.imf.models import BrokerIcidirMf def Mfund_url(): return "unused-mfund-refresh-url" class MfundListView(ListView): model = Mfund # if pagination is desired # paginate_by = 300 # filter_backends = [filters.OrderingFilter,] # ordering_fields = ['sno', 'nse_symbol'] def get_queryset(self): queryset = Mfund.objects.all().filter(mf_user_id=self.request.user.id) return queryset @method_decorator(login_required) def dispatch(self, *args, **kwargs): return super(MfundListView, self).dispatch(*args, **kwargs) def get_context_data(self, **kwargs): context = super().get_context_data(**kwargs) refresh_url = Mfund_url() context["refresh_url"] = refresh_url return context class MfundListView_Amount(ListView): model = Mfund def get_queryset(self): queryset = Mfund.objects.all().filter(mf_user_id=self.request.user.id).order_by('-mf_nav_value') return queryset def get_context_data(self, **kwargs): context = super().get_context_data(**kwargs) refresh_url = Mfund_url() context["refresh_url"] = refresh_url return context class MfundListView_AMC(ListView): model = Mfund def get_queryset(self): queryset = Mfund.objects.all().filter(mf_user_id=self.request.user.id). \ order_by('mf_amc', 'mf_category', 'mf_subcat', '-mf_nav_value') return queryset def get_context_data(self, **kwargs): context = super().get_context_data(**kwargs) refresh_url = Mfund_url() context["refresh_url"] = refresh_url return context class MfundListView_AMC_Amount(ListView): model = Mfund def get_queryset(self): self.queryset = Mfund.objects.all().filter(mf_user_id=self.request.user.id). \ values('mf_amc').annotate(scheme_sum=Sum('mf_nav_value')). \ exclude(scheme_sum=0.0).order_by('-scheme_sum') print('hi ', self.queryset) return self.queryset def get_context_data(self, **kwargs): context = super().get_context_data(**kwargs) labels = [] values = [] labels_values_dict = {} sum_total = 0 for q_row in self.queryset: sum_total += q_row['scheme_sum'] labels_values_dict[q_row['mf_amc']] = q_row['scheme_sum'] context['sum_total'] = int(sum_total) print('labels values dict', labels_values_dict) for k, v in sorted(labels_values_dict.items(), key=lambda item: item[1]): labels.append(k) values.append(v) print('labels ', labels) print('values ', values) fig = go.Figure(data=[go.Pie(labels=labels, values=values)]) fig.update_traces(textposition='inside', textinfo='percent+label') # fig.show() plot_div_1 = plot(fig, output_type='div', include_plotlyjs=False) context['plot_div_1'] = plot_div_1 return context class MfundListView_Category(ListView): model = Mfund def get_queryset(self): queryset = Mfund.objects.all().filter(mf_user_id=self.request.user.id). 
\ order_by('mf_category', 'mf_subcat', '-mf_nav_value') return queryset def get_context_data(self, **kwargs): context = super().get_context_data(**kwargs) refresh_url = Mfund_url() context["refresh_url"] = refresh_url return context class MfundListView_Subcat(ListView): model = Mfund def get_queryset(self): queryset = Mfund.objects.all().filter(mf_user_id=self.request.user.id). \ order_by('mf_subcat', '-mf_nav_value') return queryset def get_context_data(self, **kwargs): context = super().get_context_data(**kwargs) refresh_url = Mfund_url() context["refresh_url"] = refresh_url return context class MfundListView_Reco(ListView): model = Mfund def get_queryset(self): queryset = Mfund.objects.all().filter(mf_user_id=self.request.user.id). \ order_by('mf_research_reco', '-mf_rating') return queryset def get_context_data(self, **kwargs): context = super().get_context_data(**kwargs) refresh_url = Mfund_url() context["refresh_url"] = refresh_url return context class MfundListView_SubcatAmount(ListView): model = Mfund def get_queryset(self): self.queryset = Mfund.objects.all().filter(mf_user_id=self.request.user.id). \ values('mf_subcat').annotate(scheme_sum=Sum('mf_nav_value')). \ exclude(scheme_sum=0.0).order_by('-scheme_sum') return self.queryset def get_context_data(self, **kwargs): context = super().get_context_data(**kwargs) labels = [] values = [] labels_values_dict = {} sum_total = 0 for q_row in self.queryset: sum_total += q_row['scheme_sum'] labels_values_dict[q_row['mf_subcat']] = q_row['scheme_sum'] context['sum_total'] = int(sum_total) print('labels values dict', labels_values_dict) for k, v in sorted(labels_values_dict.items(), key=lambda item: item[1]): labels.append(k) values.append(v) print('labels ', labels) print('values ', values) fig = go.Figure(data=[go.Pie(labels=labels, values=values)]) fig.update_traces(textposition='inside', textinfo='percent+label') # fig.show() plot_div_1 = plot(fig, output_type='div', include_plotlyjs=False) context['plot_div_1'] = plot_div_1 return context class MfundRefreshView(View): debug_level = 1 def get(self, request): self.mfund_refresh(request) return HttpResponseRedirect(reverse("mfund-list")) def __init__(self): super(MfundRefreshView, self).__init__() def mfund_refresh(self, request): debug_level = 1 # declaring template # first delete all existing mfund objects Mfund.objects.all().filter(mf_user_id=request.user.id).delete() max_id_instances = Mfund.objects.aggregate(max_id=Max('mf_id')) max_mf_id = max_id_instances['max_id'] print('DS: found max id ', max_mf_id) if max_mf_id is None: max_mf_id = 0 print('max_mf_id ', max_mf_id) unique_id = max_mf_id for brec in BrokerIcidirMf.objects.all().filter(bim_user_id=request.user.id): unique_id += 1 print(brec.bim_amc, brec.bim_name, brec.bim_category, brec.bim_subcat) print(brec.bim_rating, brec.bim_units, brec.bim_cost_value, brec.bim_nav_value) print(brec.bim_research_reco) # skip 0 units if int(float(brec.bim_units)) != 0: _, created = Mfund.objects.update_or_create( mf_id=unique_id, mf_user_id=request.user.id, mf_broker='icidir', mf_amc=brec.bim_amc, mf_name=brec.bim_name, mf_category=brec.bim_category, mf_subcat=brec.bim_subcat, mf_rating=brec.bim_rating, mf_cost_value=brec.bim_cost_value, mf_nav_value=brec.bim_nav_value, mf_research_reco=brec.bim_research_reco ) # breakpoint() # import pdb # pdb.set_trace() # Updated Gfundareco objects lastrefd_update("mfund")
[((50, 5, 50, 37), 'django.utils.decorators.method_decorator', 'method_decorator', ({(50, 22, 50, 36): 'login_required'}, {}), '(login_required)', False, 'from django.utils.decorators import method_decorator\n'), ((125, 21, 125, 73), 'plotly.offline.plot', 'plot', (), '', False, 'from plotly.offline import plot\n'), ((209, 21, 209, 73), 'plotly.offline.plot', 'plot', (), '', False, 'from plotly.offline import plot\n'), ((266, 8, 266, 32), 'django_gotolong.lastrefd.models.lastrefd_update', 'lastrefd_update', ({(266, 24, 266, 31): '"""mfund"""'}, {}), "('mfund')", False, 'from django_gotolong.lastrefd.models import Lastrefd, lastrefd_update\n'), ((220, 36, 220, 57), 'django.urls.reverse', 'reverse', ({(220, 44, 220, 56): '"""mfund-list"""'}, {}), "('mfund-list')", False, 'from django.urls import reverse\n'), ((231, 58, 231, 70), 'django.db.models.Max', 'Max', ({(231, 62, 231, 69): '"""mf_id"""'}, {}), "('mf_id')", False, 'from django.db.models import OuterRef, Subquery, Count, Sum, Max, Min\n'), ((239, 20, 239, 48), 'django_gotolong.broker.icidir.imf.models.BrokerIcidirMf.objects.all', 'BrokerIcidirMf.objects.all', ({}, {}), '()', False, 'from django_gotolong.broker.icidir.imf.models import BrokerIcidirMf\n'), ((121, 30, 121, 66), 'plotly.graph_objects.Pie', 'go.Pie', (), '', True, 'import plotly.graph_objects as go\n'), ((205, 30, 205, 66), 'plotly.graph_objects.Pie', 'go.Pie', (), '', True, 'import plotly.graph_objects as go\n'), ((96, 49, 96, 68), 'django.db.models.Sum', 'Sum', ({(96, 53, 96, 67): '"""mf_nav_value"""'}, {}), "('mf_nav_value')", False, 'from django.db.models import OuterRef, Subquery, Count, Sum, Max, Min\n'), ((180, 52, 180, 71), 'django.db.models.Sum', 'Sum', ({(180, 56, 180, 70): '"""mf_nav_value"""'}, {}), "('mf_nav_value')", False, 'from django.db.models import OuterRef, Subquery, Count, Sum, Max, Min\n')]
akria00/m3u8-Downloader-master
m3u8.py
37bf4683b0390998a819d0bb5b8af18ffb2166f6
#coding: utf-8 from gevent import monkey monkey.patch_all() from gevent.pool import Pool import gevent import requests import urllib import os import time import re import ssl class Downloader: def __init__(self, pool_size, retry=3): self.pool = Pool(pool_size) self.session = self._get_http_session(pool_size, pool_size, retry) self.retry = retry self.dir = '' self.succed = {} self.failed = [] self.ts_total = 0 def _get_http_session(self, pool_connections, pool_maxsize, max_retries): session = requests.Session() adapter = requests.adapters.HTTPAdapter(pool_connections=pool_connections, pool_maxsize=pool_maxsize, max_retries=max_retries) session.mount('http://', adapter) session.mount('https://', adapter) return session def run(self, m3u8_url, dir='',moreTs=False): self.dir = dir if self.dir and not os.path.isdir(self.dir): os.makedirs(self.dir) r = self.session.get(m3u8_url, timeout=10) if r.ok: body = r.content if body: ssl._create_default_https_context = ssl._create_unverified_context ts_list = [urllib.parse.urljoin(m3u8_url, n.strip()) for n in str(body, encoding = "utf8").split('\n') if n and not n.startswith("#")] if moreTs: ts_list = self.getMoreTsList(ts_list) ts_list = list(zip(ts_list, [n for n in range(len(list(ts_list)))])) if ts_list: self.ts_total = len(ts_list) print(self.ts_total) g1 = gevent.spawn(self._join_file) self._download(ts_list) g1.join() else: print( r.status_code) def _download(self, ts_list): self.pool.map(self._worker, ts_list) if self.failed: ts_list = self.failed self.failed = [] self._download(ts_list) def _worker(self, ts_tuple): url = ts_tuple[0] index = ts_tuple[1] retry = self.retry while retry: try: r = self.session.get(url, timeout=20) if r.ok: file_name = url.split('/')[-1].split('?')[0] print( file_name) with open(os.path.join(self.dir, file_name), 'wb') as f: f.write(r.content) self.succed[index] = file_name return except: retry -= 1 print ('[FAIL]%s' % url) self.failed.append((url, index)) def _join_file(self): index = 0 outfile = '' while index < self.ts_total: file_name = self.succed.get(index, '') if file_name: infile = open(os.path.join(self.dir, file_name), 'rb') if not outfile: outfile = open(os.path.join(self.dir, file_name.split('.')[0]+'_all.'+file_name.split('.')[-1]), 'wb') outfile.write(infile.read()) infile.close() os.remove(os.path.join(self.dir, file_name)) index += 1 else: time.sleep(1) if outfile: outfile.close() def getMoreTsList(self,ts_list): headers = {'user-agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 11_0 like Mac OS X) AppleWebKit/604.1.38 (KHTML, like Gecko) Version/11.0 Mobile/15A372 Safari/604.1', 'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8', 'accept-encoding': 'gzip, deflate, br', 'accept-language': 'zh-CN,zh;q=0.9', 'upgrade-insecure-requests':1, 'scheme':'https' } retry = self.retry isOk = False lastTs = ts_list[-1] pattern = re.compile(r'(\d+\.?\d)\.ts') tsNum = '{:0>3}'.format(int(pattern.findall(lastTs)[0]) + 1 ) nextTs = re.sub(pattern,str(tsNum),lastTs,1) + ".ts" req = urllib.request.Request(url=nextTs,headers=headers,method='GET') l = r = int(tsNum) maxTs = 0 while retry or isOk: try: isOk = urllib.request.urlopen(req).status==200 if isOk: retry = 3 l = r + 1 r = l + 100 if maxTs < r else maxTs - int((maxTs-l)/2) nextTs = re.sub(pattern,'{:0>3}'.format(r),lastTs,1) + ".ts" req = urllib.request.Request(url=nextTs,headers=headers,method='GET') else: r = r - int((r-l)/2) except : if int((r-l)/2) == 0: for i in range(int(tsNum) , r): 
ts_list.append(re.sub(pattern,'{:0>3}'.format(i),lastTs,1) + ".ts") return ts_list maxTs = r r = r - int((r-l)/2) nextTs = re.sub(pattern,'{:0>3}'.format(r),lastTs,1) + ".ts" req = urllib.request.Request(url=nextTs,headers=headers,method='GET') retry -= 1 isOk = False return ts_list if __name__ == '__main__': downloader = Downloader(5) downloader.run('https://www.xiaodianying.com/filets/2069/dp.m3u8', './video',True)
[((4, 0, 4, 18), 'gevent.monkey.patch_all', 'monkey.patch_all', ({}, {}), '()', False, 'from gevent import monkey\n'), ((16, 20, 16, 35), 'gevent.pool.Pool', 'Pool', ({(16, 25, 16, 34): 'pool_size'}, {}), '(pool_size)', False, 'from gevent.pool import Pool\n'), ((25, 22, 25, 40), 'requests.Session', 'requests.Session', ({}, {}), '()', False, 'import requests\n'), ((26, 22, 26, 138), 'requests.adapters.HTTPAdapter', 'requests.adapters.HTTPAdapter', (), '', False, 'import requests\n'), ((108, 18, 108, 47), 're.compile', 're.compile', ({(108, 29, 108, 46): '"""(\\\\d+\\\\.?\\\\d)\\\\.ts"""'}, {}), "('(\\\\d+\\\\.?\\\\d)\\\\.ts')", False, 'import re\n'), ((111, 14, 111, 77), 'urllib.request.Request', 'urllib.request.Request', (), '', False, 'import urllib\n'), ((34, 12, 34, 33), 'os.makedirs', 'os.makedirs', ({(34, 24, 34, 32): 'self.dir'}, {}), '(self.dir)', False, 'import os\n'), ((33, 28, 33, 51), 'os.path.isdir', 'os.path.isdir', ({(33, 42, 33, 50): 'self.dir'}, {}), '(self.dir)', False, 'import os\n'), ((93, 16, 93, 29), 'time.sleep', 'time.sleep', ({(93, 27, 93, 28): '(1)'}, {}), '(1)', False, 'import time\n'), ((47, 25, 47, 54), 'gevent.spawn', 'gevent.spawn', ({(47, 38, 47, 53): 'self._join_file'}, {}), '(self._join_file)', False, 'import gevent\n'), ((85, 30, 85, 63), 'os.path.join', 'os.path.join', ({(85, 43, 85, 51): 'self.dir', (85, 53, 85, 62): 'file_name'}, {}), '(self.dir, file_name)', False, 'import os\n'), ((90, 26, 90, 59), 'os.path.join', 'os.path.join', ({(90, 39, 90, 47): 'self.dir', (90, 49, 90, 58): 'file_name'}, {}), '(self.dir, file_name)', False, 'import os\n'), ((122, 26, 122, 89), 'urllib.request.Request', 'urllib.request.Request', (), '', False, 'import urllib\n'), ((133, 22, 133, 85), 'urllib.request.Request', 'urllib.request.Request', (), '', False, 'import urllib\n'), ((116, 23, 116, 50), 'urllib.request.urlopen', 'urllib.request.urlopen', ({(116, 46, 116, 49): 'req'}, {}), '(req)', False, 'import urllib\n'), ((70, 30, 70, 63), 'os.path.join', 'os.path.join', ({(70, 43, 70, 51): 'self.dir', (70, 53, 70, 62): 'file_name'}, {}), '(self.dir, file_name)', False, 'import os\n')]
Danielvalev/kutiika
buzzbox/restaurants/migrations/0002_restaurant_description.py
661b850163de942a137157a97d98d90553861044
# Generated by Django 3.2.9 on 2021-12-06 10:02 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('restaurants', '0001_initial'), ] operations = [ migrations.AddField( model_name='restaurant', name='description', field=models.CharField(default='Description', max_length=255, verbose_name='Description'), preserve_default=False, ), ]
[((16, 18, 16, 101), 'django.db.models.CharField', 'models.CharField', (), '', False, 'from django.db import migrations, models\n')]
fraca7/dsremap
src/dsrlib/ui/utils.py
fb8f4fb13e74b512ed0cac05387fbe9694faebcf
#!/usr/bin/env python3 import os import contextlib from PyQt5 import QtCore, QtWidgets from dsrlib.settings import Settings class LayoutBuilder: def __init__(self, target): self.target = target self._stack = [] @contextlib.contextmanager def _layout(self, cls, *args, **kwargs): layout = cls() self._stack.append(layout) try: yield layout finally: self._pop(*args, **kwargs) def _pop(self, *args, **kwargs): layout = self._stack.pop() if self._stack: parent = self._stack[-1] if isinstance(layout, QtWidgets.QSplitter): parent.addWidget(layout) else: if isinstance(parent, QtWidgets.QSplitter): container = QtWidgets.QWidget(parent) container.setLayout(layout) parent.addWidget(container) else: parent.addLayout(layout, *args, **kwargs) elif isinstance(self.target, QtWidgets.QMainWindow): if isinstance(layout, QtWidgets.QSplitter): self.target.setCentralWidget(layout) else: container = QtWidgets.QWidget(self.target) container.setLayout(layout) self.target.setCentralWidget(container) else: if isinstance(layout, QtWidgets.QSplitter): layout2 = QtWidgets.QHBoxLayout() layout2.setContentsMargins(0, 0, 0, 0) layout2.addWidget(layout) self.target.setLayout(layout2) else: self.target.setLayout(layout) @contextlib.contextmanager def hbox(self, *args, **kwargs): # pragma: no cover with self._layout(QtWidgets.QHBoxLayout, *args, **kwargs) as layout: layout.setContentsMargins(1, 1, 1, 1) layout.setSpacing(1) yield layout @contextlib.contextmanager def vbox(self, *args, **kwargs): # pragma: no cover with self._layout(QtWidgets.QVBoxLayout, *args, **kwargs) as layout: layout.setContentsMargins(1, 1, 1, 1) layout.setSpacing(1) yield layout def stack(self, *args, **kwargs): # pragma: no cover return self._layout(QtWidgets.QStackedLayout, *args, **kwargs) def form(self, *args, **kwargs): class _FormLayout(QtWidgets.QFormLayout): def addLayout(self, layout): self.addRow(layout) def addRow(self, label, widget=None): # pylint: disable=C0111 if isinstance(label, str): label = QtWidgets.QLabel(label) label.setSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Expanding) label.setAlignment(QtCore.Qt.AlignVCenter) if widget is None: super().addRow(label) else: super().addRow(label, widget) return self._layout(_FormLayout, *args, **kwargs) def split(self, *args, **kwargs): # pragma: no cover return self._layout(QtWidgets.QSplitter, *args, **kwargs) def getSaveFilename(parent, domain, extension): with Settings().grouped('Paths') as settings: path = QtCore.QStandardPaths.writableLocation(QtCore.QStandardPaths.DocumentsLocation) sname = 'save_%s' % domain if settings.contains(sname): path = settings.value(sname) while True: name, dummy = QtWidgets.QFileDialog.getSaveFileName(parent, _('Save'), path, '*.%s' % extension, options=QtWidgets.QFileDialog.DontConfirmOverwrite) if not name: return None if not name.endswith('.%s' % extension): name = '%s.%s' % (name, extension) if os.path.exists(name): resp = QtWidgets.QMessageBox.question(parent, _('Overwrite file?'), _('This file already exists. 
Overwrite?'), QtWidgets.QMessageBox.Yes|QtWidgets.QMessageBox.No|QtWidgets.QMessageBox.Cancel) if resp == QtWidgets.QMessageBox.Yes: settings.setValue(sname, os.path.dirname(name)) return name if resp == QtWidgets.QMessageBox.No: continue return None settings.setValue(sname, os.path.dirname(name)) return name def getOpenFilename(parent, domain, extension): with Settings().grouped('Paths') as settings: path = QtCore.QStandardPaths.writableLocation(QtCore.QStandardPaths.DocumentsLocation) sname = 'open_%s' % domain if settings.contains(sname): path = settings.value(sname) name, dummy = QtWidgets.QFileDialog.getOpenFileName(parent, _('Open file'), path, '*.%s' % extension if extension else '') if name: settings.setValue(sname, os.path.dirname(name)) return name return None class EnumComboBox(QtWidgets.QComboBox): valueChanged = QtCore.pyqtSignal(object) def __init__(self, *args, enum, value=None, **kwargs): super().__init__(*args, **kwargs) self._enum = enum for item in enum: self.addItem(enum.label(item), item) if value is not None: self.setValue(value) self.currentIndexChanged.connect(self._emit) def setValue(self, value): for index, item in enumerate(self._enum): if value == item: self.setCurrentIndex(index) break else: raise ValueError('Value "%s" not found in enum' % str(value)) def _emit(self, _): self.valueChanged.emit(self.currentData())
[((135, 19, 135, 44), 'PyQt5.QtCore.pyqtSignal', 'QtCore.pyqtSignal', ({(135, 37, 135, 43): 'object'}, {}), '(object)', False, 'from PyQt5 import QtCore, QtWidgets\n'), ((94, 15, 94, 94), 'PyQt5.QtCore.QStandardPaths.writableLocation', 'QtCore.QStandardPaths.writableLocation', ({(94, 54, 94, 93): 'QtCore.QStandardPaths.DocumentsLocation'}, {}), '(QtCore.QStandardPaths.DocumentsLocation)', False, 'from PyQt5 import QtCore, QtWidgets\n'), ((122, 15, 122, 94), 'PyQt5.QtCore.QStandardPaths.writableLocation', 'QtCore.QStandardPaths.writableLocation', ({(122, 54, 122, 93): 'QtCore.QStandardPaths.DocumentsLocation'}, {}), '(QtCore.QStandardPaths.DocumentsLocation)', False, 'from PyQt5 import QtCore, QtWidgets\n'), ((105, 15, 105, 35), 'os.path.exists', 'os.path.exists', ({(105, 30, 105, 34): 'name'}, {}), '(name)', False, 'import os\n'), ((93, 9, 93, 19), 'dsrlib.settings.Settings', 'Settings', ({}, {}), '()', False, 'from dsrlib.settings import Settings\n'), ((116, 37, 116, 58), 'os.path.dirname', 'os.path.dirname', ({(116, 53, 116, 57): 'name'}, {}), '(name)', False, 'import os\n'), ((121, 9, 121, 19), 'dsrlib.settings.Settings', 'Settings', ({}, {}), '()', False, 'from dsrlib.settings import Settings\n'), ((129, 37, 129, 58), 'os.path.dirname', 'os.path.dirname', ({(129, 53, 129, 57): 'name'}, {}), '(name)', False, 'import os\n'), ((33, 32, 33, 57), 'PyQt5.QtWidgets.QWidget', 'QtWidgets.QWidget', ({(33, 50, 33, 56): 'parent'}, {}), '(parent)', False, 'from PyQt5 import QtCore, QtWidgets\n'), ((42, 28, 42, 58), 'PyQt5.QtWidgets.QWidget', 'QtWidgets.QWidget', ({(42, 46, 42, 57): 'self.target'}, {}), '(self.target)', False, 'from PyQt5 import QtCore, QtWidgets\n'), ((47, 26, 47, 49), 'PyQt5.QtWidgets.QHBoxLayout', 'QtWidgets.QHBoxLayout', ({}, {}), '()', False, 'from PyQt5 import QtCore, QtWidgets\n'), ((78, 28, 78, 51), 'PyQt5.QtWidgets.QLabel', 'QtWidgets.QLabel', ({(78, 45, 78, 50): 'label'}, {}), '(label)', False, 'from PyQt5 import QtCore, QtWidgets\n'), ((111, 45, 111, 66), 'os.path.dirname', 'os.path.dirname', ({(111, 61, 111, 65): 'name'}, {}), '(name)', False, 'import os\n')]
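A minimal sketch of nesting layouts with the LayoutBuilder above, assuming PyQt5 with a display available and the (assumed) import path dsrlib.ui.utils from the row above.

import sys

from PyQt5 import QtWidgets

from dsrlib.ui.utils import LayoutBuilder  # assumed import path

app = QtWidgets.QApplication(sys.argv)
widget = QtWidgets.QWidget()

builder = LayoutBuilder(widget)
with builder.vbox() as vbox:
    vbox.addWidget(QtWidgets.QLabel("Profile"))
    with builder.hbox() as buttons:
        buttons.addWidget(QtWidgets.QPushButton("OK"))
        buttons.addWidget(QtWidgets.QPushButton("Cancel"))
# Each context exit pops a layout: the inner hbox is added to the enclosing
# vbox, and since the target is a plain QWidget the outer vbox is installed
# with widget.setLayout().

widget.show()
app.exec_()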
mshonichev/example_pkg
src/tiden/tidenrunner.py
556a703fe8ea4a7737b8cae9c5d4d19c1397a70b
#!/usr/bin/env python3 # # Copyright 2017-2020 GridGain Systems. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from .tidenpluginmanager import PluginManager from .report.steps import step, InnerReportConfig, Step, add_attachment, AttachmentType from .util import log_print, unix_path, call_method, create_case, kill_stalled_java, exec_time from .result import Result from .util import write_yaml_file, should_be_skipped from .logger import * from .runner import get_test_modules, get_long_path_len, get_class_from_module, known_issue_str from .priority_decorator import get_priority_key from .sshpool import SshPool from uuid import uuid4 from traceback import format_exc from .runner import set_configuration_options, get_configuration_representation, get_actual_configuration from importlib import import_module from os import path, mkdir from time import time from shutil import copyfile from os.path import join, basename from glob import glob import traceback class TidenTestPlan: all_tests = None skipped_tests = None tests_to_execute = None def __init__(self): self.all_tests = {} self.skipped_tests = [] self.tests_to_execute = [] def update(self, other): self.all_tests.update(other.all_tests) self.skipped_tests.extend(other.skipped_tests) self.tests_to_execute.extend(other.tests_to_execute) class TidenRunner: # { # '<suite_name>.<test_file_name>': { # 'path': <full-path-to-test-file>, # 'module_short_name': <test_file_name>, # } # } modules = None # Tiden config dictionary config = None # Tiden SshPool instance ssh_pool = None # Tiden PluginManager instance pm = None # longest length of the test name long_path_len = 0 # instance of Result class result = None # current test module, a key to self.modules dictionary test_module = None # == TidenTestPlan for all modules: total = None # dictionary of TidenTestPlan indexed by test module name test_plan = {} # == for current test module: # a short name of test module, e.g. test module file name without .py extension module_short_name = None # a name of module' test class test_class_name = None # instance of current module' test case class test_class = None # == for current test within module: # test name, with all configuration options current_test_name = None # test method name only current_test_method = None def __init__(self, config, **kwargs): if kwargs.get('modules', None) is not None: self.modules = kwargs.get('modules') else: self.modules = get_test_modules(config, collect_only=kwargs.get('collect_only')) self.config = config self.long_path_len = get_long_path_len(self.modules) xunit_path_var = None if kwargs.get('xunit_path'): xunit_path_var = kwargs.get('xunit_path') elif config.get('var_dir') and config.get('xunit_file'): xunit_path_var = join(config.get('var_dir'), config.get('xunit_file')) self.result = Result(xunit_path=xunit_path_var) self.ssh_pool: SshPool = kwargs.get('ssh_pool') self.pm: PluginManager = kwargs.get('plugin_manager') def collect_tests(self): """ Collect tests from all modules. 
""" log_print("*** Collecting tests ***", color='blue') long_path_len = get_long_path_len(self.modules) from tiden.sshpool import AbstractSshPool self.ssh_pool = AbstractSshPool({'hosts': []}) def empty_init(self, config, ssh_pool): self.config = config self.ssh = ssh_pool self.__prepare_session_vars() for test_module in sorted(self.modules.keys()): # cleanup instance vars self.test_plan[test_module] = TidenTestPlan() self.__prepare_module_vars(test_module, fake_init=empty_init) self.__print_current_module_name() test_method_names = sorted(list(self.gen_tests(self.test_class))) self.create_test_module_attr_yaml(test_method_names) self.collect_tests0(test_method_names) self.total.update(self.test_plan[test_module]) log_print("*** Found %s tests. %s skipped. Going to 'run' %s tests ***" % ( len(self.total.all_tests), len(self.total.skipped_tests), len(self.total.tests_to_execute) ), color='blue') test_cnt = 0 # Skipped tests do not hit collect report # Now generate results for 'executed' tests for test_module in sorted(self.modules.keys()): self.__prepare_module_vars(test_module, fake_init=empty_init) test_plan = self.test_plan[self.test_module] for test_name in sorted(test_plan.tests_to_execute): test_param = test_plan.all_tests[test_name] self.__prepare_test_vars(**test_param) test_cnt = test_cnt + 1 self.result.start_testcase(self.test_class, self.current_test_name) self.__print_found_test_method_to_execute(long_path_len, test_cnt, test_module) self.result.stop_testcase('pass') def process_tests(self): """ Run all tests :return: """ log_print("*** Tests ***", color='blue') self.__prepare_session_vars() # Check requirements for applications for test_module in sorted(self.modules.keys()): module = import_module("suites.%s" % test_module) test_class_name = get_class_from_module(self.modules[test_module]['module_short_name']) test_class = getattr(module, test_class_name)(self.config, self.ssh_pool) if hasattr(test_class, 'check_requirements'): test_class.check_requirements() for test_module in sorted(self.modules.keys()): # cleanup instance vars self.test_plan[test_module] = TidenTestPlan() self.__prepare_module_vars(test_module) # find test methods: if hasattr(self.test_class, '__configurations__'): cfg_options = getattr(self.test_class, '__configuration_options__') configuration = get_actual_configuration(self.config, cfg_options) log_print("Configuration options for %s:\n%s" % (self.test_class.__class__.__name__, '\n'.join([ '\t' + cfg_option_name + '=' + str( configuration[i]) for i, cfg_option_name in enumerate(cfg_options) ])), color='blue') else: cfg_options = None configuration = None test_method_names = list(self.gen_tests(self.test_class)) self.collect_tests1(test_method_names, common_test_param={ 'configuration': configuration, 'cfg_options': cfg_options, }) test_plan = self.test_plan[self.test_module] if len(test_plan.skipped_tests) > 0: self._skip_tests() if len(test_plan.tests_to_execute) > 0: tests_to_execute = sorted(test_plan.tests_to_execute, key=get_priority_key(self.test_class)) log_print("*** Found %s tests in %s. %s skipped. 
Going to run %s tests ***\n%s" % ( len(test_plan.all_tests), self.test_class_name, len(test_plan.skipped_tests), len(test_plan.tests_to_execute), '\n'.join([ test_plan.all_tests[test_name]['test_method_name'] for test_name in tests_to_execute ])), color='blue') # Execute module setup setup_passed = self.__call_module_setup_teardown('setup') if setup_passed: self._run_tests(tests_to_execute) # Execute module teardown self.__call_module_setup_teardown('teardown') # this is for correct fail in Jenkins if not setup_passed: exit(1) def create_test_module_attr_yaml(self, test_method_names): # create attr.yaml for current_test_name in test_method_names: test_function = getattr(self.test_class, current_test_name) create_case(test_function) def __prepare_session_vars(self): self.test_plan = {} self.total = TidenTestPlan() def __prepare_module_vars(self, module_name, fake_init=None): """ Prepare per-module initialization of internal variables: Expects self.test_module be set to proper full name of module under 'suites' directory sets up self.test_class_name self.module_short_name self.test_class - creates instance of test case class resets self.all_tests, self.tests_to_execute, self.skipped_tests config fills in config['rt'], config['rt']['remote'] Creates test module working local and remote directories. Copies resources from suite directory to local test module working directory. :param module_name: name of the module to prepare :param fake_init: do not init module :return: """ self.test_module = module_name # fill new module vars self.module_short_name = self.modules[self.test_module]['module_short_name'] test_module_dir = "%s/%s" % (self.config['suite_var_dir'], self.module_short_name) remote_test_module_dir = "%s/%s" % (self.config['remote']['suite_var_dir'], self.module_short_name) self.test_class_name = get_class_from_module(self.module_short_name) # Update Tiden config self.config['rt'] = { 'test_class': self.test_class_name, 'test_method': None, 'test_module': self.test_module, 'test_module_name': self.module_short_name, 'test_module_dir': test_module_dir, 'remote': { 'test_module_dir': remote_test_module_dir, } } module = import_module("suites.%s" % self.test_module) # used for collect_only if fake_init: self.test_class = getattr(module, self.test_class_name) self.test_class.__init__ = fake_init self.test_class = getattr(module, self.test_class_name)(self.config, self.ssh_pool) else: # for process tests - prepare test directory and resources self.__create_test_module_directory(remote_test_module_dir, test_module_dir) self.test_class = getattr(module, self.test_class_name)(self.config, self.ssh_pool) if hasattr(self.test_class, 'tiden'): self.__copy_resources_to_local_test_module_directory() # Set ssh and config apps model classes self.test_class.tiden.config = self.config self.test_class.tiden.ssh = self.ssh_pool self.test_class.config = self.config self.test_class.ssh = self.ssh_pool self._save_config() def __prepare_test_vars(self, test_method_name=None, configuration=None, cfg_options=None, **kwargs): if not test_method_name: return self.test_iteration = 1 self.current_test_method = test_method_name if hasattr(self.test_class, '__configurations__'): if cfg_options is None: cfg_options = getattr(self.test_class, '__configuration_options__') if configuration is None: configuration = get_actual_configuration(self.config, cfg_options) configuration_representation = get_configuration_representation(cfg_options, configuration) self.current_test_name = self.current_test_method + 
configuration_representation else: self.current_test_name = self.current_test_method def collect_test0(self): # collect test params test_params = { 'test_name': self.current_test_name, } test_function = getattr(self.test_class, self.current_test_method) # first setup fixture if hasattr(test_function, "__setup__"): setup_fixture = getattr(test_function, "__setup__") if type(setup_fixture) == type(''): setup_method = getattr(self.test_class, setup_fixture) else: setup_method = setup_fixture test_params['setup_test_params'] = True test_params['setup_test_method'] = setup_method # next, teardown fixture if hasattr(test_function, "__teardown__"): teardown_fixture = getattr(test_function, "__teardown__") teardown_method = getattr(self.test_class, teardown_fixture) test_params['teardown_test_method'] = teardown_method # don't forget known issues if hasattr(test_function, "__known_issues__"): known_issue = getattr(test_function, "__known_issues__") test_params['known_issue'] = known_issue # test by default runs only once, # unless repeated_test_count set explicitly by decorator or framework option repeat_count = 1 # here, we check --to=repeated_test=N and --to=repeated_test.test_name=N options # and decorate test with @repeated_test automagically if that's required if self.config.get('repeated_test'): repeated_test_option = self.config['repeated_test'] re_decorate = False if type({}) != type(repeated_test_option): # if option was given as --to=repeated_test=N, re-decorate all tests re_decorate = True repeat_count = int(repeated_test_option) elif self.current_test_method in repeated_test_option.keys(): # otherwise re-decorate only if test name matches given option re_decorate = True repeat_count = int(repeated_test_option[self.current_test_method]) if re_decorate: from tiden.util import repeated_test original_test = test_function if hasattr(original_test, 'repeated_test_name'): # that test was previously decorated by @repeated_test, extract original test_names original_names = original_test.repeated_test_name decorated_test = repeated_test(repeat_count, test_names=original_names)(original_test.__func__) else: # that's a brand new decoration decorated_test = repeated_test(repeat_count)(original_test.__func__) # this magic required to convert decorated test function to method of a test class from types import MethodType setattr(self.test_class, self.current_test_method, MethodType(decorated_test, self.test_class)) test_function = getattr(self.test_class, self.current_test_method) if hasattr(test_function, 'repeated_test_count'): repeat_count = test_function.repeated_test_count repeated_test_name = test_function.repeated_test_name test_params['repeated_test_count'] = repeat_count test_params['repeated_test_name'] = repeated_test_name test_params['continue_on_fail'] = self.config.get('repeated_test_continue_on_fail', False) return test_params def _skip_tests(self): test_plan = self.test_plan[self.test_module] skipped_tests = sorted(test_plan.skipped_tests) try: for current_test in skipped_tests: test_param = test_plan.all_tests[current_test] self.__prepare_test_vars(**test_param) pad_string = self.__get_pad_string(msg=self.current_test_method) self.result.skip_testcase_no_start(self.test_class, self.current_test_name, skip_message=test_param['skip_msg'], skip_no_start=test_param['skip_no_start']) self.result.update_xunit() log_print("%s %s" % (pad_string, test_param['skip_msg']), color='yellow') finally: self.current_test_name = None self.current_test_method = None def _run_tests(self, 
tests_to_execute): test_plan = self.test_plan[self.test_module] try: for test_cnt, current_test in enumerate(tests_to_execute, start=1): test_param = test_plan.all_tests[current_test] self.__prepare_test_vars(**test_param) repeated_test_count = test_param.get('repeated_test_count', 1) repeated_test_continue_on_fail = test_param.get('continue_on_fail') test_with_iterations = True if repeated_test_count > 1 else False pad_string = self.__get_pad_string() log_print("%s started (%s from %s)" % (pad_string, test_cnt, len(tests_to_execute)), color='yellow') for self.test_iteration in range(repeated_test_count): if test_with_iterations: log_print("{} started (iteration {} from {})".format(pad_string, self.test_iteration + 1, repeated_test_count), color='yellow') test_status = self._run_test() if test_with_iterations and test_status != 'pass' and not repeated_test_continue_on_fail: self.result.update_test_name('{}_iteration_{}'.format(current_test, self.test_iteration + 1)) break finally: self.current_test_name = None self.current_test_method = None def _run_test(self): setattr(self, '_secret_report_storage', InnerReportConfig()) test_exception = None tb_msg = None test_status = 'pass' pad_string = self.__get_pad_string() started = int(time()) known_issue = self.test_plan[self.test_module].all_tests[self.current_test_name].get('known_issue') setattr(self.test_class, '_secret_report_storage', InnerReportConfig()) try: self.pm.do("before_test_method", test_module=self.test_module, test_name=self.current_test_name, artifacts=self.config.get('artifacts', {})) self.result.start_testcase(self.test_class, self.current_test_name) self.__update_config_and_save(current_method_name=self.current_test_name) # Execute test setup method self.__call_test_setup_teardown('setup') # self.__print_with_format() with Step(self, 'Execution'): try: call_method(self.test_class, self.current_test_method) finally: self.__set_child_steps_to_parent() self.__save_logs() log_print(f"{pad_string} passed {exec_time(started)}", color='green') except (AssertionError, TidenException) as e: test_status = 'fail' test_exception = e tb_msg = traceback.format_exc() except Exception as e: test_status = 'error' test_exception = e tb_msg = traceback.format_exc() finally: if test_status != 'pass': log_print(tb_msg, color='red') log_print("{} {} {}{}".format(pad_string, test_status, exec_time(started), known_issue_str(known_issue)), color='red') self.result.stop_testcase( test_status, e=test_exception, tb=tb_msg, known_issue=known_issue, run_info=self.test_class.get_run_info() if hasattr(self.test_class, 'get_run_info') else None ) # Execute test teardown method self.__call_test_setup_teardown('teardown') self.pm.do('after_test_method', test_status=test_status, exception=test_exception, stacktrace=tb_msg, known_issue=known_issue, description=getattr(self.test_class, self.current_test_method, lambda: None).__doc__, inner_report_config=getattr(self, '_secret_report_storage')) # Kill java process if teardown function didn't kill nodes if not hasattr(self.test_class, 'keep_ignite_between_tests'): kill_stalled_java(self.ssh_pool) return test_status @step('logs') def __save_logs(self): test_dir = self.config.get('rt', {}).get('remote', {}).get('test_dir') if 'WardReport' in self.config.get('plugins', []): report_config = self.config['plugins']['WardReport'] files_receiver_url = report_config['files_url'] upload_logs = report_config['upload_logs'] else: return if test_dir: try: for host_ip, output_lines in self.ssh_pool.exec([f"ls 
{test_dir}"]).items(): with Step(self, host_ip): for line in output_lines: file_name: str for file_name in line.split('\n'): if file_name and file_name.endswith('.log'): send_file_name = f'{uuid4()}_{file_name}' add_attachment(self, file_name, send_file_name, AttachmentType.FILE) if upload_logs: cmd = f'cd {test_dir}; ' \ f'curl -H "filename: {send_file_name}" ' \ f'-F "file=@{file_name};filename={file_name}" ' \ f'{files_receiver_url}/files/add' self.ssh_pool.exec_on_host(host_ip, [cmd]) except: log_print(f'Failed to send report. \n{format_exc()}', color='pink') def __copy_resources_to_local_test_module_directory(self): """ Copy resources in test resource directory :return: """ test_resource_dir = "%s/res" % self.config['rt']['test_module_dir'] if not path.exists(test_resource_dir): mkdir(test_resource_dir) self.config['rt']['resource_dir'] = "%s/res/%s" % (self.config['suite_dir'], self.module_short_name[5:]) for file in glob("%s/*" % self.config['rt']['resource_dir']): if path.isfile(file): copyfile(file, f"{test_resource_dir}/{basename(file)}") self.config['rt']['test_resource_dir'] = unix_path(test_resource_dir) def __create_test_module_directory(self, remote_test_module_dir, test_module_dir): mkdir(test_module_dir) self.ssh_pool.exec([f'mkdir -p {remote_test_module_dir}']) @step('{method_name}') def __call_test_setup_teardown(self, method_name): method_to_execute = None try: self._call_plugin_manager(f'before_test_method_{method_name}') all_tests = self.test_plan[self.test_module].all_tests if all_tests[self.current_test_name].get(f'{method_name}_test_method'): method_to_execute = all_tests[self.current_test_name].get(f'{method_name}_test_method') self.__print_with_format(msg=str(method_to_execute.__name__)) try: if all_tests[self.current_test_name].get(f'{method_name}_test_params'): method_to_execute(self.test_class) else: method_to_execute() except Exception as e: log_print(f'!!! Exception in {method_name} code !!!', color='red') log_print(traceback.format_exc()) try: self.__save_logs() except: log_print(f'Failed to get logs\n{traceback.format_exc()}', color='pink') # if exception in setup method then re-raise the exception as we should fail the test if method_name == 'setup': raise e finally: self.__set_child_steps_to_parent() self._call_plugin_manager(f'after_test_method_{method_name}') def __set_child_steps_to_parent(self): exec_report: InnerReportConfig = getattr(self.test_class, '_secret_report_storage', None) test_report: InnerReportConfig = getattr(self, '_secret_report_storage') idx_to_add = None for idx, test_step in enumerate(test_report.steps): if test_step['status'] is None: idx_to_add = idx break test_report.steps[idx_to_add]['children'] = exec_report.steps + test_report.steps[idx_to_add].get('children', []) title = getattr(getattr(self.test_class, self.current_test_method), '__report_title__', None) suites = getattr(getattr(self.test_class, self.current_test_method), '__report_suites__', None) if title: test_report.title = title test_report.suites = suites setattr(self, '_secret_report_storage', test_report) setattr(self.test_class, '_secret_report_storage', InnerReportConfig()) def __call_module_setup_teardown(self, fixture_name): """ Execute test module setup/teardown fixture. 
:param fixture_name: either 'setup' or 'teardown' :return: """ self._call_plugin_manager('before_test_class_%s' % fixture_name) fixture_passed = True try: if hasattr(self.test_class, fixture_name): started = time() try: self.__print_with_format('started', current_method_name=fixture_name) self.__update_config_and_save(current_method_name=fixture_name) # Execute setup or teardown method call_method(self.test_class, fixture_name) self.__print_with_format('finished in %s sec' % (int(time() - started)), current_method_name=fixture_name) # except (AssertionError, TidenException) as e: except Exception as e: fixture_passed = False self.__print_with_format('failed in %s sec' % (int(time() - started)), current_method_name=fixture_name) log_print('Exception in %s.%s.%s: %s\n%s' % (self.test_module, self.test_class_name, fixture_name, str(e), str(traceback.format_exc())), color='red') finally: self._call_plugin_manager('after_test_class_%s' % fixture_name) return fixture_passed def _call_plugin_manager(self, execution_point): args = [self.test_module, self.test_class] if self.current_test_method: args.append(self.current_test_method) self.pm.do(execution_point, *args) def __update_config_and_save(self, current_method_name=None): test_method = current_method_name if current_method_name else self.current_test_method test_method_name = test_method.split('(')[0] if '(' in test_method else test_method test_dir_name = test_method_name all_tests = self.test_plan[self.test_module].all_tests # cause of repeated_tests decorator if all_tests.get(test_method) and all_tests[test_method].get('repeated_test_name'): test_dir_name = '{}_{}'.format( test_method_name, all_tests[test_method].get('repeated_test_name')[self.test_iteration]) self.config['rt']['test_method'] = test_method_name self.config['rt']['remote']['test_dir'] = "{}/{}/{}".format( self.config['rt']['remote']['test_module_dir'], self.config['rt']['test_class'], test_dir_name ) self.config['rt']['test_dir'] = "{}/{}/{}".format( self.config['rt']['test_module_dir'], self.config['rt']['test_class'], test_dir_name) try: create_remote_dir = [ 'mkdir -p %s/%s/%s' % (self.config['rt']['remote']['test_module_dir'], self.test_class_name, str(test_dir_name)), 'ln -sfn %s %s/current_test_directory' % (self.config['rt']['remote']['test_module_dir'], self.config['environment']['home']) ] self.ssh_pool.exec(create_remote_dir) except Exception: log_print("Can't create symlink to current test", color='red') self._save_config() def _check_test_for_skip(self): attribs = [] skip_test = False skip_msg = None skip_no_start = False test_function = getattr(self.test_class, self.current_test_method) if hasattr(test_function, "__attrib__"): attribs = getattr(test_function, "__attrib__") attribs.append(str(self.current_test_method)) # if attr is passed to runner and test is not marked with one of the attribute # then skip it. 
if 'mute' in attribs: skip_msg = 'skipped cause test is MUTED' known_issue = None if hasattr(test_function, "__known_issues__"): known_issue = getattr(test_function, "__known_issues__") if known_issue: skip_msg = '{} cause of {}'.format(skip_msg, known_issue) skip_test = True skip_no_start = True elif self.config.get('attrib') and should_be_skipped(self.config.get('attrib'), attribs, self.config.get('attr_match', 'any')): skip_msg = 'skipped cause of attrib mismatch' skip_test = True skip_no_start = True if hasattr(test_function, "__skipped__"): skip_msg = 'skipped cause of %s' % test_function.__skipped_message__ skip_test = True if hasattr(test_function, "__skip_cond__"): skip_condition = getattr(test_function, "__skip_cond__") conditions_met, skip_message = skip_condition(self.config) if not conditions_met: skip_msg = 'skipped cause of %s' % skip_message skip_test = True if hasattr(test_function, "__skip_conds__") and \ len(test_function.__skip_conds__) > 0: skip_conditions = test_function.__skip_conds__ for skip_condition in skip_conditions: conditions_met, skip_message = skip_condition(self.test_class) if not conditions_met: skip_msg = 'skipped cause of %s' % skip_message skip_test = True return skip_test, skip_msg, skip_no_start def get_tests_results(self): return self.result def _save_config(self): write_yaml_file(self.config['config_path'], self.config) @staticmethod def gen_tests(test_class): """ Generates all test method of given test class :param test_class: :return: """ for class_attr in dir(test_class): if class_attr.startswith('test_'): yield class_attr def collect_tests0(self, test_method_names): """ Collect given set of tests from test module for all configurations :param test_method_names: :return: """ if not hasattr(self.test_class, '__configurations__'): self.collect_tests1(test_method_names) else: cfg_options = getattr(self.test_class, '__configuration_options__').copy() configurations = getattr(self.test_class, '__configurations__').copy() for configuration in configurations: # set configuration options from given configuration to Tiden config, # so that test can check options and skip itself set_configuration_options(cfg_options, self.config, configuration) self.collect_tests1(test_method_names, common_test_param={ 'configuration': configuration, 'cfg_options': cfg_options, }) def collect_tests1(self, test_method_names, common_test_param={}): """ Collect given tests from current test module :param test_method_names: :param common_test_param: :return: """ try: test_plan = self.test_plan[self.test_module] for test_method_name in test_method_names: self.__prepare_test_vars(test_method_name, **common_test_param) test_param = { 'test_method_name': test_method_name, } is_skipped, skip_msg, skip_no_start = self._check_test_for_skip() test_param.update(self.collect_test0()) repeat_count = test_param.get('repeated_test_count', 1) if repeat_count > 0: if repeat_count == 1: # don't rename tests when only one iteration requested test_param['repeated_test_name'] = [] else: # rare case, skip by --to=repeated_test.test_name=0 is_skipped = True skip_msg = 'skipped due to repeated_test iterations <= 0' skip_no_start = False if is_skipped: test_param.update({ 'skip_msg': skip_msg, 'skip_no_start': skip_no_start, }) test_plan.skipped_tests.append(self.current_test_name) else: if common_test_param: test_param.update(common_test_param) test_plan.tests_to_execute.append(self.current_test_name) test_plan.all_tests[self.current_test_name] = test_param.copy() finally: self.current_test_method 
= None self.current_test_name = None def __print_found_test_method_to_execute(self, long_path_len, test_cnt, test_module): method_long_name = "%s.%s.%s " % (test_module, self.test_class_name, self.current_test_name) pad_string = method_long_name.ljust(long_path_len, '.') log_print("%s found (%s from %s)" % (pad_string, test_cnt, len(self.total.tests_to_execute)), color='yellow') def __print_with_format(self, msg='', current_method_name=''): if not current_method_name: if self.current_test_method: current_method_name = self.current_test_method else: current_method_name = '' log_print("[{}][.{}.{}] {}".format( datetime.now().isoformat()[11:-7], self.test_class_name, current_method_name, msg)) def __print_current_module_name(self): log_print("[%s][%s]" % ( datetime.now().isoformat()[11:-7], self.test_module)) def __get_pad_string(self, msg=None): return ("%s.%s.%s " % ( self.test_module, self.test_class_name, msg if msg else self.current_test_method)) \ .ljust(self.long_path_len, '.')
[((136, 24, 136, 54), 'tiden.sshpool.AbstractSshPool', 'AbstractSshPool', ({(136, 40, 136, 53): "{'hosts': []}"}, {}), "({'hosts': []})", False, 'from tiden.sshpool import AbstractSshPool\n'), ((310, 17, 310, 62), 'importlib.import_module', 'import_module', ({(310, 31, 310, 61): "'suites.%s' % self.test_module"}, {}), "('suites.%s' % self.test_module)", False, 'from importlib import import_module\n'), ((582, 8, 582, 30), 'os.mkdir', 'mkdir', ({(582, 14, 582, 29): 'test_module_dir'}, {}), '(test_module_dir)', False, 'from os import path, mkdir\n'), ((192, 21, 192, 61), 'importlib.import_module', 'import_module', ({(192, 35, 192, 60): "'suites.%s' % test_module"}, {}), "('suites.%s' % test_module)", False, 'from importlib import import_module\n'), ((475, 22, 475, 28), 'time.time', 'time', ({}, {}), '()', False, 'from time import time\n'), ((573, 15, 573, 45), 'os.path.exists', 'path.exists', ({(573, 27, 573, 44): 'test_resource_dir'}, {}), '(test_resource_dir)', False, 'from os import path, mkdir\n'), ((574, 12, 574, 36), 'os.mkdir', 'mkdir', ({(574, 18, 574, 35): 'test_resource_dir'}, {}), '(test_resource_dir)', False, 'from os import path, mkdir\n'), ((576, 24, 576, 72), 'glob.glob', 'glob', ({(576, 29, 576, 71): "('%s/*' % self.config['rt']['resource_dir'])"}, {}), "('%s/*' % self.config['rt']['resource_dir'])", False, 'from glob import glob\n'), ((502, 21, 502, 43), 'traceback.format_exc', 'traceback.format_exc', ({}, {}), '()', False, 'import traceback\n'), ((506, 21, 506, 43), 'traceback.format_exc', 'traceback.format_exc', ({}, {}), '()', False, 'import traceback\n'), ((577, 19, 577, 36), 'os.path.isfile', 'path.isfile', ({(577, 31, 577, 35): 'file'}, {}), '(file)', False, 'from os import path, mkdir\n'), ((643, 26, 643, 32), 'time.time', 'time', ({}, {}), '()', False, 'from time import time\n'), ((410, 67, 410, 110), 'types.MethodType', 'MethodType', ({(410, 78, 410, 92): 'decorated_test', (410, 94, 410, 109): 'self.test_class'}, {}), '(decorated_test, self.test_class)', False, 'from types import MethodType\n'), ((402, 37, 403, 77), 'tiden.util.repeated_test', 'repeated_test', (), '', False, 'from tiden.util import repeated_test\n'), ((406, 37, 406, 64), 'tiden.util.repeated_test', 'repeated_test', ({(406, 51, 406, 63): 'repeat_count'}, {}), '(repeat_count)', False, 'from tiden.util import repeated_test\n'), ((602, 30, 602, 52), 'traceback.format_exc', 'traceback.format_exc', ({}, {}), '()', False, 'import traceback\n'), ((565, 54, 565, 66), 'traceback.format_exc', 'format_exc', ({}, {}), '()', False, 'from traceback import format_exc\n'), ((578, 58, 578, 72), 'os.path.basename', 'basename', ({(578, 67, 578, 71): 'file'}, {}), '(file)', False, 'from os.path import join, basename\n'), ((650, 73, 650, 79), 'time.time', 'time', ({}, {}), '()', False, 'from time import time\n'), ((655, 71, 655, 77), 'time.time', 'time', ({}, {}), '()', False, 'from time import time\n'), ((659, 43, 659, 65), 'traceback.format_exc', 'traceback.format_exc', ({}, {}), '()', False, 'import traceback\n'), ((556, 56, 556, 63), 'uuid.uuid4', 'uuid4', ({}, {}), '()', False, 'from uuid import uuid4\n'), ((606, 57, 606, 79), 'traceback.format_exc', 'traceback.format_exc', ({}, {}), '()', False, 'import traceback\n')]
ludwig-ai/ludw
ludwig/data/cache/manager.py
b9d95bbdb474bc22260269de1bc094bc5455f37c
import logging
import os
import re
import uuid
from pathlib import Path

from ludwig.constants import CHECKSUM, META, TEST, TRAINING, VALIDATION
from ludwig.data.cache.util import calculate_checksum
from ludwig.utils import data_utils
from ludwig.utils.fs_utils import delete, path_exists

logger = logging.getLogger(__name__)


def alphanum(v):
    """Filters a string to only its alphanumeric characters."""
    return re.sub(r"\W+", "", v)


class DatasetCache:
    def __init__(self, config, checksum, cache_map, dataset_manager):
        self.config = config
        self.checksum = checksum
        self.cache_map = cache_map
        self.dataset_manager = dataset_manager

    def get(self):
        training_set_metadata_fp = self.cache_map[META]
        if not path_exists(training_set_metadata_fp):
            return None

        cache_training_set_metadata = data_utils.load_json(training_set_metadata_fp)

        cached_training_set = self.cache_map[TRAINING] if path_exists(self.cache_map[TRAINING]) else None
        cached_test_set = self.cache_map[TEST] if path_exists(self.cache_map[TEST]) else None
        cached_validation_set = self.cache_map[VALIDATION] if path_exists(self.cache_map[VALIDATION]) else None

        valid = self.checksum == cache_training_set_metadata.get(CHECKSUM) and cached_training_set is not None

        return valid, cache_training_set_metadata, cached_training_set, cached_test_set, cached_validation_set

    def put(self, training_set, test_set, validation_set, training_set_metadata):
        logger.info("Writing preprocessed training set cache")
        training_set = self.dataset_manager.save(
            self.cache_map[TRAINING],
            training_set,
            self.config,
            training_set_metadata,
            TRAINING,
        )

        if test_set is not None:
            logger.info("Writing preprocessed test set cache")
            test_set = self.dataset_manager.save(
                self.cache_map[TEST],
                test_set,
                self.config,
                training_set_metadata,
                TEST,
            )

        if validation_set is not None:
            logger.info("Writing preprocessed validation set cache")
            validation_set = self.dataset_manager.save(
                self.cache_map[VALIDATION],
                validation_set,
                self.config,
                training_set_metadata,
                VALIDATION,
            )

        logger.info("Writing train set metadata")
        data_utils.save_json(self.cache_map[META], training_set_metadata)

        return training_set, test_set, validation_set, training_set_metadata

    def delete(self):
        for fname in self.cache_map.values():
            if path_exists(fname):
                delete(fname)


class CacheManager:
    def __init__(self, dataset_manager, cache_dir=None):
        self._dataset_manager = dataset_manager
        self._cache_dir = cache_dir

    def get_dataset_cache(self, config, dataset=None, training_set=None, test_set=None, validation_set=None):
        if dataset is not None:
            key = self.get_cache_key(dataset, config)
            cache_map = {
                META: self.get_cache_path(dataset, key, META, "json"),
                TRAINING: self.get_cache_path(dataset, key, TRAINING),
                TEST: self.get_cache_path(dataset, key, TEST),
                VALIDATION: self.get_cache_path(dataset, key, VALIDATION),
            }
            return DatasetCache(config, key, cache_map, self._dataset_manager)
        else:
            key = self.get_cache_key(training_set, config)
            cache_map = {
                META: self.get_cache_path(training_set, key, META, "json"),
                TRAINING: self.get_cache_path(training_set, key, TRAINING),
                TEST: self.get_cache_path(test_set, key, TEST),
                VALIDATION: self.get_cache_path(validation_set, key, VALIDATION),
            }
            return DatasetCache(config, key, cache_map, self._dataset_manager)

    def get_cache_key(self, dataset, config):
        if not isinstance(dataset, str):
            # TODO(travis): could try hashing the in-memory dataset, but this is tricky for Dask
            return str(uuid.uuid1())
        return calculate_checksum(dataset, config)

    def get_cache_path(self, dataset, key, tag, ext=None):
        if not isinstance(dataset, str):
            dataset = None

        if self._cache_dir is None and dataset is not None:
            # Use the input dataset filename (minus the extension) as the cache path
            stem = Path(dataset).stem
        else:
            # To avoid collisions across different directories, we use the unique checksum
            # as the cache path
            stem = alphanum(key)

        ext = ext or self.data_format
        cache_fname = f"{stem}.{tag}.{ext}"
        return os.path.join(self.get_cache_directory(dataset), cache_fname)

    def get_cache_directory(self, input_fname):
        if self._cache_dir is None:
            if input_fname is not None:
                return os.path.dirname(input_fname)
            return "."
        return self._cache_dir

    def can_cache(self, skip_save_processed_input):
        return self._dataset_manager.can_cache(skip_save_processed_input)

    @property
    def data_format(self):
        return self._dataset_manager.data_format
[((12, 9, 12, 36), 'logging.getLogger', 'logging.getLogger', ({(12, 27, 12, 35): '__name__'}, {}), '(__name__)', False, 'import logging\n'), ((17, 11, 17, 32), 're.sub', 're.sub', ({(17, 18, 17, 24): '"""\\\\W+"""', (17, 26, 17, 28): '""""""', (17, 30, 17, 31): 'v'}, {}), "('\\\\W+', '', v)", False, 'import re\n'), ((32, 38, 32, 84), 'ludwig.utils.data_utils.load_json', 'data_utils.load_json', ({(32, 59, 32, 83): 'training_set_metadata_fp'}, {}), '(training_set_metadata_fp)', False, 'from ludwig.utils import data_utils\n'), ((75, 8, 75, 73), 'ludwig.utils.data_utils.save_json', 'data_utils.save_json', ({(75, 29, 75, 49): 'self.cache_map[META]', (75, 51, 75, 72): 'training_set_metadata'}, {}), '(self.cache_map[META], training_set_metadata)', False, 'from ludwig.utils import data_utils\n'), ((114, 15, 114, 50), 'ludwig.data.cache.util.calculate_checksum', 'calculate_checksum', ({(114, 34, 114, 41): 'dataset', (114, 43, 114, 49): 'config'}, {}), '(dataset, config)', False, 'from ludwig.data.cache.util import calculate_checksum\n'), ((29, 15, 29, 52), 'ludwig.utils.fs_utils.path_exists', 'path_exists', ({(29, 27, 29, 51): 'training_set_metadata_fp'}, {}), '(training_set_metadata_fp)', False, 'from ludwig.utils.fs_utils import delete, path_exists\n'), ((34, 58, 34, 95), 'ludwig.utils.fs_utils.path_exists', 'path_exists', ({(34, 70, 34, 94): 'self.cache_map[TRAINING]'}, {}), '(self.cache_map[TRAINING])', False, 'from ludwig.utils.fs_utils import delete, path_exists\n'), ((36, 50, 36, 83), 'ludwig.utils.fs_utils.path_exists', 'path_exists', ({(36, 62, 36, 82): 'self.cache_map[TEST]'}, {}), '(self.cache_map[TEST])', False, 'from ludwig.utils.fs_utils import delete, path_exists\n'), ((38, 62, 38, 101), 'ludwig.utils.fs_utils.path_exists', 'path_exists', ({(38, 74, 38, 100): 'self.cache_map[VALIDATION]'}, {}), '(self.cache_map[VALIDATION])', False, 'from ludwig.utils.fs_utils import delete, path_exists\n'), ((81, 15, 81, 33), 'ludwig.utils.fs_utils.path_exists', 'path_exists', ({(81, 27, 81, 32): 'fname'}, {}), '(fname)', False, 'from ludwig.utils.fs_utils import delete, path_exists\n'), ((82, 16, 82, 29), 'ludwig.utils.fs_utils.delete', 'delete', ({(82, 23, 82, 28): 'fname'}, {}), '(fname)', False, 'from ludwig.utils.fs_utils import delete, path_exists\n'), ((113, 23, 113, 35), 'uuid.uuid1', 'uuid.uuid1', ({}, {}), '()', False, 'import uuid\n'), ((122, 19, 122, 32), 'pathlib.Path', 'Path', ({(122, 24, 122, 31): 'dataset'}, {}), '(dataset)', False, 'from pathlib import Path\n'), ((135, 23, 135, 51), 'os.path.dirname', 'os.path.dirname', ({(135, 39, 135, 50): 'input_fname'}, {}), '(input_fname)', False, 'import os\n')]
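Illustrative sketch (not part of the dataset record above): how CacheManager composes cache file names. The stub dataset manager, its data format, and the input file name are hypothetical; CacheManager is the class from the preceding content field.

class _StubDatasetManager:  # hypothetical stand-in for a real dataset manager
    data_format = "hdf5"

    def can_cache(self, skip_save_processed_input):
        return True

manager = CacheManager(_StubDatasetManager())
print(manager.get_cache_path("data/train.csv", "abc123", "training"))
# -> data/train.training.hdf5  (input-file stem + tag + the manager's data format,
#    placed next to the input file because no cache_dir was configured)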
kshshkim/factorioCalcPy
test_calc_base.py
2a7c6ca567a3bf0d2b19f3cf0bc05274f83d4205
import pprint

from FactorioCalcBase.data.binary import sorted_recipe_list, production_machine_category_list_dict
from FactorioCalcBase.recipe import Recipe
from FactorioCalcBase.calculator_base import CalculatorBase
from FactorioCalcBase.dependency_dict_common_function import dict_add_number
import time


def test_change_machine(test_obj: CalculatorBase, target_recipe, failed_dict):
    recipe_obj = Recipe(recipe_name=target_recipe)
    cat = recipe_obj.get_category()
    available_machine_list = production_machine_category_list_dict.get(cat)
    failed_dict['method_failed']['change_machine_failed'] = {}
    if len(available_machine_list) > 1:
        for machine in available_machine_list:
            test_obj.change_machine_to_specific_block(recipe_name=target_recipe, machine_name=machine)
            if test_obj.block_obj_dict['recipe']['machine_name'] != machine:
                raise 'MachineNotChanged'


def test_calculator_base_methods(test_obj: CalculatorBase, failed_dict: dict):
    recipe_list = list(test_obj.block_obj_dict['recipe'].keys())
    for recipe in recipe_list:
        try:
            test_change_machine(test_obj, recipe, failed_dict)
        except:
            dict_add_number(failed_dict['method_failed']['change_machine_failed'], recipe, 1)


def test_calculator_base(failed_dict):
    mrms = [0, 0.3]
    pm = [None, ["assembling-machine-2", "stone-furnace", "burner-mining-drill"]]
    uk = [True, False]
    am = [1, 101.5]
    failed_dict['init_failed'] = {}
    failed_dict['method_failed'] = {
        'change_machine_failed': {
        }
    }

    for recipe in sorted_recipe_list:
        for mining_research_modifier in mrms:
            for preferred_machines in pm:
                for use_kovarex in uk:
                    for amount in am:
                        try:
                            test_obj = CalculatorBase(recipe_name=recipe, amount=amount,
                                                       preferred_machine_list=preferred_machines,
                                                       use_kovarex=use_kovarex,
                                                       mining_research_modifier=mining_research_modifier)
                        except:
                            dict_add_number(failed_dict['init_failed'], key=recipe, val=1)
                        test_calculator_base_methods(test_obj, failed_dict)
    pprint.pp(failed_dict)
    return failed_dict


def run_test():
    start_time = time.time()
    test_calculator_base({})
    print(f'finished in {time.time()-start_time}')
[((10, 17, 10, 50), 'FactorioCalcBase.recipe.Recipe', 'Recipe', (), '', False, 'from FactorioCalcBase.recipe import Recipe\n'), ((12, 29, 12, 75), 'FactorioCalcBase.data.binary.production_machine_category_list_dict.get', 'production_machine_category_list_dict.get', ({(12, 71, 12, 74): 'cat'}, {}), '(cat)', False, 'from FactorioCalcBase.data.binary import sorted_recipe_list, production_machine_category_list_dict\n'), ((57, 4, 57, 26), 'pprint.pp', 'pprint.pp', ({(57, 14, 57, 25): 'failed_dict'}, {}), '(failed_dict)', False, 'import pprint\n'), ((62, 17, 62, 28), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((29, 12, 29, 93), 'FactorioCalcBase.dependency_dict_common_function.dict_add_number', 'dict_add_number', ({(29, 28, 29, 81): "failed_dict['method_failed']['change_machine_failed']", (29, 83, 29, 89): 'recipe', (29, 91, 29, 92): '(1)'}, {}), "(failed_dict['method_failed']['change_machine_failed'],\n recipe, 1)", False, 'from FactorioCalcBase.dependency_dict_common_function import dict_add_number\n'), ((64, 25, 64, 36), 'time.time', 'time.time', ({}, {}), '()', False, 'import time\n'), ((50, 39, 53, 104), 'FactorioCalcBase.calculator_base.CalculatorBase', 'CalculatorBase', (), '', False, 'from FactorioCalcBase.calculator_base import CalculatorBase\n'), ((55, 28, 55, 90), 'FactorioCalcBase.dependency_dict_common_function.dict_add_number', 'dict_add_number', (), '', False, 'from FactorioCalcBase.dependency_dict_common_function import dict_add_number\n')]
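Illustrative sketch (not part of the dataset record above): the five nested loops in test_calculator_base sweep a Cartesian product of parameters, which can equivalently be expressed with itertools.product. The helper name and its defaults are hypothetical; only the keyword names mirror the CalculatorBase call in the preceding content field.

from itertools import product

def iter_test_params(recipes, mrms=(0, 0.3), pm=(None,), uk=(True, False), am=(1, 101.5)):
    """Yield one kwargs dict per combination of the sweep parameters."""
    for recipe, modifier, machines, kovarex, amount in product(recipes, mrms, pm, uk, am):
        yield {
            'recipe_name': recipe,
            'amount': amount,
            'preferred_machine_list': machines,
            'use_kovarex': kovarex,
            'mining_research_modifier': modifier,
        }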
ahfeel/thrift
lib/py/src/Thrift.py
3ac3fa6fede4b2446209cfeb6fcae5900da543cc
# Copyright (c) 2006- Facebook
# Distributed under the Thrift Software License
#
# See accompanying file LICENSE or visit the Thrift site at:
# http://developers.facebook.com/thrift/


class TType:
    STOP = 0
    VOID = 1
    BOOL = 2
    BYTE = 3
    I08 = 3
    DOUBLE = 4
    I16 = 6
    I32 = 8
    I64 = 10
    STRING = 11
    UTF7 = 11
    STRUCT = 12
    MAP = 13
    SET = 14
    LIST = 15
    UTF8 = 16
    UTF16 = 17


class TMessageType:
    CALL = 1
    REPLY = 2
    EXCEPTION = 3


class TProcessor:
    """Base class for procsessor, which works on two streams."""

    def process(iprot, oprot):
        pass


class TException(Exception):
    """Base class for all thrift exceptions."""

    def __init__(self, message=None):
        Exception.__init__(self, message)
        self.message = message


class TApplicationException(TException):
    """Application level thrift exceptions."""

    UNKNOWN = 0
    UNKNOWN_METHOD = 1
    INVALID_MESSAGE_TYPE = 2
    WRONG_METHOD_NAME = 3
    BAD_SEQUENCE_ID = 4
    MISSING_RESULT = 5

    def __init__(self, type=UNKNOWN, message=None):
        TException.__init__(self, message)
        self.type = type

    def __str__(self):
        if self.message:
            return self.message
        elif self.type == UNKNOWN_METHOD:
            return 'Unknown method'
        elif self.type == INVALID_MESSAGE_TYPE:
            return 'Invalid message type'
        elif self.type == WRONG_METHOD_NAME:
            return 'Wrong method name'
        elif self.type == BAD_SEQUENCE_ID:
            return 'Bad sequence ID'
        elif self.type == MISSING_RESULT:
            return 'Missing result'
        else:
            return 'Default (unknown) TApplicationException'

    def read(self, iprot):
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRING:
                    self.message = iprot.readString();
                else:
                    iprot.skip(ftype)
            elif fid == 2:
                if ftype == TType.I32:
                    self.type = iprot.readI32();
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        oprot.writeStructBegin('TApplicationException')
        if self.message != None:
            oprot.writeFieldBegin('message', TType.STRING, 1)
            oprot.writeString(self.message)
            oprot.writeFieldEnd()
        if self.type != None:
            oprot.writeFieldBegin('type', TType.I32, 2)
            oprot.writeI32(self.type)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()
[]
nyumaya/wake-word-benchmark
engine.py
d2f7ac091d31403f3398bc3ef2e2de4876a4629e
# # Copyright 2018 Picovoice Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import os from collections import namedtuple from enum import Enum import numpy as np from pocketsphinx import get_model_path from pocketsphinx.pocketsphinx import Decoder from engines import Porcupine from engines import snowboydetect from engines import AudioRecognition, FeatureExtractor class Engines(Enum): POCKET_SPHINX = 'PocketSphinx' PORCUPINE = 'Porcupine' SNOWBOY = 'Snowboy' NYUMAYA = 'Nyumaya' SensitivityInfo = namedtuple('SensitivityInfo', 'min, max, step') class Engine(object): def process(self, pcm): raise NotImplementedError() def release(self): raise NotImplementedError() def __str__(self): raise NotImplementedError() @staticmethod def frame_length(engine_type): if engine_type is Engines.NYUMAYA: return 1600 else: return 512 @staticmethod def sensitivity_info(engine_type): if engine_type is Engines.POCKET_SPHINX: return SensitivityInfo(-21, 15, 3) elif engine_type is Engines.PORCUPINE: return SensitivityInfo(0, 1, 0.1) elif engine_type is Engines.SNOWBOY: return SensitivityInfo(0, 1, 0.05) elif engine_type is Engines.NYUMAYA: return SensitivityInfo(0, 1, 0.1) else: raise ValueError("no sensitivity range for '%s'", engine_type.value) @staticmethod def create(engine, keyword, sensitivity): if engine is Engines.POCKET_SPHINX: return PocketSphinxEngine(keyword, sensitivity) elif engine is Engines.PORCUPINE: return PorcupineEngine(keyword, sensitivity) elif engine is Engines.SNOWBOY: return SnowboyEngine(keyword, sensitivity) elif engine is Engines.NYUMAYA: return NyumayaEngine(keyword, sensitivity) else: ValueError("cannot create engine of type '%s'", engine.value) class PocketSphinxEngine(Engine): def __init__(self, keyword, sensitivity): config = Decoder.default_config() config.set_string('-logfn', '/dev/null') config.set_string('-hmm', os.path.join(get_model_path(), 'en-us')) config.set_string('-dict', os.path.join(get_model_path(), 'cmudict-en-us.dict')) config.set_string('-keyphrase', keyword if keyword != 'snowboy' else 'snow boy') config.set_float('-kws_threshold', 10 ** -sensitivity) self._decoder = Decoder(config) self._decoder.start_utt() def process(self, pcm): assert pcm.dtype == np.int16 self._decoder.process_raw(pcm.tobytes(), False, False) detected = self._decoder.hyp() if detected: self._decoder.end_utt() self._decoder.start_utt() return detected def release(self): self._decoder.end_utt() def __str__(self): return 'PocketSphinx' class PorcupineEngine(Engine): def __init__(self, keyword, sensitivity): self._porcupine = Porcupine( library_path=os.path.join(self._repo_path, 'lib/linux/x86_64/libpv_porcupine.so'), model_path=os.path.join(self._repo_path, 'lib/common/porcupine_params.pv'), keyword_paths=[os.path.join(self._repo_path, 'resources/keyword_files/linux/%s_linux.ppn' % keyword.lower())], sensitivities=[sensitivity]) def process(self, pcm): assert pcm.dtype == np.int16 return self._porcupine.process(pcm) == 0 def release(self): self._porcupine.delete() def __str__(self): return 'Porcupine' 
@property def _repo_path(self): return os.path.join(os.path.dirname(__file__), 'engines/porcupine') class SnowboyEngine(Engine): def __init__(self, keyword, sensitivity): keyword = keyword.lower() if keyword == 'alexa': model_relative_path = 'engines/snowboy/resources/alexa/alexa-avs-sample-app/alexa.umdl' else: model_relative_path = 'engines/snowboy/resources/models/%s.umdl' % keyword.replace(' ', '_') model_str = os.path.join(os.path.dirname(__file__), model_relative_path).encode() resource_filename = os.path.join(os.path.dirname(__file__), 'engines/snowboy/resources/common.res').encode() self._snowboy = snowboydetect.SnowboyDetect(resource_filename=resource_filename, model_str=model_str) # https://github.com/Kitt-AI/snowboy#pretrained-universal-models if keyword == 'jarvis': self._snowboy.SetSensitivity(('%f,%f' % (sensitivity, sensitivity)).encode()) else: self._snowboy.SetSensitivity(str(sensitivity).encode()) if keyword in {'alexa', 'computer', 'jarvis', 'view glass'}: self._snowboy.ApplyFrontend(True) else: self._snowboy.ApplyFrontend(False) def process(self, pcm): assert pcm.dtype == np.int16 return self._snowboy.RunDetection(pcm.tobytes()) == 1 def release(self): pass def __str__(self): return 'Snowboy' class NyumayaEngine(Engine): def __init__(self, keyword, sensitivity): #logging.info("INIT NYUMAYA") keyword = keyword.lower() model_relative_path = 'engines/nyumaya_audio_recognition/models/Hotword/%s_v1.0.0.premium' % keyword model_str = os.path.join(os.path.dirname(__file__), model_relative_path) libpath="engines/nyumaya_audio_recognition/lib/linux_x86_64/libnyumaya_premium.so.1.0.0" self._extractor = FeatureExtractor(libpath) self._detector = AudioRecognition(libpath) keywordId = self._detector.addModel(model_str,sensitivity) def process(self, pcm): assert pcm.dtype == np.int16 #logging.info(len(pcm)) features = self._extractor.signalToMel(pcm.tobytes(),1.0) return self._detector.runDetection(features) == 1 def release(self): pass def __str__(self): return 'Nyumaya'
[((35, 18, 35, 65), 'collections.namedtuple', 'namedtuple', ({(35, 29, 35, 46): '"""SensitivityInfo"""', (35, 48, 35, 64): '"""min, max, step"""'}, {}), "('SensitivityInfo', 'min, max, step')", False, 'from collections import namedtuple\n'), ((84, 17, 84, 41), 'pocketsphinx.pocketsphinx.Decoder.default_config', 'Decoder.default_config', ({}, {}), '()', False, 'from pocketsphinx.pocketsphinx import Decoder\n'), ((91, 24, 91, 39), 'pocketsphinx.pocketsphinx.Decoder', 'Decoder', ({(91, 32, 91, 38): 'config'}, {}), '(config)', False, 'from pocketsphinx.pocketsphinx import Decoder\n'), ((147, 24, 147, 109), 'engines.snowboydetect.SnowboyDetect', 'snowboydetect.SnowboyDetect', (), '', False, 'from engines import snowboydetect\n'), ((181, 26, 181, 51), 'engines.FeatureExtractor', 'FeatureExtractor', ({(181, 43, 181, 50): 'libpath'}, {}), '(libpath)', False, 'from engines import AudioRecognition, FeatureExtractor\n'), ((182, 25, 182, 50), 'engines.AudioRecognition', 'AudioRecognition', ({(182, 42, 182, 49): 'libpath'}, {}), '(libpath)', False, 'from engines import AudioRecognition, FeatureExtractor\n'), ((134, 28, 134, 53), 'os.path.dirname', 'os.path.dirname', ({(134, 44, 134, 52): '__file__'}, {}), '(__file__)', False, 'import os\n'), ((178, 33, 178, 58), 'os.path.dirname', 'os.path.dirname', ({(178, 49, 178, 57): '__file__'}, {}), '(__file__)', False, 'import os\n'), ((86, 47, 86, 63), 'pocketsphinx.get_model_path', 'get_model_path', ({}, {}), '()', False, 'from pocketsphinx import get_model_path\n'), ((87, 48, 87, 64), 'pocketsphinx.get_model_path', 'get_model_path', ({}, {}), '()', False, 'from pocketsphinx import get_model_path\n'), ((116, 25, 116, 93), 'os.path.join', 'os.path.join', ({(116, 38, 116, 53): 'self._repo_path', (116, 55, 116, 92): '"""lib/linux/x86_64/libpv_porcupine.so"""'}, {}), "(self._repo_path, 'lib/linux/x86_64/libpv_porcupine.so')", False, 'import os\n'), ((117, 23, 117, 86), 'os.path.join', 'os.path.join', ({(117, 36, 117, 51): 'self._repo_path', (117, 53, 117, 85): '"""lib/common/porcupine_params.pv"""'}, {}), "(self._repo_path, 'lib/common/porcupine_params.pv')", False, 'import os\n'), ((145, 33, 145, 58), 'os.path.dirname', 'os.path.dirname', ({(145, 49, 145, 57): '__file__'}, {}), '(__file__)', False, 'import os\n'), ((146, 41, 146, 66), 'os.path.dirname', 'os.path.dirname', ({(146, 57, 146, 65): '__file__'}, {}), '(__file__)', False, 'import os\n')]
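Illustrative sketch (not part of the dataset record above): feeding fixed-size int16 frames from a mono 16 kHz recording through one of the detectors defined in the preceding content field. The helper name and WAV path are hypothetical, the soundfile dependency is an assumption not used by the record, and the engine models must be available locally for Engine.create() to succeed.

import soundfile as sf  # assumed dependency for reading int16 PCM from a WAV file

def count_detections(engine_type, keyword, sensitivity, wav_path='sample.wav'):
    engine = Engine.create(engine_type, keyword, sensitivity)
    frame_length = Engine.frame_length(engine_type)
    pcm, _sample_rate = sf.read(wav_path, dtype='int16')  # mono 16 kHz assumed
    detections = 0
    # slide over the recording one non-overlapping frame at a time
    for start in range(0, len(pcm) - frame_length + 1, frame_length):
        if engine.process(pcm[start:start + frame_length]):
            detections += 1
    engine.release()
    return detections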
thirschbuechler/didactic-barnacles
objO_and_ctxMgr/harakiri.py
88d0a2b572aacb2cb45e68bb4f05fa5273224439
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 20 22:18:58 2020

@author: https://stackoverflow.com/questions/293431/python-object-deleting-itself
@editor: thirschbuechler

this is probably overkill to alternatively exit a with-context,
rather than by exception, but hey, maybe it will be needed,
or related to getting rid of the visa-handle within thvisa

# for some reason, __enter__ does not work in the with-context
"""

# NOTE: This is Python 3 code, it should work with python 2, but I haven't tested it.
import weakref  # https://docs.python.org/3/library/weakref.html


class InsaneClass(object):
    _alive = []

    def __new__(cls):  # there is a difference btw. cls and self, but i don't understand
        self = super().__new__(cls)
        InsaneClass._alive.append(self)
        return weakref.proxy(self)

    def commit_suicide(self):
        self._alive.remove(self)

    def __enter__(self):
        print("enter says hello")
        return self

    def __init__(self):
        pass

    def __exit__(self, exc_type, exc_value, tb):  # "with" context exit: call del
        print("bye")


if __name__ == '__main__':  # test if called as executable, not as library
    instance = InsaneClass()
    instance.__enter__()
    instance.commit_suicide()
    #print(instance)
    print(InsaneClass)  # pointer
    print(InsaneClass().__enter__())  # an object

    print("now, something completely different!")
    with InsaneClass() as i:
        i.commit_suicide()
        print(i)
[((24, 15, 24, 34), 'weakref.proxy', 'weakref.proxy', ({(24, 29, 24, 33): 'self'}, {}), '(self)', False, 'import weakref\n')]
srimani-programmer/Opencv-with-Python-Blueprints-second-Edition
chapter2/gestures.py
8762022a58a379229f02d7250d8344087d98516d
#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""A module containing an algorithm for hand gesture recognition"""

import numpy as np
import cv2

from typing import Tuple

__author__ = "Michael Beyeler"
__license__ = "GNU GPL 3.0 or later"


def recognize(img_gray):
    """Recognizes hand gesture in a single-channel depth image

    This method estimates the number of extended fingers based on
    a single-channel depth image showing a hand and arm region.

    :param img_gray: single-channel depth image
    :returns: (num_fingers, img_draw) The estimated number of extended
              fingers and an annotated RGB image
    """
    # segment arm region
    segment = segment_arm(img_gray)

    # find the hull of the segmented area, and based on that find the
    # convexity defects
    (contour, defects) = find_hull_defects(segment)

    # detect the number of fingers depending on the contours and convexity
    # defects, then draw defects that belong to fingers green, others red
    img_draw = cv2.cvtColor(segment, cv2.COLOR_GRAY2RGB)
    (num_fingers, img_draw) = detect_num_fingers(contour, defects, img_draw)

    return (num_fingers, img_draw)


def segment_arm(frame: np.ndarray, abs_depth_dev: int = 14) -> np.ndarray:
    """Segments arm region

    This method accepts a single-channel depth image of an arm and
    hand region and extracts the segmented arm region.
    It is assumed that the hand is placed in the center of the image.

    :param frame: single-channel depth image
    :returns: binary image (mask) of segmented arm region, where
              arm=255, else=0
    """
    height, width = frame.shape
    # find center (21x21 pixel) region of imageheight frame
    center_half = 10  # half-width of 21 is 21/2-1
    center = frame[height // 2 - center_half:height // 2 + center_half,
                   width // 2 - center_half:width // 2 + center_half]

    # find median depth value of center region
    med_val = np.median(center)

    # try this instead:
    frame = np.where(abs(frame - med_val) <= abs_depth_dev,
                     128, 0).astype(np.uint8)

    # morphological
    kernel = np.ones((3, 3), np.uint8)
    frame = cv2.morphologyEx(frame, cv2.MORPH_CLOSE, kernel)

    # connected component
    small_kernel = 3
    frame[height // 2 - small_kernel:height // 2 + small_kernel,
          width // 2 - small_kernel:width // 2 + small_kernel] = 128

    mask = np.zeros((height + 2, width + 2), np.uint8)
    flood = frame.copy()
    cv2.floodFill(flood, mask, (width // 2, height // 2), 255,
                  flags=4 | (255 << 8))

    ret, flooded = cv2.threshold(flood, 129, 255, cv2.THRESH_BINARY)
    return flooded


def find_hull_defects(segment: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
    """Find hull defects

    This method finds all defects in the hull of a segmented arm
    region.

    :param segment: a binary image (mask) of a segmented arm region,
                    where arm=255, else=0
    :returns: (max_contour, defects) the largest contour in the image
              and all corresponding defects
    """
    contours, hierarchy = cv2.findContours(segment, cv2.RETR_TREE,
                                           cv2.CHAIN_APPROX_SIMPLE)

    # find largest area contour
    max_contour = max(contours, key=cv2.contourArea)
    epsilon = 0.01 * cv2.arcLength(max_contour, True)
    max_contour = cv2.approxPolyDP(max_contour, epsilon, True)

    # find convexity hull and defects
    hull = cv2.convexHull(max_contour, returnPoints=False)
    defects = cv2.convexityDefects(max_contour, hull)

    return max_contour, defects


def detect_num_fingers(contour: np.ndarray, defects: np.ndarray,
                       img_draw: np.ndarray,
                       thresh_deg: float = 80.0) -> Tuple[int, np.ndarray]:
    """Detects the number of extended fingers

    This method determines the number of extended fingers based on a
    contour and convexity defects.
    It will annotate an RGB color image of the segmented arm region
    with all relevant defect points and the hull.

    :param contours: a list of contours
    :param defects: a list of convexity defects
    :param img_draw: an RGB color image to be annotated
    :returns: (num_fingers, img_draw) the estimated number of extended
              fingers and an annotated RGB color image
    """
    # if there are no convexity defects, possibly no hull found or no
    # fingers extended
    if defects is None:
        return [0, img_draw]

    # we assume the wrist will generate two convexity defects (one on each
    # side), so if there are no additional defect points, there are no
    # fingers extended
    if len(defects) <= 2:
        return [0, img_draw]

    # if there is a sufficient amount of convexity defects, we will find a
    # defect point between two fingers so to get the number of fingers,
    # start counting at 1
    num_fingers = 1

    # Defects are of shape (num_defects,1,4)
    for defect in defects[:, 0, :]:
        # Each defect is an array of four integers.
        # First three indexes of start, end and the furthest
        # points respectively
        # contour is of shape (num_points,1,2) - 2 for point coordinates
        start, end, far = [contour[i][0] for i in defect[:3]]

        # draw the hull
        cv2.line(img_draw, tuple(start), tuple(end), (0, 255, 0), 2)

        # if angle is below a threshold, defect point belongs to two
        # extended fingers
        if angle_rad(start - far, end - far) < deg2rad(thresh_deg):
            # increment number of fingers
            num_fingers += 1

            # draw point as green
            cv2.circle(img_draw, tuple(far), 5, (0, 255, 0), -1)
        else:
            # draw point as red
            cv2.circle(img_draw, tuple(far), 5, (0, 0, 255), -1)

    # make sure we cap the number of fingers
    return min(5, num_fingers), img_draw


def angle_rad(v1, v2):
    """Angle in radians between two vectors

    This method returns the angle (in radians) between two array-like
    vectors using the cross-product method, which is more accurate for
    small angles than the dot-product-acos method.
    """
    return np.arctan2(np.linalg.norm(np.cross(v1, v2)), np.dot(v1, v2))


def deg2rad(angle_deg):
    """Convert degrees to radians

    This method converts an angle in radians e[0,2*np.pi) into
    degrees e[0,360)
    """
    return angle_deg / 180.0 * np.pi
[((32, 15, 32, 56), 'cv2.cvtColor', 'cv2.cvtColor', ({(32, 28, 32, 35): 'segment', (32, 37, 32, 55): 'cv2.COLOR_GRAY2RGB'}, {}), '(segment, cv2.COLOR_GRAY2RGB)', False, 'import cv2\n'), ((56, 14, 56, 31), 'numpy.median', 'np.median', ({(56, 24, 56, 30): 'center'}, {}), '(center)', True, 'import numpy as np\n'), ((63, 13, 63, 38), 'numpy.ones', 'np.ones', ({(63, 21, 63, 27): '(3, 3)', (63, 29, 63, 37): 'np.uint8'}, {}), '((3, 3), np.uint8)', True, 'import numpy as np\n'), ((64, 12, 64, 60), 'cv2.morphologyEx', 'cv2.morphologyEx', ({(64, 29, 64, 34): 'frame', (64, 36, 64, 51): 'cv2.MORPH_CLOSE', (64, 53, 64, 59): 'kernel'}, {}), '(frame, cv2.MORPH_CLOSE, kernel)', False, 'import cv2\n'), ((71, 11, 71, 54), 'numpy.zeros', 'np.zeros', ({(71, 20, 71, 43): '(height + 2, width + 2)', (71, 45, 71, 53): 'np.uint8'}, {}), '((height + 2, width + 2), np.uint8)', True, 'import numpy as np\n'), ((73, 4, 74, 39), 'cv2.floodFill', 'cv2.floodFill', (), '', False, 'import cv2\n'), ((76, 19, 76, 68), 'cv2.threshold', 'cv2.threshold', ({(76, 33, 76, 38): 'flood', (76, 40, 76, 43): '129', (76, 45, 76, 48): '255', (76, 50, 76, 67): 'cv2.THRESH_BINARY'}, {}), '(flood, 129, 255, cv2.THRESH_BINARY)', False, 'import cv2\n'), ((90, 26, 91, 67), 'cv2.findContours', 'cv2.findContours', ({(90, 43, 90, 50): 'segment', (90, 52, 90, 65): 'cv2.RETR_TREE', (91, 43, 91, 66): 'cv2.CHAIN_APPROX_SIMPLE'}, {}), '(segment, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)', False, 'import cv2\n'), ((96, 18, 96, 62), 'cv2.approxPolyDP', 'cv2.approxPolyDP', ({(96, 35, 96, 46): 'max_contour', (96, 48, 96, 55): 'epsilon', (96, 57, 96, 61): 'True'}, {}), '(max_contour, epsilon, True)', False, 'import cv2\n'), ((99, 11, 99, 58), 'cv2.convexHull', 'cv2.convexHull', (), '', False, 'import cv2\n'), ((100, 14, 100, 53), 'cv2.convexityDefects', 'cv2.convexityDefects', ({(100, 35, 100, 46): 'max_contour', (100, 48, 100, 52): 'hull'}, {}), '(max_contour, hull)', False, 'import cv2\n'), ((95, 21, 95, 53), 'cv2.arcLength', 'cv2.arcLength', ({(95, 35, 95, 46): 'max_contour', (95, 48, 95, 52): '(True)'}, {}), '(max_contour, True)', False, 'import cv2\n'), ((169, 56, 169, 70), 'numpy.dot', 'np.dot', ({(169, 63, 169, 65): 'v1', (169, 67, 169, 69): 'v2'}, {}), '(v1, v2)', True, 'import numpy as np\n'), ((169, 37, 169, 53), 'numpy.cross', 'np.cross', ({(169, 46, 169, 48): 'v1', (169, 50, 169, 52): 'v2'}, {}), '(v1, v2)', True, 'import numpy as np\n')]
jnippula/satt
satt/trace/logger/panic.py
aff4562b7e94f095d2e13eb10b9ac872484bb5cd
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
// Copyright (c) 2015 Intel Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
'''

""" PanicLogger RAM-tracing """

import sys
import time
from logger import Logger


class PanicLogger(Logger):
    """ Panic logger """

    def __init__(self, control):
        # Base class init call
        Logger.__init__(self, control)

        # Add default kernel module parameter for RAM-tracing
        self._kernel_module_parameters += " trace_method=1 sideband_log_method=1"

        # Add more option to command line input
        self._parser.add_argument('-p', '--panic', action='store',
                                  help='Panic tracing mode: 1=Normal, 2=Hooked(default)',
                                  required=False, default=2)
        self._parser.add_argument('-s', '--sideband', action='store',
                                  help='Panic tracing mode: 0=Off, 1=On(default)',
                                  required=False, default=1)
        self._parser.add_argument('-g', '--gbuffer', action='store',
                                  help='Dump trace data to gbuffer: 0=Off, 1=On(default)',
                                  required=False, default=1)
        self._parser.add_argument('-u', '--userspace', action='store',
                                  help='Exclude user space: 0=Off, 1=On(default)',
                                  required=False, default=1)
        self._parser.add_argument('-k', '--kernel', action='store',
                                  help='Exclude kernel: 0=Off(default), 1=On',
                                  required=False, default=0)
        self._parser.add_argument('-d', '--dump', action='store',
                                  help='Dump kernel and kernel modules for processing: 0=Off, 1=On(default)',
                                  required=False, default=0)
        self.args = self._parser.parse_args()

        self._kernel_module_parameters += " panic_tracer=" + str(self.args.panic)
        self._kernel_module_parameters += " panic_sideband=" + str(self.args.sideband)
        self._kernel_module_parameters += " panic_gbuffer=" + str(self.args.gbuffer)
        self._kernel_module_parameters += " exclude_userspace=" + str(self.args.userspace)
        self._kernel_module_parameters += " exclude_kernel=" + str(self.args.kernel)

    def initialize(self):
        self._debug_print("PanicLogger::initialize")
        # Initialize Logger base class
        Logger.initialize(self)

        # Call start_tracing earlier to stop execution earlier
        self.start_tracing()

    def start_tracing(self):
        self._debug_print("start_tracing")
        trace_name, trace_path = self.get_trace_name("Enter <<trace name>> to start panic tracing? :")
        if trace_name:
            self.set_trace_path(trace_path, trace_name)
            self.get_build_info()
            # TODO Problem, there is no Sideband.bin info yet
            # Quick Fix
            # Start tracing, wait 100ms, Stop tracing, fetch sideband info
            Logger.start_tracing(self)
            time.sleep(0.2)
            Logger.stop_tracing(self)
            time.sleep(0.2)
            Logger.get_sideband_data(self)
            self.dump_kernel()
            self.dump_linux_gate()
            self.dump_kernel_modules()
            Logger.start_tracing(self)
            print ""
            print "Panic tracing activated"
            print "If panic happens, wait 10s and reboot device."
            print ""
            print "When device boot up run following command:"
            print "sat-panic-fetch " + self.trace_name
            sys.exit(0)
        else:
            print "Panic Tracer did not get started"

    def stop_tracing(self):
        return

    def get_data(self):
        return

    def get_trace_data(self):
        return
[]
csalcedo001/xlab
xlab/cli.py
8c51f035a870dd57339ff0208a3ab27ef6b8b41f
import sys
import os

from . import filesys


MAIN_USAGE_MESSAGE = """
usage: xlab command ...

Options:

positional arguments:
  command
    project
"""


def project(args):
    if len(args) != 1:
        print("error: Invalid arguments.")
        exit()

    if args[0] == 'init':
        root = os.getcwd()
        dirs = filesys.Directories()
        dirs.set_root(root)


def main():
    if len(sys.argv) <= 1:
        print(MAIN_USAGE_MESSAGE)
        exit()

    command = sys.argv[1]
    args = sys.argv[2:]

    if command == 'project':
        exe = project
    else:
        print("error: No command 'xlab {}'.".format(command))
        exit()

    exe(args)
[((22, 15, 22, 26), 'os.getcwd', 'os.getcwd', ({}, {}), '()', False, 'import os\n')]
jzhang533/Paddle
python/paddle/optimizer/adamw.py
3227b2c401a80104e0c01dedcef2061ffa1ebbed
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from .optimizer import Optimizer
from .adam import Adam
from ..fluid import core
from ..fluid import framework
from ..fluid.framework import Variable
from ..fluid.dygraph import base as imperative_base
from collections import Callable
import paddle

_C_ops = core.ops

__all__ = []


class AdamW(Adam):
    r"""
    The AdamW optimizer is implemented based on the AdamW Optimization
    in paper `DECOUPLED WEIGHT DECAY REGULARIZATION <https://arxiv.org/pdf/1711.05101.pdf>`_.
    it can resolves the problem of L2 regularization failure in the Adam optimizer.

    .. math::

        t & = t + 1

        moment\_1\_out & = {\beta}_1 * moment\_1 + (1 - {\beta}_1) * grad

        moemnt\_2\_out & = {\beta}_2 * moment\_2 + (1 - {\beta}_2) * grad * grad

        learning\_rate & = learning\_rate * \frac{\sqrt{1 - {\beta}_2^t}}{1 - {beta}_1^t}

        param\_out & = param - learning\_rate * (\frac{moment\_1}{\sqrt{moment\_2} + \epsilon} + \lambda * param)

    Args:
        learning_rate (float|LRScheduler, optional): The learning rate used to update ``Parameter``.
            It can be a float value or a LRScheduler. The default value is 0.001.
        parameters (list|tuple, optional): List/Tuple of ``Tensor`` names to update to minimize ``loss``. \
            This parameter is required in dygraph mode. And you can specify different options for \
            different parameter groups such as the learning rate, weight decay, etc, \
            then the parameters are list of dict. Note that the learning_rate in paramter groups \
            represents the scale of base learning_rate. \
            The default value is None in static mode, at this time all parameters will be updated.
        beta1 (float|Tensor, optional): The exponential decay rate for the 1st moment estimates.
            It should be a float number or a Tensor with shape [1] and data type as float32.
            The default value is 0.9.
        beta2 (float|Tensor, optional): The exponential decay rate for the 2nd moment estimates.
            It should be a float number or a Tensor with shape [1] and data type as float32.
            The default value is 0.999.
        epsilon (float, optional): A small float value for numerical stability.
            The default value is 1e-08.
        weight_decay (float|Tensor, optional): The weight decay coefficient, it can be float or Tensor. The default value is 0.01.
        lr_ratio (function|None, optional): If it is not None,
            the learning rate will be updated with layerwise learning rate ratio.
            Otherwise, the learning rate is the original.
            Default: None.
        apply_decay_param_fun (function|None, optional): If it is not None,
            only tensors that makes apply_decay_param_fun(Tensor.name)==True
            will be updated with weight decay. It only works when we want to specify tensors.
            Default: None.
        grad_clip (GradientClipBase, optional): Gradient cliping strategy, it's an instance of
            some derived class of ``GradientClipBase`` . There are three cliping strategies
            ( :ref:`api_fluid_clip_GradientClipByGlobalNorm` , :ref:`api_fluid_clip_GradientClipByNorm` ,
            :ref:`api_fluid_clip_GradientClipByValue` ). Default None, meaning there is no gradient clipping.
        lazy_mode (bool, optional): The official Adam algorithm has two moving-average accumulators.
            The accumulators are updated at every step. Every element of the two moving-average
            is updated in both dense mode and sparse mode. If the size of parameter is very large,
            then the update may be very slow. The lazy mode only update the element that has
            gradient in current mini-batch, so it will be much more faster. But this mode has
            different semantics with the original Adam algorithm and may lead to different result.
            The default value is False.
        multi_precision (bool, optional): Whether to use multi-precision during weight updating. Default is false.
        name (str, optional): Normally there is no need for user to set this property.
            For more information, please refer to :ref:`api_guide_Name`.
            The default value is None.
    **Notes**:
        **Currently, AdamW doesn't support sparse parameter optimization.**

    Examples:
        .. code-block:: python

            import paddle

            linear = paddle.nn.Linear(10, 10)
            inp = paddle.rand([10,10], dtype="float32")
            out = linear(inp)
            loss = paddle.mean(out)

            beta1 = paddle.to_tensor([0.9], dtype="float32")
            beta2 = paddle.to_tensor([0.99], dtype="float32")

            adam = paddle.optimizer.AdamW(learning_rate=0.1,
                    parameters=linear.parameters(),
                    beta1=beta1,
                    beta2=beta2,
                    weight_decay=0.01)
            out.backward()
            adam.step()
            adam.clear_grad()


            #Note that the learning_rate of linear_2 is 0.01.
            linear_1 = paddle.nn.Linear(10, 10)
            linear_2 = paddle.nn.Linear(10, 10)
            inp = paddle.uniform(shape=[10, 10], min=-0.1, max=0.1)
            out = linear_1(inp)
            out = linear_2(out)
            loss = paddle.mean(out)
            adam = paddle.optimizer.AdamW(
                learning_rate=0.1,
                parameters=[{
                    'params': linear_1.parameters()
                }, {
                    'params': linear_2.parameters(),
                    'weight_decay': 0.001,
                    'learning_rate': 0.1,
                    'beta1': 0.8
                }],
                weight_decay=0.01,
                beta1=0.9)
            out.backward()
            adam.step()
            adam.clear_grad()

    """

    def __init__(self,
                 learning_rate=0.001,
                 beta1=0.9,
                 beta2=0.999,
                 epsilon=1e-8,
                 parameters=None,
                 weight_decay=0.01,
                 lr_ratio=None,
                 apply_decay_param_fun=None,
                 grad_clip=None,
                 lazy_mode=False,
                 multi_precision=False,
                 name=None):
        assert learning_rate is not None
        assert beta1 is not None
        assert beta2 is not None
        assert epsilon is not None
        if not 0 <= beta1 < 1:
            raise ValueError("Invaild value of beta1, expect beta1 in [0,1).")
        if not 0 <= beta2 < 1:
            raise ValueError("Invaild value of beta2, expect beta2 in [0,1).")
        if not 0 <= epsilon:
            raise ValueError("Invaild value of epsilon, expect epsilon >= 0.")
        coeff = weight_decay
        if not isinstance(coeff, float) and \
                not isinstance(coeff, framework.Variable):
            raise TypeError("coeff should be float or Tensor.")
        self._params_name = set()
        self._apply_decay_param_fun = apply_decay_param_fun
        self._coeff = coeff
        self._lr_to_coeff = dict()
        if lr_ratio is not None:
            assert isinstance(lr_ratio, Callable)
            if core.is_compiled_with_xpu() or core.is_compiled_with_npu():
                raise NotImplementedError(
                    "'lr_ratio' is unimplemented in XPU and NPU")
        self._lr_ratio = lr_ratio

        super(AdamW, self).__init__(
            learning_rate=learning_rate,
            parameters=parameters,
            beta1=beta1,
            beta2=beta2,
            epsilon=epsilon,
            grad_clip=grad_clip,
            name=name,
            lazy_mode=lazy_mode,
            multi_precision=multi_precision)
        self._default_dict = {'coeff': coeff}

        self.type = "adamw"

        if core.is_compiled_with_xpu():
            self.type = "adam"

        # Use _auxiliary_vars together with _set_auxiliary_var/_get_auxiliary_var to achieve that.
        self._auxiliary_vars = dict()

    def _set_auxiliary_var(self, key, val):
        self._auxiliary_vars[key] = val

    def _get_auxiliary_var(self, key):
        if key in self._auxiliary_vars:
            return self._auxiliary_vars[key]
        else:
            return None

    def _append_decoupled_weight_decay(self, block, param_and_grad):
        """
        Add decoupled weight decay op.
            parameter = parameter - parameter * coeff * lr

        Args:
            block: block in which variable is to be created
            param_and_grad: (parameters, gradients) pairs,
                the parameters need to decay.
        Raises:
            Exception: The type of coeff and parameter is not consistent.
        """
        if isinstance(param_and_grad, dict):
            param_and_grad = self._update_param_group(param_and_grad)
        param, grad = param_and_grad

        if self._apply_decay_param_fun is not None \
                and not self._apply_decay_param_fun(param.name):
            return

        if isinstance(self._learning_rate, float):
            learning_rate = self._learning_rate
        else:
            # NOTE. We add this function to the _append_optimize_op(),
            # for we must make sure _create_param_lr() be called after
            # optimizer._create_global_learning_rate().
            learning_rate = self._create_param_lr(param_and_grad)

        with block.program._optimized_guard(
            [param, grad]), framework.name_scope('weight decay'):
            self._params_name.add(param.name)

            # If it has been calculated, the result will be reused.
            # NOTE(wangxi): In dygraph mode, apply_gradient will be executed
            # every step, so need clear _lr_to_coeff every step,
            # we do this in _create_optimization_pass
            decay_coeff = self._lr_to_coeff.get(learning_rate, None)
            if decay_coeff is None:
                # NOTE(wangxi): for pipeline to set device:all
                with paddle.static.device_guard(None):
                    decay_coeff = 1.0 - learning_rate * self._coeff
                self._lr_to_coeff[learning_rate] = decay_coeff

            find_master = (self._multi_precision and
                           param.dtype == core.VarDesc.VarType.FP16)
            if find_master:
                master_weight = self._master_weights[param.name]
                scaled_param = master_weight * decay_coeff
                paddle.fluid.layers.assign(
                    input=scaled_param, output=master_weight)
            else:
                scaled_param = param * decay_coeff
                paddle.fluid.layers.assign(input=scaled_param, output=param)

    def _append_optimize_op(self, block, param_and_grad):
        if paddle.is_compiled_with_xpu():
            self._append_decoupled_weight_decay(block, param_and_grad)
            return super(AdamW, self)._append_optimize_op(block, param_and_grad)

        assert isinstance(block, framework.Block)
        if isinstance(param_and_grad, dict):
            param_and_grad = self._update_param_group(param_and_grad)
        param, grad = param_and_grad

        # Whether we should do weight decay for the parameter.
        with_decay = True
        if self._apply_decay_param_fun is not None \
                and not self._apply_decay_param_fun(param.name):
            with_decay = False

        moment1 = self._get_accumulator(self._moment1_acc_str,
                                        param_and_grad[0])
        moment2 = self._get_accumulator(self._moment2_acc_str,
                                        param_and_grad[0])
        beta1_pow_acc = self._get_accumulator(self._beta1_pow_acc_str,
                                              param_and_grad[0])
        beta2_pow_acc = self._get_accumulator(self._beta2_pow_acc_str,
                                              param_and_grad[0])
        find_master = self._multi_precision and param_and_grad[
            0].dtype == core.VarDesc.VarType.FP16
        master_weight = (self._master_weights[param_and_grad[0].name]
                         if find_master else None)
        lr = self._create_param_lr(param_and_grad)

        # create the adamw optimize op
        if framework.in_dygraph_mode():
            lr_ratio_ = 1. if self._lr_ratio is None else self._lr_ratio(
                param_and_grad[0])

            _beta1 = self._beta1 if not isinstance(
                self._beta1, Variable) else self._beta1.numpy().item(0)
            _beta2 = self._beta2 if not isinstance(
                self._beta2, Variable) else self._beta2.numpy().item(0)

            _, _, _, _, _ = _C_ops.adamw(
                param_and_grad[0], param_and_grad[1], lr, moment1, moment2,
                beta1_pow_acc, beta2_pow_acc, param_and_grad[0], moment1,
                moment2, beta1_pow_acc, beta2_pow_acc, 'epsilon', self._epsilon,
                'lazy_mode', self._lazy_mode, 'min_row_size_to_use_multithread',
                1000, 'beta1', _beta1, 'beta2', _beta2, 'coeff', self._coeff,
                "lr_ratio", lr_ratio_)

            return None

        inputs = {
            "Param": [param_and_grad[0]],
            "Grad": [param_and_grad[1]],
            "LearningRate": [lr],
            "Moment1": [moment1],
            "Moment2": [moment2],
            "Beta1Pow": [beta1_pow_acc],
            "Beta2Pow": [beta2_pow_acc],
        }

        # Pass found_inf to adamw, to skip update for not only param, but also momentum and beta_pow
        found_inf = self._get_auxiliary_var('found_inf')

        if found_inf:
            inputs['SkipUpdate'] = found_inf

        outputs = {
            "ParamOut": [param_and_grad[0]],
            "Moment1Out": [moment1],
            "Moment2Out": [moment2],
            "Beta1PowOut": [beta1_pow_acc],
            "Beta2PowOut": [beta2_pow_acc],
        }
        attrs = {
            "lazy_mode": self._lazy_mode,
            "min_row_size_to_use_multithread": 1000,
            "multi_precision": find_master,
            "with_decay": with_decay,
            "coeff": self._coeff,
            "lr_ratio": 1.
            if self._lr_ratio is None else self._lr_ratio(param_and_grad[0])
        }

        if isinstance(self._beta1, Variable):
            inputs['Beta1Tensor'] = self._beta1
        else:
            attrs['beta1'] = self._beta1
        if isinstance(self._beta2, Variable):
            inputs['Beta2Tensor'] = self._beta2
        else:
            attrs['beta2'] = self._beta2
        if isinstance(self._epsilon, Variable):
            inputs['EpsilonTensor'] = self._epsilon
        else:
            attrs['epsilon'] = self._epsilon

        if find_master:
            inputs["MasterParam"] = master_weight
            outputs["MasterParamOut"] = master_weight

        adamw_op = block.append_op(
            type=self.type,
            inputs=inputs,
            outputs=outputs,
            attrs=attrs,
            stop_gradient=True)

        return adamw_op

    def _create_optimization_pass(self, parameters_and_grads):
        optimize_ops = super(
            AdamW, self)._create_optimization_pass(parameters_and_grads)
        # In dygraph mode, clear _lr_to_coeff after applied gradient
        self._lr_to_coeff = dict()
        return optimize_ops

    def __str__(self):
        return " ".join(["Weight Decay, params:", ",".join(self._params_name)])

    def _update_param_group(self, parameters):
        self._coeff = parameters.get('coeff', self._default_dict['coeff'])
        parameters = parameters.get('params')
        return parameters
[((262, 11, 262, 40), 'paddle.is_compiled_with_xpu', 'paddle.is_compiled_with_xpu', ({}, {}), '()', False, 'import paddle\n'), ((255, 16, 256, 61), 'paddle.fluid.layers.assign', 'paddle.fluid.layers.assign', (), '', False, 'import paddle\n'), ((259, 16, 259, 76), 'paddle.fluid.layers.assign', 'paddle.fluid.layers.assign', (), '', False, 'import paddle\n'), ((246, 21, 246, 53), 'paddle.static.device_guard', 'paddle.static.device_guard', ({(246, 48, 246, 52): 'None'}, {}), '(None)', False, 'import paddle\n')]
VinLau/BAR_API
tests/resources/test_interactions.py
0719a5fbc08872f667590b27347af9bfed669bca
from api import app
from unittest import TestCase


class TestIntegrations(TestCase):
    maxDiff = None

    def setUp(self):
        self.app_client = app.test_client()

    def test_get_itrns(self):
        """
        This function test retrieving protein interactions for various species' genes.
        """
        # Valid request rice
        response = self.app_client.get("/interactions/rice/LOC_Os01g52560")
        expected = {
            "wasSuccessful": True,
            "data": [
                {
                    "protein_1": "LOC_Os01g01080",
                    "protein_2": "LOC_Os01g52560",
                    "total_hits": 1,
                    "Num_species": 1,
                    "Quality": 1,
                    "pcc": 0.65,
                },
                {
                    "protein_1": "LOC_Os01g52560",
                    "protein_2": "LOC_Os01g73310",
                    "total_hits": 1,
                    "Num_species": 1,
                    "Quality": 1,
                    "pcc": -0.116,
                },
            ],
        }
        self.assertEqual(response.json, expected)

        # Invalid species
        response = self.app_client.get("/interactions/poplar/abc")
        expected = {"wasSuccessful": False, "error": "Invalid species or gene ID"}
        self.assertEqual(response.json, expected)

        # Invalid gene id
        response = self.app_client.get("/interactions/rice/abc")
        expected = {"wasSuccessful": False, "error": "Invalid species or gene ID"}
        self.assertEqual(response.json, expected)

        # Gene does not exist
        response = self.app_client.get("/interactions/rice/LOC_Os01g52565")
        expected = {
            "wasSuccessful": False,
            "error": "There are no data found for the given gene",
        }
        self.assertEqual(response.json, expected)
[((10, 26, 10, 43), 'api.app.test_client', 'app.test_client', ({}, {}), '()', False, 'from api import app\n')]
16kozlowskim/Group-20-SE
src/dialogflow-java-client-master/samples/clients/VirtualTradingAssistant/src/main/java/ai/examples/scraper/historicalScrape.py
ceb8c319643964a3f478772d8f10090962df567c
# install BeautifulSoup4 before running
#
# prints out historical data in csv format:
#
# [date, open, high, low, close, volume]
#

import re, csv, sys, urllib2
from bs4 import BeautifulSoup

# If start date and end date is the same only one value will be returned and
# if not the multiple values which can be used to make calculations
#
# ticker (company symbol)
# interval (d (daily), m (monthly), q (quarterly), y (yearly))
# start_date (YYYYMMDD)
# end_date (YYYYMMDD)
def get_historical_data(ticker, interval, start_date, end_date):
    #pathToCSV = '/Users/Michal/Downloads/dialogflow-java-client-master2/samples/clients/VirtualTradingAssistant/src/main/java/ai/api/examples/fileStore/file.csv'
    #pathToCSV = 'C:\\Users\\ojwoo\\Documents\\Warwick\\CS261\\Coursework\\dialogflow-java-client-master\\samples\\clients\\VirtualTradingAssistant\\src\\main\\java\\ai\\api\\examples\\fileStore\\file.csv'
    #pathToCSV = '/Users/Michal/Desktop/apache-tomcat-8.5.28/bin/misc/file.csv'
    pathToCSV = 'C:\\apache-tomcat-8.5.28\\bin\\misc\\file.csv'

    url_builder = []
    url_builder.append('https://stooq.com/q/d/?s=')
    url_builder.append(ticker)
    url_builder.append('&c=0&d1=')
    url_builder.append(start_date)
    url_builder.append('&d2=')
    url_builder.append(end_date)
    url_builder.append('&i=')
    url_builder.append(interval)

    url = ''.join(url_builder)

    page = urllib2.urlopen(url)

    soup = BeautifulSoup(page, 'html.parser')

    link = soup.findAll('a', href=re.compile('^q/d/l/'))

    link = re.search('"(.*)"', str(link))

    try:
        link = link.group(1)
    except AttributeError:
        with open(pathToCSV, 'w') as csvfile:
            wr = csv.writer(csvfile, delimiter='@', quotechar='#')
            wr.writerow('')
        exit()

    link = link.replace('amp;', '')

    arr = []
    arr.append('https://stooq.com/')
    arr.append(link)

    link = ''.join(arr)

    response = urllib2.urlopen(link)

    cr = csv.reader(response)

    with open(pathToCSV, 'w') as csvfile:
        wr = csv.writer(csvfile, delimiter='@', quotechar='#')
        wr.writerows(cr)


def main():
    args = sys.argv
    get_historical_data(args[1], args[2], args[3], args[4])


if __name__ == '__main__':
    main()
[((36, 11, 36, 31), 'urllib2.urlopen', 'urllib2.urlopen', ({(36, 27, 36, 30): 'url'}, {}), '(url)', False, 'import re, csv, sys, urllib2\n'), ((38, 11, 38, 45), 'bs4.BeautifulSoup', 'BeautifulSoup', ({(38, 25, 38, 29): 'page', (38, 31, 38, 44): '"""html.parser"""'}, {}), "(page, 'html.parser')", False, 'from bs4 import BeautifulSoup\n'), ((62, 15, 62, 36), 'urllib2.urlopen', 'urllib2.urlopen', ({(62, 31, 62, 35): 'link'}, {}), '(link)', False, 'import re, csv, sys, urllib2\n'), ((64, 9, 64, 29), 'csv.reader', 'csv.reader', ({(64, 20, 64, 28): 'response'}, {}), '(response)', False, 'import re, csv, sys, urllib2\n'), ((66, 13, 66, 62), 'csv.writer', 'csv.writer', (), '', False, 'import re, csv, sys, urllib2\n'), ((40, 34, 40, 55), 're.compile', 're.compile', ({(40, 45, 40, 54): '"""^q/d/l/"""'}, {}), "('^q/d/l/')", False, 'import re, csv, sys, urllib2\n'), ((48, 17, 48, 66), 'csv.writer', 'csv.writer', (), '', False, 'import re, csv, sys, urllib2\n')]
odontomachus/hotbox
client/client.py
d42c48d7f056f2b1f7bd707ad674e737a3c2fe08
import sys
import io
from collections import defaultdict
import struct
from time import sleep
import queue
import threading

import serial
from serial import SerialException

RUN_LABELS = ('Time left', 'Temp 1', 'Temp 2', 'Off Goal', 'Temp Change',
              'Duty cycle (/30)', 'Heating', 'Cycle', 'Total time', 'Goal temp')

MSG_RUN_STATUS = 1
MSG_CONFIG = 2
MSG_STATUS = 3

MSG_LENGTHS = {MSG_RUN_STATUS: 20, MSG_CONFIG: 9, MSG_STATUS: 5}

STATE_START = 1
STATE_ACTIVE = 2
STATE_READY = 3
STATE_BOOT = 4
STATE_INIT = 5
STATE_DISCONNECTED = 127  # can't connect to serial

HB_CYCLE = 30


class RunStatus:
    __slots__ = ('countdown', 't1', 't2', 'dg', 'dt', 'part',
                 'state', 'cycle', 'time', 'goal')

    def __init__(self, message):
        (self.t1,
         self.t2,
         self.countdown,
         self.part,
         self.cycle,
         self.state,
         self.dg,
         self.dt,
         self.time,
         self.goal,
         ) = struct.unpack('=BBLBB?bbLB', message)

    def __str__(self):
        return "\t".join(
            map(str,
                (self.countdown,
                 self.t1,
                 self.t2,
                 self.dg,
                 self.dt,
                 self.part,
                 "On" if self.state else "Off",
                 self.state,
                 self.cycle,
                 self.time,
                 self.goal,
                 )
                ))


class OvenConfig:
    __slots__ = ('temp', 'time')

    def __init__(self, message):
        (self.time, self.temp) = struct.unpack('=LB', message)


class OvenStatus:
    __slots__ = ('status',)

    def __init__(self, message):
        self.status = message[0]


def check_connection(fun):
    def inner(self, *args, **kwargs):
        if self.state == "connected":
            try:
                fun(self, *args, **kwargs)
            except SerialException:
                self.disconnect()
            # workaround for bug in pyserial
            # http://sourceforge.net/p/pyserial/patches/37/
            except TypeError as e:
                self.disconnect()
    return inner


class Client(threading.Thread):
    """ Client class for hotbox serial connection """

    parsers = {
        MSG_STATUS: OvenStatus,
        MSG_RUN_STATUS: RunStatus,
        MSG_CONFIG: OvenConfig,
    }

    def __init__(self):
        super().__init__()
        self.state = 'disconnected'
        self.msg_queue = {MSG_STATUS: queue.Queue(),
                          MSG_CONFIG: queue.Queue(),
                          MSG_RUN_STATUS: queue.Queue(),
                          }

    def connect(self, port):
        try:
            self.conn = serial.Serial(port, 9600, timeout=0.05)
            # empty buffer
            while len(self.conn.read(1)) > 0:
                pass
            self.state = 'connected'
            sleep(0.01)
            self.oven_query_config()
            sleep(0.2)
            self.oven_status()
        except SerialException:
            self.disconnect()
        # workaround for bug in pyserial
        # http://sourceforge.net/p/pyserial/patches/37/
        except TypeError as e:
            self.disconnect()
        finally:
            self.start_message = 0

    def run(self):
        self.running = 1
        parsed_length = 0
        mtype = 0
        msg_length = 0
        while self.running:
            # Don't do anything if disconnected
            if (self.state == 'disconnected'):
                sleep(0.1)
                continue

            try:
                c = self.conn.read(1)
            except SerialException:
                self.disconnect()
                continue
            # workaround for bug in pyserial
            # http://sourceforge.net/p/pyserial/patches/37/
            except TypeError as e:
                self.disconnect()
                continue

            # wait for message
            if not c:
                continue

            # this is the message type byte
            if parsed_length == 3:
                parsed_length += 1
                if c[0] == 0:
                    continue
                mtype = c[0]
                msg_length = MSG_LENGTHS[mtype]
                buffer = bytes()
                continue

            if parsed_length < 3:
                # Abort if not a null byte
                if c[0]:
                    parsed_length = 0
                    continue
                # otherwise increment parsed length
                parsed_length += 1
                continue

            # in any other case this is a data byte
            parsed_length += 1
            buffer += c

            if parsed_length == msg_length:
                data = self.parsers[mtype](buffer)
                self.msg_queue[mtype].put(data)
                parsed_length = 0
                mtype = 0
                msg_length = 0

    @check_connection
    def oven_configure(self, ctime, temp):
        self.conn.write(b'c'+struct.pack('=LB', ctime, temp))

    @check_connection
    def oven_start(self):
        self.conn.write(b's')

    @check_connection
    def oven_stop(self):
        self.conn.write(b't')

    @check_connection
    def oven_status(self):
        self.conn.write(b'r')

    @check_connection
    def oven_query_config(self):
        self.conn.write(b'q')

    def disconnect(self):
        self.state = 'disconnected'
        self.msg_queue[MSG_STATUS].put(OvenStatus((STATE_DISCONNECTED,)))
[((41, 12, 41, 49), 'struct.unpack', 'struct.unpack', ({(41, 26, 41, 39): '"""=BBLBB?bbLB"""', (41, 41, 41, 48): 'message'}, {}), "('=BBLBB?bbLB', message)", False, 'import struct\n'), ((64, 21, 64, 50), 'struct.unpack', 'struct.unpack', ({(64, 35, 64, 40): '"""=LB"""', (64, 42, 64, 49): 'message'}, {}), "('=LB', message)", False, 'import struct\n'), ((95, 38, 95, 51), 'queue.Queue', 'queue.Queue', ({}, {}), '()', False, 'import queue\n'), ((96, 38, 96, 51), 'queue.Queue', 'queue.Queue', ({}, {}), '()', False, 'import queue\n'), ((97, 42, 97, 55), 'queue.Queue', 'queue.Queue', ({}, {}), '()', False, 'import queue\n'), ((102, 24, 102, 63), 'serial.Serial', 'serial.Serial', (), '', False, 'import serial\n'), ((109, 12, 109, 23), 'time.sleep', 'sleep', ({(109, 18, 109, 22): '(0.01)'}, {}), '(0.01)', False, 'from time import sleep\n'), ((112, 12, 112, 22), 'time.sleep', 'sleep', ({(112, 18, 112, 21): '(0.2)'}, {}), '(0.2)', False, 'from time import sleep\n'), ((136, 16, 136, 26), 'time.sleep', 'sleep', ({(136, 22, 136, 25): '(0.1)'}, {}), '(0.1)', False, 'from time import sleep\n'), ((186, 29, 186, 60), 'struct.pack', 'struct.pack', ({(186, 41, 186, 46): '"""=LB"""', (186, 48, 186, 53): 'ctime', (186, 55, 186, 59): 'temp'}, {}), "('=LB', ctime, temp)", False, 'import struct\n')]
ComputerCraftr/devault
test/functional/abc-sync-chain.py
546b54df85e3392f85e7ea5fcd4ea9b395ba8f4c
#!/usr/bin/env python3
# Copyright (c) 2018 The Bitcoin developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""
Test that a node receiving many (potentially out of order) blocks exits
initial block download (IBD; this occurs once it has passed minimumchainwork)
and continues to sync without seizing.
"""

import random

from test_framework.blocktools import create_block, create_coinbase
from test_framework.mininode import (CBlockHeader,
                                     network_thread_start,
                                     P2PInterface,
                                     msg_block,
                                     msg_headers)
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import wait_until, p2p_port

NUM_IBD_BLOCKS = 50


class BaseNode(P2PInterface):
    def send_header(self, block):
        msg = msg_headers()
        msg.headers = [CBlockHeader(block)]
        self.send_message(msg)

    def send_block(self, block):
        self.send_message(msg_block(block))


class SyncChainTest(BitcoinTestFramework):
    def set_test_params(self):
        self.num_nodes = 1
        # Setting minimumchainwork makes sure we test IBD as well as post-IBD
        self.extra_args = [
            ["-minimumchainwork={:#x}".format(202 + 2 * NUM_IBD_BLOCKS)]]

    def run_test(self):
        node0conn = BaseNode()
        node0conn.peer_connect('127.0.0.1', p2p_port(0))
        network_thread_start()
        node0conn.wait_for_verack()

        node0 = self.nodes[0]

        tip = int(node0.getbestblockhash(), 16)
        height = node0.getblockcount() + 1
        time = node0.getblock(node0.getbestblockhash())['time'] + 1

        blocks = []
        for i in range(NUM_IBD_BLOCKS * 2):
            block = create_block(tip, create_coinbase(height), time)
            block.solve()
            blocks.append(block)
            tip = block.sha256
            height += 1
            time += 1

        # Headers need to be sent in-order
        for b in blocks:
            node0conn.send_header(b)

        # Send blocks in some random order
        for b in random.sample(blocks, len(blocks)):
            node0conn.send_block(b)

        # The node should eventually, completely sync without getting stuck
        def node_synced():
            return node0.getbestblockhash() == blocks[-1].hash

        wait_until(node_synced)


if __name__ == '__main__':
    SyncChainTest().main()
[((29, 14, 29, 27), 'test_framework.mininode.msg_headers', 'msg_headers', ({}, {}), '()', False, 'from test_framework.mininode import CBlockHeader, network_thread_start, P2PInterface, msg_block, msg_headers\n'), ((48, 8, 48, 30), 'test_framework.mininode.network_thread_start', 'network_thread_start', ({}, {}), '()', False, 'from test_framework.mininode import CBlockHeader, network_thread_start, P2PInterface, msg_block, msg_headers\n'), ((77, 8, 77, 31), 'test_framework.util.wait_until', 'wait_until', ({(77, 19, 77, 30): 'node_synced'}, {}), '(node_synced)', False, 'from test_framework.util import wait_until, p2p_port\n'), ((30, 23, 30, 42), 'test_framework.mininode.CBlockHeader', 'CBlockHeader', ({(30, 36, 30, 41): 'block'}, {}), '(block)', False, 'from test_framework.mininode import CBlockHeader, network_thread_start, P2PInterface, msg_block, msg_headers\n'), ((34, 26, 34, 42), 'test_framework.mininode.msg_block', 'msg_block', ({(34, 36, 34, 41): 'block'}, {}), '(block)', False, 'from test_framework.mininode import CBlockHeader, network_thread_start, P2PInterface, msg_block, msg_headers\n'), ((46, 44, 46, 55), 'test_framework.util.p2p_port', 'p2p_port', ({(46, 53, 46, 54): '(0)'}, {}), '(0)', False, 'from test_framework.util import wait_until, p2p_port\n'), ((59, 38, 59, 61), 'test_framework.blocktools.create_coinbase', 'create_coinbase', ({(59, 54, 59, 60): 'height'}, {}), '(height)', False, 'from test_framework.blocktools import create_block, create_coinbase\n')]
hongsemy/InstagramWithDjango
djangostagram/posts/models.py
18cb273668809fb48d829e1ac11438c51505623a
from django.db import models
from djangostagram.users import models as user_model

# Create your models here.

# This class is used in other models as an inheritance.
# An often-used pattern
class TimeStamedModel(models.Model):
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now_add=True)

    # An option that makes this model to not show up directly on the database
    class Meta:
        abstract = True


class Posts(TimeStamedModel):
    author = models.ForeignKey(
        user_model.User,
        null = True,
        on_delete = models.CASCADE,
        related_name = "post_author"
        )
    caption = models.TextField(blank=True)
    image = models.ImageField(blank=True)
    image_likes = models.ManyToManyField(user_model.User, related_name='post_image_likes')


class Comments(TimeStamedModel):
    author = models.ForeignKey(
        user_model.User,
        null = True,
        on_delete = models.CASCADE,
        related_name = "comment_author"
        )
    posts = models.ForeignKey(
        Posts,
        null = True,
        on_delete = models.CASCADE,
        related_name = "comment_post"
    )
    contents = models.TextField(blank=True)
[((9, 17, 9, 56), 'django.db.models.DateTimeField', 'models.DateTimeField', (), '', False, 'from django.db import models\n'), ((10, 17, 10, 56), 'django.db.models.DateTimeField', 'models.DateTimeField', (), '', False, 'from django.db import models\n'), ((18, 13, 23, 13), 'django.db.models.ForeignKey', 'models.ForeignKey', (), '', False, 'from django.db import models\n'), ((24, 14, 24, 42), 'django.db.models.TextField', 'models.TextField', (), '', False, 'from django.db import models\n'), ((25, 12, 25, 41), 'django.db.models.ImageField', 'models.ImageField', (), '', False, 'from django.db import models\n'), ((26, 18, 26, 90), 'django.db.models.ManyToManyField', 'models.ManyToManyField', (), '', False, 'from django.db import models\n'), ((30, 13, 35, 13), 'django.db.models.ForeignKey', 'models.ForeignKey', (), '', False, 'from django.db import models\n'), ((36, 12, 41, 5), 'django.db.models.ForeignKey', 'models.ForeignKey', (), '', False, 'from django.db import models\n'), ((42, 15, 42, 43), 'django.db.models.TextField', 'models.TextField', (), '', False, 'from django.db import models\n')]
rboixaderg/guillotina
guillotina/contrib/workflows/events.py
fcae65c2185222272f3b8fee4bc2754e81e0e983
from guillotina.contrib.workflows.interfaces import IWorkflowChangedEvent
from guillotina.events import ObjectEvent
from zope.interface import implementer


@implementer(IWorkflowChangedEvent)
class WorkflowChangedEvent(ObjectEvent):
    """An object has been moved"""

    def __init__(self, object, workflow, action, comments):
        ObjectEvent.__init__(self, object)
        self.object = object
        self.workflow = workflow
        self.action = action
        self.comments = comments
[((6, 1, 6, 35), 'zope.interface.implementer', 'implementer', ({(6, 13, 6, 34): 'IWorkflowChangedEvent'}, {}), '(IWorkflowChangedEvent)', False, 'from zope.interface import implementer\n'), ((11, 8, 11, 42), 'guillotina.events.ObjectEvent.__init__', 'ObjectEvent.__init__', ({(11, 29, 11, 33): 'self', (11, 35, 11, 41): 'object'}, {}), '(self, object)', False, 'from guillotina.events import ObjectEvent\n')]
lrwb-aou/curation
data_steward/cdr_cleaner/cleaning_rules/covid_ehr_vaccine_concept_suppression.py
e80447e56d269dc2c9c8bc79e78218d4b0dc504c
""" Suppress COVID EHR vaccine concepts. Original Issues: DC-1692 """ # Python imports import logging # Project imports from cdr_cleaner.cleaning_rules.deid.concept_suppression import AbstractBqLookupTableConceptSuppression from constants.cdr_cleaner import clean_cdr as cdr_consts from common import JINJA_ENV, CDM_TABLES from utils import pipeline_logging # Third party imports from google.cloud.exceptions import GoogleCloudError LOGGER = logging.getLogger(__name__) SUPPRESSION_RULE_CONCEPT_TABLE = 'covid_vaccine_concepts' COVID_VACCINE_CONCEPT_QUERY = JINJA_ENV.from_string(""" CREATE OR REPLACE TABLE `{{project_id}}.{{sandbox_id}}.{{concept_suppression_lookup_table}}` AS with covid_vacc as ( SELECT * FROM `{{project_id}}.{{dataset_id}}.concept` WHERE ( -- done by name and vocab -- REGEXP_CONTAINS(concept_name, r'(?i)(COVID)') AND REGEXP_CONTAINS(concept_name, r'(?i)(VAC)') AND vocabulary_id not in ('PPI') ) OR ( -- done by code and vocab -- REGEXP_CONTAINS(concept_code, r'(207)|(208)|(210)|(211)|(212)') and vocabulary_id = 'CVX' ) OR ( -- done by code and vocab -- REGEXP_CONTAINS(concept_code, r'(91300)|(91301)|(91302)|(91303)|(91304)') and vocabulary_id = 'CPT4' ) ), concepts_via_cr as ( select distinct c.* from `{{project_id}}.{{dataset_id}}.concept`as c left join `{{project_id}}.{{dataset_id}}.concept_relationship` on c.concept_id = concept_id_1 where concept_id_2 in (select concept_id from covid_vacc) # and concept_id_1 not in (select concept_id from covid_vacc) and ( relationship_id not in ('Subsumes', 'RxNorm dose form of', 'Dose form group of', 'RxNorm - SPL') OR (relationship_id = 'RxNorm - SPL' and REGEXP_CONTAINS(concept_name, r'(?i)(COVID)')) ) ), concepts_via_ca as ( select c.* from `{{project_id}}.{{dataset_id}}.concept`as c left join `{{project_id}}.{{dataset_id}}.concept_ancestor` as ca on c.concept_id = ca.descendant_concept_id where ca.ancestor_concept_id in (select concept_id from covid_vacc) ) select distinct * from covid_vacc union distinct select distinct * from concepts_via_ca union distinct select distinct * from concepts_via_cr """) class CovidEHRVaccineConceptSuppression(AbstractBqLookupTableConceptSuppression ): def __init__(self, project_id, dataset_id, sandbox_dataset_id, table_namer=None): """ Initialize the class with proper information. Set the issue numbers, description and affected datasets. As other tickets may affect this SQL, append them to the list of Jira Issues. DO NOT REMOVE ORIGINAL JIRA ISSUE NUMBERS! """ desc = "Suppress COVID EHR vaccine concepts." super().__init__( issue_numbers=['DC1692'], description=desc, affected_datasets=[cdr_consts.REGISTERED_TIER_DEID], affected_tables=CDM_TABLES, project_id=project_id, dataset_id=dataset_id, sandbox_dataset_id=sandbox_dataset_id, concept_suppression_lookup_table=SUPPRESSION_RULE_CONCEPT_TABLE, table_namer=table_namer) def create_suppression_lookup_table(self, client): concept_suppression_lookup_query = COVID_VACCINE_CONCEPT_QUERY.render( project_id=self.project_id, dataset_id=self.dataset_id, sandbox_id=self.sandbox_dataset_id, concept_suppression_lookup_table=self. 
concept_suppression_lookup_table) query_job = client.query(concept_suppression_lookup_query) result = query_job.result() if hasattr(result, 'errors') and result.errors: LOGGER.error(f"Error running job {result.job_id}: {result.errors}") raise GoogleCloudError( f"Error running job {result.job_id}: {result.errors}") def validate_rule(self, client, *args, **keyword_args): """ Validates the cleaning rule which deletes or updates the data from the tables Method to run validation on cleaning rules that will be updating the values. For example: if your class updates all the datetime fields you should be implementing the validation that checks if the date time values that needs to be updated no longer exists in the table. if your class deletes a subset of rows in the tables you should be implementing the validation that checks if the count of final final row counts + deleted rows should equals to initial row counts of the affected tables. Raises RunTimeError if the validation fails. """ raise NotImplementedError("Please fix me.") def setup_validation(self, client, *args, **keyword_args): """ Run required steps for validation setup Method to run to setup validation on cleaning rules that will be updating or deleting the values. For example: if your class updates all the datetime fields you should be implementing the logic to get the initial list of values which adhere to a condition we are looking for. if your class deletes a subset of rows in the tables you should be implementing the logic to get the row counts of the tables prior to applying cleaning rule """ raise NotImplementedError("Please fix me.") if __name__ == '__main__': import cdr_cleaner.args_parser as parser import cdr_cleaner.clean_cdr_engine as clean_engine ARGS = parser.parse_args() pipeline_logging.configure(level=logging.DEBUG, add_console_handler=True) if ARGS.list_queries: clean_engine.add_console_logging() query_list = clean_engine.get_query_list( ARGS.project_id, ARGS.dataset_id, ARGS.sandbox_dataset_id, [(CovidEHRVaccineConceptSuppression,)]) for query in query_list: LOGGER.info(query) else: clean_engine.add_console_logging(ARGS.console_log) clean_engine.clean_dataset(ARGS.project_id, ARGS.dataset_id, ARGS.sandbox_dataset_id, [(CovidEHRVaccineConceptSuppression,)])
[((19, 9, 19, 36), 'logging.getLogger', 'logging.getLogger', ({(19, 27, 19, 35): '__name__'}, {}), '(__name__)', False, 'import logging\n'), ((23, 30, 67, 4), 'common.JINJA_ENV.from_string', 'JINJA_ENV.from_string', ({(23, 52, 67, 3): '"""\nCREATE OR REPLACE TABLE `{{project_id}}.{{sandbox_id}}.{{concept_suppression_lookup_table}}` AS\nwith covid_vacc as (\n SELECT *\n FROM `{{project_id}}.{{dataset_id}}.concept` \n WHERE (\n -- done by name and vocab --\n REGEXP_CONTAINS(concept_name, r\'(?i)(COVID)\') AND\n REGEXP_CONTAINS(concept_name, r\'(?i)(VAC)\') AND \n vocabulary_id not in (\'PPI\')\n ) OR (\n -- done by code and vocab --\n REGEXP_CONTAINS(concept_code, r\'(207)|(208)|(210)|(211)|(212)\')\n and vocabulary_id = \'CVX\'\n ) OR (\n -- done by code and vocab --\n REGEXP_CONTAINS(concept_code, r\'(91300)|(91301)|(91302)|(91303)|(91304)\')\n and vocabulary_id = \'CPT4\'\n )\n),\nconcepts_via_cr as (\nselect distinct c.*\nfrom `{{project_id}}.{{dataset_id}}.concept`as c\nleft join `{{project_id}}.{{dataset_id}}.concept_relationship`\non c.concept_id = concept_id_1\nwhere concept_id_2 in (select concept_id from covid_vacc)\n# and concept_id_1 not in (select concept_id from covid_vacc)\nand (\n relationship_id not in (\'Subsumes\', \'RxNorm dose form of\', \'Dose form group of\', \'RxNorm - SPL\') OR \n (relationship_id = \'RxNorm - SPL\' and REGEXP_CONTAINS(concept_name, r\'(?i)(COVID)\'))\n )\n),\nconcepts_via_ca as (\n select c.*\n from `{{project_id}}.{{dataset_id}}.concept`as c\n left join `{{project_id}}.{{dataset_id}}.concept_ancestor` as ca\n on c.concept_id = ca.descendant_concept_id\n where ca.ancestor_concept_id in (select concept_id from covid_vacc) \n)\nselect distinct * from covid_vacc \nunion distinct\nselect distinct * from concepts_via_ca \nunion distinct \nselect distinct * from concepts_via_cr\n"""'}, {}), '(\n """\nCREATE OR REPLACE TABLE `{{project_id}}.{{sandbox_id}}.{{concept_suppression_lookup_table}}` AS\nwith covid_vacc as (\n SELECT *\n FROM `{{project_id}}.{{dataset_id}}.concept` \n WHERE (\n -- done by name and vocab --\n REGEXP_CONTAINS(concept_name, r\'(?i)(COVID)\') AND\n REGEXP_CONTAINS(concept_name, r\'(?i)(VAC)\') AND \n vocabulary_id not in (\'PPI\')\n ) OR (\n -- done by code and vocab --\n REGEXP_CONTAINS(concept_code, r\'(207)|(208)|(210)|(211)|(212)\')\n and vocabulary_id = \'CVX\'\n ) OR (\n -- done by code and vocab --\n REGEXP_CONTAINS(concept_code, r\'(91300)|(91301)|(91302)|(91303)|(91304)\')\n and vocabulary_id = \'CPT4\'\n )\n),\nconcepts_via_cr as (\nselect distinct c.*\nfrom `{{project_id}}.{{dataset_id}}.concept`as c\nleft join `{{project_id}}.{{dataset_id}}.concept_relationship`\non c.concept_id = concept_id_1\nwhere concept_id_2 in (select concept_id from covid_vacc)\n# and concept_id_1 not in (select concept_id from covid_vacc)\nand (\n relationship_id not in (\'Subsumes\', \'RxNorm dose form of\', \'Dose form group of\', \'RxNorm - SPL\') OR \n (relationship_id = \'RxNorm - SPL\' and REGEXP_CONTAINS(concept_name, r\'(?i)(COVID)\'))\n )\n),\nconcepts_via_ca as (\n select c.*\n from `{{project_id}}.{{dataset_id}}.concept`as c\n left join `{{project_id}}.{{dataset_id}}.concept_ancestor` as ca\n on c.concept_id = ca.descendant_concept_id\n where ca.ancestor_concept_id in (select concept_id from covid_vacc) \n)\nselect distinct * from covid_vacc \nunion distinct\nselect distinct * from concepts_via_ca \nunion distinct \nselect distinct * from concepts_via_cr\n"""\n )', False, 'from common import JINJA_ENV, CDM_TABLES\n'), ((151, 11, 151, 
30), 'cdr_cleaner.args_parser.parse_args', 'parser.parse_args', ({}, {}), '()', True, 'import cdr_cleaner.args_parser as parser\n'), ((152, 4, 152, 77), 'utils.pipeline_logging.configure', 'pipeline_logging.configure', (), '', False, 'from utils import pipeline_logging\n'), ((155, 8, 155, 42), 'cdr_cleaner.clean_cdr_engine.add_console_logging', 'clean_engine.add_console_logging', ({}, {}), '()', True, 'import cdr_cleaner.clean_cdr_engine as clean_engine\n'), ((156, 21, 158, 51), 'cdr_cleaner.clean_cdr_engine.get_query_list', 'clean_engine.get_query_list', ({(157, 12, 157, 27): 'ARGS.project_id', (157, 29, 157, 44): 'ARGS.dataset_id', (157, 46, 157, 69): 'ARGS.sandbox_dataset_id', (158, 12, 158, 50): '[(CovidEHRVaccineConceptSuppression,)]'}, {}), '(ARGS.project_id, ARGS.dataset_id, ARGS.\n sandbox_dataset_id, [(CovidEHRVaccineConceptSuppression,)])', True, 'import cdr_cleaner.clean_cdr_engine as clean_engine\n'), ((163, 8, 163, 58), 'cdr_cleaner.clean_cdr_engine.add_console_logging', 'clean_engine.add_console_logging', ({(163, 41, 163, 57): 'ARGS.console_log'}, {}), '(ARGS.console_log)', True, 'import cdr_cleaner.clean_cdr_engine as clean_engine\n'), ((164, 8, 166, 74), 'cdr_cleaner.clean_cdr_engine.clean_dataset', 'clean_engine.clean_dataset', ({(164, 35, 164, 50): 'ARGS.project_id', (164, 52, 164, 67): 'ARGS.dataset_id', (165, 35, 165, 58): 'ARGS.sandbox_dataset_id', (166, 35, 166, 73): '[(CovidEHRVaccineConceptSuppression,)]'}, {}), '(ARGS.project_id, ARGS.dataset_id, ARGS.\n sandbox_dataset_id, [(CovidEHRVaccineConceptSuppression,)])', True, 'import cdr_cleaner.clean_cdr_engine as clean_engine\n'), ((109, 18, 110, 70), 'google.cloud.exceptions.GoogleCloudError', 'GoogleCloudError', ({(110, 16, 110, 69): 'f"""Error running job {result.job_id}: {result.errors}"""'}, {}), "(f'Error running job {result.job_id}: {result.errors}')", False, 'from google.cloud.exceptions import GoogleCloudError\n')]
sum3105/pydbhub
pydbhub/httphub.py
501ea2c0ec7785bc06a38961a1366c3c04d7fabd
import pydbhub

from typing import Any, Dict, List, Tuple
from json.decoder import JSONDecodeError
import requests
import io


def send_request_json(query_url: str, data: Dict[str, Any]) -> Tuple[List[Any], str]:
    """
    send_request_json sends a request to DBHub.io, formatting the returned result as JSON

    Parameters
    ----------
    query_url : str
        url of the API endpoint
    data : Dict[str, Any]
        data to be processed to the server.

    Returns
    -------
    Tuple[List[Any], str]
        The returned data is
        - a list of JSON object.
        - a string describe error if occurs
    """
    try:
        headers = {'User-Agent': f'pydbhub v{pydbhub.__version__}'}
        response = requests.post(query_url, data=data, headers=headers)
        response.raise_for_status()
        return response.json(), None
    except JSONDecodeError as e:
        return None, e.args[0]
    except TypeError as e:
        return None, e.args[0]
    except requests.exceptions.HTTPError as e:
        try:
            return response.json(), e.args[0]
        except JSONDecodeError:
            return None, e.args[0]
    except requests.exceptions.RequestException as e:
        cause = e.args(0)
        return None, str(cause.args[0])


def send_request(query_url: str, data: Dict[str, Any]) -> Tuple[List[bytes], str]:
    """
    send_request sends a request to DBHub.io.

    Parameters
    ----
    query_url : str
        url of the API endpoint
    data : Dict[str, Any]
        data to be processed to the server.------

    Returns
    -------
    List[bytes]
        database file is returned as a list of bytes
    """
    try:
        headers = {'User-Agent': f'pydbhub v{pydbhub.__version__}'}
        response = requests.post(query_url, data=data, headers=headers)
        response.raise_for_status()
        return response.content, None
    except requests.exceptions.HTTPError as e:
        return None, e.args[0]
    except requests.exceptions.RequestException as e:
        cause = e.args(0)
        return None, str(cause.args[0])


def send_upload(query_url: str, data: Dict[str, Any], db_bytes: io.BufferedReader) -> Tuple[List[Any], str]:
    """
    send_upload uploads a database to DBHub.io.

    Parameters
    ----------
    query_url : str
        url of the API endpoint.
    data : Dict[str, Any]
        data to be processed to the server.
    db_bytes : io.BufferedReader
        A buffered binary stream of the database file.

    Returns
    -------
    Tuple[List[Any], str]
        The returned data is
        - a list of JSON object.
        - a string describe error if occurs
    """
    try:
        headers = {'User-Agent': f'pydbhub v{pydbhub.__version__}'}
        files = {"file": db_bytes}
        response = requests.post(query_url, data=data, headers=headers, files=files)
        response.raise_for_status()

        if response.status_code != 201:
            # The returned status code indicates something went wrong
            try:
                return response.json(), str(response.status_code)
            except JSONDecodeError:
                return None, str(response.status_code)
        return response.json(), None
    except requests.exceptions.HTTPError as e:
        try:
            return response.json(), e.args[0]
        except JSONDecodeError:
            return None, e.args[0]
    except requests.exceptions.RequestException as e:
        cause = e.args(0)
        return None, str(cause.args[0])
[((29, 19, 29, 71), 'requests.post', 'requests.post', (), '', False, 'import requests\n'), ((64, 19, 64, 71), 'requests.post', 'requests.post', (), '', False, 'import requests\n'), ((97, 19, 97, 84), 'requests.post', 'requests.post', (), '', False, 'import requests\n')]
BrandonLeiran/bracket-scoring
test_calcscore.py
a099e9a56ee3083c3a9db7d085b11b1dc7fe77f8
import pytest

from calcscore import round_score

# you'll be picking what teams make it to the next round
# - so picking 32, then 16, then 8, 4, 2, 1...i.e. round 1-6 winners
# teams will have a name & a seed
# seed doesn't change, so maybe make that not passed around w/ results


def test_round_score_invalid_round():
    with pytest.raises(ValueError, match=r".*range*"):
        round_score(0)

    with pytest.raises(ValueError, match=r".*range*"):
        round_score(7)


def test_round_score_invalid_winner():
    VALID_ROUND = 1
    all_teams = []
    round_winners = []
    picked_winners = ["picked team"]
    with pytest.raises(ValueError, match=r".*invalid winner"):
        round_score(VALID_ROUND, all_teams, round_winners, picked_winners)


# score = round_score(0)
# assert score == 0
[((10, 9, 10, 53), 'pytest.raises', 'pytest.raises', (), '', False, 'import pytest\n'), ((11, 8, 11, 22), 'calcscore.round_score', 'round_score', ({(11, 20, 11, 21): '(0)'}, {}), '(0)', False, 'from calcscore import round_score\n'), ((13, 9, 13, 53), 'pytest.raises', 'pytest.raises', (), '', False, 'import pytest\n'), ((14, 8, 14, 22), 'calcscore.round_score', 'round_score', ({(14, 20, 14, 21): '(7)'}, {}), '(7)', False, 'from calcscore import round_score\n'), ((21, 9, 21, 61), 'pytest.raises', 'pytest.raises', (), '', False, 'import pytest\n'), ((22, 8, 22, 74), 'calcscore.round_score', 'round_score', ({(22, 20, 22, 31): 'VALID_ROUND', (22, 33, 22, 42): 'all_teams', (22, 44, 22, 57): 'round_winners', (22, 59, 22, 73): 'picked_winners'}, {}), '(VALID_ROUND, all_teams, round_winners, picked_winners)', False, 'from calcscore import round_score\n')]
bgyori/pyobo
tests/test_get.py
f199f62f65fc7faff307b56f979a369202c8ad33
import unittest
from operator import attrgetter

import obonet

from pyobo import SynonymTypeDef, get
from pyobo.struct import Reference
from pyobo.struct.struct import (
    iterate_graph_synonym_typedefs,
    iterate_graph_typedefs,
    iterate_node_parents,
    iterate_node_properties,
    iterate_node_relationships,
    iterate_node_synonyms,
    iterate_node_xrefs,
)
from tests.constants import TEST_CHEBI_OBO_PATH


class TestParseObonet(unittest.TestCase):
    """"""

    @classmethod
    def setUpClass(cls) -> None:
        cls.graph = obonet.read_obo(TEST_CHEBI_OBO_PATH)

    def test_get_graph_typedefs(self):
        """Test getting type definitions from an :mod:`obonet` graph."""
        pairs = {
            (typedef.prefix, typedef.identifier)
            for typedef in iterate_graph_typedefs(self.graph)
        }
        self.assertIn(('chebi', 'has_part'), pairs)

    def test_get_graph_synonym_typedefs(self):
        """Test getting synonym type definitions from an :mod:`obonet` graph."""
        synonym_typedefs = sorted(iterate_graph_synonym_typedefs(self.graph), key=attrgetter('id'))
        self.assertEqual(
            sorted([
                SynonymTypeDef(id='IUPAC_NAME', name='IUPAC NAME'),
                SynonymTypeDef(id='BRAND_NAME', name='BRAND NAME'),
                SynonymTypeDef(id='INN', name='INN'),
            ], key=attrgetter('id')),
            synonym_typedefs,
        )

    def test_get_node_synonyms(self):
        """Test getting synonyms from a node in a :mod:`obonet` graph."""
        data = self.graph.nodes['CHEBI:51990']
        synonyms = list(iterate_node_synonyms(data))
        self.assertEqual(1, len(synonyms))
        synonym = synonyms[0]
        self.assertEqual('N,N,N-tributylbutan-1-aminium fluoride', synonym.name, msg='name parsing failed')
        self.assertEqual('EXACT', synonym.specificity, msg='specificity parsing failed')
        # TODO implement
        # self.assertEqual(SynonymTypeDef(id='IUPAC_NAME', name='IUPAC NAME'), synonym.type)

    def test_get_node_properties(self):
        """Test getting properties from a node in a :mod:`obonet` graph."""
        data = self.graph.nodes['CHEBI:51990']
        properties = list(iterate_node_properties(data))
        t_prop = 'http://purl.obolibrary.org/obo/chebi/monoisotopicmass'
        self.assertIn(t_prop, {prop for prop, value in properties})
        self.assertEqual(1, sum(prop == t_prop for prop, value in properties))
        value = [value for prop, value in properties if prop == t_prop][0]
        self.assertEqual('261.28318', value)

    def test_get_node_parents(self):
        """Test getting parents from a node in a :mod:`obonet` graph."""
        data = self.graph.nodes['CHEBI:51990']
        parents = list(iterate_node_parents(data))
        self.assertEqual(2, len(parents))
        self.assertEqual({'24060', '51992'}, {
            parent.identifier
            for parent in parents
        })
        self.assertEqual({'chebi'}, {
            parent.prefix
            for parent in parents
        })

    def test_get_node_xrefs(self):
        """Test getting parents from a node in a :mod:`obonet` graph."""
        data = self.graph.nodes['CHEBI:51990']
        xrefs = list(iterate_node_xrefs(data))
        self.assertEqual(7, len(xrefs))
        # NOTE the prefixes are remapped by PyOBO
        self.assertEqual({'pubmed', 'cas', 'beilstein', 'reaxys'}, {
            xref.prefix
            for xref in xrefs
        })
        self.assertEqual(
            {
                ('reaxys', '3570522'),
                ('beilstein', '3570522'),
                ('cas', '429-41-4'),
                ('pubmed', '21142041'),
                ('pubmed', '21517057'),
                ('pubmed', '22229781'),
                ('pubmed', '15074950'),
            },
            {(xref.prefix, xref.identifier) for xref in xrefs}
        )

    def test_get_node_relations(self):
        """Test getting relations from a node in a :mod:`obonet` graph."""
        data = self.graph.nodes['CHEBI:17051']
        relations = list(iterate_node_relationships(data, 'chebi'))
        self.assertEqual(1, len(relations))
        typedef, target = relations[0]

        self.assertIsNotNone(target)
        self.assertIsInstance(target, Reference)
        self.assertEqual('chebi', target.prefix)
        self.assertEqual('29228', target.identifier)

        self.assertIsNotNone(typedef)
        self.assertIsInstance(typedef, Reference)
        self.assertEqual('chebi', typedef.prefix)
        self.assertEqual('is_conjugate_base_of', typedef.identifier)


class TestGet(unittest.TestCase):
    """Test generation of OBO objects."""

    def test_get_obo(self):
        """Test getting an OBO document."""
        obo = get('chebi', url=TEST_CHEBI_OBO_PATH, local=True)
        terms = list(obo)
        self.assertEqual(18, len(terms))
[((20, 20, 20, 56), 'obonet.read_obo', 'obonet.read_obo', ({(20, 36, 20, 55): 'TEST_CHEBI_OBO_PATH'}, {}), '(TEST_CHEBI_OBO_PATH)', False, 'import obonet\n'), ((118, 14, 118, 63), 'pyobo.get', 'get', (), '', False, 'from pyobo import SynonymTypeDef, get\n'), ((32, 34, 32, 76), 'pyobo.struct.struct.iterate_graph_synonym_typedefs', 'iterate_graph_synonym_typedefs', ({(32, 65, 32, 75): 'self.graph'}, {}), '(self.graph)', False, 'from pyobo.struct.struct import iterate_graph_synonym_typedefs, iterate_graph_typedefs, iterate_node_parents, iterate_node_properties, iterate_node_relationships, iterate_node_synonyms, iterate_node_xrefs\n'), ((45, 24, 45, 51), 'pyobo.struct.struct.iterate_node_synonyms', 'iterate_node_synonyms', ({(45, 46, 45, 50): 'data'}, {}), '(data)', False, 'from pyobo.struct.struct import iterate_graph_synonym_typedefs, iterate_graph_typedefs, iterate_node_parents, iterate_node_properties, iterate_node_relationships, iterate_node_synonyms, iterate_node_xrefs\n'), ((56, 26, 56, 55), 'pyobo.struct.struct.iterate_node_properties', 'iterate_node_properties', ({(56, 50, 56, 54): 'data'}, {}), '(data)', False, 'from pyobo.struct.struct import iterate_graph_synonym_typedefs, iterate_graph_typedefs, iterate_node_parents, iterate_node_properties, iterate_node_relationships, iterate_node_synonyms, iterate_node_xrefs\n'), ((66, 23, 66, 49), 'pyobo.struct.struct.iterate_node_parents', 'iterate_node_parents', ({(66, 44, 66, 48): 'data'}, {}), '(data)', False, 'from pyobo.struct.struct import iterate_graph_synonym_typedefs, iterate_graph_typedefs, iterate_node_parents, iterate_node_properties, iterate_node_relationships, iterate_node_synonyms, iterate_node_xrefs\n'), ((80, 21, 80, 45), 'pyobo.struct.struct.iterate_node_xrefs', 'iterate_node_xrefs', ({(80, 40, 80, 44): 'data'}, {}), '(data)', False, 'from pyobo.struct.struct import iterate_graph_synonym_typedefs, iterate_graph_typedefs, iterate_node_parents, iterate_node_properties, iterate_node_relationships, iterate_node_synonyms, iterate_node_xrefs\n'), ((98, 25, 98, 66), 'pyobo.struct.struct.iterate_node_relationships', 'iterate_node_relationships', ({(98, 52, 98, 56): 'data', (98, 58, 98, 65): '"""chebi"""'}, {}), "(data, 'chebi')", False, 'from pyobo.struct.struct import iterate_graph_synonym_typedefs, iterate_graph_typedefs, iterate_node_parents, iterate_node_properties, iterate_node_relationships, iterate_node_synonyms, iterate_node_xrefs\n'), ((26, 27, 26, 61), 'pyobo.struct.struct.iterate_graph_typedefs', 'iterate_graph_typedefs', ({(26, 50, 26, 60): 'self.graph'}, {}), '(self.graph)', False, 'from pyobo.struct.struct import iterate_graph_synonym_typedefs, iterate_graph_typedefs, iterate_node_parents, iterate_node_properties, iterate_node_relationships, iterate_node_synonyms, iterate_node_xrefs\n'), ((32, 82, 32, 98), 'operator.attrgetter', 'attrgetter', ({(32, 93, 32, 97): '"""id"""'}, {}), "('id')", False, 'from operator import attrgetter\n'), ((35, 16, 35, 66), 'pyobo.SynonymTypeDef', 'SynonymTypeDef', (), '', False, 'from pyobo import SynonymTypeDef, get\n'), ((36, 16, 36, 66), 'pyobo.SynonymTypeDef', 'SynonymTypeDef', (), '', False, 'from pyobo import SynonymTypeDef, get\n'), ((37, 16, 37, 52), 'pyobo.SynonymTypeDef', 'SynonymTypeDef', (), '', False, 'from pyobo import SynonymTypeDef, get\n'), ((38, 19, 38, 35), 'operator.attrgetter', 'attrgetter', ({(38, 30, 38, 34): '"""id"""'}, {}), "('id')", False, 'from operator import attrgetter\n')]
ymontilla/WebScrapingCatastro
src/commons.py
a184b5c92199305e28ca7346c01d1e78e0a92c13
# -*- coding: utf-8 -*-
# +
## Common utilities shared between Places and OSM.

# +
import csv
import ast
import codecs
from math import cos, asin, sqrt


# +
def read_csv_with_encoding(filename, delimiter="|", encoding="iso-8859-1"):
    with codecs.open(filename, encoding=encoding) as fp:
        reader = csv.reader(fp, delimiter=delimiter)
        csvFile = list(reader)
        return pd.DataFrame(csvFile[1:], columns=csvFile[0])


def read_json_with_encoding(filename, encoding="iso-8859-1"):
    with codecs.open(filename, encoding=encoding) as a:
        l = a.read()
        json_file = ast.literal_eval(l)
        return json_file
# -


import pandas as pd


def distance(lat1, lon1, lat2, lon2):
    """
    The distance measurement result is in kilometers.
    """
    p = 0.017453292519943295     #Pi/180
    a = 0.5 - cos((lat2 - lat1) * p)/2 + cos(lat1 * p) * cos(lat2 * p) * (1 - cos((lon2 - lon1) * p)) / 2
    return 12742 * asin(sqrt(a))


def build_center_point(df):
    lat = df["latitude"].mean()
    lon = df["longitude"].mean()
    return pd.DataFrame({'fid': [777], 'latitude': [lat], 'longitude': [lon]})


"""
This process is very heavy and it is not possible to run the analysis over all of Bogotá's data:
the number of records is too large to fit in memory. The correct usage is to filter the data
before doing the cross join.
"""
def compute_cross_distances(location_df, interest_points_df=None):
    condition_latitude = ~location_df["latitude"].isna()
    condition_longitude = ~location_df["longitude"].isna()
    location_df_complete = location_df.loc[condition_latitude & condition_longitude]
    results = []
    for i in location_df_complete.index:
        for j in interest_points_df.index:
            results.append([
                location_df_complete.loc[i, "fid"],
                distance(location_df_complete.loc[i, "latitude"], location_df_complete.loc[i, "longitude"],
                         float(interest_points_df.loc[j, "lat"]), float(interest_points_df.loc[j, "lon"])),
                location_df_complete.loc[i, "latitude"], location_df_complete.loc[i, "longitude"],
                interest_points_df.loc[j, "lat"], interest_points_df.loc[j, "lon"],
                interest_points_df.loc[j, "amenity"], interest_points_df.loc[j, "name"]
            ])

    final = list(zip(*results))
    return pd.DataFrame({'fid': final[0], 'distance': final[1], 'p_lat': final[2], 'p_lon': final[3],
                         'i_lat': final[4], 'i_lon': final[5],
                         'amenity': final[6], 'name': final[7]})
[((44, 11, 44, 78), 'pandas.DataFrame', 'pd.DataFrame', ({(44, 24, 44, 77): "{'fid': [777], 'latitude': [lat], 'longitude': [lon]}"}, {}), "({'fid': [777], 'latitude': [lat], 'longitude': [lon]})", True, 'import pandas as pd\n'), ((71, 11, 73, 64), 'pandas.DataFrame', 'pd.DataFrame', ({(71, 24, 73, 63): "{'fid': final[0], 'distance': final[1], 'p_lat': final[2], 'p_lon': final[3\n ], 'i_lat': final[4], 'i_lon': final[5], 'amenity': final[6], 'name':\n final[7]}"}, {}), "({'fid': final[0], 'distance': final[1], 'p_lat': final[2],\n 'p_lon': final[3], 'i_lat': final[4], 'i_lon': final[5], 'amenity':\n final[6], 'name': final[7]})", True, 'import pandas as pd\n'), ((15, 9, 15, 49), 'codecs.open', 'codecs.open', (), '', False, 'import codecs\n'), ((16, 17, 16, 52), 'csv.reader', 'csv.reader', (), '', False, 'import csv\n'), ((18, 15, 18, 60), 'pandas.DataFrame', 'pd.DataFrame', (), '', True, 'import pandas as pd\n'), ((21, 9, 21, 49), 'codecs.open', 'codecs.open', (), '', False, 'import codecs\n'), ((23, 20, 23, 39), 'ast.literal_eval', 'ast.literal_eval', ({(23, 37, 23, 38): 'l'}, {}), '(l)', False, 'import ast\n'), ((38, 24, 38, 31), 'math.sqrt', 'sqrt', ({(38, 29, 38, 30): 'a'}, {}), '(a)', False, 'from math import cos, asin, sqrt\n'), ((37, 14, 37, 36), 'math.cos', 'cos', ({(37, 18, 37, 35): '((lat2 - lat1) * p)'}, {}), '((lat2 - lat1) * p)', False, 'from math import cos, asin, sqrt\n'), ((37, 41, 37, 54), 'math.cos', 'cos', ({(37, 45, 37, 53): '(lat1 * p)'}, {}), '(lat1 * p)', False, 'from math import cos, asin, sqrt\n'), ((37, 57, 37, 70), 'math.cos', 'cos', ({(37, 61, 37, 69): '(lat2 * p)'}, {}), '(lat2 * p)', False, 'from math import cos, asin, sqrt\n'), ((37, 78, 37, 100), 'math.cos', 'cos', ({(37, 82, 37, 99): '((lon2 - lon1) * p)'}, {}), '((lon2 - lon1) * p)', False, 'from math import cos, asin, sqrt\n')]
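A note on the distance helper in src/commons.py above: it is the haversine great-circle formula in compressed form, where p converts degrees to radians and 12742 km is twice the mean Earth radius (2 * 6371 km). The following self-contained sketch (not part of the repository) restates the formula and sanity-checks it against the roughly 111.2 km spanned by one degree of latitude along a meridian:

from math import cos, asin, sqrt

def haversine_km(lat1, lon1, lat2, lon2):
    # Same expression as distance() above: p = pi/180, 12742 = 2 * 6371 km.
    p = 0.017453292519943295
    a = 0.5 - cos((lat2 - lat1) * p) / 2 \
        + cos(lat1 * p) * cos(lat2 * p) * (1 - cos((lon2 - lon1) * p)) / 2
    return 12742 * asin(sqrt(a))

# One degree of latitude is about 111.2 km, which this reproduces.
print(round(haversine_km(0.0, 0.0, 1.0, 0.0), 1))  # -> 111.2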
JamescMcE/BasketBet
GamesGetter.py
f87719ac793ea50822e8c52fc23191dba9ad6418
#This script Imports Game Data from ESPN, and Odds from the ODDS-API, and then imports them into a MySQL table, example in workbench here https://puu.sh/HOKCj/ce199eec8e.png
import mysql.connector
import requests
import json
import datetime
import time

#Connection to the MYSQL Server.
mydb = mysql.connector.connect(
    host="",
    user="",
    password="",
    database="basketbet_data"
)
mycursor = mydb.cursor()

#Games List.
allGames=[]


#Gets the game Data from ESPN API given the link.
def newGetter(gameDay):
    #Json Response for YESTERDAY.
    response = requests.get(gameDay).json()
    gameData = response["events"]

    #Loop through to collect GameDay data.
    a=0
    while a < len(gameData):
        game = str(gameData[a]['name'])
        game_ID = str(gameData[a]['id'])
        game_Date = str(gameData[a]['date'][:-7])
        game_Time = str(gameData[a]['date'][11:-1])
        game_Period = str(gameData[a]['status']['period'])
        game_Status = str(gameData[a]['status']['type']['description'])
        home_Score = str(gameData[a]['competitions'][0]['competitors'][0]['score'])
        away_Score = str(gameData[a]['competitions'][0]['competitors'][1]['score'])

        #Quick fix to change Clippers Name from LA Clippers to Los Angeles Clippers.
        if str(gameData[a]['competitions'][0]['competitors'][0]['team']['displayName']) == 'LA Clippers':
            home_Team = 'Los Angeles Clippers'
        else:
            home_Team = str(gameData[a]['competitions'][0]['competitors'][0]['team']['displayName'])
        if str(gameData[a]['competitions'][0]['competitors'][1]['team']['displayName']) == 'LA Clippers':
            away_Team = 'Los Angeles Clippers'
        else:
            away_Team = str(gameData[a]['competitions'][0]['competitors'][1]['team']['displayName'])

        #Appends the Game Data to the list.
        allGames.append((game_ID, game, home_Team, home_Score, away_Team, away_Score, game_Date, game_Time, game_Period, game_Status))
        a+=1


#Gets the Odds from the ODDS-API.
def oddsGetter():
    #Parameters for Odds Api.
    parameters = {
        "sport" : "basketball_nba",
        "region" : "uk",
        "mkt" : "h2h",
        "apiKey" : "",
    }

    #JSON Response.
    response = requests.get("https://api.the-odds-api.com/v3/odds/", params=parameters)
    data = response.json()['data']

    team0OddsInfo=[]
    team1OddsInfo=[]
    team0_odds = ''
    team1_odds = ''

    #Appends the odds info to a list as strings.
    for game in data:
        for site in game['sites']:
            if site['site_key'] == "paddypower":
                team0_odds = str(site['odds']['h2h'][0])
                team1_odds = str(site['odds']['h2h'][1])
        if team0_odds == '':
            team0_odds = 0
        if team1_odds == '':
            team1_odds = 0
        team0 = str(game['teams'][0])
        team1 = str(game['teams'][1])
        startTime = game['commence_time']
        gameDate = str(datetime.datetime.utcfromtimestamp(startTime).strftime('%Y-%m-%d %H:%M:%S'))[:-9]
        team0OddsInfo.append((team0, team0_odds, gameDate))
        team1OddsInfo.append((team1, team1_odds, gameDate))

    a=0
    #as both lists are the same length, it loops through one and Updates the tables where needed.
    while a < len(team0OddsInfo):
        query_string = 'SELECT * FROM basketbet_data.all_games WHERE Game_Date = %s'
        gameDate = (str(team0OddsInfo[a][2]),)
        mycursor.execute(query_string, gameDate)
        matchedGames = mycursor.fetchall()

        b=0
        while b < len(matchedGames):
            if matchedGames[b][2] == team0OddsInfo[a][0]:
                query_list = [team0OddsInfo[a][1], team1OddsInfo[a][1], matchedGames[b][0]]
                query_string = 'UPDATE all_games SET Home_Odds = %s, Away_Odds = %s WHERE (Game_ID = %s)'
                mycursor.execute(query_string, query_list)
            elif matchedGames[b][5] == team0OddsInfo[a][0]:
                query_list = [team0OddsInfo[a][1], team1OddsInfo[a][1], matchedGames[b][0]]
                query_string = 'UPDATE all_games SET Away_Odds = %s, Home_Odds = %s WHERE (Game_ID = %s)'
                mycursor.execute(query_string, query_list)
            b+=1
        a+=1

    #For the console to show when odds were updated.
    mydb.commit()
    time = datetime.datetime.utcnow()
    print('\n' + 'ODDS UPDATE AT: ' + str(time))
    print('--------------------------------')
    print('--------------------------------')
    print(len(team0OddsInfo), "GAME ODDS inserted.")
    print('REMAINING REQUESTS:', response.headers['x-requests-remaining'])
    print('USED REQUESTS:', response.headers['x-requests-used'])
    print('--------------------------------')
    print('--------------------------------')


#Block to keep the script running then sleep for time 300 with counter set at 72 for Games every 5min | Odds every 6hr.
counter=72
startTime = time.time()
while True:
    #Today, Yesterday and Tomorrow.
    today = datetime.date.today()
    yesterday = today + datetime.timedelta(days=-1)
    tomorrow = today + datetime.timedelta(days=1)

    #Removing the - from the dates for the URLs, then making the URLs.
    todayShort = str(today).replace('-', '')
    yesterdayShort = str(yesterday).replace('-', '')
    tomorrowShort = str(tomorrow).replace('-', '')
    yesterdayUrl = "http://site.api.espn.com/apis/site/v2/sports/basketball/nba/scoreboard?dates=" + yesterdayShort + '-' + yesterdayShort
    todayUrl = "http://site.api.espn.com/apis/site/v2/sports/basketball/nba/scoreboard?dates=" + todayShort + '-' + todayShort
    tomorrowUrl = "http://site.api.espn.com/apis/site/v2/sports/basketball/nba/scoreboard?dates=" + tomorrowShort + '-' + tomorrowShort

    newGetter(yesterdayUrl)
    newGetter(todayUrl)
    newGetter(tomorrowUrl)

    #Inserting or updating the table in MYSQL with the games.
    c=0
    updateCount=0
    newGameCount=0
    while c < len(allGames):
        query_string = 'SELECT * FROM basketbet_data.all_games WHERE Game_ID = %s'
        gameID = (str(allGames[c][0]),)
        mycursor.execute(query_string, gameID)

        if mycursor.fetchone():
            updateCount+=1
            query_list = [allGames[c][1], allGames[c][2], allGames[c][4], allGames[c][5], allGames[c][3], allGames[c][6], allGames[c][7], allGames[c][8], allGames[c][9], allGames[c][0]]
            query_string = 'UPDATE all_games SET Game_Name = %s, Home_Team = %s, Away_Team = %s, Away_Score = %s, Home_Score = %s, Game_Date = %s, Game_Time = %s, Game_Period = %s, Game_Status = %s WHERE (Game_ID = %s)'
            mycursor.execute(query_string, query_list)
            mydb.commit()
        else:
            newGameCount+=1
            query_string = "INSERT INTO basketbet_data.all_games (Game_ID, Game_Name, Home_Team, Home_Odds, Home_Score, Away_Team, Away_Odds, Away_Score, Game_Date, Game_Time, Game_Period, Game_Status) VALUES (%s, %s, %s, 0, %s, %s, 0, %s, %s, %s, %s, %s)"
            mycursor.execute(query_string, allGames[c])
            mydb.commit()
        c+=1

    #Prints to console what games were updated and what new games were inserted.
    print('----------------------------------------')
    print(str(updateCount) + ' GAMES UPDATED, and ' + str(newGameCount) + ' NEW GAMES inserted.')
    print('----------------------------------------')
    allGames=[]

    #Counter for the Odds script.
    if counter==72:
        oddsGetter()
        counter=0
    else:
        counter+=1

    print('\n')
    time.sleep(300 - ((time.time() - startTime) % 300))
[((66, 15, 66, 87), 'requests.get', 'requests.get', (), '', False, 'import requests\n'), ((114, 11, 114, 37), 'datetime.datetime.utcnow', 'datetime.datetime.utcnow', ({}, {}), '()', False, 'import datetime\n'), ((131, 12, 131, 33), 'datetime.date.today', 'datetime.date.today', ({}, {}), '()', False, 'import datetime\n'), ((132, 24, 132, 51), 'datetime.timedelta', 'datetime.timedelta', (), '', False, 'import datetime\n'), ((133, 23, 133, 49), 'datetime.timedelta', 'datetime.timedelta', (), '', False, 'import datetime\n'), ((24, 15, 24, 36), 'requests.get', 'requests.get', ({(24, 28, 24, 35): 'gameDay'}, {}), '(gameDay)', False, 'import requests\n'), ((87, 23, 87, 68), 'datetime.datetime.utcfromtimestamp', 'datetime.datetime.utcfromtimestamp', ({(87, 58, 87, 67): 'startTime'}, {}), '(startTime)', False, 'import datetime\n')]
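One detail of GamesGetter.py above worth spelling out: the final time.sleep(300 - ((time.time() - startTime) % 300)) sleeps only for the remainder of the current 300-second window rather than a flat 300 seconds, so wake-ups stay aligned to the original start time even when an iteration runs slowly. A minimal illustration (not part of the repository; the helper name is made up):

import time

PERIOD = 300  # seconds, the refresh interval used by the script above

def seconds_until_next_tick(start, now):
    # Sleep only for what is left of the current window, so a slow iteration
    # does not push every later wake-up back.
    return PERIOD - ((now - start) % PERIOD)

start = time.time()
# If an iteration took 72 seconds, the next wake-up is 228 seconds away,
# keeping wake-ups aligned to start + k * PERIOD.
print(round(seconds_until_next_tick(start, start + 72)))  # -> 228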
effigies/neurodocker
neurodocker/tests/test_neurodocker.py
4b0f32d2915b8b0308e3e391d534e05eb29b8d09
"""Tests for neurodocker.main""" # Author: Jakub Kaczmarzyk <[email protected]> from __future__ import absolute_import, unicode_literals import sys import pytest from neurodocker.neurodocker import create_parser, parse_args, main def test_generate(): args = ("generate -b ubuntu:17.04 -p apt" " --arg FOO=BAR BAZ" " --afni version=latest" " --ants version=2.2.0" " --freesurfer version=6.0.0" " --fsl version=5.0.10" " --user=neuro" " --miniconda env_name=neuro conda_install=python=3.6.2" " --user=root" " --mrtrix3" " --neurodebian os_codename=zesty download_server=usa-nh" " --spm version=12 matlab_version=R2017a" " --no-check-urls" " --expose 1234 9000" " --volume /var /usr/bin" " --label FOO=BAR BAZ=CAT" " --copy relpath/to/file.txt /tmp/file.txt" " --add relpath/to/file2.txt /tmp/file2.txt" " --cmd '--arg1' '--arg2'" " --workdir /home" " --install git" " --user=neuro" ) main(args.split()) with pytest.raises(SystemExit): args = "-b ubuntu" main(args.split()) with pytest.raises(SystemExit): args = "-p apt" main(args.split()) with pytest.raises(SystemExit): main() args = "generate -b ubuntu -p apt --ants option=value" with pytest.raises(ValueError): main(args.split()) def test_generate_opts(capsys): args = "generate -b ubuntu:17.04 -p apt --no-check-urls {}" main(args.format('--user=neuro').split()) out, _ = capsys.readouterr() assert "USER neuro" in out main(args.format('--add path/to/file.txt /tmp/file.txt').split()) out, _ = capsys.readouterr() assert 'ADD ["path/to/file.txt", "/tmp/file.txt"]' in out main(args.format('--copy path/to/file.txt /tmp/file.txt').split()) out, _ = capsys.readouterr() assert 'COPY ["path/to/file.txt", "/tmp/file.txt"]' in out main(args.format('--env KEY=VAL KEY2=VAL').split()) out, _ = capsys.readouterr() assert 'ENV KEY="VAL" \\' in out assert ' KEY2="VAL"' in out main(args.format('--expose 1230 1231').split()) out, _ = capsys.readouterr() assert "EXPOSE 1230 1231" in out main(args.format('--workdir /home').split()) out, _ = capsys.readouterr() assert "WORKDIR /home" in out main(args.format('--install vi').split()) out, _ = capsys.readouterr() assert "vi" in out main(args.format('--instruction RUNecho').split()) out, _ = capsys.readouterr() assert "RUNecho" in out def test_generate_from_json(capsys, tmpdir): import json cmd = "generate -b debian:stretch -p apt --c3d version=1.0.0" main(cmd.split()) true, _ = capsys.readouterr() specs = {'check_urls': True, 'generation_timestamp': '2017-08-31 21:49:04', 'instructions': [['base', 'debian:stretch'], ['c3d', {'version': '1.0.0'}]], 'neurodocker_version': '0.2.0-18-g9227b17', 'pkg_manager': 'apt'} str_specs = json.dumps(specs) filepath = tmpdir.join("specs.json") filepath.write(str_specs) gen_cmd = "generate --file {}".format(filepath) main(gen_cmd.split()) test, _ = capsys.readouterr() # These indices chop off the header (with timestamp) and the layer that # saves to JSON (with timestamp). 
sl = slice(8, -19) assert true.split('\n')[sl] == test.split('\n')[sl] def test_generate_no_print(capsys): args = ['generate', '-b', 'ubuntu:17.04', '-p', 'apt', '--no-check-urls'] main(args) out, _ = capsys.readouterr() assert "FROM" in out and "RUN" in out args.append('--no-print-df') main(args) out, _ = capsys.readouterr() assert not out def test_generate_save(tmpdir): outfile = tmpdir.join("test.txt") args = ['generate', '-b', 'ubuntu:17.04', '-p', 'apt', '--mrtrix3', 'use_binaries=false', '--no-print-df', '-o', outfile.strpath, '--no-check-urls'] main(args) assert outfile.read(), "saved Dockerfile is empty" assert "git clone https://github.com/MRtrix3/mrtrix3.git" in outfile.read()
[((104, 16, 104, 33), 'json.dumps', 'json.dumps', ({(104, 27, 104, 32): 'specs'}, {}), '(specs)', False, 'import json\n'), ((120, 4, 120, 14), 'neurodocker.neurodocker.main', 'main', ({(120, 9, 120, 13): 'args'}, {}), '(args)', False, 'from neurodocker.neurodocker import create_parser, parse_args, main\n'), ((125, 4, 125, 14), 'neurodocker.neurodocker.main', 'main', ({(125, 9, 125, 13): 'args'}, {}), '(args)', False, 'from neurodocker.neurodocker import create_parser, parse_args, main\n'), ((135, 4, 135, 14), 'neurodocker.neurodocker.main', 'main', ({(135, 9, 135, 13): 'args'}, {}), '(args)', False, 'from neurodocker.neurodocker import create_parser, parse_args, main\n'), ((39, 9, 39, 34), 'pytest.raises', 'pytest.raises', ({(39, 23, 39, 33): 'SystemExit'}, {}), '(SystemExit)', False, 'import pytest\n'), ((43, 9, 43, 34), 'pytest.raises', 'pytest.raises', ({(43, 23, 43, 33): 'SystemExit'}, {}), '(SystemExit)', False, 'import pytest\n'), ((47, 9, 47, 34), 'pytest.raises', 'pytest.raises', ({(47, 23, 47, 33): 'SystemExit'}, {}), '(SystemExit)', False, 'import pytest\n'), ((48, 8, 48, 14), 'neurodocker.neurodocker.main', 'main', ({}, {}), '()', False, 'from neurodocker.neurodocker import create_parser, parse_args, main\n'), ((51, 9, 51, 34), 'pytest.raises', 'pytest.raises', ({(51, 23, 51, 33): 'ValueError'}, {}), '(ValueError)', False, 'import pytest\n')]
tmichalak/prjuray
fuzzers/011-cle-ffconfig/generate.py
53f3c94b58ffc6d405ac20a3b340ae726717ed47
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 The Project U-Ray Authors.
#
# Use of this source code is governed by a ISC-style
# license that can be found in the LICENSE file or at
# https://opensource.org/licenses/ISC
#
# SPDX-License-Identifier: ISC
'''
FDCE Primitive: D Flip-Flop with Clock Enable and Asynchronous Clear
FDPE Primitive: D Flip-Flop with Clock Enable and Asynchronous Preset
FDRE Primitive: D Flip-Flop with Clock Enable and Synchronous Reset
FDSE Primitive: D Flip-Flop with Clock Enable and Synchronous Set
LDCE Primitive: Transparent Data Latch with Asynchronous Clear and Gate Enable
LDPE Primitive: Transparent Data Latch with Asynchronous Preset and Gate Enable
'''

from prims import isff, isl

from utils.segmaker import Segmaker

segmk = Segmaker("design.bits", bits_per_word=16)


def loadtop():
    '''
    i,prim,loc,bel
    0,FDPE,SLICE_X12Y100,C5FF
    1,FDPE,SLICE_X15Y100,A5FF
    2,FDPE_1,SLICE_X16Y100,B5FF
    3,LDCE_1,SLICE_X17Y100,BFF
    '''
    f = open('top.txt', 'r')
    f.readline()
    ret = {}
    for l in f:
        i, prim, loc, bel, init = l.split(",")
        i = int(i)
        init = int(init)
        ret[loc] = (i, prim, loc, bel, init)
    return ret


top = loadtop()


def vs2i(s):
    return {"1'b0": 0, "1'b1": 1}[s]


print("Loading tags from design.txt")
with open("design.txt", "r") as f:
    for line in f:
        '''
        puts $fp "$type $tile $grid_x $grid_y $ff $bel_type $used $usedstr"
        CLEM CLEM_X10Y137 30 13 SLICE_X13Y137/AFF REG_INIT 1 FDRE
        CLEM CLEM_X10Y137 30 13 SLICE_X12Y137/D2FF FF_INIT 0
        '''
        line = line.split()
        tile_type = line[0]
        tile_name = line[1]
        grid_x = line[2]
        grid_y = line[3]
        # Other code uses BEL name
        # SLICE_X12Y137/D2FF
        site_ff_name = line[4]
        site, ff_name = site_ff_name.split('/')
        ff_type = line[5]
        used = int(line[6])

        cel_prim = None
        cel_name = None
        if used:
            cel_name = line[7]
            cel_prim = line[8]
            cinv = int(line[9])
            init = vs2i(line[10])

        # A B C D E F G H
        which = ff_name[0]
        # LUT6 vs LUT5 FF
        is2 = '2' in ff_name

        if used:
            segmk.add_site_tag(site, "%s.ZINI" % ff_name, 1 ^ init)
            '''
            On name:
            The primitives you listed have a control input to set the FF value to zero (clear/reset),
            the other three primitives have a control input that sets the FF value to one.

            Z => inversion
            '''
            segmk.add_site_tag(site, "%s.ZRST" % ff_name,
                               cel_prim in ('FDRE', 'FDCE', 'LDCE'))

segmk.compile()
segmk.write()
[((24, 8, 24, 49), 'utils.segmaker.Segmaker', 'Segmaker', (), '', False, 'from utils.segmaker import Segmaker\n')]
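In generate.py above, the Z prefix on tag names (ZINI, ZRST) marks bits that store the inverted value of the underlying attribute, which is why the script emits 1 ^ init rather than init. A trivial sketch of that convention (not part of the fuzzer):

# The stored ZINI bit is the inverted INIT value.
for init in (0, 1):
    print("INIT=%d -> ZINI=%d" % (init, 1 ^ init))
# INIT=0 -> ZINI=1
# INIT=1 -> ZINI=0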
JamesBrofos/Thresholds-in-Hamiltonian-Monte-Carlo
hmc/integrators/states/riemannian_leapfrog_state.py
7ee1b530db0eb536666dbc872fbf8200e53dd49b
from typing import Callable

import numpy as np

from hmc.integrators.states.leapfrog_state import LeapfrogState
from hmc.integrators.fields import riemannian
from hmc.linalg import solve_psd


class RiemannianLeapfrogState(LeapfrogState):
    """The Riemannian leapfrog state uses the Fisher information matrix to
    provide a position-dependent Riemannian metric. As such, computing the
    gradients of the Hamiltonian requires higher derivatives of the metric,
    which vanish in the Euclidean case.

    """
    def __init__(self,
                 position: np.ndarray,
                 momentum: np.ndarray):
        super().__init__(position, momentum)
        self._jac_metric: np.ndarray
        self._grad_logdet_metric: np.ndarray

    @property
    def requires_update(self) -> bool:
        o = self.log_posterior is None or \
            self.grad_log_posterior is None or \
            self.metric is None or \
            self.inv_metric is None or \
            self.jac_metric is None or \
            self.grad_logdet_metric is None
        return o

    @property
    def jac_metric(self):
        return self._jac_metric

    @jac_metric.setter
    def jac_metric(self, value):
        self._jac_metric = value

    @jac_metric.deleter
    def jac_metric(self):
        del self._jac_metric

    @property
    def grad_logdet_metric(self):
        return self._grad_logdet_metric

    @grad_logdet_metric.setter
    def grad_logdet_metric(self, value):
        self._grad_logdet_metric = value

    @grad_logdet_metric.deleter
    def grad_logdet_metric(self):
        del self._grad_logdet_metric

    def update(self, auxiliaries: Callable):
        num_dims = len(self.position)
        log_posterior, grad_log_posterior, metric, jac_metric = auxiliaries(self.position)
        jac_metric = np.swapaxes(jac_metric, 0, -1)
        inv_metric, sqrtm_metric = solve_psd(metric, return_chol=True)
        grad_logdet_metric = riemannian.grad_logdet(inv_metric, jac_metric, num_dims)
        self.log_posterior = log_posterior
        self.grad_log_posterior = grad_log_posterior
        self.metric = metric
        self.sqrtm_metric = sqrtm_metric
        self.inv_metric = inv_metric
        self.jac_metric = jac_metric
        self.grad_logdet_metric = grad_logdet_metric
        self.velocity = riemannian.velocity(inv_metric, self.momentum)
        self.force = riemannian.force(self.velocity, grad_log_posterior, jac_metric, grad_logdet_metric)

    def clear(self):
        super().clear()
        del self.jac_metric
        del self.grad_logdet_metric
        del self.metric
        del self.inv_metric
        del self.logdet_metric
        del self.sqrtm_metric
[((61, 21, 61, 51), 'numpy.swapaxes', 'np.swapaxes', ({(61, 33, 61, 43): 'jac_metric', (61, 45, 61, 46): '0', (61, 48, 61, 50): '-1'}, {}), '(jac_metric, 0, -1)', True, 'import numpy as np\n'), ((62, 35, 62, 70), 'hmc.linalg.solve_psd', 'solve_psd', (), '', False, 'from hmc.linalg import solve_psd\n'), ((63, 29, 63, 85), 'hmc.integrators.fields.riemannian.grad_logdet', 'riemannian.grad_logdet', ({(63, 52, 63, 62): 'inv_metric', (63, 64, 63, 74): 'jac_metric', (63, 76, 63, 84): 'num_dims'}, {}), '(inv_metric, jac_metric, num_dims)', False, 'from hmc.integrators.fields import riemannian\n'), ((71, 24, 71, 70), 'hmc.integrators.fields.riemannian.velocity', 'riemannian.velocity', ({(71, 44, 71, 54): 'inv_metric', (71, 56, 71, 69): 'self.momentum'}, {}), '(inv_metric, self.momentum)', False, 'from hmc.integrators.fields import riemannian\n'), ((72, 21, 72, 104), 'hmc.integrators.fields.riemannian.force', 'riemannian.force', ({(72, 38, 72, 51): 'self.velocity', (72, 53, 72, 71): 'grad_log_posterior', (72, 73, 72, 83): 'jac_metric', (72, 85, 72, 103): 'grad_logdet_metric'}, {}), '(self.velocity, grad_log_posterior, jac_metric,\n grad_logdet_metric)', False, 'from hmc.integrators.fields import riemannian\n')]
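RiemannianLeapfrogState.update above delegates to riemannian.grad_logdet, which evaluates the identity d/dq_i log det G(q) = trace(G^{-1} dG/dq_i) from the metric Jacobian. A self-contained NumPy sketch (not part of the repository; the diagonal metric is made up purely for illustration) that checks this identity against central finite differences:

import numpy as np

def metric(q):
    # Toy position-dependent SPD metric: diagonal with entries 1 + q_i**2.
    return np.diag(1.0 + q ** 2)

def grad_logdet_fd(q, eps=1e-5):
    # Central finite differences of log|det G(q)| in each coordinate.
    grad = np.empty(len(q))
    for i in range(len(q)):
        dq = np.zeros(len(q))
        dq[i] = eps
        plus = np.linalg.slogdet(metric(q + dq))[1]
        minus = np.linalg.slogdet(metric(q - dq))[1]
        grad[i] = (plus - minus) / (2.0 * eps)
    return grad

q = np.array([0.3, -1.2])
# For this metric, log|det G| = sum_i log(1 + q_i**2), so the identity
# d/dq_i log|det G| = trace(G^{-1} dG/dq_i) reduces to 2 * q_i / (1 + q_i**2).
analytic = 2.0 * q / (1.0 + q ** 2)
print(np.allclose(analytic, grad_logdet_fd(q), atol=1e-6))  # -> True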
StuartLiam/DroneNavigationOnboard
MultirangerTest.py
11ac6a301dfc72b15e337ddf09f5ddc79265a03f
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
#     ||          ____  _ __
#  +------+      / __ )(_) /_______________ _____  ___
#  | 0xBC |     / __  / / __/ ___/ ___/ __ `/_  / / _ \
#  +------+    / /_/ / / /_/ /__/ /  / /_/ / / /_/  __/
#   ||  ||    /_____/_/\__/\___/_/   \__,_/ /___/\___/
#
#  Copyright (C) 2017 Bitcraze AB
#
#  Crazyflie Python Library
#
#  This program is free software; you can redistribute it and/or
#  modify it under the terms of the GNU General Public License
#  as published by the Free Software Foundation; either version 2
#  of the License, or (at your option) any later version.
#
#  This program is distributed in the hope that it will be useful,
#  but WITHOUT ANY WARRANTY; without even the implied warranty of
#  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#  GNU General Public License for more details.
#  You should have received a copy of the GNU General Public License
#  along with this program; if not, write to the Free Software
#  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
#  MA 02110-1301, USA.
"""
Example scipts that allows a user to "push" the Crazyflie 2.0 around
using your hands while it's hovering.

This examples uses the Flow and Multi-ranger decks to measure distances
in all directions and tries to keep away from anything that comes closer
than 0.2m by setting a velocity in the opposite direction.

The demo is ended by either pressing Ctrl-C or by holding your hand above the
Crazyflie.

For the example to run the following hardware is needed:
 * Crazyflie 2.0
 * Crazyradio PA
 * Flow deck
 * Multiranger deck
"""
import logging
import sys
import time

import cflib.crtp
from cflib.crazyflie import Crazyflie
from cflib.crazyflie.syncCrazyflie import SyncCrazyflie
from cflib.positioning.motion_commander import MotionCommander
from cflib.utils.multiranger import Multiranger
import matplotlib.pyplot as plt
from matplotlib.pyplot import figure
import matplotlib.patches as patches

URI = 'radio://0/80/2M'

if len(sys.argv) > 1:
    URI = sys.argv[1]

# Only output errors from the logging framework
logging.basicConfig(level=logging.ERROR)


def is_close(range):
    MIN_DISTANCE = 0.2  # m

    if range is None:
        return False
    else:
        return range < MIN_DISTANCE


if __name__ == '__main__':
    # Initialize the low-level drivers (don't list the debug drivers)
    cflib.crtp.init_drivers(enable_debug_driver=False)

    rangeArray = []

    cf = Crazyflie(rw_cache='./cache')
    with SyncCrazyflie(URI, cf=cf) as scf:
        with MotionCommander(scf) as motion_commander:
            with Multiranger(scf) as multiranger:
                motion_commander.start_turn_left(90)
                rangeArray.append(multiranger.front)
                time.sleep(0.05)

    plt.plot(rangeArray)
[((63, 0, 63, 40), 'logging.basicConfig', 'logging.basicConfig', (), '', False, 'import logging\n'), ((81, 9, 81, 38), 'cflib.crazyflie.Crazyflie', 'Crazyflie', (), '', False, 'from cflib.crazyflie import Crazyflie\n'), ((90, 4, 90, 24), 'matplotlib.pyplot.plot', 'plt.plot', ({(90, 13, 90, 23): 'rangeArray'}, {}), '(rangeArray)', True, 'import matplotlib.pyplot as plt\n'), ((82, 9, 82, 34), 'cflib.crazyflie.syncCrazyflie.SyncCrazyflie', 'SyncCrazyflie', (), '', False, 'from cflib.crazyflie.syncCrazyflie import SyncCrazyflie\n'), ((83, 13, 83, 33), 'cflib.positioning.motion_commander.MotionCommander', 'MotionCommander', ({(83, 29, 83, 32): 'scf'}, {}), '(scf)', False, 'from cflib.positioning.motion_commander import MotionCommander\n'), ((84, 17, 84, 33), 'cflib.utils.multiranger.Multiranger', 'Multiranger', ({(84, 29, 84, 32): 'scf'}, {}), '(scf)', False, 'from cflib.utils.multiranger import Multiranger\n'), ((88, 16, 88, 32), 'time.sleep', 'time.sleep', ({(88, 27, 88, 31): '(0.05)'}, {}), '(0.05)', False, 'import time\n')]