max_stars_repo_path (string, 3-269 chars) | max_stars_repo_name (string, 4-119 chars) | max_stars_count (int64, 0-191k) | id (string, 1-7 chars) | content (string, 6-1.05M chars) | score (float64, 0.23-5.13) | int_score (int64, 0-5) |
---|---|---|---|---|---|---
stp_core/common/logging/handlers.py | andkononykhin/plenum | 148 | 5700 | <reponame>andkononykhin/plenum<filename>stp_core/common/logging/handlers.py
import logging
class CallbackHandler(logging.Handler):
def __init__(self, typestr, default_tags, callback, override_tags):
"""
Initialize the handler.
"""
super().__init__()
self.callback = callback
self.tags = default_tags
self.update_tags(override_tags or {})
self.typestr = typestr
def update_tags(self, override_tags):
self.tags.update(override_tags)
def emit(self, record):
"""
Passes the log record back to the CLI for rendering
"""
should_cb = None
attr_val = None
if hasattr(record, self.typestr):
attr_val = getattr(record, self.typestr)
should_cb = bool(attr_val)
if should_cb is None and record.levelno >= logging.INFO:
should_cb = True
if hasattr(record, 'tags'):
for t in record.tags:
if t in self.tags:
if self.tags[t]:
should_cb = True
continue
else:
should_cb = False
break
if should_cb:
self.callback(record, attr_val)
class CliHandler(CallbackHandler):
def __init__(self, callback, override_tags=None):
default_tags = {
"add_replica": True
}
super().__init__(typestr="cli",
default_tags=default_tags,
callback=callback,
override_tags=override_tags)
class DemoHandler(CallbackHandler):
def __init__(self, callback, override_tags=None):
default_tags = {
"add_replica": True
}
super().__init__(typestr="demo",
default_tags=default_tags,
callback=callback,
override_tags=override_tags)
class TestingHandler(logging.Handler):
def __init__(self, tester):
"""
Initialize the handler.
"""
super().__init__()
self.tester = tester
def emit(self, record):
"""
Captures a record.
"""
self.tester(record)
| 2.125 | 2 |
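The handlers above route log records to a user-supplied callback instead of a stream. A minimal usage sketch (assuming the plenum package is installed so the module is importable): records at INFO and above are forwarded by default, and lower-level records are forwarded only when logged with an explicit `extra={"cli": True}` attribute.

```python
import logging
from stp_core.common.logging.handlers import CliHandler

def render(record, attr_val):
    # attr_val is the value passed via extra={"cli": ...}, or None for plain records
    print("[CLI]", record.getMessage(), attr_val)

logger = logging.getLogger("demo")
logger.setLevel(logging.DEBUG)
logger.addHandler(CliHandler(callback=render))

logger.info("forwarded: level >= INFO")
logger.debug("forwarded: explicit cli flag", extra={"cli": True})
logger.debug("dropped: no flag and below INFO")
```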
blog/migrations/__init__.py | Amohammadi2/django-SPA-blog | 2 | 5701 | # you just need to add some information here
| 1.132813 | 1 |
amocrm_asterisk_ng/crm/amocrm/kernel/calls/call_records/file_converters/core/__init__.py | iqtek/amocrn_asterisk_ng | 0 | 5702 | from .IFileConverter import IFileConverter
| 1.140625 | 1 |
tests/blueprint/test_decorators.py | cuenca-mx/agave | 3 | 5703 | <reponame>cuenca-mx/agave
from functools import wraps
from agave.blueprints.decorators import copy_attributes
def i_am_test(func):
@wraps(func)
def wrapper(*args, **kwargs):
return func(*args, **kwargs)
wrapper.i_am_test = True
return wrapper
class TestResource:
@i_am_test
def retrieve(self) -> str:
return 'hello'
def test_copy_properties_from() -> None:
def retrieve():
...
assert not hasattr(retrieve, 'i_am_test')
retrieve = copy_attributes(TestResource)(retrieve)
assert hasattr(retrieve, 'i_am_test')
| 2.40625 | 2 |
tools/python/utils/config_parser.py | hanhan9449/mace | 1 | 5704 | # Copyright 2019 The MACE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
import os
import copy
import yaml
from enum import Enum
from utils.util import mace_check
from utils.util import MaceLogger
from py_proto import mace_pb2
CPP_KEYWORDS = [
'alignas', 'alignof', 'and', 'and_eq', 'asm', 'atomic_cancel',
'atomic_commit', 'atomic_noexcept', 'auto', 'bitand', 'bitor',
'bool', 'break', 'case', 'catch', 'char', 'char16_t', 'char32_t',
'class', 'compl', 'concept', 'const', 'constexpr', 'const_cast',
'continue', 'co_await', 'co_return', 'co_yield', 'decltype', 'default',
'delete', 'do', 'double', 'dynamic_cast', 'else', 'enum', 'explicit',
'export', 'extern', 'false', 'float', 'for', 'friend', 'goto', 'if',
'import', 'inline', 'int', 'long', 'module', 'mutable', 'namespace',
'new', 'noexcept', 'not', 'not_eq', 'nullptr', 'operator', 'or', 'or_eq',
'private', 'protected', 'public', 'register', 'reinterpret_cast',
'requires', 'return', 'short', 'signed', 'sizeof', 'static',
'static_assert', 'static_cast', 'struct', 'switch', 'synchronized',
'template', 'this', 'thread_local', 'throw', 'true', 'try', 'typedef',
'typeid', 'typename', 'union', 'unsigned', 'using', 'virtual', 'void',
'volatile', 'wchar_t', 'while', 'xor', 'xor_eq', 'override', 'final',
'transaction_safe', 'transaction_safe_dynamic', 'if', 'elif', 'else',
'endif', 'defined', 'ifdef', 'ifndef', 'define', 'undef', 'include',
'line', 'error', 'pragma',
]
def sanitize_load(s):
# do not let yaml parse ON/OFF to boolean
for w in ["ON", "OFF", "on", "off"]:
s = re.sub(r":\s+" + w + "$", r": '" + w + "'", s)
# sub ${} to env value
s = re.sub(r"\${(\w+)}", lambda x: os.environ[x.group(1)], s)
return yaml.load(s)
def parse(path):
with open(path) as f:
config = sanitize_load(f.read())
return config
def parse_device_info(path):
conf = parse(path)
return conf["devices"]
class ModelKeys(object):
platform = "platform"
runtime = "runtime"
models = 'models'
graph_optimize_options = "graph_optimize_options"
input_tensors = "input_tensors"
input_shapes = "input_shapes"
input_data_types = "input_data_types"
input_data_formats = "input_data_formats"
input_ranges = "input_ranges"
output_tensors = "output_tensors"
output_shapes = "output_shapes"
output_data_types = "output_data_types"
output_data_formats = "output_data_formats"
check_tensors = "check_tensors"
check_shapes = "check_shapes"
model_file_path = "model_file_path"
model_sha256_checksum = "model_sha256_checksum"
weight_file_path = "weight_file_path"
weight_sha256_checksum = "weight_sha256_checksum"
quantize_range_file = "quantize_range_file"
quantize = "quantize"
quantize_schema = "quantize_schema"
quantize_large_weights = "quantize_large_weights"
quantize_stat = "quantize_stat"
change_concat_ranges = "change_concat_ranges"
winograd = "winograd"
cl_mem_type = "cl_mem_type"
data_type = "data_type"
subgraphs = "subgraphs"
validation_inputs_data = "validation_inputs_data"
class DataFormat(Enum):
NONE = 0
NHWC = 1
NCHW = 2
HWIO = 100
OIHW = 101
HWOI = 102
OHWI = 103
AUTO = 1000
def parse_data_format(str):
str = str.upper()
mace_check(str in [e.name for e in DataFormat],
"unknown data format %s" % str)
return DataFormat[str]
class DeviceType(Enum):
CPU = 0
GPU = 2
HEXAGON = 3
HTA = 4
APU = 5
CPU_GPU = 100
DEVICE_MAP = {
"cpu": DeviceType.CPU,
"gpu": DeviceType.GPU,
"hexagon": DeviceType.HEXAGON,
"dsp": DeviceType.HEXAGON,
"hta": DeviceType.HTA,
"apu": DeviceType.APU,
"cpu+gpu": DeviceType.CPU_GPU
}
def parse_device_type(str):
mace_check(str in DEVICE_MAP, "unknown device %s" % str)
return DEVICE_MAP[str]
class Platform(Enum):
TENSORFLOW = 0
CAFFE = 1
ONNX = 2
MEGENGINE = 3
def parse_platform(str):
str = str.upper()
mace_check(str in [e.name for e in Platform],
"unknown platform %s" % str)
return Platform[str]
DATA_TYPE_MAP = {
'float32': mace_pb2.DT_FLOAT,
'int32': mace_pb2.DT_INT32,
}
def parse_data_type(str):
if str == "float32":
return mace_pb2.DT_FLOAT
elif str == "int32":
return mace_pb2.DT_INT32
else:
mace_check(False, "data type %s not supported" % str)
def parse_internal_data_type(str):
if str == 'fp32_fp32':
return mace_pb2.DT_FLOAT
elif str == 'bf16_fp32':
return mace_pb2.DT_BFLOAT16
else:
return mace_pb2.DT_HALF
def to_list(x):
if isinstance(x, list):
return x
else:
return [x]
def parse_int_array(xs):
if len(xs) == 0:
return [1]
return [int(x) for x in xs.split(",")]
def parse_float_array(xs):
return [float(x) for x in xs.split(",")]
def normalize_model_config(conf):
conf = copy.deepcopy(conf)
if ModelKeys.subgraphs in conf:
subgraph = conf[ModelKeys.subgraphs][0]
del conf[ModelKeys.subgraphs]
conf.update(subgraph)
conf[ModelKeys.platform] = parse_platform(conf[ModelKeys.platform])
conf[ModelKeys.runtime] = parse_device_type(conf[ModelKeys.runtime])
if ModelKeys.quantize in conf and conf[ModelKeys.quantize] == 1:
conf[ModelKeys.data_type] = mace_pb2.DT_FLOAT
else:
if ModelKeys.data_type in conf:
conf[ModelKeys.data_type] = parse_internal_data_type(
conf[ModelKeys.data_type])
else:
conf[ModelKeys.data_type] = mace_pb2.DT_HALF
# parse input
conf[ModelKeys.input_tensors] = to_list(conf[ModelKeys.input_tensors])
conf[ModelKeys.input_tensors] = [str(i) for i in
conf[ModelKeys.input_tensors]]
input_count = len(conf[ModelKeys.input_tensors])
conf[ModelKeys.input_shapes] = [parse_int_array(shape) for shape in
to_list(conf[ModelKeys.input_shapes])]
mace_check(
len(conf[ModelKeys.input_shapes]) == input_count,
"input node count and shape count do not match")
input_data_types = [parse_data_type(dt) for dt in
to_list(conf.get(ModelKeys.input_data_types,
["float32"]))]
if len(input_data_types) == 1 and input_count > 1:
input_data_types = [input_data_types[0]] * input_count
mace_check(len(input_data_types) == input_count,
"the number of input_data_types should be "
"the same as input tensors")
conf[ModelKeys.input_data_types] = input_data_types
input_data_formats = [parse_data_format(df) for df in
to_list(conf.get(ModelKeys.input_data_formats,
["NHWC"]))]
if len(input_data_formats) == 1 and input_count > 1:
input_data_formats = [input_data_formats[0]] * input_count
mace_check(len(input_data_formats) == input_count,
"the number of input_data_formats should be "
"the same as input tensors")
conf[ModelKeys.input_data_formats] = input_data_formats
input_ranges = [parse_float_array(r) for r in
to_list(conf.get(ModelKeys.input_ranges,
["-1.0,1.0"]))]
if len(input_ranges) == 1 and input_count > 1:
input_ranges = [input_ranges[0]] * input_count
mace_check(len(input_ranges) == input_count,
"the number of input_ranges should be "
"the same as input tensors")
conf[ModelKeys.input_ranges] = input_ranges
# parse output
conf[ModelKeys.output_tensors] = to_list(conf[ModelKeys.output_tensors])
conf[ModelKeys.output_tensors] = [str(i) for i in
conf[ModelKeys.output_tensors]]
output_count = len(conf[ModelKeys.output_tensors])
conf[ModelKeys.output_shapes] = [parse_int_array(shape) for shape in
to_list(conf[ModelKeys.output_shapes])]
mace_check(len(conf[ModelKeys.output_tensors]) == output_count,
"output node count and shape count do not match")
output_data_types = [parse_data_type(dt) for dt in
to_list(conf.get(ModelKeys.output_data_types,
["float32"]))]
if len(output_data_types) == 1 and output_count > 1:
output_data_types = [output_data_types[0]] * output_count
mace_check(len(output_data_types) == output_count,
"the number of output_data_types should be "
"the same as output tensors")
conf[ModelKeys.output_data_types] = output_data_types
output_data_formats = [parse_data_format(df) for df in
to_list(conf.get(ModelKeys.output_data_formats,
["NHWC"]))]
if len(output_data_formats) == 1 and output_count > 1:
output_data_formats = [output_data_formats[0]] * output_count
mace_check(len(output_data_formats) == output_count,
"the number of output_data_formats should be "
"the same as output tensors")
conf[ModelKeys.output_data_formats] = output_data_formats
if ModelKeys.check_tensors in conf:
conf[ModelKeys.check_tensors] = to_list(conf[ModelKeys.check_tensors])
conf[ModelKeys.check_shapes] = [parse_int_array(shape) for shape in
to_list(conf[ModelKeys.check_shapes])]
mace_check(len(conf[ModelKeys.check_tensors]) == len(
conf[ModelKeys.check_shapes]),
"check tensors count and shape count do not match.")
MaceLogger.summary(conf)
return conf
| 1.664063 | 2 |
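For reference, a rough sketch of feeding a minimal hand-built model entry through `normalize_model_config`. The key names follow `ModelKeys` above, but the tensor names and shapes are illustrative assumptions rather than a real MACE deployment file, and the import assumes the MACE `tools/python` directory (with its generated `py_proto` bindings) is on `PYTHONPATH`.

```python
from utils.config_parser import ModelKeys, Platform, normalize_model_config

# Hypothetical single-model entry; real deployment YAML files carry more fields.
conf = {
    "platform": "tensorflow",
    "runtime": "cpu",
    "input_tensors": "input",
    "input_shapes": "1,224,224,3",
    "output_tensors": "output",
    "output_shapes": "1,1001",
}
norm = normalize_model_config(conf)
assert norm[ModelKeys.platform] == Platform.TENSORFLOW
assert norm[ModelKeys.input_shapes] == [[1, 224, 224, 3]]
```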
main_cross_testing_iseg.py | sami-ets/DeepNormalize | 1 | 5705 | # -*- coding: utf-8 -*-
# Copyright 2019 <NAME>. All Rights Reserved.
#
# Licensed under the MIT License;
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://opensource.org/licenses/MIT
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import logging
import multiprocessing
import numpy as np
import os
import random
import torch
import torch.backends.cudnn as cudnn
from kerosene.configs.configs import RunConfiguration, DatasetConfiguration
from kerosene.configs.parsers import YamlConfigurationParser
from kerosene.loggers.visdom import PlotType, PlotFrequency
from kerosene.loggers.visdom.config import VisdomConfiguration
from kerosene.loggers.visdom.visdom import VisdomLogger, VisdomData
from kerosene.training.trainers import ModelTrainerFactory
from samitorch.inputs.utils import augmented_sample_collate
from torch.utils.data import DataLoader
from deepNormalize.config.parsers import ArgsParserFactory, ArgsParserType
from deepNormalize.factories.customModelFactory import CustomModelFactory
from deepNormalize.factories.customTrainerFactory import TrainerFactory
from deepNormalize.inputs.datasets import iSEGSliceDatasetFactory, MRBrainSSliceDatasetFactory, ABIDESliceDatasetFactory
from deepNormalize.nn.criterions import CustomCriterionFactory
from deepNormalize.utils.constants import *
from deepNormalize.utils.image_slicer import ImageReconstructor
cudnn.benchmark = True
cudnn.enabled = True
np.random.seed(42)
random.seed(42)
if __name__ == '__main__':
# Basic settings
logging.basicConfig(level=logging.INFO)
torch.set_num_threads(multiprocessing.cpu_count())
torch.set_num_interop_threads(multiprocessing.cpu_count())
args = ArgsParserFactory.create_parser(ArgsParserType.MODEL_TRAINING).parse_args()
# Create configurations.
run_config = RunConfiguration(use_amp=args.use_amp, local_rank=args.local_rank, amp_opt_level=args.amp_opt_level)
model_trainer_configs, training_config = YamlConfigurationParser.parse(args.config_file)
if not isinstance(model_trainer_configs, list):
model_trainer_configs = [model_trainer_configs]
dataset_configs = YamlConfigurationParser.parse_section(args.config_file, "dataset")
dataset_configs = {k: DatasetConfiguration(v) for k, v, in dataset_configs.items()}
data_augmentation_config = YamlConfigurationParser.parse_section(args.config_file, "data_augmentation")
config_html = [training_config.to_html(), list(map(lambda config: config.to_html(), dataset_configs.values())),
list(map(lambda config: config.to_html(), model_trainer_configs))]
# Prepare the data.
train_datasets = list()
valid_datasets = list()
test_datasets = list()
reconstruction_datasets = list()
iSEG_train = None
iSEG_CSV = None
MRBrainS_train = None
MRBrainS_CSV = None
ABIDE_train = None
ABIDE_CSV = None
iSEG_augmentation_strategy = None
MRBrainS_augmentation_strategy = None
ABIDE_augmentation_strategy = None
# Initialize the model trainers
model_trainer_factory = ModelTrainerFactory(model_factory=CustomModelFactory(),
criterion_factory=CustomCriterionFactory())
model_trainers = model_trainer_factory.create(model_trainer_configs)
if not isinstance(model_trainers, list):
model_trainers = [model_trainers]
# Create datasets
if dataset_configs.get("iSEG", None) is not None:
iSEG_train, iSEG_valid, iSEG_test, iSEG_reconstruction = iSEGSliceDatasetFactory.create_train_valid_test(
source_dir=dataset_configs["iSEG"].path,
modalities=dataset_configs["iSEG"].modalities,
dataset_id=ISEG_ID,
test_size=dataset_configs["iSEG"].validation_split,
max_subjects=dataset_configs["iSEG"].max_subjects,
max_num_patches=dataset_configs["iSEG"].max_num_patches,
augment=dataset_configs["iSEG"].augment,
patch_size=dataset_configs["iSEG"].patch_size,
step=dataset_configs["iSEG"].step,
test_patch_size=dataset_configs["iSEG"].test_patch_size,
test_step=dataset_configs["iSEG"].test_step,
data_augmentation_config=data_augmentation_config)
train_datasets.append(iSEG_train)
valid_datasets.append(iSEG_valid)
reconstruction_datasets.append(iSEG_reconstruction)
if dataset_configs.get("MRBrainS", None) is not None:
MRBrainS_train, MRBrainS_valid, MRBrainS_test, MRBrainS_reconstruction = MRBrainSSliceDatasetFactory.create_train_valid_test(
source_dir=dataset_configs["MRBrainS"].path,
modalities=dataset_configs["MRBrainS"].modalities,
dataset_id=MRBRAINS_ID,
test_size=dataset_configs["MRBrainS"].validation_split,
max_subjects=dataset_configs["MRBrainS"].max_subjects,
max_num_patches=dataset_configs["MRBrainS"].max_num_patches,
augment=dataset_configs["MRBrainS"].augment,
patch_size=dataset_configs["MRBrainS"].patch_size,
step=dataset_configs["MRBrainS"].step,
test_patch_size=dataset_configs["MRBrainS"].test_patch_size,
test_step=dataset_configs["MRBrainS"].test_step,
data_augmentation_config=data_augmentation_config)
test_datasets.append(MRBrainS_test)
reconstruction_datasets.append(MRBrainS_reconstruction)
if dataset_configs.get("ABIDE", None) is not None:
ABIDE_train, ABIDE_valid, ABIDE_test, ABIDE_reconstruction = ABIDESliceDatasetFactory.create_train_valid_test(
source_dir=dataset_configs["ABIDE"].path,
modalities=dataset_configs["ABIDE"].modalities,
dataset_id=ABIDE_ID,
sites=dataset_configs["ABIDE"].sites,
max_subjects=dataset_configs["ABIDE"].max_subjects,
test_size=dataset_configs["ABIDE"].validation_split,
max_num_patches=dataset_configs["ABIDE"].max_num_patches,
augment=dataset_configs["ABIDE"].augment,
patch_size=dataset_configs["ABIDE"].patch_size,
step=dataset_configs["ABIDE"].step,
test_patch_size=dataset_configs["ABIDE"].test_patch_size,
test_step=dataset_configs["ABIDE"].test_step,
data_augmentation_config=data_augmentation_config)
train_datasets.append(ABIDE_train)
valid_datasets.append(ABIDE_valid)
test_datasets.append(ABIDE_test)
reconstruction_datasets.append(ABIDE_reconstruction)
if len(list(dataset_configs.keys())) == 2:
segmentation_reconstructor = ImageReconstructor(
[iSEG_reconstruction._source_images[0], MRBrainS_reconstruction._source_images[0]],
patch_size=dataset_configs["iSEG"].test_patch_size,
reconstructed_image_size=(1, 256, 256, 192), step=dataset_configs["iSEG"].test_step,
models=[model_trainers[0]],
segment=True,
batch_size=8)
input_reconstructor = ImageReconstructor(
[iSEG_reconstruction._source_images[0], MRBrainS_reconstruction._source_images[0]],
patch_size=dataset_configs["iSEG"].test_patch_size,
reconstructed_image_size=(1, 256, 256, 192),
step=dataset_configs["iSEG"].test_step,
batch_size=50)
gt_reconstructor = ImageReconstructor(
[iSEG_reconstruction._target_images[0], MRBrainS_reconstruction._target_images[0]],
patch_size=dataset_configs["iSEG"].test_patch_size,
reconstructed_image_size=(1, 256, 256, 192),
step=dataset_configs["iSEG"].test_step,
is_ground_truth=True,
batch_size=50)
if dataset_configs["iSEG"].augment:
augmented_input_reconstructor = ImageReconstructor(
[iSEG_reconstruction._source_images[0], MRBrainS_reconstruction._source_images[0]],
patch_size=dataset_configs["iSEG"].test_patch_size,
reconstructed_image_size=(1, 256, 256, 192),
step=dataset_configs["iSEG"].test_step,
batch_size=50,
alpha=data_augmentation_config["test"]["bias_field"]["alpha"][0],
prob_bias=data_augmentation_config["test"]["bias_field"]["prob_bias"],
snr=data_augmentation_config["test"]["noise"]["snr"],
prob_noise=data_augmentation_config["test"]["noise"]["prob_noise"])
else:
augmented_input_reconstructor = None
augmented_normalized_input_reconstructor = None
else:
segmentation_reconstructor = ImageReconstructor(
[iSEG_reconstruction._source_images[0], MRBrainS_reconstruction._source_images[0],
ABIDE_reconstruction._source_images[0]],
patch_size=(1, 32, 32, 32),
reconstructed_image_size=(1, 256, 256, 192),
step=dataset_configs["iSEG"].test_step,
models=[model_trainers[0]],
normalize_and_segment=True,
batch_size=4)
input_reconstructor = ImageReconstructor(
[iSEG_reconstruction._source_images[0], MRBrainS_reconstruction._source_images[0],
ABIDE_reconstruction._source_images[0]],
patch_size=(1, 32, 32, 32),
reconstructed_image_size=(1, 256, 256, 192),
step=dataset_configs["iSEG"].test_step,
batch_size=50)
gt_reconstructor = ImageReconstructor(
[iSEG_reconstruction._target_images[0], MRBrainS_reconstruction._target_images[0],
ABIDE_reconstruction._target_images[0]],
patch_size=(1, 32, 32, 32),
reconstructed_image_size=(1, 256, 256, 192),
step=dataset_configs["iSEG"].test_step,
batch_size=50,
is_ground_truth=True)
if dataset_configs["iSEG"].augment:
augmented_input_reconstructor = ImageReconstructor(
[iSEG_reconstruction._source_images[0], MRBrainS_reconstruction._source_images[0],
ABIDE_reconstruction._source_images[0]],
patch_size=(1, 32, 32, 32),
reconstructed_image_size=(1, 256, 256, 192),
step=dataset_configs["iSEG"].test_step,
batch_size=50,
alpha=data_augmentation_config["test"]["bias_field"]["alpha"][0],
prob_bias=data_augmentation_config["test"]["bias_field"]["prob_bias"],
snr=data_augmentation_config["test"]["noise"]["snr"],
prob_noise=data_augmentation_config["test"]["noise"]["prob_noise"])
else:
augmented_input_reconstructor = None
augmented_normalized_input_reconstructor = None
# Concat datasets.
if len(dataset_configs) > 1:
train_dataset = torch.utils.data.ConcatDataset(train_datasets)
valid_dataset = torch.utils.data.ConcatDataset(valid_datasets)
test_dataset = torch.utils.data.ConcatDataset(test_datasets)
else:
train_dataset = train_datasets[0]
valid_dataset = valid_datasets[0]
test_dataset = test_datasets[0]
# Create loaders.
dataloaders = list(map(lambda dataset: DataLoader(dataset,
training_config.batch_size,
sampler=None,
shuffle=True,
num_workers=args.num_workers,
collate_fn=augmented_sample_collate,
drop_last=True,
pin_memory=True),
[train_dataset, valid_dataset, test_dataset]))
# Initialize the loggers.
visdom_config = VisdomConfiguration.from_yml(args.config_file, "visdom")
exp = args.config_file.split("/")[-3:]
if visdom_config.save_destination is not None:
save_folder = visdom_config.save_destination + os.path.join(exp[0], exp[1],
os.path.basename(
os.path.normpath(visdom_config.env)))
else:
save_folder = "saves/{}".format(os.path.basename(os.path.normpath(visdom_config.env)))
[os.makedirs("{}/{}".format(save_folder, model), exist_ok=True)
for model in
["Discriminator", "Generator", "Segmenter"]]
visdom_logger = VisdomLogger(visdom_config)
visdom_logger(VisdomData("Experiment", "Experiment Config", PlotType.TEXT_PLOT, PlotFrequency.EVERY_EPOCH, None,
config_html))
visdom_logger(VisdomData("Experiment", "Patch count", PlotType.BAR_PLOT, PlotFrequency.EVERY_EPOCH,
x=[len(iSEG_train) if iSEG_train is not None else 0,
len(MRBrainS_train) if MRBrainS_train is not None else 0,
len(ABIDE_train) if ABIDE_train is not None else 0],
y=["iSEG", "MRBrainS", "ABIDE"], params={"opts": {"title": "Patch count"}}))
trainer = TrainerFactory(training_config.trainer).create(training_config,
model_trainers,
dataloaders,
reconstruction_datasets,
None,
input_reconstructor,
segmentation_reconstructor,
augmented_input_reconstructor,
None,
gt_reconstructor,
run_config,
dataset_configs,
save_folder,
visdom_logger)
trainer.train(training_config.nb_epochs)
| 1.382813 | 1 |
docs/10.level3_demo_streaming/pc_server/server.py | FaBoPlatform/RobotCarAI | 10 | 5706 | <reponame>FaBoPlatform/RobotCarAI<gh_stars>1-10
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Receives OpenCV image data from the client, detects lanes, and sends back control commands
# Server: Jetson TX2
# Client: Jetson TX2/Raspberry Pi3 Docker
# 1. Run FFMPEG UDP streaming on the client: 10 FPS for AWS, 1 FPS for Jetson TX2
# 2. Start the server
# 3. Start the client
# Code changes needed for your environment:
# lib/camera.py: vid = cv2.VideoCapture() must be adapted to your environment
# lib/object_detection.py: /home/ubuntu/notebooks/github/SSD-Tensorflow/ must be adapted to your environment
'''
Python 3.6
Outgoing messages must be encoded with .encode('ascii'), .encode('utf-8'), etc.
The OpenCV BGR image data sent from the client is converted to 'ascii', so 'ascii' is used consistently here.
'''
print("wait. launching...")
import socket, select
import time
import cv2
import numpy as np
import os
import sys
import logging
import threading
from lib.functions import *
from lib.object_detection import ObjectDetection
from lib.opencv_lane_detection import LaneDetection
from lib.webcam import WebcamVideoStream
# Logging configuration
logging.basicConfig(level=logging.DEBUG,
format='[%(levelname)s] time:%(created).8f pid:%(process)d pn:%(processName)-10s tid:%(thread)d tn:%(threadName)-10s fn:%(funcName)-10s %(message)s',
)
# Flag controlling the analysis/sending thread
is_analyze_running = False
sock = None
out = None
# x, y extent in meters of the IPM-transformed image (including the black region)
X_METER=1.5
Y_METER=1
# Lane detection class
ld = None
# Object detection class
od = None
def do_analyze():
global is_analyze_running
global sock
global out
global X_METER
global Y_METER
global ld
global od
# Whether to save the video
IS_SAVE = True
OUTPUT_DIR ='./'
OUTPUT_FILENAME = 'received.avi'
HANDLE_ANGLE = 42
frame_counter = 0
fourcc = None
control = None
roi_vertices = None
ipm_vertices = None
speed = None
# Prepare the video capture
camera = WebcamVideoStream()
cols,rows,fps,fourcc = camera.init_webcam()
camera.start()
fps = 1
if IS_SAVE:
out = cv2.VideoWriter(os.path.join(OUTPUT_DIR, OUTPUT_FILENAME), int(fourcc), fps, (int(cols), int(rows)))
########################################
# Prepare lane detection
########################################
ld = LaneDetection(X_METER,Y_METER,cols=cols,rows=rows)
while is_analyze_running:
frame_start_time = time.time()
#time.sleep(0.2)
########################################
# Grab a frame
########################################
cv_bgr = camera.read()
frame_counter += 1
########################################
# Object detection
########################################
# Save the frame to the AVI file
if IS_SAVE:
out.write(cv_bgr)
rclasses,rscores,rbboxes = od.get_detection(cv_bgr)
print(rclasses,rscores,rbboxes)
if len(rclasses) > 0:
prediction_class = np.min(rclasses)
if prediction_class == 1:
# Stop sign detected
is_need_header_receive = True
control='0,0,'
sock.sendall(("CONTROL,"+ control).encode('ascii'))
continue
elif prediction_class == 2:
# Detected a '10' sign
speed = 40
elif prediction_class == 3:
# Detected a '20' sign
speed = 50
elif prediction_class == 4:
# Detected a '30' sign
speed = 60
else:
# No object detected
if speed is None:
speed = 40
handle_angle = 0
########################################
# Lane detection
########################################
ld.cv_bgr = cv_bgr
# Detect the lanes
try:
tilt1_deg,tilt2_deg,angle1_deg,angle2_deg,curve1_r,curve2_r, \
meters_from_center = ld.lane_detection()
except:
# Lane detection failed
is_need_header_receive = True
control='0,0,'
sock.sendall(("CONTROL,"+ control).encode('ascii'))
continue
########################################
# Adjust the speed
########################################
#if np.abs(angle2_deg) > np.abs(angle1_deg):
# speed = 50
#else:
# speed = 60
'''
Left/right sign conventions:
tilt_deg: negative = right, positive = left
angle_deg: positive = right, negative = left
meters_from_center: negative = right of center, positive = left of center
handle_angle: positive = right, negative = left
'''
########################################
# Adjust the steering angle
########################################
handle_angle = -1*tilt1_deg
if meters_from_center >= 0:
# On the left side of the lane
if np.abs(meters_from_center)*100 > 20:
# Very far to the left: turn right at full angle
handle_angle=HANDLE_ANGLE
elif np.abs(meters_from_center)*100 > 10:
if tilt2_deg > 0 :
# Left of center, left curve ahead: turn slightly right
handle_angle=HANDLE_ANGLE/2
else:
# Left of center, right curve ahead: turn right at full angle
handle_angle=HANDLE_ANGLE
else:
# On the right side of the lane
if np.abs(meters_from_center)*100 > 20:
# Very far to the right: turn left at full angle
handle_angle=-1*HANDLE_ANGLE
elif np.abs(meters_from_center)*100 > 10:
if tilt2_deg < 0 :
# Right of center, right curve ahead: turn slightly left
handle_angle=-1*HANDLE_ANGLE/2
else:
# Right of center, left curve ahead: turn left at full angle
handle_angle=-1*HANDLE_ANGLE
# Clamp to the operable steering range
if handle_angle > HANDLE_ANGLE:
handle_angle = HANDLE_ANGLE
if handle_angle < -1*HANDLE_ANGLE:
handle_angle = -1*HANDLE_ANGLE
# Send the vehicle control command
control=str(speed)+','+str(handle_angle)+','
print("speed={},handle_angle={},CONTROL,{}".format(speed,handle_angle,control))
sock.sendall(("CONTROL,"+ control).encode('ascii'))
frame_end_time = time.time()
print("FPS={}".format(round(1/(frame_end_time-frame_start_time),2)))
def main():
global is_analyze_running
global sock
global out
global ld
global od
# Network settings
HOST = '192.168.0.77' # Server IP Address
PORT = 6666 # Server TCP Port
#HOST = 'a32158c3da9f' # AWS Docker
#PORT = 8091 # AWS TCP Port
#HOST = '2204f9b0e871' # PC Docker
#PORT = 8091 # PC TCP Port
########################################
# Prepare the server socket
########################################
connected_clients_sockets = []
server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
server_socket.bind((HOST, PORT))
server_socket.listen(10)
connected_clients_sockets.append(server_socket)
# Whether a header still needs to be received. Once the header is received, parse the incoming data without passing it through encode('ascii')
is_need_header_receive = True
########################################
# Prepare object detection
########################################
od = ObjectDetection()
print("Server start")
try:
while True:
########################################
# Wait for incoming data
########################################
read_sockets, write_sockets, error_sockets = select.select(connected_clients_sockets, [], [])
for sock in read_sockets:
if sock == server_socket:
sockfd, client_address = server_socket.accept()
connected_clients_sockets.append(sockfd)
else:
# Headers sent from the client to the server must be 4096 bytes or less
packet = sock.recv(4096)
print(type(packet))
#
if is_need_header_receive:
print('header')
packet = packet.decode('ascii')
txt = str(packet)
if packet:
print('packet True')
if packet == 'START':
is_analyze_running = True
t = threading.Thread(target=do_analyze)
t.start()
elif packet.startswith('BYE'):
print('got BYE')
is_need_header_receive = True
is_analyze_running = False
sock.shutdown(socket.SHUT_RDWR)
sock.close()
connected_clients_sockets.remove(sock)
if out is not None:
out.release()
else:
print('client disconnect')
is_need_header_receive = True
is_analyze_running = False
sock.shutdown(socket.SHUT_RDWR)
sock.close()
connected_clients_sockets.remove(sock)
if out is not None:
out.release()
if not is_need_header_receive:
# Not reached in practice
print('body')
if packet:
print('packet True')
is_need_header_receive = True
else:
print('data finished')
is_need_header_receive = True
is_analyze_running = False
sock.shutdown(socket.SHUT_RDWR)
sock.close()
connected_clients_sockets.remove(sock)
if out is not None:
out.release()
except:
import traceback
traceback.print_exc()
finally:
is_need_header_receive = True
is_analyze_running = False
sock.shutdown(socket.SHUT_RDWR)
sock.close()
connected_clients_sockets.remove(sock)
server_socket.close()
if out is not None:
out.release()
if __name__ == '__main__':
main()
print("end server")
| 1.945313 | 2 |
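For context, a minimal sketch of the client side of the protocol used above: send the `START` header, then read `CONTROL,<speed>,<handle_angle>,` strings. The host and port are the values hard-coded in `main()`; the real client in this repository also streams video over FFMPEG UDP, which is omitted here.

```python
import socket

sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect(("192.168.0.77", 6666))
sock.sendall("START".encode("ascii"))  # ask the server to start its analysis thread
try:
    while True:
        packet = sock.recv(4096).decode("ascii")
        if packet.startswith("CONTROL,"):
            fields = packet.split(",")
            print("speed:", fields[1], "handle_angle:", fields[2])
finally:
    sock.sendall("BYE".encode("ascii"))
    sock.close()
```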
client.py | zackorndorff/revsync | 94 | 5707 | from collections import defaultdict
import json
import re
import redis
import threading
import time
import traceback
import uuid
import base64
import binascii
TTL = 2
hash_keys = ('cmd', 'user')
cmd_hash_keys = {
'comment': ('addr',),
'extra_comment': ('addr',),
'area_comment': ('addr',),
'rename': ('addr',),
'stackvar_renamed': ('addr', 'offset', 'name',),
'struc_created': ('struc_name', 'is_union',),
'struc_deleted': ('struc_name',),
'struc_renamed': ('old_name', 'new_name',),
'struc_member_created': ('struc_name', 'offset', 'member_name', 'size', 'flag',),
'struc_member_deleted': ('struc_name', 'offset',),
'struc_member_renamed': ('struc_name', 'offset', 'member_name',),
'struc_member_changed': ('struc_name', 'offset', 'size',),
}
key_dec = {
'c': 'cmd',
'a': 'addr',
'u': 'user',
't': 'text',
'i': 'uuid',
'b': 'blocks'
}
key_enc = dict((v, k) for k, v in key_dec.items())
nick_filter = re.compile(r'[^a-zA-Z0-9_\-]')
def decode(data):
d = json.loads(data)
return dict((key_dec.get(k, k), v) for k, v in d.items())
def dtokey(d):
return tuple(((k, v) for k, v in sorted(d.items()) if k not in ('user', 'ts', 'uuid')))
def remove_ttl(a):
now = time.time()
return [d for d in a if now - d[0] < TTL]
class Client:
def __init__(self, host, port, nick, password=None):
self.r = redis.StrictRedis(host=host, port=port, password=password, socket_connect_timeout=5)
self.r.info()
self.nick = nick_filter.sub('_', nick)
self.ps = {}
self.nolock = threading.Lock()
self.nosend = defaultdict(list)
self.uuid = str(base64.b64encode(binascii.unhexlify(uuid.uuid4().hex)).decode('ascii'))
def debounce(self, no, data):
dkey = dtokey(data)
now = time.time()
with self.nolock:
for data in no:
ts = data[0]
key = data[1:]
if dkey == key and now - ts < TTL:
no.remove(data)
return True
return False
def _sub_thread(self, ps, cb, key):
for item in ps.listen():
try:
if item['type'] == 'message':
data = decode(item['data'])
if 'user' in data:
data['user'] = nick_filter.sub('_', data['user'])
# reject our own messages
if data.get('uuid') == self.uuid:
continue
with self.nolock:
self.nosend[key] = remove_ttl(self.nosend[key])
self.nosend[key].append((time.time(),) + dtokey(data))
cb(key, data)
elif item['type'] == 'subscribe':
decoded = []
for data in self.r.lrange(key, 0, -1):
try:
decoded.append(decode(data))
except Exception:
print('error decoding history', data)
traceback.print_exc()
state = []
dedup = set()
for data in reversed(decoded):
cmd = data.get('cmd')
if cmd:
keys = hash_keys + cmd_hash_keys.get(cmd, ())
hashkey = tuple([str(data.get(k)) for k in keys])
if all(hashkey):
if hashkey in dedup:
continue
dedup.add(hashkey)
state.append(data)
for data in reversed(state):
try:
with self.nolock:
self.nosend[key].append((time.time(),) + dtokey(data))
cb(key, data, replay=True)
except Exception:
print('error replaying history', data)
traceback.print_exc()
else:
print('unknown redis push', item)
except Exception:
print('error processing item', item)
traceback.print_exc()
def join(self, key, cb):
ps = self.r.pubsub()
ps.subscribe(key)
t = threading.Thread(target=self._sub_thread, args=(ps, cb, key))
t.daemon = True
t.start()
self.ps[key] = ps
self.publish(key, {'cmd': 'join'}, perm=False)
def leave(self, key):
ps = self.ps.pop(key, None)
if ps:
ps.unsubscribe(key)
def publish(self, key, data, perm=True, send_uuid=True):
if self.debounce(self.nosend[key], data):
return
data['user'] = self.nick
data['ts'] = self.r.time()[0]
if send_uuid:
data['uuid'] = self.uuid
data = dict((key_enc.get(k, k), v) for k, v in data.items())
data = json.dumps(data, separators=(',', ':'), sort_keys=True)
if perm:
self.r.rpush(key, data)
self.r.publish(key, data)
def push(self, key, data, send_uuid=True):
if send_uuid:
data['uuid'] = self.uuid
data = dict((key_enc.get(k, k), v) for k, v in data.items())
data = json.dumps(data, separators=(',', ':'), sort_keys=True)
self.r.lpush(key, data)
| 2.171875 | 2 |
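A rough usage sketch for the client above, assuming a Redis server is reachable on localhost:6379; the import path, channel key, and published command are illustrative assumptions.

```python
from client import Client  # assumed import path for the module above

def on_message(key, data, replay=False):
    # Called for live messages, and with replay=True when history is replayed on join
    print(key, data.get("cmd"), data.get("user"), data.get("addr"))

c = Client(host="localhost", port=6379, nick="alice")
c.join("example_binary_key", on_message)   # subscribes and replays stored history
c.publish("example_binary_key", {"cmd": "rename", "addr": 0x401000, "text": "main"})
```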
ontask/condition/urls.py | pinheiroo27/ontask_b | 33 | 5708 | # -*- coding: utf-8 -*-
"""URLs to manipulate columns."""
from django.urls import path
from ontask.condition import views
app_name = 'condition'
urlpatterns = [
#
# FILTERS
#
path(
'<int:pk>/create_filter/',
views.FilterCreateView.as_view(),
name='create_filter'),
path('<int:pk>/edit_filter/', views.edit_filter, name='edit_filter'),
path('<int:pk>/delete_filter/', views.delete_filter, name='delete_filter'),
#
# CONDITIONS
#
path(
'<int:pk>/create_condition/',
views.ConditionCreateView.as_view(),
name='create_condition'),
path(
'<int:pk>/edit_condition/',
views.edit_condition,
name='edit_condition'),
path(
'<int:pk>/delete_condition/',
views.delete_condition,
name='delete_condition'),
# Clone the condition
path(
'<int:pk>/clone_condition/',
views.clone_condition,
name='clone_condition'),
path(
'<int:pk>/<int:action_pk>/clone_condition/',
views.clone_condition,
name='clone_condition'),
]
| 2.09375 | 2 |
VideoClassification/SegmentLevelClassifier/model.py | googleinterns/via-content-understanding | 1 | 5709 | """Copyright 2020 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Defines the architecture of the Video Classifier.
"""
import math
import tensorflow as tf
class NetVLAD(tf.keras.layers.Layer):
"""Applies NetVLAD to the input.
Args:
num_clusters: The number of clusters to use.
input_shape: 3D tensor denoting the input shape of the NetVLAD layer.
Input Shape:
3D tensor with shape: `(batch_size, time, feature_dim)`.
Output shape:
2D tensor with shape: `(batch_size, feature_dim * num_clusters)`.
"""
def __init__(self, num_clusters, input_shape, **kwargs):
super().__init__(**kwargs)
if num_clusters <= 0:
raise ValueError("`num_clusters` must be a positive integer: %i" % num_clusters)
self.num_clusters = num_clusters
feature_dim = input_shape[-1]
if not isinstance(feature_dim, int):
feature_dim = feature_dim.value
self.fc = tf.keras.layers.Dense(
units=self.num_clusters,
activation=tf.nn.softmax,
kernel_regularizer=tf.keras.regularizers.l2(1e-5),
name="vlad_fc" + str(num_clusters)
)
self.cluster_centers = self.add_weight(
shape=(1, feature_dim, self.num_clusters),
initializer=tf.keras.initializers.TruncatedNormal(
stddev=1.0 / math.sqrt(feature_dim)
),
trainable=True,
name="cluster_centers" + str(num_clusters)
)
self.feature_dim = feature_dim
self.max_frames = input_shape[-2]
def call(self, frames):
"""Apply the NetVLAD module to the given frames.
Args:
frames: A tensor with shape [batch_size, max_frames, feature_dim].
Returns:
vlad_out: A tensor with shape [batch_size, feature_dim * num_clusters].
Raises:
ValueError: If the `feature_dim` of input is not defined.
"""
feature_dim = self.feature_dim
max_frames = self.max_frames
frames = tf.reshape(frames, (-1, feature_dim))
activation = self.fc(frames)
activation = tf.reshape(activation, (-1, max_frames, self.num_clusters))
activation_sum = tf.math.reduce_sum(activation, axis=-2, keepdims=True)
cluster_activation = activation_sum * self.cluster_centers
frames = tf.reshape(frames, (-1, max_frames, feature_dim))
activation = tf.transpose(
tf.matmul(tf.transpose(activation, perm=(0, 2, 1)), frames), perm=(0, 2, 1)
)
vlad_out = activation - cluster_activation
vlad_out = tf.nn.l2_normalize(vlad_out, 1)
vlad_out = tf.reshape(vlad_out, (-1, feature_dim * self.num_clusters))
vlad_out = tf.nn.l2_normalize(vlad_out, 1)
return vlad_out
def compute_output_shape(self, input_shape):
input_shape = tf.TensorShape(input_shape).as_list()
return tf.TensorShape([input_shape[0], input_shape[-1] * self.num_clusters])
def get_config(self):
config = {"num_clusters": self.num_clusters}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
class ContextGating(tf.keras.layers.Layer):
"""Implements the Context Gating Layer from https://arxiv.org/abs/1706.06905
Input shape:
2D tensor with shape: `(batch_size, feature_dim)`.
Output shape:
2D tensor with shape: `(batch_size, feature_dim)`.
"""
def __init__(self, input_shape, **kwargs):
super(ContextGating, self).__init__(**kwargs)
feature_dim = input_shape[-1]
if not isinstance(feature_dim, int):
feature_dim = feature_dim.value
self.fc = tf.keras.layers.Dense(
units=feature_dim,
activation=tf.nn.sigmoid,
kernel_regularizer=tf.keras.regularizers.l2(1e-5),
)
def call(self, model_input):
"""Apply the ContextGating module to the given input.
Args:
model_input: A tensor with shape [batch_size, feature_dim].
Returns:
A tensor with shape [batch_size, feature_dim].
Raises:
ValueError: If the `feature_dim` of model_input is not defined.
"""
model_input.shape.assert_has_rank(2)
feature_dim = model_input.shape.as_list()[-1]
if feature_dim is None:
raise ValueError("Last dimension must be defined.")
context_gate = self.fc(model_input)
output = tf.math.multiply(context_gate, model_input)
return output
def compute_output_shape(self, input_shape):
return input_shape
def get_config(self):
base_config = super().get_config()
return dict(list(base_config.items()))
class MOELogistic(tf.keras.layers.Layer):
"""Implements a Mixture of Logistic Experts classifier.
Input shape:
2D tensor with shape: `(batch_size, feature_dim)`.
Output shape:
2D tensor with shape: `(batch_size, num_classes)`.
"""
def __init__(self, input_shape, num_classes, num_mixtures, **kwargs):
super(MOELogistic, self).__init__(**kwargs)
self.num_classes = num_classes
self.num_mixtures = num_mixtures
self.gate_fc = tf.keras.layers.Dense(
units=num_classes*(num_mixtures+1),
kernel_regularizer=tf.keras.regularizers.l2(1e-6),
)
self.expert_fc = tf.keras.layers.Dense(
units=num_classes*num_mixtures,
kernel_regularizer=tf.keras.regularizers.l2(1e-6),
)
def call(self, input):
"""Apply the MoE algorithm to the given input.
Args:
input: A tensor with shape [batch_size, feature_dim].
Returns:
A tensor with shape [batch_size, num_classes].
Raises:
ValueError: If the `feature_dim` of input is not defined.
"""
gate_activations = self.gate_fc(input)
expert_activations = self.expert_fc(input)
#Calculate the distribution across mixtures
gate_dist = tf.nn.softmax(tf.reshape(gate_activations, [-1, self.num_mixtures+1]))
expert_dist = tf.nn.sigmoid(tf.reshape(expert_activations, [-1, self.num_mixtures]))
probs = tf.reduce_sum(tf.math.multiply(gate_dist[:,:self.num_mixtures], expert_dist),1)
probs = tf.reshape(probs, [-1, self.num_classes])
return probs
def compute_output_shape(self, input_shape):
return (input_shape[0], self.num_classes)
def get_config(self):
base_config = super().get_config()
config = base_config.update({'number of classes': self.num_classes, 'number of mixtures': self.num_mixtures})
return config
class VideoClassifier:
"""The Video Classifier model, implemented according to the winning model from the Youtube-8M Challenge.
The model can be found here: https://arxiv.org/pdf/1706.06905.pdf
Arguments:
num_clusters: the number of clusters to be used for NetVLAD. The audio clusters will be num_clusters/2.
video_input_shape: shape of the input video features. Shape of [batch_size, num_samples, video_feature_dim].
audio_input_shape: shape of the input audio features. Shape of [batch_size, num_samples, audio_feature_dim].
Raises:
ValueError: If num_clusters is not divisible by 2.
ValueError: If the batch sizes of the audio_input_shape and video_input_shape do not match.
ValueError: If the number of samples of the audio_input_shape and video_input_shape do not match.
"""
def __init__(self, num_clusters, video_input_shape, audio_input_shape, num_classes, num_mixtures, fc_units, iterations, **kwargs):
super(VideoClassifier, self).__init__(**kwargs)
if num_clusters % 2 != 0:
raise ValueError("num_clusters must be divisible by 2.")
batch_size = video_input_shape[0]
if audio_input_shape[0] != batch_size:
raise ValueError("audio_input_shape[0] must equal video_input_shape[0]. Batch sizes must equal.")
if audio_input_shape[1] != video_input_shape[1]:
raise ValueError("audio_input_shape[1] must equal video_input_shape[1]. Number of samples must equal.")
self.num_frames = video_input_shape[1]
self.num_classes = num_classes
self.num_mixtures = num_mixtures
self.iterations = iterations
self.video_feature_dim = video_input_shape[2]
self.video_vlad = NetVLAD(num_clusters, input_shape=video_input_shape, name="video_vlad")
self.audio_vlad = NetVLAD(num_clusters//2, input_shape=audio_input_shape, name="audio_vlad")
#Relu6 is used as it is employed in the paper.
self.fc = tf.keras.layers.Dense(
units=fc_units,
activation=tf.nn.relu6,
kernel_regularizer=tf.keras.regularizers.l2(1e-5),
name="main_fc"
)
self.first_cg = ContextGating(input_shape=(batch_size, fc_units), name="first_cg")
self.moe = MOELogistic(input_shape=self.first_cg.compute_output_shape((batch_size, fc_units)), num_classes=self.num_classes, num_mixtures=self.num_mixtures, name="moe")
self.second_cg = ContextGating(input_shape=self.moe.compute_output_shape((batch_size, fc_units)), name="second_cg")
def build_model(self, input_shape, batch_size):
"""Perform one forward pass of the model.
Args:
model_input: input features of shape [batch_size, max_frames, video_feature_dim + audio_feature_dim].
Returns:
A tensor with shape [batch_size, num_classes].
"""
model_input = tf.keras.layers.Input(shape=input_shape, batch_size=batch_size)
video_input = model_input[:,:,:self.video_feature_dim]
audio_input = model_input[:,:,self.video_feature_dim:]
video_vlad_out = self.video_vlad(video_input)
audio_vlad_out = self.audio_vlad(audio_input)
vlad_out = tf.concat([video_vlad_out, audio_vlad_out], axis=1)
fc_out = self.fc(vlad_out)
cg_out = self.first_cg(fc_out)
moe_out = self.moe(cg_out)
final_out = self.second_cg(moe_out)
final_model = tf.keras.models.Model(inputs=model_input, outputs=final_out)
return final_model
class SegmentClassifier:
"""The Segment Classifier model, implemented according to the winning model from the Youtube-8M Challenge.
The model can be found here: https://arxiv.org/abs/1911.08548
Arguments:
num_clusters: the number of clusters to be used for NetVLAD. The audio clusters will be num_clusters/2.
video_input_shape: shape of the input video features. Shape of [batch_size, num_samples, video_feature_dim].
audio_input_shape: shape of the input audio features. Shape of [batch_size, num_samples, audio_feature_dim].
Raises:
ValueError: If num_clusters is not divisible by 2.
ValueError: If the batch sizes of the audio_input_shape and video_input_shape do not match.
ValueError: If the number of samples of the audio_input_shape and video_input_shape do not match.
"""
def __init__(self, num_clusters, video_input_shape, audio_input_shape, num_classes, fc_units, **kwargs):
super(SegmentClassifier, self).__init__(**kwargs)
if num_clusters % 2 != 0:
raise ValueError("num_clusters must be divisible by 2.")
batch_size = video_input_shape[0]
if audio_input_shape[0] != batch_size:
raise ValueError("audio_input_shape[0] must equal video_input_shape[0]. Batch sizes must equal.")
if audio_input_shape[1] != video_input_shape[1]:
raise ValueError("audio_input_shape[1] must equal video_input_shape[1]. Number of samples must equal.")
self.num_frames = video_input_shape[1]
self.num_classes = num_classes
self.video_feature_dim = video_input_shape[2]
self.video_vlad = NetVLAD(num_clusters, input_shape=video_input_shape, name="video_vlad")
self.audio_vlad = NetVLAD(num_clusters//2, input_shape=audio_input_shape, name="audio_vlad")
#Relu6 is used as it is employed in the paper.
self.fc = tf.keras.layers.Dense(
units=fc_units,
activation=tf.nn.relu6,
kernel_regularizer=tf.keras.regularizers.l2(1e-5),
name="main_fc"
)
self.first_cg = ContextGating(input_shape=(batch_size, fc_units), name="first_cg")
self.fc2 = tf.keras.layers.Dense(
units=1,
activation=tf.keras.activations.sigmoid,
kernel_regularizer=tf.keras.regularizers.l2(1e-5),
name="main_fc2"
)
def build_model(self, input_shape, second_input_shape, batch_size):
"""Perform one forward pass of the model.
Args:
input_shape: input shape for video features. Shape is of the form: [max_frames, video_feature_dim + audio_feature_dim].
second_input_shape: input shape of new class specific features. Shape is of the form [num_new_features]
Returns:
A tensor with shape [batch_size, num_classes].
"""
model_input = tf.keras.layers.Input(shape=input_shape, batch_size=batch_size)
model_input2 = tf.keras.layers.Input(shape=second_input_shape, batch_size=batch_size)
video_input = model_input[:,:,:self.video_feature_dim]
audio_input = model_input[:,:,self.video_feature_dim:]
video_vlad_out = self.video_vlad(video_input)
audio_vlad_out = self.audio_vlad(audio_input)
vlad_out = tf.concat([video_vlad_out, audio_vlad_out], axis=1)
vlad_out = tf.concat([vlad_out, model_input2], axis=1)
fc_out = self.fc(vlad_out)
cg_out = self.first_cg(fc_out)
final_out = self.fc2(cg_out)
final_model = tf.keras.models.Model(inputs=[model_input, model_input2], outputs=final_out)
return final_model
| 2.484375 | 2 |
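A rough end-to-end sketch of building and calling the `VideoClassifier` above, assuming the class is importable and TensorFlow 2.x is installed. The feature dimensions (1024-d video, 128-d audio, as in the YouTube-8M frame-level features) and the other hyperparameters are illustrative assumptions, not the values from the paper.

```python
import tensorflow as tf

batch_size, num_frames = 2, 5
video_dim, audio_dim = 1024, 128

classifier = VideoClassifier(
    num_clusters=8,
    video_input_shape=(batch_size, num_frames, video_dim),
    audio_input_shape=(batch_size, num_frames, audio_dim),
    num_classes=1000,
    num_mixtures=2,
    fc_units=512,
    iterations=1,
)
model = classifier.build_model(input_shape=(num_frames, video_dim + audio_dim),
                               batch_size=batch_size)
frames = tf.random.uniform((batch_size, num_frames, video_dim + audio_dim))
probs = model(frames)  # shape (batch_size, 1000): per-class probabilities
```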
ivy/functional/backends/jax/old/math.py | faruq2021/ivy | 0 | 5710 | <reponame>faruq2021/ivy<filename>ivy/functional/backends/jax/old/math.py<gh_stars>0
"""
Collection of Jax math functions, wrapped to fit Ivy syntax and signature.
"""
# global
import jax as _jax
import jax.numpy as _jnp
tan = _jnp.tan
acos = _jnp.arccos
atan = _jnp.arctan
atan2 = _jnp.arctan2
cosh = _jnp.cosh
atanh = _jnp.arctanh
log = _jnp.log
exp = _jnp.exp
erf = _jax.scipy.special.erf
| 1.21875 | 1 |
neaten_db.py | Adoni/ZhihuCrawler | 0 | 5711 | <filename>neaten_db.py<gh_stars>0
from pymongo import MongoClient
from pyltp import Segmentor
def insert_questions_from_answered_question():
in_db = MongoClient().zhihu.user_answered_questions
out_db = MongoClient().zhihu_network.questions
existed_question_id = set(map(lambda q: q['_id'], out_db.find()))
segmentor = Segmentor()
segmentor.load("/Users/sunxiaofei/workspace/ltp_data/cws.model")
for u in in_db.find():
for a in u['answers']:
if a['q_id'] in existed_question_id:
continue
existed_question_id.add(a['q_id'])
if len(existed_question_id) % 1000 == 0:
print(len(existed_question_id))
words = segmentor.segment(a['title'].strip().replace(
'\n', ' ').replace('\r', ' ').replace('\b', ' '))
if len(words) < 3:
continue
out_db.insert({'_id': a['q_id'], 'title': ' '.join(words)})
def insert_questions_from_followed_question():
in_db = MongoClient().zhihu.user_followed_questions
out_db = MongoClient().zhihu_network.questions
existed_question_id = set(map(lambda q: q['_id'], out_db.find()))
segmentor = Segmentor()
segmentor.load("/Users/sunxiaofei/workspace/ltp_data/cws.model")
for u in in_db.find():
for q in u['questions']:
if q['id'] in existed_question_id:
continue
existed_question_id.add(q['id'])
words = segmentor.segment(q['title'].strip().replace(
'\n', ' ').replace('\r', ' ').replace('\b', ' '))
if len(words) < 3:
continue
out_db.insert({'_id': q['id'], 'title': ' '.join(words)})
def insert_questions_from_asked_question():
in_db = MongoClient().zhihu.user_asked_questions
out_db = MongoClient().zhihu_network.questions
existed_question_id = set(map(lambda q: q['_id'], out_db.find()))
segmentor = Segmentor()
segmentor.load("/Users/sunxiaofei/workspace/ltp_data/cws.model")
for u in in_db.find():
for q in u['questions']:
if q['id'] in existed_question_id:
continue
existed_question_id.add(q['id'])
if len(existed_question_id) % 1000 == 0:
print(len(existed_question_id))
words = segmentor.segment(q['title'].strip().replace(
'\n', ' ').replace('\r', ' ').replace('\b', ' '))
if len(words) < 3:
continue
out_db.insert({'_id': q['id'], 'title': ' '.join(words)})
def insert_questions_from_collected_question():
in_db = MongoClient().zhihu.user_collected_questions
out_db = MongoClient().zhihu_network.questions
existed_question_id = set(map(lambda q: q['_id'], out_db.find()))
segmentor = Segmentor()
segmentor.load("/Users/sunxiaofei/workspace/ltp_data/cws.model")
for u in in_db.find():
for c_name, c_questions in u['collections'].items():
for a in c_questions:
if a['q_id'] == -1:
continue
if a['q_id'] in existed_question_id:
continue
existed_question_id.add(a['q_id'])
if len(existed_question_id) % 1000 == 0:
print(len(existed_question_id))
words = segmentor.segment(a['title'].strip().replace(
'\n', ' ').replace('\r', ' ').replace('\b', ' '))
if len(words) < 3:
continue
out_db.insert({'_id': a['q_id'], 'title': ' '.join(words)})
def delete_noise_question():
db = MongoClient().zhihu_network.questions
id_to_delete = []
for q in db.find():
if len(q['title'].split(' ')) < 3:
id_to_delete.append(q['_id'])
print(len(id_to_delete))
for _id in id_to_delete:
db.delete_one({'_id': _id})
def remove_enger_inline():
db = MongoClient().zhihu_network.questions
for q in db.find():
if '\n' in q['title'] or '\r' in q['title'] or '\b' in q['title']:
q['title'] = q['title'].replace('\n', ' ')
q['title'] = q['title'].replace('\r', ' ')
q['title'] = q['title'].replace('\b', ' ')
db.update_one({'_id': q['_id']},
{'$set': {'title': q['title']}},
upsert=True)
def insert_user_list():
keys = ['_id', 'name', 'is_zero_user', 'gender', 'location', 'business',
'education', 'motto', 'answer_num', 'collection_num',
'followed_column_num', 'followed_topic_num', 'followee_num',
'follower_num', 'post_num', 'question_num', 'thank_num',
'upvote_num', 'photo_url', 'weibo_url']
out_db = MongoClient().zhihu_network.users
existed_user_id = set(map(lambda u: u['_id'], out_db.find()))
for line in open('./user_info.data'):
line = line.strip().split('\t')
try:
assert (len(keys) == len(line))
except:
continue
user = dict(zip(keys, line))
if user['_id'] in existed_user_id:
continue
existed_user_id.add(user['_id'])
for key in user:
if key.endswith('_num'):
user[key] = int(user[key])
out_db.insert(user)
def insert_user_follow_user_list():
out_db = MongoClient().zhihu_network.user_follow_user_adjacency_list
existed_user_id = set(map(lambda u: u['_id'], out_db.find()))
for line in open('./user_followees.data'):
line = line.strip().split('\t')
user = dict()
user['_id'] = line[0]
user['neibors'] = line[1:]
if user['_id'] in existed_user_id:
continue
existed_user_id.add(user['_id'])
out_db.insert(user)
def insert_user_follow_question_list():
in_db = MongoClient().zhihu.user_followed_questions
out_db = MongoClient().zhihu_network.user_follow_question_adjacency_list
existed_user_id = set(map(lambda u: u['_id'], out_db.find()))
for user in in_db.find():
if user['_id'] in existed_user_id:
continue
existed_user_id.add(user['_id'])
q_ids = [q['id'] for q in user['questions']]
out_db.insert({'_id': user['_id'], 'neibors': q_ids})
def insert_user_ask_question_list():
in_db = MongoClient().zhihu.user_asked_questions
out_db = MongoClient().zhihu_network.user_ask_question_adjacency_list
existed_user_id = set(map(lambda u: u['_id'], out_db.find()))
for user in in_db.find():
if user['_id'] in existed_user_id:
continue
existed_user_id.add(user['_id'])
q_ids = [q['id'] for q in user['questions']]
out_db.insert({'_id': user['_id'], 'neibors': q_ids})
def insert_user_collect_question_list():
in_db = MongoClient().zhihu.user_collected_questions
out_db = MongoClient().zhihu_network.user_collect_question_adjacency_list
existed_user_id = set(map(lambda u: u['_id'], out_db.find()))
for user in in_db.find():
if user['_id'] in existed_user_id:
continue
existed_user_id.add(user['_id'])
q_ids = []
for _, c in user['collections'].items():
q_ids += [q['q_id'] for q in c]
out_db.insert({'_id': user['_id'], 'neibors': q_ids})
def insert_user_answer_question_list():
in_db = MongoClient().zhihu.user_answered_questions
out_db = MongoClient().zhihu_network.user_answer_question_adjacency_list
existed_user_id = set(map(lambda u: u['_id'], out_db.find()))
for user in in_db.find():
if user['_id'] in existed_user_id:
continue
existed_user_id.add(user['_id'])
q_ids = [a['q_id'] for a in user['answers']]
out_db.insert({'_id': user['_id'], 'neibors': q_ids})
if __name__ == '__main__':
# insert_questions_from_answered_question()
# insert_questions_from_followed_question()
# insert_questions_from_asked_question()
# insert_questions_from_collected_question()
#delete_noise_question()
#remove_enger_inline()
# insert_user_list()
insert_user_follow_user_list()
# insert_user_follow_question_list()
# insert_user_ask_question_list()
# insert_user_collect_question_list()
# insert_user_answer_question_list()
| 2.734375 | 3 |
test/tst_vlen.py | timgates42/netcdf4-python | 574 | 5712 | import sys
import unittest
import os
import tempfile
from netCDF4 import Dataset
import numpy as np
from numpy.testing import assert_array_equal
FILE_NAME = tempfile.NamedTemporaryFile(suffix='.nc', delete=False).name
VL_NAME = 'vlen_type'
VL_BASETYPE = np.int16
DIM1_NAME = 'lon'
DIM2_NAME = 'lat'
nlons = 5; nlats = 5
VAR1_NAME = 'ragged'
VAR2_NAME = 'strings'
VAR3_NAME = 'strings_alt'
VAR4_NAME = 'string_scalar'
VAR5_NAME = 'vlen_scalar'
data = np.empty(nlats*nlons,object)
datas = np.empty(nlats*nlons,object)
nn = 0
for n in range(nlats*nlons):
nn = nn + 1
data[n] = np.arange(nn,dtype=VL_BASETYPE)
datas[n] = ''.join([chr(i) for i in range(97,97+nn+1)])
data = np.reshape(data,(nlats,nlons))
datas = np.reshape(datas,(nlats,nlons))
class VariablesTestCase(unittest.TestCase):
def setUp(self):
self.file = FILE_NAME
f = Dataset(self.file,'w')
vlen_t = f.createVLType(VL_BASETYPE, VL_NAME)
f.createDimension(DIM1_NAME,nlons)
f.createDimension(DIM2_NAME,nlats)
ragged = f.createVariable(VAR1_NAME, vlen_t,\
(DIM2_NAME,DIM1_NAME))
strings = f.createVariable(VAR2_NAME, str,
(DIM2_NAME,DIM1_NAME))
strings_alt = f.createVariable(VAR3_NAME, datas.astype(str).dtype,
(DIM2_NAME, DIM1_NAME))
string_scalar = f.createVariable(VAR4_NAME,str,())
vlen_scalar = f.createVariable(VAR5_NAME,vlen_t,())
ragged[:] = data
ragged[-1,-1] = data[-1,-1]
strings[:] = datas
strings[-2,-2] = datas[-2,-2]
strings_alt[:] = datas.astype(str)
string_scalar[...] = 'foo' #issue458
vlen_scalar[...] = np.array([1,2,3],np.int16)
f.close()
def tearDown(self):
# Remove the temporary files
os.remove(self.file)
def runTest(self):
"""testing vlen variables"""
f = Dataset(self.file, 'r')
v = f.variables[VAR1_NAME]
vs = f.variables[VAR2_NAME]
vs_alt = f.variables[VAR3_NAME]
assert list(f.vltypes.keys()) == [VL_NAME]
assert f.vltypes[VL_NAME].dtype == VL_BASETYPE
assert f.variables['string_scalar'][...] == 'foo'
assert_array_equal(f.variables['vlen_scalar'][...],np.array([1,2,3],np.int16))
data2 = v[:]
data2s = vs[:]
for i in range(nlons):
for j in range(nlats):
assert_array_equal(data2[j,i], data[j,i])
assert datas[j,i] == data2s[j,i]
assert_array_equal(datas, vs_alt[:])
f.close()
class TestInvalidDataType(unittest.TestCase):
def runTest(self):
f = Dataset(FILE_NAME, 'w', format='NETCDF3_CLASSIC')
f.createDimension('x', 1)
# using assertRaisesRegext as a context manager
# only works with python >= 2.7 (issue #497)
#with self.assertRaisesRegexp(ValueError, 'strings are only supported'):
# f.createVariable('foo', str, ('x',))
try:
f.createVariable('foo', str, ('x',))
except ValueError:
pass
f.close()
os.remove(FILE_NAME)
class TestScalarVlenString(unittest.TestCase):
# issue 333
def runTest(self):
f = Dataset(FILE_NAME, 'w', format='NETCDF4')
teststring = f.createVariable('teststring', str)
stringout = "yyyymmdd_hhmmss"
teststring[()] = stringout
f.close()
f = Dataset(FILE_NAME)
assert f.variables['teststring'][:] == stringout
f.close()
os.remove(FILE_NAME)
class TestIntegerIndex(unittest.TestCase):
# issue 526
def runTest(self):
strtest = Dataset(FILE_NAME, 'w', format='NETCDF4')
strtest.createDimension('tenstrings', 10)
strtest.createVariable('tenstrings', str, ['tenstrings'])
strtest['tenstrings'][np.int32(5)] = 'asdf'
strtest['tenstrings'][6.0] = 'asdf'
strtest.close()
f = Dataset(FILE_NAME)
assert f.variables['tenstrings'][np.int32(5)] == 'asdf'
assert f.variables['tenstrings'][6.0] == 'asdf'
f.close()
os.remove(FILE_NAME)
class TestObjectArrayIndexing(unittest.TestCase):
def setUp(self):
self.file = FILE_NAME
f = Dataset(self.file,'w')
vlen_t = f.createVLType(VL_BASETYPE, VL_NAME)
f.createDimension(DIM1_NAME,nlons)
f.createDimension(DIM2_NAME,nlats)
strings_alt = f.createVariable(VAR3_NAME, datas.astype(str).dtype,
(DIM2_NAME, DIM1_NAME))
strings_alt[:] = datas.astype(str)
f.close()
def tearDown(self):
# Remove the temporary files
os.remove(self.file)
def runTest(self):
"""testing vlen variables"""
f = Dataset(self.file, 'r')
vs_alt = f.variables[VAR3_NAME]
unicode_strings = vs_alt[:]
fancy_indexed = unicode_strings[0][[1,2,4]]
assert fancy_indexed[0] == 'abc'
assert fancy_indexed[1] == 'abcd'
assert fancy_indexed[2] == 'abcdef'
f.close()
class VlenAppendTestCase(unittest.TestCase):
def setUp(self):
import netCDF4
if netCDF4.__netcdf4libversion__ < "4.4.1":
self.skip = True
try:
self.skipTest("This test requires NetCDF 4.4.1 or later.")
except AttributeError:
# workaround for Python 2.6 (skipTest(reason) is new
# in Python 2.7)
pass
else:
self.skip = False
self.file = FILE_NAME
f = Dataset(self.file, 'w')
vlen_type = f.createVLType(np.float64, 'vltest')
f.createDimension('x', None)
v = f.createVariable('vl', vlen_type, 'x')
w = f.createVariable('vl2', np.float64, 'x')
f.close()
def tearDown(self):
# Remove the temporary files
os.remove(self.file)
def runTest(self):
"""testing appending to vlen variables (issue #527)."""
# workaround for Python 2.6
if self.skip:
return
f = Dataset(self.file, 'a')
w = f.variables["vl2"]
v = f.variables["vl"]
w[0:3] = np.arange(3, dtype=np.float64)
v[0] # sometimes crashes
v[0].tolist() # sometimes crashes
v[0].size # BOOM!
f.close()
class Vlen_ScaledInts(unittest.TestCase):
def setUp(self):
self.file = FILE_NAME
nc = Dataset(self.file, 'w')
vlen_type = nc.createVLType(np.uint8, 'vltest')
nc.createDimension('x', None)
v = nc.createVariable('vl', vlen_type, 'x')
v.scale_factor = 1./254.
v.missing_value=np.array(255,np.uint8)
# random lengths between 1 and 1000
ilen = np.random.randint(1,1000,size=100)
n = 0
for nlen in ilen:
data = np.random.uniform(low=0.0, high=1.0, size=nlen)
v[n] = data
if n==99: self.data = data
n += 1
nc.close()
def tearDown(self):
# Remove the temporary files
os.remove(self.file)
def runTest(self):
"""testing packing float vlens as scaled integers (issue #1003)."""
nc = Dataset(self.file)
data = nc['vl'][-1]
# check max error of compression
err = np.abs(data - self.data)
assert(err.max() < nc['vl'].scale_factor)
# turn off auto-scaling
nc.set_auto_maskandscale(False)
data = nc['vl'][-1]
assert(data[-1] == np.around(self.data[-1]/nc['vl'].scale_factor))
nc.close()
if __name__ == '__main__':
unittest.main()
| 2.203125 | 2 |
sonnet/src/once.py | ScriptBox99/deepmind-sonnet | 10,287 | 5713 | # Copyright 2019 The Sonnet Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Utility to run functions and methods once."""
import uuid
from sonnet.src import utils
_ONCE_PROPERTY = "_snt_once"
def _check_no_output(output):
if output is not None:
raise ValueError("@snt.once decorated functions cannot return values")
def once(f):
"""Decorator which ensures a wrapped method is only ever run once.
>>> @snt.once
... def f():
... print('Hello, world!')
>>> f()
Hello, world!
>>> f()
>>> f()
If `f` is a method then it will be evaluated once per instance:
>>> class MyObject:
... @snt.once
... def f(self):
... print('Hello, world!')
>>> o = MyObject()
>>> o.f()
Hello, world!
>>> o.f()
>>> o2 = MyObject()
>>> o2.f()
Hello, world!
>>> o.f()
>>> o2.f()
If an error is raised during execution of `f` it will be raised to the user.
Next time the method is run, it will be treated as not having run before.
Args:
f: A function to wrap which should only be called once.
Returns:
Wrapped version of `f` which will only evaluate `f` the first time it is
called.
"""
# TODO(tomhennigan) Perhaps some more human friendly identifier?
once_id = uuid.uuid4()
@utils.decorator
def wrapper(wrapped, instance, args, kwargs):
"""Decorator which ensures a wrapped method is only ever run once."""
if instance is None:
# NOTE: We can't use the weakset since you can't weakref None.
if not wrapper.seen_none:
_check_no_output(wrapped(*args, **kwargs))
wrapper.seen_none = True
return
# Get or set the `seen` set for this object.
seen = getattr(instance, _ONCE_PROPERTY, None)
if seen is None:
seen = set()
setattr(instance, _ONCE_PROPERTY, seen)
if once_id not in seen:
_check_no_output(wrapped(*args, **kwargs))
seen.add(once_id)
wrapper.seen_none = False
decorated = wrapper(f) # pylint: disable=no-value-for-parameter,assignment-from-none
decorated.__snt_once_wrapped__ = f
return decorated
| 2.828125 | 3 |
env.py | DGarciaMedina/PiArmDiego | 0 | 5714 |
import piarm
import time
import numpy as np
import cv2
import random
class MyArm2D:
def __init__(self, move_robot = False):
self.move_robot = move_robot
if self.move_robot:
self.robot = piarm.PiArm()
self.open_connection()
self.DEFAULT = [500, 500, 500, 500, 500, 500]
self.num_members = 3
self.adjustable_joints = [3,4,5]
self.initial_height = 73 # height in mm of motor 5's axle
self.lengths = {
"h_0": 73,
"a": 97.5,
"b": 96,
"c": 160
}
self.base_width = 110
self.base_height = 45
# All the angles are with respect to the vertical
self.max_angles = [90 for _ in range(self.num_members)]
self.min_angles = [-90 for _ in range(self.num_members)]
self.min_angles[0] = 0 # To prevent it from hitting the base of the arm
self.angles = 90*np.ones(self.num_members) # angles of motor 3, 4 and 5 ranging between
# min_angle and max_angle
self.member_thickness = 30
self.img_width = 1000
self.x_offset = int(self.img_width/2)
self.y_offset = self.lengths["h_0"]
self.img_height = int(sum(list(self.lengths.values())) + self.y_offset + 20)
self.img = np.zeros((self.img_height, self.img_width, 3))
self.timestep = 0
self.max_timestep = 200
# This is to check that all the joints (except for the last one) is above
# the ground
self.min_joint_heights = [20, 20, 10]
self.goal_coords = [None, None]
self.update_goal_coords()
self.joint_positions = [[0,0] for i in range(self.num_members + 1)]
self.update_positions()
self.distance2goal = None
self.update_distance_2_goal()
def __del__(self):
print("Closing connection...")
if self.move_robot:
self.close_connection()
def open_connection(self):
if self.robot.alive:
raise Exception("Robot is already switched on")
self.robot.connect("/dev/ttyS0")
if self.robot.alive:
print("Success connecting to robot")
return True
else:
print("Failed to connect to robot")
return False
def move_to_default_pos(self):
if self.robot.alive:
for ID in range(1, 7):
self.robot.servoWrite(ID, int(self.DEFAULT[ID - 1]), 500)
return True
else:
return False
def move_to_pos(self):
# First, convert the angles in degrees between -90º and +90º
# to angles between 125 and 875
# 90 -> 500
# 0 -> 125
angles_deg = self.angles - 90
angles_deg[2] -= angles_deg[1]
angles_deg[1] -= angles_deg[0]
angles_piarm = [int(500 + (375/90)*angle_deg) for angle_deg in angles_deg]
angles_piarm[0] = 1000 - angles_piarm[0]
angles_piarm[1] = 1000 - angles_piarm[1]
print("Angles in degrees: ", angles_deg)
print("Moving arms with angles: ", angles_piarm)
if self.robot.alive:
for ID in range(3, 6):
self.robot.servoWrite(8 - ID, int(angles_piarm[ID - 3]), 500)
time.sleep(1)
return True
else:
return False
def close_connection(self):
if not self.robot.alive:
raise Exception("Robot is already switched off")
self.robot.disconnect()
if not self.robot.alive:
print("Success disconnecting from robot")
return True
else:
print("Failed to disconnect from robot")
return False
def update_goal_coords(self):
max_length = sum(list(self.lengths.values())[1:])
r = random.uniform(0.8*max_length,max_length)
theta = random.uniform(-np.pi/4, np.pi/2)
x = r * np.sin(theta)
y = r * np.cos(theta)
self.goal_coords = [int(x), int(y)]
def update_distance_2_goal(self):
gripper_pos = self.joint_positions[-1]
self.distance2goal = np.sqrt(sum([(gripper_pos[i] - self.goal_coords[i])**2 for i in range(2)]))
def update_positions(self):
"""
Positions are with respect to the origin (0,0), right underneath
motor 5. It is positive if it is away from the origin.
"""
self.joint_positions[0] = [0, self.lengths["h_0"]]
self.joint_positions[1] = [
self.joint_positions[0][0] + self.lengths["a"] * np.sin(np.deg2rad(self.angles[0])),
self.joint_positions[0][1] + self.lengths["a"] * np.cos(np.deg2rad(self.angles[0]))
]
self.joint_positions[2] = [
self.joint_positions[1][0] + self.lengths["b"] * np.sin(np.deg2rad(self.angles[1])),
self.joint_positions[1][1] + self.lengths["b"] * np.cos(np.deg2rad(self.angles[1]))
]
self.joint_positions[3] = [
self.joint_positions[2][0] + self.lengths["c"] * np.sin(np.deg2rad(self.angles[2])),
self.joint_positions[2][1] + self.lengths["c"] * np.cos(np.deg2rad(self.angles[2]))
]
# Convert to integers
self.joint_positions = [[int(x[0]),int(x[1])] for x in self.joint_positions]
def move_arm(self, actions):
"""
The inputs are the new set of angles [theta0, theta1, theta2]
"""
for i, action in enumerate(actions):
self.angles[i:] += action
for member_index in range(1,self.num_members):
self.max_angles[member_index] = self.angles[member_index - 1] + 90
self.min_angles[member_index] = self.angles[member_index - 1] - 90
self.update_positions()
self.update_distance_2_goal()
def render(self):
self.img = np.zeros((self.img_height, self.img_width, 3))
# Render the floor
self.img = cv2.rectangle(self.img, (0,0), (self.img_width, self.y_offset), (0,255,0), -1)
# Render the base of the arm
self.img = cv2.rectangle(self.img,
(int(self.x_offset - self.base_width/2), self.y_offset),
(int(self.x_offset - self.base_width/2 + self.base_width), self.y_offset + self.base_height),
(0, 165, 255),
-1)
goal_x, goal_y = self.goal_coords
self.img = cv2.circle(self.img, (goal_x + self.x_offset, goal_y + self.y_offset), int(self.member_thickness/2), (128, 0, 128), 5)
for member_id in range(self.num_members):
first_joint = self.joint_positions[member_id].copy()
second_joint = self.joint_positions[member_id + 1].copy()
first_joint[0] += self.x_offset
first_joint[1] += self.y_offset
second_joint[0] += self.x_offset
second_joint[1] += self.y_offset
self.img = cv2.line(self.img, tuple(first_joint), tuple(second_joint), (255,0,0), self.member_thickness)
self.img = cv2.circle(self.img, tuple(first_joint), int(self.member_thickness/2), (255,255,0), -1)
# Flip image upside down
self.img = cv2.flip(self.img, 0)
self.img = cv2.putText(self.img,
"Distance: " + str(round(self.distance2goal,2)),
(10, 30),
cv2.FONT_HERSHEY_SIMPLEX,
1,
(255,255,255),
2)
cv2.imshow("Arm", self.img)
cv2.moveWindow("Arm",20,50)
def reset(self):
self.angles = 90*np.ones(self.num_members)
self.update_positions()
self.img = np.zeros((self.img_height, self.img_width, 3))
self.timestep = 0
self.update_goal_coords()
self.render()
if self.move_robot:
self.move_to_default_pos()
def check_arm_angles(self):
for member_index in range(self.num_members):
if self.angles[member_index] < self.min_angles[member_index]:
return False
if self.angles[member_index] > self.max_angles[member_index]:
return False
return True
def check_arm_positions(self):
for joint_index in range(1,len(self.joint_positions)):
member_pos = self.joint_positions[joint_index][1]
min_height = self.min_joint_heights[joint_index-1]
if member_pos < min_height:
return False
return True
def get_reward(self, forbidden_action):
if forbidden_action:
reward_scaling_factor = 2
else:
reward_scaling_factor = 1
return - self.distance2goal * reward_scaling_factor
def step(self, actions):
self.move_arm(actions)
forbidden_action = False
okay_angles = self.check_arm_angles()
okay_positions = self.check_arm_positions()
if not okay_angles:
print("An angle threshold was exceeded")
self.move_arm(-actions)
forbidden_action = True
if not okay_positions:
print("A position threshold was exqqceeded")
self.move_arm(-actions)
forbidden_action = True
self.render()
if self.move_robot:
self.move_to_pos()
r = self.get_reward(forbidden_action)
self.timestep += 1
is_done = self.timestep >= self.max_timestep
return self.angles, r, is_done
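# Illustrative usage sketch added for clarity; it is not part of the original module.
# It drives only the simulated arm (move_robot=False), uses made-up random joint
# deltas, and needs a display for the cv2 render window.
def _demo_random_rollout(steps=20):
    env = MyArm2D(move_robot=False)
    env.reset()
    for _ in range(steps):
        actions = np.random.uniform(-5.0, 5.0, size=env.num_members)  # small angle deltas (degrees)
        angles, reward, done = env.step(actions)
        cv2.waitKey(50)  # give the render window time to refresh
        if done:
            break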
| 2.890625 | 3 |
src/python/src/grpc/_adapter/_links_test.py | jonywtf/grpc | 1 | 5715 |
# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Test of the GRPC-backed ForeLink and RearLink."""
import threading
import unittest
from grpc._adapter import _proto_scenarios
from grpc._adapter import _test_links
from grpc._adapter import fore
from grpc._adapter import rear
from grpc.framework.base import interfaces
from grpc.framework.base.packets import packets as tickets
from grpc.framework.foundation import logging_pool
_IDENTITY = lambda x: x
_TIMEOUT = 2
class RoundTripTest(unittest.TestCase):
def setUp(self):
self.fore_link_pool = logging_pool.pool(80)
self.rear_link_pool = logging_pool.pool(80)
def tearDown(self):
self.rear_link_pool.shutdown(wait=True)
self.fore_link_pool.shutdown(wait=True)
def testZeroMessageRoundTrip(self):
test_operation_id = object()
test_method = 'test method'
test_fore_link = _test_links.ForeLink(None, None)
def rear_action(front_to_back_ticket, fore_link):
if front_to_back_ticket.kind in (
tickets.Kind.COMPLETION, tickets.Kind.ENTIRE):
back_to_front_ticket = tickets.BackToFrontPacket(
front_to_back_ticket.operation_id, 0, tickets.Kind.COMPLETION, None)
fore_link.accept_back_to_front_ticket(back_to_front_ticket)
test_rear_link = _test_links.RearLink(rear_action, None)
fore_link = fore.ForeLink(
self.fore_link_pool, {test_method: None}, {test_method: None}, None, ())
fore_link.join_rear_link(test_rear_link)
test_rear_link.join_fore_link(fore_link)
fore_link.start()
port = fore_link.port()
rear_link = rear.RearLink(
'localhost', port, self.rear_link_pool, {test_method: None},
{test_method: None}, False, None, None, None)
rear_link.join_fore_link(test_fore_link)
test_fore_link.join_rear_link(rear_link)
rear_link.start()
front_to_back_ticket = tickets.FrontToBackPacket(
test_operation_id, 0, tickets.Kind.ENTIRE, test_method,
interfaces.ServicedSubscription.Kind.FULL, None, None, _TIMEOUT)
rear_link.accept_front_to_back_ticket(front_to_back_ticket)
with test_fore_link.condition:
while (not test_fore_link.tickets or
test_fore_link.tickets[-1].kind is tickets.Kind.CONTINUATION):
test_fore_link.condition.wait()
rear_link.stop()
fore_link.stop()
with test_fore_link.condition:
self.assertIs(test_fore_link.tickets[-1].kind, tickets.Kind.COMPLETION)
def testEntireRoundTrip(self):
test_operation_id = object()
test_method = 'test method'
test_front_to_back_datum = b'\x07'
test_back_to_front_datum = b'\x08'
test_fore_link = _test_links.ForeLink(None, None)
rear_sequence_number = [0]
def rear_action(front_to_back_ticket, fore_link):
if front_to_back_ticket.payload is None:
payload = None
else:
payload = test_back_to_front_datum
terminal = front_to_back_ticket.kind in (
tickets.Kind.COMPLETION, tickets.Kind.ENTIRE)
if payload is not None or terminal:
back_to_front_ticket = tickets.BackToFrontPacket(
front_to_back_ticket.operation_id, rear_sequence_number[0],
tickets.Kind.COMPLETION if terminal else tickets.Kind.CONTINUATION,
payload)
rear_sequence_number[0] += 1
fore_link.accept_back_to_front_ticket(back_to_front_ticket)
test_rear_link = _test_links.RearLink(rear_action, None)
fore_link = fore.ForeLink(
self.fore_link_pool, {test_method: _IDENTITY},
{test_method: _IDENTITY}, None, ())
fore_link.join_rear_link(test_rear_link)
test_rear_link.join_fore_link(fore_link)
fore_link.start()
port = fore_link.port()
rear_link = rear.RearLink(
'localhost', port, self.rear_link_pool, {test_method: _IDENTITY},
{test_method: _IDENTITY}, False, None, None, None)
rear_link.join_fore_link(test_fore_link)
test_fore_link.join_rear_link(rear_link)
rear_link.start()
front_to_back_ticket = tickets.FrontToBackPacket(
test_operation_id, 0, tickets.Kind.ENTIRE, test_method,
interfaces.ServicedSubscription.Kind.FULL, None,
test_front_to_back_datum, _TIMEOUT)
rear_link.accept_front_to_back_ticket(front_to_back_ticket)
with test_fore_link.condition:
while (not test_fore_link.tickets or
test_fore_link.tickets[-1].kind is not tickets.Kind.COMPLETION):
test_fore_link.condition.wait()
rear_link.stop()
fore_link.stop()
with test_rear_link.condition:
front_to_back_payloads = tuple(
ticket.payload for ticket in test_rear_link.tickets
if ticket.payload is not None)
with test_fore_link.condition:
back_to_front_payloads = tuple(
ticket.payload for ticket in test_fore_link.tickets
if ticket.payload is not None)
self.assertTupleEqual((test_front_to_back_datum,), front_to_back_payloads)
self.assertTupleEqual((test_back_to_front_datum,), back_to_front_payloads)
def _perform_scenario_test(self, scenario):
test_operation_id = object()
test_method = scenario.method()
test_fore_link = _test_links.ForeLink(None, None)
rear_lock = threading.Lock()
rear_sequence_number = [0]
def rear_action(front_to_back_ticket, fore_link):
with rear_lock:
if front_to_back_ticket.payload is not None:
response = scenario.response_for_request(front_to_back_ticket.payload)
else:
response = None
terminal = front_to_back_ticket.kind in (
tickets.Kind.COMPLETION, tickets.Kind.ENTIRE)
if response is not None or terminal:
back_to_front_ticket = tickets.BackToFrontPacket(
front_to_back_ticket.operation_id, rear_sequence_number[0],
tickets.Kind.COMPLETION if terminal else tickets.Kind.CONTINUATION,
response)
rear_sequence_number[0] += 1
fore_link.accept_back_to_front_ticket(back_to_front_ticket)
test_rear_link = _test_links.RearLink(rear_action, None)
fore_link = fore.ForeLink(
self.fore_link_pool, {test_method: scenario.deserialize_request},
{test_method: scenario.serialize_response}, None, ())
fore_link.join_rear_link(test_rear_link)
test_rear_link.join_fore_link(fore_link)
fore_link.start()
port = fore_link.port()
rear_link = rear.RearLink(
'localhost', port, self.rear_link_pool,
{test_method: scenario.serialize_request},
{test_method: scenario.deserialize_response}, False, None, None, None)
rear_link.join_fore_link(test_fore_link)
test_fore_link.join_rear_link(rear_link)
rear_link.start()
commencement_ticket = tickets.FrontToBackPacket(
test_operation_id, 0, tickets.Kind.COMMENCEMENT, test_method,
interfaces.ServicedSubscription.Kind.FULL, None, None, _TIMEOUT)
fore_sequence_number = 1
rear_link.accept_front_to_back_ticket(commencement_ticket)
for request in scenario.requests():
continuation_ticket = tickets.FrontToBackPacket(
test_operation_id, fore_sequence_number, tickets.Kind.CONTINUATION,
None, None, None, request, None)
fore_sequence_number += 1
rear_link.accept_front_to_back_ticket(continuation_ticket)
completion_ticket = tickets.FrontToBackPacket(
test_operation_id, fore_sequence_number, tickets.Kind.COMPLETION, None,
None, None, None, None)
fore_sequence_number += 1
rear_link.accept_front_to_back_ticket(completion_ticket)
with test_fore_link.condition:
while (not test_fore_link.tickets or
test_fore_link.tickets[-1].kind is not tickets.Kind.COMPLETION):
test_fore_link.condition.wait()
rear_link.stop()
fore_link.stop()
with test_rear_link.condition:
requests = tuple(
ticket.payload for ticket in test_rear_link.tickets
if ticket.payload is not None)
with test_fore_link.condition:
responses = tuple(
ticket.payload for ticket in test_fore_link.tickets
if ticket.payload is not None)
self.assertTrue(scenario.verify_requests(requests))
self.assertTrue(scenario.verify_responses(responses))
def testEmptyScenario(self):
self._perform_scenario_test(_proto_scenarios.EmptyScenario())
def testBidirectionallyUnaryScenario(self):
self._perform_scenario_test(_proto_scenarios.BidirectionallyUnaryScenario())
def testBidirectionallyStreamingScenario(self):
self._perform_scenario_test(
_proto_scenarios.BidirectionallyStreamingScenario())
if __name__ == '__main__':
unittest.main()
| 1.4375 | 1 |
tests/_test_progress_board.py | stjordanis/Hyperactive | 382 | 5716 |
import os, glob
import subprocess
from subprocess import DEVNULL, STDOUT
abspath = os.path.abspath(__file__)
dir_ = os.path.dirname(abspath)
files = glob.glob(dir_ + "/_progress_board_tests/_test_progress_board_*.py")
for file_path in files:
file_name = str(file_path.rsplit("/", maxsplit=1)[1])
try:
print("\033[0;33;40m Testing", file_name, end="...\r")
subprocess.check_call(["pytest", file_path], stdout=DEVNULL, stderr=STDOUT)
except subprocess.CalledProcessError:
print("\033[0;31;40m Error in", file_name)
else:
print("\033[0;32;40m", file_name, "is correct")
| 1.796875 | 2 |
pages/forest_pages.py | jhalljhall/beiwe-backend | 1 | 5717 | import csv
import datetime
from collections import defaultdict
from django.contrib import messages
from django.http.response import FileResponse
from django.shortcuts import redirect, render
from django.utils import timezone
from django.views.decorators.http import require_GET, require_http_methods, require_POST
from authentication.admin_authentication import (authenticate_admin,
authenticate_researcher_study_access, forest_enabled)
from constants.data_access_api_constants import CHUNK_FIELDS
from constants.forest_constants import ForestTaskStatus, ForestTree
from database.data_access_models import ChunkRegistry
from database.study_models import Study
from database.tableau_api_models import ForestTask
from database.user_models import Participant
from forms.django_forms import CreateTasksForm
from libs.http_utils import easy_url
from libs.internal_types import ParticipantQuerySet, ResearcherRequest
from libs.streaming_zip import zip_generator
from libs.utils.date_utils import daterange
from middleware.abort_middleware import abort
from serializers.forest_serializers import ForestTaskCsvSerializer, ForestTaskSerializer
@require_GET
@authenticate_researcher_study_access
@forest_enabled
def analysis_progress(request: ResearcherRequest, study_id=None):
study: Study = Study.objects.get(pk=study_id)
participants: ParticipantQuerySet = Participant.objects.filter(study=study_id)
# generate chart of study analysis progress logs
trackers = ForestTask.objects.filter(participant__in=participants).order_by("created_on")
start_date = (study.get_earliest_data_time_bin() or study.created_on).date()
end_date = (study.get_latest_data_time_bin() or timezone.now()).date()
# this code simultaneously builds up the chart of most recent forest results for date ranges
# by participant and tree, and tracks the metadata
params = dict()
results = defaultdict(lambda: "--")
tracker: ForestTask
for tracker in trackers:
for date in daterange(tracker.data_date_start, tracker.data_date_end, inclusive=True):
results[(tracker.participant_id, tracker.forest_tree, date)] = tracker.status
if tracker.status == tracker.status.success:
params[(tracker.participant_id, tracker.forest_tree, date)] = tracker.forest_param_id
else:
params[(tracker.participant_id, tracker.forest_tree, date)] = None
# generate the date range for charting
dates = list(daterange(start_date, end_date, inclusive=True))
chart = []
for participant in participants:
for tree in ForestTree.values():
row = [participant.patient_id, tree] + \
[results[(participant.id, tree, date)] for date in dates]
chart.append(row)
# ensure that within each tree, only a single set of param values are used (only the most recent runs
# are considered, and unsuccessful runs are assumed to invalidate old runs, clearing params)
params_conflict = False
for tree in set([k[1] for k in params.keys()]):
if len(set([m for k, m in params.items() if m is not None and k[1] == tree])) > 1:
params_conflict = True
break
return render(
request,
'forest/analysis_progress.html',
context=dict(
study=study,
chart_columns=["participant", "tree"] + dates,
status_choices=ForestTaskStatus,
params_conflict=params_conflict,
start_date=start_date,
end_date=end_date,
chart=chart # this uses the jinja safe filter and should never involve user input
)
)
@require_http_methods(['GET', 'POST'])
@authenticate_admin
@forest_enabled
def create_tasks(request: ResearcherRequest, study_id=None):
# Only a SITE admin can queue forest tasks
if not request.session_researcher.site_admin:
return abort(403)
try:
study = Study.objects.get(pk=study_id)
except Study.DoesNotExist:
return abort(404)
# FIXME: remove this double endpoint pattern, it is bad.
if request.method == "GET":
return render_create_tasks(request, study)
form = CreateTasksForm(data=request.POST, study=study)
if not form.is_valid():
error_messages = [
f'"{field}": {message}'
for field, messages in form.errors.items()
for message in messages
]
error_messages_string = "\n".join(error_messages)
messages.warning(request, f"Errors:\n\n{error_messages_string}")
return render_create_tasks(request, study)
form.save()
messages.success(request, "Forest tasks successfully queued!")
return redirect(easy_url("forest_pages.task_log", study_id=study_id))
@require_GET
@authenticate_researcher_study_access
@forest_enabled
def task_log(request: ResearcherRequest, study_id=None):
study = Study.objects.get(pk=study_id)
forest_tasks = ForestTask.objects.filter(participant__study_id=study_id).order_by("-created_on")
return render(
request,
"forest/task_log.html",
context=dict(
study=study,
is_site_admin=request.session_researcher.site_admin,
status_choices=ForestTaskStatus,
forest_log=ForestTaskSerializer(forest_tasks, many=True).data,
)
)
@require_GET
@authenticate_admin
def download_task_log(request: ResearcherRequest):
forest_tasks = ForestTask.objects.order_by("created_on")
return FileResponse(
stream_forest_task_log_csv(forest_tasks),
content_type="text/csv",
filename=f"forest_task_log_{timezone.now().isoformat()}.csv",
as_attachment=True,
)
@require_POST
@authenticate_admin
@forest_enabled
def cancel_task(request: ResearcherRequest, study_id, forest_task_external_id):
if not request.session_researcher.site_admin:
return abort(403)
number_updated = \
ForestTask.objects.filter(
external_id=forest_task_external_id, status=ForestTaskStatus.queued
).update(
status=ForestTaskStatus.cancelled,
stacktrace=f"Canceled by {request.session_researcher.username} on {datetime.date.today()}",
)
if number_updated > 0:
messages.success(request, "Forest task successfully cancelled.")
else:
messages.warning(request, "Sorry, we were unable to find or cancel this Forest task.")
return redirect(easy_url("forest_pages.task_log", study_id=study_id))
@require_GET
@authenticate_admin
@forest_enabled
def download_task_data(request: ResearcherRequest, study_id, forest_task_external_id):
try:
tracker: ForestTask = ForestTask.objects.get(
external_id=forest_task_external_id, participant__study_id=study_id
)
except ForestTask.DoesNotExist:
return abort(404)
chunks = ChunkRegistry.objects.filter(participant=tracker.participant).values(*CHUNK_FIELDS)
f = FileResponse(
zip_generator(chunks),
content_type="zip",
as_attachment=True,
filename=f"{tracker.get_slug()}.zip",
)
f.set_headers(None)
return f
def stream_forest_task_log_csv(forest_tasks):
buffer = CSVBuffer()
writer = csv.DictWriter(buffer, fieldnames=ForestTaskCsvSerializer.Meta.fields)
writer.writeheader()
yield buffer.read()
for forest_task in forest_tasks:
writer.writerow(ForestTaskCsvSerializer(forest_task).data)
yield buffer.read()
def render_create_tasks(request: ResearcherRequest, study: Study):
participants = Participant.objects.filter(study=study)
try:
start_date = ChunkRegistry.objects.filter(participant__in=participants).earliest("time_bin")
end_date = ChunkRegistry.objects.filter(participant__in=participants).latest("time_bin")
start_date = start_date.time_bin.date()
end_date = end_date.time_bin.date()
except ChunkRegistry.DoesNotExist:
start_date = study.created_on.date()
end_date = timezone.now().date()
return render(
request,
"forest/create_tasks.html",
context=dict(
study=study,
participants=list(
study.participants.order_by("patient_id").values_list("patient_id", flat=True)
),
trees=ForestTree.choices(),
start_date=start_date.strftime('%Y-%m-%d'),
end_date=end_date.strftime('%Y-%m-%d')
)
)
class CSVBuffer:
line = ""
def read(self):
return self.line
def write(self, line):
self.line = line
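# Note added for clarity (not part of the original file): CSVBuffer is a minimal
# file-like shim for csv.DictWriter. Every writer.writerow() call lands in
# self.line via write(), and stream_forest_task_log_csv() yields it immediately,
# so the task log is streamed to the client row by row instead of being built
# up in memory first.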
| 2.109375 | 2 |
data_scout/transformations/math_custom.py | janthiemen/data_scout | 0 | 5718 |
from __future__ import division
from .transformation import Transformation
from pyparsing import (Literal, CaselessLiteral, Word, Combine, Group, Optional,
ZeroOrMore, Forward, nums, alphas, oneOf)
import math
import re
import operator
__author__ = '<NAME>'
__version__ = '$Revision: 0.0 $'
__date__ = '$Date: 2009-03-20 $'
__source__ = '''http://pyparsing.wikispaces.com/file/view/fourFn.py
http://pyparsing.wikispaces.com/message/view/home/15549426
'''
__note__ = '''
All I've done is rewrap Paul McGuire's fourFn.py as a class, so I can use it
more easily in other places.
'''
class Custom(Transformation):
"""
Most of this code comes from the fourFn.py pyparsing example
"""
title = "Custom equation"
key = "Math equation"
fields = {
"equation": {"name": "Equation", "type": "string", "help": "The equation to evaluate. Column values should be entered as {COLUMN NAME}",
"required": True, "input": "text", "default": ""},
"output": {"name": "Output column", "type": "string", "input": "text", "required": True,
"help": "The name of the (newly created) column that contains the results", "default": ""},
}
def __init__(self, arguments: dict, sample_size: int, example: dict = None):
"""
Initialize the transformation with the given parameters.
expop :: '^'
multop :: '*' | '/'
addop :: '+' | '-'
integer :: ['+' | '-'] '0'..'9'+
atom :: PI | E | real | fn '(' expr ')' | '(' expr ')'
factor :: atom [ expop factor ]*
term :: factor [ multop factor ]*
expr :: term [ addop term ]*
Arguments:
arguments {dict} -- The arguments
"""
super().__init__(arguments, sample_size, example)
self.equation = arguments["equation"]
self.output = arguments["output"]
point = Literal(".")
e = CaselessLiteral("E")
fnumber = Combine(Word("+-" + nums, nums) +
Optional(point + Optional(Word(nums))) +
Optional(e + Word("+-" + nums, nums)))
ident = Word(alphas, alphas + nums + "_$")
plus = Literal("+")
minus = Literal("-")
mult = Literal("*")
div = Literal("/")
mod = Literal("%")
lpar = Literal("(").suppress()
rpar = Literal(")").suppress()
addop = plus | minus
multop = mult | div | mod
expop = Literal("^")
pi = CaselessLiteral("PI")
expr = Forward()
atom = ((Optional(oneOf("- +")) +
(ident + lpar + expr + rpar | pi | e | fnumber).setParseAction(self.push_first))
| Optional(oneOf("- +")) + Group(lpar + expr + rpar)
).setParseAction(self.push_u_minus)
# by defining exponentiation as "atom [ ^ factor ]..." instead of
# "atom [ ^ atom ]...", we get right-to-left exponents, instead of left-to-right
# that is, 2^3^2 = 2^(3^2), not (2^3)^2.
factor = Forward()
factor << atom + \
ZeroOrMore((expop + factor).setParseAction(self.push_first))
term = factor + \
ZeroOrMore((multop + factor).setParseAction(self.push_first))
expr << term + \
ZeroOrMore((addop + term).setParseAction(self.push_first))
# addop_term = ( addop + term ).setParseAction( self.push_first )
# general_term = term + ZeroOrMore( addop_term ) | OneOrMore( addop_term)
# expr << general_term
self.bnf = expr
# map operator symbols to corresponding arithmetic operations
epsilon = 1e-12
self.opn = {"+": operator.add,
"-": operator.sub,
"*": operator.mul,
"%": operator.mod,
"/": operator.truediv,
"^": operator.pow}
self.expr_stack = None
self.fn = {"sin": math.sin,
"sinh": math.sinh,
"cos": math.cos,
"cosh": math.cosh,
"tan": math.tan,
"tanh": math.tanh,
"exp": math.exp,
"sqrt": math.sqrt,
"radians": math.radians,
"degrees": math.degrees,
"sign": lambda x: 0 if x == 0 else x / abs(x),
"log": math.log10,
"ln": math.log,
"abs": abs,
"trunc": lambda a: int(a),
"round": round,
"floor": math.floor,
"ceil": math.ceil,
"sgn": lambda a: abs(a) > epsilon and cmp(a, 0) or 0}
def push_first(self, strg, loc, toks):
self.expr_stack.append(toks[0])
def push_u_minus(self, strg, loc, toks):
if toks and toks[0] == '-':
self.expr_stack.append('unary -')
def evaluate_stack(self, s):
op = s.pop()
if op == 'unary -':
return -self.evaluate_stack(s)
if op in "+-*/^%":
op2 = self.evaluate_stack(s)
op1 = self.evaluate_stack(s)
return self.opn[op](op1, op2)
elif op == "PI":
return math.pi # 3.1415926535
elif op == "E":
return math.e # 2.718281828
elif op in self.fn:
return self.fn[op](self.evaluate_stack(s))
elif op[0].isalpha():
return 0
else:
return float(op)
def eval(self, num_string, parse_all=True):
self.expr_stack = []
results = self.bnf.parseString(num_string, parse_all)
val = self.evaluate_stack(self.expr_stack[:])
return val
def __call__(self, row, index: int):
"""This class is called on each row.
Arguments:
row {dict} -- The complete row
Returns:
dict -- The row, including the extra output column
"""
row[self.output] = self.eval(re.sub(r'{(\w+)}', lambda x: str(row.get(x.group(1), 0)), self.equation))
return row, index
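# Illustrative usage sketch added for clarity; not part of the original module.
# The column names "price", "qty" and "total" are invented for the example, and the
# base Transformation class is assumed to accept these constructor arguments as-is.
def _example_custom_equation():
    transform = Custom({"equation": "{price} * {qty}", "output": "total"}, sample_size=1)
    row, _ = transform({"price": 2.5, "qty": 4}, index=0)
    return row  # expected: {"price": 2.5, "qty": 4, "total": 10.0}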
| 2.546875 | 3 |
project/cloudmesh-storage/cloudmesh/vdir/api/manager.py | cybertraining-dsc/fa19-516-171 | 0 | 5719 | #
# this manager stores directly into the db wit Database update
from cloudmesh.mongo.DataBaseDecorator import DatabaseUpdate
from cloudmesh.mongo.CmDatabase import CmDatabase
from cloudmesh.common.console import Console
from cloudmesh.storage.Provider import Provider
import os
from datetime import datetime
class Vdir(object):
def __init__(self):
self.cm = CmDatabase()
self.col = self.cm.db['local-vdir']
self.directory = 'vdir'
def cd(self, dirname=None):
try:
if dirname is None:
if self.directory == 'vdir':
Console.error("Root directory reached.")
else:
cwd = self.col.find_one({'type': 'directory', 'cm.name': self.directory})
self.directory = cwd['parent']
pwd = self.col.find_one({'type': 'directory', 'cm.name': self.directory})
return pwd
else:
directory = self.col.find_one({'type': 'directory', 'cm.name': dirname})
if directory['parent'] == self.directory:
self.directory = dirname
pwd = self.col.find_one({'type': 'directory', 'cm.name': self.directory})
return pwd
else:
Console.error('Directory does not exist at this location.')
except Exception as e:
print(e)
@DatabaseUpdate()
def mkdir(self, dirname):
try:
directory = self.col.find_one({"cm.name": dirname, 'type': 'directory'})
if directory is None:
dir_dict = dict()
dir_dict['cm'] = {
'name': dirname,
'kind': 'vdir',
'cloud': 'local'
}
dir_dict['type'] = 'directory'
dir_dict['parent'] = self.directory
dir_dict['cm']['created'] = datetime.utcnow()
dir_dict['cm']['modified'] = datetime.utcnow()
return dir_dict
else:
Console.error("Directory with that name exists.")
except Exception as e:
print(e)
def ls(self, directory=None):
try:
dash = '-' * 40
if directory is not None:
cloudmesh = self.col.find({'$or': [{'vdirectory': directory}, {'parent': directory}]})
count = self.col.count_documents({'$or': [{'vdirectory': directory}, {'parent': directory}]})
else:
cloudmesh = self.col.find({'$or': [{'vdirectory': self.directory}, {'parent': self.directory}]})
count = self.col.count_documents({'$or': [{'vdirectory': self.directory}, {'parent': self.directory}]})
locations = "{:<20} {:>}".format("Name", "Location") + "\n" + dash + "\n"
for i in range(0, count):
entry = cloudmesh[i]
if entry['type'] == 'fileendpoint':
location = entry['provider'] + ":" + entry['cloud_directory'] + "/" + entry['filename']
else:
if self.directory == '':
location = 'Vdir'
else:
location = self.directory
locations += "{:<20} {:>}".format(entry['cm']['name'], location) + "\n"
print(locations)
return locations
except Exception as e:
print(e)
@DatabaseUpdate()
def add(self, endpoint, dir_and_name):
try:
dirname = os.path.dirname(dir_and_name).split('/')[-1]
if dirname == '':
dirname = 'vdir'
directory = 'vdir'
else:
directory = self.col.find_one({"cm.name": dirname, 'type': 'directory'})
filename = os.path.basename(dir_and_name)
file = self.col.find_one({"cm.name": filename, 'type': 'fileendpoint'})
if directory is not None and file is None:
file_dict = dict()
file_dict['cm'] = {
'name': filename,
'kind': 'vdir',
'cloud': 'local'
}
file_dict['type'] = 'fileendpoint'
file_dict['vdirectory'] = dirname
file_dict['cloud_directory'] = os.path.dirname(endpoint).split(':')[1]
file_dict['filename'] = os.path.basename(endpoint)
file_dict['provider'] = os.path.dirname(endpoint).split(':')[0]
file_dict['cm']['created'] = datetime.utcnow()
file_dict['cm']['modified'] = datetime.utcnow()
return file_dict
elif directory is None:
Console.error("Virtual directory not found.")
elif file is not None:
print(file)
Console.error("File with that name already exists.")
except Exception as e:
print(e)
def get(self, name, destination=None):
try:
doc = self.col.find_one({'cm.name': name, 'type': 'fileendpoint'})
if doc is not None:
self.col.update_one({'cm.name': name, 'type': 'fileendpoint'},
{'$set': {'modified': datetime.utcnow()}})
service = doc['provider']
source = os.path.join(doc['cloud_directory'], doc['filename'])
print(source)
if destination is None:
destination = '~/.cloudmesh/vdir'
p = Provider(service)
file = p.get(source, destination, False)
return file
else:
Console.error("File not found.")
except Exception as e:
print(e)
def delete(self, dir_or_name):
try:
result = self.col.find_one({'cm.name': dir_or_name})
self.col.delete_one({'cm.name': dir_or_name})
return result
except Exception as e:
print(e)
def status(self, dir_or_name):
try:
result = self.col.find_one({'cm.name': dir_or_name})
return result
except Exception as e:
print(e)
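# Illustrative usage sketch added for clarity; not part of the original API.
# It assumes a reachable local MongoDB with the cloudmesh collections and a
# configured storage provider named "box"; all paths and names are invented.
def _example_vdir_session():
    v = Vdir()
    v.mkdir("reports")  # create a virtual directory under the current one
    v.cd("reports")  # descend into it
    v.add("box:/data/report1.pdf", "reports/report1")  # register a cloud file endpoint
    v.ls()  # list entries with their provider locations
    v.get("report1", "~/Downloads")  # fetch the file through its provider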
| 2.5 | 2 |
redash/query_runner/influx_db.py | cjpit/redash | 1 | 5720 | import json
import logging
from redash.query_runner import *
from redash.utils import JSONEncoder
logger = logging.getLogger(__name__)
try:
from influxdb import InfluxDBClusterClient
enabled = True
except ImportError:
enabled = False
def _transform_result(results):
result_columns = []
result_rows = []
for result in results:
for series in result.raw.get('series', []):
for column in series['columns']:
if column not in result_columns:
result_columns.append(column)
tags = series.get('tags', {})
for key in tags.keys():
if key not in result_columns:
result_columns.append(key)
for result in results:
for series in result.raw.get('series', []):
for point in series['values']:
result_row = {}
for column in result_columns:
tags = series.get('tags', {})
if column in tags:
result_row[column] = tags[column]
elif column in series['columns']:
index = series['columns'].index(column)
value = point[index]
result_row[column] = value
result_rows.append(result_row)
return json.dumps({
"columns": [{'name': c} for c in result_columns],
"rows": result_rows
}, cls=JSONEncoder)
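# Illustrative example added for clarity (not part of the original module).
# Given a query result whose raw payload is
#     {"series": [{"columns": ["time", "value"], "tags": {"host": "a"},
#                  "values": [[1000, 0.5]]}]}
# _transform_result() returns JSON equivalent to
#     {"columns": [{"name": "time"}, {"name": "value"}, {"name": "host"}],
#      "rows": [{"time": 1000, "value": 0.5, "host": "a"}]}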
class InfluxDB(BaseQueryRunner):
noop_query = "show measurements limit 1"
@classmethod
def configuration_schema(cls):
return {
'type': 'object',
'properties': {
'url': {
'type': 'string'
}
},
'required': ['url']
}
@classmethod
def enabled(cls):
return enabled
@classmethod
def annotate_query(cls):
return False
@classmethod
def type(cls):
return "influxdb"
def run_query(self, query, user):
client = InfluxDBClusterClient.from_DSN(self.configuration['url'])
logger.debug("influxdb url: %s", self.configuration['url'])
logger.debug("influxdb got query: %s", query)
try:
results = client.query(query)
if not isinstance(results, list):
results = [results]
json_data = _transform_result(results)
error = None
except Exception as ex:
json_data = None
error = ex.message
return json_data, error
register(InfluxDB)
| 2.21875 | 2 |
ics/mergeGatingSets.py | victorfica/utils | 5 | 5721 |
#!/usr/bin/env python
"""
Usage examples:
python /home/agartlan/gitrepo/utils/ics/mergeGatingSets.py --function functions --ncpus 4 --out functions_extract.csv
sbatch -n 1 -t 3-0 -c 4 -o functions_slurm.txt --wrap="python /home/agartlan/gitrepo/utils/ics/mergeGatingSets.py --function functions --ncpus 4 --out functions_extract.csv"
sbatch -n 1 -t 3-0 -c 4 -o functions_markers_slurm.txt --wrap="python /home/agartlan/gitrepo/utils/ics/mergeGatingSets.py --function functions_markers --ncpus 4 --out functions_markers_extract.csv"
sbatch -n 1 -t 3-0 -c 4 -o functions_markers_sparse_slurm_gby.txt --wrap="python /home/agartlan/gitrepo/utils/ics/mergeGatingSets.py --function sparse_functions --ncpus 4 --subsets /home/agartlan/gitrepo/utils/ics/allcombs_subsets.csv --out functions_markers_sparse_24Jul2018_gby.csv"
sbatch -n 1 -t 3-0 -c 4 -o cell_functions_slurm.txt --wrap="python /home/agartlan/gitrepo/utils/ics/mergeGatingSets.py --function cell_functions --ncpus 4 --out cell_functions_22Aug2018.feather --feather --subsets /home/agartlan/gitrepo/utils/ics/subsets_CD4_gd_Tcells.csv"
python /home/agartlan/gitrepo/utils/ics/mergeGatingSets.py --function cell_functions --ncpus 3 --out cell_functions_extract.csv --testbatch --testsamples --feather --subsets /home/agartlan/gitrepo/utils/ics/subsets_CD4_gd_Tcells.csv
python /home/agartlan/gitrepo/utils/ics/mergeGatingSets.py --function sparse_functions --ncpus 3 --out sparse_functions_extract_23Aug2018.csv --testbatch --testsamples --feather --subsets /home/agartlan/gitrepo/utils/ics/subsets_CD4_gd_Tcells.csv
python /home/agartlan/gitrepo/utils/ics/mergeGatingSets.py --function bool_functions --ncpus 6 --out bool_functions_extract_05May2020.csv --testbatch --testsamples --feather --subsets /home/agartlan/gitrepo/utils/ics/subsets_CD4_gd_Tcells.csv
To delete all tmp files use:
find . -name \merged_tmp*.feather -type f -delete
"""
def mergeBatches(dataFolder, extractionFunc, extractionKwargs, ncpus, testsamples, testbatch, outFile, metaCols=None, filters=None, useFeather=False):
out = []
batchList = [opj(dataFolder, bf) for bf in os.listdir(dataFolder) if os.path.isdir(opj(dataFolder, bf))]
if testbatch:
batchList = batchList[:1]
matchStr = 'gs_*.feather'
if ncpus > 1 and _PARMAP:
res = parmap.map(mergeSamples,
batchList,
extractionFunc,
extractionKwargs,
matchStr,
testsamples,
metaCols,
filters,
pool=Pool(processes=ncpus))
else:
if _PARMAP:
res = parmap.map(mergeSamples,
batchList,
extractionFunc,
extractionKwargs,
matchStr,
testsamples,
metaCols,
filters,
parallel=False)
else:
func = partial(mergeSamples,
extractionFunc=extractionFunc,
extractionKwargs=extractionKwargs,
matchStr=matchStr,
test=testsamples,
metaCols=metaCols,
filters=filters)
res = list(map(func, batchList))
outFilename = mergeFeathers(res, outFile, writeCSV=1 - int(useFeather))
return outFilename
def testMatching(dataFolder):
out = []
for bf in os.listdir(dataFolder):
batchFolder = opj(dataFolder, bf)
if os.path.isdir(opj(dataFolder, bf)):
featherLU = matchSamples(batchFolder, test=False)
tmp = pd.Series(featherLU).to_frame()
tmp.loc[:, 'batch'] = bf
tmp.loc[:, 'batch_folder'] = opj(dataFolder, bf)
out.append(tmp)
return pd.concat(out, axis=0)
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description='Extract features and merge batches into one CSV.')
parser.add_argument('--folder', type=str,
help='Data folder containing all batch folders.',
default='/fh/fast/gilbert_p/grp/hvtn602_compass/tmpdata')
parser.add_argument('--function', type=str,
help='Name of extraction to apply ("functions")',
default='functions')
parser.add_argument('--subsets', type=str,
help='Filename listing subsets for analysis.',
default='/home/agartlan/gitrepo/utils/ics/sample_subsets2.csv')
parser.add_argument('--out', type=str,
help='Output filename for CSV.',
default='merged_out.csv')
parser.add_argument('--ncpus', type=int,
help='Number of CPUs/cores to use for parallelization.',
default=1)
parser.add_argument('--testsamples', action='store_true', help='Only process two samples from each batch.')
parser.add_argument('--testbatch', action='store_true', help='Only process two samples from one batch.')
parser.add_argument('--matchingonly', action='store_true', help='Only perform sample matching, to validate metadata.')
parser.add_argument('--feather', action='store_true', help='Store as feather as oposed to CSV')
parser.add_argument('--utils', default='/home/agartlan/gitrepo/utils', help='Location of agartland/utils repo from public github.com')
args = parser.parse_args()
try:
import parmap
from multiprocessing import Pool
_PARMAP = True
except:
_PARMAP = False
print('Could not find package "parmap", parallelization not enabled.')
import itertools
import pandas as pd
import numpy as np
from os.path import join as opj
import os
from functools import partial
import time
import sys
import feather
"""Make sure the utils are on path before importing"""
sys.path.append(args.utils)
# from ics import extractFunctionsGBY, extractFunctionsMarkersGBY, parseSubsets, mergeSamples, matchSamples
from ics import *
if args.matchingonly:
metaDf = testMatching(args.folder)
metaDf.to_csv(opj(args.folder, 'metamatch_' + args.out))
print('Wrote matching metadata to %s.' % opj(args.folder, 'metamatch_' + args.out))
else:
subsets, markers, functions, exclude = parseSubsets(args.subsets)
features = {'sparse_functions':(extractFunctionsGBY, dict(subsets=subsets,
functions=functions,
mincells=5)),
'bool_functions':(extractFunctionsGBY, dict(subsets=subsets,
functions=functions,
mincells=0)),
'functions_markers':(extractFunctionsMarkersGBY, dict(subsets=subsets,
functions=functions,
markers=markers,
compressions=[('ALL', 2),
(['IFNg','IL2', 'TNFa'], 2)])),
'functions':(extractFunctionsGBY, dict(subsets=subsets,
functions=functions,
compressions=[('ALL', 1),
('ALL', 2),
(['IFNg','IL2', 'TNFa'], 1),
(['IFNg','IL2', 'TNFa'], 2),
(['IFNg','IL2'], 1)])),
'cell_functions':(extractRawFunctions, dict(subsets=subsets, functions=functions, downsample=1))}
extractionFunc, extractionKwargs = features[args.function]
if args.testbatch:
print('Test: processing samples from one batch')
if args.testsamples:
print('Test: processing two samples per batch')
outFile = opj(args.folder, args.out)
if args.feather:
outFile = outFile.replace('.csv', '.feather')
wrote = mergeBatches(args.folder,
extractionFunc=extractionFunc,
extractionKwargs=extractionKwargs,
testsamples=args.testsamples,
testbatch=args.testbatch,
outFile=outFile,
metaCols=['PTID', 'VISITNO', 'Global.Spec.Id', 'TESTDT', 'STIM'],
filters={'STIM':['negctrl', 'TB WCL', 'BCG-Pasteur', 'Ag85B', 'TB 10.4'], 'VISITNO':[2, 6, 7, 10, 11, 12]},
useFeather=int(args.feather),
ncpus=args.ncpus)
if wrote == outFile:
print('Wrote extracted data to %s.' % outFile)
else:
print('Error writing file to disk: %s' % wrote) | 1.335938 | 1 |
meeting.py | zoni/ulauncher-meet | 4 | 5722 | from dataclasses import dataclass
@dataclass
class Meeting:
name: str
id: str
| 1.710938 | 2 |
setup.py | uuosio/uuosio.gscdk | 6 | 5723 |
import os
import shutil
import setuptools
# from skbuild import setup
from distutils.core import setup
from distutils.sysconfig import get_python_lib
import glob
# if os.path.exists('pysrc/tinygo'):
# shutil.rmtree('pysrc/tinygo')
# shutil.copytree('tinygo/build/release/tinygo', 'pysrc/tinygo')
release_files = []
for root, dirs, files in os.walk("pysrc/tinygo"):
for f in files:
release_files.append(os.path.join(root.replace('pysrc/', ''), f))
# print(release_files)
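# Example added for clarity (not part of the original setup script): a file at
# pysrc/tinygo/bin/tinygo is recorded here as 'tinygo/bin/tinygo', i.e. relative to
# the 'gscdk' package directory, which is the form package_data below expects.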
setup(
name="gscdk",
version="0.3.5",
description="Go Smart Contract Development Kit",
author='The UUOSIO Team',
license="BSD-3-Clause",
url="https://github.com/uuosio/uuosio.gscdk",
packages=['gscdk'],
package_dir={'gscdk': 'pysrc'},
package_data={
# "": ["*"],
'gscdk': release_files,
},
setup_requires=['wheel']
# scripts=['compiler/build/release/tinygo/bin/eosio-go'],
# install_requires=[
# ],
# include_package_data=True
)
| 1.75 | 2 |
tests/data_creator_action.py | michalurbanski/bkgames | 0 | 5724 | from typing import Callable
class DataCreatorAction:
def __init__(self, func: Callable, priority_for_creation: int = 99, priority_for_removal: int = 99):
self.func = func
self.priority_for_creation = priority_for_creation
self.priority_for_removal = priority_for_removal
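# Illustrative usage added for clarity (not part of the original class); the callable
# and priority values are invented:
#     action = DataCreatorAction(create_teams, priority_for_creation=1, priority_for_removal=5)
#     creation_order = sorted(actions, key=lambda a: a.priority_for_creation)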
| 2.6875 | 3 |
Python/Numpy/Min and Max/min_and_max.py | brianchiang-tw/HackerRank | 2 | 5725 | import numpy as np
if __name__ == '__main__':
h, w = map( int, input().split() )
row_list = []
for i in range(h):
single_row = list( map(int, input().split() ) )
np_row = np.array( single_row )
row_list.append( np_row )
min_of_each_row = np.min( row_list, axis = 1)
max_of_min = np.max( min_of_each_row )
print( max_of_min )
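# Worked example added for clarity (not part of the original solution): for the input
#     4 2
#     2 5
#     3 7
#     1 3
#     4 0
# the per-row minima are [2, 3, 1, 0], so the printed answer is 3.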
| 3.40625 | 3 |
allure/pytest_plugin.py | allure-framework/allure-pytest | 112 | 5726 | import uuid
import pickle
import pytest
import argparse
from collections import namedtuple
from six import text_type
from allure.common import AllureImpl, StepContext
from allure.constants import Status, AttachmentType, Severity, \
FAILED_STATUSES, Label, SKIPPED_STATUSES
from allure.utils import parent_module, parent_down_from_module, labels_of, \
all_of, get_exception_message, now, mangle_testnames
from allure.structure import TestCase, TestStep, Attach, TestSuite, Failure, TestLabel
def pytest_addoption(parser):
parser.getgroup("reporting").addoption('--alluredir',
action="store",
dest="allurereportdir",
metavar="DIR",
default=None,
help="Generate Allure report in the specified directory (may not exist)")
severities = [v for (_, v) in all_of(Severity)]
def label_type(name, legal_values=set()):
"""
argparse-type factory for labelish things.
processed value is set of tuples (name, value).
:param name: of label type (for future TestLabel things)
:param legal_values: a `set` of values that are legal for this label, if any limit whatsoever
:raises ArgumentTypeError: if `legal_values` are given and there are values that fall out of that
"""
def a_label_type(string):
atoms = set(string.split(','))
if legal_values and not atoms <= legal_values:
raise argparse.ArgumentTypeError('Illegal {} values: {}, only [{}] are allowed'.format(name, ', '.join(atoms - legal_values), ', '.join(legal_values)))
return set((name, v) for v in atoms)
return a_label_type
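    # Illustrative example added for clarity (not part of the original plugin), assuming
    # Label.SEVERITY == 'severity':
    #     parse = label_type(name=Label.SEVERITY, legal_values={'critical', 'minor'})
    #     parse('critical,minor')  ->  {('severity', 'critical'), ('severity', 'minor')}
    # while parse('blocker') raises argparse.ArgumentTypeError.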
parser.getgroup("general").addoption('--allure_severities',
action="store",
dest="allureseverities",
metavar="SEVERITIES_SET",
default={},
type=label_type(name=Label.SEVERITY, legal_values=set(severities)),
help="""Comma-separated list of severity names.
Tests only with these severities will be run.
Possible values are:%s.""" % ', '.join(severities))
parser.getgroup("general").addoption('--allure_features',
action="store",
dest="allurefeatures",
metavar="FEATURES_SET",
default={},
type=label_type(name=Label.FEATURE),
help="""Comma-separated list of feature names.
Run tests that have at least one of the specified feature labels.""")
parser.getgroup("general").addoption('--allure_stories',
action="store",
dest="allurestories",
metavar="STORIES_SET",
default={},
type=label_type(name=Label.STORY),
help="""Comma-separated list of story names.
Run tests that have at least one of the specified story labels.""")
def pytest_configure(config):
reportdir = config.option.allurereportdir
if reportdir: # we actually record something
allure_impl = AllureImpl(reportdir)
testlistener = AllureTestListener(config)
pytest.allure._allurelistener = testlistener
config.pluginmanager.register(testlistener)
if not hasattr(config, 'slaveinput'):
# on xdist-master node do all the important stuff
config.pluginmanager.register(AllureAgregatingListener(allure_impl, config))
config.pluginmanager.register(AllureCollectionListener(allure_impl))
class AllureTestListener(object):
"""
Per-test listener.
Is responsible for recording in-test data and for attaching it to the test report thing.
The per-test reports are handled by `AllureAgregatingListener` at the `pytest_runtest_logreport` hook.
"""
def __init__(self, config):
self.config = config
self.environment = {}
self.test = None
# FIXME: that flag makes us pre-report failures in the makereport hook.
# it is here to cope with xdist's begavior regarding -x.
# see self.pytest_runtest_makereport and AllureAgregatingListener.pytest_sessionfinish
self._magicaldoublereport = hasattr(self.config, 'slaveinput') and self.config.getvalue("maxfail")
@pytest.mark.hookwrapper
def pytest_runtest_protocol(self, item, nextitem):
try:
# for common items
description = item.function.__doc__
except AttributeError:
# for doctests that has no `function` attribute
description = item.reportinfo()[2]
self.test = TestCase(name='.'.join(mangle_testnames([x.name for x in parent_down_from_module(item)])),
description=description,
start=now(),
attachments=[],
labels=labels_of(item),
status=None,
steps=[],
id=str(uuid.uuid4())) # for later resolution in AllureAgregatingListener.pytest_sessionfinish
self.stack = [self.test]
yield
self.test = None
self.stack = []
def attach(self, title, contents, attach_type):
"""
Store attachment object in current state for later actual write in the `AllureAgregatingListener.write_attach`
"""
attach = Attach(source=contents, # we later re-save those, oh my...
title=title,
type=attach_type)
self.stack[-1].attachments.append(attach)
def dynamic_issue(self, *issues):
"""
Attaches ``issues`` to the current active case
"""
if self.test:
self.test.labels.extend([TestLabel(name=Label.ISSUE, value=issue) for issue in issues])
def description(self, description):
"""
Sets description for the test
"""
if self.test:
self.test.description = description
def start_step(self, name):
"""
        Starts a new :py:class:`allure.structure.TestStep` with given ``name``,
pushes it to the ``self.stack`` and returns the step.
"""
step = TestStep(name=name,
title=name,
start=now(),
attachments=[],
steps=[])
self.stack[-1].steps.append(step)
self.stack.append(step)
return step
def stop_step(self):
"""
Stops the step at the top of ``self.stack``
"""
step = self.stack.pop()
step.stop = now()
def _fill_case(self, report, call, pyteststatus, status):
"""
        Finalizes the test case with status, failure info and attachments
:param report: py.test's `TestReport`
:param call: py.test's `CallInfo`
:param pyteststatus: the failed/xfailed/xpassed thing
:param status: a :py:class:`allure.constants.Status` entry
"""
[self.attach(name, contents, AttachmentType.TEXT) for (name, contents) in dict(report.sections).items()]
self.test.stop = now()
self.test.status = status
if status in FAILED_STATUSES:
self.test.failure = Failure(message=get_exception_message(call.excinfo, pyteststatus, report),
trace=report.longrepr or hasattr(report, 'wasxfail') and report.wasxfail)
elif status in SKIPPED_STATUSES:
skip_message = type(report.longrepr) == tuple and report.longrepr[2] or report.wasxfail
trim_msg_len = 89
short_message = skip_message.split('\n')[0][:trim_msg_len]
# FIXME: see pytest.runner.pytest_runtest_makereport
self.test.failure = Failure(message=(short_message + '...' * (len(skip_message) > trim_msg_len)),
trace=status == Status.PENDING and report.longrepr or short_message != skip_message and skip_message or '')
def report_case(self, item, report):
"""
        Adds `self.test` to the `report` in a form that `AllureAgregatingListener` understands
"""
parent = parent_module(item)
# we attach a four-tuple: (test module ID, test module name, test module doc, environment, TestCase)
report.__dict__.update(_allure_result=pickle.dumps((parent.nodeid,
parent.module.__name__,
parent.module.__doc__ or '',
self.environment,
self.test)))
@pytest.mark.hookwrapper
def pytest_runtest_makereport(self, item, call):
"""
Decides when to actually report things.
pytest runs this (naturally) three times -- with report.when being:
setup <--- fixtures are to be initialized in this one
call <--- when this finishes the main code has finished
teardown <--- tears down fixtures (that still possess important info)
`setup` and `teardown` are always called, but `call` is called only if `setup` passes.
See :py:func:`_pytest.runner.runtestprotocol` for proofs / ideas.
The "other side" (AllureAggregatingListener) expects us to send EXACTLY ONE test report (it wont break, but it will duplicate cases in the report -- which is bad.
So we work hard to decide exact moment when we call `_stop_case` to do that. This method may benefit from FSM (we keep track of what has already happened via self.test.status)
Expected behavior is:
FAILED when call fails and others OK
BROKEN when either setup OR teardown are broken (and call may be anything)
PENDING if skipped and xfailed
SKIPPED if skipped and not xfailed
"""
report = (yield).get_result()
status = self.config.hook.pytest_report_teststatus(report=report)
status = status and status[0]
if report.when == 'call':
if report.passed:
self._fill_case(report, call, status, Status.PASSED)
elif report.failed:
self._fill_case(report, call, status, Status.FAILED)
                # FIXME: this is here only to work around xdist's stupid -x thing when it exits BEFORE THE TEARDOWN test log. Meh, I should file an issue with xdist
if self._magicaldoublereport:
# to minimize ze impact
self.report_case(item, report)
elif report.skipped:
if hasattr(report, 'wasxfail'):
self._fill_case(report, call, status, Status.PENDING)
else:
self._fill_case(report, call, status, Status.CANCELED)
elif report.when == 'setup': # setup / teardown
if report.failed:
self._fill_case(report, call, status, Status.BROKEN)
elif report.skipped:
if hasattr(report, 'wasxfail'):
self._fill_case(report, call, status, Status.PENDING)
else:
self._fill_case(report, call, status, Status.CANCELED)
elif report.when == 'teardown':
# as teardown is always called for testitem -- report our status here
if not report.passed:
if self.test.status not in FAILED_STATUSES:
# if test was OK but failed at teardown => broken
self._fill_case(report, call, status, Status.BROKEN)
else:
# mark it broken so, well, someone has idea of teardown failure
# still, that's no big deal -- test has already failed
# TODO: think about that once again
self.test.status = Status.BROKEN
# if a test isn't marked as "unreported" or it has failed, add it to the report.
if not item.get_marker("unreported") or self.test.status in FAILED_STATUSES:
self.report_case(item, report)
def pytest_runtest_setup(item):
item_labels = set((l.name, l.value) for l in labels_of(item)) # see label_type
arg_labels = set().union(item.config.option.allurefeatures,
item.config.option.allurestories,
item.config.option.allureseverities)
if arg_labels and not item_labels & arg_labels:
pytest.skip('Not suitable with selected labels: %s.' % ', '.join(text_type(l) for l in sorted(arg_labels)))
class LazyInitStepContext(StepContext):
"""
This is a step context used for decorated steps.
    It makes it possible to create step decorators before pytest_configure runs, when no AllureListener has been initiated yet.
"""
def __init__(self, allure_helper, title):
self.allure_helper = allure_helper
self.title = title
self.step = None
@property
def allure(self):
listener = self.allure_helper.get_listener()
        # if the listener has a `stack` we are inside a test;
        # record steps only in that case
# FIXME: this breaks encapsulation a lot
if hasattr(listener, 'stack'):
return listener
class AllureHelper(object):
"""
This object holds various utility methods used from ``pytest.allure`` namespace, like ``pytest.allure.attach``
"""
def __init__(self):
self._allurelistener = None # FIXME: this gets injected elsewhere, like in the pytest_configure
def get_listener(self):
return self._allurelistener
def attach(self, name, contents, type=AttachmentType.TEXT): # @ReservedAssignment
"""
Attaches ``contents`` to a current context with given ``name`` and ``type``.
"""
if self._allurelistener:
self._allurelistener.attach(name, contents, type)
def label(self, name, *value):
"""
A decorator factory that returns ``pytest.mark`` for a given label.
"""
allure_label = getattr(pytest.mark, '%s.%s' % (Label.DEFAULT, name))
return allure_label(*value)
def severity(self, severity):
"""
A decorator factory that returns ``pytest.mark`` for a given allure ``level``.
"""
return self.label(Label.SEVERITY, severity)
def feature(self, *features):
"""
A decorator factory that returns ``pytest.mark`` for a given features.
"""
return self.label(Label.FEATURE, *features)
def story(self, *stories):
"""
A decorator factory that returns ``pytest.mark`` for a given stories.
"""
return self.label(Label.STORY, *stories)
def issue(self, *issues):
"""
A decorator factory that returns ``pytest.mark`` for a given issues.
"""
return self.label(Label.ISSUE, *issues)
def dynamic_issue(self, *issues):
"""
Mark test ``issues`` from inside.
"""
if self._allurelistener:
self._allurelistener.dynamic_issue(*issues)
def description(self, description):
"""
Sets description for the test
"""
if self._allurelistener:
self._allurelistener.description(description)
def testcase(self, *testcases):
"""
A decorator factory that returns ``pytest.mark`` for a given testcases.
"""
return self.label(Label.TESTCASE, *testcases)
def step(self, title):
"""
A contextmanager/decorator for steps.
TODO: when moving to python 3, rework this with ``contextlib.ContextDecorator``.
Usage examples::
import pytest
def test_foo():
with pytest.allure.step('mystep'):
assert False
@pytest.allure.step('make test data')
def make_test_data_bar():
raise ValueError('No data today')
def test_bar():
assert make_test_data_bar()
@pytest.allure.step
def make_test_data_baz():
raise ValueError('No data today')
def test_baz():
assert make_test_data_baz()
@pytest.fixture()
@pytest.allure.step('test fixture')
def steppy_fixture():
return 1
def test_baz(steppy_fixture):
assert steppy_fixture
"""
if callable(title):
return LazyInitStepContext(self, title.__name__)(title)
else:
return LazyInitStepContext(self, title)
def single_step(self, text):
"""
Writes single line to report.
"""
if self._allurelistener:
with self.step(text):
pass
def environment(self, **env_dict):
if self._allurelistener:
self._allurelistener.environment.update(env_dict)
@property
def attach_type(self):
return AttachmentType
@property
def severity_level(self):
return Severity
def __getattr__(self, attr):
"""
Provides fancy shortcuts for severity::
# these are the same
pytest.allure.CRITICAL
pytest.allure.severity(pytest.allure.severity_level.CRITICAL)
"""
if attr in dir(Severity) and not attr.startswith('_'):
return self.severity(getattr(Severity, attr))
else:
raise AttributeError
MASTER_HELPER = AllureHelper()
def pytest_namespace():
return {'allure': MASTER_HELPER}
class AllureAgregatingListener(object):
"""
Listens to pytest hooks to generate reports for common tests.
"""
def __init__(self, impl, config):
self.impl = impl
# module's nodeid => TestSuite object
self.suites = {}
def pytest_sessionfinish(self):
"""
We are done and have all the results in `self.suites`
Lets write em down.
But first we kinda-unify the test cases.
        We expect cases to come from AllureTestListener -- they have an `id` field to manifest their identity.
        Of all the test cases in suite.tests we keep only the LAST one with a given ID -- because a logreport can be sent MORE THAN ONCE
(namely, if the test fails and then gets broken -- to cope with the xdist's -x behavior we have to have tests even at CALL failures)
TODO: do it in a better, more efficient way
"""
for s in self.suites.values():
if s.tests: # nobody likes empty suites
s.stop = max(case.stop for case in s.tests)
known_ids = set()
refined_tests = []
for t in s.tests[::-1]:
if t.id not in known_ids:
known_ids.add(t.id)
refined_tests.append(t)
s.tests = refined_tests[::-1]
with self.impl._reportfile('%s-testsuite.xml' % uuid.uuid4()) as f:
self.impl._write_xml(f, s)
self.impl.store_environment()
def write_attach(self, attachment):
"""
        Writes the attachment object from the `AllureTestListener` to the FS, fixing its fields
:param attachment: a :py:class:`allure.structure.Attach` object
"""
# OMG, that is bad
attachment.source = self.impl._save_attach(attachment.source, attachment.type)
attachment.type = attachment.type.mime_type
def pytest_runtest_logreport(self, report):
if hasattr(report, '_allure_result'):
module_id, module_name, module_doc, environment, testcase = pickle.loads(report._allure_result)
report._allure_result = None # so actual pickled data is garbage-collected, see https://github.com/allure-framework/allure-python/issues/98
self.impl.environment.update(environment)
for a in testcase.iter_attachments():
self.write_attach(a)
self.suites.setdefault(module_id, TestSuite(name=module_name,
description=module_doc,
tests=[],
labels=[],
start=testcase.start, # first case starts the suite!
stop=None)).tests.append(testcase)
CollectFail = namedtuple('CollectFail', 'name status message trace')
class AllureCollectionListener(object):
"""
Listens to pytest collection-related hooks
to generate reports for modules that failed to collect.
"""
def __init__(self, impl):
self.impl = impl
self.fails = []
def pytest_collectreport(self, report):
if not report.passed:
if report.failed:
status = Status.BROKEN
else:
status = Status.CANCELED
self.fails.append(CollectFail(name=mangle_testnames(report.nodeid.split("::"))[-1],
status=status,
message=get_exception_message(None, None, report),
trace=report.longrepr))
def pytest_sessionfinish(self):
"""
Creates a testsuite with collection failures if there were any.
"""
if self.fails:
self.impl.start_suite(name='test_collection_phase',
title='Collection phase',
description='This is the tests collection phase. Failures are modules that failed to collect.')
for fail in self.fails:
self.impl.start_case(name=fail.name.split(".")[-1])
self.impl.stop_case(status=fail.status, message=fail.message, trace=fail.trace)
self.impl.stop_suite()
| 2.1875 | 2 |
treenode/debug.py | domlysi/django-treenode | 0 | 5727 | <reponame>domlysi/django-treenode
# -*- coding: utf-8 -*-
from django.conf import settings
from django.db import connection
import logging
import timeit
logger = logging.getLogger(__name__)
class debug_performance(object):
def __init__(self, message_prefix=''):
super(debug_performance, self).__init__()
self.__message_prefix = message_prefix
@staticmethod
def _get_queries():
return len(connection.queries)
@staticmethod
def _get_timer():
return timeit.default_timer()
def __enter__(self):
self.__init_queries = debug_performance._get_queries()
self.__init_timer = debug_performance._get_timer()
return None
def __exit__(self, type_, value, traceback):
queries = (debug_performance._get_queries() - self.__init_queries)
timer = (debug_performance._get_timer() - self.__init_timer)
if settings.DEBUG:
message = '\r%sexecuted %s %s in %ss.' % (
self.__message_prefix,
queries,
'query' if queries == 1 else 'queries',
timer, )
print(message)
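# Usage sketch (illustrative, not part of the original module): wrap any block of
# ORM work to print, when settings.DEBUG is True, how many queries it executed and
# how long it took. The model below is only a stand-in; any queryset works.
def _example_usage():
    from django.contrib.auth.models import User
    with debug_performance(message_prefix='users '):
        list(User.objects.all())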
| 2.09375 | 2 |
String_tool.py | vibhorvk/BlendString | 0 | 5728 | bl_info = {
"name": "STRING",
"blender": (2, 80, 0),
"category": "Object",
'Author' : '<NAME>'
}
import bpy
import bmesh
class STRING(bpy.types.Operator):
"""My Object Moving Script""" # Use this as a tooltip for menu items and buttons.
bl_idname = "object.stringtool_ot" # Unique identifier for buttons and menu items to reference.
bl_label = "String" # Display name in the interface.
bl_options = {'REGISTER', 'UNDO'} # Enable undo for the operator.
bdepth: bpy.props.FloatProperty(name = "String Thickness", min = 0.1, max = 5, precision = 2 )
def execute(self, context):
# The original script
####################
#to create an edge between two given objects
def Edgify(ob1,ob2):
loc1 = ob1.location
loc2 = ob2.location
verts = [loc1,loc2]
bpy.ops.mesh.primitive_plane_add(location = (0,0,0))
bpy.ops.object.editmode_toggle()
bpy.ops.mesh.select_all(action='SELECT')
bpy.ops.mesh.delete(type='VERT')
#creating the vertices using the current mesh data into bmesh
pipe = bpy.context.object.data
bm = bmesh.new()
for v in verts:
bm.verts.new(v)
bpy.ops.object.editmode_toggle()
bm.to_mesh(pipe)
bm.free()
bpy.ops.object.editmode_toggle()
bpy.ops.mesh.select_all(action='SELECT')
bpy.ops.mesh.edge_face_add()
bpy.ops.object.editmode_toggle()
def string(olist):
edges = []
l = len(olist)
for x in range(l):
for y in range(l):
if y != x and x < y :
Edgify(olist[x], olist[y])
edges.append(bpy.context.active_object)
return edges
def piper(xlist):
bpy.ops.object.select_all(action='DESELECT')
for x in xlist:
x.select_set(True)
bpy.ops.object.join()
bpy.ops.object.convert(target='CURVE')
def check(olist):
if len(olist) == 0:
self.report({'INFO'},'NONE SELECTED OBJECTS')
return 0
else:
return 1
oblist = bpy.context.selected_objects
Edgelist = string(oblist)
piper(Edgelist)
actob = bpy.context.active_object
actob.data.bevel_depth = self.bdepth
bpy.ops.object.shade_smooth()
########################
return {'FINISHED'} # Lets Blender know the operator finished successfully.
class STRING_PT(bpy.types.Panel):
bl_idname = "object_stringtool_pt"
bl_label = "String"
bl_space_type = 'VIEW_3D'
bl_region_type = 'UI'
bl_category = "newprop"
def draw(self, context):
# You can set the property values that should be used when the user
# presses the button in the UI.
layout = self.layout
props = layout.operator('object.stringtool_ot')
def register():
bpy.utils.register_class(STRING)
def unregister():
bpy.utils.unregister_class(STRING)
# This allows you to run the script directly from Blender's Text editor
# to test the add-on without having to install it.
if __name__ == "__main__":
register()
| 2.640625 | 3 |
Code/DataHandlers/__init__.py | aricsanders/pyMez3 | 2 | 5729 | """
The DataHandlers subpackage is designed to manipulate data, by allowing different data types to be opened,
created, saved and updated. The subpackage is further divided into modules grouped by a common theme. Classes for data
that are already on disk normally follow this pattern:
`instance=ClassName(file_path,**options)`
For example, to
open an XML file whose model you don't know, use
`xml=pyMez.Code.DataHandlers.XMLModels.XMLBase('MyXML.xml')`
or
`xml=XMLBase('MyXML.xml')`
All data models normally have save(), str() and if appropriate show() methods.
Examples
--------
<a href="../../../Examples/How_To_Open_S2p.html"> How to open a s2p file </a>
Import Structure
----------------
DataHandlers typically import from Utils but __NOT__ from Analysis, InstrumentControl or FrontEnds
Help
-----
<a href="../index.html">`pyMez.Code`</a>
<div>
<a href="../../../pyMez_Documentation.html">Documentation Home</a> |
<a href="../../index.html">API Documentation Home</a> |
<a href="../../../Examples/html/Examples_Home.html">Examples</a> |
<a href="../../../Reference_Index.html">Index </a>
</div>
"""
| 2.859375 | 3 |
djangomail/backends/dummy.py | somenzz/djangomail | 1 | 5730 | <reponame>somenzz/djangomail
"""
Dummy email backend that does nothing.
"""
from djangomail.backends.base import BaseEmailBackend
class EmailBackend(BaseEmailBackend):
def send_messages(self, email_messages):
return len(list(email_messages))
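# Configuration sketch (assumption: the dotted path simply mirrors this module's
# location). Pointing Django at this backend silently discards outgoing mail,
# which is handy in tests or local development:
#
#   EMAIL_BACKEND = 'djangomail.backends.dummy.EmailBackend'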
| 1.820313 | 2 |
awx/plugins/library/scan_services.py | Avinesh/awx | 17 | 5731 | <filename>awx/plugins/library/scan_services.py
#!/usr/bin/env python
import re
from ansible.module_utils.basic import * # noqa
DOCUMENTATION = '''
---
module: scan_services
short_description: Return service state information as fact data
description:
- Return service state information as fact data for various service management utilities
version_added: "1.9"
options:
requirements: [ ]
author: <NAME>
'''
EXAMPLES = '''
- scan_services:
# Example fact output:
# host | success >> {
# "ansible_facts": {
# "services": {
# "network": {
# "source": "sysv",
# "state": "running",
# "name": "network"
# },
# "arp-ethers.service": {
# "source": "systemd",
# "state": "stopped",
# "name": "arp-ethers.service"
# }
# }
# }
'''
class BaseService(object):
def __init__(self, module):
self.module = module
self.incomplete_warning = False
class ServiceScanService(BaseService):
def gather_services(self):
services = {}
service_path = self.module.get_bin_path("service")
if service_path is None:
return None
initctl_path = self.module.get_bin_path("initctl")
chkconfig_path = self.module.get_bin_path("chkconfig")
# sysvinit
if service_path is not None and chkconfig_path is None:
rc, stdout, stderr = self.module.run_command("%s --status-all 2>&1 | grep -E \"\\[ (\\+|\\-) \\]\"" % service_path, use_unsafe_shell=True)
for line in stdout.split("\n"):
line_data = line.split()
if len(line_data) < 4:
continue # Skipping because we expected more data
service_name = " ".join(line_data[3:])
if line_data[1] == "+":
service_state = "running"
else:
service_state = "stopped"
services[service_name] = {"name": service_name, "state": service_state, "source": "sysv"}
# Upstart
if initctl_path is not None and chkconfig_path is None:
p = re.compile(r'^\s?(?P<name>.*)\s(?P<goal>\w+)\/(?P<state>\w+)(\,\sprocess\s(?P<pid>[0-9]+))?\s*$')
rc, stdout, stderr = self.module.run_command("%s list" % initctl_path)
real_stdout = stdout.replace("\r","")
for line in real_stdout.split("\n"):
m = p.match(line)
if not m:
continue
service_name = m.group('name')
service_goal = m.group('goal')
service_state = m.group('state')
if m.group('pid'):
pid = m.group('pid')
else:
pid = None # NOQA
payload = {"name": service_name, "state": service_state, "goal": service_goal, "source": "upstart"}
services[service_name] = payload
# RH sysvinit
elif chkconfig_path is not None:
#print '%s --status-all | grep -E "is (running|stopped)"' % service_path
p = re.compile(
r'(?P<service>.*?)\s+[0-9]:(?P<rl0>on|off)\s+[0-9]:(?P<rl1>on|off)\s+[0-9]:(?P<rl2>on|off)\s+'
r'[0-9]:(?P<rl3>on|off)\s+[0-9]:(?P<rl4>on|off)\s+[0-9]:(?P<rl5>on|off)\s+[0-9]:(?P<rl6>on|off)')
rc, stdout, stderr = self.module.run_command('%s' % chkconfig_path, use_unsafe_shell=True)
# Check for special cases where stdout does not fit pattern
match_any = False
for line in stdout.split('\n'):
if p.match(line):
match_any = True
if not match_any:
p_simple = re.compile(r'(?P<service>.*?)\s+(?P<rl0>on|off)')
match_any = False
for line in stdout.split('\n'):
if p_simple.match(line):
match_any = True
if match_any:
# Try extra flags " -l --allservices" needed for SLES11
rc, stdout, stderr = self.module.run_command('%s -l --allservices' % chkconfig_path, use_unsafe_shell=True)
elif '--list' in stderr:
# Extra flag needed for RHEL5
rc, stdout, stderr = self.module.run_command('%s --list' % chkconfig_path, use_unsafe_shell=True)
for line in stdout.split('\n'):
m = p.match(line)
if m:
service_name = m.group('service')
service_state = 'stopped'
if m.group('rl3') == 'on':
rc, stdout, stderr = self.module.run_command('%s %s status' % (service_path, service_name), use_unsafe_shell=True)
service_state = rc
if rc in (0,):
service_state = 'running'
#elif rc in (1,3):
else:
if 'root' in stderr or 'permission' in stderr.lower() or 'not in sudoers' in stderr.lower():
self.incomplete_warning = True
continue
else:
service_state = 'stopped'
service_data = {"name": service_name, "state": service_state, "source": "sysv"}
services[service_name] = service_data
return services
class SystemctlScanService(BaseService):
def systemd_enabled(self):
# Check if init is the systemd command, using comm as cmdline could be symlink
try:
f = open('/proc/1/comm', 'r')
except IOError:
# If comm doesn't exist, old kernel, no systemd
return False
for line in f:
if 'systemd' in line:
return True
return False
def gather_services(self):
services = {}
if not self.systemd_enabled():
return None
systemctl_path = self.module.get_bin_path("systemctl", opt_dirs=["/usr/bin", "/usr/local/bin"])
if systemctl_path is None:
return None
rc, stdout, stderr = self.module.run_command("%s list-unit-files --type=service | tail -n +2 | head -n -2" % systemctl_path, use_unsafe_shell=True)
for line in stdout.split("\n"):
line_data = line.split()
if len(line_data) != 2:
continue
if line_data[1] == "enabled":
state_val = "running"
else:
state_val = "stopped"
services[line_data[0]] = {"name": line_data[0], "state": state_val, "source": "systemd"}
return services
def main():
module = AnsibleModule(argument_spec = dict()) # noqa
service_modules = (ServiceScanService, SystemctlScanService)
all_services = {}
incomplete_warning = False
for svc_module in service_modules:
svcmod = svc_module(module)
svc = svcmod.gather_services()
if svc is not None:
all_services.update(svc)
if svcmod.incomplete_warning:
incomplete_warning = True
if len(all_services) == 0:
results = dict(skipped=True, msg="Failed to find any services. Sometimes this is due to insufficient privileges.")
else:
results = dict(ansible_facts=dict(services=all_services))
if incomplete_warning:
results['msg'] = "WARNING: Could not find status for all services. Sometimes this is due to insufficient privileges."
module.exit_json(**results)
main()
| 2.234375 | 2 |
app.py | duckm8795/runscope-circleci | 0 | 5732 | import requests
import sys
import time
import os
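# Usage sketch (illustrative, not part of the original script):
#
#   RUNSCOPE_ACCESS_TOKEN=<token> python app.py <runscope-trigger-url>
#
# The positional argument is a Runscope batch trigger URL; the access token is
# read from the environment in _get_result() below.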
def main():
trigger_url = sys.argv[1]
trigger_resp = requests.get(trigger_url)
if trigger_resp.ok:
trigger_json = trigger_resp.json().get("data", {})
test_runs = trigger_json.get("runs", [])
print ("Started {} test runs.".format(len(test_runs)))
results = {}
while len(results.keys()) < len(test_runs):
time.sleep(1)
for run in test_runs:
test_run_id = run.get("test_run_id")
if not test_run_id in results:
result = _get_result(run)
if result.get("result") in ["pass", "fail"]:
results[test_run_id] = result
pass_count = sum([r.get("result") == "pass" for r in results.values()])
fail_count = sum([r.get("result") == "fail" for r in results.values()])
if fail_count > 0:
print ("{} test runs passed. {} test runs failed.".format(pass_count, fail_count))
exit(1)
print ("All test runs passed.")
def _get_result(test_run):
# generate Personal Access Token at https://www.runscope.com/applications
if not "RUNSCOPE_ACCESS_TOKEN" in os.environ:
print ("Please set the environment variable RUNSCOPE_ACCESS_TOKEN. You can get an access token by going to https://www.runscope.com/applications")
exit(1)
API_TOKEN = os.environ["RUNSCOPE_ACCESS_TOKEN"]
opts = {
"base_url": "https://api.runscope.com",
"bucket_key": test_run.get("bucket_key"),
"test_id": test_run.get("test_id"),
"test_run_id": test_run.get("test_run_id")
}
result_url = "{base_url}/buckets/{bucket_key}/tests/{test_id}/results/{test_run_id}".format(**opts)
print ("Getting result: {}".format(result_url))
headers = {
"Authorization": "Bearer {}".format(API_TOKEN),
"User-Agent": "python-trigger-sample"
}
result_resp = requests.get(result_url, headers=headers)
if result_resp.ok:
return result_resp.json().get("data")
return None
if __name__ == '__main__':
main() | 2.71875 | 3 |
spyse/client.py | fabaff/spyse-python | 9 | 5733 | <reponame>fabaff/spyse-python
import requests
from typing import List, Optional
from .models import AS, Domain, IP, CVE, Account, Certificate, Email, DNSHistoricalRecord, WHOISHistoricalRecord
from .response import Response
from .search_query import SearchQuery
from limiter import get_limiter, limit
class DomainsSearchResults:
def __init__(self, results: List[Domain], total_items: int = None, search_id: str = None):
self.total_items: Optional[int] = total_items
self.search_id: Optional[str] = search_id
self.results: List[Domain] = results
class AutonomousSystemsSearchResults:
def __init__(self, results: List[AS], total_items: int = None, search_id: str = None):
self.total_items: Optional[int] = total_items
self.search_id: Optional[str] = search_id
self.results: List[AS] = results
class IPSearchResults:
def __init__(self, results: List[IP], total_items: int = None, search_id: str = None):
self.total_items: Optional[int] = total_items
self.search_id: Optional[str] = search_id
self.results: List[IP] = results
class CertificatesSearchResults:
def __init__(self, results: List[Certificate], total_items: int = None, search_id: str = None):
self.total_items: Optional[int] = total_items
self.search_id: Optional[str] = search_id
self.results: List[Certificate] = results
class CVESearchResults:
def __init__(self, results: List[CVE], total_items: int = None, search_id: str = None):
self.total_items: Optional[int] = total_items
self.search_id: Optional[str] = search_id
self.results: List[CVE] = results
class EmailsSearchResults:
def __init__(self, results: List[Email], total_items: int = None, search_id: str = None):
self.total_items: Optional[int] = total_items
self.search_id: Optional[str] = search_id
self.results: List[Email] = results
class HistoricalDNSSearchResults:
def __init__(self, results: List[DNSHistoricalRecord], total_items: int = None, search_id: str = None):
self.total_items: Optional[int] = total_items
self.search_id: Optional[str] = search_id
self.results: List[DNSHistoricalRecord] = results
class HistoricalWHOISSearchResults:
def __init__(self, results: List[WHOISHistoricalRecord], total_items: int = None, search_id: str = None):
self.total_items: Optional[int] = total_items
self.search_id: Optional[str] = search_id
self.results: List[WHOISHistoricalRecord] = results
class Client:
DEFAULT_BASE_URL = 'https://api.spyse.com/v4/data'
MAX_LIMIT = 100
SEARCH_RESULTS_LIMIT = 10000
RATE_LIMIT_FRAME_IN_SECONDS = 1
def __init__(self, api_token, base_url=DEFAULT_BASE_URL):
self.session = requests.Session()
self.session.headers.update({'Authorization': 'Bearer ' + api_token})
self.session.headers.update({'User-Agent': 'spyse-python'})
self.base_url = base_url
self.limiter = get_limiter(rate=self.RATE_LIMIT_FRAME_IN_SECONDS, capacity=1)
self.account = self.get_quotas()
self.limiter._capacity = self.account.requests_rate_limit
def __get(self, endpoint: str) -> Response:
with limit(self.limiter, consume=1):
return Response.from_dict(self.session.get(endpoint).json())
def __search(self, endpoint, query: SearchQuery, lim: int = MAX_LIMIT, offset: int = 0) -> Response:
with limit(self.limiter, consume=1):
return Response.from_dict(self.session.post(endpoint,
json={"search_params": query.get(), "limit": lim,
"offset": offset}).json())
def __scroll(self, endpoint, query: SearchQuery, search_id: Optional[str] = None) -> Response:
with limit(self.limiter, consume=1):
if search_id:
body = {"search_params": query.get(), "search_id": search_id}
else:
body = {"search_params": query.get()}
return Response.from_dict(self.session.post(endpoint, json=body).json())
def set_user_agent(self, s: str):
self.session.headers.update({'User-Agent': s})
def get_quotas(self) -> Optional[Account]:
"""Returns details about your account quotas."""
response = self.__get('{}/account/quota'.format(self.base_url))
response.check_errors()
return Account.from_dict(response.data.items[0]) if len(response.data.items) > 0 else None
def get_autonomous_system_details(self, asn: int) -> Optional[AS]:
"""Returns details about an autonomous system by AS number."""
response = self.__get('{}/as/{}'.format(self.base_url, asn))
response.check_errors()
return AS.from_dict(response.data.items[0]) if len(response.data.items) > 0 else None
def count_autonomous_systems(self, query: SearchQuery) -> int:
"""Returns the precise number of search results that matched the search query."""
response = self.__search('{}/as/search/count'.format(self.base_url), query)
response.check_errors()
return response.data.total_items
def search_autonomous_systems(self, query: SearchQuery, limit: int = MAX_LIMIT,
offset: int = 0) -> AutonomousSystemsSearchResults:
"""
Returns a list of autonomous systems that matched the search query.
Allows getting only the first 10,000 results.
"""
response = self.__search('{}/as/search'.format(self.base_url), query, limit, offset)
response.check_errors()
as_list = list()
for r in response.data.items:
as_list.append(AS.from_dict(r))
return AutonomousSystemsSearchResults(as_list, response.data.total_items)
def scroll_autonomous_systems(self, query: SearchQuery, scroll_id: str = None) -> AutonomousSystemsSearchResults:
"""
Returns a list of autonomous systems that matched the search query.
Allows getting all the results but requires a Spyse Pro subscription
"""
response = self.__scroll('{}/as/scroll/search'.format(self.base_url), query, scroll_id)
response.check_errors()
as_list = list()
for r in response.data.items:
as_list.append(AS.from_dict(r))
return AutonomousSystemsSearchResults(as_list, search_id=response.data.search_id)
def get_domain_details(self, domain_name: str) -> Optional[Domain]:
"""Returns details about domain"""
response = self.__get('{}/domain/{}'.format(self.base_url, domain_name))
response.check_errors()
return Domain.from_dict(response.data.items[0]) if len(response.data.items) > 0 else None
def search_domains(self, query: SearchQuery, limit: int = MAX_LIMIT, offset: int = 0) -> DomainsSearchResults:
"""
Returns a list of domains that matched the search query.
Allows getting only the first 10,000 results.
"""
response = self.__search('{}/domain/search'.format(self.base_url), query, limit, offset)
response.check_errors()
domains = list()
for r in response.data.items:
domains.append(Domain.from_dict(r))
return DomainsSearchResults(domains, response.data.total_items)
def count_domains(self, query: SearchQuery):
"""Returns the precise number of search results that matched the search query."""
response = self.__search('{}/domain/search/count'.format(self.base_url), query)
response.check_errors()
return response.data.total_items
def scroll_domains(self, query: SearchQuery, scroll_id: str = None) -> DomainsSearchResults:
"""
Returns a list of domains that matched the search query.
Allows getting all the results but requires a Spyse Pro subscription
"""
response = self.__scroll('{}/domain/scroll/search'.format(self.base_url), query, scroll_id)
response.check_errors()
domains = list()
for r in response.data.items:
domains.append(Domain.from_dict(r))
return DomainsSearchResults(domains, search_id=response.data.search_id)
def get_ip_details(self, ip: str) -> Optional[IP]:
"""Returns details about IP"""
response = self.__get('{}/ip/{}'.format(self.base_url, ip))
response.check_errors()
return IP.from_dict(response.data.items[0]) if len(response.data.items) > 0 else None
def search_ip(self, query: SearchQuery, limit: int = MAX_LIMIT, offset: int = 0) -> IPSearchResults:
"""
Returns a list of IPv4 hosts that matched the search query.
Allows getting only the first 10,000 results.
"""
response = self.__search('{}/ip/search'.format(self.base_url), query, limit, offset)
response.check_errors()
ips = list()
for r in response.data.items:
ips.append(IP.from_dict(r))
return IPSearchResults(ips, response.data.total_items)
def count_ip(self, query: SearchQuery) -> int:
"""Returns the precise number of search results that matched the search query."""
response = self.__search('{}/ip/search/count'.format(self.base_url), query)
response.check_errors()
return response.data.total_items
def scroll_ip(self, query: SearchQuery, scroll_id: str = None) -> IPSearchResults:
"""
Returns a list of IPv4 hosts that matched the search query.
Allows getting all the results but requires a Spyse Pro subscription
"""
response = self.__scroll('{}/ip/scroll/search'.format(self.base_url), query, scroll_id)
response.check_errors()
ips = list()
for r in response.data.items:
ips.append(IP.from_dict(r))
return IPSearchResults(ips, search_id=response.data.search_id)
def get_certificate_details(self, fingerprint_sha256: str) -> Optional[Certificate]:
"""Returns details about SSL/TLS certificate"""
response = self.__get('{}/certificate/{}'.format(self.base_url, fingerprint_sha256))
response.check_errors()
return Certificate.from_dict(response.data.items[0]) if len(response.data.items) > 0 else None
def search_certificate(self, query: SearchQuery, limit: int = MAX_LIMIT,
offset: int = 0) -> CertificatesSearchResults:
"""
        Returns a list of SSL/TLS certificates that matched the search query.
Allows getting only the first 10,000 results.
"""
response = self.__search('{}/certificate/search'.format(self.base_url), query, limit, offset)
response.check_errors()
certs = list()
for r in response.data.items:
certs.append(Certificate.from_dict(r))
return CertificatesSearchResults(certs, response.data.total_items)
def count_certificate(self, query: SearchQuery) -> int:
"""Returns the precise number of search results that matched the search query."""
response = self.__search('{}/certificate/search/count'.format(self.base_url), query)
response.check_errors()
return response.data.total_items
def scroll_certificate(self, query: SearchQuery, scroll_id: str = None) -> CertificatesSearchResults:
"""
Returns a list of SSL/TLS certificates that matched the search query.
Allows getting all the results but requires a Spyse Pro subscription
"""
response = self.__scroll('{}/certificate/scroll/search'.format(self.base_url), query, scroll_id)
response.check_errors()
certs = list()
for r in response.data.items:
certs.append(Certificate.from_dict(r))
return CertificatesSearchResults(certs, search_id=response.data.search_id)
def get_cve_details(self, cve_id: str) -> Optional[CVE]:
"""Returns details about CVE"""
response = self.__get('{}/cve/{}'.format(self.base_url, cve_id))
response.check_errors()
return CVE.from_dict(response.data.items[0]) if len(response.data.items) > 0 else None
def search_cve(self, query: SearchQuery, limit: int = MAX_LIMIT, offset: int = 0) -> CVESearchResults:
"""
Returns a list of CVE that matched the search query.
Allows getting only the first 10,000 results.
"""
response = self.__search('{}/cve/search'.format(self.base_url), query, limit, offset)
response.check_errors()
cve_list = list()
for r in response.data.items:
cve_list.append(CVE.from_dict(r))
return CVESearchResults(cve_list, response.data.total_items)
def count_cve(self, query: SearchQuery) -> int:
"""Returns the precise number of search results that matched the search query."""
response = self.__search('{}/cve/search/count'.format(self.base_url), query)
response.check_errors()
return response.data.total_items
def scroll_cve(self, query: SearchQuery, scroll_id: str = None) -> CVESearchResults:
"""
Returns a list of CVEs that matched the search query.
Allows getting all the results but requires a Spyse Pro subscription
"""
response = self.__scroll('{}/cve/scroll/search'.format(self.base_url), query, scroll_id)
response.check_errors()
cve_list = list()
for r in response.data.items:
cve_list.append(CVE.from_dict(r))
return CVESearchResults(cve_list, search_id=response.data.search_id)
def get_email_details(self, email: str) -> Optional[Email]:
"""Returns details about email"""
response = self.__get('{}/email/{}'.format(self.base_url, email))
response.check_errors()
return Email.from_dict(response.data.items[0]) if len(response.data.items) > 0 else None
def search_emails(self, query: SearchQuery, limit: int = MAX_LIMIT, offset: int = 0) -> EmailsSearchResults:
"""
Returns a list of emails that matched the search query.
Allows getting only the first 10,000 results.
"""
response = self.__search('{}/email/search'.format(self.base_url), query, limit, offset)
response.check_errors()
emails = list()
for r in response.data.items:
emails.append(Email.from_dict(r))
return EmailsSearchResults(emails, response.data.total_items)
def count_emails(self, query: SearchQuery) -> int:
"""Returns the precise number of search results that matched the search query."""
        response = self.__search('{}/email/search/count'.format(self.base_url), query)
response.check_errors()
return response.data.total_items
def scroll_emails(self, query: SearchQuery, scroll_id: str = None) -> EmailsSearchResults:
"""
Returns a list of emails that matched the search query.
Allows getting all the results but requires a Spyse Pro subscription
"""
response = self.__scroll('{}/email/scroll/search'.format(self.base_url), query, scroll_id)
response.check_errors()
emails = list()
for r in response.data.items:
emails.append(Email.from_dict(r))
return EmailsSearchResults(emails, search_id=response.data.search_id)
def search_historical_dns(self, dns_type, domain_name: str, limit: int = MAX_LIMIT, offset: int = 0) \
-> HistoricalDNSSearchResults:
"""
Returns the historical DNS records about the given domain name.
"""
response = self.__get(f'{self.base_url}/history/dns/{dns_type}/{domain_name}?limit={limit}&offset={offset}')
response.check_errors()
records = list()
for r in response.data.items:
records.append(DNSHistoricalRecord.from_dict(r))
return HistoricalDNSSearchResults(records, response.data.total_items)
def search_historical_whois(self, domain_name: str, limit: int = MAX_LIMIT, offset: int = 0) \
-> HistoricalWHOISSearchResults:
"""
Returns the historical WHOIS records for the given domain name.
"""
response = self.__get(f'{self.base_url}/history/domain-whois/{domain_name}?limit={limit}&offset={offset}')
response.check_errors()
records = list()
for r in response.data.items:
records.append(WHOISHistoricalRecord.from_dict(r))
return HistoricalWHOISSearchResults(records, response.data.total_items)
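# Illustrative helper (not part of the original client): a minimal end-to-end call.
# 'YOUR_API_TOKEN' is a placeholder; quota values depend on the subscription tier.
def _example():
    client = Client('YOUR_API_TOKEN')
    print(client.account.requests_rate_limit)        # quotas are fetched on construction
    print(client.get_domain_details('example.com'))  # single-domain lookup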
| 2.28125 | 2 |
talleres_inov_docente/figures/plot_helpers.py | jfcaballero/Tutorial-sobre-scikit-learn-abreviado | 576 | 5734 | from matplotlib.colors import ListedColormap
cm3 = ListedColormap(['#0000aa', '#ff2020', '#50ff50'])
cm2 = ListedColormap(['#0000aa', '#ff2020'])
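# Usage sketch (illustrative, not part of the original helper): colour a
# two-class scatter plot with cm2.
if __name__ == '__main__':
    import numpy as np
    import matplotlib.pyplot as plt
    X = np.random.randn(100, 2)
    y = (X[:, 0] > 0).astype(int)   # synthetic two-class labels
    plt.scatter(X[:, 0], X[:, 1], c=y, cmap=cm2)
    plt.show()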
| 2.328125 | 2 |
Applications/FlaskApp/errorpages.py | cemac-ccs/FlaskMWE | 0 | 5735 | <filename>Applications/FlaskApp/errorpages.py
from flask import current_app, render_template
# Error Pages ----------------------------------------------------------------
def page_not_found(e):
# note that we set the 404 status explicitly
return render_template('404.html.j2'), 404
def page_not_allowed(e):
# note that we set the 403 status explicitly
return render_template('403.html.j2'), 403
def internal_error(error):
    current_app.logger.error('Server Error: %s', error)
return render_template('500.html.j2'), 500
def unhandled_exception(e):
    current_app.logger.error('Unhandled Exception: %s', e)
return render_template('500.html.j2'), 501
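def register_error_pages(app):
    """Illustrative wiring sketch (the real app factory may differ): attach the
    handlers above to a Flask application instance."""
    app.register_error_handler(404, page_not_found)
    app.register_error_handler(403, page_not_allowed)
    app.register_error_handler(500, internal_error)
    app.register_error_handler(Exception, unhandled_exception)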
| 2.625 | 3 |
cppgym/ToyText/BlackJack.py | anhydrous99/cppgym | 0 | 5736 | from .._BlackJack import BlackJackCPP
import gym
import ctypes
import numpy as np
from gym import spaces
class BlackJack(gym.Env):
def __init__(self, natural=False):
self.env = BlackJackCPP(natural)
self.action_space = spaces.Discrete(2)
self.observation_space = spaces.Tuple((
spaces.Discrete(32),
spaces.Discrete(11),
spaces.Discrete(2)
))
self.state = None
self.natural = natural
def seed(self, seed=None):
if seed is None:
return [self.env.get_seed()]
else:
if not isinstance(seed, ctypes.c_uint32):
seed = ctypes.c_uint32(seed).value
self.env.set_seed(seed)
return [seed]
def step(self, action):
assert self.action_space.contains(action)
state, reward, done = self.env.step(action)
self.state = np.array(state)
return self.state, reward, done, {}
def render(self, mode='human'):
return None
def reset(self):
self.state = np.array(self.env.reset())
return self.state
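# Illustrative helper (not part of the original wrapper): play one episode with a
# naive "stick on 20 or 21" policy. Assumes action 0 = stick and 1 = hit,
# mirroring gym's Blackjack-v0 convention.
def _example_episode():
    env = BlackJack()
    obs, done, reward = env.reset(), False, 0.0
    while not done:
        action = 0 if obs[0] >= 20 else 1
        obs, reward, done, _ = env.step(action)
    return reward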
| 2.625 | 3 |
sdks/python/apache_beam/runners/direct/consumer_tracking_pipeline_visitor_test.py | ajothomas/beam | 5 | 5737 | <gh_stars>1-10
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Tests for consumer_tracking_pipeline_visitor."""
# pytype: skip-file
import logging
import unittest
import apache_beam as beam
from apache_beam import pvalue
from apache_beam.pipeline import Pipeline
from apache_beam.pvalue import AsList
from apache_beam.runners.direct import DirectRunner
from apache_beam.runners.direct.consumer_tracking_pipeline_visitor import ConsumerTrackingPipelineVisitor
from apache_beam.transforms import CoGroupByKey
from apache_beam.transforms import Create
from apache_beam.transforms import DoFn
from apache_beam.transforms import FlatMap
from apache_beam.transforms import Flatten
from apache_beam.transforms import ParDo
# Disable frequent lint warning due to pipe operator for chaining transforms.
# pylint: disable=expression-not-assigned
# pylint: disable=pointless-statement
class ConsumerTrackingPipelineVisitorTest(unittest.TestCase):
def setUp(self):
self.pipeline = Pipeline(DirectRunner())
self.visitor = ConsumerTrackingPipelineVisitor()
def test_root_transforms(self):
root_read = beam.Impulse()
root_flatten = Flatten(pipeline=self.pipeline)
pbegin = pvalue.PBegin(self.pipeline)
pcoll_read = pbegin | 'read' >> root_read
pcoll_read | FlatMap(lambda x: x)
[] | 'flatten' >> root_flatten
self.pipeline.visit(self.visitor)
root_transforms = [t.transform for t in self.visitor.root_transforms]
self.assertCountEqual(root_transforms, [root_read, root_flatten])
pbegin_consumers = [
c.transform for c in self.visitor.value_to_consumers[pbegin]
]
self.assertCountEqual(pbegin_consumers, [root_read])
self.assertEqual(len(self.visitor.step_names), 3)
def test_side_inputs(self):
class SplitNumbersFn(DoFn):
def process(self, element):
if element < 0:
yield pvalue.TaggedOutput('tag_negative', element)
else:
yield element
class ProcessNumbersFn(DoFn):
def process(self, element, negatives):
yield element
def _process_numbers(pcoll, negatives):
first_output = (
pcoll
| 'process numbers step 1' >> ParDo(ProcessNumbersFn(), negatives))
second_output = (
first_output
| 'process numbers step 2' >> ParDo(ProcessNumbersFn(), negatives))
output_pc = ((first_output, second_output)
| 'flatten results' >> beam.Flatten())
return output_pc
root_read = beam.Impulse()
result = (
self.pipeline
| 'read' >> root_read
| ParDo(SplitNumbersFn()).with_outputs('tag_negative', main='positive'))
positive, negative = result
_process_numbers(positive, AsList(negative))
self.pipeline.visit(self.visitor)
root_transforms = [t.transform for t in self.visitor.root_transforms]
self.assertEqual(root_transforms, [root_read])
self.assertEqual(len(self.visitor.step_names), 5)
self.assertEqual(len(self.visitor.views), 1)
self.assertTrue(isinstance(self.visitor.views[0], pvalue.AsList))
def test_co_group_by_key(self):
emails = self.pipeline | 'email' >> Create([('joe', '<EMAIL>')])
phones = self.pipeline | 'phone' >> Create([('mary', '111-222-3333')])
{'emails': emails, 'phones': phones} | CoGroupByKey()
self.pipeline.visit(self.visitor)
root_transforms = [t.transform for t in self.visitor.root_transforms]
self.assertEqual(len(root_transforms), 2)
self.assertGreater(
len(self.visitor.step_names), 3) # 2 creates + expanded CoGBK
self.assertEqual(len(self.visitor.views), 0)
def test_visitor_not_sorted(self):
p = Pipeline()
# pylint: disable=expression-not-assigned
from apache_beam.testing.test_stream import TestStream
p | TestStream().add_elements(['']) | beam.Map(lambda _: _)
original_graph = p.to_runner_api(return_context=False)
out_of_order_graph = p.to_runner_api(return_context=False)
root_id = out_of_order_graph.root_transform_ids[0]
root = out_of_order_graph.components.transforms[root_id]
tmp = root.subtransforms[0]
root.subtransforms[0] = root.subtransforms[1]
root.subtransforms[1] = tmp
p = beam.Pipeline().from_runner_api(
out_of_order_graph, runner='BundleBasedDirectRunner', options=None)
v_out_of_order = ConsumerTrackingPipelineVisitor()
p.visit(v_out_of_order)
p = beam.Pipeline().from_runner_api(
original_graph, runner='BundleBasedDirectRunner', options=None)
v_original = ConsumerTrackingPipelineVisitor()
p.visit(v_original)
# Convert to string to assert they are equal.
out_of_order_labels = {
str(k): [str(t) for t in v_out_of_order.value_to_consumers[k]]
for k in v_out_of_order.value_to_consumers
}
original_labels = {
str(k): [str(t) for t in v_original.value_to_consumers[k]]
for k in v_original.value_to_consumers
}
self.assertDictEqual(out_of_order_labels, original_labels)
if __name__ == '__main__':
logging.getLogger().setLevel(logging.DEBUG)
unittest.main()
| 1.773438 | 2 |
gluon/main.py | scudette/rekall-agent-server | 21 | 5738 | <filename>gluon/main.py
#!/bin/env python
# -*- coding: utf-8 -*-
"""
| This file is part of the web2py Web Framework
| Copyrighted by <NAME> <<EMAIL>>
| License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)
The gluon wsgi application
---------------------------
"""
from __future__ import print_function
if False: import import_all # DO NOT REMOVE PART OF FREEZE PROCESS
import gc
import os
import re
import copy
import sys
import time
import datetime
import signal
import socket
import random
import string
from gluon._compat import Cookie, urllib2
#from thread import allocate_lock
from gluon.fileutils import abspath, write_file
from gluon.settings import global_settings
from gluon.utils import web2py_uuid
from gluon.admin import add_path_first, create_missing_folders, create_missing_app_folders
from gluon.globals import current
# Remarks:
# calling script has inserted path to script directory into sys.path
# applications_parent (path to applications/, site-packages/ etc)
# defaults to that directory set sys.path to
# ("", gluon_parent/site-packages, gluon_parent, ...)
#
# this is wrong:
# web2py_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# because we do not want the path to this file which may be Library.zip
# gluon_parent is the directory containing gluon, web2py.py, logging.conf
# and the handlers.
# applications_parent (web2py_path) is the directory containing applications/
# and routes.py
# The two are identical unless web2py_path is changed via the web2py.py -f folder option
# main.web2py_path is the same as applications_parent (for backward compatibility)
web2py_path = global_settings.applications_parent # backward compatibility
create_missing_folders()
# set up logging for subsequent imports
import logging
import logging.config
# This needed to prevent exception on Python 2.5:
# NameError: name 'gluon' is not defined
# See http://bugs.python.org/issue1436
# attention!, the import Tkinter in messageboxhandler, changes locale ...
import gluon.messageboxhandler
logging.gluon = gluon
# so we must restore it! Thanks ozancag
import locale
locale.setlocale(locale.LC_CTYPE, "C") # IMPORTANT, web2py requires locale "C"
exists = os.path.exists
pjoin = os.path.join
try:
logging.config.fileConfig(abspath("logging.conf"))
except: # fails on GAE or when logfile is missing
logging.basicConfig()
logger = logging.getLogger("web2py")
from gluon.restricted import RestrictedError
from gluon.http import HTTP, redirect
from gluon.globals import Request, Response, Session
from gluon.compileapp import build_environment, run_models_in, \
run_controller_in, run_view_in
from gluon.contenttype import contenttype
from pydal.base import BaseAdapter
from gluon.validators import CRYPT
from gluon.html import URL, xmlescape
from gluon.utils import is_valid_ip_address, getipaddrinfo
from gluon.rewrite import load as load_routes, url_in, THREAD_LOCAL as rwthread, \
try_rewrite_on_error, fixup_missing_path_info
from gluon import newcron
__all__ = ['wsgibase', 'save_password', 'appfactory', 'HttpServer']
requests = 0 # gc timer
# Security Checks: validate URL and session_id here,
# accept_language is validated in languages
# pattern used to validate client address
regex_client = re.compile('[\w\-:]+(\.[\w\-]+)*\.?') # ## to account for IPV6
try:
version_info = open(pjoin(global_settings.gluon_parent, 'VERSION'), 'r')
raw_version_string = version_info.read().split()[-1].strip()
version_info.close()
global_settings.web2py_version = raw_version_string
web2py_version = global_settings.web2py_version
except:
raise RuntimeError("Cannot determine web2py version")
try:
from gluon import rocket
except:
if not global_settings.web2py_runtime_gae:
logger.warn('unable to import Rocket')
load_routes()
HTTPS_SCHEMES = set(('https', 'HTTPS'))
def get_client(env):
"""
    Guesses the client address from the environment variables.
    First tries 'http_x_forwarded_for', then 'remote_addr';
    if all fails, assumes '127.0.0.1' or '::1' (running locally)
"""
eget = env.get
g = regex_client.search(eget('http_x_forwarded_for', ''))
client = (g.group() or '').split(',')[0] if g else None
if client in (None, '', 'unknown'):
g = regex_client.search(eget('remote_addr', ''))
if g:
client = g.group()
elif env.http_host.startswith('['): # IPv6
client = '::1'
else:
client = '127.0.0.1' # IPv4
if not is_valid_ip_address(client):
raise HTTP(400, "Bad Request (request.client=%s)" % client)
return client
def serve_controller(request, response, session):
"""
This function is used to generate a dynamic page.
It first runs all models, then runs the function in the controller,
and then tries to render the output using a view/template.
this function must run from the [application] folder.
A typical example would be the call to the url
/[application]/[controller]/[function] that would result in a call
to [function]() in applications/[application]/[controller].py
rendered by applications/[application]/views/[controller]/[function].html
"""
# ##################################################
# build environment for controller and view
# ##################################################
environment = build_environment(request, response, session)
# set default view, controller can override it
response.view = '%s/%s.%s' % (request.controller,
request.function,
request.extension)
# also, make sure the flash is passed through
# ##################################################
# process models, controller and view (if required)
# ##################################################
run_models_in(environment)
response._view_environment = copy.copy(environment)
page = run_controller_in(request.controller, request.function, environment)
if isinstance(page, dict):
response._vars = page
response._view_environment.update(page)
page = run_view_in(response._view_environment)
# logic to garbage collect after exec, not always, once every 100 requests
global requests
requests = ('requests' in globals()) and (requests + 1) % 100 or 0
if not requests:
gc.collect()
# end garbage collection logic
# ##################################################
# set default headers it not set
# ##################################################
default_headers = [
('Content-Type', contenttype('.' + request.extension)),
('Cache-Control',
'no-store, no-cache, must-revalidate, post-check=0, pre-check=0'),
('Expires', time.strftime('%a, %d %b %Y %H:%M:%S GMT',
time.gmtime())),
('Pragma', 'no-cache')]
for key, value in default_headers:
response.headers.setdefault(key, value)
raise HTTP(response.status, page, **response.headers)
class LazyWSGI(object):
def __init__(self, environ, request, response):
self.wsgi_environ = environ
self.request = request
self.response = response
@property
def environ(self):
if not hasattr(self, '_environ'):
new_environ = self.wsgi_environ
new_environ['wsgi.input'] = self.request.body
new_environ['wsgi.version'] = 1
self._environ = new_environ
return self._environ
def start_response(self, status='200', headers=[], exec_info=None):
"""
in controller you can use:
- request.wsgi.environ
- request.wsgi.start_response
to call third party WSGI applications
"""
self.response.status = str(status).split(' ', 1)[0]
self.response.headers = dict(headers)
return lambda *args, **kargs: \
self.response.write(escape=False, *args, **kargs)
def middleware(self, *middleware_apps):
"""
        In your controller use::
@request.wsgi.middleware(middleware1, middleware2, ...)
to decorate actions with WSGI middleware. actions must return strings.
uses a simulated environment so it may have weird behavior in some cases
"""
def middleware(f):
def app(environ, start_response):
data = f()
start_response(self.response.status,
self.response.headers.items())
if isinstance(data, list):
return data
return [data]
for item in middleware_apps:
app = item(app)
def caller(app):
return app(self.environ, self.start_response)
return lambda caller=caller, app=app: caller(app)
return middleware
def wsgibase(environ, responder):
"""
The gluon wsgi application. The first function called when a page
is requested (static or dynamic). It can be called by paste.httpserver
or by apache mod_wsgi (or any WSGI-compatible server).
- fills request with info
- the environment variables, replacing '.' with '_'
- adds web2py path and version info
- compensates for fcgi missing path_info and query_string
- validates the path in url
The url path must be either:
1. for static pages:
- /<application>/static/<file>
2. for dynamic pages:
- /<application>[/<controller>[/<function>[/<sub>]]][.<extension>]
The naming conventions are:
- application, controller, function and extension may only contain
`[a-zA-Z0-9_]`
- file and sub may also contain '-', '=', '.' and '/'
"""
eget = environ.get
current.__dict__.clear()
request = Request(environ)
response = Response()
session = Session()
env = request.env
#env.web2py_path = global_settings.applications_parent
env.web2py_version = web2py_version
#env.update(global_settings)
static_file = False
http_response = None
try:
try:
try:
# ##################################################
# handle fcgi missing path_info and query_string
# select rewrite parameters
# rewrite incoming URL
# parse rewritten header variables
# parse rewritten URL
# serve file if static
# ##################################################
fixup_missing_path_info(environ)
(static_file, version, environ) = url_in(request, environ)
response.status = env.web2py_status_code or response.status
if static_file:
if eget('QUERY_STRING', '').startswith('attachment'):
response.headers['Content-Disposition'] \
= 'attachment'
if version:
response.headers['Cache-Control'] = 'max-age=315360000'
response.headers[
'Expires'] = 'Thu, 31 Dec 2037 23:59:59 GMT'
response.stream(static_file, request=request)
# ##################################################
# fill in request items
# ##################################################
app = request.application # must go after url_in!
if not global_settings.local_hosts:
local_hosts = set(['127.0.0.1', '::ffff:127.0.0.1', '::1'])
if not global_settings.web2py_runtime_gae:
try:
fqdn = socket.getfqdn()
local_hosts.add(socket.gethostname())
local_hosts.add(fqdn)
local_hosts.update([
addrinfo[4][0] for addrinfo
in getipaddrinfo(fqdn)])
if env.server_name:
local_hosts.add(env.server_name)
local_hosts.update([
addrinfo[4][0] for addrinfo
in getipaddrinfo(env.server_name)])
except (socket.gaierror, TypeError):
pass
global_settings.local_hosts = list(local_hosts)
else:
local_hosts = global_settings.local_hosts
client = get_client(env)
x_req_with = str(env.http_x_requested_with).lower()
cmd_opts = global_settings.cmd_options
request.update(
client = client,
folder = abspath('applications', app) + os.sep,
ajax = x_req_with == 'xmlhttprequest',
cid = env.http_web2py_component_element,
is_local = (env.remote_addr in local_hosts and
client == env.remote_addr),
is_shell = False,
is_scheduler = False,
is_https = env.wsgi_url_scheme in HTTPS_SCHEMES or \
request.env.http_x_forwarded_proto in HTTPS_SCHEMES \
or env.https == 'on'
)
request.url = environ['PATH_INFO']
# ##################################################
# access the requested application
# ##################################################
disabled = pjoin(request.folder, 'DISABLED')
if not exists(request.folder):
if app == rwthread.routes.default_application \
and app != 'welcome':
redirect(URL('welcome', 'default', 'index'))
elif rwthread.routes.error_handler:
_handler = rwthread.routes.error_handler
redirect(URL(_handler['application'],
_handler['controller'],
_handler['function'],
args=app))
else:
raise HTTP(404, rwthread.routes.error_message
% 'invalid request',
web2py_error='invalid application')
elif not request.is_local and exists(disabled):
five0three = os.path.join(request.folder,'static','503.html')
if os.path.exists(five0three):
raise HTTP(503, file(five0three, 'r').read())
else:
raise HTTP(503, "<html><body><h1>Temporarily down for maintenance</h1></body></html>")
# ##################################################
# build missing folders
# ##################################################
create_missing_app_folders(request)
# ##################################################
# get the GET and POST data
# ##################################################
#parse_get_post_vars(request, environ)
# ##################################################
# expose wsgi hooks for convenience
# ##################################################
request.wsgi = LazyWSGI(environ, request, response)
# ##################################################
# load cookies
# ##################################################
if env.http_cookie:
for single_cookie in env.http_cookie.split(';'):
single_cookie = single_cookie.strip()
if single_cookie:
try:
request.cookies.load(single_cookie)
except Cookie.CookieError:
pass # single invalid cookie ignore
# ##################################################
# try load session or create new session file
# ##################################################
if not env.web2py_disable_session:
session.connect(request, response)
# ##################################################
# run controller
# ##################################################
if global_settings.debugging and app != "admin":
import gluon.debug
# activate the debugger
gluon.debug.dbg.do_debug(mainpyfile=request.folder)
serve_controller(request, response, session)
except HTTP as hr:
http_response = hr
if static_file:
return http_response.to(responder, env=env)
if request.body:
request.body.close()
if hasattr(current, 'request'):
# ##################################################
# on success, try store session in database
# ##################################################
if not env.web2py_disable_session:
session._try_store_in_db(request, response)
# ##################################################
# on success, commit database
# ##################################################
if response.do_not_commit is True:
BaseAdapter.close_all_instances(None)
elif response.custom_commit:
BaseAdapter.close_all_instances(response.custom_commit)
else:
BaseAdapter.close_all_instances('commit')
# ##################################################
# if session not in db try store session on filesystem
# this must be done after trying to commit database!
# ##################################################
if not env.web2py_disable_session:
session._try_store_in_cookie_or_file(request, response)
# Set header so client can distinguish component requests.
if request.cid:
http_response.headers.setdefault(
'web2py-component-content', 'replace')
if request.ajax:
if response.flash:
http_response.headers['web2py-component-flash'] = \
urllib2.quote(xmlescape(response.flash).replace(b'\n', b''))
if response.js:
http_response.headers['web2py-component-command'] = \
urllib2.quote(response.js.replace('\n', ''))
# ##################################################
# store cookies in headers
# ##################################################
session._fixup_before_save()
http_response.cookies2headers(response.cookies)
ticket = None
except RestrictedError as e:
if request.body:
request.body.close()
# ##################################################
# on application error, rollback database
# ##################################################
# log tickets before rollback if not in DB
if not request.tickets_db:
ticket = e.log(request) or 'unknown'
# rollback
if response._custom_rollback:
response._custom_rollback()
else:
BaseAdapter.close_all_instances('rollback')
# if tickets in db, reconnect and store it in db
if request.tickets_db:
ticket = e.log(request) or 'unknown'
http_response = \
HTTP(500, rwthread.routes.error_message_ticket %
dict(ticket=ticket),
web2py_error='ticket %s' % ticket)
except:
if request.body:
request.body.close()
# ##################################################
# on application error, rollback database
# ##################################################
try:
if response._custom_rollback:
response._custom_rollback()
else:
BaseAdapter.close_all_instances('rollback')
except:
pass
e = RestrictedError('Framework', '', '', locals())
ticket = e.log(request) or 'unrecoverable'
http_response = \
HTTP(500, rwthread.routes.error_message_ticket
% dict(ticket=ticket),
web2py_error='ticket %s' % ticket)
finally:
if response and hasattr(response, 'session_file') \
and response.session_file:
response.session_file.close()
session._unlock(response)
http_response, new_environ = try_rewrite_on_error(
http_response, request, environ, ticket)
if not http_response:
return wsgibase(new_environ, responder)
if global_settings.web2py_crontype == 'soft':
newcron.softcron(global_settings.applications_parent).start()
return http_response.to(responder, env=env)
def save_password(password, port):
"""
Used by main() to save the password in the parameters_port.py file.
"""
password_file = abspath('parameters_%i.py' % port)
if password == '<random>':
# make up a new password
chars = string.letters + string.digits
password = ''.join([random.choice(chars) for _ in range(8)])
cpassword = CRYPT()(password)[0]
print('******************* IMPORTANT!!! ************************')
print('your admin password is "%s"' % password)
print('*********************************************************')
elif password == '<recycle>':
# reuse the current password if any
if exists(password_file):
return
else:
password = ''
elif password.startswith('<pam_user:'):
# use the pam password for specified user
cpassword = password[1:-1]
else:
# use provided password
cpassword = CRYPT()(password)[0]
fp = open(password_file, 'w')
if password:
fp.write('password="%s"\n' % cpassword)
else:
fp.write('password=None\n')
fp.close()
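# Illustrative usage sketch (not in the original source) of the special values
# handled above; the literal password and port are arbitrary example values.
#
#     save_password('my-admin-secret', 8000)  # hash and store a literal password
#     save_password('<random>', 8000)         # generate, print and store a random one
#     save_password('<recycle>', 8000)        # keep an existing parameters_8000.py, if any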
def appfactory(wsgiapp=wsgibase,
logfilename='httpserver.log',
profiler_dir=None,
profilerfilename=None):
"""
generates a wsgi application that does logging and profiling and calls
wsgibase
Args:
wsgiapp: the base application
logfilename: where to store apache-compatible requests log
profiler_dir: where to store profile files
"""
if profilerfilename is not None:
raise BaseException("Deprecated API")
if profiler_dir:
profiler_dir = abspath(profiler_dir)
logger.warn('profiler is on. will use dir %s', profiler_dir)
if not os.path.isdir(profiler_dir):
try:
os.makedirs(profiler_dir)
except:
raise BaseException("Can't create dir %s" % profiler_dir)
filepath = pjoin(profiler_dir, 'wtest')
try:
filehandle = open( filepath, 'w' )
filehandle.close()
os.unlink(filepath)
except IOError:
raise BaseException("Unable to write to dir %s" % profiler_dir)
def app_with_logging(environ, responder):
"""
a wsgi app that does logging and profiling and calls wsgibase
"""
status_headers = []
def responder2(s, h):
"""
wsgi responder app
"""
status_headers.append(s)
status_headers.append(h)
return responder(s, h)
time_in = time.time()
ret = [0]
if not profiler_dir:
ret[0] = wsgiapp(environ, responder2)
else:
import cProfile
prof = cProfile.Profile()
prof.enable()
ret[0] = wsgiapp(environ, responder2)
prof.disable()
destfile = pjoin(profiler_dir, "req_%s.prof" % web2py_uuid())
prof.dump_stats(destfile)
try:
line = '%s, %s, %s, %s, %s, %s, %f\n' % (
environ['REMOTE_ADDR'],
datetime.datetime.today().strftime('%Y-%m-%d %H:%M:%S'),
environ['REQUEST_METHOD'],
environ['PATH_INFO'].replace(',', '%2C'),
environ['SERVER_PROTOCOL'],
(status_headers[0])[:3],
time.time() - time_in,
)
if not logfilename:
sys.stdout.write(line)
elif isinstance(logfilename, str):
write_file(logfilename, line, 'a')
else:
logfilename.write(line)
except:
pass
return ret[0]
return app_with_logging
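# Illustrative sketch (not in the original source): wrapping wsgibase with the
# logging/profiling factory above and serving it with the stdlib WSGI server.
# The address, port and log file name are arbitrary example values.
#
#     from wsgiref.simple_server import make_server
#     application = appfactory(wsgibase, logfilename='httpserver.log')
#     make_server('127.0.0.1', 8000, application).serve_forever()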
class HttpServer(object):
"""
the web2py web server (Rocket)
"""
def __init__(
self,
ip='127.0.0.1',
port=8000,
password='',
pid_filename='httpserver.pid',
log_filename='httpserver.log',
profiler_dir=None,
ssl_certificate=None,
ssl_private_key=None,
ssl_ca_certificate=None,
min_threads=None,
max_threads=None,
server_name=None,
request_queue_size=5,
timeout=10,
socket_timeout=1,
shutdown_timeout=None, # Rocket does not use a shutdown timeout
path=None,
interfaces=None # Rocket is able to use several interfaces - must be list of socket-tuples as string
):
"""
starts the web server.
"""
if interfaces:
# if interfaces is specified, it must be tested for rocket parameter correctness
# not necessarily completely tested (e.g. content of tuples or ip-format)
import types
if isinstance(interfaces, list):
for i in interfaces:
if not isinstance(i, tuple):
raise "Wrong format for rocket interfaces parameter - see http://packages.python.org/rocket/"
else:
raise "Wrong format for rocket interfaces parameter - see http://packages.python.org/rocket/"
if path:
# if a path is specified change the global variables so that web2py
# runs from there instead of cwd or os.environ['web2py_path']
global web2py_path
path = os.path.normpath(path)
web2py_path = path
global_settings.applications_parent = path
os.chdir(path)
load_routes()
for p in (path, abspath('site-packages'), ""):
add_path_first(p)
if exists("logging.conf"):
logging.config.fileConfig("logging.conf")
save_password(password, port)
self.pid_filename = pid_filename
if not server_name:
server_name = socket.gethostname()
logger.info('starting web server...')
rocket.SERVER_NAME = server_name
rocket.SOCKET_TIMEOUT = socket_timeout
sock_list = [ip, port]
if not ssl_certificate or not ssl_private_key:
logger.info('SSL is off')
elif not rocket.ssl:
logger.warning('Python "ssl" module unavailable. SSL is OFF')
elif not exists(ssl_certificate):
logger.warning('unable to open SSL certificate. SSL is OFF')
elif not exists(ssl_private_key):
logger.warning('unable to open SSL private key. SSL is OFF')
else:
sock_list.extend([ssl_private_key, ssl_certificate])
if ssl_ca_certificate:
sock_list.append(ssl_ca_certificate)
logger.info('SSL is ON')
app_info = {'wsgi_app': appfactory(wsgibase,
log_filename,
profiler_dir)}
self.server = rocket.Rocket(interfaces or tuple(sock_list),
method='wsgi',
app_info=app_info,
min_threads=min_threads,
max_threads=max_threads,
queue_size=int(request_queue_size),
timeout=int(timeout),
handle_signals=False,
)
def start(self):
"""
start the web server
"""
try:
signal.signal(signal.SIGTERM, lambda a, b, s=self: s.stop())
signal.signal(signal.SIGINT, lambda a, b, s=self: s.stop())
except:
pass
write_file(self.pid_filename, str(os.getpid()))
self.server.start()
def stop(self, stoplogging=False):
"""
stop cron and the web server
"""
newcron.stopcron()
self.server.stop(stoplogging)
try:
os.unlink(self.pid_filename)
except:
pass
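# Illustrative sketch (not in the original source): starting the bundled
# Rocket-based server defined above. The address, port and password are
# arbitrary example values.
#
#     server = HttpServer(ip='127.0.0.1', port=8000, password='my-admin-secret')
#     server.start()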
| 2.0625 | 2 |
agents/EWPublisherAgent.py | marc4gov/tokenspice2 | 1 | 5739 | <filename>agents/EWPublisherAgent.py
import logging
log = logging.getLogger('marketagents')
from enforce_typing import enforce_types # type: ignore[import]
import random
from agents.PublisherAgent import PublisherAgent
from agents.PoolAgent import PoolAgent
from util import constants
from util.constants import POOL_WEIGHT_DT, POOL_WEIGHT_OCEAN
from web3engine import bfactory, bpool, datatoken, dtfactory, globaltokens
from web3tools.web3util import toBase18
@enforce_types
class EWPublisherAgent(PublisherAgent):
def __init__(self, name: str, USD: float, OCEAN: float):
super().__init__(name, USD, OCEAN)
self._s_since_create = 0
self._s_between_create = 7 * constants.S_PER_DAY #magic number
self._s_since_unstake = 0
self._s_between_unstake = 3 * constants.S_PER_DAY #magic number
def takeStep(self, state) -> None:
self._s_since_create += state.ss.time_step
self._s_since_unstake += state.ss.time_step
if self._doCreatePool():
self._s_since_create = 0
self._createPoolAgent(state)
if self._doUnstakeOCEAN(state):
self._s_since_unstake = 0
self._unstakeOCEANsomewhere(state)
def _doCreatePool(self) -> bool:
if self.OCEAN() < 200.0: #magic number
return False
return self._s_since_create >= self._s_between_create
def _createPoolAgent(self, state) -> PoolAgent:
assert self.OCEAN() > 0.0, "should not call if no OCEAN"
wallet = self._wallet._web3wallet
OCEAN = globaltokens.OCEANtoken()
#name
pool_i = len(state.agents.filterToPool())
dt_name = f'DT{pool_i}'
pool_agent_name = f'pool{pool_i}'
#new DT
DT = self._createDatatoken(dt_name, mint_amt=1000.0) #magic number
#new pool
pool_address = bfactory.BFactory().newBPool(from_wallet=wallet)
pool = bpool.BPool(pool_address)
#bind tokens & add initial liquidity
OCEAN_bind_amt = self.OCEAN() #magic number: use all the OCEAN
DT_bind_amt = 20.0 #magic number
DT.approve(pool.address, toBase18(DT_bind_amt), from_wallet=wallet)
OCEAN.approve(pool.address, toBase18(OCEAN_bind_amt),from_wallet=wallet)
pool.bind(DT.address, toBase18(DT_bind_amt),
toBase18(POOL_WEIGHT_DT), from_wallet=wallet)
pool.bind(OCEAN.address, toBase18(OCEAN_bind_amt),
toBase18(POOL_WEIGHT_OCEAN), from_wallet=wallet)
pool.finalize(from_wallet=wallet)
#create agent
pool_agent = PoolAgent(pool_agent_name, pool)
state.addAgent(pool_agent)
return pool_agent
def _doUnstakeOCEAN(self, state) -> bool:
if not state.agents.filterByNonzeroStake(self):
return False
return self._s_since_unstake >= self._s_between_unstake
def _unstakeOCEANsomewhere(self, state):
"""Choose what pool to unstake and by how much. Then do the action."""
pool_agents = state.agents.filterByNonzeroStake(self)
pool_agent = random.choice(list(pool_agents.values()))
BPT = self.BPT(pool_agent.pool)
BPT_unstake = 0.10 * BPT #magic number
self.unstakeOCEAN(BPT_unstake, pool_agent.pool)
def _createDatatoken(self,dt_name:str,mint_amt:float)-> datatoken.Datatoken:
"""Create datatoken contract and mint DTs to self."""
wallet = self._wallet._web3wallet
DT_address = dtfactory.DTFactory().createToken(
'', dt_name, dt_name, toBase18(mint_amt), from_wallet=wallet)
DT = datatoken.Datatoken(DT_address)
DT.mint(wallet.address, toBase18(mint_amt), from_wallet=wallet)
return DT
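# Illustrative sketch (not part of the original module): driving the agent from
# a simulation loop. `sim_state` stands in for the SimState object that the
# surrounding tokenspice engine normally passes to takeStep(); it is
# hypothetical here.
#
#     agent = EWPublisherAgent('publisher', USD=0.0, OCEAN=1000.0)
#     for _ in range(10):
#         agent.takeStep(sim_state)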
| 1.796875 | 2 |
lcls_orbit/__init__.py | slaclab/lcls-orbit | 0 | 5740 | import numpy as np
from . import _version
__version__ = _version.get_versions()['version']
HXR_COLORS = ("#000000", "#02004a", "#030069", "#04008f", "#0500b3", "#0700ff")
SXR_COLORS = ("#000000", "#330000", "#520000", "#850000", "#ad0000", "#ff0000")
HXR_AREAS = {
"GUN" : [2017.911, 2018.712],
"L0" : [2018.712, 2024.791],
"DL1_1": [2024.791, 2031.992],
"DL1_2": [2031.992, 2035.035],
"L1": [2035.035, 2044.167],
"BC1": [2044.167, 2059.733],
"L2": [2059.733, 2410.698],
"BC2": [2410.698, 2438.400],
"L3": [2438.400, 3042.005],
"CLTH_0": [3042.005, 3050.512],
"CLTH_1": [3050.512, 3058.457],
"CLTH_2": [3058.457, 3110.961],
"BSYH_1": [3110.961, 3117.409],
"BSYH_2": [3117.409, 3224.022],
"LTUH": [3224.022, 3562.739],
"UNDH": [3562.739, 3718.483],
"DMPH_1": [3718.483, 3734.407],
"DMPH_2": [3734.407, 3765.481]
}
HXR_AREAS = {np.mean(value): key for key, value in HXR_AREAS.items()}
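# Illustrative helper (not part of the original module): since the dict is now
# keyed by the midpoint z-position of each region, the region containing a
# given position can be approximated by the nearest midpoint. `z` is a
# hypothetical beamline position.
#
#     def nearest_area(z, areas=HXR_AREAS):
#         return areas[min(areas, key=lambda mid: abs(mid - z))]
#
#     # nearest_area(3600.0) -> 'UNDH'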
SXR_AREAS = {
"GUN" : [2017.911, 2017.911],
"L0" : [2018.712, 2024.791],
"DL1_1": [2024.791, 2031.992],
"DL1_2": [2031.992, 2035.035],
"L1": [2035.035, 2044.167],
"BC1": [2044.167, 2059.733],
"L2": [2059.733, 2410.698],
"BC2": [2410.698, 2438.400],
"L3": [2438.400, 3042.005],
"CLTH_0": [3042.005, 3050.512],
"CLTH_1": [3050.512, 3058.457],
"CLTS": [3177.650, 3224.022],
"BSYS": [3224.022, 3565.656],
"LTUS": [3565.656, 3718.483],
"UNDS": [3718.483, 3734.407],
"DMPS_1": [3734.407, 3734.407],
"DMPS_2": [3734.407, 3765.481]
}
SXR_AREAS = {np.mean(value): key for key, value in SXR_AREAS.items()} | 2.125 | 2 |
tests/test_optimizers_v2/test_optimizers_v2.py | OverLordGoldDragon/dummy | 0 | 5741 | import os
import tempfile
import numpy as np
import tensorflow as tf
from time import time
from termcolor import cprint
from unittest import TestCase
from .. import K
from .. import Input, Dense, GRU, Bidirectional, Embedding
from .. import Model, load_model
from .. import l2
from .. import maxnorm
from .. import Adam, Nadam, SGD
from .. import AdamW, NadamW, SGDW
from .. import get_weight_decays, fill_dict_in_order, reset_seeds, K_eval
print("TF version: %s" % tf.__version__)
tf_eager = bool(os.environ["TF_EAGER"] == "True")
if tf_eager:
print("TF running eagerly")
else:
tf.compat.v1.disable_eager_execution()
print("TF running in graph mode")
class TestOptimizers(TestCase):
def test_all(self): # Save/Load, Warm Restarts (w/ cosine annealing)
for optimizer_name in ['AdamW', 'NadamW', 'SGDW']:
cprint("<< TESTING {} OPTIMIZER >>".format(optimizer_name), 'blue')
reset_seeds()
num_batches, num_epochs = 25, 4
batch_size, timesteps, num_channels = 16, 8, 4
batch_shape = (batch_size, timesteps, num_channels)
total_iterations = num_batches # due to warm restarts
self.model = self._make_model(batch_shape, total_iterations)
optimizer = self._make_optimizer(optimizer_name, self.model,
total_iterations)
self.model.compile(optimizer, loss='binary_crossentropy')
self.assertTrue(self._valid_weight_decays(self.model))
self.model._make_train_function() # else K.eval before train may fail
X, Y = self._make_data(num_batches, *batch_shape)
self.eta_history = [] # for stop-introspection
self.t_cur_history = [] # for stop-introspection
for epoch in range(num_epochs):
for batch_num in range(num_batches):
self.t_cur_history += [K_eval(self.model.optimizer.t_cur, K)]
self.eta_history += [K_eval(self.model.optimizer.eta_t, K)]
self.model.train_on_batch(X[batch_num], Y[batch_num])
self.eta_history += [K_eval(self.model.optimizer.eta_t, K)]
self.eta_history.pop(-(1 + int(tf_eager)))
K.set_value(self.model.optimizer.t_cur, 0)
self.assertTrue(self._valid_cosine_annealing(self.eta_history,
total_iterations, num_epochs))
self._test_save_load(self.model, X, optimizer_name, optimizer)
# cleanup
del self.model, optimizer
reset_seeds(reset_graph_with_backend=K)
cprint("\n<< {} MAIN TEST PASSED >>\n".format(optimizer_name), 'green')
cprint("\n<< ALL MAIN TESTS PASSED >>\n", 'green')
def test_misc(self): # tests of non-main features to improve coverage
for optimizer_name in ['AdamW', 'NadamW', 'SGDW']:
cprint("<< TESTING {} OPTIMIZER >>".format(optimizer_name), 'blue')
reset_seeds()
optimizer_kw = {'total_iterations': 0, 'decay': 1e-3,
'amsgrad': optimizer_name == 'AdamW',
'nesterov': optimizer_name == 'SGDW'}
num_batches = 4
batch_size, timesteps = 16, 8
batch_shape = (batch_size, timesteps)
embed_input_dim = 5
total_iterations = 0
self.model = self._make_model(batch_shape, total_iterations,
embed_input_dim=embed_input_dim,
dense_constraint=1, l2_reg=1e-4,
bidirectional=False, sparse=True)
optimizer = self._make_optimizer(optimizer_name, self.model,
**optimizer_kw)
self.model.compile(optimizer, loss='sparse_categorical_crossentropy')
X, Y = self._make_data(num_batches, *batch_shape,
embed_input_dim=embed_input_dim, sparse=True)
for batch_num in range(num_batches):
self.model.train_on_batch(X[batch_num], Y[batch_num])
self._test_save_load(self.model, X, optimizer_name, optimizer)
# cleanup
del self.model, optimizer
reset_seeds(reset_graph_with_backend=K)
cprint("\n<< {} MISC TEST PASSED >>\n".format(optimizer_name), 'green')
cprint("\n<< ALL MISC TESTS PASSED >>\n", 'green')
def test_control(self): # tests losses against original optimizers'
for optimizer_name in ['AdamW', 'NadamW', 'SGDW']:
cprint("<< TESTING {} OPTIMIZER >>".format(optimizer_name), 'blue')
pass_txt = "Control Test Passed"
if optimizer_name == 'AdamW':
for amsgrad in [True, False]:
self._test_control(optimizer_name, amsgrad=amsgrad)
print("\n>> AdamW amsgrad={} {}".format(amsgrad, pass_txt))
elif optimizer_name == 'NadamW':
self._test_control(optimizer_name)
elif optimizer_name == 'SGDW':
for nesterov in [True, False]:
self._test_control(optimizer_name, nesterov=nesterov)
print("\n>> SGDW nesterov={} {}".format(nesterov, pass_txt))
o_name = optimizer_name
cprint("\n<< {} {} >>\n".format(o_name, pass_txt.upper()), 'green')
cprint("\n<< ALL CONTROL TESTS PASSED >>\n", 'green')
def _test_control(self, optimizer_name, amsgrad=False, nesterov=False):
optimizer_kw = dict(total_iterations=0, decay=1e-3,
amsgrad=amsgrad, nesterov=nesterov,
control_mode=True)
num_batches = 100
batch_size, timesteps = 16, 32
batch_shape = (batch_size, timesteps)
embed_input_dim = 5
total_iterations = 0
model_kw = dict(batch_shape=batch_shape, dense_constraint=1,
total_iterations=total_iterations,
embed_input_dim=embed_input_dim, l2_reg=0,
bidirectional=False, sparse=True)
loss_name = 'sparse_categorical_crossentropy'
reset_seeds(verbose=0)
X, Y = self._make_data(num_batches, *batch_shape,
embed_input_dim=embed_input_dim, sparse=True)
reset_seeds(reset_graph_with_backend=K, verbose=0)
self.model_custom = self._make_model(**model_kw)
optimizer_custom = self._make_optimizer(optimizer_name,
self.model_custom,
**optimizer_kw)
self.model_custom.compile(optimizer_custom, loss=loss_name)
self.loss_custom = [] # for introspection
t0 = time()
for batch_num in range(num_batches):
self.loss_custom += [self.model_custom.train_on_batch(
X[batch_num], Y[batch_num])]
print("model_custom -- %s batches -- time: %.2f sec" % (num_batches,
time() - t0))
reset_seeds(reset_graph_with_backend=K, verbose=0)
self.model_control = self._make_model(**model_kw)
optimizer_control = self._make_optimizer(optimizer_name[:-1],
self.model_control,
**optimizer_kw)
self.model_control.compile(optimizer_control, loss=loss_name)
self.loss_control = [] # for introspection
t0 = time()
for batch_num in range(num_batches):
self.loss_control += [self.model_control.train_on_batch(
X[batch_num], Y[batch_num])]
print("model_control -- %s batches -- time: %.2f sec" % (num_batches,
time() - t0))
loss_diff = np.abs(np.array(self.loss_custom) -
np.array(self.loss_control))
print("%s max loss diff: %e" % (optimizer_name, np.max(loss_diff)))
self.assertTrue(np.allclose(self.loss_custom, self.loss_control,
rtol=0, atol=1e-3))
# cleanup
del self.model_custom, self.model_control
del optimizer_custom, optimizer_control
reset_seeds(reset_graph_with_backend=K, verbose=0)
def _test_save_load(self, model, X, optimizer_name, optimizer):
saved_model_preds = model.predict(X[0])
saved_model_weights = K.batch_get_value(model.trainable_weights)
saved_optim_weights = K.batch_get_value(model.optimizer.weights)
test_name = 'test__%f{}.h5'.format(np.random.random())
modelpath = os.path.join(tempfile.gettempdir(), test_name)
model.save(modelpath)
del model
model = load_model(modelpath, custom_objects={optimizer_name: optimizer})
loaded_model_preds = model.predict(X[0])
loaded_model_weights = K.batch_get_value(model.trainable_weights)
loaded_optim_weights = K.batch_get_value(model.optimizer.weights)
self.assertTrue(np.allclose(saved_model_preds, loaded_model_preds,
rtol=0, atol=1e-8))
for smw, lmw in zip(saved_model_weights, loaded_model_weights):
self.assertTrue(np.allclose(smw, lmw, rtol=0, atol=1e-8))
for sow, low in zip(saved_optim_weights, loaded_optim_weights):
self.assertTrue(np.allclose(sow, low, rtol=0, atol=1e-8))
@staticmethod
def _make_data(num_batches, batch_size, timesteps, num_channels=None,
embed_input_dim=None, sparse=False):
if sparse:
X = np.random.randint(0, embed_input_dim,
(num_batches, batch_size, timesteps))
else:
X = np.random.randn(num_batches, batch_size, timesteps, num_channels)
Y = np.random.randint(0, 2, (num_batches, batch_size))
return X, Y
@staticmethod
def _make_model(batch_shape, total_iterations, l2_reg=0, bidirectional=True,
dense_constraint=None, embed_input_dim=None, sparse=False):
if dense_constraint is not None:
dense_constraint = maxnorm(dense_constraint)
ipt = Input(batch_shape=batch_shape)
if sparse:
x = Embedding(embed_input_dim, embed_input_dim*3 + 1,
mask_zero=True)(ipt)
else:
x = ipt
gru = GRU(4, recurrent_regularizer=l2(l2_reg), bias_regularizer=l2(l2_reg))
if bidirectional:
x = Bidirectional(gru)(x)
else:
x = gru(x)
x = Dense(2, kernel_regularizer=l2(l2_reg),
kernel_constraint=dense_constraint)(x)
if sparse:
out = Dense(2, activation='softmax')(x)
else:
out = Dense(1, activation='sigmoid')(x)
return Model(ipt, out)
@staticmethod
def _make_optimizer(optimizer_name, model, total_iterations, decay=0,
amsgrad=False, nesterov=False, control_mode=False):
optimizer_dict = {'AdamW': AdamW, 'NadamW': NadamW, 'SGDW': SGDW,
'Adam': Adam, 'Nadam': Nadam, 'SGD': SGD}
optimizer = optimizer_dict[optimizer_name]
optimizer_kw = {}
if 'Adam' in optimizer_name:
optimizer_kw = {'amsgrad': amsgrad}
elif 'SGD' in optimizer_name:
optimizer_kw = {'nesterov': nesterov, 'momentum': .9}
if 'Nadam' not in optimizer_name:
optimizer_kw.update({'decay': decay})
if not control_mode:
wd_dict = get_weight_decays(model)
l2_extra = [2e-5]*(len(wd_dict) - 3)
wd = fill_dict_in_order(wd_dict, [1e-5, 1e-5, 1e-6] + l2_extra)
lr_m = {'gru': 0.5}
use_cosine_annealing = True
else:
wd, lr_m = None, None
use_cosine_annealing = False
if not any([optimizer_name == name for name in ('Adam', 'Nadam', 'SGD')]):
return optimizer(lr=1e-4, weight_decays=wd, lr_multipliers=lr_m,
use_cosine_annealing=use_cosine_annealing, t_cur=0,
total_iterations=total_iterations, **optimizer_kw)
else:
return optimizer(lr=1e-4, **optimizer_kw)
@staticmethod
def _valid_weight_decays(model):
weight_decays = get_weight_decays(model)
trues = 0
for wd in weight_decays.values():
trues += (wd != 0)
return (trues == 0)
@staticmethod
def _valid_cosine_annealing(eta_history, total_iterations, num_epochs):
eta_history_simul = []
for epoch in range(num_epochs):
for iteration in range(0, total_iterations):
eta_history_simul.append(0.5 * (
1 + np.cos(np.pi*iteration / total_iterations)))
return np.allclose(eta_history, eta_history_simul, rtol=0, atol=2e-7)
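# Illustrative note (not part of the original tests): the schedule validated
# above is eta_t = 0.5 * (1 + cos(pi * t / T)), which decays from 1.0 at t=0
# towards 0 as t approaches T = total_iterations and restarts when t_cur is
# reset in the training loop.
#
#     # e.g. T = 4 gives eta_t ~= 1.0, 0.854, 0.5, 0.146 for t = 0, 1, 2, 3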
| 2.171875 | 2 |
koku/reporting/migrations/0099_ocp_performance.py | Vasyka/koku | 2 | 5742 | # Generated by Django 2.2.10 on 2020-02-18 12:51
import django.contrib.postgres.indexes
from django.db import migrations
from django.db import models
class Migration(migrations.Migration):
dependencies = [("reporting", "0098_auto_20200221_2034")]
operations = [
migrations.RunSQL(
"""
drop materialized view if exists reporting_ocpallcostlineitem_daily_summary;
drop materialized view if exists reporting_ocpallcostlineitem_project_daily_summary;
"""
),
migrations.RemoveIndex(model_name="ocpawscostlineitemdailysummary", name="cost_summary_node_idx"),
migrations.RemoveIndex(
model_name="ocpawscostlineitemprojectdailysummary", name="cost__proj_sum_namespace_idx"
),
migrations.RemoveIndex(model_name="ocpawscostlineitemprojectdailysummary", name="cost_proj_sum_node_idx"),
migrations.RemoveIndex(model_name="ocpazurecostlineitemdailysummary", name="ocpazure_node_idx"),
migrations.RemoveIndex(
model_name="ocpazurecostlineitemprojectdailysummary", name="ocpazure_proj_namespace_idx"
),
migrations.RemoveIndex(model_name="ocpazurecostlineitemprojectdailysummary", name="ocpazure_proj_node_idx"),
migrations.RemoveIndex(model_name="ocpusagelineitemdaily", name="namespace_idx"),
migrations.RemoveIndex(model_name="ocpusagelineitemdaily", name="node_idx"),
migrations.RemoveIndex(model_name="ocpusagelineitemdailysummary", name="summary_namespace_idx"),
migrations.RemoveIndex(model_name="ocpusagelineitemdailysummary", name="summary_node_idx"),
migrations.AlterField(
model_name="ocpawscostlineitemprojectdailysummary", name="usage_end", field=models.DateField()
),
migrations.AlterField(
model_name="ocpawscostlineitemprojectdailysummary", name="usage_start", field=models.DateField()
),
migrations.AlterField(
model_name="ocpazurecostlineitemdailysummary", name="usage_end", field=models.DateField()
),
migrations.AlterField(
model_name="ocpazurecostlineitemdailysummary", name="usage_start", field=models.DateField()
),
migrations.AlterField(
model_name="ocpazurecostlineitemprojectdailysummary", name="usage_end", field=models.DateField()
),
migrations.AlterField(
model_name="ocpazurecostlineitemprojectdailysummary", name="usage_start", field=models.DateField()
),
migrations.AlterField(model_name="ocpstoragelineitemdaily", name="usage_end", field=models.DateField()),
migrations.AlterField(model_name="ocpstoragelineitemdaily", name="usage_start", field=models.DateField()),
migrations.AlterField(model_name="ocpusagelineitemdaily", name="total_seconds", field=models.IntegerField()),
migrations.AlterField(model_name="ocpusagelineitemdaily", name="usage_end", field=models.DateField()),
migrations.AlterField(model_name="ocpusagelineitemdaily", name="usage_start", field=models.DateField()),
migrations.AlterField(model_name="ocpusagelineitemdailysummary", name="usage_end", field=models.DateField()),
migrations.AlterField(model_name="ocpusagelineitemdailysummary", name="usage_start", field=models.DateField()),
migrations.AddIndex(
model_name="ocpawscostlineitemdailysummary",
index=models.Index(fields=["node"], name="cost_summary_node_idx", opclasses=["varchar_pattern_ops"]),
),
migrations.AddIndex(
model_name="ocpawscostlineitemprojectdailysummary",
index=models.Index(
fields=["namespace"], name="cost__proj_sum_namespace_idx", opclasses=["varchar_pattern_ops"]
),
),
migrations.AddIndex(
model_name="ocpawscostlineitemprojectdailysummary",
index=models.Index(fields=["node"], name="cost_proj_sum_node_idx", opclasses=["varchar_pattern_ops"]),
),
migrations.AddIndex(
model_name="ocpazurecostlineitemdailysummary",
index=models.Index(fields=["node"], name="ocpazure_node_idx", opclasses=["varchar_pattern_ops"]),
),
migrations.AddIndex(
model_name="ocpazurecostlineitemprojectdailysummary",
index=models.Index(
fields=["namespace"], name="ocpazure_proj_namespace_idx", opclasses=["varchar_pattern_ops"]
),
),
migrations.AddIndex(
model_name="ocpazurecostlineitemprojectdailysummary",
index=models.Index(fields=["node"], name="ocpazure_proj_node_idx", opclasses=["varchar_pattern_ops"]),
),
migrations.AddIndex(
model_name="ocpstoragelineitemdaily",
index=models.Index(
fields=["namespace"], name="ocp_storage_li_namespace_idx", opclasses=["varchar_pattern_ops"]
),
),
migrations.AddIndex(
model_name="ocpstoragelineitemdaily",
index=models.Index(fields=["node"], name="ocp_storage_li_node_idx", opclasses=["varchar_pattern_ops"]),
),
migrations.AddIndex(
model_name="ocpusagelineitemdaily",
index=models.Index(fields=["namespace"], name="namespace_idx", opclasses=["varchar_pattern_ops"]),
),
migrations.AddIndex(
model_name="ocpusagelineitemdaily",
index=models.Index(fields=["node"], name="node_idx", opclasses=["varchar_pattern_ops"]),
),
migrations.AddIndex(
model_name="ocpusagelineitemdailysummary",
index=models.Index(fields=["namespace"], name="summary_namespace_idx", opclasses=["varchar_pattern_ops"]),
),
migrations.AddIndex(
model_name="ocpusagelineitemdailysummary",
index=models.Index(fields=["node"], name="summary_node_idx", opclasses=["varchar_pattern_ops"]),
),
migrations.AlterField(model_name="costsummary", name="usage_end", field=models.DateField()),
migrations.AlterField(model_name="costsummary", name="usage_start", field=models.DateField()),
migrations.AddIndex(
model_name="costsummary", index=models.Index(fields=["usage_start"], name="ocpcostsum_usage_start_idx")
),
migrations.AddIndex(
model_name="costsummary",
index=models.Index(
fields=["namespace"], name="ocpcostsum_namespace_idx", opclasses=["varchar_pattern_ops"]
),
),
migrations.AddIndex(
model_name="costsummary",
index=models.Index(fields=["node"], name="ocpcostsum_node_idx", opclasses=["varchar_pattern_ops"]),
),
migrations.AddIndex(
model_name="costsummary",
index=django.contrib.postgres.indexes.GinIndex(fields=["pod_labels"], name="ocpcostsum_pod_labels_idx"),
),
# This extension will help specifically with "col LIKE %val%"
# operations. (As long as val is at least 3 characters)
migrations.RunSQL(
"""
create extension if not exists pg_trgm schema public;
"""
),
# Create indexes to aid with text searching.
# These cases will specifically help with case-insensitive
# and contains (vs startswith) searches
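# Illustrative example (not part of the original migration): the kind of
# filter these expression indexes are meant to accelerate, i.e. a
# case-insensitive "contains" search; the table and value are examples only.
#
#   SELECT * FROM reporting_ocpusagelineitem_daily_summary
#   WHERE UPPER(namespace) LIKE UPPER('%openshift%');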
# ocp usage line item daily
migrations.RunSQL(
"""
/* add namespace index for like trigram ops */
create index if not exists ocp_namespace_idx
on reporting_ocpusagelineitem_daily using gin (UPPER(namespace) gin_trgm_ops);
/* add node index for like trigram ops */
create index if not exists ocp_node_idx
on reporting_ocpusagelineitem_daily using gin (UPPER(node) gin_trgm_ops);
"""
),
# ocp usage line item daily summary
migrations.RunSQL(
"""
/* add namespace index for like trigram ops */
create index if not exists ocp_summary_namespace_like_idx
on reporting_ocpusagelineitem_daily_summary using gin (UPPER(namespace) gin_trgm_ops);
/* add node index for like trigram ops */
create index if not exists ocp_summary_node_like_idx
on reporting_ocpusagelineitem_daily_summary using gin (UPPER(node) gin_trgm_ops);
"""
),
# reporting_ocpstoragelineitem_daily
migrations.RunSQL(
"""
/* add namespace index for like trigram ops */
create index if not exists ocp_storage_li_namespace_like_idx
on reporting_ocpstoragelineitem_daily using gin (UPPER(namespace) gin_trgm_ops);
/* add node index for like trigram ops */
create index if not exists ocp_storage_li_node_like_idx
on reporting_ocpstoragelineitem_daily using gin (UPPER(node) gin_trgm_ops);
"""
),
# ocp azure cost
migrations.RunSQL(
"""
/* add node index for like trigram ops */
create index if not exists ocpazure_node_like_idx
on reporting_ocpazurecostlineitem_daily_summary using gin (UPPER(node) gin_trgm_ops);
"""
),
# ocp azure project cost
migrations.RunSQL(
"""
/* add namespace index for like trigram ops */
create index if not exists ocpazure_proj_namespace_like_idx
on reporting_ocpazurecostlineitem_project_daily_summary using gin (UPPER(namespace) gin_trgm_ops);
/* add node index for like trigram ops */
create index if not exists ocpazure_proj_node_like_idx
on reporting_ocpazurecostlineitem_project_daily_summary using gin (UPPER(node) gin_trgm_ops);
"""
),
# reporting_ocpawscostlineitem_daily_summary
migrations.RunSQL(
"""
/* add node index for like trigram ops */
create index if not exists cost_summary_node_like_idx
on reporting_ocpawscostlineitem_daily_summary using gin (UPPER(node) gin_trgm_ops);
"""
),
# reporting_ocpawscostlineitem_project_daily_summary
migrations.RunSQL(
"""
/* add namespace index for like trigram ops */
create index if not exists cost__proj_sum_namespace_like_idx
on reporting_ocpawscostlineitem_project_daily_summary using gin (UPPER(namespace) gin_trgm_ops);
/* add node index for like trigram ops */
create index if not exists cost__proj_sum_node_like_idx
on reporting_ocpawscostlineitem_project_daily_summary using gin (UPPER(node) gin_trgm_ops);
"""
),
# reporting_ocpcosts_summary
migrations.RunSQL(
"""
/* add namespace index for like trigram ops */
create index if not exists ocpcostsum_namespace_like_idx
on reporting_ocpcosts_summary using gin (UPPER(namespace) gin_trgm_ops);
/* add node index for like trigram ops */
create index if not exists ocpcostsum_node_like_idx
on reporting_ocpcosts_summary using gin (UPPER(node) gin_trgm_ops);
"""
),
migrations.RunSQL(
"""
drop materialized view if exists reporting_ocpallcostlineitem_daily_summary;
create materialized view if not exists reporting_ocpallcostlineitem_daily_summary as
SELECT row_number() OVER () AS id,
lids.source_type,
lids.cluster_id,
lids.cluster_alias,
lids.namespace,
lids.node,
lids.resource_id,
lids.usage_start,
lids.usage_end,
lids.usage_account_id,
lids.account_alias_id,
lids.product_code,
lids.product_family,
lids.instance_type,
lids.region,
lids.availability_zone,
lids.tags,
lids.usage_amount,
lids.unit,
lids.unblended_cost,
lids.markup_cost,
lids.currency_code,
lids.shared_projects,
lids.project_costs
FROM ( SELECT 'AWS'::text AS source_type,
reporting_ocpawscostlineitem_daily_summary.cluster_id,
reporting_ocpawscostlineitem_daily_summary.cluster_alias,
reporting_ocpawscostlineitem_daily_summary.namespace,
reporting_ocpawscostlineitem_daily_summary.node,
reporting_ocpawscostlineitem_daily_summary.resource_id,
reporting_ocpawscostlineitem_daily_summary.usage_start::date,
reporting_ocpawscostlineitem_daily_summary.usage_end::date,
reporting_ocpawscostlineitem_daily_summary.usage_account_id,
reporting_ocpawscostlineitem_daily_summary.account_alias_id,
reporting_ocpawscostlineitem_daily_summary.product_code,
reporting_ocpawscostlineitem_daily_summary.product_family,
reporting_ocpawscostlineitem_daily_summary.instance_type,
reporting_ocpawscostlineitem_daily_summary.region,
reporting_ocpawscostlineitem_daily_summary.availability_zone,
reporting_ocpawscostlineitem_daily_summary.tags,
reporting_ocpawscostlineitem_daily_summary.usage_amount,
reporting_ocpawscostlineitem_daily_summary.unit,
reporting_ocpawscostlineitem_daily_summary.unblended_cost,
reporting_ocpawscostlineitem_daily_summary.markup_cost,
reporting_ocpawscostlineitem_daily_summary.currency_code,
reporting_ocpawscostlineitem_daily_summary.shared_projects,
reporting_ocpawscostlineitem_daily_summary.project_costs
FROM reporting_ocpawscostlineitem_daily_summary
WHERE reporting_ocpawscostlineitem_daily_summary.usage_start >= date_trunc('month'::text, date_trunc('month'::text, now()) - '1 day'::interval day)
UNION
SELECT 'Azure'::text AS source_type,
reporting_ocpazurecostlineitem_daily_summary.cluster_id,
reporting_ocpazurecostlineitem_daily_summary.cluster_alias,
reporting_ocpazurecostlineitem_daily_summary.namespace,
reporting_ocpazurecostlineitem_daily_summary.node,
reporting_ocpazurecostlineitem_daily_summary.resource_id,
reporting_ocpazurecostlineitem_daily_summary.usage_start::date,
reporting_ocpazurecostlineitem_daily_summary.usage_end::date,
reporting_ocpazurecostlineitem_daily_summary.subscription_guid AS usage_account_id,
NULL::integer AS account_alias_id,
reporting_ocpazurecostlineitem_daily_summary.service_name AS product_code,
NULL::character varying AS product_family,
reporting_ocpazurecostlineitem_daily_summary.instance_type,
reporting_ocpazurecostlineitem_daily_summary.resource_location AS region,
NULL::character varying AS availability_zone,
reporting_ocpazurecostlineitem_daily_summary.tags,
reporting_ocpazurecostlineitem_daily_summary.usage_quantity AS usage_amount,
reporting_ocpazurecostlineitem_daily_summary.unit_of_measure AS unit,
reporting_ocpazurecostlineitem_daily_summary.pretax_cost AS unblended_cost,
reporting_ocpazurecostlineitem_daily_summary.markup_cost,
reporting_ocpazurecostlineitem_daily_summary.currency AS currency_code,
reporting_ocpazurecostlineitem_daily_summary.shared_projects,
reporting_ocpazurecostlineitem_daily_summary.project_costs
FROM reporting_ocpazurecostlineitem_daily_summary
WHERE reporting_ocpazurecostlineitem_daily_summary.usage_start >= date_trunc('month'::text, date_trunc('month'::text, now()) - '1 day'::interval day)) lids
with no data;
create index mv_reporting_ocpallcostlineitem_daily_summary_namespace_ix
on reporting_ocpallcostlineitem_daily_summary using gin (namespace);
create index mv_reporting_ocpallcostlineitem_daily_summary_node_ix
on reporting_ocpallcostlineitem_daily_summary (node varchar_pattern_ops);
create index mv_reporting_ocpallcostlineitem_daily_summary_usage_ix
on reporting_ocpallcostlineitem_daily_summary (usage_start);
drop materialized view if exists reporting_ocpallcostlineitem_project_daily_summary;
create materialized view if not exists reporting_ocpallcostlineitem_project_daily_summary as
SELECT row_number() OVER () AS id,
lids.source_type,
lids.cluster_id,
lids.cluster_alias,
lids.data_source,
lids.namespace,
lids.node,
lids.pod_labels,
lids.resource_id,
lids.usage_start,
lids.usage_end,
lids.usage_account_id,
lids.account_alias_id,
lids.product_code,
lids.product_family,
lids.instance_type,
lids.region,
lids.availability_zone,
lids.usage_amount,
lids.unit,
lids.unblended_cost,
lids.project_markup_cost,
lids.pod_cost,
lids.currency_code
FROM ( SELECT 'AWS'::text AS source_type,
reporting_ocpawscostlineitem_project_daily_summary.cluster_id,
reporting_ocpawscostlineitem_project_daily_summary.cluster_alias,
reporting_ocpawscostlineitem_project_daily_summary.data_source,
reporting_ocpawscostlineitem_project_daily_summary.namespace,
reporting_ocpawscostlineitem_project_daily_summary.node,
reporting_ocpawscostlineitem_project_daily_summary.pod_labels,
reporting_ocpawscostlineitem_project_daily_summary.resource_id,
reporting_ocpawscostlineitem_project_daily_summary.usage_start::date,
reporting_ocpawscostlineitem_project_daily_summary.usage_end::date,
reporting_ocpawscostlineitem_project_daily_summary.usage_account_id,
reporting_ocpawscostlineitem_project_daily_summary.account_alias_id,
reporting_ocpawscostlineitem_project_daily_summary.product_code,
reporting_ocpawscostlineitem_project_daily_summary.product_family,
reporting_ocpawscostlineitem_project_daily_summary.instance_type,
reporting_ocpawscostlineitem_project_daily_summary.region,
reporting_ocpawscostlineitem_project_daily_summary.availability_zone,
reporting_ocpawscostlineitem_project_daily_summary.usage_amount,
reporting_ocpawscostlineitem_project_daily_summary.unit,
reporting_ocpawscostlineitem_project_daily_summary.unblended_cost,
reporting_ocpawscostlineitem_project_daily_summary.project_markup_cost,
reporting_ocpawscostlineitem_project_daily_summary.pod_cost,
reporting_ocpawscostlineitem_project_daily_summary.currency_code
FROM reporting_ocpawscostlineitem_project_daily_summary
WHERE reporting_ocpawscostlineitem_project_daily_summary.usage_start >= date_trunc('month'::text, date_trunc('month'::text, now()) - '1 day'::interval day)
UNION
SELECT 'Azure'::text AS source_type,
reporting_ocpazurecostlineitem_project_daily_summary.cluster_id,
reporting_ocpazurecostlineitem_project_daily_summary.cluster_alias,
reporting_ocpazurecostlineitem_project_daily_summary.data_source,
reporting_ocpazurecostlineitem_project_daily_summary.namespace,
reporting_ocpazurecostlineitem_project_daily_summary.node,
reporting_ocpazurecostlineitem_project_daily_summary.pod_labels,
reporting_ocpazurecostlineitem_project_daily_summary.resource_id,
reporting_ocpazurecostlineitem_project_daily_summary.usage_start::date,
reporting_ocpazurecostlineitem_project_daily_summary.usage_end::date,
reporting_ocpazurecostlineitem_project_daily_summary.subscription_guid AS usage_account_id,
NULL::integer AS account_alias_id,
reporting_ocpazurecostlineitem_project_daily_summary.service_name AS product_code,
NULL::character varying AS product_family,
reporting_ocpazurecostlineitem_project_daily_summary.instance_type,
reporting_ocpazurecostlineitem_project_daily_summary.resource_location AS region,
NULL::character varying AS availability_zone,
reporting_ocpazurecostlineitem_project_daily_summary.usage_quantity AS usage_amount,
reporting_ocpazurecostlineitem_project_daily_summary.unit_of_measure AS unit,
reporting_ocpazurecostlineitem_project_daily_summary.pretax_cost AS unblended_cost,
reporting_ocpazurecostlineitem_project_daily_summary.project_markup_cost,
reporting_ocpazurecostlineitem_project_daily_summary.pod_cost,
reporting_ocpazurecostlineitem_project_daily_summary.currency AS currency_code
FROM reporting_ocpazurecostlineitem_project_daily_summary
WHERE reporting_ocpazurecostlineitem_project_daily_summary.usage_start >= date_trunc('month'::text, date_trunc('month'::text, now()) - '1 day'::interval day)) lids
with no data;
create index mv_reporting_ocpallcostlineitem_prj_daily_summary_namespace_ix
on reporting_ocpallcostlineitem_project_daily_summary (namespace varchar_pattern_ops);
create index mv_reporting_ocpallcostlineitem_prj_daily_summary_node_ix
on reporting_ocpallcostlineitem_project_daily_summary (node varchar_pattern_ops);
create index mv_reporting_ocpallcostlineitem_prj_daily_summary_namespace_like_ix
on reporting_ocpallcostlineitem_project_daily_summary using gin (namespace gin_trgm_ops);
create index mv_reporting_ocpallcostlineitem_prj_daily_summary_node_like_ix
on reporting_ocpallcostlineitem_project_daily_summary using gin (node gin_trgm_ops);
create index mv_reporting_ocpallcostlineitem_prj_daily_summary_usage_ix
on reporting_ocpallcostlineitem_project_daily_summary (usage_start);
"""
),
migrations.RunSQL(
"""
refresh materialized view reporting_ocpallcostlineitem_daily_summary;
refresh materialized view reporting_ocpallcostlineitem_project_daily_summary;
"""
),
]
| 1.835938 | 2 |
sympy/tensor/tests/test_functions.py | iamabhishek0/sympy | 8,323 | 5743 | <reponame>iamabhishek0/sympy<filename>sympy/tensor/tests/test_functions.py
from sympy.tensor.functions import TensorProduct
from sympy import MatrixSymbol, Matrix, Array
from sympy.abc import x, y, z
from sympy.abc import i, j, k, l
A = MatrixSymbol("A", 3, 3)
B = MatrixSymbol("B", 3, 3)
C = MatrixSymbol("C", 3, 3)
def test_TensorProduct_construction():
assert TensorProduct(3, 4) == 12
assert isinstance(TensorProduct(A, A), TensorProduct)
expr = TensorProduct(TensorProduct(x, y), z)
assert expr == x*y*z
expr = TensorProduct(TensorProduct(A, B), C)
assert expr == TensorProduct(A, B, C)
expr = TensorProduct(Matrix.eye(2), [[0, -1], [1, 0]])
assert expr == Array([
[
[[0, -1], [1, 0]],
[[0, 0], [0, 0]]
],
[
[[0, 0], [0, 0]],
[[0, -1], [1, 0]]
]
])
def test_TensorProduct_shape():
expr = TensorProduct(3, 4, evaluate=False)
assert expr.shape == ()
assert expr.rank() == 0
expr = TensorProduct([1, 2], [x, y], evaluate=False)
assert expr.shape == (2, 2)
assert expr.rank() == 2
expr = TensorProduct(expr, expr, evaluate=False)
assert expr.shape == (2, 2, 2, 2)
assert expr.rank() == 4
expr = TensorProduct(Matrix.eye(2), [[0, -1], [1, 0]], evaluate=False)
assert expr.shape == (2, 2, 2, 2)
assert expr.rank() == 4
def test_TensorProduct_getitem():
expr = TensorProduct(A, B)
assert expr[i, j, k, l] == A[i, j]*B[k, l]
| 2.609375 | 3 |
app/views.py | Kgermando/sem | 0 | 5744 | <reponame>Kgermando/sem
from django.shortcuts import render
# Create your views here.
class MultipleProxyMiddleware:
FORWARDED_FOR_FIELDS = [
'HTTP_X_FORWARDED_FOR',
'HTTP_X_FORWARDED_HOST',
'HTTP_X_FORWARDED_SERVER',
]
def __init__(self, get_response):
self.get_response = get_response
def __call__(self, request):
"""
Rewrites the proxy headers so that only the most
recent proxy is used.
"""
for field in self.FORWARDED_FOR_FIELDS:
if field in request.META:
if ',' in request.META[field]:
parts = request.META[field].split(',')
request.META[field] = parts[-1].strip()
return self.get_response(request)
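# Illustrative sketch (not part of the original module): enabling the
# middleware above in a Django settings module. The dotted path assumes this
# file is importable as `app.views`, which is an assumption of the example.
#
#     MIDDLEWARE = [
#         'app.views.MultipleProxyMiddleware',
#         # ... the rest of the middleware stack ...
#     ]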
def index(request):
context = {
}
template_name = 'pages/app/index.html'
return render(request, template_name, context)
| 2.234375 | 2 |
webcrawler/crawler/spiders/baselensspider.py | HansZimmer5000/LensComparison | 0 | 5745 | <gh_stars>0
# This module implements my web crawler using scrapy.
# It's a general web crawler, but the import and use of GhAdapter makes it useful for geizhals.de sites.
from abc import ABC, abstractmethod
import scrapy
class BaseLensSpider(scrapy.Spider, ABC):
@property
@abstractmethod
def adapter(self):
raise NotImplementedError()
# TODO: make the start_urls handling abstract and initialize it with a call to adapter.START_URLS
@abstractmethod
def parse_lens_page(self, response):
raise NotImplementedError()
@abstractmethod
def create_lens_page_requests(self,response):
raise NotImplementedError()
@abstractmethod
def create_overview_page_request(self, response):
raise NotImplementedError()
def parse_overview_page(self,response):
for lens_page_request in self.create_lens_page_requests(response):
yield lens_page_request
for overview_page_request in self.create_overview_page_request(response):
yield overview_page_request
def parse(self, response):
return self.parse_overview_page(response)
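# Illustrative sketch (not part of the original module): the pieces a concrete
# subclass must provide. `GhAdapter` and the CSS selectors are hypothetical;
# the overridden methods mirror the abstract ones declared above.
#
#     class GeizhalsLensSpider(BaseLensSpider):
#         name = 'geizhals_lenses'
#         adapter = GhAdapter()
#         start_urls = list(GhAdapter.START_URLS)
#
#         def parse_lens_page(self, response):
#             yield self.adapter.parse_lens(response)
#
#         def create_lens_page_requests(self, response):
#             for url in response.css('a.lens::attr(href)').getall():
#                 yield scrapy.Request(url, callback=self.parse_lens_page)
#
#         def create_overview_page_request(self, response):
#             next_url = response.css('a.next::attr(href)').get()
#             if next_url:
#                 yield scrapy.Request(next_url, callback=self.parse)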
| 2.84375 | 3 |
byol_train.py | fjbriones/deep-text-recognition-benchmark | 0 | 5746 | import os
import sys
import time
import random
import string
import argparse
import torch
import torch.backends.cudnn as cudnn
import torch.nn.init as init
import torch.optim as optim
import torch.utils.data
import numpy as np
from utils import CTCLabelConverter, CTCLabelConverterForBaiduWarpctc, AttnLabelConverter, Averager
from simclr_dataset import hierarchical_dataset, AlignCollate, Batch_Balanced_Dataset
from simclr_model import FeaturesModel as Model
from test import validation
from byol_pytorch import BYOL
from imgaug import augmenters as iaa
import imgaug as ia
from tqdm import tqdm
import matplotlib.pyplot as plt
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
def train(opt):
""" dataset preparation """
if not opt.data_filtering_off:
print('Filtering the images containing characters which are not in opt.character')
print('Filtering the images whose label is longer than opt.batch_max_length')
# see https://github.com/clovaai/deep-text-recognition-benchmark/blob/6593928855fb7abb999a99f428b3e4477d4ae356/dataset.py#L130
opt.select_data = opt.select_data.split('-')
opt.batch_ratio = opt.batch_ratio.split('-')
train_dataset = Batch_Balanced_Dataset(opt)
log = open(f'./saved_models/{opt.exp_name}/log_dataset.txt', 'a')
ia.seed(1)
image_transforms = iaa.Sequential([iaa.SomeOf((1, 5),
[iaa.LinearContrast((0.5, 1.0)),
iaa.GaussianBlur((0.5, 1.5)),
iaa.Crop(percent=((0, 0.4),(0, 0),(0, 0.4),(0, 0.0)), keep_size=True),
iaa.Crop(percent=((0, 0.0),(0, 0.02),(0, 0),(0, 0.02)), keep_size=True),
iaa.Sharpen(alpha=(0.0, 0.5), lightness=(0.0, 0.5)),
iaa.PiecewiseAffine(scale=(0.02, 0.03), mode='edge'),
iaa.PerspectiveTransform(scale=(0.01, 0.02))],
random_order=True)])
AlignCollate_valid = AlignCollate(imgH=opt.imgH, imgW=opt.imgW, keep_ratio_with_pad=opt.PAD, image_transforms=image_transforms)
valid_dataset, valid_dataset_log = hierarchical_dataset(root=opt.valid_data, opt=opt)
valid_loader = torch.utils.data.DataLoader(
valid_dataset, batch_size=opt.batch_size,
shuffle=True, # 'True' to check training progress with validation function.
num_workers=int(opt.workers),
collate_fn=AlignCollate_valid, pin_memory=True)
log.write(valid_dataset_log)
print('-' * 80)
log.write('-' * 80 + '\n')
log.close()
if opt.rgb:
opt.input_channel = 3
model = Model(opt)
print('model input parameters', opt.imgH, opt.imgW, opt.num_fiducial, opt.input_channel, opt.output_channel,
opt.hidden_size, opt.batch_max_length, opt.Transformation, opt.FeatureExtraction,
opt.SequenceModeling)
# weight initialization
for name, param in model.named_parameters():
if 'localization_fc2' in name:
print(f'Skip {name} as it is already initialized')
continue
try:
if 'bias' in name:
init.constant_(param, 0.0)
elif 'weight' in name:
init.kaiming_normal_(param)
except Exception as e: # for batchnorm.
if 'weight' in name:
param.data.fill_(1)
continue
# data parallel for multi-GPU
model = torch.nn.DataParallel(model).to(device)
model.train()
if opt.saved_model != '':
print(f'loading pretrained model from {opt.saved_model}')
if opt.FT:
model.load_state_dict(torch.load(opt.saved_model), strict=False)
else:
model.load_state_dict(torch.load(opt.saved_model))
print("Model:")
print(model)
image_transforms = iaa.Sequential([iaa.SomeOf((1, 5),
[iaa.LinearContrast((0.5, 1.0)),
iaa.GaussianBlur((0.5, 1.5)),
iaa.Crop(percent=((0, 0.4),(0, 0),(0, 0.4),(0, 0.0)), keep_size=True),
iaa.Crop(percent=((0, 0.0),(0, 0.02),(0, 0),(0, 0.02)), keep_size=True),
iaa.Sharpen(alpha=(0.0, 0.5), lightness=(0.0, 0.5)),
iaa.PiecewiseAffine(scale=(0.02, 0.03), mode='edge'),
iaa.PerspectiveTransform(scale=(0.01, 0.02))],
random_order=True)])
byol_learner = BYOL(
model,
image_size=(32,100),
hidden_layer=-1,
channels=1,
augment_fn=image_transforms,
augmented=True)
print(byol_learner)
# filter that only require gradient decent
filtered_parameters = []
params_num = []
for p in filter(lambda p: p.requires_grad, byol_learner.parameters()):
filtered_parameters.append(p)
params_num.append(np.prod(p.size()))
print('Trainable params num : ', sum(params_num))
# setup optimizer
if opt.optimizer == 'adam':
optimizer = optim.Adam(filtered_parameters, lr=opt.lr, betas=(opt.beta1, 0.999))
elif opt.optimizer == 'adadelta':
optimizer = optim.Adadelta(filtered_parameters, lr=opt.lr, rho=opt.rho, eps=opt.eps, weight_decay=opt.weight_decay)
elif opt.optimizer == 'sgd':
optimizer = optim.SGD(filtered_parameters, lr=opt.lr, momentum=opt.momentum, weight_decay=opt.weight_decay, nesterov=opt.nesterov)
else:
raise Exception('Unknown optimizer')
print("Optimizer:")
print(optimizer)
""" final options """
# print(opt)
with open(f'./saved_models/{opt.exp_name}/opt.txt', 'a') as opt_file:
opt_log = '------------ Options -------------\n'
args = vars(opt)
for k, v in args.items():
opt_log += f'{str(k)}: {str(v)}\n'
opt_log += '---------------------------------------\n'
print(opt_log)
opt_file.write(opt_log)
""" start training """
start_iter = 0
if opt.saved_model != '':
try:
start_iter = int(opt.saved_model.split('_')[-1].split('.')[0])
print(f'continue to train, start_iter: {start_iter}')
except:
pass
#LR Scheduler:
scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=[int(0.6*opt.num_iter), int(0.8*opt.num_iter)], last_epoch=start_iter-1, gamma=0.1)
best_loss = None
iteration = start_iter
print(device)
loss_avg = Averager()
valid_loss_avg = Averager()
# kl_loss_avg = Averager()
# kl_loss = torch.nn.KLDivLoss()
epoch = 0
while(True):
# train part
for i in tqdm(range(opt.valInterval)):
image_tensors, _ = train_dataset.get_batch()
image = image_tensors.to(device)
optimizer.zero_grad()
loss = byol_learner(image)
loss.backward()
if opt.grad_clip:
torch.nn.utils.clip_grad_norm_(byol_learner.parameters(), opt.grad_clip)
optimizer.step()
scheduler.step()
byol_learner.update_moving_average()
loss_avg.add(loss)
if iteration==0:
print("Epoch {:06d} Loss: {:.04f}".format(iteration, loss_avg.val()))
iteration += 1
byol_learner.eval()
model.eval()
with torch.no_grad():
for image_tensors, _ in valid_loader:
image = image_tensors.to(device)
val_loss = byol_learner(image)
valid_loss_avg.add(val_loss)
# features = model(image)
# features = features.view(-1, 26, features.shape[1])
# kl_div = kl_loss(features[:int(features.shape[0]/2)], features[int(features.shape[0]/2):])
# kl_loss_avg.add(kl_div)
model.train()
byol_learner.train()
with open(f'./saved_models/{opt.exp_name}/log_train.txt', 'a') as log:
log.write("Iteration {:06d} Loss: {:.06f} Val loss: {:06f}".format(iteration, loss_avg.val(), valid_loss_avg.val()) + '\n')
print("Iteration {:06d} Loss: {:.06f} Val loss: {:06f}".format(iteration, loss_avg.val(), valid_loss_avg.val()))
if best_loss is None:
best_loss = valid_loss_avg.val()
torch.save(model.state_dict(), f'./saved_models/{opt.exp_name}/iter_{iteration+1}.pth')
elif best_loss > valid_loss_avg.val():
best_loss = valid_loss_avg.val()
torch.save(model.state_dict(), f'./saved_models/{opt.exp_name}/iter_{iteration+1}.pth')
scheduler.step()
loss_avg.reset()
valid_loss_avg.reset()
if epoch % 5 == 0:
torch.save(model.state_dict(), f'./saved_models/{opt.exp_name}/iter_{iteration+1}.pth')
if (iteration + 1) >= opt.num_iter:
print('end the training')
torch.save(model.state_dict(), f'./saved_models/{opt.exp_name}/iter_{iteration+1}.pth')
sys.exit()
epoch +=1
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--exp_name', help='Where to store logs and models')
parser.add_argument('--train_data', required=True, help='path to training dataset')
parser.add_argument('--valid_data', required=True, help='path to validation dataset')
parser.add_argument('--manualSeed', type=int, default=1111, help='for random seed setting')
parser.add_argument('--workers', type=int, help='number of data loading workers', default=4)
parser.add_argument('--batch_size', type=int, default=192, help='input batch size')
parser.add_argument('--num_iter', type=int, default=300000, help='number of iterations to train for')
parser.add_argument('--valInterval', type=int, default=2000, help='Interval between each validation')
parser.add_argument('--saved_model', default='', help="path to model to continue training")
parser.add_argument('--FT', action='store_true', help='whether to do fine-tuning')
parser.add_argument('--optimizer', type=str, choices=['adam', 'adadelta', 'sgd'], help="Optimizer")
parser.add_argument('--lr', type=float, default=1, help='learning rate, default=1.0 for Adadelta')
parser.add_argument('--beta1', type=float, default=0.9, help='beta1 for adam. default=0.9')
parser.add_argument('--rho', type=float, default=0.95, help='decay rate rho for Adadelta. default=0.95')
parser.add_argument('--eps', type=float, default=1e-8, help='eps for Adadelta. default=1e-8')
parser.add_argument('--nesterov', action='store_true', help='Use Nesterov momentum for SGD')
parser.add_argument('--momentum', type=float, default=0.9, help='Momentum for SGD')
parser.add_argument('--grad_clip', type=float, default=5, help='gradient clipping value. default=5')
parser.add_argument('--baiduCTC', action='store_true', help='for data_filtering_off mode')
""" Data processing """
parser.add_argument('--select_data', type=str, default='MJ-ST',
help='select training data (default is MJ-ST, which means MJ and ST used as training data)')
parser.add_argument('--batch_ratio', type=str, default='0.5-0.5',
help='assign ratio for each selected data in the batch')
parser.add_argument('--total_data_usage_ratio', type=str, default='1.0',
help='total data usage ratio, this ratio is multiplied to total number of data.')
parser.add_argument('--batch_max_length', type=int, default=25, help='maximum-label-length')
parser.add_argument('--imgH', type=int, default=32, help='the height of the input image')
parser.add_argument('--imgW', type=int, default=100, help='the width of the input image')
parser.add_argument('--rgb', action='store_true', help='use rgb input')
parser.add_argument('--character', type=str,
default='0123456789abcdefghijklmnopqrstuvwxyz', help='character label')
parser.add_argument('--sensitive', action='store_true', help='for sensitive character mode')
parser.add_argument('--PAD', action='store_true', help='whether to keep ratio then pad for image resize')
parser.add_argument('--data_filtering_off', action='store_true', help='for data_filtering_off mode')
""" Model Architecture """
parser.add_argument('--Transformation', type=str, required=True, help='Transformation stage. None|TPS')
parser.add_argument('--FeatureExtraction', type=str, required=True,
help='FeatureExtraction stage. VGG|RCNN|ResNet')
parser.add_argument('--SequenceModeling', type=str, required=True, help='SequenceModeling stage. None|BiLSTM')
parser.add_argument('--num_fiducial', type=int, default=20, help='number of fiducial points of TPS-STN')
parser.add_argument('--input_channel', type=int, default=1,
help='the number of input channel of Feature extractor')
parser.add_argument('--output_channel', type=int, default=512,
help='the number of output channel of Feature extractor')
parser.add_argument('--hidden_size', type=int, default=256, help='the size of the LSTM hidden state')
parser.add_argument('--weight_decay', type=float, default=10e-4, help='Weight decay')
parser.add_argument('--FinalLayer', action='store_true', help='Use a nonlinear projection head during training')
parser.add_argument('--final_feature', type=int, default=256, help='the size of the output of the final layer')
opt = parser.parse_args()
if not opt.exp_name:
opt.exp_name = f'{opt.Transformation}-{opt.FeatureExtraction}-{opt.SequenceModeling}-BYOL'
opt.exp_name += f'-Seed{opt.manualSeed}'
# print(opt.exp_name)
os.makedirs(f'./saved_models/{opt.exp_name}', exist_ok=True)
""" vocab / character number configuration """
if opt.sensitive:
# opt.character += 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
opt.character = string.printable[:-6] # same with ASTER setting (use 94 char).
""" Seed and GPU setting """
# print("Random Seed: ", opt.manualSeed)
random.seed(opt.manualSeed)
np.random.seed(opt.manualSeed)
torch.manual_seed(opt.manualSeed)
torch.cuda.manual_seed(opt.manualSeed)
cudnn.benchmark = True
cudnn.deterministic = True
opt.num_gpu = torch.cuda.device_count()
# print('device count', opt.num_gpu)
if opt.num_gpu > 1:
print('------ Use multi-GPU setting ------')
print('if you stuck too long time with multi-GPU setting, try to set --workers 0')
# check multi-GPU issue https://github.com/clovaai/deep-text-recognition-benchmark/issues/1
opt.workers = opt.workers * opt.num_gpu
opt.batch_size = opt.batch_size * opt.num_gpu
""" previous version
    print('To equalize batch stats to the 1-GPU setting, the batch_size is multiplied by num_gpu and the multiplied batch_size is ', opt.batch_size)
opt.batch_size = opt.batch_size * opt.num_gpu
    print('To equalize the number of epochs to the 1-GPU setting, num_iter is divided by num_gpu by default.')
    If you don't care about it, just comment out these lines.)
opt.num_iter = int(opt.num_iter / opt.num_gpu)
"""
train(opt)
| 2.28125 | 2 |
app/__init__.py | dulin/tornado-test | 0 | 5747 | <filename>app/__init__.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# -*- mode: python -*-
import aiopg
import psycopg2
import tornado.locks
from tornado.options import define, options
from app.application import Application
define('port', default=8080, help="listening port")
define('bind_address', default="", help="bind address")
define("db_host", default="127.0.0.1", help="database host")
define("db_port", default=5432, help="database port")
define("db_database", default="tornado", help="database name")
define("db_user", default="tornado", help="database user")
define("db_password", default="<PASSWORD>", help="database password")
async def maybe_create_tables(db):
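    # despite its name, this only probes the 'schema' table to check the database is usable; it does not create tables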
try:
with (await db.cursor()) as cur:
await cur.execute("SELECT COUNT(*) FROM schema LIMIT 1")
await cur.fetchone()
except psycopg2.ProgrammingError:
print("Database error!")
async def main():
options.parse_command_line()
async with aiopg.create_pool(
host=options.db_host,
port=options.db_port,
user=options.db_user,
password=options.db_password,
dbname=options.db_database) as db:
await maybe_create_tables(db)
app = Application(db)
app.listen(options.port, options.bind_address, xheaders=True)
print("Listening on http://%s:%i" % (options.bind_address, options.port))
shutdown_event = tornado.locks.Event()
await shutdown_event.wait()
| 2.46875 | 2 |
pipescaler/core/stage.py | KarlTDebiec/PipeScaler | 1 | 5748 | <filename>pipescaler/core/stage.py
#!/usr/bin/env python
# pipescaler/core/stage.py
#
# Copyright (C) 2020-2021 <NAME>
# All rights reserved.
#
# This software may be modified and distributed under the terms of the
# BSD license.
from __future__ import annotations
from abc import ABC, abstractmethod
from importlib.util import module_from_spec, spec_from_file_location
from typing import Any, List, Optional
from pipescaler.common import validate_input_path
def initialize_stage(stage_name, stage_conf, modules):
# Get stage's class name
stage_cls_name = next(iter(stage_conf)) # get first key
# Get stage's configuration
stage_args = stage_conf.get(stage_cls_name)
if stage_args is None:
stage_args = {}
# Get stage's class
stage_cls = None
for module in modules:
try:
stage_cls = getattr(module, stage_cls_name)
except AttributeError:
continue
if stage_cls is None:
if "infile" in stage_args:
module_infile = validate_input_path(stage_args.pop("infile"))
spec = spec_from_file_location(stage_cls_name, module_infile)
module = module_from_spec(spec)
spec.loader.exec_module(module)
stage_cls = getattr(module, stage_cls_name)
else:
raise KeyError(f"Class '{stage_cls_name}' not found")
return stage_cls(name=stage_name, **stage_args)
class Stage(ABC):
"""Base class for stages."""
trim_suffixes = None
extension = "png"
def __init__(
self, name: Optional[str] = None, desc: Optional[str] = None, **kwargs: Any
) -> None:
"""
Validates and stores static configuration.
Arguments:
name (Optional[str]): Name of stage
desc (Optional[str]): Description of stage
kwargs (Any): Additional keyword arguments
"""
if name is not None:
self.name = name
else:
self.name = self.__class__.__name__
if desc is not None:
self.desc = desc
else:
self.desc = self.name
def __repr__(self) -> str:
return self.desc
def __str__(self) -> str:
return self.name
@property
@abstractmethod
def inlets(self) -> List[str]:
raise NotImplementedError()
@property
@abstractmethod
def outlets(self) -> List[str]:
raise NotImplementedError()
| 2.109375 | 2 |
leetcode/970_powerful_integers/970_powerful_integers.py | ryangillard/misc | 0 | 5749 | <gh_stars>0
class Solution(object):
def powerfulIntegers(self, x, y, bound):
"""
:type x: int
:type y: int
:type bound: int
:rtype: List[int]
"""
# Find max exponent
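        # the smaller base needs the largest exponent before exceeding bound; if either base is 1 its powers never grow, so size the search by the other base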
base = max(x, y) if x == 1 or y == 1 else min(x, y)
exponent = 1
if base != 1:
while base ** exponent <= bound:
exponent += 1
# Brute force all of the exponent trials
hashset = set()
for i in range(exponent):
for j in range(exponent):
z = x ** i + y ** j
if z <= bound:
hashset.add(z)
return list(hashset) | 3.109375 | 3 |
src/project/api/rankings/urls.py | jSkrod/djangae-react-browser-games-app | 0 | 5750 | <filename>src/project/api/rankings/urls.py
from django.conf.urls import url, include
from project.api.rankings.api import AddRanking, AddScore, GetScoresUser, GetScoresGame
urlpatterns = [
url(r'add_ranking$', AddRanking.as_view()),
url(r'add_score$', AddScore.as_view()),
url(r'get_scores_game$', GetScoresGame.as_view()),
url(r'get_scores_user$', GetScoresUser.as_view())
] | 1.703125 | 2 |
qiskit_metal/qlibrary/lumped/cap_n_interdigital.py | wdczdj/qiskit-metal | 0 | 5751 | <filename>qiskit_metal/qlibrary/lumped/cap_n_interdigital.py
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
from qiskit_metal import draw, Dict
from qiskit_metal.qlibrary.core import QComponent
import numpy as np
class CapNInterdigital(QComponent):
"""Generates a two pin (+) structure comprised of a north CPW transmission
line, and a south transmission line, coupled together via a finger
capacitor. Such a structure can be used, as an example, for generating CPW
resonators. (0,0) represents the center position of the component. Setting
finger length to 0 gives a simple gap capacitor. The width of the gap
    capacitor is found via
    (cap_width * finger_count + cap_gap * (finger_count - 1)).
Inherits QComponent class.
::
(0,0) N
+ ^
| |
|
|
--|-----|--
| | | | |
|-----|-----|
|
|
|
|
+
Options:
* north_width: '10um' -- The width of the 'north' portion of the CPW transmission line
* north_gap: '6um' -- The dielectric gap of the 'north' portion of the CPW transmission line
* south_width: '10um' -- The width of the 'south' portion of the CPW transmission line
* south_gap: '6um' -- The dielectric gap of the 'south' portion of the CPW transmission line
(also for the capacitor gap to ground)
* cap_width: '10um' -- The width of the finger capacitor metal (and islands)
* cap_gap: '6um' -- The width of dielectric for the capacitive coupling/fingers
* cap_gap_ground: '6um' -- Width of the dielectric between the capacitor and ground
* finger_length: '20um' -- The depth of the finger islands of the capacitor
* finger_count: '5' -- Number of fingers in the capacitor
* cap_distance: '50um' -- Distance of the north point of the capacitor from the north pin
* pos_x/_y: '0um' -- The x/y position of the north pin
* rotation: '0' -- The direction of the transmission line. 0 degrees is -y, following a
counter-clockwise rotation (eg. 90 is +x)
* chip: 'main' -- The chip the capacitor should be on.
* layer: '1' -- Layer the capacitor is on.
"""
component_metadata = Dict(short_name='cpw',
_qgeometry_table_poly='True',
_qgeometry_table_path='True')
"""Component metadata"""
#Currently setting the primary CPW length based on the coupling_length
    # May want it to be its own value that the user can control?
default_options = Dict(north_width='10um',
north_gap='6um',
south_width='10um',
south_gap='6um',
cap_width='10um',
cap_gap='6um',
cap_gap_ground='6um',
finger_length='20um',
finger_count='5',
cap_distance='50um',
pos_x='0um',
pos_y='0um',
orientation='0',
chip='main',
layer='1')
"""Default connector options"""
def make(self):
"""Build the component."""
p = self.p
N = int(p.finger_count)
#Finger Capacitor
cap_box = draw.rectangle(N * p.cap_width + (N - 1) * p.cap_gap,
p.cap_gap + 2 * p.cap_width + p.finger_length,
0, 0)
make_cut_list = []
make_cut_list.append([0, (p.finger_length) / 2])
make_cut_list.append([(p.cap_width) + (p.cap_gap / 2),
(p.finger_length) / 2])
flip = -1
for i in range(1, N):
make_cut_list.append([
i * (p.cap_width) + (2 * i - 1) * (p.cap_gap / 2),
flip * (p.finger_length) / 2
])
make_cut_list.append([
(i + 1) * (p.cap_width) + (2 * i + 1) * (p.cap_gap / 2),
flip * (p.finger_length) / 2
])
flip = flip * -1
cap_cut = draw.LineString(make_cut_list).buffer(p.cap_gap / 2,
cap_style=2,
join_style=2)
cap_cut = draw.translate(cap_cut,
-(N * p.cap_width + (N - 1) * p.cap_gap) / 2,
0)
cap_body = draw.subtract(cap_box, cap_cut)
cap_body = draw.translate(
cap_body, 0, -p.cap_distance -
(p.cap_gap + 2 * p.cap_width + p.finger_length) / 2)
cap_etch = draw.rectangle(
N * p.cap_width + (N - 1) * p.cap_gap + 2 * p.cap_gap_ground,
p.cap_gap + 2 * p.cap_width + p.finger_length +
2 * p.cap_gap_ground, 0, -p.cap_distance -
(p.cap_gap + 2 * p.cap_width + p.finger_length) / 2)
#CPW
north_cpw = draw.LineString([[0, 0], [0, -p.cap_distance]])
south_cpw = draw.LineString(
[[
0, -p.cap_distance -
(p.cap_gap + 2 * p.cap_width + p.finger_length)
],
[
0, -2 * p.cap_distance -
(p.cap_gap + 2 * p.cap_width + p.finger_length)
]])
#Rotate and Translate
c_items = [north_cpw, south_cpw, cap_body, cap_etch]
c_items = draw.rotate(c_items, p.orientation, origin=(0, 0))
c_items = draw.translate(c_items, p.pos_x, p.pos_y)
[north_cpw, south_cpw, cap_body, cap_etch] = c_items
#Add to qgeometry tables
self.add_qgeometry('path', {'north_cpw': north_cpw},
width=p.north_width,
layer=p.layer)
self.add_qgeometry('path', {'north_cpw_sub': north_cpw},
width=p.north_width + 2 * p.north_gap,
layer=p.layer,
subtract=True)
self.add_qgeometry('path', {'south_cpw': south_cpw},
width=p.south_width,
layer=p.layer)
self.add_qgeometry('path', {'south_cpw_sub': south_cpw},
width=p.south_width + 2 * p.south_gap,
layer=p.layer,
subtract=True)
self.add_qgeometry('poly', {'cap_body': cap_body}, layer=p.layer)
self.add_qgeometry('poly', {'cap_etch': cap_etch},
layer=p.layer,
subtract=True)
#Add pins
north_pin_list = north_cpw.coords
south_pin_list = south_cpw.coords
self.add_pin('north_end',
points=np.array(north_pin_list[::-1]),
width=p.north_width,
input_as_norm=True)
self.add_pin('south_end',
points=np.array(south_pin_list),
width=p.south_width,
input_as_norm=True)
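        # Illustrative usage sketch (added note, not part of the original file; the
        # design object, component name and option values are assumptions):
        #
        #   from qiskit_metal import designs
        #   design = designs.DesignPlanar()
        #   cap = CapNInterdigital(design, 'C1',
        #                          options=dict(finger_count='7', finger_length='30um',
        #                                       pos_x='1mm', orientation='90'))
        #   design.rebuild()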
| 2.28125 | 2 |
ThirdParty/protobuf-registry/python/protobufs/services/feature/actions/get_flags_pb2.py | getcircle/luno-ios | 0 | 5752 | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: protobufs/services/feature/actions/get_flags.proto
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='protobufs/services/feature/actions/get_flags.proto',
package='services.feature.actions.get_flags',
syntax='proto3',
serialized_pb=b'\n2protobufs/services/feature/actions/get_flags.proto\x12\"services.feature.actions.get_flags\"\x0b\n\tRequestV1\"\x84\x01\n\nResponseV1\x12H\n\x05\x66lags\x18\x01 \x03(\x0b\x32\x39.services.feature.actions.get_flags.ResponseV1.FlagsEntry\x1a,\n\nFlagsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x08:\x02\x38\x01\x62\x06proto3'
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_REQUESTV1 = _descriptor.Descriptor(
name='RequestV1',
full_name='services.feature.actions.get_flags.RequestV1',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=90,
serialized_end=101,
)
_RESPONSEV1_FLAGSENTRY = _descriptor.Descriptor(
name='FlagsEntry',
full_name='services.feature.actions.get_flags.ResponseV1.FlagsEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='services.feature.actions.get_flags.ResponseV1.FlagsEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='value', full_name='services.feature.actions.get_flags.ResponseV1.FlagsEntry.value', index=1,
number=2, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), b'8\001'),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=192,
serialized_end=236,
)
_RESPONSEV1 = _descriptor.Descriptor(
name='ResponseV1',
full_name='services.feature.actions.get_flags.ResponseV1',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='flags', full_name='services.feature.actions.get_flags.ResponseV1.flags', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[_RESPONSEV1_FLAGSENTRY, ],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=104,
serialized_end=236,
)
_RESPONSEV1_FLAGSENTRY.containing_type = _RESPONSEV1
_RESPONSEV1.fields_by_name['flags'].message_type = _RESPONSEV1_FLAGSENTRY
DESCRIPTOR.message_types_by_name['RequestV1'] = _REQUESTV1
DESCRIPTOR.message_types_by_name['ResponseV1'] = _RESPONSEV1
RequestV1 = _reflection.GeneratedProtocolMessageType('RequestV1', (_message.Message,), dict(
DESCRIPTOR = _REQUESTV1,
__module__ = 'protobufs.services.feature.actions.get_flags_pb2'
# @@protoc_insertion_point(class_scope:services.feature.actions.get_flags.RequestV1)
))
_sym_db.RegisterMessage(RequestV1)
ResponseV1 = _reflection.GeneratedProtocolMessageType('ResponseV1', (_message.Message,), dict(
FlagsEntry = _reflection.GeneratedProtocolMessageType('FlagsEntry', (_message.Message,), dict(
DESCRIPTOR = _RESPONSEV1_FLAGSENTRY,
__module__ = 'protobufs.services.feature.actions.get_flags_pb2'
# @@protoc_insertion_point(class_scope:services.feature.actions.get_flags.ResponseV1.FlagsEntry)
))
,
DESCRIPTOR = _RESPONSEV1,
__module__ = 'protobufs.services.feature.actions.get_flags_pb2'
# @@protoc_insertion_point(class_scope:services.feature.actions.get_flags.ResponseV1)
))
_sym_db.RegisterMessage(ResponseV1)
_sym_db.RegisterMessage(ResponseV1.FlagsEntry)
_RESPONSEV1_FLAGSENTRY.has_options = True
_RESPONSEV1_FLAGSENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), b'8\001')
# @@protoc_insertion_point(module_scope)
| 1.078125 | 1 |
Medium/102_2.py | Hellofafar/Leetcode | 6 | 5753 | <reponame>Hellofafar/Leetcode
# ------------------------------
# Binary Tree Level Order Traversal
#
# Description:
# Given a binary tree, return the level order traversal of its nodes' values. (ie, from
# left to right, level by level).
#
# For example:
# Given binary tree [3,9,20,null,null,15,7],
# 3
# / \
# 9 20
# / \
# 15 7
# return its level order traversal as:
# [
# [3],
# [9,20],
# [15,7]
# ]
#
# Version: 2.0
# 11/11/19 by Jianfa
# ------------------------------
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
def levelOrder(self, root: TreeNode) -> List[List[int]]:
if not root:
return []
# BFS
res = []
queue = [root]
while queue:
temp = [] # values of this level of nodes
children = [] # next level of nodes
for node in queue:
temp.append(node.val)
if node.left:
children.append(node.left)
if node.right:
children.append(node.right)
            res.append(temp[:]) # append a copy of this level's values (temp is re-created on the next pass)
            queue = children[:] # descend to the next level of nodes
return res
# Used for testing
if __name__ == "__main__":
test = Solution()
# ------------------------------
# Summary:
# Similar BFS solution but use a little more spaces.
# On 102.py, using list.pop(0) actually takes O(n) time because it needs to remap the index
# of values. Use collections.deque instead.
#
# O(N) time O(N) space | 3.984375 | 4 |
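# Illustrative sketch (added for clarity, not part of the original solution): the
# same level-order BFS written with collections.deque as suggested above, so that
# removing from the front of the queue is O(1) instead of list.pop(0)'s O(n).
# It assumes the same TreeNode definition used by the Solution class.
from collections import deque

def level_order_deque(root):
    if not root:
        return []
    res, queue = [], deque([root])
    while queue:
        level = []
        for _ in range(len(queue)):  # process exactly one level per pass
            node = queue.popleft()
            level.append(node.val)
            if node.left:
                queue.append(node.left)
            if node.right:
                queue.append(node.right)
        res.append(level)
    return res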
mturk/comparison_among_different_models/sample_from_models_for_comparison.py | qiaone/GIF | 322 | 5754 | import sys
sys.path.append('../../')
import constants as cnst
import os
os.environ['PYTHONHASHSEED'] = '2'
import tqdm
from model.stg2_generator import StyledGenerator
import numpy as np
from my_utils.visualize_flame_overlay import OverLayViz
from my_utils.flm_dynamic_fit_overlay import camera_ringnetpp
from my_utils.generate_gif import generate_from_flame_sequence
from my_utils.generic_utils import save_set_of_images
from my_utils import compute_fid
import constants
from dataset_loaders import fast_image_reshape
import torch
from my_utils import generic_utils
from my_utils.eye_centering import position_to_given_location
def ge_gen_in(flm_params, textured_rndr, norm_map, normal_map_cond, texture_cond):
if normal_map_cond and texture_cond:
return torch.cat((textured_rndr, norm_map), dim=1)
elif normal_map_cond:
return norm_map
elif texture_cond:
return textured_rndr
else:
return flm_params
# General settings
save_images = True
code_size = 236
use_inst_norm = True
core_tensor_res = 4
resolution = 256
alpha = 1
step_max = int(np.log2(resolution) - 2)
root_out_dir = f'{cnst.output_root}sample/'
num_smpl_to_eval_on = 1000
use_styled_conv_stylegan2 = True
flength = 5000
cam_t = np.array([0., 0., 0])
camera_params = camera_ringnetpp((512, 512), trans=cam_t, focal=flength)
run_ids_1 = [29, ] # with sqrt(2)
# run_ids_1 = [7, 24, 8, 3]
# run_ids_1 = [7, 8, 3]
settings_for_runs = \
{24: {'name': 'vector_cond', 'model_idx': '216000_1', 'normal_maps_as_cond': False,
'rendered_flame_as_condition': False, 'apply_sqrt2_fac_in_eq_lin': False},
29: {'name': 'full_model', 'model_idx': '294000_1', 'normal_maps_as_cond': True,
'rendered_flame_as_condition': True, 'apply_sqrt2_fac_in_eq_lin': True},
7: {'name': 'flm_rndr_tex_interp', 'model_idx': '051000_1', 'normal_maps_as_cond': False,
'rendered_flame_as_condition': True, 'apply_sqrt2_fac_in_eq_lin': False},
3: {'name': 'norm_mp_tex_interp', 'model_idx': '203000_1', 'normal_maps_as_cond': True,
'rendered_flame_as_condition': False, 'apply_sqrt2_fac_in_eq_lin': False},
8: {'name': 'norm_map_rend_flm_no_tex_interp', 'model_idx': '009000_1', 'normal_maps_as_cond': True,
'rendered_flame_as_condition': True, 'apply_sqrt2_fac_in_eq_lin': False},}
overlay_visualizer = OverLayViz()
# overlay_visualizer.setup_renderer(mesh_file=None)
flm_params = np.zeros((num_smpl_to_eval_on, code_size)).astype('float32')
fl_param_dict = np.load(cnst.all_flame_params_file, allow_pickle=True).item()
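# each row packs one image's FLAME shape, expression, pose, camera, texture and lighting codes into a single 236-dim vector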
for i, key in enumerate(fl_param_dict):
flame_param = fl_param_dict[key]
flame_param = np.hstack((flame_param['shape'], flame_param['exp'], flame_param['pose'], flame_param['cam'],
flame_param['tex'], flame_param['lit'].flatten()))
# tz = camera_params['f'][0] / (camera_params['c'][0] * flame_param[:, 156:157])
# flame_param[:, 156:159] = np.concatenate((flame_param[:, 157:], tz), axis=1)
# import ipdb; ipdb.set_trace()
flm_params[i, :] = flame_param.astype('float32')
if i == num_smpl_to_eval_on - 1:
break
batch_size = 64
flame_decoder = overlay_visualizer.deca.flame.eval()
for run_idx in run_ids_1:
# import ipdb; ipdb.set_trace()
generator_1 = torch.nn.DataParallel(
StyledGenerator(embedding_vocab_size=69158,
rendered_flame_ascondition=settings_for_runs[run_idx]['rendered_flame_as_condition'],
normal_maps_as_cond=settings_for_runs[run_idx]['normal_maps_as_cond'],
core_tensor_res=core_tensor_res,
w_truncation_factor=1.0,
apply_sqrt2_fac_in_eq_lin=settings_for_runs[run_idx]['apply_sqrt2_fac_in_eq_lin'],
n_mlp=8)).cuda()
model_idx = settings_for_runs[run_idx]['model_idx']
ckpt1 = torch.load(f'{cnst.output_root}checkpoint/{run_idx}/{model_idx}.model')
generator_1.load_state_dict(ckpt1['generator_running'])
generator_1 = generator_1.eval()
# images = np.zeros((num_smpl_to_eval_on, 3, resolution, resolution)).astype('float32')
pbar = tqdm.tqdm(range(0, num_smpl_to_eval_on, batch_size))
pbar.set_description('Generating_images')
flame_mesh_imgs = None
mdl_id = 'mdl2_'
if settings_for_runs[run_idx]['name'] == 'full_model':
mdl_id = 'mdl1_'
for batch_idx in pbar:
flm_batch = flm_params[batch_idx:batch_idx+batch_size, :]
flm_batch = torch.from_numpy(flm_batch).cuda()
flm_batch = position_to_given_location(flame_decoder, flm_batch)
batch_size_true = flm_batch.shape[0]
if settings_for_runs[run_idx]['normal_maps_as_cond'] or \
settings_for_runs[run_idx]['rendered_flame_as_condition']:
cam = flm_batch[:, constants.DECA_IDX['cam'][0]:constants.DECA_IDX['cam'][1]:]
shape = flm_batch[:, constants.INDICES['SHAPE'][0]:constants.INDICES['SHAPE'][1]]
exp = flm_batch[:, constants.INDICES['EXP'][0]:constants.INDICES['EXP'][1]]
pose = flm_batch[:, constants.INDICES['POSE'][0]:constants.INDICES['POSE'][1]]
# import ipdb; ipdb.set_trace()
light_code = \
flm_batch[:, constants.DECA_IDX['lit'][0]:constants.DECA_IDX['lit'][1]:].view((batch_size_true, 9, 3))
texture_code = flm_batch[:, constants.DECA_IDX['tex'][0]:constants.DECA_IDX['tex'][1]:]
norma_map_img, _, _, _, rend_flm = \
overlay_visualizer.get_rendered_mesh(flame_params=(shape, exp, pose, light_code, texture_code),
camera_params=cam)
rend_flm = torch.clamp(rend_flm, 0, 1) * 2 - 1
norma_map_img = torch.clamp(norma_map_img, 0, 1) * 2 - 1
rend_flm = fast_image_reshape(rend_flm, height_out=256, width_out=256, mode='bilinear')
norma_map_img = fast_image_reshape(norma_map_img, height_out=256, width_out=256, mode='bilinear')
else:
rend_flm = None
norma_map_img = None
gen_1_in = ge_gen_in(flm_batch, rend_flm, norma_map_img, settings_for_runs[run_idx]['normal_maps_as_cond'],
settings_for_runs[run_idx]['rendered_flame_as_condition'])
# torch.manual_seed(2)
identity_embeddings = torch.randint(low=0, high=69158, size=(gen_1_in.shape[0], ), dtype=torch.long,
device='cuda')
mdl_1_gen_images = generic_utils.get_images_from_flame_params(
flame_params=gen_1_in.cpu().numpy(), pose=None,
model=generator_1,
step=step_max, alpha=alpha,
input_indices=identity_embeddings.cpu().numpy())
# import ipdb; ipdb.set_trace()
images = torch.clamp(mdl_1_gen_images, -1, 1).cpu().numpy()
flame_mesh_imgs = torch.clamp(rend_flm, -1, 1).cpu().numpy()
save_path_current_id = os.path.join(root_out_dir, 'inter_model_comparison', settings_for_runs[run_idx]['name'])
save_set_of_images(path=save_path_current_id, prefix=f'{mdl_id}_{batch_idx}',
images=(images + 1) / 2, show_prog_bar=True)
#save flam rndr
save_path_current_id_flm_rndr = os.path.join(root_out_dir, 'inter_model_comparison',
settings_for_runs[run_idx]['name'])
save_set_of_images(path=save_path_current_id_flm_rndr, prefix=f'mesh_{batch_idx}',
images=(flame_mesh_imgs + 1) / 2, show_prog_bar=True)
# save_set_of_images(path=save_path_this_expt, prefix='mesh_', images=((norma_map_img + 1) / 2).cpu().numpy())
# save_set_of_images(path=save_path_this_expt, prefix='mdl1_', images=((mdl_1_gen_images + 1) / 2).cpu().numpy())
# save_set_of_images(path=save_path_this_expt, prefix='mdl2_', images=((mdl_2_gen_images + 1) / 2).cpu().numpy()) | 1.703125 | 2 |
setup.py | philippWassibauer/django-activity-stream | 4 | 5755 | <reponame>philippWassibauer/django-activity-stream
from distutils.core import setup
""" django-activity-stream instalation script """
setup(
name = 'activity_stream',
description = 'generic activity feed system for users',
author = '<NAME>',
author_email = '<EMAIL>',
url='http://github.com/philippWassibauer/django-activity-stream',
download_url='http://github.com/philippWassibauer/django-activity-stream/tarball/master',
license='MIT',
version = __import__('activity_stream').__version__,
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Software Development :: Libraries :: Python Modules',
],
)
| 1.242188 | 1 |
02/selenium.02.py | study-machine-learning/dongheon.shin | 2 | 5756 | <gh_stars>1-10
from selenium import webdriver
username = "henlix"
password = "<PASSWORD>"
browser = webdriver.PhantomJS()
browser.implicitly_wait(5)
url_login = "https://nid.naver.com/nidlogin.login"
browser.get(url_login)
el = browser.find_element_by_id("id")
el.clear()
el.send_keys(username)
el = browser.find_element_by_id("pw")
el.clear()
el.send_keys(password)
form = browser.find_element_by_css_selector("input.btn_global[type=submit]")
form.submit()
url_shopping_list = "https://order.pay.naver.com/home?tabMenu=SHOPPING"
browser.get(url_shopping_list)
products = browser.find_elements_by_css_selector(".p_info span")
for product in products:
print("- ", product.text)
# PYTHONIOENCODING=utf-8:surrogateescape python3 selenium.02.py
| 3.109375 | 3 |
visualization/matplotlib/barwitherror.py | Licas/datascienceexamples | 0 | 5757 | <filename>visualization/matplotlib/barwitherror.py<gh_stars>0
from matplotlib import pyplot as plt
drinks = ["cappuccino", "latte", "chai", "americano", "mocha", "espresso"]
ounces_of_milk = [6, 9, 4, 0, 9, 0]
error = [0.6, 0.9, 0.4, 0, 0.9, 0]
# yerr -> element i is drawn as a +/- error[i] error bar on bar i's value
plt.bar( range(len(drinks)),ounces_of_milk, yerr=error, capsize=15)
plt.show() | 3.421875 | 3 |
Packs/mnemonicMDR/Integrations/ArgusManagedDefence/ArgusManagedDefence.py | matan-xmcyber/content | 0 | 5758 | import demistomock as demisto
from CommonServerPython import *
""" IMPORTS """
import json
import urllib3
import dateparser
import traceback
from typing import Any, Dict, List, Union
import logging
from argus_api import session as argus_session
from argus_api.api.currentuser.v1.user import get_current_user
from argus_api.api.cases.v2.case import (
add_case_tag,
add_comment,
advanced_case_search,
close_case,
create_case,
delete_case,
delete_comment,
download_attachment,
edit_comment,
get_attachment,
get_case_metadata_by_id,
list_case_attachments,
list_case_tags,
list_case_comments,
remove_case_tag_by_id,
remove_case_tag_by_key_value,
update_case,
)
from argus_api.api.events.v1 import get_event_by_path
from argus_api.api.events.v1.case.case import get_events_for_case
from argus_api.api.events.v1.aggregated import (
find_aggregated_events,
list_aggregated_events,
)
from argus_api.api.events.v1.payload import get_payload
from argus_api.api.events.v1.pcap import get_pcap
from argus_api.api.events.v1.nids import find_n_i_d_s_events, list_n_i_d_s_events
from argus_api.api.pdns.v3.search import search_records
from argus_api.api.reputation.v1.observation import (
fetch_observations_for_domain,
fetch_observations_for_i_p,
)
# Disable insecure warnings
urllib3.disable_warnings()
""" CONSTANTS """
DATE_FORMAT = "%Y-%m-%dT%H:%M:%SZ"
PRETTY_DATE_FORMAT = "%b %d, %Y, %H:%M:%S"
FETCH_TAG = demisto.params().get("fetch_tag")
""" HELPER FUNCTIONS """
def set_argus_settings(
api_key: str, base_url: str = None, proxies: dict = None, verify: bool = None
):
argus_session.api_key = api_key
argus_session.base_url = base_url
argus_session.proxies = proxies
argus_session.verify = verify
def argus_priority_to_demisto_severity(priority: str) -> int:
mapping = {"low": 1, "medium": 2, "high": 3, "critical": 4}
return mapping.get(priority, 0)
def argus_status_to_demisto_status(status: str) -> int:
mapping = {
"pendingCustomer": 0,
"pendingSoc": 0,
"pendingVendor": 0,
"pendingClose": 0,
"workingSoc": 1,
"workingCustomer": 1,
"closed": 2,
}
return mapping.get(status, 0)
def build_argus_priority_from_min_severity(min_severity: str) -> List[str]:
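    # e.g. "medium" -> ["medium", "high", "critical"]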
severities = ["low", "medium", "high", "critical"]
min_severity_list = []
for severity in severities:
if argus_priority_to_demisto_severity(
min_severity.lower()
) <= argus_priority_to_demisto_severity(severity):
min_severity_list.append(severity)
return min_severity_list
def parse_first_fetch(first_fetch: Any) -> Any:
if isinstance(first_fetch, str):
if first_fetch[0] != "-":
first_fetch = f"-{first_fetch}"
return first_fetch
def build_tags_from_list(lst: list) -> List[Dict]:
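    # e.g. ["key1", "value1", "key2", "value2"] -> [{"key": "key1", "value": "value1"}, {"key": "key2", "value": "value2"}]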
if not lst:
return []
if len(lst) % 2 != 0:
return []
tags = []
for i in range(0, len(lst), 2):
tags.append({"key": lst[i], "value": lst[i + 1]})
return tags
def str_to_dict(string: str) -> dict:
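    # e.g. "key1,value1,key2,value2" -> {"key1": "value1", "key2": "value2"}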
if not string:
return {}
lst = argToList(string)
if len(lst) % 2 != 0:
return {}
return {lst[i]: lst[i + 1] for i in range(0, len(lst), 2)}
def date_time_to_epoch_milliseconds(date_time: Union[datetime, str] = None) -> int:
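    # e.g. "2021-01-01T00:00:00Z" -> 1609459200000; None -> current time in epoch milliseconds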
if isinstance(date_time, datetime):
return int(date_time.timestamp() * 1000)
if isinstance(date_time, str):
return date_time_to_epoch_milliseconds(dateparser.parse(date_time))
return int(datetime.now().timestamp() * 1000)
def pretty_print_date(date_time: Union[datetime, str] = None) -> str:
if isinstance(date_time, datetime):
return date_time.strftime(PRETTY_DATE_FORMAT)
if isinstance(date_time, str):
return pretty_print_date(dateparser.parse(date_time))
return datetime.now().strftime(PRETTY_DATE_FORMAT)
def pretty_print_case_metadata(result: dict, title: str = None) -> str:
data = result["data"]
string = title if title else f"# #{data['id']}: {data['subject']}\n"
string += "_Priority: {}, status: {}, last updated: {}_\n".format(
data["priority"], data["status"], pretty_print_date(data["lastUpdatedTime"])
)
string += "Reported by {} at {}\n\n".format(
data["publishedByUser"]["name"], pretty_print_date(data["publishedTime"])
)
string += data["description"]
return string
def pretty_print_comment(comment: dict, title: str = None) -> str:
string = title if title else ""
string += f"#### *{comment['addedByUser']['userName']} - {pretty_print_date(comment['addedTime'])}*\n"
string += (
f"_Last updated {pretty_print_date(comment['lastUpdatedTime'])}_\n"
if comment["lastUpdatedTime"]
else ""
)
string += f"{comment['comment']}\n\n"
string += f"_id: {comment['id']}_\n"
string += f"_Flags: {str(comment['flags'])}_\n" if comment["flags"] else ""
string += "* * *\n"
return string
def pretty_print_comments(comments: list, title: str = None) -> str:
string = title if title else ""
for comment in comments:
string += pretty_print_comment(comment)
return string
def pretty_print_events(result: dict, title: str = None) -> str:
string = title if title else ""
string += "_Count: {}, showing {} events, from {} to {}_\n".format(
result["count"], result["size"], result["offset"], result["limit"]
)
string += tableToMarkdown("Events", result["data"])
return string
""" COMMAND FUNCTIONS """
def test_module_command() -> str:
response = get_current_user()
if response["responseCode"] == 200:
return "ok"
return (
f"Unable to communicate with Argus API {response['responseCode']}, {response}"
)
def fetch_incidents(
last_run: dict, first_fetch_period: str, limit: int = 25, min_severity: str = "low"
):
start_timestamp = last_run.get("start_time", None) if last_run else None
# noinspection PyTypeChecker
result = advanced_case_search(
startTimestamp=start_timestamp if start_timestamp else first_fetch_period,
endTimestamp="now",
limit=limit,
sortBy=["createdTimestamp"],
priority=build_argus_priority_from_min_severity(min_severity),
subCriteria=[
{"exclude": True, "status": ["closed"]},
],
timeFieldStrategy=["createdTimestamp"],
)
incidents = []
for case in result["data"]:
incidents.append(
{
"name": f"#{case['id']}: {case['subject']}",
"occurred": case["createdTime"],
"severity": argus_priority_to_demisto_severity(case["priority"]),
"status": argus_status_to_demisto_status(case["status"]),
"details": case["description"],
"customFields": {
"argus_id": str(case["id"]),
"type": case["type"],
"category": case["category"]["name"] if case["category"] else None,
"service": case["service"]["name"],
"lastUpdatedTime": case["lastUpdatedTime"],
"createdTimestamp": case["createdTimestamp"],
"customer": case["customer"]["shortName"],
},
"rawJSON": json.dumps(case),
}
)
if result["data"]:
last_run["start_time"] = str(result["data"][-1]["createdTimestamp"] + 1)
return last_run, incidents
def add_case_tag_command(args: Dict[str, Any]) -> CommandResults:
case_id = args.get("case_id", None)
key = args.get("key", None)
value = args.get("value", None)
if not case_id:
raise ValueError("case_id not specified")
if not key:
raise ValueError("key not specified")
if not value:
raise ValueError("value not specified")
tag = {"key": key, "value": value}
result = add_case_tag(caseID=case_id, tags=tag)
headers = ["key", "value", "addedTime"]
readable_output = tableToMarkdown(
f"#{case_id}: Tags", result["data"], headers=headers
)
return CommandResults(
readable_output=readable_output,
outputs_prefix="Argus.Tags",
outputs=result,
raw_response=result,
)
def add_comment_command(args: Dict[str, Any]) -> CommandResults:
case_id = args.get("case_id", None)
comment = args.get("comment", None)
if not case_id:
raise ValueError("case_id not specified")
if not comment:
raise ValueError("comment not specified")
result = add_comment(
caseID=case_id,
comment=comment,
asReplyTo=args.get("as_reply_to", None),
internal=args.get("internal", None),
originEmailAddress=args.get("origin_email_address", None),
associatedAttachmentID=args.get("associated_attachment_id", None),
)
return CommandResults(
readable_output=pretty_print_comment(
result["data"], f"# #{case_id}: Added comment\n"
),
outputs_prefix="Argus.Comment",
outputs=result,
raw_response=result,
)
def advanced_case_search_command(args: Dict[str, Any]) -> CommandResults:
# noinspection PyTypeChecker
result = advanced_case_search(
startTimestamp=args.get("start_timestamp", None),
endTimestamp=args.get("end_timestamp", None),
limit=args.get("limit", None),
offset=args.get("offset", None),
includeDeleted=args.get("include_deleted", None),
subCriteria=argToList(args.get("sub_criteria", None)),
exclude=args.get("exclude", None),
required=args.get("required", None),
customerID=argToList(args.get("customer_id", None)),
caseID=argToList(args.get("case_id", None)),
customer=argToList(args.get("customer", None)),
type=argToList(args.get("case_type", None)),
service=argToList(args.get("service", None)),
category=argToList(args.get("category", None)),
status=argToList(args.get("status", None)),
priority=argToList(args.get("priority", None)),
assetID=argToList(args.get("asset_id", None)),
tag=argToList(args.get("tag", None)),
workflow=argToList(args.get("workflow", None)),
field=argToList(args.get("field", None)),
keywords=argToList(args.get("keywords", None)),
timeFieldStrategy=argToList(args.get("time_field_strategy", None)),
timeMatchStrategy=args.get("time_match_strategy", None),
keywordFieldStrategy=argToList(args.get("keyword_field_strategy", None)),
keywordMatchStrategy=args.get("keyword_match_strategy", None),
user=argToList(args.get("user", None)),
userFieldStrategy=argToList(args.get("user_field_strategy", None)),
userAssigned=args.get("user_assigned", None),
techAssigned=args.get("tech_assigned", None),
includeWorkflows=args.get("include_workflows", None),
includeDescription=args.get("include_description", None),
accessMode=argToList(args.get("access_mode", None)),
explicitAccess=argToList(args.get("explicit_access", None)),
sortBy=argToList(args.get("sort_by", None)),
includeFlags=argToList(args.get("include_flags", None)),
excludeFlags=argToList(args.get("exclude_flags", None)),
)
readable_output = f"Advanced Case Search: {result['count']} result(s)\n"
readable_output += tableToMarkdown(
"Output not suitable for playground", result["data"]
)
return CommandResults(
readable_output=readable_output,
outputs_prefix="Argus.Cases",
outputs=result,
raw_response=result,
)
def close_case_command(args: Dict[str, Any]) -> CommandResults:
case_id = args.get("case_id", None)
if not case_id:
raise ValueError("case_id not specified")
result = close_case(
caseID=case_id,
comment=args.get("comment", None),
)
readable_output = f"# #{case_id}: close case\n"
readable_output += (
f"_Status: {result['data']['status']}, at: {result['data']['closedTime']}_"
)
return CommandResults(
readable_output=readable_output,
outputs_prefix="Argus.Case",
outputs=result,
raw_response=result,
)
def create_case_command(args: Dict[str, Any]) -> CommandResults:
subject = args.get("subject", None)
description = args.get("description", None)
service = args.get("service", None)
case_type = args.get("type", None)
tags = args.get("tags", None)
if not subject:
raise ValueError("subject not specified")
if not description:
raise ValueError("description not specified")
if not service:
raise ValueError("service not specified")
if not case_type:
raise ValueError("case_type not specified")
if tags:
tags = str(tags).split(",")
if len(tags) % 2 != 0:
raise ValueError("tags list must be of even number", tags)
tags = build_tags_from_list(tags)
result = create_case(
customer=args.get("customer", None),
service=service,
category=args.get("category", None),
type=case_type,
status=args.get("status", None),
tags=tags,
subject=subject,
description=description,
customerReference=args.get("customer_reference", None),
priority=args.get("priority", None),
accessMode=args.get("access_mode", None),
originEmailAddress=args.get("origin_email_address", None),
publish=args.get("publish", None),
defaultWatchers=args.get("default_watchers", None),
)
return CommandResults(
readable_output=pretty_print_case_metadata(result),
outputs_prefix="Argus.Case",
outputs=result,
raw_response=result,
)
def delete_case_command(args: Dict[str, Any]) -> CommandResults:
case_id = args.get("case_id", None)
if not case_id:
raise ValueError("case id not specified")
result = delete_case(caseID=case_id)
return CommandResults(
readable_output=pretty_print_case_metadata(result, "Case deleted"),
outputs_prefix="Argus.Case",
outputs=result,
raw_response=result,
)
def delete_comment_command(args: Dict[str, Any]) -> CommandResults:
case_id = args.get("case_id", None)
comment_id = args.get("comment_id", None)
if not case_id:
raise ValueError("case id not specified")
if not comment_id:
raise ValueError("comment id not specified")
result = delete_comment(caseID=case_id, commentID=comment_id)
return CommandResults(
readable_output=pretty_print_comment(
result["data"], f"# #{case_id}: Deleted comment\n"
),
outputs_prefix="Argus.Comment",
outputs=result,
raw_response=result,
)
def download_attachment_command(args: Dict[str, Any]) -> Any:
case_id = args.get("case_id", None)
attachment_id = args.get("attachment_id", None)
if not case_id:
raise ValueError("case id not specified")
if not attachment_id:
raise ValueError("attachment id not specified")
result = download_attachment(caseID=case_id, attachmentID=attachment_id)
return fileResult(attachment_id, result.content)
def edit_comment_command(args: Dict[str, Any]) -> CommandResults:
case_id = args.get("case_id", None)
comment_id = args.get("comment_id", None)
comment = args.get("comment", None)
if not case_id:
raise ValueError("case id not specified")
if not comment_id:
raise ValueError("comment id not specified")
if not comment:
raise ValueError("comment not specified")
result = edit_comment(caseID=case_id, commentID=comment_id, comment=comment)
return CommandResults(
readable_output=pretty_print_comment(
result["data"], f"# #{case_id}: Updated comment\n"
),
outputs_prefix="Argus.Comment",
outputs=result,
raw_response=result,
)
def get_attachment_command(args: Dict[str, Any]) -> CommandResults:
case_id = args.get("case_id", None)
attachment_id = args.get("attachment_id", None)
if not case_id:
raise ValueError("case id not specified")
if not attachment_id:
raise ValueError("attachment id not specified")
result = get_attachment(caseID=case_id, attachmentID=attachment_id)
readable_output = f"# #{case_id}: attachment metadata\n"
readable_output += f"#### *{result['data']['addedByUser']['userName']} - {result['data']['addedTime']}*\n"
readable_output += f"{result['data']['name']} ({result['data']['mimeType']}, {result['data']['size']} bytes)\n\n"
readable_output += f"_id: {result['data']['id']}_\n"
return CommandResults(
readable_output=readable_output,
outputs_prefix="Argus.Attachments",
outputs=result,
raw_response=result,
)
def get_case_metadata_by_id_command(args: Dict[str, Any]) -> CommandResults:
case_id = args.get("case_id", None)
if not case_id:
raise ValueError("case id not specified")
result = get_case_metadata_by_id(
id=case_id, skipRedirect=args.get("skip_redirect", None)
)
return CommandResults(
readable_output=pretty_print_case_metadata(result),
outputs_prefix="Argus.Case",
outputs=result,
raw_response=result,
)
def list_case_attachments_command(args: Dict[str, Any]) -> CommandResults:
case_id = args.get("case_id", None)
if not case_id:
raise ValueError("case_id not specified")
result = list_case_attachments(
caseID=case_id, limit=args.get("limit", None), offset=args.get("offset", None)
)
readable_output = f"# #{case_id}: Case attachments\n"
for attachment in result["data"]:
readable_output += f"#### *{attachment['addedByUser']['userName']} - {attachment['addedTime']}*\n"
readable_output += f"{attachment['name']} ({attachment['mimeType']}, {attachment['size']} kb)\n\n"
readable_output += f"_id: {attachment['id']}_\n"
readable_output += "* * *\n"
return CommandResults(
readable_output=readable_output,
outputs_prefix="Argus.Attachments",
outputs=result,
raw_response=result,
)
def list_case_tags_command(args: Dict[str, Any]) -> CommandResults:
case_id = args.get("case_id", None)
if not case_id:
raise ValueError("case_id not specified")
result = list_case_tags(
caseID=case_id, limit=args.get("limit", None), offset=args.get("offset", None)
)
headers = ["key", "value", "addedTime", "id"]
readable_output = tableToMarkdown(
f"#{case_id}: Tags", result["data"], headers=headers
)
return CommandResults(
readable_output=readable_output,
outputs_prefix="Argus.Tags",
outputs=result,
raw_response=result,
)
def list_case_comments_command(args: Dict[str, Any]) -> CommandResults:
case_id = args.get("case_id", None)
sort_by = args.get("sort_by", None)
if not case_id:
raise ValueError("case_id not specified")
if sort_by:
sort_by = ["addedTimestamp"] if sort_by == "ascending" else ["-addedTimestamp"]
result = list_case_comments(
caseID=case_id,
beforeComment=args.get("before_comment", None),
afterComment=args.get("after_comment", None),
offset=args.get("offset", None),
limit=args.get("limit", None),
sortBy=sort_by,
)
return CommandResults(
readable_output=pretty_print_comments(
result["data"], f"# #{case_id}: Comments\n"
),
outputs_prefix="Argus.Comments",
outputs=result,
raw_response=result,
)
def remove_case_tag_by_id_command(args: Dict[str, Any]) -> CommandResults:
case_id = args.get("case_id", None)
tag_id = args.get("tag_id", None)
if not case_id:
raise ValueError("case id not specified")
if not tag_id:
raise ValueError("tag id not specified")
result = remove_case_tag_by_id(caseID=case_id, tagID=tag_id)
headers = ["key", "value", "addedTime", "id", "flags"]
readable_output = tableToMarkdown(
f"#{case_id}: Delete tags", result["data"], headers=headers
)
return CommandResults(
readable_output=readable_output,
outputs_prefix="Argus.Tags",
outputs=result,
raw_response=result,
)
def remove_case_tag_by_key_value_command(args: Dict[str, Any]) -> CommandResults:
case_id = args.get("case_id", None)
key = args.get("key", None)
value = args.get("value", None)
if not case_id:
raise ValueError("case id not specified")
if not key:
raise ValueError("key not specified")
if not value:
raise ValueError("value not specified")
result = remove_case_tag_by_key_value(caseID=case_id, tagKey=key, tagValue=value)
headers = ["key", "value", "addedTime", "id", "flags"]
readable_output = tableToMarkdown(
f"#{case_id}: Delete tags", result["data"], headers=headers
)
return CommandResults(
readable_output=readable_output,
outputs_prefix="Argus.Tags",
outputs=result,
raw_response=result,
)
def update_case_command(args: Dict[str, Any]) -> CommandResults:
case_id = args.get("case_id", None)
if not case_id:
raise ValueError("case id not specified")
result = update_case(
id=case_id,
subject=args.get("subject", None),
description=args.get("description", None),
status=args.get("status", None),
priority=args.get("priority", None),
category=args.get("category", None),
reporter=args.get("reporter", None),
assignedUser=args.get("assigned_user", None),
assignedTech=args.get("assigned_tech", None),
customerReference=args.get("customer_reference", None),
comment=args.get("comment", None),
originEmailAddress=args.get("origin_email_address", None),
hasEvents=args.get("has_events", None),
internalComment=args.get("internal_comment", None),
)
return CommandResults(
readable_output=pretty_print_case_metadata(result),
outputs_prefix="Argus.Case",
outputs=result,
raw_response=result,
)
def get_event_command(args: Dict[str, Any]) -> CommandResults:
event_type = args.get("type", None)
timestamp = args.get("timestamp", None)
customer_id = args.get("customer_id", None)
event_id = args.get("event_id", None)
if not event_type:
raise ValueError("event type not specified")
if not timestamp:
raise ValueError("timestamp not specified")
if not customer_id:
raise ValueError("customer id not specified")
if not event_id:
raise ValueError("event id not specified")
result = get_event_by_path(
type=event_type, timestamp=timestamp, customerID=customer_id, eventID=event_id
)
return CommandResults(
readable_output=tableToMarkdown(f"Event: {event_id}", result["data"]),
outputs_prefix="Argus.Event",
outputs=result,
raw_response=result,
)
def get_events_for_case_command(args: Dict[str, Any]) -> CommandResults:
case_id = args.get("case_id", None)
if not case_id:
raise ValueError("case id not specified")
result = get_events_for_case(
caseID=case_id, limit=args.get("limit", None), offset=args.get("offset", None)
)
return CommandResults(
readable_output=pretty_print_events(
dict(result), f"# #{case_id}: Associated Events\n"
),
outputs_prefix="Argus.Events",
outputs=result,
raw_response=result,
)
def find_aggregated_events_command(args: Dict[str, Any]) -> CommandResults:
# noinspection PyTypeChecker
result = find_aggregated_events(
skipFutureEvents=args.get("skip_future_events", None),
exclude=args.get("exclude", None),
locationID=argToList(args.get("location_id", None)),
severity=argToList(args.get("severity", None)),
customer=argToList(args.get("customer", None)),
alarmID=argToList(args.get("alarm_id", None)),
attackCategoryID=argToList(args.get("attack_category_id", None)),
sourceGeoCountry=argToList(args.get("source_geo_country", None)),
destinationGeoCountry=argToList(args.get("destination_geo_country", None)),
geoCountry=argToList(args.get("geo_country", None)),
properties=str_to_dict(args.get("properties", None)),
exactMatchProperties=args.get("exact_match_properties", None),
subCriteria=argToList(args.get("sub_criteria", None)),
signature=argToList(args.get("signature", None)),
lastUpdatedTimestamp=args.get("last_updated_timestamp", None),
indexStartTime=args.get("index_start_time", None),
indexEndTime=args.get("index_end_time", None),
destinationIP=argToList(args.get("destination_ip", None)),
sourceIP=argToList(args.get("source_ip", None)),
ip=argToList(args.get("ip", None)),
destinationPort=argToList(args.get("destination_port", None)),
sourcePort=argToList(args.get("source_port", None)),
port=argToList(args.get("port", None)),
minSeverity=args.get("min_severity", None),
maxSeverity=args.get("max_severity", None),
limit=args.get("limit", 25),
offset=args.get("offset", None),
includeDeleted=args.get("include_deleted", None),
minCount=args.get("min_count", None),
associatedCaseID=argToList(args.get("associated_case_id", None)),
sourceIPMinBits=args.get("source_ip_min_bits", None),
destinationIPMinBits=args.get("destination_ip_min_bits", None),
startTimestamp=args.get("start_timestamp", "-24hours"),
endTimestamp=args.get("end_timestamp", "now"),
sortBy=argToList(args.get("sort_by", None)),
includeFlags=argToList(args.get("include_flags", None)),
excludeFlags=argToList(args.get("exclude_flags", None)),
)
return CommandResults(
readable_output=pretty_print_events(dict(result), "# Find events\n"),
outputs_prefix="Argus.Events",
outputs=result,
raw_response=result,
)
def list_aggregated_events_command(args: Dict[str, Any]) -> CommandResults:
result = list_aggregated_events(
customerID=args.get("customer_id", None),
signature=args.get("signature", None),
ip=args.get("ip", None),
startTimestamp=args.get("start_timestamp", None),
endTimestamp=args.get("end_timestamp", None),
limit=args.get("limit", None),
offset=args.get("offset", None),
)
return CommandResults(
readable_output=pretty_print_events(dict(result), "# List Events\n"),
outputs_prefix="Argus.Events",
outputs=result,
raw_response=result,
)
def get_payload_command(args: Dict[str, Any]) -> CommandResults:
event_type = args.get("type", None)
timestamp = args.get("timestamp", None)
customer_id = args.get("customer_id", None)
event_id = args.get("event_id", None)
if not event_type:
raise ValueError("event type not specified")
if not timestamp:
raise ValueError("timestamp not specified")
if not customer_id:
raise ValueError("customer id not specified")
if not event_id:
raise ValueError("event id not specified")
result = get_payload(
type=event_type, timestamp=timestamp, customerID=customer_id, eventID=event_id
)
readable_output = "# Event payload\n"
readable_output += f"Event: {event_id}, type: {result['data']['type']}\n"
readable_output += result["data"]["payload"]
return CommandResults(
readable_output=readable_output,
outputs_prefix="Argus.Payload",
outputs=result,
raw_response=result,
)
def get_pcap_command(args: Dict[str, Any]) -> Any:
event_type = args.get("type", None)
timestamp = args.get("timestamp", None)
customer_id = args.get("customer_id", None)
event_id = args.get("event_id", None)
if not event_type:
raise ValueError("event type not specified")
if not timestamp:
raise ValueError("timestamp not specified")
if not customer_id:
raise ValueError("customer id not specified")
if not event_id:
raise ValueError("event id not specified")
result = get_pcap(
type=event_type, timestamp=timestamp, customerID=customer_id, eventID=event_id
)
return fileResult(f"{event_id}_pcap", result.content)
def find_nids_events_command(args: Dict[str, Any]) -> CommandResults:
# noinspection PyTypeChecker
result = find_n_i_d_s_events(
skipFutureEvents=args.get("skip_future_events", None),
exclude=args.get("exclude", None),
eventIdentifier=argToList(args.get("event_identifier", None)),
locationID=argToList(args.get("location_id", None)),
severity=argToList(args.get("severity", None)),
customer=argToList(args.get("customer", None)),
alarmID=argToList(args.get("alarm_id", None)),
attackCategoryID=argToList(args.get("attack_category_id", None)),
sourceGeoCountry=argToList(args.get("source_geo_country", None)),
destinationGeoCountry=argToList(args.get("destination_geo_country", None)),
geoCountry=argToList(args.get("geo_country", None)),
properties=str_to_dict(args.get("properties", None)),
exactMatchProperties=args.get("exact_match_properties", None),
sensorID=argToList(args.get("sensor_id", None)),
subCriteria=argToList(args.get("sub_criteria", None)),
signature=argToList(args.get("signature", None)),
lastUpdatedTimestamp=args.get("last_updated_timestamp", None),
indexStartTime=args.get("index_start_time", None),
indexEndTime=args.get("index_end_time", None),
destinationIP=argToList(args.get("destination_ip", None)),
sourceIP=argToList(args.get("source_ip", None)),
ip=argToList(args.get("ip", None)),
destinationPort=argToList(args.get("destination_port", None)),
sourcePort=argToList(args.get("source_port", None)),
port=argToList(args.get("port", None)),
minSeverity=args.get("min_severity", None),
maxSeverity=args.get("max_severity", None),
limit=args.get("limit", 25),
offset=args.get("offset", None),
includeDeleted=args.get("include_deleted", None),
startTimestamp=args.get("start_timestamp", "-24hours"),
endTimestamp=args.get("end_timestamp", "now"),
sortBy=argToList(args.get("sort_by", None)),
includeFlags=argToList(args.get("include_flags", None)),
excludeFlags=argToList(args.get("exclude_flags", None)),
)
return CommandResults(
readable_output=pretty_print_events(dict(result), "# Find NIDS Events\n"),
outputs_prefix="Argus.NIDS",
outputs=result,
raw_response=result,
)
def list_nids_events_command(args: Dict[str, Any]) -> CommandResults:
result = list_n_i_d_s_events(
customerID=args.get("customer_id", None),
signature=args.get("signature", None),
ip=args.get("ip", None),
startTimestamp=args.get("start_timestamp", None),
endTimestamp=args.get("end_timestamp", None),
limit=args.get("limit", None),
offset=args.get("offset", None),
)
return CommandResults(
readable_output=pretty_print_events(dict(result), "# List NIDS Events\n"),
outputs_prefix="Argus.NIDS",
outputs=result,
raw_response=result,
)
def search_records_command(args: Dict[str, Any]) -> CommandResults:
query = args.get("query", None)
if not query:
raise ValueError("query not specified")
# noinspection PyTypeChecker
result = search_records(
query=query,
aggregateResult=args.get("aggregate_result", None),
includeAnonymousResults=args.get("include_anonymous_results", None),
rrClass=argToList(args.get("rr_class", None)),
rrType=argToList(args.get("rr_type", None)),
customerID=argToList(args.get("customer_id", None)),
tlp=argToList((args.get("tlp", None))),
limit=args.get("limit", 25),
offset=args.get("offset", None),
)
return CommandResults(
readable_output=tableToMarkdown("PDNS records", result["data"]),
outputs_prefix="Argus.PDNS",
outputs=result,
raw_response=result,
)
def fetch_observations_for_domain_command(args: Dict[str, Any]) -> CommandResults:
fqdn = args.get("fqdn", None)
if not fqdn:
raise ValueError("fqdn not specified")
result = fetch_observations_for_domain(fqdn=fqdn)
return CommandResults(
readable_output=tableToMarkdown(
f'Domain observations for "{fqdn}"', result["data"]
),
outputs_prefix="Argus.ObservationsDomain",
outputs=result,
raw_response=result,
)
def fetch_observations_for_i_p_command(args: Dict[str, Any]) -> CommandResults:
ip = args.get("ip", None)
if not ip:
raise ValueError("ip not specified")
result = fetch_observations_for_i_p(ip=ip)
return CommandResults(
readable_output=tableToMarkdown(f'IP observations for "{ip}"', result["data"]),
outputs_prefix="Argus.ObservationsIP",
outputs=result,
raw_response=result,
)
""" MAIN FUNCTION """
def main() -> None:
logging.getLogger("argus_cli").setLevel("WARNING")
first_fetch_period = parse_first_fetch(
demisto.params().get("first_fetch", "-1 day")
)
set_argus_settings(
demisto.params().get("api_key"),
demisto.params().get("api_url"),
handle_proxy(),
demisto.params().get("insecure", None),
)
demisto.debug(f"Command being called is {demisto.command()}")
try:
if demisto.command() == "test-module":
# This is the call made when pressing the integration Test button.
return_results(test_module_command())
elif demisto.command() == "fetch-incidents":
# Set and define the fetch incidents command to run after activated via integration settings.
next_run, incidents = fetch_incidents(
last_run=demisto.getLastRun(),
first_fetch_period=first_fetch_period,
limit=demisto.params().get("max_fetch", 25),
min_severity=demisto.params().get("min_severity", "low"),
)
demisto.setLastRun(next_run)
demisto.incidents(incidents)
elif demisto.command() == "argus-add-case-tag":
return_results(add_case_tag_command(demisto.args()))
elif demisto.command() == "argus-add-comment":
return_results(add_comment_command(demisto.args()))
elif demisto.command() == "argus-advanced-case-search":
return_results(advanced_case_search_command(demisto.args()))
elif demisto.command() == "argus-close-case":
return_results(close_case_command(demisto.args()))
elif demisto.command() == "argus-create-case":
return_results(create_case_command(demisto.args()))
elif demisto.command() == "argus-delete-case":
return_results(delete_case_command(demisto.args()))
elif demisto.command() == "argus-delete-comment":
return_results(delete_comment_command(demisto.args()))
elif demisto.command() == "argus-download-attachment":
return_results(download_attachment_command(demisto.args()))
elif demisto.command() == "argus-edit-comment":
return_results(edit_comment_command(demisto.args()))
elif demisto.command() == "argus-get-attachment":
return_results(get_attachment_command(demisto.args()))
elif demisto.command() == "argus-get-case-metadata-by-id":
return_results(get_case_metadata_by_id_command(demisto.args()))
elif demisto.command() == "argus-list-case-attachments":
return_results(list_case_attachments_command(demisto.args()))
elif demisto.command() == "argus-list-case-tags":
return_results(list_case_tags_command(demisto.args()))
elif demisto.command() == "argus-list-case-comments":
return_results(list_case_comments_command(demisto.args()))
elif demisto.command() == "argus-remove-case-tag-by-id":
return_results(remove_case_tag_by_id_command(demisto.args()))
elif demisto.command() == "argus-remove-case-tag-by-key-value":
return_results(remove_case_tag_by_key_value_command(demisto.args()))
elif demisto.command() == "argus-update-case":
return_results(update_case_command(demisto.args()))
elif demisto.command() == "argus-get-event":
return_results(get_event_command(demisto.args()))
elif demisto.command() == "argus-get-events-for-case":
return_results(get_events_for_case_command(demisto.args()))
elif demisto.command() == "argus-find-aggregated-events":
return_results(find_aggregated_events_command(demisto.args()))
elif demisto.command() == "argus-list-aggregated-events":
return_results(list_aggregated_events_command(demisto.args()))
elif demisto.command() == "argus-get-payload":
return_results(get_payload_command(demisto.args()))
elif demisto.command() == "argus-get-pcap":
return_results(get_pcap_command(demisto.args()))
elif demisto.command() == "argus-find-nids-events":
return_results(find_nids_events_command(demisto.args()))
elif demisto.command() == "argus-list-nids-events":
return_results(list_nids_events_command(demisto.args()))
elif demisto.command() == "argus-pdns-search-records":
return_results(search_records_command(demisto.args()))
elif demisto.command() == "argus-fetch-observations-for-domain":
return_results(fetch_observations_for_domain_command(demisto.args()))
elif demisto.command() == "argus-fetch-observations-for-ip":
return_results(fetch_observations_for_i_p_command(demisto.args()))
# Log exceptions and return errors
except Exception as e:
demisto.error(traceback.format_exc()) # print the traceback
return_error(
f"Failed to execute {demisto.command()} command.\nError:\n{str(e)}"
)
""" ENTRY POINT """
if __name__ in ("__main__", "__builtin__", "builtins"):
main()
| 1.84375 | 2 |
03.py | SnowWolf75/aoc-2020 | 0 | 5759 | #!/usr/bin/env python3
import sys, os
import unittest
from lib.common import *
filename = "inputs/2020_12_03_input.txt"
class day03:
def __init__(self):
pass
class day03part1(day03):
def solve(self, args):
pass
class day03part2(day03):
def solve(self, args):
pass
class examples(unittest.TestCase):
def test_examples_part1(self):
day3 = day03part1()
        # self.assertTrue()
def test_examples_part2(self):
day3 = day03part2()
        # self.assertTrue()
class solutions(unittest.TestCase):
def test_part1(self):
day3 = day03part1()
def test_part2(self):
day3 = day03part2()
| 3.09375 | 3 |
scripts/examples/tools/capturebat.py | fortinet/ips-bph-framework | 21 | 5760 | <gh_stars>10-100
# Tool Imports
from bph.tools.windows.capturebat import BphCaptureBat as CaptureBat
# Core Imports
from bph.core.server.template import BphTemplateServer as TemplateServer
from bph.core.sample import BphSample as Sample
from bph.core.sample import BphLabFile as LabFile
from bph.core.session import BphSession as Session
session = Session(project_name='blackhat_arsenal_2019')
session.start()
templateserver = TemplateServer()
templateserver.start()
capturebat = CaptureBat()
capturebat.cleanup()
capturebat.execute()
capturebat.start()
capturebat.execute(delay=15)
capturebat.stop()
capturebat.execute()
capturebat.collect()
capturebat.execute()
capturebat.files() | 1.625 | 2 |
dymos/utils/test/test_hermite.py | kaushikponnapalli/dymos | 104 | 5761 | import unittest
import numpy as np
from numpy.testing import assert_almost_equal
from dymos.utils.hermite import hermite_matrices
class TestHermiteMatrices(unittest.TestCase):
def test_quadratic(self):
# Interpolate with values and rates provided at [-1, 1] in tau space
tau_given = [-1.0, 1.0]
tau_eval = np.linspace(-1, 1, 100)
# In time space use the boundaries [-2, 2]
dt_dtau = 4.0 / 2.0
# Provide values for y = t**2 and its time-derivative
y_given = [4.0, 4.0]
ydot_given = [-4.0, 4.0]
# Get the hermite matrices.
Ai, Bi, Ad, Bd = hermite_matrices(tau_given, tau_eval)
# Interpolate y and ydot at tau_eval points in tau space.
y_i = np.dot(Ai, y_given) + dt_dtau * np.dot(Bi, ydot_given)
ydot_i = (1.0 / dt_dtau) * np.dot(Ad, y_given) + np.dot(Bd, ydot_given)
# Compute our function as a point of comparison.
y_computed = (tau_eval * dt_dtau)**2
ydot_computed = 2.0 * (tau_eval * dt_dtau)
# Check results
assert_almost_equal(y_i, y_computed)
assert_almost_equal(ydot_i, ydot_computed)
def test_cubic(self):
        # Interpolate with values and rates provided at [-1, 0, 1] in tau space
tau_given = [-1.0, 0.0, 1.0]
tau_eval = np.linspace(-1, 1, 101)
# In time space use the boundaries [-2, 2]
dt_dtau = 4.0 / 2.0
        # Provide values for y = t**3 and its time-derivative
y_given = [-8.0, 0.0, 8.0]
ydot_given = [12.0, 0.0, 12.0]
# Get the hermite matrices.
Ai, Bi, Ad, Bd = hermite_matrices(tau_given, tau_eval)
# Interpolate y and ydot at tau_eval points in tau space.
y_i = np.dot(Ai, y_given) + dt_dtau * np.dot(Bi, ydot_given)
ydot_i = (1.0 / dt_dtau) * np.dot(Ad, y_given) + np.dot(Bd, ydot_given)
# Compute our function as a point of comparison.
y_computed = (tau_eval * dt_dtau)**3
ydot_computed = 3.0 * (tau_eval * dt_dtau)**2
# Check results
assert_almost_equal(y_i, y_computed)
assert_almost_equal(ydot_i, ydot_computed)
if __name__ == '__main__': # pragma: no cover
unittest.main()
| 2.859375 | 3 |
xl_auth/settings.py | libris/xl_auth | 7 | 5762 | # -*- coding: utf-8 -*-
"""Application configuration."""
from __future__ import absolute_import, division, print_function, unicode_literals
import os
from . import __author__, __name__, __version__
class Config(object):
"""Base configuration."""
SERVER_NAME = os.environ.get('SERVER_NAME', None)
PREFERRED_URL_SCHEME = os.environ.get('PREFERRED_URL_SCHEME', 'http')
APP_NAME = __name__
APP_VERSION = __version__
APP_AUTHOR = __author__
JSON_AS_ASCII = False
SECRET_KEY = os.environ.get('XL_AUTH_SECRET', 'secret-key') # TODO: Change me
APP_DIR = os.path.abspath(os.path.dirname(__file__)) # This directory.
PROJECT_ROOT = os.path.abspath(os.path.join(APP_DIR, os.pardir))
BCRYPT_LOG_ROUNDS = 13
DEBUG_TB_ENABLED = False # Disable Debug toolbar.
DEBUG_TB_INTERCEPT_REDIRECTS = False
CACHE_TYPE = 'simple' # Can be "memcached", "redis", etc.
SQLALCHEMY_TRACK_MODIFICATIONS = False
WEBPACK_MANIFEST_PATH = 'webpack/manifest.json'
BABEL_DEFAULT_LOCALE = os.environ.get('BABEL_DEFAULT_LOCALE', 'sv')
BABEL_DEFAULT_TIMEZONE = 'utc'
EMAIL_DEFAULT_FROM = os.environ.get('EMAIL_DEFAULT_FROM', '<EMAIL>')
EMAIL_HOST = os.environ.get('EMAIL_HOST', 'smtp.kb.se')
EMAIL_PORT = int(os.environ.get('EMAIL_PORT', '25'))
EMAIL_TIMEOUT = int(os.environ.get('EMAIL_TIMEOUT', '5'))
OAUTH2_PROVIDER_TOKEN_EXPIRES_IN = 36000
XL_AUTH_MAX_ACTIVE_PASSWORD_RESETS = 2
XL_AUTH_FAILED_LOGIN_TIMEFRAME = 60 * 60
XL_AUTH_FAILED_LOGIN_MAX_ATTEMPTS = 7
class ProdConfig(Config):
"""Production configuration."""
ENV = 'prod'
DEBUG = False
SQLALCHEMY_DATABASE_URI = os.environ.get('SQLALCHEMY_DATABASE_URI',
'postgresql://localhost/example')
DEBUG_TB_ENABLED = False # Disable Debug toolbar.
class DevConfig(Config):
"""Development configuration."""
ENV = 'dev'
DEBUG = True
DB_NAME = 'dev.db'
# Put the db file in project root
DB_PATH = os.path.join(Config.PROJECT_ROOT, DB_NAME)
SQLALCHEMY_DATABASE_URI = 'sqlite:///{0}'.format(DB_PATH)
DEBUG_TB_ENABLED = True
CACHE_TYPE = 'simple' # Can be "memcached", "redis", etc.
class TestConfig(Config):
"""Test configuration."""
TESTING = True
DEBUG = True
SQLALCHEMY_DATABASE_URI = 'sqlite://'
# For faster tests; needs at least 4 to avoid "ValueError: Invalid rounds".
BCRYPT_LOG_ROUNDS = 4
WTF_CSRF_ENABLED = False # Allows form testing.
EMAIL_BACKEND = 'flask_emails.backends.DummyBackend'
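# Illustrative usage sketch (not part of the original module): a Flask app
# factory would typically pick one of these classes at creation time, for
# example app.config.from_object(ProdConfig) in production or DevConfig for
# local work; the exact wiring in xl_auth's factory is an assumption here.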
| 1.71875 | 2 |
tests/mrp/test_mrp_auth.py | evanreichard/pyatv | 0 | 5763 | <gh_stars>0
"""Functional authentication tests with fake MRP Apple TV."""
import inspect
from aiohttp.test_utils import AioHTTPTestCase, unittest_run_loop
import pyatv
from pyatv import exceptions
from pyatv.const import Protocol
from pyatv.conf import MrpService, AppleTV
from pyatv.mrp.server_auth import PIN_CODE, CLIENT_IDENTIFIER, CLIENT_CREDENTIALS
from tests.fake_device import FakeAppleTV
class MrpAuthFunctionalTest(AioHTTPTestCase):
def setUp(self):
AioHTTPTestCase.setUp(self)
self.service = MrpService(
CLIENT_IDENTIFIER, self.fake_atv.get_port(Protocol.MRP)
)
self.conf = AppleTV("127.0.0.1", "Apple TV")
self.conf.add_service(self.service)
async def tearDownAsync(self):
if inspect.iscoroutinefunction(self.handle.close):
await self.handle.close()
else:
self.handle.close()
await super().tearDownAsync()
async def get_application(self, loop=None):
self.fake_atv = FakeAppleTV(self.loop)
self.state, self.usecase = self.fake_atv.add_service(Protocol.MRP)
return self.fake_atv.app
@unittest_run_loop
async def test_pairing_with_device(self):
self.handle = await pyatv.pair(self.conf, Protocol.MRP, self.loop)
self.assertIsNone(self.service.credentials)
self.assertTrue(self.handle.device_provides_pin)
await self.handle.begin()
self.handle.pin(PIN_CODE)
await self.handle.finish()
self.assertTrue(self.handle.has_paired)
self.assertTrue(self.state.has_paired)
self.assertIsNotNone(self.service.credentials)
@unittest_run_loop
async def test_pairing_with_existing_credentials(self):
self.service.credentials = CLIENT_CREDENTIALS
self.handle = await pyatv.pair(self.conf, Protocol.MRP, self.loop)
self.assertFalse(self.handle.has_paired)
self.assertIsNotNone(self.service.credentials)
self.assertTrue(self.handle.device_provides_pin)
await self.handle.begin()
self.handle.pin(PIN_CODE)
await self.handle.finish()
self.assertTrue(self.handle.has_paired)
self.assertTrue(self.state.has_paired)
self.assertIsNotNone(self.service.credentials)
@unittest_run_loop
async def test_pairing_with_bad_pin(self):
self.handle = await pyatv.pair(self.conf, Protocol.MRP, self.loop)
self.assertIsNone(self.service.credentials)
self.assertTrue(self.handle.device_provides_pin)
await self.handle.begin()
self.handle.pin(PIN_CODE + 1)
with self.assertRaises(exceptions.PairingError):
await self.handle.finish()
self.assertFalse(self.handle.has_paired)
self.assertFalse(self.state.has_paired)
self.assertIsNone(self.service.credentials)
@unittest_run_loop
async def test_authentication(self):
self.service.credentials = CLIENT_CREDENTIALS
self.handle = await pyatv.connect(self.conf, self.loop)
self.assertTrue(self.state.has_authenticated)
| 2.125 | 2 |
tests_app/tests/functional/key_constructor/bits/models.py | maryokhin/drf-extensions | 1 | 5764 | # -*- coding: utf-8 -*-
from django.db import models
class KeyConstructorUserProperty(models.Model):
name = models.CharField(max_length=100)
class Meta:
app_label = 'tests_app'
class KeyConstructorUserModel(models.Model):
property = models.ForeignKey(KeyConstructorUserProperty)
class Meta:
app_label = 'tests_app' | 2 | 2 |
ngraph_onnx/onnx_importer/utils/numeric_limits.py | cliveseldon/ngraph-onnx | 0 | 5765 | # ******************************************************************************
# Copyright 2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
import numbers
from typing import Union
class NumericLimits(object):
"""Class providing interface to extract numerical limits for given data type."""
@staticmethod
def _get_number_limits_class(dtype):
# type: (np.dtype) -> Union[IntegralLimits, FloatingPointLimits]
"""Return specialized class instance with limits set for given data type.
:param dtype: The data type we want to check limits for.
:return: The specialized class instance providing numeric limits.
"""
data_type = dtype.type
value = data_type(1)
if isinstance(value, numbers.Integral):
return IntegralLimits(data_type)
elif isinstance(value, numbers.Real):
return FloatingPointLimits(data_type)
else:
raise ValueError('NumericLimits: unsupported data type: <{}>.'.format(dtype.type))
@staticmethod
def _get_dtype(dtype): # type: (Union[np.dtype, int, float]) -> np.dtype
"""Return numpy dtype object wrapping provided data type.
:param dtype: The data type to be wrapped.
:return: The numpy dtype object.
"""
return dtype if isinstance(dtype, np.dtype) else np.dtype(dtype)
@classmethod
def max(cls, dtype): # type: (np.dtype) -> Union[int, float]
"""Return maximum value that can be represented in given data type.
:param dtype: The data type we want to check maximum value for.
:return: The maximum value.
"""
return cls._get_number_limits_class(cls._get_dtype(dtype)).max
@classmethod
def min(cls, dtype): # type: (np.dtype) -> Union[int, float]
"""Return minimum value that can be represented in given data type.
:param dtype: The data type we want to check minimum value for.
:return: The minimum value.
"""
return cls._get_number_limits_class(cls._get_dtype(dtype)).min
class FloatingPointLimits(object):
"""Class providing access to numeric limits for floating point data types."""
def __init__(self, data_type): # type: (type) -> None
self.data_type = data_type
@property
def max(self): # type: () -> float
"""Provide maximum representable value by stored data type.
:return: The maximum value.
"""
return np.finfo(self.data_type).max
@property
def min(self): # type: () -> float
"""Provide minimum representable value by stored data type.
:return: The minimum value.
"""
return np.finfo(self.data_type).min
class IntegralLimits(object):
"""Class providing access to numeric limits for integral data types."""
def __init__(self, data_type): # type: (type) -> None
self.data_type = data_type
@property
def max(self): # type: () -> int
"""Provide maximum representable value by stored data type.
:return: The maximum value.
"""
return np.iinfo(self.data_type).max
@property
def min(self): # type: () -> int
"""Provide minimum representable value by stored data type.
:return: The minimum value.
"""
return np.iinfo(self.data_type).min
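# Minimal usage sketch (illustrative, not part of the original module).
# NumericLimits accepts plain types as well as np.dtype instances, because
# _get_dtype wraps its argument in np.dtype before dispatching.
if __name__ == '__main__':
    print(NumericLimits.max(np.dtype(np.int8)))     # 127
    print(NumericLimits.min(np.dtype(np.float32)))  # about -3.4028235e+38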
| 2.484375 | 2 |
curvpack/utils.py | AbhilashReddyM/curvpack | 8 | 5766 | <filename>curvpack/utils.py
import numpy as np
# The first two functions are modified from MNE surface project. LIcense follows
# This software is OSI Certified Open Source Software. OSI Certified is a certification mark of the Open Source Initiative.
#
# Copyright (c) 2011-2019, authors of MNE-Python. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
# Neither the names of MNE-Python authors nor the names of any contributors may be used to endorse or promote products derived from this software without specific prior written permission.
#
# This software is provided by the copyright holders and contributors "as is" and any express or implied warranties, including, but not limited to, the implied warranties of merchantability and fitness for a particular purpose are disclaimed. In no event shall the copyright owner or contributors be liable for any direct, indirect, incidental, special, exemplary, or consequential damages (including, but not limited to, procurement of substitute goods or services; loss of use, data, or profits; or business interruption) however caused and on any theory of liability, whether in contract, strict liability, or tort (including negligence or otherwise) arising in any way out of the use of this software, even if advised of the possibility of such damage.
def triangle_neighbors(tris, npts):
"""Efficiently compute vertex neighboring triangles.
Returns the triangles in the 1-ring of a given vertex
"""
# this code replaces the following, but is faster (vectorized):
#
# this['neighbor_tri'] = [list() for _ in xrange(this['np'])]
# for p in xrange(this['ntri']):
# verts = this['tris'][p]
# this['neighbor_tri'][verts[0]].append(p)
# this['neighbor_tri'][verts[1]].append(p)
# this['neighbor_tri'][verts[2]].append(p)
# this['neighbor_tri'] = [np.array(nb, int) for nb in this['neighbor_tri']]
#
verts = tris.ravel()
counts = np.bincount(verts, minlength=npts)
reord = np.argsort(verts)
tri_idx = np.unravel_index(reord, (len(tris), 3))[0]
idx = np.cumsum(np.r_[0, counts])
# the sort below slows it down a bit, but is needed for equivalence
neighbor_tri = np.array([np.sort(tri_idx[v1:v2])
for v1, v2 in zip(idx[:-1], idx[1:])])
return neighbor_tri
def get_surf_neighbors(tris,neighbor_tri, k):
"""Get vertices of 1-ring
"""
verts = tris[neighbor_tri[k]]
verts = np.setdiff1d(verts, [k], assume_unique=False)
nneighbors = len(verts)
return verts
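# Illustrative example (not part of the original file): for a mesh consisting
# of the single triangle [0, 1, 2], every vertex has that one face in its
# 1-ring, and the 1-ring vertices of vertex 0 are [1, 2]:
#   tris = np.array([[0, 1, 2]])
#   nbr  = triangle_neighbors(tris, 3)   # -> [[0], [0], [0]]
#   get_surf_neighbors(tris, nbr, 0)     # -> array([1, 2])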
def GetVertexNormals(vertices,faces,FaceNormals,e0,e1,e2):
"""
INPUT:
Vertices : vertices
Faces : vertex connectivity
FaceNormals : Outer Normal per face, having magnitude equal to area of face
e0,e1,e2 : edge vectors
OUTPUT:
VertNormals : Unit normal at the vertex
"""
VertNormals =np.zeros(vertices.shape)
#edge lengths
de0=np.sqrt(e0[:,0]**2+e0[:,1]**2+e0[:,2]**2)
de1=np.sqrt(e1[:,0]**2+e1[:,1]**2+e1[:,2]**2)
de2=np.sqrt(e2[:,0]**2+e2[:,1]**2+e2[:,2]**2)
L2=np.c_[de0**2,de1**2,de2**2]
#Calculate weights according to N.Max [1999] for normals
wfv1=FaceNormals/(L2[:,1]*L2[:,2])[:,np.newaxis]
wfv2=FaceNormals/(L2[:,2]*L2[:,0])[:,np.newaxis]
wfv3=FaceNormals/(L2[:,0]*L2[:,1])[:,np.newaxis]
# #Calculate the weights according to MWA for normals
# wfv1=FaceNormals*np.arcsin(2*Af/(de1*de2))[:,np.newaxis]
# wfv2=FaceNormals*np.arcsin(2*Af/(de2*de0))[:,np.newaxis]
# wfv3=FaceNormals*np.arcsin(2*Af/(de0*de1))[:,np.newaxis]
verts=faces.T[0]
for j in [0,1,2]:
VertNormals[:,j]+=np.bincount(verts,minlength=vertices.shape[0],weights=wfv1[:,j])
verts=faces.T[1]
for j in [0,1,2]:
VertNormals[:,j]+=np.bincount(verts,minlength=vertices.shape[0],weights=wfv2[:,j])
verts=faces.T[2]
for j in [0,1,2]:
VertNormals[:,j]+=np.bincount(verts,minlength=vertices.shape[0],weights=wfv3[:,j])
VertNormals=normr(VertNormals)
return VertNormals
def fastcross(x, y):
"""Compute cross product between list of 3D vectors
Input
x : Mx3 array
y : Mx3 array
Output
z : Mx3 array Cross product of x and y.
"""
if max([x.shape[0], y.shape[0]]) >= 500:
return np.c_[x[:, 1] * y[:, 2] - x[:, 2] * y[:, 1],
x[:, 2] * y[:, 0] - x[:, 0] * y[:, 2],
x[:, 0] * y[:, 1] - x[:, 1] * y[:, 0]]
else:
return np.cross(x, y)
def normr(vec):
"""
    Normalizes an array of vectors row-wise, e.g. to convert an array of vectors to unit vectors
"""
return vec/np.sqrt((vec**2).sum(axis=1))[:,np.newaxis]
| 1.851563 | 2 |
tests/unit/core/streams/test_stream_zero.py | tethys-platform/tethys | 2 | 5767 | # Copyright 2020 Konstruktor, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import platform
import time
from unittest import mock
from unittest.mock import patch, call
from pytest import fixture
from tethys.core.pipes.pipe_zero import ZeroPipe
from tethys.core.sessions.sess_zero import ZeroSession
from tethys.core.stations.station_zero import ZeroStation
from tethys.core.streams.stream_zero import ZeroStream
from tethys.core.transports.transport_zero import ZeroTransport
class MockTransport(ZeroTransport):
def __init__(self):
pass
connect = mock.MagicMock()
disconnect = mock.MagicMock()
class MockSession(ZeroSession):
closing_mode = None
def __init__(self):
self._closed = False
@property
def closed(self):
return self._closed
class MockStation(ZeroStation):
def __init__(self):
pass
class TestZeroStream:
@staticmethod
def teardown_method():
MockTransport.connect.reset_mock()
MockTransport.disconnect.reset_mock()
@fixture
def pipe(self):
pipe = mock.MagicMock(spec=ZeroPipe)
return pipe
@fixture
def session(self):
session = MockSession()
return session
@fixture
def transport(self):
return MockTransport()
@fixture
def station(self):
return MockStation()
@fixture
def stream(self, pipe, session, transport):
return ZeroStream(pipe, session, transport)
# init
def test_init_with_transport_cb(self, pipe, session, transport):
def get_transport(_):
return transport
get_transport = mock.MagicMock(side_effect=get_transport)
stream = ZeroStream(pipe, session, get_transport)
assert stream.transport == transport
# conn context
def test_new_connection_context(self, stream):
with stream.connection_context():
MockTransport.connect.assert_called_once_with(stream)
MockTransport.disconnect.assert_not_called()
MockTransport.disconnect.assert_called_once_with(stream)
def test_old_connection_context(self, stream):
MockTransport._connections[stream.id] = stream
with stream.connection_context():
MockTransport.connect.assert_not_called()
MockTransport.disconnect.assert_not_called()
# heartbeat
def test_heartbeat_fail_delay(self, stream):
assert stream.heartbeat_fail_delay == stream.DEFAULT_HEARTBEAT_FAIL_DELAY
stream.station = mock.MagicMock(spec=ZeroStation)
stream.station.heartbeat_fail_delay = 0
assert stream.heartbeat_fail_delay == stream.DEFAULT_HEARTBEAT_FAIL_DELAY
stream.station.heartbeat_fail_delay = 12345
assert stream.heartbeat_fail_delay == 12345
def test_busy_false(self, stream):
stream.refresh = mock.MagicMock()
stream.station = mock.MagicMock(spec=ZeroStation)
stream.station.heartbeat_fail_delay = 1
stream.heartbeat_ts = time.time() - 10
assert stream.is_busy is False
assert stream.refresh.call_count == 1
def test_busy_true(self, stream):
stream.refresh = mock.MagicMock()
stream.station = mock.MagicMock(spec=ZeroStation)
stream.station.heartbeat_fail_delay = 1000
stream.heartbeat_ts = time.time()
assert stream.is_busy is True
assert stream.refresh.call_count == 1
def test_heartbeat(self, stream):
stream.save = mock.MagicMock()
with patch("time.time", lambda: 12345):
stream.heartbeat()
assert stream.heartbeat_ts == 12345
stream.save.assert_called_once_with(save_dependency=False)
# open
def test_open(self, stream):
stream.save = mock.MagicMock()
stream.closed = True
assert stream.open() is stream
assert stream.closed is False
stream.save.assert_called_once_with(save_dependency=False)
def test_open_no_commit(self, stream):
stream.save = mock.MagicMock()
stream.closed = True
assert stream.open(save=False) is stream
assert stream.closed is False
stream.save.assert_not_called()
# close
def test_close(self, stream):
stream.save = mock.MagicMock()
assert stream.close() is stream
assert stream.closed is True
stream.save.assert_called_once_with(save_dependency=False)
def test_close_no_commit(self, stream):
stream.save = mock.MagicMock()
assert stream.close(save=False) is stream
assert stream.closed is True
stream.save.assert_not_called()
# read
def test_read(self, stream):
data = ["packet", 0, {}, "", None] + [None, "packet"] * 5
result_data = list(filter(lambda x: x is not None, data))
iter_data = iter(data)
def recv_cb(*_, **__):
try:
return next(iter_data)
except StopIteration:
return ...
connection_context = mock.MagicMock()
stream.connection_context = mock.MagicMock(
side_effect=lambda: connection_context
)
stream.transport.recv = mock.MagicMock(side_effect=recv_cb)
result = []
for item in stream.read(test_kw=1):
if item is ...:
break
result.append(item)
if platform.python_implementation().lower() == "pypy":
gc.collect()
assert result == result_data
assert stream.connection_context.call_count == 1
assert connection_context.__enter__.call_count == 1
assert connection_context.__exit__.call_count == 1
stream.transport.recv.assert_has_calls(
[call(stream, wait_timeout=None, test_kw=1) for _ in data]
)
def test_read_n_packets(self, stream):
iter_data = iter([None, "packet"] + ["packet"] * 10)
def recv_cb(*_, **__):
try:
return next(iter_data)
except StopIteration:
return ...
connection_context = mock.MagicMock()
stream.connection_context = mock.MagicMock(
side_effect=lambda: connection_context
)
stream.transport.recv = mock.MagicMock(side_effect=recv_cb)
result = []
for item in stream.read(count=5, test_kw=1):
if item is ...:
break
result.append(item)
assert result == ["packet"] * 5
assert stream.connection_context.call_count == 1
assert connection_context.__enter__.call_count == 1
assert connection_context.__exit__.call_count == 1
stream.transport.recv.assert_has_calls(
[call(stream, wait_timeout=None, test_kw=1) for _ in range(6)]
)
def test_read_while_stream_open(self, stream):
iter_data = iter(range(10))
def recv_cb(*_, **__):
try:
return next(iter_data)
except StopIteration:
return ...
connection_context = mock.MagicMock()
stream.connection_context = mock.MagicMock(
side_effect=lambda: connection_context
)
stream.transport.recv = mock.MagicMock(side_effect=recv_cb)
result = []
for item in stream.read(test_kw=1):
if item == 4:
stream.closed = True
if item is ...:
break
result.append(item)
assert result == list(range(5))
assert stream.connection_context.call_count == 1
assert connection_context.__enter__.call_count == 1
assert connection_context.__exit__.call_count == 1
stream.transport.recv.assert_has_calls(
[call(stream, wait_timeout=None, test_kw=1) for _ in range(5)]
)
def test_read_while_sess_open(self, stream):
stream.session._closed = True
iter_data = iter([0, 1, 2, 3, None, 4])
def recv_cb(*_, **__):
try:
return next(iter_data)
except StopIteration:
return ...
connection_context = mock.MagicMock()
stream.connection_context = mock.MagicMock(
side_effect=lambda: connection_context
)
stream.transport.recv = mock.MagicMock(side_effect=recv_cb)
result = []
for item in stream.read(test_kw=1):
if item is ...:
break
result.append(item)
assert result == list(range(4))
assert stream.connection_context.call_count == 1
assert connection_context.__enter__.call_count == 1
assert connection_context.__exit__.call_count == 1
stream.transport.recv.assert_has_calls(
[call(stream, wait_timeout=None, test_kw=1) for _ in range(5)]
)
def test_read_when_station_changed(self, stream, station):
iter_data = iter(range(10))
def recv_cb(*_, **__):
try:
return next(iter_data)
except StopIteration:
return ...
connection_context = mock.MagicMock()
stream.connection_context = mock.MagicMock(
side_effect=lambda: connection_context
)
stream.transport.recv = mock.MagicMock(side_effect=recv_cb)
result = []
for item in stream.read(test_kw=1):
if item == 4:
stream.station = station
if item is ...:
break
result.append(item)
assert result == list(range(5))
assert stream.connection_context.call_count == 1
assert connection_context.__enter__.call_count == 1
assert connection_context.__exit__.call_count == 1
stream.transport.recv.assert_has_calls(
[call(stream, wait_timeout=None, test_kw=1) for _ in range(5)]
)
def test_read_none(self, stream):
iter_data = iter([None, "packet"] + ["packet"] * 10)
def recv_cb(*_, **__):
try:
return next(iter_data)
except StopIteration:
return ...
connection_context = mock.MagicMock()
stream.connection_context = mock.MagicMock(
side_effect=lambda: connection_context
)
stream.transport.recv = mock.MagicMock(side_effect=recv_cb)
result = []
for item in stream.read(wait_timeout=1, test_kw=1):
if item is ...:
break
result.append(item)
assert result == []
assert stream.connection_context.call_count == 1
assert connection_context.__enter__.call_count == 1
assert connection_context.__exit__.call_count == 1
stream.transport.recv.assert_called_once_with(stream, wait_timeout=1, test_kw=1)
# write
def test_write(self, stream):
connection_context = mock.MagicMock()
stream.connection_context = mock.MagicMock(
side_effect=lambda: connection_context
)
stream.transport.send = mock.MagicMock()
stream.write("packet", test_kw=1)
stream.transport.send.assert_called_once_with(stream, "packet", test_kw=1)
assert stream.connection_context.call_count == 1
assert connection_context.__enter__.call_count == 1
assert connection_context.__exit__.call_count == 1
def test_write_many(self, stream):
connection_context = mock.MagicMock()
stream.connection_context = mock.MagicMock(
side_effect=lambda: connection_context
)
stream.transport.send = mock.MagicMock()
stream.write("packet", many=True, test_kw=1)
stream.transport.send.assert_has_calls(
[call(stream, i, test_kw=1) for i in "packet"]
)
assert stream.connection_context.call_count == 1
assert connection_context.__enter__.call_count == 1
assert connection_context.__exit__.call_count == 1
def test_write_when_closed(self, stream):
connection_context = mock.MagicMock()
stream.connection_context = mock.MagicMock(
side_effect=lambda: connection_context
)
stream.transport.send = mock.MagicMock()
stream.closed = True
stream.write("packet", test_kw=1)
stream.transport.send.assert_not_called()
stream.connection_context.assert_not_called()
connection_context.__enter__.assert_not_called()
connection_context.__exit__.assert_not_called()
def test_write_out(self, stream):
connection_context = mock.MagicMock()
stream.connection_context = mock.MagicMock(
side_effect=lambda: connection_context
)
stream.transport.send = mock.MagicMock()
stream.closed = True
stream.pipe.node_b = "<out>"
stream.write("packet", test_kw=1)
stream.transport.send.assert_called_once_with(stream, "packet", test_kw=1)
assert stream.connection_context.call_count == 1
assert connection_context.__enter__.call_count == 1
assert connection_context.__exit__.call_count == 1
# ack
def test_ack(self, stream):
stream.transport.ack = mock.MagicMock()
stream.ack("message", test_kw=1)
stream.transport.ack.assert_called_once_with(stream, "message", test_kw=1)
def test_ack_closed(self, stream):
stream.closed = True
stream.transport.ack = mock.MagicMock()
stream.ack("message", test_kw=1)
stream.transport.ack.assert_not_called()
# redirect
def test_redirect(self, stream, station):
station.save = mock.MagicMock()
station.stream_lock_ttl = 0
stream.save = mock.MagicMock()
stream.redirect_to(station)
assert stream.station == station
station.save.assert_called_once_with(save_dependency=False)
stream.save.assert_called_once_with(save_dependency=False)
# open/close context
def test_context(self, stream):
stream.open = mock.MagicMock()
stream.close = mock.MagicMock()
with stream:
stream.open.assert_called_once_with(save=False)
stream.close.assert_not_called()
stream.close.assert_called_once_with(save=False)
| 1.804688 | 2 |
amd64-linux/lib/ppc64_simple_components.py | qiyancos/Simics-3.0.31 | 1 | 5768 | ## Copyright 2005-2007 Virtutech AB
##
## The contents herein are Source Code which are a subset of Licensed
## Software pursuant to the terms of the Virtutech Simics Software
## License Agreement (the "Agreement"), and are being distributed under
## the Agreement. You should have received a copy of the Agreement with
## this Licensed Software; if not, please contact Virtutech for a copy
## of the Agreement prior to using this Licensed Software.
##
## By using this Source Code, you agree to be bound by all of the terms
## of the Agreement, and use of this Source Code is subject to the terms
## the Agreement.
##
## This Source Code and any derivatives thereof are provided on an "as
## is" basis. Virtutech makes no warranties with respect to the Source
## Code or any derivatives thereof and disclaims all implied warranties,
## including, without limitation, warranties of merchantability and
## fitness for a particular purpose and non-infringement.
from sim_core import *
from components import *
import time
# Generic Simple System for PPC64 Processors
class ppc64_simple_base_component(component_object):
basename = 'system'
connectors = {
'uart0' : {'type' : 'serial', 'direction' : 'down',
'empty_ok' : True, 'hotplug' : True, 'multi' : False},
'uart1' : {'type' : 'serial', 'direction' : 'down',
'empty_ok' : True, 'hotplug' : True, 'multi' : False}}
def __init__(self, parse_obj):
component_object.__init__(self, parse_obj)
self.o.cpu = []
self.map_offset = 0xf0000000
self.time_of_day = "2006-06-06 06:06:06 UTC"
def get_cpu_frequency(self, idx):
return self.freq_mhz
def set_cpu_frequency(self, val, idx):
if self.obj.configured:
return Sim_Set_Illegal_Value
self.freq_mhz = val
return Sim_Set_Ok
def get_memory_megs(self, idx):
return self.memory_megs
def set_memory_megs(self, val, idx):
if self.obj.configured:
return Sim_Set_Illegal_Value
self.memory_megs = val
return Sim_Set_Ok
def get_map_offset(self, idx):
return self.map_offset
def set_map_offset(self, val, idx):
if self.obj.configured:
return Sim_Set_Illegal_Value
self.map_offset = val
return Sim_Set_Ok
def get_time_of_day(self, idx):
return self.time_of_day
def set_time_of_day(self, val, idx):
if self.obj.configured:
return Sim_Set_Illegal_Value
try:
time.strptime(val, "%Y-%m-%d %H:%M:%S %Z")
except Exception, msg:
SIM_attribute_error(str(msg))
return Sim_Set_Illegal_Value
self.time_of_day = val
return Sim_Set_Ok
def add_objects(self, cpu):
self.o.phys_mem = pre_obj('phys_mem', 'memory-space')
self.o.ram_image = pre_obj('memory_image', 'image')
self.o.ram_image.size = self.memory_megs * 0x100000
self.o.ram = pre_obj('memory', 'ram')
self.o.ram.image = self.o.ram_image
self.o.pic = pre_obj('pic$', 'open-pic')
self.o.pic.irq_devs = [cpu]
self.o.irq = pre_obj('irq$', 'i8259x2')
self.o.irq.irq_dev = self.o.pic
self.o.uart0 = pre_obj('uart0', 'NS16550')
self.o.uart0.irq_dev = self.o.irq
self.o.uart0.irq_level = 4
self.o.uart0.xmit_time = 1000
self.o.uart1 = pre_obj('uart1', 'NS16550')
self.o.uart1.irq_dev = self.o.irq
self.o.uart1.irq_level = 3
self.o.uart1.xmit_time = 1000
self.o.of = pre_obj('of', 'ppc-of')
self.o.of.cpu = self.o.cpu[0]
self.o.of.memory_megs = self.memory_megs
self.o.of.entry_point = 0x7000000
self.o.of.map_offset = self.map_offset
self.o.of.time_of_day = self.time_of_day
self.o.broadcast_bus = pre_obj('broadcast_bus', 'ppc-broadcast-bus')
self.o.empty = pre_obj('empty', 'empty-device')
self.o.pci_io = pre_obj('pci_io', 'memory-space')
self.o.hfs = pre_obj('hfs$', 'hostfs')
self.o.phys_mem.map = [
[0x00000000, self.o.ram, 0, 0x0, self.memory_megs * 0x100000],
[self.map_offset + 0x08000000, self.o.pci_io, 0, 0x0, 0x100000],
[self.map_offset + 0x0f660000, self.o.hfs, 0, 0, 0x10],
[self.map_offset + 0x0fc00000, self.o.pic, 0, 0x0, 0x100000]]
self.o.pci_io.map = [
[0x020, self.o.irq, 0, 0x20, 0x1],
[0x021, self.o.irq, 0, 0x21, 0x1],
[0x0a0, self.o.irq, 0, 0xa0, 0x1],
[0x0a1, self.o.irq, 0, 0xa1, 0x1],
# Linux probes for UARTs at 0x2e8 and 0x3e8 too, so provide
# empty mappings there
[0x2e8, self.o.empty, 0, 0x0, 0x8],
# two NS16550, at the traditional addresses
[0x2f8, self.o.uart1, 0, 0x0, 0x8, None, 0, 1],
[0x3e8, self.o.empty, 0, 0x0, 0x8],
[0x3f8, self.o.uart0, 0, 0x0, 0x8, None, 0, 1],
# no UARTs here either
[0x890, self.o.empty, 0, 0x0, 0x8],
[0x898, self.o.empty, 0, 0x0, 0x8]]
def add_connector_info(self):
self.connector_info['uart0'] = [None, self.o.uart0, self.o.uart0.name]
self.connector_info['uart1'] = [None, self.o.uart1, self.o.uart1.name]
def connect_serial(self, connector, link, console):
if connector == 'uart0':
if link:
self.o.uart0.link = link
else:
self.o.uart0.console = console
elif connector == 'uart1':
if link:
self.o.uart1.link = link
else:
self.o.uart1.console = console
def disconnect_serial(self, connector):
if connector == 'uart0':
self.o.uart0.link = None
self.o.uart0.console = None
elif connector == 'uart1':
self.o.uart1.link = None
self.o.uart1.console = None
def get_clock(self):
return self.o.cpu[0]
def get_processors(self):
return self.o.cpu
ppc64_simple_attributes = [
['cpu_frequency', Sim_Attr_Required, 'f',
'Processor frequency in MHz.'],
['memory_megs', Sim_Attr_Required, 'i',
'The amount of RAM in megabytes.'],
['map_offset', Sim_Attr_Optional, 'i',
'Base address for device mappings. ' \
'Offsets at 4 GB and above will not work'],
['time_of_day', Sim_Attr_Optional, 's',
'Date and time to initialize the OpenFirmware RTC to']]
| 2.171875 | 2 |
Front-end (Django)/course/migrations/0002_subject_number_of_questions.py | shadow0403bsr/AutomatedGradingSoftware | 0 | 5769 | # Generated by Django 3.0.1 on 2020-02-15 06:02
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('course', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='subject',
name='Number_Of_Questions',
field=models.IntegerField(default=0),
),
]
| 1.734375 | 2 |
cornflow/tests/unit/test_dags.py | pchtsp/corn | 5 | 5770 | """
Unit test for the DAG endpoints
"""
# Import from libraries
import json
# Import from internal modules
from cornflow.shared.const import EXEC_STATE_CORRECT, EXEC_STATE_MANUAL
from cornflow.tests.const import (
DAG_URL,
EXECUTION_URL_NORUN,
CASE_PATH,
INSTANCE_URL,
)
from cornflow.tests.unit.test_executions import TestExecutionsDetailEndpointMock
class TestDagEndpoint(TestExecutionsDetailEndpointMock):
def test_manual_dag_service_user(self):
with open(CASE_PATH) as f:
payload = json.load(f)
data = dict(
data=payload["data"],
state=EXEC_STATE_MANUAL,
)
payload_to_send = {**self.payload, **data}
token = self.create_service_user()
self.items_to_check = [
"config",
"name",
"description",
"schema",
"instance_id",
"state",
]
idx = self.create_new_row(
url=DAG_URL,
model=self.model,
payload=payload_to_send,
check_payload=True,
token=token,
)
def test_manual_dag_planner_user(self):
with open(CASE_PATH) as f:
payload = json.load(f)
data = dict(
data=payload["data"],
state=EXEC_STATE_MANUAL,
)
payload_to_send = {**self.payload, **data}
token = self.create_planner()
self.items_to_check = [
"config",
"name",
"description",
"schema",
"instance_id",
"state",
]
idx = self.create_new_row(
url=DAG_URL,
model=self.model,
payload=payload_to_send,
check_payload=True,
token=token,
)
class TestDagDetailEndpoint(TestExecutionsDetailEndpointMock):
def test_put_dag(self):
idx = self.create_new_row(EXECUTION_URL_NORUN, self.model, self.payload)
with open(CASE_PATH) as f:
payload = json.load(f)
data = dict(
data=payload["data"],
state=EXEC_STATE_CORRECT,
)
payload_to_check = {**self.payload, **data}
token = self.create_service_user()
data = self.update_row(
url=DAG_URL + idx + "/",
payload_to_check=payload_to_check,
change=data,
token=token,
check_payload=False,
)
def test_get_dag(self):
idx = self.create_new_row(EXECUTION_URL_NORUN, self.model, self.payload)
token = self.create_service_user()
data = self.get_one_row(
url=DAG_URL + idx + "/",
token=token,
check_payload=False,
payload=self.payload,
)
instance_data = self.get_one_row(
url=INSTANCE_URL + self.payload["instance_id"] + "/data/",
payload=dict(),
check_payload=False,
)
self.assertEqual(data["data"], instance_data["data"])
self.assertEqual(data["config"], self.payload["config"])
return
| 2.46875 | 2 |
nets/resnet.py | xwshi/faster-rcnn-keras | 0 | 5771 | #-------------------------------------------------------------#
#   The ResNet50 network part
#-------------------------------------------------------------#
import keras.backend as K
from keras import backend as K
from keras import initializers, layers, regularizers
from keras.engine import InputSpec, Layer
from keras.initializers import random_normal
from keras.layers import (Activation, Add, AveragePooling2D, Conv2D, MaxPooling2D, TimeDistributed,
ZeroPadding2D)
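#-------------------------------------------------------------#
#   Frozen BatchNormalization: gamma/beta and the running
#   statistics are created with trainable=False, so the layer
#   always applies the stored moving mean/std in call().
#-------------------------------------------------------------#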
class BatchNormalization(Layer):
def __init__(self, epsilon=1e-3, axis=-1,
weights=None, beta_init='zero', gamma_init='one',
gamma_regularizer=None, beta_regularizer=None, **kwargs):
self.supports_masking = True
self.beta_init = initializers.get(beta_init)
self.gamma_init = initializers.get(gamma_init)
self.epsilon = epsilon
self.axis = axis
self.gamma_regularizer = regularizers.get(gamma_regularizer)
self.beta_regularizer = regularizers.get(beta_regularizer)
self.initial_weights = weights
super(BatchNormalization, self).__init__(**kwargs)
def build(self, input_shape):
self.input_spec = [InputSpec(shape=input_shape)]
shape = (input_shape[self.axis],)
self.gamma = self.add_weight(shape,
initializer=self.gamma_init,
regularizer=self.gamma_regularizer,
name='{}_gamma'.format(self.name),
trainable=False)
self.beta = self.add_weight(shape,
initializer=self.beta_init,
regularizer=self.beta_regularizer,
name='{}_beta'.format(self.name),
trainable=False)
self.running_mean = self.add_weight(shape, initializer='zero',
name='{}_running_mean'.format(self.name),
trainable=False)
self.running_std = self.add_weight(shape, initializer='one',
name='{}_running_std'.format(self.name),
trainable=False)
if self.initial_weights is not None:
self.set_weights(self.initial_weights)
del self.initial_weights
self.built = True
def call(self, x, mask=None):
assert self.built, 'Layer must be built before being called'
input_shape = K.int_shape(x)
reduction_axes = list(range(len(input_shape)))
del reduction_axes[self.axis]
broadcast_shape = [1] * len(input_shape)
broadcast_shape[self.axis] = input_shape[self.axis]
if sorted(reduction_axes) == range(K.ndim(x))[:-1]:
x_normed = K.batch_normalization(
x, self.running_mean, self.running_std,
self.beta, self.gamma,
epsilon=self.epsilon)
else:
broadcast_running_mean = K.reshape(self.running_mean, broadcast_shape)
broadcast_running_std = K.reshape(self.running_std, broadcast_shape)
broadcast_beta = K.reshape(self.beta, broadcast_shape)
broadcast_gamma = K.reshape(self.gamma, broadcast_shape)
x_normed = K.batch_normalization(
x, broadcast_running_mean, broadcast_running_std,
broadcast_beta, broadcast_gamma,
epsilon=self.epsilon)
return x_normed
def get_config(self):
config = {'epsilon': self.epsilon,
'axis': self.axis,
'gamma_regularizer': self.gamma_regularizer.get_config() if self.gamma_regularizer else None,
'beta_regularizer': self.beta_regularizer.get_config() if self.beta_regularizer else None}
base_config = super(BatchNormalization, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def identity_block(input_tensor, kernel_size, filters, stage, block):
filters1, filters2, filters3 = filters
conv_name_base = 'res' + str(stage) + block + '_branch'
bn_name_base = 'bn' + str(stage) + block + '_branch'
x = Conv2D(filters1, (1, 1), kernel_initializer=random_normal(stddev=0.02), name=conv_name_base + '2a')(input_tensor)
x = BatchNormalization(name=bn_name_base + '2a')(x)
x = Activation('relu')(x)
x = Conv2D(filters2, kernel_size, padding='same', kernel_initializer=random_normal(stddev=0.02), name=conv_name_base + '2b')(x)
x = BatchNormalization(name=bn_name_base + '2b')(x)
x = Activation('relu')(x)
x = Conv2D(filters3, (1, 1), kernel_initializer=random_normal(stddev=0.02), name=conv_name_base + '2c')(x)
x = BatchNormalization(name=bn_name_base + '2c')(x)
x = layers.add([x, input_tensor])
x = Activation('relu')(x)
return x
def conv_block(input_tensor, kernel_size, filters, stage, block, strides=(2, 2)):
filters1, filters2, filters3 = filters
conv_name_base = 'res' + str(stage) + block + '_branch'
bn_name_base = 'bn' + str(stage) + block + '_branch'
x = Conv2D(filters1, (1, 1), strides=strides, kernel_initializer=random_normal(stddev=0.02),
name=conv_name_base + '2a')(input_tensor)
x = BatchNormalization(name=bn_name_base + '2a')(x)
x = Activation('relu')(x)
x = Conv2D(filters2, kernel_size, padding='same', kernel_initializer=random_normal(stddev=0.02),
name=conv_name_base + '2b')(x)
x = BatchNormalization(name=bn_name_base + '2b')(x)
x = Activation('relu')(x)
x = Conv2D(filters3, (1, 1), kernel_initializer=random_normal(stddev=0.02),
name=conv_name_base + '2c')(x)
x = BatchNormalization(name=bn_name_base + '2c')(x)
shortcut = Conv2D(filters3, (1, 1), strides=strides, kernel_initializer=random_normal(stddev=0.02),
name=conv_name_base + '1')(input_tensor)
shortcut = BatchNormalization(name=bn_name_base + '1')(shortcut)
x = layers.add([x, shortcut])
x = Activation('relu')(x)
return x
def ResNet50(inputs):
#-----------------------------------#
# 假设输入进来的图片是600,600,3
#-----------------------------------#
img_input = inputs
# 600,600,3 -> 300,300,64
x = ZeroPadding2D((3, 3))(img_input)
x = Conv2D(64, (7, 7), strides=(2, 2), name='conv1')(x)
x = BatchNormalization(name='bn_conv1')(x)
x = Activation('relu')(x)
# 300,300,64 -> 150,150,64
x = MaxPooling2D((3, 3), strides=(2, 2), padding="same")(x)
# 150,150,64 -> 150,150,256
x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1))
x = identity_block(x, 3, [64, 64, 256], stage=2, block='b')
x = identity_block(x, 3, [64, 64, 256], stage=2, block='c')
# 150,150,256 -> 75,75,512
x = conv_block(x, 3, [128, 128, 512], stage=3, block='a')
x = identity_block(x, 3, [128, 128, 512], stage=3, block='b')
x = identity_block(x, 3, [128, 128, 512], stage=3, block='c')
x = identity_block(x, 3, [128, 128, 512], stage=3, block='d')
# 75,75,512 -> 38,38,1024
x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a')
x = identity_block(x, 3, [256, 256, 1024], stage=4, block='b')
x = identity_block(x, 3, [256, 256, 1024], stage=4, block='c')
x = identity_block(x, 3, [256, 256, 1024], stage=4, block='d')
x = identity_block(x, 3, [256, 256, 1024], stage=4, block='e')
x = identity_block(x, 3, [256, 256, 1024], stage=4, block='f')
    # Finally we obtain a shared feature layer of shape 38,38,1024
return x
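# Illustrative sketch (not part of the original file): wrapping the shared
# feature extractor in a standalone Model; the Input/Model imports below are
# assumptions, they are not imported at the top of this file.
#   from keras.layers import Input
#   from keras.models import Model
#   inputs = Input(shape=(600, 600, 3))
#   base_layers = ResNet50(inputs)        # output shape (None, 38, 38, 1024)
#   backbone = Model(inputs, base_layers)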
def identity_block_td(input_tensor, kernel_size, filters, stage, block):
nb_filter1, nb_filter2, nb_filter3 = filters
conv_name_base = 'res' + str(stage) + block + '_branch'
bn_name_base = 'bn' + str(stage) + block + '_branch'
x = TimeDistributed(Conv2D(nb_filter1, (1, 1), kernel_initializer='normal'), name=conv_name_base + '2a')(input_tensor)
x = TimeDistributed(BatchNormalization(), name=bn_name_base + '2a')(x)
x = Activation('relu')(x)
x = TimeDistributed(Conv2D(nb_filter2, (kernel_size, kernel_size), kernel_initializer='normal',padding='same'), name=conv_name_base + '2b')(x)
x = TimeDistributed(BatchNormalization(), name=bn_name_base + '2b')(x)
x = Activation('relu')(x)
x = TimeDistributed(Conv2D(nb_filter3, (1, 1), kernel_initializer='normal'), name=conv_name_base + '2c')(x)
x = TimeDistributed(BatchNormalization(), name=bn_name_base + '2c')(x)
x = Add()([x, input_tensor])
x = Activation('relu')(x)
return x
def conv_block_td(input_tensor, kernel_size, filters, stage, block, strides=(2, 2)):
nb_filter1, nb_filter2, nb_filter3 = filters
conv_name_base = 'res' + str(stage) + block + '_branch'
bn_name_base = 'bn' + str(stage) + block + '_branch'
x = TimeDistributed(Conv2D(nb_filter1, (1, 1), strides=strides, kernel_initializer='normal'), name=conv_name_base + '2a')(input_tensor)
x = TimeDistributed(BatchNormalization(), name=bn_name_base + '2a')(x)
x = Activation('relu')(x)
x = TimeDistributed(Conv2D(nb_filter2, (kernel_size, kernel_size), padding='same', kernel_initializer='normal'), name=conv_name_base + '2b')(x)
x = TimeDistributed(BatchNormalization(), name=bn_name_base + '2b')(x)
x = Activation('relu')(x)
x = TimeDistributed(Conv2D(nb_filter3, (1, 1), kernel_initializer='normal'), name=conv_name_base + '2c')(x)
x = TimeDistributed(BatchNormalization(), name=bn_name_base + '2c')(x)
shortcut = TimeDistributed(Conv2D(nb_filter3, (1, 1), strides=strides, kernel_initializer='normal'), name=conv_name_base + '1')(input_tensor)
shortcut = TimeDistributed(BatchNormalization(), name=bn_name_base + '1')(shortcut)
x = Add()([x, shortcut])
x = Activation('relu')(x)
return x
def classifier_layers(x):
# num_rois, 14, 14, 1024 -> num_rois, 7, 7, 2048
x = conv_block_td(x, 3, [512, 512, 2048], stage=5, block='a', strides=(2, 2))
# num_rois, 7, 7, 2048 -> num_rois, 7, 7, 2048
x = identity_block_td(x, 3, [512, 512, 2048], stage=5, block='b')
# num_rois, 7, 7, 2048 -> num_rois, 7, 7, 2048
x = identity_block_td(x, 3, [512, 512, 2048], stage=5, block='c')
# num_rois, 7, 7, 2048 -> num_rois, 1, 1, 2048
x = TimeDistributed(AveragePooling2D((7, 7)), name='avg_pool')(x)
return x
| 2.734375 | 3 |
app/api/deps.py | congdh/fastapi-realworld | 0 | 5772 | <gh_stars>0
from typing import Generator
from fastapi import Depends, HTTPException
from fastapi.security import APIKeyHeader
from sqlalchemy.orm import Session
from starlette import status
from app import crud, models
from app.core import security
from app.db.session import SessionLocal
JWT_TOKEN_PREFIX = "Token" # noqa: S105
def get_db() -> Generator:
db = SessionLocal()
try:
yield db
finally:
db.close()
def authorization_header_token(
api_key: str = Depends(APIKeyHeader(name="Authorization")),
) -> str:
try:
token_prefix, token = api_key.split(" ")
except ValueError:
raise HTTPException(
status_code=status.HTTP_403_FORBIDDEN,
detail="unsupported authorization type",
)
if token_prefix != JWT_TOKEN_PREFIX:
raise HTTPException(
status_code=status.HTTP_403_FORBIDDEN,
detail="unsupported authorization type",
)
return token
async def get_current_user(
    token: str = Depends(authorization_header_token), db: Session = Depends(get_db)
) -> models.User:
user_id = security.get_user_id_from_token(token=token)
user = crud.user.get_user_by_id(db, int(user_id))
if not user:
raise HTTPException(status_code=404, detail="User not found")
return user
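# Illustrative usage sketch (not part of the original module): a route that
# requires the "Authorization: Token <jwt>" header resolved by the dependency
# chain above; the router object is an assumption.
#   @router.get("/user")
#   def read_current_user(
#       user: models.User = Depends(get_current_user),
#   ) -> models.User:
#       return user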
| 2.234375 | 2 |
src/raiden_libs/contract_info.py | netcriptus/raiden-services | 13 | 5773 | <filename>src/raiden_libs/contract_info.py
import sys
from typing import Dict, List, Tuple
import structlog
from eth_utils import to_canonical_address
from raiden.utils.typing import Address, BlockNumber, ChainID, Optional
from raiden_contracts.contract_manager import (
ContractDevEnvironment,
ContractManager,
contracts_precompiled_path,
get_contracts_deployment_info,
)
log = structlog.get_logger(__name__)
CONTRACT_MANAGER = ContractManager(contracts_precompiled_path())
def get_contract_addresses_and_start_block(
chain_id: ChainID,
contracts: List[str],
address_overwrites: Dict[str, Address],
development_environment: ContractDevEnvironment = ContractDevEnvironment.DEMO,
contracts_version: Optional[str] = None,
) -> Tuple[Dict[str, Address], BlockNumber]:
"""Returns contract addresses and start query block for a given chain and contracts version.
The default contracts can be overwritten by the additional parameters.
Args:
chain_id: The chain id to look for deployed contracts.
contracts: The list of contracts which should be considered
address_overwrites: Dict of addresses which should be used instead of
the ones in the requested deployment.
contracts_version: The version of the contracts to use.
    Returns: A tuple of the contract addresses and the start block for the given information
"""
contract_data = get_contracts_deployment_info(
chain_id=chain_id,
version=contracts_version,
development_environment=development_environment,
)
if not contract_data:
log.error(
"No deployed contracts were found at the default registry",
contracts_version=contracts_version,
)
sys.exit(1)
# Get deployed addresses for those contracts which have no overwrites
addresses = {
c: (
address_overwrites.get(c)
or to_canonical_address(contract_data["contracts"][c]["address"])
)
for c in contracts
}
# Set start block to zero if any contract addresses are overwritten
if any(address_overwrites.values()):
start_block = BlockNumber(0)
else:
start_block = BlockNumber(
max(0, min(contract_data["contracts"][c]["block_number"] for c in contracts))
)
return addresses, start_block
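# Illustrative call sketch, not part of the original module.  The chain id and
# contract name below are assumptions chosen for demonstration only.
def _example_lookup() -> Tuple[Dict[str, Address], BlockNumber]:
    return get_contract_addresses_and_start_block(
        chain_id=ChainID(5),
        contracts=["TokenNetworkRegistry"],
        address_overwrites={},
    )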
| 2.328125 | 2 |
meta_dataset/models/functional_classifiers.py | letyrodridc/meta-dataset | 0 | 5774 | # coding=utf-8
# Copyright 2022 The Meta-Dataset Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python2,python3
"""Classifier-related code."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gin.tf
from meta_dataset.models import functional_backbones
import tensorflow.compat.v1 as tf
def linear_classifier_forward_pass(embeddings, w_fc, b_fc, cosine_classifier,
cosine_logits_multiplier, use_weight_norm):
"""Passes embeddings through the linear layer defined by w_fc and b_fc.
Args:
embeddings: A Tensor of size [batch size, embedding dim].
w_fc: A Tensor of size [embedding dim, num outputs].
b_fc: Either None, or a Tensor of size [num outputs] or []. If
cosine_classifier is False, it can not be None.
cosine_classifier: A bool. If true, a cosine classifier is used which does
not require the bias b_fc.
cosine_logits_multiplier: A float. Only used if cosine_classifier is True,
and multiplies the resulting logits.
use_weight_norm: A bool. Whether weight norm was used. If so, then if using
cosine classifier, normalize only the embeddings but not the weights.
Returns:
logits: A Tensor of size [batch size, num outputs].
"""
if cosine_classifier:
# Each column of the weight matrix may be interpreted as a class
    # representation (of the same dimensionality as the embedding space). The
# logit for an embedding vector belonging to that class is the cosine
# similarity between that embedding and that class representation.
embeddings = tf.nn.l2_normalize(embeddings, axis=1, epsilon=1e-3)
if not use_weight_norm:
# Only normalize the weights if weight norm was not used.
w_fc = tf.nn.l2_normalize(w_fc, axis=0, epsilon=1e-3)
logits = tf.matmul(embeddings, w_fc)
# Scale the logits as passing numbers in [-1, 1] to softmax is not very
# expressive.
logits *= cosine_logits_multiplier
else:
assert b_fc is not None
logits = tf.matmul(embeddings, w_fc) + b_fc
return logits
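# Minimal standalone sketch, not part of the library: the cosine-classifier
# branch above expressed in plain NumPy, to make the normalize-then-scale logic
# explicit.  The multiplier value is an arbitrary example.
def _cosine_logits_reference(embeddings_np, w_fc_np, multiplier=10.0):
  import numpy as np
  e = embeddings_np / (np.linalg.norm(embeddings_np, axis=1, keepdims=True) + 1e-3)
  w = w_fc_np / (np.linalg.norm(w_fc_np, axis=0, keepdims=True) + 1e-3)
  # Scaled cosine similarities play the role of logits for the softmax.
  return multiplier * e.dot(w)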
@gin.configurable
def linear_classifier(embeddings, num_classes, cosine_classifier,
cosine_logits_multiplier, use_weight_norm, weight_decay):
"""Forward pass through a linear classifier, or possibly a cosine classifier.
Args:
embeddings: A Tensor of size [batch size, embedding dim].
num_classes: An integer; the dimension of the classification.
cosine_classifier: A bool. If true, a cosine classifier is used, which does
not require a bias.
cosine_logits_multiplier: A float. Only used if cosine_classifier is True,
and multiplies the resulting logits.
use_weight_norm: A bool. Whether weight norm was used. If so, then if using
cosine classifier, normalize only the embeddings but not the weights.
weight_decay: A float; the scalar multiple on the L2 regularization of the
weight matrix.
Returns:
logits: A Tensor of size [batch size, num outputs].
"""
embedding_dims = embeddings.get_shape().as_list()[-1]
if use_weight_norm:
# A variable to keep track of whether the initialization has already
# happened.
data_dependent_init_done = tf.get_variable(
'data_dependent_init_done',
initializer=0,
dtype=tf.int32,
trainable=False)
w_fc = tf.get_variable(
'w_fc', [embedding_dims, num_classes],
initializer=tf.random_normal_initializer(0, 0.05),
trainable=True)
# This init is temporary as it needs to be done in a data-dependent way.
# It will be overwritten during the first forward pass through this layer.
g = tf.get_variable(
'g',
dtype=tf.float32,
initializer=tf.ones([num_classes]),
trainable=True)
b_fc = None
if not cosine_classifier:
# Also initialize a bias.
b_fc = tf.get_variable(
'b_fc', initializer=tf.zeros([num_classes]), trainable=True)
def _do_data_dependent_init():
"""Returns ops for the data-dependent init of g and maybe b_fc."""
w_fc_normalized = tf.nn.l2_normalize(w_fc.read_value(), [0])
output_init = tf.matmul(embeddings, w_fc_normalized)
mean_init, var_init = tf.nn.moments(output_init, [0])
# Data-dependent init values.
g_init_value = 1. / tf.sqrt(var_init + 1e-10)
ops = [tf.assign(g, g_init_value)]
if not cosine_classifier:
# Also initialize a bias in a data-dependent way.
b_fc_init_value = -mean_init * g_init_value
ops.append(tf.assign(b_fc, b_fc_init_value))
# Mark that the data-dependent initialization is done to prevent it from
# happening again in the future.
ops.append(tf.assign(data_dependent_init_done, 1))
return tf.group(*ops)
# Possibly perform data-dependent init (if it hasn't been done already).
init_op = tf.cond(
tf.equal(data_dependent_init_done, 0), _do_data_dependent_init,
tf.no_op)
with tf.control_dependencies([init_op]):
# Apply weight normalization.
w_fc *= g / tf.sqrt(tf.reduce_sum(tf.square(w_fc), [0]))
# Forward pass through the layer defined by w_fc and b_fc.
logits = linear_classifier_forward_pass(embeddings, w_fc, b_fc,
cosine_classifier,
cosine_logits_multiplier, True)
else:
# No weight norm.
w_fc = functional_backbones.weight_variable([embedding_dims, num_classes],
weight_decay=weight_decay)
b_fc = None
if not cosine_classifier:
# Also initialize a bias.
b_fc = functional_backbones.bias_variable([num_classes])
# Forward pass through the layer defined by w_fc and b_fc.
logits = linear_classifier_forward_pass(embeddings, w_fc, b_fc,
cosine_classifier,
cosine_logits_multiplier, False)
return logits
@gin.configurable
def separate_head_linear_classifier(embeddings, num_classes, dataset_idx,
start_idx, cosine_classifier,
cosine_logits_multiplier, learnable_scale,
weight_decay):
"""A linear classifier with num_sets heads, for different datasets.
Args:
embeddings: A Tensor of size [batch size, embedding dim].
num_classes: A list of integers; the dimension of the classifier layers of
the different heads.
dataset_idx: An int Tensor. The index of the dataset head to use.
start_idx: An int Tensor. The index of the first class of the given dataset.
cosine_classifier: A bool. If true, a cosine classifier is used, which does
not require a bias.
cosine_logits_multiplier: A float. Only used if cosine_classifier is True,
and multiplies the resulting logits.
learnable_scale: A bool. Whether to make the cosine_logits_multiplier a
learnable parameter. Only applies if cosine_classifier is True.
weight_decay: A float; the scalar multiple on the L2 regularization of the
weight matrix.
Returns:
logits: A Tensor of size [batch size, num outputs].
"""
if not cosine_classifier:
raise NotImplementedError('`separate_head_linear_classifier` currently '
'only supports `cosine_classifier` True.')
if learnable_scale:
cosine_logits_multiplier = tf.get_variable(
'cosine_scale',
initializer=cosine_logits_multiplier,
dtype=tf.float32,
trainable=True)
embedding_dims = embeddings.get_shape().as_list()[-1]
w_fc = functional_backbones.weight_variable(
[embedding_dims, sum(num_classes)], weight_decay=weight_decay)
# Select the output "head" to use in the forward pass.
dataset_num_classes = tf.gather(num_classes, dataset_idx)
w_fc = w_fc[:, start_idx:start_idx + dataset_num_classes]
logits = linear_classifier_forward_pass(embeddings, w_fc, None,
cosine_classifier,
cosine_logits_multiplier, False)
return logits
| 2.28125 | 2 |
app.py | Shrinidhi-C/Context-Based-Question-Answering | 16 | 5775 | <gh_stars>10-100
import os
import logging
import threading
import shutil
from datetime import timedelta, datetime
from flask import Flask, render_template, request, session, jsonify, url_for, redirect
from haystack.document_store.elasticsearch import *
from haystack.preprocessor.utils import convert_files_to_dicts
from haystack.preprocessor.cleaning import clean_wiki_text
from haystack import Finder
from haystack.retriever.sparse import ElasticsearchRetriever
from haystack.reader.transformers import TransformersReader
from elasticsearch import Elasticsearch
es = (
Elasticsearch()
) # Replace with Elasticsearch(["http://elasticsearch:9200/"], verify_certs=True) to build docker image
session_time = 60 # Session Timeout in Minutes
app = Flask(__name__)
app.secret_key = "<KEY>"
app.permanent_session_lifetime = timedelta(minutes=session_time)
user_id = 0 # User ID to keep track w.r.t sessions and context data
current_users = dict() # Used to store user id with time of login
user_doc_store = dict() # Document store object of the user id
user_settings = dict() # User settings for GPU and Pre-trained models choice
# Handles pre-processing the context and uploads the pre-processed context to Elasticsearch
# Each user is assigned with a separate Elasticsearch index starting with "user_{user_id}"
# Documents & textual context are deleted from the temp folder named with user_id under users dir after uploading to Es
def pre_process(user_id_key):
uploads_dir = "users/" + str(user_id_key) + "/uploads/"
try:
es_result = es.search(
index="user_" + str(user_id_key), body={"query": {"match_all": {}}}
)
no_docs = len(es_result["hits"]["hits"])
except Exception as e:
print(e)
print("\n no documents in es")
processed = convert_files_to_dicts(
dir_path=uploads_dir, clean_func=clean_wiki_text, split_paragraphs=True
)
for doc in range(len(processed)):
try:
# print("\n Checking for duplicate docs ..")
add_doc = True
for each_doc in range(no_docs):
doc_text = es_result["hits"]["hits"][each_doc]["_source"]["text"]
doc_name = es_result["hits"]["hits"][each_doc]["_source"]["name"]
doc_id = es_result["hits"]["hits"][each_doc]["_id"]
if (
processed[doc]["meta"]["name"] == "context_file.txt"
and doc_name == "context_file.txt"
):
# print("Deleting context file to update with new changes ..")
es.delete(
index="user_" + str(user_id_key), doc_type="_doc", id=doc_id
)
if processed[doc]["text"] == doc_text:
# print("\n There is a duplicate, So this document is not added ..")
add_doc = False
os.remove(uploads_dir + str(processed[doc]["meta"]["name"]))
break
if add_doc:
# print("\n No duplicates found, so adding this to es..")
processed_lst = [processed[doc]]
user_doc_store[user_id_key].write_documents(processed_lst)
os.remove(uploads_dir + str(processed[doc]["meta"]["name"]))
except Exception as e:
print(e)
# print("\n no documents in es")
processed_lst = [processed[doc]]
user_doc_store[user_id_key].write_documents(processed_lst)
os.remove(uploads_dir + str(processed[doc]["meta"]["name"]))
# Handles setting up reader and retriever
def set_finder(user_id_key):
if user_settings[user_id_key]["model"] == "roberta":
model_path = (
"deepset/roberta-base-squad2" # Path of the models hosted in Hugging Face
)
elif user_settings[user_id_key]["model"] == "bert":
model_path = "deepset/bert-large-uncased-whole-word-masking-squad2"
elif user_settings[user_id_key]["model"] == "distilbert":
model_path = "distilbert-base-uncased-distilled-squad"
else:
model_path = "illuin/camembert-base-fquad"
retriever = ElasticsearchRetriever(document_store=user_doc_store[user_id_key])
if user_settings[user_id_key]["gpu"] == "on":
try:
reader = TransformersReader(
model_name_or_path=model_path, tokenizer=model_path, use_gpu=0
)
except Exception as e:
print(e)
print("GPU not available. Inferencing on CPU")
reader = TransformersReader(
model_name_or_path=model_path, tokenizer=model_path, use_gpu=-1
)
else:
reader = TransformersReader(
model_name_or_path=model_path, tokenizer=model_path, use_gpu=-1
)
finder = Finder(reader, retriever)
return finder
# Handles deletion of context data completely from the server after the session time ends and deletes user id from dict
def user_session_timer():
global current_users, session_time
seconds_in_day = 24 * 60 * 60
print("\n User tracker thread started @ ", datetime.now())
while True:
for user_id_key in current_users.copy():
current_time = datetime.now()
user_time = current_users[user_id_key]
difference = current_time - user_time
time_diff = divmod(
difference.days * seconds_in_day + difference.seconds, 60
)
if time_diff[0] >= session_time:
try:
del current_users[user_id_key]
del user_doc_store[user_id_key]
del user_settings[user_id_key]
shutil.rmtree("users/" + str(user_id_key))
es.indices.delete(
index="user_" + str(user_id_key), ignore=[400, 404]
)
except OSError as e:
print("Error: %s - %s." % (e.filename, e.strerror))
# print("\n Deleted user:", user_id_key, " @", datetime.now())
session_timer = threading.Thread(target=user_session_timer)
session_timer.start()
# Handles users w.r.t new session or already in session
@app.route("/")
def home():
global user_id, current_users, session_time
logging.info(
"User connected at "
+ str(datetime.now())
+ " with IP: "
+ str(request.environ["REMOTE_ADDR"])
)
if "user" in session and session["user"] in current_users:
user_id = session["user"]
logged_on = current_users[user_id]
current_time = datetime.now()
diff_min_sec = (
int(datetime.strftime(current_time, "%M"))
- int(datetime.strftime(logged_on, "%M"))
) * 60
diff_sec = int(datetime.strftime(current_time, "%S")) - int(
datetime.strftime(logged_on, "%S")
)
diff_time = diff_min_sec + diff_sec
time_left = (
session_time * 60
) - diff_time # For session timeout on client side
return render_template("index.html", time_left=time_left)
else:
session.permanent = True
current_time = datetime.now()
user_id += 1
current_users[user_id] = current_time
session["user"] = user_id
# print(current_users)
if not os.path.exists("users/"): # Creating user temp dir for uploading context
os.makedirs("users/" + str(user_id))
os.makedirs("users/" + str(user_id) + "/uploads")
else:
os.makedirs("users/" + str(user_id))
os.makedirs("users/" + str(user_id) + "/uploads")
user_doc_store[user_id] = ElasticsearchDocumentStore(
host="localhost", index="user_" + str(user_id)
) # Change host = "elasticsearch" to build docker image
user_settings[user_id] = {
"gpu": "off",
"model": "roberta",
} # Initial user settings
logged_on = current_users[user_id]
current_time = datetime.now()
diff_min_sec = (
int(datetime.strftime(current_time, "%M"))
- int(datetime.strftime(logged_on, "%M"))
) * 60
diff_sec = int(datetime.strftime(current_time, "%S")) - int(
datetime.strftime(logged_on, "%S")
)
diff_time = diff_min_sec + diff_sec
time_left = (
session_time * 60
) - diff_time # For session timeout on client side
return render_template("index.html", time_left=time_left)
# Handles context documents uploads
@app.route("/upload_file", methods=["GET", "POST"])
def upload_file():
global current_users
if "user" in session:
user_id_key = session["user"]
if user_id_key in current_users:
for f in request.files.getlist("file"):
f.save(
os.path.join("users/" + str(user_id_key) + "/uploads", f.filename)
)
pre_process(user_id_key)
return render_template("index.html")
else:
return redirect(url_for("session_timeout"))
else:
return redirect(url_for("session_timeout"))
# Handles context added through the textbox
@app.route("/context", methods=["POST"])
def context():
if "user" in session:
user_id_key = session["user"]
if user_id_key in current_users:
text_context = request.form["context"]
context_file = open(
"users/" + str(user_id_key) + "/uploads/context_file.txt", "w"
)
context_file.write(text_context)
context_file.close()
pre_process(user_id_key)
return jsonify({"output": "" + text_context})
else:
return render_template("session_out.html")
else:
return redirect(url_for("session_timeout"))
# Provides extracted answers for the posted question
@app.route("/question", methods=["POST"])
def question():
if "user" in session:
user_id_key = session["user"]
if user_id_key in current_users:
query_question = request.form["question"]
es_stats = es.indices.stats(index="user_" + str(user_id_key))
user_index_size = es_stats["_all"]["primaries"]["store"]["size_in_bytes"]
if (
user_index_size == 208
): # To check if index in Es is empty. 208 bytes is default index size without docs
return jsonify({"error": "add context"})
finder = set_finder(user_id_key)
answers_dict = finder.get_answers(
question=query_question, top_k_retriever=5, top_k_reader=5
)
unique_answers = list()
output = list()
if len(answers_dict["answers"]) > 0:
for i in range(len(answers_dict["answers"])):
if (
answers_dict["answers"][i]["answer"] is not None
and answers_dict["answers"][i]["answer"] not in unique_answers
):
temp_dict = answers_dict["answers"][i]
remove = (
"score",
"probability",
"offset_start",
"offset_end",
"document_id",
)
unique_answers.append(temp_dict["answer"])
if temp_dict["meta"]["name"] == "context_file.txt":
temp_dict["meta"]["name"] = "Textual Context"
temp_dict["meta"] = temp_dict["meta"]["name"]
output.append(temp_dict)
for key in remove:
if key in temp_dict:
del temp_dict[key]
else:
output = [
{"answer": "No Answers found ..", "context": " ", "meta": " "},
]
return jsonify({"output": output})
else:
return render_template("session_out.html")
# Handles GPU setting changes.
@app.route("/gpu", methods=["POST"])
def gpu():
if "user" in session:
user_id_key = session["user"]
if user_id_key in current_users:
if user_settings[user_id_key]["gpu"] == "on":
user_settings[user_id_key]["gpu"] = "off"
else:
user_settings[user_id_key]["gpu"] = "on"
return jsonify({"output": "gpu status changed"})
# Handles pre-trained model choice setting changes.
@app.route("/models", methods=["POST"])
def models():
if "user" in session:
user_id_key = session["user"]
if user_id_key in current_users:
user_settings[user_id_key]["model"] = request.form["model"]
return jsonify({"output": "model changed"})
# Handles session timeout redirection
@app.route("/session_timeout")
def session_timeout():
return render_template("session_out.html")
# Handles removing of session identifier from session dict, This works only when app tab is open until session completes
@app.route("/session_out", methods=["POST"])
def session_out():
session.pop("user", None)
return redirect(url_for("session_timeout"))
# Comment the below block in case of building a docker image or running on WSGI server like gunicorn
if __name__ == "__main__":
app.run(host="0.0.0.0")
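# Example of serving the app with a WSGI server instead (module and variable
# names match this file; worker count and port are arbitrary choices):
#   gunicorn --workers 2 --bind 0.0.0.0:8000 app:app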
| 2.15625 | 2 |
timevortex/utils/filestorage.py | timevortexproject/timevortex | 0 | 5776 | #!/usr/bin/python3
# -*- coding: utf8 -*-
# -*- Mode: Python; py-indent-offset: 4 -*-
"""File storage adapter for timevortex project"""
import os
from os import listdir, makedirs
from os.path import isfile, join, exists
from time import tzname
from datetime import datetime
import pytz
import dateutil.parser
from django.conf import settings
from django.utils import timezone
from timevortex.utils.globals import LOGGER, KEY_ERROR, KEY_SITE_ID, KEY_VARIABLE_ID, KEY_VALUE, KEY_DATE
from timevortex.utils.globals import KEY_DST_TIMEZONE, KEY_NON_DST_TIMEZONE, SYSTEM_SITE_ID
SETTINGS_FILE_STORAGE_FOLDER = "SETTINGS_FILE_STORAGE_FOLDER"
SETTINGS_DEFAULT_FILE_STORAGE_FOLDER = "/tmp/data/"
def get_lines_number(file_path):
"""Get lines number
"""
return sum(1 for line in open(file_path))
def get_series_per_file(site_folder, file_prefix):
"""Get series per file
"""
series = {}
for filename in listdir(site_folder):
is_file = isfile(join(site_folder, filename))
if is_file and file_prefix in filename:
complete_filename = "%s/%s" % (site_folder, filename)
with open(complete_filename, "r") as filed:
temp_series = filed.readlines()
for line in temp_series:
array_line = line.split("\t")
if len(array_line) >= 2:
series[array_line[1]] = array_line[0]
return series
def get_last_file_name(site_folder, file_prefix):
"""Get last filename
"""
old_date = None
last_filename = ""
for new_filename in listdir(site_folder):
is_file = isfile(join(site_folder, new_filename))
if is_file and file_prefix in new_filename:
old_date, last_filename = update_last_file_name(file_prefix, old_date, last_filename, new_filename)
return last_filename
def update_last_file_name(file_prefix, old_date, last_filename, new_filename):
"""Update last file name
"""
try:
new_date = new_filename.replace(file_prefix, "")
new_date = datetime.strptime(new_date, "%Y-%m-%d")
if old_date is None or new_date > old_date:
return new_date, new_filename
except ValueError:
LOGGER.error("Not right file")
return old_date, last_filename
class FileStorage(object):
"""Class that help us to store and load data over several file"""
def __init__(self, folder_path):
"""Constructor"""
self.folder_path = folder_path
if not exists(self.folder_path):
makedirs(self.folder_path)
def insert_series(self, series):
"""Insert series in DB
:param series: Representation of a series
:type series: dict.
"""
self.insert(series)
def insert(self, message):
"""Insert data in file"""
file_folder = "%s/%s" % (self.folder_path, message[KEY_SITE_ID])
file_date = timezone.localtime(
dateutil.parser.parse(message[KEY_DATE]).replace(tzinfo=pytz.UTC)).strftime("%Y-%m-%d")
if not exists(file_folder):
makedirs(file_folder)
raw_file = "%s/%s.tsv.%s" % (
file_folder, message[KEY_VARIABLE_ID], file_date)
extracted = open(raw_file, "a+")
extracted.write("%s\t%s\t%s\t%s\n" % (
message[KEY_VALUE],
message[KEY_DATE],
message[KEY_DST_TIMEZONE],
message[KEY_NON_DST_TIMEZONE]))
extracted.close()
def insert_error(self, message):
"""Function that store error in errors collection and in log
:param message: Error to insert in DB
:type message: str.
"""
LOGGER.error(message)
message[KEY_VARIABLE_ID] = KEY_ERROR
self.insert(message)
def store_error(self, error):
"""Function that create valid error message
:param error: Mal formed message
:type error: str.
"""
message = {
KEY_VALUE: error,
KEY_VARIABLE_ID: KEY_ERROR,
KEY_SITE_ID: SYSTEM_SITE_ID,
KEY_DATE: datetime.utcnow().isoformat('T'),
KEY_DST_TIMEZONE: tzname[1],
KEY_NON_DST_TIMEZONE: tzname[0]
}
LOGGER.error(error)
self.insert(message)
def get_series(self, site_id, variable_id):
"""Retrieve all series for a variable_id in site_id
"""
element = variable_id
file_prefix = "%s.tsv." % element
site_folder = "%s/%s" % (self.folder_path, site_id)
if exists(site_folder):
series = get_series_per_file(site_folder, file_prefix)
else:
series = {}
return series
def get_last_series(self, site_id, variable_id):
"""Retrieve last value of variable_id in site_id
"""
element = variable_id
file_prefix = "%s.tsv." % element
site_folder = "%s/%s" % (self.folder_path, site_id)
if exists(site_folder):
last_filename = get_last_file_name(site_folder, file_prefix)
last_filename = "%s/%s" % (site_folder, last_filename)
try:
with open(last_filename, "rb") as filed2:
for last in filed2:
pass
except IsADirectoryError:
return None
LOGGER.debug(last) # pylint: disable=I0011,W0631
last = last.decode("utf-8").replace("\n", "") # pylint: disable=I0011,W0631
return {
KEY_VARIABLE_ID: element,
KEY_SITE_ID: site_id,
KEY_VALUE: last.split("\t")[0],
KEY_DATE: last.split("\t")[1],
KEY_DST_TIMEZONE: last.split("\t")[2],
KEY_NON_DST_TIMEZONE: last.split("\t")[3]
}
return None
def get_last_error(self, site_id):
"""Retrieve last error of a site_id file storage
"""
return self.get_last_series(site_id, KEY_ERROR)
def get_number_of_error(self, site_id, day_date):
"""This method retrieve number of error published for a day_date
"""
element = KEY_ERROR
site_folder = "%s/%s" % (self.folder_path, site_id)
filename = "%s.tsv.%s" % (element, day_date)
file_path = "%s/%s" % (site_folder, filename)
if exists(site_folder) and exists(file_path):
return get_lines_number(file_path)
return 0
def get_number_of_series(self, site_id, day_date):
"""This method retrieve number of series published for a day_date
"""
site_folder = "%s/%s" % (self.folder_path, site_id)
series = []
if exists(site_folder):
for filename in listdir(site_folder):
if "%s.tsv" % KEY_ERROR not in filename and day_date in filename:
file_path = "%s/%s" % (site_folder, filename)
var_id = filename.replace(".tsv.%s" % day_date, "")
series_numbers = get_lines_number(file_path)
series.append([var_id, series_numbers])
return series
def set_data_location(self, folder_path):
"""Set data folder space"""
self.folder_path = folder_path
def get_sites_list(self):
"""Get sites list"""
return os.listdir(self.folder_path)
FILE_STORAGE_SPACE = FileStorage(getattr(settings, SETTINGS_FILE_STORAGE_FOLDER, SETTINGS_DEFAULT_FILE_STORAGE_FOLDER))
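# Illustrative usage sketch, not part of the original module.  The folder, site
# and variable ids below are placeholders; the calls mirror the API defined above.
def _example_round_trip():
    storage = FileStorage("/tmp/timevortex-example")
    storage.insert_series({
        KEY_SITE_ID: "site_1",
        KEY_VARIABLE_ID: "temperature",
        KEY_VALUE: "21.5",
        KEY_DATE: datetime.utcnow().isoformat('T'),
        KEY_DST_TIMEZONE: tzname[1],
        KEY_NON_DST_TIMEZONE: tzname[0],
    })
    # Reads back the last value written for this site/variable pair.
    return storage.get_last_series("site_1", "temperature")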
| 2.390625 | 2 |
main_tg.py | olegush/quiz-bot | 0 | 5777 | <reponame>olegush/quiz-bot<filename>main_tg.py
import os
import logging
import logging.config
from functools import partial
from dotenv import load_dotenv
from telegram import Bot, ReplyKeyboardMarkup, ReplyKeyboardRemove
from telegram.ext import (Updater, CommandHandler, MessageHandler,
RegexHandler, ConversationHandler, Filters)
from redis import Redis
from tg_logging import create_logger
from quiz_tools import get_question_and_answer, format_answer, format_question
QUESTION, ATTEMPT = range(2)
def main():
class LoggerTelegramBot(logging.Handler):
def emit(self, record):
log_entry = self.format(record)
bot.send_message(chat_id=chat_id_tg_admin, text=log_entry)
dictLogConfig = {
'version': 1,
'handlers': {
'handler': {
'()': LoggerTelegramBot,
'formatter': 'formatter'
}
},
'loggers': {
'tg_logger': {
'handlers': ['handler'],
'level': 'INFO'
}
},
'formatters': {
'formatter': {
'format': '%(asctime)s - %(levelname)s - %(message)s'
}
}
}
load_dotenv()
chat_id_tg_admin = os.getenv('CHAT_ID_TG_ADMIN')
    token_tg = os.getenv('TOKEN_TG')
    bot = Bot(token=token_tg)
logging.config.dictConfig(dictLogConfig)
logger = logging.getLogger('tg_logger')
handler = LoggerTelegramBot()
logger.addHandler(handler)
rediser = Redis(
host=os.getenv('REDIS_HOST'),
port=os.getenv('REDIS_PORT'),
db=0,
password=os.getenv('REDIS_PWD'))
updater = Updater(token_tg)
dp = updater.dispatcher
logger.info(dp)
conv_handler = ConversationHandler(
entry_points=[CommandHandler('start', start)],
states={
QUESTION: [
RegexHandler('^Выход$', do_exit),
MessageHandler(Filters.text, partial(handle_new_question, rediser))],
ATTEMPT: [
RegexHandler('^Выход$', do_exit),
RegexHandler('^(Новый вопрос|Другой вопрос)$', partial(handle_new_question, rediser)),
RegexHandler('^Показать ответ$', partial(display_answer, rediser)),
MessageHandler(Filters.text, partial(handle_attempt, rediser))],
},
fallbacks=[CommandHandler('cancel', do_exit)]
)
dp.add_handler(conv_handler)
updater.start_polling()
updater.idle()
def do_reply(update, text, keyboard=None):
if keyboard is None:
markup = ReplyKeyboardRemove()
return update.message.reply_text(text, reply_markup=markup)
markup = ReplyKeyboardMarkup(keyboard, resize_keyboard=True)
return update.message.reply_text(text, reply_markup=markup)
def start(bot, update):
do_reply(update, 'Привет знатоку в чате-викторине! Начинаем?', [['Да!']])
return QUESTION
def handle_new_question(rediser, bot, update):
new_question, new_answer = get_question_and_answer()
chat_id = update.message.chat_id
rediser.set(chat_id, new_answer)
do_reply(update, format_question(new_question))
return ATTEMPT
def display_answer(rediser, bot, update):
chat_id = update.message.chat_id
answer = rediser.get(chat_id).decode()
do_reply(update, answer, [['Новый вопрос', 'Выход']])
return QUESTION
def handle_attempt(rediser, bot, update):
chat_id = update.message.chat_id
attempt = update.message.text.strip().lower()
answer = rediser.get(chat_id).decode()
if attempt == format_answer(answer):
text = 'Правильно! \n\n {}'.format(answer)
reply_keyboard = [['Новый вопрос', 'Выход']]
else:
text = 'Неверно! Попробуйте еще раз.'
reply_keyboard = [['Показать ответ', 'Другой вопрос', 'Выход']]
do_reply(update, text, reply_keyboard)
return ATTEMPT
def do_exit(bot, update):
text = 'До скорой встречи! Желаете начать заново? Жмите /start'
do_reply(update, text)
return ConversationHandler.END
if __name__ == '__main__':
main()
| 2.328125 | 2 |
tf_fourier_features/fourier_features_mlp.py | titu1994/tf_fourier_features | 37 | 5778 | import tensorflow as tf
from typing import Optional
from tf_fourier_features import fourier_features
class FourierFeatureMLP(tf.keras.Model):
def __init__(self, units: int, final_units: int, gaussian_projection: Optional[int],
activation: str = 'relu',
final_activation: str = "linear",
num_layers: int = 1,
gaussian_scale: float = 1.0,
use_bias: bool = True, **kwargs):
"""
Fourier Feature Projection model from the paper
[Fourier Features Let Networks Learn High Frequency Functions in Low Dimensional Domains](https://people.eecs.berkeley.edu/~bmild/fourfeat/).
Used to create a multi-layer MLP with optional FourierFeatureProjection layer.
Args:
units: Number of hidden units in the intermediate layers.
final_units: Number of hidden units in the final layer.
activation: Activation in the hidden layers.
final_activation: Activation function of the final layer.
num_layers: Number of layers in the network.
gaussian_projection: Projection dimension for the gaussian kernel in fourier feature
projection layer. Can be None, negative or positive integer.
If None, then fourier feature map layer is not used.
If <=0, uses identity matrix (basic projection) without gaussian kernel.
If >=1, uses gaussian projection matrix of specified dim.
gaussian_scale: Scale of the gaussian kernel in fourier feature projection layer.
Note: If the scale is too small, convergence will slow down and obtain poor results.
If the scale is too large (>50), convergence will be fast but results will be grainy.
Try grid search for scales in the range [10 - 50].
use_bias: Boolean whether to use bias or not.
# References:
- [Fourier Features Let Networks Learn High Frequency Functions in Low Dimensional Domains](https://people.eecs.berkeley.edu/~bmild/fourfeat/)
"""
super().__init__(**kwargs)
layers = []
if gaussian_projection is not None:
layers.append(fourier_features.FourierFeatureProjection(
gaussian_projection=gaussian_projection,
gaussian_scale=gaussian_scale,
**kwargs
))
for _ in range(num_layers - 1):
layers.append(tf.keras.layers.Dense(units, activation=activation, use_bias=use_bias,
bias_initializer='he_uniform', **kwargs))
self.network = tf.keras.Sequential(layers)
self.final_dense = tf.keras.layers.Dense(final_units, activation=final_activation,
use_bias=use_bias, bias_initializer='he_uniform', **kwargs)
def call(self, inputs, training=None, mask=None):
features = self.network(inputs)
output = self.final_dense(features)
return output
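# Illustrative usage sketch, not part of the original module: regress RGB values
# from 2D pixel coordinates, the setting used in the referenced paper.  The
# hyper-parameter values below are arbitrary examples, not recommendations.
def _example_image_regression_model() -> tf.keras.Model:
    model = FourierFeatureMLP(units=256,
                              final_units=3,
                              gaussian_projection=256,
                              final_activation='sigmoid',
                              num_layers=4,
                              gaussian_scale=10.0)
    coords = tf.random.uniform((1024, 2))  # normalized (x, y) positions
    _ = model(coords)                      # builds the model; output shape (1024, 3)
    return model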
| 3.234375 | 3 |
eek/spider.py | fusionbox/eek | 5 | 5779 | <filename>eek/spider.py
import urlparse
import csv
import sys
import re
import collections
import time
import requests
from eek import robotparser # this project's version
from bs4 import BeautifulSoup
try:
import lxml
except ImportError:
HTML_PARSER = None
else:
HTML_PARSER = 'lxml'
encoding_re = re.compile("charset\s*=\s*(\S+?)(;|$)")
html_re = re.compile("text/html")
headers = ['url', 'title', 'description', 'keywords', 'allow', 'disallow',
'noindex', 'meta robots', 'canonical', 'referer', 'status']
def encoding_from_content_type(content_type):
"""
Extracts the charset from a Content-Type header.
>>> encoding_from_content_type('text/html; charset=utf-8')
'utf-8'
>>> encoding_from_content_type('text/html')
>>>
"""
if not content_type:
return None
match = encoding_re.search(content_type)
return match and match.group(1) or None
class NotHtmlException(Exception):
pass
class UrlTask(tuple):
"""
We need to keep track of referers, but we don't want to add a url multiple
times just because it was referenced on multiple pages
"""
def __hash__(self):
return hash(self[0])
def __eq__(self, other):
return self[0] == other[0]
class VisitOnlyOnceClerk(object):
def __init__(self):
self.visited = set()
self.to_visit = set()
def enqueue(self, url, referer):
if not url in self.visited:
self.to_visit.add(UrlTask((url, referer)))
    def __bool__(self):
        return bool(self.to_visit)
    __nonzero__ = __bool__  # Python 2 uses __nonzero__ for truthiness
def __iter__(self):
while self.to_visit:
(url, referer) = self.to_visit.pop()
self.visited.add(url)
yield (url, referer)
def lremove(string, prefix):
"""
Remove a prefix from a string, if it exists.
>>> lremove('www.foo.com', 'www.')
'foo.com'
>>> lremove('foo.com', 'www.')
'foo.com'
"""
if string.startswith(prefix):
return string[len(prefix):]
else:
return string
def beautify(response):
content_type = response.headers.get('content-type')
if content_type:
if not html_re.search(content_type):
raise NotHtmlException
encoding = encoding_from_content_type(content_type)
else:
encoding = None
try:
return BeautifulSoup(
response.content,
features=HTML_PARSER,
from_encoding=encoding,
)
except UnicodeEncodeError:
raise NotHtmlException
def get_links(response):
    if 300 <= response.status_code < 400 and response.headers.get('location'):
# redirect
yield urlparse.urldefrag(
urlparse.urljoin(response.url, response.headers['location'], False)
)[0]
try:
html = beautify(response)
for i in html.find_all('a', href=True):
yield urlparse.urldefrag(urlparse.urljoin(response.url, i['href'], False))[0]
except NotHtmlException:
pass
def force_unicode(s):
if isinstance(s, str):
return unicode(s, encoding='utf-8')
else:
return s
def force_bytes(str_or_unicode):
if isinstance(str_or_unicode, unicode):
return str_or_unicode.encode('utf-8')
else:
return str_or_unicode
def get_pages(base, clerk, session=requests.session()):
clerk.enqueue(base, base)
base_domain = lremove(urlparse.urlparse(base).netloc, 'www.')
for (url, referer) in clerk:
url = force_bytes(url)
referer = force_bytes(referer)
response = session.get(
url,
headers={'Referer': referer, 'User-Agent': 'Fusionbox spider'},
allow_redirects=False,
)
for link in get_links(response):
parsed = urlparse.urlparse(link)
if lremove(parsed.netloc, 'www.') == base_domain:
clerk.enqueue(link, url)
yield referer, response
def metadata_spider(base, output=sys.stdout, delay=0, insecure=False):
writer = csv.writer(output)
robots = robotparser.RobotFileParser(base + '/robots.txt')
robots.read()
writer.writerow(headers)
session = requests.session()
session.verify = not insecure
for referer, response in get_pages(base, VisitOnlyOnceClerk(), session=session):
rules = applicable_robot_rules(robots, response.url)
robots_meta = canonical = title = description = keywords = ''
try:
html = beautify(response)
robots_meta = ','.join(i['content'] for i in html.find_all('meta', {"name": "robots"}))
try:
canonical = html.find_all('link', {"rel": "canonical"})[0]['href']
except IndexError:
pass
try:
title = html.head.title.contents[0]
except (AttributeError, IndexError):
pass
try:
description = html.head.find_all('meta', {"name": "description"})[0]['content']
except (AttributeError, IndexError, KeyError):
pass
try:
keywords = html.head.find_all('meta', {"name": "keywords"})[0]['content']
except (AttributeError, IndexError, KeyError):
pass
except NotHtmlException:
pass
writer.writerow(map(force_bytes, [
response.url,
title,
description,
keywords,
','.join(rules['allow']),
','.join(rules['disallow']),
','.join(rules['noindex']),
robots_meta,
canonical,
referer,
response.status_code,
]))
if delay:
time.sleep(delay)
def grep_spider(base, pattern, delay=0, insensitive=False, insecure=False):
flags = 0
if insensitive:
flags |= re.IGNORECASE
pattern = re.compile(pattern, flags)
session = requests.session()
session.verify = not insecure
for referer, response in get_pages(base, VisitOnlyOnceClerk(), session=session):
for line in response.content.split('\n'):
if pattern.search(line):
print u'%s:%s' % (force_unicode(response.url), force_unicode(line))
if delay:
time.sleep(delay)
def graphviz_spider(base, delay=0, insecure=False):
print "digraph links {"
session = requests.session()
session.verify = not insecure
for referer, response in get_pages(base, VisitOnlyOnceClerk(), session=session):
for link in get_links(response):
print ' "%s" -> "%s";' % (force_bytes(response.url), force_bytes(link))
if delay:
time.sleep(delay)
print "}"
def applicable_robot_rules(robots, url):
rules = collections.defaultdict(list)
if robots.default_entry:
rules[robots.default_entry.allowance(url)].append('*')
for entry in robots.entries:
rules[entry.allowance(url)].extend(entry.useragents)
return rules
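# Illustrative usage sketch, not part of the original module.  The target URL
# and output filename are placeholders.
def _example_report():
    with open('seo_report.csv', 'w') as out:
        metadata_spider('http://www.example.com', output=out, delay=1)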
| 2.734375 | 3 |
observations/r/zea_mays.py | hajime9652/observations | 199 | 5780 | <gh_stars>100-1000
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import numpy as np
import os
import sys
from observations.util import maybe_download_and_extract
def zea_mays(path):
"""Darwin's Heights of Cross- and Self-fertilized Zea May Pairs
Darwin (1876) studied the growth of pairs of zea may (aka corn)
seedlings, one produced by cross-fertilization and the other produced by
self-fertilization, but otherwise grown under identical conditions. His
goal was to demonstrate the greater vigour of the cross-fertilized
plants. The data recorded are the final height (inches, to the nearest
1/8th) of the plants in each pair.
In the *Design of Experiments*, Fisher (1935) used these data to
illustrate a paired t-test (well, a one-sample test on the mean
difference, `cross - self`). Later in the book (section 21), he used
this data to illustrate an early example of a non-parametric permutation
test, treating each paired difference as having (randomly) either a
positive or negative sign.
A data frame with 15 observations on the following 4 variables.
`pair`
pair number, a numeric vector
`pot`
pot, a factor with levels `1` `2` `3` `4`
`cross`
height of cross fertilized plant, a numeric vector
`self`
height of self fertilized plant, a numeric vector
`diff`
`cross - self` for each pair
<NAME>. (1876). *The Effect of Cross- and Self-fertilization in the
Vegetable Kingdom*, 2nd Ed. London: <NAME>.
<NAME>. and <NAME>. (1985) *Data: a collection of problems from
many fields for the student and research worker*. New York: Springer.
Data retrieved from: `https://www.stat.cmu.edu/StatDat/`
Args:
path: str.
Path to directory which either stores file or otherwise file will
be downloaded and extracted there.
Filename is `zea_mays.csv`.
Returns:
Tuple of np.ndarray `x_train` with 15 rows and 5 columns and
dictionary `metadata` of column headers (feature names).
"""
import pandas as pd
path = os.path.expanduser(path)
filename = 'zea_mays.csv'
if not os.path.exists(os.path.join(path, filename)):
url = 'http://dustintran.com/data/r/HistData/ZeaMays.csv'
maybe_download_and_extract(path, url,
save_file_name='zea_mays.csv',
resume=False)
data = pd.read_csv(os.path.join(path, filename), index_col=0,
parse_dates=True)
x_train = data.values
metadata = {'columns': data.columns}
return x_train, metadata
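# Illustrative sketch, not part of the original module: the paired analysis
# mentioned in the docstring, computed on the `diff` column.  The data path is
# a placeholder.
def _example_paired_t_statistic(path='~/data'):
  x_train, metadata = zea_mays(path)
  diff_idx = list(metadata['columns']).index('diff')
  diffs = x_train[:, diff_idx].astype(float)
  # One-sample t statistic for the mean of the cross - self differences.
  return diffs.mean() / (diffs.std(ddof=1) / np.sqrt(len(diffs)))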
| 3.109375 | 3 |
ois_api_client/v3_0/dto/Lines.py | peterkulik/ois_api_client | 7 | 5781 | <filename>ois_api_client/v3_0/dto/Lines.py
from typing import List
from dataclasses import dataclass
from .Line import Line
@dataclass
class Lines:
"""Product / service items
:param merged_item_indicator: Indicates whether the data exchange contains merged line data due to size reduction
:param line: Product / service item
"""
merged_item_indicator: bool
line: List[Line]
| 1.875 | 2 |
parsing_documents.py | leylafenix/belief-network-irs | 0 | 5782 | <filename>parsing_documents.py<gh_stars>0
__author__ = '<NAME>'
import os
import pprint
def read_block(f):
s = ""
line = f.readline()
while line and not line.startswith("."):
s += line
line = f.readline()
return s, line
def read_doc(f):
doc = {"title": "", "authors": "", "content": ""}
line = f.readline()
while line and not line.startswith(".I"):
if line.startswith(".T"):
doc["title"], line = read_block(f)
elif line.startswith(".A"):
doc["authors"], line = read_block(f)
elif line.startswith(".W"):
doc["content"], line = read_block(f)
else:
_, line = read_block(f)
return doc, line
def create_doc(data, out_folder, name):
with open(out_folder + os.sep + name, 'w') as f:
f.write(data["title"] + "\n")
f.write(data["content"] + "\n")
f.write(data["authors"])
def parse_all(s, out_folder):
with open(s) as f:
line = f.readline() # .I
while line:
doc_name = "d%03d.txt" % (int(line.strip().split()[-1]))
doc, line = read_doc(f)
create_doc(doc, out_folder, doc_name)
# print("**********************************")
if __name__ == '__main__':
s = "adi" + os.sep + "ADI.ALL"
out_folder = "test_index"
    try: # figure out how to check whether a folder or file already exists in python
os.mkdir(out_folder)
except FileExistsError:
pass
parse_all(s, out_folder)
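# For reference, a sketch of the block-structured input that read_doc expects
# (invented sample text, not taken from the real ADI collection):
#
# .I 1
# .T
# automatic indexing of documents
# .A
# doe j.
# .W
# a short abstract describing the document content .
# .I 2
# ...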
| 3.1875 | 3 |
groups/admin.py | caktus/rapidsms-groups | 1 | 5783 | <reponame>caktus/rapidsms-groups<gh_stars>1-10
#!/usr/bin/env python
# vim: ai ts=4 sts=4 et sw=4 encoding=utf-8
from django.contrib import admin
from groups.models import Group
admin.site.register(Group)
| 1.179688 | 1 |
avod/datasets/kitti/kitti_aug_test.py | Ascend-Huawei/AVOD | 0 | 5784 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from npu_bridge.npu_init import *
import unittest
import numpy as np
from avod.datasets.kitti import kitti_aug
class KittiAugTest(unittest.TestCase):
def test_flip_boxes_3d(self):
boxes_3d = np.array([
[1, 2, 3, 4, 5, 6, np.pi / 4],
[1, 2, 3, 4, 5, 6, -np.pi / 4]
])
exp_flipped_boxes_3d = np.array([
[-1, 2, 3, 4, 5, 6, 3 * np.pi / 4],
[-1, 2, 3, 4, 5, 6, -3 * np.pi / 4]
])
flipped_boxes_3d = kitti_aug.flip_boxes_3d(boxes_3d)
np.testing.assert_almost_equal(flipped_boxes_3d, exp_flipped_boxes_3d)
| 1.414063 | 1 |
application/model/_base.py | keysona/blog | 0 | 5785 | <gh_stars>0
from flask_sqlalchemy import SQLAlchemy, Model
# class BaseModel(Model):
# def save(self):
# db.session.add(self)
# db.session.commit(self)
# def delete(self):
# db.session.
db = SQLAlchemy()
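# A working sketch of the helper the commented-out code above appears to aim
# for; the method bodies are assumptions, not part of the original file.
class BaseModel(Model):
    def save(self):
        db.session.add(self)
        db.session.commit()
    def delete(self):
        db.session.delete(self)
        db.session.commit()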
| 2.34375 | 2 |
kvmagent/kvmagent/plugins/prometheus.py | qianfei11/zstack-utility | 0 | 5786 | <filename>kvmagent/kvmagent/plugins/prometheus.py
import os.path
import threading
import typing
from prometheus_client import start_http_server
from prometheus_client.core import GaugeMetricFamily, REGISTRY
from kvmagent import kvmagent
from zstacklib.utils import http
from zstacklib.utils import jsonobject
from zstacklib.utils import lock
from zstacklib.utils import lvm
from zstacklib.utils import misc
from zstacklib.utils import thread
from zstacklib.utils.bash import *
from zstacklib.utils.ip import get_nic_supported_max_speed
logger = log.get_logger(__name__)
collector_dict = {} # type: typing.Dict[str, threading.Thread]
latest_collect_result = {}
collectResultLock = threading.RLock()
QEMU_CMD = kvmagent.get_qemu_path().split("/")[-1]
def read_number(fname):
res = linux.read_file(fname)
return 0 if not res else int(res)
def collect_host_network_statistics():
all_eths = os.listdir("/sys/class/net/")
virtual_eths = os.listdir("/sys/devices/virtual/net/")
interfaces = []
for eth in all_eths:
eth = eth.strip(' \t\n\r')
if eth in virtual_eths: continue
if eth == 'bonding_masters':
continue
elif not eth:
continue
else:
interfaces.append(eth)
all_in_bytes = 0
all_in_packets = 0
all_in_errors = 0
all_out_bytes = 0
all_out_packets = 0
all_out_errors = 0
for intf in interfaces:
all_in_bytes += read_number("/sys/class/net/{}/statistics/rx_bytes".format(intf))
all_in_packets += read_number("/sys/class/net/{}/statistics/rx_packets".format(intf))
all_in_errors += read_number("/sys/class/net/{}/statistics/rx_errors".format(intf))
all_out_bytes += read_number("/sys/class/net/{}/statistics/tx_bytes".format(intf))
all_out_packets += read_number("/sys/class/net/{}/statistics/tx_packets".format(intf))
all_out_errors += read_number("/sys/class/net/{}/statistics/tx_errors".format(intf))
metrics = {
'host_network_all_in_bytes': GaugeMetricFamily('host_network_all_in_bytes',
'Host all inbound traffic in bytes'),
'host_network_all_in_packages': GaugeMetricFamily('host_network_all_in_packages',
'Host all inbound traffic in packages'),
'host_network_all_in_errors': GaugeMetricFamily('host_network_all_in_errors',
'Host all inbound traffic errors'),
'host_network_all_out_bytes': GaugeMetricFamily('host_network_all_out_bytes',
'Host all outbound traffic in bytes'),
'host_network_all_out_packages': GaugeMetricFamily('host_network_all_out_packages',
'Host all outbound traffic in packages'),
'host_network_all_out_errors': GaugeMetricFamily('host_network_all_out_errors',
'Host all outbound traffic errors'),
}
metrics['host_network_all_in_bytes'].add_metric([], float(all_in_bytes))
metrics['host_network_all_in_packages'].add_metric([], float(all_in_packets))
metrics['host_network_all_in_errors'].add_metric([], float(all_in_errors))
metrics['host_network_all_out_bytes'].add_metric([], float(all_out_bytes))
metrics['host_network_all_out_packages'].add_metric([], float(all_out_packets))
metrics['host_network_all_out_errors'].add_metric([], float(all_out_errors))
return metrics.values()
def collect_host_capacity_statistics():
default_zstack_path = '/usr/local/zstack/apache-tomcat/webapps/zstack'
zstack_env_path = os.environ.get('ZSTACK_HOME', None)
if zstack_env_path and zstack_env_path != default_zstack_path:
default_zstack_path = zstack_env_path
zstack_dir = ['/var/lib/zstack', '%s/../../../' % default_zstack_path, '/opt/zstack-dvd/',
'/var/log/zstack', '/var/lib/mysql', '/var/lib/libvirt', '/tmp/zstack']
metrics = {
'zstack_used_capacity_in_bytes': GaugeMetricFamily('zstack_used_capacity_in_bytes',
'ZStack used capacity in bytes')
}
zstack_used_capacity = 0
for dir in zstack_dir:
if not os.path.exists(dir):
continue
cmd = "du -bs %s | awk {\'print $1\'}" % dir
res = bash_o(cmd)
zstack_used_capacity += int(res)
metrics['zstack_used_capacity_in_bytes'].add_metric([], float(zstack_used_capacity))
return metrics.values()
def collect_lvm_capacity_statistics():
metrics = {
'vg_size': GaugeMetricFamily('vg_size',
'volume group size', None, ['vg_name']),
'vg_avail': GaugeMetricFamily('vg_avail',
'volume group and thin pool free size', None, ['vg_name']),
}
r = bash_r("grep -Ev '^[[:space:]]*#|^[[:space:]]*$' /etc/multipath/wwids")
if r == 0:
linux.set_fail_if_no_path()
r, o, e = bash_roe("vgs --nolocking --noheading -oname")
if r != 0 or len(o.splitlines()) == 0:
return metrics.values()
vg_names = o.splitlines()
for name in vg_names:
name = name.strip()
size, avail = lvm.get_vg_size(name, False)
metrics['vg_size'].add_metric([name], float(size))
metrics['vg_avail'].add_metric([name], float(avail))
return metrics.values()
def convert_raid_state_to_int(state):
"""
:type state: str
"""
state = state.lower()
if state == "optimal":
return 0
elif state == "degraded":
return 5
else:
return 100
def convert_disk_state_to_int(state):
"""
:type state: str
"""
state = state.lower()
if "online" in state or "jobd" in state:
return 0
elif "rebuild" in state:
return 5
elif "failed" in state:
return 10
elif "unconfigured" in state:
return 15
else:
return 100
def collect_raid_state():
metrics = {
'raid_state': GaugeMetricFamily('raid_state',
'raid state', None, ['target_id']),
'physical_disk_state': GaugeMetricFamily('physical_disk_state',
'physical disk state', None,
['slot_number', 'disk_group']),
'physical_disk_temperature': GaugeMetricFamily('physical_disk_temperature',
'physical disk temperature', None,
['slot_number', 'disk_group']),
}
if bash_r("/opt/MegaRAID/MegaCli/MegaCli64 -LDInfo -LALL -aAll") != 0:
return metrics.values()
raid_info = bash_o("/opt/MegaRAID/MegaCli/MegaCli64 -LDInfo -LALL -aAll | grep -E 'Target Id|State'").strip().splitlines()
target_id = state = "unknown"
for info in raid_info:
if "Target Id" in info:
target_id = info.strip().strip(")").split(" ")[-1]
else:
state = info.strip().split(" ")[-1]
metrics['raid_state'].add_metric([target_id], convert_raid_state_to_int(state))
disk_info = bash_o(
"/opt/MegaRAID/MegaCli/MegaCli64 -PDList -aAll | grep -E 'Slot Number|DiskGroup|Firmware state|Drive Temperature'").strip().splitlines()
slot_number = state = disk_group = "unknown"
for info in disk_info:
if "Slot Number" in info:
slot_number = info.strip().split(" ")[-1]
elif "DiskGroup" in info:
kvs = info.replace("Drive's position: ", "").split(",")
disk_group = filter(lambda x: "DiskGroup" in x, kvs)[0]
disk_group = disk_group.split(" ")[-1]
elif "Drive Temperature" in info:
temp = info.split(":")[1].split("C")[0]
metrics['physical_disk_temperature'].add_metric([slot_number, disk_group], int(temp))
else:
disk_group = "JBOD" if disk_group == "unknown" and info.count("JBOD") > 0 else disk_group
disk_group = "unknown" if disk_group is None else disk_group
state = info.strip().split(":")[-1]
metrics['physical_disk_state'].add_metric([slot_number, disk_group], convert_disk_state_to_int(state))
return metrics.values()
def collect_equipment_state():
metrics = {
'power_supply': GaugeMetricFamily('power_supply',
'power supply', None, ['ps_id']),
'ipmi_status': GaugeMetricFamily('ipmi_status', 'ipmi status', None, []),
'physical_network_interface': GaugeMetricFamily('physical_network_interface',
'physical network interface', None,
['interface_name', 'speed']),
}
r, ps_info = bash_ro("ipmitool sdr type 'power supply'") # type: (int, str)
if r == 0:
for info in ps_info.splitlines():
info = info.strip()
ps_id = info.split("|")[0].strip().split(" ")[0]
health = 10 if "fail" in info.lower() or "lost" in info.lower() else 0
metrics['power_supply'].add_metric([ps_id], health)
metrics['ipmi_status'].add_metric([], bash_r("ipmitool mc info"))
nics = bash_o("find /sys/class/net -type l -not -lname '*virtual*' -printf '%f\\n'").splitlines()
if len(nics) != 0:
for nic in nics:
nic = nic.strip()
try:
# NOTE(weiw): sriov nic contains carrier file but can not read
status = linux.read_file("/sys/class/net/%s/carrier" % nic) == 1
except Exception as e:
status = True
speed = str(get_nic_supported_max_speed(nic))
metrics['physical_network_interface'].add_metric([nic, speed], status)
return metrics.values()
def collect_vm_statistics():
metrics = {
'cpu_occupied_by_vm': GaugeMetricFamily('cpu_occupied_by_vm',
'Percentage of CPU used by vm', None, ['vmUuid'])
}
r, pid_vm_map_str = bash_ro("ps --no-headers u -C \"%s -name\" | awk '{print $2,$13}'" % QEMU_CMD)
if r != 0 or len(pid_vm_map_str.splitlines()) == 0:
return metrics.values()
pid_vm_map_str = pid_vm_map_str.replace(",debug-threads=on", "").replace("guest=", "")
'''pid_vm_map_str samples:
38149 e8e6f27bfb2d47e08c59cbea1d0488c3
38232 afa02edca7eb4afcb5d2904ac1216eb1
'''
pid_vm_map = {}
for pid_vm in pid_vm_map_str.splitlines():
arr = pid_vm.split()
if len(arr) == 2:
pid_vm_map[arr[0]] = arr[1]
def collect(vm_pid_arr):
vm_pid_arr_str = ','.join(vm_pid_arr)
r, pid_cpu_usages_str = bash_ro("top -b -n 1 -p %s | grep qemu | awk '{print $1,$9}'" % vm_pid_arr_str)
if r != 0 or len(pid_cpu_usages_str.splitlines()) == 0:
return
for pid_cpu_usage in pid_cpu_usages_str.splitlines():
arr = pid_cpu_usage.split()
pid = arr[0]
vm_uuid = pid_vm_map[pid]
cpu_usage = arr[1]
metrics['cpu_occupied_by_vm'].add_metric([vm_uuid], float(cpu_usage))
n = 10
for i in range(0, len(pid_vm_map.keys()), n):
collect(pid_vm_map.keys()[i:i + n])
return metrics.values()
collect_node_disk_wwid_last_time = None
collect_node_disk_wwid_last_result = None
def collect_node_disk_wwid():
global collect_node_disk_wwid_last_time
global collect_node_disk_wwid_last_result
# NOTE(weiw): some storage can not afford frequent TUR. ref: ZSTAC-23416
if collect_node_disk_wwid_last_time is None:
collect_node_disk_wwid_last_time = time.time()
elif time.time() - collect_node_disk_wwid_last_time < 60 and collect_node_disk_wwid_last_result is not None:
return collect_node_disk_wwid_last_result
metrics = {
'node_disk_wwid': GaugeMetricFamily('node_disk_wwid',
'node disk wwid', None, ["disk", "wwid"])
}
pvs = bash_o("pvs --nolocking --noheading -o pv_name").strip().splitlines()
for pv in pvs:
multipath_wwid = None
if bash_r("dmsetup table %s | grep multipath" % pv) == 0:
multipath_wwid = bash_o("udevadm info -n %s | grep -E '^S: disk/by-id/dm-uuid' | awk -F '-' '{print $NF}'" % pv).strip()
disks = linux.get_physical_disk(pv, False)
for disk in disks:
disk_name = disk.split("/")[-1].strip()
wwids = bash_o("udevadm info -n %s | grep -E '^S: disk/by-id' | awk -F '/' '{print $NF}' | grep -v '^lvm-pv' | sort" % disk).strip().splitlines()
if multipath_wwid is not None:
wwids.append(multipath_wwid)
if len(wwids) > 0:
metrics['node_disk_wwid'].add_metric([disk_name, ";".join([w.strip() for w in wwids])], 1)
collect_node_disk_wwid_last_result = metrics.values()
return metrics.values()
kvmagent.register_prometheus_collector(collect_host_network_statistics)
kvmagent.register_prometheus_collector(collect_host_capacity_statistics)
kvmagent.register_prometheus_collector(collect_vm_statistics)
kvmagent.register_prometheus_collector(collect_node_disk_wwid)
if misc.isMiniHost():
kvmagent.register_prometheus_collector(collect_lvm_capacity_statistics)
kvmagent.register_prometheus_collector(collect_raid_state)
kvmagent.register_prometheus_collector(collect_equipment_state)
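# Illustrative sketch, not part of the original plugin: the shape every collector
# registered above must have -- a callable returning a list of GaugeMetricFamily
# objects.  The metric name and label below are made up for reference only.
def _example_custom_collector():
    gauge = GaugeMetricFamily('example_host_metric', 'reference-only example metric',
                              None, ['example_label'])
    gauge.add_metric(['example_value'], 1.0)
    return [gauge]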
class PrometheusPlugin(kvmagent.KvmAgent):
COLLECTD_PATH = "/prometheus/collectdexporter/start"
@kvmagent.replyerror
@in_bash
def start_prometheus_exporter(self, req):
@in_bash
def start_collectd(cmd):
conf_path = os.path.join(os.path.dirname(cmd.binaryPath), 'collectd.conf')
conf = '''Interval {{INTERVAL}}
# version {{VERSION}}
FQDNLookup false
LoadPlugin syslog
LoadPlugin aggregation
LoadPlugin cpu
LoadPlugin disk
LoadPlugin interface
LoadPlugin memory
LoadPlugin network
LoadPlugin virt
<Plugin aggregation>
<Aggregation>
#Host "unspecified"
Plugin "cpu"
#PluginInstance "unspecified"
Type "cpu"
#TypeInstance "unspecified"
GroupBy "Host"
GroupBy "TypeInstance"
CalculateNum false
CalculateSum false
CalculateAverage true
CalculateMinimum false
CalculateMaximum false
CalculateStddev false
</Aggregation>
</Plugin>
<Plugin cpu>
ReportByCpu true
ReportByState true
ValuesPercentage true
</Plugin>
<Plugin disk>
Disk "/^sd[a-z]$/"
Disk "/^hd[a-z]$/"
Disk "/^vd[a-z]$/"
IgnoreSelected false
</Plugin>
<Plugin "interface">
{% for i in INTERFACES -%}
Interface "{{i}}"
{% endfor -%}
IgnoreSelected false
</Plugin>
<Plugin memory>
ValuesAbsolute true
ValuesPercentage false
</Plugin>
<Plugin virt>
Connection "qemu:///system"
RefreshInterval {{INTERVAL}}
HostnameFormat name
PluginInstanceFormat name
BlockDevice "/:hd[a-z]/"
IgnoreSelected true
ExtraStats "vcpu memory"
</Plugin>
<Plugin network>
Server "localhost" "25826"
</Plugin>
'''
tmpt = Template(conf)
conf = tmpt.render({
'INTERVAL': cmd.interval,
'INTERFACES': interfaces,
'VERSION': cmd.version,
})
need_restart_collectd = False
if os.path.exists(conf_path):
with open(conf_path, 'r') as fd:
old_conf = fd.read()
if old_conf != conf:
with open(conf_path, 'w') as fd:
fd.write(conf)
need_restart_collectd = True
else:
with open(conf_path, 'w') as fd:
fd.write(conf)
need_restart_collectd = True
cpid = linux.find_process_by_command('collectd', [conf_path])
mpid = linux.find_process_by_command('collectdmon', [conf_path])
if not cpid:
bash_errorout('collectdmon -- -C %s' % conf_path)
else:
bash_errorout('kill -TERM %s' % cpid)
if need_restart_collectd:
if not mpid:
bash_errorout('collectdmon -- -C %s' % conf_path)
else:
bash_errorout('kill -HUP %s' % mpid)
else:
if not mpid:
bash_errorout('collectdmon -- -C %s' % conf_path)
def run_in_systemd(binPath, args, log):
def get_systemd_name(path):
if "collectd_exporter" in path:
return "collectd_exporter"
elif "node_exporter" in path:
return "node_exporter"
elif "pushgateway" in path:
return "pushgateway"
def reload_and_restart_service(service_name):
bash_errorout("systemctl daemon-reload && systemctl restart %s.service" % service_name)
service_name = get_systemd_name(binPath)
service_path = '/etc/systemd/system/%s.service' % service_name
service_conf = '''
[Unit]
Description=prometheus %s
After=network.target
[Service]
ExecStart=/bin/sh -c '%s %s > %s 2>&1'
ExecStop=/bin/sh -c 'pkill -TERM -f %s'
Restart=always
RestartSec=30s
[Install]
WantedBy=multi-user.target
''' % (service_name, binPath, args, '/dev/null' if log.endswith('/pushgateway.log') else log, binPath)
if not os.path.exists(service_path):
linux.write_file(service_path, service_conf, True)
os.chmod(service_path, 0644)
reload_and_restart_service(service_name)
return
if linux.read_file(service_path) != service_conf:
linux.write_file(service_path, service_conf, True)
logger.info("%s.service conf changed" % service_name)
os.chmod(service_path, 0644)
# restart service regardless of conf changes, for ZSTAC-23539
reload_and_restart_service(service_name)
@lock.file_lock("/run/collectd-conf.lock", locker=lock.Flock())
def start_collectd_exporter(cmd):
start_collectd(cmd)
start_exporter(cmd)
@in_bash
def start_exporter(cmd):
EXPORTER_PATH = cmd.binaryPath
LOG_FILE = os.path.join(os.path.dirname(EXPORTER_PATH), cmd.binaryPath + '.log')
ARGUMENTS = cmd.startupArguments
if not ARGUMENTS:
ARGUMENTS = ""
os.chmod(EXPORTER_PATH, 0o755)
run_in_systemd(EXPORTER_PATH, ARGUMENTS, LOG_FILE)
para = jsonobject.loads(req[http.REQUEST_BODY])
rsp = kvmagent.AgentResponse()
eths = bash_o("ls /sys/class/net").split()
interfaces = []
for eth in eths:
eth = eth.strip(' \t\n\r')
if eth == 'lo': continue
if eth == 'bonding_masters': continue
elif eth.startswith('vnic'): continue
elif eth.startswith('outer'): continue
elif eth.startswith('br_'): continue
elif not eth: continue
else:
interfaces.append(eth)
for cmd in para.cmds:
if "collectd_exporter" in cmd.binaryPath:
start_collectd_exporter(cmd)
else:
start_exporter(cmd)
return jsonobject.dumps(rsp)
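
    # Illustrative request body for COLLECTD_PATH (an assumption, for
    # documentation only; it is shaped purely from the fields this handler
    # reads: cmds[].binaryPath, cmds[].startupArguments, cmds[].interval,
    # cmds[].version):
    #
    # {
    #     "cmds": [
    #         {"binaryPath": "/usr/local/bin/collectd_exporter",
    #          "startupArguments": "",
    #          "interval": 10,
    #          "version": "1.0"},
    #         {"binaryPath": "/usr/local/bin/node_exporter",
    #          "startupArguments": "--web.listen-address=:9100"}
    #     ]
    # }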
    def install_collector(self):
class Collector(object):
__collector_cache = {}
@classmethod
def __get_cache__(cls):
# type: () -> list
keys = cls.__collector_cache.keys()
if keys is None or len(keys) == 0:
return None
if (time.time() - keys[0]) < 9:
return cls.__collector_cache.get(keys[0])
return None
@classmethod
def __store_cache__(cls, ret):
# type: (list) -> None
cls.__collector_cache.clear()
cls.__collector_cache.update({time.time(): ret})
@classmethod
def check(cls, v):
try:
if v is None:
return False
if isinstance(v, GaugeMetricFamily):
return Collector.check(v.samples)
if isinstance(v, list) or isinstance(v, tuple):
for vl in v:
if Collector.check(vl) is False:
return False
if isinstance(v, dict):
for vk in v.iterkeys():
if vk == "timestamp" or vk == "exemplar":
continue
if Collector.check(v[vk]) is False:
return False
except Exception as e:
logger.warn("got exception in check value %s: %s" % (v, e))
return True
return True
def collect(self):
global latest_collect_result
ret = []
def get_result_run(f, fname):
# type: (typing.Callable, str) -> None
global collectResultLock
global latest_collect_result
r = f()
if not Collector.check(r):
logger.warn("result from collector %s contains illegal character None, details: \n%s" % (fname, r))
return
with collectResultLock:
latest_collect_result[fname] = r
cache = Collector.__get_cache__()
if cache is not None:
return cache
for c in kvmagent.metric_collectors:
name = "%s.%s" % (c.__module__, c.__name__)
if collector_dict.get(name) is not None and collector_dict.get(name).is_alive():
continue
collector_dict[name] = thread.ThreadFacade.run_in_thread(get_result_run, (c, name,))
for i in range(7):
for t in collector_dict.values():
if t.is_alive():
time.sleep(0.5)
continue
for k in collector_dict.iterkeys():
if collector_dict[k].is_alive():
logger.warn("It seems that the collector [%s] has not been completed yet,"
" temporarily use the last calculation result." % k)
for v in latest_collect_result.itervalues():
ret.extend(v)
Collector.__store_cache__(ret)
return ret
REGISTRY.register(Collector())
def start(self):
http_server = kvmagent.get_http_server()
http_server.register_async_uri(self.COLLECTD_PATH, self.start_prometheus_exporter)
        self.install_collector()
start_http_server(7069)
def stop(self):
pass
| 1.898438 | 2 |
recipes/libstudxml/all/conanfile.py | rockandsalt/conan-center-index | 562 | 5787 | from conans import ConanFile, AutoToolsBuildEnvironment, MSBuild, tools
from conans.errors import ConanInvalidConfiguration
import os
import shutil
required_conan_version = ">=1.33.0"
class LibStudXmlConan(ConanFile):
name = "libstudxml"
description = "A streaming XML pull parser and streaming XML serializer implementation for modern, standard C++."
topics = ("xml", "xml-parser", "serialization")
url = "https://github.com/conan-io/conan-center-index"
homepage = "https://www.codesynthesis.com/projects/libstudxml/"
license = "MIT"
settings = "os", "compiler", "build_type", "arch"
exports_sources = "patches/*"
options = {
"shared": [True, False],
"fPIC": [True, False],
}
default_options = {
"shared": False,
"fPIC": True,
}
_autotools = None
@property
def _source_subfolder(self):
return "source_subfolder"
def config_options(self):
if self.settings.os == "Windows":
del self.options.fPIC
def configure(self):
if self.options.shared:
del self.options.fPIC
def requirements(self):
self.requires("expat/2.4.1")
def validate(self):
if self.settings.compiler == "Visual Studio":
if tools.Version(self.settings.compiler.version) < "9":
raise ConanInvalidConfiguration("Visual Studio {} is not supported.".format(self.settings.compiler.version))
@property
def _settings_build(self):
return getattr(self, "settings_build", self.settings)
def build_requirements(self):
if self.settings.compiler != "Visual Studio":
self.build_requires("gnu-config/cci.20201022")
self.build_requires("libtool/2.4.6")
if self._settings_build.os == "Windows" and not tools.get_env("CONAN_BASH_PATH"):
self.build_requires("msys2/cci.latest")
def source(self):
tools.get(**self.conan_data["sources"][self.version],
destination=self._source_subfolder, strip_root=True)
def _configure_autotools(self):
if not self._autotools:
args = ["--with-external-expat"]
if self.options.shared:
args.extend(["--enable-shared", "--disable-static"])
else:
args.extend(["--disable-shared", "--enable-static"])
self._autotools = AutoToolsBuildEnvironment(self, win_bash=tools.os_info.is_windows)
self._autotools.configure(configure_dir=self._source_subfolder, args=args)
return self._autotools
def _build_vs(self):
vc_ver = int(tools.Version(self.settings.compiler.version).major)
sln_path = None
def get_sln_path():
return os.path.join(self._source_subfolder, "libstudxml-vc{}.sln".format(vc_ver))
sln_path = get_sln_path()
while not os.path.exists(sln_path):
vc_ver -= 1
sln_path = get_sln_path()
proj_path = os.path.join(self._source_subfolder, "xml", "libstudxml-vc{}.vcxproj".format(vc_ver))
if not self.options.shared:
tools.replace_in_file(proj_path, "DynamicLibrary", "StaticLibrary")
tools.replace_in_file(proj_path, "LIBSTUDXML_DYNAMIC_LIB", "LIBSTUDXML_STATIC_LIB")
msbuild = MSBuild(self)
msbuild.build(sln_path, platforms={"x86": "Win32"})
@property
def _user_info_build(self):
return getattr(self, "user_info_build", self.deps_user_info)
def _build_autotools(self):
shutil.copy(self._user_info_build["gnu-config"].CONFIG_SUB,
os.path.join(self._source_subfolder, "config", "config.sub"))
shutil.copy(self._user_info_build["gnu-config"].CONFIG_GUESS,
os.path.join(self._source_subfolder, "config", "config.guess"))
if self.settings.compiler.get_safe("libcxx") == "libc++":
# libc++ includes a file called 'version', and since libstudxml adds source_subfolder as an
# include dir, libc++ ends up including their 'version' file instead, causing a compile error
tools.remove_files_by_mask(self._source_subfolder, "version")
with tools.chdir(self._source_subfolder):
self.run("{} -fiv".format(tools.get_env("AUTORECONF")), win_bash=tools.os_info.is_windows)
autotools = self._configure_autotools()
autotools.make()
def build(self):
for patch in self.conan_data.get("patches", {}).get(self.version, []):
tools.patch(**patch)
if self.settings.compiler == "Visual Studio":
self._build_vs()
else:
self._build_autotools()
def package(self):
self.copy(pattern="LICENSE", dst="licenses", src=self._source_subfolder)
if self.settings.compiler == "Visual Studio":
self.copy("xml/value-traits", dst="include", src=self._source_subfolder)
self.copy("xml/serializer", dst="include", src=self._source_subfolder)
self.copy("xml/qname", dst="include", src=self._source_subfolder)
self.copy("xml/parser", dst="include", src=self._source_subfolder)
self.copy("xml/forward", dst="include", src=self._source_subfolder)
self.copy("xml/exception", dst="include", src=self._source_subfolder)
self.copy("xml/content", dst="include", src=self._source_subfolder)
self.copy("xml/*.ixx", dst="include", src=self._source_subfolder)
self.copy("xml/*.txx", dst="include", src=self._source_subfolder)
self.copy("xml/*.hxx", dst="include", src=self._source_subfolder)
self.copy("xml/*.h", dst="include", src=self._source_subfolder)
suffix = ""
if self.settings.arch == "x86_64":
suffix = "64"
if self.options.shared:
self.copy("*.lib", dst="lib", src=os.path.join(self._source_subfolder, "lib" + suffix))
self.copy("*.dll", dst="bin", src=os.path.join(self._source_subfolder, "bin" + suffix))
else:
self.copy("*.lib", dst="lib", src=os.path.join(self._source_subfolder, "bin" + suffix))
else:
autotools = self._configure_autotools()
autotools.install()
tools.remove_files_by_mask(os.path.join(self.package_folder, "lib"), "libstudxml.la")
tools.rmdir(os.path.join(self.package_folder, "lib", "pkgconfig"))
tools.rmdir(os.path.join(self.package_folder, "share"))
def package_info(self):
self.cpp_info.libs = tools.collect_libs(self)
self.cpp_info.names["pkg_config"] = "libstudxml"
# If built with makefile, static library mechanism is provided by their buildsystem already
if self.settings.compiler == "Visual Studio" and not self.options.shared:
self.cpp_info.defines = ["LIBSTUDXML_STATIC_LIB=1"]
| 1.921875 | 2 |
dataset/WebCariA.py | KeleiHe/DAAN | 9 | 5788 |
# Copyright 2020 <NAME> & <NAME> (<EMAIL>)
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
class WebCariA:
def __init__(self, dataType, modelType, parse, des_attri=None):
self.dir_path = "/data/jw/dataset/" + str(parse)
self.dataType = dataType
self.parse = parse
self.des_attri = des_attri
if self.dataType == 'train':
if self.parse == 'Caricature':
self.subPath = 'CariTrain'
elif self.parse == 'Photo':
self.subPath = 'PhotoTrain'
else:
self.subPath = 'WebCariTrain'
elif self.dataType == 'val':
if self.parse == 'Caricature':
self.subPath = 'CariVal'
elif self.parse == 'Photo':
self.subPath = 'PhotoVal'
else:
self.subPath = 'WebCariVal'
elif self.dataType == 'test':
if self.parse == 'Caricature':
self.subPath = 'CariTest'
elif self.parse == 'Photo':
self.subPath = 'PhotoTest'
else:
self.subPath = 'WebCariTest'
elif self.dataType == 'all_data':
if self.parse == 'Caricature':
self.subPath = 'all_cari_data'
elif self.parse == 'Photo':
self.subPath = 'all_photo_data'
else:
self.subPath = 'all_WebCari_data'
else:
print("Caricature error, please select a dataType from: train, val, github")
exit(1)
self.modelType = modelType
self.dir_path = os.path.join(self.dir_path, self.subPath)
self.attributes = ['Women',
'Asian',
'White',
'Black',
'Youth',
'Middle',
'Old',
'Wrinkle',
'MakeUp',
'Bald',
'LargeForehead',
'RoundFace',
'DiamondFace',
'OvalFace',
'SquareShapeFace',
'NarrowEye',
'SleepyEye',
'SlantEye',
'SharpEye',
'FlabbyEye',
'BigEye',
'SmallEye',
'UnderEyePuffiness',
'BigNose',
'SmallNose',
'HighNose',
'FlatNose',
'HookNose',
'WideNose',
'NarrowNose',
'Toothy',
'Smile',
'BigMouth',
'SmallMouth',
'ThickLips',
'ThinLips',
'DoubleChin',
'ArchedEyebrows',
'FlatEyebrow',
'SlantedEyebrows',
'UpsideDownSlantedEyebrows',
'BushyEyebrows',
'ThickEyebrows',
'ThinEyebrows',
'Mustache',
'Goatee',
'Whiskers',
'OtherBeard&NoBeard',
'HighCheekbones',
'SquareJaw']
self.names, self.annas, self.visuals, self.num_attribute = self.getImgNameAndAnnas()
print(parse+"dataset, images: ", len(self.names), " type for: ", self.dataType, " num_attribute: ",
self.num_attribute)
def getImgNameAndAnnas(self):
names = []
annas = []
visuals = []
file = self.subPath+".txt"
file_v = self.subPath+"_V.txt"
fileList = open(os.path.join(self.dir_path, file)).readlines()
fileVList = open((os.path.join(self.dir_path, file_v))).readlines()
if self.modelType == 'seperate':
num_attribute = 1
attribute = self.des_attri
print("des_attribute", attribute)
if attribute not in self.attributes:
print("error: ", attribute, "is not in this dataset, please write a correct attribute in param")
exit(1)
            # Assumed annotation format: each line is whitespace-separated,
            # the image name followed by one integer label per attribute.
            for line in fileList:
                tokens = line.strip().split()
                names.append(tokens[0])
                attributes = tokens[1:]
                index = self.attributes.index(attribute)
                annas.append([int(attributes[index])])
            for line in fileVList:
                tokens = line.strip().split()
                attributes_v = tokens[1:]
                index = self.attributes.index(attribute)
                visuals.append([int(attributes_v[index])])
        else:
            for line in fileList:
                tokens = line.strip().split()
                names.append(tokens[0])
                annas.append([int(x) for x in tokens[1:]])
            for line in fileVList:
                tokens = line.strip().split()
                visuals.append([int(x) for x in tokens[1:]])
self.attributes = self.attributes
num_attribute = len(self.attributes)
return names, annas, visuals, num_attribute
def getPath(self, name):
name = name.replace(' ', '_')
name = name.replace('._', '_')
name = name.replace('-', '_')
name = name + ".jpg"
return name
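

# Illustrative usage sketch (not part of the original module). It assumes the
# dataset files under self.dir_path exist; the dataType/parse values, the
# non-'seperate' modelType string and the 'Smile' attribute are example inputs,
# not values taken from this file's callers.
if __name__ == '__main__':
    # Multi-attribute mode: every entry in annas is a full label vector.
    cari = WebCariA(dataType='train', modelType='all', parse='Caricature')
    print(cari.getPath(cari.names[0]), cari.annas[0][:5])

    # Single-attribute mode: des_attri selects one column, e.g. 'Smile'.
    photo = WebCariA(dataType='val', modelType='seperate', parse='Photo',
                     des_attri='Smile')
    print(len(photo.names), photo.num_attribute)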
| 2.125 | 2 |
moonworm/crawler/state/json_state.py | zomglings/moonworm | 10 | 5789 | <reponame>zomglings/moonworm<gh_stars>1-10
import datetime
import json
import time
from typing import Optional
from web3.datastructures import AttributeDict
from .event_scanner_state import EventScannerState
class JSONifiedState(EventScannerState):
"""Store the state of scanned blocks and all events.
All state is an in-memory dict.
Simple load/store massive JSON on start up.
"""
def __init__(self):
self.state = None
self.fname = "test-state.json"
        # How many seconds ago we last saved the JSON file
self.last_save = 0
def reset(self):
"""Create initial state of nothing scanned."""
self.state = {
"last_scanned_block": 0,
"blocks": {},
}
def restore(self):
"""Restore the last scan state from a file."""
try:
self.state = json.load(open(self.fname, "rt"))
print(
f"Restored the state, previously {self.state['last_scanned_block']} blocks have been scanned"
)
except (IOError, json.decoder.JSONDecodeError):
print("State starting from scratch")
self.reset()
def save(self):
"""Save everything we have scanned so far in a file."""
with open(self.fname, "wt") as f:
json.dump(self.state, f)
self.last_save = time.time()
#
# EventScannerState methods implemented below
#
def get_last_scanned_block(self):
"""The number of the last block we have stored."""
return self.state["last_scanned_block"]
def delete_data(self, since_block):
"""Remove potentially reorganised blocks from the scan data."""
for block_num in range(since_block, self.get_last_scanned_block()):
if block_num in self.state["blocks"]:
del self.state["blocks"][block_num]
def start_chunk(self, block_number, chunk_size):
pass
def end_chunk(self, block_number):
"""Save at the end of each block, so we can resume in the case of a crash or CTRL+C"""
# Next time the scanner is started we will resume from this block
self.state["last_scanned_block"] = block_number
# Save the database file for every minute
if time.time() - self.last_save > 60:
self.save()
def process_event(
self, block_when: Optional[datetime.datetime], event: AttributeDict
) -> str:
"""Record a ERC-20 transfer in our database."""
# Events are keyed by their transaction hash and log index
# One transaction may contain multiple events
# and each one of those gets their own log index
# event_name = event.event # "Transfer"
log_index = event.logIndex # Log index within the block
# transaction_index = event.transactionIndex # Transaction index within the block
txhash = event.transactionHash.hex() # Transaction hash
block_number = event.blockNumber
# Convert ERC-20 Transfer event to our internal format
args = event["args"]
transfer = {
"from": args["from"],
"to": args.to,
"value": args.value,
}
if block_when is not None:
transfer["timestamp"] = block_when.isoformat()
# Create empty dict as the block that contains all transactions by txhash
if block_number not in self.state["blocks"]:
self.state["blocks"][block_number] = {}
block = self.state["blocks"][block_number]
if txhash not in block:
# We have not yet recorded any transfers in this transaction
# (One transaction may contain multiple events if executed by a smart contract).
# Create a tx entry that contains all events by a log index
self.state["blocks"][block_number][txhash] = {}
# Record ERC-20 transfer in our database
self.state["blocks"][block_number][txhash][log_index] = transfer
# Return a pointer that allows us to look up this event later if needed
return f"{block_number}-{txhash}-{log_index}"
| 2.515625 | 3 |
npbench/benchmarks/nbody/nbody_dace.py | frahlg/npbench | 27 | 5790 | <reponame>frahlg/npbench<filename>npbench/benchmarks/nbody/nbody_dace.py
# Adapted from https://github.com/pmocz/nbody-python/blob/master/nbody.py
# TODO: Add GPL-3.0 License
import numpy as np
import dace as dc
"""
Create Your Own N-body Simulation (With Python)
<NAME> (2020) Princeton University, @PMocz
Simulate orbits of stars interacting due to gravity
Code calculates pairwise forces according to Newton's Law of Gravity
"""
N, Nt = (dc.symbol(s, dtype=dc.int64) for s in ('N', 'Nt'))
# @dc.program
# def hstack(out: dc.float64[N, 3], a: dc.float64[N],
# b: dc.float64[N], c: dc.float64[N]):
# out[:, 0] = a
# out[:, 1] = b
# out[:, 2] = c
@dc.program
def getAcc(pos: dc.float64[N, 3], mass: dc.float64[N], G: dc.float64,
softening: dc.float64):
"""
Calculate the acceleration on each particle due to Newton's Law
pos is an N x 3 matrix of positions
mass is an N x 1 vector of masses
G is Newton's Gravitational constant
softening is the softening length
a is N x 3 matrix of accelerations
"""
# positions r = [x,y,z] for all particles
x = pos[:, 0:1]
y = pos[:, 1:2]
z = pos[:, 2:3]
# matrix that stores all pairwise particle separations: r_j - r_i
# dx = x.T - x
# dy = y.T - y
# dz = z.T - z
# dx = np.transpose(x) - x
# dy = np.transpose(y) - y
# dz = np.transpose(z) - z
dx = np.add.outer(-x, x)
dy = np.add.outer(-y, y)
dz = np.add.outer(-z, z)
# matrix that stores 1/r^3 for all particle pairwise particle separations
inv_r3 = (dx**2 + dy**2 + dz**2 + softening**2)
# inv_r3[inv_r3>0] = inv_r3[inv_r3>0]**(-1.5)
I = inv_r3 > 0
np.power(inv_r3, -1.5, out=inv_r3, where=I)
ax = G * (dx * inv_r3) @ mass
ay = G * (dy * inv_r3) @ mass
az = G * (dz * inv_r3) @ mass
# pack together the acceleration components
# a = np.hstack((ax,ay,az))
a = np.ndarray((N, 3), dtype=np.float64)
# hstack(a, ax, ay, az)
a[:, 0] = ax
a[:, 1] = ay
a[:, 2] = az
return a
@dc.program
def getEnergy(pos: dc.float64[N, 3], vel: dc.float64[N, 3],
mass: dc.float64[N], G: dc.float64):
"""
Get kinetic energy (KE) and potential energy (PE) of simulation
pos is N x 3 matrix of positions
vel is N x 3 matrix of velocities
mass is an N x 1 vector of masses
G is Newton's Gravitational constant
KE is the kinetic energy of the system
PE is the potential energy of the system
"""
# Kinetic Energy:
# KE = 0.5 * np.sum(np.sum( mass * vel**2 ))
# KE = 0.5 * np.sum( mass * vel**2 )
KE = 0.5 * np.sum(np.reshape(mass, (N, 1)) * vel**2)
# Potential Energy:
# positions r = [x,y,z] for all particles
x = pos[:, 0:1]
y = pos[:, 1:2]
z = pos[:, 2:3]
# matrix that stores all pairwise particle separations: r_j - r_i
# dx = x.T - x
# dy = y.T - y
# dz = z.T - z
# dx = np.transpose(x) - x
# dy = np.transpose(y) - y
# dz = np.transpose(z) - z
dx = np.add.outer(-x, x)
dy = np.add.outer(-y, y)
dz = np.add.outer(-z, z)
# matrix that stores 1/r for all particle pairwise particle separations
inv_r = np.sqrt(dx**2 + dy**2 + dz**2)
# inv_r[inv_r>0] = 1.0/inv_r[inv_r>0]
I = inv_r > 0
np.divide(1.0, inv_r, out=inv_r, where=I)
# sum over upper triangle, to count each interaction only once
# PE = G * np.sum(np.sum(np.triu(-(mass*mass.T)*inv_r,1)))
# PE = G * np.sum(np.triu(-(mass*mass.T)*inv_r,1))
tmp = -np.multiply.outer(mass, mass) * inv_r
PE = 0.0
for j in range(N):
for k in range(j + 1, N):
PE += tmp[j, k]
PE *= G
return KE, PE
@dc.program
def nbody(mass: dc.float64[N], pos: dc.float64[N, 3], vel: dc.float64[N, 3],
dt: dc.float64, G: dc.float64, softening: dc.float64):
# Convert to Center-of-Mass frame
# vel -= np.mean(mass * vel, axis=0) / np.mean(mass)
# vel -= np.mean(np.reshape(mass, (N, 1)) * vel, axis=0) / np.mean(mass)
# tmp = np.divide(np.mean(np.reshape(mass, (N, 1)) * vel, axis=0), np.mean(mass))
np.subtract(vel,
np.mean(np.reshape(mass,
(N, 1)) * vel, axis=0) / np.mean(mass),
out=vel)
# calculate initial gravitational accelerations
acc = getAcc(pos, mass, G, softening)
# calculate initial energy of system
KE = np.ndarray(Nt + 1, dtype=np.float64)
PE = np.ndarray(Nt + 1, dtype=np.float64)
KE[0], PE[0] = getEnergy(pos, vel, mass, G)
t = 0.0
# Simulation Main Loop
for i in range(Nt):
# (1/2) kick
vel += acc * dt / 2.0
# drift
pos += vel * dt
# update accelerations
acc[:] = getAcc(pos, mass, G, softening)
# (1/2) kick
vel += acc * dt / 2.0
# update time
t += dt
# get energy of system
KE[i + 1], PE[i + 1] = getEnergy(pos, vel, mass, G)
return KE, PE
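

# Illustrative driver (not part of the original benchmark). The random initial
# conditions and the dt/G/softening values are example inputs; passing the
# symbols N and Nt as keyword arguments is an assumption about how the DaCe
# program is invoked outside the npbench harness.
if __name__ == '__main__':
    rng = np.random.default_rng(42)
    n_particles, n_steps = 64, 100
    mass = 20.0 * np.ones(n_particles) / n_particles
    pos = rng.standard_normal((n_particles, 3))
    vel = rng.standard_normal((n_particles, 3))
    KE, PE = nbody(mass, pos, vel, dt=0.01, G=1.0, softening=0.1,
                   N=n_particles, Nt=n_steps)
    print("final total energy:", KE[-1] + PE[-1])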
| 2.625 | 3 |
application/__init__.py | Healthy-Kokoro/Hiroshima | 0 | 5791 | <gh_stars>0
# Third-party imports
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
configurations = {
'development': 'configurations.DevelopmentConfiguration',
'testing': 'configurations.TestingConfiguration',
'staging': 'configurations.StagingConfiguration',
'production': 'configurations.ProductionConfiguration'
}
database = SQLAlchemy()
def create_application(configuration):
application = Flask(__name__, instance_relative_config=True)
application.config.from_object(configurations[configuration])
application.config.from_pyfile('configuration.py', silent=True)
database.init_app(application)
from application.init.views import blueprint
application.register_blueprint(blueprint)
from application.metadata.views import blueprint
application.register_blueprint(blueprint)
return application
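

# Illustrative entry point (not part of the original package); a real project
# would normally do this from a separate run.py or wsgi.py. The 'development'
# key is one of the configurations defined above.
if __name__ == '__main__':
    app = create_application('development')
    app.run(debug=True)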
| 1.953125 | 2 |
lesson5/exceptions_except.py | drednout/letspython | 1 | 5792 | def take_beer(fridge, number=1):
if "beer" not in fridge:
raise Exception("No beer at all:(")
if number > fridge["beer"]:
raise Exception("Not enough beer:(")
fridge["beer"] -= number
if __name__ == "__main__":
fridge = {
"beer": 2,
"milk": 1,
"meat": 3,
}
print("I wanna drink 1 bottle of beer...")
take_beer(fridge)
print("Oooh, great!")
print("I wanna drink 2 bottle of beer...")
try:
take_beer(fridge, 2)
except Exception as e:
print("Error: {}. Let's continue".format(e))
print("Fallback. Try to take 1 bottle of beer...")
take_beer(fridge, 1)
print("Oooh, awesome!")
| 3.984375 | 4 |
icfs/filesystem/exceptions.py | bhanupratapjain/icfs | 0 | 5793 | class ICFSError(IOError):
"""Error while making any filesystem API requests."""
| 1.578125 | 2 |
synapse/storage/data_stores/state/store.py | juhovan/synapse | 1 | 5794 | # -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from collections import namedtuple
from typing import Dict, Iterable, List, Set, Tuple
from twisted.internet import defer
from synapse.api.constants import EventTypes
from synapse.storage._base import SQLBaseStore
from synapse.storage.data_stores.state.bg_updates import StateBackgroundUpdateStore
from synapse.storage.database import Database
from synapse.storage.state import StateFilter
from synapse.types import StateMap
from synapse.util.caches.descriptors import cached
from synapse.util.caches.dictionary_cache import DictionaryCache
logger = logging.getLogger(__name__)
MAX_STATE_DELTA_HOPS = 100
class _GetStateGroupDelta(
namedtuple("_GetStateGroupDelta", ("prev_group", "delta_ids"))
):
"""Return type of get_state_group_delta that implements __len__, which lets
    us use the iterable flag when caching
"""
__slots__ = []
def __len__(self):
return len(self.delta_ids) if self.delta_ids else 0
class StateGroupDataStore(StateBackgroundUpdateStore, SQLBaseStore):
"""A data store for fetching/storing state groups.
"""
def __init__(self, database: Database, db_conn, hs):
super(StateGroupDataStore, self).__init__(database, db_conn, hs)
# Originally the state store used a single DictionaryCache to cache the
# event IDs for the state types in a given state group to avoid hammering
# on the state_group* tables.
#
# The point of using a DictionaryCache is that it can cache a subset
# of the state events for a given state group (i.e. a subset of the keys for a
# given dict which is an entry in the cache for a given state group ID).
#
# However, this poses problems when performing complicated queries
# on the store - for instance: "give me all the state for this group, but
# limit members to this subset of users", as DictionaryCache's API isn't
# rich enough to say "please cache any of these fields, apart from this subset".
# This is problematic when lazy loading members, which requires this behaviour,
# as without it the cache has no choice but to speculatively load all
# state events for the group, which negates the efficiency being sought.
#
# Rather than overcomplicating DictionaryCache's API, we instead split the
# state_group_cache into two halves - one for tracking non-member events,
# and the other for tracking member_events. This means that lazy loading
# queries can be made in a cache-friendly manner by querying both caches
# separately and then merging the result. So for the example above, you
# would query the members cache for a specific subset of state keys
# (which DictionaryCache will handle efficiently and fine) and the non-members
# cache for all state (which DictionaryCache will similarly handle fine)
# and then just merge the results together.
#
# We size the non-members cache to be smaller than the members cache as the
# vast majority of state in Matrix (today) is member events.
self._state_group_cache = DictionaryCache(
"*stateGroupCache*",
# TODO: this hasn't been tuned yet
50000,
)
self._state_group_members_cache = DictionaryCache(
"*stateGroupMembersCache*", 500000,
)
@cached(max_entries=10000, iterable=True)
def get_state_group_delta(self, state_group):
"""Given a state group try to return a previous group and a delta between
the old and the new.
Returns:
(prev_group, delta_ids), where both may be None.
"""
def _get_state_group_delta_txn(txn):
prev_group = self.db.simple_select_one_onecol_txn(
txn,
table="state_group_edges",
keyvalues={"state_group": state_group},
retcol="prev_state_group",
allow_none=True,
)
if not prev_group:
return _GetStateGroupDelta(None, None)
delta_ids = self.db.simple_select_list_txn(
txn,
table="state_groups_state",
keyvalues={"state_group": state_group},
retcols=("type", "state_key", "event_id"),
)
return _GetStateGroupDelta(
prev_group,
{(row["type"], row["state_key"]): row["event_id"] for row in delta_ids},
)
return self.db.runInteraction(
"get_state_group_delta", _get_state_group_delta_txn
)
@defer.inlineCallbacks
def _get_state_groups_from_groups(
self, groups: List[int], state_filter: StateFilter
):
"""Returns the state groups for a given set of groups from the
database, filtering on types of state events.
Args:
groups: list of state group IDs to query
state_filter: The state filter used to fetch state
from the database.
Returns:
Deferred[Dict[int, StateMap[str]]]: Dict of state group to state map.
"""
results = {}
chunks = [groups[i : i + 100] for i in range(0, len(groups), 100)]
for chunk in chunks:
res = yield self.db.runInteraction(
"_get_state_groups_from_groups",
self._get_state_groups_from_groups_txn,
chunk,
state_filter,
)
results.update(res)
return results
def _get_state_for_group_using_cache(self, cache, group, state_filter):
"""Checks if group is in cache. See `_get_state_for_groups`
Args:
cache(DictionaryCache): the state group cache to use
group(int): The state group to lookup
state_filter (StateFilter): The state filter used to fetch state
from the database.
Returns 2-tuple (`state_dict`, `got_all`).
`got_all` is a bool indicating if we successfully retrieved all
requests state from the cache, if False we need to query the DB for the
missing state.
"""
is_all, known_absent, state_dict_ids = cache.get(group)
if is_all or state_filter.is_full():
# Either we have everything or want everything, either way
# `is_all` tells us whether we've gotten everything.
return state_filter.filter_state(state_dict_ids), is_all
# tracks whether any of our requested types are missing from the cache
missing_types = False
if state_filter.has_wildcards():
# We don't know if we fetched all the state keys for the types in
# the filter that are wildcards, so we have to assume that we may
# have missed some.
missing_types = True
else:
# There aren't any wild cards, so `concrete_types()` returns the
# complete list of event types we're wanting.
for key in state_filter.concrete_types():
if key not in state_dict_ids and key not in known_absent:
missing_types = True
break
return state_filter.filter_state(state_dict_ids), not missing_types
@defer.inlineCallbacks
def _get_state_for_groups(
self, groups: Iterable[int], state_filter: StateFilter = StateFilter.all()
):
"""Gets the state at each of a list of state groups, optionally
filtering by type/state_key
Args:
groups: list of state groups for which we want
to get the state.
state_filter: The state filter used to fetch state
from the database.
Returns:
Deferred[Dict[int, StateMap[str]]]: Dict of state group to state map.
"""
member_filter, non_member_filter = state_filter.get_member_split()
# Now we look them up in the member and non-member caches
(
non_member_state,
incomplete_groups_nm,
) = yield self._get_state_for_groups_using_cache(
groups, self._state_group_cache, state_filter=non_member_filter
)
(
member_state,
incomplete_groups_m,
) = yield self._get_state_for_groups_using_cache(
groups, self._state_group_members_cache, state_filter=member_filter
)
state = dict(non_member_state)
for group in groups:
state[group].update(member_state[group])
# Now fetch any missing groups from the database
incomplete_groups = incomplete_groups_m | incomplete_groups_nm
if not incomplete_groups:
return state
cache_sequence_nm = self._state_group_cache.sequence
cache_sequence_m = self._state_group_members_cache.sequence
# Help the cache hit ratio by expanding the filter a bit
db_state_filter = state_filter.return_expanded()
group_to_state_dict = yield self._get_state_groups_from_groups(
list(incomplete_groups), state_filter=db_state_filter
)
# Now lets update the caches
self._insert_into_cache(
group_to_state_dict,
db_state_filter,
cache_seq_num_members=cache_sequence_m,
cache_seq_num_non_members=cache_sequence_nm,
)
# And finally update the result dict, by filtering out any extra
# stuff we pulled out of the database.
for group, group_state_dict in group_to_state_dict.items():
# We just replace any existing entries, as we will have loaded
# everything we need from the database anyway.
state[group] = state_filter.filter_state(group_state_dict)
return state
def _get_state_for_groups_using_cache(
self, groups: Iterable[int], cache: DictionaryCache, state_filter: StateFilter
) -> Tuple[Dict[int, StateMap[str]], Set[int]]:
"""Gets the state at each of a list of state groups, optionally
filtering by type/state_key, querying from a specific cache.
Args:
groups: list of state groups for which we want to get the state.
cache: the cache of group ids to state dicts which
we will pass through - either the normal state cache or the
specific members state cache.
state_filter: The state filter used to fetch state from the
database.
Returns:
Tuple of dict of state_group_id to state map of entries in the
cache, and the state group ids either missing from the cache or
incomplete.
"""
results = {}
incomplete_groups = set()
for group in set(groups):
state_dict_ids, got_all = self._get_state_for_group_using_cache(
cache, group, state_filter
)
results[group] = state_dict_ids
if not got_all:
incomplete_groups.add(group)
return results, incomplete_groups
def _insert_into_cache(
self,
group_to_state_dict,
state_filter,
cache_seq_num_members,
cache_seq_num_non_members,
):
"""Inserts results from querying the database into the relevant cache.
Args:
group_to_state_dict (dict): The new entries pulled from database.
Map from state group to state dict
state_filter (StateFilter): The state filter used to fetch state
from the database.
cache_seq_num_members (int): Sequence number of member cache since
last lookup in cache
cache_seq_num_non_members (int): Sequence number of member cache since
last lookup in cache
"""
# We need to work out which types we've fetched from the DB for the
# member vs non-member caches. This should be as accurate as possible,
# but can be an underestimate (e.g. when we have wild cards)
member_filter, non_member_filter = state_filter.get_member_split()
if member_filter.is_full():
# We fetched all member events
member_types = None
else:
# `concrete_types()` will only return a subset when there are wild
# cards in the filter, but that's fine.
member_types = member_filter.concrete_types()
if non_member_filter.is_full():
# We fetched all non member events
non_member_types = None
else:
non_member_types = non_member_filter.concrete_types()
for group, group_state_dict in group_to_state_dict.items():
state_dict_members = {}
state_dict_non_members = {}
for k, v in group_state_dict.items():
if k[0] == EventTypes.Member:
state_dict_members[k] = v
else:
state_dict_non_members[k] = v
self._state_group_members_cache.update(
cache_seq_num_members,
key=group,
value=state_dict_members,
fetched_keys=member_types,
)
self._state_group_cache.update(
cache_seq_num_non_members,
key=group,
value=state_dict_non_members,
fetched_keys=non_member_types,
)
def store_state_group(
self, event_id, room_id, prev_group, delta_ids, current_state_ids
):
"""Store a new set of state, returning a newly assigned state group.
Args:
event_id (str): The event ID for which the state was calculated
room_id (str)
prev_group (int|None): A previous state group for the room, optional.
delta_ids (dict|None): The delta between state at `prev_group` and
`current_state_ids`, if `prev_group` was given. Same format as
`current_state_ids`.
current_state_ids (dict): The state to store. Map of (type, state_key)
to event_id.
Returns:
Deferred[int]: The state group ID
"""
def _store_state_group_txn(txn):
if current_state_ids is None:
# AFAIK, this can never happen
raise Exception("current_state_ids cannot be None")
state_group = self.database_engine.get_next_state_group_id(txn)
self.db.simple_insert_txn(
txn,
table="state_groups",
values={"id": state_group, "room_id": room_id, "event_id": event_id},
)
# We persist as a delta if we can, while also ensuring the chain
            # of deltas isn't too long, as otherwise read performance degrades.
if prev_group:
is_in_db = self.db.simple_select_one_onecol_txn(
txn,
table="state_groups",
keyvalues={"id": prev_group},
retcol="id",
allow_none=True,
)
if not is_in_db:
raise Exception(
"Trying to persist state with unpersisted prev_group: %r"
% (prev_group,)
)
potential_hops = self._count_state_group_hops_txn(txn, prev_group)
if prev_group and potential_hops < MAX_STATE_DELTA_HOPS:
self.db.simple_insert_txn(
txn,
table="state_group_edges",
values={"state_group": state_group, "prev_state_group": prev_group},
)
self.db.simple_insert_many_txn(
txn,
table="state_groups_state",
values=[
{
"state_group": state_group,
"room_id": room_id,
"type": key[0],
"state_key": key[1],
"event_id": state_id,
}
for key, state_id in delta_ids.items()
],
)
else:
self.db.simple_insert_many_txn(
txn,
table="state_groups_state",
values=[
{
"state_group": state_group,
"room_id": room_id,
"type": key[0],
"state_key": key[1],
"event_id": state_id,
}
for key, state_id in current_state_ids.items()
],
)
# Prefill the state group caches with this group.
# It's fine to use the sequence like this as the state group map
# is immutable. (If the map wasn't immutable then this prefill could
# race with another update)
current_member_state_ids = {
s: ev
for (s, ev) in current_state_ids.items()
if s[0] == EventTypes.Member
}
txn.call_after(
self._state_group_members_cache.update,
self._state_group_members_cache.sequence,
key=state_group,
value=dict(current_member_state_ids),
)
current_non_member_state_ids = {
s: ev
for (s, ev) in current_state_ids.items()
if s[0] != EventTypes.Member
}
txn.call_after(
self._state_group_cache.update,
self._state_group_cache.sequence,
key=state_group,
value=dict(current_non_member_state_ids),
)
return state_group
return self.db.runInteraction("store_state_group", _store_state_group_txn)
def purge_unreferenced_state_groups(
self, room_id: str, state_groups_to_delete
) -> defer.Deferred:
"""Deletes no longer referenced state groups and de-deltas any state
groups that reference them.
Args:
room_id: The room the state groups belong to (must all be in the
same room).
state_groups_to_delete (Collection[int]): Set of all state groups
to delete.
"""
return self.db.runInteraction(
"purge_unreferenced_state_groups",
self._purge_unreferenced_state_groups,
room_id,
state_groups_to_delete,
)
def _purge_unreferenced_state_groups(self, txn, room_id, state_groups_to_delete):
logger.info(
"[purge] found %i state groups to delete", len(state_groups_to_delete)
)
rows = self.db.simple_select_many_txn(
txn,
table="state_group_edges",
column="prev_state_group",
iterable=state_groups_to_delete,
keyvalues={},
retcols=("state_group",),
)
remaining_state_groups = {
row["state_group"]
for row in rows
if row["state_group"] not in state_groups_to_delete
}
logger.info(
"[purge] de-delta-ing %i remaining state groups",
len(remaining_state_groups),
)
# Now we turn the state groups that reference to-be-deleted state
# groups to non delta versions.
for sg in remaining_state_groups:
logger.info("[purge] de-delta-ing remaining state group %s", sg)
curr_state = self._get_state_groups_from_groups_txn(txn, [sg])
curr_state = curr_state[sg]
self.db.simple_delete_txn(
txn, table="state_groups_state", keyvalues={"state_group": sg}
)
self.db.simple_delete_txn(
txn, table="state_group_edges", keyvalues={"state_group": sg}
)
self.db.simple_insert_many_txn(
txn,
table="state_groups_state",
values=[
{
"state_group": sg,
"room_id": room_id,
"type": key[0],
"state_key": key[1],
"event_id": state_id,
}
for key, state_id in curr_state.items()
],
)
logger.info("[purge] removing redundant state groups")
txn.executemany(
"DELETE FROM state_groups_state WHERE state_group = ?",
((sg,) for sg in state_groups_to_delete),
)
txn.executemany(
"DELETE FROM state_groups WHERE id = ?",
((sg,) for sg in state_groups_to_delete),
)
@defer.inlineCallbacks
def get_previous_state_groups(self, state_groups):
"""Fetch the previous groups of the given state groups.
Args:
state_groups (Iterable[int])
Returns:
Deferred[dict[int, int]]: mapping from state group to previous
state group.
"""
rows = yield self.db.simple_select_many_batch(
table="state_group_edges",
column="prev_state_group",
iterable=state_groups,
keyvalues={},
retcols=("prev_state_group", "state_group"),
desc="get_previous_state_groups",
)
return {row["state_group"]: row["prev_state_group"] for row in rows}
def purge_room_state(self, room_id, state_groups_to_delete):
"""Deletes all record of a room from state tables
Args:
room_id (str):
state_groups_to_delete (list[int]): State groups to delete
"""
return self.db.runInteraction(
"purge_room_state",
self._purge_room_state_txn,
room_id,
state_groups_to_delete,
)
def _purge_room_state_txn(self, txn, room_id, state_groups_to_delete):
# first we have to delete the state groups states
logger.info("[purge] removing %s from state_groups_state", room_id)
self.db.simple_delete_many_txn(
txn,
table="state_groups_state",
column="state_group",
iterable=state_groups_to_delete,
keyvalues={},
)
# ... and the state group edges
logger.info("[purge] removing %s from state_group_edges", room_id)
self.db.simple_delete_many_txn(
txn,
table="state_group_edges",
column="state_group",
iterable=state_groups_to_delete,
keyvalues={},
)
# ... and the state groups
logger.info("[purge] removing %s from state_groups", room_id)
self.db.simple_delete_many_txn(
txn,
table="state_groups",
column="id",
iterable=state_groups_to_delete,
keyvalues={},
)
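

# Illustrative call pattern (not part of the original module). It assumes
# `store` is an already-initialised StateGroupDataStore obtained from a
# configured Database/HomeServer pair, which is why no construction is shown.
@defer.inlineCallbacks
def _example_store_and_fetch(store, event_id, room_id, state_ids):
    # Persist a brand-new group: no prev_group, so a full snapshot is written.
    group = yield store.store_state_group(
        event_id, room_id, prev_group=None, delta_ids=None,
        current_state_ids=state_ids,
    )
    # Fetch it back, restricted to the room create event only.
    state = yield store._get_state_for_groups(
        [group], StateFilter.from_types([(EventTypes.Create, "")])
    )
    defer.returnValue(state[group])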
| 1.859375 | 2 |
core/migrations/0011_itemvariation_variation.py | manulangat1/djcommerce | 1 | 5795 | # Generated by Django 2.2.6 on 2020-02-09 12:24
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('core', '0010_auto_20200130_1135'),
]
operations = [
migrations.CreateModel(
name='Variation',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('item', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Item')),
],
options={
'unique_together': {('item', 'name')},
},
),
migrations.CreateModel(
name='ItemVariation',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('value', models.CharField(max_length=50)),
('attachment', models.ImageField(upload_to='variations/')),
('variation', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Variation')),
],
options={
'unique_together': {('variation', 'value')},
},
),
]
| 1.710938 | 2 |
mnist/convolutional.py | Colins-Ford/mnist-webapp | 0 | 5796 | import os
from mnist import model
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
data = input_data.read_data_sets("data/dataset/", one_hot=True)
# model
with tf.variable_scope("convolutional"):
x = tf.placeholder(tf.float32, [None, 784])
keep_prob = tf.placeholder(tf.float32)
y, variables = model.convolutional(x, keep_prob)
# train
y_ = tf.placeholder(tf.float32, [None, 10])
cross_entropy = -tf.reduce_sum(y_ * tf.log(y))
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
saver = tf.train.Saver(variables)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for i in range(20000):
batch = data.train.next_batch(50)
if i % 100 == 0:
train_accuracy = accuracy.eval(feed_dict={x: batch[0], y_: batch[1], keep_prob: 1.0})
print("step %d, training accuracy %g" % (i, train_accuracy))
sess.run(train_step, feed_dict={x: batch[0], y_: batch[1], keep_prob: 0.5})
print(sess.run(accuracy, feed_dict={x: data.test.images, y_: data.test.labels, keep_prob: 1.0}))
path = saver.save(
sess, os.path.join(os.path.dirname(__file__), 'data', 'convolutional.ckpt'),
write_meta_graph=False, write_state=False)
print("Saved:", path)
| 3.140625 | 3 |
parse_scripts/import_osm.py | nokout/au_address | 1 | 5797 | import requests
import codecs
query1 = """<union>
<query type="way">
<has-kv k="addr:housenumber"/>
<has-kv k="addr:street:name"/>
<has-kv k="addr:street:type"/>
<has-kv k="addr:state"/>
<bbox-query e="%s" n="%s" s="%s" w="%s"/>
</query>
<query type="way">
<has-kv k="addr:housenumber"/>
<has-kv k="addr:street:name"/>
<has-kv k="addr:street:type"/>
<has-kv k="addr:city"/>
<bbox-query e="%s" n="%s" s="%s" w="%s"/>
</query>
<query type="way">
<has-kv k="addr:housenumber"/>
<has-kv k="addr:street:name"/>
<has-kv k="addr:street:type"/>
<has-kv k="addr:postcode"/>
<bbox-query e="%s" n="%s" s="%s" w="%s"/>
</query>
<query type="node">
<has-kv k="addr:housenumber"/>
<has-kv k="addr:street:name"/>
<has-kv k="addr:street:type"/>
<has-kv k="addr:state"/>
<bbox-query e="%s" n="%s" s="%s" w="%s"/>
</query>
<query type="node">
<has-kv k="addr:housenumber"/>
<has-kv k="addr:street:name"/>
<has-kv k="addr:street:type"/>
<has-kv k="addr:city"/>
<bbox-query e="%s" n="%s" s="%s" w="%s"/>
</query>
<query type="node">
<has-kv k="addr:housenumber"/>
<has-kv k="addr:street:name"/>
<has-kv k="addr:street:type"/>
<has-kv k="addr:postcode"/>
<bbox-query e="%s" n="%s" s="%s" w="%s"/>
</query>
</union>
<print/>""" % ((-70.000000, 50.000000, 25.000000, -125.000000) * 6)
r1 = requests.post('http://overpass-api.de/api/interpreter/', data=query1)
r1.encoding = 'utf-8'
f = codecs.open('data/osm_data.xml', encoding='utf-8' , mode='w+')
f.write(r1.text)
query2 = """<union>
<query type="way">
<has-kv k="addr:street"/>
<has-kv k="addr:street:name"/>
<has-kv k="addr:street:prefix"/>
<has-kv k="addr:street:type"/>
<bbox-query e="%s" n="%s" s="%s" w="%s"/>
</query>
<query type="node">
<has-kv k="addr:street"/>
<has-kv k="addr:street:name"/>
<has-kv k="addr:street:prefix"/>
<has-kv k="addr:street:type"/>
<bbox-query e="%s" n="%s" s="%s" w="%s"/>
</query>
</union>
<print/>""" % ((-87.61309146881104, 41.890042371392965, 41.87234107841773, -87.64235973358154) * 2)
#r2 = requests.post('http://overpass-api.de/api/interpreter/', data=query2)
#f = codecs.open("data/osm_data_street.xml", "wb", "utf-8")
#r2.encoding = 'utf-8'
#f.write(r2.text)
query3 = """<union>
<query type="way">
<has-kv k="addr:full" regv="^[0-9]+.*[a-z]+.*[0-9]{5}.*"/>
<bbox-query e="%s" n="%s" s="%s" w="%s"/>
</query>
<query type="node">
<has-kv k="addr:full" regv="^[0-9]+.*[a-z]+.*[0-9]{5}.*"/>
<bbox-query e="%s" n="%s" s="%s" w="%s"/>
</query>
</union>
<print/>
""" % ((-70.000000, 50.000000, 25.000000, -125.000000) * 2)
if __name__ == '__main__' :
r3 = requests.post('http://overpass-api.de/api/interpreter/', data=query3)
f = codecs.open("data/osm_data_full_addr.xml", "wb", "utf-8")
r3.encoding = 'utf-8'
f.write(r3.text)
| 2.1875 | 2 |
tools/test_net_batch.py | abhirevan/pedestrian-detector | 0 | 5798 | #!/usr/bin/env python
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by <NAME>
# --------------------------------------------------------
"""Test a Fast R-CNN network on an image database."""
import _init_paths
from fast_rcnn.test import test_net
from fast_rcnn.config import cfg, cfg_from_file, cfg_from_list
from datasets.factory import get_imdb
import caffe
import argparse
import pprint
import time, os, sys
import pandas as pd
def splitall(path):
allparts = []
while 1:
parts = os.path.split(path)
if parts[0] == path: # sentinel for absolute paths
allparts.insert(0, parts[0])
break
elif parts[1] == path: # sentinel for relative paths
allparts.insert(0, parts[1])
break
else:
path = parts[0]
allparts.insert(0, parts[1])
return allparts
def parse_args():
"""
Parse input arguments
"""
parser = argparse.ArgumentParser(description='Test a Fast R-CNN network pipeline')
parser.add_argument('--gpu', dest='gpu_id', help='GPU id to use',
default=0, type=int, required=True)
parser.add_argument('--dir', dest='dir',
help='Directory of the model files',
default="", type=str, required=True)
parser.add_argument('--models', dest='model_files',
help='Text file with names of models',
default=None, type=str, required=True)
parser.add_argument('--prototxt', dest='prototxt',
help='prototxt', default=None, type=str, required=True)
parser.add_argument('--imdb', dest='imdb_name',
help='dataset to test',
default='ped_test_small', type=str, required=True)
parser.add_argument('--cfg', dest='cfg_file',
help='cfg',
default='experiments/cfgs/faster_rcnn_end2end.yml', type=str)
parser.add_argument('--res', dest='res_file',
help='result file',
default='', type=str, required=True)
args = parser.parse_args()
return args
def run_test_net(gpu_id, caffemodel, prototxt, imdb_name, cfg_file):
if cfg_file is not None:
cfg_from_file(cfg_file)
cfg.GPU_ID = gpu_id
print('Using config:')
pprint.pprint(cfg)
while not os.path.exists(caffemodel):
print('Waiting for {} to exist...'.format(caffemodel))
time.sleep(10)
caffe.set_mode_gpu()
caffe.set_device(gpu_id)
net = caffe.Net(prototxt, caffemodel, caffe.TEST)
net.name = os.path.splitext(os.path.basename(caffemodel))[0]
imdb = get_imdb(imdb_name)
if not cfg.TEST.HAS_RPN:
imdb.set_proposal_method(cfg.TEST.PROPOSAL_METHOD)
    n, _ = os.path.splitext(caffemodel)
paths = splitall(n)
proposal_prefix = paths[-1]
return test_net(net, imdb, max_per_image=100, vis=False, proposal_prefix=proposal_prefix)
def run_test_nets(gpu_id, dir, model_files, prototxt, imdb_name, cfg_file, res_file):
models = [line.rstrip('\n') for line in open(os.path.join(dir, model_files))]
df_results = pd.DataFrame()
for model in models:
results = run_test_net(gpu_id, os.path.join(dir, model), prototxt, imdb_name, cfg_file)
for result in results:
result['file'] = model
df_results = df_results.append(results, ignore_index=True)
df_results.to_csv(os.path.join(dir, res_file))
if __name__ == '__main__':
# args = parse_args()
gpu_id = 0
# dir = '/home/abhijitcbim/git/pedestrian-detector/output/faster_rcnn_end2end/train/backup'
# model_files = 'test.txt'
args = parse_args()
print('Called with args:')
print(args)
run_test_nets(args.gpu_id, args.dir, args.model_files, args.prototxt, args.imdb_name, args.cfg_file, args.res_file)
# run_test_net(gpu_id,caffemodel, prototxt, imdb_name, cfg_file)
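
    # Example invocation (illustrative; directory and prototxt paths are
    # placeholders, the flags come from parse_args() above):
    #
    #   python tools/test_net_batch.py --gpu 0 \
    #       --dir output/faster_rcnn_end2end/train/backup \
    #       --models test.txt \
    #       --prototxt models/pedestrian/test.prototxt \
    #       --imdb ped_test_small \
    #       --cfg experiments/cfgs/faster_rcnn_end2end.yml \
    #       --res results.csv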
| 2.375 | 2 |
mygallary/urls.py | mangowilliam/my_gallary | 0 | 5799 | <gh_stars>0
from django.conf import settings
from django.conf.urls.static import static
from django.conf.urls import url
from . import views
urlpatterns = [
    url(r'^$', views.gallary, name='gallary'),
    url(r'^search/', views.search_image, name='search_image'),
    url(r'^details/(\d+)', views.search_location, name='images')
]
if settings.DEBUG:
urlpatterns+= static(settings.MEDIA_URL, document_root = settings.MEDIA_ROOT)
| 1.789063 | 2 |