max_stars_repo_path (string, 4-245 chars) | max_stars_repo_name (string, 7-115 chars) | max_stars_count (int64, 101-368k) | id (string, 2-8 chars) | content (string, 6-1.03M chars) |
---|---|---|---|---|
autox/autox_server/model/lgb_with_fe.py | fanghy06/AutoX | 499 | 12762478 | import warnings
warnings.filterwarnings('ignore')
from autox.autox_server.model import model_util
def lgb_with_fe(G_df_dict, G_data_info, G_hist, is_train, remain_time, params, lgb_para_dict, data_name, exp_name):
remain_time = model_util.lgb_model(G_df_dict['BIG_FE'], G_data_info, G_hist, is_train, remain_time, exp_name, params,
lgb_para_dict, data_name)
return remain_time |
machina/apps/forum_member/admin.py | BrendaH/django-machina | 572 | 12762490 | """
Forum member model admin definitions
====================================
This module defines admin classes used to populate the Django administration dashboard.
"""
from django.contrib import admin
from machina.core.db.models import get_model
from machina.models.fields import MarkupTextField, MarkupTextFieldWidget
ForumProfile = get_model('forum_member', 'ForumProfile')
class ForumProfileAdmin(admin.ModelAdmin):
""" The Forum Profile model admin. """
list_display = ('id', 'user', 'posts_count', )
list_filter = ('posts_count', )
list_display_links = ('id', 'user', )
raw_id_fields = ('user', )
search_fields = ('user__username',)
formfield_overrides = {
MarkupTextField: {'widget': MarkupTextFieldWidget},
}
admin.site.register(ForumProfile, ForumProfileAdmin)
|
compressai/layers/gdn.py | Conzel/CompressAI | 515 | 12762501 | <reponame>Conzel/CompressAI<filename>compressai/layers/gdn.py
# Copyright (c) 2021-2022, InterDigital Communications, Inc
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted (subject to the limitations in the disclaimer
# below) provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of InterDigital Communications, Inc nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
# NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY
# THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
# CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT
# NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
from compressai.ops.parametrizers import NonNegativeParametrizer
__all__ = ["GDN", "GDN1"]
class GDN(nn.Module):
r"""Generalized Divisive Normalization layer.
Introduced in `"Density Modeling of Images Using a Generalized Normalization
Transformation" <https://arxiv.org/abs/1511.06281>`_,
by <NAME>, <NAME>, and <NAME>, (2016).
.. math::
y[i] = \frac{x[i]}{\sqrt{\beta[i] + \sum_j(\gamma[j, i] * x[j]^2)}}
"""
def __init__(
self,
in_channels: int,
inverse: bool = False,
beta_min: float = 1e-6,
gamma_init: float = 0.1,
):
super().__init__()
beta_min = float(beta_min)
gamma_init = float(gamma_init)
self.inverse = bool(inverse)
self.beta_reparam = NonNegativeParametrizer(minimum=beta_min)
beta = torch.ones(in_channels)
beta = self.beta_reparam.init(beta)
self.beta = nn.Parameter(beta)
self.gamma_reparam = NonNegativeParametrizer()
gamma = gamma_init * torch.eye(in_channels)
gamma = self.gamma_reparam.init(gamma)
self.gamma = nn.Parameter(gamma)
def forward(self, x: Tensor) -> Tensor:
_, C, _, _ = x.size()
beta = self.beta_reparam(self.beta)
gamma = self.gamma_reparam(self.gamma)
gamma = gamma.reshape(C, C, 1, 1)
norm = F.conv2d(x ** 2, gamma, beta)
if self.inverse:
norm = torch.sqrt(norm)
else:
norm = torch.rsqrt(norm)
out = x * norm
return out
class GDN1(GDN):
r"""Simplified GDN layer.
Introduced in `"Computationally Efficient Neural Image Compression"
<http://arxiv.org/abs/1912.08771>`_, by <NAME>, <NAME>, <NAME>, and <NAME>, (2019).
.. math::
y[i] = \frac{x[i]}{\beta[i] + \sum_j(\gamma[j, i] * |x[j]|)}
"""
def forward(self, x: Tensor) -> Tensor:
_, C, _, _ = x.size()
beta = self.beta_reparam(self.beta)
gamma = self.gamma_reparam(self.gamma)
gamma = gamma.reshape(C, C, 1, 1)
norm = F.conv2d(torch.abs(x), gamma, beta)
if not self.inverse:
norm = 1.0 / norm
out = x * norm
return out
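# Hedged usage sketch (not part of the original file): the channel count and input shape
# below are assumptions chosen only to illustrate how the GDN/GDN1 layers are applied.
if __name__ == "__main__":
    x = torch.randn(1, 32, 16, 16)             # N, C, H, W feature map
    gdn = GDN(in_channels=32)                   # forward GDN, e.g. in an encoder
    igdn = GDN1(in_channels=32, inverse=True)   # simplified inverse GDN, e.g. in a decoder
    y = gdn(x)
    x_hat = igdn(y)
    print(y.shape, x_hat.shape)                 # both torch.Size([1, 32, 16, 16])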
|
tests/common/test_run/ascend/five2four_run.py | tianjiashuo/akg | 286 | 12762521 | <reponame>tianjiashuo/akg
# Copyright 2019-2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from tests.common.tensorio import compare_tensor
from akg.utils import kernel_exec as utils
from akg.ops.array.ascend import Five2Four
from akg import tvm
from tests.common.base import get_rtol_atol
from tests.common.gen_random import random_gaussian
import math
def compute_blockdim(shape):
size = 1
if isinstance(shape, (list, tuple)):
for i in shape:
size = size * i
elif isinstance(shape, int):
size = shape
else:
size = 2
return min(32, math.ceil(size / 16384))
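# Illustrative values for the helper above (the shapes are assumed, not from the test suite):
#   compute_blockdim((32, 512))  -> ceil(16384 / 16384)  = 1
#   compute_blockdim((64, 4096)) -> ceil(262144 / 16384) = 16
#   compute_blockdim(10 ** 7)    -> capped at the 32-block maximum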
def five2four_execute(shape4d, out_dtype, format, dtype, attrs):
# Generate data
op_attrs = [shape4d, out_dtype, format]
if attrs is None:
attrs = {}
if 'tuning' in attrs.keys():
t = attrs.get("tuning", False)
kernel_name = attrs.get("kernel_name", False)
input, bench_mark = gen_data(shape4d, dtype, out_dtype, format)
shape_5d = input.shape
mod = five2four_compile(shape_5d, dtype, op_attrs, attrs, kernel_name=kernel_name, tuning=t)
if t:
output = np.full(shape4d, np.nan, out_dtype)
return mod, bench_mark, (input, output)
else:
return mod
else:
input, bench_mark = gen_data(shape4d, dtype, out_dtype, format)
# mod launch
shape_5d = input.shape
mod = five2four_compile(shape_5d, dtype, op_attrs, attrs)
output = np.full(shape4d, np.nan, out_dtype)
args = [input, output]
# if attrs.get("dynamic"):
# for i in range(len(shape4d) - 1, -1, -1):
# args.append(shape4d[i])
if attrs.get("dynamic"):
args.append(shape_5d[0])
args.append(shape_5d[1])
args.append(shape_5d[4])
block_dim = compute_blockdim(shape4d)
args.append(block_dim)
output = utils.mod_launch(mod, args, outputs=(1,), expect=bench_mark)
# compare result
rtol, atol = get_rtol_atol("five2four", dtype)
compare_result = compare_tensor(output, bench_mark, rtol=rtol, atol=atol, equal_nan=True)
return input, output, bench_mark, compare_result
def five2four_compile(shape_5d, dtype, op_attrs, attrs, kernel_name='five2four', tuning=False):
if attrs.get("dynamic"):
var_shape = []
shape4d, dst_type, _ = op_attrs
channel_idx = 1
for i in range(len(shape_5d)):
if shape_5d[i] == 1:
var_shape.append(shape_5d[i])
else:
var_shape.append(tvm.var("I" + str(i)))
build_shape = var_shape
else:
build_shape = shape_5d
return utils.op_build_test(Five2Four, [build_shape], [dtype], op_attrs, kernel_name=kernel_name, attrs=attrs, tuning=tuning)
def gen_data(shape, dtype, out_dtype, format):
bench_mark = random_gaussian(shape, miu=1, sigma=0.3).astype(dtype)
if format == 'NCHW':
n, c, h, w = shape
if c % 16 != 0:
pad_input_shape = [n, c, h, w]
pad_c = (c + 15) // 16 * 16
pad_input_shape[1] = pad_c
pad_input = np.zeros(pad_input_shape).astype(dtype)
pad_input[:, :c, :, :] = bench_mark
new_shape = [n, pad_c // 16, 16, h, w]
input = pad_input.reshape(new_shape).transpose(0, 1, 3, 4, 2)
else:
new_shape = [n, c // 16, 16, h, w]
input = bench_mark.reshape(new_shape).transpose(0, 1, 3, 4, 2)
elif format == 'NHWC':
n, h, w, c = shape
if c % 16 != 0:
pad_input_shape = [n, h, w, c]
pad_c = (c + 15) // 16 * 16
pad_input_shape[3] = pad_c
pad_input = np.zeros(pad_input_shape).astype(dtype)
pad_input[:, :, :, :c] = bench_mark
new_shape = [n, h, w, pad_c // 16, 16]
input = pad_input.reshape(new_shape).transpose(0, 3, 1, 2, 4)
else:
new_shape = [n, h, w, c // 16, 16]
input = bench_mark.reshape(new_shape).transpose(0, 3, 1, 2, 4)
bench_mark = bench_mark.astype(out_dtype)
return input, bench_mark
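# Worked example for gen_data above (the sizes are assumed, for illustration only):
# an NCHW benchmark of shape (2, 20, 4, 4) is zero-padded to 32 channels, reshaped to
# (2, 2, 16, 4, 4) and transposed to (2, 2, 4, 4, 16), i.e. the packed 5D layout
# (C0 = 16) that five2four converts back to the original 4D shape.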
|
asv_bench/benchmarks/io/style.py | umangino/pandas | 28,899 | 12762524 | import numpy as np
from pandas import (
DataFrame,
IndexSlice,
)
class Render:
params = [[12, 24, 36], [12, 120]]
param_names = ["cols", "rows"]
def setup(self, cols, rows):
self.df = DataFrame(
np.random.randn(rows, cols),
columns=[f"float_{i+1}" for i in range(cols)],
index=[f"row_{i+1}" for i in range(rows)],
)
def time_apply_render(self, cols, rows):
self._style_apply()
self.st._render_html(True, True)
def peakmem_apply_render(self, cols, rows):
self._style_apply()
self.st._render_html(True, True)
def time_classes_render(self, cols, rows):
self._style_classes()
self.st._render_html(True, True)
def peakmem_classes_render(self, cols, rows):
self._style_classes()
self.st._render_html(True, True)
def time_tooltips_render(self, cols, rows):
self._style_tooltips()
self.st._render_html(True, True)
def peakmem_tooltips_render(self, cols, rows):
self._style_tooltips()
self.st._render_html(True, True)
def time_format_render(self, cols, rows):
self._style_format()
self.st._render_html(True, True)
def peakmem_format_render(self, cols, rows):
self._style_format()
self.st._render_html(True, True)
def time_apply_format_hide_render(self, cols, rows):
self._style_apply_format_hide()
self.st._render_html(True, True)
def peakmem_apply_format_hide_render(self, cols, rows):
self._style_apply_format_hide()
self.st._render_html(True, True)
def _style_apply(self):
def _apply_func(s):
return [
"background-color: lightcyan" if s.name == "row_1" else "" for v in s
]
self.st = self.df.style.apply(_apply_func, axis=1)
def _style_classes(self):
classes = self.df.applymap(lambda v: ("cls-1" if v > 0 else ""))
classes.index, classes.columns = self.df.index, self.df.columns
self.st = self.df.style.set_td_classes(classes)
def _style_format(self):
ic = int(len(self.df.columns) / 4 * 3)
ir = int(len(self.df.index) / 4 * 3)
# apply a formatting function
# subset is flexible but hinders vectorised solutions
self.st = self.df.style.format(
"{:,.3f}", subset=IndexSlice["row_1":f"row_{ir}", "float_1":f"float_{ic}"]
)
def _style_apply_format_hide(self):
self.st = self.df.style.applymap(lambda v: "color: red;")
self.st.format("{:.3f}")
self.st.hide_index(self.st.index[1:])
self.st.hide_columns(self.st.columns[1:])
def _style_tooltips(self):
ttips = DataFrame("abc", index=self.df.index[::2], columns=self.df.columns[::2])
self.st = self.df.style.set_tooltips(ttips)
self.st.hide_index(self.st.index[12:])
self.st.hide_columns(self.st.columns[12:])
|
third_party/closure_compiler/compiler.py | zealoussnow/chromium | 14,668 | 12762538 | <reponame>zealoussnow/chromium<filename>third_party/closure_compiler/compiler.py
#!/usr/bin/python
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Runs Closure compiler on JavaScript files to check for errors and produce
minified output."""
from __future__ import print_function
import os
import subprocess
_CURRENT_DIR = os.path.join(os.path.dirname(__file__))
_JAVA_PATH = os.path.join(_CURRENT_DIR, "..", "jdk", "current", "bin", "java")
assert os.path.isfile(_JAVA_PATH), "java only allowed in android builds"
class Compiler(object):
"""Runs the Closure compiler on given source files to typecheck them
and produce minified output."""
_JAR_COMMAND = [
_JAVA_PATH,
"-jar",
"-Xms1024m",
"-client",
"-XX:+TieredCompilation",
]
def __init__(self, verbose=False):
"""
Args:
verbose: Whether this class should output diagnostic messages.
"""
self._compiler_jar = os.path.join(_CURRENT_DIR, "compiler", "compiler.jar")
self._verbose = verbose
def _log_debug(self, msg, error=False):
"""Logs |msg| to stdout if --verbose/-v is passed when invoking this script.
Args:
msg: A debug message to log.
"""
if self._verbose:
print("(INFO) %s" % msg)
def run_jar(self, jar, args):
"""Runs a .jar from the command line with arguments.
Args:
jar: A file path to a .jar file
args: A list of command line arguments to be passed when running the .jar.
Return:
(exit_code, stderr) The exit code of the command (e.g. 0 for success) and
the stderr collected while running |jar| (as a string).
"""
shell_command = " ".join(self._JAR_COMMAND + [jar] + args)
self._log_debug("Running jar: %s" % shell_command)
devnull = open(os.devnull, "w")
process = subprocess.Popen(shell_command, universal_newlines=True,
shell=True, stdout=devnull,
stderr=subprocess.PIPE)
_, stderr = process.communicate()
return process.returncode, stderr
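# Hedged usage sketch (illustrative only: the file names are assumed, and --js /
# --js_output_file are generic Closure compiler flags rather than Chromium build settings):
if __name__ == "__main__":
    compiler = Compiler(verbose=True)
    exit_code, stderr = compiler.run_jar(
        compiler._compiler_jar,
        ["--js", "input.js", "--js_output_file", "output.min.js"])
    if exit_code != 0:
        print("Closure compiler failed:\n%s" % stderr)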
|
src/main/starlark/builtins_bzl/common/rule_util.bzl | AyuMol758/bazel | 16,989 | 12762541 | <reponame>AyuMol758/bazel
# Copyright 2021 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines create_rule and create_dep macros"""
def create_rule(impl, attrs = {}, deps = [], fragments = [], remove_attrs = [], **kwargs):
"""Creates a rule composed from dependencies.
Args:
impl: The implementation function of the rule, taking as parameters the
rule ctx followed by the executable function of each dependency
attrs: Dict of attributes required by the rule. These will override any
conflicting attributes specified by dependencies
deps: List of dependency structs, each created using 'create_dep'. Their
executable functions are what 'impl' receives after the rule ctx
fragments: List of configuration fragments required by the rule
remove_attrs: List of attributes to remove from the implementation.
**kwargs: extra args to be passed for rule creation
Returns:
The composed rule
"""
merged_attrs = dict()
fragments = list(fragments)
merged_mandatory_attrs = []
for dep in deps:
merged_attrs.update(dep.attrs)
fragments.extend(dep.fragments)
merged_mandatory_attrs.extend(dep.mandatory_attrs)
merged_attrs.update(attrs)
for attr in remove_attrs:
if attr in merged_mandatory_attrs:
fail("Cannot remove mandatory attribute %s" % attr)
merged_attrs.pop(attr)
return rule(
implementation = impl,
attrs = merged_attrs,
fragments = fragments,
**kwargs
)
def create_dep(call, attrs = {}, fragments = [], mandatory_attrs = None):
"""Combines a dependency's executable function, attributes, and fragments.
Args:
call: the executable function
attrs: dict of required rule attrs
fragments: list of required configuration fragments
mandatory_attrs: list of attributes that can't be removed later
(when not set, all attributes are mandatory)
Returns:
The struct
"""
return _create_dep(call, attrs, fragments, mandatory_attrs if mandatory_attrs else attrs.keys())
def _create_dep(call, attrs = {}, fragments = [], mandatory_attrs = []):
return struct(
call = call,
attrs = attrs,
fragments = fragments,
mandatory_attrs = mandatory_attrs,
)
def create_composite_dep(merge_func, *deps):
"""Creates a dependency struct from multiple dependencies
Args:
merge_func: The executable function to evaluate the dependencies.
*deps: The dependencies to compose, provided as positional args
Returns:
A dependency struct
"""
merged_attrs = dict()
merged_frags = []
merged_mandatory_attrs = []
for dep in deps:
merged_attrs.update(dep.attrs)
merged_frags.extend(dep.fragments)
merged_mandatory_attrs.extend(dep.mandatory_attrs)
return _create_dep(
call = merge_func,
attrs = merged_attrs,
fragments = merged_frags,
mandatory_attrs = merged_mandatory_attrs,
)
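# Hedged usage sketch (the rule, attribute, and function names below are invented for
# illustration; they are not actual Bazel builtins):
#
#   _srcs_dep = create_dep(
#       call = _collect_srcs,
#       attrs = {"srcs": attr.label_list(allow_files = True)},
#       fragments = ["cpp"],
#   )
#
#   my_rule = create_rule(
#       impl = _my_rule_impl,  # per the docstring above: the rule ctx plus each dep's call
#       deps = [_srcs_dep],
#       fragments = ["java"],
#   )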
|
unit_testing_course/lesson1/task1/task.py | behzod/pycharm-courses | 213 | 12762546 | # TODO: type solution here
|
vlm/data.py | woojeongjin/vokenization | 173 | 12762555 | <gh_stars>100-1000
import copy
import os
import random
import h5py
import torch
from torch.utils.data import DataLoader, Dataset
import tqdm
class CoLDataset(Dataset):
IGNORE_ID = -100
sent_strategy = 'first'
def __init__(self, file_path, tokenizer_name, tokenizer, block_size=512,
split_sent=False, voken_dir=None, suffix=None, verbose=False,
voken_ablation=None):
# Open token's hdf5
token_path = file_path + '.' + tokenizer_name + '.hdf5'
assert os.path.isfile(token_path)
if verbose:
print("-------- Load Data -------")
print("Load tokens from", token_path)
self.token_hdf5 = h5py.File(token_path, 'r')
self.tokenizer = tokenizer
self.tokens = self.token_hdf5['tokens']
self.verbose = verbose
self.voken_ablation = voken_ablation
self._iter_cnt = 0
# Open voken's hdf5 and load voken ids
if voken_dir is not None:
assert suffix is not None, 'Please provide suffix of the voken, e.g., vg_nococo.5000.'
self.sent_level = 'sent' in voken_dir
dset_fname = os.path.split(file_path)[-1]
voken_path = os.path.join(voken_dir, f"{dset_fname}.{suffix}.hdf5")
voken_ids_path = os.path.join(voken_dir, f"{dset_fname}.{suffix}.ids")
if verbose:
print("Load vokens from", voken_path)
self.voken_hdf5 = h5py.File(voken_path, 'r')
self.vokens = self.voken_hdf5['vokens']
assert len(self.vokens) == len(self.tokens)
self._voken_ids = list(
map(lambda x: x.strip(),
open(voken_ids_path).readlines())
)
if verbose:
print("\t with voken size", self.voken_size)
print("\t top 5 voken ids are:", self._voken_ids[:5])
else:
self.vokens = None
# Split for every block_size tokens
# The last block without full length will be dropped.
num_tokens = len(self.tokens)
self.starts = list(range(0, num_tokens, block_size))
self.batches = list(zip(self.starts[:-1], self.starts[1:]))
manual_filtered = False
if "en.train.raw" in file_path and tokenizer_name == "bert-base-uncased":
self.batches = manual_filter(self.batches)
if verbose:
print("Data: Mannually filter the range for counties.")
manual_filtered = True
# batch_info
if verbose:
print("Split sent with block size", block_size)
print(f"Total batches: {len(self.batches)}")
print(f"Total tokens: {len(self.tokens)}")
if voken_dir is not None:
print(f"Total vokens: {len(self.vokens)}")
if voken_ablation is not None:
print("The model will process voken ablation strategy:", voken_ablation)
print()
block_check(self.batches, block_size, fixed_size=True, manual_filtered=manual_filtered)
if self.voken_ablation == 'token':
self._voken_ids = list(range(30522))
@property
def voken_size(self):
return len(self._voken_ids)
@property
def voken_ids(self):
return copy.copy(self._voken_ids)
def assert_equal_vokens(self, dataset):
assert self.voken_size == dataset.voken_size
for vid, vid1 in zip(self.voken_ids, dataset.voken_ids):
assert vid == vid1
def __len__(self):
return len(self.batches) - 1
def __getitem__(self, item):
token_start, token_end = self.batches[item]
if self._iter_cnt < 5 and self.verbose:
print(f"Data Loader: data iteration {self._iter_cnt}, with range {token_start} to {token_end}.")
self._iter_cnt += 1
tokens = list(self.tokens[token_start: token_end])
token_tensor = torch.tensor(
self.tokenizer.build_inputs_with_special_tokens(tokens),
dtype=torch.long)
if self.vokens is not None:
vokens = list(self.vokens[token_start: token_end])
vokens = self.maybe_do_sent_level(vokens)
vokens = self.maybe_do_ablation_study(vokens, tokens)
voken_tensor = torch.tensor(
[self.IGNORE_ID] + vokens + [self.IGNORE_ID],
dtype=torch.long
)
return token_tensor, voken_tensor
else:
return token_tensor
def maybe_do_sent_level(self, vokens):
if not self.sent_level:
return vokens
else:
if self.sent_strategy == 'all':
vokens = [
(-voken-1 if voken < 0 else voken)
for voken in vokens
]
elif self.sent_strategy == 'first':
vokens = [
(self.IGNORE_ID if voken < 0 else voken)
for voken in vokens
]
return vokens
def maybe_do_ablation_study(self, vokens, tokens):
if self.voken_ablation is None:
return vokens
else:
if self._iter_cnt < 5 and self.verbose:
print("Before voken ablation: ", vokens)
if self.voken_ablation == 'random':
vokens = [random.randint(0, self.voken_size - 1)
for _ in range(len(vokens))]
elif self.voken_ablation == 'shuffle':
random.shuffle(vokens)
elif self.voken_ablation == 'reverse':
vokens = vokens[::-1]
elif self.voken_ablation == 'token':
vokens = tokens
if self._iter_cnt < 5 and self.verbose:
print("After voken ablation: ", vokens)
return vokens
def get_item_info(self, item):
token_start = self.batches[item]
token_end = self.batches[item + 1]
return token_start, token_end
def __del__(self):
self.token_hdf5.close()
if self.vokens is not None:
self.voken_hdf5.close()
FORBIDDEN_RANGE = (
119314944, # Start of iter 3700
187053048 # End of iter 5800
)
def intersect(x, y):
x1, x2 = x
y1, y2 = y
if x2 <= y1 or x1 >= y2:
# Case 1: [ x )[ y )
# Case 2: [ y )[ x )
return False
return True
def manual_filter(batches):
batches = list(filter(
lambda x: not intersect(x, FORBIDDEN_RANGE),
batches
))
return batches
def block_check(batches, block_size, fixed_size=False, manual_filtered=False):
"""
Check whether the batches satisfy the following requirements.
1. Monotonic
2. Mutually exclusive
3. Range < block_size
"""
last_end = 0
for start_token, end_token in batches:
assert last_end <= start_token
if fixed_size:
assert (end_token - start_token) == block_size, 'len([%d, %d)) != %d' % (start_token, end_token, block_size)
else:
assert (end_token - start_token) <= block_size, 'len([%d, %d)) > %d' % (start_token, end_token, block_size)
if manual_filtered:
assert not intersect((start_token, end_token), FORBIDDEN_RANGE)
last_end = end_token
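# Illustrative check (made-up batches): block_check([(0, 512), (512, 1024)], 512) passes,
# while block_check([(0, 512), (500, 1012)], 512) trips the monotonic/exclusive assertion.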
def get_voken_feats(dataset: CoLDataset, feat_dir: str):
"""
Load pre-extracted visual features for the img_ids of the vokens.
"""
set2id2feat = {}
voken_feats = []
for voken_id in dataset.voken_ids:
voken_img_set, voken_img_id = voken_id.split('/')
if voken_img_set not in set2id2feat:
img_ids = list(map(
lambda x: x.rstrip(),
open(os.path.join(feat_dir, f"{voken_img_set}.ids"))
))
img_feats = h5py.File(
os.path.join(feat_dir, f"{voken_img_set}.hdf5"), 'r'
)['keys'][:]
id2feat = {}
assert len(img_ids) == len(img_feats)
for img_id, img_feat in zip(img_ids, img_feats):
id2feat[img_id] = img_feat
set2id2feat[voken_img_set] = id2feat
voken_feats.append(set2id2feat[voken_img_set][voken_img_id])
return voken_feats
|
tests/test_query.py | pooya/disco | 786 | 12762560 | <filename>tests/test_query.py<gh_stars>100-1000
from disco.test import TestCase, TestPipe
from disco.compat import bytes_to_str, str_to_bytes
from disco.worker.pipeline.worker import Stage
from disco.worker.task_io import task_input_stream
import csv
from functools import partial
import hashlib
PREFIX='/tmp/'
def read(interface, state, label, inp):
from disco import util
for e in inp:
scheme, netloc, _ = util.urlsplit(e)
fileName, joinColumn = str(netloc).split('?')
File = open(PREFIX + fileName, 'r')
col = int(joinColumn)
reader = csv.reader(File)
firstRow = True
for row in reader:
if firstRow:
tableName = row[0]
firstRow = False
else:
fullName = tableName + '?' + str(col)
Hash = int(hashlib.md5(str_to_bytes(row[col])).hexdigest(), 16) % 160
interface.output(Hash).add(fullName, row)
def join_init(interface, params):
return {}
def join(interface, state, label, inp):
for k, v in inp:
if k not in state:
state[k] = [v]
else:
state[k].append(v)
def join_done(interface, state):
if len(state) != 2:
return
name0 = list(state.keys())[0]
name1 = list(state.keys())[1]
_, strCol0 = name0.split('?')
_, strCol1 = name1.split('?')
col0 = int(strCol0)
col1 = int(strCol1)
for entry0 in state[name0]:
for entry1 in state[name1]:
if entry0[col0] == entry1[col1]:
entry0_copy = entry0[:]
entry1_copy = entry1[:]
del entry0_copy[col0]
del entry1_copy[col1]
interface.output(0).add(entry0[col0], entry0_copy + entry1_copy)
def combine_init(interface, params, init):
return init()
def combine(interface, state, label, inp, func):
for k, v in inp:
func(state, k, v)
def combine_done(interface, state):
for k, v in state.items():
interface.output(0).add(k, v)
def _getPipeline():
select_stage = [("split", Stage('read', process=read))]
join_stage = [("group_label", Stage('join', init=join_init, process=join, done=join_done))]
def combine_row(state, k, v, func):
if k not in state:
state[k] = 0
state[k] = state[k] + func(v)
node_combine_stage = [("group_node_label",
Stage('node_combine', init=partial(combine_init, init=lambda: {}),
process=partial(combine, func=partial(combine_row, func=lambda v: 1)),
done=combine_done))]
combine_all_stage = [("group_label",
Stage('combine_all', init=partial(combine_init, init=lambda: {}),
process=partial(combine, func=partial(combine_row, func=lambda v: v)),
done=combine_done))]
return select_stage + join_stage + node_combine_stage + combine_all_stage
class PipeJob(TestPipe):
pipeline = _getPipeline()
class JoinTestCase(TestCase):
#input contains the file name and the join column
input = ['raw://cities.csv?0', 'raw://packages.csv?3']
def SetUpFiles(self):
F1 = open(PREFIX + 'cities.csv', 'w')
F1.write("cities\nEdmonton,-45\nCalgary,-35\nMontreal,-25\nToronto,-15\n")
F1.close()
F2 = open(PREFIX + 'packages.csv', 'w')
F2.write("packages\n0,2013-10-2,2013-11-3,Edmonton,Calgary\n" +
"1,2013-11-3,2013-12-3,Calgary,Toronto\n" +
"2,2013-10-4,2013-10-6,Edmonton,Montreal\n")
F2.close()
def serve(self, path):
return path
def test_per_node(self):
self.SetUpFiles()
self.job = PipeJob().run(input=self.test_server.urls(self.input))
self.assertEqual(sorted(self.results(self.job)), [('Calgary', 1), ('Edmonton', 2)])
|
jactorch/transforms/coor/functional.py | dapatil211/Jacinle | 114 | 12762572 | #! /usr/bin/env python3
# -*- coding: utf-8 -*-
# File : functional.py
# Author : <NAME>
# Email : <EMAIL>
# Date : 03/03/2018
#
# This file is part of Jacinle.
# Distributed under terms of the MIT license.
import math
from PIL import Image
import numpy as np
import torchvision.transforms.functional as TF
import jactorch.transforms.image.functional as jac_tf
from jacinle.utils.argument import get_2dshape
def normalize_coor(img, coor):
coor = coor.copy()
coor[:, 0] /= img.width
coor[:, 1] /= img.height
return img, coor
def denormalize_coor(img, coor):
coor = coor.copy()
coor[:, 0] *= img.width
coor[:, 1] *= img.height
return img, coor
def crop(img, coor, i, j, h, w):
coor = coor.copy()
coor[:, 0] = (coor[:, 0] - j / img.width) * (img.width / w)
coor[:, 1] = (coor[:, 1] - i / img.height) * (img.height / h)
return TF.crop(img, i, j, h, w), coor
def center_crop(img, coor, output_size):
output_size = get_2dshape(output_size)
w, h = img.size
th, tw = output_size
i = int(round((h - th) / 2.))
j = int(round((w - tw) / 2.))
return crop(img, coor, i, j, th, tw)
def pad(img, coor, padding, mode='constant', fill=0):
if isinstance(padding, int):
padding = (padding, padding, padding, padding)
elif len(padding) == 2:
padding = (padding[0], padding[1], padding[0], padding[1])
else:
assert len(padding) == 4
img_new = jac_tf.pad(img, padding, mode=mode, fill=fill)
coor = coor.copy()
coor[:, 0] = (coor[:, 0] + padding[0] / img.width) * (img.width / img_new.width)
coor[:, 1] = (coor[:, 1] + padding[1] / img.height) * (img.height/ img_new.height)
return img_new, coor
def hflip(img, coor):
coor = coor.copy()
coor[:, 0] = 1 - coor[:, 0]
return TF.hflip(img), coor
def vflip(img, coor):
coor = coor.copy()
coor[:, 1] = 1 - coor[:, 1]
return TF.vflip(img), coor
def resize(img, coor, size, interpolation=Image.BILINEAR):
# Assuming coordinates are 0/1-normalized.
return TF.resize(img, size, interpolation=interpolation), coor
def resized_crop(img, coor, i, j, h, w, size, interpolation=Image.BILINEAR):
img, coor = crop(img, coor, i, j, h, w)
img, coor = resize(img, coor, size, interpolation)
return img, coor
def refresh_valid(img, coor, force=False):
if coor.shape[1] == 2:
if force:
coor = np.concatenate([coor, np.ones_like(coor[:, 0])], axis=1)
else:
return img, coor
assert coor.shape[1] == 3, 'Support only (x, y, valid) or (x, y) typed coordinates.'
out = []
for x, y, v in coor:
valid = (v == 1) and (x >= 0) and (x < img.width) and (y >= 0) and (y < img.height)
if valid:
out.append((x, y, v))
else:
out.append((0., 0., 0.))
return img, np.array(out, dtype='float32')
def rotate(img, coor, angle, resample, crop_, expand, center=None, translate=None):
assert translate is None
img_new = TF.rotate(img, angle, resample=resample, expand=expand, center=center)
matrix, extra_crop = get_rotation_matrix(img, angle, crop_, expand, center, translate)
_, coor = denormalize_coor(img, coor)
for i in range(coor.shape[0]):
coor[i, :2] = apply_affine_transform(*coor[i, :2], matrix)
_, coor = normalize_coor(img_new, coor)
if extra_crop is not None:
img_new, coor = crop(img_new, coor, *extra_crop)
return img_new, coor
def pad_multiple_of(img, coor, multiple, mode='constant', fill=0):
h, w = img.height, img.width
hh = h - h % multiple + multiple * int(h % multiple != 0)
ww = w - w % multiple + multiple * int(w % multiple != 0)
if h != hh or w != ww:
return pad(img, coor, (0, 0, ww - w, hh - h), mode=mode, fill=fill)
return img, coor
def get_rotation_matrix(image, angle, crop, expand, center, translate):
w, h = image.size
if translate is None:
translate = (0, 0)
if center is None:
center = (w / 2.0, h / 2.0)
angle = math.radians(angle % 360)
matrix = [
round(math.cos(angle), 15), round(math.sin(angle), 15), 0.0,
round(-math.sin(angle), 15), round(math.cos(angle), 15), 0.0
]
matrix[2], matrix[5] = apply_affine_transform(-center[0], -center[1], matrix)
matrix[2] += center[0] + translate[0]
matrix[5] += center[1] + translate[1]
# print('debug', angle, translate, center, matrix, apply_affine_transform(0.5, 0.5, matrix))
if crop or expand:
xx = []
yy = []
for x, y in ((0, 0), (w, 0), (w, h), (0, h)):
x, y = apply_affine_transform(x, y, matrix)
xx.append(x)
yy.append(y)
xx.sort()
yy.sort()
extra_crop = None
if crop:
assert not expand, 'Cannot use both expand and crop.'
nw = int(math.ceil(xx[2]) - math.floor(xx[1]))
nh = int(math.ceil(yy[2]) - math.floor(yy[1]))
# CAUTION! extra_crop is of format (dy, dx, h, w)
extra_crop = ((h - nh) // 2, (w - nw) // 2, nh, nw)
if expand:
nw = int(math.ceil(xx[3]) - math.floor(xx[0]))
nh = int(math.ceil(yy[3]) - math.floor(yy[0]))
matrix[2] += (nw - w) / 2.
matrix[5] += (nh - h) / 2.
return matrix, extra_crop
def apply_affine_transform(x, y, matrix):
(a, b, c, d, e, f) = matrix
return a*x + b*y + c, d*x + e*y + f
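# Hedged usage sketch (the image size and keypoints below are made up for illustration):
if __name__ == "__main__":
    img = Image.new('RGB', (100, 50))                           # width 100, height 50
    coor = np.array([[10., 20.], [90., 40.]], dtype='float32')  # pixel coordinates
    img, coor = normalize_coor(img, coor)                       # -> 0/1-normalized
    img, coor = hflip(img, coor)                                # x -> 1 - x
    img, coor = denormalize_coor(img, coor)
    print(coor)                                                 # [[90. 20.] [10. 40.]]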
|
services/engine/model.py | chrkaatz/BitVision | 1,070 | 12762574 | <reponame>chrkaatz/BitVision
#########
# GLOBALS
#########
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import StandardScaler
######
# MAIN
######
class Model(object):
def __init__(self, training_data, hyperopt=False):
self.scaler = StandardScaler()
self.scaler.fit(training_data.drop("Trend", axis=1))
self.model = LogisticRegression(penalty="l1", tol=.001, C=1000, max_iter=150)
normalized_training_data = self.scaler.transform(training_data.drop("Trend", axis=1))
self.model.fit(normalized_training_data, training_data["Trend"])
## Public Methods ##
def predict(self, vector):
return self.model.predict(self.scaler.transform(vector.reshape(1, -1)))
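# Hedged usage sketch (the column names and data are assumed; the real training data is a
# feature DataFrame with a binary "Trend" column). Note: penalty="l1" needs an l1-capable
# solver such as liblinear or saga; older scikit-learn releases used liblinear by default.
if __name__ == "__main__":
    import numpy as np
    import pandas as pd

    df = pd.DataFrame(np.random.randn(200, 4), columns=["f1", "f2", "f3", "f4"])
    df["Trend"] = (df["f1"] + 0.1 * np.random.randn(200) > 0).astype(int)

    model = Model(training_data=df)
    print(model.predict(df.drop("Trend", axis=1).iloc[0].values))  # e.g. [1]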
|
graphormer/data/__init__.py | shawnwang-tech/Graphormer | 858 | 12762590 | DATASET_REGISTRY = {}
def register_dataset(name: str):
def register_dataset_func(func):
DATASET_REGISTRY[name] = func()
return register_dataset_func
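# Hedged usage sketch (the dataset name and factory below are invented for illustration):
#
#   @register_dataset("toy_dataset")
#   def create_toy_dataset():
#       return {"train": list(range(8)), "valid": list(range(4))}
#
# Because the decorator stores func() rather than func, DATASET_REGISTRY["toy_dataset"]
# then holds the dict returned by the factory, evaluated once at import time.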
|
models/kp2uv_model.py | google/retiming | 152 | 12762591 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from third_party.models.base_model import BaseModel
from . import networks
class Kp2uvModel(BaseModel):
"""This class implements the keypoint-to-UV model (inference only)."""
@staticmethod
def modify_commandline_options(parser, is_train=True):
parser.set_defaults(dataset_mode='kpuv')
return parser
def __init__(self, opt):
"""Initialize this model class.
Parameters:
opt -- test options
"""
BaseModel.__init__(self, opt)
self.visual_names = ['keypoints', 'output_uv']
self.model_names = ['Kp2uv']
self.netKp2uv = networks.define_kp2uv(gpu_ids=self.gpu_ids)
self.isTrain = False # only test mode supported
# Our program will automatically call <model.setup> to define schedulers, load networks, and print networks
def set_input(self, input):
"""Unpack input data from the dataloader.
Parameters:
input: a dictionary that contains the data itself and its metadata information.
"""
self.keypoints = input['keypoints'].to(self.device)
self.image_paths = input['path']
def forward(self):
"""Run forward pass. This will be called by <test>."""
output = self.netKp2uv.forward(self.keypoints)
self.output_uv = self.output2rgb(output)
def output2rgb(self, output):
"""Convert network outputs to RGB image."""
pred_id, pred_uv = output
_, pred_id_class = pred_id.max(1)
pred_id_class = pred_id_class.unsqueeze(1)
# extract UV from pred_uv (48 channels); select based on class ID
selected_uv = -1 * torch.ones(pred_uv.shape[0], 2, pred_uv.shape[2], pred_uv.shape[3], device=pred_uv.device)
for partid in range(1, 25):
mask = (pred_id_class == partid).float()
selected_uv *= (1. - mask)
selected_uv += mask * pred_uv[:, (partid - 1) * 2:(partid - 1) * 2 + 2]
pred_uv = selected_uv
rgb = torch.cat([pred_id_class.float() * 10 / 255. * 2 - 1, pred_uv], 1)
return rgb
def optimize_parameters(self):
pass
|
extra/cda/cachegen.py | heinsm/qira | 2,056 | 12762592 | #!/usr/bin/env python2.7
import os
import sys
import cda_config
basedir = os.path.dirname(os.path.realpath(__file__))
#sys.path.append(basedir+"/clang/llvm/tools/clang/bindings/python")
import clang.cindex as ci
ci.Config.set_library_file(cda_config.LIBCLANG_PATH)
import pickle
from clang.cindex import CursorKind
import json
from hashlib import sha1
# debug
DEBUG = 0
# cache generated
file_cache = {}
object_cache = {}
xref_cache = {}
# a single index for the runtime of the server
index = ci.Index.create()
def parse_node(node, d, filename, care):
#print node.location.file
if node.location.file != None and str(node.location.file) != filename:
return
ref = node.referenced
if type(ref) != type(None):
usr = ref.get_usr()
#print " "*d, node.kind, node.spelling, node.displayname, node.location, node.extent.start.offset, node.extent.end.offset, node.get_usr(), "****", ref.spelling, ref.location, ref.get_usr()
else:
usr = None
if DEBUG == 1:
print " "*d, node.kind, node.spelling, node.displayname, node.location, node.location.offset, node.extent.start.offset, node.extent.end.offset, usr
"""
if DEBUG == 1:
print " "*d, node.kind, node.spelling, node.displayname, node.location, node.location.offset, node.extent.start.offset, node.extent.end.offset, usr
"""
#print dir(node)
"""
print ref, node.get_usr()
print ref.location
for i in deff:
print i
"""
klass = str(node.kind).split('.')[-1]
(start, end) = (None, None)
if node.kind in [CursorKind.STRING_LITERAL, CursorKind.INTEGER_LITERAL, CursorKind.TYPE_REF, CursorKind.TEMPLATE_REF]:
#if node.kind in [CursorKind.STRING_LITERAL, CursorKind.TYPE_REF, CursorKind.TEMPLATE_REF]:
start = node.extent.start.offset
end = node.extent.end.offset
elif node.kind in [CursorKind.FUNCTION_DECL, CursorKind.FUNCTION_TEMPLATE, CursorKind.VAR_DECL, CursorKind.CLASS_DECL, CursorKind.CXX_METHOD, CursorKind.CLASS_TEMPLATE, CursorKind.PARM_DECL]:
start = node.location.offset
end = node.location.offset + len(node.spelling)
elif node.kind in [CursorKind.MEMBER_REF_EXPR]:
#print node.location.offset, node.extent.start.offset, node.extent.end.offset
if node.location.offset != 0:
start = node.location.offset
else:
start = node.extent.start.offset
end = node.extent.end.offset
#end = node.location.offset + len(node.displayname)
elif node.kind in [CursorKind.DECL_REF_EXPR]:
start = node.location.offset
end = node.extent.end.offset
if end != None:
care.append((start, end, klass, usr))
if end != None and usr != None and node.location.line > 0:
newval = filename+"#"+str(node.location.line)
if node.is_definition():
# defining the object
if usr in object_cache:
object_cache[usr].append(newval)
else:
object_cache[usr] = [newval]
else:
# xref
if usr in xref_cache:
xref_cache[usr].append(newval)
else:
xref_cache[usr] = [newval]
# link here is good
for child in node.get_children():
parse_node(child, d+1, filename, care)
def parse_file(filename, args=[]):
# traversal attack
exargs = ["-I", cda_config.CLANG_INCLUDES]
tu = index.parse(filename, args=exargs+args)
# bad shit happened
bad = False
for m in tu.diagnostics:
if m.severity >= 3:
print m
bad = True
if bad == True:
#raise Exception("parsing issue")
print "parsing issue"
# extract the things we care about
care = []
parse_node(tu.cursor, 0, filename, care)
care = sorted(care)
# get file data
rdat = open(filename).read()
return (care, rdat)
def parse_files(files, args=[]):
# for unbuilt clang
for fn in files:
print "CDA: caching",fn
try:
file_cache[fn] = parse_file(fn, args)
except Exception as e:
print "CDA: error on",fn,":",e
dat = (object_cache, file_cache, xref_cache)
return dat
|
mnist/DecoyMNIST/00_make_data.py | laura-rieger/deep-explanation-penalization | 105 | 12762597 | <reponame>laura-rieger/deep-explanation-penalization
import torch
import torchvision
import torchvision.datasets as datasets
import sys
import numpy as np
import torch.utils.data as utils
from colour import Color
from os.path import join as oj
mnist_trainset = datasets.MNIST(root='../data', train=True, download=True, transform=None)
color_x = np.zeros((60000, 1, 28, 28))
color_x = mnist_trainset.data[:, None].numpy().astype(np.float32)
color_y = mnist_trainset.targets.numpy().copy()
choice_1 = np.random.choice(2, size = len(color_x))*23
choice_2 = np.random.choice(2, size = len(color_x))*23
for i in range(len(color_x)):
color_x[i, :, choice_1[i]:choice_1[i]+5, choice_2[i]:choice_2[i]+5] = 255- 25*color_y[i]
color_x /=color_x.max()
color_x = color_x*2 -1
np.save(oj("../../data/ColorMNIST", "train_x_decoy.npy"), color_x)
from os.path import join as oj
mnist_trainset = datasets.MNIST(root='../data', train=False, download=True, transform=None)
color_x = np.zeros((len(mnist_trainset.data), 1, 28, 28))
color_x = mnist_trainset.data[:, None].numpy().astype(np.float32)
color_y = mnist_trainset.targets.numpy().copy()
choice_1 = np.random.choice(2, size = len(color_x))*23
choice_2 = np.random.choice(2, size = len(color_x))*23
for i in range(len(color_x)):
color_x[i, :, choice_1[i]:choice_1[i]+5, choice_2[i]:choice_2[i]+5] = 0+ 25*color_y[i]
color_x /=color_x.max()
color_x = color_x*2 -1
np.save(oj("../data/ColorMNIST", "test_x_decoy.npy"), color_x)
|
src/debugpy/_vendored/pydevd/tests_python/test_smart_step_into_bytecode.py | r3m0t/debugpy | 695 | 12762639 | <gh_stars>100-1000
import sys
try:
from _pydevd_bundle import pydevd_bytecode_utils
except ImportError:
pass
import pytest
pytestmark = pytest.mark.skipif(sys.version_info[0] < 3, reason='Only available for Python 3.')
@pytest.fixture(autouse=True, scope='function')
def enable_strict():
# In tests enable strict mode (in regular operation it'll be False and will just ignore
# bytecodes we still don't handle as if it didn't change the stack).
pydevd_bytecode_utils.STRICT_MODE = True
yield
pydevd_bytecode_utils.STRICT_MODE = False
def check(found, expected):
assert len(found) == len(expected), '%s != %s' % (found, expected)
last_offset = -1
for f, e in zip(found, expected):
try:
if isinstance(e.name, (list, tuple, set)):
assert f.name in e.name
else:
assert f.name == e.name
assert f.is_visited == e.is_visited
assert f.line == e.line
assert f.call_order == e.call_order
except AssertionError as exc:
raise AssertionError('%s\nError with: %s - %s' % (exc, f, e))
# We can't check the offset because it may differ across Python versions,
# so just check that it's always in order.
assert f.offset > last_offset
last_offset = f.offset
def collect_smart_step_into_variants(*args, **kwargs):
try:
return pydevd_bytecode_utils.calculate_smart_step_into_variants(*args, **kwargs)
except:
# In a failure, rerun with DEBUG!
debug = pydevd_bytecode_utils.DEBUG
pydevd_bytecode_utils.DEBUG = True
try:
return pydevd_bytecode_utils.calculate_smart_step_into_variants(*args, **kwargs)
finally:
pydevd_bytecode_utils.DEBUG = debug
def check_names_from_func_str(func_str, expected):
locs = {}
exec(func_str, globals(), locs)
function = locs['function']
class Frame:
f_code = function.__code__
f_lasti = 0
found = collect_smart_step_into_variants(
Frame, 0, 99999, base=function.__code__.co_firstlineno)
check_name_and_line(found, expected)
def test_smart_step_into_bytecode_info():
from _pydevd_bundle.pydevd_bytecode_utils import Variant
def function():
def some(arg):
pass
def call(arg):
pass
yield sys._getframe()
call(some(call(some())))
generator = iter(function())
frame = next(generator)
found = collect_smart_step_into_variants(
frame, 0, 99999, base=function.__code__.co_firstlineno)
check(found, [
Variant(name=('_getframe', 'sys'), is_visited=True, line=8, offset=20, call_order=1),
Variant(name='some', is_visited=False, line=9, offset=34, call_order=1),
Variant(name='call', is_visited=False, line=9, offset=36, call_order=1),
Variant(name='some', is_visited=False, line=9, offset=38, call_order=2),
Variant(name='call', is_visited=False, line=9, offset=40, call_order=2),
])
def check_name_and_line(found, expected):
names_and_lines = set()
for variant in found:
if variant.children_variants:
for v in variant.children_variants:
names_and_lines.add((v.name + (' (in %s)' % variant.name), v.line))
else:
names_and_lines.add((variant.name, variant.line))
if names_and_lines != set(expected):
raise AssertionError('Found: %s' % (sorted(names_and_lines, key=lambda tup:tuple(reversed(tup))),))
def test_smart_step_into_bytecode_info_002():
def function():
yield sys._getframe()
completions = foo.bar(
Something(param1, param2=xxx.yyy),
)
call()
generator = iter(function())
frame = next(generator)
found = collect_smart_step_into_variants(
frame, 0, 99999, base=function.__code__.co_firstlineno)
check_name_and_line(found, [('_getframe', 1), ('bar', 2), ('Something', 3), ('call', 5)])
def test_smart_step_into_bytecode_info_003():
def function():
yield sys._getframe()
bbb = foo.bar(
Something(param1, param2=xxx.yyy), {}
)
call()
generator = iter(function())
frame = next(generator)
found = collect_smart_step_into_variants(
frame, 0, 99999, base=function.__code__.co_firstlineno)
check_name_and_line(found, [('_getframe', 1), ('bar', 2), ('Something', 3), ('call', 5)])
def test_smart_step_into_bytecode_info_004():
def function():
yield sys._getframe()
bbb = foo.bar(
Something(param1, param2=xxx.yyy), {1: 1} # BUILD_MAP
)
call()
generator = iter(function())
frame = next(generator)
found = collect_smart_step_into_variants(
frame, 0, 99999, base=function.__code__.co_firstlineno)
check_name_and_line(found, [('_getframe', 1), ('bar', 2), ('Something', 3), ('call', 5)])
def test_smart_step_into_bytecode_info_005():
def function():
yield sys._getframe()
bbb = foo.bar(
Something(param1, param2=xxx.yyy), {1: 1, 2:2} # BUILD_CONST_KEY_MAP
)
call()
generator = iter(function())
frame = next(generator)
found = collect_smart_step_into_variants(
frame, 0, 99999, base=function.__code__.co_firstlineno)
check_name_and_line(found, [
('_getframe', 1), ('bar', 2), ('Something', 3), ('call', 5)])
def test_smart_step_into_bytecode_info_006():
def function():
yield sys._getframe()
foo.bar(
Something(),
{
1: 1,
2:[
x for x
in call()
]
}
)
call2()
generator = iter(function())
frame = next(generator)
found = collect_smart_step_into_variants(
frame, 0, 99999, base=function.__code__.co_firstlineno)
check_name_and_line(found, [
('_getframe', 1), ('bar', 2), ('Something', 3), ('call', 8), ('call2', 12)])
def test_smart_step_into_bytecode_info_007():
def function():
yield sys._getframe()
a[0]
generator = iter(function())
frame = next(generator)
found = collect_smart_step_into_variants(
frame, 0, 99999, base=function.__code__.co_firstlineno)
check_name_and_line(found, [('_getframe', 1), ('__getitem__', 2)])
def test_smart_step_into_bytecode_info_008():
def function():
yield sys._getframe()
call(
[1, 2, 3])
generator = iter(function())
frame = next(generator)
found = collect_smart_step_into_variants(
frame, 0, 99999, base=function.__code__.co_firstlineno)
check_name_and_line(found, [('_getframe', 1), ('call', 2)])
def test_smart_step_into_bytecode_info_009():
def function():
yield sys._getframe()
[1, 2, 3][0]()
generator = iter(function())
frame = next(generator)
found = collect_smart_step_into_variants(
frame, 0, 99999, base=function.__code__.co_firstlineno)
check_name_and_line(found, [('_getframe', 1), ('__getitem__', 2), ('__getitem__().__call__', 2)])
def test_smart_step_into_bytecode_info_011():
def function():
yield sys._getframe()
[1, 2, 3][0]()()
generator = iter(function())
frame = next(generator)
found = collect_smart_step_into_variants(
frame, 0, 99999, base=function.__code__.co_firstlineno)
check_name_and_line(found, [('_getframe', 1), ('__getitem__', 2), ('__getitem__().__call__', 2)])
def test_smart_step_into_bytecode_info_012():
def function():
yield sys._getframe()
(lambda a:a)(1)
generator = iter(function())
frame = next(generator)
found = collect_smart_step_into_variants(
frame, 0, 99999, base=function.__code__.co_firstlineno)
check_name_and_line(found, [('_getframe', 1), ('<lambda>', 2)])
def test_smart_step_into_bytecode_info_013():
def function():
yield sys._getframe()
(lambda a:a,)[0](1)
generator = iter(function())
frame = next(generator)
found = collect_smart_step_into_variants(
frame, 0, 99999, base=function.__code__.co_firstlineno)
check_name_and_line(found, [('_getframe', 1), ('__getitem__().__call__', 2), ('__getitem__', 2)])
def test_smart_step_into_bytecode_info_014():
def function():
yield sys._getframe()
try:
raise RuntimeError()
except Exception:
call2()
finally:
call3()
generator = iter(function())
frame = next(generator)
found = collect_smart_step_into_variants(
frame, 0, 99999, base=function.__code__.co_firstlineno)
check_name_and_line(found, [('_getframe', 1), ('RuntimeError', 3), ('call2', 5), ('call3', 7)])
def test_smart_step_into_bytecode_info_015():
def function():
yield sys._getframe()
with call():
call2()
generator = iter(function())
frame = next(generator)
found = collect_smart_step_into_variants(
frame, 0, 99999, base=function.__code__.co_firstlineno)
check_name_and_line(found, [('_getframe', 1), ('call', 2), ('call2', 3)])
def test_smart_step_into_bytecode_info_016():
def function():
yield sys._getframe()
call2(
1,
2,
a=3,
*args,
**kwargs
)
generator = iter(function())
frame = next(generator)
found = collect_smart_step_into_variants(
frame, 0, 99999, base=function.__code__.co_firstlineno)
check_name_and_line(found, [('_getframe', 1), ('call2', 2)])
def test_smart_step_into_bytecode_info_017():
def function():
yield sys._getframe()
call([
x for x in y
if x == call2()
])
generator = iter(function())
frame = next(generator)
found = collect_smart_step_into_variants(
frame, 0, 99999, base=function.__code__.co_firstlineno)
check_name_and_line(found,
[('_getframe', 1), ('call', 2), ('__eq__ (in <listcomp>)', 4), ('call2 (in <listcomp>)', 4)]
)
def test_smart_step_into_bytecode_info_018():
def function():
yield sys._getframe()
class Foo(object):
def __init__(self):
pass
f = Foo()
generator = iter(function())
frame = next(generator)
found = collect_smart_step_into_variants(
frame, 0, 99999, base=function.__code__.co_firstlineno)
check_name_and_line(found, [('_getframe', 1), ('Foo', 8)])
def test_smart_step_into_bytecode_info_019():
def function():
yield sys._getframe()
class Foo(object):
def __init__(self):
pass
f = Foo()
generator = iter(function())
frame = next(generator)
found = collect_smart_step_into_variants(
frame, 0, 99999, base=function.__code__.co_firstlineno)
check_name_and_line(found, [('_getframe', 1), ('Foo', 8)])
def test_smart_step_into_bytecode_info_020():
def function():
yield sys._getframe()
for a in call():
if a != 1:
a()
break
elif a != 2:
b()
break
else:
continue
else:
raise RuntimeError()
generator = iter(function())
frame = next(generator)
found = collect_smart_step_into_variants(
frame, 0, 99999, base=function.__code__.co_firstlineno)
check_name_and_line(found, [
('_getframe', 1), ('call', 2), ('__ne__', 3), ('a', 4), ('__ne__', 6), ('b', 7), ('RuntimeError', 12)])
def test_smart_step_into_bytecode_info_021():
def function():
yield sys._getframe()
a, b = b, a
a, b, c = c, a, b
a, b, c, d = d, c, a, b
a()
generator = iter(function())
frame = next(generator)
found = collect_smart_step_into_variants(
frame, 0, 99999, base=function.__code__.co_firstlineno)
check_name_and_line(found, [('_getframe', 1), ('a', 5)])
def test_smart_step_into_bytecode_info_022():
def function():
yield sys._getframe()
a(
*{1, 2},
**{
1:('1' + '2'),
2: tuple(
x for x in c()
if x == d())
}
)
b()
generator = iter(function())
frame = next(generator)
found = collect_smart_step_into_variants(
frame, 0, 99999, base=function.__code__.co_firstlineno)
check_name_and_line(found, [
('_getframe', 1), ('a', 2), ('tuple', 6), ('c', 7), ('__eq__ (in <genexpr>)', 8), ('d (in <genexpr>)', 8), ('b', 11)])
def test_smart_step_into_bytecode_info_023():
def function():
yield sys._getframe()
tuple(
x for x in
c()
if x == d()
)
tuple(
x for x in
c()
if x == d()
)
generator = iter(function())
frame = next(generator)
found = collect_smart_step_into_variants(
frame, 0, 99999, base=function.__code__.co_firstlineno)
check_name_and_line(found, [
('_getframe', 1), ('tuple', 2), ('c', 4), ('__eq__ (in <genexpr>)', 5), ('d (in <genexpr>)', 5), ('tuple', 7), ('c', 9), ('__eq__ (in <genexpr>)', 10), ('d (in <genexpr>)', 10)])
def test_smart_step_into_bytecode_info_024():
func = '''def function():
yield sys._getframe()
call(a ** b)
call(a * b)
call(a @ b)
call(a / b)
call(a // b)
call(a % b)
call(a + b)
call(a - b)
call(a >> b)
call(a << b)
call(a & b)
call(a | b)
call(a ^ b)
'''
locs = {}
exec(func, globals(), locs)
function = locs['function']
generator = iter(function())
frame = next(generator)
found = collect_smart_step_into_variants(
frame, 0, 99999, base=function.__code__.co_firstlineno)
check_name_and_line(found, [
('_getframe', 1),
('__pow__', 2),
('call', 2),
('__mul__', 3),
('call', 3),
('__matmul__', 4),
('call', 4),
('__div__', 5),
('call', 5),
('__floordiv__', 6),
('call', 6),
('__mod__', 7),
('call', 7),
('__add__', 8),
('call', 8),
('__sub__', 9),
('call', 9),
('__rshift__', 10),
('call', 10),
('__lshift__', 11),
('call', 11),
('__and__', 12),
('call', 12),
('__or__', 13),
('call', 13),
('__xor__', 14),
('call', 14)],
)
def test_smart_step_into_bytecode_info_025():
func = '''def function():
yield sys._getframe()
a **= b
a *= b
a @= b
a /= b
a //= b
a %= b
a += b
a -= b
a >>= b
a <<= b
a &= b
a |= b
a ^= b
call()
'''
locs = {}
exec(func, globals(), locs)
function = locs['function']
generator = iter(function())
frame = next(generator)
found = collect_smart_step_into_variants(
frame, 0, 99999, base=function.__code__.co_firstlineno)
check_name_and_line(found, [('_getframe', 1), ('call', 15)])
@pytest.mark.skipif(sys.version_info[0:2] < (3, 8), reason='Walrus operator only available for Python 3.8 onwards.')
def test_smart_step_into_bytecode_info_026():
func = '''def function():
yield sys._getframe()
call((a:=1))
'''
locs = {}
exec(func, globals(), locs)
function = locs['function']
generator = iter(function())
frame = next(generator)
found = collect_smart_step_into_variants(
frame, 0, 99999, base=function.__code__.co_firstlineno)
check_name_and_line(found, [('_getframe', 1), ('call', 2)])
def test_smart_step_into_bytecode_info_027():
def function():
yield sys._getframe()
def call():
pass
a = [1, call]
a[:1] = []
x = a[0]()
generator = iter(function())
frame = next(generator)
found = collect_smart_step_into_variants(
frame, 0, 99999, base=function.__code__.co_firstlineno)
check_name_and_line(found, [('_getframe', 1), ('__getitem__', 8), ('__getitem__().__call__', 8)])
def test_smart_step_into_bytecode_info_028():
def function():
yield sys._getframe()
def call():
pass
a = [1, call]
a[:1] += []
x = a[0]()
generator = iter(function())
frame = next(generator)
found = collect_smart_step_into_variants(
frame, 0, 99999, base=function.__code__.co_firstlineno)
check_name_and_line(found, [('_getframe', 1), ('__getitem__', 7), ('__getitem__', 8), ('__getitem__().__call__', 8)])
def test_smart_step_into_bytecode_info_029():
def function():
yield sys._getframe()
call((+b) + (-b) - (not b) * (~b))
generator = iter(function())
frame = next(generator)
found = collect_smart_step_into_variants(
frame, 0, 99999, base=function.__code__.co_firstlineno)
check_name_and_line(found, [('_getframe', 1), ('__add__', 3), ('__mul__', 3), ('__sub__', 3), ('call', 3)])
def test_smart_step_into_bytecode_info_030():
def function():
yield sys._getframe()
call({a for a in b})
generator = iter(function())
frame = next(generator)
found = collect_smart_step_into_variants(
frame, 0, 99999, base=function.__code__.co_firstlineno)
check_name_and_line(found, [('_getframe', 1), ('call', 3)])
def test_smart_step_into_bytecode_info_031():
def function():
yield sys._getframe()
call({a: b for a in b})
generator = iter(function())
frame = next(generator)
found = collect_smart_step_into_variants(
frame, 0, 99999, base=function.__code__.co_firstlineno)
check_name_and_line(found, [('_getframe', 1), ('call', 3)])
def test_smart_step_into_bytecode_info_032():
def function():
yield sys._getframe()
del a[:2]
call()
generator = iter(function())
frame = next(generator)
found = collect_smart_step_into_variants(
frame, 0, 99999, base=function.__code__.co_firstlineno)
check_name_and_line(found, [('_getframe', 1), ('call', 4)])
def test_smart_step_into_bytecode_info_033():
check_names_from_func_str('''def function():
yield sys._getframe()
raise call()
''', [('_getframe', 1), ('call', 3)])
@pytest.mark.skipif(sys.version_info[0:2] < (3, 6), reason='Async only available for Python 3.6 onwards.')
def test_smart_step_into_bytecode_info_034():
check_names_from_func_str('''async def function():
await a()
async for b in c():
await d()
''', [('a', 1), ('c', 2), ('d', 3)])
def test_smart_step_into_bytecode_info_035():
check_names_from_func_str('''def function():
assert 0, 'Foo'
''', [('AssertionError', 1)])
def test_smart_step_into_bytecode_info_036():
check_names_from_func_str('''def function(a):
global some_name
some_name = a
some_name()
''', [('some_name', 3)])
def test_smart_step_into_bytecode_info_037():
func = '''def function():
some_name = 10
def another():
nonlocal some_name
some_name = a
some_name()
return another
'''
locs = {}
exec(func, globals(), locs)
function = locs['function']()
class Frame:
f_code = function.__code__
f_lasti = 0
found = collect_smart_step_into_variants(
Frame, 0, 99999, base=function.__code__.co_firstlineno)
check_name_and_line(found, [('some_name', 3)])
def test_smart_step_into_bytecode_info_038():
check_names_from_func_str('''def function():
try:
call()
finally:
call2()
''', [('call', 2), ('call2', 4)])
def test_smart_step_into_bytecode_info_039():
check_names_from_func_str('''def function():
try:
call()
except:
return call2()
finally:
return call3()
''', [('call', 2), ('call2', 4), ('call3', 6)])
def test_smart_step_into_bytecode_info_040():
check_names_from_func_str('''def function():
a.call = foo()
a.call()
''', [('foo', 1), ('call', 2)])
def test_smart_step_into_bytecode_info_041():
check_names_from_func_str('''def function():
foo = 10
del foo
foo = method
foo()
''', [('foo', 4)])
def test_smart_step_into_bytecode_info_042():
check_names_from_func_str('''
foo = 10
def function():
global foo
foo()
''', [('foo', 2)])
def test_smart_step_into_bytecode_info_043():
def function(call):
def another_function():
yield sys._getframe()
call()
for frame in another_function():
yield frame
generator = iter(function(lambda: None))
frame = next(generator)
found = collect_smart_step_into_variants(
frame, 0, 99999, base=function.__code__.co_firstlineno)
check_name_and_line(found, [('_getframe', 3), ('call', 5)])
def test_smart_step_into_bytecode_info_044():
check_names_from_func_str('''
def function(args):
call, *c = args
call(*c)
''', [('call', 2)])
def test_smart_step_into_bytecode_info_045():
check_names_from_func_str('''
def function():
x.foo = 10
del x.foo
x.foo = lambda:None
x.foo()
''', [('foo', 4)])
def test_smart_step_into_bytecode_info_046():
check_names_from_func_str('''
a = 10
def function(args):
global a
del a
a()
''', [('a', 3)])
def test_smart_step_into_bytecode_info_047():
check_names_from_func_str('''
def function():
call(a, b=1, *c, **kw)
''', [('call', 1)])
def test_smart_step_into_bytecode_info_048():
check_names_from_func_str('''
def function(fn):
fn = call(fn)
def pa():
fn()
return pa()
''', [('call', 1), ('pa', 6)])
def test_smart_step_into_bytecode_info_049():
def function(foo):
class SomeClass(object):
implementation = foo
implementation()
f = sys._getframe()
return SomeClass.f
frame = function(object)
found = collect_smart_step_into_variants(
frame, 0, 99999, base=function.__code__.co_firstlineno)
check_name_and_line(found, [('implementation', 5), ('_getframe', 6)])
def test_smart_step_into_bytecode_info_050():
check_names_from_func_str('''
def function():
('a' 'b').index('x')
''', [('index', 1)])
def test_smart_step_into_bytecode_info_051():
check_names_from_func_str('''
def function():
v = 1
v2 = 2
call((f'a{v()!r}' f'b{v2()}'))
''', [('call', 3), ('v', 3), ('v2', 3)])
def test_smart_step_into_bytecode_info_052():
check_names_from_func_str('''
def function():
v = 1
v2 = 2
call({*v(), *v2()})
''', [('call', 3), ('v', 3), ('v2', 3)])
def test_smart_step_into_bytecode_info_053():
check_names_from_func_str('''
def function():
v = 1
v2 = 2
call({**v(), **v2()})
''', [('call', 3), ('v', 3), ('v2', 3)])
def test_smart_step_into_bytecode_info_054():
check_names_from_func_str('''
def function():
import a
from a import b
call()
''', [('call', 3)])
def test_smart_step_into_bytecode_info_055():
check_names_from_func_str('''
async def function():
async with lock() as foo:
await foo()
''', [('lock', 1), ('foo', 2)])
def test_smart_step_into_bytecode_info_056():
check_names_from_func_str('''
def function(mask_path):
wc = some_func(
parsed_content,
np.array(
Image.open(mask_path)
)
)
''', [('some_func', 1), ('array', 3), ('open', 4)])
def test_smart_step_into_bytecode_info_057():
check_names_from_func_str('''
def function(mask_path):
wc = some_func(
parsed_content,
np.array(
my.pack.Image.open(mask_path)
)
)
''', [('some_func', 1), ('array', 3), ('open', 4)])
def test_get_smart_step_into_variant_from_frame_offset():
from _pydevd_bundle.pydevd_bytecode_utils import Variant
found = [
Variant(name='_getframe', is_visited=True, line=8, offset=20, call_order=1),
Variant(name='some', is_visited=False, line=9, offset=34, call_order=1),
Variant(name='call', is_visited=False, line=9, offset=36, call_order=1),
Variant(name='some', is_visited=False, line=9, offset=38, call_order=2),
Variant(name='call', is_visited=False, line=9, offset=40, call_order=2),
]
assert pydevd_bytecode_utils.get_smart_step_into_variant_from_frame_offset(19, found) is None
assert pydevd_bytecode_utils.get_smart_step_into_variant_from_frame_offset(20, found).offset == 20
assert pydevd_bytecode_utils.get_smart_step_into_variant_from_frame_offset(33, found).offset == 20
assert pydevd_bytecode_utils.get_smart_step_into_variant_from_frame_offset(34, found).offset == 34
assert pydevd_bytecode_utils.get_smart_step_into_variant_from_frame_offset(35, found).offset == 34
assert pydevd_bytecode_utils.get_smart_step_into_variant_from_frame_offset(36, found).offset == 36
assert pydevd_bytecode_utils.get_smart_step_into_variant_from_frame_offset(44, found).offset == 40
def test_smart_step_into_bytecode_info_00eq():
from _pydevd_bundle.pydevd_bytecode_utils import Variant
def function():
a = 1
b = 1
if a == b:
pass
if a != b:
pass
if a > b:
pass
if a >= b:
pass
if a < b:
pass
if a <= b:
pass
if a is b:
pass
yield sys._getframe()
generator = iter(function())
frame = next(generator)
found = collect_smart_step_into_variants(
frame, 0, 99999, base=function.__code__.co_firstlineno)
if sys.version_info[:2] < (3, 9):
check(found, [
Variant(name='__eq__', is_visited=True, line=3, offset=18, call_order=1),
Variant(name='__ne__', is_visited=True, line=5, offset=33, call_order=1),
Variant(name='__gt__', is_visited=True, line=7, offset=48, call_order=1),
Variant(name='__ge__', is_visited=True, line=9, offset=63, call_order=1),
Variant(name='__lt__', is_visited=True, line=11, offset=78, call_order=1),
Variant(name='__le__', is_visited=True, line=13, offset=93, call_order=1),
Variant(name='is', is_visited=True, line=15, offset=108, call_order=1),
Variant(name=('_getframe', 'sys'), is_visited=True, line=18, offset=123, call_order=1),
])
else:
check(found, [
Variant(name='__eq__', is_visited=True, line=3, offset=18, call_order=1),
Variant(name='__ne__', is_visited=True, line=5, offset=33, call_order=1),
Variant(name='__gt__', is_visited=True, line=7, offset=48, call_order=1),
Variant(name='__ge__', is_visited=True, line=9, offset=63, call_order=1),
Variant(name='__lt__', is_visited=True, line=11, offset=78, call_order=1),
Variant(name='__le__', is_visited=True, line=13, offset=93, call_order=1),
Variant(name=('_getframe', 'sys'), is_visited=True, line=18, offset=123, call_order=1),
])
def _test_find_bytecode():
import glob
import dis
from io import StringIO
root_dir = 'C:\\bin\\Python310\\Lib\\site-packages\\'
i = 0
for filename in glob.iglob(root_dir + '**/*.py', recursive=True):
print(filename)
with open(filename, 'r', encoding='utf-8') as stream:
try:
contents = stream.read()
except:
sys.stderr.write('Unable to read file: %s' % (filename,))
continue
code_obj = compile(contents, filename, 'exec')
s = StringIO()
dis.dis(code_obj, file=s)
# https://docs.python.org/3.10/library/dis.html has references to the new opcodes added.
if 'COPY_DICT_WITHOUT_KEYS' in s.getvalue():
dis.dis(code_obj)
raise AssertionError('Found bytecode in: %s' % filename)
# i += 1
# if i == 1000:
# break
|
se3cnn/point/self_interaction.py | mariogeiger/se3cnn | 170 | 12762641 |
# pylint: disable=arguments-differ, no-member, missing-docstring, invalid-name, line-too-long
from functools import reduce
import torch
from se3cnn.point.kernel import Kernel
from se3cnn.point.radial import ConstantRadialModel
class SortSphericalSignals(torch.nn.Module):
def __init__(self, Rs):
super().__init__()
ljds = []
j = 0
for mul, l in Rs:
d = mul * (2 * l + 1)
ljds.append((l, j, d))
j += d
mixing_matrix = torch.zeros(j, j)
i = 0
for _l, j, d in sorted(ljds):
mixing_matrix[i:i+d, j:j+d] = torch.eye(d)
i += d
self.register_buffer('mixing_matrix', mixing_matrix)
def forward(self, x):
"""
:param x: tensor [batch, feature, ...]
"""
output = torch.einsum('ij,zja->zia', (self.mixing_matrix, x.flatten(2))).contiguous()
return output.view(*x.size())
class ConcatenateSphericalSignals(torch.nn.Module):
def __init__(self, *Rs):
super().__init__()
Rs = reduce(list.__add__, Rs, [])
self.sort = SortSphericalSignals(Rs)
def forward(self, *signals):
combined = torch.cat(signals, dim=1)
return self.sort(combined)
class SelfInteraction(torch.nn.Module):
def __init__(self, Rs_in, Rs_out):
super().__init__()
self.kernel = Kernel(Rs_in, Rs_out, ConstantRadialModel)
def forward(self, features):
"""
:param features: tensor [..., channel]
        :return: tensor [..., channel]
"""
*size, n = features.size()
features = features.view(-1, n)
k = self.kernel(features.new_zeros(features.size(0), 3))
features = torch.einsum("zij,zj->zi", (k, features))
features = features.view(*size, -1)
return features
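# --- Hedged usage sketch (editor's illustration, not part of the original file) ---
# Assumes se3cnn and torch are importable; the Rs lists and tensor sizes are made up.
# Rs = [(multiplicity, l)] gives a channel dimension of sum(mul * (2 * l + 1)),
# i.e. 2*1 + 1*3 = 5 on the input side and 4*1 = 4 on the output side here.
def _self_interaction_example():
    import torch
    Rs_in = [(2, 0), (1, 1)]
    Rs_out = [(4, 0)]
    layer = SelfInteraction(Rs_in, Rs_out)
    features = torch.randn(10, 5)  # [..., channel] with channel = 5
    out = layer(features)
    assert out.shape == (10, 4)
    return out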
|
start.py | DennyDai/angr-management | 474 | 12762645 |
#!/usr/bin/env python3
from angrmanagement.__main__ import main
if __name__ == '__main__':
main()
|
pykeops/torch/kernel_product/__init__.py | mdiazmel/keops | 695 | 12762648 |
import warnings
warnings.simplefilter("default")
warnings.warn(
"[pyKeOps]: the kernel_product syntax is deprecated. Please consider using the LazyTensor helper instead.",
DeprecationWarning,
)
from .kernels import Kernel, kernel_product, kernel_formulas
from .formula import Formula
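# --- Hedged migration sketch (editor's illustration, not part of the original file) ---
# The deprecation warning above points to the LazyTensor helper; this is a minimal
# Gaussian-kernel reduction written with LazyTensor, assuming pykeops and torch are
# installed. The shapes and the 0.5 bandwidth are arbitrary illustration values.
def _lazytensor_example():
    import torch
    from pykeops.torch import LazyTensor
    x = torch.randn(1000, 3)
    y = torch.randn(2000, 3)
    b = torch.randn(2000, 1)
    x_i = LazyTensor(x[:, None, :])  # symbolic (M, 1, 3)
    y_j = LazyTensor(y[None, :, :])  # symbolic (1, N, 3)
    K_ij = (-((x_i - y_j) ** 2).sum(-1) / 0.5).exp()  # symbolic (M, N) Gaussian kernel
    return K_ij @ b  # kernel-weighted sum, dense result of shape (M, 1)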
|
v2/backend/blog/models/category.py | jonfairbanks/rtsp-nvr | 558 | 12762652 |
from backend.database import (
Column,
Model,
String,
relationship,
slugify,
)
@slugify('name')
class Category(Model):
name = Column(String(32))
slug = Column(String(32))
articles = relationship('Article', back_populates='category')
series = relationship('Series', back_populates='category')
__repr_props__ = ('id', 'name')
def __init__(self, name, **kwargs):
super().__init__(**kwargs)
self.name = name
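# --- Hedged usage sketch (editor's illustration, not part of the original file) ---
# Assumes the backend's @slugify('name') decorator derives `slug` from `name` when
# the row is persisted; `session` stands in for however this backend exposes its
# SQLAlchemy session, so both names below are placeholders.
def _category_example(session):
    category = Category('Getting Started')
    session.add(category)
    session.commit()
    return category.slug  # expected to be 'getting-started' once slugify has run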
|
pronto/entity/__init__.py | althonos/pronto | 182 | 12762685 |
import datetime
import operator
import typing
import weakref
from typing import AbstractSet, Any, Dict, FrozenSet, Iterable, Iterator, Optional, Set
from ..definition import Definition
from ..pv import PropertyValue
from ..synonym import Synonym, SynonymData, SynonymType
from ..utils.meta import roundrepr, typechecked
from ..xref import Xref
if typing.TYPE_CHECKING:
from ..ontology import _DataGraph, Ontology
from ..relationship import Relationship, RelationshipSet
from .attributes import Relationships
__all__ = ["EntityData", "Entity", "EntitySet"]
_D = typing.TypeVar("_D", bound="EntityData")
_E = typing.TypeVar("_E", bound="Entity")
_S = typing.TypeVar("_S", bound="EntitySet")
class EntityData:
id: str
alternate_ids: Set[str]
annotations: Set[PropertyValue]
anonymous: bool
builtin: bool
comment: Optional[str]
consider: Set[str]
created_by: Optional[str]
creation_date: Optional[datetime.datetime]
disjoint_from: Set[str]
definition: Optional[Definition]
equivalent_to: Set[str]
name: Optional[str]
namespace: Optional[str]
obsolete: bool
relationships: Dict[str, Set[str]]
replaced_by: Set[str]
subsets: Set[str]
synonyms: Set[SynonymData]
union_of: Set[str]
xrefs: Set[Xref]
if typing.TYPE_CHECKING:
__annotations__: Dict[str, str]
__slots__ = ("__weakref__",) + tuple(__annotations__) # noqa: E0602
class Entity(typing.Generic[_D, _S]):
"""An entity in the ontology graph.
    With respect to the OBO semantics, an `Entity` is either a term or a
relationship in the ontology graph. Any entity has a unique identifier as
well as some common properties.
"""
if __debug__ or typing.TYPE_CHECKING:
__data: "weakref.ReferenceType[_D]"
__slots__: Iterable[str] = ()
def __init__(self, ontology: "Ontology", data: "_D"):
self.__data = weakref.ref(data)
self.__id = data.id
self.__ontology = ontology
def _data(self) -> "EntityData":
rdata = self.__data()
if rdata is None:
raise RuntimeError("internal data was deallocated")
return rdata
else:
__slots__: Iterable[str] = ("_data",) # type: ignore
def __init__(self, ontology: "Ontology", data: "_D"):
self._data = weakref.ref(data) # type: ignore
self.__ontology = ontology
self.__id = data.id
_Set: typing.ClassVar[typing.Type[_S]] = NotImplemented
_data_getter: typing.Callable[["Ontology"], "_DataGraph"] = NotImplemented
# --- Private helpers ----------------------------------------------------
def _ontology(self) -> "Ontology":
return self.__ontology
# --- Magic Methods ------------------------------------------------------
def __eq__(self, other: Any) -> bool:
if isinstance(other, Entity):
return self.id == other.id
return False
def __lt__(self, other):
if isinstance(other, Entity):
return self.id < other.id
return NotImplemented
def __le__(self, other):
if isinstance(other, Entity):
return self.id <= other.id
return NotImplemented
def __gt__(self, other):
if isinstance(other, Entity):
return self.id > other.id
return NotImplemented
def __ge__(self, other):
if isinstance(other, Entity):
return self.id >= other.id
return NotImplemented
def __hash__(self):
return hash((self.id))
def __repr__(self):
return roundrepr.make(type(self).__name__, self.id, name=(self.name, None))
# --- Data descriptors ---------------------------------------------------
@property
def alternate_ids(self) -> Set[str]:
"""`set` of `str`: A set of alternate IDs for this entity."""
return self._data().alternate_ids
@alternate_ids.setter # type: ignore
def alternate_ids(self, ids: Iterable[str]):
self._data().alternate_ids = set(ids)
@property
def annotations(self) -> Set[PropertyValue]:
"""`set` of `PropertyValue`: Annotations relevant to the entity."""
return self._data().annotations
@annotations.setter
def annotations(self, value: Iterable[PropertyValue]) -> None:
self._data().annotations = set(value)
@property
def anonymous(self) -> bool:
"""`bool`: Whether or not the entity has an anonymous id.
Semantics of anonymous entities are the same as B-Nodes in RDF.
"""
return self._data().anonymous
@anonymous.setter
def anonymous(self, value: bool):
self._data().anonymous = value
@property
def builtin(self) -> bool:
"""`bool`: Whether or not the entity is built-in to the OBO format.
        ``pronto`` uses this tag on the ``is_a`` relationship, which is
        axiomatic to the OBO language but treated as a relationship in the
library.
"""
return self._data().builtin
@builtin.setter # type: ignore
@typechecked(property=True)
def builtin(self, value: bool):
self._data().builtin = value
@property
def comment(self) -> Optional[str]:
"""`str` or `None`: A comment about the current entity.
Comments in ``comment`` clauses are guaranteed to be conserved by OBO
parsers and serializers, unlike bang comments. A non `None` `comment`
is semantically equivalent to a ``rdfs:comment`` in OWL2. When parsing
from OWL, several RDF comments will be merged together into a single
``comment`` clause spanning over multiple lines.
"""
return self._data().comment
@comment.setter
def comment(self, value: Optional[str]):
self._data().comment = value
@property
def consider(self) -> _S:
"""`EntitySet`: A set of potential substitutes for an obsolete term.
An obsolete entity can provide one or more entities which may be
        appropriate substitutes, but need to be looked at carefully by a
human expert before the replacement is done.
See Also:
`~Entity.replaced_by`, which provides a set of entities suitable
for automatic replacement.
"""
s = self._Set()
s._ids = self._data().consider
s._ontology = self._ontology()
return s
@consider.setter
def consider(self, consider: Iterable[_E]) -> None:
if isinstance(consider, EntitySet):
data = consider._ids
else:
data = {entity.id for entity in consider}
self._data().consider = data
@property
def created_by(self) -> Optional[str]:
"""`str` or `None`: The name of the creator of the entity, if any.
This property gets translated to a ``dc:creator`` annotation in OWL2,
which has very broad semantics. Some OBO ontologies may instead use
other annotation properties such as the ones found in `Information
Interchange Ontology <http://www.obofoundry.org/ontology/iao.html>`_,
which can be accessed in the `annotations` attribute of the entity,
if any.
"""
return self._data().created_by
@created_by.setter # type: ignore
@typechecked(property=True)
def created_by(self, value: Optional[str]):
self._data().created_by = value
@property
def creation_date(self) -> Optional[datetime.datetime]:
"""`~datetime.datetime` or `None`: The date the entity was created."""
return self._data().creation_date
@creation_date.setter # type: ignore
@typechecked(property=True)
def creation_date(self, value: Optional[datetime.datetime]):
self._data().creation_date = value
@property
def definition(self) -> Optional[Definition]:
"""`Definition` or `None`: The definition of the current entity.
Definitions in OBO are intended to be human-readable text describing
the entity, with some additional cross-references if possible.
Example:
>>> hp = pronto.Ontology.from_obo_library("hp.obo")
>>> term = hp["HP:0009882"]
>>> term.name
'Short distal phalanx of finger'
>>> str(term.definition)
'Short distance from the end of the finger to the most distal...'
>>> sorted(term.definition.xrefs)
[Xref('HPO:probinson'), Xref('PMID:19125433')]
"""
return self._data().definition
@definition.setter # type: ignore
@typechecked(property=True)
def definition(self, definition: Optional[Definition]):
self._data().definition = definition
@property
def disjoint_from(self) -> _S:
"""`EntitySet`: The entities declared as disjoint from this entity.
Two entities are disjoint if they have no instances in common. Two
entities that are disjoint cannot share any subentities, but the
opposite is not always true.
"""
s = self._Set()
s._ids = self._data().disjoint_from
s._ontology = self._ontology()
return s
@disjoint_from.setter
def disjoint_from(self, disjoint: Iterable[_E]):
if isinstance(disjoint, EntitySet):
data = disjoint._ids
else:
data = {entity.id for entity in disjoint}
self._data().disjoint_from = data
@property
def equivalent_to(self) -> _S:
"""`EntitySet`: The entities declared as equivalent to this entity."""
s = self._Set()
s._ids = self._data().equivalent_to
s._ontology = self._ontology()
return s
@equivalent_to.setter
def equivalent_to(self, entities: Iterable[_E]):
if isinstance(entities, EntitySet):
data = entities._ids
else:
data = {entity.id for entity in entities}
self._data().equivalent_to = data
@property
def id(self) -> str:
"""`str`: The OBO identifier of the entity.
Identifiers can be either prefixed (e.g. ``MS:1000031``), unprefixed
(e.g. ``part_of``) or given as plain URLs. Identifiers cannot be
edited.
"""
return self.__id
@property
def name(self) -> Optional[str]:
"""`str` or `None`: The name of the entity.
        Names are formally equivalent to ``rdfs:label`` in OWL2. The OBO format
version 1.4 made names optional to improve OWL interoperability, as
labels are optional in OWL.
"""
return self._data().name
@name.setter # type: ignore
@typechecked(property=True)
def name(self, value: Optional[str]):
self._data().name = value
@property
def namespace(self) -> Optional[str]:
"""`str` or `None`: The namespace this entity is defined in."""
return self._data().namespace
@namespace.setter # type: ignore
@typechecked(property=True)
def namespace(self, ns: Optional[str]):
self._data().namespace = ns
@property
def obsolete(self) -> bool:
"""`bool`: Whether or not the entity is obsolete.
Hint:
All OBO entities can be made obsolete through a boolean flag, and
map to one or several replacements. When querying an obsolete
entity, ``pronto`` will **not** attempt to perform any kind of
replacement itself ::
>>> ms = pronto.Ontology.from_obo_library("ms.obo")
>>> term = ms["MS:1001414"]
>>> term
Term('MS:1001414', name='MGF scans')
>>> term.obsolete
True
To always get the up-to-date, non-obsolete entity, you could use
the following snippet, going through a term replacement if there
is no ambiguity ::
>>> while term.obsolete:
... if len(term.replaced_by) != 1:
... raise ValueError(f"no replacement for {term.id}")
... term = term.replaced_by.pop()
>>> term
Term('MS:1000797', name='peak list scans')
See Also:
`~.Entity.consider` and `~Entity.replaced_by`, storing some
replacement options for an obsolete entity.
"""
return self._data().obsolete
@obsolete.setter # type: ignore
@typechecked(property=True)
def obsolete(self, value: bool):
self._data().obsolete = value
@property
def relationships(self: _E) -> "Relationships[_E, _S]":
"""`~.Relationships`: The links from an entity to other entities.
This property returns an object that maps a `~.Relationship` to
an `~.EntitySet` (either a `~.TermSet` for `Term.relationships`, or
a `~.RelationshipSet` for `Relationship.relationships`).
Hint:
The mapping is mutable, so relationships can be created or removed
using the usual interface of a `~collections.abc.MutableMapping`.
Example:
Get the ``MS:1000004`` term (*sample mass*) from the Mass
Spectrometry ontology::
>>> ms = pronto.Ontology.from_obo_library("ms.obo")
>>> sample_mass = ms["MS:1000004"]
Then use the ``relationships`` property to get the relevant
unit from the Unit Ontology::
>>> sorted(sample_mass.relationships.keys())
[Relationship('has_units', name='has_units')]
>>> sample_mass.relationships[ms.get_relationship('has_units')]
TermSet({Term('UO:0000021', name='gram')})
"""
from .attributes import Relationships
return Relationships(self)
@relationships.setter
def relationships(self, rels: typing.Mapping["Relationship", Iterable[_E]]):
self._data().relationships = {
relation.id: set(entity.id for entity in entities)
for relation, entities in rels.items()
}
@property
def replaced_by(self) -> _S:
"""`EntitySet`: A set of of replacements for an obsolete term.
An obsolete entity can provide one or more replacement that can
safely be used to automatically reassign instances to non-obsolete
classes.
See Also:
`~Entity.consider`, which provides a set of entities suitable
for replacement but requiring expert curation.
"""
s = self._Set()
s._ids = self._data().replaced_by
s._ontology = self._ontology()
return s
@replaced_by.setter
def replaced_by(self, replacements: Iterable[_E]) -> None:
if isinstance(replacements, EntitySet):
data = replacements._ids
else:
data = set(entity.id for entity in replacements)
self._data().replaced_by = data
@property
def subsets(self) -> FrozenSet[str]:
"""`frozenset` of `str`: The subsets containing this entity."""
return frozenset(self._data().subsets)
@subsets.setter # type: ignore
@typechecked(property=True)
def subsets(self, subsets: FrozenSet[str]):
declared = set(s.name for s in self._ontology().metadata.subsetdefs)
for subset in subsets:
if subset not in declared:
raise ValueError(f"undeclared subset: {subset!r}")
self._data().subsets = set(subsets)
@property
def synonyms(self) -> FrozenSet[Synonym]:
"""`frozenset` of `Synonym`: A set of synonyms for this entity."""
ontology, termdata = self._ontology(), self._data()
return frozenset(Synonym(ontology, s) for s in termdata.synonyms)
@synonyms.setter # type: ignore
@typechecked(property=True)
def synonyms(self, synonyms: Iterable[Synonym]):
self._data().synonyms = {syn._data() for syn in synonyms}
@property
def union_of(self) -> _S:
s = self._Set()
s._ids = self._data().union_of
s._ontology = self._ontology()
return s
@union_of.setter
def union_of(self, union_of: Iterable[_E]) -> None:
if isinstance(union_of, EntitySet):
data = union_of._ids
else:
data = set()
for entity in union_of:
if not isinstance(entity, Entity):
ty = type(entity).__name__
raise TypeError(f"expected `Entity`, found {ty}")
data.add(entity.id)
if len(data) == 1:
raise ValueError("'union_of' cannot have a cardinality of 1")
self._data().union_of = data
@property
def xrefs(self) -> FrozenSet[Xref]:
"""`frozenset` of `Xref`: A set of database cross-references.
Xrefs can be used to describe an analogous entity in another
vocabulary, such as a database or a semantic knowledge base.
"""
return frozenset(self._data().xrefs)
@xrefs.setter # type: ignore
@typechecked(property=True)
def xrefs(self, xrefs: FrozenSet[Xref]):
self._data().xrefs = set(xrefs)
# --- Convenience methods ------------------------------------------------
def add_synonym(
self,
description: str,
scope: Optional[str] = None,
type: Optional[SynonymType] = None,
xrefs: Optional[Iterable[Xref]] = None,
) -> Synonym:
"""Add a new synonym to the current entity.
Arguments:
description (`str`): The alternate definition of the entity, or a
related human-readable synonym.
scope (`str` or `None`): An optional synonym scope. Must be either
**EXACT**, **RELATED**, **BROAD** or **NARROW** if given.
type (`~pronto.SynonymType` or `None`): An optional synonym type.
Must be declared in the header of the current ontology.
xrefs (iterable of `Xref`, or `None`): A collections of database
cross-references backing the origin of the synonym.
Raises:
ValueError: when given an invalid synonym type or scope.
Returns:
`~pronto.Synonym`: A new synonym for the terms. The synonym is
already added to the `Entity.synonyms` collection.
"""
# check the type is declared in the current ontology
if type is None:
type_id: Optional[str] = None
else:
try:
type_id = self._ontology().get_synonym_type(type.id).id
except KeyError as ke:
raise ValueError(f"undeclared synonym type {type.id!r}") from ke
data = SynonymData(description, scope, type_id, xrefs=xrefs)
self._data().synonyms.add(data)
return Synonym(self._ontology(), data)
class EntitySet(typing.Generic[_E], typing.MutableSet[_E]):
"""A specialized mutable set to store `Entity` instances."""
# --- Magic methods ------------------------------------------------------
def __init__(self, entities: Optional[Iterable[_E]] = None) -> None:
self._ids: Set[str] = set()
self._ontology: "Optional[Ontology]" = None
for entity in entities if entities is not None else ():
if __debug__ and not isinstance(entity, Entity):
err_msg = "'entities' must be iterable of Entity, not {}"
raise TypeError(err_msg.format(type(entity).__name__))
if self._ontology is None:
self._ontology = entity._ontology()
if self._ontology is not entity._ontology():
raise ValueError("entities do not originate from the same ontology")
self._ids.add(entity.id)
def __contains__(self, other: object):
if isinstance(other, Entity):
return other.id in self._ids
return False
def __iter__(self) -> Iterator[_E]:
return map(lambda t: self._ontology[t], iter(self._ids)) # type: ignore
def __len__(self):
return len(self._ids)
def __repr__(self):
ontology = self._ontology
elements = (repr(ontology[id_]) for id_ in self._ids)
return f"{type(self).__name__}({{{', '.join(elements)}}})"
def __iand__(self, other: AbstractSet[_E]) -> "EntitySet[_E]":
if isinstance(other, EntitySet):
self._ids &= other._ids
else:
super().__iand__(other)
if not self._ids:
self._ontology = None
return self
def __and__(self, other: AbstractSet[_E]) -> "EntitySet[_E]":
if isinstance(other, EntitySet):
s = type(self)()
s._ids = self._ids.__and__(other._ids)
s._ontology = self._ontology if s._ids else None
else:
s = type(self)(super().__and__(other))
return s
def __ior__(self, other: AbstractSet[_E]) -> "EntitySet[_E]":
if not isinstance(other, EntitySet):
other = type(self)(other)
self._ids |= other._ids
self._ontology = self._ontology or other._ontology
return self
def __or__(self, other: AbstractSet[_E]) -> "EntitySet[_E]":
if isinstance(other, EntitySet):
s = type(self)()
s._ids = self._ids.__or__(other._ids)
s._ontology = self._ontology or other._ontology
else:
s = type(self)(super().__or__(other))
return s
def __isub__(self, other: AbstractSet[_E]) -> "EntitySet[_E]":
if isinstance(other, EntitySet):
self._ids -= other._ids
else:
super().__isub__(other)
if not self._ids:
self._ontology = None
return self
def __sub__(self, other: AbstractSet[_E]) -> "EntitySet[_E]":
if isinstance(other, EntitySet):
s = type(self)()
s._ids = self._ids.__sub__(other._ids)
s._ontology = self._ontology
else:
s = type(self)(super().__sub__(other))
return s
def __ixor__(self, other: AbstractSet[_E]) -> "EntitySet[_E]":
if isinstance(other, EntitySet):
self._ids ^= other._ids
self._ontology = self._ontology or other._ontology
else:
super().__ixor__(other)
if not self._ids:
self._ontology = None
return self
def __xor__(self, other: AbstractSet[_E]) -> "EntitySet[_E]":
if isinstance(other, EntitySet):
s = type(self)()
s._ids = self._ids.__xor__(other._ids)
s._ontology = self._ontology or other._ontology
else:
s = type(self)(super().__xor__(other))
if not s._ids:
s._ontology = None
return s
# --- Methods ------------------------------------------------------------
def add(self, entity: _E) -> None:
if self._ontology is None:
self._ontology = entity._ontology()
elif self._ontology is not entity._ontology():
raise ValueError("cannot use `Entity` instances from different `Ontology`")
self._ids.add(entity.id)
def clear(self) -> None:
self._ids.clear()
self._ontology = None
def discard(self, entity: _E) -> None:
self._ids.discard(entity.id)
def pop(self) -> _E:
id_ = self._ids.pop()
entity = self._ontology[id_] # type: ignore
if not self._ids:
self._ontology = None
return entity # type: ignore
def remove(self, entity: _E):
if self._ontology is not None and self._ontology is not entity._ontology():
raise ValueError("cannot use `Entity` instances from different `Ontology`")
self._ids.remove(entity.id)
# --- Attributes ---------------------------------------------------------
@property
def ids(self) -> FrozenSet[str]:
return frozenset(map(operator.attrgetter("id"), iter(self)))
@property
def alternate_ids(self) -> FrozenSet[str]:
return frozenset(id for entity in self for id in entity.alternate_ids)
@property
def names(self) -> FrozenSet[str]:
return frozenset(map(operator.attrgetter("name"), iter(self)))
|
tests/composite/examples/prim_composite_full.py | strint/myia | 222 | 12762706 | """Definitions for the primitive `composite_full`."""
from myia.lib import (
SHAPE,
TYPE,
VALUE,
AbstractArray,
AbstractScalar,
AbstractType,
abstract_array,
distribute,
force_pending,
scalar_cast,
u64tup_typecheck,
)
from myia.operations import primitives as P
from myia.xtype import NDArray
def pyimpl_composite_full(shape, fill_value, abstract_scalar_type):
"""Implement `composite_full`."""
scalar_value = scalar_cast(fill_value, abstract_scalar_type)
return distribute(
P.scalar_to_array(scalar_value, abstract_array(shape, scalar_value)),
shape,
)
async def infer_composite_full(
self,
engine,
shape: u64tup_typecheck,
fill_value: AbstractScalar,
dtype: AbstractType,
):
"""Infer the return type of primitive `composite_full`."""
return AbstractArray(
AbstractScalar(
{
TYPE: await force_pending(dtype.element.xtype()),
VALUE: fill_value.xvalue(),
}
),
{
SHAPE: tuple(
self.require_constant(e, argnum=f'"0:shape[{edx}]"')
for edx, e in enumerate(shape.elements)
),
TYPE: NDArray,
},
)
|
src/patchy/api.py | adamchainz/patchy | 105 | 12762718 |
import __future__
import ast
import inspect
import os
import shutil
import subprocess
import sys
from functools import wraps
from tempfile import mkdtemp
from textwrap import dedent
from types import CodeType, TracebackType
from typing import (
Any,
Callable,
Dict,
List,
Optional,
Tuple,
Type,
TypeVar,
Union,
cast,
)
from weakref import WeakKeyDictionary
from .cache import PatchingCache
if sys.version_info >= (3, 9):
from pkgutil import resolve_name as pkgutil_resolve_name
else:
from pkgutil_resolve_name import resolve_name as pkgutil_resolve_name
__all__ = ("patch", "mc_patchface", "unpatch", "replace", "temp_patch")
# Public API
def patch(func: Union[Callable[..., Any], str], patch_text: str) -> None:
_do_patch(func, patch_text, forwards=True)
mc_patchface = patch
def unpatch(func: Union[Callable[..., Any], str], patch_text: str) -> None:
_do_patch(func, patch_text, forwards=False)
def replace(
func: Callable[..., Any],
expected_source: Optional[str],
new_source: str,
) -> None:
if expected_source is not None:
expected_source = dedent(expected_source)
current_source = _get_source(func)
_assert_ast_equal(current_source, expected_source, func.__name__)
new_source = dedent(new_source)
_set_source(func, new_source)
AnyFunc = TypeVar("AnyFunc", bound=Callable[..., Any])
class temp_patch:
def __init__(self, func: Union[Callable[..., Any], str], patch_text: str) -> None:
self.func = func
self.patch_text = patch_text
def __enter__(self) -> None:
patch(self.func, self.patch_text)
def __exit__(
self,
exc_type: Union[Type[BaseException], None],
exc_val: Union[BaseException, None],
exc_tb: Union[TracebackType, None],
) -> None:
unpatch(self.func, self.patch_text)
def __call__(self, decorable: AnyFunc) -> AnyFunc:
@wraps(decorable)
def wrapper(*args: Any, **kwargs: Any) -> Any:
with self:
decorable(*args, **kwargs)
return cast(AnyFunc, wrapper)
# Gritty internals
def _do_patch(
func: Union[Callable[..., Any], str],
patch_text: str,
forwards: bool,
) -> None:
if isinstance(func, str):
func = cast(Callable[..., Any], pkgutil_resolve_name(func))
source = _get_source(func)
patch_text = dedent(patch_text)
new_source = _apply_patch(source, patch_text, forwards, func.__name__)
_set_source(func, new_source)
_patching_cache = PatchingCache(maxsize=100)
def _apply_patch(
source: str,
patch_text: str,
forwards: bool,
name: str,
) -> str:
# Cached ?
try:
return _patching_cache.retrieve(source, patch_text, forwards)
except KeyError:
pass
# Write out files
tempdir = mkdtemp(prefix="patchy")
try:
source_path = os.path.join(tempdir, name + ".py")
with open(source_path, "w") as source_file:
source_file.write(source)
patch_path = os.path.join(tempdir, name + ".patch")
with open(patch_path, "w") as patch_file:
patch_file.write(patch_text)
if not patch_text.endswith("\n"):
patch_file.write("\n")
# Call `patch` command
command = ["patch"]
if not forwards:
command.append("--reverse")
command.extend([source_path, patch_path])
proc = subprocess.Popen(command, stderr=subprocess.PIPE, stdout=subprocess.PIPE)
stdout, stderr = proc.communicate()
if proc.returncode != 0:
msg = "Could not {action} the patch {prep} '{name}'.".format(
action=("apply" if forwards else "unapply"),
prep=("to" if forwards else "from"),
name=name,
)
msg += " The message from `patch` was:\n{}\n{}".format(
stdout.decode("utf-8"), stderr.decode("utf-8")
)
msg += "\nThe code to patch was:\n{}\nThe patch was:\n{}".format(
source, patch_text
)
raise ValueError(msg)
with open(source_path) as source_file:
new_source = source_file.read()
finally:
shutil.rmtree(tempdir)
_patching_cache.store(source, patch_text, forwards, new_source)
return new_source
def _get_flags_mask() -> int:
result = 0
for name in __future__.all_feature_names:
result |= getattr(__future__, name).compiler_flag
return result
FEATURE_MASK = _get_flags_mask()
# Stores the source of functions that have had their source changed
# Bad type hints because WeakKeyDictionary only indexable on Python 3.9+
_source_map: Dict[Callable[..., Any], str] = cast(
Dict[Callable[..., Any], str],
WeakKeyDictionary(),
)
def _get_source(func: Callable[..., Any]) -> str:
real_func = _get_real_func(func)
try:
return _source_map[real_func]
except KeyError:
source = inspect.getsource(func)
source = dedent(source)
return source
def _class_name(func: Callable[..., Any]) -> Optional[str]:
split_name = func.__qualname__.split(".")
try:
class_name = split_name[-2]
except IndexError:
return None
else:
if class_name == "<locals>":
return None
return class_name
def _set_source(func: Callable[..., Any], func_source: str) -> None:
# Fetch the actual function we are changing
real_func = _get_real_func(func)
# Figure out any future headers that may be required
feature_flags = real_func.__code__.co_flags & FEATURE_MASK
class_name = _class_name(func)
def _compile(
code: Union[str, ast.Module],
flags: int = 0,
) -> Union[CodeType, ast.Module]:
return compile(
code, "<patchy>", "exec", flags=feature_flags | flags, dont_inherit=True
)
def _parse(code: str) -> ast.Module:
result = _compile(code, flags=ast.PyCF_ONLY_AST)
assert isinstance(result, ast.Module)
return result
def _process_freevars() -> Tuple[str, ast.AST, List[str]]:
"""
Wrap the new function in a __patchy_freevars__ method that provides all
freevars of the original function.
        Because the new function must use exactly the same freevars as the
        original, also append to the new function with a body of code to force
        use of those freevars (in the case that the patch drops use of any
freevars):
def __patchy_freevars__():
eg_free_var_spam = object() <- added in wrapper
eg_free_var_ham = object() <- added in wrapper
def patched_func():
return some_global(eg_free_var_ham)
eg_free_var_spam <- appended to new func body
eg_free_var_ham <- appended to new func body
return patched_func
"""
_def = "def __patchy_freevars__():"
fvs = func.__code__.co_freevars
fv_body = [f" {fv} = object()" for fv in fvs]
fv_force_use_body = [f" {fv}" for fv in fvs]
if fv_force_use_body:
fv_force_use_ast = _parse("\n".join([_def] + fv_force_use_body))
fv_force_use = fv_force_use_ast.body[0].body # type: ignore [attr-defined]
else:
fv_force_use = []
_ast = _parse(func_source).body[0]
_ast.body = _ast.body + fv_force_use # type: ignore [attr-defined]
return _def, _ast, fv_body
def _process_method() -> ast.Module:
"""
Wrap the new method in a class to ensure the same mangling as would
have been performed on the original method:
def __patchy_freevars__():
class SomeClass(object):
def patched_func(self):
return some_globals(self.__some_mangled_prop)
return SomeClass.patched_func
"""
_def, _ast, fv_body = _process_freevars()
_global = (
""
if class_name in func.__code__.co_freevars
else f" global {class_name}\n"
)
class_src = "{_global} class {name}(object):\n pass".format(
_global=_global, name=class_name
)
ret = " return {class_name}.{name}".format(
class_name=class_name, name=func.__name__
)
to_parse = "\n".join([_def] + fv_body + [class_src, ret])
new_source = _parse(to_parse)
new_source.body[0].body[-2].body[0] = _ast # type: ignore [attr-defined]
return new_source
def _process_function() -> ast.Module:
_def, _ast, fv_body = _process_freevars()
name = func.__name__
ret = f" return {name}"
_global = [] if name in func.__code__.co_freevars else [f" global {name}"]
to_parse = "\n".join([_def] + _global + fv_body + [" pass", ret])
new_source = _parse(to_parse)
new_source.body[0].body[-2] = _ast # type: ignore [attr-defined]
return new_source
if class_name:
new_source = _process_method()
else:
new_source = _process_function()
# Compile and retrieve the new Code object
localz: Dict[str, Any] = {}
new_code = cast(CodeType, _compile(new_source))
exec(
new_code,
dict(func.__globals__), # type: ignore [attr-defined]
localz,
)
new_func = localz["__patchy_freevars__"]()
# Put the new Code object in place
real_func.__code__ = new_func.__code__
# Store the modified source. This used to be attached to the function but
# that is a bit naughty
_source_map[real_func] = func_source
def _get_real_func(func: Callable[..., Any]) -> Callable[..., Any]:
"""
Duplicates some of the logic implicit in inspect.getsource(). Basically
some function-esque things, such as classmethods, aren't functions but we
can peel back the layers to the underlying function very easily.
"""
if inspect.ismethod(func):
return func.__func__ # type: ignore [attr-defined]
else:
return func
def _assert_ast_equal(current_source: str, expected_source: str, name: str) -> None:
current_ast = ast.parse(current_source)
expected_ast = ast.parse(expected_source)
if not ast.dump(current_ast) == ast.dump(expected_ast):
msg = (
"The code of '{name}' has changed from expected.\n"
"The current code is:\n{current_source}\n"
"The expected code is:\n{expected_source}"
).format(
name=name, current_source=current_source, expected_source=expected_source
)
raise ValueError(msg)
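# --- Hedged usage sketch (editor's illustration, not part of the original module) ---
# `patch` takes a unified diff against the dedented source of the target function
# and applies it with the external `patch` command; the throwaway `sample` function
# below exists only for this sketch.
def _patch_example():
    def sample():
        return 1
    patch(sample, """\
        @@ -1,2 +1,2 @@
         def sample():
        -    return 1
        +    return 2
        """)
    assert sample() == 2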
|
Adafruit_BluefruitLE/bluez_dbus/device.py | acoomans/Adafruit_Python_BluefruitLE | 415 | 12762744 |
# Python object to represent the bluez DBus device object. Provides properties
# and functions to easily interact with the DBus object.
# Author: <NAME>
#
# Copyright (c) 2015 Adafruit Industries
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from past.builtins import map
import threading
import time
import uuid
import dbus
from ..config import TIMEOUT_SEC
from ..interfaces import Device
from ..platform import get_provider
from .adapter import _INTERFACE as _ADAPTER_INTERFACE
from .gatt import BluezGattService, BluezGattCharacteristic, _SERVICE_INTERFACE, _CHARACTERISTIC_INTERFACE
_INTERFACE = 'org.bluez.Device1'
class BluezDevice(Device):
"""Bluez BLE device."""
def __init__(self, dbus_obj):
"""Create an instance of the bluetooth device from the provided bluez
DBus object.
"""
self._device = dbus.Interface(dbus_obj, _INTERFACE)
self._props = dbus.Interface(dbus_obj, 'org.freedesktop.DBus.Properties')
self._connected = threading.Event()
self._disconnected = threading.Event()
self._props.connect_to_signal('PropertiesChanged', self._prop_changed)
def _prop_changed(self, iface, changed_props, invalidated_props):
# Handle property changes for the device. Note this call happens in
# a separate thread so be careful to make thread safe changes to state!
# Skip any change events not for this adapter interface.
if iface != _INTERFACE:
return
# If connected then fire the connected event.
if 'Connected' in changed_props and changed_props['Connected'] == 1:
self._connected.set()
# If disconnected then fire the disconnected event.
if 'Connected' in changed_props and changed_props['Connected'] == 0:
self._disconnected.set()
def connect(self, timeout_sec=TIMEOUT_SEC):
"""Connect to the device. If not connected within the specified timeout
then an exception is thrown.
"""
self._connected.clear()
self._device.Connect()
if not self._connected.wait(timeout_sec):
raise RuntimeError('Exceeded timeout waiting to connect to device!')
def disconnect(self, timeout_sec=TIMEOUT_SEC):
"""Disconnect from the device. If not disconnected within the specified
timeout then an exception is thrown.
"""
self._disconnected.clear()
self._device.Disconnect()
if not self._disconnected.wait(timeout_sec):
raise RuntimeError('Exceeded timeout waiting to disconnect from device!')
def list_services(self):
"""Return a list of GattService objects that have been discovered for
this device.
"""
return map(BluezGattService,
get_provider()._get_objects(_SERVICE_INTERFACE,
self._device.object_path))
def discover(self, service_uuids, char_uuids, timeout_sec=TIMEOUT_SEC):
"""Wait up to timeout_sec for the specified services and characteristics
to be discovered on the device. If the timeout is exceeded without
discovering the services and characteristics then an exception is thrown.
"""
        # Turn expected values into sets of UUIDs for fast comparison.
expected_services = set(service_uuids)
expected_chars = set(char_uuids)
# Loop trying to find the expected services for the device.
start = time.time()
while True:
# Find actual services discovered for the device.
actual_services = set(self.advertised)
# Find actual characteristics discovered for the device.
chars = map(BluezGattCharacteristic,
get_provider()._get_objects(_CHARACTERISTIC_INTERFACE,
self._device.object_path))
actual_chars = set(map(lambda x: x.uuid, chars))
# Compare actual discovered UUIDs with expected and return true if at
# least the expected UUIDs are available.
if actual_services >= expected_services and actual_chars >= expected_chars:
# Found at least the expected services!
return True
# Couldn't find the devices so check if timeout has expired and try again.
if time.time()-start >= timeout_sec:
return False
time.sleep(1)
@property
def advertised(self):
"""Return a list of UUIDs for services that are advertised by this
device.
"""
uuids = []
# Get UUIDs property but wrap it in a try/except to catch if the property
# doesn't exist as it is optional.
try:
uuids = self._props.Get(_INTERFACE, 'UUIDs')
except dbus.exceptions.DBusException as ex:
# Ignore error if device has no UUIDs property (i.e. might not be
# a BLE device).
if ex.get_dbus_name() != 'org.freedesktop.DBus.Error.InvalidArgs':
raise ex
return [uuid.UUID(str(x)) for x in uuids]
@property
def id(self):
"""Return a unique identifier for this device. On supported platforms
this will be the MAC address of the device, however on unsupported
platforms (Mac OSX) it will be a unique ID like a UUID.
"""
return self._props.Get(_INTERFACE, 'Address')
@property
def name(self):
"""Return the name of this device."""
return self._props.Get(_INTERFACE, 'Name')
@property
def is_connected(self):
"""Return True if the device is connected to the system, otherwise False.
"""
return self._props.Get(_INTERFACE, 'Connected')
@property
def rssi(self):
"""Return the RSSI signal strength in decibels."""
return self._props.Get(_INTERFACE, 'RSSI')
@property
def _adapter(self):
"""Return the DBus path to the adapter that owns this device."""
return self._props.Get(_INTERFACE, 'Adapter')
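# --- Hedged usage sketch (editor's illustration, not part of the original module) ---
# Assumes `device` is a BluezDevice obtained through the library's provider/adapter
# scanning helpers (not shown in this file), that the GATT service objects expose a
# `uuid` attribute, and that the two UUID arguments identify a real service and
# characteristic pair on the peripheral.
def _device_example(device, service_uuid, char_uuid):
    device.connect(timeout_sec=30)
    try:
        if not device.discover([service_uuid], [char_uuid], timeout_sec=30):
            raise RuntimeError('Service discovery timed out!')
        for service in device.list_services():
            print(service.uuid)
    finally:
        device.disconnect()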
|
pmm_scripts/hello_world_script.py | cardosofede/hummingbot | 542 | 12762756 | from hummingbot.pmm_script.pmm_script_base import PMMScriptBase
class HelloWorldPMMScript(PMMScriptBase):
"""
Demonstrates how to send messages using notify and log functions. It also shows how errors and commands are handled.
"""
def on_tick(self):
if len(self.mid_prices) < 3:
self.notify("Hello Hummingbots World!")
self.log("Hello world logged.")
elif 3 <= len(self.mid_prices) < 5:
            # The statement below will cause a ZeroDivisionError, which Hummingbot will later report on the log screen.
_ = 1 / 0
def on_command(self, cmd, args):
if cmd == 'ping':
self.notify('pong!')
else:
self.notify(f'Unrecognised command: {cmd}')
|
tests/settings.py | DamnedScholar/django-sockpuppet | 371 | 12762775 | """
Django settings for example project.
"""
import os
from pathlib import Path
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = Path.cwd()
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = "a_not_so_secret_key"
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'channels',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'sockpuppet',
'tests.example',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'tests.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates'), ],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
ASGI_APPLICATION = 'sockpuppet.routing.application'
WSGI_APPLICATION = 'tests.example.wsgi.application'
CHANNEL_LAYERS = {
"default": {
"BACKEND": "channels.layers.InMemoryChannelLayer"
}
}
# CHANNEL_LAYERS = {
# "default": {
# "BACKEND": "channels_redis.core.RedisChannelLayer",
# "CONFIG": {
# "hosts": [("127.0.0.1", 6379)],
# },
# },
# }
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'root': {
'handlers': ['console'],
'level': 'DEBUG'
},
'handlers': {
'console': {
'level': os.getenv('DJANGO_LOG_LEVEL', 'INFO'),
'class': 'logging.StreamHandler',
'formatter': 'simple'
},
'sockpuppet': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'simple'
}
},
'formatters': {
'verbose': {
'format': "[%(asctime)s] %(levelname)s [%(name)s:%(lineno)s] %(message)s",
'datefmt': "%d/%b/%Y %H:%M:%S"
},
'simple': {
'format': '%(levelname)s %(message)s'
},
},
'loggers': {
'django.db.backends': {
# uncomment to see all queries
# 'level': 'DEBUG',
'handlers': ['console'],
},
'sockpuppet': {
'level': 'DEBUG',
'handlers': ['sockpuppet']
}
}
}
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
STATIC_URL = '/static/'
STATICFILES_DIRS = [
('js', '{}/jsdist/js/'.format(BASE_DIR)),
]
|
API/src/main/resources/images/_backup/_color/color.sikuli/color.py | MiguelDomingues/SikuliX1 | 1746 | 12762817 | reg = Region(106,108,370,160)
img1 = "source_activate.jpg"
img2 = "source_activated.jpg"
button = "buttonactivate.png"
"""
m = find(button)
m.highlight(2)
exit()
"""
ib = Finder(Image.create(button))
ib.find(button)
print "button:", ib.next().getScore()
ib = Finder(Image.create(img1))
ib.find(button)
print "img1:", ib.next().getScore()
ib = Finder(Image.create(img2))
ib.find(button)
print "img2:", ib.next().getScore()
"""
print "button:", Image(button).find(button)
print "img1:", Image(img1).find(button)
print "img2:", Image(img2).find(button)
"""
|
parsing/views.py | playyard/infomate.club | 327 | 12762841 |
from django.contrib.syndication.views import Feed
from parsing.telegram.parser import parse_channel
class TelegramChannelFeed(Feed):
FEED_ITEMS = 30
def get_object(self, request, channel_name):
limit = int(request.GET.get("size") or self.FEED_ITEMS)
only = str(request.GET.get("only") or "")
return parse_channel(channel_name, only_text=only == "text", limit=limit)
def title(self, obj):
return obj.name
def items(self, obj):
return obj.messages
def link(self, obj):
return obj.url
def item_title(self, item):
return item.text
def item_description(self, item):
result = ""
if item.photo:
result += f"<img src=\"{item.photo}\"><br>"
if item.text:
result += str(item.text)
return result
def item_link(self, item):
return item.url
def item_pubdate(self, item):
return item.created_at
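# --- Hedged wiring sketch (editor's illustration, not part of the original module) ---
# A Feed subclass is exposed as an ordinary Django view; the URL pattern and route
# name below are assumptions, only the `channel_name` kwarg is required by
# get_object(). In urls.py:
#
# from django.urls import path
# from parsing.views import TelegramChannelFeed
#
# urlpatterns = [
#     path("telegram/<str:channel_name>/rss/", TelegramChannelFeed(), name="telegram_feed"),
# ]
#
# A request such as GET /telegram/<channel>/rss/?size=10&only=text then serves the
# latest items of that channel as an RSS feed.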
|
GPyOpt/util/stats.py | zhenwendai/GPyOpt | 850 | 12762878 |
# Copyright (c) 2016, the GPyOpt Authors
# Licensed under the BSD 3-clause license (see LICENSE.txt)
#from ..util.general import samples_multidimensional_uniform, multigrid, iroot
import numpy as np
|
geocube/_version.py | snowman2/geocube | 152 | 12762885 | """GeoCube Version"""
__version__ = "0.1.1.dev0"
|
receipt_parser_core/enhancer.py | Dielee/receipt-parser-legacy | 611 | 12762903 | # !/usr/bin/python3
# coding: utf-8
# Copyright 2015-2018
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
import os
import cv2
import numpy as np
from PIL import Image
from pytesseract import pytesseract
from wand.image import Image as WandImage
from scipy.ndimage import interpolation as inter
from receipt_parser_core import Receipt
from receipt_parser_core.config import read_config
BASE_PATH = os.getcwd()
INPUT_FOLDER = os.path.join(BASE_PATH, "data/img")
TMP_FOLDER = os.path.join(BASE_PATH, "data/tmp")
OUTPUT_FOLDER = os.path.join(BASE_PATH, "data/txt")
ORANGE = '\033[33m'
RESET = '\033[0m'
def prepare_folders():
"""
:return: void
Creates necessary folders
"""
for folder in [
INPUT_FOLDER, TMP_FOLDER, OUTPUT_FOLDER
]:
if not os.path.exists(folder):
os.makedirs(folder)
def find_images(folder):
"""
:param folder: str
Path to folder to search
:return: generator of str
List of images in folder
"""
for file in os.listdir(folder):
full_path = os.path.join(folder, file)
if os.path.isfile(full_path):
try:
_ = Image.open(full_path) # if constructor succeeds
yield file
except:
pass
def rotate_image(input_file, output_file, angle=90):
"""
:param input_file: str
Path to image to rotate
:param output_file: str
Path to output image
:param angle: float
Angle to rotate
:return: void
Rotates image and saves result
"""
with WandImage(filename=input_file) as img:
width, height = img.size
if width < height:
angle = 0
print(ORANGE + '\t~: ' + RESET + 'Rotate image by: ' + str(angle) + "°" + RESET)
with img.clone() as rotated:
rotated.rotate(angle)
rotated.save(filename=output_file)
def deskew_image(image, delta=1, limit=5):
def determine_score(arr, angle):
data = inter.rotate(arr, angle, reshape=False, order=0)
histogram = np.sum(data, axis=1)
score = np.sum((histogram[1:] - histogram[:-1]) ** 2)
return histogram, score
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)[1]
scores = []
angles = np.arange(-limit, limit + delta, delta)
for angle in angles:
histogram, score = determine_score(thresh, angle)
scores.append(score)
best_angle = angles[scores.index(max(scores))]
(h, w) = image.shape[:2]
center = (w // 2, h // 2)
M = cv2.getRotationMatrix2D(center, best_angle, 1.0)
print(ORANGE + '\t~: ' + RESET + 'Deskew image by: ' + str(best_angle) + ' angle' + RESET)
rotated = cv2.warpAffine(image, M, (w, h), flags=cv2.INTER_CUBIC, \
borderMode=cv2.BORDER_REPLICATE)
return rotated
def run_tesseract(input_file, output_file, language="deu"):
"""
:param input_file: str
Path to image to OCR
:param output_file: str
Path to output file
:return: void
Runs tesseract on image and saves result
"""
print(ORANGE + '\t~: ' + RESET + 'Parse image using pytesseract' + RESET)
print(ORANGE + '\t~: ' + RESET + 'Parse image at: ' + input_file + RESET)
print(ORANGE + '\t~: ' + RESET + 'Write result to: ' + output_file + RESET)
with io.BytesIO() as transfer:
with WandImage(filename=input_file) as img:
img.save(transfer)
with Image.open(transfer) as img:
image_data = pytesseract.image_to_string(img, lang=language, timeout=60, config="--psm 6")
out = open(output_file, "w", encoding='utf-8')
out.write(image_data)
out.close()
def rescale_image(img):
print(ORANGE + '\t~: ' + RESET + 'Rescale image' + RESET)
img = cv2.resize(img, None, fx=1.2, fy=1.2, interpolation=cv2.INTER_CUBIC)
return img
def grayscale_image(img):
print(ORANGE + '\t~: ' + RESET + 'Grayscale image' + RESET)
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
return img
def remove_noise(img):
kernel = np.ones((1, 1), np.uint8)
img = cv2.dilate(img, kernel, iterations=1)
img = cv2.erode(img, kernel, iterations=1)
print(ORANGE + '\t~: ' + RESET + 'Applying gaussianBlur and medianBlur' + RESET)
img = cv2.threshold(cv2.GaussianBlur(img, (5, 5), 0), 150, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]
img = cv2.threshold(cv2.bilateralFilter(img, 5, 75, 75), 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]
img = cv2.adaptiveThreshold(cv2.bilateralFilter(img, 9, 75, 75), 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
cv2.THRESH_BINARY, 31, 2)
return img
def remove_shadows(img):
rgb_planes = cv2.split(img)
result_planes = []
result_norm_planes = []
for plane in rgb_planes:
dilated_img = cv2.dilate(plane, np.ones((7,7), np.uint8))
bg_img = cv2.medianBlur(dilated_img, 21)
diff_img = 255 - cv2.absdiff(plane, bg_img)
norm_img = cv2.normalize(diff_img,None, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_8UC1)
result_planes.append(diff_img)
result_norm_planes.append(norm_img)
result = cv2.merge(result_planes)
return result
def detect_orientation(image):
coords = np.column_stack(np.where(image > 0))
angle = cv2.minAreaRect(coords)[-1]
print(ORANGE + '\t~: ' + RESET + 'Get rotation angle:' + str(angle) + RESET)
return image
def enhance_image(img, tmp_path, high_contrast=True, gaussian_blur=True, rotate=True):
img = rescale_image(img)
if rotate:
cv2.imwrite(tmp_path, img)
rotate_image(tmp_path, tmp_path)
img = cv2.imread(tmp_path)
img = deskew_image(img)
img = remove_shadows(img)
if high_contrast:
img = grayscale_image(img)
if gaussian_blur:
img = remove_noise(img)
return img
def process_receipt(config, filename, rotate=True, grayscale=True, gaussian_blur=True):
input_path = INPUT_FOLDER + "/" + filename
output_path = OUTPUT_FOLDER + "/" + filename.split(".")[0] + ".txt"
print(ORANGE + '~: ' + RESET + 'Process image: ' + ORANGE + input_path + RESET)
prepare_folders()
    img = cv2.imread(input_path)
    if img is None:
        # cv2.imread does not raise; it returns None when the file is missing or unreadable
        return Receipt(config=config, raw="")
tmp_path = os.path.join(
TMP_FOLDER, filename
)
    img = enhance_image(img, tmp_path, grayscale, gaussian_blur, rotate)
print(ORANGE + '~: ' + RESET + 'Temporary store image at: ' + ORANGE + tmp_path + RESET)
cv2.imwrite(tmp_path, img)
run_tesseract(tmp_path, output_path, config.language)
print(ORANGE + '~: ' + RESET + 'Store parsed text at: ' + ORANGE + output_path + RESET)
    with open(output_path, 'r', encoding='utf-8') as result_file:
        raw = result_file.readlines()
return Receipt(config=config, raw=raw)
def main():
prepare_folders()
dir_path = os.getcwd()
config = read_config(config=dir_path + "/config.yml")
images = list(find_images(INPUT_FOLDER))
print(ORANGE + '~: ' + RESET + 'Found: ' + ORANGE + str(len(images)),
RESET + ' images in: ' + ORANGE + INPUT_FOLDER + RESET)
i = 1
for image in images:
input_path = os.path.join(
INPUT_FOLDER,
image
)
tmp_path = os.path.join(
TMP_FOLDER,
image
)
out_path = os.path.join(
OUTPUT_FOLDER,
image + ".txt"
)
if i != 1: print()
print(ORANGE + '~: ' + RESET + 'Process image (' + ORANGE + str(i) + '/' + str(
len(images)) + RESET + ') : ' + input_path + RESET)
img = cv2.imread(input_path)
img = enhance_image(img, tmp_path)
cv2.imwrite(tmp_path, img)
run_tesseract(tmp_path, out_path, config.language)
i = i + 1
if __name__ == '__main__':
main()
|
src/local/butler/create_config.py | mi-ac/clusterfuzz | 5,023 | 12762931 | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Script for creating a new deployment config."""
import json
import os
import shutil
import subprocess
import sys
from google_auth_oauthlib.flow import InstalledAppFlow
from googleapiclient import discovery
import google_auth_httplib2
import httplib2
from local.butler import appengine
from local.butler import common
_REQUIRED_SERVICES = (
'appengineflex.googleapis.com',
'bigquery-json.googleapis.com',
'cloudapis.googleapis.com',
'cloudbuild.googleapis.com',
'clouddebugger.googleapis.com',
'clouderrorreporting.googleapis.com',
'cloudprofiler.googleapis.com',
'cloudresourcemanager.googleapis.com',
'compute.googleapis.com',
'containerregistry.googleapis.com',
'datastore.googleapis.com',
'deploymentmanager.googleapis.com',
'file.googleapis.com',
'iam.googleapis.com',
'iamcredentials.googleapis.com',
'logging.googleapis.com',
'monitoring.googleapis.com',
'oslogin.googleapis.com',
'pubsub.googleapis.com',
'redis.googleapis.com',
'replicapool.googleapis.com',
'replicapoolupdater.googleapis.com',
'resourceviews.googleapis.com',
'siteverification.googleapis.com',
'sourcerepo.googleapis.com',
'stackdriver.googleapis.com',
'storage-api.googleapis.com',
'storage-component.googleapis.com',
'vpcaccess.googleapis.com',
)
_NUM_RETRIES = 2
_ENABLE_SERVICE_BATCH_SIZE = 19
class DomainVerifier(object):
"""Domain verifier."""
def __init__(self, oauth_client_secrets_path):
flow = InstalledAppFlow.from_client_secrets_file(
oauth_client_secrets_path,
scopes=['https://www.googleapis.com/auth/siteverification'])
credentials = flow.run_console()
http = google_auth_httplib2.AuthorizedHttp(
credentials, http=httplib2.Http())
self.api = discovery.build('siteVerification', 'v1', http=http)
def get_domain_verification_tag(self, domain):
"""Get the domain verification meta tag."""
response = self.api.webResource().getToken(
body={
'verificationMethod': 'FILE',
'site': {
'identifier': domain,
'type': 'SITE',
}
}).execute(num_retries=_NUM_RETRIES)
return response['token']
def verify(self, domain):
"""Verify the domain verification meta tag."""
self.api.webResource().insert(
body={
'site': {
'identifier': domain,
'type': 'SITE',
}
},
verificationMethod='FILE').execute(num_retries=_NUM_RETRIES)
def add_owner(self, domain, email):
"""Add a new domain owner."""
response = self.api.webResource().get(id=domain).execute(
num_retries=_NUM_RETRIES)
if email not in response['owners']:
response['owners'].append(email)
self.api.webResource().update(
id=domain, body=response).execute(num_retries=_NUM_RETRIES)
def get_numeric_project_id(gcloud, project_id):
"""Get the numeric project ID."""
project_info = json.loads(
gcloud.run('projects', 'describe', project_id, '--format=json'))
return project_info['projectNumber']
def app_engine_service_account(project_id):
"""Get the default App Engine service account."""
return project_id + '@appspot.gserviceaccount.com'
def compute_engine_service_account(gcloud, project_id):
"""Get the default compute engine service account."""
return (get_numeric_project_id(gcloud, project_id) +
          '-compute@developer.gserviceaccount.com')
def enable_services(gcloud):
"""Enable required services."""
for i in range(0, len(_REQUIRED_SERVICES), _ENABLE_SERVICE_BATCH_SIZE):
end = i + _ENABLE_SERVICE_BATCH_SIZE
    gcloud.run('services', 'enable', *_REQUIRED_SERVICES[i:end])
def replace_file_contents(file_path, replacements):
"""Replace contents of a file."""
with open(file_path) as f:
old_contents = f.read()
contents = old_contents
for find, replace in replacements:
contents = contents.replace(find, replace)
if contents == old_contents:
return
with open(file_path, 'w') as f:
f.write(contents)
def project_bucket(project_id, bucket_name):
"""Return a project-specific bucket name."""
return '{name}.{project_id}.appspot.com'.format(
name=bucket_name, project_id=project_id)
def create_new_config(gcloud, project_id, new_config_dir,
domain_verification_tag, bucket_replacements,
gae_location, gce_zone, firebase_api_key):
"""Create a new config directory."""
if os.path.exists(new_config_dir):
print('Overwriting existing directory.')
shutil.rmtree(new_config_dir)
gae_region = appengine.region_from_location(gae_location)
replacements = [
('test-clusterfuzz-service-account-email',
compute_engine_service_account(gcloud, project_id)),
('test-clusterfuzz', project_id),
('test-project', project_id),
('domain-verification-tag', domain_verification_tag),
('gae-region', gae_region),
('gce-zone', gce_zone),
('firebase-api-key', firebase_api_key),
]
replacements.extend(bucket_replacements)
shutil.copytree(os.path.join('configs', 'test'), new_config_dir)
for root_dir, _, filenames in os.walk(new_config_dir):
for filename in filenames:
file_path = os.path.join(root_dir, filename)
replace_file_contents(file_path, replacements)
def deploy_appengine(gcloud, config_dir, appengine_location):
"""Deploy to App Engine."""
try:
gcloud.run('app', 'describe')
except common.GcloudError:
# Create new App Engine app if it does not exist.
gcloud.run('app', 'create', '--region=' + appengine_location)
subprocess.check_call([
'python', 'butler.py', 'deploy', '--force', '--targets', 'appengine',
'--prod', '--config-dir', config_dir
])
def deploy_zips(config_dir):
"""Deploy source zips."""
subprocess.check_call([
'python', 'butler.py', 'deploy', '--force', '--targets', 'zips', '--prod',
'--config-dir', config_dir
])
def create_buckets(project_id, buckets):
"""Create buckets."""
gsutil = common.Gsutil()
for bucket in buckets:
try:
gsutil.run('defstorageclass', 'get', 'gs://' + bucket)
except common.GsutilError:
# Create the bucket if it does not exist.
gsutil.run('mb', '-p', project_id, 'gs://' + bucket)
def set_cors(config_dir, buckets):
"""Sets cors settings."""
gsutil = common.Gsutil()
cors_file_path = os.path.join(config_dir, 'gae', 'cors.json')
for bucket in buckets:
gsutil.run('cors', 'set', cors_file_path, 'gs://' + bucket)
def add_service_account_role(gcloud, project_id, service_account, role):
"""Add an IAM role to a service account."""
gcloud.run('projects', 'add-iam-policy-binding', project_id, '--member',
'serviceAccount:' + service_account, '--role', role)
def execute(args):
"""Create a new config directory and deployment."""
# Check this early on, as the deployment at the end would fail otherwise.
if common.is_git_dirty():
print('Your checkout contains uncommitted changes. Cannot proceed.')
sys.exit(1)
verifier = DomainVerifier(args.oauth_client_secrets_path)
gcloud = common.Gcloud(args.project_id)
enable_services(gcloud)
# Get tag for domain verification.
appspot_domain = 'https://' + args.project_id + '.appspot.com/'
domain_verification_tag = verifier.get_domain_verification_tag(appspot_domain)
blobs_bucket = project_bucket(args.project_id, 'blobs')
deployment_bucket = project_bucket(args.project_id, 'deployment')
bucket_replacements = (
('test-blobs-bucket', blobs_bucket),
('test-deployment-bucket', deployment_bucket),
('test-bigquery-bucket', project_bucket(args.project_id, 'bigquery')),
('test-backup-bucket', project_bucket(args.project_id, 'backup')),
('test-coverage-bucket', project_bucket(args.project_id, 'coverage')),
('test-fuzzer-logs-bucket', project_bucket(args.project_id,
'fuzzer-logs')),
('test-corpus-bucket', project_bucket(args.project_id, 'corpus')),
('test-quarantine-bucket', project_bucket(args.project_id, 'quarantine')),
('test-shared-corpus-bucket',
project_bucket(args.project_id, 'shared-corpus')),
('test-fuzz-logs-bucket', project_bucket(args.project_id, 'fuzz-logs')),
('test-mutator-plugins-bucket',
project_bucket(args.project_id, 'mutator-plugins')),
)
# Write new configs.
create_new_config(gcloud, args.project_id, args.new_config_dir,
domain_verification_tag, bucket_replacements,
args.appengine_location, args.gce_zone,
args.firebase_api_key)
prev_dir = os.getcwd()
os.chdir(args.new_config_dir)
# Deploy App Engine and finish verification of domain.
os.chdir(prev_dir)
deploy_appengine(
gcloud, args.new_config_dir, appengine_location=args.appengine_location)
verifier.verify(appspot_domain)
# App Engine service account requires:
# - Domain ownership to create domain namespaced GCS buckets
# - Datastore export permission for periodic backups.
# - Service account signing permission for GCS uploads.
service_account = app_engine_service_account(args.project_id)
verifier.add_owner(appspot_domain, service_account)
add_service_account_role(gcloud, args.project_id, service_account,
'roles/datastore.importExportAdmin')
add_service_account_role(gcloud, args.project_id, service_account,
'roles/iam.serviceAccountTokenCreator')
# Create buckets now that domain is verified.
create_buckets(args.project_id, [bucket for _, bucket in bucket_replacements])
# Set CORS settings on the buckets.
set_cors(args.new_config_dir, [blobs_bucket])
# Set deployment bucket for the cloud project.
gcloud.run('compute', 'project-info', 'add-metadata',
'--metadata=deployment-bucket=' + deployment_bucket)
# Deploy source zips.
deploy_zips(args.new_config_dir)
|
nuitka/utils/Jinja2.py | mikehaben69/Nuitka | 5,421 | 12762933 | # Copyright 2021, <NAME>, mailto:<EMAIL>
#
# Part of "Nuitka", an optimizing Python compiler that is compatible and
# integrates with CPython, but also works on its own.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Jinja folklore wrappers and handling of inline copy usage.
"""
from .Importing import importFromInlineCopy
environments = {}
def unlikely_if(value):
if value:
return "unlikely"
else:
return ""
def unlikely_or_likely_from(value):
if value:
return "unlikely"
else:
return "likely"
def getEnvironment(module_name):
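    # Environments are cached per module name, so templates and globals are only set up once.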
if module_name not in environments:
# Import dependencies, sadly we get to manage this ourselves.
importFromInlineCopy("markupsafe", must_exist=True)
jinja2 = importFromInlineCopy("jinja2", must_exist=True)
import jinja2
env = jinja2.Environment(
loader=jinja2.PackageLoader(module_name, "templates"),
# extensions=["jinja2.ext.do"],
trim_blocks=True,
lstrip_blocks=True,
)
# For shared global functions.
env.globals.update(
{
"unlikely_if": unlikely_if,
"unlikely_or_likely_from": unlikely_or_likely_from,
}
)
env.undefined = jinja2.StrictUndefined
environments[module_name] = env
return environments[module_name]
def getTemplate(module_name, template_name):
return getEnvironment(module_name).get_template(template_name)
|
tests/unit/trace/propagation/test_text_format.py | Flared/opencensus-python | 650 | 12762967 | <reponame>Flared/opencensus-python
# Copyright 2017, OpenCensus Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import mock
from opencensus.trace.propagation import text_format
class Test_from_carrier(unittest.TestCase):
def test_from_carrier_keys_exist(self):
test_trace_id = '6e0c63257de34c92bf9efcd03927272e'
test_span_id = '00f067aa0ba902b7'
test_options = 1
carrier = {
text_format._TRACE_ID_KEY: test_trace_id,
text_format._SPAN_ID_KEY: test_span_id,
text_format._TRACE_OPTIONS_KEY: test_options,
}
propagator = text_format.TextFormatPropagator()
span_context = propagator.from_carrier(carrier)
self.assertEqual(span_context.trace_id, test_trace_id)
self.assertEqual(span_context.span_id, test_span_id)
self.assertEqual(span_context.trace_options.enabled,
bool(test_options))
def test_from_carrier_keys_not_exist(self):
carrier = {}
propagator = text_format.TextFormatPropagator()
span_context = propagator.from_carrier(carrier)
self.assertIsNotNone(span_context.trace_id)
# Span_id should be None here which indicates no parent span_id for
# the child spans
self.assertIsNone(span_context.span_id)
self.assertTrue(span_context.trace_options.enabled)
def test_to_carrier_has_span_id(self):
test_trace_id = '6e0c63257de34c92bf9efcd03927272e'
test_span_id = '00f067aa0ba902b7'
test_options = '2'
span_context = mock.Mock()
span_context.trace_id = test_trace_id
span_context.span_id = test_span_id
span_context.trace_options.trace_options_byte = test_options
carrier = {}
propagator = text_format.TextFormatPropagator()
carrier = propagator.to_carrier(span_context, carrier)
self.assertEqual(carrier[text_format._TRACE_ID_KEY], test_trace_id)
self.assertEqual(carrier[text_format._SPAN_ID_KEY], str(test_span_id))
self.assertEqual(carrier[text_format._TRACE_OPTIONS_KEY], test_options)
def test_to_carrier_no_span_id(self):
test_trace_id = '6e0c63257de34c92bf9efcd03927272e'
test_options = '1'
span_context = mock.Mock()
span_context.trace_id = test_trace_id
span_context.span_id = None
span_context.trace_options.trace_options_byte = test_options
carrier = {}
propagator = text_format.TextFormatPropagator()
carrier = propagator.to_carrier(span_context, carrier)
self.assertEqual(carrier[text_format._TRACE_ID_KEY], test_trace_id)
self.assertIsNone(carrier.get(text_format._SPAN_ID_KEY))
self.assertEqual(carrier[text_format._TRACE_OPTIONS_KEY], test_options)
|
setup.py | Jingren-hou/NeuralCDE | 438 | 12762972 | <reponame>Jingren-hou/NeuralCDE<filename>setup.py
import pathlib
import setuptools
here = pathlib.Path(__file__).resolve().parent
with open(here / 'controldiffeq/README.md', 'r') as f:
readme = f.read()
setuptools.setup(name='controldiffeq',
version='0.0.1',
author='<NAME>',
author_email='<EMAIL>',
maintainer='<NAME>',
maintainer_email='<EMAIL>',
description='PyTorch functions for solving CDEs.',
long_description=readme,
url='https://github.com/patrick-kidger/NeuralCDE/tree/master/controldiffeq',
license='Apache-2.0',
zip_safe=False,
python_requires='>=3.5, <4',
install_requires=['torch>=1.0.0', 'torchdiffeq>=0.0.1'],
packages=['controldiffeq'],
classifiers=["Programming Language :: Python :: 3",
"License :: OSI Approved :: Apache Software License"])
|
util/chplenv/chplenv.py | MayukhSobo/chapel | 1,602 | 12762973 | import chpl_cpu
import chpl_atomics
import chpl_aux_filesys
import chpl_bin_subdir
import chpl_make
import chpl_platform
import chpl_comm
import chpl_comm_debug
import chpl_comm_segment
import chpl_comm_substrate
import chpl_compiler
import chpl_gasnet
import chpl_gmp
import chpl_hwloc
import chpl_jemalloc
import chpl_launcher
import chpl_libfabric
import chpl_llvm
import chpl_locale_model
import chpl_gpu
import chpl_arch
import chpl_mem
import chpl_qthreads
import chpl_re2
import chpl_tasks
import chpl_timers
import chpl_unwind
import chpl_lib_pic
import chpl_sanitizers
# General purpose helpers
import chpl_home_utils
import chpl_python_version
import compiler_utils
import overrides
import utils
|
tests/r/test_swahili.py | hajime9652/observations | 199 | 12762974 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import shutil
import sys
import tempfile
from observations.r.swahili import swahili
def test_swahili():
"""Test module swahili.py by downloading
swahili.csv and testing shape of
extracted data has 480 rows and 4 columns
"""
test_path = tempfile.mkdtemp()
x_train, metadata = swahili(test_path)
try:
assert x_train.shape == (480, 4)
except:
shutil.rmtree(test_path)
    raise
|
mods/Maze/main.py | SummitChen/opennero | 215 | 12763036 | # OpenNero will execute ModMain when this mod is loaded
from Maze.client import ClientMain
def ModMain(mode = ""):
ClientMain(mode)
def StartMe():
from Maze.module import getMod
getMod().set_speedup(1.0) # full speed ahead
getMod().start_sarsa() # start an algorithm for headless mode
|
tests/arch/arm/translators/test_branch.py | IMULMUL/barf-project | 1,395 | 12763040 | <reponame>IMULMUL/barf-project
# Copyright (c) 2019, Fundacion Dr. <NAME>
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import absolute_import
import platform
import unittest
from .armtranslator import ArmTranslationTestCase
@unittest.skipUnless(platform.machine().lower() in ['armv6l', 'armv7l'],
'Not running on an ARMv6 system')
class ArmTranslationBranchTests(ArmTranslationTestCase):
def test_branch_instructions(self):
untouched_value = 0x45454545
touched_value = 0x31313131
# R11 is used as a dirty register to check if the branch was
# taken or not.
instr_samples = [
["mov r11, #0x{:x}".format(untouched_value),
"b #0x800c",
"mov r11, #0x{:x}".format(touched_value),
"mov r0, r0",
],
["mov r11, #0x{:x}".format(untouched_value),
"bx #0x800c",
"mov r11, #0x{:x}".format(touched_value),
"mov r0, r0",
],
["mov r11, #0x{:x}".format(untouched_value),
"bl #0x800c",
"mov r11, #0x{:x}".format(touched_value),
"mov r0, r0",
],
["mov r11, #0x{:x}".format(untouched_value),
"blx #0x800c",
"mov r11, #0x{:x}".format(touched_value),
"mov r0, r0",
],
["movs r11, #0x{:x}".format(untouched_value),
"bne #0x800c",
"mov r11, #0x{:x}".format(touched_value),
"mov r0, r0",
],
["mov r11, #0x{:x}".format(untouched_value),
"mov r1, #0x8010",
"bx r1",
"mov r11, #0x{:x}".format(touched_value),
"mov r0, r0",
],
["mov r11, #0x{:x}".format(untouched_value),
"mov r1, #0x8010",
"blx r1",
"mov r11, #0x{:x}".format(touched_value),
"mov r0, r0",
],
]
for instr in instr_samples:
reil_ctx_out = self._execute_asm(instr, 0x8000)
self.assertTrue(reil_ctx_out['r11'] == untouched_value)
|
examples/tenant_tutorial/customers/apps.py | buraketmen/django-tenants | 514 | 12763057 | from __future__ import unicode_literals
from django.apps import AppConfig
class CustomersConfig(AppConfig):
name = 'customers'
verbose_name = 'Customers'
def ready(self):
import customers.handlers
|
GRU.py | harrys17451/CryptocurrencyPrediction | 669 | 12763088 | <reponame>harrys17451/CryptocurrencyPrediction
import pandas as pd
import numpy as np
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten,Reshape
from keras.layers import Conv1D, MaxPooling1D, LeakyReLU
from keras.utils import np_utils
from keras.layers import GRU,CuDNNGRU
from keras.callbacks import CSVLogger, ModelCheckpoint
import h5py
import os
import tensorflow as tf
from keras.backend.tensorflow_backend import set_session
os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'
os.environ['CUDA_VISIBLE_DEVICES'] = '1'
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
set_session(tf.Session(config=config))
with h5py.File(''.join(['bitcoin2015to2017_close.h5']), 'r') as hf:
datas = hf['inputs'].value
labels = hf['outputs'].value
output_file_name='bitcoin2015to2017_close_GRU_1_tanh_relu_'
step_size = datas.shape[1]
units= 50
batch_size = 8
nb_features = datas.shape[2]
epochs = 100
output_size=16
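# Each sample maps a window of step_size past points to the next output_size points of the close price.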
#split training validation
training_size = int(0.8* datas.shape[0])
training_datas = datas[:training_size,:]
training_labels = labels[:training_size,:,0]
validation_datas = datas[training_size:,:]
validation_labels = labels[training_size:,:,0]
#build model
model = Sequential()
model.add(GRU(units=units, input_shape=(step_size,nb_features),return_sequences=False))
model.add(Activation('tanh'))
model.add(Dropout(0.2))
model.add(Dense(output_size))
model.add(Activation('relu'))
model.compile(loss='mse', optimizer='adam')
model.fit(training_datas, training_labels, batch_size=batch_size,validation_data=(validation_datas,validation_labels), epochs = epochs, callbacks=[CSVLogger(output_file_name+'.csv', append=True),ModelCheckpoint('weights/'+output_file_name+'-{epoch:02d}-{val_loss:.5f}.hdf5', monitor='val_loss', verbose=1,mode='min')])
# model.fit(datas,labels)
#model.save(output_file_name+'.h5')
|
sdk/cognitivelanguage/azure-ai-language-conversations/tests/test_conversation_app.py | praveenkuttappan/azure-sdk-for-python | 2,728 | 12763097 | <reponame>praveenkuttappan/azure-sdk-for-python
# coding=utf-8
# ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
import pytest
from azure.core.exceptions import HttpResponseError, ClientAuthenticationError
from azure.core.credentials import AzureKeyCredential
from testcase import (
ConversationTest,
GlobalConversationAccountPreparer
)
from azure.ai.language.conversations import ConversationAnalysisClient
from azure.ai.language.conversations.models import (
ConversationAnalysisOptions,
AnalyzeConversationResult,
ConversationPrediction
)
class ConversationAppTests(ConversationTest):
@GlobalConversationAccountPreparer()
def test_conversation_app(self, conv_account, conv_key, conv_project):
# prepare data
query = "One california maki please."
input = ConversationAnalysisOptions(
query=query,
)
        # analyze query
client = ConversationAnalysisClient(conv_account, AzureKeyCredential(conv_key))
with client:
result = client.analyze_conversations(
input,
project_name=conv_project,
deployment_name='production'
)
# assert
assert isinstance(result, AnalyzeConversationResult)
assert result.query == query
assert isinstance(result.prediction, ConversationPrediction)
assert result.prediction.project_kind == 'conversation'
assert result.prediction.top_intent == 'Order'
assert len(result.prediction.entities) > 0
assert len(result.prediction.intents) > 0
assert result.prediction.intents[0].category == 'Order'
assert result.prediction.intents[0].confidence_score > 0
assert result.prediction.entities[0].category == 'OrderItem'
assert result.prediction.entities[0].text == 'california maki'
assert result.prediction.entities[0].confidence_score > 0
@GlobalConversationAccountPreparer()
def test_conversation_app_with_dictparams(self, conv_account, conv_key, conv_project):
# prepare data
query = "One california maki please."
params = {
"query": query,
"api_version": "2021-11-01-preview"
}
        # analyze query
client = ConversationAnalysisClient(conv_account, AzureKeyCredential(conv_key))
with client:
result = client.analyze_conversations(
params,
project_name=conv_project,
deployment_name='production'
)
# assert
assert isinstance(result, AnalyzeConversationResult)
assert result.query == query
assert isinstance(result.prediction, ConversationPrediction)
assert result.prediction.project_kind == 'conversation'
assert result.prediction.top_intent == 'Order'
assert len(result.prediction.entities) > 0
assert len(result.prediction.intents) > 0
assert result.prediction.intents[0].category == 'Order'
assert result.prediction.intents[0].confidence_score > 0
assert result.prediction.entities[0].category == 'OrderItem'
assert result.prediction.entities[0].text == 'california maki'
assert result.prediction.entities[0].confidence_score > 0
|
bip_utils/ss58/ss58.py | MIPPLTeam/bip_utils | 149 | 12763114 | <reponame>MIPPLTeam/bip_utils
# Copyright (c) 2021 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
Module for SS58 decoding/encoding.
Reference: https://github.com/paritytech/substrate/wiki/External-Address-Format-(SS58).
"""
# Imports
from typing import Tuple
from bip_utils.ss58.ss58_ex import SS58ChecksumError
from bip_utils.base58 import Base58Decoder, Base58Encoder
from bip_utils.utils.misc import ConvUtils, CryptoUtils
class SS58Const:
"""Class container for SS58 constants."""
# Max format for simple account
SIMPLE_ACCOUNT_FORMAT_MAX_VAL: int = 63
# Format maximum value
FORMAT_MAX_VAL: int = 16383
# Reserved formats
RESERVED_FORMATS: Tuple[int, int] = (46, 47)
# Data length in bytes
DATA_BYTE_LEN: int = 32
# Checksum length in bytes
CHECKSUM_BYTE_LEN: int = 2
# Checksum prefix
CHECKSUM_PREFIX: bytes = b"SS58PRE"
class SS58Utils:
"""Class container for SS58 utility functions."""
@staticmethod
def ComputeChecksum(data_bytes: bytes) -> bytes:
"""
Compute SS58 checksum.
Args:
data_bytes (bytes): Data bytes
Returns:
bytes: Computed checksum
"""
return CryptoUtils.Blake2b(SS58Const.CHECKSUM_PREFIX + data_bytes)[:SS58Const.CHECKSUM_BYTE_LEN]
class SS58Encoder:
"""SS58 encoder class. It provides methods for encoding to SS58 format."""
@staticmethod
def Encode(data_bytes: bytes,
ss58_format: int) -> str:
"""
Encode bytes into a SS58 string.
Args:
data_bytes (bytes): Data bytes (32-byte length)
ss58_format (int) : SS58 format
Returns:
str: SS58 encoded string
Raises:
ValueError: If parameters are not valid
"""
# Check parameters
if len(data_bytes) != SS58Const.DATA_BYTE_LEN:
raise ValueError(f"Invalid data length ({len(data_bytes)})")
if ss58_format < 0 or ss58_format > SS58Const.FORMAT_MAX_VAL:
raise ValueError(f"Invalid SS58 format ({ss58_format})")
if ss58_format in SS58Const.RESERVED_FORMATS:
raise ValueError(f"Invalid SS58 format ({ss58_format})")
# Simple account
if ss58_format <= SS58Const.SIMPLE_ACCOUNT_FORMAT_MAX_VAL:
ss58_format_bytes = ConvUtils.IntegerToBytes(ss58_format)
# Full address
else:
# 0b00HHHHHH_MMLLLLLL -> (0b01LLLLLL, 0bHHHHHHMM)
ss58_format_bytes = bytes([
((ss58_format & 0x00FC) >> 2) | 0x0040,
(ss58_format >> 8) | ((ss58_format & 0x0003) << 6)
])
# Get payload
payload = ss58_format_bytes + data_bytes
# Compute checksum
checksum = SS58Utils.ComputeChecksum(payload)
# Encode
return Base58Encoder.Encode(payload + checksum)
class SS58Decoder:
"""SS58 decoder class. It provides methods for decoding SS58 format."""
@staticmethod
def Decode(data_str: str) -> Tuple[int, bytes]:
"""
Decode bytes from a SS58 string.
Args:
data_str (string): Data string
Returns:
tuple: SS58 format and data bytes
Raises:
SS58ChecksumError: If checksum is not valid
ValueError: If the string is not a valid SS58 format
"""
# Decode string
dec_bytes = Base58Decoder.Decode(data_str)
# Full address
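        # Bit 0x40 in the first byte marks the two-byte (14-bit) format prefix produced by Encode above.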
if dec_bytes[0] & 0x40:
ss58_format_len = 2
ss58_format = ((dec_bytes[0] & 0x3F) << 2) | (dec_bytes[1] >> 6) | \
((dec_bytes[1] & 0x3F) << 8)
# Simple account
else:
ss58_format_len = 1
ss58_format = dec_bytes[0]
# Check format
if ss58_format in SS58Const.RESERVED_FORMATS:
raise ValueError(f"Invalid SS58 format ({ss58_format})")
# Get back data and checksum
data_bytes = dec_bytes[ss58_format_len:-SS58Const.CHECKSUM_BYTE_LEN]
checksum_bytes = dec_bytes[-SS58Const.CHECKSUM_BYTE_LEN:]
# Check data length
if len(data_bytes) != SS58Const.DATA_BYTE_LEN:
raise ValueError(f"Invalid data length ({len(data_bytes)})")
# Compute checksum
comp_checksum = SS58Utils.ComputeChecksum(dec_bytes[:-SS58Const.CHECKSUM_BYTE_LEN])
# Verify checksum
if checksum_bytes != comp_checksum:
raise SS58ChecksumError(
f"Invalid checksum (expected {ConvUtils.BytesToHexString(comp_checksum)}, "
f"got {ConvUtils.BytesToHexString(checksum_bytes)})"
)
return ss58_format, data_bytes
|
CondTools/DT/test/popcon_keyconf_user.py | ckamtsikis/cmssw | 852 | 12763119 | import FWCore.ParameterSet.Config as cms
process = cms.Process("TEST")
process.load("CondCore.DBCommon.CondDBCommon_cfi")
process.CondDBCommon.connect = 'sqlite_file:userconf.db'
process.CondDBCommon.DBParameters.authenticationPath = '.'
process.PoolDBOutputService = cms.Service("PoolDBOutputService",
process.CondDBCommon,
logconnect = cms.untracked.string('sqlite_file:log.db'),
toPut = cms.VPSet(
cms.PSet(
record = cms.string('DTCCBConfigRcd'),
tag = cms.string('conf_test'),
timetype = cms.untracked.string('runnumber')
),
cms.PSet(
record = cms.string('keyedConfBricks'),
tag = cms.string('DT_keyedConfBricks_V01'),
timetype = cms.untracked.string('hash'),
withWrapper = cms.untracked.bool(True),
outOfOrder = cms.untracked.bool(True)
),
cms.PSet(
record = cms.string('keyedConfListIOV'),
tag = cms.string('DT_keyedConfListIOV_V01'),
timetype = cms.untracked.string('runnumber'),
withWrapper = cms.untracked.bool(True),
outOfOrder = cms.untracked.bool(False)
)
)
)
process.source = cms.Source("EmptyIOVSource",
timetype = cms.string('runnumber'),
firstValue = cms.uint64(1),
lastValue = cms.uint64(1),
interval = cms.uint64(1)
)
process.essource = cms.ESSource("PoolDBESSource",
process.CondDBCommon,
DumpStat=cms.untracked.bool(True),
toGet = cms.VPSet(
cms.PSet(
record = cms.string('DTKeyedConfigListRcd'),
tag = cms.string('DT_keyedConfListIOV_V01')
),
cms.PSet(
record = cms.string('DTKeyedConfigContainerRcd'),
tag = cms.string('DT_keyedConfBricks_V01')
)
)
)
process.conf_o2o = cms.EDAnalyzer("DTUserKeyedConfigPopConAnalyzer",
name = cms.untracked.string('DTCCBConfig'),
Source = cms.PSet(
DBParameters = cms.PSet(
messageLevel = cms.untracked.int32(0),
authenticationPath = cms.untracked.string('.')
),
onlineDB = cms.string('sqlite_file:dummy_online.db'),
tag = cms.string('conf_test'),
run = cms.int32(1),
writeKeys = cms.bool(True),
writeData = cms.bool(True),
container = cms.string('keyedConfBricks'),
DTConfigKeys = cms.VPSet(
cms.PSet(
configType = cms.untracked.int32(1),
configKey = cms.untracked.int32(542)
),
cms.PSet(
configType = cms.untracked.int32(2),
configKey = cms.untracked.int32(926)
),
cms.PSet(
configType = cms.untracked.int32(3),
configKey = cms.untracked.int32(542)
),
cms.PSet(
configType = cms.untracked.int32(4),
configKey = cms.untracked.int32(542)
),
cms.PSet(
configType = cms.untracked.int32(5),
configKey = cms.untracked.int32(542)
),
cms.PSet(
configType = cms.untracked.int32(6),
configKey = cms.untracked.int32(1226)
)
),
onlineAuthentication = cms.string('.')
),
SinceAppendMode = cms.bool(True),
record = cms.string('DTCCBConfigRcd'),
loggingOn = cms.untracked.bool(True),
debug = cms.bool(False)
)
process.p = cms.Path(process.conf_o2o)
|
AISnake/Algorithm_2/modules/agent.py | EdgarLi/AIGames | 543 | 12763122 | <reponame>EdgarLi/AIGames
'''
Function:
define the ai agent
Author:
Charles
微信公众号:
Charles的皮卡丘
'''
from modules.food import *
from operator import itemgetter
from collections import OrderedDict
'''ai agent'''
class Agent():
def __init__(self, cfg, snake, **kwargs):
self.cfg = cfg
self.num_rows = cfg.GAME_MATRIX_SIZE[1]
self.num_cols = cfg.GAME_MATRIX_SIZE[0]
self.directions = [(0, -1), (0, 1), (-1, 0), (1, 0)]
self.path = self.buildcircle(snake)
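        # shortcut_path, when non-empty, temporarily overrides the Hamiltonian cycle in act()
        # so the snake reaches the food sooner.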
self.shortcut_path = {}
'''make decision'''
def act(self, snake, food):
# make decision
if self.shortcut_path:
head_next = self.shortcut_path.pop(snake.coords[0])
else:
head_next = self.path[snake.coords[0]]
query = (head_next[0]-snake.coords[0][0], head_next[1]-snake.coords[0][1])
direction = {(-1, 0): 'left', (1, 0): 'right', (0, -1): 'up', (0, 1): 'down'}[query]
snake.setDirection(direction)
if snake.update(food):
food = Apple(self.cfg, snake.coords)
infos = {'eaten': True, 'food': food}
else:
infos = {'eaten': False, 'food': None}
# if snake has eaten the food
if head_next == food.coord:
path = self.buildcircle(snake)
if path:
self.path = path
# take shortcut
if self.shortcut_path:
return
shortcut_path = self.shortcut(snake, food)
if shortcut_path:
self.shortcut_path = shortcut_path
# return the necessary infos
return infos
'''calculate shortcut path'''
def shortcut(self, snake, food):
# empty screen, with the ordered hamitonian cycle precomputed and order numbered
world = [[0 for i in range(self.num_cols)] for j in range(self.num_rows)]
num = 1
node = snake.coords[-1]
world[node[1]][node[0]] = num
node = self.path[node]
while node != snake.coords[-1]:
num += 1
world[node[1]][node[0]] = num
node = self.path[node]
# obtain shortcut_path
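        # Search from the head: only expand cells whose cycle position lies after the current cell
        # and no later than the food, so any path found is a forward shortcut along the cycle.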
wall = snake.coords
food = food.coord
food_number = world[food[1]][food[0]]
node, pre = wall[0], (-1, -1)
wait = OrderedDict()
wait[node] = pre
path = {}
while wait:
node, pre = wait.popitem(last=False)
path[node] = pre
if node == food:
break
node_number = world[node[1]][node[0]]
neigh = {}
for direction in self.directions:
to = (node[0]+direction[0], node[1]+direction[1])
if not self.checkboundary(to):
continue
if to in wait or to in wall or to in path:
continue
to_number = world[to[1]][to[0]]
if to_number > node_number and to_number <= food_number:
                    neigh[to_number] = to
neigh = sorted(neigh.items(), key=itemgetter(0), reverse=True)
for item in neigh:
wait[item[1]] = node
if node != food:
return {}
return self.reverse(path, snake.coords[0], food)
'''check boundary'''
def checkboundary(self, pos):
if pos[0] < 0 or pos[1] < 0 or pos[0] >= self.num_cols or pos[1] >= self.num_rows:
return False
return True
'''the shortest'''
def shortest(self, wall, head, food):
wait = OrderedDict()
node, pre = head, (-1, -1)
wait[node] = pre
path = {}
while wait:
node, pre = wait.popitem(last=False)
path[node] = pre
if node == food:
break
if pre in path:
prepre = path[pre]
direction = (pre[0]-prepre[0], pre[1]-prepre[1])
if (direction in self.directions) and (direction != self.directions[0]):
self.directions.remove(direction)
self.directions.insert(0, direction)
for direction in self.directions:
to = (node[0] + direction[0], node[1] + direction[1])
if not self.checkboundary(to):
continue
if to in path or to in wait or to in wall:
continue
wait[to] = node
if node != food:
return None
return self.reverse(path, head, food)
'''reverse path'''
def reverse(self, path, head, food):
if not path: return path
path_new = {}
node = food
while node != head:
path_new[path[node]] = node
node = path[node]
return path_new
'''the longest'''
def longest(self, wall, head, food):
path = self.shortest(wall, head, food)
if path is None:
return None
node = head
while node != food:
if self.extendpath(path, node, wall+[food]):
node = head
continue
node = path[node]
return path
'''extend path'''
def extendpath(self, path, node, wall):
next_ = path[node]
direction_1 = (next_[0]-node[0], next_[1]-node[1])
if direction_1 in [(0, -1), (0, 1)]:
directions = [(-1, 0), (1, 0)]
else:
directions = [(0, -1), (0, 1)]
for d in directions:
src = (node[0]+d[0], node[1]+d[1])
to = (next_[0]+d[0], next_[1]+d[1])
if (src == to) or not (self.checkboundary(src) and self.checkboundary(to)):
continue
if src in path or src in wall or to in path or to in wall:
continue
direction_2 = (to[0]-src[0], to[1]-src[1])
if direction_1 == direction_2:
path[node] = src
path[src] = to
path[to] = next_
return True
return False
'''build a Hamiltonian cycle'''
def buildcircle(self, snake):
path = self.longest(snake.coords[1: -1], snake.coords[0], snake.coords[-1])
if (not path) or (len(path) - 1 != self.num_rows * self.num_cols - len(snake.coords)):
return None
for i in range(1, len(snake.coords)):
path[snake.coords[i]] = snake.coords[i-1]
return path |
Deep_Crossing/modules.py | jingxiufenghua/rec-model | 1,323 | 12763140 | <filename>Deep_Crossing/modules.py
"""
Created on May 18, 2021
modules of Deep&Crossing: Residual units
@author: <NAME>(<EMAIL>)
"""
import tensorflow as tf
from tensorflow.keras.layers import Dense, ReLU, Layer
class Residual_Units(Layer):
"""
Residual Units
"""
def __init__(self, hidden_unit, dim_stack):
"""
:param hidden_unit: A list. Neural network hidden units.
:param dim_stack: A scalar. The dimension of inputs unit.
"""
super(Residual_Units, self).__init__()
self.layer1 = Dense(units=hidden_unit, activation='relu')
self.layer2 = Dense(units=dim_stack, activation=None)
self.relu = ReLU()
def call(self, inputs, **kwargs):
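        # The two dense layers map back to the stack dimension so the skip connection
        # can be added element-wise before the final ReLU.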
x = inputs
x = self.layer1(x)
x = self.layer2(x)
outputs = self.relu(x + inputs)
return outputs |
mangle-infra-agent/Faults/NetworkFaults.py | vmaligireddy/mangle | 151 | 12763158 | from enum import Enum
class NetworkFaults(Enum):
NETWORK_DELAY_MILLISECONDS = 1
PACKET_DUPLICATE_PERCENTAGE = 2
PACKET_CORRUPT_PERCENTAGE = 3
PACKET_LOSS_PERCENTAGE = 4 |
tests/generators/transition/main.py | jacobkaufmann/consensus-specs | 2,161 | 12763166 | from typing import Iterable
from eth2spec.test.helpers.constants import ALTAIR, MINIMAL, MAINNET, PHASE0
from eth2spec.test.altair.transition import (
test_transition as test_altair_transition,
test_activations_and_exits as test_altair_activations_and_exits,
test_leaking as test_altair_leaking,
test_slashing as test_altair_slashing,
test_operations as test_altair_operations,
)
from eth2spec.gen_helpers.gen_base import gen_runner, gen_typing
from eth2spec.gen_helpers.gen_from_tests.gen import generate_from_tests
def create_provider(tests_src, preset_name: str, pre_fork_name: str, post_fork_name: str) -> gen_typing.TestProvider:
def prepare_fn() -> None:
return
def cases_fn() -> Iterable[gen_typing.TestCase]:
return generate_from_tests(
runner_name='transition',
handler_name='core',
src=tests_src,
fork_name=post_fork_name,
phase=pre_fork_name,
preset_name=preset_name,
)
return gen_typing.TestProvider(prepare=prepare_fn, make_cases=cases_fn)
TRANSITION_TESTS = (
(PHASE0, ALTAIR, test_altair_transition),
(PHASE0, ALTAIR, test_altair_activations_and_exits),
(PHASE0, ALTAIR, test_altair_leaking),
(PHASE0, ALTAIR, test_altair_slashing),
(PHASE0, ALTAIR, test_altair_operations),
)
if __name__ == "__main__":
for pre_fork, post_fork, transition_test_module in TRANSITION_TESTS:
gen_runner.run_generator("transition", [
create_provider(transition_test_module, MINIMAL, pre_fork, post_fork),
create_provider(transition_test_module, MAINNET, pre_fork, post_fork),
])
|
mlflow/entities/model_registry/__init__.py | PeterSulcs/mlflow | 10,351 | 12763168 | from mlflow.entities.model_registry.registered_model import RegisteredModel
from mlflow.entities.model_registry.model_version import ModelVersion
from mlflow.entities.model_registry.registered_model_tag import RegisteredModelTag
from mlflow.entities.model_registry.model_version_tag import ModelVersionTag
__all__ = [
"RegisteredModel",
"ModelVersion",
"RegisteredModelTag",
"ModelVersionTag",
]
|
tests/issues/test_project_issue.py | mubashshirjamal/code | 1,582 | 12763173 | # encoding: UTF-8
from tests.base import TestCase
from vilya.models.issue import Issue
from vilya.models.project_issue import ProjectIssue
class TestProjectIssue(TestCase):
def test_add_issue(self):
p = ProjectIssue.add('test', 'test description', 'test', project=1)
assert isinstance(p, ProjectIssue)
assert p.title == 'test'
assert p.description == 'test description'
assert p.project_id == 1
p.delete()
def test_get_issue(self):
p = ProjectIssue.add('test', 'test description', 'test', project=1)
r = ProjectIssue.get(p.project_id, issue_id=p.issue_id)
assert isinstance(r, ProjectIssue)
assert r.project_id == 1
r = ProjectIssue.get(p.project_id, number=p.number)
assert isinstance(r, ProjectIssue)
assert r.project_id == 1
r = Issue.get_cached_issue(p.issue_id)
assert isinstance(r, ProjectIssue)
assert r.title == 'test'
assert r.description == 'test description'
assert r.project_id == 1
p2 = ProjectIssue.add(
'test2', 'test2 description', 'test', project=1,
assignee='assignee')
p3 = ProjectIssue.add(
'test3', 'test3 description', 'test', project=1,
assignee='assignee')
p4 = ProjectIssue.add(
'test4', 'test4 description', 'test', project=1, assignee='test')
p5 = ProjectIssue.add(
'test5', 'test5 description', 'test1', project=2, assignee='test')
rs = ProjectIssue._gets_by_project_id(1)
assert len(rs) == 4
rs = ProjectIssue._get_issues_by_project_id(1)
assert all([isinstance(i, ProjectIssue) for i in rs])
assert len(rs) == 4
rs = ProjectIssue.gets_by_assignee_id(1, 'assignee')
assert all([isinstance(i, ProjectIssue) for i in rs])
assert len(rs) == 2
rs = ProjectIssue.gets_by_creator_id(1, 'test')
assert all([isinstance(i, ProjectIssue) for i in rs])
assert len(rs) == 4
for p in [p, p2, p3, p4, p5]:
p.delete()
def test_n_issue(self):
p1 = ProjectIssue.add(
'test1', 'test1 description', 'test', project=1,
assignee='assignee')
p1.close('test')
p2 = ProjectIssue.add(
'test2', 'test2 description', 'test', project=1,
assignee='assignee')
p2.close('test')
p3 = ProjectIssue.add(
'test3', 'test3 description', 'test', project=1,
assignee='assignee')
p4 = ProjectIssue.add(
'test4', 'test4 description', 'test', project=1,
assignee='test')
p5 = ProjectIssue.add(
'test5', 'test5 description', 'test1', project=2,
assignee='test')
count = ProjectIssue.get_count_by_project_id(1)
assert count == 4
count = ProjectIssue.get_count_by_project_id(1, 'open')
assert count == 2
count = ProjectIssue.get_count_by_project_id(1, 'closed')
assert count == 2
count = ProjectIssue.get_count_by_assignee_id(1, 'assignee')
assert count == 3
count = ProjectIssue.get_count_by_assignee_id(1, 'assignee', 'open')
assert count == 1
count = ProjectIssue.get_count_by_assignee_id(1, 'assignee', 'closed')
assert count == 2
count = ProjectIssue.get_count_by_creator_id(1, 'test')
assert count == 4
count = ProjectIssue.get_count_by_creator_id(1, 'test', 'open')
assert count == 2
count = ProjectIssue.get_count_by_creator_id(1, 'test', 'closed')
assert count == 2
r = ProjectIssue.get(p1.project_id, p1.issue_id)
assert isinstance(r, ProjectIssue)
assert r.n_closed_issues == 2
assert r.n_open_issues == 2
for p in [p1, p2, p3, p4, p5]:
p.delete()
def test_open_and_close_issue(self):
p1 = ProjectIssue.add('test1', 'test1 description', 'test', project=1)
p2 = ProjectIssue.add('test2', 'test2 description', 'test', project=1)
p3 = ProjectIssue.add('test3', 'test3 description', 'test', project=1)
count = ProjectIssue.get_count_by_project_id(1)
assert count == 3
p1.close('test')
count = ProjectIssue.get_count_by_project_id(1, 'open')
assert count == 2
p1.open()
count = ProjectIssue.get_count_by_project_id(1, 'open')
assert count == 3
for p in [p1, p2, p3]:
p.delete()
def test_add_tags(self):
target_id = project_id = 1
p = ProjectIssue.add(
'test', 'test description', 'test', project=project_id)
assert isinstance(p, ProjectIssue)
assert p.title == 'test'
assert p.description == 'test description'
assert p.project_id == 1
tags = ['tag1', 'tag2', 'tag3']
p.add_tags(tags, target_id)
assert len(p.tags) == len(tags)
tag_names = [t.name for t in p.tags]
assert set(tags) & set(tag_names) == set(tags)
p.delete()
def test_gets_by_issue_ids(self):
project_id = 1
p = ProjectIssue.add(
'test', 'test description', 'test', project=project_id)
assert isinstance(p, ProjectIssue)
assert p.title == 'test'
assert p.description == 'test description'
assert p.project_id == 1
project_issues = ProjectIssue._gets_by_issue_ids(
[p.issue_id], state=None)
assert len(project_issues) == 1
pissue = project_issues[0]
assert isinstance(pissue, ProjectIssue)
assert pissue.project_id == project_id
project_issues = ProjectIssue._gets_by_issue_ids(
[p.issue_id], state="open")
assert len(project_issues) == 1
pissue = project_issues[0]
assert isinstance(pissue, ProjectIssue)
assert pissue.project_id == project_id
project_issues = ProjectIssue._gets_by_issue_ids(
[p.issue_id], state="closed")
assert len(project_issues) == 0
pissue.close("test")
project_issues = ProjectIssue._gets_by_issue_ids(
[p.issue_id], state="open")
assert len(project_issues) == 0
project_issues = ProjectIssue._gets_by_issue_ids(
[p.issue_id], state="closed")
assert len(project_issues) == 1
pissue = project_issues[0]
assert isinstance(pissue, ProjectIssue)
assert pissue.project_id == project_id
p.delete()
def test_gets_by_project_ids(self):
p1 = ProjectIssue.add('test1', 'desp', 'test', project=1)
p2 = ProjectIssue.add('test2', 'desp', 'test2', project=2)
p3 = ProjectIssue.add('test3', 'desp', 'test3', project=2)
issues = ProjectIssue.gets_by_project_ids([1, 2])
assert len(issues), 3
for p in [p1, p2, p3]:
p.delete()
|
sdk/python/pulumi_gcp/dns/get_keys.py | sisisin/pulumi-gcp | 121 | 12763237 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'GetKeysResult',
'AwaitableGetKeysResult',
'get_keys',
'get_keys_output',
]
@pulumi.output_type
class GetKeysResult:
"""
A collection of values returned by getKeys.
"""
def __init__(__self__, id=None, key_signing_keys=None, managed_zone=None, project=None, zone_signing_keys=None):
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if key_signing_keys and not isinstance(key_signing_keys, list):
raise TypeError("Expected argument 'key_signing_keys' to be a list")
pulumi.set(__self__, "key_signing_keys", key_signing_keys)
if managed_zone and not isinstance(managed_zone, str):
raise TypeError("Expected argument 'managed_zone' to be a str")
pulumi.set(__self__, "managed_zone", managed_zone)
if project and not isinstance(project, str):
raise TypeError("Expected argument 'project' to be a str")
pulumi.set(__self__, "project", project)
if zone_signing_keys and not isinstance(zone_signing_keys, list):
raise TypeError("Expected argument 'zone_signing_keys' to be a list")
pulumi.set(__self__, "zone_signing_keys", zone_signing_keys)
@property
@pulumi.getter
def id(self) -> str:
"""
The provider-assigned unique ID for this managed resource.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="keySigningKeys")
def key_signing_keys(self) -> Sequence['outputs.GetKeysKeySigningKeyResult']:
"""
A list of Key-signing key (KSK) records. Structure is documented below. Additionally, the DS record is provided:
"""
return pulumi.get(self, "key_signing_keys")
@property
@pulumi.getter(name="managedZone")
def managed_zone(self) -> str:
return pulumi.get(self, "managed_zone")
@property
@pulumi.getter
def project(self) -> str:
return pulumi.get(self, "project")
@property
@pulumi.getter(name="zoneSigningKeys")
def zone_signing_keys(self) -> Sequence['outputs.GetKeysZoneSigningKeyResult']:
"""
A list of Zone-signing key (ZSK) records. Structure is documented below.
"""
return pulumi.get(self, "zone_signing_keys")
class AwaitableGetKeysResult(GetKeysResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetKeysResult(
id=self.id,
key_signing_keys=self.key_signing_keys,
managed_zone=self.managed_zone,
project=self.project,
zone_signing_keys=self.zone_signing_keys)
def get_keys(managed_zone: Optional[str] = None,
project: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetKeysResult:
"""
Get the DNSKEY and DS records of DNSSEC-signed managed zones. For more information see the
[official documentation](https://cloud.google.com/dns/docs/dnskeys/)
and [API](https://cloud.google.com/dns/docs/reference/v1/dnsKeys).
## Example Usage
```python
import pulumi
import pulumi_gcp as gcp
foo = gcp.dns.ManagedZone("foo",
dns_name="foo.bar.",
dnssec_config=gcp.dns.ManagedZoneDnssecConfigArgs(
state="on",
non_existence="nsec3",
))
foo_dns_keys = foo.id.apply(lambda id: gcp.dns.get_keys(managed_zone=id))
pulumi.export("fooDnsDsRecord", foo_dns_keys.key_signing_keys[0].ds_record)
```
:param str managed_zone: The name or id of the Cloud DNS managed zone.
:param str project: The ID of the project in which the resource belongs. If `project` is not provided, the provider project is used.
"""
__args__ = dict()
__args__['managedZone'] = managed_zone
__args__['project'] = project
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('gcp:dns/getKeys:getKeys', __args__, opts=opts, typ=GetKeysResult).value
return AwaitableGetKeysResult(
id=__ret__.id,
key_signing_keys=__ret__.key_signing_keys,
managed_zone=__ret__.managed_zone,
project=__ret__.project,
zone_signing_keys=__ret__.zone_signing_keys)
@_utilities.lift_output_func(get_keys)
def get_keys_output(managed_zone: Optional[pulumi.Input[str]] = None,
project: Optional[pulumi.Input[Optional[str]]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetKeysResult]:
"""
Get the DNSKEY and DS records of DNSSEC-signed managed zones. For more information see the
[official documentation](https://cloud.google.com/dns/docs/dnskeys/)
and [API](https://cloud.google.com/dns/docs/reference/v1/dnsKeys).
## Example Usage
```python
import pulumi
import pulumi_gcp as gcp
foo = gcp.dns.ManagedZone("foo",
dns_name="foo.bar.",
dnssec_config=gcp.dns.ManagedZoneDnssecConfigArgs(
state="on",
non_existence="nsec3",
))
foo_dns_keys = foo.id.apply(lambda id: gcp.dns.get_keys(managed_zone=id))
pulumi.export("fooDnsDsRecord", foo_dns_keys.key_signing_keys[0].ds_record)
```
:param str managed_zone: The name or id of the Cloud DNS managed zone.
:param str project: The ID of the project in which the resource belongs. If `project` is not provided, the provider project is used.
"""
...
|
bin/lib/releases.py | jfalcou/infra | 135 | 12763257 | <filename>bin/lib/releases.py<gh_stars>100-1000
from enum import Enum
from typing import Optional, Tuple
from attr import dataclass
@dataclass(frozen=True)
class Hash:
hash: str
def __str__(self) -> str:
return f'{str(self.hash[:6])}..{str(self.hash[-6:])}'
class VersionSource(Enum):
value: Tuple[int, str]
TRAVIS = (0, 'tr')
GITHUB = (1, 'gh')
def __lt__(self, other):
return self.value < other.value
def __str__(self):
return f'{self.value[1]}'
@dataclass(frozen=True, repr=False)
class Version:
source: VersionSource
number: int
@staticmethod
def from_string(version_str: str, assumed_source: VersionSource = VersionSource.GITHUB):
if '-' not in version_str:
return Version(assumed_source, int(version_str))
source, num = version_str.split('-')
for possible_source in list(VersionSource):
if possible_source.value[1] == source:
return Version(possible_source, int(num))
raise RuntimeError(f'Unknown source {source}')
def __str__(self):
return f'{self.source}-{self.number}'
def __repr__(self):
return str(self)
@dataclass
class Release:
version: Version
branch: str
key: str
info_key: str
size: int
hash: Hash
static_key: Optional[str] = None
|
modelchimp/migrations/0049_auto_20190516_0759.py | samzer/modelchimp-server | 134 | 12763272 | # Generated by Django 2.2 on 2019-05-16 07:59
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('modelchimp', '0048_auto_20190515_1032'),
]
operations = [
migrations.RemoveField(
model_name='experiment',
name='algorithm',
),
migrations.RemoveField(
model_name='experiment',
name='features',
),
migrations.RemoveField(
model_name='experiment',
name='platform',
),
migrations.RemoveField(
model_name='experiment',
name='platform_library',
),
]
|
dfirtrack_artifacts/tests/artifact/test_artifact_creator_forms.py | stuhli/dfirtrack | 273 | 12763274 | from django.contrib.auth.models import User
from django.test import TestCase
from dfirtrack_artifacts.forms import ArtifactCreatorForm
from dfirtrack_artifacts.models import Artifactpriority, Artifactstatus, Artifacttype
from dfirtrack_main.models import System, Systemstatus, Tag, Tagcolor
class ArtifactCreatorFormTestCase(TestCase):
"""artifact creator form tests"""
@classmethod
def setUpTestData(cls):
# create user
test_user = User.objects.create_user(
username='testuser_artifact_creator', password='<PASSWORD>'
)
# create object
systemstatus_1 = Systemstatus.objects.create(systemstatus_name='systemstatus_1')
# create object
System.objects.create(
system_name='system_1',
systemstatus=systemstatus_1,
system_created_by_user_id=test_user,
system_modified_by_user_id=test_user,
)
System.objects.create(
system_name='system_2',
systemstatus=systemstatus_1,
system_created_by_user_id=test_user,
system_modified_by_user_id=test_user,
)
# create object
tagcolor_1 = Tagcolor.objects.create(tagcolor_name='tagcolor_1')
# create object
Tag.objects.create(
tag_name='tag_1',
tagcolor=tagcolor_1,
)
Tag.objects.create(
tag_name='tag_2',
tagcolor=tagcolor_1,
)
# create object
Artifactpriority.objects.create(artifactpriority_name='prio_1')
# create object
Artifactstatus.objects.create(artifactstatus_name='artifactstatus_1')
# create object
Artifacttype.objects.create(artifacttype_name='artifacttype_1')
Artifacttype.objects.create(artifacttype_name='artifacttype_2')
def test_artifact_creator_artifactpriority_form_label(self):
"""test form label"""
# get object
form = ArtifactCreatorForm()
# compare
self.assertEqual(form.fields['artifactpriority'].label, 'Artifactpriority (*)')
def test_artifact_creator_artifactstatus_form_label(self):
"""test form label"""
# get object
form = ArtifactCreatorForm()
# compare
self.assertEqual(form.fields['artifactstatus'].label, 'Artifactstatus (*)')
def test_artifact_creator_artifacttype_form_label(self):
"""test form label"""
# get object
form = ArtifactCreatorForm()
# compare
self.assertEqual(
form.fields['artifacttype'].label,
'Artifacttypes (*) - Will also be set as artifact names',
)
def test_artifact_creator_system_form_label(self):
"""test form label"""
# get object
form = ArtifactCreatorForm()
# compare
self.assertEqual(form.fields['system'].label, 'Systems (*)')
def test_artifact_creator_tag_form_label(self):
"""test form label"""
# get object
form = ArtifactCreatorForm()
# compare
self.assertEqual(form.fields['tag'].label, 'Tags')
def test_artifact_creator_analysisresult_note_form_label(self):
"""test form label"""
# get object
form = ArtifactCreatorForm()
# compare
self.assertEqual(
form.fields['artifact_note_analysisresult'].label, 'Analysis result'
)
def test_artifact_creator_external_note_form_label(self):
"""test form label"""
# get object
form = ArtifactCreatorForm()
# compare
self.assertEqual(form.fields['artifact_note_external'].label, 'External note')
def test_artifact_creator_internal_note_form_label(self):
"""test form label"""
# get object
form = ArtifactCreatorForm()
# compare
self.assertEqual(form.fields['artifact_note_internal'].label, 'Internal note')
def test_artifact_creator_name_choice_form_label(self):
"""test form label"""
# get object
form = ArtifactCreatorForm()
# compare
self.assertEqual(
form.fields['alternative_artifact_name_choice'].label,
'Use alternative artifact name',
)
def test_artifact_creator_name_form_label(self):
"""test form label"""
# get object
form = ArtifactCreatorForm()
# compare
self.assertEqual(
form.fields['alternative_artifact_name'].label, 'Alternative artifact name'
)
def test_artifact_creator_source_path_form_label(self):
"""test form label"""
# get object
form = ArtifactCreatorForm()
# compare
self.assertEqual(
form.fields['artifact_source_path'].label,
'Artifact source path (attention: will be set for all artifacts regardless of type)',
)
def test_artifact_creator_form_empty(self):
"""test minimum form requirements / INVALID"""
# get object
form = ArtifactCreatorForm(data={})
# compare
self.assertFalse(form.is_valid())
def test_artifact_creator_artifacttype_form_filled(self):
"""test minimum form requirements / INVALID"""
# get object
artifacttype_1_id = Artifacttype.objects.get(
artifacttype_name='artifacttype_1'
).artifacttype_id
artifacttype_2_id = Artifacttype.objects.get(
artifacttype_name='artifacttype_2'
).artifacttype_id
# get object
form = ArtifactCreatorForm(
data={
'artifacttype': [
artifacttype_1_id,
artifacttype_2_id,
],
}
)
# compare
self.assertFalse(form.is_valid())
def test_artifact_creator_artifactpriority_form_filled(self):
"""test minimum form requirements / INVALID"""
# get object
artifactpriority_id = Artifactpriority.objects.get(
artifactpriority_name='prio_1'
).artifactpriority_id
# get object
artifacttype_1_id = Artifacttype.objects.get(
artifacttype_name='artifacttype_1'
).artifacttype_id
artifacttype_2_id = Artifacttype.objects.get(
artifacttype_name='artifacttype_2'
).artifacttype_id
# get object
form = ArtifactCreatorForm(
data={
'artifactpriority': artifactpriority_id,
'artifacttype': [
artifacttype_1_id,
artifacttype_2_id,
],
}
)
# compare
self.assertFalse(form.is_valid())
def test_artifact_creator_artifactstatus_form_filled(self):
"""test minimum form requirements / INVALID"""
# get object
artifactpriority_id = Artifactpriority.objects.get(
artifactpriority_name='prio_1'
).artifactpriority_id
# get object
artifactstatus_id = Artifactstatus.objects.get(
artifactstatus_name='artifactstatus_1'
).artifactstatus_id
# get object
artifacttype_1_id = Artifacttype.objects.get(
artifacttype_name='artifacttype_1'
).artifacttype_id
artifacttype_2_id = Artifacttype.objects.get(
artifacttype_name='artifacttype_2'
).artifacttype_id
# get object
form = ArtifactCreatorForm(
data={
'artifactpriority': artifactpriority_id,
'artifactstatus': artifactstatus_id,
'artifacttype': [
artifacttype_1_id,
artifacttype_2_id,
],
}
)
# compare
self.assertFalse(form.is_valid())
def test_artifact_creator_system_form_filled(self):
"""test minimum form requirements / VALID"""
# get object
artifactpriority_id = Artifactpriority.objects.get(
artifactpriority_name='prio_1'
).artifactpriority_id
# get object
artifactstatus_id = Artifactstatus.objects.get(
artifactstatus_name='artifactstatus_1'
).artifactstatus_id
# get object
artifacttype_1_id = Artifacttype.objects.get(
artifacttype_name='artifacttype_1'
).artifacttype_id
artifacttype_2_id = Artifacttype.objects.get(
artifacttype_name='artifacttype_2'
).artifacttype_id
# get object
system_1_id = System.objects.get(system_name='system_1').system_id
system_2_id = System.objects.get(system_name='system_2').system_id
# get object
form = ArtifactCreatorForm(
data={
'artifactpriority': artifactpriority_id,
'artifactstatus': artifactstatus_id,
'artifacttype': [
artifacttype_1_id,
artifacttype_2_id,
],
'system': [
system_1_id,
system_2_id,
],
}
)
# compare
self.assertTrue(form.is_valid())
def test_artifact_creator_all_fields_form_filled(self):
"""test additional form content"""
# get object
artifactpriority_id = Artifactpriority.objects.get(
artifactpriority_name='prio_1'
).artifactpriority_id
# get object
artifactstatus_id = Artifactstatus.objects.get(
artifactstatus_name='artifactstatus_1'
).artifactstatus_id
# get object
artifacttype_1_id = Artifacttype.objects.get(
artifacttype_name='artifacttype_1'
).artifacttype_id
artifacttype_2_id = Artifacttype.objects.get(
artifacttype_name='artifacttype_2'
).artifacttype_id
# get object
system_1_id = System.objects.get(system_name='system_1').system_id
system_2_id = System.objects.get(system_name='system_2').system_id
# get object
tag_1_id = Tag.objects.get(tag_name='tag_1').tag_id
tag_2_id = Tag.objects.get(tag_name='tag_2').tag_id
# get object
form = ArtifactCreatorForm(
data={
'artifactpriority': artifactpriority_id,
'artifactstatus': artifactstatus_id,
'artifacttype': [
artifacttype_1_id,
artifacttype_2_id,
],
'system': [
system_1_id,
system_2_id,
],
'tag': [
tag_1_id,
tag_2_id,
],
'artifact_note_analysisresult': 'lorem ipsum',
'artifact_note_external': 'lorem ipsum',
'artifact_note_internal': 'lorem ipsum',
'artifact_source_path': 'evil.exe',
}
)
# compare
self.assertTrue(form.is_valid())
def test_artifact_creator_alternative_name_form_filled(self):
"""test custom field validation"""
# get object
artifactpriority_id = Artifactpriority.objects.get(
artifactpriority_name='prio_1'
).artifactpriority_id
# get object
artifactstatus_id = Artifactstatus.objects.get(
artifactstatus_name='artifactstatus_1'
).artifactstatus_id
        # get object
artifacttype_1_id = Artifacttype.objects.get(
artifacttype_name='artifacttype_1'
).artifacttype_id
# get object
system_1_id = System.objects.get(system_name='system_1').system_id
# get object
form = ArtifactCreatorForm(
data={
'artifactpriority': artifactpriority_id,
'artifactstatus': artifactstatus_id,
'artifacttype': [
artifacttype_1_id,
],
'system': [
system_1_id,
],
'alternative_artifact_name': 'alternative name',
}
)
# compare
self.assertFalse(form.is_valid())
self.assertEqual(
form.errors['alternative_artifact_name'],
['Either both or neither of the fields is required.'],
)
def test_artifact_creator_alternative_choice_form_filled(self):
"""test custom field validation"""
# get object
artifactpriority_id = Artifactpriority.objects.get(
artifactpriority_name='prio_1'
).artifactpriority_id
# get object
artifactstatus_id = Artifactstatus.objects.get(
artifactstatus_name='artifactstatus_1'
).artifactstatus_id
        # get object
artifacttype_1_id = Artifacttype.objects.get(
artifacttype_name='artifacttype_1'
).artifacttype_id
# get object
system_1_id = System.objects.get(system_name='system_1').system_id
# get object
form = ArtifactCreatorForm(
data={
'artifactpriority': artifactpriority_id,
'artifactstatus': artifactstatus_id,
'artifacttype': [
artifacttype_1_id,
],
'system': [
system_1_id,
],
'alternative_artifact_name_choice': True,
}
)
# compare
self.assertFalse(form.is_valid())
self.assertEqual(
form.errors['alternative_artifact_name'],
['Either both or neither of the fields is required.'],
)
def test_artifact_creator_alternative_both_form_filled(self):
"""test custom field validation"""
# get object
artifactpriority_id = Artifactpriority.objects.get(
artifactpriority_name='prio_1'
).artifactpriority_id
# get object
artifactstatus_id = Artifactstatus.objects.get(
artifactstatus_name='artifactstatus_1'
).artifactstatus_id
        # get object
artifacttype_1_id = Artifacttype.objects.get(
artifacttype_name='artifacttype_1'
).artifacttype_id
# get object
system_1_id = System.objects.get(system_name='system_1').system_id
# get object
form = ArtifactCreatorForm(
data={
'artifactpriority': artifactpriority_id,
'artifactstatus': artifactstatus_id,
'artifacttype': [
artifacttype_1_id,
],
'system': [
system_1_id,
],
'alternative_artifact_name_choice': True,
'alternative_artifact_name': 'alternative name',
}
)
# compare
self.assertTrue(form.is_valid())
|
alipay/aop/api/response/ZolozIdentificationUserWebQueryResponse.py | snowxmas/alipay-sdk-python-all | 213 | 12763306 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
class ZolozIdentificationUserWebQueryResponse(AlipayResponse):
def __init__(self):
super(ZolozIdentificationUserWebQueryResponse, self).__init__()
self._extern_info = None
@property
def extern_info(self):
return self._extern_info
@extern_info.setter
def extern_info(self, value):
self._extern_info = value
def parse_response_content(self, response_content):
response = super(ZolozIdentificationUserWebQueryResponse, self).parse_response_content(response_content)
if 'extern_info' in response:
self.extern_info = response['extern_info']
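# Illustrative usage sketch (editor's addition, not part of the SDK). It assumes
# the base class hands the decoded response payload to parse_response_content and
# that the payload contains an 'extern_info' entry:
#   resp = ZolozIdentificationUserWebQueryResponse()
#   resp.parse_response_content(raw_body)  # raw_body obtained from the API call
#   print(resp.extern_info)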
|
tests/__init__.py | szabosteve/eland | 335 | 12763311 |
# Licensed to Elasticsearch B.V. under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch B.V. licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import pandas as pd
from elasticsearch import Elasticsearch
from eland.common import es_version
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
# Define test files and indices
ELASTICSEARCH_HOST = os.environ.get(
"ELASTICSEARCH_URL", os.environ.get("ELASTICSEARCH_HOST", "http://localhost:9200")
)
# Define client to use in tests
ES_TEST_CLIENT = Elasticsearch(ELASTICSEARCH_HOST)
ES_VERSION = es_version(ES_TEST_CLIENT)
FLIGHTS_INDEX_NAME = "flights"
FLIGHTS_MAPPING = {
"mappings": {
"properties": {
"AvgTicketPrice": {"type": "float"},
"Cancelled": {"type": "boolean"},
"Carrier": {"type": "keyword"},
"Dest": {"type": "keyword"},
"DestAirportID": {"type": "keyword"},
"DestCityName": {"type": "keyword"},
"DestCountry": {"type": "keyword"},
"DestLocation": {"type": "geo_point"},
"DestRegion": {"type": "keyword"},
"DestWeather": {"type": "keyword"},
"DistanceKilometers": {"type": "float"},
"DistanceMiles": {"type": "float"},
"FlightDelay": {"type": "boolean"},
"FlightDelayMin": {"type": "integer"},
"FlightDelayType": {"type": "keyword"},
"FlightNum": {"type": "keyword"},
"FlightTimeHour": {"type": "float"},
"FlightTimeMin": {"type": "float"},
"Origin": {"type": "keyword"},
"OriginAirportID": {"type": "keyword"},
"OriginCityName": {"type": "keyword"},
"OriginCountry": {"type": "keyword"},
"OriginLocation": {"type": "geo_point"},
"OriginRegion": {"type": "keyword"},
"OriginWeather": {"type": "keyword"},
"dayOfWeek": {"type": "byte"},
"timestamp": {"type": "date", "format": "strict_date_hour_minute_second"},
}
}
}
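# Editor's note (illustrative sketch, not part of the original test setup): the
# mapping above is typically used to (re)create the test index before loading the
# gzipped JSON fixture; the exact client keyword arguments depend on the
# elasticsearch-py version in use.
#   ES_TEST_CLIENT.indices.delete(index=FLIGHTS_INDEX_NAME, ignore_unavailable=True)
#   ES_TEST_CLIENT.indices.create(index=FLIGHTS_INDEX_NAME, body=FLIGHTS_MAPPING)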
FLIGHTS_FILE_NAME = ROOT_DIR + "/flights.json.gz"
FLIGHTS_DF_FILE_NAME = ROOT_DIR + "/flights_df.json.gz"
FLIGHTS_SMALL_INDEX_NAME = "flights_small"
FLIGHTS_SMALL_MAPPING = FLIGHTS_MAPPING
FLIGHTS_SMALL_FILE_NAME = ROOT_DIR + "/flights_small.json.gz"
ECOMMERCE_INDEX_NAME = "ecommerce"
ECOMMERCE_MAPPING = {
"mappings": {
"properties": {
"category": {"type": "text", "fields": {"keyword": {"type": "keyword"}}},
"currency": {"type": "keyword"},
"customer_birth_date": {"type": "date"},
"customer_first_name": {
"type": "text",
"fields": {"keyword": {"type": "keyword", "ignore_above": 256}},
},
"customer_full_name": {
"type": "text",
"fields": {"keyword": {"type": "keyword", "ignore_above": 256}},
},
"customer_gender": {"type": "text"},
"customer_id": {"type": "keyword"},
"customer_last_name": {
"type": "text",
"fields": {"keyword": {"type": "keyword", "ignore_above": 256}},
},
"customer_phone": {"type": "keyword"},
"day_of_week": {"type": "keyword"},
"day_of_week_i": {"type": "integer"},
"email": {"type": "keyword"},
"geoip": {
"properties": {
"city_name": {"type": "keyword"},
"continent_name": {"type": "keyword"},
"country_iso_code": {"type": "keyword"},
"location": {"type": "geo_point"},
"region_name": {"type": "keyword"},
}
},
"manufacturer": {
"type": "text",
"fields": {"keyword": {"type": "keyword"}},
},
"order_date": {"type": "date"},
"order_id": {"type": "keyword"},
"products": {
"properties": {
"_id": {
"type": "text",
"fields": {"keyword": {"type": "keyword", "ignore_above": 256}},
},
"base_price": {"type": "half_float"},
"base_unit_price": {"type": "half_float"},
"category": {
"type": "text",
"fields": {"keyword": {"type": "keyword"}},
},
"created_on": {"type": "date"},
"discount_amount": {"type": "half_float"},
"discount_percentage": {"type": "half_float"},
"manufacturer": {
"type": "text",
"fields": {"keyword": {"type": "keyword"}},
},
"min_price": {"type": "half_float"},
"price": {"type": "half_float"},
"product_id": {"type": "long"},
"product_name": {
"type": "text",
"fields": {"keyword": {"type": "keyword"}},
"analyzer": "english",
},
"quantity": {"type": "integer"},
"sku": {"type": "keyword"},
"tax_amount": {"type": "half_float"},
"taxful_price": {"type": "half_float"},
"taxless_price": {"type": "half_float"},
"unit_discount_amount": {"type": "half_float"},
}
},
"sku": {"type": "keyword"},
"taxful_total_price": {"type": "float"},
"taxless_total_price": {"type": "float"},
"total_quantity": {"type": "integer"},
"total_unique_products": {"type": "integer"},
"type": {"type": "keyword"},
"user": {"type": "keyword"},
}
}
}
ECOMMERCE_FILE_NAME = ROOT_DIR + "/ecommerce.json.gz"
ECOMMERCE_DF_FILE_NAME = ROOT_DIR + "/ecommerce_df.json.gz"
TEST_MAPPING1 = {
"mappings": {
"properties": {
"city": {"type": "text", "fields": {"raw": {"type": "keyword"}}},
"text": {
"type": "text",
"fields": {"english": {"type": "text", "analyzer": "english"}},
},
"origin_location": {
"properties": {
"lat": {
"type": "text",
"index_prefixes": {},
"fields": {"keyword": {"type": "keyword", "ignore_above": 256}},
},
"lon": {
"type": "text",
"fields": {"keyword": {"type": "keyword", "ignore_above": 256}},
},
}
},
"maps-telemetry": {
"properties": {
"attributesPerMap": {
"properties": {
"dataSourcesCount": {
"properties": {
"avg": {"type": "long"},
"max": {"type": "long"},
"min": {"type": "long"},
}
},
"emsVectorLayersCount": {
"dynamic": "true",
"properties": {
"france_departments": {
"properties": {
"avg": {"type": "float"},
"max": {"type": "long"},
"min": {"type": "long"},
}
}
},
},
}
}
}
},
"type": {"type": "keyword"},
"name": {"type": "text"},
"user_name": {"type": "keyword"},
"email": {"type": "keyword"},
"content": {"type": "text"},
"tweeted_at": {"type": "date"},
"dest_location": {"type": "geo_point"},
"my_join_field": {
"type": "join",
"relations": {"question": ["answer", "comment"], "answer": "vote"},
},
}
}
}
TEST_MAPPING1_INDEX_NAME = "mapping1"
TEST_MAPPING1_EXPECTED = {
"city": "text",
"city.raw": "keyword",
"content": "text",
"dest_location": "geo_point",
"email": "keyword",
"maps-telemetry.attributesPerMap.dataSourcesCount.avg": "long",
"maps-telemetry.attributesPerMap.dataSourcesCount.max": "long",
"maps-telemetry.attributesPerMap.dataSourcesCount.min": "long",
"maps-telemetry.attributesPerMap.emsVectorLayersCount.france_departments.avg": "float",
"maps-telemetry.attributesPerMap.emsVectorLayersCount.france_departments.max": "long",
"maps-telemetry.attributesPerMap.emsVectorLayersCount.france_departments.min": "long",
"my_join_field": "join",
"name": "text",
"origin_location.lat": "text",
"origin_location.lat.keyword": "keyword",
"origin_location.lon": "text",
"origin_location.lon.keyword": "keyword",
"text": "text",
"text.english": "text",
"tweeted_at": "date",
"type": "keyword",
"user_name": "keyword",
}
TEST_MAPPING1_EXPECTED_DF = pd.DataFrame.from_dict(
data=TEST_MAPPING1_EXPECTED, orient="index", columns=["es_dtype"]
)
TEST_MAPPING1_EXPECTED_SOURCE_FIELD_DF = TEST_MAPPING1_EXPECTED_DF.drop(
index=[
"city.raw",
"origin_location.lat.keyword",
"origin_location.lon.keyword",
"text.english",
]
)
TEST_MAPPING1_EXPECTED_SOURCE_FIELD_COUNT = len(
TEST_MAPPING1_EXPECTED_SOURCE_FIELD_DF.index
)
TEST_NESTED_USER_GROUP_INDEX_NAME = "nested_user_group"
TEST_NESTED_USER_GROUP_MAPPING = {
"mappings": {
"properties": {
"group": {"type": "keyword"},
"user": {
"properties": {
"first": {"type": "keyword"},
"last": {"type": "keyword"},
"address": {"type": "keyword"},
}
},
}
}
}
TEST_NESTED_USER_GROUP_DOCS = [
{
"_index": TEST_NESTED_USER_GROUP_INDEX_NAME,
"_source": {
"group": "amsterdam",
"user": [
{
"first": "Manke",
"last": "Nelis",
"address": ["Elandsgracht", "Amsterdam"],
},
{
"first": "Johnny",
"last": "Jordaan",
"address": ["Elandsstraat", "Amsterdam"],
},
],
},
},
{
"_index": TEST_NESTED_USER_GROUP_INDEX_NAME,
"_source": {
"group": "london",
"user": [
{"first": "Alice", "last": "Monkton"},
{"first": "Jimmy", "last": "White", "address": ["London"]},
],
},
},
{
"_index": TEST_NESTED_USER_GROUP_INDEX_NAME,
"_source": {"group": "new york", "user": [{"first": "Bill", "last": "Jones"}]},
},
]
|
tests/core/test_record_components.py | ai-fast-track/mantisshrimp | 580 | 12763320 | import pytest
from icevision.all import *
@pytest.fixture
def dummy_class_map():
return ClassMap(["dummy-1", "dummy-2"], background=None)
@pytest.fixture
def dummy_class_map_elaborate():
return ClassMap(["dummy-1", "dummy-2", "dummy-3", "dummy-4"], background=None)
def test_classification_multilabel(dummy_class_map):
rec = BaseRecord([ClassificationLabelsRecordComponent(is_multilabel=True)])
rec.classification.set_class_map(dummy_class_map)
rec.classification.set_labels_by_id([0, 1])
assert rec.classification.label_ids == [0, 1]
assert (rec.classification.one_hot_encoded() == np.array([1, 1])).all()
@pytest.mark.parametrize(
"label_ids",
[
([0, 1]),
([0]),
],
)
def test_classification_single_label(dummy_class_map, label_ids):
rec = BaseRecord([ClassificationLabelsRecordComponent(is_multilabel=False)])
rec.classification.set_class_map(dummy_class_map)
rec.classification.set_labels_by_id(label_ids)
if len(label_ids) > 1:
# label_ids == [0, 1]
# Setting two labels when `is_multilabel=False` raises an error
with pytest.raises(AutofixAbort):
rec.classification._autofix()
else:
# label_ids == [0]
# Only one label must be assigned
assert all(rec.classification._autofix().values())
assert rec.classification.one_hot_encoded().sum() == 1
@pytest.mark.parametrize(
"label_ids",
[
([0, 1, 2]),
([0, 1]),
([0]),
],
)
def test_one_hot_encodings(dummy_class_map_elaborate, label_ids):
rec = BaseRecord([ClassificationLabelsRecordComponent(is_multilabel=True)])
rec.classification.set_class_map(dummy_class_map_elaborate)
rec.classification.set_labels_by_id(label_ids)
assert all(rec.classification._autofix().values())
# Ensure we have the correct no. of labels and that they are indeed
# one-hot encoded
one_hot_values = rec.classification.one_hot_encoded()
assert one_hot_values.sum() == len(label_ids)
assert np.unique(one_hot_values).tolist() == [0, 1]
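# Editor's sketch (assumed usage, mirroring the calls exercised in the tests above):
#   rec = BaseRecord([ClassificationLabelsRecordComponent(is_multilabel=True)])
#   rec.classification.set_class_map(ClassMap(["a", "b"], background=None))
#   rec.classification.set_labels_by_id([0])
#   rec.classification.one_hot_encoded()  # -> array([1, 0])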
|
earth_enterprise/src/update_fusion_version.py | ezeeyahoo/earthenterprise | 2,661 | 12763324 |
#!/usr/bin/env python2.7
#
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Update the Fusion version string.
It takes two flags --long long_version_name and --short short_version_name.
Then it opens all the files that need to be updated and replaces the current
long_version_name and short_version_name with these new values.
Note: The long version name needs to start with the short version name.
Example Usage:
./update_fusion_version.py --long "3.2.0" --short "3.2"
"""
import datetime
import fileinput
import os
import sys
from pyglib import app
from pyglib import flags
FLAGS = flags.FLAGS
flags.DEFINE_string('long',
'',
'Long version string for fusion (e.g 3.2.0')
flags.DEFINE_string('short',
'',
'Short version string for fusion (e.g 3.2')
def FindUpdateCurrentVersion(fusion_version_file, long_version, short_version,
year):
"""Find and update long and short version names in the fusion_version_file.
Args:
fusion_version_file: Absolute filename for fusion_version.txt
long_version: The new long_version to update to.
short_version: The new short_version to update to.
year: The current year to be used in copyright statement.
Returns:
    A tuple of two strings: the current long and short version names,
    respectively.
Raises:
AssertionError: Whenever anything fails.
"""
cmd = 'cd %s; g4 open %s' % (os.path.dirname(fusion_version_file),
os.path.basename(fusion_version_file))
if os.system(cmd):
raise AssertionError('Cannot run command "%s"' % cmd)
stage = 0 # not yet reached long_version
for line in fileinput.FileInput(fusion_version_file, inplace=1):
if stage == 0:
if not line.startswith('#'):
stage = 1 # long_version reached
old_long = line[:-1]
print long_version
else:
# TODO: Create script to do this for all copyrights.
if line.startswith('# Copyright'):
print '# Copyright %d Google Inc. All Rights Reserved.' % year
else:
print line,
elif stage == 1:
old_short = line[:-1]
print short_version
stage = 2 # short version reached
else:
raise AssertionError('Cannot comprehend line "%s" in %s' % (
line, fusion_version_file))
return (old_long, old_short)
def ChangeVersionInInstallerFiles(
common_prefix, old_long, new_long, old_short, new_short):
"""For installer xml files change from old version to new version.
Args:
common_prefix: Common root for all files to change.
old_long: Current long version string.
new_long: New long version string.
old_short: Current short version string.
new_short: New short version string.
Raises:
AssertionError: Whenever anything fails.
"""
installer_files = ('installer/config/GoogleEarthInstaller.iap_xml',
'installer/config/GoogleFusionInstaller.iap_xml',
'installer/config/GoogleFusionToolsInstaller.iap_xml')
old_long_cdata = 'CDATA[%s]' % (old_long)
old_short_cdata = 'CDATA[%s]' % (old_short)
new_long_cdata = 'CDATA[%s]' % (new_long)
new_short_cdata = 'CDATA[%s]' % (new_short)
long_key = 'CDATA[$LONG_VERSION$]'
short_key = 'CDATA[$SHORT_VERSION$]'
for file_name in installer_files:
file_name = '%s/%s' % (common_prefix, file_name)
cmd = 'cd %s; g4 open %s' % (os.path.dirname(file_name),
os.path.basename(file_name))
if os.system(cmd):
raise AssertionError('Cannot run command "%s"' % cmd)
in_defered_mode = False
defered_lines = []
for line in fileinput.FileInput(file_name, inplace=1):
if not in_defered_mode:
if line.find(old_long_cdata) >= 0 or line.find(old_short_cdata) >= 0:
in_defered_mode = True
defered_lines.append(line)
else:
line = line.replace(old_long, new_long)
print line,
else:
long_key_found = (line.find(long_key) >= 0)
if long_key_found or (line.find(short_key) >= 0):
if long_key_found:
print defered_lines[0].replace(old_long_cdata, new_long_cdata),
else:
print defered_lines[0].replace(old_short_cdata, new_short_cdata),
for index in range(1, len(defered_lines)):
print defered_lines[index],
print line,
defered_lines = []
in_defered_mode = False
else:
defered_lines.append(line)
def main(argv):
if not (len(argv) == 1 and FLAGS.long and FLAGS.short and
FLAGS.long.startswith(FLAGS.short)):
sys.stderr.write('Wrong Usage of the script %s \n\n' % argv[0])
sys.stderr.write(__doc__)
sys.exit(-1)
script_path = os.path.abspath(argv[0])
common_prefix = os.path.dirname(os.path.dirname(script_path))
fusion_version_file = '%s/%s' % (common_prefix, 'src/fusion_version.txt')
(old_long, old_short) = FindUpdateCurrentVersion(fusion_version_file,
FLAGS.long, FLAGS.short,
datetime.datetime.now().year)
ChangeVersionInInstallerFiles(
common_prefix, old_long, FLAGS.long, old_short, FLAGS.short)
if __name__ == '__main__':
app.run()
|
office365/sharepoint/sharing/sharingLinkInfo.py | rikeshtailor/Office365-REST-Python-Client | 544 | 12763331 |
from office365.runtime.client_value import ClientValue
class SharingLinkInfo(ClientValue):
def __init__(self):
"""
Specifies the information about the tokenized sharing link.
"""
super(SharingLinkInfo, self).__init__()
self.AllowsAnonymousAccess = None
self.ApplicationId = None
self.CreatedBy = None
self.PasswordProtected = None
|
ansible/roles/blade.cumulus/library/ssh_user_alias.py | ClashTheBunny/cmdb | 111 | 12763333 |
#!/usr/bin/python
DOCUMENTATION = """
---
module: ssh_user_alias.py
short_description: Create alias for users in SSH authorized_keys
options:
user:
description:
- base user to make alias for
groups:
description:
- list of groups we want our aliases to be in
"""
import os
import re
from ansible.module_utils.basic import AnsibleModule
def main():
module_args = dict(
user=dict(type='str', required=True),
groups=dict(type='list', elements='str', default=[])
)
module = AnsibleModule(
argument_spec=module_args,
supports_check_mode=True
)
result = dict(
changed=False
)
got = {}
wanted = {}
for source in ["/etc/passwd",
"/etc/shadow",
"/etc/group"]:
with open(source) as f:
got[source] = f.read()
wanted[source] = got[source]
base_uid = None
to_remove = []
# Handle /etc/passwd
to_keep = []
for line in wanted["/etc/passwd"].split("\n"):
if not line:
continue
user, _, uid, gid, gecos, home, shell = line.split(":")
if user == module.params["user"]:
base_uid, base_gid, base_home, base_shell = uid, gid, home, shell
elif gecos == "cmdb,,,":
to_remove.append(user)
continue
to_keep.append(line)
if base_uid is None:
result["msg"] = "user {} not found in /etc/passwd".format(
module.params["user"])
module.fail_json(**result)
# Get HOME/.ssh/authorized_keys
to_add = []
with open(os.path.join(base_home, ".ssh", "authorized_keys")) as f:
for line in f:
if not line:
continue
line = line.strip()
user = line.split(" ", 2)[-1]
if re.match(r"[a-z]+", user):
to_add.append(user)
# Add users
for user in to_add:
to_keep.append(":".join([user, "x", base_uid, base_gid,
"cmdb,,,", base_home, base_shell]))
wanted["/etc/passwd"] = "\n".join(to_keep) + "\n"
# Handle /etc/shadow
to_keep = []
for line in wanted["/etc/shadow"].split("\n"):
if not line:
continue
user, passwd, _, _, _, _, _, _, _ = line.split(":")
if passwd != "cmdb":
to_keep.append(line)
for user in to_add:
to_keep.append(":".join([user, "cmdb", "18312", "0",
"999999", "7", "", "", ""]))
wanted["/etc/shadow"] = "\n".join(to_keep) + "\n"
# Handle /etc/group
to_keep = []
for line in wanted["/etc/group"].split("\n"):
if not line:
continue
group, password, gid, users = line.split(":")
users = [u for u in users.split(",")
if u and u not in to_remove]
if group in module.params["groups"]:
users.extend(to_add)
users = ",".join(users)
to_keep.append(":".join([group, password, gid, users]))
wanted["/etc/group"] = "\n".join(to_keep) + "\n"
if got != wanted:
result['changed'] = True
result['diff'] = [
dict(
before_header=f,
after_header=f,
before=got[f],
after=wanted[f])
for f in got
]
if module.check_mode or not result['changed']:
module.exit_json(**result)
# Apply changes.
for dest in wanted:
with open(dest, "w") as f:
f.write(wanted[dest])
module.exit_json(**result)
if __name__ == '__main__':
main()
|
stix2/v20/vocab.py | frank7y/cti-python-stix2 | 277 | 12763360 | """
STIX 2.0 open vocabularies and enums
"""
ATTACK_MOTIVATION_ACCIDENTAL = "accidental"
ATTACK_MOTIVATION_COERCION = "coercion"
ATTACK_MOTIVATION_DOMINANCE = "dominance"
ATTACK_MOTIVATION_IDEOLOGY = "ideology"
ATTACK_MOTIVATION_NOTORIETY = "notoriety"
ATTACK_MOTIVATION_ORGANIZATIONAL_GAIN = "organizational-gain"
ATTACK_MOTIVATION_PERSONAL_GAIN = "personal-gain"
ATTACK_MOTIVATION_PERSONAL_SATISFACTION = "personal-satisfaction"
ATTACK_MOTIVATION_REVENGE = "revenge"
ATTACK_MOTIVATION_UNPREDICTABLE = "unpredictable"
ATTACK_MOTIVATION = [
ATTACK_MOTIVATION_ACCIDENTAL,
ATTACK_MOTIVATION_COERCION,
ATTACK_MOTIVATION_DOMINANCE,
ATTACK_MOTIVATION_IDEOLOGY,
ATTACK_MOTIVATION_NOTORIETY,
ATTACK_MOTIVATION_ORGANIZATIONAL_GAIN,
ATTACK_MOTIVATION_PERSONAL_GAIN,
ATTACK_MOTIVATION_PERSONAL_SATISFACTION,
ATTACK_MOTIVATION_REVENGE,
ATTACK_MOTIVATION_UNPREDICTABLE,
]
ATTACK_RESOURCE_LEVEL_INDIVIDUAL = "individual"
ATTACK_RESOURCE_LEVEL_CLUB = "club"
ATTACK_RESOURCE_LEVEL_CONTEST = "contest"
ATTACK_RESOURCE_LEVEL_TEAM = "team"
ATTACK_RESOURCE_LEVEL_ORGANIZATION = "organization"
ATTACK_RESOURCE_LEVEL_GOVERNMENT = "government"
ATTACK_RESOURCE_LEVEL = [
ATTACK_RESOURCE_LEVEL_INDIVIDUAL,
ATTACK_RESOURCE_LEVEL_CLUB,
ATTACK_RESOURCE_LEVEL_CONTEST,
ATTACK_RESOURCE_LEVEL_TEAM,
ATTACK_RESOURCE_LEVEL_ORGANIZATION,
ATTACK_RESOURCE_LEVEL_GOVERNMENT,
]
HASHING_ALGORITHM_MD5 = "MD5"
HASHING_ALGORITHM_MD6 = "MD6"
HASHING_ALGORITHM_RIPEMD_160 = "RIPEMD-160"
HASHING_ALGORITHM_SHA_1 = "SHA-1"
HASHING_ALGORITHM_SHA_224 = "SHA-224"
HASHING_ALGORITHM_SHA_256 = "SHA-256"
HASHING_ALGORITHM_SHA_384 = "SHA-384"
HASHING_ALGORITHM_SHA_512 = "SHA-512"
HASHING_ALGORITHM_SHA3_224 = "SHA3-224"
HASHING_ALGORITHM_SHA3_256 = "SHA3-256"
HASHING_ALGORITHM_SHA3_384 = "SHA3-384"
HASHING_ALGORITHM_SHA3_512 = "SHA3-512"
HASHING_ALGORITHM_SSDEEP = "ssdeep"
HASHING_ALGORITHM_WHIRLPOOL = "WHIRLPOOL"
HASHING_ALGORITHM = [
HASHING_ALGORITHM_MD5,
HASHING_ALGORITHM_MD6,
HASHING_ALGORITHM_RIPEMD_160,
HASHING_ALGORITHM_SHA_1,
HASHING_ALGORITHM_SHA_224,
HASHING_ALGORITHM_SHA_256,
HASHING_ALGORITHM_SHA_384,
HASHING_ALGORITHM_SHA_512,
HASHING_ALGORITHM_SHA3_224,
HASHING_ALGORITHM_SHA3_256,
HASHING_ALGORITHM_SHA3_384,
HASHING_ALGORITHM_SHA3_512,
HASHING_ALGORITHM_SSDEEP,
HASHING_ALGORITHM_WHIRLPOOL,
]
IDENTITY_CLASS_INDIVIDUAL = "individual"
IDENTITY_CLASS_GROUP = "group"
IDENTITY_CLASS_ORGANIZATION = "organization"
IDENTITY_CLASS_CLASS = "class"
IDENTITY_CLASS_UNKNOWN = "unknown"
IDENTITY_CLASS = [
IDENTITY_CLASS_INDIVIDUAL,
IDENTITY_CLASS_GROUP,
IDENTITY_CLASS_ORGANIZATION,
IDENTITY_CLASS_CLASS,
IDENTITY_CLASS_UNKNOWN,
]
INDICATOR_LABEL_ANOMALOUS_ACTIVITY = "anomalous-activity"
INDICATOR_LABEL_ANONYMIZATION = "anonymization"
INDICATOR_LABEL_BENIGN = "benign"
INDICATOR_LABEL_COMPROMISED = "compromised"
INDICATOR_LABEL_MALICIOUS_ACTIVITY = "malicious-activity"
INDICATOR_LABEL_ATTRIBUTION = "attribution"
INDICATOR_LABEL = [
INDICATOR_LABEL_ANOMALOUS_ACTIVITY,
INDICATOR_LABEL_ANONYMIZATION,
INDICATOR_LABEL_BENIGN,
INDICATOR_LABEL_COMPROMISED,
INDICATOR_LABEL_MALICIOUS_ACTIVITY,
INDICATOR_LABEL_ATTRIBUTION,
]
INDUSTRY_SECTOR_AGRICULTURE = "agriculture"
INDUSTRY_SECTOR_AEROSPACE = "aerospace"
INDUSTRY_SECTOR_AUTOMOTIVE = "automotive"
INDUSTRY_SECTOR_COMMUNICATIONS = "communications"
INDUSTRY_SECTOR_CONSTRUCTION = "construction"
INDUSTRY_SECTOR_DEFENCE = "defence"
INDUSTRY_SECTOR_EDUCATION = "education"
INDUSTRY_SECTOR_ENERGY = "energy"
INDUSTRY_SECTOR_ENTERTAINMENT = "entertainment"
INDUSTRY_SECTOR_FINANCIAL_SERVICES = "financial-services"
INDUSTRY_SECTOR_GOVERNMENT_NATIONAL = "government-national"
INDUSTRY_SECTOR_GOVERNMENT_REGIONAL = "government-regional"
INDUSTRY_SECTOR_GOVERNMENT_LOCAL = "government-local"
INDUSTRY_SECTOR_GOVERNMENT_PUBLIC_SERVICES = "government-public-services"
INDUSTRY_SECTOR_HEALTHCARE = "healthcare"
INDUSTRY_SECTOR_HOSPITALITY_LEISURE = "hospitality-leisure"
INDUSTRY_SECTOR_INFRASTRUCTURE = "infrastructure"
INDUSTRY_SECTOR_INSURANCE = "insurance"
INDUSTRY_SECTOR_MANUFACTURING = "manufacturing"
INDUSTRY_SECTOR_MINING = "mining"
INDUSTRY_SECTOR_NON_PROFIT = "non-profit"
INDUSTRY_SECTOR_PHARMACEUTICALS = "pharmaceuticals"
INDUSTRY_SECTOR_RETAIL = "retail"
INDUSTRY_SECTOR_TECHNOLOGY = "technology"
INDUSTRY_SECTOR_TELECOMMUNICATIONS = "telecommunications"
INDUSTRY_SECTOR_TRANSPORTATION = "transportation"
INDUSTRY_SECTOR_UTILITIES = "utilities"
INDUSTRY_SECTOR = [
INDUSTRY_SECTOR_AGRICULTURE,
INDUSTRY_SECTOR_AEROSPACE,
INDUSTRY_SECTOR_AUTOMOTIVE,
INDUSTRY_SECTOR_COMMUNICATIONS,
INDUSTRY_SECTOR_CONSTRUCTION,
INDUSTRY_SECTOR_DEFENCE,
INDUSTRY_SECTOR_EDUCATION,
INDUSTRY_SECTOR_ENERGY,
INDUSTRY_SECTOR_ENTERTAINMENT,
INDUSTRY_SECTOR_FINANCIAL_SERVICES,
INDUSTRY_SECTOR_GOVERNMENT_NATIONAL,
INDUSTRY_SECTOR_GOVERNMENT_REGIONAL,
INDUSTRY_SECTOR_GOVERNMENT_LOCAL,
INDUSTRY_SECTOR_GOVERNMENT_PUBLIC_SERVICES,
INDUSTRY_SECTOR_HEALTHCARE,
INDUSTRY_SECTOR_HOSPITALITY_LEISURE,
INDUSTRY_SECTOR_INFRASTRUCTURE,
INDUSTRY_SECTOR_INSURANCE,
INDUSTRY_SECTOR_MANUFACTURING,
INDUSTRY_SECTOR_MINING,
INDUSTRY_SECTOR_NON_PROFIT,
INDUSTRY_SECTOR_PHARMACEUTICALS,
INDUSTRY_SECTOR_RETAIL,
INDUSTRY_SECTOR_TECHNOLOGY,
INDUSTRY_SECTOR_TELECOMMUNICATIONS,
INDUSTRY_SECTOR_TRANSPORTATION,
INDUSTRY_SECTOR_UTILITIES,
]
MALWARE_LABEL_ADWARE = "adware"
MALWARE_LABEL_BACKDOOR = "backdoor"
MALWARE_LABEL_BOT = "bot"
MALWARE_LABEL_DDOS = "ddos"
MALWARE_LABEL_DROPPER = "dropper"
MALWARE_LABEL_EXPLOIT_KIT = "exploit-kit"
MALWARE_LABEL_KEYLOGGER = "keylogger"
MALWARE_LABEL_RANSOMWARE = "ransomware"
MALWARE_LABEL_REMOTE_ACCESS_TROJAN = "remote-access-trojan"
MALWARE_LABEL_RESOURCE_EXPLOITATION = "resource-exploitation"
MALWARE_LABEL_ROGUE_SECURITY_SOFTWARE = "rogue-security-software"
MALWARE_LABEL_ROOTKIT = "rootkit"
MALWARE_LABEL_SCREEN_CAPTURE = "screen-capture"
MALWARE_LABEL_SPYWARE = "spyware"
MALWARE_LABEL_TROJAN = "trojan"
MALWARE_LABEL_VIRUS = "virus"
MALWARE_LABEL_WORM = "worm"
MALWARE_LABEL = [
MALWARE_LABEL_ADWARE,
MALWARE_LABEL_BACKDOOR,
MALWARE_LABEL_BOT,
MALWARE_LABEL_DDOS,
MALWARE_LABEL_DROPPER,
MALWARE_LABEL_EXPLOIT_KIT,
MALWARE_LABEL_KEYLOGGER,
MALWARE_LABEL_RANSOMWARE,
MALWARE_LABEL_REMOTE_ACCESS_TROJAN,
MALWARE_LABEL_RESOURCE_EXPLOITATION,
MALWARE_LABEL_ROGUE_SECURITY_SOFTWARE,
MALWARE_LABEL_ROOTKIT,
MALWARE_LABEL_SCREEN_CAPTURE,
MALWARE_LABEL_SPYWARE,
MALWARE_LABEL_TROJAN,
MALWARE_LABEL_VIRUS,
MALWARE_LABEL_WORM,
]
REPORT_LABEL_THREAT_REPORT = "threat-report"
REPORT_LABEL_ATTACK_PATTERN = "attack-pattern"
REPORT_LABEL_CAMPAIGN = "campaign"
REPORT_LABEL_IDENTITY = "identity"
REPORT_LABEL_INDICATOR = "indicator"
REPORT_LABEL_INTRUSION_SET = "intrusion-set"
REPORT_LABEL_MALWARE = "malware"
REPORT_LABEL_OBSERVED_DATA = "observed-data"
REPORT_LABEL_THREAT_ACTOR = "threat-actor"
REPORT_LABEL_TOOL = "tool"
REPORT_LABEL_VULNERABILITY = "vulnerability"
REPORT_LABEL = [
REPORT_LABEL_THREAT_REPORT,
REPORT_LABEL_ATTACK_PATTERN,
REPORT_LABEL_CAMPAIGN,
REPORT_LABEL_IDENTITY,
REPORT_LABEL_INDICATOR,
REPORT_LABEL_INTRUSION_SET,
REPORT_LABEL_MALWARE,
REPORT_LABEL_OBSERVED_DATA,
REPORT_LABEL_THREAT_ACTOR,
REPORT_LABEL_TOOL,
REPORT_LABEL_VULNERABILITY,
]
THREAT_ACTOR_LABEL_ACTIVIST = "activist"
THREAT_ACTOR_LABEL_COMPETITOR = "competitor"
THREAT_ACTOR_LABEL_CRIME_SYNDICATE = "crime-syndicate"
THREAT_ACTOR_LABEL_CRIMINAL = "criminal"
THREAT_ACTOR_LABEL_HACKER = "hacker"
THREAT_ACTOR_LABEL_INSIDER_ACCIDENTAL = "insider-accidental"
THREAT_ACTOR_LABEL_INSIDER_DISGRUNTLED = "insider-disgruntled"
THREAT_ACTOR_LABEL_NATION_STATE = "nation-state"
THREAT_ACTOR_LABEL_SENSATIONALIST = "sensationalist"
THREAT_ACTOR_LABEL_SPY = "spy"
THREAT_ACTOR_LABEL_TERRORIST = "terrorist"
THREAT_ACTOR_LABEL = [
THREAT_ACTOR_LABEL_ACTIVIST,
THREAT_ACTOR_LABEL_COMPETITOR,
THREAT_ACTOR_LABEL_CRIME_SYNDICATE,
THREAT_ACTOR_LABEL_CRIMINAL,
THREAT_ACTOR_LABEL_HACKER,
THREAT_ACTOR_LABEL_INSIDER_ACCIDENTAL,
THREAT_ACTOR_LABEL_INSIDER_DISGRUNTLED,
THREAT_ACTOR_LABEL_NATION_STATE,
THREAT_ACTOR_LABEL_SENSATIONALIST,
THREAT_ACTOR_LABEL_SPY,
THREAT_ACTOR_LABEL_TERRORIST,
]
THREAT_ACTOR_ROLE_AGENT = "agent"
THREAT_ACTOR_ROLE_DIRECTOR = "director"
THREAT_ACTOR_ROLE_INDEPENDENT = "independent"
THREAT_ACTOR_ROLE_INFRASTRUCTURE_ARCHITECT = "infrastructure-architect"
THREAT_ACTOR_ROLE_INFRASTRUCTURE_OPERATOR = "infrastructure-operator"
THREAT_ACTOR_ROLE_MALWARE_AUTHOR = "malware-author"
THREAT_ACTOR_ROLE_SPONSOR = "sponsor"
THREAT_ACTOR_ROLE = [
THREAT_ACTOR_ROLE_AGENT,
THREAT_ACTOR_ROLE_DIRECTOR,
THREAT_ACTOR_ROLE_INDEPENDENT,
THREAT_ACTOR_ROLE_INFRASTRUCTURE_ARCHITECT,
THREAT_ACTOR_ROLE_INFRASTRUCTURE_OPERATOR,
THREAT_ACTOR_ROLE_MALWARE_AUTHOR,
THREAT_ACTOR_ROLE_SPONSOR,
]
THREAT_ACTOR_SOPHISTICATION_NONE = "none"
THREAT_ACTOR_SOPHISTICATION_MINIMAL = "minimal"
THREAT_ACTOR_SOPHISTICATION_INTERMEDIATE = "intermediate"
THREAT_ACTOR_SOPHISTICATION_ADVANCED = "advanced"
THREAT_ACTOR_SOPHISTICATION_EXPERT = "expert"
THREAT_ACTOR_SOPHISTICATION_INNOVATOR = "innovator"
THREAT_ACTOR_SOPHISTICATION_STRATEGIC = "strategic"
THREAT_ACTOR_SOPHISTICATION = [
THREAT_ACTOR_SOPHISTICATION_NONE,
THREAT_ACTOR_SOPHISTICATION_MINIMAL,
THREAT_ACTOR_SOPHISTICATION_INTERMEDIATE,
THREAT_ACTOR_SOPHISTICATION_ADVANCED,
THREAT_ACTOR_SOPHISTICATION_EXPERT,
THREAT_ACTOR_SOPHISTICATION_INNOVATOR,
THREAT_ACTOR_SOPHISTICATION_STRATEGIC,
]
TOOL_LABEL_DENIAL_OF_SERVICE = "denial-of-service"
TOOL_LABEL_EXPLOITATION = "exploitation"
TOOL_LABEL_INFORMATION_GATHERING = "information-gathering"
TOOL_LABEL_NETWORK_CAPTURE = "network-capture"
TOOL_LABEL_CREDENTIAL_EXPLOITATION = "credential-exploitation"
TOOL_LABEL_REMOTE_ACCESS = "remote-access"
TOOL_LABEL_VULNERABILITY_SCANNING = "vulnerability-scanning"
TOOL_LABEL = [
TOOL_LABEL_DENIAL_OF_SERVICE,
TOOL_LABEL_EXPLOITATION,
TOOL_LABEL_INFORMATION_GATHERING,
TOOL_LABEL_NETWORK_CAPTURE,
TOOL_LABEL_CREDENTIAL_EXPLOITATION,
TOOL_LABEL_REMOTE_ACCESS,
TOOL_LABEL_VULNERABILITY_SCANNING,
]
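# Editor's sketch (not part of the original module): the open-vocabulary lists
# above are typically used for simple membership checks; a minimal, assumed
# example:
#   def is_known_hash_algorithm(name):
#       return name in HASHING_ALGORITHM
#   is_known_hash_algorithm("SHA-256")  # True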
|
htmlmin/tests/test_decorator.py | Embed-Engineering/django-htmlmin | 389 | 12763369 |
# Copyright 2013 django-htmlmin authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
import unittest
from django.test.client import Client
class TestDecorator(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.client = Client()
def test_should_minify_the_content_of_a_view_decorated(self):
response = self.client.get('/min')
minified = b'<html><head></head><body><p>Hello world! :D' + \
b'</p><div>Copyright 3000</div></body></html>'
self.assertEquals(minified, response.content)
def should_not_touch_the_content_of_an_undecorated_view(self):
expected = '''
<html>
<body>
<p>Hello world! :D</p>
<div>Copyright 3000</div>
</body>
</html>
'''
response = self.client.get('/raw')
self.assertEquals(expected, response.content)
def test_minify_response_should_be_false_in_not_minified_views(self):
response = self.client.get('/not_min')
self.assertEquals(False, response.minify_response)
|
pygimli/physics/SIP/siptools.py | baender/gimli | 224 | 12763373 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""pygimli functions for dc resistivity / SIP data."""
# TODO Please sort the content into SIP package!
import pylab as P
import numpy as N
import pygimli as pg
from pygimli.utils import rndig
def astausgleich(ab2org, mn2org, rhoaorg):
"""shifts the branches of a dc sounding to generate a matching curve."""
ab2 = P.asarray(ab2org)
mn2 = P.asarray(mn2org)
rhoa = P.asarray(rhoaorg)
um = P.unique(mn2)
for i in range(len(um) - 1):
r0, r1 = [], []
ac = P.intersect1d(ab2[mn2 == um[i]], ab2[mn2 == um[i + 1]])
for a in ac:
r0.append(rhoa[(ab2 == a) * (mn2 == um[i])][0])
r1.append(rhoa[(ab2 == a) * (mn2 == um[i + 1])][0])
if len(r0) > 0:
fak = P.mean(P.array(r0) / P.array(r1))
print(fak)
if P.isfinite(fak) and fak > 0.:
rhoa[mn2 == um[i + 1]] *= fak
return rhoa # formerly pg as vector
def loadSIPallData(filename, outnumpy=False):
"""load SIP data with the columns ab/2,mn/2,rhoa and PHI with the
corresponding frequencies in the first row."""
if outnumpy:
A = N.loadtxt(filename)
fr = A[0, 3:]
ab2 = A[1:, 0]
mn2 = A[1:, 1]
rhoa = A[1:, 2]
PHI = A[1:, 3:]
else:
A = pg.Matrix()
pg.loadMatrixCol(A, 'sch/dc.ves')
ndata = A.cols()
ab2 = A[0](1, ndata)
mn2 = A[1](1, ndata)
rhoa = A[2](1, ndata)
PHI = pg.Matrix()
fr = []
for i in range(3, A.rows()):
fr.append(A[i][0])
PHI.push_back(A[i](1, ndata))
return ab2, mn2, rhoa, PHI, fr
def makeSlmData(ab2, mn2, rhoa=None, filename=None):
"""generate a pygimli data container from ab/2 and mn/2 array."""
data = pg.DataContainer()
data.resize(len(ab2))
pos = N.unique(N.hstack((ab2, mn2)))
for elx in N.hstack((-pos[::-1], pos)):
data.createElectrode(elx, 0., 0.)
if filename is not None:
f = open(filename, 'w')
f.write(str(len(pos) * 2) + '\n#x y z\n')
for elx in N.hstack((-pos[::-1], pos)):
f.write(str(elx) + '\t0\t0\n')
f.write(str(len(ab2)) + '\n#a\tb\tm\tn\tk\trhoa\n')
lpos = len(pos)
iab = pos.searchsorted(ab2)
imn = pos.searchsorted(mn2)
if (filename is not None) & (rhoa is None):
rhoa = N.ones(len(ab2))
for i in range(len(iab)):
# print -pos[iab[i]], -pos[imn[i]], pos[imn[i]], pos[iab[i]]
k = (ab2[i]**2 - mn2[i]**2) * N.pi / mn2[i] / 2.0
if filename is not None:
f.write(str(lpos - iab[i]) + '\t' + str(lpos + iab[i] + 1) + '\t')
f.write(str(lpos - imn[i]) + '\t' + str(lpos + imn[i] + 1) + '\t')
f.write(str(rndig(k, 4)) + '\t' + str(rhoa[i]) + '\n')
data.createFourPointData(i, int(lpos - iab[i]), int(lpos + iab[i] + 1),
int(lpos - imn[i]), int(lpos + imn[i] + 1))
if filename is not None:
f.close()
data.set('rhoa', pg.asvector(rhoa))
return data
def showsounding(ab2, rhoa, resp=None, mn2=None, islog=True, xlab=None):
"""
Display a sounding curve (rhoa over ab/2) and an additional response.
"""
if xlab is None:
xlab = r'$\rho_a$ in $\Omega$m'
ab2a = N.asarray(ab2)
rhoa = N.asarray(rhoa)
if mn2 is None:
if islog:
l1 = P.loglog(rhoa, ab2, 'rx-', label='observed')
else:
l1 = P.semilogy(rhoa, ab2, 'rx-', label='observed')
P.hold(True)
if resp is not None:
if islog:
l2 = P.loglog(resp, ab2, 'bo-', label='simulated')
else:
l2 = P.semilogy(resp, ab2, 'bo-', label='simulated')
P.legend((l1, l2), ('obs', 'sim'), loc=0)
else:
for unmi in N.unique(mn2):
if islog:
l1 = P.loglog(rhoa[mn2 == unmi], ab2a[mn2 == unmi],
'rx-', label='observed')
else:
l1 = P.semilogy(rhoa[mn2 == unmi], ab2a[mn2 == unmi],
'rx-', label='observed')
P.hold(True)
if resp is not None:
l2 = P.loglog(resp[mn2 == unmi], ab2a[mn2 == unmi],
'bo-', label='simulated')
P.legend((l1, l2), ('obs', 'sim'))
P.axis('tight')
P.ylim((max(ab2), min(ab2)))
locs = P.yticks()[0]
if len(locs) < 2:
locs = N.hstack((min(ab2), locs, max(ab2)))
else:
locs[0] = max(locs[0], min(ab2))
locs[-1] = min(locs[-1], max(ab2))
a = []
for l in locs:
a.append('%g' % rndig(l))
P.yticks(locs, a)
locs = P.xticks()[0]
a = []
for l in locs:
a.append('%g' % rndig(l))
P.xticks(locs, a)
P.grid(which='both')
P.xlabel(xlab)
P.ylabel('AB/2 in m')
# P.legend()
P.show()
return
def showsip1ddata(PHI, fr, ab2, mn2=None, cmax=None, ylab=True, cbar=True):
"""display SIP phase data as image plot."""
P.cla()
ax = P.gca()
pal = P.cm.get_cmap()
pal.set_under('w')
pal.set_bad('w')
if isinstance(PHI, pg.Vector):
PHI = N.asarray(PHI)
im = P.imshow(PHI.reshape((len(ab2), len(fr))),
interpolation='nearest', cmap=pal)
if cmax is None:
cmax = N.max(PHI)
im.set_clim((0., cmax))
ax.xaxis.set_label_position('top')
P.xlabel('f in Hz')
a = []
df = 1
for f in fr[::df]:
a.append("%g" % rndig(f))
P.xticks(N.arange(0, len(fr), df), a)
xtl = ax.get_xticklabels()
for i, xtli in enumerate(xtl):
xtli.set_rotation('vertical')
if ylab:
a = []
yla = 'AB/2'
if mn2 is None:
for i in range(len(ab2)):
a.append(str(ab2[i]))
else:
yla = yla + '-MN/2'
for i in range(len(ab2)):
a.append('%g%g' % (rndig(ab2[i]), rndig(mn2[i])))
P.yticks(N.arange(len(ab2)), a)
P.ylabel(yla + ' in m')
if cbar:
P.colorbar(aspect=40, shrink=0.6)
P.ylim((len(ab2) - 0.5, -0.5))
P.show()
P.ylim((len(ab2) - 0.5, -0.5))
return
def showsip1dmodel(M, tau, thk, res=None, z=None,
cmin=None, cmax=None, islog=True):
"""
Display an SIP Debye block model as image.
"""
if z is None:
z = N.cumsum(N.hstack((0., thk)))
P.cla()
pal = P.cm.get_cmap()
pal.set_under('w')
pal.set_bad('w')
if isinstance(M, pg.Vector):
M = N.asarray(M)
if islog:
M = N.log10(M)
M = M.reshape((len(z), len(tau)))
im = P.imshow(M, interpolation='nearest', cmap=pal)
    if cmax is None:
        cmax = N.max(M)
im.set_clim((cmin, cmax))
a = []
for t in tau[::2]:
a.append("%g" % rndig(t * 1000, 2))
P.xticks(N.arange(0, len(tau), 2), a)
a = []
for zi in z:
a.append(str(zi))
P.yticks(N.arange(len(z)) - 0.5, a)
P.xlabel(r'$\tau$ in ms')
P.ylabel('z in m')
P.ylim((len(z) - 0.5, -0.5))
P.colorbar(orientation='horizontal', aspect=40, shrink=0.6)
if res is not None:
xl = P.xlim()[1]
for i in range(len(res)):
P.text(xl, i, r' %g $\Omega$m' % rndig(res[i], 2))
lgm = N.zeros((len(z), 1))
tch = N.zeros((len(z), 1))
lgt = N.log(tau)
if islog:
M = 10**M
for n in range(len(M)):
m = N.abs(M[n])
tch[n] = N.sum(m)
lgm[n] = N.exp(N.sum(m * lgt) / N.sum(m))
tpos = N.interp(N.log(lgm), N.log(tau), N.arange(len(tau)))
P.plot(tpos, N.arange(len(z)), 'w*')
P.title('logarithmized spectral chargeability')
P.show()
return lgm, tch
class DebyeModelling(pg.core.ModellingBase):
"""forward operator for Debye decomposition."""
def __init__(self, fvec, tvec=None, zero=False, verbose=False):
if tvec is None:
tvec = N.logspace(-4, 0, 5)
mesh = pg.meshtools.createMesh1D(len(tvec))
if zero:
mesh.cell(0).setMarker(-1)
mesh.cell(len(tvec) - 1).setMarker(1)
pg.core.ModellingBase.__init__(self, mesh, verbose)
self.f_ = pg.asvector(fvec)
self.t_ = tvec
self.zero_ = zero
def response(self, par):
"""phase spectrum as function of spectral chargeabilities."""
y = pg.Vector(len(self.f_), 0.0)
for (t, p) in zip(self.t_, par):
wt = self.f_ * 2.0 * P.pi * t
y = y + wt / (wt * wt + 1.) * p
return y
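# Editor's note: each Debye term accumulated in DebyeModelling.response() above
# contributes -m_k * Im[1 / (1 + 1j*w*tau_k)] = m_k * w*tau_k / (1 + (w*tau_k)**2),
# with w = 2*pi*f; the method sums exactly these contributions over all
# relaxation times.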
def DebyeDecomposition(fr, phi, maxfr=None, tv=None, verbose=False,
zero=False, err=0.25e-3, lam=10., blocky=False):
"""Debye decomposition of a phase spectrum."""
if maxfr is not None:
idx = (fr <= maxfr) & (phi >= 0.)
phi1 = phi[idx]
fr1 = fr[idx]
print("using frequencies from ", N.min(fr), " to ", N.max(fr), "Hz")
    else:
        phi1 = phi
        fr1 = fr
        idx = N.ones(len(fr), dtype=bool)  # keep every sample when no cutoff is given
if tv is None:
tmax = 1. / N.min(fr1) / 2. / N.pi * 4.
tmin = 1. / N.max(fr1) / 2. / N.pi / 8.
tvec = N.logspace(N.log10(tmin), N.log10(tmax), 30)
else:
tvec = tv
f = DebyeModelling(fr1, tvec, zero=zero)
tvec = f.t_
tm = pg.trans.TransLog()
start = pg.Vector(len(tvec), 1e-4)
if zero:
f.region(-1).setConstraintType(0) # smoothness
f.region(0).setConstraintType(1) # smoothness
f.region(1).setConstraintType(0) # min length
f.regionManager().setInterRegionConstraint(-1, 0, 1.)
f.regionManager().setInterRegionConstraint(0, 1, 1.)
f.region(-1).setTransModel(tm)
f.region(0).setTransModel(tm)
f.region(1).setTransModel(tm)
f.region(-1).setModelControl(1000.)
f.region(1).setModelControl(1000.)
else:
f.regionManager().setConstraintType(1) # smoothness
inv = pg.Inversion(pg.asvector(phi1 * 1e-3), f, verbose)
inv.setAbsoluteError(pg.Vector(len(fr1), err))
inv.setLambda(lam)
inv.setModel(start)
inv.setBlockyModel(blocky)
if zero:
inv.setReferenceModel(start)
else:
inv.setTransModel(tm)
mvec = inv.run()
resp = inv.response()
return tvec, mvec, N.array(resp) * 1e3, idx
class DoubleColeColeModelling(pg.core.ModellingBase):
"""
Modelling using two Cole-Cole terms
"""
def __init__(self, mesh, fvec, si=1.0, verbose=False):
pg.core.ModellingBase.__init__(self, mesh, verbose)
self.f_ = fvec
self.si_ = si
def response(self, par):
"""yields phase response response of double Cole Cole model."""
y = pg.Vector(self.f_.size(), 0.0)
wti = self.f_ * par[1] * 2.0 * P.pi
wte = self.f_ * par[4] * 2.0 * P.pi
for i in range(0, y.size()):
cpI = 1. / (N.power(wti[i] * 1j, par[2]) + 1.)
cpE = 1. / (N.power(wte[i] * 1j, par[5]) + 1.)
y[i] = - N.imag(cpI) * par[0] - N.imag(cpE) * par[3] * self.si_
# y[i] = - par[0] - N.imag(cpE) * par[3] * self.si_
return y
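# Editor's note: the loop above evaluates -m * Im[1 / (1 + (1j*w*tau)**c)] for the
# intrinsic (IP) and the electromagnetic (EM) Cole-Cole term, with
# par = [m_ip, tau_ip, c_ip, m_em, tau_em, c_em] and the EM contribution scaled
# by the sign stored in si_.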
def ReadAndRemoveEM(filename, readsecond=False, doplot=False,
dellast=True, ePhi=0.5, ePerc=1., lam=2000.):
"""
    Read a res1 file and remove EM effects using a double-Cole-Cole model.

    fr, rhoa, resid, phi, dphi, chi2, emphi = ReadAndRemoveEM(filename, ...)
"""
fr, rhoa, phi, drhoa, dphi = read1resfile(filename,
readsecond,
dellast=dellast)
# forward problem
mesh = pg.meshtools.createMesh1D(1, 6) # 6 independent parameters
f = DoubleColeColeModelling(mesh, pg.asvector(fr), phi[2] / abs(phi[2]))
f.regionManager().loadMap("region.control")
model = f.createStartVector()
# inversion
inv = pg.Inversion(phi, f, True, False)
inv.setAbsoluteError(phi * ePerc * 0.01 + ePhi / 1000.)
inv.setRobustData(True)
    # inv.setCWeight(pg.Vector(6, 1.0))  # what was this actually good for?
inv.setMarquardtScheme(0.8)
inv.setLambda(lam)
inv.setModel(model)
erg = inv.run()
inv.echoStatus()
chi2 = inv.chi2()
mod0 = pg.Vector(erg)
mod0[0] = 0.0 # set IP term to zero to obtain pure EM term
emphi = f.response(mod0)
resid = (phi - emphi) * 1000.
if doplot:
s = "IP: m= " + str(rndig(erg[0])) + " t=" + str(rndig(erg[1])) + \
" c =" + str(rndig(erg[2]))
s += " EM: m= " + str(rndig(erg[3])) + " t=" + str(rndig(erg[4])) + \
" c =" + str(rndig(erg[5]))
fig = P.figure(1)
fig.clf()
ax = P.subplot(111)
P.errorbar(
fr,
phi *
1000.,
yerr=dphi *
1000.,
fmt='x-',
label='measured')
ax.set_xscale('log')
P.semilogx(fr, emphi * 1000., label='EM term (CC)')
P.errorbar(fr, resid, yerr=dphi * 1000., label='IP term')
ax.set_yscale('log')
P.xlim((min(fr), max(fr)))
P.ylim((0.1, max(phi) * 1000.))
P.xlabel('f in Hz')
P.ylabel(r'-$\phi$ in mrad')
P.grid(True)
P.title(s)
P.legend(loc=2) # ('measured','2-cole-cole','residual'))
fig.show()
return N.array(fr), N.array(rhoa), N.array(resid), N.array(
phi) * 1e3, dphi, chi2, N.array(emphi) * 1e3
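# Example sketch (editor's addition): a typical call, assuming 'profile.res1'
# exists in the working directory and the phases are stored in mrad:
#   fr, rhoa, resid, phi, dphi, chi2, emphi = ReadAndRemoveEM('profile.res1',
#                                                             doplot=True)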
|
examples/plotting/file/hover_glyph.py | g-parki/bokeh | 15,193 | 12763408 | from bokeh.models import HoverTool
from bokeh.plotting import figure, output_file, show
from bokeh.sampledata.glucose import data
x = data.loc['2010-10-06'].index.to_series()
y = data.loc['2010-10-06']['glucose']
# Basic plot setup
p = figure(width=800, height=400, x_axis_type="datetime",
tools="", toolbar_location=None, title='Hover over points')
p.ygrid.grid_line_color = None
p.background_fill_color = "#fafafa"
p.line(x, y, line_dash="4 4", line_width=1, color='gray')
cr = p.circle(x, y, size=20,
fill_color="steelblue", alpha=0.1, line_color=None,
hover_fill_color="midnightblue", hover_alpha=0.5,
hover_line_color="white")
p.add_tools(HoverTool(tooltips=None, renderers=[cr], mode='hline'))
output_file("hover_glyph.html", title="hover_glyph.py example")
show(p)
|
Code-Sleep-Python/Encryption-Techniques/AES/tables.py | shardul08/Code-Sleep-Python | 420 | 12763421 | sbox = [
0x63, 0x7C, 0x77, 0x7B, 0xF2, 0x6B, 0x6F, 0xC5, 0x30, 0x01, 0x67, 0x2B, 0xFE, 0xD7, 0xAB, 0x76,
0xCA, 0x82, 0xC9, 0x7D, 0xFA, 0x59, 0x47, 0xF0, 0xAD, 0xD4, 0xA2, 0xAF, 0x9C, 0xA4, 0x72, 0xC0,
0xB7, 0xFD, 0x93, 0x26, 0x36, 0x3F, 0xF7, 0xCC, 0x34, 0xA5, 0xE5, 0xF1, 0x71, 0xD8, 0x31, 0x15,
0x04, 0xC7, 0x23, 0xC3, 0x18, 0x96, 0x05, 0x9A, 0x07, 0x12, 0x80, 0xE2, 0xEB, 0x27, 0xB2, 0x75,
0x09, 0x83, 0x2C, 0x1A, 0x1B, 0x6E, 0x5A, 0xA0, 0x52, 0x3B, 0xD6, 0xB3, 0x29, 0xE3, 0x2F, 0x84,
0x53, 0xD1, 0x00, 0xED, 0x20, 0xFC, 0xB1, 0x5B, 0x6A, 0xCB, 0xBE, 0x39, 0x4A, 0x4C, 0x58, 0xCF,
0xD0, 0xEF, 0xAA, 0xFB, 0x43, 0x4D, 0x33, 0x85, 0x45, 0xF9, 0x02, 0x7F, 0x50, 0x3C, 0x9F, 0xA8,
0x51, 0xA3, 0x40, 0x8F, 0x92, 0x9D, 0x38, 0xF5, 0xBC, 0xB6, 0xDA, 0x21, 0x10, 0xFF, 0xF3, 0xD2,
0xCD, 0x0C, 0x13, 0xEC, 0x5F, 0x97, 0x44, 0x17, 0xC4, 0xA7, 0x7E, 0x3D, 0x64, 0x5D, 0x19, 0x73,
0x60, 0x81, 0x4F, 0xDC, 0x22, 0x2A, 0x90, 0x88, 0x46, 0xEE, 0xB8, 0x14, 0xDE, 0x5E, 0x0B, 0xDB,
0xE0, 0x32, 0x3A, 0x0A, 0x49, 0x06, 0x24, 0x5C, 0xC2, 0xD3, 0xAC, 0x62, 0x91, 0x95, 0xE4, 0x79,
0xE7, 0xC8, 0x37, 0x6D, 0x8D, 0xD5, 0x4E, 0xA9, 0x6C, 0x56, 0xF4, 0xEA, 0x65, 0x7A, 0xAE, 0x08,
0xBA, 0x78, 0x25, 0x2E, 0x1C, 0xA6, 0xB4, 0xC6, 0xE8, 0xDD, 0x74, 0x1F, 0x4B, 0xBD, 0x8B, 0x8A,
0x70, 0x3E, 0xB5, 0x66, 0x48, 0x03, 0xF6, 0x0E, 0x61, 0x35, 0x57, 0xB9, 0x86, 0xC1, 0x1D, 0x9E,
0xE1, 0xF8, 0x98, 0x11, 0x69, 0xD9, 0x8E, 0x94, 0x9B, 0x1E, 0x87, 0xE9, 0xCE, 0x55, 0x28, 0xDF,
0x8C, 0xA1, 0x89, 0x0D, 0xBF, 0xE6, 0x42, 0x68, 0x41, 0x99, 0x2D, 0x0F, 0xB0, 0x54, 0xBB, 0x16
]
sboxInv = [
0x52, 0x09, 0x6A, 0xD5, 0x30, 0x36, 0xA5, 0x38, 0xBF, 0x40, 0xA3, 0x9E, 0x81, 0xF3, 0xD7, 0xFB,
0x7C, 0xE3, 0x39, 0x82, 0x9B, 0x2F, 0xFF, 0x87, 0x34, 0x8E, 0x43, 0x44, 0xC4, 0xDE, 0xE9, 0xCB,
0x54, 0x7B, 0x94, 0x32, 0xA6, 0xC2, 0x23, 0x3D, 0xEE, 0x4C, 0x95, 0x0B, 0x42, 0xFA, 0xC3, 0x4E,
0x08, 0x2E, 0xA1, 0x66, 0x28, 0xD9, 0x24, 0xB2, 0x76, 0x5B, 0xA2, 0x49, 0x6D, 0x8B, 0xD1, 0x25,
0x72, 0xF8, 0xF6, 0x64, 0x86, 0x68, 0x98, 0x16, 0xD4, 0xA4, 0x5C, 0xCC, 0x5D, 0x65, 0xB6, 0x92,
0x6C, 0x70, 0x48, 0x50, 0xFD, 0xED, 0xB9, 0xDA, 0x5E, 0x15, 0x46, 0x57, 0xA7, 0x8D, 0x9D, 0x84,
0x90, 0xD8, 0xAB, 0x00, 0x8C, 0xBC, 0xD3, 0x0A, 0xF7, 0xE4, 0x58, 0x05, 0xB8, 0xB3, 0x45, 0x06,
0xD0, 0x2C, 0x1E, 0x8F, 0xCA, 0x3F, 0x0F, 0x02, 0xC1, 0xAF, 0xBD, 0x03, 0x01, 0x13, 0x8A, 0x6B,
0x3A, 0x91, 0x11, 0x41, 0x4F, 0x67, 0xDC, 0xEA, 0x97, 0xF2, 0xCF, 0xCE, 0xF0, 0xB4, 0xE6, 0x73,
0x96, 0xAC, 0x74, 0x22, 0xE7, 0xAD, 0x35, 0x85, 0xE2, 0xF9, 0x37, 0xE8, 0x1C, 0x75, 0xDF, 0x6E,
0x47, 0xF1, 0x1A, 0x71, 0x1D, 0x29, 0xC5, 0x89, 0x6F, 0xB7, 0x62, 0x0E, 0xAA, 0x18, 0xBE, 0x1B,
0xFC, 0x56, 0x3E, 0x4B, 0xC6, 0xD2, 0x79, 0x20, 0x9A, 0xDB, 0xC0, 0xFE, 0x78, 0xCD, 0x5A, 0xF4,
0x1F, 0xDD, 0xA8, 0x33, 0x88, 0x07, 0xC7, 0x31, 0xB1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xEC, 0x5F,
0x60, 0x51, 0x7F, 0xA9, 0x19, 0xB5, 0x4A, 0x0D, 0x2D, 0xE5, 0x7A, 0x9F, 0x93, 0xC9, 0x9C, 0xEF,
0xA0, 0xE0, 0x3B, 0x4D, 0xAE, 0x2A, 0xF5, 0xB0, 0xC8, 0xEB, 0xBB, 0x3C, 0x83, 0x53, 0x99, 0x61,
0x17, 0x2B, 0x04, 0x7E, 0xBA, 0x77, 0xD6, 0x26, 0xE1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0C, 0x7D
]
rCon = [
0x8d, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36, 0x6c, 0xd8, 0xab, 0x4d, 0x9a,
0x2f, 0x5e, 0xbc, 0x63, 0xc6, 0x97, 0x35, 0x6a, 0xd4, 0xb3, 0x7d, 0xfa, 0xef, 0xc5, 0x91, 0x39,
0x72, 0xe4, 0xd3, 0xbd, 0x61, 0xc2, 0x9f, 0x25, 0x4a, 0x94, 0x33, 0x66, 0xcc, 0x83, 0x1d, 0x3a,
0x74, 0xe8, 0xcb, 0x8d, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36, 0x6c, 0xd8,
0xab, 0x4d, 0x9a, 0x2f, 0x5e, 0xbc, 0x63, 0xc6, 0x97, 0x35, 0x6a, 0xd4, 0xb3, 0x7d, 0xfa, 0xef,
0xc5, 0x91, 0x39, 0x72, 0xe4, 0xd3, 0xbd, 0x61, 0xc2, 0x9f, 0x25, 0x4a, 0x94, 0x33, 0x66, 0xcc,
0x83, 0x1d, 0x3a, 0x74, 0xe8, 0xcb, 0x8d, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b,
0x36, 0x6c, 0xd8, 0xab, 0x4d, 0x9a, 0x2f, 0x5e, 0xbc, 0x63, 0xc6, 0x97, 0x35, 0x6a, 0xd4, 0xb3,
0x7d, 0xfa, 0xef, 0xc5, 0x91, 0x39, 0x72, 0xe4, 0xd3, 0xbd, 0x61, 0xc2, 0x9f, 0x25, 0x4a, 0x94,
0x33, 0x66, 0xcc, 0x83, 0x1d, 0x3a, 0x74, 0xe8, 0xcb, 0x8d, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20,
0x40, 0x80, 0x1b, 0x36, 0x6c, 0xd8, 0xab, 0x4d, 0x9a, 0x2f, 0x5e, 0xbc, 0x63, 0xc6, 0x97, 0x35,
0x6a, 0xd4, 0xb3, 0x7d, 0xfa, 0xef, 0xc5, 0x91, 0x39, 0x72, 0xe4, 0xd3, 0xbd, 0x61, 0xc2, 0x9f,
0x25, 0x4a, 0x94, 0x33, 0x66, 0xcc, 0x83, 0x1d, 0x3a, 0x74, 0xe8, 0xcb, 0x8d, 0x01, 0x02, 0x04,
0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36, 0x6c, 0xd8, 0xab, 0x4d, 0x9a, 0x2f, 0x5e, 0xbc, 0x63,
0xc6, 0x97, 0x35, 0x6a, 0xd4, 0xb3, 0x7d, 0xfa, 0xef, 0xc5, 0x91, 0x39, 0x72, 0xe4, 0xd3, 0xbd,
0x61, 0xc2, 0x9f, 0x25, 0x4a, 0x94, 0x33, 0x66, 0xcc, 0x83, 0x1d, 0x3a, 0x74, 0xe8, 0xcb, 0x8d
]
vector_table = [2, 3, 1, 1,
1, 2, 3, 1,
1, 1, 2, 3,
3, 1, 1, 2]
table_2 = [ 0x00, 0x02, 0x04, 0x06, 0x08, 0x0a, 0x0c, 0x0e, 0x10, 0x12, 0x14, 0x16, 0x18, 0x1a, 0x1c, 0x1e,
0x20, 0x22, 0x24, 0x26, 0x28, 0x2a, 0x2c, 0x2e, 0x30, 0x32, 0x34, 0x36, 0x38, 0x3a, 0x3c, 0x3e,
0x40, 0x42, 0x44, 0x46, 0x48, 0x4a, 0x4c, 0x4e, 0x50, 0x52, 0x54, 0x56, 0x58, 0x5a, 0x5c, 0x5e,
0x60, 0x62, 0x64, 0x66, 0x68, 0x6a, 0x6c, 0x6e, 0x70, 0x72, 0x74, 0x76, 0x78, 0x7a, 0x7c, 0x7e,
0x80, 0x82, 0x84, 0x86, 0x88, 0x8a, 0x8c, 0x8e, 0x90, 0x92, 0x94, 0x96, 0x98, 0x9a, 0x9c, 0x9e,
0xa0, 0xa2, 0xa4, 0xa6, 0xa8, 0xaa, 0xac, 0xae, 0xb0, 0xb2, 0xb4, 0xb6, 0xb8, 0xba, 0xbc, 0xbe,
0xc0, 0xc2, 0xc4, 0xc6, 0xc8, 0xca, 0xcc, 0xce, 0xd0, 0xd2, 0xd4, 0xd6, 0xd8, 0xda, 0xdc, 0xde,
0xe0, 0xe2, 0xe4, 0xe6, 0xe8, 0xea, 0xec, 0xee, 0xf0, 0xf2, 0xf4, 0xf6, 0xf8, 0xfa, 0xfc, 0xfe,
0x1b, 0x19, 0x1f, 0x1d, 0x13, 0x11, 0x17, 0x15, 0x0b, 0x09, 0x0f, 0x0d, 0x03, 0x01, 0x07, 0x05,
0x3b, 0x39, 0x3f, 0x3d, 0x33, 0x31, 0x37, 0x35, 0x2b, 0x29, 0x2f, 0x2d, 0x23, 0x21, 0x27, 0x25,
0x5b, 0x59, 0x5f, 0x5d, 0x53, 0x51, 0x57, 0x55, 0x4b, 0x49, 0x4f, 0x4d, 0x43, 0x41, 0x47, 0x45,
0x7b, 0x79, 0x7f, 0x7d, 0x73, 0x71, 0x77, 0x75, 0x6b, 0x69, 0x6f, 0x6d, 0x63, 0x61, 0x67, 0x65,
0x9b, 0x99, 0x9f, 0x9d, 0x93, 0x91, 0x97, 0x95, 0x8b, 0x89, 0x8f, 0x8d, 0x83, 0x81, 0x87, 0x85,
0xbb, 0xb9, 0xbf, 0xbd, 0xb3, 0xb1, 0xb7, 0xb5, 0xab, 0xa9, 0xaf, 0xad, 0xa3, 0xa1, 0xa7, 0xa5,
0xdb, 0xd9, 0xdf, 0xdd, 0xd3, 0xd1, 0xd7, 0xd5, 0xcb, 0xc9, 0xcf, 0xcd, 0xc3, 0xc1, 0xc7, 0xc5,
0xfb, 0xf9, 0xff, 0xfd, 0xf3, 0xf1, 0xf7, 0xf5, 0xeb, 0xe9, 0xef, 0xed, 0xe3, 0xe1, 0xe7, 0xe5]
table_3 = [ 0x00, 0x03, 0x06, 0x05, 0x0c, 0x0f, 0x0a, 0x09, 0x18, 0x1b, 0x1e, 0x1d, 0x14, 0x17, 0x12, 0x11,
0x30, 0x33, 0x36, 0x35, 0x3c, 0x3f, 0x3a, 0x39, 0x28, 0x2b, 0x2e, 0x2d, 0x24, 0x27, 0x22, 0x21,
0x60, 0x63, 0x66, 0x65, 0x6c, 0x6f, 0x6a, 0x69, 0x78, 0x7b, 0x7e, 0x7d, 0x74, 0x77, 0x72, 0x71,
0x50, 0x53, 0x56, 0x55, 0x5c, 0x5f, 0x5a, 0x59, 0x48, 0x4b, 0x4e, 0x4d, 0x44, 0x47, 0x42, 0x41,
0xc0, 0xc3, 0xc6, 0xc5, 0xcc, 0xcf, 0xca, 0xc9, 0xd8, 0xdb, 0xde, 0xdd, 0xd4, 0xd7, 0xd2, 0xd1,
0xf0, 0xf3, 0xf6, 0xf5, 0xfc, 0xff, 0xfa, 0xf9, 0xe8, 0xeb, 0xee, 0xed, 0xe4, 0xe7, 0xe2, 0xe1,
0xa0, 0xa3, 0xa6, 0xa5, 0xac, 0xaf, 0xaa, 0xa9, 0xb8, 0xbb, 0xbe, 0xbd, 0xb4, 0xb7, 0xb2, 0xb1,
0x90, 0x93, 0x96, 0x95, 0x9c, 0x9f, 0x9a, 0x99, 0x88, 0x8b, 0x8e, 0x8d, 0x84, 0x87, 0x82, 0x81,
0x9b, 0x98, 0x9d, 0x9e, 0x97, 0x94, 0x91, 0x92, 0x83, 0x80, 0x85, 0x86, 0x8f, 0x8c, 0x89, 0x8a,
0xab, 0xa8, 0xad, 0xae, 0xa7, 0xa4, 0xa1, 0xa2, 0xb3, 0xb0, 0xb5, 0xb6, 0xbf, 0xbc, 0xb9, 0xba,
0xfb, 0xf8, 0xfd, 0xfe, 0xf7, 0xf4, 0xf1, 0xf2, 0xe3, 0xe0, 0xe5, 0xe6, 0xef, 0xec, 0xe9, 0xea,
0xcb, 0xc8, 0xcd, 0xce, 0xc7, 0xc4, 0xc1, 0xc2, 0xd3, 0xd0, 0xd5, 0xd6, 0xdf, 0xdc, 0xd9, 0xda,
0x5b, 0x58, 0x5d, 0x5e, 0x57, 0x54, 0x51, 0x52, 0x43, 0x40, 0x45, 0x46, 0x4f, 0x4c, 0x49, 0x4a,
0x6b, 0x68, 0x6d, 0x6e, 0x67, 0x64, 0x61, 0x62, 0x73, 0x70, 0x75, 0x76, 0x7f, 0x7c, 0x79, 0x7a,
0x3b, 0x38, 0x3d, 0x3e, 0x37, 0x34, 0x31, 0x32, 0x23, 0x20, 0x25, 0x26, 0x2f, 0x2c, 0x29, 0x2a,
0x0b, 0x08, 0x0d, 0x0e, 0x07, 0x04, 0x01, 0x02, 0x13, 0x10, 0x15, 0x16, 0x1f, 0x1c, 0x19, 0x1a]
table_9 = [ 0x00, 0x09, 0x12, 0x1b, 0x24, 0x2d, 0x36, 0x3f, 0x48, 0x41, 0x5a, 0x53, 0x6c, 0x65, 0x7e, 0x77,
0x90, 0x99, 0x82, 0x8b, 0xb4, 0xbd, 0xa6, 0xaf, 0xd8, 0xd1, 0xca, 0xc3, 0xfc, 0xf5, 0xee, 0xe7,
0x3b, 0x32, 0x29, 0x20, 0x1f, 0x16, 0x0d, 0x04, 0x73, 0x7a, 0x61, 0x68, 0x57, 0x5e, 0x45, 0x4c,
0xab, 0xa2, 0xb9, 0xb0, 0x8f, 0x86, 0x9d, 0x94, 0xe3, 0xea, 0xf1, 0xf8, 0xc7, 0xce, 0xd5, 0xdc,
0x76, 0x7f, 0x64, 0x6d, 0x52, 0x5b, 0x40, 0x49, 0x3e, 0x37, 0x2c, 0x25, 0x1a, 0x13, 0x08, 0x01,
0xe6, 0xef, 0xf4, 0xfd, 0xc2, 0xcb, 0xd0, 0xd9, 0xae, 0xa7, 0xbc, 0xb5, 0x8a, 0x83, 0x98, 0x91,
0x4d, 0x44, 0x5f, 0x56, 0x69, 0x60, 0x7b, 0x72, 0x05, 0x0c, 0x17, 0x1e, 0x21, 0x28, 0x33, 0x3a,
0xdd, 0xd4, 0xcf, 0xc6, 0xf9, 0xf0, 0xeb, 0xe2, 0x95, 0x9c, 0x87, 0x8e, 0xb1, 0xb8, 0xa3, 0xaa,
0xec, 0xe5, 0xfe, 0xf7, 0xc8, 0xc1, 0xda, 0xd3, 0xa4, 0xad, 0xb6, 0xbf, 0x80, 0x89, 0x92, 0x9b,
0x7c, 0x75, 0x6e, 0x67, 0x58, 0x51, 0x4a, 0x43, 0x34, 0x3d, 0x26, 0x2f, 0x10, 0x19, 0x02, 0x0b,
0xd7, 0xde, 0xc5, 0xcc, 0xf3, 0xfa, 0xe1, 0xe8, 0x9f, 0x96, 0x8d, 0x84, 0xbb, 0xb2, 0xa9, 0xa0,
0x47, 0x4e, 0x55, 0x5c, 0x63, 0x6a, 0x71, 0x78, 0x0f, 0x06, 0x1d, 0x14, 0x2b, 0x22, 0x39, 0x30,
0x9a, 0x93, 0x88, 0x81, 0xbe, 0xb7, 0xac, 0xa5, 0xd2, 0xdb, 0xc0, 0xc9, 0xf6, 0xff, 0xe4, 0xed,
0x0a, 0x03, 0x18, 0x11, 0x2e, 0x27, 0x3c, 0x35, 0x42, 0x4b, 0x50, 0x59, 0x66, 0x6f, 0x74, 0x7d,
0xa1, 0xa8, 0xb3, 0xba, 0x85, 0x8c, 0x97, 0x9e, 0xe9, 0xe0, 0xfb, 0xf2, 0xcd, 0xc4, 0xdf, 0xd6,
0x31, 0x38, 0x23, 0x2a, 0x15, 0x1c, 0x07, 0x0e, 0x79, 0x70, 0x6b, 0x62, 0x5d, 0x54, 0x4f, 0x46]
table_11 = [0x00, 0x0b, 0x16, 0x1d, 0x2c, 0x27, 0x3a, 0x31, 0x58, 0x53, 0x4e, 0x45, 0x74, 0x7f, 0x62, 0x69,
0xb0, 0xbb, 0xa6, 0xad, 0x9c, 0x97, 0x8a, 0x81, 0xe8, 0xe3, 0xfe, 0xf5, 0xc4, 0xcf, 0xd2, 0xd9,
0x7b, 0x70, 0x6d, 0x66, 0x57, 0x5c, 0x41, 0x4a, 0x23, 0x28, 0x35, 0x3e, 0x0f, 0x04, 0x19, 0x12,
0xcb, 0xc0, 0xdd, 0xd6, 0xe7, 0xec, 0xf1, 0xfa, 0x93, 0x98, 0x85, 0x8e, 0xbf, 0xb4, 0xa9, 0xa2,
0xf6, 0xfd, 0xe0, 0xeb, 0xda, 0xd1, 0xcc, 0xc7, 0xae, 0xa5, 0xb8, 0xb3, 0x82, 0x89, 0x94, 0x9f,
0x46, 0x4d, 0x50, 0x5b, 0x6a, 0x61, 0x7c, 0x77, 0x1e, 0x15, 0x08, 0x03, 0x32, 0x39, 0x24, 0x2f,
0x8d, 0x86, 0x9b, 0x90, 0xa1, 0xaa, 0xb7, 0xbc, 0xd5, 0xde, 0xc3, 0xc8, 0xf9, 0xf2, 0xef, 0xe4,
0x3d, 0x36, 0x2b, 0x20, 0x11, 0x1a, 0x07, 0x0c, 0x65, 0x6e, 0x73, 0x78, 0x49, 0x42, 0x5f, 0x54,
0xf7, 0xfc, 0xe1, 0xea, 0xdb, 0xd0, 0xcd, 0xc6, 0xaf, 0xa4, 0xb9, 0xb2, 0x83, 0x88, 0x95, 0x9e,
0x47, 0x4c, 0x51, 0x5a, 0x6b, 0x60, 0x7d, 0x76, 0x1f, 0x14, 0x09, 0x02, 0x33, 0x38, 0x25, 0x2e,
0x8c, 0x87, 0x9a, 0x91, 0xa0, 0xab, 0xb6, 0xbd, 0xd4, 0xdf, 0xc2, 0xc9, 0xf8, 0xf3, 0xee, 0xe5,
0x3c, 0x37, 0x2a, 0x21, 0x10, 0x1b, 0x06, 0x0d, 0x64, 0x6f, 0x72, 0x79, 0x48, 0x43, 0x5e, 0x55,
0x01, 0x0a, 0x17, 0x1c, 0x2d, 0x26, 0x3b, 0x30, 0x59, 0x52, 0x4f, 0x44, 0x75, 0x7e, 0x63, 0x68,
0xb1, 0xba, 0xa7, 0xac, 0x9d, 0x96, 0x8b, 0x80, 0xe9, 0xe2, 0xff, 0xf4, 0xc5, 0xce, 0xd3, 0xd8,
0x7a, 0x71, 0x6c, 0x67, 0x56, 0x5d, 0x40, 0x4b, 0x22, 0x29, 0x34, 0x3f, 0x0e, 0x05, 0x18, 0x13,
0xca, 0xc1, 0xdc, 0xd7, 0xe6, 0xed, 0xf0, 0xfb, 0x92, 0x99, 0x84, 0x8f, 0xbe, 0xb5, 0xa8, 0xa3]
table_13 = [0x00, 0x0d, 0x1a, 0x17, 0x34, 0x39, 0x2e, 0x23, 0x68, 0x65, 0x72, 0x7f, 0x5c, 0x51, 0x46, 0x4b,
0xd0, 0xdd, 0xca, 0xc7, 0xe4, 0xe9, 0xfe, 0xf3, 0xb8, 0xb5, 0xa2, 0xaf, 0x8c, 0x81, 0x96, 0x9b,
0xbb, 0xb6, 0xa1, 0xac, 0x8f, 0x82, 0x95, 0x98, 0xd3, 0xde, 0xc9, 0xc4, 0xe7, 0xea, 0xfd, 0xf0,
0x6b, 0x66, 0x71, 0x7c, 0x5f, 0x52, 0x45, 0x48, 0x03, 0x0e, 0x19, 0x14, 0x37, 0x3a, 0x2d, 0x20,
0x6d, 0x60, 0x77, 0x7a, 0x59, 0x54, 0x43, 0x4e, 0x05, 0x08, 0x1f, 0x12, 0x31, 0x3c, 0x2b, 0x26,
0xbd, 0xb0, 0xa7, 0xaa, 0x89, 0x84, 0x93, 0x9e, 0xd5, 0xd8, 0xcf, 0xc2, 0xe1, 0xec, 0xfb, 0xf6,
0xd6, 0xdb, 0xcc, 0xc1, 0xe2, 0xef, 0xf8, 0xf5, 0xbe, 0xb3, 0xa4, 0xa9, 0x8a, 0x87, 0x90, 0x9d,
0x06, 0x0b, 0x1c, 0x11, 0x32, 0x3f, 0x28, 0x25, 0x6e, 0x63, 0x74, 0x79, 0x5a, 0x57, 0x40, 0x4d,
0xda, 0xd7, 0xc0, 0xcd, 0xee, 0xe3, 0xf4, 0xf9, 0xb2, 0xbf, 0xa8, 0xa5, 0x86, 0x8b, 0x9c, 0x91,
0x0a, 0x07, 0x10, 0x1d, 0x3e, 0x33, 0x24, 0x29, 0x62, 0x6f, 0x78, 0x75, 0x56, 0x5b, 0x4c, 0x41,
0x61, 0x6c, 0x7b, 0x76, 0x55, 0x58, 0x4f, 0x42, 0x09, 0x04, 0x13, 0x1e, 0x3d, 0x30, 0x27, 0x2a,
0xb1, 0xbc, 0xab, 0xa6, 0x85, 0x88, 0x9f, 0x92, 0xd9, 0xd4, 0xc3, 0xce, 0xed, 0xe0, 0xf7, 0xfa,
0xb7, 0xba, 0xad, 0xa0, 0x83, 0x8e, 0x99, 0x94, 0xdf, 0xd2, 0xc5, 0xc8, 0xeb, 0xe6, 0xf1, 0xfc,
0x67, 0x6a, 0x7d, 0x70, 0x53, 0x5e, 0x49, 0x44, 0x0f, 0x02, 0x15, 0x18, 0x3b, 0x36, 0x21, 0x2c,
0x0c, 0x01, 0x16, 0x1b, 0x38, 0x35, 0x22, 0x2f, 0x64, 0x69, 0x7e, 0x73, 0x50, 0x5d, 0x4a, 0x47,
0xdc, 0xd1, 0xc6, 0xcb, 0xe8, 0xe5, 0xf2, 0xff, 0xb4, 0xb9, 0xae, 0xa3, 0x80, 0x8d, 0x9a, 0x97]
table_14 = [0x00, 0x0e, 0x1c, 0x12, 0x38, 0x36, 0x24, 0x2a, 0x70, 0x7e, 0x6c, 0x62, 0x48, 0x46, 0x54, 0x5a,
0xe0, 0xee, 0xfc, 0xf2, 0xd8, 0xd6, 0xc4, 0xca, 0x90, 0x9e, 0x8c, 0x82, 0xa8, 0xa6, 0xb4, 0xba,
0xdb, 0xd5, 0xc7, 0xc9, 0xe3, 0xed, 0xff, 0xf1, 0xab, 0xa5, 0xb7, 0xb9, 0x93, 0x9d, 0x8f, 0x81,
0x3b, 0x35, 0x27, 0x29, 0x03, 0x0d, 0x1f, 0x11, 0x4b, 0x45, 0x57, 0x59, 0x73, 0x7d, 0x6f, 0x61,
0xad, 0xa3, 0xb1, 0xbf, 0x95, 0x9b, 0x89, 0x87, 0xdd, 0xd3, 0xc1, 0xcf, 0xe5, 0xeb, 0xf9, 0xf7,
0x4d, 0x43, 0x51, 0x5f, 0x75, 0x7b, 0x69, 0x67, 0x3d, 0x33, 0x21, 0x2f, 0x05, 0x0b, 0x19, 0x17,
0x76, 0x78, 0x6a, 0x64, 0x4e, 0x40, 0x52, 0x5c, 0x06, 0x08, 0x1a, 0x14, 0x3e, 0x30, 0x22, 0x2c,
0x96, 0x98, 0x8a, 0x84, 0xae, 0xa0, 0xb2, 0xbc, 0xe6, 0xe8, 0xfa, 0xf4, 0xde, 0xd0, 0xc2, 0xcc,
0x41, 0x4f, 0x5d, 0x53, 0x79, 0x77, 0x65, 0x6b, 0x31, 0x3f, 0x2d, 0x23, 0x09, 0x07, 0x15, 0x1b,
0xa1, 0xaf, 0xbd, 0xb3, 0x99, 0x97, 0x85, 0x8b, 0xd1, 0xdf, 0xcd, 0xc3, 0xe9, 0xe7, 0xf5, 0xfb,
0x9a, 0x94, 0x86, 0x88, 0xa2, 0xac, 0xbe, 0xb0, 0xea, 0xe4, 0xf6, 0xf8, 0xd2, 0xdc, 0xce, 0xc0,
0x7a, 0x74, 0x66, 0x68, 0x42, 0x4c, 0x5e, 0x50, 0x0a, 0x04, 0x16, 0x18, 0x32, 0x3c, 0x2e, 0x20,
0xec, 0xe2, 0xf0, 0xfe, 0xd4, 0xda, 0xc8, 0xc6, 0x9c, 0x92, 0x80, 0x8e, 0xa4, 0xaa, 0xb8, 0xb6,
0x0c, 0x02, 0x10, 0x1e, 0x34, 0x3a, 0x28, 0x26, 0x7c, 0x72, 0x60, 0x6e, 0x44, 0x4a, 0x58, 0x56,
0x37, 0x39, 0x2b, 0x25, 0x0f, 0x01, 0x13, 0x1d, 0x47, 0x49, 0x5b, 0x55, 0x7f, 0x71, 0x63, 0x6d,
0xd7, 0xd9, 0xcb, 0xc5, 0xef, 0xe1, 0xf3, 0xfd, 0xa7, 0xa9, 0xbb, 0xb5, 0x9f, 0x91, 0x83, 0x8d] |
corehq/ex-submodules/couchforms/const.py | dimagilg/commcare-hq | 471 | 12763439 |
TAG_TYPE = "#type"
TAG_XML = "#xml"
TAG_VERSION = "@version"
TAG_UIVERSION = "@uiVersion"
TAG_NAMESPACE = "@xmlns"
TAG_NAME = "@name"
TAG_META = "meta"
TAG_FORM = 'form'
ATTACHMENT_NAME = "form.xml"
MAGIC_PROPERTY = 'xml_submission_file'
RESERVED_WORDS = [TAG_TYPE, TAG_XML, TAG_VERSION, TAG_UIVERSION, TAG_NAMESPACE,
TAG_NAME, TAG_META, ATTACHMENT_NAME, 'case', MAGIC_PROPERTY]
DEVICE_LOG_XMLNS = 'http://code.javarosa.org/devicereport'
|
rlschool/metamaze/test.py | HaojieSHI98/RLSchool | 169 | 12763440 | import gym
import sys
import rlschool.metamaze
def test_2d_maze(max_iteration):
print("Testing 2D Maze...")
maze_env = gym.make("meta-maze-2D-v0", enable_render=False)
cell_scale = 9
task = maze_env.sample_task(cell_scale=cell_scale)
maze_env.set_task(task)
iteration = 0
while iteration < max_iteration:
iteration += 1
maze_env.reset()
done=False
sum_reward = 0
while not done:
state, reward, done, _ = maze_env.step(maze_env.action_space.sample())
sum_reward += reward
print("Episode is over! You got %.1f score."%sum_reward)
if(sum_reward > 0.0):
cell_scale += 2 # gradually increase the difficulty
print("Increase the difficulty, cell_scale = %d"%cell_scale)
task = maze_env.sample_task(cell_scale=cell_scale)
maze_env.set_task(task)
def test_3d_maze(max_iteration):
print("Testing 3D Maze...")
maze_env = gym.make("meta-maze-3D-v0", enable_render=False)
cell_scale = 9
task = maze_env.sample_task(cell_scale=cell_scale, cell_size=2.0, wall_height=3.2)
maze_env.set_task(task)
iteration = 0
while iteration < max_iteration:
iteration += 1
maze_env.reset()
done=False
sum_reward = 0
while not done:
state, reward, done, _ = maze_env.step(maze_env.action_space.sample())
sum_reward += reward
print("Episode is over! You got %.1f score."%sum_reward)
if(sum_reward > 0.0):
cell_scale += 2 # gradually increase the difficulty
print("Increase the difficulty, cell_scale = %d"%cell_scale)
task = maze_env.sample_task(cell_scale=cell_scale, cell_size=2.0, wall_height=3.2)
maze_env.set_task(task)
if __name__=="__main__":
test_2d_maze(100)
test_3d_maze(100)
|
openaddr/ci/webdotmap.py | MiniCodeMonkey/machine | 101 | 12763456 | import apsw
import boto3
import os
import json
from flask import Blueprint, Response, abort, current_app, render_template, url_for
from . import setup_logger
from .webcommon import log_application_errors, flask_log_level
from .webhooks import get_memcache_client
dots = Blueprint('dots', __name__)
# https://stackoverflow.com/questions/56776974/sqlite3-connect-to-a-database-in-cloud-s3
class S3VFS(apsw.VFS):
def __init__(self, vfsname="s3", basevfs="", cache=None):
self.vfsname = vfsname
self.basevfs = basevfs
self.cache = cache
apsw.VFS.__init__(self, self.vfsname, self.basevfs)
def xOpen(self, name, flags):
return S3VFSFile(self.basevfs, name, flags, self.cache)
class S3VFSFile():
def __init__(self, inheritfromvfsname, filename, flags, cache):
self.s3 = boto3.client('s3')
self.cache = cache
self.bucket = filename.uri_parameter("bucket")
self.key = filename.filename().lstrip("/")
def _cache_key(self, amount, offset):
return '{bucket}/{key}/{amount}/{offset}'.format(
bucket=self.bucket,
key=self.key,
amount=amount,
offset=offset,
)
def xRead(self, amount, offset):
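        # Serve the read with an S3 ranged GET; results are memoized in memcache
        # (when a cache client was provided), keyed by bucket/key/amount/offset.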
data = None
if self.cache:
cache_key = self._cache_key(amount, offset)
data = self.cache.get(cache_key)
if data is None:
response = self.s3.get_object(Bucket=self.bucket, Key=self.key, Range='bytes={}-{}'.format(offset, offset + amount))
data = response['Body'].read()
if self.cache:
self.cache.set(cache_key, data)
return data
def xFileSize(self):
length = None
if self.cache:
cache_key = '{bucket}/{key}/size'.format(bucket=self.bucket, key=self.key)
length = self.cache.get(cache_key)
if length is None:
response = self.s3.head_object(Bucket=self.bucket, Key=self.key)
length = response['ContentLength']
if self.cache:
self.cache.set(cache_key, length)
return length
def xClose(self):
pass
def xFileControl(self, op, ptr):
return False
def get_mbtiles_connection(bucket, key, cache):
'''
'''
s3vfs = S3VFS(cache=cache)
return apsw.Connection(
'file:/{key}?bucket={bucket}&immutable=1'.format(bucket=bucket, key=key),
flags=apsw.SQLITE_OPEN_READONLY | apsw.SQLITE_OPEN_URI,
vfs=s3vfs.vfsname,
)
def get_mbtiles_metadata(bucket, key, cache):
'''
'''
if cache:
cache_key = '{bucket}/{key}/metadata'.format(bucket=bucket, key=key)
cached = cache.get(cache_key)
if cached:
return cached
connection = get_mbtiles_connection(bucket, key, cache)
cur = connection.cursor()
res = cur.execute('''SELECT name, value FROM metadata
WHERE name IN ('center', 'json')''')
data = dict(res.fetchall())
lon, lat, zoom = map(float, data.get('center', '0,0,0').split(','))
more = json.loads(data.get('json', '{}'))
fields = list(more.get('vector_layers', [])[0].get('fields', {}).keys())
cur.close()
metadata_tuple = (zoom, lat, lon, fields)
if cache:
cache.set(cache_key, metadata_tuple)
return metadata_tuple
def get_mbtiles_tile(bucket, key, row, col, zoom, cache):
'''
'''
if cache:
cache_key = '{bucket}/{key}/{zoom}/{row}/{col}'.format(bucket=bucket, key=key, zoom=zoom, row=row, col=col)
cached = cache.get(cache_key)
if cached:
return cached
connection = get_mbtiles_connection(bucket, key, cache)
cur = connection.cursor()
flipped_row = (2**zoom) - 1 - row
res = cur.execute('''SELECT tile_data FROM tiles
WHERE zoom_level=? AND tile_column=? AND tile_row=?''', (zoom, col, flipped_row))
data = res.fetchone()
cur.close()
if cache:
cache.set(cache_key, data)
return data
@dots.route('/runs/<int:run_id>/dotmap/index.html')
@log_application_errors
def dotmap_preview(run_id):
'''
'''
if not run_id:
return 'invalid run_id', 404
try:
bucket = "data.openaddresses.io"
key = "runs/{run_id}/slippymap.mbtiles".format(run_id=run_id)
mc = get_memcache_client(current_app.config)
zoom, lat, lon, fields = get_mbtiles_metadata(bucket, key, mc)
except ValueError:
abort(500)
return render_template(
'dotmap-index.html',
run_id=run_id,
zoom=zoom,
lat=lat,
lon=lon,
fields=fields,
scene_url=url_for('dots.get_scene', run_id=run_id)
)
@dots.route('/runs/<run_id>/dotmap/scene.yaml')
@log_application_errors
def get_scene(run_id):
if not run_id:
return 'invalid run_id', 404
tile_args = dict(run_id=run_id, zoom=123, col=456, row=789)
tile_url = url_for('dots.get_one_tile', **tile_args).replace('123/456/789', '{z}/{x}/{y}')
return Response(
render_template('dotmap-scene.yaml', tile_url=tile_url),
headers={'Content-Type': 'application/x-yaml'},
)
@dots.route('/runs/<run_id>/dotmap/tiles/<int:zoom>/<int:col>/<int:row>.mvt')
@log_application_errors
def get_one_tile(run_id, zoom, col, row):
'''
'''
if not run_id:
return 'invalid run_id', 404
bucket = "data.openaddresses.io"
key = "runs/{run_id}/slippymap.mbtiles".format(run_id=run_id)
mc = get_memcache_client(current_app.config)
body = get_mbtiles_tile(bucket, key, row, col, zoom, mc)
if not body:
return 'tile not found', 404
headers = {
'Content-Type': 'application/vnd.mapbox-vector-tile',
'Content-Encoding': 'gzip',
}
return Response(body, headers=headers)
def apply_dotmap_blueprint(app):
'''
'''
@dots.after_request
def cache_everything(response):
response.cache_control.max_age = 31556952 # 1 year
response.cache_control.public = True
return response
app.register_blueprint(dots)
@app.before_first_request
def app_prepare():
setup_logger(os.environ.get('AWS_SNS_ARN'), None, flask_log_level(app.config))
|
notebooks-text-format/linreg_hierarchical_non_centered_numpyro.py | arpitvaghela/probml-notebooks | 166 | 12763485 | <reponame>arpitvaghela/probml-notebooks
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.11.3
# kernelspec:
# display_name: Python [default]
# language: python
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/probml/pyprobml/blob/master/notebooks/linreg_hierarchical_non_centered_numpyro.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="f_py_lrTPdK1"
#
#
# # Hierarchical non-centered Bayesian Linear Regression in NumPyro
#
# The text and code for this notebook are taken directly from [this blog post](https://twiecki.io/blog/2017/02/08/bayesian-hierchical-non-centered/)
# by <NAME>. [Original notebook](https://github.com/twiecki/WhileMyMCMCGentlySamples/blob/master/content/downloads/notebooks/GLM_hierarchical_non_centered.ipynb)
#
#
#
# + colab={"base_uri": "https://localhost:8080/"} id="XcsJEi91Qelr" outputId="8a943870-b8fe-4ef7-aa9f-0006e3266ae7"
# !pip install -q numpyro@git+https://github.com/pyro-ppl/numpyro arviz
# !pip install arviz
# !pip install seaborn
# + [markdown] id="J3PmS3woW962"
# First, we will import the libraries we need to:
# + id="QPTA4cZCPdK1"
# %matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import arviz as az
import numpyro
from numpyro.infer import MCMC, NUTS, Predictive
import numpyro.distributions as dist
from jax import random
sns.set_style('whitegrid')
np.random.seed(123)
# + [markdown] id="JzDno90bHlrO"
# Then, we'll load the data:
# + id="c4BgCIlclQXX"
url = 'https://github.com/twiecki/WhileMyMCMCGentlySamples/blob/master/content/downloads/notebooks/radon.csv?raw=true'
data = pd.read_csv(url)
# + id="17ISOnzPlSR1"
county_names = data.county.unique()
county_idx = data.county_code.values
# + [markdown] id="KdWGECP9PdK1"
# ## The intuitive specification
#
# Usually, hierarchical models are specified in a *centered* way. In a regression model, individual slopes would be centered around a group mean with a certain group variance, which controls the shrinkage:
# + id="R3K2OfGGnWlZ"
def hierarchical_model_centered(county, floor, log_of_radon):
# Hyperpriors
mu_a = numpyro.sample("mu_a", dist.Normal(0., 100**2))
sigma_a = numpyro.sample("sigma_a", dist.HalfCauchy(5.))
mu_b = numpyro.sample("mu_b", dist.Normal(0., 100**2))
sigma_b = numpyro.sample("sigma_b", dist.HalfCauchy(5.))
unique_counties = np.unique(county)
n_counties = len(unique_counties)
with numpyro.plate("counties", n_counties):
# Intercept for each county, distributed around group mean mu_a
a = numpyro.sample("a", dist.Normal(mu_a, sigma_a))
        # Slope for each county, distributed around group mean mu_b
b = numpyro.sample("b", dist.Normal(mu_b, sigma_b))
# Model error
eps = numpyro.sample("eps", dist.HalfCauchy(scale=5.))
# Expected value
radon_est = a[county_idx] + b[county_idx] * floor
with numpyro.plate("data", len(county)):
# Data likelihood
numpyro.sample("obs", dist.Normal(radon_est, eps), obs=log_of_radon)
# + colab={"base_uri": "https://localhost:8080/"} id="pmpzyT74Cj17" outputId="b0540268-3b40-4f57-fffa-d1880b859369"
nuts_kernel = NUTS(hierarchical_model_centered)
mcmc = MCMC(nuts_kernel, num_samples=5000, num_warmup=1000, num_chains=2)
rng_key = random.PRNGKey(0)
mcmc.run(rng_key, data.county.values, data.floor.values, data.log_radon.values)
hierarchical_centered_trace = mcmc.get_samples(True)
# Eliminates the first 1000 samples
hierarchical_centered_trace = {k: v[:, 1000:, :] if len(v.shape)==3 else v[:, 1000:] for k,v in hierarchical_centered_trace.items()}
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="M6hlZ2905Eoo" outputId="5e8a6fe3-6555-4fcb-9e9e-9e39d7bc348a"
inference_data = az.from_numpyro(mcmc)
az.plot_trace(inference_data, compact=True);
# + [markdown] id="OAbZ_QXGPdK2"
# I have seen plenty of traces with terrible convergence but this one might look fine to the unassuming eye. Perhaps `sigma_b` has some problems, so let's look at the Rhat:
# + id="EdTq66JUPdK2" colab={"base_uri": "https://localhost:8080/"} outputId="93b8896f-326b-43b1-b059-a95f79966804"
print('Rhat(sigma_b) = {}'.format(numpyro.diagnostics.gelman_rubin(hierarchical_centered_trace['sigma_b'])))
# + [markdown] id="JHSPBEbQPdK2"
# Not too bad -- well below 1.1. I used to think this wasn't a big deal but <NAME> in his [StanCon 2017 talk](https://www.youtube.com/watch?v=DJ0c7Bm5Djk&feature=youtu.be&t=4h40m9s) makes a strong point that it is actually very problematic. To understand what's going on, let's take a closer look at the slopes `b` and their group variance (i.e. how far they are allowed to move from the mean) `sigma_b`. I'm just plotting a single chain now.
# + id="AzfoQz2RPdK2" colab={"base_uri": "https://localhost:8080/", "height": 268} outputId="f439fe30-1b94-40ed-df80-719878b576dc"
fig, axs = plt.subplots(nrows=2)
axs[0].plot(hierarchical_centered_trace['sigma_b'][1], alpha=.5);
axs[0].set(ylabel='sigma_b');
axs[1].plot(hierarchical_centered_trace['b'][1], alpha=.5);
axs[1].set(ylabel='b');
# + [markdown] id="0zBgOlmnPdK2"
# `sigma_b` seems to drift into this area of very small values and get stuck there for a while. This is a common pattern and the sampler is trying to tell you that there is a region in space that it can't quite explore efficiently. While stuck down there, the slopes `b_i` become all squished together. We've entered **The Funnel of Hell** (it's just called the funnel, I added the last part for dramatic effect).
# + [markdown] id="iTckxwW7PdK2"
# ## The Funnel of Hell (and how to escape it)
#
# Let's look at the joint posterior of a single slope `b` (I randomly chose the 75th one) and the slope group variance `sigma_b`.
# + id="e1gZ_JZSPdK2" colab={"base_uri": "https://localhost:8080/", "height": 441} outputId="2703eeff-e39a-4d4b-b02e-3a46b1034023"
x = pd.Series(hierarchical_centered_trace['b'][:, :, 75].flatten(), name='slope b_75')
y = pd.Series(hierarchical_centered_trace['sigma_b'][:, :].flatten(), name='slope group variance sigma_b')
sns.jointplot(x=x, y=y, ylim=(0, .7));
# + [markdown] id="byYER5es2l_l"
# This makes sense: as the slope group variance goes to zero (or, said differently, as we apply maximum shrinkage), individual slopes are not allowed to deviate from the slope group mean, so they all collapse onto the group mean.
#
# While this property of the posterior in itself is not problematic, it makes the job extremely difficult for our sampler. Imagine a [Metropolis-Hastings](https://twiecki.github.io/blog/2015/11/10/mcmc-sampling/) exploring this space with a medium step-size (we're using NUTS here but the intuition works the same): in the wider top region we can comfortably make larger jumps to explore the space efficiently. However, once we move to the narrow bottom region we can change `b_75` and `sigma_b` only by tiny amounts. This causes the sampler to become trapped in that region of space. Most of the proposals will be rejected because our step-size is too large for this narrow part of the space and exploration will be very inefficient.
#
# You might wonder if we could somehow choose the step-size based on the denseness (or curvature) of the space. Indeed that's possible and it's called [Riemannian HMC](https://arxiv.org/abs/0907.1100). It works very well but is quite costly to run. Here, we will explore a different, simpler method.
#
# Finally, note that this problem does not exist for the intercept parameters `a`. Because we can determine individual intercepts `a_i` with enough confidence, `sigma_a` is not small enough to be problematic. Thus, the funnel of hell can be a problem in hierarchical models, but it does not have to be. (Thanks to <NAME> for pointing this out).
#
#
# ## Reparameterization
#
# If we can't easily make the sampler step-size adjust to the region of space, maybe we can adjust the region of space to make it simpler for the sampler? This is indeed possible and quite simple with a small reparameterization trick, we will call this the *non-centered* version.
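#
# Concretely, the trick can be written as follows (same symbols as the code below). The centered formulation
#
# $$b_i \sim \mathcal{N}(\mu_b, \sigma_b^2)$$
#
# is replaced by sampling a standardized offset and scaling it back,
#
# $$\tilde{b}_i \sim \mathcal{N}(0, 1), \qquad b_i = \mu_b + \sigma_b \, \tilde{b}_i,$$
#
# which leaves the implied distribution of $b_i$ unchanged but makes the sampled quantities $\tilde{b}_i$ and $\sigma_b$ independent a priori.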
# + id="HZp-OZ_RLWxN"
def hierarchical_model_non_centered(county, floor, log_of_radon):
# Hyperpriors
mu_a = numpyro.sample("mu_a", dist.Normal(0., 100**2))
sigma_a = numpyro.sample("sigma_a", dist.HalfCauchy(5.))
mu_b = numpyro.sample("mu_b", dist.Normal(0., 100**2))
sigma_b = numpyro.sample("sigma_b", dist.HalfCauchy(5.))
unique_counties = np.unique(county)
n_counties = len(unique_counties)
with numpyro.plate("counties", n_counties):
# Intercept for each county, distributed around group mean mu_a
a_offset = numpyro.sample("a_offset", dist.Normal(0, 1))
a = numpyro.deterministic("a", mu_a + a_offset * sigma_a)
        # Slope for each county, distributed around group mean mu_b
b_offset = numpyro.sample("b_offset", dist.Normal(0, 1))
b = numpyro.deterministic("b", mu_b + b_offset * sigma_b)
# Model error
eps = numpyro.sample("eps", dist.HalfCauchy(scale=5.))
# Expected value
radon_est = a[county_idx] + b[county_idx] * floor
with numpyro.plate("data", len(county)):
# Data likelihood
numpyro.sample("obs", dist.Normal(radon_est, eps), obs=log_of_radon)
# + id="eCnNxlmD2g-G" colab={"base_uri": "https://localhost:8080/"} outputId="a9df6771-8bfc-4d6f-9ef7-dc1a04c9f9ed"
nuts_kernel = NUTS(hierarchical_model_non_centered)
mcmc = MCMC(nuts_kernel, num_samples=5000, num_warmup=1000, num_chains=2)
mcmc.run(rng_key, data.county.values, data.floor.values, data.log_radon.values)
hierarchical_non_centered_trace = mcmc.get_samples(True)
hierarchical_non_centered_trace = {k: v[:, 1000:, :] if len(v.shape)==3 else v[:, 1000:] for k,v in hierarchical_non_centered_trace.items()}
# + [markdown] id="3Be9WYvFPdK3"
# Pay attention to the definitions of `a_offset`, `a`, `b_offset`, and `b` and compare them to the centered model above. What's going on here? It's pretty neat actually. Instead of saying that our individual slopes `b` are normally distributed around a group mean (i.e. modeling their absolute values directly), we can say that they are offset from a group mean by a certain value (`b_offset`; i.e. modeling their values relative to that mean). Now we still have to consider how far from that mean we actually allow things to deviate (i.e. how much shrinkage we apply). This is where `sigma_b` makes a comeback. We can simply multiply the offset by this scaling factor to get the same effect as before, just under a different parameterization. For a more formal introduction, see e.g. [Betancourt & Girolami (2013)](https://arxiv.org/pdf/1312.0906.pdf).
#
# Critically, `b_offset` and `sigma_b` are now mostly independent. This will become more clear soon. Let's first look at if this transform helped our sampling:
# + id="zzrN4osl2kMq" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="a46c60da-cf05-4382-9603-7f7b87526fc9"
var_names = ['a', 'b', 'mu_a', 'mu_b', 'sigma_a', 'sigma_b', 'eps']
inference_data = az.from_numpyro(mcmc)
az.plot_trace(inference_data, var_names=var_names, compact=True);
# + [markdown] id="b1lMZjlxPdK3"
# That looks much better as also confirmed by the joint plot:
# + colab={"base_uri": "https://localhost:8080/", "height": 296} id="_dcp7FYr2-dH" outputId="892efbac-6411-4b51-8d94-2641d6fcb174"
fig, axs = plt.subplots(ncols=2, sharex=True, sharey=True)
x = pd.Series(hierarchical_centered_trace['b'][:, :, 75].flatten(), name='slope b_75')
y = pd.Series(hierarchical_centered_trace['sigma_b'][:, :].flatten(), name='slope group variance sigma_b')
axs[0].plot(x, y, '.');
axs[0].set(title='Centered', ylabel='sigma_b', xlabel='b_75')
x = pd.Series(hierarchical_non_centered_trace['b'][:, :, 75].flatten(), name='slope b_75')
y = pd.Series(hierarchical_non_centered_trace['sigma_b'].flatten(), name='slope group variance sigma_b')
axs[1].plot(x, y, '.');
axs[1].set(title='Non-centered', xlabel='b_75');
# + [markdown] id="Q_W701t6PdK3"
# To really drive this home, let's also compare the `sigma_b` marginal posteriors of the two models:
# + id="XJxFSFbnPdK3" colab={"base_uri": "https://localhost:8080/", "height": 313} outputId="ae23d007-188a-435a-a2c9-d786dc18708e"
az.plot_kde(np.stack([hierarchical_centered_trace['sigma_b'], hierarchical_non_centered_trace['sigma_b'], ]).T)
plt.axvline(hierarchical_centered_trace['sigma_b'].mean(), color='b', linestyle='--')
plt.axvline(hierarchical_non_centered_trace['sigma_b'].mean(), color='g', linestyle='--')
plt.legend(['Centered', 'Non-centered', 'Centered posterior mean', 'Non-centered posterior mean']);
plt.xlabel('sigma_b'); plt.ylabel('Probability Density');
# + [markdown] id="QXe9_4vIPdK3"
# That's crazy -- there's a large region of very small `sigma_b` values that the sampler could not even explore before. In other words, our previous inferences ("Centered") were severely biased towards higher values of `sigma_b`. Indeed, if you look at the [previous blog post](https://twiecki.github.io/blog/2014/03/17/bayesian-glms-3/) the sampler never even got stuck in that low region causing me to believe everything was fine. These issues are hard to detect and very subtle, but they are meaningful as demonstrated by the sizable difference in posterior mean.
#
# But what does this concretely mean for our analysis? Over-estimating `sigma_b` means that we have a biased (=false) belief that we can tell individual slopes apart better than we actually can. There is less information in the individual slopes than what we estimated.
# + [markdown] id="3G2KQzuvPdK3"
# ### Why does the reparameterized model work better?
#
# To more clearly understand why this model works better, let's look at the joint distribution of `b_offset`:
# + colab={"base_uri": "https://localhost:8080/", "height": 441} id="Uje-j5FJ5WM0" outputId="b0d4f19e-f3e5-4fb1-ccfd-41954d36caa0"
x = pd.Series(hierarchical_non_centered_trace['b_offset'][:, :, 75].flatten(), name='slope b_offset_75')
y = pd.Series(hierarchical_non_centered_trace['sigma_b'][:, :].flatten(), name='slope group variance sigma_b')
sns.jointplot(x=x, y=y, ylim=(0, .7));
# + [markdown] id="iUUIWErkPdK3"
# This is the space the sampler sees; you can see how the funnel is flattened out. We can freely change the (relative) slope offset parameters even if the slope group variance is tiny as it just acts as a scaling parameter.
#
# Note that the funnel is still there -- it's a perfectly valid property of the model -- but the sampler has a much easier time exploring it in this different parameterization.
# + [markdown] id="5Klof7DEPdK3"
# ## Why hierarchical models are Bayesian
#
# Finally, I want to take the opportunity to make another point that is not directly related to hierarchical models but can be demonstrated quite well here.
#
# Usually when talking about the benefits of Bayesian statistics we talk about priors, uncertainty, and flexibility when coding models using Probabilistic Programming. However, an even more important property is rarely mentioned because it is much harder to communicate. <NAME> touched on this point in his tweet:
# + [markdown] id="i4dat7gDPdK3"
# <blockquote class="twitter-tweet" data-lang="en"><p lang="en" dir="ltr">It's interesting that many summarize Bayes as being about priors; but real power is its focus on integrals/expectations over maxima/modes</p>— <NAME> (@rosstaylor90) <a href="https://twitter.com/rosstaylor90/status/827263854002401281">February 2, 2017</a></blockquote>
# <script async src="//platform.twitter.com/widgets.js" charset="utf-8"></script>
# + [markdown] id="4tJwmkxRPdK3"
# <NAME> makes a similar point when he says ["Expectations are the only thing that make sense."](https://www.youtube.com/watch?v=pHsuIaPbNbY&t=8s)
#
# But what's wrong with maxima/modes? Aren't those really close to the posterior mean (i.e. the expectation)? Unfortunately, that's only the case for the simple models we teach to build up intuitions. In complex models, like the hierarchical one, the MAP can be far away and not be interesting or meaningful at all.
#
# Let's compare the posterior mode (i.e. the MAP) to the posterior mean of our hierarchical linear regression model:
# + id="df4orfyOPdK3" colab={"base_uri": "https://localhost:8080/"} outputId="37e89240-dd0f-45c6-cdb3-fe6ba7cb6958"
hierarchical_centered_trace['b'].reshape(8000,-1).mean(axis=0)
# + id="rsadfvlSPdK3" colab={"base_uri": "https://localhost:8080/"} outputId="b1ce064b-25fb-4a3e-b427-bca426c2a275"
hierarchical_centered_trace['sigma_b'].reshape(1,-1).std(axis=1)
# + [markdown] id="muQpdSipPdK3"
# As you can see, the slopes are all identical and the group slope variance is effectively zero. The reason is again related to the funnel. The MAP only cares about the probability **density** which is highest at the bottom of the funnel.
#
# But if you could only choose one point in parameter space to summarize the posterior above, would this be the one you'd pick? Probably not.
#
# Let's instead look at the **Expected Value** (i.e. posterior mean) which is computed by integrating probability **density** and **volume** to provide probability **mass** -- the thing we really care about. Under the hood, that's the integration performed by the MCMC sampler.
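#
# As a quick reminder (sketch):
#
# $$\hat{\theta}_{\mathrm{MAP}} = \arg\max_{\theta}\, p(\theta \mid y), \qquad \mathbb{E}[\theta \mid y] = \int \theta\, p(\theta \mid y)\, d\theta.$$
#
# The mode only cares about where the density peaks; the expectation weighs the density by the volume that carries it.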
# + colab={"base_uri": "https://localhost:8080/"} id="5uXWUhPbnowC" outputId="af8ac9fb-e6d4-4c23-8a69-b7ec280156c4"
hierarchical_non_centered_trace['b'].reshape(8000,-1).mean(axis=0)
# + id="9h-FzVGJPdK3" colab={"base_uri": "https://localhost:8080/"} outputId="38fa1e6c-99e7-4d98-f8cb-bf4d8d9f391b"
hierarchical_non_centered_trace['sigma_b'].reshape(1,-1).mean(axis=1)
# + [markdown] id="-AL504GdPdK3"
# Quite a difference. This also explains why it can be a bad idea to use the MAP to initialize your sampler: in certain models the MAP is not at all close to the region you want to explore (i.e. the "typical set").
#
# This strong divergence of the MAP and the Posterior Mean does not only happen in hierarchical models but also in high dimensional ones, where our intuitions from low-dimensional spaces gets twisted in serious ways. [This talk by <NAME>](https://www.youtube.com/watch?v=pHsuIaPbNbY&t=8s) makes the point quite nicely.
#
# So why do people -- especially in Machine Learning -- still use the MAP/MLE? As we all learned in high school first hand, integration is much harder than differentiation. This is really the only reason.
#
# Final disclaimer: This might provide the impression that this is a property of being in a Bayesian framework, which is not true. Technically, we can talk about Expectations vs Modes irrespective of that. Bayesian statistics just happens to provide a very intuitive and flexible framework for expressing and estimating these models.
#
# See [here](https://rawgithub.com/twiecki/WhileMyMCMCGentlySamples/master/content/downloads/notebooks/GLM_hierarchical_non_centered.ipynb) for the underlying notebook of this blog post.
# + [markdown] id="SzMHO6fNPdK3"
# ## Acknowledgements
#
# Thanks to [<NAME>](https://twitter.com/jonsedar) for helpful comments on an earlier draft.
|
src/storage-preview/azext_storage_preview/vendored_sdks/azure_storagev2/fileshare/v2020_02_10/_shared/request_handlers.py | Mannan2812/azure-cli-extensions | 2,728 | 12763487 | # -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
from typing import ( # pylint: disable=unused-import
Union, Optional, Any, Iterable, Dict, List, Type, Tuple,
TYPE_CHECKING
)
import logging
from os import fstat
from io import (SEEK_END, SEEK_SET, UnsupportedOperation)
import isodate
from azure.core.exceptions import raise_with_traceback
_LOGGER = logging.getLogger(__name__)
def serialize_iso(attr):
"""Serialize Datetime object into ISO-8601 formatted string.
:param Datetime attr: Object to be serialized.
:rtype: str
:raises: ValueError if format invalid.
"""
if not attr:
return None
if isinstance(attr, str):
attr = isodate.parse_datetime(attr)
try:
utc = attr.utctimetuple()
if utc.tm_year > 9999 or utc.tm_year < 1:
raise OverflowError("Hit max or min date")
date = "{:04}-{:02}-{:02}T{:02}:{:02}:{:02}".format(
utc.tm_year, utc.tm_mon, utc.tm_mday,
utc.tm_hour, utc.tm_min, utc.tm_sec)
return date + 'Z'
except (ValueError, OverflowError) as err:
msg = "Unable to serialize datetime object."
raise_with_traceback(ValueError, msg, err)
except AttributeError as err:
msg = "ISO-8601 object must be valid Datetime object."
raise_with_traceback(TypeError, msg, err)
def get_length(data):
length = None
# Check if object implements the __len__ method, covers most input cases such as bytearray.
try:
length = len(data)
except: # pylint: disable=bare-except
pass
if not length:
# Check if the stream is a file-like stream object.
# If so, calculate the size using the file descriptor.
try:
fileno = data.fileno()
except (AttributeError, UnsupportedOperation):
pass
else:
try:
return fstat(fileno).st_size
except OSError:
# Not a valid fileno, may be possible requests returned
# a socket number?
pass
# If the stream is seekable and tell() is implemented, calculate the stream size.
try:
current_position = data.tell()
data.seek(0, SEEK_END)
length = data.tell() - current_position
data.seek(current_position, SEEK_SET)
except (AttributeError, UnsupportedOperation):
pass
return length
def read_length(data):
try:
if hasattr(data, 'read'):
read_data = b''
for chunk in iter(lambda: data.read(4096), b""):
read_data += chunk
return len(read_data), read_data
if hasattr(data, '__iter__'):
read_data = b''
for chunk in data:
read_data += chunk
return len(read_data), read_data
except: # pylint: disable=bare-except
pass
raise ValueError("Unable to calculate content length, please specify.")
def validate_and_format_range_headers(
start_range, end_range, start_range_required=True,
end_range_required=True, check_content_md5=False, align_to_page=False):
# If end range is provided, start range must be provided
if (start_range_required or end_range is not None) and start_range is None:
raise ValueError("start_range value cannot be None.")
if end_range_required and end_range is None:
raise ValueError("end_range value cannot be None.")
# Page ranges must be 512 aligned
if align_to_page:
if start_range is not None and start_range % 512 != 0:
raise ValueError("Invalid page blob start_range: {0}. "
"The size must be aligned to a 512-byte boundary.".format(start_range))
if end_range is not None and end_range % 512 != 511:
raise ValueError("Invalid page blob end_range: {0}. "
"The size must be aligned to a 512-byte boundary.".format(end_range))
# Format based on whether end_range is present
range_header = None
if end_range is not None:
range_header = 'bytes={0}-{1}'.format(start_range, end_range)
elif start_range is not None:
range_header = "bytes={0}-".format(start_range)
# Content MD5 can only be provided for a complete range less than 4MB in size
range_validation = None
if check_content_md5:
if start_range is None or end_range is None:
raise ValueError("Both start and end range requied for MD5 content validation.")
if end_range - start_range > 4 * 1024 * 1024:
raise ValueError("Getting content MD5 for a range greater than 4MB is not supported.")
range_validation = 'true'
return range_header, range_validation
def add_metadata_headers(metadata=None):
# type: (Optional[Dict[str, str]]) -> Dict[str, str]
headers = {}
if metadata:
for key, value in metadata.items():
headers['x-ms-meta-{}'.format(key.strip())] = value.strip() if value else value
return headers
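# Illustrative examples for the helpers above (not part of the original module):
#
#   >>> add_metadata_headers({'project': ' demo '})
#   {'x-ms-meta-project': 'demo'}
#   >>> validate_and_format_range_headers(0, 511)
#   ('bytes=0-511', None)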
|
ssod/datasets/pipelines/formatting.py | huimlight/SoftTeacher | 604 | 12763496 | import numpy as np
from mmdet.datasets import PIPELINES
from mmdet.datasets.pipelines.formating import Collect
from ssod.core import TrimapMasks
@PIPELINES.register_module()
class ExtraAttrs(object):
def __init__(self, **attrs):
self.attrs = attrs
def __call__(self, results):
for k, v in self.attrs.items():
assert k not in results
results[k] = v
return results
@PIPELINES.register_module()
class ExtraCollect(Collect):
def __init__(self, *args, extra_meta_keys=[], **kwargs):
super().__init__(*args, **kwargs)
self.meta_keys = self.meta_keys + tuple(extra_meta_keys)
@PIPELINES.register_module()
class PseudoSamples(object):
def __init__(
self, with_bbox=False, with_mask=False, with_seg=False, fill_value=255
):
"""
Replacing gt labels in original data with fake labels or adding extra fake labels for unlabeled data.
This is to remove the effect of labeled data and keep its elements aligned with other sample.
Args:
with_bbox:
with_mask:
with_seg:
fill_value:
"""
self.with_bbox = with_bbox
self.with_mask = with_mask
self.with_seg = with_seg
self.fill_value = fill_value
def __call__(self, results):
if self.with_bbox:
results["gt_bboxes"] = np.zeros((0, 4))
results["gt_labels"] = np.zeros((0,))
if "bbox_fields" not in results:
results["bbox_fields"] = []
if "gt_bboxes" not in results["bbox_fields"]:
results["bbox_fields"].append("gt_bboxes")
if self.with_mask:
num_inst = len(results["gt_bboxes"])
h, w = results["img"].shape[:2]
results["gt_masks"] = TrimapMasks(
[
self.fill_value * np.ones((h, w), dtype=np.uint8)
for _ in range(num_inst)
],
h,
w,
)
if "mask_fields" not in results:
results["mask_fields"] = []
if "gt_masks" not in results["mask_fields"]:
results["mask_fields"].append("gt_masks")
if self.with_seg:
results["gt_semantic_seg"] = self.fill_value * np.ones(
results["img"].shape[:2], dtype=np.uint8
)
if "seg_fields" not in results:
results["seg_fields"] = []
if "gt_semantic_seg" not in results["seg_fields"]:
results["seg_fields"].append("gt_semantic_seg")
return results
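# A typical (illustrative) use in an mmdetection-style pipeline config; the keys
# mirror the constructor arguments of PseudoSamples above:
#
#   unsup_pipeline = [
#       ...,
#       dict(type="PseudoSamples", with_bbox=True),
#   ]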
|
observations/r/davis.py | hajime9652/observations | 199 | 12763520 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import numpy as np
import os
import sys
from observations.util import maybe_download_and_extract
def davis(path):
"""Self-Reports of Height and Weight
The `Davis` data frame has 200 rows and 5 columns. The subjects were
men and women engaged in regular exercise. There are some missing data.
This data frame contains the following columns:
sex
A factor with levels: `F`, female; `M`, male.
weight
Measured weight in kg.
height
Measured height in cm.
repwt
Reported weight in kg.
repht
Reported height in cm.
Personal communication from <NAME>, Departments of Physical Education
and Psychology, York University.
Args:
path: str.
Path to directory which either stores file or otherwise file will
be downloaded and extracted there.
Filename is `davis.csv`.
Returns:
Tuple of np.ndarray `x_train` with 200 rows and 5 columns and
dictionary `metadata` of column headers (feature names).
"""
import pandas as pd
path = os.path.expanduser(path)
filename = 'davis.csv'
if not os.path.exists(os.path.join(path, filename)):
url = 'http://dustintran.com/data/r/car/Davis.csv'
maybe_download_and_extract(path, url,
save_file_name='davis.csv',
resume=False)
data = pd.read_csv(os.path.join(path, filename), index_col=0,
parse_dates=True)
x_train = data.values
metadata = {'columns': data.columns}
return x_train, metadata
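# Example usage (illustrative; the path is just a local cache directory of your choice):
#
#   x_train, metadata = davis('~/data')
#   print(x_train.shape)  # (200, 5)
#   print(metadata['columns'])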
|
nasdaqdatalink/model/data_list.py | edvn0/data-link-python | 1,178 | 12763553 | from .model_list import ModelList
from .data_mixin import DataMixin
class DataList(DataMixin, ModelList):
pass
|
puput/wagtail_hooks.py | UCBerkeleySETI/puput | 554 | 12763554 | <gh_stars>100-1000
import wagtail.admin.rich_text.editors.draftail.features as draftail_features
from wagtail.admin.rich_text.converters.html_to_contentstate import InlineStyleElementHandler, BlockElementHandler
from wagtail.core import hooks
@hooks.register('register_rich_text_features')
def register_blockquote_feature(features):
"""
Registering the `blockquote` feature, which uses the `blockquote` Draft.js block type,
and is stored as HTML with a `<blockquote>` tag.
"""
feature_name = 'blockquote'
type_ = 'blockquote'
tag = 'blockquote'
control = {
'type': type_,
'label': '❝',
'description': 'Quote',
'element': 'blockquote',
}
features.register_editor_plugin(
'draftail',
feature_name,
draftail_features.BlockFeature(control)
)
features.register_converter_rule(
'contentstate',
feature_name,
{
'from_database_format': {tag: BlockElementHandler(type_)},
'to_database_format': {'block_map': {type_: tag}},
}
)
features.default_features.append(feature_name)
@hooks.register('register_rich_text_features')
def register_codeline_feature(features):
feature_name = 'Code Line'
type_ = 'CODE'
tag = 'code'
control = {
'type': type_,
'label': '>_',
'description': 'Code Line',
}
features.register_editor_plugin(
'draftail', feature_name, draftail_features.InlineStyleFeature(control)
)
db_conversion = {
'from_database_format': {tag: InlineStyleElementHandler(type_)},
'to_database_format': {'style_map': {type_: tag}},
}
features.register_converter_rule('contentstate', feature_name, db_conversion)
features.default_features.append(feature_name)
|
lightnion/cache.py | pthevenet/lightnion | 120 | 12763614 | <reponame>pthevenet/lightnion
import os
import time
import json
import shutil
import base64
import logging
cache_directory = '.lightnion-cache.d'
def directory(base_dir=None):
if base_dir is None:
base_dir = os.getcwd()
base_dir = os.path.join(base_dir, cache_directory)
if not os.path.isdir(base_dir):
logging.info(
'Note: creating {} to cache descriptors.'.format(base_dir))
os.mkdir(base_dir)
if not os.path.isdir(base_dir):
raise RuntimeError(
'Unable to fetch cache directory: {}'.format(base_dir))
return base_dir
def purge():
base_dir = directory()
logging.warning('Note: removing {} to purge cache.'.format(base_dir))
shutil.rmtree(base_dir)
class descriptors:
@staticmethod
def filename(descriptor, get=False):
base_dir = 'descriptors'
if 'micro' in descriptor['flavor']:
base_dir = 'micro-' + base_dir
base_dir = os.path.join(directory(), base_dir)
if not os.path.isdir(base_dir):
os.mkdir(base_dir)
field = 'digest'
if 'micro' in descriptor['flavor']:
field = 'micro-digest'
digest = descriptor[field]
if (not get) or 'micro' in descriptor['flavor']:
digest = base64.b64decode(descriptor[field] + '====').hex()
half_dir = os.path.join(base_dir, digest[:2])
if not os.path.isdir(half_dir):
os.mkdir(half_dir)
return os.path.join(half_dir, digest)
@staticmethod
def put(descriptor):
filename = descriptors.filename(descriptor)
if os.path.isfile(filename):
return
with open(filename, 'w') as f:
json.dump(descriptor, f)
@staticmethod
def get(flavor, digest):
field = 'digest'
if 'micro' in flavor:
field = 'micro-digest'
descriptor = {'flavor': flavor, field: digest}
filename = descriptors.filename(descriptor, get=True)
with open(filename, 'r') as f:
descriptor = json.load(f)
if not descriptor['flavor'] == flavor:
raise ValueError('Mismatched flavor.')
new_digest = descriptor[field]
if not 'micro' in field:
new_digest = base64.b64decode(new_digest + '====').hex()
if not new_digest == digest:
raise ValueError('Mismatched digest.')
return descriptor
class consensus:
@staticmethod
def filename(flavor):
return os.path.join(directory(), 'consensus-{}'.format(flavor))
@staticmethod
def put(fields):
filename = consensus.filename(fields['flavor'])
with open(filename, 'w') as f:
json.dump(fields, f)
@staticmethod
def get(flavor):
filename = consensus.filename(flavor)
with open(filename, 'r') as f:
fields = json.load(f)
if not fields['flavor'] == flavor:
raise ValueError('Mismatched flavor.')
if fields['headers']['valid-until']['stamp'] < time.time():
raise ValueError('Consensus need to be refreshed: {} < {}'.format(
fields['headers']['valid-until']['stamp'], time.time()))
return fields
|
models/Lightweight/MobileNetV1.py | Dou-Yu-xuan/deep-learning-visal | 150 | 12763616 | <filename>models/Lightweight/MobileNetV1.py
import torch
import torch.nn as nn
import torchvision
def BottleneckV1(in_channels, out_channels, stride):
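    # Despite the name, this is MobileNetV1's depthwise separable convolution block:
    # a 3x3 depthwise conv (groups=in_channels) followed by a 1x1 pointwise conv,
    # each with BatchNorm and ReLU6.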
return nn.Sequential(
nn.Conv2d(in_channels=in_channels,out_channels=in_channels,kernel_size=3,stride=stride,padding=1,groups=in_channels),
nn.BatchNorm2d(in_channels),
nn.ReLU6(inplace=True),
nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=1, stride=1),
nn.BatchNorm2d(out_channels),
nn.ReLU6(inplace=True)
)
class MobileNetV1(nn.Module):
def __init__(self, num_classes=1000):
super(MobileNetV1, self).__init__()
self.first_conv = nn.Sequential(
nn.Conv2d(in_channels=3,out_channels=32,kernel_size=3,stride=2,padding=1),
nn.BatchNorm2d(32),
nn.ReLU6(inplace=True),
)
self.bottleneck = nn.Sequential(
BottleneckV1(32, 64, stride=1),
BottleneckV1(64, 128, stride=2),
BottleneckV1(128, 128, stride=1),
BottleneckV1(128, 256, stride=2),
BottleneckV1(256, 256, stride=1),
BottleneckV1(256, 512, stride=2),
BottleneckV1(512, 512, stride=1),
BottleneckV1(512, 512, stride=1),
BottleneckV1(512, 512, stride=1),
BottleneckV1(512, 512, stride=1),
BottleneckV1(512, 512, stride=1),
BottleneckV1(512, 1024, stride=2),
BottleneckV1(1024, 1024, stride=1),
)
self.avg_pool = nn.AvgPool2d(kernel_size=7,stride=1)
self.linear = nn.Linear(in_features=1024,out_features=num_classes)
self.dropout = nn.Dropout(p=0.2)
self.softmax = nn.Softmax(dim=1)
self.init_params()
    def init_params(self):
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                # A constant weight init would make every logit identical; use a
                # small normal initialization for the classifier instead.
                nn.init.normal_(m.weight, 0, 0.01)
                nn.init.constant_(m.bias, 0)
def forward(self, x):
x = self.first_conv(x)
x = self.bottleneck(x)
x = self.avg_pool(x)
x = x.view(x.size(0),-1)
x = self.dropout(x)
x = self.linear(x)
out = self.softmax(x)
return out
if __name__=='__main__':
model = MobileNetV1()
print(model)
input = torch.randn(1, 3, 224, 224)
out = model(input)
print(out.shape)
|
enaml/qt/qt_html.py | xtuzy/enaml | 1,080 | 12763627 | <filename>enaml/qt/qt_html.py
#------------------------------------------------------------------------------
# Copyright (c) 2013-2017, Nucleic Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
#------------------------------------------------------------------------------
from atom.api import Typed
from enaml.widgets.html import ProxyHtml
from .QtWidgets import QTextEdit
from .qt_control import QtControl
class QtHtml(QtControl, ProxyHtml):
""" A Qt implementation of an Enaml ProxyHtml widget.
"""
#: A reference to the widget created by the proxy.
widget = Typed(QTextEdit)
#--------------------------------------------------------------------------
# Initialization API
#--------------------------------------------------------------------------
def create_widget(self):
""" Create the underlying html widget.
"""
widget = QTextEdit(self.parent_widget())
widget.setReadOnly(True)
self.widget = widget
def init_widget(self):
""" Initialize the underlying widget.
"""
super(QtHtml, self).init_widget()
self.set_source(self.declaration.source)
#--------------------------------------------------------------------------
# ProxyHtml API
#--------------------------------------------------------------------------
def set_source(self, source):
""" Set the source of the html widget
"""
self.widget.setHtml(source)
|
tick/robust/tests/model_huber_test.py | sumau/tick | 411 | 12763639 | # License: BSD 3 clause
import unittest
import numpy as np
from scipy.sparse import csr_matrix
from tick.robust import ModelHuber
from tick.base_model.tests.generalized_linear_model import TestGLM
from tick.linear_model import SimuLinReg
class Test(TestGLM):
def test_ModelHuber(self):
"""...Numerical consistency check of loss and gradient for Huber model
"""
np.random.seed(12)
n_samples, n_features = 5000, 10
w0 = np.random.randn(n_features)
c0 = np.random.randn()
# First check with intercept
X, y = SimuLinReg(w0, c0, n_samples=n_samples,
verbose=False).simulate()
X_spars = csr_matrix(X)
model = ModelHuber(fit_intercept=True, threshold=1.3).fit(X, y)
model_spars = ModelHuber(fit_intercept=True, threshold=1.3).fit(
X_spars, y)
self.run_test_for_glm(model, model_spars)
self._test_glm_intercept_vs_hardcoded_intercept(model)
# Then check without intercept
X, y = SimuLinReg(w0, None, n_samples=n_samples, verbose=False,
seed=2038).simulate()
X_spars = csr_matrix(X)
model = ModelHuber(fit_intercept=False).fit(X, y)
model_spars = ModelHuber(fit_intercept=False).fit(X_spars, y)
self.run_test_for_glm(model, model_spars)
# Test for the Lipschitz constants without intercept
self.assertAlmostEqual(model.get_lip_best(), 2.6873683857125981)
self.assertAlmostEqual(model.get_lip_mean(), 9.95845726788432)
self.assertAlmostEqual(model.get_lip_max(), 54.82616964855237)
self.assertAlmostEqual(model_spars.get_lip_mean(),
model.get_lip_mean())
self.assertAlmostEqual(model_spars.get_lip_max(), model.get_lip_max())
# Test for the Lipschitz constants with intercept
model = ModelHuber(fit_intercept=True).fit(X, y)
model_spars = ModelHuber(fit_intercept=True).fit(X_spars, y)
self.assertAlmostEqual(model.get_lip_best(), 2.687568385712598)
self.assertAlmostEqual(model.get_lip_mean(), 10.958457267884327)
self.assertAlmostEqual(model.get_lip_max(), 55.82616964855237)
self.assertAlmostEqual(model_spars.get_lip_mean(),
model.get_lip_mean())
self.assertAlmostEqual(model_spars.get_lip_max(), model.get_lip_max())
def test_ModelHuber_threshold(self):
np.random.seed(12)
n_samples, n_features = 5000, 10
w0 = np.random.randn(n_features)
c0 = np.random.randn()
# First check with intercept
X, y = SimuLinReg(w0, c0, n_samples=n_samples,
verbose=False).simulate()
model = ModelHuber(threshold=1.541).fit(X, y)
self.assertEqual(model._model.get_threshold(), 1.541)
model.threshold = 3.14
self.assertEqual(model._model.get_threshold(), 3.14)
msg = '^threshold must be > 0$'
with self.assertRaisesRegex(RuntimeError, msg):
model = ModelHuber(threshold=-1).fit(X, y)
with self.assertRaisesRegex(RuntimeError, msg):
model.threshold = 0.
if __name__ == '__main__':
unittest.main()
|
d2go/data/dataset_mappers/rotated_dataset_mapper.py | wenliangzhao2018/d2go | 687 | 12763656 | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import copy
import logging
import numpy as np
import torch
from d2go.data.dataset_mappers.d2go_dataset_mapper import D2GoDatasetMapper
from detectron2.data import detection_utils as utils, transforms as T
from detectron2.structures import BoxMode, Instances, RotatedBoxes
from .build import D2GO_DATA_MAPPER_REGISTRY
logger = logging.getLogger(__name__)
@D2GO_DATA_MAPPER_REGISTRY.register()
class RotatedDatasetMapper(D2GoDatasetMapper):
def _original_call(self, dataset_dict):
"""
Modified from detectron2's original __call__ in DatasetMapper
"""
dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below
image = self._read_image(dataset_dict, format=self.img_format)
if not self.backfill_size:
utils.check_image_size(dataset_dict, image)
if "annotations" not in dataset_dict:
image, transforms = T.apply_transform_gens(
([self.crop_gen] if self.crop_gen else []) + self.tfm_gens, image
)
else:
# Crop around an instance if there are instances in the image.
# USER: Remove if you don't use cropping
if self.crop_gen:
crop_tfm = utils.gen_crop_transform_with_instance(
self.crop_gen.get_crop_size(image.shape[:2]),
image.shape[:2],
np.random.choice(dataset_dict["annotations"]),
)
image = crop_tfm.apply_image(image)
image, transforms = T.apply_transform_gens(self.tfm_gens, image)
if self.crop_gen:
transforms = crop_tfm + transforms
image_shape = image.shape[:2] # h, w
dataset_dict["image"] = torch.as_tensor(
image.transpose(2, 0, 1).astype("float32")
)
# Can use uint8 if it turns out to be slow some day
assert not self.load_proposals, "Not supported!"
if not self.is_train:
dataset_dict.pop("annotations", None)
dataset_dict.pop("sem_seg_file_name", None)
return dataset_dict
if "annotations" in dataset_dict:
for anno in dataset_dict["annotations"]:
if not self.mask_on:
anno.pop("segmentation", None)
if not self.keypoint_on:
anno.pop("keypoints", None)
# Convert dataset_dict["annotations"] to dataset_dict["instances"]
annotations = [
obj
for obj in dataset_dict.pop("annotations")
if obj.get("iscrowd", 0) == 0
]
# Convert either rotated box or horizontal box to XYWHA_ABS format
original_boxes = [
BoxMode.convert(
box=obj["bbox"],
from_mode=obj["bbox_mode"],
to_mode=BoxMode.XYWHA_ABS,
)
for obj in annotations
]
transformed_boxes = transforms.apply_rotated_box(
np.array(original_boxes, dtype=np.float64)
)
instances = Instances(image_shape)
instances.gt_classes = torch.tensor(
[obj["category_id"] for obj in annotations], dtype=torch.int64
)
instances.gt_boxes = RotatedBoxes(transformed_boxes)
instances.gt_boxes.clip(image_shape)
dataset_dict["instances"] = instances[instances.gt_boxes.nonempty()]
return dataset_dict
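# Illustrative usage sketch (not part of the original file; the cfg-based
# constructor arguments below follow the usual D2Go mapper convention and are
# an assumption, not a documented API):
#
#   mapper = RotatedDatasetMapper(cfg, is_train=True)
#   example = mapper(dataset_dict)  # dict with "image" and rotated "instances"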
|
corus/sources/toloka.py | Ilseyar/corus | 205 | 12763706 |
from corus.record import Record
from corus.io import (
load_lines,
parse_tsv,
skip_header,
)
class LRWCRecord(Record):
__attributes__ = ['hyponym', 'hypernym', 'genitive', 'judgement', 'confidence']
def __init__(self, hyponym, hypernym, genitive, judgement, confidence):
self.hyponym = hyponym
self.hypernym = hypernym
self.genitive = genitive
self.judgement = judgement
self.confidence = confidence
# INPUT:hyponym INPUT:hypernym INPUT:genitive OUTPUT:judgement CONFIDENCE:judgement
# автомобиль автомашина автомашины true 99.75%
# автомобиль автомототранспорт автомототранспорта true 99.96%
# автомобиль автомототранспортный автомототранспортного true 99.99%
def parse_judgement(value):
if value == 'true':
return 1.0
elif value == 'false':
return 0.0
def parse_confidence(value):
return float(value[:-1])
def parse_toloka_lrwc(lines):
skip_header(lines)
records = parse_tsv(lines)
for record in records:
hyponym, hypernym, genitive, judgement, confidence = record
judgement = parse_judgement(judgement)
confidence = parse_confidence(confidence)
yield LRWCRecord(hyponym, hypernym, genitive, judgement, confidence)
def load_toloka_lrwc(path):
lines = load_lines(path)
return parse_toloka_lrwc(lines)
class RuADReCTRecord(Record):
__attributes__ = ['tweet_id', 'tweet', 'label']
def __init__(self, tweet_id, tweet, label):
self.tweet_id = tweet_id
self.tweet = tweet
self.label = label
# – tweet_id: unique id of the message in the twitter system;
# – tweet: text of the message (tweet);
# - label: tweet class, 1 - mentions an adverse drug effect, 0 - does not
def parse_ruadrect(lines):
rows = parse_tsv(lines)
skip_header(rows)
for cells in rows:
yield RuADReCTRecord(*cells)
def load_ruadrect(path):
lines = load_lines(path)
return parse_ruadrect(lines)
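# Minimal usage sketch (hypothetical file paths; not part of the original module):
#
#   for record in load_toloka_lrwc('toloka_lrwc.tsv'):
#       print(record.hyponym, record.hypernym, record.judgement, record.confidence)
#
#   for record in load_ruadrect('ruadrect.tsv'):
#       print(record.tweet_id, record.label)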
|
Code/python/Py/PostProcess.py | cy15196/FastCAE | 117 | 12763709 | #-------Link the C++ shared library---------------
import ctypes
import platform
system = platform.system()
if system == "Windows":
pre = "./"
suff = ".dll"
else:
pre = "./lib"
suff = ".so"
libfile = ctypes.cdll.LoadLibrary
filename = pre+"GraphicsAnalyse"+suff
postPro = libfile(filename)
import MainWindow
#---------------------------------
#-------Function definitions------------------
def script_openFile(id, type, file):
MainWindow.script_openFile(id, type, file)
pass
def script_applyClicked(id, type):
MainWindow.script_applyClicked(id, type)
pass
def script_Properties_Opacity(id, type, obj_id, mOpacity):
MainWindow.script_Properties_Opacity(id, type, obj_id, mOpacity)
pass
def script_Properties_colorColumn(id, type, obj_id, mColorColumnStyle):
MainWindow.script_Properties_colorColumn(id, type, obj_id, mColorColumnStyle)
pass
def script_Properties_scalarBarTitle(id, type, obj_id, colName, m_title):
MainWindow.script_Properties_scalarBarTitle(id, type, obj_id, colName, m_title)
pass
def script_Properties_scalarBarFontSize(id, type, obj_id, colName, m_fontSize):
MainWindow.script_Properties_scalarBarFontSize(id, type, obj_id, colName, m_fontSize)
pass
def script_Properties_scalarBarNumLables(id, type, obj_id, colName, m_numLables):
MainWindow.script_Properties_scalarBarNumLables(id, type, obj_id, colName, m_numLables)
pass
def script_Properties_lineWidth(id, type, obj_id, mLineWidth):
MainWindow.script_Properties_lineWidth(id, type, obj_id, mLineWidth)
pass
def script_Properties_pointSize(id, type, obj_id, mPointSize):
MainWindow.script_Properties_pointSize(id, type, obj_id, mPointSize)
pass
def script_Properties_translate(id, type, obj_id, x, y, z):
MainWindow.script_Properties_translate(id, type, obj_id, x, y, z)
pass
def script_Properties_origin(id, type, obj_id, x, y, z):
MainWindow.script_Properties_origin(id, type, obj_id, x, y, z)
pass
def script_Properties_scale(id, type, obj_id, x, y, z):
MainWindow.script_Properties_scale(id, type, obj_id, x, y, z)
pass
def script_Properties_orientation(id, type, obj_id, x, y, z):
MainWindow.script_Properties_orientation(id, type, obj_id, x, y, z)
pass
def script_Properties_representation(id, type, obj_id, m_enum_representationtype):
MainWindow.script_Properties_representation(id, type, obj_id, m_enum_representationtype)
pass
def script_Properties_specular(id, type, obj_id, mSpecular):
MainWindow.script_Properties_specular(id, type, obj_id, mSpecular)
pass
def script_Properties_diffuse(id, type, obj_id, mDiffuse):
MainWindow.script_Properties_diffuse(id, type, obj_id, mDiffuse)
pass
def script_Properties_ambient(id, type, obj_id, mAmbient):
MainWindow.script_Properties_ambient(id, type, obj_id, mAmbient)
pass
def script_Properties_specularPower(id, type, obj_id, mSpecularPower):
MainWindow.script_Properties_specularPower(id, type, obj_id, mSpecularPower)
pass
def script_Properties_specularColor(id, type, obj_id, r, g, b):
MainWindow.script_Properties_specularColor(id, type, obj_id, r, g, b)
pass
def script_Properties_solidColor(id, type, obj_id, r, g, b):
MainWindow.script_Properties_solidColor(id, type, obj_id, r, g, b)
pass
def script_Properties_edgeColor(id, type, obj_id, r, g, b):
MainWindow.script_Properties_edgeColor(id, type, obj_id, r, g, b)
pass
def script_Properties_interpolation(id, type, obj_id, m_enum_interpolationtype):
MainWindow.script_Properties_interpolation(id, type, obj_id, m_enum_interpolationtype)
pass
def script_Properties_Flag_scalarBar(id, type, obj_id, mColorColumnStyle):
MainWindow.script_Properties_Flag_scalarBar(id, type, obj_id, mColorColumnStyle)
pass
def script_Properties_EnableOpacityMap(id, type, obj_id, val):
MainWindow.script_Properties_EnableOpacityMap(id, type, obj_id, val)
pass
def script_Properties_visible(id, type, obj_id, flag_show_actors):
MainWindow.script_Properties_visible(id, type, obj_id, flag_show_actors)
pass
def script_Properties_show_scalarBars(id, type, obj_id, mScalarBarVisible):
MainWindow.script_Properties_show_scalarBars(id, type, obj_id, mScalarBarVisible)
pass
def script_Properties_show_cubeAxes(id, type, obj_id, flag_cubeAxes):
MainWindow.script_Properties_show_cubeAxes(id, type, obj_id, flag_cubeAxes)
pass
def script_Properties_scalarBarPosition(id, type, obj_id, colName, tep_orietation, pos0, pos1, pos2, pos3):
MainWindow.script_Properties_scalarBarPosition(id, type, obj_id,colName, tep_orietation, pos0, pos1, pos2, pos3)
pass
def script_Clip(id, type, obj_id):
MainWindow.script_FilterClip(id, type, obj_id)
pass
def script_Slice(id, type, obj_id):
MainWindow.script_FilterSlice(id, type, obj_id)
pass
def script_Contour(id, type, obj_id):
MainWindow.script_FilterContour(id, type, obj_id)
pass
def script_Vector(id, type, obj_id):
MainWindow.script_FilterVector(id, type, obj_id)
pass
def script_Reflection(id, type, obj_id):
MainWindow.script_FilterReflection(id, type, obj_id)
pass
def script_Smooth(id, type, obj_id):
MainWindow.script_FilterSmooth(id, type, obj_id)
pass
def script_StreamLine(id, type, obj_id):
MainWindow.script_FilterStreamLine(id, type, obj_id)
pass
###################
def script_Vector_GlyphVector(id, type, obj_id, val):
MainWindow.script_Properties_vector_GlyphVector(id, type, obj_id, val)
pass
def script_Vector_scalar(id, type, obj_id, val):
MainWindow.script_Properties_vector_scalar(id, type, obj_id, val)
pass
def script_Vector_normal(id, type, obj_id, val):
MainWindow.script_Properties_vector_normal(id, type, obj_id, val)
pass
def script_Vector_numPoints(id, type, obj_id, val):
MainWindow.script_Properties_vector_numPoints(id, type, obj_id, val)
pass
def script_Vector_glyph_type(id, type, obj_id, val):
MainWindow.script_Properties_vector_glyph_type(id, type, obj_id, val)
pass
def script_Vector_glyph_tipRes(id, type, obj_id, val):
MainWindow.script_Properties_vector_glyph_tipRes(id, type, obj_id, val)
pass
def script_Vector_glyph_tipRad(id, type, obj_id, val):
MainWindow.script_Properties_vector_glyph_tipRad(id, type, obj_id, val)
pass
def script_Vector_glyph_tipLen(id, type, obj_id, val):
MainWindow.script_Properties_vector_glyph_tipLen(id, type, obj_id, val)
pass
def script_FilterStreamLine(id, type, obj_id):
MainWindow.script_FilterStreamLine(id, type, obj_id)
pass
def script_Vector_glyph_shaftRes(id, type, obj_id, val):
MainWindow.script_Properties_vector_glyph_shaftRes(id, type, obj_id, val)
pass
def script_Vector_glyph_shaftRad(id, type, obj_id, val):
MainWindow.script_Properties_vector_glyph_shaftRad(id, type, obj_id, val)
pass
def script_Properties_view_backgroundType(id, type, obj_id, val):
MainWindow.script_Properties_view_backgroundType(id, type, obj_id, val)
pass
def script_Properties_view_backgroundColor(id, type, obj_id, red, green, blue):
MainWindow.script_Properties_view_backgroundColor(id, type, obj_id, red, green, blue)
pass
def script_Properties_view_background2Color(id, type, obj_id, red, green, blue):
MainWindow.script_Properties_view_background2Color(id, type, obj_id, red, green, blue)
pass
def script_Properties_view_axesVisible(id, type, a):
MainWindow.script_Properties_view_axesVisible(id, type, a)
pass
def script_Properties_view_cameraParallel(id, type, a):
MainWindow.script_Properties_view_cameraParallel(id, type, a)
pass
def script_Properties_view_interaction(id, type, a):
MainWindow.script_Properties_view_interaction(id, type, a)
pass
def script_Properties_renderView(id, type):
MainWindow.script_Properties_renderView(id, type)
pass
def script_Camera_Position(id, type, pos0, pos1, pos2):
MainWindow.script_Camera_Position(id, type, pos0, pos1, pos2)
pass
def script_Camera_FocalPoint(id, type, focalPoint0, focalPoint1, focalPoint2):
MainWindow.script_Camera_FocalPoint(id, type, focalPoint0, focalPoint1, focalPoint2)
pass
def script_Camera_ClippingRange(id, type, clippingRange0, clippingRange1):
MainWindow.script_Camera_ClippingRange(id, type, clippingRange0, clippingRange1)
pass
def script_Camera_ViewUp(id, type, viewup0, viewup1, viewup2):
MainWindow.script_Camera_ViewUp(id, type, viewup0, viewup1, viewup2)
pass
def script_Camera_ViewAngle(id, type, angle):
MainWindow.script_Camera_ViewAngle(id, type, angle)
pass
def script_Camera_Zoom(id, type, zoom):
MainWindow.script_Camera_Zoom(id, type, zoom)
pass
def script_Camera_Reset(id, type,):
MainWindow.script_Camera_Reset(id, type,)
pass
def script_Properties_planeOrigin(id, type, obj_id, x, y, z):
MainWindow.script_Properties_planeOrigin(id, type, obj_id, x, y, z)
pass
def script_Properties_planeNormal(id, type, obj_id, x, y, z):
MainWindow.script_Properties_planeNormal(id, type, obj_id, x, y, z)
pass
def script_Properties_planeVisible(id, type, obj_id, a):
MainWindow.script_Properties_planeVisible(id, type, obj_id, a)
pass
def script_Properties_insideOut(id, type, obj_id, a):
MainWindow.script_Properties_insideOut(id, type, obj_id, a)
pass
def script_Contour_Column(id, type, obj_id, val):
MainWindow.script_Properties_contourColumn(id, type, obj_id, val)
pass
def script_Contour_value(id, type, obj_id, val):
MainWindow.script_Properties_contourValue(id, type, obj_id, val)
pass
def script_Contour_reflection(id, type, obj_id, aaa):
MainWindow.script_Properties_contour_reflection(id, type, obj_id, aaa)
pass
def script_Contour_reflectionAxes(id, type, obj_id, val):
MainWindow.script_Properties_contour_reflectionAxes(id, type, obj_id, val)
pass
def script_Properties_reflectionAxes(id, type, obj_id, reflection_axis):
MainWindow.script_Properties_reflectionAxes(id, type, obj_id, reflection_axis)
pass
def Smooth_smooth(id, type, obj_id, smotype, coef):
MainWindow.script_Properties_smooth(id, type, obj_id, smotype, coef)
pass
def script_Streamline_vector(id, type, obj_id, val):
MainWindow.script_Properties_streamline_vector(id, type, obj_id, val)
pass
def script_Streamline_integration_type(id, type, obj_id, val):
MainWindow.script_Properties_streamline_integration_type(id, type, obj_id, val)
pass
def script_Streamline_integration_direction(id, type, obj_id, val):
MainWindow.script_Properties_streamline_integration_direction(id, type, obj_id, val)
pass
def script_Streamline_integration_stepUnit(id, type, obj_id, val):
MainWindow.script_Properties_streamline_integration_type(id, type, obj_id, val)
pass
def script_Properties_streamline_integration_stepUnit(id, type, obj_id, val):
MainWindow.script_Properties_streamline_integration_stepUnit(id, type, obj_id, val)
pass
def script_Streamline_integration_initStepLen(id, type, obj_id, val):
MainWindow.script_Properties_streamline_integration_initStepLen(id, type, obj_id, val)
pass
def script_Streamline_integration_miniStepLen(id, type, obj_id, val):
MainWindow.script_Properties_streamline_integration_miniStepLen(id, type, obj_id, val)
pass
def script_Streamline_integration_maxiStepLen(id, type, obj_id, val):
MainWindow.script_Properties_streamline_integration_maxiStepLen(id, type, obj_id, val)
pass
def script_Streamline_stream_maxiSteps(id, type, obj_id, val):
MainWindow.script_Properties_streamline_stream_maxiSteps(id, type, obj_id, val)
pass
def script_Streamline_stream_maxiStreamLen(id, type, obj_id, val):
MainWindow.script_Properties_streamline_stream_maxiStreamLen(id, type, obj_id, val)
pass
###########
def script_Streamline_stream_terminalSpeed(id, type, obj_id, val):
MainWindow.script_Properties_streamline_stream_terminalSpeed(id, type, obj_id, val)
pass
def script_Streamline_stream_maxiError(id, type, obj_id, val):
MainWindow.script_Properties_streamline_stream_maxiError(id, type, obj_id, val)
pass
def script_Streamline_seeds_type(id, type, obj_id, val):
MainWindow.script_Properties_streamline_seeds_type(id, type, obj_id, val)
pass
def script_Streamline_seeds_mPoint(id, type, obj_id, val0, val1, val2):
MainWindow.script_Properties_streamline_seeds_mPoint(id, type, obj_id, val0, val1, val2)
pass
def script_Streamline_seeds_num_points(id, type, obj_id, val):
MainWindow.script_Properties_streamline_seeds_num_points(id, type, obj_id, val)
pass
def script_Streamline_seeds_radius(id, type, obj_id, val):
MainWindow.script_Properties_streamline_seeds_radius(id, type, obj_id, val)
pass
def script_Streamline_vorticity(id, type, obj_id, val):
MainWindow.script_Properties_streamline_vorticity(id, type, obj_id, val)
pass
def script_Streamline_interpolatorType(id, type, obj_id, val):
MainWindow.script_Properties_streamline_interpolatorType(id, type, obj_id, val)
pass
def script_Streamline_surface_streamLines(id, type, obj_id, val):
MainWindow.script_Properties_streamline_surface_streamLines(id, type, obj_id, val)
pass
def script_Properties_streamline_reflection(id, type, obj_id, val):
MainWindow.script_Properties_streamline_reflection(id, type, obj_id, val)
pass
def script_Properties_streamline_reflectionAxes(id, type, obj_id, val):
MainWindow.script_Properties_streamline_reflectionAxes(id, type, obj_id, val)
pass
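#-------Usage sketch (illustrative only)------------------
# The argument meanings below (window id, view type, file path, object id) are
# assumptions inferred from the wrapper names above, not documented behaviour:
#
#   script_openFile(0, 0, "result.vtk")       # load a result file into view 0
#   script_applyClicked(0, 0)                 # apply the pending pipeline
#   script_Properties_Opacity(0, 0, 1, 0.5)   # set opacity of object 1 to 0.5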
|
mcg/sampling.py | nyu-dl/dl4mt-multi | 143 | 12763811 | <reponame>nyu-dl/dl4mt-multi
import logging
import copy
import numpy
import operator
import os
import re
import signal
import time
import theano
from blocks.extensions import SimpleExtension
from collections import OrderedDict
from subprocess import Popen, PIPE
from toolz import merge
from .utils import _p, get_enc_dec_ids
logger = logging.getLogger(__name__)
def gen_sample(f_init, f_next, x, src_selector, trg_selector, k=1,
maxlen=30, stochastic=True, argmax=False, eos_idx=0,
cond_init_trg=False, ignore_unk=False, minlen=1, unk_idx=1,
f_next_state=None, return_alphas=False):
if k > 1:
assert not stochastic, \
'Beam search does not support stochastic sampling'
sample = []
sample_score = []
sample_decalphas = []
if stochastic:
sample_score = 0
live_k = 1
dead_k = 0
hyp_samples = [[]] * live_k
hyp_decalphas = []
hyp_scores = numpy.zeros(live_k).astype('float32')
hyp_states = []
# multi-source
inp_xs = [x]
init_inps = inp_xs
ret = f_init(*init_inps)
next_state, ctx0 = ret[0], ret[1]
next_w = -1 * numpy.ones((1,)).astype('int64')
for ii in range(maxlen):
ctx = numpy.tile(ctx0, [live_k, 1])
prev_w = copy.copy(next_w)
prev_state = copy.copy(next_state)
inps = [next_w, ctx, next_state]
ret = f_next(*inps)
next_p, next_w, next_state = ret[0], ret[1], ret[2]
if return_alphas:
next_decalpha = ret.pop(0)
if stochastic:
if argmax:
nw = next_p[0].argmax()
else:
nw = next_w[0]
sample.append(nw)
sample_score -= numpy.log(next_p[0, nw])
if nw == eos_idx:
break
else:
log_probs = numpy.log(next_p)
# Adjust log probs according to search restrictions
if ignore_unk:
log_probs[:, unk_idx] = -numpy.inf
if ii < minlen:
log_probs[:, eos_idx] = -numpy.inf
            # use the adjusted log_probs so the ignore_unk/minlen restrictions
            # computed above actually take effect
            cand_scores = hyp_scores[:, None] - log_probs
cand_flat = cand_scores.flatten()
ranks_flat = cand_flat.argsort()[:(k-dead_k)]
voc_size = next_p.shape[1]
            trans_indices = ranks_flat // voc_size  # integer division keeps valid indices
word_indices = ranks_flat % voc_size
costs = cand_flat[ranks_flat]
new_hyp_samples = []
new_hyp_scores = numpy.zeros(k-dead_k).astype('float32')
new_hyp_states = []
new_hyp_decalphas = []
for idx, [ti, wi] in enumerate(zip(trans_indices, word_indices)):
new_hyp_samples.append(hyp_samples[ti]+[wi])
new_hyp_scores[idx] = copy.copy(costs[idx])
new_hyp_states.append(copy.copy(next_state[ti]))
if return_alphas:
tmp_decalphas = []
if ii > 0:
tmp_decalphas = copy.copy(hyp_decalphas[ti])
tmp_decalphas.append(next_decalpha[ti])
new_hyp_decalphas.append(tmp_decalphas)
# check the finished samples
new_live_k = 0
hyp_samples = []
hyp_scores = []
hyp_states = []
hyp_decalphas = []
for idx in range(len(new_hyp_samples)):
if new_hyp_samples[idx][-1] == eos_idx:
sample.append(new_hyp_samples[idx])
sample_score.append(new_hyp_scores[idx])
if return_alphas:
sample_decalphas.append(new_hyp_decalphas[idx])
dead_k += 1
else:
new_live_k += 1
hyp_samples.append(new_hyp_samples[idx])
hyp_scores.append(new_hyp_scores[idx])
hyp_states.append(new_hyp_states[idx])
if return_alphas:
hyp_decalphas.append(new_hyp_decalphas[idx])
hyp_scores = numpy.array(hyp_scores)
live_k = new_live_k
if new_live_k < 1:
break
if dead_k >= k:
break
next_w = numpy.array([w[-1] for w in hyp_samples])
next_state = numpy.array(hyp_states)
if not stochastic:
# dump every remaining one
if live_k > 0:
for idx in range(live_k):
sample.append(hyp_samples[idx])
sample_score.append(hyp_scores[idx])
if return_alphas:
sample_decalphas.append(hyp_decalphas[idx])
if not return_alphas:
return numpy.array(sample), numpy.array(sample_score)
return numpy.array(sample), numpy.array(sample_score), \
numpy.array(sample_decalphas)
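# Illustrative sketch (hypothetical compiled functions, selectors and word ids;
# not part of the original module): greedy decoding of one source sentence with
# the sampler above.
#
#   src_ids = numpy.array(src_word_ids)[:, None]           # shape (len, 1)
#   sample, score = gen_sample(f_init, f_next, x=src_ids,
#                              src_selector=src_sel, trg_selector=trg_sel,
#                              k=1, maxlen=50, stochastic=True, argmax=True,
#                              eos_idx=0)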
class SamplingBase(object):
def _get_attr_rec(self, obj, attr):
return self._get_attr_rec(getattr(obj, attr), attr) \
if hasattr(obj, attr) else obj
def _get_true_length(self, seq, eos_idx):
try:
return seq.tolist().index(eos_idx) + 1
except ValueError:
return len(seq)
def _oov_to_unk(self, seq):
return [x if x < self.src_vocab_size else self.unk_idx
for x in seq]
def _parse_input(self, line, eos_idx):
seqin = line.split()
seqlen = len(seqin)
seq = numpy.zeros(seqlen+1, dtype='int64')
for idx, sx in enumerate(seqin):
seq[idx] = self.vocab.get(sx, self.unk_idx)
if seq[idx] >= self.src_vocab_size:
seq[idx] = self.unk_idx
seq[-1] = eos_idx
return seq
def _idx_to_word(self, seq, ivocab):
return " ".join([ivocab.get(idx, "<UNK>") for idx in seq])
def _get_true_seq(self, seq, eos_idx):
return seq[:self._get_true_length(seq, eos_idx)]
def _make_matrix(self, arr):
if arr.ndim >= 2:
return arr
return arr[None, :]
class Sampler(SimpleExtension, SamplingBase):
"""Samples from computation graph
Does not use peeked batches
"""
def __init__(self, f_init, f_next, data_stream, num_samples=1,
src_vocab=None, trg_vocab=None, src_ivocab=None,
trg_ivocab=None, enc_id=0, dec_id=0, src_eos_idx=-1,
trg_eos_idx=-1, cond_init_trg=False, f_next_state=None,
**kwargs):
super(Sampler, self).__init__(**kwargs)
self.f_init = f_init
self.f_next = f_next
self.f_next_state = f_next_state
self.data_stream = data_stream
self.num_samples = num_samples
self.src_vocab = src_vocab
self.trg_vocab = trg_vocab
self.src_ivocab = src_ivocab
self.trg_ivocab = trg_ivocab
self.src_eos_idx = src_eos_idx
self.trg_eos_idx = trg_eos_idx
self.cond_init_trg = cond_init_trg
self.enc_id = enc_id
self.dec_id = dec_id
self._synced = False
self.sampling_fn = gen_sample
def do(self, which_callback, *args):
batch = args[0]
# Get current model parameters
if not self._synced:
sources = self._get_attr_rec(
self.main_loop.data_stream.streams[_p(self.enc_id,
self.dec_id)],
'data_stream')
self.sources = sources
self._synced = True
batch = self.main_loop.data_stream\
.get_batch_with_stream_id(_p(self.enc_id, self.dec_id))
batch_size = batch['source'].shape[1]
# Load vocabularies and invert if necessary
# WARNING: Source and target indices from data stream
# can be different
if not self.src_vocab:
self.src_vocab = self.sources.data_streams[0].dataset.dictionary
if not self.trg_vocab:
self.trg_vocab = self.sources.data_streams[1].dataset.dictionary
if not self.src_ivocab:
self.src_ivocab = {v: k for k, v in self.src_vocab.items()}
self.src_ivocab[self.src_eos_idx] = '</S>'
if not self.trg_ivocab:
self.trg_ivocab = {v: k for k, v in self.trg_vocab.items()}
self.trg_ivocab[self.trg_eos_idx] = '</S>'
sample_idx = numpy.random.choice(
batch_size, self.num_samples, replace=False)
src_batch = batch['source']
trg_batch = batch['target']
input_ = src_batch[:, sample_idx]
target_ = trg_batch[:, sample_idx]
# Sample
outputs = [list() for _ in sample_idx]
costs = [list() for _ in sample_idx]
for i, idx in enumerate(sample_idx):
outputs[i], costs[i] = self.sampling_fn(
self.f_init, self.f_next, eos_idx=self.trg_eos_idx,
x=self._get_true_seq(input_[:, i], self.src_eos_idx)[:, None],
src_selector=self._make_matrix(batch['src_selector'][idx, :]),
trg_selector=self._make_matrix(batch['trg_selector'][idx, :]),
k=1, maxlen=30, stochastic=True, argmax=False,
cond_init_trg=self.cond_init_trg,
f_next_state=self.f_next_state)
print ""
logger.info("Sampling from computation graph[{}-{}]"
.format(self.enc_id, self.dec_id))
for i in range(len(outputs)):
input_length = self._get_true_length(input_[:, i],
self.src_eos_idx)
target_length = self._get_true_length(target_[:, i],
self.trg_eos_idx)
sample_length = self._get_true_length(outputs[i],
self.trg_eos_idx)
print "Input : ", self._idx_to_word(input_[:, i][:input_length],
self.src_ivocab)
print "Target: ", self._idx_to_word(target_[:, i][:target_length],
self.trg_ivocab)
print "Sample: ", self._idx_to_word(outputs[i][:sample_length],
self.trg_ivocab)
print "Sample cost: ", costs[i].sum()
print ""
class BleuValidator(SimpleExtension, SamplingBase):
"""Highly not recommended for use."""
def __init__(self, f_init, f_next, data_stream,
bleu_script, val_set_out, val_set_grndtruth, src_vocab_size,
src_selector=None, trg_selector=None, n_best=1,
track_n_models=1, trg_ivocab=None, beam_size=5,
val_burn_in=10000, _reload=True, enc_id=None, dec_id=None,
saveto=None, src_eos_idx=-1, trg_eos_idx=-1, normalize=True,
cond_init_trg=False,**kwargs):
super(BleuValidator, self).__init__(**kwargs)
self.f_init = f_init
self.f_next = f_next
self.data_stream = data_stream
self.bleu_script = bleu_script
self.val_set_out = val_set_out
self.val_set_grndtruth = val_set_grndtruth
self.src_vocab_size = src_vocab_size
self.src_selector = src_selector
self.trg_selector = trg_selector
self.n_best = n_best
self.track_n_models = track_n_models
self.trg_ivocab = trg_ivocab
self.beam_size = beam_size
self.val_burn_in = val_burn_in
self._reload = _reload
self.enc_id = enc_id
self.dec_id = dec_id
self.saveto = saveto if saveto else "."
self.verbose = val_set_out
self._synced = False
self.src_eos_idx = src_eos_idx
self.trg_eos_idx = trg_eos_idx
self.normalize = normalize
self.cond_init_trg = cond_init_trg
# Helpers
self.vocab = data_stream.dataset.dictionary
self.unk_sym = data_stream.dataset.unk_token
self.eos_sym = data_stream.dataset.eos_token
self.unk_idx = self.vocab[self.unk_sym]
self.best_models = []
self.val_bleu_curve = []
self.sampling_fn = gen_sample
self.multibleu_cmd = ['perl', bleu_script, val_set_grndtruth, '<']
# Create saving directory if it does not exist
if not os.path.exists(saveto):
os.makedirs(saveto)
if self._reload:
try:
bleu_score = numpy.load(
os.path.join(
saveto, 'val_bleu_scores{}_{}.npz'.format(
self.enc_id, self.dec_id)))
self.val_bleu_curve = bleu_score['bleu_scores'].tolist()
# Track n best previous bleu scores
for i, bleu in enumerate(
sorted(self.val_bleu_curve, reverse=True)):
if i < self.track_n_models:
self.best_models.append(ModelInfo(bleu))
logger.info("BleuScores Reloaded")
except:
logger.info("BleuScores not Found")
def do(self, which_callback, *args):
# Track validation burn in
if self.main_loop.status['iterations_done'] <= self.val_burn_in:
return
# Get current model parameters
if not self._synced:
enc_ids, dec_ids = get_enc_dec_ids(self.main_loop.models.keys())
self.enc_idx = enc_ids.index(self.enc_id)
self.dec_idx = dec_ids.index(self.dec_id)
self.sources = self._get_attr_rec(
self.main_loop.data_stream.streams[_p(self.enc_id,
self.dec_id)],
'data_stream')
self._synced = True
# Evaluate and save if necessary
self._save_model(self._evaluate_model())
def _evaluate_model(self):
logger.info("Started Validation: ")
val_start_time = time.time()
mb_subprocess = Popen(self.multibleu_cmd, stdin=PIPE, stdout=PIPE)
total_cost = 0.0
# Get target vocabulary
if not self.trg_ivocab:
trg_vocab = self.sources.data_streams[1].dataset.dictionary
self.trg_ivocab = {v: k for k, v in trg_vocab.items()}
if self.verbose:
ftrans = open(self.val_set_out, 'w')
for i, line in enumerate(self.data_stream.get_epoch_iterator()):
"""
Load the sentence, retrieve the sample, write to file
"""
seq = numpy.array(self._oov_to_unk(line[0])).astype('int64')
# Branch for multiple computation graphs
src_selector_input = numpy.zeros(
(1, self.main_loop.num_encs)).astype(theano.config.floatX)
src_selector_input[0, self.enc_idx] = 1.
trg_selector_input = numpy.zeros(
(1, self.main_loop.num_decs)).astype(theano.config.floatX)
trg_selector_input[0, self.dec_idx] = 1.
# draw sample, checking to ensure we don't get an empty string back
trans, costs = self.sampling_fn(
self.f_init, self.f_next,
x=seq.reshape([len(seq), 1]), eos_idx=self.trg_eos_idx,
src_selector=src_selector_input,
trg_selector=trg_selector_input,
k=self.beam_size, maxlen=3*len(seq), stochastic=False,
argmax=False, cond_init_trg=self.cond_init_trg)
if self.normalize:
lengths = numpy.array([len(s) for s in trans])
costs = costs / lengths
nbest_idx = numpy.argsort(costs)[:self.n_best]
for j, best in enumerate(nbest_idx):
try:
total_cost += costs[best]
trans_out = trans[best]
# convert idx to words
trans_out = self._idx_to_word(trans_out[:-1],
self.trg_ivocab)
except ValueError:
logger.info(
"Can NOT find a translation for line: {}".format(i+1))
trans_out = '<UNK>'
if j == 0:
# Write to subprocess and file if it exists
print >> mb_subprocess.stdin, trans_out
if self.verbose:
print >> ftrans, trans_out
if i != 0 and i % 100 == 0:
logger.info(
"Translated {} lines of validation set...".format(i))
mb_subprocess.stdin.flush()
logger.info("Total cost of the validation: {}".format(total_cost))
self.data_stream.reset()
if self.verbose:
ftrans.close()
# send end of file, read output.
mb_subprocess.stdin.close()
stdout = mb_subprocess.stdout.readline()
logger.info(stdout)
out_parse = re.match(r'BLEU = [-.0-9]+', stdout)
logger.info("Validation Took: {} minutes".format(
float(time.time() - val_start_time) / 60.))
assert out_parse is not None
# extract the score
bleu_score = float(out_parse.group()[6:])
self.val_bleu_curve.append(bleu_score)
logger.info('BLEU score: {}'.format(bleu_score))
mb_subprocess.terminate()
# Save bleu scores to file
self._save_bleu_scores()
return bleu_score
def _is_valid_to_save(self, bleu_score):
if not self.best_models or min(self.best_models,
key=operator.attrgetter('bleu_score')).bleu_score < bleu_score:
return True
return False
def _save_model(self, bleu_score):
if self._is_valid_to_save(bleu_score):
model = ModelInfo(
bleu_score, self.saveto, self.enc_id, self.dec_id)
# Manage n-best model list first
if len(self.best_models) >= self.track_n_models:
old_model = self.best_models[0]
if old_model.path and os.path.isfile(old_model.path):
logger.info("Deleting old model %s" % old_model.path)
os.remove(old_model.path)
self.best_models.remove(old_model)
self.best_models.append(model)
self.best_models.sort(key=operator.attrgetter('bleu_score'))
# Save the model here
s = signal.signal(signal.SIGINT, signal.SIG_IGN)
logger.info("Saving new model {}".format(model.path))
params_to_save = []
for cg_name in self.main_loop.models.keys():
params_to_save.append(
self.main_loop.models[cg_name].get_param_values())
params_to_save = merge(params_to_save)
self._save_params(model, params_to_save)
self._save_bleu_scores()
signal.signal(signal.SIGINT, s)
def _save_params(self, model, params):
# Rename accordingly for blocks compatibility
params_to_save = dict(
(k.replace('/', '-'), v) for k, v in params.items())
numpy.savez(model.path, **params_to_save)
def _save_bleu_scores(self):
numpy.savez(
os.path.join(
self.saveto,
'val_bleu_scores{}_{}.npz'.format(self.enc_id, self.dec_id)),
bleu_scores=self.val_bleu_curve)
class ModelInfo:
def __init__(self, bleu_score, path=None, enc_id=None, dec_id=None):
self.bleu_score = bleu_score
self.enc_id = enc_id if enc_id is not None else ''
self.dec_id = dec_id if dec_id is not None else ''
self.path = self._generate_path(path) if path else None
def _generate_path(self, path):
return os.path.join(
path, 'best_bleu_model{}_{}_{}_BLEU{:.2f}.npz'.format(
self.enc_id, self.dec_id, int(time.time()), self.bleu_score))
|
arachne/tests/test_extensions.py | sliderSun/arachne | 137 | 12763827 | <filename>arachne/tests/test_extensions.py<gh_stars>100-1000
"""
To see if we have the right pipelines in place
"""
import inspect
from unittest import TestCase
from scrapy import signals, Field, Item
from mock import patch, mock_open, Mock, call
from arachne.extensions import ExportCSV, ExportData, ExportJSON
from scrapy.contrib.exporter import CsvItemExporter, JsonItemExporter
class ScrapyItem(Item):
field1 = Field()
field2 = Field()
field3 = Field()
class TestPipelines(TestCase):
def test_cls_export_data(self):
cls = ExportData()
self.assertTrue(inspect.ismethod(cls.from_crawler))
with self.assertRaises(NotImplementedError):
cls.spider_opened('test')
# TODO: test extension signals connect using `mock.assert_has_calls`
crawler_mock = Mock()
cls.from_crawler(crawler_mock)
assert crawler_mock.signals.connect.called
self.assertEquals(cls.files, {})
self.assertIsNone(cls.exporter)
def test_export_cls(self):
test_classes = [
{'cls': ExportJSON,
'file_type': 'json',
'exporter': JsonItemExporter},
{'cls': ExportCSV,
'file_type': 'csv',
'exporter': CsvItemExporter}
]
for test_cls in test_classes:
cls = test_cls['cls']()
mock_open_func = mock_open(read_data='Hello')
spider = Mock()
spider.name = 'abc'
with patch('arachne.extensions.open', mock_open_func):
cls.spider_opened(spider)
path = 'exports/%s/abc.%s' % (test_cls['file_type'],
test_cls['file_type'])
mock_open_func.assert_called_with(path, 'w+b')
self.assertIsInstance(cls.exporter, test_cls['exporter'])
# test if cls.files is empty
cls.spider_closed(spider)
self.assertEquals(cls.files, {})
# test exporter.export_item
item = ScrapyItem()
result = cls.item_scraped(item, spider)
self.assertEquals(item, result)
|
test/examples/simple/tlm2/blocking_simple/initiator.py | rodrigomelo9/uvm-python | 140 | 12763828 | #//----------------------------------------------------------------------
#// Copyright 2010-2011 Mentor Graphics Corporation
#// Copyright 2010-2011 Synopsys, Inc
#// Copyright 2019-2020 <NAME> (tpoikela)
#// All Rights Reserved Worldwide
#//
#// Licensed under the Apache License, Version 2.0 (the
#// "License"); you may not use this file except in
#// compliance with the License. You may obtain a copy of
#// the License at
#//
#// http://www.apache.org/licenses/LICENSE-2.0
#//
#// Unless required by applicable law or agreed to in
#// writing, software distributed under the License is
#// distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
#// CONDITIONS OF ANY KIND, either express or implied. See
#// the License for the specific language governing
#// permissions and limitations under the License.
#//----------------------------------------------------------------------
import cocotb
from uvm import (UVMComponent, uvm_component_utils, UVMTLMTime,
UVMTLMBInitiatorSocket)
from uvm.macros import *
from apb_rw import apb_rw
class initiator(UVMComponent):
# uvm_tlm_b_initiator_socket#(apb_rw) sock
def __init__(self, name="initiator", parent=None):
super().__init__(name, parent)
self.sock = UVMTLMBInitiatorSocket("sock", self) # (apb_rw)("sock", self)
# //
# // Execute a simple read-modify-write
# //
async def run_phase(self, phase):
delay = UVMTLMTime()
phase.raise_objection(self)
for i in range(10):
rw = apb_rw.type_id.create("rw",None,self.get_full_name())
rw.kind = apb_rw.READ
rw.addr = 0x0000FF00
rw.data = i + 1
await self.sock.b_transport(rw, delay)
# Ok to reuse the same RW instance
rw.kind = apb_rw.WRITE
rw.data = ~rw.data
await self.sock.b_transport(rw, delay)
phase.drop_objection(self)
uvm_component_utils(initiator)
|
tests/spells/test_length_of.py | awesome-archive/geomancer | 216 | 12763867 | # -*- coding: utf-8 -*-
# Import modules
import pytest
from google.cloud import bigquery
from tests.spells.base_test_spell import BaseTestSpell, SpellDB
# Import from package
from geomancer.backend.settings import SQLiteConfig
from geomancer.spells import LengthOf
params = [
SpellDB(
spell=LengthOf(
on="residential",
within=50,
source_table="gis_osm_roads_free_1",
feature_name="len_residential",
options=SQLiteConfig(),
),
dburl="sqlite:///tests/data/source.sqlite",
)
]
@pytest.mark.slow
class TestLengthOf(BaseTestSpell):
@pytest.fixture(params=params, ids=["roads-sqlite"])
def spelldb(self, request):
return request.param
|
test/test_pwm_setup.py | mrtnschltr/CHIP_IO | 295 | 12763868 | <reponame>mrtnschltr/CHIP_IO<filename>test/test_pwm_setup.py
import pytest
import os
import time
import CHIP_IO.PWM as PWM
import CHIP_IO.OverlayManager as OM
import CHIP_IO.Utilities as UT
def setup_module(module):
if not UT.is_chip_pro():
OM.load("PWM0")
def teardown_module(module):
PWM.cleanup()
if not UT.is_chip_pro():
OM.unload("PWM0")
class TestPwmSetup:
def setup_method(self, test_method):
time.sleep(0.5)
def test_start_pwm(self):
PWM.start("PWM0", 0)
pwm_test = '/sys/class/pwm/pwmchip0/pwm0/'
assert os.path.exists(pwm_test) == True
duty = open(pwm_test + 'duty_cycle').readline().strip()
period = open(pwm_test + 'period').readline().strip()
assert int(duty) == 0
assert int(period) == 500000
def test_start_pwm_with_polarity_one(self):
PWM.cleanup()
PWM.start("PWM0", 0, 2000, 1)
pwm_test = '/sys/class/pwm/pwmchip0/pwm0/'
duty = open(pwm_test + 'duty_cycle').readline().strip()
period = open(pwm_test + 'period').readline().strip()
polarity = open(pwm_test + 'polarity').readline().strip()
assert int(duty) == 0
assert int(period) == 500000
assert str(polarity) == "inversed"
def test_start_pwm_with_polarity_default(self):
PWM.cleanup()
PWM.start("PWM0", 0, 2000, 0)
pwm_test = '/sys/class/pwm/pwmchip0/pwm0/'
duty = open(pwm_test + 'duty_cycle').readline().strip()
period = open(pwm_test + 'period').readline().strip()
polarity = open(pwm_test + 'polarity').readline().strip()
assert int(duty) == 0
assert int(period) == 500000
assert str(polarity) == "normal"
def test_start_pwm_with_polarity_zero(self):
PWM.cleanup()
PWM.start("PWM0", 0, 2000, 0)
pwm_test = '/sys/class/pwm/pwmchip0/pwm0/'
duty = open(pwm_test + 'duty_cycle').readline().strip()
period = open(pwm_test + 'period').readline().strip()
polarity = open(pwm_test + 'polarity').readline().strip()
assert int(duty) == 0
assert int(period) == 500000
assert str(polarity) == "normal"
def test_pwm_start_invalid_pwm_key(self):
with pytest.raises(ValueError):
PWM.start("P8_25", -1)
def test_pwm_start_invalid_duty_cycle_negative(self):
with pytest.raises(ValueError):
PWM.start("PWM0", -1)
def test_pwm_start_valid_duty_cycle_min(self):
#testing an exception isn't thrown
PWM.cleanup()
PWM.start("PWM0", 0)
PWM.cleanup()
def test_pwm_start_valid_duty_cycle_max(self):
#testing an exception isn't thrown
PWM.start("PWM0", 100)
PWM.cleanup()
def test_pwm_start_invalid_duty_cycle_high(self):
with pytest.raises(ValueError):
PWM.start("PWM0", 101)
def test_pwm_start_invalid_duty_cycle_string(self):
with pytest.raises(TypeError):
PWM.start("PWM0", "1")
def test_pwm_start_invalid_frequency_negative(self):
with pytest.raises(ValueError):
PWM.start("PWM0", 0, -1)
def test_pwm_start_invalid_frequency_string(self):
with pytest.raises(TypeError):
PWM.start("PWM0", 0, "1")
def test_pwm_start_negative_polarity(self):
with pytest.raises(ValueError):
PWM.start("PWM0", 0, 100, -1)
def test_pwm_start_invalid_positive_polarity(self):
with pytest.raises(ValueError):
PWM.start("PWM0", 0, 100, 2)
def test_pwm_start_invalid_polarity_type(self):
with pytest.raises(TypeError):
PWM.start("PWM0", 0, 100, "1")
@pytest.mark.xfail(reason="pwm cleanup is doing weirdness for this test")
def test_pwm_duty_modified(self):
PWM.start("PWM0", 0)
pwm_test = '/sys/class/pwm/pwmchip0/pwm0/'
assert os.path.exists(pwm_test) == True
duty = open(pwm_test + 'duty_cycle').readline().strip()
period = open(pwm_test + 'period').readline().strip()
assert int(duty) == 0
assert int(period) == 500000
PWM.set_duty_cycle("PWM0", 100)
duty = open(pwm_test + 'duty_cycle').readline().strip()
period = open(pwm_test + 'period').readline().strip()
assert int(duty) == 500000
assert int(period) == 500000
def test_pwm_duty_cycle_non_setup_key(self):
with pytest.raises(ValueError):
PWM.cleanup()
PWM.set_duty_cycle("PWM0", 100)
def test_pwm_duty_cycle_invalid_key(self):
with pytest.raises(ValueError):
PWM.set_duty_cycle("P9_15", 100)
def test_pwm_duty_cycle_invalid_value_high(self):
PWM.start("PWM0", 0)
with pytest.raises(ValueError):
PWM.set_duty_cycle("PWM0", 101)
PWM.cleanup()
def test_pwm_duty_cycle_invalid_value_negative(self):
PWM.start("PWM0", 0)
with pytest.raises(ValueError):
PWM.set_duty_cycle("PWM0", -1)
PWM.cleanup()
def test_pwm_duty_cycle_invalid_value_string(self):
PWM.start("PWM0", 0)
with pytest.raises(TypeError):
PWM.set_duty_cycle("PWM0", "a")
PWM.cleanup()
def test_pwm_frequency_invalid_value_negative(self):
PWM.start("PWM0", 0)
with pytest.raises(ValueError):
PWM.set_frequency("PWM0", -1)
PWM.cleanup()
def test_pwm_frequency_invalid_value_string(self):
PWM.start("PWM0", 0)
with pytest.raises(TypeError):
PWM.set_frequency("PWM0", "11")
PWM.cleanup()
def test_pwm_freq_non_setup_key(self):
with pytest.raises(RuntimeError):
PWM.set_frequency("PWM0", 100)
    def test_pwm_freq_invalid_key(self):  # renamed: previously shadowed the test above
with pytest.raises(ValueError):
PWM.set_frequency("P9_15", 100)
|
tests/py/test_fake_data.py | kant/gratipay.com | 517 | 12763876 | from __future__ import print_function, unicode_literals
from gratipay import fake_data
from gratipay.testing import Harness
from gratipay.cli.fake_data import main
class TestFakeData(Harness):
def test_fake_data_cli(self):
num_participants = 6
num_tips = 25
num_teams = 5
num_packages = 5
main(self.db, num_participants, num_tips, num_teams, num_packages)
participants = self.db.all("SELECT * FROM participants")
teams = self.db.all("SELECT * FROM teams")
packages = self.db.all("SELECT * FROM packages")
payment_instructions = self.db.all("SELECT * FROM payment_instructions")
assert len(participants) == num_participants
assert len(teams) == num_teams + 1 # +1 for the fake Gratipay team.
assert len(packages) == num_packages
assert len(payment_instructions) == num_tips
def test_fake_participant_identity(self):
crusher = self.make_participant('crusher', email_address='<EMAIL>')
country_id = fake_data.fake_participant_identity(crusher)
assert [x.country.id for x in crusher.list_identity_metadata()] == [country_id]
def test_fake_team_doesnt_fail_for_name_with_apostrophe(self):
crusher = self.make_participant('crusher', email_address='<EMAIL>')
team = fake_data.fake_team(self.db, crusher, "D'Amorebury")
assert team.name != "d-amorebury"
|
scripts/owner/what_do_i_own.py | wwjiang007/fuchsia-1 | 210 | 12763916 | <filename>scripts/owner/what_do_i_own.py
#!/usr/bin/env python2.7
# Copyright 2019 The Fuchsia Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import os
import re
import sys
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
FUCHSIA_ROOT = os.path.dirname( # $root
os.path.dirname( # scripts
SCRIPT_DIR)) # owner
# $ what_do_i_own.py me@mydomain file_i_own file_i_dont
# file_i_own
def main():
parser = argparse.ArgumentParser(
description='Filters `paths` for those owned by `owner`')
parser.add_argument('owner')
parser.add_argument('paths', nargs='+')
args = parser.parse_args()
owner = args.owner
abspaths = [os.path.abspath(path) for path in args.paths]
perfile_exp = re.compile('^\s*per-file ([^\s=]*)\s*=\s*' + owner)
# Find all OWNERS files
path_to_owners = {}
for path in abspaths:
dir = path if os.path.isdir(path) else os.path.dirname(path)
dir = os.path.abspath(dir)
while (os.path.exists(dir) and
os.path.commonprefix([dir, FUCHSIA_ROOT]) == FUCHSIA_ROOT):
owners_path = os.path.join(dir, 'OWNERS')
if os.path.exists(owners_path):
path_to_owners[path] = owners_path
break
dir = os.path.dirname(dir)
# Parse all OWNERS files
owned = set()
for path, owners in path_to_owners.iteritems():
with open(owners) as f:
for line in f.readlines():
if line.strip().startswith(owner):
owned.add(path)
continue
match = perfile_exp.match(line)
if match:
filename = os.path.abspath(
os.path.join(os.path.dirname(owners), match.group(1)))
if filename in abspaths:
owned.add(path)
# Print owned files
for owned_path in sorted(owned):
print os.path.relpath(owned_path)
return 0
if __name__ == '__main__':
sys.exit(main())
|
src/robusta/core/reporting/consts.py | robusta-dev/robusta | 273 | 12763924 | from enum import Enum
SYNC_RESPONSE_SINK = "robusta-synchronized-response-sink"
class FindingType(Enum):
ISSUE = "issue"
CONF_CHANGE = "configuration_change"
HEALTH_CHECK = "health_check"
REPORT = "report"
# Finding sources
class FindingSource(Enum):
NONE = None # empty default
KUBERNETES_API_SERVER = "kubernetes_api_server"
PROMETHEUS = "prometheus"
MANUAL = "manual"
CALLBACK = "callback"
# Finding subject types
class FindingSubjectType(Enum):
TYPE_NONE = None # empty default
TYPE_DEPLOYMENT = "deployment"
TYPE_NODE = "node"
TYPE_POD = "pod"
TYPE_JOB = "job"
TYPE_DAEMONSET = "daemonset"
@staticmethod
def from_kind(kind: str):
if kind == "deployment":
return FindingSubjectType.TYPE_DEPLOYMENT
elif kind == "node":
return FindingSubjectType.TYPE_NODE
elif kind == "pod":
return FindingSubjectType.TYPE_POD
elif kind == "job":
return FindingSubjectType.TYPE_JOB
elif kind == "daemonset":
return FindingSubjectType.TYPE_DAEMONSET
return FindingSubjectType.TYPE_NONE
# Annotations
class SlackAnnotations:
UNFURL = "unfurl"
ATTACHMENT = "attachment"
|
threedod/benchmark_scripts/utils/tenFpsDataLoader.py | Levintsky/ARKitScenes | 237 | 12763985 | import copy
import cv2
import glob
import json
import numpy as np
import os
from .box_utils import compute_box_3d, boxes_to_corners_3d, get_size
from .rotation import convert_angle_axis_to_matrix3
from .taxonomy import class_names, ARKitDatasetConfig
def TrajStringToMatrix(traj_str):
""" convert traj_str into translation and rotation matrices
Args:
traj_str: A space-delimited file where each line represents a camera position at a particular timestamp.
The file has seven columns:
* Column 1: timestamp
* Columns 2-4: rotation (axis-angle representation in radians)
* Columns 5-7: translation (usually in meters)
Returns:
        ts: timestamp (str) of the camera sample
        Rt: 4x4 transformation matrix (inverse of the axis-angle/translation pose)
"""
# line=[float(x) for x in traj_str.split()]
# ts = line[0];
# R = cv2.Rodrigues(np.array(line[1:4]))[0];
# t = np.array(line[4:7]);
# Rt = np.concatenate((np.concatenate((R, t[:,np.newaxis]), axis=1), [[0.0,0.0,0.0,1.0]]), axis=0)
tokens = traj_str.split()
assert len(tokens) == 7
ts = tokens[0]
# Rotation in angle axis
angle_axis = [float(tokens[1]), float(tokens[2]), float(tokens[3])]
r_w_to_p = convert_angle_axis_to_matrix3(np.asarray(angle_axis))
# Translation
t_w_to_p = np.asarray([float(tokens[4]), float(tokens[5]), float(tokens[6])])
extrinsics = np.eye(4, 4)
extrinsics[:3, :3] = r_w_to_p
extrinsics[:3, -1] = t_w_to_p
Rt = np.linalg.inv(extrinsics)
return (ts, Rt)
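# Illustrative call (made-up numbers; assumes a zero axis-angle maps to the
# identity rotation): a "timestamp rx ry rz tx ty tz" line becomes a timestamp
# string and a 4x4 matrix.
#
#   ts, Rt = TrajStringToMatrix("6.123 0.0 0.0 0.0 1.0 2.0 3.0")
#   # ts == "6.123"; with identity rotation Rt is the inverted translation,
#   # i.e. Rt[:3, -1] == [-1.0, -2.0, -3.0]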
def st2_camera_intrinsics(filename):
w, h, fx, fy, hw, hh = np.loadtxt(filename)
return np.asarray([[fx, 0, hw], [0, fy, hh], [0, 0, 1]])
def generate_point(
rgb_image,
depth_image,
intrinsic,
subsample=1,
world_coordinate=True,
pose=None,
):
"""Generate 3D point coordinates and related rgb feature
Args:
rgb_image: (h, w, 3) rgb
depth_image: (h, w) depth
intrinsic: (3, 3)
subsample: int
resize stride
world_coordinate: bool
pose: (4, 4) matrix
transfer from camera to world coordindate
Returns:
points: (N, 3) point cloud coordinates
in world-coordinates if world_coordinate==True
else in camera coordinates
rgb_feat: (N, 3) rgb feature of each point
"""
intrinsic_4x4 = np.identity(4)
intrinsic_4x4[:3, :3] = intrinsic
u, v = np.meshgrid(
range(0, depth_image.shape[1], subsample),
range(0, depth_image.shape[0], subsample),
)
d = depth_image[v, u]
d_filter = d != 0
mat = np.vstack(
(
u[d_filter] * d[d_filter],
v[d_filter] * d[d_filter],
d[d_filter],
np.ones_like(u[d_filter]),
)
)
new_points_3d = np.dot(np.linalg.inv(intrinsic_4x4), mat)[:3]
if world_coordinate:
new_points_3d_padding = np.vstack(
(new_points_3d, np.ones((1, new_points_3d.shape[1])))
)
world_coord_padding = np.dot(pose, new_points_3d_padding)
new_points_3d = world_coord_padding[:3]
rgb_feat = rgb_image[v, u][d_filter]
return new_points_3d.T, rgb_feat
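# Self-contained sketch (synthetic inputs, not ARKit data) of the pinhole
# back-projection performed by generate_point:
#
#   intrinsic = np.array([[500., 0., 32.], [0., 500., 24.], [0., 0., 1.]])
#   depth = np.full((48, 64), 2.0)          # constant 2 m depth map
#   rgb = np.zeros((48, 64, 3))
#   pts, feats = generate_point(rgb, depth, intrinsic, subsample=4,
#                               world_coordinate=False)
#   # pts has shape (N, 3) and every z-coordinate equals 2.0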
def extract_gt(gt_fn):
"""extract original label data
Args:
gt_fn: str (file name of "annotation.json")
after loading, we got a dict with keys
'data', 'stats', 'comment', 'confirm', 'skipped'
['data']: a list of dict for bboxes, each dict has keys:
'uid', 'label', 'modelId', 'children', 'objectId',
'segments', 'hierarchy', 'isInGroup', 'labelType', 'attributes'
'label': str
'segments': dict for boxes
'centroid': list of float (x, y, z)?
'axesLengths': list of float (x, y, z)?
'normalizedAxes': list of float len()=9
'uid'
'comments':
'stats': ...
Returns:
skipped: bool
skipped or not
boxes_corners: (n, 8, 3) box corners
**world-coordinate**
centers: (n, 3)
**world-coordinate**
sizes: (n, 3) full-sizes (no halving!)
labels: list of str
uids: list of str
"""
gt = json.load(open(gt_fn, "r"))
skipped = gt['skipped']
if len(gt) == 0:
boxes_corners = np.zeros((0, 8, 3))
centers = np.zeros((0, 3))
sizes = np.zeros((0, 3))
labels, uids = [], []
return skipped, boxes_corners, centers, sizes, labels, uids
boxes_corners = []
centers = []
sizes = []
labels = []
uids = []
for data in gt['data']:
l = data["label"]
for delimiter in [" ", "-", "/"]:
l = l.replace(delimiter, "_")
if l not in class_names:
print("unknown category: %s" % l)
continue
rotmat = np.array(data["segments"]["obbAligned"]["normalizedAxes"]).reshape(
3, 3
)
center = np.array(data["segments"]["obbAligned"]["centroid"]).reshape(-1, 3)
size = np.array(data["segments"]["obbAligned"]["axesLengths"]).reshape(-1, 3)
box3d = compute_box_3d(size.reshape(3).tolist(), center, rotmat)
'''
Box corner order that we return is of the format below:
6 -------- 7
/| /|
5 -------- 4 .
| | | |
. 2 -------- 3
|/ |/
1 -------- 0
'''
boxes_corners.append(box3d.reshape(1, 8, 3))
size = np.array(get_size(box3d)).reshape(1, 3)
center = np.mean(box3d, axis=0).reshape(1, 3)
# boxes_corners.append(box3d.reshape(1, 8, 3))
centers.append(center)
sizes.append(size)
# labels.append(l)
labels.append(data["label"])
uids.append(data["uid"])
centers = np.concatenate(centers, axis=0)
sizes = np.concatenate(sizes, axis=0)
boxes_corners = np.concatenate(boxes_corners, axis=0)
return skipped, boxes_corners, centers, sizes, labels, uids
class TenFpsDataLoader(object):
def __init__(
self,
dataset_cfg,
class_names,
root_path=None,
gt_path=None,
logger=None,
frame_rate=1,
with_color_image=True,
subsample=2,
world_coordinate=True,
):
"""
Args:
dataset_cfg: EasyDict() with key
POINT_CLOUD_RANGE
POINT_FEATURE_ENCODING
DATA_PROCESSOR
class_names: list of str
root_path: path with all info for a scene_id
color, color_2det, depth, label, vote, ...
gt_path: xxx.json
just to get correct floor height
an2d_root: path to scene_id.json
or None
logger:
frame_rate: int
subsample: int
world_coordinate: bool
"""
self.root_path = root_path
# pipeline does box residual coding here
self.num_class = len(class_names)
self.dc = ARKitDatasetConfig()
depth_folder = os.path.join(self.root_path, "lowres_depth")
if not os.path.exists(depth_folder):
self.frame_ids = []
else:
depth_images = sorted(glob.glob(os.path.join(depth_folder, "*.png")))
self.frame_ids = [os.path.basename(x) for x in depth_images]
self.frame_ids = [x.split(".png")[0].split("_")[1] for x in self.frame_ids]
self.video_id = depth_folder.split('/')[-3]
self.frame_ids = [x for x in self.frame_ids]
self.frame_ids.sort()
self.intrinsics = {}
traj_file = os.path.join(self.root_path, 'lowres_wide.traj')
with open(traj_file) as f:
self.traj = f.readlines()
# convert traj to json dict
poses_from_traj = {}
for line in self.traj:
traj_timestamp = line.split(" ")[0]
poses_from_traj[f"{round(float(traj_timestamp), 3):.3f}"] = TrajStringToMatrix(line)[1].tolist()
if os.path.exists(traj_file):
# self.poses = json.load(open(traj_file))
self.poses = poses_from_traj
else:
self.poses = {}
# get intrinsics
for frame_id in self.frame_ids:
intrinsic_fn = os.path.join(self.root_path, "lowres_wide_intrinsics", f"{self.video_id}_{frame_id}.pincam")
if not os.path.exists(intrinsic_fn):
intrinsic_fn = os.path.join(self.root_path, "lowres_wide_intrinsics",
f"{self.video_id}_{float(frame_id) - 0.001:.3f}.pincam")
if not os.path.exists(intrinsic_fn):
intrinsic_fn = os.path.join(self.root_path, "lowres_wide_intrinsics",
f"{self.video_id}_{float(frame_id) + 0.001:.3f}.pincam")
if not os.path.exists(intrinsic_fn):
print("frame_id", frame_id)
print(intrinsic_fn)
self.intrinsics[frame_id] = st2_camera_intrinsics(intrinsic_fn)
# # intrinsic_fn = os.path.join(self.root_path, "camera.txt")
# intrinsic_fn = os.path.join(self.root_path, "color.pincam")
# if os.path.exists(intrinsic_fn):
# self.intrinsics = st2_camera_intrinsics(intrinsic_fn)
# else:
# self.intrinsics = None
self.frame_rate = frame_rate
self.subsample = subsample
self.with_color_image = with_color_image
self.world_coordinate = world_coordinate
if gt_path is not None and os.path.exists(gt_path):
skipped, gt_corners, gt_centers, gt_sizes, _, _ = extract_gt(gt_path)
self.gt_corners = gt_corners
self.gt_centers = gt_centers
self.gt_sizes = gt_sizes
else:
self.gt_corners = None
self.gt_centers = None
self.gt_sizes = None
def __iter__(self):
return self
def __len__(self):
return len(self.frame_ids)
def __getitem__(self, idx):
"""
Returns:
frame: a dict
{frame_id}: str
{depth}: (h, w)
{image}: (h, w)
{image_path}: str
{intrinsics}: np.array 3x3
{pose}: np.array 4x4
{pcd}: np.array (n, 3)
in world coordinate
{color}: (n, 3)
"""
frame_id = self.frame_ids[idx]
frame = {}
frame["frame_id"] = frame_id
fname = "{}_{}.png".format(self.video_id, frame_id)
# fname = "{}.png".format(frame_id)
depth_image_path = os.path.join(self.root_path, "lowres_depth", fname)
if not os.path.exists(depth_image_path):
print(depth_image_path)
image_path = os.path.join(self.root_path, "lowres_wide", fname)
        if not os.path.exists(image_path):
            print(image_path, "does not exist")
frame["depth"] = cv2.imread(depth_image_path, -1)
frame["image"] = cv2.imread(image_path)
frame["image_path"] = image_path
depth_height, depth_width = frame["depth"].shape
im_height, im_width, im_channels = frame["image"].shape
frame["intrinsics"] = copy.deepcopy(self.intrinsics[frame_id])
if str(frame_id) in self.poses.keys():
frame_pose = np.array(self.poses[str(frame_id)])
else:
for my_key in list(self.poses.keys()):
if abs(float(frame_id) - float(my_key)) < 0.005:
frame_pose = np.array(self.poses[str(my_key)])
frame["pose"] = copy.deepcopy(frame_pose)
        # plain float: np.float was removed in recent NumPy releases
        im_height_scale = float(depth_height) / im_height
        im_width_scale = float(depth_width) / im_width
if depth_height != im_height:
frame["image"] = np.zeros([depth_height, depth_width, 3]) # 288, 384, 3
frame["image"][48 : 48 + 192, 64 : 64 + 256, :] = cv2.imread(image_path)
(m, n, _) = frame["image"].shape
depth_image = frame["depth"] / 1000.0
rgb_image = frame["image"] / 255.0
pcd, rgb_feat = generate_point(
rgb_image,
depth_image,
frame["intrinsics"],
self.subsample,
self.world_coordinate,
frame_pose,
)
frame["pcd"] = pcd
frame["color"] = rgb_feat
return frame |
regionator/parse_object_db.py | lubber-de/neohabitat | 181 | 12763994 | '''
Parse the MC_object database from the Habitat Stratus backup.
There are still lots of unknowns:
* Many objects have container 0x20202020. They appear to be unused, but it's
unclear why.
* Some address strings have unprintable characters. It's unclear if this
was intentional or garbage data.
* Matchbook (class 49): there are 3 objects of this type, but they appear
to be overwritten or otherwise unused.
* When combined with MC_regions, we find lots of orphaned objects. This may
be because of broken relationships. Some appear to be pockets of avatars.
'''
import json, struct, sys
from collections import OrderedDict
STRUCT_ITEMS = (
'id',
'class',
'container',
'contype',
'x_pos',
'y_pos',
'style',
'gr_state',
'orientation',
'gr_width',
'nitty_bits',
'prop_length',
'property_data',
)
FORMAT = '> 3I 7H I 10x H 86s'
assert struct.calcsize(FORMAT) == 128
PARSERS = {
2: ('>HI', ['magic_type', 'magic_data']),
129: ('>H', ['state']),
6: ('>HW', ['open_flags', 'key']),
130: ('>H', ['open_flags']),
10: ('>HIH', ['current_page', 'text_id', 'last_page']),
12: ('>H', ['filled']),
13: ('>HW', ['open_flags', 'key']),
131: ('>HH', ['width', 'length']),
132: ('>xxxxxxi', ['connection']),
158: ('>H', ['open_flags']),
134: ('>H', ['open_flags']),
135: ('>HW', ['open_flags', 'key']),
136: ('>I', ['take']),
137: ('>H', ['open_flags']),
18: ('>HW', ['open_flags', 'key']), # + whoput array
20: ('>H', ['live']),
21: ('>H', ['state']),
22: ('>HWIH', ['open_flags', 'key', 'owner', 'locked']),
23: ('>HWi', ['open_flags', 'key', 'connection']),
25: ('>HH', ['count', 'effect']),
28: ('>HI20s', ['state', 'take', 'address']),
26: ('>H', ['charge']),
27: ('>H', ['state']),
29: ('>H', ['mass']),
30: ('>H', ['on']),
93: ('>H', ['flat_type']),
139: ('>H', ['on']),
140: ('>I', ['take']),
141: ('>H', ['live']),
5: ('>H', ['state']),
32: ('>HW', ['open_flags', 'key']),
33: ('>HI', ['magic_type', 'magic_data']),
98: ('>HWHHHHHHHHHHHH', ['open_flags', 'key', 'x_offset_1', 'y_offset_1',
'x_offset_2', 'y_offset_2', 'x_offset_3', 'y_offset_3', 'x_offset_4',
'y_offset_4', 'x_offset_5', 'y_offset_5', 'x_offset_6', 'y_offset_6']),
35: ('>H', ['pinpulled']),
38: ('>H', ['state']),
88: ('>HW', ['open_flags', 'key']),
40: ('>H', ['instant_what']),
42: ('>W', ['key_number']),
43: ('>H', ['is_magic']),
45: ('>HHxxxxH', ['lamp_state', 'wisher', 'live']),
46: ('>HI', ['magic_type', 'magic_data']),
48: ('>HI', ['mail_arrived', 'owner']),
# XXX can't find valid example to decode varstring properly
#49: ('>84s', ['mtext']),
52: ('>H', ['on']),
54: ('>I', ['text_id']),
96: ('>HW', ['open_flags', 'key']),
152: ('>HH', ['mass', 'picture']),
58: ('>H', ['mass']),
55: ('>HIH', ['current_page', 'text_id', 'last_page']),
60: ('>HI', ['magic_type', 'magic_data']),
61: ('>H', ['mass']),
149: ('>HH', ['base', 'pattern']),
150: ('>HW', ['open_flags', 'key']),
63: ('>H', ['on']),
64: ('>H', ['scan_type']),
#56: short sign, handled below
#57: sign, handled below
95: ('>H', ['charge']),
70: ('>HH', ['on', 'tape']),
153: ('>HH', ['width', 'height']),
92: ('>HHHHHHHH', ['trapezoid_type', 'upper_left_x', 'upper_right_x',
'lower_left_x', 'lower_right_x', 'height',
'pattern_x_size','pattern_y_size']), # + pattern array
97: ('>HI', ['magic_type', 'magic_data']),
155: ('>HW', ['open_flags', 'key']),
74: ('>HI20s', ['state', 'take', 'address']),
75: ('>H', ['event']),
76: ('>W', ['denom']),
87: ('>HHHHHH', ['trapezoid_type', 'upper_left_x', 'upper_right_x',
'lower_left_x', 'lower_right_x', 'height']),
85: ('>HWHH', ['open_flags', 'key', 'item_price',
'display_item']), # + prices array
86: ('>HW', ['open_flags', 'key']),
    80: ('>HHH', ['length', 'height', 'pattern']),  # 3 keys need 3 fields; '>HH' silently dropped 'pattern'
82: ('>H', ['wind_level']),
}
def decode_properties(buf, fmt, keys):
'''
Parse the properties from the given byte buffer, using the format string
and names of keys for each item in the format string. Returns a dict
of name/value pairs for all keys.
'''
fat_words = []
# Handle fatwords, which are 16-bits stored as 00 xx 00 yy.
if 'W' in fmt:
# Hack: our fatword handling doesn't count repeated format strings
idx = fmt.index('W')
if fmt[:idx].isdigit():
            raise ValueError("can't handle format strings with repeat counts")
base = 1 if not fmt[0].isalpha() else 0
fmt_chars = []
for i, c in enumerate(fmt):
if c == 'W':
c = 'I'
fat_words.append(keys[i - base])
fmt_chars.append(c)
fmt = ''.join(fmt_chars)
data = OrderedDict(zip(
keys,
struct.unpack(fmt, buf[:struct.calcsize(fmt)])))
# Replace each fat word with its actual value
for name in fat_words:
data[name] = ((data[name] >> 8) & 0xff00) | (data[name] & 0xff)
return data
def parse_array(buf, fmt, count):
'''
Unpack a number of same-sized items into an array
'''
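    # e.g. parse_array(b'\x00\x01\x00\x02', '>H', 2) -> [1, 2]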
items = []
item_size = struct.calcsize(fmt)
for i in range(count):
items += struct.unpack(fmt, buf[i * item_size:(i + 1) * item_size])
return items
def decode_text(buf):
'''
Decode a word-packed string (00 x 00 y ...), which is similar to a
fatword but is a string instead of int.
'''
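    # e.g. the word-packed bytes 00 48 00 69 carry the text 'Hi',
    # one character per 16-bit word.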
    return [chr(b) for b in buf[1::2]]
def parse_properties(cls, property_data):
'''
Decode basic properties and then class-specific ones
'''
data = OrderedDict()
args = PARSERS.get(cls)
if args:
data.update(decode_properties(property_data, *args))
remainder_off = struct.calcsize(args[0].replace('W', 'I'))
# Special class decoders for those not fully handled above
if cls == 56:
# short sign
data['text'] = decode_text(property_data[:10 * 2])
elif cls == 57:
# sign
data['text'] = decode_text(property_data[:40 * 2])
elif cls == 18:
# countertop: whoput = 5 ints
n = 5
data['whoput'] = parse_array(
property_data[remainder_off:remainder_off + n * 4],
'>I',
n)
elif cls == 92:
# super trapezoid: pattern = 32 halfwords
n = 32
data['pattern'] = parse_array(
property_data[remainder_off:remainder_off + n * 4],
'>H',
n)
elif cls == 85:
# vendo front: prices = 10 halfwords
n = 10
data['prices'] = parse_array(
property_data[remainder_off:remainder_off + n * 4],
'>H',
n)
return data
def decode_row(row):
'''
Parse a single row and return a dict of the items
'''
data = OrderedDict(zip(STRUCT_ITEMS, struct.unpack(FORMAT, row)))
data.update(parse_properties(data['class'], data['property_data']))
# Debug-dump the Matchbook class
#if data['class'] == 49:
# print ' '.join('%02x' % ord(c) for c in row)
# print data
# These fields tend to be all padding for many objects.
# Maybe these were deleted or superseded?
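    # 0x20202020 / 0x2020 are runs of ASCII spaces, i.e. untouched padding.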
data['deleted'] = (data['container'] == 0x20202020 and
data['contype'] == 0x2020)
# Always remove the raw property bytes, which we've decoded
del data['property_data']
# Clear text data if it's unprintable
if 'address' in data:
if any(c >= 0x80 for c in data['address']):
#print ' '.join('%02x' % ord(c) for c in row)
#print data
data['address'] = ''
else:
data['address'] = data['address'].decode('ascii')
return data
def main():
'''
Read each row from database and then decode it, dumping output to JSON
'''
items = []
with open(sys.argv[1], "rb") as fp:
while True:
row = fp.read(struct.calcsize(FORMAT))
if not row:
break
items.append(decode_row(row))
with open(sys.argv[2], 'w') as fp:
json.dump(items, fp, indent=2)
if __name__ == '__main__':
main()
|
ast/test_NodeVisitor.py | MaxTurchin/pycopy-lib | 126 | 12764014 | import sys
import ast
import io
class Visitor(ast.NodeVisitor):
def __init__(self, f):
self.f = f
def generic_visit(self, node):
self.f.write(ast.dump(node))
self.f.write("\n")
super().generic_visit(node)
def visit_Assign(self, node):
for n in node.targets:
self.visit(n)
self.f.write(" = ")
self.visit(node.value)
self.f.write("\n")
def visit_Name(self, node):
self.f.write(node.id)
def visit_Num(self, node):
self.f.write(str(node.n))
SRC = """\
a = 1
a = b = 1
"""
EXP = """\
Module(body=[Assign(targets=[Name(id='a', ctx=Store())], value=Num(n=1)), Assign(targets=[Name(id='a', ctx=Store()), Name(id='b', ctx=Store())], value=Num(n=1))])
a = 1
a = b = 1
"""
t = ast.parse(SRC)
buf = io.StringIO()
visitor = Visitor(buf)
visitor.visit(t)
assert buf.getvalue() == EXP
|
src/lib/SocketServer.py | DTenore/skulpt | 2,671 | 12764017 | import _sk_fail; _sk_fail._("SocketServer")
|
src/Sastrawi/Stemmer/Filter/TextNormalizer.py | ZenaNugraha/PySastrawi | 282 | 12764048 | import re
def normalize_text(text):
    result = text.lower()  # lowercase the text, including any unicode characters
result = re.sub(r'[^a-z0-9 -]', ' ', result, flags = re.IGNORECASE|re.MULTILINE)
result = re.sub(r'( +)', ' ', result, flags = re.IGNORECASE|re.MULTILINE)
return result.strip()
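# Illustrative example (the input/output shown here is not part of the original module):
#   normalize_text("Memakan Buah-buahan, Segar!!")  ->  'memakan buah-buahan segar'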
|
sympy/matrices/expressions/tests/test_funcmatrix.py | ovolve/sympy | 319 | 12764112 | from sympy import (symbols, FunctionMatrix, MatrixExpr, Lambda, Matrix)
def test_funcmatrix():
i, j = symbols('i,j')
X = FunctionMatrix(3, 3, Lambda((i, j), i - j))
assert X[1, 1] == 0
assert X[1, 2] == -1
assert X.shape == (3, 3)
assert X.rows == X.cols == 3
assert Matrix(X) == Matrix(3, 3, lambda i, j: i - j)
assert isinstance(X*X + X, MatrixExpr)
|
matplotlibTUT/plt12_contours.py | subshine/tutorials | 10,786 | 12764129 | # View more python tutorials on my Youtube and Youku channel!!!
# Youtube video tutorial: https://www.youtube.com/channel/UCdyjiB5H8Pu7aDTNVXTTpcg
# Youku video tutorial: http://i.youku.com/pythontutorial
# 12 - contours
"""
Please note, this script is for python3+.
If you are using python2+, please modify it accordingly.
Tutorial reference:
http://www.scipy-lectures.org/intro/matplotlib/matplotlib.html
"""
import matplotlib.pyplot as plt
import numpy as np
def f(x,y):
# the height function
return (1 - x / 2 + x**5 + y**3) * np.exp(-x**2 -y**2)
n = 256
x = np.linspace(-3, 3, n)
y = np.linspace(-3, 3, n)
X,Y = np.meshgrid(x, y)
# use plt.contourf to fill contours
# X, Y and value for (X,Y) point
plt.contourf(X, Y, f(X, Y), 8, alpha=.75, cmap=plt.cm.hot)
# use plt.contour to add contour lines
C = plt.contour(X, Y, f(X, Y), 8, colors='black', linewidths=.5)
# add labels to the contour lines
plt.clabel(C, inline=True, fontsize=10)
plt.xticks(())
plt.yticks(())
plt.show()
|
tools/win/linker_verbose_tracking.py | google-ar/chromium | 777 | 12764132 | # Copyright (c) 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
This script parses the /verbose output from the VC++ linker and uses it to
explain why a particular object file is being linked in. It parses records
like these:
Found "public: static void * __cdecl SkTLS::Get(void * (__cdecl*)(void)...
Referenced in chrome_crash_reporter_client_win.obj
Referenced in skia.lib(SkError.obj)
Loaded skia.lib(SkTLS.obj)
and then uses the information to answer questions such as "why is SkTLS.obj
being linked in. In this case it was requested by SkError.obj, and the process
is then repeated for SkError.obj. It traces the dependency tree back to a file
that was specified on the command line. Typically that file is part of a
source_set, and if that source_set is causing unnecessary code and data to be
pulled in then changing it to a static_library may reduce the binary size. See
crrev.com/2556603002 for an example of a ~900 KB savings from such a change.
In other cases the source_set to static_library fix does not work because some
of the symbols are required, while others are pulling in unwanted object files.
In these cases it can be necessary to see what symbol is causing one object file
to reference another. Removing or moving the problematic symbol can fix the
problem. See crrev.com/2559063002 for an example of such a change.
One complication is that there are sometimes multiple source files with the
same name, such as crc.c, which can make analysis more difficult or
ambiguous. If this becomes a blocking issue they it may be necessary to
temporarily rename the source file.
Object file name matching is case sensitive.
Typical output when run on chrome.dll verbose link output is:
>python tools\win\linker_verbose_tracking.py chrome_verbose_02.txt flac_crc
Database loaded - 11277 xrefs found
flac_crc.obj pulled in for symbol "_FLAC__crc8" by
stream_decoder.obj
bitwriter.obj
stream_decoder.obj pulled in for symbol "_FLAC__stream_decoder_new" by
stream_encoder.obj
bitwriter.obj pulled in for symbol "_FLAC__bitwriter_new" by
stream_encoder.obj
stream_encoder.obj pulled in for symbol "_FLAC__stream_encoder_new" by
Command-line obj file: audio_encoder.obj
"""
import pdb
import re
import sys
def ParseVerbose(input_file):
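  # Returns two dicts keyed by object file name (without extension): the list
  # of object files that referenced it, and the symbol that caused it to load.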
# This matches line like this:
# Referenced in skia.lib(SkError.obj)
# with the groups()[0] referring to the object file name without the file
# extension.
obj_match = re.compile('.*\((.*)\.obj\)')
# Prefix used for symbols that are referenced:
found_prefix = ' Found'
cross_refs = {}
cross_refed_symbols = {}
references = None
for line in open(input_file):
if line.startswith(found_prefix):
references = []
# Grab the symbol name
symbol = line[len(found_prefix):].strip()
if symbol[0] == '"':
# Strip off leading and trailing quotes if present.
symbol = symbol[1:-1]
continue
if type(references) == type([]):
sub_line = line.strip()
match = obj_match.match(sub_line)
# See if the line is part of the list of places where this symbol was
# referenced
if sub_line.count('Referenced ') > 0:
if match:
# This indicates a match that is xxx.lib(yyy.obj), so a referencing
# .obj file that was itself inside of a library. We discard the
# library name.
reference = match.groups()[0]
else:
# This indicates a match that is just a pure .obj file name
# I think this means that the .obj file was specified on the linker
# command line.
reference = ('Command-line obj file: ' +
sub_line[len('Referenced in '): -len('.obj')])
references.append(reference)
elif sub_line.count('Loaded ') > 0:
if match:
loaded = match.groups()[0]
cross_refs[loaded] = references
cross_refed_symbols[loaded] = symbol
references = None
if line.startswith('Finished pass 1'):
# Stop now because the remaining 90% of the verbose output is
# not of interest. Could probably use /VERBOSE:REF to trim out
# boring information.
break
return cross_refs, cross_refed_symbols
def TrackObj(cross_refs, cross_refed_symbols, obj_name):
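  # Walk backwards from obj_name through the reference graph, printing which
  # object files (and which symbol) caused each one to be pulled in.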
if obj_name.lower().endswith('.obj'):
obj_name = obj_name[:-len('.obj')]
# Keep track of which references we've already followed.
tracked = {}
# Initial set of object files that we are tracking.
targets = [obj_name]
printed = False
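  # Expand the reference graph breadth-first, at most 100 levels deep.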
for i in range(100):
new_targets = {}
for target in targets:
if not target in tracked:
tracked[target] = True
if target in cross_refs.keys():
symbol = cross_refed_symbols[target]
printed = True
print '%s.obj pulled in for symbol "%s" by' % (target, symbol)
for ref in cross_refs[target]:
print '\t%s.obj' % ref
new_targets[ref] = True
if len(new_targets) == 0:
break
print
targets = new_targets.keys()
if not printed:
print 'No references to %s.obj found.' % obj_name
def main():
if len(sys.argv) < 3:
print r'Usage: %s <verbose_output_file> <objfile>' % sys.argv[0]
print r'Sample: %s chrome_dll_verbose.txt SkTLS' % sys.argv[0]
return 0
cross_refs, cross_refed_symbols = ParseVerbose(sys.argv[1])
print 'Database loaded - %d xrefs found' % len(cross_refs)
TrackObj(cross_refs, cross_refed_symbols, sys.argv[2])
if __name__ == '__main__':
sys.exit(main())
|
setup.py | berryweinst/pytorch-attention | 149 | 12764133 | from distutils.core import setup
setup(
name='attention',
version='0.1.0',
author='tllake',
author_email='<EMAIL>',
packages=['attention'],
description='An attention function for PyTorch.',
    long_description=open('README.md').read())
|
switch.py | yangchuansheng/WSL-Distribution-Switcher | 1,917 | 12764152 | #!/usr/bin/env python3
# coding=utf-8
import glob
import sys
import os.path
import subprocess
from utils import Fore, parse_image_arg, probe_wsl, get_label, path_trans, handle_sigint
# handle arguments
handle_sigint()
if len(sys.argv) < 2:
# print usage information
print('usage: ./switch.py image[:tag]')
# check if there are any installations
basedir, lxpath, bashpath = probe_wsl(True)
if basedir:
#fix basedir to add LocalState\rootfs
basedir = os.path.join(basedir, 'LocalState')
names = glob.glob(os.path.join(basedir, 'rootfs*'))
not_debian = True
has_debian = False
if len(names) > 0:
print('\nThe following distributions are currently installed:\n')
for name in names:
active = os.path.basename(name) == 'rootfs'
name = get_label(name).split('_', 1)
if len(name) != 2:
continue
if name[0] == 'debian' and name[1] == '9':
has_debian = True
if active:
not_debian = False
print(' - %s%s%s:%s%s%s%s' % (Fore.YELLOW, name[0], Fore.RESET, Fore.YELLOW, name[1], Fore.RESET, ('%s*%s' % (Fore.GREEN, Fore.RESET) if active else '')))
if not_debian:
print()
if has_debian:
print('To switch back to the default distribution, specify %sdebian%s:%s9%s as the argument.' % (Fore.YELLOW, Fore.RESET, Fore.YELLOW, Fore.RESET))
else:
print('You do not seem to have the default distribution installed anymore.\nTo reinstall it, run %slxrun /uninstall%s and %slxrun /install%s from the command prompt.' % (Fore.GREEN, Fore.RESET, Fore.GREEN, Fore.RESET))
sys.exit(-1)
image, tag, fname, label = parse_image_arg(sys.argv[1], False)
# sanity checks
print('%s[*]%s Probing the Linux subsystem...' % (Fore.GREEN, Fore.RESET))
basedir, lxpath, bashpath = probe_wsl()
#fix basedir to add LocalState\rootfs
basedir = os.path.join(basedir, 'LocalState')
# read label of current distribution
clabel = get_label(os.path.join(basedir, 'rootfs'))
if not clabel:
clabel = 'debian_9'
if label == clabel:
		print('%s[!]%s No %s/.switch_label%s found, and the target rootfs is %sdebian%s:%s9%s. Cannot continue.' % (Fore.RED, Fore.RESET, Fore.BLUE, Fore.RESET, Fore.YELLOW, Fore.RESET, Fore.YELLOW, Fore.RESET))
print('%s[!]%s To fix this, run %secho some_tag > /.switch_label%s (replacing %ssome_tag%s with something like %sdebian_sid%s) from the current Bash terminal.' % (Fore.RED, Fore.RESET, Fore.GREEN, Fore.RESET, Fore.GREEN, Fore.RESET, Fore.GREEN, Fore.RESET))
sys.exit(-1)
else:
		print('%s[!]%s No %s/.switch_label%s found, assuming current rootfs is %sdebian%s:%s9%s.' % (Fore.RED, Fore.RESET, Fore.BLUE, Fore.RESET, Fore.YELLOW, Fore.RESET, Fore.YELLOW, Fore.RESET))
# sanity checks, take two
if clabel == label:
print('%s[!]%s The %s%s%s:%s%s%s rootfs is the current installation.' % (Fore.YELLOW, Fore.RESET, Fore.YELLOW, image, Fore.RESET, Fore.YELLOW, tag, Fore.RESET))
sys.exit(-1)
if not os.path.isdir(os.path.join(basedir, 'rootfs_' + label)):
print('%s[!]%s The %s%s%s:%s%s%s rootfs is not installed.' % (Fore.RED, Fore.RESET, Fore.YELLOW, image, Fore.RESET, Fore.YELLOW, tag, Fore.RESET))
sys.exit(-1)
# do the switch
print('%s[*]%s Moving current %srootfs%s to %srootfs_%s%s...' % (Fore.GREEN, Fore.RESET, Fore.BLUE, Fore.RESET, Fore.BLUE, clabel, Fore.RESET))
try:
subprocess.check_output(['cmd', '/C', 'move', path_trans(os.path.join(basedir, 'rootfs')), path_trans(os.path.join(basedir, 'rootfs_' + clabel))])
except subprocess.CalledProcessError as err:
print('%s[!]%s Failed to backup current %srootfs%s: %s' % (Fore.RED, Fore.RESET, Fore.BLUE, Fore.RESET, err))
sys.exit(-1)
print('%s[*]%s Moving desired %srootfs_%s%s to %srootfs%s...' % (Fore.GREEN, Fore.RESET, Fore.BLUE, label, Fore.RESET, Fore.BLUE, Fore.RESET))
try:
subprocess.check_output(['cmd', '/C', 'move', path_trans(os.path.join(basedir, 'rootfs_' + label)), path_trans(os.path.join(basedir, 'rootfs'))])
except subprocess.CalledProcessError as err:
print('%s[!]%s Failed to switch to new %srootfs%s: %s' % (Fore.RED, Fore.RESET, Fore.BLUE, Fore.RESET, err))
print('%s[*]%s Rolling back to old %srootfs%s...' % (Fore.YELLOW, Fore.RESET, Fore.BLUE, Fore.RESET))
try:
subprocess.check_output(['cmd', '/C', 'move', path_trans(os.path.join(basedir, 'rootfs_' + clabel)), path_trans(os.path.join(basedir, 'rootfs'))])
except subprocess.CalledProcessError as err:
print('%s[!]%s Failed to roll back to old %srootfs%s: %s' % (Fore.RED, Fore.RESET, Fore.BLUE, Fore.RESET, err))
print('%s[!]%s You are now the proud owner of one broken Linux subsystem! To fix it, run %slxrun /uninstall%s and %slxrun /install%s from the command prompt.' % (Fore.RED, Fore.RESET, Fore.GREEN, Fore.RESET, Fore.GREEN, Fore.RESET))
sys.exit(-1)
|