max_stars_repo_path (stringlengths 4-245) | max_stars_repo_name (stringlengths 7-115) | max_stars_count (int64 101-368k) | id (stringlengths 2-8) | content (stringlengths 6-1.03M)
---|---|---|---|---|
monai/_extensions/loader.py | tatuanb/monai_V1 | 2,971 | 24275 | # Copyright 2020 - 2021 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import platform
from _thread import interrupt_main
from contextlib import contextmanager
from glob import glob
from os import path
from threading import Timer
from typing import Optional
import torch
from monai.utils.module import get_torch_version_tuple, optional_import
dir_path = path.dirname(path.realpath(__file__))
@contextmanager
def timeout(time, message):
timer = None
try:
timer = Timer(time, interrupt_main)
timer.daemon = True
yield timer.start()
except KeyboardInterrupt as e:
if timer is not None and timer.is_alive():
raise e # interrupt from user?
raise TimeoutError(message) from e
finally:
if timer is not None:
try:
timer.cancel()
finally:
pass
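# Illustrative usage sketch: guarding a potentially blocking call with the
# `timeout` context manager defined above. The 5-second limit and
# `_slow_operation` are hypothetical placeholders, not MONAI API.
def _example_timeout_usage():
    import time

    def _slow_operation():
        time.sleep(1)

    with timeout(5, "_slow_operation took longer than 5 seconds"):
        _slow_operation()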
def load_module(
module_name: str, defines: Optional[dict] = None, verbose_build: bool = False, build_timeout: int = 300
):
"""
Handles the loading of c++ extension modules.
Args:
module_name: Name of the module to load.
Must match the name of the relevant source directory in the `_extensions` directory.
defines: Dictionary containing names and values of compilation defines.
verbose_build: Set to true to enable build logging.
build_timeout: Time in seconds before the build will throw an exception to prevent hanging.
"""
# Ensuring named module exists in _extensions directory.
module_dir = path.join(dir_path, module_name)
if not path.exists(module_dir):
raise ValueError(f"No extension module named {module_name}")
platform_str = f"_{platform.system()}_{platform.python_version()}_"
platform_str += "".join(f"{v}" for v in get_torch_version_tuple()[:2])
# Adding configuration to module name.
if defines is not None:
module_name = "_".join([module_name] + [f"{v}" for v in defines.values()])
# Gathering source files.
source = glob(path.join(module_dir, "**", "*.cpp"), recursive=True)
if torch.cuda.is_available():
source += glob(path.join(module_dir, "**", "*.cu"), recursive=True)
platform_str += f"_{torch.version.cuda}"
# Constructing compilation argument list.
define_args = [] if not defines else [f"-D {key}={defines[key]}" for key in defines]
# Ninja may be blocked by something out of our control.
# This will error if the build takes longer than expected.
with timeout(build_timeout, "Build appears to be blocked. Is there a stopped process building the same extension?"):
        load, _ = optional_import("torch.utils.cpp_extension", name="load")  # importing this may trigger some JIT config in pytorch
# This will either run the build or return the existing .so object.
name = module_name + platform_str.replace(".", "_")
module = load(
name=name, sources=source, extra_cflags=define_args, extra_cuda_cflags=define_args, verbose=verbose_build
)
return module
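# Illustrative usage sketch: `load_module` is typically called with the name of
# a source directory under `_extensions`. The "gmm" name and the define below
# are assumptions for illustration; building also requires a working C++ (and
# optionally CUDA) toolchain plus ninja.
def _example_load_module():
    gmm_module = load_module("gmm", defines={"CHANNEL_COUNT": 3}, verbose_build=True)
    return gmm_module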
|
tests/ext/test_paginator.py | Descent098/hyde | 804 | 24283 | # -*- coding: utf-8 -*-
"""
Use nose
`$ pip install nose`
`$ nosetests`
"""
from textwrap import dedent
from hyde.generator import Generator
from hyde.site import Site
from fswrap import File
TEST_SITE = File(__file__).parent.parent.child_folder('_test')
class TestPaginator(object):
def setUp(self):
TEST_SITE.make()
TEST_SITE.parent.child_folder(
'sites/test_paginator').copy_contents_to(TEST_SITE)
self.s = Site(TEST_SITE)
self.deploy = TEST_SITE.child_folder('deploy')
self.gen = Generator(self.s)
self.gen.load_site_if_needed()
self.gen.load_template_if_needed()
self.gen.generate_all()
def tearDown(self):
TEST_SITE.delete()
def test_page_no_paginator(self):
f = File(self.deploy.child('empty.txt'))
assert f.exists
def test_pages_of_one(self):
pages = ['pages_of_one.txt', 'page2/pages_of_one.txt',
'page3/pages_of_one.txt', 'page4/pages_of_one.txt']
files = [File(self.deploy.child(p)) for p in pages]
for f in files:
assert f.exists
page5 = File(self.deploy.child('page5/pages_of_one.txt'))
assert not page5.exists
def test_pages_of_one_content(self):
expected_page1_content = dedent('''\
Another Sad Post
/page2/pages_of_one.txt''')
expected_page2_content = dedent('''\
A Happy Post
/pages_of_one.txt
/page3/pages_of_one.txt''')
expected_page3_content = dedent('''\
An Angry Post
/page2/pages_of_one.txt
/page4/pages_of_one.txt''')
expected_page4_content = dedent('''\
A Sad Post
/page3/pages_of_one.txt
''')
page1 = self.deploy.child('pages_of_one.txt')
content = File(page1).read_all()
assert expected_page1_content == content
page2 = self.deploy.child('page2/pages_of_one.txt')
content = File(page2).read_all()
assert expected_page2_content == content
page3 = self.deploy.child('page3/pages_of_one.txt')
content = File(page3).read_all()
assert expected_page3_content == content
page4 = self.deploy.child('page4/pages_of_one.txt')
content = File(page4).read_all()
assert expected_page4_content == content
def test_pages_of_ten(self):
page1 = self.deploy.child('pages_of_ten.txt')
page2 = self.deploy.child('page2/pages_of_ten.txt')
assert File(page1).exists
assert not File(page2).exists
def test_pages_of_ten_depends(self):
depends = self.gen.deps['pages_of_ten.txt']
assert depends
assert len(depends) == 4
assert 'blog/sad-post.html' in depends
assert 'blog/another-sad-post.html' in depends
assert 'blog/angry-post.html' in depends
assert 'blog/happy-post.html' in depends
def test_pages_of_ten_content(self):
expected_content = dedent('''\
Another Sad Post
A Happy Post
An Angry Post
A Sad Post
''')
page = self.deploy.child('pages_of_ten.txt')
content = File(page).read_all()
assert expected_content == content
def test_pages_of_one_depends(self):
depends = self.gen.deps['pages_of_one.txt']
assert depends
assert len(depends) == 4
assert 'blog/sad-post.html' in depends
assert 'blog/another-sad-post.html' in depends
assert 'blog/angry-post.html' in depends
assert 'blog/happy-post.html' in depends
def test_custom_file_pattern(self):
page1 = self.deploy.child('custom_file_pattern.txt')
page2 = self.deploy.child('custom_file_pattern-2.txt')
assert File(page1).exists
assert File(page2).exists
|
veros/setups/global_flexible/global_flexible.py | AkasDutta/veros | 115 | 24299 | #!/usr/bin/env python
import os
import h5netcdf
import scipy.ndimage
from veros import veros_routine, veros_kernel, KernelOutput, VerosSetup, runtime_settings as rs, runtime_state as rst
from veros.variables import Variable, allocate
from veros.core.utilities import enforce_boundaries
from veros.core.operators import numpy as npx, update, at
import veros.tools
import veros.time
BASE_PATH = os.path.dirname(os.path.realpath(__file__))
DATA_FILES = veros.tools.get_assets("global_flexible", os.path.join(BASE_PATH, "assets.json"))
class GlobalFlexibleResolutionSetup(VerosSetup):
"""
Global model with flexible resolution.
"""
# global settings
min_depth = 10.0
max_depth = 5400.0
equatorial_grid_spacing_factor = 0.5
polar_grid_spacing_factor = None
@veros_routine
def set_parameter(self, state):
settings = state.settings
settings.identifier = "UNNAMED"
settings.nx = 360
settings.ny = 160
settings.nz = 60
settings.dt_mom = settings.dt_tracer = 900
settings.runlen = 86400 * 10
settings.x_origin = 90.0
settings.y_origin = -80.0
settings.coord_degree = True
settings.enable_cyclic_x = True
# friction
settings.enable_hor_friction = True
settings.A_h = 5e4
settings.enable_hor_friction_cos_scaling = True
settings.hor_friction_cosPower = 1
settings.enable_tempsalt_sources = True
settings.enable_implicit_vert_friction = True
settings.eq_of_state_type = 5
# isoneutral
settings.enable_neutral_diffusion = True
settings.K_iso_0 = 1000.0
settings.K_iso_steep = 50.0
settings.iso_dslope = 0.005
settings.iso_slopec = 0.005
settings.enable_skew_diffusion = True
# tke
settings.enable_tke = True
settings.c_k = 0.1
settings.c_eps = 0.7
settings.alpha_tke = 30.0
settings.mxl_min = 1e-8
settings.tke_mxl_choice = 2
settings.kappaM_min = 2e-4
settings.kappaH_min = 2e-5
settings.enable_kappaH_profile = True
settings.enable_tke_superbee_advection = True
# eke
settings.enable_eke = True
settings.eke_k_max = 1e4
settings.eke_c_k = 0.4
settings.eke_c_eps = 0.5
settings.eke_cross = 2.0
settings.eke_crhin = 1.0
settings.eke_lmin = 100.0
settings.enable_eke_superbee_advection = True
settings.enable_eke_isopycnal_diffusion = True
# idemix
settings.enable_idemix = False
settings.enable_eke_diss_surfbot = True
settings.eke_diss_surfbot_frac = 0.2
settings.enable_idemix_superbee_advection = True
settings.enable_idemix_hor_diffusion = True
# custom variables
state.dimensions["nmonths"] = 12
state.var_meta.update(
t_star=Variable("t_star", ("xt", "yt", "nmonths"), "", "", time_dependent=False),
s_star=Variable("s_star", ("xt", "yt", "nmonths"), "", "", time_dependent=False),
qnec=Variable("qnec", ("xt", "yt", "nmonths"), "", "", time_dependent=False),
qnet=Variable("qnet", ("xt", "yt", "nmonths"), "", "", time_dependent=False),
qsol=Variable("qsol", ("xt", "yt", "nmonths"), "", "", time_dependent=False),
divpen_shortwave=Variable("divpen_shortwave", ("zt",), "", "", time_dependent=False),
taux=Variable("taux", ("xt", "yt", "nmonths"), "", "", time_dependent=False),
tauy=Variable("tauy", ("xt", "yt", "nmonths"), "", "", time_dependent=False),
)
def _get_data(self, var, idx=None):
if idx is None:
idx = Ellipsis
else:
idx = idx[::-1]
kwargs = {}
if rst.proc_num > 1:
kwargs.update(
driver="mpio",
comm=rs.mpi_comm,
)
with h5netcdf.File(DATA_FILES["forcing"], "r", **kwargs) as forcing_file:
var_obj = forcing_file.variables[var]
return npx.array(var_obj[idx]).T
@veros_routine(dist_safe=False, local_variables=["dxt", "dyt", "dzt"])
def set_grid(self, state):
vs = state.variables
settings = state.settings
if settings.ny % 2:
raise ValueError("ny has to be an even number of grid cells")
vs.dxt = update(vs.dxt, at[...], 360.0 / settings.nx)
if self.equatorial_grid_spacing_factor is not None:
eq_spacing = self.equatorial_grid_spacing_factor * 160.0 / settings.ny
else:
eq_spacing = None
if self.polar_grid_spacing_factor is not None:
polar_spacing = self.polar_grid_spacing_factor * 160.0 / settings.ny
else:
polar_spacing = None
vs.dyt = update(
vs.dyt,
at[2:-2],
veros.tools.get_vinokur_grid_steps(
settings.ny, 160.0, eq_spacing, upper_stepsize=polar_spacing, two_sided_grid=True
),
)
vs.dzt = veros.tools.get_vinokur_grid_steps(settings.nz, self.max_depth, self.min_depth, refine_towards="lower")
@veros_routine
def set_coriolis(self, state):
vs = state.variables
settings = state.settings
vs.coriolis_t = update(
vs.coriolis_t, at[...], 2 * settings.omega * npx.sin(vs.yt[npx.newaxis, :] / 180.0 * settings.pi)
)
def _shift_longitude_array(self, vs, lon, arr):
wrap_i = npx.where((lon[:-1] < vs.xt.min()) & (lon[1:] >= vs.xt.min()))[0][0]
new_lon = npx.concatenate((lon[wrap_i:-1], lon[:wrap_i] + 360.0))
new_arr = npx.concatenate((arr[wrap_i:-1, ...], arr[:wrap_i, ...]))
return new_lon, new_arr
@veros_routine(dist_safe=False, local_variables=["kbot", "xt", "yt", "zt"])
def set_topography(self, state):
vs = state.variables
settings = state.settings
with h5netcdf.File(DATA_FILES["topography"], "r") as topography_file:
topo_x, topo_y, topo_z = (npx.array(topography_file.variables[k], dtype="float").T for k in ("x", "y", "z"))
topo_z = npx.minimum(topo_z, 0.0)
# smooth topography to match grid resolution
gaussian_sigma = (0.5 * len(topo_x) / settings.nx, 0.5 * len(topo_y) / settings.ny)
topo_z_smoothed = scipy.ndimage.gaussian_filter(topo_z, sigma=gaussian_sigma)
topo_z_smoothed = npx.where(topo_z >= -1, 0, topo_z_smoothed)
topo_x_shifted, topo_z_shifted = self._shift_longitude_array(vs, topo_x, topo_z_smoothed)
coords = (vs.xt[2:-2], vs.yt[2:-2])
z_interp = allocate(state.dimensions, ("xt", "yt"), local=False)
z_interp = update(
z_interp,
at[2:-2, 2:-2],
veros.tools.interpolate((topo_x_shifted, topo_y), topo_z_shifted, coords, kind="nearest", fill=False),
)
depth_levels = 1 + npx.argmin(npx.abs(z_interp[:, :, npx.newaxis] - vs.zt[npx.newaxis, npx.newaxis, :]), axis=2)
vs.kbot = update(vs.kbot, at[2:-2, 2:-2], npx.where(z_interp < 0.0, depth_levels, 0)[2:-2, 2:-2])
vs.kbot = npx.where(vs.kbot < settings.nz, vs.kbot, 0)
vs.kbot = enforce_boundaries(vs.kbot, settings.enable_cyclic_x, local=True)
# remove marginal seas
# (dilate to close 1-cell passages, fill holes, undo dilation)
marginal = scipy.ndimage.binary_erosion(
scipy.ndimage.binary_fill_holes(scipy.ndimage.binary_dilation(vs.kbot == 0))
)
vs.kbot = npx.where(marginal, 0, vs.kbot)
@veros_routine
def set_initial_conditions(self, state):
vs = state.variables
settings = state.settings
rpart_shortwave = 0.58
efold1_shortwave = 0.35
efold2_shortwave = 23.0
t_grid = (vs.xt[2:-2], vs.yt[2:-2], vs.zt)
xt_forc, yt_forc, zt_forc = (self._get_data(k) for k in ("xt", "yt", "zt"))
zt_forc = zt_forc[::-1]
        # coordinates must be monotonically increasing for the slicing and interpolation below
        assert (npx.diff(xt_forc) > 0).all()
        assert (npx.diff(yt_forc) > 0).all()
# determine slice to read from forcing file
data_subset = (
slice(
max(0, int(npx.argmax(xt_forc >= vs.xt.min())) - 1),
len(xt_forc) - max(0, int(npx.argmax(xt_forc[::-1] <= vs.xt.max())) - 1),
),
slice(
max(0, int(npx.argmax(yt_forc >= vs.yt.min())) - 1),
len(yt_forc) - max(0, int(npx.argmax(yt_forc[::-1] <= vs.yt.max())) - 1),
),
Ellipsis,
)
xt_forc = xt_forc[data_subset[0]]
yt_forc = yt_forc[data_subset[1]]
# initial conditions
temp_raw = self._get_data("temperature", idx=data_subset)[..., ::-1]
temp_data = veros.tools.interpolate((xt_forc, yt_forc, zt_forc), temp_raw, t_grid)
vs.temp = update(vs.temp, at[2:-2, 2:-2, :, :], (temp_data * vs.maskT[2:-2, 2:-2, :])[..., npx.newaxis])
salt_raw = self._get_data("salinity", idx=data_subset)[..., ::-1]
salt_data = veros.tools.interpolate((xt_forc, yt_forc, zt_forc), salt_raw, t_grid)
vs.salt = update(vs.salt, at[2:-2, 2:-2, :, :], (salt_data * vs.maskT[2:-2, 2:-2, :])[..., npx.newaxis])
# wind stress on MIT grid
time_grid = (vs.xt[2:-2], vs.yt[2:-2], npx.arange(12))
taux_raw = self._get_data("tau_x", idx=data_subset)
taux_data = veros.tools.interpolate((xt_forc, yt_forc, npx.arange(12)), taux_raw, time_grid)
vs.taux = update(vs.taux, at[2:-2, 2:-2, :], taux_data)
tauy_raw = self._get_data("tau_y", idx=data_subset)
tauy_data = veros.tools.interpolate((xt_forc, yt_forc, npx.arange(12)), tauy_raw, time_grid)
vs.tauy = update(vs.tauy, at[2:-2, 2:-2, :], tauy_data)
vs.taux = enforce_boundaries(vs.taux, settings.enable_cyclic_x)
vs.tauy = enforce_boundaries(vs.tauy, settings.enable_cyclic_x)
# Qnet and dQ/dT and Qsol
qnet_raw = self._get_data("q_net", idx=data_subset)
qnet_data = veros.tools.interpolate((xt_forc, yt_forc, npx.arange(12)), qnet_raw, time_grid)
vs.qnet = update(vs.qnet, at[2:-2, 2:-2, :], -qnet_data * vs.maskT[2:-2, 2:-2, -1, npx.newaxis])
qnec_raw = self._get_data("dqdt", idx=data_subset)
qnec_data = veros.tools.interpolate((xt_forc, yt_forc, npx.arange(12)), qnec_raw, time_grid)
vs.qnec = update(vs.qnec, at[2:-2, 2:-2, :], qnec_data * vs.maskT[2:-2, 2:-2, -1, npx.newaxis])
qsol_raw = self._get_data("swf", idx=data_subset)
qsol_data = veros.tools.interpolate((xt_forc, yt_forc, npx.arange(12)), qsol_raw, time_grid)
vs.qsol = update(vs.qsol, at[2:-2, 2:-2, :], -qsol_data * vs.maskT[2:-2, 2:-2, -1, npx.newaxis])
# SST and SSS
sst_raw = self._get_data("sst", idx=data_subset)
sst_data = veros.tools.interpolate((xt_forc, yt_forc, npx.arange(12)), sst_raw, time_grid)
vs.t_star = update(vs.t_star, at[2:-2, 2:-2, :], sst_data * vs.maskT[2:-2, 2:-2, -1, npx.newaxis])
sss_raw = self._get_data("sss", idx=data_subset)
sss_data = veros.tools.interpolate((xt_forc, yt_forc, npx.arange(12)), sss_raw, time_grid)
vs.s_star = update(vs.s_star, at[2:-2, 2:-2, :], sss_data * vs.maskT[2:-2, 2:-2, -1, npx.newaxis])
if settings.enable_idemix:
tidal_energy_raw = self._get_data("tidal_energy", idx=data_subset)
tidal_energy_data = veros.tools.interpolate((xt_forc, yt_forc), tidal_energy_raw, t_grid[:-1])
            mask_x, mask_y = (i + 2 for i in npx.indices((settings.nx, settings.ny)))
            mask_z = npx.maximum(0, vs.kbot[2:-2, 2:-2] - 1)
            tidal_energy_data = tidal_energy_data * vs.maskW[mask_x, mask_y, mask_z] / settings.rho_0
            vs.forc_iw_bottom = update(vs.forc_iw_bottom, at[2:-2, 2:-2], tidal_energy_data)
"""
Initialize penetration profile for solar radiation and store divergence in divpen
note that pen is set to 0.0 at the surface instead of 1.0 to compensate for the
shortwave part of the total surface flux
"""
swarg1 = vs.zw / efold1_shortwave
swarg2 = vs.zw / efold2_shortwave
pen = rpart_shortwave * npx.exp(swarg1) + (1.0 - rpart_shortwave) * npx.exp(swarg2)
pen = update(pen, at[-1], 0.0)
vs.divpen_shortwave = update(vs.divpen_shortwave, at[1:], (pen[1:] - pen[:-1]) / vs.dzt[1:])
vs.divpen_shortwave = update(vs.divpen_shortwave, at[0], pen[0] / vs.dzt[0])
@veros_routine
def set_forcing(self, state):
vs = state.variables
vs.update(set_forcing_kernel(state))
@veros_routine
def set_diagnostics(self, state):
settings = state.settings
diagnostics = state.diagnostics
diagnostics["cfl_monitor"].output_frequency = settings.dt_tracer * 100
diagnostics["tracer_monitor"].output_frequency = settings.dt_tracer * 100
diagnostics["snapshot"].output_frequency = 30 * 86400.0
diagnostics["overturning"].output_frequency = 360 * 86400
diagnostics["overturning"].sampling_frequency = 86400.0
diagnostics["energy"].output_frequency = 360 * 86400
diagnostics["energy"].sampling_frequency = 10 * settings.dt_tracer
diagnostics["averages"].output_frequency = 30 * 86400
diagnostics["averages"].sampling_frequency = settings.dt_tracer
average_vars = [
"surface_taux",
"surface_tauy",
"forc_temp_surface",
"forc_salt_surface",
"psi",
"temp",
"salt",
"u",
"v",
"w",
"Nsqr",
"Hd",
"rho",
"kappaH",
]
if settings.enable_skew_diffusion:
average_vars += ["B1_gm", "B2_gm"]
if settings.enable_TEM_friction:
average_vars += ["kappa_gm", "K_diss_gm"]
if settings.enable_tke:
average_vars += ["tke", "Prandtlnumber", "mxl", "tke_diss", "forc_tke_surface", "tke_surf_corr"]
if settings.enable_idemix:
average_vars += ["E_iw", "forc_iw_surface", "iw_diss", "c0", "v0"]
if settings.enable_eke:
average_vars += ["eke", "K_gm", "L_rossby", "L_rhines"]
diagnostics["averages"].output_variables = average_vars
@veros_routine
def after_timestep(self, state):
pass
@veros_kernel
def set_forcing_kernel(state):
vs = state.variables
settings = state.settings
t_rest = 30.0 * 86400.0
cp_0 = 3991.86795711963 # J/kg /K
year_in_seconds = veros.time.convert_time(1.0, "years", "seconds")
(n1, f1), (n2, f2) = veros.tools.get_periodic_interval(vs.time, year_in_seconds, year_in_seconds / 12.0, 12)
# linearly interpolate wind stress and shift from MITgcm U/V grid to this grid
vs.surface_taux = update(vs.surface_taux, at[:-1, :], f1 * vs.taux[1:, :, n1] + f2 * vs.taux[1:, :, n2])
vs.surface_tauy = update(vs.surface_tauy, at[:, :-1], f1 * vs.tauy[:, 1:, n1] + f2 * vs.tauy[:, 1:, n2])
if settings.enable_tke:
vs.forc_tke_surface = update(
vs.forc_tke_surface,
at[1:-1, 1:-1],
npx.sqrt(
(0.5 * (vs.surface_taux[1:-1, 1:-1] + vs.surface_taux[:-2, 1:-1]) / settings.rho_0) ** 2
+ (0.5 * (vs.surface_tauy[1:-1, 1:-1] + vs.surface_tauy[1:-1, :-2]) / settings.rho_0) ** 2
)
** (3.0 / 2.0),
)
# W/m^2 K kg/J m^3/kg = K m/s
t_star_cur = f1 * vs.t_star[..., n1] + f2 * vs.t_star[..., n2]
qqnec = f1 * vs.qnec[..., n1] + f2 * vs.qnec[..., n2]
qqnet = f1 * vs.qnet[..., n1] + f2 * vs.qnet[..., n2]
vs.forc_temp_surface = (
(qqnet + qqnec * (t_star_cur - vs.temp[..., -1, vs.tau])) * vs.maskT[..., -1] / cp_0 / settings.rho_0
)
s_star_cur = f1 * vs.s_star[..., n1] + f2 * vs.s_star[..., n2]
vs.forc_salt_surface = 1.0 / t_rest * (s_star_cur - vs.salt[..., -1, vs.tau]) * vs.maskT[..., -1] * vs.dzt[-1]
# apply simple ice mask
mask1 = vs.temp[:, :, -1, vs.tau] * vs.maskT[:, :, -1] > -1.8
mask2 = vs.forc_temp_surface > 0
ice = npx.logical_or(mask1, mask2)
vs.forc_temp_surface *= ice
vs.forc_salt_surface *= ice
# solar radiation
if settings.enable_tempsalt_sources:
vs.temp_source = (
(f1 * vs.qsol[..., n1, None] + f2 * vs.qsol[..., n2, None])
* vs.divpen_shortwave[None, None, :]
* ice[..., None]
* vs.maskT[..., :]
/ cp_0
/ settings.rho_0
)
return KernelOutput(
surface_taux=vs.surface_taux,
surface_tauy=vs.surface_tauy,
temp_source=vs.temp_source,
forc_tke_surface=vs.forc_tke_surface,
forc_temp_surface=vs.forc_temp_surface,
forc_salt_surface=vs.forc_salt_surface,
)
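# Illustrative usage sketch: a VerosSetup subclass like the one above is
# normally driven through its `setup()` and `run()` entry points; the driver
# below is a minimal assumption of that workflow, not part of this setup file.
def _example_run_setup():
    simulation = GlobalFlexibleResolutionSetup()
    simulation.setup()
    simulation.run()
    return simulation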
|
sagemaker/ssedata.py | cleveranjos/Rapid-ML-Gateway | 147 | 24303 | from enum import Enum
class ArgType(Enum):
"""
Represents data types that can be used
as arguments in different script functions.
"""
Undefined = -1
Empty = 0
String = 1
Numeric = 2
Mixed = 3
class ReturnType(Enum):
"""
Represents return types that can
be used in script evaluation.
"""
Undefined = -1
String = 0
Numeric = 1
Dual = 2
class FunctionType(Enum):
"""
Represents function types.
"""
Scalar = 0
Aggregation = 1
Tensor = 2
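# Illustrative usage sketch: these are plain value Enums, so incoming values
# can be mapped back to members by value or by name; the literals below are
# arbitrary examples.
def _example_enum_lookup():
    arg_type = ArgType(1)          # ArgType.String
    return_type = ReturnType.Dual  # value 2
    func_type = FunctionType["Tensor"]
    return arg_type, return_type, func_type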
|
ferminet/utils/system.py | shishaochen/ferminet | 469 | 24304 | # Lint as: python3
# Copyright 2020 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions to create different kinds of systems."""
from typing import Sequence
import attr
from ferminet.utils import elements
from ferminet.utils import units as unit_conversion
import numpy as np
# Default bond lengths in angstrom for some diatomics.
# Bond lengths from either the G3 dataset:
# 1. http://www.cse.anl.gov/OldCHMwebsiteContent/compmat/comptherm.htm
# 2. <NAME>, <NAME>, <NAME>, and <NAME>,
# J. Chem. Phys, 109, 42 (1998).
# or from NIST (https://cccbdb.nist.gov/diatomicexpbondx.asp).
diatomic_bond_lengths = {
'BeH': 1.348263,
'CN': 1.134797,
'ClF': 1.659091,
'F2': 1.420604,
'H2': 0.737164,
'HCl': 1.2799799,
'Li2': 2.77306,
'LiH': 1.639999,
'N2': 1.129978,
'NH': 1.039428,
'CO': 1.150338,
'BH': 1.2324,
'PN': 1.491,
'AlH': 1.648,
'AlN': 1.786,
}
# Default spin polarisation for a few diatomics of interest.
# Otherwise default to either singlet (doublet) for even (odd) numbers of
# electrons. Units: number of unpaired electrons.
diatomic_spin_polarisation = {
'B2': 2,
'O2': 2,
'NH': 2,
'AlN': 2,
}
@attr.s
class Atom: # pytype: disable=invalid-function-definition
"""Atom information for Hamiltonians.
The nuclear charge is inferred from the symbol if not given, in which case the
symbol must be the IUPAC symbol of the desired element.
Attributes:
symbol: Element symbol.
coords: An iterable of atomic coordinates. Always a list of floats and in
bohr after initialisation. Default: place atom at origin.
charge: Nuclear charge. Default: nuclear charge (atomic number) of atom of
the given name.
atomic_number: Atomic number associated with element. Default: atomic number
of element of the given symbol. Should match charge unless fractional
nuclear charges are being used.
units: String giving units of coords. Either bohr or angstrom. Default:
bohr. If angstrom, coords are converted to be in bohr and units to the
string 'bohr'.
coords_angstrom: list of atomic coordinates in angstrom.
coords_array: Numpy array of atomic coordinates in bohr.
element: elements.Element corresponding to the symbol.
"""
symbol = attr.ib()
coords = attr.ib(
converter=lambda xs: tuple(float(x) for x in xs),
default=(0.0, 0.0, 0.0)) # type: Sequence[float]
charge = attr.ib(converter=float)
atomic_number = attr.ib(converter=int)
units = attr.ib(
default='bohr', validator=attr.validators.in_(['bohr', 'angstrom']))
@charge.default
def _set_default_charge(self):
return self.element.atomic_number
@atomic_number.default
def _set_default_atomic_number(self):
return self.element.atomic_number
def __attrs_post_init__(self):
if self.units == 'angstrom':
self.coords = [unit_conversion.angstrom2bohr(x) for x in self.coords]
self.units = 'bohr'
@property
def coords_angstrom(self):
return [unit_conversion.bohr2angstrom(x) for x in self.coords]
@property
def coords_array(self):
if not hasattr(self, '_coords_arr'):
self._coords_arr = np.array(self.coords)
return self._coords_arr
@property
def element(self):
return elements.SYMBOLS[self.symbol]
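# Illustrative usage sketch: per the Atom docstring above, angstrom coordinates
# are converted to bohr during initialisation. The 0.7 angstrom value is an
# arbitrary example.
def _example_atom_units():
  hydrogen = Atom(symbol='H', coords=(0.0, 0.0, 0.7), units='angstrom')
  # After __attrs_post_init__, hydrogen.units == 'bohr' and hydrogen.coords
  # holds the converted values; coords_angstrom converts back.
  return hydrogen.coords, hydrogen.coords_angstrom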
def atom(symbol, spins=None, charge=0):
"""Return configuration for a single atom.
Args:
symbol: The atomic symbol from the periodic table
spins (optional): A tuple with the number of spin-up and spin-down electrons
charge (optional): If zero (default), create a neutral atom, otherwise
create an anion if charge is negative or cation if charge is positive.
Returns:
A list with a single Atom object located at zero, and a tuple with the spin
configuration of the electrons.
"""
atomic_number = elements.SYMBOLS[symbol].atomic_number
if charge > atomic_number:
raise ValueError('Cannot have a cation with charge larger than the '
'atomic number. Charge: {}, Atomic Number{}'.format(
charge, atomic_number))
if spins is None:
spin_polarisation = elements.ATOMIC_NUMS[atomic_number-charge].spin_config
nalpha = (atomic_number + spin_polarisation) // 2
spins = (nalpha, atomic_number - charge - nalpha)
return [Atom(symbol=symbol, coords=(0.0, 0.0, 0.0))], spins
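# Illustrative usage sketch: `atom` returns a single-Atom configuration plus a
# spin tuple. For neutral Li (Z=3, doublet ground state) this is expected to
# give spins == (2, 1); the element choice is arbitrary.
def _example_atom_config():
  molecule_cfg, spins = atom('Li')
  return molecule_cfg, spins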
def diatomic(symbol1, symbol2, bond_length, spins=None, charge=0, units='bohr'):
"""Return configuration for a diatomic molecule."""
if spins is None:
atomic_number_1 = elements.SYMBOLS[symbol1].atomic_number
atomic_number_2 = elements.SYMBOLS[symbol2].atomic_number
total_charge = atomic_number_1 + atomic_number_2 - charge
if total_charge % 2 == 0:
spins = (total_charge // 2, total_charge // 2)
else:
spins = ((total_charge + 1)// 2, (total_charge - 1) // 2)
return [
Atom(symbol=symbol1, coords=(0.0, 0.0, bond_length/2.0), units=units),
Atom(symbol=symbol2, coords=(0.0, 0.0, -bond_length/2.0), units=units)
], spins
def molecule(symbol, bond_length=0.0, units='bohr'):
"""Hardcoded molecular geometries from the original Fermi Net paper."""
if symbol in diatomic_bond_lengths:
if symbol[-1] == '2':
symbs = [symbol[:-1], symbol[:-1]]
else: # Split a camel-case string on the second capital letter
split_idx = None
for i in range(1, len(symbol)):
if split_idx is None and symbol[i].isupper():
split_idx = i
if split_idx is None:
raise ValueError('Cannot find second atomic symbol: {}'.format(symbol))
symbs = [symbol[:split_idx], symbol[split_idx:]]
atomic_number_1 = elements.SYMBOLS[symbs[0]].atomic_number
atomic_number_2 = elements.SYMBOLS[symbs[1]].atomic_number
total_charge = atomic_number_1 + atomic_number_2
if symbol in diatomic_spin_polarisation:
spin_pol = diatomic_spin_polarisation[symbol]
      # split the unpaired electrons as (n_alpha, n_beta) so the two counts sum to total_charge
      spins = ((total_charge + spin_pol) // 2, (total_charge - spin_pol) // 2)
elif total_charge % 2 == 0:
spins = (total_charge // 2, total_charge // 2)
else:
spins = ((total_charge + 1)// 2, (total_charge - 1) // 2)
if bond_length == 0.0:
bond_length = diatomic_bond_lengths[symbol]
units = 'angstrom'
return diatomic(symbs[0], symbs[1],
bond_length,
units=units,
spins=spins)
if bond_length != 0.0:
raise ValueError('Bond length argument only appropriate for diatomics.')
if symbol == 'CH4':
return [
Atom(symbol='C', coords=(0.0, 0.0, 0.0), units='bohr'),
Atom(symbol='H', coords=(1.18886, 1.18886, 1.18886), units='bohr'),
Atom(symbol='H', coords=(-1.18886, -1.18886, 1.18886), units='bohr'),
Atom(symbol='H', coords=(1.18886, -1.18886, -1.18886), units='bohr'),
Atom(symbol='H', coords=(-1.18886, 1.18886, -1.18886), units='bohr'),
], (5, 5)
if symbol == 'NH3':
return [
Atom(symbol='N', coords=(0.0, 0.0, 0.22013), units='bohr'),
Atom(symbol='H', coords=(0.0, 1.77583, -0.51364), units='bohr'),
Atom(symbol='H', coords=(1.53791, -0.88791, -0.51364), units='bohr'),
Atom(symbol='H', coords=(-1.53791, -0.88791, -0.51364), units='bohr'),
], (5, 5)
if symbol in ('C2H4', 'ethene', 'ethylene'):
return [
Atom(symbol='C', coords=(0.0, 0.0, 1.26135), units='bohr'),
Atom(symbol='C', coords=(0.0, 0.0, -1.26135), units='bohr'),
Atom(symbol='H', coords=(0.0, 1.74390, 2.33889), units='bohr'),
Atom(symbol='H', coords=(0.0, -1.74390, 2.33889), units='bohr'),
Atom(symbol='H', coords=(0.0, 1.74390, -2.33889), units='bohr'),
Atom(symbol='H', coords=(0.0, -1.74390, -2.33889), units='bohr'),
], (8, 8)
if symbol in ('C4H6', 'bicyclobutane'):
return [
Atom(symbol='C', coords=(0.0, 2.13792, 0.58661), units='bohr'),
Atom(symbol='C', coords=(0.0, -2.13792, 0.58661), units='bohr'),
Atom(symbol='C', coords=(1.41342, 0.0, -0.58924), units='bohr'),
Atom(symbol='C', coords=(-1.41342, 0.0, -0.58924), units='bohr'),
Atom(symbol='H', coords=(0.0, 2.33765, 2.64110), units='bohr'),
Atom(symbol='H', coords=(0.0, 3.92566, -0.43023), units='bohr'),
Atom(symbol='H', coords=(0.0, -2.33765, 2.64110), units='bohr'),
Atom(symbol='H', coords=(0.0, -3.92566, -0.43023), units='bohr'),
Atom(symbol='H', coords=(2.67285, 0.0, -2.19514), units='bohr'),
Atom(symbol='H', coords=(-2.67285, 0.0, -2.19514), units='bohr'),
], (15, 15)
raise ValueError('Not a recognized molecule: {}'.format(symbol))
def hn(n, r, charge=0, units='bohr'):
"""Return a hydrogen chain with n atoms and separation r."""
m = n - charge # number of electrons
if m % 2 == 0:
spins = (m//2, m//2)
else:
spins = ((m+1)//2, (m-1)//2)
lim = r * (n-1) / 2.0
return [Atom(symbol='H', coords=(0.0, 0.0, z), units=units)
for z in np.linspace(-lim, lim, n)], spins
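# Illustrative usage sketch: `hn` places n hydrogen atoms evenly along z. For
# n=4 and r=2.0 bohr the chain spans z in [-3, 3] and the neutral system gets a
# (2, 2) spin configuration; the values are arbitrary examples.
def _example_h4_chain():
  atoms, spins = hn(4, 2.0)
  return atoms, spins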
def h4_circle(r, theta, units='bohr'):
"""Return 4 hydrogen atoms arranged in a circle, a failure case of CCSD(T)."""
return [
Atom(symbol='H',
coords=(r*np.cos(theta), r*np.sin(theta), 0.0),
units=units),
Atom(symbol='H',
coords=(-r*np.cos(theta), r*np.sin(theta), 0.0),
units=units),
Atom(symbol='H',
coords=(r*np.cos(theta), -r*np.sin(theta), 0.0),
units=units),
Atom(symbol='H',
coords=(-r*np.cos(theta), -r*np.sin(theta), 0.0),
units=units)
], (2, 2)
|
awx/main/tests/unit/test_signals.py | gitEdouble/awx | 11,396 | 24308 | from awx.main import signals
class TestCleanupDetachedLabels:
def test_cleanup_detached_labels_on_deleted_parent(self, mocker):
mock_labels = [mocker.MagicMock(), mocker.MagicMock()]
mock_instance = mocker.MagicMock()
mock_instance.labels.all = mocker.MagicMock()
mock_instance.labels.all.return_value = mock_labels
mock_labels[0].is_candidate_for_detach.return_value = True
mock_labels[1].is_candidate_for_detach.return_value = False
signals.cleanup_detached_labels_on_deleted_parent(None, mock_instance)
mock_labels[0].is_candidate_for_detach.assert_called_with()
mock_labels[1].is_candidate_for_detach.assert_called_with()
mock_labels[0].delete.assert_called_with()
mock_labels[1].delete.assert_not_called()
|
data/data_augment.py | ZHANGHeng19931123/MutualGuide | 124 | 24327 | <reponame>ZHANGHeng19931123/MutualGuide
#!/usr/bin/python
# -*- coding: utf-8 -*-
import cv2
import numpy as np
import random
import math
import torch
def _crop(image, boxes, labels, p=0.75, min_iou=0.75, max_iou=0.25):
def matrix_iou(a, b):
lt = np.maximum(a[:, np.newaxis, :2], b[:, :2])
rb = np.minimum(a[:, np.newaxis, 2:], b[:, 2:])
area_i = np.prod(rb - lt, axis=2) * (lt < rb).all(axis=2)
area_a = np.prod(a[:, 2:] - a[:, :2], axis=1)
return area_i / area_a[:, np.newaxis]
if random.random() > p:
return (image, boxes, labels)
(height, width, _) = image.shape
while True:
scale = random.uniform(0.5, 1.)
min_ratio = max(0.5, scale * scale)
max_ratio = min(2, 1. / scale / scale)
ratio = math.sqrt(random.uniform(min_ratio, max_ratio))
w = int(scale * ratio * width)
h = int(scale / ratio * height)
l = random.randrange(width - w)
t = random.randrange(height - h)
roi = np.array((l, t, l + w, t + h))
iou = matrix_iou(boxes, roi[np.newaxis])
iou = iou[iou < min_iou]
iou = iou[iou >= max_iou]
if len(iou) > 0:
continue
image_t = image[roi[1]:roi[3], roi[0]:roi[2]]
centers = (boxes[:, :2] + boxes[:, 2:]) / 2
mask = np.logical_and(roi[:2] < centers, centers < roi[2:]).all(axis=1)
boxes_t = boxes[mask].copy()
labels_t = labels[mask].copy()
if len(boxes_t) == 0:
continue
boxes_t[:, :2] = np.maximum(boxes_t[:, :2], roi[:2])
boxes_t[:, :2] -= roi[:2]
boxes_t[:, 2:] = np.minimum(boxes_t[:, 2:], roi[2:])
boxes_t[:, 2:] -= roi[:2]
return (image_t, boxes_t, labels_t)
def _distort(image):
def _convert(image, alpha=1, beta=0):
tmp = image.astype(float) * alpha + beta
tmp[tmp < 0] = 0
tmp[tmp > 255] = 255
image[:] = tmp
image = image.copy()
if random.randrange(2):
_convert(image, beta=random.uniform(-32, 32))
if random.randrange(2):
_convert(image, alpha=random.uniform(0.5, 1.5))
image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
if random.randrange(2):
tmp = image[:, :, 0].astype(int) + random.randint(-18, 18)
tmp %= 180
image[:, :, 0] = tmp
if random.randrange(2):
_convert(image[:, :, 1], alpha=random.uniform(0.5, 1.5))
image = cv2.cvtColor(image, cv2.COLOR_HSV2BGR)
return image
def _expand(image, boxes, labels, p=0.75):
if random.random() > p:
return (image, boxes, labels)
(height, width, depth) = image.shape
while True:
scale = random.uniform(1, 2)
min_ratio = max(0.5, 1. / scale / scale)
max_ratio = min(2, scale * scale)
ratio = math.sqrt(random.uniform(min_ratio, max_ratio))
ws = scale * ratio
hs = scale / ratio
if ws < 1 or hs < 1:
continue
w = int(ws * width)
h = int(hs * height)
left = random.randint(0, w - width)
top = random.randint(0, h - height)
boxes_t = boxes.copy()
boxes_t[:, :2] += (left, top)
boxes_t[:, 2:] += (left, top)
expand_image = np.ones((h, w, depth)) * 114.0
expand_image[top:top + height, left:left + width] = image
image = expand_image
return (image, boxes_t, labels)
def _mirror(image, boxes):
(_, width, _) = image.shape
if random.randrange(2):
image = image[:, ::-1]
boxes = boxes.copy()
boxes[:, 0::2] = width - boxes[:, 2::-2]
return (image, boxes)
def preproc_for_test(image, insize, mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225), swap=(2, 0, 1)):
image = cv2.resize(image, (insize, insize), interpolation=cv2.INTER_LINEAR)
image = image.astype(np.float32)
image = image[:, :, ::-1]
image /= 255.0
if mean is not None:
image -= mean
if std is not None:
image /= std
image = image.transpose(swap)
image = np.ascontiguousarray(image, dtype=np.float32)
return image
def preproc_for_train(image, targets, insize):
boxes = targets[:, :-1].copy()
labels = targets[:, -1].copy()
if len(boxes) == 0:
targets = np.zeros((1, 5))
image = preproc_for_test(image, insize)
return (torch.from_numpy(image), targets)
image_o = image.copy()
targets_o = targets.copy()
(height_o, width_o, _) = image_o.shape
boxes_o = targets_o[:, :-1]
labels_o = targets_o[:, -1]
boxes_o[:, 0::2] /= width_o
boxes_o[:, 1::2] /= height_o
labels_o = np.expand_dims(labels_o, 1)
targets_o = np.hstack((boxes_o, labels_o))
image_t = _distort(image)
(image_t, boxes, labels) = _crop(image_t, boxes, labels)
(image_t, boxes, labels) = _expand(image_t, boxes, labels)
(image_t, boxes) = _mirror(image_t, boxes)
(height, width, _) = image_t.shape
image_t = preproc_for_test(image_t, insize)
boxes = boxes.copy()
boxes[:, 0::2] /= width
boxes[:, 1::2] /= height
b_w = (boxes[:, 2] - boxes[:, 0]) * 1.
b_h = (boxes[:, 3] - boxes[:, 1]) * 1.
mask_b = np.minimum(b_w, b_h) > (8. / insize)
boxes_t = boxes[mask_b]
labels_t = labels[mask_b].copy()
if len(boxes_t) == 0:
image = preproc_for_test(image_o, insize)
return (torch.from_numpy(image), targets_o)
labels_t = np.expand_dims(labels_t, 1)
targets_t = np.hstack((boxes_t, labels_t))
return (torch.from_numpy(image_t), targets_t)
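# Illustrative usage sketch: `preproc_for_train` expects a BGR uint8 image (as
# read by cv2) and an Nx5 array of absolute-pixel boxes with a trailing class
# label; the shapes and the 320 input size below are arbitrary assumptions.
def _example_preproc_for_train():
    image = np.zeros((480, 640, 3), dtype=np.uint8)
    targets = np.array([[50.0, 60.0, 200.0, 220.0, 1.0]])
    tensor, targets_out = preproc_for_train(image, targets, insize=320)
    return tensor.shape, targets_out.shape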
def detection_collate(batch):
""" Custom collate fn for images and boxes """
targets = []
imgs = []
for _, sample in enumerate(batch):
for _, tup in enumerate(sample):
if torch.is_tensor(tup):
imgs.append(tup)
elif isinstance(tup, type(np.empty(0))):
annos = torch.from_numpy(tup).float()
targets.append(annos)
return (torch.stack(imgs, 0), targets)
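# Illustrative usage sketch: `detection_collate` is intended as the collate_fn
# of a detection DataLoader whose samples are (image tensor, Nx5 numpy target)
# pairs; the two dummy samples below are arbitrary.
def _example_detection_collate():
    batch = [
        (torch.zeros(3, 320, 320), np.array([[0.1, 0.1, 0.5, 0.5, 1.0]])),
        (torch.zeros(3, 320, 320), np.array([[0.2, 0.2, 0.6, 0.6, 2.0]])),
    ]
    images, targets = detection_collate(batch)
    return images.shape, [t.shape for t in targets]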
|
preprocessing/reid_preprocessing.py | Mithilesh1609/assembled-cnn | 363 | 24345 | # This code is adapted from the https://github.com/tensorflow/models/tree/master/official/r1/resnet.
# ==========================================================================================
# NAVER’s modifications are Copyright 2020 NAVER corp. All rights reserved.
# ==========================================================================================
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Provides utilities to preprocess images.
Training images are sampled using the provided bounding boxes, and subsequently
cropped to the sampled bounding box. Images are additionally flipped randomly,
then resized to the target output size (without aspect-ratio preservation).
Images used during evaluation are resized (with aspect-ratio preservation) and
centrally cropped.
All images undergo mean color subtraction.
Note that these steps are colloquially referred to as "ResNet preprocessing,"
and they differ from "VGG preprocessing," which does not use bounding boxes
and instead does an aspect-preserving resize followed by random crop during
training. (These both differ from "Inception preprocessing," which introduces
color distortion steps.)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.python.ops import control_flow_ops
from preprocessing import autoaugment
_R_MEAN = 123.68
_G_MEAN = 116.78
_B_MEAN = 103.94
_CHANNEL_MEANS = [_R_MEAN, _G_MEAN, _B_MEAN]
_MEAN = [0.485, 0.456, 0.406]
_STD = [0.229, 0.224, 0.225]
# The lower bound for the smallest side of the image for aspect-preserving
# resizing. For example, if an image is 500 x 1000, it will be resized to
# _RESIZE_MIN x (_RESIZE_MIN * 2).
_RESIZE_MIN = 256
def central_crop(image, crop_height, crop_width):
"""Performs central crops of the given image list.
Args:
image: a 3-D image tensor
crop_height: the height of the image following the crop.
crop_width: the width of the image following the crop.
Returns:
3-D tensor with cropped image.
"""
shape = tf.shape(image)
height, width = shape[0], shape[1]
amount_to_be_cropped_h = (height - crop_height)
crop_top = amount_to_be_cropped_h // 2
amount_to_be_cropped_w = (width - crop_width)
crop_left = amount_to_be_cropped_w // 2
return tf.slice(
image, [crop_top, crop_left, 0], [crop_height, crop_width, -1])
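# Illustrative usage sketch: central_crop keeps the centered window, e.g. a
# 256x256 input cropped to 224x224 discards a 16-pixel border on every side.
# The dummy tensor below is an arbitrary example.
def _example_central_crop():
  dummy_image = tf.zeros([256, 256, 3])
  return central_crop(dummy_image, 224, 224)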
def _mean_image_subtraction(image, means, num_channels):
"""Subtracts the given means from each image channel.
For example:
means = [123.68, 116.779, 103.939]
image = _mean_image_subtraction(image, means)
Note that the rank of `image` must be known.
Args:
image: a tensor of size [height, width, C].
means: a C-vector of values to subtract from each channel.
num_channels: number of color channels in the image that will be distorted.
Returns:
the centered image.
Raises:
ValueError: If the rank of `image` is unknown, if `image` has a rank other
than three or if the number of channels in `image` doesn't match the
number of values in `means`.
"""
if image.get_shape().ndims != 3:
raise ValueError('Input must be of size [height, width, C>0]')
if len(means) != num_channels:
raise ValueError('len(means) must match the number of channels')
# We have a 1-D tensor of means; convert to 3-D.
# Note(b/130245863): we explicitly call `broadcast` instead of simply
# expanding dimensions for better performance.
means = tf.broadcast_to(means, tf.shape(image))
return image - means
def _normalization(image, means, stds, num_channels):
"""Subtracts the given means from each image channel.
For example:
means = [123.68, 116.779, 103.939]
image = _mean_image_subtraction(image, means)
Note that the rank of `image` must be known.
Args:
image: a tensor of size [height, width, C].
means: a C-vector of values to subtract from each channel.
num_channels: number of color channels in the image that will be distorted.
Returns:
the centered image.
Raises:
ValueError: If the rank of `image` is unknown, if `image` has a rank other
than three or if the number of channels in `image` doesn't match the
number of values in `means`.
"""
if image.get_shape().ndims != 3:
raise ValueError('Input must be of size [height, width, C>0]')
if len(means) != num_channels:
raise ValueError('len(means) must match the number of channels')
# We have a 1-D tensor of means; convert to 3-D.
# Note(b/130245863): we explicitly call `broadcast` instead of simply
# expanding dimensions for better performance.
means = tf.broadcast_to(means, tf.shape(image))
stds = tf.broadcast_to(stds, tf.shape(image))
return (image - means) / stds
def _smallest_size_at_least(height, width, resize_min):
"""Computes new shape with the smallest side equal to `smallest_side`.
Computes new shape with the smallest side equal to `smallest_side` while
preserving the original aspect ratio.
Args:
height: an int32 scalar tensor indicating the current height.
width: an int32 scalar tensor indicating the current width.
resize_min: A python integer or scalar `Tensor` indicating the size of
the smallest side after resize.
Returns:
new_height: an int32 scalar tensor indicating the new height.
new_width: an int32 scalar tensor indicating the new width.
"""
resize_min = tf.cast(resize_min, tf.float32)
# Convert to floats to make subsequent calculations go smoothly.
height, width = tf.cast(height, tf.float32), tf.cast(width, tf.float32)
smaller_dim = tf.minimum(height, width)
scale_ratio = resize_min / smaller_dim
# Convert back to ints to make heights and widths that TF ops will accept.
new_height = tf.cast(height * scale_ratio, tf.int32)
new_width = tf.cast(width * scale_ratio, tf.int32)
return new_height, new_width
def _aspect_preserving_resize(image, resize_min):
"""Resize images preserving the original aspect ratio.
Args:
image: A 3-D image `Tensor`.
resize_min: A python integer or scalar `Tensor` indicating the size of
the smallest side after resize.
Returns:
resized_image: A 3-D tensor containing the resized image.
"""
shape = tf.shape(image)
height, width = shape[0], shape[1]
new_height, new_width = _smallest_size_at_least(height, width, resize_min)
return _resize_image(image, new_height, new_width)
def _resize_image(image, height, width):
"""Simple wrapper around tf.resize_images.
This is primarily to make sure we use the same `ResizeMethod` and other
details each time.
Args:
image: A 3-D image `Tensor`.
height: The target height for the resized image.
width: The target width for the resized image.
Returns:
resized_image: A 3-D tensor containing the resized image. The first two
dimensions have the shape [height, width].
"""
return tf.image.resize_images(
image, [height, width], method=tf.image.ResizeMethod.BILINEAR,
align_corners=False)
def _ten_crop(image, crop_h, crop_w):
def _crop(img, center_offset):
# input img shape is [h,w,c]
img = tf.image.extract_glimpse(
[img], [crop_w, crop_h], offsets=tf.to_float([center_offset]),
centered=False, normalized=False)
return tf.squeeze(img, 0)
def _crop5(img):
# img shape is [h,w,c]
im_shape = tf.shape(image)
height, width = im_shape[0], im_shape[1]
ch, cw = tf.to_int32(height / 2), tf.to_int32(width / 2) # center offset
hh, hw = tf.to_int32(crop_h / 2), tf.to_int32(crop_w / 2) # half crop size
ct = _crop(img, [ch, cw])
lu = _crop(img, [hh, hw])
ld = _crop(img, [height - hh, hw])
ru = _crop(img, [hh, width - hw])
rd = _crop(img, [height - hh, width - hw])
return tf.stack([lu, ru, ld, rd, ct])
lhs = _crop5(image)
rhs = tf.image.flip_left_right(lhs)
return tf.concat([lhs, rhs], axis=0)
def preprocess_image_ten_crop(image_buffer, output_height, output_width, num_channels):
image = tf.image.decode_jpeg(image_buffer, channels=num_channels)
image = _aspect_preserving_resize(image, _RESIZE_MIN)
images = _ten_crop(image, output_height, output_width)
images.set_shape([10, output_height, output_width, num_channels])
images = tf.map_fn(lambda x: _mean_image_subtraction(x, _CHANNEL_MEANS, num_channels), images)
return images
def _crop(image, offset_height, offset_width, crop_height, crop_width):
"""Crops the given image using the provided offsets and sizes.
Note that the method doesn't assume we know the input image size but it does
assume we know the input image rank.
Args:
image: an image of shape [height, width, channels].
offset_height: a scalar tensor indicating the height offset.
offset_width: a scalar tensor indicating the width offset.
crop_height: the height of the cropped image.
crop_width: the width of the cropped image.
Returns:
the cropped (and resized) image.
Raises:
InvalidArgumentError: if the rank is not 3 or if the image dimensions are
less than the crop size.
"""
original_shape = tf.shape(image)
rank_assertion = tf.Assert(
tf.equal(tf.rank(image), 3),
['Rank of image must be equal to 3.'])
with tf.control_dependencies([rank_assertion]):
cropped_shape = tf.stack([crop_height, crop_width, original_shape[2]])
size_assertion = tf.Assert(
tf.logical_and(
tf.greater_equal(original_shape[0], crop_height),
tf.greater_equal(original_shape[1], crop_width)),
['Crop size greater than the image size.'])
offsets = tf.to_int32(tf.stack([offset_height, offset_width, 0]))
# Use tf.slice instead of crop_to_bounding box as it accepts tensors to
# define the crop size.
with tf.control_dependencies([size_assertion]):
image = tf.slice(image, offsets, cropped_shape)
return tf.reshape(image, cropped_shape)
def _get_random_crop_coord(image_list, crop_height, crop_width):
"""Crops the given list of images.
The function applies the same crop to each image in the list. This can be
effectively applied when there are multiple image inputs of the same
dimension such as:
image, depths, normals = _random_crop([image, depths, normals], 120, 150)
Args:
image_list: a list of image tensors of the same dimension but possibly
varying channel.
crop_height: the new height.
crop_width: the new width.
Returns:
    a 1-D int32 tensor [offset_height, offset_width, crop_height, crop_width]
    describing the sampled crop window (shared by all images in the list).
Raises:
ValueError: if there are multiple image inputs provided with different size
or the images are smaller than the crop dimensions.
"""
if not image_list:
raise ValueError('Empty image_list.')
# Compute the rank assertions.
rank_assertions = []
for i in range(len(image_list)):
image_rank = tf.rank(image_list[i])
rank_assert = tf.Assert(
tf.equal(image_rank, 3),
['Wrong rank for tensor %s [expected] [actual]',
image_list[i].name, 3, image_rank])
rank_assertions.append(rank_assert)
image_shape = control_flow_ops.with_dependencies(
[rank_assertions[0]],
tf.shape(image_list[0]))
image_height = image_shape[0]
image_width = image_shape[1]
crop_size_assert = tf.Assert(
tf.logical_and(
tf.greater_equal(image_height, crop_height),
tf.greater_equal(image_width, crop_width)),
['Crop size greater than the image size.'])
asserts = [rank_assertions[0], crop_size_assert]
for i in range(1, len(image_list)):
image = image_list[i]
asserts.append(rank_assertions[i])
shape = control_flow_ops.with_dependencies([rank_assertions[i]],
tf.shape(image))
height = shape[0]
width = shape[1]
height_assert = tf.Assert(
tf.equal(height, image_height),
['Wrong height for tensor %s [expected][actual]',
image.name, height, image_height])
width_assert = tf.Assert(
tf.equal(width, image_width),
['Wrong width for tensor %s [expected][actual]',
image.name, width, image_width])
asserts.extend([height_assert, width_assert])
# Create a random bounding box.
#
# Use tf.random_uniform and not numpy.random.rand as doing the former would
# generate random numbers at graph eval time, unlike the latter which
# generates random numbers at graph definition time.
max_offset_height = control_flow_ops.with_dependencies(
asserts, tf.reshape(image_height - crop_height + 1, []))
max_offset_width = control_flow_ops.with_dependencies(
asserts, tf.reshape(image_width - crop_width + 1, []))
offset_height = tf.random_uniform(
[], maxval=max_offset_height, dtype=tf.int32)
offset_width = tf.random_uniform(
[], maxval=max_offset_width, dtype=tf.int32)
return tf.stack([offset_height, offset_width, crop_height, crop_width])
def _random_crop(image_list, crop_height, crop_width):
"""Crops the given list of images.
The function applies the same crop to each image in the list. This can be
effectively applied when there are multiple image inputs of the same
dimension such as:
image, depths, normals = _random_crop([image, depths, normals], 120, 150)
Args:
image_list: a list of image tensors of the same dimension but possibly
varying channel.
crop_height: the new height.
crop_width: the new width.
Returns:
the image_list with cropped images.
Raises:
ValueError: if there are multiple image inputs provided with different size
or the images are smaller than the crop dimensions.
"""
if not image_list:
raise ValueError('Empty image_list.')
# Compute the rank assertions.
rank_assertions = []
for i in range(len(image_list)):
image_rank = tf.rank(image_list[i])
rank_assert = tf.Assert(
tf.equal(image_rank, 3),
['Wrong rank for tensor %s [expected] [actual]',
image_list[i].name, 3, image_rank])
rank_assertions.append(rank_assert)
image_shape = control_flow_ops.with_dependencies(
[rank_assertions[0]],
tf.shape(image_list[0]))
image_height = image_shape[0]
image_width = image_shape[1]
crop_size_assert = tf.Assert(
tf.logical_and(
tf.greater_equal(image_height, crop_height),
tf.greater_equal(image_width, crop_width)),
['Crop size greater than the image size.'])
asserts = [rank_assertions[0], crop_size_assert]
for i in range(1, len(image_list)):
image = image_list[i]
asserts.append(rank_assertions[i])
shape = control_flow_ops.with_dependencies([rank_assertions[i]],
tf.shape(image))
height = shape[0]
width = shape[1]
height_assert = tf.Assert(
tf.equal(height, image_height),
['Wrong height for tensor %s [expected][actual]',
image.name, height, image_height])
width_assert = tf.Assert(
tf.equal(width, image_width),
['Wrong width for tensor %s [expected][actual]',
image.name, width, image_width])
asserts.extend([height_assert, width_assert])
# Create a random bounding box.
#
# Use tf.random_uniform and not numpy.random.rand as doing the former would
# generate random numbers at graph eval time, unlike the latter which
# generates random numbers at graph definition time.
max_offset_height = control_flow_ops.with_dependencies(
asserts, tf.reshape(image_height - crop_height + 1, []))
max_offset_width = control_flow_ops.with_dependencies(
asserts, tf.reshape(image_width - crop_width + 1, []))
offset_height = tf.random_uniform(
[], maxval=max_offset_height, dtype=tf.int32)
offset_width = tf.random_uniform(
[], maxval=max_offset_width, dtype=tf.int32)
return [_crop(image, offset_height, offset_width,
crop_height, crop_width) for image in image_list]
def pad_shorter(image):
shape = tf.shape(image)
height, width = shape[0], shape[1]
larger_dim = tf.maximum(height, width)
h1 = (larger_dim - height) // 2
h2 = (larger_dim - height) - h1
w1 = tf.maximum((larger_dim - width) // 2, 0)
w2 = (larger_dim - width) - w1
pad_shape = [[h1, h2], [w1, w2], [0, 0]]
return tf.pad(image, pad_shape)
def apply_with_random_selector(x, func, num_cases):
"""Computes func(x, sel), with sel sampled from [0...num_cases-1].
Args:
x: input Tensor.
func: Python function to apply.
num_cases: Python int32, number of cases to sample sel from.
Returns:
The result of func(x, sel), where func receives the value of the
selector as a python integer, but sel is sampled dynamically.
"""
sel = tf.random_uniform([], maxval=num_cases, dtype=tf.int32)
# Pass the real x only to one of the func calls.
return control_flow_ops.merge([
func(control_flow_ops.switch(x, tf.equal(sel, case))[1], case)
for case in range(num_cases)])[0]
def resize_func(image, size, method):
if method == 0:
image = _resize_image(image, _RESIZE_MIN, _RESIZE_MIN)
image = _random_crop([image], size[0], size[1])[0]
else:
image = _resize_image(image, size[0], size[1])
return image
def preprocess_image(image_buffer,
output_height,
output_width,
num_channels,
dct_method='',
is_training=False,
autoaugment_type=None,
eval_large_resolution=True):
if is_training:
image = tf.image.decode_jpeg(image_buffer, channels=num_channels, dct_method=dct_method)
image = apply_with_random_selector(
image,
lambda x, method: resize_func(x, [output_height, output_width], method),
num_cases=2)
image.set_shape([output_height, output_width, 3])
image = tf.to_float(image)
image = tf.image.random_flip_left_right(image)
if autoaugment_type:
tf.logging.info('Apply AutoAugment policy {}'.format(autoaugment_type))
image = tf.clip_by_value(image, 0.0, 255.0)
dtype = image.dtype
image = tf.cast(image, dtype=tf.uint8)
image = autoaugment.distort_image_with_autoaugment(
image, autoaugment_type)
image = tf.cast(image, dtype=dtype)
image.set_shape([output_height, output_width, num_channels])
else:
if eval_large_resolution:
output_height = int(output_height * (1.0 / 0.875))
output_width = int(output_width * (1.0 / 0.875))
# For validation, we want to decode, resize, then just crop the middle.
image = tf.image.decode_jpeg(image_buffer, channels=num_channels, dct_method=dct_method)
image = _resize_image(image, output_height, output_width)
image = tf.to_float(image)
image.set_shape([output_height, output_width, num_channels])
return _mean_image_subtraction(image, _CHANNEL_MEANS, num_channels)
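# Illustrative usage sketch: evaluation-time preprocessing of an encoded JPEG
# string; the 224x224 output size and 3 channels are assumptions, not values
# mandated by this module.
def _example_preprocess_eval(image_buffer):
  return preprocess_image(image_buffer, output_height=224, output_width=224,
                          num_channels=3, is_training=False)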
|
src/robot/libdocpkg/jsonbuilder.py | rdagum/robotframework | 7,073 | 24348 | # Copyright 2008-2015 Nokia Networks
# Copyright 2016- Robot Framework Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os.path
from robot.running import ArgInfo, ArgumentSpec
from robot.errors import DataError
from .model import LibraryDoc, KeywordDoc
class JsonDocBuilder:
def build(self, path):
spec = self._parse_spec_json(path)
return self.build_from_dict(spec)
def build_from_dict(self, spec):
libdoc = LibraryDoc(name=spec['name'],
doc=spec['doc'],
version=spec['version'],
type=spec['type'],
scope=spec['scope'],
doc_format=spec['docFormat'],
source=spec['source'],
lineno=int(spec.get('lineno', -1)))
libdoc.data_types.update(spec['dataTypes'].get('enums', []))
libdoc.data_types.update(spec['dataTypes'].get('typedDicts', []))
libdoc.inits = [self._create_keyword(kw) for kw in spec['inits']]
libdoc.keywords = [self._create_keyword(kw) for kw in spec['keywords']]
return libdoc
def _parse_spec_json(self, path):
if not os.path.isfile(path):
raise DataError("Spec file '%s' does not exist." % path)
with open(path) as json_source:
libdoc_dict = json.load(json_source)
return libdoc_dict
def _create_keyword(self, kw):
return KeywordDoc(name=kw.get('name'),
args=self._create_arguments(kw['args']),
doc=kw['doc'],
shortdoc=kw['shortdoc'],
tags=kw['tags'],
source=kw['source'],
lineno=int(kw.get('lineno', -1)))
def _create_arguments(self, arguments):
spec = ArgumentSpec()
setters = {
ArgInfo.POSITIONAL_ONLY: spec.positional_only.append,
ArgInfo.POSITIONAL_ONLY_MARKER: lambda value: None,
ArgInfo.POSITIONAL_OR_NAMED: spec.positional_or_named.append,
ArgInfo.VAR_POSITIONAL: lambda value: setattr(spec, 'var_positional', value),
ArgInfo.NAMED_ONLY_MARKER: lambda value: None,
ArgInfo.NAMED_ONLY: spec.named_only.append,
ArgInfo.VAR_NAMED: lambda value: setattr(spec, 'var_named', value),
}
for arg in arguments:
name = arg['name']
setters[arg['kind']](name)
default = arg.get('defaultValue')
if default is not None:
spec.defaults[name] = default
arg_types = arg['types']
if not spec.types:
spec.types = {}
spec.types[name] = tuple(arg_types)
return spec
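# Usage sketch (hedged example, not part of the original module): build a
# LibraryDoc from a Libdoc JSON spec on disk. "MyLibrary.json" is a
# hypothetical path, e.g. a spec produced with Libdoc's JSON output format.
#
#     builder = JsonDocBuilder()
#     libdoc = builder.build("MyLibrary.json")
#     print(libdoc.name, [kw.name for kw in libdoc.keywords])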
|
src/ros/rosmake/test/test_parallel_build.py | jungleni/ros_code_reading | 742 | 24354 | <filename>src/ros/rosmake/test/test_parallel_build.py
#!/usr/bin/env python
# Copyright (c) 2009, <NAME>, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the <NAME>, Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import sys
import unittest
from rosmake import parallel_build
class TestDependencyTracker(unittest.TestCase):
def setUp(self):
self.deps = {}
self.deps1 = {}
self.deps["a"] = [ "b", "c", "d","e"]
self.deps1["a"] = ["b"]
self.deps["b"] = ["c"]
self.deps1["b"] = ["c"]
self.deps["d"] = ["c", "e"]
self.deps1["d"] = ["c", "e"]
self.dt = parallel_build.DependencyTracker()
self.dt.load_fake_deps(self.deps, self.deps1)
def test_deps_1(self):
self.assertEquals(self.deps1["a"], self.dt.get_deps_1("a"))
self.assertEquals(self.deps1["b"], self.dt.get_deps_1("b"))
self.assertEquals(self.deps1["d"], self.dt.get_deps_1("d"))
def test_deps(self):
self.assertEquals(self.deps["a"], self.dt.get_deps("a"))
self.assertEquals(self.deps["b"], self.dt.get_deps("b"))
self.assertEquals(self.deps["d"], self.dt.get_deps("d"))
def test_not_package(self):
self.assertEquals([], self.dt.get_deps("This is not a valid package name"))
self.assertEquals([], self.dt.get_deps_1("This is not a valid package name"))
class TestBuildQueue(unittest.TestCase):
def setUp(self):
deps = {}
deps1 = {}
deps1["a"] = ["b"]
deps["a"] = ["b", "c", "d", "e", "f"]
deps1["b"] = ["c"]
deps["b"] = ["c", "d", "e", "f"]
deps1["c"] = ["d"]
deps["c"] = ["d", "e", "f"]
deps1["d"] = ["e"]
deps["d"] = ["e", "f"]
deps["e"] = ["f"]
deps1["e"] = ["f"]
deps["f"] = []
deps1["f"] = []
self.serial_tracker = parallel_build.DependencyTracker()
self.serial_tracker.load_fake_deps(deps, deps1)
deps = {}
deps1 = {}
deps["a"] = ["b", "c", "d", "e", "f"]
deps1["a"] = ["b", "c", "d", "e", "f"]
deps["b"] = []
deps1["b"] = []
deps["c"] = []
deps1["c"] = []
deps["d"] = []
deps1["d"] = []
deps["e"] = []
deps1["e"] = []
deps["f"] = []
deps1["f"] = []
self.parallel_tracker = parallel_build.DependencyTracker()
self.parallel_tracker.load_fake_deps(deps, deps1)
# full queue
def test_full_build(self):
bq = parallel_build.BuildQueue(["a", "b", "c", "d", "e", "f"], self.serial_tracker)
self.assertFalse(bq.is_done())
self.assertFalse(bq.succeeded())
self.assertEqual("f", bq.get_valid_package())
self.assertEqual(0, len(bq.built))
bq.return_built("f")
self.assertEqual(1, len(bq.built))
self.assertFalse(bq.is_done())
self.assertFalse(bq.succeeded())
self.assertEqual("e", bq.get_valid_package())
bq.return_built("e")
self.assertEqual(2, len(bq.built))
self.assertFalse(bq.is_done())
self.assertFalse(bq.succeeded())
self.assertEqual("d", bq.get_valid_package())
bq.return_built("d")
self.assertEqual(3, len(bq.built))
self.assertFalse(bq.is_done())
self.assertFalse(bq.succeeded())
self.assertEqual("c", bq.get_valid_package())
bq.return_built("c")
self.assertEqual(4, len(bq.built))
self.assertFalse(bq.is_done())
self.assertFalse(bq.succeeded())
self.assertEqual("b", bq.get_valid_package())
bq.return_built("b")
self.assertEqual(5, len(bq.built))
self.assertFalse(bq.is_done())
self.assertFalse(bq.succeeded())
self.assertEqual("a", bq.get_valid_package())
self.assertFalse(bq.is_done())
self.assertFalse(bq.succeeded())
bq.return_built("a")
self.assertEqual(6, len(bq.built))
self.assertTrue (bq.is_done())
self.assertTrue (bq.succeeded())
# partial build
def test_partial_build(self):
bq = parallel_build.BuildQueue(["d", "e", "f"], self.serial_tracker)
self.assertFalse(bq.is_done())
self.assertFalse(bq.succeeded())
self.assertEqual("f", bq.get_valid_package())
self.assertEqual(0, len(bq.built))
bq.return_built("f")
self.assertEqual(1, len(bq.built))
self.assertFalse(bq.is_done())
self.assertFalse(bq.succeeded())
self.assertEqual("e", bq.get_valid_package())
bq.return_built("e")
self.assertEqual(2, len(bq.built))
self.assertFalse(bq.is_done())
self.assertFalse(bq.succeeded())
self.assertEqual("d", bq.get_valid_package())
self.assertFalse(bq.is_done())
self.assertFalse(bq.succeeded())
bq.return_built("d")
self.assertEqual(3, len(bq.built))
self.assertTrue(bq.is_done())
self.assertTrue(bq.succeeded())
# abort early
def test_abort_early(self):
bq = parallel_build.BuildQueue(["a", "b", "c", "d", "e", "f"], self.serial_tracker)
self.assertFalse(bq.is_done())
self.assertFalse(bq.succeeded())
self.assertEqual(0, len(bq.built))
self.assertEqual("f", bq.get_valid_package())
bq.return_built("f")
self.assertEqual(1, len(bq.built))
self.assertFalse(bq.is_done())
self.assertFalse(bq.succeeded())
self.assertEqual("e", bq.get_valid_package())
bq.return_built("e")
self.assertEqual(2, len(bq.built))
self.assertFalse(bq.is_done())
self.assertFalse(bq.succeeded())
self.assertEqual("d", bq.get_valid_package())
bq.return_built("d")
self.assertEqual(3, len(bq.built))
self.assertFalse(bq.is_done())
self.assertFalse(bq.succeeded())
bq.stop()
self.assertTrue(bq.is_done())
self.assertFalse(bq.succeeded())
self.assertEqual(None, bq.get_valid_package())
# many parallel
def test_parallel_build(self):
bq = parallel_build.BuildQueue(["a", "b", "c", "d", "e", "f"], self.parallel_tracker)
self.assertFalse(bq.is_done())
self.assertFalse(bq.succeeded())
dependents = ["b", "c", "d", "e", "f"]
count = 0
total = 6
while len(dependents) > 0:
result= bq.get_valid_package()
done = len(bq.built)
pkgs = bq._total_pkgs
self.assertTrue(result in dependents)
#print result, done, pkgs
dependents.remove(result)
self.assertEqual(count, done)
self.assertEqual(total, pkgs)
self.assertFalse(bq.is_done())
self.assertFalse(bq.succeeded())
bq.return_built(result)
count = count + 1
self.assertFalse(bq.is_done())
self.assertFalse(bq.succeeded())
self.assertEqual("a", bq.get_valid_package())
self.assertFalse(bq.is_done())
self.assertFalse(bq.succeeded())
bq.return_built("a")
self.assertTrue (bq.is_done())
self.assertTrue (bq.succeeded())
# stalled(future)
|
qiskit_optimization/converters/linear_inequality_to_penalty.py | X-Libor/qiskit-optimization | 109 | 24369 | # This code is part of Qiskit.
#
# (C) Copyright IBM 2020, 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Converter to convert a problem with inequality constraints to unconstrained with penalty terms."""
import logging
from typing import Optional, Union, Tuple, List, Dict
import numpy as np
from .quadratic_program_converter import QuadraticProgramConverter
from ..exceptions import QiskitOptimizationError
from ..problems.constraint import Constraint, ConstraintSense
from ..problems.quadratic_objective import QuadraticObjective
from ..problems.quadratic_program import QuadraticProgram
from ..problems.variable import Variable
logger = logging.getLogger(__name__)
class LinearInequalityToPenalty(QuadraticProgramConverter):
r"""Convert linear inequality constraints to penalty terms of the objective function.
There are some linear constraints which do not require slack variables to
construct penalty terms [1]. This class supports the following inequality constraints.
.. math::
        \begin{array}{lcl}
\text { Inequality constraint } & & \text { Penalty term } \\
x \leq y & \rightarrow & P(x-x y) \\
x \geq y & \rightarrow & P(y-x y) \\
\sum_{i=1}^n x_i \leq 1, n \geq 2 & \rightarrow & P \sum_{i, j : i < j} x_i x_j\\
\sum_{i=1}^n x_i \geq n-1, n \geq 2 & \rightarrow & P \sum_{i, j : i < j} (1 - x_i) (1 - x_j)
\end{array}
    Note that x, y, and :math:`x_i` are binary variables, and P is a penalty factor,
where the value of P is automatically determined or supplied by users.
If constraints match with any of the patterns, they are converted into penalty terms and added
to the objective function. Otherwise, constraints are kept as is.
References:
[1]: <NAME>, et al. (2019),
A Tutorial on Formulating and Using QUBO Models,
`arXiv:1811.11538 <https://arxiv.org/abs/1811.11538>`_.
"""
def __init__(self, penalty: Optional[float] = None) -> None:
"""
Args:
penalty: Penalty factor to scale equality constraints that are added to objective.
If None is passed, a penalty factor will be automatically calculated on
every conversion.
"""
self._src_num_vars: Optional[int] = None
self._dst: Optional[QuadraticProgram] = None
self._penalty: Optional[float] = penalty
self._should_define_penalty: bool = penalty is None
def convert(self, problem: QuadraticProgram) -> QuadraticProgram:
r"""Convert inequality constraints into penalty terms of the objective function.
This methods converts the following patterns where x, y, and :math:`x_i` are binary variables
and P is a penalty factor.
.. math::
            \begin{array}{lcl}
\text { Inequality constraint } & & \text { Penalty term } \\
x \leq y & \rightarrow & P(x-x y) \\
x \geq y & \rightarrow & P(y-x y) \\
\sum_{i=1}^n x_i \leq 1, n \geq 2 & \rightarrow & P \sum_{i, j : i < j} x_i x_j\\
\sum_{i=1}^n x_i \geq n-1, n \geq 2 & \rightarrow & P \sum_{i, j : i < j} (1 - x_i) (1 - x_j)
\end{array}
Args:
problem: The problem to be solved.
Returns:
The converted problem
Raises:
QiskitOptimizationError: If an unsupported-type variable exists.
"""
# create empty QuadraticProgram model
self._src_num_vars = problem.get_num_vars()
self._dst = QuadraticProgram(name=problem.name)
# If no penalty was given, set the penalty coefficient by _auto_define_penalty()
if self._should_define_penalty:
penalty = self._auto_define_penalty(problem)
else:
penalty = self._penalty
# Set variables
for x in problem.variables:
if x.vartype == Variable.Type.CONTINUOUS:
self._dst.continuous_var(x.lowerbound, x.upperbound, x.name)
elif x.vartype == Variable.Type.BINARY:
self._dst.binary_var(x.name)
elif x.vartype == Variable.Type.INTEGER:
self._dst.integer_var(x.lowerbound, x.upperbound, x.name)
else:
raise QiskitOptimizationError(f"Unsupported vartype: {x.vartype}")
# get original objective terms
offset = problem.objective.constant
linear = problem.objective.linear.to_dict()
quadratic = problem.objective.quadratic.to_dict()
sense = problem.objective.sense.value
# convert linear constraints into penalty terms
for constraint in problem.linear_constraints:
# special constraint check function here
if not self._is_matched_constraint(problem, constraint):
self._dst.linear_constraint(
constraint.linear.coefficients,
constraint.sense,
constraint.rhs,
constraint.name,
)
continue
conv_offset, conv_linear, conv_quadratic, varmap = self._conversion_table(constraint)
# constant part
offset += sense * penalty * conv_offset
# linear parts of penalty
for j, j_2 in varmap.items():
            # if j already exists in the linear terms dict, add the penalty term
            # to the existing value; otherwise create a new entry in the linear terms dict
if conv_linear[j] != 0:
linear[j_2] = linear.get(j_2, 0.0) + sense * penalty * conv_linear[j]
# quadratic parts of penalty
for j, j_2 in varmap.items():
for k in range(j, len(varmap)):
# if j and k already exist in the quadratic terms dict,
# add a penalty term into existing value
# else create new key and value in the quadratic term dict
if conv_quadratic[j][k] != 0:
tup = (j_2, varmap[k])
quadratic[tup] = (
quadratic.get(tup, 0.0) + sense * penalty * conv_quadratic[j][k]
)
# Copy quadratic_constraints
for quadratic_constraint in problem.quadratic_constraints:
self._dst.quadratic_constraint(
quadratic_constraint.linear.coefficients,
quadratic_constraint.quadratic.coefficients,
quadratic_constraint.sense,
quadratic_constraint.rhs,
quadratic_constraint.name,
)
if problem.objective.sense == QuadraticObjective.Sense.MINIMIZE:
self._dst.minimize(offset, linear, quadratic)
else:
self._dst.maximize(offset, linear, quadratic)
# Update the penalty to the one just used
self._penalty = penalty
return self._dst
@staticmethod
def _conversion_table(
constraint,
) -> Tuple[int, np.ndarray, np.ndarray, Dict[int, int]]:
"""Construct conversion matrix for special constraint.
Returns:
Return conversion table which is used to construct
penalty term in main function.
Raises:
QiskitOptimizationError: if the constraint is invalid.
"""
vars_dict = constraint.linear.to_dict()
coeffs = list(vars_dict.values())
varmap = dict(enumerate(vars_dict.keys()))
rhs = constraint.rhs
sense = constraint.sense
num_vars = len(vars_dict)
# initialize return values, these are used for converted offset, linear
# and quadratic terms
offset = 0
linear = np.zeros(num_vars, dtype=int)
quadratic = np.zeros((num_vars, num_vars), dtype=int)
        # rhs = num_vars - 1 corresponds to the multiple-variable case with >= n - 1.
if sense == ConstraintSense.GE and rhs == num_vars - 1:
# x_1 + ... + x_n >= n - 1
            # The offset is the number of pair combinations (nC2)
offset = num_vars * (num_vars - 1) // 2
linear = np.full(num_vars, 1 - num_vars, dtype=int)
quadratic = np.triu(np.ones((num_vars, num_vars), dtype=int), k=1)
elif sense == ConstraintSense.LE and rhs == 1:
# x_1 + ... + x_n <= 1
quadratic = np.triu(np.ones((num_vars, num_vars), dtype=int), k=1)
elif rhs == 0:
if num_vars != 2:
raise QiskitOptimizationError(
f"Internal error: invalid number of variables {num_vars} {constraint.name}"
)
quadratic = np.array([[0, -1], [0, 0]])
if sense == ConstraintSense.GE:
# x >= y case
if coeffs[0] < 0.0:
linear[0] = 1
else:
linear[1] = 1
elif sense == ConstraintSense.LE:
# x <= y case
if coeffs[0] > 0.0:
linear[0] = 1
else:
linear[1] = 1
else:
raise QiskitOptimizationError(f"Internal error: invalid constraint {constraint.name}")
return offset, linear, quadratic, varmap
@staticmethod
def _is_matched_constraint(problem, constraint) -> bool:
"""Determine if constraint is special or not.
Returns:
True: when constraint is special
False: when constraint is not special
"""
params = constraint.linear.to_dict()
num_vars = len(params)
rhs = constraint.rhs
sense = constraint.sense
coeff_array = np.array(list(params.values()))
# Binary parameter?
if any(problem.variables[i].vartype != Variable.Type.BINARY for i in params.keys()):
return False
if num_vars == 2 and rhs == 0:
if sense in (Constraint.Sense.LE, Constraint.Sense.GE):
# x-y<=0
# x-y>=0
return coeff_array.min() == -1.0 and coeff_array.max() == 1.0
elif num_vars >= 2:
if sense == Constraint.Sense.LE and rhs == 1:
if all(i == 1 for i in params.values()):
# x1+x2+...<=1
return True
elif sense == Constraint.Sense.GE and rhs == num_vars - 1:
if all(i == 1 for i in params.values()):
# x1+x2+...>=n-1
return True
return False
@staticmethod
def _auto_define_penalty(problem) -> float:
"""Automatically define the penalty coefficient.
Returns:
Return the minimum valid penalty factor calculated
from the upper bound and the lower bound of the objective function.
If a constraint has a float coefficient,
return the default value for the penalty factor.
"""
default_penalty = 1e5
# Check coefficients of constraints.
# If a constraint has a float coefficient, return the default value for the penalty factor.
terms = []
for constraint in problem.linear_constraints:
terms.append(constraint.rhs)
terms.extend(constraint.linear.to_array().tolist())
if any(isinstance(term, float) and not term.is_integer() for term in terms):
logger.warning(
"Warning: Using %f for the penalty coefficient because "
"a float coefficient exists in constraints. \n"
"The value could be too small. "
"If so, set the penalty coefficient manually.",
default_penalty,
)
return default_penalty
lin_b = problem.objective.linear.bounds
quad_b = problem.objective.quadratic.bounds
return 1.0 + (lin_b.upperbound - lin_b.lowerbound) + (quad_b.upperbound - quad_b.lowerbound)
def interpret(self, x: Union[np.ndarray, List[float]]) -> np.ndarray:
"""Convert the result of the converted problem back to that of the original problem
Args:
x: The result of the converted problem or the given result in case of FAILURE.
Returns:
The result of the original problem.
Raises:
QiskitOptimizationError: if the number of variables in the result differs from
that of the original problem.
"""
if len(x) != self._src_num_vars:
raise QiskitOptimizationError(
f"The number of variables in the passed result ({len(x)}) differs from "
f"that of the original problem ({self._src_num_vars})."
)
return np.asarray(x)
@property
def penalty(self) -> Optional[float]:
"""Returns the penalty factor used in conversion.
Returns:
The penalty factor used in conversion.
"""
return self._penalty
@penalty.setter
def penalty(self, penalty: Optional[float]) -> None:
"""Set a new penalty factor.
Args:
penalty: The new penalty factor.
If None is passed, a penalty factor will be automatically calculated
on every conversion.
"""
self._penalty = penalty
self._should_define_penalty = penalty is None
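# Usage sketch (hedged example, not part of the original module): convert a
# problem whose only constraint matches the "x <= y" pattern above, so it is
# replaced by the P(x - xy) penalty term.
#
#     from qiskit_optimization import QuadraticProgram
#     from qiskit_optimization.converters import LinearInequalityToPenalty
#
#     qp = QuadraticProgram()
#     qp.binary_var("x")
#     qp.binary_var("y")
#     qp.maximize(linear={"x": 1, "y": 2})
#     qp.linear_constraint({"x": 1, "y": -1}, "<=", 0, "x_le_y")  # x <= y
#     converted = LinearInequalityToPenalty(penalty=10).convert(qp)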
|
djangox/lib/python3.8/site-packages/allauth/socialaccount/providers/draugiem/urls.py | DemarcusL/django_wiki_lab | 6,342 | 24371 | from django.urls import path
from . import views
urlpatterns = [
path("draugiem/login/", views.login, name="draugiem_login"),
path("draugiem/callback/", views.callback, name="draugiem_callback"),
]
|
Python/venv/lib/python3.7/site-packages/IPython/core/inputtransformer2.py | HenriqueBuzin/TCC | 445 | 24372 | <gh_stars>100-1000
"""Input transformer machinery to support IPython special syntax.
This includes the machinery to recognise and transform ``%magic`` commands,
``!system`` commands, ``help?`` querying, prompt stripping, and so forth.
Added: IPython 7.0. Replaces inputsplitter and inputtransformer which were
deprecated in 7.0.
"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
from codeop import compile_command
import re
import tokenize
from typing import List, Tuple, Union
import warnings
_indent_re = re.compile(r'^[ \t]+')
def leading_indent(lines):
"""Remove leading indentation.
    If the first line starts with spaces or tabs, the same whitespace will be
removed from each following line in the cell.
"""
if not lines:
return lines
m = _indent_re.match(lines[0])
if not m:
return lines
space = m.group(0)
n = len(space)
return [l[n:] if l.startswith(space) else l
for l in lines]
class PromptStripper:
"""Remove matching input prompts from a block of input.
Parameters
----------
prompt_re : regular expression
A regular expression matching any input prompt (including continuation,
e.g. ``...``)
initial_re : regular expression, optional
A regular expression matching only the initial prompt, but not continuation.
If no initial expression is given, prompt_re will be used everywhere.
Used mainly for plain Python prompts (``>>>``), where the continuation prompt
``...`` is a valid Python expression in Python 3, so shouldn't be stripped.
If initial_re and prompt_re differ,
only initial_re will be tested against the first line.
If any prompt is found on the first two lines,
prompts will be stripped from the rest of the block.
"""
def __init__(self, prompt_re, initial_re=None):
self.prompt_re = prompt_re
self.initial_re = initial_re or prompt_re
def _strip(self, lines):
return [self.prompt_re.sub('', l, count=1) for l in lines]
def __call__(self, lines):
if not lines:
return lines
if self.initial_re.match(lines[0]) or \
(len(lines) > 1 and self.prompt_re.match(lines[1])):
return self._strip(lines)
return lines
classic_prompt = PromptStripper(
prompt_re=re.compile(r'^(>>>|\.\.\.)( |$)'),
initial_re=re.compile(r'^>>>( |$)')
)
ipython_prompt = PromptStripper(re.compile(r'^(In \[\d+\]: |\s*\.{3,}: ?)'))
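# A quick sketch of what these strippers do (hedged example, not part of the
# original module):
#
#     classic_prompt(['>>> a = 1\n', '... b = 2\n'])   # -> ['a = 1\n', 'b = 2\n']
#     ipython_prompt(['In [1]: x = 1\n', '   ...: y = 2\n'])  # prompts removed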
def cell_magic(lines):
if not lines or not lines[0].startswith('%%'):
return lines
if re.match(r'%%\w+\?', lines[0]):
# This case will be handled by help_end
return lines
magic_name, _, first_line = lines[0][2:-1].partition(' ')
body = ''.join(lines[1:])
return ['get_ipython().run_cell_magic(%r, %r, %r)\n'
% (magic_name, first_line, body)]
def _find_assign_op(token_line) -> Union[int, None]:
"""Get the index of the first assignment in the line ('=' not inside brackets)
Note: We don't try to support multiple special assignment (a = b = %foo)
"""
paren_level = 0
for i, ti in enumerate(token_line):
s = ti.string
if s == '=' and paren_level == 0:
return i
if s in {'(','[','{'}:
paren_level += 1
elif s in {')', ']', '}'}:
if paren_level > 0:
paren_level -= 1
def find_end_of_continued_line(lines, start_line: int):
"""Find the last line of a line explicitly extended using backslashes.
Uses 0-indexed line numbers.
"""
end_line = start_line
while lines[end_line].endswith('\\\n'):
end_line += 1
if end_line >= len(lines):
break
return end_line
def assemble_continued_line(lines, start: Tuple[int, int], end_line: int):
r"""Assemble a single line from multiple continued line pieces
Continued lines are lines ending in ``\``, and the line following the last
``\`` in the block.
For example, this code continues over multiple lines::
if (assign_ix is not None) \
and (len(line) >= assign_ix + 2) \
and (line[assign_ix+1].string == '%') \
and (line[assign_ix+2].type == tokenize.NAME):
This statement contains four continued line pieces.
Assembling these pieces into a single line would give::
if (assign_ix is not None) and (len(line) >= assign_ix + 2) and (line[...
This uses 0-indexed line numbers. *start* is (lineno, colno).
Used to allow ``%magic`` and ``!system`` commands to be continued over
multiple lines.
"""
parts = [lines[start[0]][start[1]:]] + lines[start[0]+1:end_line+1]
return ' '.join([p[:-2] for p in parts[:-1]] # Strip backslash+newline
+ [parts[-1][:-1]]) # Strip newline from last line
class TokenTransformBase:
"""Base class for transformations which examine tokens.
Special syntax should not be transformed when it occurs inside strings or
comments. This is hard to reliably avoid with regexes. The solution is to
tokenise the code as Python, and recognise the special syntax in the tokens.
IPython's special syntax is not valid Python syntax, so tokenising may go
wrong after the special syntax starts. These classes therefore find and
transform *one* instance of special syntax at a time into regular Python
syntax. After each transformation, tokens are regenerated to find the next
piece of special syntax.
Subclasses need to implement one class method (find)
and one regular method (transform).
The priority attribute can select which transformation to apply if multiple
transformers match in the same place. Lower numbers have higher priority.
This allows "%magic?" to be turned into a help call rather than a magic call.
"""
# Lower numbers -> higher priority (for matches in the same location)
priority = 10
def sortby(self):
return self.start_line, self.start_col, self.priority
def __init__(self, start):
self.start_line = start[0] - 1 # Shift from 1-index to 0-index
self.start_col = start[1]
@classmethod
def find(cls, tokens_by_line):
"""Find one instance of special syntax in the provided tokens.
Tokens are grouped into logical lines for convenience,
so it is easy to e.g. look at the first token of each line.
*tokens_by_line* is a list of lists of tokenize.TokenInfo objects.
This should return an instance of its class, pointing to the start
position it has found, or None if it found no match.
"""
raise NotImplementedError
def transform(self, lines: List[str]):
"""Transform one instance of special syntax found by ``find()``
Takes a list of strings representing physical lines,
returns a similar list of transformed lines.
"""
raise NotImplementedError
class MagicAssign(TokenTransformBase):
"""Transformer for assignments from magics (a = %foo)"""
@classmethod
def find(cls, tokens_by_line):
"""Find the first magic assignment (a = %foo) in the cell.
"""
for line in tokens_by_line:
assign_ix = _find_assign_op(line)
if (assign_ix is not None) \
and (len(line) >= assign_ix + 2) \
and (line[assign_ix+1].string == '%') \
and (line[assign_ix+2].type == tokenize.NAME):
return cls(line[assign_ix+1].start)
def transform(self, lines: List[str]):
"""Transform a magic assignment found by the ``find()`` classmethod.
"""
start_line, start_col = self.start_line, self.start_col
lhs = lines[start_line][:start_col]
end_line = find_end_of_continued_line(lines, start_line)
rhs = assemble_continued_line(lines, (start_line, start_col), end_line)
assert rhs.startswith('%'), rhs
magic_name, _, args = rhs[1:].partition(' ')
lines_before = lines[:start_line]
call = "get_ipython().run_line_magic({!r}, {!r})".format(magic_name, args)
new_line = lhs + call + '\n'
lines_after = lines[end_line+1:]
return lines_before + [new_line] + lines_after
class SystemAssign(TokenTransformBase):
"""Transformer for assignments from system commands (a = !foo)"""
@classmethod
def find(cls, tokens_by_line):
"""Find the first system assignment (a = !foo) in the cell.
"""
for line in tokens_by_line:
assign_ix = _find_assign_op(line)
if (assign_ix is not None) \
and not line[assign_ix].line.strip().startswith('=') \
and (len(line) >= assign_ix + 2) \
and (line[assign_ix + 1].type == tokenize.ERRORTOKEN):
ix = assign_ix + 1
while ix < len(line) and line[ix].type == tokenize.ERRORTOKEN:
if line[ix].string == '!':
return cls(line[ix].start)
elif not line[ix].string.isspace():
break
ix += 1
def transform(self, lines: List[str]):
"""Transform a system assignment found by the ``find()`` classmethod.
"""
start_line, start_col = self.start_line, self.start_col
lhs = lines[start_line][:start_col]
end_line = find_end_of_continued_line(lines, start_line)
rhs = assemble_continued_line(lines, (start_line, start_col), end_line)
assert rhs.startswith('!'), rhs
cmd = rhs[1:]
lines_before = lines[:start_line]
call = "get_ipython().getoutput({!r})".format(cmd)
new_line = lhs + call + '\n'
lines_after = lines[end_line + 1:]
return lines_before + [new_line] + lines_after
# The escape sequences that define the syntax transformations IPython will
# apply to user input. These can NOT be just changed here: many regular
# expressions and other parts of the code may use their hardcoded values, and
# for all intents and purposes they constitute the 'IPython syntax', so they
# should be considered fixed.
ESC_SHELL = '!' # Send line to underlying system shell
ESC_SH_CAP = '!!' # Send line to system shell and capture output
ESC_HELP = '?' # Find information about object
ESC_HELP2 = '??' # Find extra-detailed information about object
ESC_MAGIC = '%' # Call magic function
ESC_MAGIC2 = '%%' # Call cell-magic function
ESC_QUOTE = ',' # Split args on whitespace, quote each as string and call
ESC_QUOTE2 = ';' # Quote all args as a single string, call
ESC_PAREN = '/' # Call first argument with rest of line as arguments
ESCAPE_SINGLES = {'!', '?', '%', ',', ';', '/'}
ESCAPE_DOUBLES = {'!!', '??'} # %% (cell magic) is handled separately
def _make_help_call(target, esc, next_input=None):
"""Prepares a pinfo(2)/psearch call from a target name and the escape
(i.e. ? or ??)"""
method = 'pinfo2' if esc == '??' \
else 'psearch' if '*' in target \
else 'pinfo'
arg = " ".join([method, target])
#Prepare arguments for get_ipython().run_line_magic(magic_name, magic_args)
t_magic_name, _, t_magic_arg_s = arg.partition(' ')
t_magic_name = t_magic_name.lstrip(ESC_MAGIC)
if next_input is None:
return 'get_ipython().run_line_magic(%r, %r)' % (t_magic_name, t_magic_arg_s)
else:
return 'get_ipython().set_next_input(%r);get_ipython().run_line_magic(%r, %r)' % \
(next_input, t_magic_name, t_magic_arg_s)
def _tr_help(content):
"""Translate lines escaped with: ?
A naked help line should fire the intro help screen (shell.show_usage())
"""
if not content:
return 'get_ipython().show_usage()'
return _make_help_call(content, '?')
def _tr_help2(content):
"""Translate lines escaped with: ??
A naked help line should fire the intro help screen (shell.show_usage())
"""
if not content:
return 'get_ipython().show_usage()'
return _make_help_call(content, '??')
def _tr_magic(content):
"Translate lines escaped with a percent sign: %"
name, _, args = content.partition(' ')
return 'get_ipython().run_line_magic(%r, %r)' % (name, args)
def _tr_quote(content):
"Translate lines escaped with a comma: ,"
name, _, args = content.partition(' ')
return '%s("%s")' % (name, '", "'.join(args.split()) )
def _tr_quote2(content):
"Translate lines escaped with a semicolon: ;"
name, _, args = content.partition(' ')
return '%s("%s")' % (name, args)
def _tr_paren(content):
"Translate lines escaped with a slash: /"
name, _, args = content.partition(' ')
return '%s(%s)' % (name, ", ".join(args.split()))
tr = { ESC_SHELL : 'get_ipython().system({!r})'.format,
ESC_SH_CAP : 'get_ipython().getoutput({!r})'.format,
ESC_HELP : _tr_help,
ESC_HELP2 : _tr_help2,
ESC_MAGIC : _tr_magic,
ESC_QUOTE : _tr_quote,
ESC_QUOTE2 : _tr_quote2,
ESC_PAREN : _tr_paren }
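# For example (sketch of the table above): tr['!']('ls -la') returns
# "get_ipython().system('ls -la')", and tr['%']('time x = 1') returns
# "get_ipython().run_line_magic('time', 'x = 1')".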
class EscapedCommand(TokenTransformBase):
"""Transformer for escaped commands like %foo, !foo, or /foo"""
@classmethod
def find(cls, tokens_by_line):
"""Find the first escaped command (%foo, !foo, etc.) in the cell.
"""
for line in tokens_by_line:
if not line:
continue
ix = 0
ll = len(line)
while ll > ix and line[ix].type in {tokenize.INDENT, tokenize.DEDENT}:
ix += 1
if ix >= ll:
continue
if line[ix].string in ESCAPE_SINGLES:
return cls(line[ix].start)
def transform(self, lines):
"""Transform an escaped line found by the ``find()`` classmethod.
"""
start_line, start_col = self.start_line, self.start_col
indent = lines[start_line][:start_col]
end_line = find_end_of_continued_line(lines, start_line)
line = assemble_continued_line(lines, (start_line, start_col), end_line)
if len(line) > 1 and line[:2] in ESCAPE_DOUBLES:
escape, content = line[:2], line[2:]
else:
escape, content = line[:1], line[1:]
if escape in tr:
call = tr[escape](content)
else:
call = ''
lines_before = lines[:start_line]
new_line = indent + call + '\n'
lines_after = lines[end_line + 1:]
return lines_before + [new_line] + lines_after
_help_end_re = re.compile(r"""(%{0,2}
[a-zA-Z_*][\w*]* # Variable name
(\.[a-zA-Z_*][\w*]*)* # .etc.etc
)
(\?\??)$ # ? or ??
""",
re.VERBOSE)
class HelpEnd(TokenTransformBase):
"""Transformer for help syntax: obj? and obj??"""
# This needs to be higher priority (lower number) than EscapedCommand so
# that inspecting magics (%foo?) works.
priority = 5
def __init__(self, start, q_locn):
super().__init__(start)
self.q_line = q_locn[0] - 1 # Shift from 1-indexed to 0-indexed
self.q_col = q_locn[1]
@classmethod
def find(cls, tokens_by_line):
"""Find the first help command (foo?) in the cell.
"""
for line in tokens_by_line:
# Last token is NEWLINE; look at last but one
if len(line) > 2 and line[-2].string == '?':
# Find the first token that's not INDENT/DEDENT
ix = 0
while line[ix].type in {tokenize.INDENT, tokenize.DEDENT}:
ix += 1
return cls(line[ix].start, line[-2].start)
def transform(self, lines):
"""Transform a help command found by the ``find()`` classmethod.
"""
piece = ''.join(lines[self.start_line:self.q_line+1])
indent, content = piece[:self.start_col], piece[self.start_col:]
lines_before = lines[:self.start_line]
lines_after = lines[self.q_line + 1:]
m = _help_end_re.search(content)
if not m:
raise SyntaxError(content)
assert m is not None, content
target = m.group(1)
esc = m.group(3)
# If we're mid-command, put it back on the next prompt for the user.
next_input = None
if (not lines_before) and (not lines_after) \
and content.strip() != m.group(0):
next_input = content.rstrip('?\n')
call = _make_help_call(target, esc, next_input=next_input)
new_line = indent + call + '\n'
return lines_before + [new_line] + lines_after
def make_tokens_by_line(lines:List[str]):
"""Tokenize a series of lines and group tokens by line.
The tokens for a multiline Python string or expression are grouped as one
    line. All lines except the last one should keep their line ending ('\\n',
    '\\r\\n') for this to work properly. Use `.splitlines(keepends=True)`, for
    example, when passing a block of text to this function.
"""
# NL tokens are used inside multiline expressions, but also after blank
# lines or comments. This is intentional - see https://bugs.python.org/issue17061
# We want to group the former case together but split the latter, so we
# track parentheses level, similar to the internals of tokenize.
NEWLINE, NL = tokenize.NEWLINE, tokenize.NL
tokens_by_line = [[]]
if len(lines) > 1 and not lines[0].endswith(('\n', '\r', '\r\n', '\x0b', '\x0c')):
warnings.warn("`make_tokens_by_line` received a list of lines which do not have lineending markers ('\\n', '\\r', '\\r\\n', '\\x0b', '\\x0c'), behavior will be unspecified")
parenlev = 0
try:
for token in tokenize.generate_tokens(iter(lines).__next__):
tokens_by_line[-1].append(token)
if (token.type == NEWLINE) \
or ((token.type == NL) and (parenlev <= 0)):
tokens_by_line.append([])
elif token.string in {'(', '[', '{'}:
parenlev += 1
elif token.string in {')', ']', '}'}:
if parenlev > 0:
parenlev -= 1
except tokenize.TokenError:
# Input ended in a multiline string or expression. That's OK for us.
pass
if not tokens_by_line[-1]:
tokens_by_line.pop()
return tokens_by_line
def show_linewise_tokens(s: str):
"""For investigation and debugging"""
if not s.endswith('\n'):
s += '\n'
lines = s.splitlines(keepends=True)
for line in make_tokens_by_line(lines):
print("Line -------")
for tokinfo in line:
print(" ", tokinfo)
# Arbitrary limit to prevent getting stuck in infinite loops
TRANSFORM_LOOP_LIMIT = 500
class TransformerManager:
"""Applies various transformations to a cell or code block.
The key methods for external use are ``transform_cell()``
and ``check_complete()``.
"""
def __init__(self):
self.cleanup_transforms = [
leading_indent,
classic_prompt,
ipython_prompt,
]
self.line_transforms = [
cell_magic,
]
self.token_transformers = [
MagicAssign,
SystemAssign,
EscapedCommand,
HelpEnd,
]
def do_one_token_transform(self, lines):
"""Find and run the transform earliest in the code.
Returns (changed, lines).
This method is called repeatedly until changed is False, indicating
that all available transformations are complete.
The tokens following IPython special syntax might not be valid, so
the transformed code is retokenised every time to identify the next
piece of special syntax. Hopefully long code cells are mostly valid
Python, not using lots of IPython special syntax, so this shouldn't be
a performance issue.
"""
tokens_by_line = make_tokens_by_line(lines)
candidates = []
for transformer_cls in self.token_transformers:
transformer = transformer_cls.find(tokens_by_line)
if transformer:
candidates.append(transformer)
if not candidates:
# Nothing to transform
return False, lines
ordered_transformers = sorted(candidates, key=TokenTransformBase.sortby)
for transformer in ordered_transformers:
try:
return True, transformer.transform(lines)
except SyntaxError:
pass
return False, lines
def do_token_transforms(self, lines):
for _ in range(TRANSFORM_LOOP_LIMIT):
changed, lines = self.do_one_token_transform(lines)
if not changed:
return lines
raise RuntimeError("Input transformation still changing after "
"%d iterations. Aborting." % TRANSFORM_LOOP_LIMIT)
def transform_cell(self, cell: str) -> str:
"""Transforms a cell of input code"""
if not cell.endswith('\n'):
cell += '\n' # Ensure the cell has a trailing newline
lines = cell.splitlines(keepends=True)
for transform in self.cleanup_transforms + self.line_transforms:
lines = transform(lines)
lines = self.do_token_transforms(lines)
return ''.join(lines)
def check_complete(self, cell: str):
"""Return whether a block of code is ready to execute, or should be continued
Parameters
----------
        cell : string
Python input code, which can be multiline.
Returns
-------
status : str
One of 'complete', 'incomplete', or 'invalid' if source is not a
prefix of valid code.
indent_spaces : int or None
The number of spaces by which to indent the next line of code. If
status is not 'incomplete', this is None.
"""
# Remember if the lines ends in a new line.
ends_with_newline = False
for character in reversed(cell):
if character == '\n':
ends_with_newline = True
break
elif character.strip():
break
else:
continue
if not ends_with_newline:
            # Append a newline for consistent tokenization
# See https://bugs.python.org/issue33899
cell += '\n'
lines = cell.splitlines(keepends=True)
if not lines:
return 'complete', None
if lines[-1].endswith('\\'):
# Explicit backslash continuation
return 'incomplete', find_last_indent(lines)
try:
for transform in self.cleanup_transforms:
lines = transform(lines)
except SyntaxError:
return 'invalid', None
if lines[0].startswith('%%'):
# Special case for cell magics - completion marked by blank line
if lines[-1].strip():
return 'incomplete', find_last_indent(lines)
else:
return 'complete', None
try:
for transform in self.line_transforms:
lines = transform(lines)
lines = self.do_token_transforms(lines)
except SyntaxError:
return 'invalid', None
tokens_by_line = make_tokens_by_line(lines)
if not tokens_by_line:
return 'incomplete', find_last_indent(lines)
if tokens_by_line[-1][-1].type != tokenize.ENDMARKER:
# We're in a multiline string or expression
return 'incomplete', find_last_indent(lines)
newline_types = {tokenize.NEWLINE, tokenize.COMMENT, tokenize.ENDMARKER}
# Pop the last line which only contains DEDENTs and ENDMARKER
last_token_line = None
if {t.type for t in tokens_by_line[-1]} in [
{tokenize.DEDENT, tokenize.ENDMARKER},
{tokenize.ENDMARKER}
] and len(tokens_by_line) > 1:
last_token_line = tokens_by_line.pop()
while tokens_by_line[-1] and tokens_by_line[-1][-1].type in newline_types:
tokens_by_line[-1].pop()
if len(tokens_by_line) == 1 and not tokens_by_line[-1]:
return 'incomplete', 0
if tokens_by_line[-1][-1].string == ':':
# The last line starts a block (e.g. 'if foo:')
ix = 0
while tokens_by_line[-1][ix].type in {tokenize.INDENT, tokenize.DEDENT}:
ix += 1
indent = tokens_by_line[-1][ix].start[1]
return 'incomplete', indent + 4
if tokens_by_line[-1][0].line.endswith('\\'):
return 'incomplete', None
# At this point, our checks think the code is complete (or invalid).
# We'll use codeop.compile_command to check this with the real parser
try:
with warnings.catch_warnings():
warnings.simplefilter('error', SyntaxWarning)
res = compile_command(''.join(lines), symbol='exec')
except (SyntaxError, OverflowError, ValueError, TypeError,
MemoryError, SyntaxWarning):
return 'invalid', None
else:
if res is None:
return 'incomplete', find_last_indent(lines)
if last_token_line and last_token_line[0].type == tokenize.DEDENT:
if ends_with_newline:
return 'complete', None
return 'incomplete', find_last_indent(lines)
# If there's a blank line at the end, assume we're ready to execute
if not lines[-1].strip():
return 'complete', None
return 'complete', None
def find_last_indent(lines):
m = _indent_re.match(lines[-1])
if not m:
return 0
return len(m.group(0).replace('\t', ' '*4))
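# Usage sketch (hedged example, not part of the original module):
#
#     mgr = TransformerManager()
#     mgr.transform_cell('%time x = 1\n')
#     #   -> "get_ipython().run_line_magic('time', 'x = 1')\n"
#     mgr.check_complete('if x:')   # -> ('incomplete', 4)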
|
integration/bazel_workspace_tests/bazel_ngtsc_plugin/packages.bzl | John-Cassidy/angular | 95,154 | 24397 | <filename>integration/bazel_workspace_tests/bazel_ngtsc_plugin/packages.bzl
ANGULAR_PACKAGES_CONFIG = [
("@angular/animations", struct(entry_points = ["browser"])),
("@angular/common", struct(entry_points = ["http/testing", "http", "testing"])),
("@angular/compiler", struct(entry_points = ["testing"])),
("@angular/core", struct(entry_points = ["testing"])),
("@angular/forms", struct(entry_points = [])),
("@angular/platform-browser", struct(entry_points = ["testing", "animations"])),
("@angular/platform-browser-dynamic", struct(entry_points = ["testing"])),
("@angular/router", struct(entry_points = [])),
]
ANGULAR_PACKAGES = [
struct(
name = name[len("@angular/"):],
entry_points = config.entry_points,
platform = config.platform if hasattr(config, "platform") else "browser",
module_name = name,
)
for name, config in ANGULAR_PACKAGES_CONFIG
]
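# For example, the ("@angular/common", ...) entry above expands to (sketch):
#
#   struct(
#       name = "common",
#       entry_points = ["http/testing", "http", "testing"],
#       platform = "browser",
#       module_name = "@angular/common",
#   )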
|
cogdl/layers/gin_layer.py | cenyk1230/cogdl | 1,072 | 24413 | import torch
import torch.nn as nn
from cogdl.utils import spmm
class GINLayer(nn.Module):
r"""Graph Isomorphism Network layer from paper `"How Powerful are Graph
Neural Networks?" <https://arxiv.org/pdf/1810.00826.pdf>`__.
.. math::
h_i^{(l+1)} = f_\Theta \left((1 + \epsilon) h_i^{l} +
\mathrm{sum}\left(\left\{h_j^{l}, j\in\mathcal{N}(i)
\right\}\right)\right)
Parameters
----------
    apply_func : callable layer function
        Layer or function applied to update node features.
eps : float32, optional
Initial `\epsilon` value.
train_eps : bool, optional
If True, `\epsilon` will be a learnable parameter.
"""
def __init__(self, apply_func=None, eps=0, train_eps=True):
super(GINLayer, self).__init__()
if train_eps:
self.eps = torch.nn.Parameter(torch.FloatTensor([eps]))
else:
self.register_buffer("eps", torch.FloatTensor([eps]))
self.apply_func = apply_func
def forward(self, graph, x):
out = (1 + self.eps) * x + spmm(graph, x)
if self.apply_func is not None:
out = self.apply_func(out)
return out
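# Usage sketch (hedged example; `g` (a cogdl Graph) and `x` (node features of
# shape [num_nodes, dim]) are assumed inputs, since `spmm(graph, x)` needs them):
#
#     mlp = nn.Sequential(nn.Linear(dim, 2 * dim), nn.ReLU(), nn.Linear(2 * dim, dim))
#     layer = GINLayer(apply_func=mlp, eps=0.0, train_eps=True)
#     out = layer(g, x)   # same shape as x: [num_nodes, dim]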
|
mmselfsup/models/algorithms/cae.py | mitming/mmselfsup | 355 | 24421 | <reponame>mitming/mmselfsup
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Sequence
import torch
from torchvision.transforms import Normalize
from ..builder import ALGORITHMS, build_backbone, build_head, build_neck
from .base import BaseModel
@ALGORITHMS.register_module()
class CAE(BaseModel):
"""CAE.
Implementation of `Context Autoencoder for Self-Supervised Representation
Learning <https://arxiv.org/abs/2202.03026>`_.
Args:
backbone (dict, optional): Config dict for module of backbone.
neck (dict, optional): Config dict for module of deep features to
compact feature vectors. Defaults to None.
head (dict, optional): Config dict for module of loss functions.
Defaults to None.
base_momentum (float): The base momentum coefficient for the target
network. Defaults to 0.0.
init_cfg (dict, optional): the config to control the initialization.
"""
def __init__(self,
backbone: dict = None,
neck: dict = None,
head: dict = None,
base_momentum: float = 0.0,
init_cfg: dict = None,
**kwargs) -> None:
super(CAE, self).__init__(init_cfg)
assert backbone is not None
self.backbone = build_backbone(backbone)
self.teacher = build_backbone(backbone)
assert neck is not None
self.neck = build_neck(neck)
assert head is not None
self.head = build_head(head)
self.momentum = base_momentum
self.img_norm = Normalize(
mean=torch.tensor((0.485, 0.456, 0.406)),
std=torch.tensor((0.229, 0.224, 0.225)))
def init_weights(self) -> None:
super().init_weights()
self._init_teacher()
def _init_teacher(self) -> None:
# init the weights of teacher with those of backbone
for param_backbone, param_teacher in zip(self.backbone.parameters(),
self.teacher.parameters()):
param_teacher.detach()
param_teacher.data.copy_(param_backbone.data)
param_teacher.requires_grad = False
def momentum_update(self) -> None:
"""Momentum update of the teacher network."""
        for param_backbone, param_teacher in zip(self.backbone.parameters(),
self.teacher.parameters()):
param_teacher.data = param_teacher.data * self.momentum + \
                param_backbone.data * (1. - self.momentum)
def extract_feat(self, img: torch.Tensor,
mask: torch.Tensor) -> torch.Tensor:
x = self.backbone(img, mask)
return x
def forward_train(self, samples: Sequence, **kwargs) -> dict:
img, img_target, mask = samples
# normalize images and the images to get the target
img_list = [self.img_norm(x).unsqueeze(0) for x in img]
img = torch.cat(img_list)
img_target = 0.8 * img_target + 0.1
mask = mask.flatten(1).to(torch.bool)
unmasked = self.backbone(img, mask)
# get the latent prediction for the masked patches
with torch.no_grad():
latent_target = self.teacher(img, ~mask)
latent_target = latent_target[:, 1:, :]
self.momentum_update()
pos_embed = self.backbone.pos_embed.expand(img.shape[0], -1, -1)
pos_embed_masked = pos_embed[:,
1:][mask].reshape(img.shape[0], -1,
pos_embed.shape[-1])
pos_embed_unmasked = pos_embed[:, 1:][~mask].reshape(
img.shape[0], -1, pos_embed.shape[-1])
# input the unmasked tokens and masked tokens to the decoder
logits, latent_pred = self.neck(unmasked[:, 1:], pos_embed_masked,
pos_embed_unmasked)
logits = logits.view(-1, logits.shape[-1])
losses = self.head(img_target, logits, latent_pred, latent_target,
mask)
return losses
|
pyperformance/run.py | brandtbucher/pyperformance | 255 | 24426 | <reponame>brandtbucher/pyperformance<gh_stars>100-1000
from collections import namedtuple
import hashlib
import sys
import time
import traceback
import pyperformance
from . import _utils, _python, _pythoninfo
from .venv import VenvForBenchmarks, REQUIREMENTS_FILE
from . import _venv
class BenchmarkException(Exception):
pass
class RunID(namedtuple('RunID', 'python compat bench timestamp')):
def __new__(cls, python, compat, bench, timestamp):
self = super().__new__(
cls,
python,
compat,
bench or None,
int(timestamp) if timestamp else None,
)
return self
def __str__(self):
if not self.timestamp:
return self.name
return f'{self.name}-{self.timestamp}'
@property
def name(self):
try:
return self._name
except AttributeError:
name = f'{self.python}-compat-{self.compat}'
if self.bench:
name = f'{name}-bm-{self.bench.name}'
self._name = name
return self._name
def get_run_id(python, bench=None):
py_id = _python.get_id(python, prefix=True)
compat_id = get_compatibility_id(bench)
ts = time.time()
return RunID(py_id, compat_id, bench, ts)
def run_benchmarks(should_run, python, options):
to_run = sorted(should_run)
info = _pythoninfo.get_info(python)
runid = get_run_id(info)
unique = getattr(options, 'unique_venvs', False)
if not unique:
common = VenvForBenchmarks.ensure(
_venv.get_venv_root(runid.name, python=info),
info,
upgrade='oncreate',
inherit_environ=options.inherit_environ,
)
benchmarks = {}
venvs = set()
for i, bench in enumerate(to_run):
bench_runid = runid._replace(bench=bench)
assert bench_runid.name, (bench, bench_runid)
name = bench_runid.name
venv_root = _venv.get_venv_root(name, python=info)
print()
print('='*50)
print(f'({i+1:>2}/{len(to_run)}) creating venv for benchmark ({bench.name})')
print()
if not unique:
print('(trying common venv first)')
# Try the common venv first.
try:
common.ensure_reqs(bench)
except _venv.RequirementsInstallationFailedError:
print('(falling back to unique venv)')
else:
benchmarks[bench] = (common, bench_runid)
continue
venv = VenvForBenchmarks.ensure(
venv_root,
info,
upgrade='oncreate',
inherit_environ=options.inherit_environ,
)
try:
# XXX Do not override when there is a requirements collision.
venv.ensure_reqs(bench)
except _venv.RequirementsInstallationFailedError:
print('(benchmark will be skipped)')
print()
venv = None
venvs.add(venv_root)
benchmarks[bench] = (venv, bench_runid)
print()
suite = None
run_count = str(len(to_run))
errors = []
pyperf_opts = get_pyperf_opts(options)
import pyperf
for index, bench in enumerate(to_run):
name = bench.name
print("[%s/%s] %s..." %
(str(index + 1).rjust(len(run_count)), run_count, name))
sys.stdout.flush()
def add_bench(dest_suite, obj):
if isinstance(obj, pyperf.BenchmarkSuite):
results = obj
else:
results = (obj,)
version = pyperformance.__version__
for res in results:
res.update_metadata({'performance_version': version})
if dest_suite is not None:
dest_suite.add_benchmark(res)
else:
dest_suite = pyperf.BenchmarkSuite([res])
return dest_suite
bench_venv, bench_runid = benchmarks.get(bench)
if bench_venv is None:
print("ERROR: Benchmark %s failed: could not install requirements" % name)
errors.append(name)
continue
try:
result = bench.run(
bench_venv.python,
bench_runid,
pyperf_opts,
venv=bench_venv,
verbose=options.verbose,
)
except Exception as exc:
print("ERROR: Benchmark %s failed: %s" % (name, exc))
traceback.print_exc()
errors.append(name)
else:
suite = add_bench(suite, result)
print()
return (suite, errors)
# Utility functions
def get_compatibility_id(bench=None):
# XXX Do not include the pyperformance reqs if a benchmark was provided?
reqs = sorted(_utils.iter_clean_lines(REQUIREMENTS_FILE))
if bench:
lockfile = bench.requirements_lockfile
if lockfile and os.path.exists(lockfile):
reqs += sorted(_utils.iter_clean_lines(lockfile))
data = [
# XXX Favor pyperf.__version__ instead?
pyperformance.__version__,
'\n'.join(reqs),
]
h = hashlib.sha256()
for value in data:
h.update(value.encode('utf-8'))
compat_id = h.hexdigest()
# XXX Return the whole string?
compat_id = compat_id[:12]
return compat_id
def get_pyperf_opts(options):
opts = []
if options.debug_single_value:
opts.append('--debug-single-value')
elif options.rigorous:
opts.append('--rigorous')
elif options.fast:
opts.append('--fast')
if options.verbose:
opts.append('--verbose')
if options.affinity:
opts.append('--affinity=%s' % options.affinity)
if options.track_memory:
opts.append('--track-memory')
if options.inherit_environ:
opts.append('--inherit-environ=%s' % ','.join(options.inherit_environ))
return opts
|
Python_10_Plot_Bokeh_Candlestick.py | rogerolowski/SimpleStockAnalysisPython | 195 | 24457 | <reponame>rogerolowski/SimpleStockAnalysisPython
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 27 08:09:11 2020
@author: Tin
"""
# Plot Candlestick in bokeh
import pandas as pd # Dataframe Library
from math import pi
from bokeh.plotting import figure, show, output_file
pd.set_option('max_columns', None) # To show all columns
import yfinance as yf
yf.pdr_override()
# input
symbol = 'AAPL'
start = '2019-12-01'
end = '2020-01-01'
# dataframe
df = yf.download(symbol,start,end)
df["Date"] = pd.to_datetime(df.index)
mids = (df['Open'] + df['Adj Close'])/2
spans = abs(df['Adj Close']-df['Open'])
inc = df['Adj Close'] > df['Open']
dec = df['Open'] > df['Adj Close']
w = 12*60*60*1000 # half day in ms
TOOLS = "pan,wheel_zoom,box_zoom,reset,save"
p = figure(x_axis_type="datetime", tools=TOOLS, plot_width=1000, title = symbol + " Candlestick")
p.xaxis.major_label_orientation = pi/4
p.grid.grid_line_alpha=0.3
p.segment(df.Date, df.High, df.Date, df.Low, color="black")
p.vbar(df.Date[inc], w, df.Open[inc], df['Adj Close'][inc], fill_color="#D5E1DD", line_color="black")
p.vbar(df.Date[dec], w, df.Open[dec], df['Adj Close'][dec], fill_color="#F2583E", line_color="black")
output_file("candlestick.html", title= symbol + " candlestick")
show(p) # open a browser |
atlas/foundations_contrib/src/test/test_lazy_bucket.py | DeepLearnI/atlas | 296 | 24494 |
import unittest
from mock import Mock
from foundations_spec.helpers.spec import Spec
from foundations_spec.helpers import let, let_mock, set_up
class TestLazyBucket(Spec):
@let
def lazy_bucket(self):
from foundations_contrib.lazy_bucket import LazyBucket
return LazyBucket(self.bucket_constructor)
@set_up
def set_up(self):
self.bucket_constructor.return_value = self.bucket
bucket_constructor = let_mock()
bucket = let_mock()
name = let_mock()
data = let_mock()
input_file = let_mock()
output_file = let_mock()
dummy = let_mock()
pathname = let_mock()
source = let_mock()
destination = let_mock()
def test_ensure_bucket_is_not_constructed(self):
self.lazy_bucket
self.bucket_constructor.assert_not_called()
def test_upload_from_string_calls_bucket(self):
self.bucket.upload_from_string.return_value = self.dummy
result = self.lazy_bucket.upload_from_string(self.name, self.data)
self.bucket.upload_from_string.assert_called_with(self.name, self.data)
self.assertEqual(self.dummy, result)
def test_upload_from_file_calls_bucket(self):
self.bucket.upload_from_file.return_value = self.dummy
result = self.lazy_bucket.upload_from_file(self.name, self.input_file)
self.bucket.upload_from_file.assert_called_with(self.name, self.input_file)
self.assertEqual(self.dummy, result)
def test_exists_calls_bucket(self):
self.bucket.exists.return_value = self.dummy
result = self.lazy_bucket.exists(self.name)
self.bucket.exists.assert_called_with(self.name)
self.assertEqual(self.dummy, result)
def test_download_as_string_calls_bucket(self):
self.bucket.download_as_string.return_value = self.dummy
result = self.lazy_bucket.download_as_string(self.name)
self.bucket.download_as_string.assert_called_with(self.name)
self.assertEqual(self.dummy, result)
def test_download_to_file_calls_bucket(self):
self.bucket.download_to_file.return_value = self.dummy
result = self.lazy_bucket.download_to_file(self.name, self.output_file)
self.bucket.download_to_file.assert_called_with(self.name, self.output_file)
self.assertEqual(self.dummy, result)
def test_list_files_calls_bucket(self):
self.bucket.list_files.return_value = self.dummy
result = self.lazy_bucket.list_files(self.pathname)
self.bucket.list_files.assert_called_with(self.pathname)
self.assertEqual(self.dummy, result)
def test_remove_calls_bucket(self):
self.bucket.remove.return_value = self.dummy
result = self.lazy_bucket.remove(self.name)
self.bucket.remove.assert_called_with(self.name)
self.assertEqual(self.dummy, result)
def test_move_calls_bucket(self):
self.bucket.move.return_value = self.dummy
result = self.lazy_bucket.move(self.source, self.destination)
self.bucket.move.assert_called_with(self.source, self.destination)
self.assertEqual(self.dummy, result)
|
plugins/vad/snr_vad/test_snr_vad.py | kowo-zahl/Naomi | 194 | 24530 | # -*- coding: utf-8 -*-
from naomi import testutils
from . import snr_vad
class TestSNR_VADPlugin(testutils.Test_VADPlugin):
def setUp(self):
super(TestSNR_VADPlugin, self).setUp()
self.plugin = testutils.get_plugin_instance(
snr_vad.SNRPlugin,
self._test_input
)
# prime by running through one wav file
self.map_file()
|
texel/keys.py | Xen0byte/texel | 119 | 24570 | <filename>texel/keys.py
import curses
class Key:
def __init__(self, *values):
self.values = values
self._hash = hash(values)
self._keyset = set(values)
def __eq__(self, other):
return self._hash == other._hash
def __hash__(self):
return self._hash
class Keys:
ESC = Key(27)
TAB = Key(ord("\t"), ord("n"))
SHIFT_TAB = Key(353, ord("N"))
VISUAL = Key(ord("v"), ord("V"))
COPY = Key(ord("c"), ord("y"))
QUIT = Key(ord("q"))
UP = Key(curses.KEY_UP, ord("k"))
DOWN = Key(curses.KEY_DOWN, ord("j"))
LEFT = Key(curses.KEY_LEFT, ord("h"))
RIGHT = Key(curses.KEY_RIGHT, ord("l"))
HELP = Key(ord("?"))
ALL = [ESC, TAB, SHIFT_TAB, VISUAL, COPY, QUIT, UP, DOWN, LEFT, RIGHT, HELP]
_id_to_key = {id: key for key in ALL for id in key.values}
@staticmethod
def to_key(key: int) -> Key:
return Keys._id_to_key.get(key)
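# Usage sketch (hedged example; `stdscr` is an assumed curses window and
# `scroll_down()` a hypothetical handler):
#
#     key = Keys.to_key(stdscr.getch())
#     if key is not None and key == Keys.DOWN:
#         scroll_down()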
|
tests/tokenizers.py | theeluwin/textrankr | 219 | 24596 | import json
import requests
from typing import List
from konlpy.tag import Okt
from requests.models import Response
class OktTokenizer:
"""
A POS-tagger based tokenizer functor. Note that these are just examples. The `phrases` function usually gives a better result than an ordinary POS tokenizer.
Example:
tokenizer: OktTokenizer = OktTokenizer()
tokens: List[str] = tokenizer(your_text_here)
"""
okt: Okt = Okt()
def __call__(self, text: str) -> List[str]:
tokens: List[str] = self.okt.phrases(text)
return tokens
class ApiTokenizer:
"""
An API based tokenizer functor, assuming that the response body is a jsonifyable string with content of list of `str` tokens.
Example:
tokenizer: ApiTokenizer = ApiTokenizer()
tokens: List[str] = tokenizer(your_text_here)
"""
def __init__(self, endpoint: str) -> None:
self.endpoint: str = endpoint
def __call__(self, text: str) -> List[str]:
body: bytes = text.encode('utf-8')
res: Response = requests.post(self.endpoint, data=body)
tokens: List[str] = json.loads(res.text)
return tokens
|
dbs/dal/Whiteport.py | xinghejd/opencanary_web | 633 | 24609 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Author: pirogue
Purpose: Whitelist port table operations
Site: http://pirogue.org
Created: 2018-08-03 17:32:54
"""
from dbs.initdb import DBSession
from dbs.models.Whiteport import Whiteport
from sqlalchemy import desc,asc
from sqlalchemy.exc import InvalidRequestError
# import sys
# sys.path.append("..")
class WhitePort:
"""增删改查"""
def __init__(self):
self.session=DBSession
    # Query port data from the whitelist table
def select_white_port(self):
try:
white_port_res = self.session.query(Whiteport.dst_port).all()
return white_port_res
except InvalidRequestError:
self.session.rollback()
except Exception as e:
print(e)
finally:
self.session.close()
    # Add a port to the whitelist
def insert_white_port(self, dst_port):
try:
wip_insert = Whiteport(dst_port=dst_port)
self.session.merge(wip_insert)
self.session.commit()
except InvalidRequestError:
self.session.rollback()
except Exception as e:
print(e)
finally:
self.session.close()
    # Delete all rows from the whitelist port table
def delete_white_port(self):
try:
self.session.query(Whiteport).delete()
self.session.commit()
except InvalidRequestError:
self.session.rollback()
except Exception as e:
print(e)
finally:
self.session.close() |
pwndbg/color/__init__.py | R2S4X/pwndbg | 287 | 24644 | <reponame>R2S4X/pwndbg
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import re
import pwndbg.memoize
NORMAL = "\x1b[0m"
BLACK = "\x1b[30m"
RED = "\x1b[31m"
GREEN = "\x1b[32m"
YELLOW = "\x1b[33m"
BLUE = "\x1b[34m"
PURPLE = "\x1b[35m"
CYAN = "\x1b[36m"
LIGHT_GREY = LIGHT_GRAY = "\x1b[37m"
FOREGROUND = "\x1b[39m"
GREY = GRAY = "\x1b[90m"
LIGHT_RED = "\x1b[91m"
LIGHT_GREEN = "\x1b[92m"
LIGHT_YELLOW = "\x1b[93m"
LIGHT_BLUE = "\x1b[94m"
LIGHT_PURPLE = "\x1b[95m"
LIGHT_CYAN = "\x1b[96m"
WHITE = "\x1b[97m"
BOLD = "\x1b[1m"
UNDERLINE = "\x1b[4m"
def none(x): return str(x)
def normal(x): return colorize(x, NORMAL)
def black(x): return colorize(x, BLACK)
def red(x): return colorize(x, RED)
def green(x): return colorize(x, GREEN)
def yellow(x): return colorize(x, YELLOW)
def blue(x): return colorize(x, BLUE)
def purple(x): return colorize(x, PURPLE)
def cyan(x): return colorize(x, CYAN)
def light_gray(x): return colorize(x, LIGHT_GRAY)
def foreground(x): return colorize(x, FOREGROUND)
def gray(x): return colorize(x, GRAY)
def light_red(x): return colorize(x, LIGHT_RED)
def light_green(x): return colorize(x, LIGHT_GREEN)
def light_yellow(x): return colorize(x, LIGHT_YELLOW)
def light_blue(x): return colorize(x, LIGHT_BLUE)
def light_purple(x): return colorize(x, LIGHT_PURPLE)
def light_cyan(x): return colorize(x, LIGHT_CYAN)
def white(x): return colorize(x, WHITE)
def bold(x): return colorize(x, BOLD)
def underline(x): return colorize(x, UNDERLINE)
def colorize(x, color): return color + terminateWith(str(x), color) + NORMAL
@pwndbg.memoize.reset_on_stop
def generateColorFunctionInner(old, new):
def wrapper(text):
return new(old(text))
return wrapper
def generateColorFunction(config):
function = lambda x: x
for color in str(config).split(','):
function = generateColorFunctionInner(function, globals()[color.lower().replace('-', '_')])
return function
def strip(x):
return re.sub('\x1b\\[\d+m', '', x)
def terminateWith(x, color):
return re.sub('\x1b\\[0m', NORMAL + color, x)
def ljust_colored(x, length, char=' '):
return x + (length - len(strip(x))) * char
|
tools/debug_discovery.py | s1rd4v3/homebridge-tuya-web-es6-js | 172 | 24657 | <reponame>s1rd4v3/homebridge-tuya-web-es6-js<filename>tools/debug_discovery.py
# The script is intended to get a list of all devices available via Tuya Home Assistant API endpoint.
import requests
import pprint
# CHANGE THIS - BEGINNING
USERNAME = ""
PASSWORD = ""
REGION = "eu" # cn, eu, us
COUNTRY_CODE = "1" # Your account country code, e.g., 1 for USA or 86 for China
BIZ_TYPE = "smart_life" # tuya, smart_life, jinvoo_smart
FROM = "tuya" # you likely don't need to touch this
# CHANGE THIS - END
# NO NEED TO CHANGE ANYTHING BELOW
TUYACLOUDURL = "https://px1.tuya{}.com"
pp = pprint.PrettyPrinter(indent=4)
print("Getting credentials")
auth_response = requests.post(
(TUYACLOUDURL + "/homeassistant/auth.do").format(REGION),
data={
"userName": USERNAME,
"password": PASSWORD,
"countryCode": COUNTRY_CODE,
"bizType": BIZ_TYPE,
"from": FROM,
},
)
print("Got credentials")
auth_response = auth_response.json()
pp.pprint(auth_response)
header = {"name": "Discovery", "namespace": "discovery", "payloadVersion": 1}
payload = {"accessToken": auth_response["access_token"]}
data = {"header": header, "payload": payload}
print("Getting devices")
discovery_response = requests.post(
(TUYACLOUDURL + "/homeassistant/skill").format(REGION), json=data
)
print("Got devices")
discovery_response = discovery_response.json()
pp.pprint(discovery_response)
print("!!! NOW REMOVE THIS FILE, SO YOUR CREDENTIALS (username, password) WON'T LEAK !!!")
|
examples/demo_classic.py | ria02/InquirerPy | 120 | 24677 | # NOTE: Following example requires boto3 package.
import boto3
from InquirerPy import prompt
from InquirerPy.exceptions import InvalidArgument
from InquirerPy.validator import PathValidator
client = boto3.client("s3")
def get_bucket(_):
return [bucket["Name"] for bucket in client.list_buckets()["Buckets"]]
def walk_s3_bucket(result):
response = []
paginator = client.get_paginator("list_objects")
for result in paginator.paginate(Bucket=result["bucket"]):
for file in result["Contents"]:
response.append(file["Key"])
return response
def is_upload(result):
return result[0] == "Upload"
questions = [
{
"message": "Select an S3 action:",
"type": "list",
"choices": ["Upload", "Download"],
},
{
"message": "Enter the filepath to upload:",
"type": "filepath",
"when": is_upload,
"validate": PathValidator(),
"only_files": True,
},
{
"message": "Select a bucket:",
"type": "fuzzy",
"choices": get_bucket,
"name": "bucket",
"spinner_enable": True,
},
{
"message": "Select files to download:",
"type": "fuzzy",
"when": lambda _: not is_upload(_),
"choices": walk_s3_bucket,
"multiselect": True,
"spinner_enable": True,
},
{
"message": "Enter destination folder:",
"type": "filepath",
"when": lambda _: not is_upload(_),
"only_directories": True,
"validate": PathValidator(),
},
{"message": "Confirm?", "type": "confirm", "default": False},
]
try:
result = prompt(questions, vi_mode=True)
except InvalidArgument:
print("No available choices")
# Download or Upload the file based on result ...
|
lv_set/Main.py | Ramesh-X/Level-Set | 122 | 24686 | """
This python code demonstrates an edge-based active contour model as an application of the
Distance Regularized Level Set Evolution (DRLSE) formulation in the following paper:
<NAME>, <NAME>, <NAME>, <NAME>, "Distance Regularized Level Set Evolution and Its Application to Image Segmentation",
IEEE Trans. Image Processing, vol. 19 (12), pp. 3243-3254, 2010.
Author: <NAME>
E-mail: <EMAIL>
Released Under MIT License
"""
import numpy as np
from skimage.io import imread
from lv_set.find_lsf import find_lsf
from lv_set.potential_func import *
from lv_set.show_fig import draw_all
def gourd_params():
img = imread('gourd.bmp', True)
img = np.interp(img, [np.min(img), np.max(img)], [0, 255])
# initialize LSF as binary step function
c0 = 2
initial_lsf = c0 * np.ones(img.shape)
# generate the initial region R0 as two rectangles
initial_lsf[24:35, 19:25] = -c0
initial_lsf[24:35, 39:50] = -c0
# parameters
return {
'img': img,
'initial_lsf': initial_lsf,
'timestep': 1, # time step
'iter_inner': 10,
'iter_outer': 30,
'lmda': 5, # coefficient of the weighted length term L(phi)
'alfa': -3, # coefficient of the weighted area term A(phi)
'epsilon': 1.5, # parameter that specifies the width of the DiracDelta function
'sigma': 0.8, # scale parameter in Gaussian kernel
'potential_function': DOUBLE_WELL,
}
def two_cells_params():
img = imread('twocells.bmp', True)
img = np.interp(img, [np.min(img), np.max(img)], [0, 255])
# initialize LSF as binary step function
c0 = 2
initial_lsf = c0 * np.ones(img.shape)
# generate the initial region R0 as two rectangles
initial_lsf[9:55, 9:75] = -c0
# parameters
return {
'img': img,
'initial_lsf': initial_lsf,
'timestep': 5, # time step
'iter_inner': 5,
'iter_outer': 40,
'lmda': 5, # coefficient of the weighted length term L(phi)
'alfa': 1.5, # coefficient of the weighted area term A(phi)
'epsilon': 1.5, # parameter that specifies the width of the DiracDelta function
'sigma': 1.5, # scale parameter in Gaussian kernel
'potential_function': DOUBLE_WELL,
}
params = gourd_params()
# params = two_cells_params()
phi = find_lsf(**params)
print('Show final output')
draw_all(phi, params['img'], 10)
|
examples/hashtrees.py | ascribe/transactions | 124 | 24741 | # -*- coding: utf-8 -*-
"""
Inspired by:
* https://gist.github.com/shirriff/c9fb5d98e6da79d9a772#file-merkle-py
* https://github.com/richardkiss/pycoin
"""
from __future__ import absolute_import, division, unicode_literals
from builtins import range
import binascii
import hashlib
def merkleroot(hashes):
"""
Args:
hashes: reversed binary form of transactions hashes, e.g.:
``binascii.unhexlify(h)[::-1] for h in block['tx']]``
Returns:
merkle root in hexadecimal form
"""
if len(hashes) == 1:
return binascii.hexlify(bytearray(reversed(hashes[0])))
if len(hashes) % 2 == 1:
hashes.append(hashes[-1])
parent_hashes = []
for i in range(0, len(hashes)-1, 2):
first_round_hash = hashlib.sha256(hashes[i] + hashes[i+1]).digest()
second_round_hash = hashlib.sha256(first_round_hash).digest()
parent_hashes.append(second_round_hash)
return merkleroot(parent_hashes)
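# Hedged usage sketch (added for illustration; dummy hex strings, not real transaction ids):
if __name__ == '__main__':
    dummy_txids = ['11' * 32, '22' * 32, '33' * 32]
    leaves = [binascii.unhexlify(h)[::-1] for h in dummy_txids]
    # With an odd number of leaves the last one is duplicated, mirroring Bitcoin's merkle rule.
    print(merkleroot(leaves))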
|
pwncat/modules/linux/enumerate/system/selinux.py | Mitul16/pwncat | 1,454 | 24751 | #!/usr/bin/env python3
from typing import Dict
import rich.markup
from pwncat.db import Fact
from pwncat.platform.linux import Linux
from pwncat.modules.enumerate import Schedule, EnumerateModule
class SELinuxState(Fact):
def __init__(self, source, state, status):
super().__init__(source=source, types=["system.selinux"])
self.state: str = state
self.status: Dict[str, str] = status
def title(self, session):
result = "SELinux is "
if self.state == "enabled":
result += "[red]enabled[/red]"
elif self.state == "disabled":
result += "[green]disabled[/green]"
else:
result += f"[yellow]{rich.markup.escape(self.state)}[/yellow]"
return result
@property
def mode(self) -> str:
return self.status.get("Current mode", "unknown").lower()
@property
def enabled(self) -> bool:
return self.state.lower() == "enabled"
def description(self, session):
width = max(len(x) for x in self.status) + 1
return "\n".join(
f"{key+':':{width}} {value}" for key, value in self.status.items()
)
class Module(EnumerateModule):
"""
Retrieve the current SELinux state
"""
PROVIDES = ["system.selinux"]
SCHEDULE = Schedule.ONCE
PLATFORM = [Linux]
def enumerate(self, session):
try:
output = session.platform.run("sestatus", capture_output=True, text=True)
except (FileNotFoundError, PermissionError):
return
if output:
output = output.stdout.strip()
status = {}
for line in output.split("\n"):
line = line.strip().replace("\t", " ")
values = " ".join([x for x in line.split(" ") if x != ""]).split(":")
key = values[0].rstrip(":").strip()
value = " ".join(values[1:])
status[key] = value.strip()
if "SELinux status" in status:
state = status["SELinux status"]
else:
state = "unknown"
yield SELinuxState(self.name, state, status)
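# Illustrative note (hypothetical `sestatus` output, not captured from a live host):
# a line such as "SELinux status:                 enabled" parses into
# status["SELinux status"] == "enabled", so the fact's title renders SELinux as enabled,
# while `mode` reads the "Current mode" entry (defaulting to "unknown").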
|
tempest/lib/services/placement/base_placement_client.py | rishabh20111990/tempest | 254 | 24792 | <reponame>rishabh20111990/tempest<filename>tempest/lib/services/placement/base_placement_client.py
# Copyright (c) 2019 Ericsson
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.lib.common import api_version_utils
from tempest.lib.common import rest_client
PLACEMENT_MICROVERSION = None
class BasePlacementClient(rest_client.RestClient):
api_microversion_header_name = 'OpenStack-API-Version'
version_header_value = 'placement %s'
def get_headers(self):
headers = super(BasePlacementClient, self).get_headers()
if PLACEMENT_MICROVERSION:
headers[self.api_microversion_header_name] = \
self.version_header_value % PLACEMENT_MICROVERSION
return headers
def request(self, method, url, extra_headers=False, headers=None,
body=None, chunked=False):
resp, resp_body = super(BasePlacementClient, self).request(
method, url, extra_headers, headers, body, chunked)
if (PLACEMENT_MICROVERSION and
PLACEMENT_MICROVERSION != api_version_utils.LATEST_MICROVERSION):
api_version_utils.assert_version_header_matches_request(
self.api_microversion_header_name,
self.version_header_value % PLACEMENT_MICROVERSION,
resp)
return resp, resp_body
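# Illustrative note (assumed value): if PLACEMENT_MICROVERSION were set to "1.10",
# get_headers() would add {"OpenStack-API-Version": "placement 1.10"} to each request,
# and request() would then assert that the response echoes that same microversion header.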
|
solutions/problem_122.py | ksvr444/daily-coding-problem | 1,921 | 24794 | <reponame>ksvr444/daily-coding-problem
def get_max_coins_helper(matrix, crow, ccol, rows, cols):
cval = matrix[crow][ccol]
if crow == rows - 1 and ccol == cols - 1:
return cval
down, right = cval, cval
if crow < rows - 1:
down += get_max_coins_helper(
matrix, crow + 1, ccol, rows, cols)
if ccol < cols - 1:
right += get_max_coins_helper(
matrix, crow, ccol + 1, rows, cols)
return max(down, right)
def get_max_coins(matrix):
if matrix:
return get_max_coins_helper(
matrix, 0, 0, len(matrix), len(matrix[0]))
coins = [[0, 3, 1, 1],
[2, 0, 0, 4],
[1, 5, 3, 1]]
assert get_max_coins(coins) == 12
coins = [[0, 3, 1, 1],
[2, 8, 9, 4],
[1, 5, 3, 1]]
assert get_max_coins(coins) == 25
|
streamer/output_stream.py | meryacine/shaka-streamer | 154 | 24820 | <gh_stars>100-1000
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains information about each output stream."""
from streamer.bitrate_configuration import AudioCodec, AudioChannelLayout, VideoCodec, VideoResolution
from streamer.input_configuration import Input, MediaType
from streamer.pipe import Pipe
from typing import Dict, Union
class OutputStream(object):
"""Base class for output streams."""
def __init__(self,
type: MediaType,
input: Input,
codec: Union[AudioCodec, VideoCodec, None],
pipe_dir: str,
skip_transcoding: bool = False,
pipe_suffix: str = '') -> None:
self.type: MediaType = type
self.skip_transcoding = skip_transcoding
self.input: Input = input
self.features: Dict[str, str] = {}
self.codec: Union[AudioCodec, VideoCodec, None] = codec
if self.skip_transcoding:
# If skip_transcoding is specified, let the Packager read from a plain
# file instead of an IPC pipe.
self.ipc_pipe = Pipe.create_file_pipe(self.input.name, mode='r')
else:
self.ipc_pipe = Pipe.create_ipc_pipe(pipe_dir, pipe_suffix)
def is_hardware_accelerated(self) -> bool:
"""Returns True if this output stream uses hardware acceleration."""
if self.codec:
return self.codec.is_hardware_accelerated()
return False
def get_ffmpeg_codec_string(self, hwaccel_api: str) -> str:
"""Returns a codec string accepted by FFmpeg for this stream's codec."""
assert self.codec is not None
return self.codec.get_ffmpeg_codec_string(hwaccel_api)
def is_dash_only(self) -> bool:
"""Returns True if the output format is restricted to DASH protocol"""
if self.codec is not None:
return self.codec.get_output_format() == 'webm'
return False
def get_init_seg_file(self) -> Pipe:
INIT_SEGMENT = {
MediaType.AUDIO: 'audio_{language}_{channels}c_{bitrate}_{codec}_init.{format}',
MediaType.VIDEO: 'video_{resolution_name}_{bitrate}_{codec}_init.{format}',
MediaType.TEXT: 'text_{language}_init.{format}',
}
path_templ = INIT_SEGMENT[self.type].format(**self.features)
return Pipe.create_file_pipe(path_templ, mode='w')
def get_media_seg_file(self) -> Pipe:
MEDIA_SEGMENT = {
MediaType.AUDIO: 'audio_{language}_{channels}c_{bitrate}_{codec}_$Number$.{format}',
MediaType.VIDEO: 'video_{resolution_name}_{bitrate}_{codec}_$Number$.{format}',
MediaType.TEXT: 'text_{language}_$Number$.{format}',
}
path_templ = MEDIA_SEGMENT[self.type].format(**self.features)
return Pipe.create_file_pipe(path_templ, mode='w')
def get_single_seg_file(self) -> Pipe:
SINGLE_SEGMENT = {
MediaType.AUDIO: 'audio_{language}_{channels}c_{bitrate}_{codec}.{format}',
MediaType.VIDEO: 'video_{resolution_name}_{bitrate}_{codec}.{format}',
MediaType.TEXT: 'text_{language}.{format}',
}
path_templ = SINGLE_SEGMENT[self.type].format(**self.features)
return Pipe.create_file_pipe(path_templ, mode='w')
class AudioOutputStream(OutputStream):
def __init__(self,
input: Input,
pipe_dir: str,
codec: AudioCodec,
channel_layout: AudioChannelLayout) -> None:
super().__init__(MediaType.AUDIO, input, codec, pipe_dir)
# Override the codec type and specify that it's an audio codec
self.codec: AudioCodec = codec
self.layout = channel_layout
# The features that will be used to generate the output filename.
self.features = {
'language': input.language,
'channels': str(self.layout.max_channels),
'bitrate': self.get_bitrate(),
'format': self.codec.get_output_format(),
'codec': self.codec.value,
}
def get_bitrate(self) -> str:
"""Returns the bitrate for this stream."""
return self.layout.bitrates[self.codec]
class VideoOutputStream(OutputStream):
def __init__(self,
input: Input,
pipe_dir: str,
codec: VideoCodec,
resolution: VideoResolution) -> None:
super().__init__(MediaType.VIDEO, input, codec, pipe_dir)
    # Override the codec type and specify that it's a video codec
self.codec: VideoCodec = codec
self.resolution = resolution
# The features that will be used to generate the output filename.
self.features = {
'resolution_name': self.resolution.get_key(),
'bitrate': self.get_bitrate(),
'format': self.codec.get_output_format(),
'codec': self.codec.value,
}
def get_bitrate(self) -> str:
"""Returns the bitrate for this stream."""
return self.resolution.bitrates[self.codec]
class TextOutputStream(OutputStream):
def __init__(self,
input: Input,
pipe_dir: str,
skip_transcoding: bool):
# We don't have a codec per se for text, but we'd like to generically
# process OutputStream objects in ways that are easier with this attribute
# set, so set it to None.
codec = None
super().__init__(MediaType.TEXT, input, codec, pipe_dir,
skip_transcoding, pipe_suffix='.vtt')
# The features that will be used to generate the output filename.
self.features = {
'language': input.language,
'format': 'mp4',
}
|
deepchem/data/tests/test_shape.py | deloragaskins/deepchem | 3,782 | 24823 | import deepchem as dc
import numpy as np
import os
def test_numpy_dataset_get_shape():
"""Test that get_shape works for numpy datasets."""
num_datapoints = 100
num_features = 10
num_tasks = 10
# Generate data
X = np.random.rand(num_datapoints, num_features)
y = np.random.randint(2, size=(num_datapoints, num_tasks))
w = np.random.randint(2, size=(num_datapoints, num_tasks))
ids = np.array(["id"] * num_datapoints)
dataset = dc.data.NumpyDataset(X, y, w, ids)
X_shape, y_shape, w_shape, ids_shape = dataset.get_shape()
assert X_shape == X.shape
assert y_shape == y.shape
assert w_shape == w.shape
assert ids_shape == ids.shape
def test_disk_dataset_get_shape_single_shard():
"""Test that get_shape works for disk dataset."""
num_datapoints = 100
num_features = 10
num_tasks = 10
# Generate data
X = np.random.rand(num_datapoints, num_features)
y = np.random.randint(2, size=(num_datapoints, num_tasks))
w = np.random.randint(2, size=(num_datapoints, num_tasks))
ids = np.array(["id"] * num_datapoints)
dataset = dc.data.DiskDataset.from_numpy(X, y, w, ids)
X_shape, y_shape, w_shape, ids_shape = dataset.get_shape()
assert X_shape == X.shape
assert y_shape == y.shape
assert w_shape == w.shape
assert ids_shape == ids.shape
def test_disk_dataset_get_shape_multishard():
"""Test that get_shape works for multisharded disk dataset."""
num_datapoints = 100
num_features = 10
num_tasks = 10
# Generate data
X = np.random.rand(num_datapoints, num_features)
y = np.random.randint(2, size=(num_datapoints, num_tasks))
w = np.random.randint(2, size=(num_datapoints, num_tasks))
ids = np.array(["id"] * num_datapoints)
dataset = dc.data.DiskDataset.from_numpy(X, y, w, ids)
# Should now have 10 shards
dataset.reshard(shard_size=10)
X_shape, y_shape, w_shape, ids_shape = dataset.get_shape()
assert X_shape == X.shape
assert y_shape == y.shape
assert w_shape == w.shape
assert ids_shape == ids.shape
def test_disk_dataset_get_legacy_shape_single_shard():
"""Test that get_shape works for legacy disk dataset."""
# This is the shape of legacy_data
num_datapoints = 100
num_features = 10
num_tasks = 10
current_dir = os.path.dirname(os.path.abspath(__file__))
# legacy_dataset is a dataset in the legacy format kept around for testing
# purposes.
data_dir = os.path.join(current_dir, "legacy_dataset")
dataset = dc.data.DiskDataset(data_dir)
X_shape, y_shape, w_shape, ids_shape = dataset.get_shape()
assert X_shape == (num_datapoints, num_features)
assert y_shape == (num_datapoints, num_tasks)
assert w_shape == (num_datapoints, num_tasks)
assert ids_shape == (num_datapoints,)
def test_disk_dataset_get_legacy_shape_multishard():
"""Test that get_shape works for multisharded legacy disk dataset."""
# This is the shape of legacy_data_reshard
num_datapoints = 100
num_features = 10
num_tasks = 10
# legacy_dataset_reshard is a sharded dataset in the legacy format kept
# around for testing
current_dir = os.path.dirname(os.path.abspath(__file__))
data_dir = os.path.join(current_dir, "legacy_dataset_reshard")
dataset = dc.data.DiskDataset(data_dir)
# Should now have 10 shards
assert dataset.get_number_shards() == 10
X_shape, y_shape, w_shape, ids_shape = dataset.get_shape()
assert X_shape == (num_datapoints, num_features)
assert y_shape == (num_datapoints, num_tasks)
assert w_shape == (num_datapoints, num_tasks)
assert ids_shape == (num_datapoints,)
def test_get_shard_size():
"""
Test that using ids for getting the shard size does not break the method.
The issue arises when attempting to load a dataset that does not have a labels
column. The create_dataset method of the DataLoader class sets the y to None
in this case, which causes the existing implementation of the get_shard_size()
method to fail, as it relies on the dataset having a not None y column. This
consequently breaks all methods depending on this, like the splitters for
example.
Note
----
DiskDatasets without labels cannot be resharded!
"""
current_dir = os.path.dirname(os.path.abspath(__file__))
file_path = os.path.join(current_dir, "reaction_smiles.csv")
featurizer = dc.feat.DummyFeaturizer()
loader = dc.data.CSVLoader(
tasks=[], feature_field="reactions", featurizer=featurizer)
dataset = loader.create_dataset(file_path)
assert dataset.get_shard_size() == 4
|
src/oci/apigateway/models/execution_log_policy.py | Manny27nyc/oci-python-sdk | 249 | 24828 | # coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class ExecutionLogPolicy(object):
"""
Configures the logging policies for the execution logs of an API Deployment.
"""
#: A constant which can be used with the log_level property of a ExecutionLogPolicy.
#: This constant has a value of "INFO"
LOG_LEVEL_INFO = "INFO"
#: A constant which can be used with the log_level property of a ExecutionLogPolicy.
#: This constant has a value of "WARN"
LOG_LEVEL_WARN = "WARN"
#: A constant which can be used with the log_level property of a ExecutionLogPolicy.
#: This constant has a value of "ERROR"
LOG_LEVEL_ERROR = "ERROR"
def __init__(self, **kwargs):
"""
Initializes a new ExecutionLogPolicy object with values from keyword arguments.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param is_enabled:
The value to assign to the is_enabled property of this ExecutionLogPolicy.
:type is_enabled: bool
:param log_level:
The value to assign to the log_level property of this ExecutionLogPolicy.
Allowed values for this property are: "INFO", "WARN", "ERROR", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:type log_level: str
"""
self.swagger_types = {
'is_enabled': 'bool',
'log_level': 'str'
}
self.attribute_map = {
'is_enabled': 'isEnabled',
'log_level': 'logLevel'
}
self._is_enabled = None
self._log_level = None
@property
def is_enabled(self):
"""
Gets the is_enabled of this ExecutionLogPolicy.
Enables pushing of execution logs to the legacy OCI Object Storage log archival bucket.
Oracle recommends using the OCI Logging service to enable, retrieve, and query execution logs
for an API Deployment. If there is an active log object for the API Deployment and its
category is set to 'execution' in OCI Logging service, the logs will not be uploaded to the legacy
OCI Object Storage log archival bucket.
Please note that the functionality to push to the legacy OCI Object Storage log
archival bucket has been deprecated and will be removed in the future.
:return: The is_enabled of this ExecutionLogPolicy.
:rtype: bool
"""
return self._is_enabled
@is_enabled.setter
def is_enabled(self, is_enabled):
"""
Sets the is_enabled of this ExecutionLogPolicy.
Enables pushing of execution logs to the legacy OCI Object Storage log archival bucket.
Oracle recommends using the OCI Logging service to enable, retrieve, and query execution logs
for an API Deployment. If there is an active log object for the API Deployment and its
category is set to 'execution' in OCI Logging service, the logs will not be uploaded to the legacy
OCI Object Storage log archival bucket.
Please note that the functionality to push to the legacy OCI Object Storage log
archival bucket has been deprecated and will be removed in the future.
:param is_enabled: The is_enabled of this ExecutionLogPolicy.
:type: bool
"""
self._is_enabled = is_enabled
@property
def log_level(self):
"""
Gets the log_level of this ExecutionLogPolicy.
Specifies the log level used to control logging output of execution logs.
Enabling logging at a given level also enables logging at all higher levels.
Allowed values for this property are: "INFO", "WARN", "ERROR", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:return: The log_level of this ExecutionLogPolicy.
:rtype: str
"""
return self._log_level
@log_level.setter
def log_level(self, log_level):
"""
Sets the log_level of this ExecutionLogPolicy.
Specifies the log level used to control logging output of execution logs.
Enabling logging at a given level also enables logging at all higher levels.
:param log_level: The log_level of this ExecutionLogPolicy.
:type: str
"""
allowed_values = ["INFO", "WARN", "ERROR"]
if not value_allowed_none_or_none_sentinel(log_level, allowed_values):
log_level = 'UNKNOWN_ENUM_VALUE'
self._log_level = log_level
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
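# Hedged usage sketch (illustrative; attribute values are assumptions, not defaults):
#
#     policy = ExecutionLogPolicy(is_enabled=True, log_level="WARN")
#     policy.log_level        # -> "WARN"
#     policy.log_level = "TRACE"
#     policy.log_level        # -> "UNKNOWN_ENUM_VALUE" (unrecognized values are coerced)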
|
Programs/Evernote/PackMemo/PackMemo.py | Psiphonc/EasierLife | 203 | 24882 | <filename>Programs/Evernote/PackMemo/PackMemo.py
from EvernoteController import EvernoteController
from Memo import Memo
MEMO_NAME = 'Memo'
MEMO_DIR = 'Memo'
MEMO_STORAGE_DIR = 'S-Memo'
def f(fn, *args, **kwargs):
try:
fn(*args, **kwargs)
except:
pass
m = Memo()
e = EvernoteController()
f(e.create_notebook, MEMO_DIR)
f(e.create_notebook, MEMO_STORAGE_DIR)
f(e.move_note, MEMO_DIR+'/'+MEMO_NAME, MEMO_STORAGE_DIR)
e.create_note(MEMO_NAME, m.raw_memo(), MEMO_DIR)
|
test/src/testing/universal/uart.py | Jcc99/Adafruit_Blinka | 294 | 24886 | <filename>test/src/testing/universal/uart.py
import gc
from unittest import TestCase
from testing import await_true
gc.collect()
class TestGPSInteractive(TestCase):
def test_read_value(self):
import adafruit_blinka
adafruit_blinka.patch_system() # needed before adafruit_gps imports time
import microcontroller.pin
gc.collect()
import busio
gc.collect()
import adafruit_gps
gc.collect()
# configure the last available UART (first uart often for REPL)
uartId, uartTx, uartRx = microcontroller.pin.uartPorts[0]
uart = busio.UART(uartTx, uartRx, baudrate=9600, timeout=3000)
gps = adafruit_gps.GPS(uart)
gps.send_command("PMTK314,0,1,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0")
gps.send_command("PMTK220,1000")
def try_fix():
gps.update()
return gps.has_fix
await_true("GPS fix", try_fix)
self.assertTrue(gps.satellites is not None)
self.assertTrue(-90 <= gps.latitude < 90)
self.assertTrue(-180 <= gps.longitude < 180)
|
src/masonite/presets/React.py | cercos/masonite | 1,816 | 24889 | <reponame>cercos/masonite
"""React Preset"""
import shutil
import os
from .Preset import Preset
from ..utils.filesystem import make_directory
from ..utils.location import resources_path, views_path
class React(Preset):
"""
Configure the front-end scaffolding for the application to use ReactJS
    Will also remove Vue, as Vue and React are a bit mutually exclusive
"""
key = "react"
packages = {
"react": "^17.0.2",
"react-dom": "^17.0.2",
"@babel/preset-react": "^7.16.5",
}
removed_packages = ["vue", "vue-loader"]
def install(self):
"""Install the preset"""
self.update_packages(dev=True)
self.update_webpack_mix()
self.update_js()
self.add_components()
self.update_css()
self.create_view()
self.remove_node_modules()
def add_components(self):
"""Copy example React component into application (delete example Vue component
if it exists)"""
        # make the components directory if it does not exist
make_directory(resources_path("js/components/Example.js"))
        # delete Vue components if they exist
vue_files = [
resources_path("js/components/HelloWorld.vue"),
resources_path("js/App.vue"),
]
for vue_file in vue_files:
if os.path.exists(vue_file):
os.remove(vue_file)
        # add the example React component
shutil.copyfile(
self.get_template_path("Example.js"),
resources_path("js/components/Example.js"),
)
def create_view(self):
"""Copy an example app view with assets included."""
shutil.copyfile(
self.get_template_path("app.html"), views_path("app_react.html")
)
|
docker/dempcap/pcapminey/core/ThreadPool/Pool.py | JakubOrzol/dockerfiles | 203 | 24927 | <reponame>JakubOrzol/dockerfiles
# -*- coding: utf8 -*-
__author__ = '<NAME>'
from Queue import Queue
from Worker import Worker
class Pool:
def __init__(self, size):
self.size = size
self.workers = []
self.tasks = Queue()
def _removeDeadWorkers(self):
self.workers = [w for w in self.workers if w.isAlive()]
def map_async(self, func, objects, callback):
self._removeDeadWorkers()
if not len(self.workers) == 0:
raise Exception('ThreadPool is still working! Adding new jobs is not allowed!')
for object in objects:
self.tasks.put((func, object, callback))
for id in range(self.size):
self.workers.append(Worker(id, self.tasks))
for worker in self.workers:
worker.start()
def join(self):
for worker in self.workers:
worker.join() |
Codes/gracekoo/interview_33.py | ghoslation/algorithm | 256 | 24931 | # -*- coding: utf-8 -*-
# @Time: 2020/7/3 10:21
# @Author: GraceKoo
# @File: interview_33.py
# @Desc: https://leetcode-cn.com/problems/chou-shu-lcof/
class Solution:
def nthUglyNumber(self, n: int) -> int:
if n <= 0:
return 0
dp, a, b, c = [1] * n, 0, 0, 0
for i in range(1, n):
min_ugly = min(dp[a] * 2, dp[b] * 3, dp[c] * 5)
dp[i] = min_ugly
if min_ugly == dp[a] * 2:
a += 1
if min_ugly == dp[b] * 3:
b += 1
if min_ugly == dp[c] * 5:
c += 1
return dp[-1]
so = Solution()
print(so.nthUglyNumber(10))
|
tests/import/module_getattr.py | sebi5361/micropython | 198 | 24938 | <reponame>sebi5361/micropython
# test __getattr__ on module
# ensure that does_not_exist doesn't exist to start with
this = __import__(__name__)
try:
this.does_not_exist
assert False
except AttributeError:
pass
# define __getattr__
def __getattr__(attr):
if attr == 'does_not_exist':
return False
raise AttributeError
# do feature test (will also test functionality if the feature exists)
if not hasattr(this, 'does_not_exist'):
print('SKIP')
raise SystemExit
# check that __getattr__ works as expected
print(this.does_not_exist)
|
setup.py | gsgoncalves/K-NRM | 198 | 24947 | # Copyright (c) 2017, Carnegie Mellon University. All rights reserved.
#
# Use of the K-NRM package is subject to the terms of the software license set
# forth in the LICENSE file included with this software, and also available at
# https://github.com/AdeDZY/K-NRM/blob/master/LICENSE
from setuptools import setup
from setuptools import find_packages
setup(name='knrm',
version='0',
description='knrm',
author='<NAME> and <NAME>',
install_requires=['numpy', 'traitlets', 'tensorflow'],
packages=find_packages()
)
|
docqa/triviaqa/answer_detection.py | Willyoung2017/doc-qa | 422 | 24992 | import re
import string
import numpy as np
from tqdm import tqdm
from typing import List
from docqa.triviaqa.read_data import TriviaQaQuestion
from docqa.triviaqa.trivia_qa_eval import normalize_answer, f1_score
from docqa.utils import flatten_iterable, split
"""
Tools for turning the aliases and answer strings from TriviaQA into labelled spans
"""
class ExactMatchDetector(object):
def __init__(self):
self.answer_tokens = None
def set_question(self, normalized_aliases):
self.answer_tokens = normalized_aliases
def any_found(self, para):
words = [x.lower() for x in flatten_iterable(para)]
occurances = []
for answer_ix, answer in enumerate(self.answer_tokens):
word_starts = [i for i, w in enumerate(words) if answer[0] == w]
n_tokens = len(answer)
for start in word_starts:
end = start + 1
ans_token = 1
while ans_token < n_tokens and end < len(words):
next = words[end]
if answer[ans_token] == next:
ans_token += 1
end += 1
else:
break
if n_tokens == ans_token:
occurances.append((start, end))
return list(set(occurances))
class NormalizedAnswerDetector(object):
""" Try to labels tokens sequences, such that the extracted sequence would be evaluated as 100% correct
by the official trivia-qa evaluation script """
def __init__(self):
self.answer_tokens = None
def set_question(self, normalized_aliases):
self.answer_tokens = normalized_aliases
def any_found(self, para):
words = [normalize_answer(w) for w in flatten_iterable(para)]
occurances = []
for answer_ix, answer in enumerate(self.answer_tokens):
word_starts = [i for i, w in enumerate(words) if answer[0] == w]
n_tokens = len(answer)
for start in word_starts:
end = start + 1
ans_token = 1
while ans_token < n_tokens and end < len(words):
next = words[end]
if answer[ans_token] == next:
ans_token += 1
end += 1
elif next == "":
end += 1
else:
break
if n_tokens == ans_token:
occurances.append((start, end))
return list(set(occurances))
class FastNormalizedAnswerDetector(object):
""" almost twice as fast and very,very close to NormalizedAnswerDetector's output """
def __init__(self):
# These come from the TrivaQA official evaluation script
self.skip = {"a", "an", "the", ""}
self.strip = string.punctuation + "".join([u"‘", u"’", u"´", u"`", "_"])
self.answer_tokens = None
def set_question(self, normalized_aliases):
self.answer_tokens = normalized_aliases
def any_found(self, para):
# Normalize the paragraph
words = [w.lower().strip(self.strip) for w in flatten_iterable(para)]
occurances = []
for answer_ix, answer in enumerate(self.answer_tokens):
# Locations where the first word occurs
word_starts = [i for i, w in enumerate(words) if answer[0] == w]
n_tokens = len(answer)
# Advance forward until we find all the words, skipping over articles
for start in word_starts:
end = start + 1
ans_token = 1
while ans_token < n_tokens and end < len(words):
next = words[end]
if answer[ans_token] == next:
ans_token += 1
end += 1
elif next in self.skip:
end += 1
else:
break
if n_tokens == ans_token:
occurances.append((start, end))
return list(set(occurances))
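# Hedged usage sketch (toy tokens, not TriviaQA data):
#
#     detector = FastNormalizedAnswerDetector()
#     detector.set_question([["new", "york"]])
#     detector.any_found([["She", "moved", "to", "New", "York", "!"]])
#     # -> [(3, 5)]  (token span covering "New York", end exclusive)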
class CarefulAnswerDetector(object):
"""
    There are some common false negatives in the above answer detection; in particular, plurals of answers are
    often not found (nor counted as correct by the official script). This detector makes a stronger effort to
    find them, although it's unclear whether training with these additional answers would hurt or help our overall score
since I never got around to trying it.
"""
def __init__(self):
self.skip = {"a", "an", "the", "&", "and", "-", "\u2019", "\u2018", "\"", ";", "'",
"(", ")", "'s'", "s", ":", ",", "."}
self.answer_regex = None
self.aliases = None
def set_question(self, normalized_aliases):
answer_regex = []
self.aliases = normalized_aliases
for answer in normalized_aliases:
tokens = []
for token in answer:
if len(token) > 1:
tokens.append(token + "s?")
else:
tokens.append(token)
if tokens[-1] == "s":
tokens[-1] = "s?"
answer_regex.append([re.compile(x, re.IGNORECASE) for x in tokens])
self.answer_regex = answer_regex
def any_found(self, para):
words = flatten_iterable(para)
occurances = []
for answer_ix, answer in enumerate(self.answer_regex):
word_starts = [i for i, w in enumerate(words) if answer[0].fullmatch(w)]
n_tokens = len(answer)
for start in word_starts:
end = start + 1
ans_token = 1
while ans_token < n_tokens and end < len(words):
next = words[end]
if answer[ans_token].match(next):
ans_token += 1
end += 1
elif next in self.skip:
end += 1
else:
break
if n_tokens == ans_token:
occurances.append((start, end))
return list(set(occurances))
def evaluate_question_detector(questions, corpus, word_tokenize, detector, reference_detector=None, compute_f1s=False):
""" Just for debugging """
n_no_docs = 0
answer_per_doc = []
answer_f1s = []
for question_ix, q in enumerate(tqdm(questions)):
tokenized_aliases = [word_tokenize(x) for x in q.answer.normalized_aliases]
detector.set_question(tokenized_aliases)
for doc in q.all_docs:
doc = corpus.get_document(doc.doc_id)
if doc is None:
n_no_docs += 1
continue
output = []
for i, para in enumerate(doc):
for s,e in detector.any_found(para):
output.append((i, s, e))
if len(output) == 0 and reference_detector is not None:
if reference_detector is not None:
reference_detector.set_question(tokenized_aliases)
detected = []
for i, para in enumerate(doc):
for s, e in reference_detector.any_found(para):
detected.append((i, s, e))
if len(detected) > 0:
print("Found a difference")
print(q.answer.normalized_aliases)
print(tokenized_aliases)
for p, s, e in detected:
token = flatten_iterable(doc[p])[s:e]
print(token)
answer_per_doc.append(output)
if compute_f1s:
f1s = []
for p, s, e in output:
token = flatten_iterable(doc[p])[s:e]
answer = normalize_answer(" ".join(token))
f1 = 0
for gt in q.answer.normalized_aliases:
f1 = max(f1, f1_score(answer, gt))
f1s.append(f1)
answer_f1s.append(f1s)
n_answers = sum(len(x) for x in answer_per_doc)
print("Found %d answers (av %.4f)" % (n_answers, n_answers/len(answer_per_doc)))
print("%.4f docs have answers" % np.mean([len(x) > 0 for x in answer_per_doc]))
if len(answer_f1s) > 0:
print("Average f1 is %.4f" % np.mean(flatten_iterable(answer_f1s)))
def compute_answer_spans(questions: List[TriviaQaQuestion], corpus, word_tokenize,
detector):
for i, q in enumerate(questions):
if i % 500 == 0:
print("Completed question %d of %d (%.3f)" % (i, len(questions), i/len(questions)))
q.question = word_tokenize(q.question)
if q.answer is None:
continue
tokenized_aliases = [word_tokenize(x) for x in q.answer.all_answers]
if len(tokenized_aliases) == 0:
raise ValueError()
detector.set_question(tokenized_aliases)
for doc in q.all_docs:
text = corpus.get_document(doc.doc_id)
if text is None:
raise ValueError()
spans = []
offset = 0
for para_ix, para in enumerate(text):
for s, e in detector.any_found(para):
spans.append((s+offset, e+offset-1)) # turn into inclusive span
offset += sum(len(s) for s in para)
if len(spans) == 0:
spans = np.zeros((0, 2), dtype=np.int32)
else:
spans = np.array(spans, dtype=np.int32)
doc.answer_spans = spans
def _compute_answer_spans_chunk(questions, corpus, tokenizer, detector):
# We use tokenize_paragraph since some questions can have multiple sentences,
# but we still store the results as a flat list of tokens
word_tokenize = tokenizer.tokenize_paragraph_flat
compute_answer_spans(questions, corpus, word_tokenize, detector)
return questions
def compute_answer_spans_par(questions: List[TriviaQaQuestion], corpus,
tokenizer, detector, n_processes: int):
if n_processes == 1:
word_tokenize = tokenizer.tokenize_paragraph_flat
compute_answer_spans(questions, corpus, word_tokenize, detector)
return questions
from multiprocessing import Pool
with Pool(n_processes) as p:
chunks = split(questions, n_processes)
questions = flatten_iterable(p.starmap(_compute_answer_spans_chunk,
[[c, corpus, tokenizer, detector] for c in chunks]))
return questions
def main():
from trivia_qa.build_span_corpus import TriviaQaWebDataset
from data_processing.text_utils import NltkAndPunctTokenizer
dataset = TriviaQaWebDataset()
qs = dataset.get_train()
qs = np.random.RandomState(0).choice(qs, 1000, replace=False)
evaluate_question_detector(qs, dataset.evidence, NltkAndPunctTokenizer().tokenize_paragraph_flat,
FastNormalizedAnswerDetector())
if __name__ == "__main__":
main() |
examples/vacuum_send_command.py | giuseppeg88/lovelace-xiaomi-vacuum-map-card | 798 | 24994 | <filename>examples/vacuum_send_command.py
entity_id = data.get('entity_id')
command = data.get('command')
params = str(data.get('params'))
parsedParams = []
for z in params.replace(' ', '').replace('],[', '|').replace('[', '').replace(']', '').split('|'):
rect = []
for c in z.split(','):
rect.append(int(c))
parsedParams.append(rect)
if command in ["app_goto_target", "app_segment_clean"]:
parsedParams = parsedParams[0]
hass.services.call('vacuum', 'send_command',
{'entity_id': entity_id, 'command': command, 'params': parsedParams}, True)
|
extras/createTestBlocksForReadBlkUpdate.py | Manny27nyc/BitcoinArmory | 505 | 25008 | from sys import path
path.append('..')
from armoryengine import *
TheBDM.setBlocking(True)
TheBDM.setOnlineMode(True)
if not os.path.exists('testmultiblock'):
os.mkdir('testmultiblock')
fout = []
fout.append([0, 101, 'testmultiblock/blk00000.dat'])
fout.append([0, 102, 'testmultiblock/blk00000_test1.dat']) # Add 1 block
fout.append([0, 105, 'testmultiblock/blk00000_test2.dat']) # Add 3 blocks
fout.append([106, 106, 'testmultiblock/blk00001_test3.dat']) # Just block split
fout.append([107, 109, 'testmultiblock/blk00002_test4.dat']) # Another block split 3 blks
fout.append([107, 110, 'testmultiblock/blk00002_test5.dat']) # Add block
fout.append([110, 113, 'testmultiblock/blk00003_test5.dat']) # and split
for start,end,theFile in fout:
if os.path.exists(theFile):
os.remove(theFile)
lastLocation = [0]*len(fout)
openfiles = [[trip[0], trip[1], open(trip[2],'wb')] for trip in fout]
# Assume we are only reading into blk000000.dat, no split
for h in range(120):
head = TheBDM.getHeaderByHeight(h)
blk = head.serializeWholeBlock(MAGIC_BYTES, True)
for i,trip in enumerate(openfiles):
start,end,theFile = trip
if (start <= h <= end):
theFile.write(blk)
lastLocation[i] += len(blk)
for start,end,opnfil in openfiles:
opnfil.close()
for i,trip in enumerate(fout):
start,end,theFile = trip
sz = os.path.getsize(theFile)
f = open(theFile,'ab')
if i<3:
f.write('\x00'*(22000-sz))
else:
f.write('\x00'*(1000-sz))
f.close()
print 'Blocks written out:'
for start,end,fn in fout:
if end-start==0:
print '\t%d in file: %s' % (end,fn)
else:
print '\t%d-%d in file: %s' % (start,end,fn)
|
tests/common/devices/vmhost.py | emilmih/sonic-mgmt | 132 | 25087 | from tests.common.devices.base import AnsibleHostBase
class VMHost(AnsibleHostBase):
"""
@summary: Class for VM server
For running ansible module on VM server
"""
def __init__(self, ansible_adhoc, hostname):
AnsibleHostBase.__init__(self, ansible_adhoc, hostname)
@property
def external_port(self):
if not hasattr(self, "_external_port"):
vm = self.host.options["variable_manager"]
im = self.host.options["inventory_manager"]
hostvars = vm.get_vars(host=im.get_host(self.hostname), include_delegate_to=False)
setattr(self, "_external_port", hostvars["external_port"])
return getattr(self, "_external_port")
|
python/paddle/fluid/tests/unittests/dygraph_to_static/test_isinstance.py | zmxdream/Paddle | 17,085 | 25100 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import unittest
import paddle
import paddle.nn as nn
class SimpleReturnLayer(nn.Layer):
def forward(self, x):
return x
class AddAttrLayer(nn.Layer):
def __init__(self):
super(AddAttrLayer, self).__init__()
self.attr = None
def forward(self, x):
out = x + self.attr
return out
class IsInstanceLayer(nn.Layer):
def __init__(self, layer):
super(IsInstanceLayer, self).__init__()
self.layer = layer
@paddle.jit.to_static
def forward(self, x):
if isinstance(self.layer, (AddAttrLayer, )):
self.layer.attr = x
res = self.layer(x)
return res
class SequentialLayer(nn.Layer):
def __init__(self, layers):
super(SequentialLayer, self).__init__()
self.layers = nn.LayerList(layers)
@paddle.jit.to_static
def forward(self, x):
res = x
for layer in self.layers:
if isinstance(layer, AddAttrLayer):
layer.attr = x
res = layer(res)
return res
def train(model, to_static):
prog_trans = paddle.jit.ProgramTranslator.get_instance()
prog_trans.enable(to_static)
x = paddle.ones(shape=[2, 3], dtype='int32')
out = model(x)
return out.numpy()
class TestIsinstance(unittest.TestCase):
def test_isinstance_simple_return_layer(self):
model = IsInstanceLayer(SimpleReturnLayer())
self._test_model(model)
def test_isinstance_add_attr_layer(self):
model = IsInstanceLayer(AddAttrLayer())
self._test_model(model)
def test_sequential_layer(self):
layers = []
for i in range(5):
layers.append(SimpleReturnLayer())
layers.append(AddAttrLayer())
model = SequentialLayer(layers)
self._test_model(model)
def _test_model(self, model):
st_out = train(model, to_static=True)
dy_out = train(model, to_static=False)
self.assertTrue(
np.allclose(dy_out, st_out),
msg="dy_out:\n {}\n st_out:\n{}".format(dy_out, st_out))
if __name__ == "__main__":
unittest.main()
|
tests/test_weightings.py | matchup-ir/whooshy | 319 | 25103 | <reponame>matchup-ir/whooshy
from __future__ import with_statement
import inspect
from random import choice, randint
import sys
from whoosh import fields, query, scoring
from whoosh.compat import u, xrange, permutations
from whoosh.filedb.filestore import RamStorage
def _weighting_classes(ignore):
# Get all the subclasses of Weighting in whoosh.scoring
return [c for _, c in inspect.getmembers(scoring, inspect.isclass)
if scoring.Weighting in c.__bases__ and c not in ignore]
def test_all():
domain = [u("alfa"), u("bravo"), u("charlie"), u("delta"), u("echo"),
u("foxtrot")]
schema = fields.Schema(text=fields.TEXT)
storage = RamStorage()
ix = storage.create_index(schema)
w = ix.writer()
for _ in xrange(100):
w.add_document(text=u(" ").join(choice(domain)
for _ in xrange(randint(10, 20))))
w.commit()
# List ABCs that should not be tested
abcs = ()
# provide initializer arguments for any weighting classes that require them
init_args = {"MultiWeighting": ([scoring.BM25F()],
{"text": scoring.Frequency()}),
"ReverseWeighting": ([scoring.BM25F()], {})}
for wclass in _weighting_classes(abcs):
try:
if wclass.__name__ in init_args:
args, kwargs = init_args[wclass.__name__]
weighting = wclass(*args, **kwargs)
else:
weighting = wclass()
except TypeError:
e = sys.exc_info()[1]
raise TypeError("Error instantiating %r: %s" % (wclass, e))
with ix.searcher(weighting=weighting) as s:
try:
for word in domain:
s.search(query.Term("text", word))
except Exception:
e = sys.exc_info()[1]
e.msg = "Error searching with %r: %s" % (wclass, e)
raise
def test_compatibility():
from whoosh.scoring import Weighting
# This is the old way of doing a custom weighting model, check that
# it's still supported...
class LegacyWeighting(Weighting):
use_final = True
def score(self, searcher, fieldname, text, docnum, weight):
return weight + 0.5
def final(self, searcher, docnum, score):
return score * 1.5
schema = fields.Schema(text=fields.TEXT)
ix = RamStorage().create_index(schema)
w = ix.writer()
domain = "alfa bravo charlie delta".split()
for ls in permutations(domain, 3):
w.add_document(text=u(" ").join(ls))
w.commit()
s = ix.searcher(weighting=LegacyWeighting())
r = s.search(query.Term("text", u("bravo")))
assert r.score(0) == 2.25
|
rest_framework_auth0/models.py | robindebois/djangorestframework-auth0 | 107 | 25111 | # Just to keep things like ./manage.py test happy
from django.contrib.auth.models import AbstractUser
# class Group(models.Model):
# """
# Groups are a generic way of categorizing users to apply permissions, or
# some other label, to those users. A user can belong to any number of
# groups.
# A user in a group automatically has all the permissions granted to that
# group. For example, if the group Site editors has the permission
# can_edit_home_page, any user in that group will have that permission.
# Beyond permissions, groups are a convenient way to categorize users to
# apply some label, or extended functionality, to them. For example, you
# could create a group 'Special users', and you could write code that would
# do special things to those users -- such as giving them access to a
# members-only portion of your site, or sending them members-only email
# messages.
# """
# name = models.CharField(_('name'), max_length=80, unique=True)
# permissions = models.ManyToManyField(
# Permission,
# verbose_name=_('permissions'),
# blank=True,
# )
#
# objects = GroupManager()
#
# class Meta:
# verbose_name = _('group')
# verbose_name_plural = _('groups')
#
# def __str__(self):
# return self.name
#
# def natural_key(self):
# return (self.name,)
# class User(AbstractUser):
# """
# Users within the Django authentication system are represented by this
# model.
# Username, password and email are required. Other fields are optional.
# """
# class Meta(AbstractUser.Meta):
# swappable = 'AUTH_USER_MODEL'
|
algs4/max_pq.py | dumpmemory/algs4-py | 230 | 25120 | <reponame>dumpmemory/algs4-py
class MaxPQ:
def __init__(self):
self.pq = []
def insert(self, v):
self.pq.append(v)
self.swim(len(self.pq) - 1)
def max(self):
return self.pq[0]
def del_max(self, ):
m = self.pq[0]
self.pq[0], self.pq[-1] = self.pq[-1], self.pq[0]
self.pq = self.pq[:-1]
self.sink(0)
return m
def is_empty(self, ):
return not self.pq
def size(self, ):
return len(self.pq)
def swim(self, k):
while k > 0 and self.pq[(k - 1) // 2] < self.pq[k]:
self.pq[k], self.pq[
(k - 1) // 2] = self.pq[(k - 1) // 2], self.pq[k]
            k = (k - 1) // 2  # step up to the parent of a zero-indexed heap node
def sink(self, k):
N = len(self.pq)
while 2 * k + 1 <= N - 1:
j = 2 * k + 1
if j < N - 1 and self.pq[j] < self.pq[j + 1]:
j += 1
if self.pq[k] > self.pq[j]:
break
self.pq[k], self.pq[j] = self.pq[j], self.pq[k]
k = j
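# Minimal usage sketch (added for illustration; values are arbitrary):
if __name__ == "__main__":
    pq = MaxPQ()
    for v in [3, 1, 4, 1, 5]:
        pq.insert(v)
    assert pq.max() == 5
    assert pq.del_max() == 5
    assert pq.size() == 4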
|
metric/metric.py | wyu-du/MultiTurnDialogZoo | 145 | 25144 | <filename>metric/metric.py<gh_stars>100-1000
from nltk.translate.bleu_score import sentence_bleu, corpus_bleu
from nltk.translate.bleu_score import SmoothingFunction
from nltk.collocations import BigramCollocationFinder
from nltk.probability import FreqDist
from .bleu import Bleu
import argparse
import codecs
import numpy as np
import math
from bert_score import score
from rouge import Rouge
import os, re
import ipdb
import numpy as np
# BLEU of NLTK
def cal_BLEU_nltk(refer, candidate, ngram=1):
'''
SmoothingFunction refer to https://github.com/PaddlePaddle/models/blob/a72760dff8574fe2cb8b803e01b44624db3f3eff/PaddleNLP/Research/IJCAI2019-MMPMS/mmpms/utils/metrics.py
'''
smoothie = SmoothingFunction().method7
if ngram == 1:
weight = (1, 0, 0, 0)
elif ngram == 2:
weight = (0.5, 0.5, 0, 0)
elif ngram == 3:
weight = (0.33, 0.33, 0.33, 0)
elif ngram == 4:
weight = (0.25, 0.25, 0.25, 0.25)
return sentence_bleu(refer, candidate,
weights=weight,
smoothing_function=smoothie)
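# Hedged usage sketch (toy tokens; `refer` is a list of reference token lists, per nltk's sentence_bleu):
#
#     cal_BLEU_nltk([["hello", "there", "friend"]], ["hello", "there", "friend"], ngram=2)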
# BLEU of nlg-eval
def cal_BLEU(refs, tgts):
scorer = Bleu(4)
refs = {idx: [line] for idx, line in enumerate(refs)}
tgts = {idx: [line] for idx, line in enumerate(tgts)}
s = scorer.compute_score(refs, tgts)
return s[0]
# BLEU of multibleu.perl
def cal_BLEU_perl(dataset, model):
p = os.popen(f'python ./metric/perl-bleu.py {dataset} {model}').read()
print(f'[!] multi-perl: {p}')
pattern = re.compile(r'(\w+\.\w+)/(\w+\.\w+)/(\w+\.\w+)/(\w+\.\w+)')
bleu1, bleu2, bleu3, bleu4 = pattern.findall(p)[0]
bleu1, bleu2, bleu3, bleu4 = float(bleu1), float(bleu2), float(bleu3), float(bleu4)
return bleu1, bleu2, bleu3, bleu4
def cal_Distinct(corpus):
"""
Calculates unigram and bigram diversity
Args:
corpus: tokenized list of sentences sampled
Returns:
uni_diversity: distinct-1 score
bi_diversity: distinct-2 score
"""
bigram_finder = BigramCollocationFinder.from_words(corpus)
bi_diversity = len(bigram_finder.ngram_fd) / bigram_finder.N
dist = FreqDist(corpus)
uni_diversity = len(dist) / len(corpus)
return uni_diversity, bi_diversity
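# Hedged usage sketch (toy token list; pass the flattened tokens of all sampled replies):
#
#     uni, bi = cal_Distinct("i am fine i am ok".split())
#     # uni is the distinct-1 ratio, bi the distinct-2 ratio of the toy corpus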
def cal_ROUGE(refer, candidate):
if len(candidate) == 0:
candidate = ['<unk>']
elif len(candidate) == 1:
candidate.append('<unk>')
if len(refer) == 0:
refer = ['<unk>']
elif len(refer) == 1:
refer.append('<unk>')
rouge = Rouge()
scores = rouge.get_scores(' '.join(candidate), ' '.join(refer))
return scores[0]['rouge-2']['f']
def cal_BERTScore(refer, candidate):
# too slow, fuck it
_, _, bert_scores = score(candidate, refer, lang='en', rescale_with_baseline=True)
bert_scores = bert_scores.tolist()
bert_scores = [0.5 if math.isnan(score) else score for score in bert_scores]
return np.mean(bert_scores)
# ========== fuck nlg-eval fuck ========== #
# ========== Our own embedding-based metric ========== #
def cal_vector_extrema(x, y, dic):
# x and y are the list of the words
    # dic is the gensim KeyedVectors model holding the 300-dimensional Google News word2vec embeddings
def vecterize(p):
vectors = []
for w in p:
if w in dic:
vectors.append(dic[w.lower()])
if not vectors:
vectors.append(np.random.randn(300))
return np.stack(vectors)
x = vecterize(x)
y = vecterize(y)
vec_x = np.max(x, axis=0)
vec_y = np.max(y, axis=0)
assert len(vec_x) == len(vec_y), "len(vec_x) != len(vec_y)"
zero_list = np.zeros(len(vec_x))
if vec_x.all() == zero_list.all() or vec_y.all() == zero_list.all():
return float(1) if vec_x.all() == vec_y.all() else float(0)
res = np.array([[vec_x[i] * vec_y[i], vec_x[i] * vec_x[i], vec_y[i] * vec_y[i]] for i in range(len(vec_x))])
cos = sum(res[:, 0]) / (np.sqrt(sum(res[:, 1])) * np.sqrt(sum(res[:, 2])))
return cos
def cal_embedding_average(x, y, dic):
# x and y are the list of the words
def vecterize(p):
vectors = []
for w in p:
if w in dic:
vectors.append(dic[w.lower()])
if not vectors:
vectors.append(np.random.randn(300))
return np.stack(vectors)
x = vecterize(x)
y = vecterize(y)
vec_x = np.array([0 for _ in range(len(x[0]))])
for x_v in x:
x_v = np.array(x_v)
vec_x = np.add(x_v, vec_x)
vec_x = vec_x / math.sqrt(sum(np.square(vec_x)))
vec_y = np.array([0 for _ in range(len(y[0]))])
#print(len(vec_y))
for y_v in y:
y_v = np.array(y_v)
vec_y = np.add(y_v, vec_y)
vec_y = vec_y / math.sqrt(sum(np.square(vec_y)))
assert len(vec_x) == len(vec_y), "len(vec_x) != len(vec_y)"
zero_list = np.array([0 for _ in range(len(vec_x))])
if vec_x.all() == zero_list.all() or vec_y.all() == zero_list.all():
return float(1) if vec_x.all() == vec_y.all() else float(0)
vec_x = np.mat(vec_x)
vec_y = np.mat(vec_y)
num = float(vec_x * vec_y.T)
denom = np.linalg.norm(vec_x) * np.linalg.norm(vec_y)
cos = num / denom
# res = np.array([[vec_x[i] * vec_y[i], vec_x[i] * vec_x[i], vec_y[i] * vec_y[i]] for i in range(len(vec_x))])
# cos = sum(res[:, 0]) / (np.sqrt(sum(res[:, 1])) * np.sqrt(sum(res[:, 2])))
return cos
def cal_greedy_matching(x, y, dic):
    # x and y are lists of words
def vecterize(p):
vectors = []
for w in p:
if w in dic:
vectors.append(dic[w.lower()])
if not vectors:
vectors.append(np.random.randn(300))
return np.stack(vectors)
x = vecterize(x)
y = vecterize(y)
len_x = len(x)
len_y = len(y)
cosine = []
sum_x = 0
for x_v in x:
for y_v in y:
assert len(x_v) == len(y_v), "len(x_v) != len(y_v)"
zero_list = np.zeros(len(x_v))
if x_v.all() == zero_list.all() or y_v.all() == zero_list.all():
if x_v.all() == y_v.all():
cos = float(1)
else:
cos = float(0)
else:
# method 1
res = np.array([[x_v[i] * y_v[i], x_v[i] * x_v[i], y_v[i] * y_v[i]] for i in range(len(x_v))])
cos = sum(res[:, 0]) / (np.sqrt(sum(res[:, 1])) * np.sqrt(sum(res[:, 2])))
cosine.append(cos)
if cosine:
sum_x += max(cosine)
cosine = []
sum_x = sum_x / len_x
cosine = []
sum_y = 0
for y_v in y:
for x_v in x:
assert len(x_v) == len(y_v), "len(x_v) != len(y_v)"
zero_list = np.zeros(len(y_v))
if x_v.all() == zero_list.all() or y_v.all() == zero_list.all():
if (x_v == y_v).all():
cos = float(1)
else:
cos = float(0)
else:
# method 1
res = np.array([[x_v[i] * y_v[i], x_v[i] * x_v[i], y_v[i] * y_v[i]] for i in range(len(x_v))])
cos = sum(res[:, 0]) / (np.sqrt(sum(res[:, 1])) * np.sqrt(sum(res[:, 2])))
cosine.append(cos)
if cosine:
sum_y += max(cosine)
cosine = []
sum_y = sum_y / len_y
score = (sum_x + sum_y) / 2
return score
def cal_greedy_matching_matrix(x, y, dic):
    # x and y are lists of words
def vecterize(p):
vectors = []
for w in p:
if w in dic:
vectors.append(dic[w.lower()])
if not vectors:
vectors.append(np.random.randn(300))
return np.stack(vectors)
x = vecterize(x) # [x, 300]
y = vecterize(y) # [y, 300]
len_x = len(x)
len_y = len(y)
matrix = np.dot(x, y.T) # [x, y]
matrix = matrix / np.linalg.norm(x, axis=1, keepdims=True) # [x, 1]
matrix = matrix / np.linalg.norm(y, axis=1).reshape(1, -1) # [1, y]
x_matrix_max = np.mean(np.max(matrix, axis=1)) # [x]
y_matrix_max = np.mean(np.max(matrix, axis=0)) # [y]
return (x_matrix_max + y_matrix_max) / 2
# ========== End of our own embedding-based metric ========== #
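# Illustrative sketch (not in the original script): the three metrics above only
# need `dic` to support membership tests and `dic[word]` lookups that return
# 300-dim vectors, so a plain dict can stand in for the gensim word2vec model.
def _demo_embedding_metrics():
    rng = np.random.RandomState(0)
    fake_dic = {w: rng.randn(300) for w in ('hello', 'world', 'there')}
    hyp, ref = ['hello', 'world'], ['hello', 'there']
    return (cal_vector_extrema(hyp, ref, fake_dic),
            cal_embedding_average(hyp, ref, fake_dic),
            cal_greedy_matching_matrix(hyp, ref, fake_dic))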
if __name__ == "__main__":
path = './processed/dailydialog/GatedGCN-no-correlation/pred.txt'
with open(path) as f:
ref, tgt = [], []
for idx, line in enumerate(f.readlines()):
if idx % 4 == 1:
line = line.replace("user1", "").replace("user0", "").replace("- ref: ", "").replace('<sos>', '').replace('<eos>', '').strip()
ref.append(line.split())
elif idx % 4 == 2:
line = line.replace("user1", "").replace("user0", "").replace("- tgt: ", "").replace('<sos>', '').replace('<eos>', '').strip()
tgt.append(line.split())
# Distinct-1, Distinct-2
candidates, references = [], []
for line1, line2 in zip(tgt, ref):
candidates.extend(line1)
references.extend(line2)
distinct_1, distinct_2 = cal_Distinct(candidates)
rdistinct_1, rdistinct_2 = cal_Distinct(references)
print(distinct_1, distinct_2)
|
neutron/tests/functional/agent/l3/test_metadata_proxy.py | congnt95/neutron | 1,080 | 25166 | <filename>neutron/tests/functional/agent/l3/test_metadata_proxy.py
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os.path
import time
from neutron_lib import constants
import webob
import webob.dec
import webob.exc
from neutron.agent.linux import utils
from neutron.tests.common import machine_fixtures
from neutron.tests.common import net_helpers
from neutron.tests.functional.agent.l3 import framework
from neutron.tests.functional.agent.linux import helpers
METADATA_REQUEST_TIMEOUT = 60
METADATA_REQUEST_SLEEP = 5
class MetadataFakeProxyHandler(object):
def __init__(self, status):
self.status = status
@webob.dec.wsgify()
def __call__(self, req):
return webob.Response(status=self.status)
class MetadataL3AgentTestCase(framework.L3AgentTestFramework):
SOCKET_MODE = 0o644
def _create_metadata_fake_server(self, status):
server = utils.UnixDomainWSGIServer('metadata-fake-server')
self.addCleanup(server.stop)
# NOTE(cbrandily): TempDir fixture creates a folder with 0o700
# permissions but metadata_proxy_socket folder must be readable by all
# users
self.useFixture(
helpers.RecursivePermDirFixture(
os.path.dirname(self.agent.conf.metadata_proxy_socket), 0o555))
server.start(MetadataFakeProxyHandler(status),
self.agent.conf.metadata_proxy_socket,
workers=0, backlog=4096, mode=self.SOCKET_MODE)
def _query_metadata_proxy(self, machine):
url = 'http://%(host)s:%(port)s' % {'host': constants.METADATA_V4_IP,
'port': constants.METADATA_PORT}
cmd = 'curl', '--max-time', METADATA_REQUEST_TIMEOUT, '-D-', url
i = 0
CONNECTION_REFUSED_TIMEOUT = METADATA_REQUEST_TIMEOUT // 2
while i <= CONNECTION_REFUSED_TIMEOUT:
try:
raw_headers = machine.execute(cmd)
break
except RuntimeError as e:
if 'Connection refused' in str(e):
time.sleep(METADATA_REQUEST_SLEEP)
i += METADATA_REQUEST_SLEEP
else:
self.fail('metadata proxy unreachable '
'on %s before timeout' % url)
if i > CONNECTION_REFUSED_TIMEOUT:
self.fail('Timed out waiting metadata proxy to become available')
return raw_headers.splitlines()[0]
def test_access_to_metadata_proxy(self):
"""Test access to the l3-agent metadata proxy.
The test creates:
* A l3-agent metadata service:
* A router (which creates a metadata proxy in the router namespace),
* A fake metadata server
* A "client" namespace (simulating a vm) with a port on router
internal subnet.
The test queries from the "client" namespace the metadata proxy on
http://169.254.169.254 and asserts that the metadata proxy added
the X-Forwarded-For and X-Neutron-Router-Id headers to the request
and forwarded the http request to the fake metadata server and the
response to the "client" namespace.
"""
router_info = self.generate_router_info(enable_ha=False)
router = self.manage_router(self.agent, router_info)
self._create_metadata_fake_server(webob.exc.HTTPOk.code)
# Create and configure client namespace
router_ip_cidr = self._port_first_ip_cidr(router.internal_ports[0])
br_int = framework.get_ovs_bridge(
self.agent.conf.OVS.integration_bridge)
machine = self.useFixture(
machine_fixtures.FakeMachine(
br_int,
net_helpers.increment_ip_cidr(router_ip_cidr),
router_ip_cidr.partition('/')[0]))
# Query metadata proxy
firstline = self._query_metadata_proxy(machine)
# Check status code
self.assertIn(str(webob.exc.HTTPOk.code), firstline.split())
class UnprivilegedUserMetadataL3AgentTestCase(MetadataL3AgentTestCase):
"""Test metadata proxy with least privileged user.
The least privileged user has uid=65534 and is commonly named 'nobody' but
not always, that's why we use its uid.
"""
SOCKET_MODE = 0o664
def setUp(self):
super(UnprivilegedUserMetadataL3AgentTestCase, self).setUp()
self.agent.conf.set_override('metadata_proxy_user', '65534')
class UnprivilegedUserGroupMetadataL3AgentTestCase(MetadataL3AgentTestCase):
"""Test metadata proxy with least privileged user/group.
The least privileged user has uid=65534 and is commonly named 'nobody' but
not always, that's why we use its uid.
Its group has gid=65534 and is commonly named 'nobody' or 'nogroup', that's
why we use its gid.
"""
SOCKET_MODE = 0o666
def setUp(self):
super(UnprivilegedUserGroupMetadataL3AgentTestCase, self).setUp()
self.agent.conf.set_override('metadata_proxy_user', '65534')
self.agent.conf.set_override('metadata_proxy_group', '65534')
|
glue/core/exceptions.py | HPLegion/glue | 550 | 25177 | <gh_stars>100-1000
class IncompatibleAttribute(Exception):
pass
class IncompatibleDataException(Exception):
pass
class UndefinedROI(Exception):
pass
class InvalidSubscriber(Exception):
pass
class InvalidMessage(Exception):
pass
|
tests/test_version.py | datatags/pyCraft | 759 | 25241 | from distutils.version import StrictVersion as SV
import unittest
import minecraft
class VersionTest(unittest.TestCase):
def test_module_version_is_a_valid_pep_386_strict_version(self):
SV(minecraft.__version__)
def test_protocol_version_is_an_int(self):
for version in minecraft.SUPPORTED_PROTOCOL_VERSIONS:
self.assertTrue(type(version) is int)
|
src/models/sequence_predictor_model.py | rajatdiptabiswas/NN_compression | 211 | 25264 | from __future__ import print_function
import os
import time
import tensorflow as tf
import numpy as np
import sys
from zoneout_wrapper import ZoneoutWrapper
class SequencePredictor():
def add_placeholders(self):
"""Generates placeholder variables to represent the input tensors
"""
self.inputs_placeholder = tf.placeholder(tf.int32, shape=(None, self.config.max_length), name="x")
self.labels_placeholder = tf.placeholder(tf.int32, shape=(None, self.config.max_length), name="y")
self.dropout_placeholder = tf.placeholder(tf.float32)
def create_feed_dict(self, inputs_batch, labels_batch=None, initial_state=None, keep_prob=1.0):
"""Creates the feed_dict for the model.
NOTE: You do not have to do anything here.
"""
feed_dict = {
self.inputs_placeholder: inputs_batch,
self.dropout_placeholder: keep_prob,
}
if labels_batch is not None:
feed_dict[self.labels_placeholder] = labels_batch
if initial_state is not None:
feed_dict[self.in_state] = initial_state
return feed_dict
def add_embedding(self):
""" Creates one-hot encoding for the input. No embedding is used as of now
"""
embedding = tf.one_hot(self.inputs_placeholder, self.config.num_classes)
return embedding
def add_prediction_op(self):
""" Get the input from the embedding layer
"""
x = self.add_embedding()
""" Create a RNN first & define a placeholder for the initial state
"""
if self.config.model_type == "gru":
cell = tf.nn.rnn_cell.GRUCell(self.config.hidden_size)
elif self.config.model_type == "rnn":
cell = tf.nn.rnn_cell.BasicRNNCell(self.config.hidden_size)
else:
raise Exception("Unsuppoprted model type...")
if self.config.regularization == "dropout":
cell = tf.nn.rnn_cell.DropoutWrapper(cell, output_keep_prob=self.dropout_placeholder)
elif self.config.regularization == "zoneout":
cell = ZoneoutWrapper(cell, zoneout_prob=self.dropout_placeholder)
cell = tf.nn.rnn_cell.MultiRNNCell([cell] * self.config.num_layers, state_is_tuple=False)
batch_size = tf.shape(x)[0]
dynamic_max_length = tf.shape(x)[1]
zero_state = cell.zero_state(batch_size, tf.float32)
self.in_state = tf.placeholder_with_default(zero_state, [None, cell.state_size])
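        # placeholder_with_default lets callers feed the previous batch's state
        # through `initial_state` in create_feed_dict, or fall back to zeros.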
""" First find the sequence length and then use it to run the model
"""
#length = tf.reduce_sum(tf.reduce_max(tf.sign(x), 2), 1)
output, self.out_state = tf.nn.dynamic_rnn(cell, x, initial_state=self.in_state)
output = tf.reshape(output, shape=[-1, self.config.hidden_size])
""" Pass it through a linear + Softmax layer to get the predictions
"""
xavier_init = tf.contrib.layers.xavier_initializer()
W = tf.get_variable("W", shape=[self.config.hidden_size, self.config.num_classes], initializer=xavier_init )
b1 = tf.get_variable("b1", shape=[self.config.num_classes], initializer=xavier_init )
preds = tf.add(tf.matmul(output,W),b1)
preds = tf.reshape(preds, shape=[batch_size,dynamic_max_length, self.config.num_classes])
return preds
def add_loss_op(self, preds):
loss = tf.reduce_mean( tf.nn.sparse_softmax_cross_entropy_with_logits(labels=self.labels_placeholder, logits=preds) )
scaled_loss = loss/np.log(2)
tf.summary.scalar('loss', scaled_loss);
return scaled_loss
def add_training_op(self, loss):
"""Sets up the training Ops.
"""
global_step = tf.Variable(0, dtype=tf.int32, trainable=False, name='global_step')
optimizer = tf.train.AdamOptimizer(self.config.lr)
train_op = optimizer.minimize(loss, global_step=global_step)
return global_step, train_op
def loss_on_batch(self, sess, inputs_batch, labels_batch, initial_state=None):
feed = self.create_feed_dict(inputs_batch=inputs_batch, labels_batch=labels_batch, initial_state=initial_state, keep_prob=1.0)
loss, out_state = sess.run([self.loss,self.out_state], feed_dict=feed)
return loss, out_state
def train_on_batch(self, sess, inputs_batch, labels_batch, initial_state=None, dropout=1.0):
feed = self.create_feed_dict(inputs_batch=inputs_batch, labels_batch=labels_batch, initial_state=initial_state, keep_prob=dropout)
_, loss,out_state,_step, summary = sess.run([self.train_op, self.loss, self.out_state, self.global_step, self.merged_summaries], feed_dict=feed)
return loss, out_state, _step, summary
def build(self):
self.add_placeholders()
self.pred = self.add_prediction_op()
self.loss = self.add_loss_op(self.pred)
self.global_step, self.train_op = self.add_training_op(self.loss)
self.merged_summaries = tf.summary.merge_all()
def __init__(self, config):
self.config = config
self.build()
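# Illustrative sketch (not part of the original module): the attributes below are
# the ones SequencePredictor reads from `config`; the values are hypothetical
# placeholders and the project's real config object may differ.
class DemoConfig(object):
    max_length = 64
    num_classes = 256
    hidden_size = 128
    model_type = 'gru'          # 'gru' or 'rnn'
    regularization = 'dropout'  # 'dropout' or 'zoneout'
    num_layers = 2
    lr = 1e-3
# Usage sketch: model = SequencePredictor(DemoConfig())  # builds the TF graph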
|
dialogue-engine/test/programytest/security/linking/test_aiml.py | cotobadesign/cotoba-agent-oss | 104 | 25281 | <gh_stars>100-1000
"""
Copyright (c) 2020 COTOBA DESIGN, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import unittest
import os
from programy.storage.stores.sql.engine import SQLStorageEngine
from programy.storage.stores.sql.config import SQLStorageConfiguration
from programy.security.linking.accountlinker import BasicAccountLinkerService
from programytest.client import TestClient
class AccountLinkerTestClient(TestClient):
def __init__(self):
TestClient.__init__(self)
def load_storage(self):
super(AccountLinkerTestClient, self).load_storage()
self.add_default_stores()
self.add_categories_store([os.path.dirname(__file__)])
class AccountLinkerAIMLTests(unittest.TestCase):
def setUp(self):
config = SQLStorageConfiguration()
storage_engine = SQLStorageEngine(config)
storage_engine.initialise()
client = AccountLinkerTestClient()
self.context = client.create_client_context("TESTUSER")
self.context.brain._security._account_linker = BasicAccountLinkerService(storage_engine)
def test_account_link_happy_path(self):
response = self.context.bot.ask_question(self.context, "LINK PRIMARY ACCOUNT USER1 CONSOLE PASSWORD123")
self.assertIsNotNone(response)
self.assertTrue(response.startswith('Your generated key is'))
words = response.split(" ")
        self.assertEqual(5, len(words))
generated_key = words[4][:-1]
command = "LINK SECONDARY ACCOUNT USER1 USER2 FACEBOOK PASSWORD<PASSWORD> %s" % generated_key
response = self.context.bot.ask_question(self.context, command)
self.assertIsNotNone(response)
self.assertEqual('Your accounts are now linked.', response)
|
baselines/profiling/profile_main.py | Worm4047/TVR | 106 | 25327 | """
Profile the time needed for retrieval.
We consider retrieval over a corpus of 1M videos: 1K new videos are added and 10K queries are retrieved.
Calculate the time needed for adding the 1K videos and for performing retrieval for the 10K queries.
1. Data loading time is ignored; we assume it is hidden by computation time.
2. Sort time is ignored, since it is similar across the methods.
"""
import os
import time
import torch
import torch.nn as nn
import torch.nn.functional as F
import pprint
from tqdm import tqdm, trange
from baselines.crossmodal_moment_localization.model_xml import XML, xml_base_config
from baselines.mixture_embedding_experts.model import MEE, mee_base_cfg
from baselines.clip_alignment_with_language.model import CALWithSub, cal_base_cfg
from baselines.excl.model import EXCL, excl_base_cfg
from utils.basic_utils import save_json
import logging
logger = logging.getLogger(__name__)
logging.basicConfig(format="%(asctime)s.%(msecs)03d:%(levelname)s:%(name)s - %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=logging.INFO)
def mask_logits(target, mask):
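    # Keep values where mask == 1 and push masked-out positions to a large
    # negative number so that a downstream softmax treats them as zero.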
return target * mask + (1 - mask) * (-1e10)
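# Illustrative helper (not in the original script): the module docstring above
# describes scaling a measured per-batch time to the whole 1M-video corpus;
# this mirrors the ctx_enc_avg_time_all_videos computation in __main__ below.
def extrapolate_to_corpus(per_batch_seconds, n_videos=1e6, batch_size=400):
    """E.g. 0.05 s for a 400-video batch -> 0.05 * 1e6 / 400 = 125 s overall."""
    return per_batch_seconds * n_videos / batch_size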
class ProfileBase(object):
N_NewQuery = 1e4
N_NewVideo = 1e3
N_Videos = 1e6
AvgVideoLength = 100
ClipLength = 5
AvgClipPerVideo = int(AvgVideoLength / ClipLength) # max_ctx_l
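    # i.e. 100 s / 5 s = 20 clips per video, also used as max_ctx_l below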
AvgWordInQuery = 15
# estimated by
# scales=[2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14], => max_proposal = 14
AvgProposalPerVideo = 170
MaxClipPerProposal = 14 # pad to this length
AvgClipPerProposal = 7 # 6.88
VideoFeatureDim = 3074 # 1024 + 2048 + 2 (TEF)
SubFeatureDim = 770
QueryFeatureDim = 768
HiddenSize = 256
N_Runs = 5 # Get the average time
def __init__(self, device=torch.device("cuda:0"), ctx_batch_size=400, query_batch_size=100):
self.device = device
self.ctx_batch_size = ctx_batch_size
self.query_batch_size = query_batch_size
self.model_config = self.get_model_config()
print(self.model_config)
self.model = self.get_model()
def get_model(self):
return None
def get_model_config(self):
return None
def set_ctx_batch_size(self, batch_size):
self.ctx_batch_size = batch_size
def set_query_batch_size(self, batch_size):
self.query_batch_size = batch_size
def cast_dict_inputs_to_device(self, dict_inputs, device):
return {k: v.to(device) for k, v in dict_inputs.items()}
def get_fake_ctx_raw_input_st_ed(self, no_tef=False):
return dict(
video_feat=torch.FloatTensor(self.ctx_batch_size, self.model_config.max_ctx_l,
self.VideoFeatureDim - 2*no_tef),
sub_feat=torch.FloatTensor(self.ctx_batch_size, self.model_config.max_ctx_l, self.SubFeatureDim - 2*no_tef),
ctx_mask=torch.FloatTensor(self.ctx_batch_size, self.model_config.max_ctx_l),
)
def get_fake_raw_query(self):
return dict(
query_feat=torch.FloatTensor(self.query_batch_size, self.AvgWordInQuery, self.QueryFeatureDim),
query_mask=torch.ones(self.query_batch_size, self.AvgWordInQuery)
)
"""
from baselines.profiling.profile_main import ProfileXML
profile_xml = ProfileXML(ctx_batch_size=400, query_batch_size=100)
profile_xml.get_ctx_encoding_time()
"""
class ProfileXML(ProfileBase):
def get_model_config(self):
xml_base_config["ctx_mode"] = "video_sub_tef"
xml_base_config["merge_two_stream"] = True
xml_base_config["cross_att"] = True
xml_base_config["max_ctx_l"] = self.AvgClipPerVideo
xml_base_config["visual_input_size"] = self.VideoFeatureDim
xml_base_config["query_input_size"] = self.QueryFeatureDim
xml_base_config["sub_input_size"] = self.SubFeatureDim
xml_base_config["hidden_size"] = self.HiddenSize
return xml_base_config
def get_model(self):
model = XML(self.model_config)
model.to(self.device)
model.eval()
return model
def get_fake_encoded_ctx(self):
return dict(
ctx_feat=torch.FloatTensor(self.ctx_batch_size, self.model_config.max_ctx_l, self.HiddenSize),
ctx_mask=torch.FloatTensor(self.ctx_batch_size, self.model_config.max_ctx_l),
)
def get_fake_encoded_query(self):
return dict(query_feat=torch.FloatTensor(self.ctx_batch_size, self.HiddenSize))
def _get_ctx_encoding_time(self, video_feat, sub_feat, ctx_mask):
"""Considered two modalities"""
torch.cuda.synchronize()
st_time = time.time()
self.model.cross_encode_context(video_feat, ctx_mask, sub_feat, ctx_mask)
torch.cuda.synchronize()
return time.time() - st_time
def get_ctx_encoding_time(self):
with torch.no_grad():
fake_ctx_inputs = self.cast_dict_inputs_to_device(self.get_fake_ctx_raw_input_st_ed(), self.device)
raw_video = fake_ctx_inputs["video_feat"]
raw_sub = fake_ctx_inputs["sub_feat"]
ctx_mask = fake_ctx_inputs["ctx_mask"]
times = []
for _ in trange(self.N_Runs):
times += [self._get_ctx_encoding_time(raw_video, raw_sub, ctx_mask)]
times = torch.FloatTensor(times)
return dict(avg=float(times.mean()), std=float(times.std()))
def _get_query_encoding_time(self, raw_query, query_mask):
"""Considered two modalities"""
torch.cuda.synchronize()
st_time = time.time()
encoded_query = self.model.encode_input(raw_query, query_mask,
self.model.query_input_proj,
self.model.query_encoder,
self.model.query_pos_embed) # (N, Lq, D)
# video level
video_query, sub_query = \
self.model.get_modularized_queries(encoded_query, query_mask, return_modular_att=False)
# st ed
video_query = self.model.video_query_linear(video_query)
sub_query = self.model.sub_query_linear(sub_query)
torch.cuda.synchronize()
return time.time() - st_time
def get_query_encoding_time(self):
with torch.no_grad():
query_inputs = self.cast_dict_inputs_to_device(self.get_fake_raw_query(), self.device)
raw_query = query_inputs["query_feat"]
query_mask = query_inputs["query_mask"]
times = []
for _ in trange(self.N_Runs):
times += [self._get_query_encoding_time(raw_query, query_mask)]
times = torch.FloatTensor(times)
return dict(avg=float(times.mean()), std=float(times.std()))
def _get_retrieval_time(self, encoded_video_query, encoded_video, ctx_mask):
"""Consider the queries are encoded, Calculate in a single modality then multiply by 2."""
torch.cuda.synchronize()
st_time = time.time()
self.model.get_video_level_scores(encoded_video_query, encoded_video, ctx_mask)
torch.cuda.synchronize()
return (time.time() - st_time) * 2
def get_retrieval_time(self):
with torch.no_grad():
encoded_query = self.cast_dict_inputs_to_device(self.get_fake_encoded_query(), self.device)["query_feat"]
fake_ctx_inputs = self.cast_dict_inputs_to_device(self.get_fake_encoded_ctx(), self.device)
encoded_ctx = fake_ctx_inputs["ctx_feat"]
ctx_mask = fake_ctx_inputs["ctx_mask"]
times = []
for _ in trange(self.N_Runs):
times += [self._get_retrieval_time(encoded_query, encoded_ctx, ctx_mask)]
times = torch.FloatTensor(times) # since we have two modalities
return dict(avg=float(times.mean()), std=float(times.std()))
def _get_span_prediction_time(self, query_feat, ctx_feat, ctx_mask):
"""Considered two modalities"""
torch.cuda.synchronize()
st_time = time.time()
similarity = torch.einsum("md,nld->mnl", query_feat, ctx_feat)
similarity = (similarity + similarity) / 2 # (Nq, Nv, L) from query to all videos.
n_q, n_c, l = similarity.shape
similarity = similarity.view(n_q * n_c, 1, l)
st_prob = self.model.merged_st_predictor(similarity).view(n_q, n_c, l) # (Nq, Nv, L)
ed_prob = self.model.merged_ed_predictor(similarity).view(n_q, n_c, l) # (Nq, Nv, L)
st_prob = mask_logits(st_prob, ctx_mask) # (N, L)
ed_prob = mask_logits(ed_prob, ctx_mask)
torch.cuda.synchronize()
return time.time() - st_time
def get_span_prediction_time(self):
with torch.no_grad():
encoded_query = self.cast_dict_inputs_to_device(self.get_fake_encoded_query(), self.device)["query_feat"]
fake_ctx_inputs = self.cast_dict_inputs_to_device(self.get_fake_encoded_ctx(), self.device)
encoded_ctx = fake_ctx_inputs["ctx_feat"]
ctx_mask = fake_ctx_inputs["ctx_mask"]
times = []
for _ in trange(self.N_Runs):
times += [self._get_span_prediction_time(encoded_query, encoded_ctx, ctx_mask)]
times = torch.FloatTensor(times)
return dict(avg=float(times.mean()), std=float(times.std()))
"""
from baselines.profiling.profile_main import ProfileMEE
profile_mee = ProfileMEE(ctx_batch_size=400, query_batch_size=100)
profile_mee.get_ctx_encoding_time()
"""
class ProfileMEE(ProfileBase):
def get_model_config(self):
mee_base_cfg["ctx_mode"] = "video_sub"
mee_base_cfg["text_input_size"] = self.QueryFeatureDim
mee_base_cfg["vid_input_size"] = self.VideoFeatureDim
mee_base_cfg["output_size"] = self.HiddenSize
return mee_base_cfg
def get_model(self):
model = MEE(self.model_config)
model.to(self.device)
model.eval()
return model
def get_fake_raw_ctx(self):
return dict(
vid_feat=torch.FloatTensor(self.ctx_batch_size, self.VideoFeatureDim),
sub_feat=torch.FloatTensor(self.ctx_batch_size, self.QueryFeatureDim)
)
def get_fake_encoded_ctx_query(self):
return dict(
ctx_feat=torch.FloatTensor(self.ctx_batch_size, self.HiddenSize),
query_feat=torch.FloatTensor(self.ctx_batch_size, self.HiddenSize)
)
def _get_ctx_encoding_time(self, vid_feat, sub_feat):
torch.cuda.synchronize()
st_time = time.time()
self.model.video_gu(vid_feat)
self.model.sub_gu(sub_feat)
torch.cuda.synchronize()
return time.time() - st_time
def get_ctx_encoding_time(self):
feat_dict = self.cast_dict_inputs_to_device(self.get_fake_raw_ctx(), self.device)
with torch.no_grad():
times = []
for _ in trange(self.N_Runs):
times += [self._get_ctx_encoding_time(**feat_dict)]
times = torch.FloatTensor(times)
return dict(avg=float(times.mean()), std=float(times.std()))
def _get_query_encoding_time(self, query_feat):
"""Considered 2 modalities"""
torch.cuda.synchronize()
st_time = time.time()
pooled_query = self.model.query_pooling(query_feat) # (N, Dt)
video_query = self.model.video_query_gu(pooled_query)
sub_query = self.model.sub_query_gu(pooled_query)
stream_weights = self.model.moe_fc(pooled_query) # (N, 2)
torch.cuda.synchronize()
return time.time() - st_time
def get_query_encoding_time(self):
raw_query = self.cast_dict_inputs_to_device(self.get_fake_raw_query(), self.device)["query_feat"]
with torch.no_grad():
times = []
for _ in trange(self.N_Runs):
times += [self._get_query_encoding_time(raw_query)]
times = torch.FloatTensor(times)
return dict(avg=float(times.mean()), std=float(times.std()))
def _get_retrieval_time(self, encoded_query, encoded_ctx):
"""Considered 2 modalities"""
torch.cuda.synchronize()
st_time = time.time()
torch.einsum("md,nd->mn", encoded_query, encoded_ctx) # (N, N)
torch.cuda.synchronize()
return (time.time() - st_time) * 2
def get_retrieval_time(self):
model_inputs = self.cast_dict_inputs_to_device(self.get_fake_encoded_ctx_query(), self.device)
encoded_query = model_inputs["ctx_feat"]
encoded_ctx = model_inputs["query_feat"]
with torch.no_grad():
times = []
for _ in trange(self.N_Runs):
times += [self._get_retrieval_time(encoded_query, encoded_ctx)]
times = torch.FloatTensor(times)
return dict(avg=float(times.mean()), std=float(times.std()))
class ProfileCAL(ProfileBase):
def get_model_config(self):
cal_base_cfg["ctx_mode"] = "video_sub"
cal_base_cfg["embedding_size"] = self.QueryFeatureDim
cal_base_cfg["visual_input_size"] = self.VideoFeatureDim * 2
cal_base_cfg["textual_input_size"] = self.SubFeatureDim * 2
cal_base_cfg["output_size"] = self.HiddenSize
return cal_base_cfg
def get_model(self):
model = CALWithSub(self.model_config)
model.to(self.device)
model.eval()
return model
def get_fake_raw_ctx(self, model_name="cal"):
"""The features are `*2` since they use both global and local features"""
return dict(
sub_feat=torch.FloatTensor(self.ctx_batch_size, self.AvgProposalPerVideo,
self.AvgClipPerProposal, self.SubFeatureDim * 2),
vid_feat=torch.FloatTensor(self.ctx_batch_size, self.AvgProposalPerVideo,
self.AvgClipPerProposal, self.VideoFeatureDim * 2))
def _get_ctx_encoding_time(self, sub_feat, vid_feat, model_name="cal"):
if model_name == "mcn":
sub_feat = sub_feat.sum(2)
vid_feat = vid_feat.sum(2)
torch.cuda.synchronize()
st_time = time.time()
self.model.moment_encoder(vid_feat, module_name="video")
self.model.moment_encoder(sub_feat, module_name="sub")
torch.cuda.synchronize()
return time.time() - st_time
def get_ctx_encoding_time(self, model_name="cal"):
"""model_name: str, `cal` or `mcn`"""
feat_dict = self.cast_dict_inputs_to_device(
self.get_fake_raw_ctx(model_name=model_name), self.device)
feat_dict["model_name"] = model_name
with torch.no_grad():
times = []
for _ in trange(self.N_Runs):
times += [self._get_ctx_encoding_time(**feat_dict)]
times = torch.FloatTensor(times)
return dict(avg=float(times.mean()), std=float(times.std()))
def _get_query_encoding_time(self, query_feat, query_mask):
torch.cuda.synchronize()
st_time = time.time()
self.model.query_encoder(query_feat, query_mask)
torch.cuda.synchronize()
return time.time() - st_time
def get_query_encoding_time(self):
feat_dict = self.cast_dict_inputs_to_device(self.get_fake_raw_query(), self.device)
with torch.no_grad():
times = []
for _ in trange(self.N_Runs):
times += [self._get_query_encoding_time(**feat_dict)]
times = torch.FloatTensor(times)
return dict(avg=float(times.mean()), std=float(times.std()))
class ProfileExCL(ProfileBase):
def get_model_config(self):
excl_base_cfg["ctx_mode"] = "video_sub"
excl_base_cfg["query_input_size"] = self.QueryFeatureDim
excl_base_cfg["visual_input_size"] = self.VideoFeatureDim
excl_base_cfg["sub_input_size"] = self.SubFeatureDim
excl_base_cfg["output_size"] = self.HiddenSize
return excl_base_cfg
def get_model(self):
model = EXCL(self.model_config)
model.to(self.device)
model.eval()
return model
def get_fake_raw_input(self):
"""The features are `*2` since they use both global and local features"""
return dict(
query_feat=torch.FloatTensor(self.ctx_batch_size, self.AvgWordInQuery, self.QueryFeatureDim),
query_mask=torch.ones((self.ctx_batch_size, self.AvgWordInQuery)),
sub_feat=torch.FloatTensor(self.ctx_batch_size, self.AvgClipPerVideo, self.SubFeatureDim),
sub_mask=torch.ones(self.ctx_batch_size, self.AvgClipPerVideo),
video_feat=torch.FloatTensor(self.ctx_batch_size, self.AvgClipPerVideo, self.VideoFeatureDim),
video_mask=torch.ones(self.ctx_batch_size, self.AvgClipPerVideo),
tef_feat=torch.FloatTensor(self.ctx_batch_size, self.AvgClipPerVideo, 2),
tef_mask=torch.ones(self.ctx_batch_size, self.AvgClipPerVideo),
st_ed_indices=torch.ones(2, 2), # not used.
)
def _get_prediction_time(self, input_dict):
torch.cuda.synchronize()
st_time = time.time()
self.model(**input_dict)
torch.cuda.synchronize()
return time.time() - st_time
def get_prediction_time(self):
"""model_name: str, `cal` or `mcn`"""
feat_dict = self.cast_dict_inputs_to_device(
self.get_fake_raw_input(), self.device)
feat_dict["is_training"] = False
with torch.no_grad():
times = []
for _ in trange(self.N_Runs):
times += [self._get_prediction_time(feat_dict)]
times = torch.FloatTensor(times)
return dict(avg=float(times.mean()), std=float(times.std()))
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--model", type=str, help="")
parser.add_argument("--ctx_batch_size", type=int, default=400)
parser.add_argument("--query_batch_size", type=int, default=100)
parser.add_argument("--save_dir", type=str, default="baselines/profiling/cache")
args = parser.parse_args()
model = args.model
query_batch_size = args.query_batch_size
ctx_batch_size = args.ctx_batch_size
if model == "mee":
profile_mee = ProfileMEE(ctx_batch_size=ctx_batch_size, query_batch_size=query_batch_size)
# use the 2nd one to report time
profile_mee.get_ctx_encoding_time()
ctx_enc_time = profile_mee.get_ctx_encoding_time()
query_enc_time = profile_mee.get_query_encoding_time()
elif model == "cal":
profile_cal = ProfileCAL(ctx_batch_size=ctx_batch_size, query_batch_size=query_batch_size)
# use the 2nd one to report time
profile_cal.get_ctx_encoding_time()
ctx_enc_time = profile_cal.get_ctx_encoding_time(model_name="cal")
query_enc_time = profile_cal.get_query_encoding_time()
elif model == "mcn":
profile_cal = ProfileCAL(ctx_batch_size=ctx_batch_size, query_batch_size=query_batch_size)
# use the 2nd one to report time
profile_cal.get_ctx_encoding_time()
ctx_enc_time = profile_cal.get_ctx_encoding_time(model_name="mcn")
query_enc_time = profile_cal.get_query_encoding_time()
elif model == "xml":
profile_xml = ProfileXML(ctx_batch_size=ctx_batch_size, query_batch_size=query_batch_size)
# use the 2nd one to report time
profile_xml.get_ctx_encoding_time()
ctx_enc_time = profile_xml.get_ctx_encoding_time()
query_enc_time = profile_xml.get_query_encoding_time()
elif model == "excl":
profile_excl = ProfileExCL(ctx_batch_size=ctx_batch_size, query_batch_size=ctx_batch_size)
# use the 2nd one to report time
profile_excl.get_prediction_time()
ctx_enc_time = profile_excl.get_prediction_time()
query_enc_time = 0
# Calculate the total time as ctx_enc_time * (100 * 1M / ctx_batch_size)
else:
raise NotImplementedError
# ctx_enc_time = ctx_enc_time
save_path = os.path.join(args.save_dir, "{}_profile_main.json".format(model))
n_videos = ProfileBase.N_Videos
res = dict(
ctx_enc_time=ctx_enc_time,
ctx_enc_avg_time_all_videos=ctx_enc_time["avg"] * n_videos / ctx_batch_size,
query_enc_time=query_enc_time,
n_videos=n_videos,
ctx_batch_size=ctx_batch_size,
query_batch_size=query_batch_size,
model=model
)
save_json(res, save_path, save_pretty=True)
pprint.pprint(res)
|
egs/wmt14_en_de/nlp1/local/generate_stand_vocab.py | didichuxing/delta | 1,442 | 25354 | # Copyright (C) 2017 Beijing Didi Infinity Technology and Development Co.,Ltd.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import sys
from absl import logging
def generate_stand_vocab(old_vocab, new_vocab):
vocab_file = open(new_vocab, 'w')
vocab_file.write('<pad>' + '\t' + '0' + '\n')
vocab_file.write('<s>' + '\t' + '1' + '\n')
vocab_file.write('</s>' + '\t' + '2' + '\n')
vocab_file.write('<unk>' + '\t' + '3' + '\n')
vocab_file.write('<sos>' + '\t' + '4' + '\n')
vocab_file.write('<eos>' + '\t' + '5' + '\n')
idx = 6
with open(old_vocab, 'r') as f:
for i, line in enumerate(f.readlines()):
            if i > 2:
                vocab_file.write(line.strip() + '\t' + str(idx) + '\n')
                idx += 1
if __name__ == '__main__':
logging.set_verbosity(logging.INFO)
if len(sys.argv) != 3:
logging.error("Usage python {} old_vocab new_vocab".format(sys.argv[0]))
sys.exit(-1)
old_vocab = sys.argv[1]
new_vocab = sys.argv[2]
generate_stand_vocab(old_vocab, new_vocab)
|
test/knxip_tests/hpai_test.py | iligiddi/xknx | 179 | 25375 | <filename>test/knxip_tests/hpai_test.py<gh_stars>100-1000
"""Unit test for KNX/IP HPAI objects."""
import pytest
from xknx.exceptions import ConversionError, CouldNotParseKNXIP
from xknx.knxip import HPAI
class TestKNXIPHPAI:
"""Test class for KNX/IP HPAI objects."""
def test_hpai(self):
"""Test parsing and streaming HPAI KNX/IP fragment."""
raw = (0x08, 0x01, 0xC0, 0xA8, 0x2A, 0x01, 0x84, 0x95)
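        # Byte layout: 0x08 = structure length, 0x01 = IPv4/UDP host protocol,
        # 0xC0 0xA8 0x2A 0x01 = 192.168.42.1, 0x84 0x95 = port 33941.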
hpai = HPAI()
assert hpai.from_knx(raw) == 8
assert hpai.ip_addr == "192.168.42.1"
assert hpai.port == 33941
hpai2 = HPAI(ip_addr="192.168.42.1", port=33941)
assert hpai2.to_knx() == list(raw)
def test_from_knx_wrong_input1(self):
"""Test parsing of wrong HPAI KNX/IP packet (wrong length)."""
raw = (0x08, 0x01, 0xC0, 0xA8, 0x2A)
with pytest.raises(CouldNotParseKNXIP):
HPAI().from_knx(raw)
def test_from_knx_wrong_input2(self):
"""Test parsing of wrong HPAI KNX/IP packet (wrong length byte)."""
raw = (0x09, 0x01, 0xC0, 0xA8, 0x2A, 0x01, 0x84, 0x95)
with pytest.raises(CouldNotParseKNXIP):
HPAI().from_knx(raw)
def test_from_knx_wrong_input3(self):
"""Test parsing of wrong HPAI KNX/IP packet (wrong HPAI type)."""
raw = (0x08, 0x02, 0xC0, 0xA8, 0x2A, 0x01, 0x84, 0x95)
with pytest.raises(CouldNotParseKNXIP):
HPAI().from_knx(raw)
def test_to_knx_wrong_ip(self):
"""Test serializing HPAI to KNV/IP with wrong ip type."""
hpai = HPAI(ip_addr=127001)
with pytest.raises(ConversionError):
hpai.to_knx()
|
timemachines/skaters/nproph/nprophetiskaterfactory.py | iklasky/timemachines | 253 | 25385 | <filename>timemachines/skaters/nproph/nprophetiskaterfactory.py
from timemachines.skaters.nproph.nprophetinclusion import using_neuralprophet, NeuralProphet
if using_neuralprophet:
import pandas as pd
from typing import List, Tuple, Any
from timemachines.skatertools.utilities.conventions import wrap
from timemachines.skaters.nproph.nprophparams import NPROPHET_MODEL, NPROPHET_META
def nprophet_iskater_factory(y: [[float]], k: int, a: List = None, t: List = None, e=None, freq: str = None, n_max=1000,
recursive: bool = False, model_params: dict = None, return_forecast=True):
# For now we keep it simple. Will add to this over time
y0s = [wrap(yi)[0] for yi in y]
x, x_std, forecast,m = nprophet_fit_and_predict_simple(y=y0s,k=k,freq=freq,model_params=model_params)
return (x, x_std, forecast, m) if return_forecast else (x, x_std)
def nprophet_fit_and_predict_simple(y: [float], k: int, freq: str = None, model_params: dict = None) -> Tuple[
List, List, Any, Any]:
""" Simpler wrapper for offlinetesting - univariate only """
assert isinstance(y[0],float)
freq = freq or NPROPHET_META['freq']
used_params = NPROPHET_MODEL
used_params.update({'n_forecasts':k})
if model_params:
used_params.update(model_params)
if len(y)<used_params['n_lags']:
x = [wrap(y)[0]]*k
x_std = [1.0]*k
return x, x_std, None, None
else:
model = NeuralProphet(**used_params)
df = pd.DataFrame(columns=['y'], data=y)
df['ds'] = pd.date_range(start='2021-01-01', periods=len(y), freq=freq)
metrics = model.fit(df, freq=freq, epochs=40, progress_bar=False)
future = model.make_future_dataframe(df)
forecast = model.predict(future)
x = [ forecast['yhat'+str(j+1)].values[-k+j] for j in range(k) ]
x_std = [1.0]*k
return x, x_std, forecast, model
if __name__=='__main__':
assert using_neuralprophet,'pip install neuralprophet'
from timemachines.skatertools.data.real import hospital
k = 3
n = 500
y = hospital(n=n)[-200:]
x, x_std, forecast, m = nprophet_iskater_factory(y=y, k=k)
print(x)
assert len(x) == k
x1, x_std1, forecast1, m1 = nprophet_fit_and_predict_simple(y=y, k=k)
if True:
m.plot(forecast)
m1.plot(forecast1)
import matplotlib.pyplot as plt
plt.show() |
api-reference-examples/python/pytx/pytx/threat_descriptor.py | b-bold/ThreatExchange | 997 | 25398 | <filename>api-reference-examples/python/pytx/pytx/threat_descriptor.py
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from .common import Common
from .vocabulary import ThreatDescriptor as td
from .vocabulary import ThreatExchange as t
class ThreatDescriptor(Common):
_URL = t.URL + t.VERSION + t.THREAT_DESCRIPTORS
_DETAILS = t.URL + t.VERSION
_RELATED = t.URL + t.VERSION
_fields = [
td.ADDED_ON,
td.CONFIDENCE,
td.DESCRIPTION,
td.EXPIRED_ON,
td.FIRST_ACTIVE,
td.ID,
td.INDICATOR,
td.LAST_ACTIVE,
td.LAST_UPDATED,
td.METADATA,
td.MY_REACTIONS,
td.OWNER,
td.PRECISION,
td.PRIVACY_MEMBERS,
td.PRIVACY_TYPE,
td.RAW_INDICATOR,
td.REVIEW_STATUS,
td.SEVERITY,
td.SHARE_LEVEL,
td.SOURCE_URI,
td.STATUS,
td.TAGS,
td.TYPE,
]
_default_fields = [
td.ADDED_ON,
td.CONFIDENCE,
td.DESCRIPTION,
td.EXPIRED_ON,
td.FIRST_ACTIVE,
td.ID,
td.INDICATOR,
td.LAST_ACTIVE,
td.LAST_UPDATED,
td.METADATA,
td.MY_REACTIONS,
td.OWNER,
td.PRECISION,
td.RAW_INDICATOR,
td.REVIEW_STATUS,
td.SEVERITY,
td.SHARE_LEVEL,
td.SOURCE_URI,
td.STATUS,
td.TAGS,
td.TYPE,
]
_connections = [
]
_unique = [
]
|
test/lib-clay/externals/abi/newtypes/run.py | jb55/clay | 185 | 25428 | import sys
sys.path.append('..')
import external_test
external_test.runExternalTest()
|
scripts/sg-toolbox/SG-Glyph-CopyLayer.py | tphinney/science-gothic | 104 | 25455 | <gh_stars>100-1000
#FLM: Glyph: Copy Layer (TypeRig)
# ----------------------------------------
# (C) <NAME>, 2019 (http://www.kateliev.com)
# (C) Karandash Type Foundry (http://www.karandash.eu)
#-----------------------------------------
# www.typerig.com
# No warranties. By using this you agree
# that you use it at your own risk!
# - Dependencies -----------------
import os
from collections import OrderedDict
import fontlab as fl6
from PythonQt import QtCore
from typerig.gui import QtGui
from typerig.gui.widgets import getProcessGlyphs
from typerig.proxy import *
# - Init --------------------------------
app_version = '1.97'
app_name = '[SG] Copy Layers'
# -- Copy Presets (by request)
copy_presets = {'contrast':[('Blk','Blk Ctr'),
('Blk Cnd','Blk Cnd Ctr'),
('Blk Exp','Blk Exp Ctr'),
('Cnd','Cnd Ctr'),
('Medium','Ctr'),
('Exp','Exp Ctr'),
('Lt','Lt Ctr'),
('Lt Cnd','Lt Cnd Ctr'),
('Lt Exp','Lt Exp Ctr')],
'ctr_light':[('Lt','Lt Ctr'),
('Lt Cnd','Lt Cnd Ctr'),
('Lt Exp','Lt Exp Ctr')],
'ctr_light_s':[('Lt','Lt Ctr'),
('Lt Cnd','Lt Cnd Ctr'),
('Lt Exp','Lt Exp Ctr'),
('Lt S','Lt Ctr S'),
('Lt Cnd S','Lt Cnd Ctr S'),
('Lt Exp S','Lt Exp Ctr S')],
'width': [('Blk','Blk Cnd'),
('Medium','Cnd'),
('Lt','Lt Cnd'),
('Blk','Blk Exp'),
('Medium','Exp'),
('Lt','Lt Exp')],
'weight':
[('Medium','Lt'),
('Medium','Blk')],
'slant': [('Lt','Lt S'),
('Medium','Medium S'),
('Blk','Blk S'),
('Lt Cnd','Lt Cnd S'),
('Cnd','Cnd S'),
('Blk Cnd','Blk Cnd S'),
('Lt Exp','Lt Exp S'),
('Exp','Exp S'),
('Blk Exp','Blk Exp S'),
('Lt','Lt Ctr S'),
('Ctr','Ctr S'),
('Blk Ctr','Blk Ctr S'),
('Lt Cnd','Lt Cnd Ctr S'),
('Cnd Ctr','Cnd Ctr S'),
('Blk Cnd Ctr','Blk Cnd Ctr S'),
('Lt Exp','Lt Exp Ctr S'),
('Exp Ctr','Exp Ctr S'),
('Blk Exp Ctr','Blk Exp Ctr S')]
}
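# Each preset maps a name to a list of (source master, destination master)
# pairs; e.g. the 'weight' preset copies the 'Medium' layer into 'Lt' and 'Blk'.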
# -- GUI related
table_dict = {1:OrderedDict([('Master Name', None), ('SRC', False), ('DST', False)])}
spinbox_range = (-99, 99)
# - Widgets --------------------------------
class WTableView(QtGui.QTableWidget):
def __init__(self, data):
super(WTableView, self).__init__()
# - Init
self.setColumnCount(max(map(len, data.values())))
self.setRowCount(len(data.keys()))
# - Set
self.setTable(data)
self.itemChanged.connect(self.markChange)
# - Styling
self.horizontalHeader().setStretchLastSection(True)
self.setAlternatingRowColors(True)
self.setShowGrid(False)
#self.resizeColumnsToContents()
self.resizeRowsToContents()
def setTable(self, data, data_check=[], reset=False):
name_row, name_column = [], []
self.blockSignals(True)
self.setColumnCount(max(map(len, data.values())))
self.setRowCount(len(data.keys()))
# - Populate
for n, layer in enumerate(sorted(data.keys())):
name_row.append(layer)
for m, key in enumerate(data[layer].keys()):
# -- Build name column
name_column.append(key)
# -- Add first data column
newitem = QtGui.QTableWidgetItem(str(data[layer][key])) if m == 0 else QtGui.QTableWidgetItem()
# -- Selectively colorize missing data
if m == 0 and len(data_check) and data[layer][key] not in data_check: newitem.setBackground(QtGui.QColor('red'))
# -- Build Checkbox columns
if m > 0: newitem.setFlags(QtCore.Qt.ItemIsUserCheckable | QtCore.Qt.ItemIsEnabled)
if m > 0: newitem.setCheckState(QtCore.Qt.Unchecked if not data[layer][key] else QtCore.Qt.Checked)
self.setItem(n, m, newitem)
self.setHorizontalHeaderLabels(name_column)
self.setVerticalHeaderLabels(name_row)
self.blockSignals(False)
def getTable(self):
returnDict = {}
for row in range(self.rowCount):
#returnDict[self.item(row, 0).text()] = (self.item(row, 1).checkState() == QtCore.Qt.Checked, self.item(row, 2).checkState() == QtCore.Qt.Checked)
if self.item(row, 1).checkState() == QtCore.Qt.Checked:
returnDict.setdefault('SRC',[]).append(self.item(row, 0).text())
if self.item(row, 2).checkState() == QtCore.Qt.Checked:
returnDict.setdefault('DST',[]).append(self.item(row, 0).text())
return returnDict
def markChange(self, item):
item.setBackground(QtGui.QColor('powderblue'))
# - Dialogs --------------------------------
class dlg_CopyLayer(QtGui.QDialog):
def __init__(self):
super(dlg_CopyLayer, self).__init__()
# - Init
self.active_font = pFont()
self.pMode = 0
# - Basic Widgets
self.tab_masters = WTableView(table_dict)
self.table_populate()
self.edt_checkStr = QtGui.QLineEdit()
self.edt_checkStr.setPlaceholderText('DST string')
self.edt_checkStr.setToolTip('Enter search criteria for selectively selecting destination masters.')
self.btn_refresh = QtGui.QPushButton('Clear')
self.btn_checkOn = QtGui.QPushButton('Select')
self.btn_execute = QtGui.QPushButton('Execute Selection')
self.btn_preset_contrast = QtGui.QPushButton('Copy to Contrast Masters')
self.btn_preset_width = QtGui.QPushButton('Copy to Width Masters')
self.btn_preset_weight = QtGui.QPushButton('Copy to Weight Masters')
self.btn_preset_ctrlt = QtGui.QPushButton('Copy to Light Contrast Masters')
self.btn_preset_ctrlts = QtGui.QPushButton('Copy to Light Contrast Masters (incl. Slant)')
self.btn_preset_slant = QtGui.QPushButton('Copy to Slant Masters')
self.btn_refresh.clicked.connect(self.table_populate)
self.btn_checkOn.clicked.connect(lambda: self.table_populate(True))
self.btn_execute.clicked.connect(self.execute_table)
self.btn_preset_contrast.clicked.connect(lambda: self.execute_preset(copy_presets['contrast']))
self.btn_preset_width.clicked.connect(lambda: self.execute_preset(copy_presets['width']))
self.btn_preset_weight.clicked.connect(lambda: self.execute_preset(copy_presets['weight']))
self.btn_preset_ctrlt.clicked.connect(lambda: self.execute_preset(copy_presets['ctr_light']))
self.btn_preset_ctrlts.clicked.connect(lambda: self.execute_preset(copy_presets['ctr_light_s']))
self.btn_preset_slant.clicked.connect(lambda: self.execute_preset(copy_presets['slant']))
self.rad_glyph = QtGui.QRadioButton('Glyph')
self.rad_window = QtGui.QRadioButton('Window')
self.rad_selection = QtGui.QRadioButton('Selection')
self.rad_font = QtGui.QRadioButton('Font')
self.chk_outline = QtGui.QCheckBox('Outline')
self.chk_guides = QtGui.QCheckBox('Guides')
self.chk_anchors = QtGui.QCheckBox('Anchors')
self.chk_lsb = QtGui.QCheckBox('LSB')
self.chk_adv = QtGui.QCheckBox('Advance')
self.chk_rsb = QtGui.QCheckBox('RSB')
self.chk_lnk = QtGui.QCheckBox('Metric Links')
self.chk_crlayer = QtGui.QCheckBox('Add layers')
# -- Set States
self.chk_outline.setCheckState(QtCore.Qt.Checked)
self.chk_adv.setCheckState(QtCore.Qt.Checked)
self.chk_lsb.setCheckState(QtCore.Qt.Checked)
self.chk_anchors.setCheckState(QtCore.Qt.Checked)
self.chk_lnk.setCheckState(QtCore.Qt.Checked)
self.chk_crlayer.setCheckState(QtCore.Qt.Checked)
self.chk_guides.setEnabled(False)
self.rad_glyph.setChecked(True)
self.rad_glyph.setEnabled(True)
self.rad_window.setEnabled(True)
self.rad_selection.setEnabled(True)
self.rad_font.setEnabled(False)
self.rad_glyph.toggled.connect(self.refreshMode)
self.rad_window.toggled.connect(self.refreshMode)
self.rad_selection.toggled.connect(self.refreshMode)
self.rad_font.toggled.connect(self.refreshMode)
# - Build layouts
layoutV = QtGui.QGridLayout()
layoutV.addWidget(QtGui.QLabel('Process Mode:'), 0, 0, 1, 8, QtCore.Qt.AlignBottom)
layoutV.addWidget(self.rad_glyph, 1, 0, 1, 2)
layoutV.addWidget(self.rad_window, 1, 2, 1, 2)
layoutV.addWidget(self.rad_selection, 1, 4, 1, 2)
layoutV.addWidget(self.rad_font, 1, 6, 1, 2)
layoutV.addWidget(QtGui.QLabel('Copy Options:'), 2, 0, 1, 8, QtCore.Qt.AlignBottom)
layoutV.addWidget(self.chk_outline, 3, 0, 1, 2)
layoutV.addWidget(self.chk_guides, 3, 2, 1, 2)
layoutV.addWidget(self.chk_anchors, 3, 4, 1, 2)
layoutV.addWidget(self.chk_crlayer, 3, 6, 1, 2)
layoutV.addWidget(self.chk_lsb, 4, 0, 1, 2)
layoutV.addWidget(self.chk_adv, 4, 2, 1, 2)
layoutV.addWidget(self.chk_rsb, 4, 4, 1, 2)
layoutV.addWidget(self.chk_lnk, 4, 6, 1, 2)
layoutV.addWidget(QtGui.QLabel('Master Layers: Single source to multiple destinations'), 5, 0, 1, 8, QtCore.Qt.AlignBottom)
layoutV.addWidget(QtGui.QLabel('Search:'), 6, 0, 1, 1)
layoutV.addWidget(self.edt_checkStr, 6, 1, 1, 3)
layoutV.addWidget(self.btn_checkOn, 6, 4, 1, 2)
layoutV.addWidget(self.btn_refresh, 6, 6, 1, 2)
layoutV.addWidget(self.tab_masters, 7, 0, 15, 8)
layoutV.addWidget(self.btn_execute, 22, 0, 1,8)
layoutV.addWidget(QtGui.QLabel('Master Layers: Copy Presets'), 23, 0, 1, 8, QtCore.Qt.AlignBottom)
layoutV.addWidget(self.btn_preset_weight, 24, 0, 1,8)
layoutV.addWidget(self.btn_preset_width, 25, 0, 1,8)
layoutV.addWidget(self.btn_preset_contrast, 26, 0, 1,8)
layoutV.addWidget(self.btn_preset_ctrlt, 27, 0, 1,8)
layoutV.addWidget(self.btn_preset_ctrlts, 28, 0, 1,8)
layoutV.addWidget(self.btn_preset_slant, 29, 0, 1,8)
# - Set Widget
self.setLayout(layoutV)
self.setWindowTitle('%s %s' %(app_name, app_version))
self.setGeometry(300, 300, 300, 600)
self.setWindowFlags(QtCore.Qt.WindowStaysOnTopHint) # Always on top!!
self.show()
def refreshMode(self):
if self.rad_glyph.isChecked(): self.pMode = 0
if self.rad_window.isChecked(): self.pMode = 1
if self.rad_selection.isChecked(): self.pMode = 2
if self.rad_font.isChecked(): self.pMode = 3
def copyLayer(self, glyph, srcLayerName, dstLayerName, options, cleanDST=False, addLayer=False):
# -- Check if srcLayerExists
if glyph.layer(srcLayerName) is None:
print 'WARN:\tGlyph: %s\tMissing source layer: %s\tSkipped!' %(glyph.name, srcLayerName)
return
# -- Check if dstLayerExists
if glyph.layer(dstLayerName) is None:
print 'WARN:\tGlyph: %s\tMissing destination layer: %s\tAdd new: %s.' %(glyph.name, dstLayerName, addLayer)
if addLayer:
newLayer = fl6.flLayer()
newLayer.name = str(dstLayerName)
glyph.addLayer(newLayer)
else:
return
# -- Outline
if options['out']:
# --- Get shapes
srcShapes = glyph.shapes(srcLayerName)
# --- Cleanup destination layers
if cleanDST:
glyph.layer(dstLayerName).removeAllShapes()
# --- Copy/Paste shapes
for shape in srcShapes:
newShape = glyph.layer(dstLayerName).addShape(shape.cloneTopLevel())
glyph.update()
# -- Metrics
if options['lsb']: glyph.setLSB(glyph.getLSB(srcLayerName), dstLayerName)
if options['adv']: glyph.setAdvance(glyph.getAdvance(srcLayerName), dstLayerName)
if options['rsb']: glyph.setRSB(glyph.getRSB(srcLayerName), dstLayerName)
if options['lnk']:
glyph.setLSBeq(glyph.getSBeq(srcLayerName)[0], dstLayerName)
glyph.setRSBeq(glyph.getSBeq(srcLayerName)[1], dstLayerName)
# -- Anchors
if options['anc']:
if cleanDST:
glyph.clearAnchors(dstLayerName)
for src_anchor in glyph.anchors(srcLayerName):
#glyph.layer(dstLayerName).addAnchor(src_anchor)
glyph.addAnchor((src_anchor.point.x(), src_anchor.point.y()), src_anchor.name, dstLayerName)
def table_populate(self, filterDST=False):
if not filterDST:
self.tab_masters.setTable({n:OrderedDict([('Master Name', master), ('SRC', False), ('DST', False)]) for n, master in enumerate(self.active_font.pMasters.names)})
self.tab_masters.resizeColumnsToContents()
else:
#print ';'.join(sorted(self.active_font.pMasters.names))
self.tab_masters.setTable({n:OrderedDict([('Master Name', master), ('SRC', False), ('DST', self.edt_checkStr.text in master)]) for n, master in enumerate(self.active_font.pMasters.names)})
self.tab_masters.resizeColumnsToContents()
def getCopyOptions(self):
options = {'out': self.chk_outline.isChecked(),
'gui': self.chk_guides.isChecked(),
'anc': self.chk_anchors.isChecked(),
'lsb': self.chk_lsb.isChecked(),
'adv': self.chk_adv.isChecked(),
'rsb': self.chk_rsb.isChecked(),
'lnk': self.chk_lnk.isChecked(),
'ref': self.chk_crlayer.isChecked()
}
return options
def execute_table(self):
# - Init
copy_options = self.getCopyOptions()
process_glyphs = getProcessGlyphs(self.pMode)
# - Process
process_dict = self.tab_masters.getTable()
process_src = process_dict['SRC'][0]
process_dst = process_dict['DST']
for wGlyph in process_glyphs:
for dst_layer in process_dst:
self.copyLayer(wGlyph, process_src, dst_layer, copy_options, True, self.chk_crlayer.isChecked())
wGlyph.update()
wGlyph.updateObject(wGlyph.fl, 'Glyph: /%s;\tCopy Layer | %s -> %s.' %(wGlyph.name, process_src, '; '.join(process_dst)))
def execute_preset(self, preset_list):
# - Init
copy_options = self.getCopyOptions()
process_glyphs = getProcessGlyphs(self.pMode)
print_preset = [' -> '.join(item) for item in preset_list]
# - Process
for wGlyph in process_glyphs:
for process_src, process_dst in preset_list:
self.copyLayer(wGlyph, process_src, process_dst, copy_options, True, self.chk_crlayer.isChecked())
wGlyph.update()
wGlyph.updateObject(wGlyph.fl, 'Glyph: /%s;\tCopy Layer Preset | %s.' %(wGlyph.name, ' | '.join(print_preset)))
# - RUN ------------------------------
dialog = dlg_CopyLayer() |
tests/core/test_base_component.py | strickvl/zenml | 1,275 | 25475 | <reponame>strickvl/zenml
# Copyright (c) ZenML GmbH 2021. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing
# permissions and limitations under the License.
from typing import Text
from zenml.core.base_component import BaseComponent
class MockComponent(BaseComponent):
"""Mocking the base component for testing."""
tmp_path: str
def get_serialization_dir(self) -> Text:
"""Mock serialization dir"""
return self.tmp_path
def test_base_component_serialization_logic(tmp_path):
"""Tests the UUID serialization logic of BaseComponent"""
# Application of the monkeypatch to replace Path.home
# with the behavior of mockreturn defined above.
# mc = MockComponent(tmp_path=str(tmp_path))
# Calling getssh() will use mockreturn in place of Path.home
# for this test with the monkeypatch.
# print(mc.get_serialization_dir())
|
tests/unit/local/lambdafn/test_config.py | torresxb1/aws-sam-cli | 859 | 25500 | <reponame>torresxb1/aws-sam-cli<filename>tests/unit/local/lambdafn/test_config.py
from unittest import TestCase
from unittest.mock import Mock
from parameterized import parameterized
from samcli.lib.utils.packagetype import ZIP
from samcli.local.lambdafn.config import FunctionConfig
from samcli.commands.local.cli_common.user_exceptions import InvalidSamTemplateException
class TestFunctionConfig(TestCase):
DEFAULT_MEMORY = 128
DEFAULT_TIMEOUT = 3
def setUp(self):
self.name = "name"
self.runtime = "runtime"
self.handler = "handler"
self.imageuri = None
self.imageconfig = None
self.packagetype = ZIP
self.code_path = "codepath"
self.memory = 1234
self.timeout = 34
self.env_vars_mock = Mock()
self.layers = ["layer1"]
self.architecture = "arm64"
def test_init_with_env_vars(self):
config = FunctionConfig(
self.name,
self.runtime,
self.handler,
self.imageuri,
self.imageconfig,
self.packagetype,
self.code_path,
self.layers,
self.architecture,
memory=self.memory,
timeout=self.timeout,
env_vars=self.env_vars_mock,
)
self.assertEqual(config.name, self.name)
self.assertEqual(config.runtime, self.runtime)
self.assertEqual(config.handler, self.handler)
self.assertEqual(config.imageuri, self.imageuri)
self.assertEqual(config.imageconfig, self.imageconfig)
self.assertEqual(config.packagetype, self.packagetype)
self.assertEqual(config.code_abs_path, self.code_path)
self.assertEqual(config.layers, self.layers)
self.assertEqual(config.memory, self.memory)
self.assertEqual(config.timeout, self.timeout)
self.assertEqual(config.env_vars, self.env_vars_mock)
self.assertEqual(self.env_vars_mock.handler, self.handler)
self.assertEqual(self.env_vars_mock.memory, self.memory)
self.assertEqual(self.env_vars_mock.timeout, self.timeout)
def test_init_without_optional_values(self):
config = FunctionConfig(
self.name,
self.runtime,
self.handler,
self.imageuri,
self.imageconfig,
self.packagetype,
self.code_path,
self.layers,
self.architecture,
)
self.assertEqual(config.name, self.name)
self.assertEqual(config.runtime, self.runtime)
self.assertEqual(config.handler, self.handler)
self.assertEqual(config.packagetype, self.packagetype)
self.assertEqual(config.imageuri, self.imageuri)
self.assertEqual(config.imageconfig, self.imageconfig)
self.assertEqual(config.code_abs_path, self.code_path)
self.assertEqual(config.layers, self.layers)
self.assertEqual(config.memory, self.DEFAULT_MEMORY)
self.assertEqual(config.timeout, self.DEFAULT_TIMEOUT)
self.assertIsNotNone(config.env_vars)
self.assertEqual(config.env_vars.handler, self.handler)
self.assertEqual(config.env_vars.memory, self.DEFAULT_MEMORY)
self.assertEqual(config.env_vars.timeout, self.DEFAULT_TIMEOUT)
def test_init_with_timeout_of_int_string(self):
config = FunctionConfig(
self.name,
self.runtime,
self.handler,
self.imageuri,
self.imageconfig,
self.packagetype,
self.code_path,
self.layers,
self.architecture,
memory=self.memory,
timeout="34",
env_vars=self.env_vars_mock,
)
self.assertEqual(config.name, self.name)
self.assertEqual(config.runtime, self.runtime)
self.assertEqual(config.handler, self.handler)
self.assertEqual(config.packagetype, self.packagetype)
self.assertEqual(config.imageuri, self.imageuri)
self.assertEqual(config.imageconfig, self.imageconfig)
self.assertEqual(config.code_abs_path, self.code_path)
self.assertEqual(config.layers, self.layers)
self.assertEqual(config.memory, self.memory)
self.assertEqual(config.timeout, 34)
self.assertEqual(config.env_vars, self.env_vars_mock)
self.assertEqual(self.env_vars_mock.handler, self.handler)
self.assertEqual(self.env_vars_mock.memory, self.memory)
self.assertEqual(self.env_vars_mock.timeout, 34)
class TestFunctionConfigInvalidTimeouts(TestCase):
def setUp(self):
self.name = "name"
self.runtime = "runtime"
self.handler = "handler"
self.imageuri = None
self.imageconfig = None
self.packagetype = ZIP
self.code_path = "codepath"
self.memory = 1234
self.env_vars_mock = Mock()
self.layers = ["layer1"]
self.architecture = "x86_64"
@parameterized.expand(
[
("none int string",),
({"dictionary": "is not a string either"},),
("/local/lambda/timeout",),
("3.2",),
("4.2",),
("0.123",),
]
)
def test_init_with_invalid_timeout_values(self, timeout):
with self.assertRaises(InvalidSamTemplateException):
FunctionConfig(
self.name,
self.runtime,
                self.handler,
                self.imageuri,
                self.imageconfig,
                self.packagetype,
self.code_path,
self.layers,
self.architecture,
memory=self.memory,
timeout=timeout,
env_vars=self.env_vars_mock,
)
class TestFunctionConfig_equals(TestCase):
DEFAULT_MEMORY = 128
DEFAULT_TIMEOUT = 3
def setUp(self):
self.name = "name"
self.name2 = "name2"
self.runtime = "runtime"
self.handler = "handler"
self.imageuri = None
self.imageconfig = None
self.packagetype = ZIP
self.code_path = "codepath"
self.memory = 1234
self.timeout = 34
self.env_vars_mock = Mock()
self.layers = ["layer1"]
self.architecture = "arm64"
def test_equals_function_config(self):
config1 = FunctionConfig(
self.name,
self.runtime,
self.handler,
self.imageuri,
self.imageconfig,
self.packagetype,
self.code_path,
self.layers,
self.architecture,
memory=self.memory,
timeout=self.timeout,
env_vars=self.env_vars_mock,
)
config2 = FunctionConfig(
self.name,
self.runtime,
self.handler,
self.imageuri,
self.imageconfig,
self.packagetype,
self.code_path,
self.layers,
self.architecture,
memory=self.memory,
timeout=self.timeout,
env_vars=self.env_vars_mock,
)
self.assertTrue(config1 == config2)
def test_not_equals_function_config(self):
config1 = FunctionConfig(
self.name,
self.runtime,
self.handler,
self.imageuri,
self.imageconfig,
self.packagetype,
self.code_path,
self.layers,
self.architecture,
memory=self.memory,
timeout=self.timeout,
env_vars=self.env_vars_mock,
)
config2 = FunctionConfig(
self.name2,
self.runtime,
self.handler,
self.imageuri,
self.imageconfig,
self.packagetype,
self.code_path,
self.layers,
self.architecture,
memory=self.memory,
timeout=self.timeout,
env_vars=self.env_vars_mock,
)
self.assertTrue(config1 != config2)
|
examples/lolcode_rockstar.py | hoojaoh/rockstar | 4,603 | 25502 | <filename>examples/lolcode_rockstar.py
from rockstar import RockStar
lolcode_code = """HAI
CAN HAS STDIO?
VISIBLE "HAI WORLD!"
KTHXBYE"""
rock_it_bro = RockStar(days=400, file_name='helloworld.lol', code=lolcode_code)
rock_it_bro.make_me_a_rockstar()
|
test/unit/agent/common/util/math.py | dp92987/nginx-amplify-agent | 308 | 25519 | # -*- coding: utf-8 -*-
from amplify.agent.common.util.math import median
from unittest import TestCase
from hamcrest import *
__author__ = "<NAME>"
__copyright__ = "Copyright (C) Nginx, Inc. All rights reserved."
__license__ = ""
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
class MathTestCase(TestCase):
def test_median(self):
# even length
assert_that(median([1, 3, 5, 7]), equal_to(4.0))
# unsorted
assert_that(median([1, 5, 7, 3]), equal_to(4.0))
# odd length
assert_that(median([1, 2, 3, 4, 5, 6, 7]), equal_to(4.0))
assert_that(median([]), equal_to(None))
|
aea/cli/utils/formatting.py | bryanchriswhite/agents-aea | 126 | 25529 | <filename>aea/cli/utils/formatting.py<gh_stars>100-1000
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2018-2020 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""Module with formatting utils of the aea cli."""
from typing import Dict, List
from aea.configurations.base import AgentConfig
from aea.configurations.loader import ConfigLoader
from aea.exceptions import enforce
from aea.helpers.io import open_file
def format_items(items: List[Dict]) -> str:
"""Format list of items (protocols/connections) to a string for CLI output."""
list_str = ""
for item in items:
list_str += (
"{line}\n"
"Public ID: {public_id}\n"
"Name: {name}\n"
"Description: {description}\n"
"Author: {author}\n"
"Version: {version}\n"
"{line}\n".format(
name=item["name"],
public_id=item["public_id"],
description=item["description"],
author=item["author"],
version=item["version"],
line="-" * 30,
)
)
return list_str
def retrieve_details(name: str, loader: ConfigLoader, config_filepath: str) -> Dict:
"""Return description of a protocol, skill, connection."""
with open_file(str(config_filepath)) as fp:
config = loader.load(fp)
item_name = config.agent_name if isinstance(config, AgentConfig) else config.name
enforce(item_name == name, "Item names do not match!")
return {
"public_id": str(config.public_id),
"name": item_name,
"author": config.author,
"description": config.description,
"version": config.version,
}
def sort_items(items: List[Dict]) -> List[Dict]:
"""
Sort a list of dict items associated with packages.
:param items: list of dicts that represent items.
:return: sorted list.
"""
return sorted(items, key=lambda k: k["name"])
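if __name__ == "__main__":
    # Hedged usage sketch (illustration only, not part of the aea CLI): shows the
    # string produced by format_items() and the ordering applied by sort_items().
    # The item dicts below are made-up sample data, not real packages.
    sample_items = [
        {
            "public_id": "demo_author/beta:0.1.0",
            "name": "beta",
            "description": "A made-up package used only for this example.",
            "author": "demo_author",
            "version": "0.1.0",
        },
        {
            "public_id": "demo_author/alpha:0.1.0",
            "name": "alpha",
            "description": "Another made-up package.",
            "author": "demo_author",
            "version": "0.1.0",
        },
    ]
    # sort_items orders by the "name" key, so "alpha" is printed before "beta".
    print(format_items(sort_items(sample_items)))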
|
blender/arm/logicnode/math/LN_separate_xyz.py | onelsonic/armory | 2,583 | 25535 | <reponame>onelsonic/armory<filename>blender/arm/logicnode/math/LN_separate_xyz.py<gh_stars>1000+
from arm.logicnode.arm_nodes import *
class SeparateVectorNode(ArmLogicTreeNode):
"""Splits the given vector into X, Y and Z."""
bl_idname = 'LNSeparateVectorNode'
bl_label = 'Separate XYZ'
arm_section = 'vector'
arm_version = 1
def arm_init(self, context):
self.add_input('ArmVectorSocket', 'Vector')
self.add_output('ArmFloatSocket', 'X')
self.add_output('ArmFloatSocket', 'Y')
self.add_output('ArmFloatSocket', 'Z')
|
ml3d/torch/utils/helper_torch.py | inkyusa/Open3D-ML | 447 | 25537 | <filename>ml3d/torch/utils/helper_torch.py
import torch
import torch.nn as nn
import torch.nn.functional as F
class conv2d_transpose(nn.Module):
def __init__(self,
batchNorm,
in_planes,
out_planes,
kernel_size=1,
stride=1,
activation=True):
super(conv2d_transpose, self).__init__()
self.conv = nn.ConvTranspose2d(in_planes,
out_planes,
kernel_size=kernel_size,
stride=stride,
padding=(kernel_size - 1) // 2)
self.biases = self.conv.bias
self.weights = self.conv.weight
self.batchNorm = batchNorm
self.batch_normalization = nn.BatchNorm2d(out_planes,
momentum=0.01,
eps=1e-6)
if activation:
self.activation_fn = nn.LeakyReLU(0.2)
else:
self.activation_fn = nn.Identity()
def forward(self, x):
x = self.conv(x)
if self.batchNorm:
x = self.batch_normalization(x)
x = self.activation_fn(x)
return x
class conv2d(nn.Module):
def __init__(self,
batchNorm,
in_planes,
out_planes,
kernel_size=1,
stride=1,
activation=True):
super(conv2d, self).__init__()
self.conv = nn.Conv2d(in_planes,
out_planes,
kernel_size=kernel_size,
stride=stride,
padding=(kernel_size - 1) // 2)
self.biases = self.conv.bias
self.weights = self.conv.weight
self.batchNorm = batchNorm
if self.batchNorm:
self.batch_normalization = nn.BatchNorm2d(out_planes,
momentum=0.01,
eps=1e-6)
if activation:
self.activation_fn = nn.LeakyReLU(0.2, inplace=True)
else:
self.activation_fn = nn.Identity()
def forward(self, x):
x = self.conv(x)
if self.batchNorm:
x = self.batch_normalization(x)
x = self.activation_fn(x)
return x
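if __name__ == "__main__":
    # Hedged usage sketch (not part of the library): runs a random image batch
    # through the conv2d and conv2d_transpose wrappers defined above. Shapes and
    # hyper-parameters here are arbitrary choices made only for illustration.
    x = torch.randn(2, 3, 32, 32)                        # NCHW batch of two RGB 32x32 images
    down = conv2d(True, 3, 16, kernel_size=3, stride=2)
    up = conv2d_transpose(True, 16, 3, kernel_size=3, stride=1)
    y = down(x)                                          # conv + batch norm + LeakyReLU
    z = up(y)
    print(y.shape, z.shape)                              # torch.Size([2, 16, 16, 16]) torch.Size([2, 3, 16, 16])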
|
service-workers/service-worker/resources/update-max-aged-worker.py | meyerweb/wpt | 14,668 | 25549 | import time
import json
from wptserve.utils import isomorphic_decode, isomorphic_encode
def main(request, response):
headers = [(b'Content-Type', b'application/javascript'),
(b'Cache-Control', b'max-age=86400'),
(b'Last-Modified', isomorphic_encode(time.strftime(u"%a, %d %b %Y %H:%M:%S GMT", time.gmtime())))]
test = request.GET[b'test']
body = u'''
const mainTime = {time:8f};
const testName = {test};
importScripts('update-max-aged-worker-imported-script.py');
addEventListener('message', event => {{
event.source.postMessage({{
mainTime,
importTime,
test: {test}
}});
}});
'''.format(
time=time.time(),
test=json.dumps(isomorphic_decode(test))
)
return headers, body
|
djangae/contrib/googleauth/backends/base.py | sleepyjames/djangae | 467 | 25585 | <gh_stars>100-1000
"""
This is duplicated from Django 3.0 to avoid
starting an import chain that ends up with
ContentTypes which may not be installed in a
Djangae project.
"""
class BaseBackend:
def authenticate(self, request, **kwargs):
return None
@classmethod
def can_authenticate(cls, request):
"""
This is a pre-check to see if the credentials are
available to try to authenticate.
"""
return True
def get_user(self, user_id):
return None
def get_user_permissions(self, user_obj, obj=None):
return set()
def get_group_permissions(self, user_obj, obj=None):
return set()
def get_all_permissions(self, user_obj, obj=None):
return {
*self.get_user_permissions(user_obj, obj=obj),
*self.get_group_permissions(user_obj, obj=obj),
}
def has_perm(self, user_obj, perm, obj=None):
return perm in self.get_all_permissions(user_obj, obj=obj)
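if __name__ == "__main__":
    # Hedged sketch (illustration only, not shipped behaviour): a minimal subclass
    # showing how has_perm() is derived from get_user_permissions() and
    # get_group_permissions(). The permission string below is made up.
    class _DemoBackend(BaseBackend):
        def get_user_permissions(self, user_obj, obj=None):
            return {"demo_app.view_widget"}

    backend = _DemoBackend()
    user = object()  # any object works for this sketch; no real user model needed
    assert backend.has_perm(user, "demo_app.view_widget")
    assert not backend.has_perm(user, "demo_app.delete_widget")
    print("permission checks behave as expected")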
|
tests/cell_fabric/test_rect.py | mabrains/ALIGN-public | 119 | 25586 | <filename>tests/cell_fabric/test_rect.py
from align.cell_fabric.transformation import Rect
def test_toList():
r = Rect( 0, 0, 1, 1)
assert r.toList() == [0, 0, 1, 1]
def test_canonical():
r = Rect( 1, 1, 0, 0)
assert r.canonical().toList() == [0, 0, 1, 1]
def test_repr():
r = Rect( 0, 0, 1, 1)
assert r.__repr__() == "[0, 0, 1, 1]"
assert repr(r) == "[0, 0, 1, 1]"
assert str(r) == "[0, 0, 1, 1]"
|
Segment Tree Query II.py | RijuDasgupta9116/LintCode | 321 | 25604 | <filename>Segment Tree Query II.py
"""
For an array, we can build a SegmentTree for it; each node stores an extra attribute count to denote the number of
elements in the array whose value is between interval start and end. (The array may not be fully filled by elements.)
Design a query method with three parameters root, start and end that finds the number of elements in the array's
interval [start, end], given the root of the value SegmentTree.
Have you met this question in a real interview? Yes
Example
For array [0, empty, 2, 3], the corresponding value Segment Tree is:
[0, 3, count=3]
/ \
[0,1,count=1] [2,3,count=2]
/ \ / \
[0,0,count=1] [1,1,count=0] [2,2,count=1], [3,3,count=1]
query(1, 1), return 0
query(1, 2), return 1
query(2, 3), return 2
query(0, 2), return 2
"""
__author__ = 'Daniel'
DEFAULT = 0
f = lambda x, y: x+y
class Solution:
def query(self, root, s, e):
"""
Segment: [s, e]
:param root: The root of segment tree
        :param s: start of segment/interval
        :param e: end of segment/interval
        :return: The number of elements in the interval [s, e]
"""
if not root:
return DEFAULT
if s <= root.start and e >= root.end:
return root.count
if s > root.end or e < root.start:
return DEFAULT
l = self.query(root.left, s, e)
r = self.query(root.right, s, e)
return f(l, r)
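if __name__ == "__main__":
    # Hedged sketch: LintCode normally supplies the SegmentTreeNode class, so a
    # minimal stand-in node type is defined here purely to rebuild the example
    # tree from the docstring above and check the documented query results.
    class _Node(object):
        def __init__(self, start, end, count, left=None, right=None):
            self.start, self.end, self.count = start, end, count
            self.left, self.right = left, right

    leaves = [_Node(0, 0, 1), _Node(1, 1, 0), _Node(2, 2, 1), _Node(3, 3, 1)]
    root = _Node(0, 3, 3,
                 _Node(0, 1, 1, leaves[0], leaves[1]),
                 _Node(2, 3, 2, leaves[2], leaves[3]))

    sol = Solution()
    assert sol.query(root, 1, 1) == 0
    assert sol.query(root, 1, 2) == 1
    assert sol.query(root, 2, 3) == 2
    assert sol.query(root, 0, 2) == 2
    print("all documented queries match")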
|
hearthbreaker/cards/spells/neutral.py | souserge/hearthbreaker | 429 | 25628 | from hearthbreaker.cards.base import SpellCard
from hearthbreaker.constants import CHARACTER_CLASS, CARD_RARITY
from hearthbreaker.tags.base import BuffUntil, Buff
from hearthbreaker.tags.event import TurnStarted
from hearthbreaker.tags.status import Stealth, Taunt, Frozen
import hearthbreaker.targeting
class TheCoin(SpellCard):
def __init__(self):
super().__init__("The Coin", 0, CHARACTER_CLASS.ALL, CARD_RARITY.COMMON, False)
def use(self, player, game):
super().use(player, game)
if player.mana < 10:
player.mana += 1
class ArmorPlating(SpellCard):
def __init__(self):
super().__init__("Armor Plating", 1, CHARACTER_CLASS.ALL, CARD_RARITY.COMMON, False,
target_func=hearthbreaker.targeting.find_minion_spell_target)
def use(self, player, game):
super().use(player, game)
self.target.increase_health(1)
class EmergencyCoolant(SpellCard):
def __init__(self):
super().__init__("Emergency Coolant", 1, CHARACTER_CLASS.ALL, CARD_RARITY.COMMON, False,
target_func=hearthbreaker.targeting.find_minion_spell_target)
def use(self, player, game):
super().use(player, game)
self.target.add_buff(Buff(Frozen()))
class FinickyCloakfield(SpellCard):
def __init__(self):
super().__init__("Finicky Cloakfield", 1, CHARACTER_CLASS.ALL, CARD_RARITY.COMMON, False,
target_func=hearthbreaker.targeting.find_friendly_minion_spell_target)
def use(self, player, game):
super().use(player, game)
self.target.add_buff(BuffUntil(Stealth(), TurnStarted()))
class ReversingSwitch(SpellCard):
def __init__(self):
super().__init__("Reversing Switch", 1, CHARACTER_CLASS.ALL, CARD_RARITY.COMMON, False,
target_func=hearthbreaker.targeting.find_minion_spell_target)
def use(self, player, game):
super().use(player, game)
temp_attack = self.target.calculate_attack()
temp_health = self.target.health
if temp_attack == 0:
self.target.die(None)
else:
self.target.set_attack_to(temp_health)
self.target.set_health_to(temp_attack)
class RustyHorn(SpellCard):
def __init__(self):
super().__init__("Rusty Horn", 1, CHARACTER_CLASS.ALL, CARD_RARITY.COMMON, False,
target_func=hearthbreaker.targeting.find_minion_spell_target)
def use(self, player, game):
super().use(player, game)
self.target.add_buff(Buff(Taunt()))
class TimeRewinder(SpellCard):
def __init__(self):
super().__init__("Time Rewinder", 1, CHARACTER_CLASS.ALL, CARD_RARITY.COMMON, False,
target_func=hearthbreaker.targeting.find_friendly_minion_spell_target)
def use(self, player, game):
super().use(player, game)
self.target.bounce()
class WhirlingBlades(SpellCard):
def __init__(self):
super().__init__("Whirling Blades", 1, CHARACTER_CLASS.ALL, CARD_RARITY.COMMON, False,
target_func=hearthbreaker.targeting.find_minion_spell_target)
def use(self, player, game):
super().use(player, game)
self.target.change_attack(1)
spare_part_list = [ArmorPlating(), EmergencyCoolant(), FinickyCloakfield(), TimeRewinder(), ReversingSwitch(),
RustyHorn(), WhirlingBlades()]
class GallywixsCoin(SpellCard):
def __init__(self):
super().__init__("Gallywix's Coin", 0, CHARACTER_CLASS.ALL, CARD_RARITY.COMMON, False)
def use(self, player, game):
super().use(player, game)
if player.mana < 10:
player.mana += 1
|
scripts/cloud/aws/ops-ec2-add-snapshot-tag-to-ebs-volumes.py | fahlmant/openshift-tools | 164 | 25638 | #!/usr/bin/env python
# vim: expandtab:tabstop=4:shiftwidth=4
"""
This is a script that can be used to tag EBS volumes in OpenShift v3.
This script assumes that your AWS credentials are set up in ~/.aws/credentials like this:
[default]
aws_access_key_id = xxxx
aws_secret_access_key = xxxx
Or that environment variables are setup:
AWS_ACCESS_KEY_ID=xxxx
AWS_SECRET_ACCESS_KEY=xxxx
"""
# Ignoring module name
# pylint: disable=invalid-name
import argparse
import os
import sys
import logging
from logging.handlers import RotatingFileHandler
from openshift_tools.cloud.aws.ebs_snapshotter import SUPPORTED_SCHEDULES, EbsSnapshotter
from openshift_tools.cloud.aws.ebs_util import EbsUtil
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
logFormatter = logging.Formatter('%(asctime)s %(levelname)s %(name)s %(message)s')
logFile = '/var/log/ec2-add-snapshot-tag-to-ebs-volumes.log'
logRFH = RotatingFileHandler(logFile, mode='a', maxBytes=2*1024*1024, backupCount=5, delay=0)
logRFH.setFormatter(logFormatter)
logRFH.setLevel(logging.INFO)
logger.addHandler(logRFH)
logConsole = logging.StreamHandler()
logConsole.setFormatter(logFormatter)
logConsole.setLevel(logging.WARNING)
logger.addHandler(logConsole)
TAGGER_SUPPORTED_SCHEDULES = ['never'] + SUPPORTED_SCHEDULES
ROOT_VOLUME_PURPOSE = "root volume"
DOCKER_VOLUME_PURPOSE = "docker storage volume"
PV_PURPOSE = "customer persistent volume"
class TaggerCli(object):
""" Implements the cli interface to the EBS snapshot tagger. """
def __init__(self):
self.args = None
self.parse_args()
if self.args.verbose:
logConsole.setLevel(logging.INFO)
if self.args.debug:
logConsole.setLevel(logging.DEBUG)
if self.args.skip_boto_logs:
logging.getLogger('boto').setLevel(logging.WARNING)
def parse_args(self):
""" parse the args from the cli """
parser = argparse.ArgumentParser(description='EBS Volume Tagger')
parser.add_argument('--master-root-volumes', choices=TAGGER_SUPPORTED_SCHEDULES,
help='The snapshot schedule that master root volumes ' + \
'should be tagged with.')
parser.add_argument('--node-root-volumes', choices=TAGGER_SUPPORTED_SCHEDULES,
help='The snapshot schedule that node root volumes ' + \
'should be tagged with.')
parser.add_argument('--docker-storage-volumes', choices=TAGGER_SUPPORTED_SCHEDULES,
help='The snapshot schedule that docker storage ' + \
'volumes should be tagged with.')
parser.add_argument('--autoprovisioned-pv-volumes', choices=TAGGER_SUPPORTED_SCHEDULES,
help='The snapshot schedule that autoprovisioned pv ' + \
'volumes should be tagged with.')
parser.add_argument('--manually-provisioned-pv-volumes', choices=TAGGER_SUPPORTED_SCHEDULES,
help='The snapshot schedule that manually provisioned pv ' + \
'volumes should be tagged with.')
parser.add_argument('--unidentified-volumes', choices=TAGGER_SUPPORTED_SCHEDULES,
help='The snapshot schedule that unidentified ' + \
'volumes should be tagged with.')
parser.add_argument('--set-name-tag', action='store_true', default=False,
help='Add the Name tag to volumes of the host where this ' + \
'volume is attached.')
parser.add_argument('--set-purpose-tag', action='store_true', default=False,
help='Add the purpose tag to volumes')
parser.add_argument('--retag-volumes', action='store_true', default=False,
help='Retag volumes that already have a snapshot tag. ' + \
'DANGEROUS - Only do this if you know what you\'re doing!')
parser.add_argument('--aws-creds-profile', required=False,
help='The AWS credentials profile to use.')
parser.add_argument('--dry-run', action='store_true', default=False,
help='Say what would have been done, but don\'t actually do it.')
parser.add_argument('-v', '--verbose', action='store_true', default=None, help='Verbose?')
parser.add_argument('--debug', action='store_true', default=None, help='Debug?')
parser.add_argument('--skip-boto-logs', action='store_true', default=False, help='Skip boto logs')
parser.add_argument('--region', required=True,
help='The region that we want to process snapshots in')
self.args = parser.parse_args()
def set_master_root_volume_tags(self, master_root_vol_ids, ebs_snapshotter, ebs_util):
""" Sets tags on master root volumes """
logger.debug("Setting master root volume tags:")
ebs_snapshotter.set_volume_snapshot_tag(master_root_vol_ids, self.args.master_root_volumes,
prefix=" ", dry_run=self.args.dry_run)
if self.args.set_purpose_tag:
ebs_util.set_volume_purpose_tag(master_root_vol_ids, ROOT_VOLUME_PURPOSE,
prefix=" ", dry_run=self.args.dry_run)
if self.args.set_name_tag:
ebs_util.set_volume_name_tag(master_root_vol_ids, prefix=" ", dry_run=self.args.dry_run)
def set_node_root_volume_tags(self, node_root_vol_ids, ebs_snapshotter, ebs_util):
""" Sets tags on node root volumes """
logger.debug("Setting node root volume tags:")
ebs_snapshotter.set_volume_snapshot_tag(node_root_vol_ids, self.args.node_root_volumes,
prefix=" ", dry_run=self.args.dry_run)
if self.args.set_purpose_tag:
ebs_util.set_volume_purpose_tag(node_root_vol_ids, ROOT_VOLUME_PURPOSE,
prefix=" ", dry_run=self.args.dry_run)
if self.args.set_name_tag:
ebs_util.set_volume_name_tag(node_root_vol_ids, prefix=" ", dry_run=self.args.dry_run)
def set_docker_storage_volume_tags(self, docker_storage_vol_ids, ebs_snapshotter, ebs_util):
""" Sets tags on docker storage volumes """
logger.debug("Setting docker storage volume tags:")
ebs_snapshotter.set_volume_snapshot_tag(docker_storage_vol_ids, self.args.docker_storage_volumes,
prefix=" ", dry_run=self.args.dry_run)
if self.args.set_purpose_tag:
ebs_util.set_volume_purpose_tag(docker_storage_vol_ids, DOCKER_VOLUME_PURPOSE,
prefix=" ", dry_run=self.args.dry_run)
if self.args.set_name_tag:
ebs_util.set_volume_name_tag(docker_storage_vol_ids, prefix=" ", dry_run=self.args.dry_run)
def set_manually_provisioned_pv_volume_tags(self, manually_provisioned_pv_vol_ids, ebs_snapshotter, ebs_util):
""" Sets tags on manually provisioned pv volumes """
logger.debug("Setting manually provisioned pv volume tags:")
ebs_snapshotter.set_volume_snapshot_tag(manually_provisioned_pv_vol_ids,
self.args.manually_provisioned_pv_volumes,
prefix=" ", dry_run=self.args.dry_run)
# NOTE: not setting Name tag because PVs don't belong to a specific host.
if self.args.set_purpose_tag:
ebs_util.set_volume_purpose_tag(manually_provisioned_pv_vol_ids, PV_PURPOSE,
prefix=" ", dry_run=self.args.dry_run)
def set_autoprovisioned_pv_volume_tags(self, autoprovisioned_pv_vol_ids, ebs_snapshotter, ebs_util):
""" Sets tags on autoprovisioned pv volumes """
logger.debug("Setting autoprovisioned pv volume tags:")
ebs_snapshotter.set_volume_snapshot_tag(autoprovisioned_pv_vol_ids,
self.args.autoprovisioned_pv_volumes,
prefix=" ", dry_run=self.args.dry_run)
# NOTE: not setting Name tag because PVs don't belong to a specific host.
if self.args.set_purpose_tag:
ebs_util.set_volume_purpose_tag(autoprovisioned_pv_vol_ids, PV_PURPOSE,
prefix=" ", dry_run=self.args.dry_run)
def set_unidentified_volume_tags(self, unidentified_vol_ids, ebs_snapshotter):
""" Sets tags on unidentified pv volumes """
logger.debug("Setting unidentified volume tags:")
ebs_snapshotter.set_volume_snapshot_tag(unidentified_vol_ids, self.args.unidentified_volumes,
prefix=" ", dry_run=self.args.dry_run)
# NOTE: not setting purpose tag because volumes are unidentified, so we don't know.
# NOTE: not setting Name tag because we don't know if it makes sense in this context.
def main(self):
""" Serves as the entry point for the CLI """
logger.info('Starting snapshot tagging')
if self.args.aws_creds_profile:
os.environ['AWS_PROFILE'] = self.args.aws_creds_profile
ebs_snapshotter = EbsSnapshotter(self.args.region, verbose=True)
if not ebs_snapshotter.is_region_valid(self.args.region):
logger.info("Invalid region")
sys.exit(1)
else:
logger.info("Region: %s:", self.args.region)
ebs_util = EbsUtil(self.args.region, verbose=True)
ebs_snapshotter = EbsSnapshotter(self.args.region, verbose=True)
# filter out the already tagged volumes
skip_volume_ids = []
if not self.args.retag_volumes:
# They don't want us to retag volumes that are already tagged, so
# add the already tagged volumes to the list of volume IDs to skip.
skip_volume_ids += ebs_snapshotter.get_already_tagged_volume_ids()
logger.info('Skipping this many volume ids: %s', len(skip_volume_ids))
vol_ids = ebs_util.get_classified_volume_ids(skip_volume_ids)
for id_name, id_list in vol_ids._asdict().iteritems():
logger.info('name: %s amount: %s', id_name, len(id_list))
## Actually create the snapshot tags now
if self.args.master_root_volumes and vol_ids.master_root:
self.set_master_root_volume_tags(vol_ids.master_root, ebs_snapshotter, ebs_util)
if self.args.node_root_volumes and vol_ids.node_root:
self.set_node_root_volume_tags(vol_ids.node_root, ebs_snapshotter, ebs_util)
if self.args.docker_storage_volumes and vol_ids.docker_storage:
self.set_docker_storage_volume_tags(vol_ids.docker_storage, ebs_snapshotter, ebs_util)
if self.args.manually_provisioned_pv_volumes and vol_ids.manually_provisioned_pv:
self.set_manually_provisioned_pv_volume_tags(vol_ids.manually_provisioned_pv,
ebs_snapshotter, ebs_util)
if self.args.autoprovisioned_pv_volumes and vol_ids.autoprovisioned_pv:
self.set_autoprovisioned_pv_volume_tags(vol_ids.autoprovisioned_pv, ebs_snapshotter,
ebs_util)
if self.args.unidentified_volumes and vol_ids.unidentified:
self.set_unidentified_volume_tags(vol_ids.unidentified, ebs_snapshotter)
if __name__ == "__main__":
TaggerCli().main()
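# Example invocation (illustrative only; the schedule names accepted by the
# --*-volumes options come from SUPPORTED_SCHEDULES plus 'never', so 'never' is
# used here as a value that is known to be valid):
#
#   ops-ec2-add-snapshot-tag-to-ebs-volumes.py --region us-east-1 \
#       --master-root-volumes never --node-root-volumes never \
#       --docker-storage-volumes never --set-name-tag --set-purpose-tag \
#       --dry-run
#
# With --dry-run the script only reports what it would have tagged, which is a
# safe way to preview the volume classification before applying any tags.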
|
extra_tests/snippets/stdlib_subprocess.py | dbrgn/RustPython | 11,058 | 25644 | import subprocess
import time
import sys
import signal
from testutils import assert_raises
is_unix = not sys.platform.startswith("win")
if is_unix:
def echo(text):
return ["echo", text]
def sleep(secs):
return ["sleep", str(secs)]
else:
def echo(text):
return ["cmd", "/C", f"echo {text}"]
def sleep(secs):
# TODO: make work in a non-unixy environment (something with timeout.exe?)
return ["sleep", str(secs)]
p = subprocess.Popen(echo("test"))
time.sleep(0.1)
assert p.returncode is None
assert p.poll() == 0
assert p.returncode == 0
p = subprocess.Popen(sleep(2))
assert p.poll() is None
with assert_raises(subprocess.TimeoutExpired):
assert p.wait(1)
p.wait()
assert p.returncode == 0
p = subprocess.Popen(echo("test"), stdout=subprocess.PIPE)
p.wait()
assert p.stdout.read().strip() == b"test"
p = subprocess.Popen(sleep(2))
p.terminate()
p.wait()
if is_unix:
assert p.returncode == -signal.SIGTERM
else:
assert p.returncode == 1
p = subprocess.Popen(sleep(2))
p.kill()
p.wait()
if is_unix:
assert p.returncode == -signal.SIGKILL
else:
assert p.returncode == 1
p = subprocess.Popen(echo("test"), stdout=subprocess.PIPE)
(stdout, stderr) = p.communicate()
assert stdout.strip() == b"test"
p = subprocess.Popen(sleep(5), stdout=subprocess.PIPE)
with assert_raises(subprocess.TimeoutExpired):
p.communicate(timeout=1)
|
webs/douban/tasks/__init__.py | billvsme/videoSpider | 216 | 25654 | from . import get_main_movies_base_data
from . import get_main_movies_full_data
from . import get_celebrities_full_data
from . import down_video_images
from . import down_celebrity_images
|
samples/waitforupdates.py | ArpitSharma2800/pyvmomi-community-samples | 931 | 25678 | <reponame>ArpitSharma2800/pyvmomi-community-samples
#!/usr/bin/env python
#
# VMware vSphere Python SDK
# Copyright (c) 2008-2021 VMware, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Sample Python program for monitoring property changes to objects of one
or more types
"""
import atexit
import collections
import sys
from pyVmomi import vim, vmodl
from tools import cli, service_instance, serviceutil
def parse_propspec(propspec):
"""
Parses property specifications. Returns sequence of 2-tuples, each
containing a managed object type and a list of properties applicable
to that type
:type propspec: collections.Sequence
:rtype: collections.Sequence
"""
props = []
for objspec in propspec:
if ':' not in objspec:
raise Exception('property specification \'%s\' does not contain '
'property list' % objspec)
objtype, objprops = objspec.split(':', 1)
motype = getattr(vim, objtype, None)
if motype is None:
raise Exception('referenced type \'%s\' in property specification '
'does not exist,\nconsult the managed object type '
'reference in the vSphere API documentation' %
objtype)
proplist = objprops.split(',')
props.append((motype, proplist,))
return props
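# Worked example of parse_propspec (shown as a comment so the module's
# behaviour is unchanged): the specification string
#   'VirtualMachine:name,runtime.powerState'
# parses to [(vim.VirtualMachine, ['name', 'runtime.powerState'])], i.e. one
# managed-object type paired with the list of property paths to monitor.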
def make_wait_options(max_wait_seconds=None, max_object_updates=None):
waitopts = vmodl.query.PropertyCollector.WaitOptions()
if max_object_updates is not None:
waitopts.maxObjectUpdates = max_object_updates
if max_wait_seconds is not None:
waitopts.maxWaitSeconds = max_wait_seconds
return waitopts
def make_property_collector(prop_collector, from_node, props):
"""
:type prop_collector: pyVmomi.VmomiSupport.vmodl.query.PropertyCollector
:type from_node: pyVmomi.VmomiSupport.ManagedObject
:type props: collections.Sequence
:rtype: pyVmomi.VmomiSupport.vmodl.query.PropertyCollector.Filter
"""
# Make the filter spec
filter_spec = vmodl.query.PropertyCollector.FilterSpec()
# Make the object spec
traversal = serviceutil.build_full_traversal()
obj_spec = vmodl.query.PropertyCollector.ObjectSpec(obj=from_node, selectSet=traversal)
obj_specs = [obj_spec]
filter_spec.objectSet = obj_specs
# Add the property specs
prop_set = []
for motype, proplist in props:
prop_spec = \
vmodl.query.PropertyCollector.PropertySpec(type=motype, all=False)
prop_spec.pathSet.extend(proplist)
prop_set.append(prop_spec)
filter_spec.propSet = prop_set
try:
pc_filter = prop_collector.CreateFilter(filter_spec, True)
atexit.register(pc_filter.Destroy)
return pc_filter
except vmodl.MethodFault as ex:
if ex._wsdlName == 'InvalidProperty':
print("InvalidProperty fault while creating PropertyCollector filter : %s"
% ex.name, file=sys.stderr)
else:
print("Problem creating PropertyCollector filter : %s"
% str(ex.faultMessage), file=sys.stderr)
raise
def monitor_property_changes(si, propspec, iterations=None):
"""
:type si: pyVmomi.VmomiSupport.vim.ServiceInstance
:type propspec: collections.Sequence
:type iterations: int or None
"""
prop_collector = si.content.propertyCollector
make_property_collector(prop_collector, si.content.rootFolder, propspec)
waitopts = make_wait_options(30)
version = ''
while True:
if iterations is not None:
if iterations <= 0:
print('Iteration limit reached, monitoring stopped')
break
result = prop_collector.WaitForUpdatesEx(version, waitopts)
# timeout, call again
if result is None:
continue
# process results
for filter_set in result.filterSet:
for object_set in filter_set.objectSet:
moref = getattr(object_set, 'obj', None)
assert moref is not None, \
'object moref should always be present in objectSet'
moref = str(moref).strip('\'')
kind = getattr(object_set, 'kind', None)
assert (
kind is not None
and kind in ('enter', 'modify', 'leave',)), \
'objectSet kind must be valid'
if kind in ('enter', 'modify'):
change_set = getattr(object_set, 'changeSet', None)
assert (change_set is not None
and isinstance(change_set, collections.Sequence)
and len(change_set) > 0), \
'enter or modify objectSet should have non-empty changeSet'
changes = []
for change in change_set:
name = getattr(change, 'name', None)
assert (name is not None), \
'changeset should contain property name'
val = getattr(change, 'val', None)
changes.append((name, val,))
print("== %s ==" % moref)
print('\n'.join(['%s: %s' % (n, v,) for n, v in changes]))
print('\n')
elif kind == 'leave':
print("== %s ==" % moref)
print('(removed)\n')
version = result.version
if iterations is not None:
iterations -= 1
def main():
"""
Sample Python program for monitoring property changes to objects of
one or more types to stdout
"""
parser = cli.Parser()
parser.set_epilog("""
Example usage:
waitforupdates.py -k -s vcenter -u root -p vmware -i 1 -P
VirtualMachine:name,summary.config.numCpu,runtime.powerState,config.uuid -P
    Datacenter:name -- This will fetch and print a few VM properties and the
name of the datacenters
""")
parser.add_custom_argument('--iterations', type=int, default=None,
action='store',
help="""
The number of updates to receive before exiting
, default is no limit. Must be 1 or more if specified.
""")
parser.add_custom_argument('--propspec', dest='propspec', required=True,
action='append',
help='Property specifications to monitor, e.g. '
'VirtualMachine:name,summary.config. Repetition '
'permitted')
args = parser.get_args()
if args.iterations is not None and args.iterations < 1:
parser.print_help()
print('\nInvalid argument: Iteration count must be omitted or greater than 0',
file=sys.stderr)
sys.exit(-1)
try:
si = service_instance.connect(args)
propspec = parse_propspec(args.propspec)
print("Monitoring property changes. Press ^C to exit")
monitor_property_changes(si, propspec, args.iterations)
except vmodl.MethodFault as ex:
print("Caught vmodl fault :\n%s" % str(ex), file=sys.stderr)
except Exception as ex:
print("Caught exception : " + str(ex), file=sys.stderr)
if __name__ == '__main__':
try:
main()
sys.exit(0)
except Exception as ex:
print("Caught exception : " + str(ex), file=sys.stderr)
except KeyboardInterrupt:
print("Exiting", file=sys.stderr)
sys.exit(0)
# vim: set ts=4 sw=4 expandtab filetype=python:
|
doubly_stochastic_dgp/layer_initializations.py | ayush29/Doubly-Stochastic-DGP | 126 | 25712 |
import tensorflow as tf
import numpy as np
from gpflow.params import DataHolder, Minibatch
from gpflow import autoflow, params_as_tensors, ParamList
from gpflow.models.model import Model
from gpflow.mean_functions import Identity, Linear
from gpflow.mean_functions import Zero
from gpflow.quadrature import mvhermgauss
from gpflow import settings
float_type = settings.float_type
from doubly_stochastic_dgp.layers import SVGP_Layer
def init_layers_linear(X, Y, Z, kernels,
num_outputs=None,
mean_function=Zero(),
Layer=SVGP_Layer,
white=False):
num_outputs = num_outputs or Y.shape[1]
layers = []
X_running, Z_running = X.copy(), Z.copy()
for kern_in, kern_out in zip(kernels[:-1], kernels[1:]):
dim_in = kern_in.input_dim
dim_out = kern_out.input_dim
print(dim_in, dim_out)
if dim_in == dim_out:
mf = Identity()
else:
if dim_in > dim_out: # stepping down, use the pca projection
_, _, V = np.linalg.svd(X_running, full_matrices=False)
W = V[:dim_out, :].T
else: # stepping up, use identity + padding
W = np.concatenate([np.eye(dim_in), np.zeros((dim_in, dim_out - dim_in))], 1)
mf = Linear(W)
mf.set_trainable(False)
layers.append(Layer(kern_in, Z_running, dim_out, mf, white=white))
if dim_in != dim_out:
Z_running = Z_running.dot(W)
X_running = X_running.dot(W)
# final layer
layers.append(Layer(kernels[-1], Z_running, num_outputs, mean_function, white=white))
return layers
def init_layers_input_prop(X, Y, Z, kernels,
num_outputs=None,
mean_function=Zero(),
Layer=SVGP_Layer,
white=False):
num_outputs = num_outputs or Y.shape[1]
D = X.shape[1]
M = Z.shape[0]
layers = []
for kern_in, kern_out in zip(kernels[:-1], kernels[1:]):
dim_in = kern_in.input_dim
dim_out = kern_out.input_dim - D
std_in = kern_in.variance.read_value()**0.5
pad = np.random.randn(M, dim_in - D) * 2. * std_in
Z_padded = np.concatenate([Z, pad], 1)
layers.append(Layer(kern_in, Z_padded, dim_out, Zero(), white=white, input_prop_dim=D))
dim_in = kernels[-1].input_dim
std_in = kernels[-2].variance.read_value()**0.5 if dim_in > D else 1.
pad = np.random.randn(M, dim_in - D) * 2. * std_in
Z_padded = np.concatenate([Z, pad], 1)
layers.append(Layer(kernels[-1], Z_padded, num_outputs, mean_function, white=white))
return layers
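if __name__ == "__main__":
    # Hedged sketch (not part of the package): reproduces, with plain NumPy, the
    # fixed linear mean-function matrix W that init_layers_linear builds when
    # consecutive layer widths differ. The data below is random demo input.
    X_demo = np.random.randn(100, 5)

    # Stepping down (5 -> 2): project onto the top right-singular vectors,
    # i.e. the PCA directions of the running inputs.
    _, _, V = np.linalg.svd(X_demo, full_matrices=False)
    W_down = V[:2, :].T                                          # shape (5, 2)

    # Stepping up (2 -> 4): identity on the existing dims plus zero padding.
    W_up = np.concatenate([np.eye(2), np.zeros((2, 4 - 2))], 1)  # shape (2, 4)

    print(W_down.shape, W_up.shape)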
|
anchore/anchore_policy.py | berez23/anchore | 401 | 25798 | import os
import json
import re
import sys
import logging
import hashlib
import uuid
import jsonschema
import tempfile
import controller
import anchore_utils
import anchore_auth
from anchore.util import contexts
_logger = logging.getLogger(__name__)
default_policy_version = '1_0'
default_whitelist_version = '1_0'
default_bundle_version = '1_0'
supported_whitelist_versions = [default_whitelist_version]
supported_bundle_versions = [default_bundle_version]
supported_policy_versions = [default_policy_version]
# interface operations
def check():
if not load_policymeta():
return (False, "policys are not initialized: please run 'anchore policys sync' and try again")
return (True, "success")
def sync_policymeta(bundlefile=None, outfile=None):
ret = {'success': False, 'text': "", 'status_code': 1}
policyurl = contexts['anchore_config']['policy_url']
policy_timeout = contexts['anchore_config']['policy_conn_timeout']
policy_maxretries = contexts['anchore_config']['policy_max_retries']
policymeta = {}
if bundlefile:
if not os.path.exists(bundlefile):
ret['text'] = "no such file ("+str(bundlefile)+")"
return(False, ret)
try:
with open(bundlefile, 'r') as FH:
policymeta = json.loads(FH.read())
except Exception as err:
ret['text'] = "synced policy bundle cannot be read/is not valid JSON: exception - " +str(err)
return(False, ret)
else:
record = anchore_auth.anchore_auth_get(contexts['anchore_auth'], policyurl, timeout=policy_timeout, retries=policy_maxretries)
if record['success']:
try:
bundleraw = json.loads(record['text'])
policymeta = bundleraw['bundle']
except Exception as err:
ret['text'] = 'failed to parse bundle response from service - exception: ' + str(err)
return(False, ret)
else:
_logger.debug("failed to download policybundle: message from server - " + str(record))
themsg = "unspecificied failure while attempting to download bundle from anchore.io"
try:
if record['status_code'] == 404:
themsg = "no policy bundle found on anchore.io - please create and save a policy using the policy editor in anchore.io and try again"
elif record['status_code'] == 401:
themsg = "cannot download a policy bundle from anchore.io - current user does not have access rights to download custom policies"
except Exception as err:
themsg = "exception while inspecting response from server - exception: " + str(err)
ret['text'] = "failed to download policybundle: " + str(themsg)
return(False, ret)
if not verify_policy_bundle(bundle=policymeta):
_logger.debug("downloaded policy bundle failed to verify: " +str(policymeta))
ret['text'] = "input policy bundle does not conform to policy bundle schema"
return(False, ret)
if outfile:
if outfile != '-':
try:
with open(outfile, 'w') as OFH:
OFH.write(json.dumps(policymeta))
except Exception as err:
ret['text'] = "could not write downloaded policy bundle to specified file ("+str(outfile)+") - exception: " + str(err)
return(False, ret)
else:
if not contexts['anchore_db'].save_policymeta(policymeta):
ret['text'] = "cannot get list of policies from service\nMessage from server: " + record['text']
return (False, ret)
if policymeta:
ret['text'] = json.dumps(policymeta, indent=4)
return(True, ret)
def load_policymeta(policymetafile=None):
ret = {}
if policymetafile:
with open(policymetafile, 'r') as FH:
ret = json.loads(FH.read())
else:
ret = contexts['anchore_db'].load_policymeta()
if not ret:
# use the system default
default_policy_bundle_file = os.path.join(contexts['anchore_config'].config_dir, 'anchore_default_bundle.json')
try:
if os.path.exists(default_policy_bundle_file):
with open(default_policy_bundle_file, 'r') as FH:
ret = json.loads(FH.read())
else:
raise Exception("no such file: " + str(default_policy_bundle_file))
except Exception as err:
_logger.warn("could not load default bundle (" + str(default_policy_bundle_file) + ") - exception: " + str(err))
raise err
return(ret)
def save_policymeta(policymeta):
return(contexts['anchore_db'].save_policymeta(policymeta))
# bundle
# Convert
def convert_to_policy_bundle(name="default", version=default_bundle_version, policy_file=None, policy_version=default_policy_version, whitelist_files=[], whitelist_version=default_whitelist_version):
policies = {}
p = read_policy(name=str(uuid.uuid4()), file=policy_file)
policies.update(p)
whitelists = {}
for wf in whitelist_files:
w = read_whitelist(name=str(uuid.uuid4()), file=wf)
whitelists.update(w)
    mappings = []
    m = create_mapping(map_name="default", policy_name=policies.keys()[0], whitelists=whitelists.keys(), repotagstring='*/*:*')
    mappings.append(m)
bundle = create_policy_bundle(name='default', policies=policies, policy_version=policy_version, whitelists=whitelists, whitelist_version=whitelist_version, mappings=mappings)
if not verify_policy_bundle(bundle=bundle):
return({})
return(bundle)
# C
def create_policy_bundle(name=None, version=default_bundle_version, policies={}, policy_version=default_policy_version, whitelists={}, whitelist_version=default_whitelist_version, mappings=[]):
ret = {
'id': str(uuid.uuid4()),
'name':name,
'version':version,
'policies':[],
'whitelists':[],
'mappings':[]
}
for f in policies:
el = {
'version':policy_version,
'id':f,
'name':f,
'rules':[]
}
el['rules'] = unformat_policy_data(policies[f])
ret['policies'].append(el)
for f in whitelists:
el = {
'version':whitelist_version,
'id':f,
'name':f,
'items':[]
}
el['items'] = unformat_whitelist_data(whitelists[f])
ret['whitelists'].append(el)
for m in mappings:
ret['mappings'].append(m)
_logger.debug("created bundle: ("+str(name)+") : " + json.dumps(ret.keys(), indent=4))
return(ret)
# R
def read_policy_bundle(bundle_file=None):
ret = {}
with open(bundle_file, 'r') as FH:
ret = json.loads(FH.read())
cleanstr = json.dumps(ret).encode('utf8')
ret = json.loads(cleanstr)
if not verify_policy_bundle(bundle=ret):
raise Exception("input bundle does not conform to bundle schema")
return(ret)
# V
def verify_policy_bundle(bundle={}):
bundle_schema = {}
try:
bundle_schema_file = os.path.join(contexts['anchore_config']['pkg_dir'], 'schemas', 'anchore-bundle.schema')
except:
from pkg_resources import Requirement, resource_filename
bundle_schema_file = os.path.join(resource_filename("anchore", ""), 'schemas', 'anchore-bundle.schema')
try:
if os.path.exists(bundle_schema_file):
with open (bundle_schema_file, "r") as FH:
bundle_schema = json.loads(FH.read())
except Exception as err:
_logger.error("could not load bundle schema: " + str(bundle_schema_file))
return(False)
if not bundle_schema:
_logger.error("could not load bundle schema: " + str(bundle_schema_file))
return(False)
else:
try:
jsonschema.validate(bundle, schema=bundle_schema)
except Exception as err:
_logger.error("could not validate bundle against schema: " + str(err))
return(False)
return(True)
# U
def update_policy_bundle(bundle={}, name=None, policies={}, whitelists={}, mappings={}):
if not verify_policy_bundle(bundle=bundle):
raise Exception("input bundle is incomplete - cannot update bad bundle: " + json.dumps(bundle, indent=4))
ret = {}
ret.update(bundle)
new_bundle = create_policy_bundle(name=name, policies=policies, whitelists=whitelists, mappings=mappings)
for key in ['name', 'policies', 'whitelists', 'mappings']:
if new_bundle[key]:
ret[key] = new_bundle.pop(key, ret[key])
return(ret)
# SAVE
def write_policy_bundle(bundle_file=None, bundle={}):
if not verify_policy_bundle(bundle=bundle):
raise Exception("cannot verify input policy bundle, skipping write: " + str(bundle_file))
with open(bundle_file, 'w') as OFH:
OFH.write(json.dumps(bundle))
return(True)
# mapping
# C
def create_mapping(map_name=None, policy_name=None, whitelists=[], repotagstring=None):
ret = {}
ret['name'] = map_name
ret['policy_id'] = policy_name
ret['whitelist_ids'] = whitelists
image_info = anchore_utils.get_all_image_info(repotagstring)
registry = image_info.pop('registry', "N/A")
repo = image_info.pop('repo', "N/A")
tag = image_info.pop('tag', "N/A")
imageId = image_info.pop('imageId', "N/A")
digest = image_info.pop('digest', "N/A")
ret['registry'] = registry
ret['repository'] = repo
ret['image'] = {
'type':'tag',
'value':tag
}
ret['id'] = str(uuid.uuid4())
return(ret)
# policy/wl
# V
def verify_whitelist(whitelistdata=[], version=default_whitelist_version):
ret = True
if not isinstance(whitelistdata, list):
ret = False
if version in supported_whitelist_versions:
# do 1_0 format/checks
pass
return(ret)
# R
def read_whitelist(name=None, file=None, version=default_whitelist_version):
if not name:
raise Exception("bad input: " + str(name) + " : " + str(file))
if file:
if not os.path.exists(file):
raise Exception("input file does not exist: " + str(file))
wdata = anchore_utils.read_plainfile_tolist(file)
if not verify_whitelist(whitelistdata=wdata, version=version):
raise Exception("cannot verify whitelist data read from file as valid")
else:
wdata = []
ret = {}
ret[name] = wdata
return(ret)
def structure_whitelist(whitelistdata):
ret = []
for item in whitelistdata:
try:
(k,v) = re.match("([^\s]*)\s*([^\s]*)", item).group(1,2)
if not re.match("^\s*#.*", k):
ret.append([k, v])
except Exception as err:
pass
return(ret)
def unformat_whitelist_data(wldata):
ret = []
whitelists = structure_whitelist(wldata)
for wlitem in whitelists:
gate, triggerId = wlitem
el = {
'gate':gate,
'trigger_id':triggerId,
'id':str(uuid.uuid4())
}
ret.append(el)
return(ret)
def format_whitelist_data(wldata):
ret = []
version = wldata['version']
if wldata['version'] == default_whitelist_version:
for item in wldata['items']:
ret.append(' '.join([item['gate'], item['trigger_id']]))
else:
raise Exception ("detected whitelist version format in bundle not supported: " + str(version))
return(ret)
def extract_whitelist_data(bundle, wlid):
for wl in bundle['whitelists']:
if wlid == wl['id']:
return(format_whitelist_data(wl))
# R
def read_policy(name=None, file=None, version=default_bundle_version):
if not name or not file:
raise Exception("input error")
if not os.path.exists(file):
raise Exception("input file does not exist: " + str(file))
pdata = anchore_utils.read_plainfile_tolist(file)
if not verify_policy(policydata=pdata, version=version):
raise Exception("cannot verify policy data read from file as valid")
ret = {}
ret[name] = pdata
return(ret)
def structure_policy(policydata):
policies = {}
for l in policydata:
l = l.strip()
patt = re.compile('^\s*#')
if (l and not patt.match(l)):
polinput = l.split(':')
module = polinput[0]
check = polinput[1]
action = polinput[2]
modparams = ""
if (len(polinput) > 3):
modparams = ':'.join(polinput[3:])
if module not in policies:
policies[module] = {}
if check not in policies[module]:
policies[module][check] = {}
if 'aptups' not in policies[module][check]:
policies[module][check]['aptups'] = []
aptup = [action, modparams]
if aptup not in policies[module][check]['aptups']:
policies[module][check]['aptups'].append(aptup)
policies[module][check]['action'] = action
policies[module][check]['params'] = modparams
return(policies)
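# Worked example (comment only; the gate and trigger names are hypothetical):
# a raw policy line such as
#   SOMEGATE:SOMETRIGGER:WARN:minscore=5
# is split on ':' into module, check, action and params, and folded into
#   {'SOMEGATE': {'SOMETRIGGER': {'aptups': [['WARN', 'minscore=5']],
#                                 'action': 'WARN',
#                                 'params': 'minscore=5'}}}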
# return a given policyId from a bundle in raw poldata format
def extract_policy_data(bundle, polid):
for pol in bundle['policies']:
if polid == pol['id']:
return(format_policy_data(pol))
# convert from policy bundle policy format to raw poldata format
def format_policy_data(poldata):
ret = []
version = poldata['version']
if poldata['version'] == default_policy_version:
for item in poldata['rules']:
polline = ':'.join([item['gate'], item['trigger'], item['action'], ""])
if 'params' in item:
for param in item['params']:
polline = polline + param['name'] + '=' + param['value'] + " "
ret.append(polline)
else:
raise Exception ("detected policy version format in bundle not supported: " + str(version))
return(ret)
# convert from raw poldata format to bundle format
def unformat_policy_data(poldata):
ret = []
policies = structure_policy(poldata)
for gate in policies.keys():
try:
for trigger in policies[gate].keys():
action = policies[gate][trigger]['action']
params = policies[gate][trigger]['params']
el = {
'gate':gate,
'trigger':trigger,
'action':action,
'params':[]
}
for p in params.split():
(k,v) = p.split("=")
el['params'].append({'name':k, 'value':v})
ret.append(el)
except Exception as err:
print str(err)
pass
return(ret)
# V
def verify_policy(policydata=[], version=default_policy_version):
ret = True
if not isinstance(policydata, list):
ret = False
if version in supported_policy_versions:
# do 1_0 format/checks
pass
return(ret)
def run_bundle(anchore_config=None, bundle={}, image=None, matchtags=[], stateless=False, show_whitelisted=True, show_triggerIds=True):
retecode = 0
if not anchore_config or not bundle or not image:
raise Exception("input error")
if not verify_policy_bundle(bundle=bundle):
raise Exception("input bundle does not conform to bundle schema")
imageId = anchore_utils.discover_imageId(image)
digests = []
if not matchtags:
matchtags = [image]
evalmap = {}
evalresults = {}
for matchtag in matchtags:
_logger.info("evaluating tag: " + str(matchtag))
mapping_results = get_mapping_actions(image=matchtag, imageId=imageId, in_digests=digests, bundle=bundle)
for pol,wl,polname,wlnames,mapmatch,match_json,evalhash in mapping_results:
evalmap[matchtag] = evalhash
_logger.debug("attempting eval: " + evalhash + " : " + matchtag)
if evalhash not in evalresults:
fnames = {}
try:
if stateless:
policies = structure_policy(pol)
whitelists = structure_whitelist(wl)
rc = execute_gates(imageId, policies)
result, fullresult = evaluate_gates_results(imageId, policies, {}, whitelists)
eval_result = structure_eval_results(imageId, fullresult, show_whitelisted=show_whitelisted, show_triggerIds=show_triggerIds, imageName=matchtag)
gate_result = {}
gate_result[imageId] = eval_result
else:
con = controller.Controller(anchore_config=anchore_config, imagelist=[imageId], allimages=contexts['anchore_allimages'], force=True)
for (fname, data) in [('tmppol', pol), ('tmpwl', wl)]:
fh, thefile = tempfile.mkstemp(dir=anchore_config['tmpdir'])
fnames[fname] = thefile
try:
with open(thefile, 'w') as OFH:
for l in data:
OFH.write(l + "\n")
except Exception as err:
raise err
finally:
os.close(fh)
gate_result = con.run_gates(policy=fnames['tmppol'], global_whitelist=fnames['tmpwl'], show_triggerIds=show_triggerIds, show_whitelisted=show_whitelisted)
evalel = {
'results': list(),
'policy_name':"N/A",
'whitelist_names':"N/A",
'policy_data':list(),
'whitelist_data':list(),
'mapmatch':"N/A",
'matched_mapping_rule': {}
}
evalel['results'] = gate_result
evalel['policy_name'] = polname
evalel['whitelist_names'] = wlnames
evalel['policy_data'] = pol
evalel['whitelist_data'] = wl
evalel['mapmatch'] = mapmatch
evalel['matched_mapping_rule'] = match_json
_logger.debug("caching eval result: " + evalhash + " : " + matchtag)
evalresults[evalhash] = evalel
ecode = result_get_highest_action(gate_result)
if ecode == 1:
retecode = 1
elif retecode == 0 and ecode > retecode:
retecode = ecode
except Exception as err:
_logger.error("policy evaluation error: " + str(err))
finally:
for f in fnames.keys():
if os.path.exists(fnames[f]):
os.remove(fnames[f])
else:
_logger.debug("skipping eval, result already cached: " + evalhash + " : " + matchtag)
ret = {}
for matchtag in matchtags:
ret[matchtag] = {}
ret[matchtag]['bundle_name'] = bundle['name']
try:
evalresult = evalresults[evalmap[matchtag]]
ret[matchtag]['evaluations'] = [evalresult]
except Exception as err:
raise err
return(ret, retecode)
def result_get_highest_action(results):
highest_action = 0
for k in results.keys():
action = results[k]['result']['final_action']
if action == 'STOP':
highest_action = 1
elif highest_action == 0 and action == 'WARN':
highest_action = 2
return(highest_action)
def get_mapping_actions(image=None, imageId=None, in_digests=[], bundle={}):
"""
Given an image, image_id, digests, and a bundle, determine which policies and whitelists to evaluate.
:param image: Image obj
:param imageId: image id string
:param in_digests: candidate digests
:param bundle: bundle dict to evaluate
:return: tuple of (policy_data, whitelist_data, policy_name, whitelist_names, matchstring, mapping_rule_json obj, evalhash)
"""
if not image or not bundle:
raise Exception("input error")
if not verify_policy_bundle(bundle=bundle):
raise Exception("input bundle does not conform to bundle schema")
ret = []
image_infos = []
image_info = anchore_utils.get_all_image_info(image)
if image_info and image_info not in image_infos:
image_infos.append(image_info)
for m in bundle['mappings']:
polname = m['policy_id']
wlnames = m['whitelist_ids']
for image_info in image_infos:
#_logger.info("IMAGE INFO: " + str(image_info))
ii = {}
ii.update(image_info)
registry = ii.pop('registry', "N/A")
repo = ii.pop('repo', "N/A")
tags = []
fulltag = ii.pop('fulltag', "N/A")
if fulltag != 'N/A':
tinfo = anchore_utils.parse_dockerimage_string(fulltag)
if 'tag' in tinfo and tinfo['tag']:
tag = tinfo['tag']
for t in [image, fulltag]:
tinfo = anchore_utils.parse_dockerimage_string(t)
if 'tag' in tinfo and tinfo['tag'] and tinfo['tag'] not in tags:
tags.append(tinfo['tag'])
digest = ii.pop('digest', "N/A")
digests = [digest]
for d in image_info['digests']:
dinfo = anchore_utils.parse_dockerimage_string(d)
if 'digest' in dinfo and dinfo['digest']:
digests.append(dinfo['digest'])
p_ids = []
p_names = []
for p in bundle['policies']:
p_ids.append(p['id'])
p_names.append(p['name'])
wl_ids = []
wl_names = []
for wl in bundle['whitelists']:
wl_ids.append(wl['id'])
wl_names.append(wl['name'])
if polname not in p_ids:
_logger.info("policy not in bundle: " + str(polname))
continue
skip=False
for wlname in wlnames:
if wlname not in wl_ids:
_logger.info("whitelist not in bundle" + str(wlname))
skip=True
if skip:
continue
mname = m['name']
mregistry = m['registry']
mrepo = m['repository']
if m['image']['type'] == 'tag':
mtag = m['image']['value']
mdigest = None
mimageId = None
elif m['image']['type'] == 'digest':
mdigest = m['image']['value']
mtag = None
mimageId = None
elif m['image']['type'] == 'id':
mimageId = m['image']['value']
mtag = None
mdigest = None
else:
mtag = mdigest = mimageId = None
mregistry_rematch = mregistry
mrepo_rematch = mrepo
mtag_rematch = mtag
try:
matchtoks = []
for tok in mregistry.split("*"):
matchtoks.append(re.escape(tok))
mregistry_rematch = "^" + '(.*)'.join(matchtoks) + "$"
matchtoks = []
for tok in mrepo.split("*"):
matchtoks.append(re.escape(tok))
mrepo_rematch = "^" + '(.*)'.join(matchtoks) + "$"
matchtoks = []
for tok in mtag.split("*"):
matchtoks.append(re.escape(tok))
mtag_rematch = "^" + '(.*)'.join(matchtoks) + "$"
except Exception as err:
_logger.error("could not set up regular expression matches for mapping check - exception: " + str(err))
_logger.debug("matchset: " + str([mregistry_rematch, mrepo_rematch, mtag_rematch]) + " : " + str([mregistry, mrepo, mtag]) + " : " + str([registry, repo, tag, tags]))
if registry == mregistry or mregistry == '*' or re.match(mregistry_rematch, registry):
_logger.debug("checking mapping for image ("+str(image_info)+") match.")
if repo == mrepo or mrepo == '*' or re.match(mrepo_rematch, repo):
doit = False
matchstring = mname + ": N/A"
if tag:
if False and (mtag == tag or mtag == '*' or mtag in tags or re.match(mtag_rematch, tag)):
matchstring = mname + ":" + ','.join([mregistry, mrepo, mtag])
doit = True
else:
for t in tags:
if re.match(mtag_rematch, t):
matchstring = mname + ":" + ','.join([mregistry, mrepo, mtag])
doit = True
break
if not doit and (digest and (mdigest == digest or mdigest in in_digests or mdigest in digests)):
matchstring = mname + ":" + ','.join([mregistry, mrepo, mdigest])
doit = True
if not doit and (imageId and (mimageId == imageId)):
matchstring = mname + ":" + ','.join([mregistry, mrepo, mimageId])
doit = True
matchstring = matchstring.encode('utf8')
if doit:
_logger.debug("match found for image ("+str(image_info)+") matchstring ("+str(matchstring)+")")
wldata = []
wldataset = set()
for wlname in wlnames:
wldataset = set(list(wldataset) + extract_whitelist_data(bundle, wlname))
wldata = list(wldataset)
poldata = extract_policy_data(bundle, polname)
wlnames.sort()
evalstr = ','.join([polname] + wlnames)
evalhash = hashlib.md5(evalstr).hexdigest()
ret.append( ( poldata, wldata, polname,wlnames, matchstring, m, evalhash) )
return(ret)
else:
_logger.debug("no match found for image ("+str(image_info)+") match.")
else:
_logger.debug("no match found for image ("+str(image_info)+") match.")
return(ret)
def execute_gates(imageId, policies, refresh=True):
import random
success = True
anchore_config = contexts['anchore_config']
imagename = imageId
gatesdir = '/'.join([anchore_config["scripts_dir"], "gates"])
workingdir = '/'.join([anchore_config['anchore_data_dir'], 'querytmp'])
outputdir = workingdir
_logger.info(imageId + ": evaluating policies...")
for d in [outputdir, workingdir]:
if not os.path.exists(d):
os.makedirs(d)
imgfile = '/'.join([workingdir, "queryimages." + str(random.randint(0, 99999999))])
anchore_utils.write_plainfile_fromstr(imgfile, imageId)
try:
gmanifest, failedgates = anchore_utils.generate_gates_manifest()
if failedgates:
_logger.error("some gates failed to run - check the gate(s) modules for errors: " + str(','.join(failedgates)))
success = False
else:
success = True
for gatecheck in policies.keys():
# get all commands that match the gatecheck
gcommands = []
for gkey in gmanifest.keys():
if gmanifest[gkey]['gatename'] == gatecheck:
gcommands.append(gkey)
# assemble the params from the input policy for this gatecheck
params = []
for trigger in policies[gatecheck].keys():
if 'params' in policies[gatecheck][trigger] and policies[gatecheck][trigger]['params']:
params.append(policies[gatecheck][trigger]['params'])
if not params:
params = ['all']
if gcommands:
for command in gcommands:
cmd = [command] + [imgfile, anchore_config['image_data_store'], outputdir] + params
_logger.debug("running gate command: " + str(' '.join(cmd)))
(rc, sout, cmdstring) = anchore_utils.run_command(cmd)
if rc:
_logger.error("FAILED")
_logger.error("\tCMD: " + str(cmdstring))
_logger.error("\tEXITCODE: " + str(rc))
_logger.error("\tOUTPUT: " + str(sout))
success = False
else:
_logger.debug("")
_logger.debug("\tCMD: " + str(cmdstring))
_logger.debug("\tEXITCODE: " + str(rc))
_logger.debug("\tOUTPUT: " + str(sout))
_logger.debug("")
else:
_logger.warn("WARNING: gatecheck ("+str(gatecheck)+") line in policy, but no gates were found that match this gatecheck")
except Exception as err:
_logger.error("gate evaluation failed - exception: " + str(err))
finally:
if imgfile and os.path.exists(imgfile):
try:
os.remove(imgfile)
except:
_logger.error("could not remove tempfile: " + str(imgfile))
if success:
report = generate_gates_report(imageId)
contexts['anchore_db'].save_gates_report(imageId, report)
_logger.info(imageId + ": evaluated.")
return(success)
def generate_gates_report(imageId):
# this routine reads the results of image gates and generates a formatted report
report = {}
outputs = contexts['anchore_db'].list_gate_outputs(imageId)
for d in outputs:
report[d] = contexts['anchore_db'].load_gate_output(imageId, d)
return(report)
def evaluate_gates_results(imageId, policies, image_whitelist, global_whitelist):
ret = list()
fullret = list()
final_gate_action = 'GO'
for m in policies.keys():
gdata = contexts['anchore_db'].load_gate_output(imageId, m)
for l in gdata:
(k, v) = re.match('(\S*)\s*(.*)', l).group(1, 2)
imageId = imageId
check = m
trigger = k
output = v
triggerId = hashlib.md5(''.join([check,trigger,output])).hexdigest()
# if the output is structured (i.e. decoded as an
# anchore compatible json string) then extract the
# elements for display
try:
json_output = json.loads(output)
if 'id' in json_output:
triggerId = str(json_output['id'])
if 'desc' in json_output:
output = str(json_output['desc'])
except:
pass
if k in policies[m]:
trigger = k
action = policies[check][trigger]['action']
r = {'imageId':imageId, 'check':check, 'triggerId':triggerId, 'trigger':trigger, 'output':output, 'action':action}
# this is where whitelist check should go
whitelisted = False
whitelist_type = "none"
if global_whitelist and ([m, triggerId] in global_whitelist):
whitelisted = True
whitelist_type = "global"
elif image_whitelist and 'ignore' in image_whitelist and (r in image_whitelist['ignore']):
whitelisted = True
whitelist_type = "image"
else:
# look for prefix wildcards
try:
for [gmod, gtriggerId] in global_whitelist:
if gmod == m:
# special case for backward compat
try:
if gmod == 'ANCHORESEC' and not re.match(".*\*.*", gtriggerId) and re.match("^CVE.*|^RHSA.*", gtriggerId):
gtriggerId = gtriggerId + "*"
except Exception as err:
_logger.warn("problem with backward compat modification of whitelist trigger - exception: " + str(err))
matchtoks = []
for tok in gtriggerId.split("*"):
matchtoks.append(re.escape(tok))
rematch = "^" + '(.*)'.join(matchtoks) + "$"
_logger.debug("checking regexp wl<->triggerId for match: " + str(rematch) + " : " + str(triggerId))
if re.match(rematch, triggerId):
_logger.debug("found wildcard whitelist match")
whitelisted = True
whitelist_type = "global"
break
except Exception as err:
_logger.warn("problem with prefix wildcard match routine - exception: " + str(err))
fullr = {}
fullr.update(r)
fullr['whitelisted'] = whitelisted
fullr['whitelist_type'] = whitelist_type
fullret.append(fullr)
if not whitelisted:
if policies[m][k]['action'] == 'STOP':
final_gate_action = 'STOP'
elif final_gate_action != 'STOP' and policies[m][k]['action'] == 'WARN':
final_gate_action = 'WARN'
ret.append(r)
else:
# whitelisted, skip evaluation
pass
ret.append({'imageId':imageId, 'check':'FINAL', 'trigger':'FINAL', 'output':"", 'action':final_gate_action})
fullret.append({'imageId':imageId, 'check':'FINAL', 'trigger':'FINAL', 'output':"", 'action':final_gate_action, 'whitelisted':False, 'whitelist_type':"none", 'triggerId':"N/A"})
return(ret, fullret)
def structure_eval_results(imageId, evalresults, show_triggerIds=False, show_whitelisted=False, imageName=None):
if not imageName:
imageName = imageId
record = {}
record['result'] = {}
record['result']['header'] = ['Image_Id', 'Repo_Tag']
if show_triggerIds:
record['result']['header'].append('Trigger_Id')
record['result']['header'] += ['Gate', 'Trigger', 'Check_Output', 'Gate_Action']
if show_whitelisted:
record['result']['header'].append('Whitelisted')
record['result']['rows'] = list()
for m in evalresults:
id = imageId
name = imageName
gate = m['check']
trigger = m['trigger']
output = m['output']
triggerId = m['triggerId']
action = m['action']
row = [id[0:12], name]
if show_triggerIds:
row.append(triggerId)
row += [gate, trigger, output, action]
if show_whitelisted:
row.append(m['whitelist_type'])
if not m['whitelisted'] or show_whitelisted:
record['result']['rows'].append(row)
if gate == 'FINAL':
record['result']['final_action'] = action
return(record)
# small test
if __name__ == '__main__':
from anchore.configuration import AnchoreConfiguration
config = AnchoreConfiguration(cliargs={})
anchore_utils.anchore_common_context_setup(config)
policies = {}
whitelists = {}
mappings = []
pol0 = read_policy(name=str(uuid.uuid4()), file='/root/.anchore/conf/anchore_gate.policy')
pol1 = read_policy(name=str(uuid.uuid4()), file='/root/.anchore/conf/anchore_gate.policy')
policies.update(pol0)
policies.update(pol1)
gl0 = read_whitelist(name=str(uuid.uuid4()))
wl0 = read_whitelist(name=str(uuid.uuid4()), file='/root/wl0')
whitelists.update(gl0)
whitelists.update(wl0)
map0 = create_mapping(map_name="default", policy_name=policies.keys()[0], whitelists=whitelists.keys(), repotagstring='*/*:*')
mappings.append(map0)
bundle = create_policy_bundle(name='default', policies=policies, policy_version=default_policy_version, whitelists=whitelists, whitelist_version=default_whitelist_version, mappings=mappings)
print "CREATED BUNDLE: " + json.dumps(bundle, indent=4)
rc = write_policy_bundle(bundle_file="/tmp/bun.json", bundle=bundle)
newbun = read_policy_bundle(bundle_file="/tmp/bun.json")
if newbun != bundle:
print "BUNDLE RESULT DIFFERENT AFTER SAVE/LOAD"
thebun = convert_to_policy_bundle(name='default', policy_file='/root/.anchore/conf/anchore_gate.policy', policy_version=default_policy_version, whitelist_files=['/root/wl0'], whitelist_version=default_whitelist_version)
rc = write_policy_bundle(bundle_file="/tmp/bun1.json", bundle=thebun)
pol0 = read_policy(name="meh", file='/root/.anchore/conf/anchore_gate.policy')
policies = structure_policy(pol0['meh'])
#rc = execute_gates("4a415e3663882fbc554ee830889c68a33b3585503892cc718a4698e91ef2a526", policies)
result, image_ecode = run_bundle(anchore_config=config, image='alpine', matchtags=[], bundle=thebun)
with open("/tmp/a", 'w') as OFH:
OFH.write(json.dumps(result, indent=4))
try:
result, image_ecode = run_bundle_stateless(anchore_config=config, image='alpine', matchtags=[], bundle=thebun)
with open("/tmp/b", 'w') as OFH:
OFH.write(json.dumps(result, indent=4))
except Exception as err:
import traceback
traceback.print_exc()
print str(err)
|
dbModel.py | eric033014/Line-bot | 104 | 25826 | from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_script import Manager
from flask_migrate import Migrate, MigrateCommand
app = Flask(__name__)
app.config[
'SQLALCHEMY_DATABASE_URI'] = 'postgres://xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'
db = SQLAlchemy(app)
migrate = Migrate(app, db)
manager = Manager(app)
manager.add_command('db', MigrateCommand)
class UserData(db.Model):
__tablename__ = 'UserData'
Id = db.Column(db.Integer, primary_key=True)
Name = db.Column(db.String(64))
Description = db.Column(db.String(256))
CreateDate = db.Column(db.DateTime)
def __init__(self
, Name
, Description
, CreateDate
):
self.Name = Name
self.Description = Description
self.CreateDate = CreateDate
if __name__ == '__main__':
manager.run()
|
GlueCustomConnectors/glueJobValidation/glue_job_validation_update.py | xy1m/aws-glue-samples | 925 | 25875 | <gh_stars>100-1000
# Copyright 2016-2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: MIT-0
import sys
from awsglue.utils import getResolvedOptions
from awsglue.transforms import *
from pyspark.context import SparkContext
from awsglue.context import GlueContext
from awsglue.job import Job
from pyspark import SparkConf
from awsglue.dynamicframe import DynamicFrame
from awsglue.gluetypes import Field, IntegerType, TimestampType, StructType
## @params: [JOB_NAME]
args = getResolvedOptions(sys.argv, ['JOB_NAME'])
sc = SparkContext()
glueContext = GlueContext(sc)
spark = glueContext.spark_session
job = Job(glueContext)
job.init(args['JOB_NAME'], args)
######################################## test connection options ########################################
## please pick up and customize the right connection options for testing
## If you are using a large testing data set, please consider using column partitioning to parallelize the data reading for better performance.
# DataSourceTest - - please configure according to your connector type and options
options_dataSourceTest_jdbc = {
"query": "select NumberOfEmployees, CreatedDate from Account",
"className" : "partner.jdbc.some.Driver",
# test parameters
"url": "jdbc:some:url:SecurityToken=abc;",
"user": "user",
"password": "password",
}
# ColumnPartitioningTest
# for JDBC connector only
options_columnPartitioningTest = {
"query": "select NumberOfEmployees, CreatedDate from Account where ",
"url": "jdbc:some:url:user=${user};Password=${Password};SecurityToken=${SecurityToken};",
"secretId": "test-partner-driver",
"className" : "partner.jdbc.some.Driver",
# test parameters
"partitionColumn" : "RecordId__c",
"lowerBound" : "0",
"upperBound" : "13",
"numPartitions" : "2",
}
# DataTypeMappingTest
# for JDBC connector only
options_dataTypeMappingTest = {
"query" : "select NumberOfEmployees, CreatedDate from Account where ",
"url" : "jdbc:some:url:user=${user};Password=${Password};SecurityToken=${SecurityToken};",
"secretId" : "test-partner-driver",
"className" : "partner.jdbc.some.Driver",
# test parameter
"dataTypeMapping": {"INTEGER" : "STRING"}
}
# DbtableQueryTest
# for JDBC connector only
options_dbtableQueryTest = {
"url" : "jdbc:some:url:user=${user};Password=${Password};SecurityToken=${SecurityToken};",
"secretId" : "test-partner-driver",
"className" : "partner.jdbc.some.Driver",
# test parameter
"query": "select NumberOfEmployees, CreatedDate from Account"
# "dbTable" : "Account"
}
# JDBCUrlTest - extra jdbc connections UseBulkAPI appended
# for JDBC connector only
options_JDBCUrlTest = {
"query": "select NumberOfEmployees, CreatedDate from Account",
"secretId": "test-partner-driver",
"className" : "partner.jdbc.some.Driver",
# test parameter
"url": "jdbc:some:url:user=${user};Password=${Password};SecurityToken=${SecurityToken};UseBulkAPI=true",
}
# SecretsManagerTest - - please configure according to your connector type and options
options_secretsManagerTest = {
"query": "select NumberOfEmployees, CreatedDate from Account",
"url": "jdbc:some:url:user=${user};Password=${Password};SecurityToken=${SecurityToken};",
"className" : "partner.jdbc.some.Driver",
# test parameter
"secretId": "test-partner-driver"
}
# FilterPredicateTest
# for JDBC connector only
options_filterPredicateTest = {
"query": "select NumberOfEmployees, CreatedDate from Account where",
"url": "jdbc:some:url:user=${user};Password=${Password};SecurityToken=${SecurityToken};",
"secretId": "test-partner-driver",
"className" : "partner.jdbc.some.Driver",
# test parameter
"filterPredicate": "BillingState='CA'"
}
##################################### read data from data source ######################################
datasource0 = glueContext.create_dynamic_frame_from_options(
connection_type = "marketplace.jdbc",
connection_options = options_secretsManagerTest) # pick up the right test conection options
######################################## validate data reading ########################################
## validate data schema and count
# more data type: https://docs.aws.amazon.com/glue/latest/dg/aws-glue-api-crawler-pyspark-extensions-types.html
expected_schema = StructType([Field("NumberOfEmployees", IntegerType()), Field("CreatedDate", TimestampType())])
expected_count = 2
assert datasource0.schema() == expected_schema
print("expected schema: " + str(expected_schema.jsonValue()))
print("result schema: " + str(datasource0.schema().jsonValue()))
print("result schema in tree structure: ")
datasource0.printSchema()
## validate that the data count is equal to the expected count
assert datasource0.count() == expected_count
print("expected record count: " + str(expected_count))
print("result record count: " + str(datasource0.count()))
######################################## write data to s3 ########################################
datasource0.write(
connection_type="s3",
connection_options = {"path": "s3://your/output/path/"},
format="json"
)
######################################## DataSinkTest ########################################
## Create a DynamicFrame on the fly
jsonStrings = ['{"Name":"Andrew"}']
rdd = sc.parallelize(jsonStrings)
sql_df = spark.read.json(rdd)
df = DynamicFrame.fromDF(sql_df, glueContext, "new_dynamic_frame")
## DataSinkTest options
options_dataSinkTest = {
"secretId": "test-partner-driver",
"dbtable" : "Account",
"className" : "partner.jdbc.some.Driver",
"url": "jdbc:some:url:user=${user};Password=${Password};SecurityToken=${SecurityToken};"
}
## Write to data target
glueContext.write_dynamic_frame.from_options(frame = df,
connection_type = "marketplace.jdbc",
connection_options = options_dataSinkTest)
## write validation
# You may check data in the database side.
# You may also refer to 'read data from data source' and 'validate data reading' part to compose your own validation logics.
job.commit() |
code/utils/losses_2.py | mantuoluozk/MFC | 161 | 25887 | <filename>code/utils/losses_2.py
import torch
import numpy as np
from scipy.ndimage import distance_transform_edt as distance
from skimage import segmentation as skimage_seg
def compute_dtm(img_gt, out_shape, normalize=False, fg=False):
"""
compute the distance transform map of foreground in binary mask
input: segmentation, shape = (batch_size, x, y, z)
output: the foreground Distance Transform Map (DTM)
dtm(x) = 0, if x is on the segmentation boundary
inf_y ||x-y|| over boundary points y, otherwise
"""
fg_dtm = np.zeros(out_shape)
for b in range(out_shape[0]): # batch size
posmask = img_gt[b].astype(np.bool)
if not fg:
if posmask.any():
negmask = 1 - posmask
posdis = distance(posmask)
negdis = distance(negmask)
boundary = skimage_seg.find_boundaries(posmask, mode='inner').astype(np.uint8)
if normalize:
fg_dtm[b] = (negdis-np.min(negdis))/(np.max(negdis)-np.min(negdis)) + (posdis-np.min(posdis))/(np.max(posdis)-np.min(posdis))
else:
fg_dtm[b] = posdis + negdis
fg_dtm[b][boundary==1] = 0
else:
if posmask.any():
posdis = distance(posmask)
boundary = skimage_seg.find_boundaries(posmask, mode='inner').astype(np.uint8)
if normalize:
fg_dtm[b] = (posdis-np.min(posdis))/(np.max(posdis)-np.min(posdis))
else:
fg_dtm[b] = posdis
fg_dtm[b][boundary==1] = 0
return fg_dtm
def hd_loss(seg_soft, gt, gt_dtm=None, one_side=True, seg_dtm=None):
"""
compute Hausdorff distance (HD) loss for binary segmentation
input: seg_soft: softmax results, shape=(b,x,y,z)
gt: ground truth, shape=(b,x,y,z)
seg_dtm: segmentation distance transform map; shape=(b,x,y,z)
gt_dtm: ground truth distance transform map; shape=(b,x,y,z)
output: hd_loss; scalar
"""
delta_s = (seg_soft - gt.float()) ** 2
g_dtm = gt_dtm ** 2
dtm = g_dtm if one_side else g_dtm + seg_dtm ** 2
multipled = torch.einsum('bxyz, bxyz->bxyz', delta_s, dtm)
# hd_loss = multipled.sum()*1.0/(gt_dtm > 0).sum()
hd_loss = multipled.mean()
return hd_loss
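# Usage sketch (illustrative, with assumed tensor names): seg_soft and gt are
# torch tensors of shape (b, x, y, z); the ground-truth DTM is computed with
# compute_dtm above and moved onto the same device before calling hd_loss.
#
# gt_np = gt.cpu().numpy()
# gt_dtm = torch.from_numpy(compute_dtm(gt_np, gt_np.shape)).float().to(seg_soft.device)
# loss = hd_loss(seg_soft, gt, gt_dtm=gt_dtm, one_side=True)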
def save_sdf(gt_path=None):
'''
generate SDM for gt segmentation
'''
import nibabel as nib
dir_path = 'C:/Seolen/PycharmProjects/semi_seg/semantic-semi-supervised-master/model/gan_sdfloss3D_0229_04/test'
gt_path = dir_path + '/00_gt.nii.gz'
gt_img = nib.load(gt_path)
gt = gt_img.get_data().astype(np.uint8)
posmask = gt.astype(np.bool)
negmask = ~posmask
posdis = distance(posmask)
negdis = distance(negmask)
boundary = skimage_seg.find_boundaries(posmask, mode='inner').astype(np.uint8)
# sdf = (negdis - np.min(negdis)) / (np.max(negdis) - np.min(negdis)) - (posdis - np.min(posdis)) / ( np.max(posdis) - np.min(posdis))
sdf = (posdis - np.min(posdis)) / ( np.max(posdis) - np.min(posdis))
sdf[boundary==1] = 0
sdf = sdf.astype(np.float32)
sdf = nib.Nifti1Image(sdf, gt_img.affine)
save_path = dir_path + '/00_sdm_pos.nii.gz'
nib.save(sdf, save_path)
def compute_sdf(img_gt, out_shape):
"""
compute the signed distance map of binary mask
input: segmentation, shape = (batch_size, x, y, z)
output: the Signed Distance Map (SDM)
sdf(x) = 0, if x is on the segmentation boundary
-inf_y ||x-y||, if x is inside the segmentation
+inf_y ||x-y||, if x is outside the segmentation
the sdf is normalized to [-1, 1]
"""
img_gt = img_gt.astype(np.uint8)
normalized_sdf = np.zeros(out_shape)
for b in range(out_shape[0]): # batch size
posmask = img_gt[b].astype(np.bool)
if posmask.any():
negmask = ~posmask
posdis = distance(posmask)
negdis = distance(negmask)
boundary = skimage_seg.find_boundaries(posmask, mode='inner').astype(np.uint8)
sdf = (negdis-np.min(negdis))/(np.max(negdis)-np.min(negdis)) - (posdis-np.min(posdis))/(np.max(posdis)-np.min(posdis))
sdf[boundary==1] = 0
normalized_sdf[b] = sdf
# assert np.min(sdf) == -1.0, print(np.min(posdis), np.max(posdis), np.min(negdis), np.max(negdis))
# assert np.max(sdf) == 1.0, print(np.min(posdis), np.min(negdis), np.max(posdis), np.max(negdis))
return normalized_sdf
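# Example (illustrative): compute_sdf on a tiny synthetic batch. Values lie in
# [-1, 1], negative inside the object, positive outside, and 0 on the boundary.
#
# gt = np.zeros((1, 8, 8, 8), dtype=np.uint8)
# gt[0, 2:6, 2:6, 2:6] = 1
# sdf = compute_sdf(gt, gt.shape)  # shape (1, 8, 8, 8)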
def sdf_loss(net_output, gt_sdm):
# print('net_output.shape, gt_sdm.shape', net_output.shape, gt_sdm.shape)
# ([4, 1, 112, 112, 80])
smooth = 1e-5
# compute eq (4)
intersect = torch.sum(net_output * gt_sdm)
pd_sum = torch.sum(net_output ** 2)
gt_sum = torch.sum(gt_sdm ** 2)
L_product = (intersect + smooth) / (intersect + pd_sum + gt_sum + smooth)
# print('L_product.shape', L_product.shape) (4,2)
L_SDF = 1/3 - L_product + torch.norm(net_output - gt_sdm, 1)/torch.numel(net_output)
return L_SDF
def boundary_loss(outputs_soft, gt_sdf):
"""
compute boundary loss for binary segmentation
input: outputs_soft: sigmoid results, shape=(b,2,x,y,z)
gt_sdf: sdf of ground truth (can be original or normalized sdf); shape=(b,2,x,y,z)
output: boundary_loss; sclar
"""
pc = outputs_soft[:,1,...]
dc = gt_sdf[:,1,...]
multipled = torch.einsum('bxyz, bxyz->bxyz', pc, dc)
bd_loss = multipled.mean()
return bd_loss
if __name__ == '__main__':
save_sdf() |
dayu_widgets/radio_button.py | ZSD-tim/dayu_widgets | 157 | 25902 | <reponame>ZSD-tim/dayu_widgets
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###################################################################
# Author: <NAME>
# Date : 2019.2
# Email : <EMAIL>
###################################################################
"""
MRadioButton
"""
from dayu_widgets.mixin import cursor_mixin
from dayu_widgets.qt import QRadioButton
@cursor_mixin
class MRadioButton(QRadioButton):
"""
MRadioButton just use stylesheet and set cursor shape when hover. No more extend.
"""
def __init__(self, text='', parent=None):
super(MRadioButton, self).__init__(text=text, parent=parent)
|
desktop/core/ext-py/docutils-0.14/test/test_parsers/test_parser.py | kokosing/hue | 5,079 | 25936 | <filename>desktop/core/ext-py/docutils-0.14/test/test_parsers/test_parser.py
#! /usr/bin/env python
# $Id: test_parser.py 7463 2012-06-22 19:49:51Z milde $
# Author: <NAME> <strank(AT)strank(DOT)info>
# Copyright: This module has been placed in the public domain.
"""
Tests for basic functionality of parser classes.
"""
import sys
import unittest
import DocutilsTestSupport # must be imported before docutils
import docutils
from docutils import parsers, utils, frontend
from docutils._compat import b
class RstParserTests(unittest.TestCase):
def test_inputrestrictions(self):
parser_class = parsers.get_parser_class('rst')
parser = parser_class()
document = utils.new_document('test data', frontend.OptionParser(
components=(parser, )).get_default_values())
if sys.version_info < (3,):
# supplying string input is supported, but only if ascii-decodable
self.assertRaises(UnicodeDecodeError,
parser.parse, b('hol%s' % chr(224)), document)
else:
# input must be unicode at all times
self.assertRaises(TypeError, parser.parse, b('hol'), document)
if __name__ == '__main__':
unittest.main()
|
ros/genpy/src/genpy/msg/__init__.py | numberen/apollo-platform | 742 | 25968 | <reponame>numberen/apollo-platform
from ._TestFillEmbedTime import *
from ._TestFillSimple import *
from ._TestManyFields import *
from ._TestMsgArray import *
from ._TestPrimitiveArray import *
from ._TestString import *
|
models/treebased/data/data_generator.py | ziyoujiyi/PaddleRec | 2,739 | 25973 | <gh_stars>1000+
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import numpy as np
import sys
import os
import argparse
import json
import random
import multiprocessing as mp
def mp_run(data, process_num, func, *args):
""" run func with multi process
"""
level_start = time.time()
partn = max(len(data) / process_num, 1)
start = 0
p_idx = 0
ps = []
while start < len(data):
local_data = data[start:start + partn]
start += partn
p = mp.Process(target=func, args=(local_data, p_idx) + args)
ps.append(p)
p.start()
p_idx += 1
for p in ps:
p.join()
for p in ps:
p.terminate()
return p_idx
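# Usage sketch (illustrative): func is called as func(local_data, p_idx, *args),
# where local_data is a slice of data and p_idx is the worker index.
#
# def worker(local_data, p_idx, out_dir):
#     with open("{}/part_{}".format(out_dir, p_idx), "w") as f:
#         f.write("\n".join(str(x) for x in local_data))
#
# mp_run(list(range(1000)), 4, worker, "/tmp/out")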
def read(train_data_file, test_data_file):
behavior_dict = dict()
train_sample = dict()
test_sample = dict()
user_id = list()
item_id = list()
cat_id = list()
behav_id = list()
timestamp = list()
start = time.time()
itobj = zip([train_data_file, test_data_file], [train_sample, test_sample])
for filename, sample in itobj:
with open(filename, 'rb') as f:
for line in f:
arr = line.strip().split(',')
if len(arr) != 5:
break
user_id.append(int(arr[0]))
item_id.append(int(arr[1]))
cat_id.append(int(arr[2]))
if arr[3] not in behavior_dict:
i = len(behavior_dict)
behavior_dict[arr[3]] = i
behav_id.append(behavior_dict[arr[3]])
timestamp.append(int(arr[4]))
sample["USERID"] = np.array(user_id)
sample["ITEMID"] = np.array(item_id)
sample["CATID"] = np.array(cat_id)
sample["BEHAV"] = np.array(behav_id)
sample["TS"] = np.array(timestamp)
user_id = []
item_id = []
cat_id = []
behav_id = []
timestamp = []
print("Read data done, {} train records, {} test records"
", elapsed: {}".format(
len(train_sample["USERID"]),
len(test_sample["USERID"]), time.time() - start))
return behavior_dict, train_sample, test_sample
def gen_user_his_behave(train_sample):
user_his_behav = dict()
iterobj = zip(train_sample["USERID"], train_sample["ITEMID"],
train_sample["TS"])
for user_id, item_id, ts in iterobj:
if user_id not in user_his_behav:
user_his_behav[user_id] = list()
user_his_behav[user_id].append((item_id, ts))
for _, value in user_his_behav.items():
value.sort(key=lambda x: x[1])
return user_his_behav
def split_train_sample(train_dir, train_sample_seg_cnt):
segment_filenames = []
segment_files = []
for i in range(train_sample_seg_cnt):
filename = "{}/part_{}".format(train_dir, i)
segment_filenames.append(filename)
segment_files.append(open(filename, 'wb'))
with open("train_tmp", 'rb') as fi:
for line in fi:
i = random.randint(0, train_sample_seg_cnt - 1)
segment_files[i].write(line)
for f in segment_files:
f.close()
os.remove("train_tmp")
# Shuffle
for fn in segment_filenames:
lines = []
with open(fn, 'rb') as f:
for line in f:
lines.append(line)
random.shuffle(lines)
with open(fn, 'wb') as f:
for line in lines:
f.write(line)
def partial_gen_train_sample(users, user_his_behav, filename, pipe, seq_len,
min_len):
stat = dict()
with open(filename, 'wb') as f:
for user in users:
value = user_his_behav[user]
count = len(value)
if count < min_len:
continue
arr = [0 for i in range(seq_len - min_len)] + \
[v[0] for v in value]
for i in range(len(arr) - seq_len + 1):
sample = arr[i:i + seq_len]
f.write('{}_{}'.format(user, i)) # sample id
f.write('\t{}'.format(sample[-1])) # label feature
for j in range(seq_len - 1):
if sample[j] != 0:
f.write("\tslot_{}:{}".format(j + 1, sample[j]))
f.write('\n')
if sample[-1] not in stat:
stat[sample[-1]] = 0
stat[sample[-1]] += 1
pipe.send(stat)
def gen_train_sample(train_sample, args):
user_his_behav = gen_user_his_behave(train_sample)
print("user_his_behav len: {}".format(len(user_his_behav)))
users = user_his_behav.keys()
process = []
pipes = []
parall = args.parall
job_size = int(len(user_his_behav) / parall)
if len(user_his_behav) % parall != 0:
parall += 1
for i in range(parall):
a, b = mp.Pipe()
pipes.append(a)
p = mp.Process(
target=partial_gen_train_sample,
args=(users[i * job_size:(i + 1) * job_size], user_his_behav,
'train_tmp.part_{}'.format(i), b, args.seq_len,
args.min_seq_len))
process.append(p)
p.start()
stat = dict()
for pipe in pipes:
st = pipe.recv()
for k, v in st.items():
if k not in stat:
stat[k] = 0
stat[k] += v
for p in process:
p.join()
# Merge partial files
with open("train_tmp", 'wb') as f:
for i in range(parall):
filename = 'train_tmp.part_{}'.format(i)
with open(filename, 'rb') as f1:
f.write(f1.read())
os.remove(filename)
# Split train sample to segments
split_train_sample(args.train_dir, args.train_sample_seg_cnt)
return stat
def gen_test_sample(test_dir, test_sample, seq_len, min_seq_len):
user_his_behav = gen_user_his_behave(test_sample)
with open("{}/part-0".format(test_dir), 'wb') as f:
for user, value in user_his_behav.items():
if len(value) / 2 + 1 < min_seq_len:
continue
mid = int(len(value) / 2)
left = value[:mid][-seq_len + 1:]
right = value[mid:]
left = [0 for i in range(seq_len - len(left) - 1)] + \
[l[0] for l in left]
f.write('{}_{}'.format(user, 'T')) # sample id
labels = ','.join(map(str, [item[0] for item in right]))
f.write('\t{}'.format(labels))
# kvs
for j in range(seq_len - 1):
if left[j] != 0:
f.write("\tslot_{}:{}".format(j + 1, left[j]))
f.write('\n')
def prepare_sample_set(train_dir, sample_dir, process_num=12, feature_num=69):
def parse_data(files, idx, feature_num=69):
history_ids = [0] * feature_num
samples = dict()
process = 0
for filename in files:
process += 1
print("process {} / {}.".format(process, len(files)))
with open(filename) as f:
print("Begin to handle {}.".format(filename))
for line in f:
features = line.strip().split("\t")
item_id = int(features[1])
for item in features[2:]:
slot, feasign = item.split(":")
slot_id = int(slot.split("_")[1])
history_ids[slot_id - 1] = int(feasign)
if item_id not in samples:
samples[item_id] = list()
samples[item_id].append(history_ids)
with open("parse_data_{}.json".format(idx), 'w') as json_file:
json.dump(samples, json_file)
files = ["{}/{}".format(train_dir, f) for f in os.listdir(train_dir)]
real_process_num = mp_run(files, process_num, parse_data, feature_num)
num = 0
all_samples = dict()
for i in range(real_process_num):
filename = "parse_data_{}.json".format(i)
with open(filename, 'r') as json_file:
each_samples = json.load(json_file)
for key in each_samples:
if key not in all_samples:
all_samples[key] = []
all_samples[key] += each_samples[key]
num += len(each_samples[key])
os.remove(filename)
for ck in all_samples:
with open("{}/samples_{}.json".format(sample_dir, ck), 'w') as f:
json.dump(all_samples[ck], f)
if __name__ == '__main__':
_PARSER = argparse.ArgumentParser(description="DataProcess")
_PARSER.add_argument("--train_file", required=True, help="Train filename")
_PARSER.add_argument("--test_file", required=True, help="Test filename")
_PARSER.add_argument(
"--item_cate_filename",
default="./Item_Cate.txt",
help="item cate filename, used to init the first tree.")
_PARSER.add_argument(
"--stat_file", default="./Stat.txt", help="Stat filename")
_PARSER.add_argument(
"--train_dir", default="./train_data", help="Train directory")
_PARSER.add_argument(
"--sample_dir", default="./samples", help="Sample directory")
_PARSER.add_argument(
"--test_dir", default="./test_data", help="Test directory")
_PARSER.add_argument(
'--parall', type=int, help="parall process used", default=16)
_PARSER.add_argument(
"--train_sample_seg_cnt",
type=int,
default=20,
help="count of train sample segments file")
_PARSER.add_argument(
"--seq_len",
type=int,
help="sequence length of the sample record",
default=70)
_PARSER.add_argument(
"--min_seq_len",
type=int,
help="Min length of the sample sequence record",
default=8)
args = _PARSER.parse_args()
os.system("rm -rf ./{} && mkdir -p {}".format(args.train_dir,
args.train_dir))
os.system("rm -rf ./{} && mkdir -p {}".format(args.test_dir,
args.test_dir))
os.system("rm -rf ./{} && mkdir -p {}".format(args.sample_dir,
args.sample_dir))
behavior_dict, train_sample, test_sample = read(args.train_file,
args.test_file)
print(repr(behavior_dict))
stat = gen_train_sample(train_sample, args)
with open(args.stat_file, 'w') as f:
json.dump(stat, f)
gen_test_sample(args.test_dir, test_sample, args.seq_len, args.min_seq_len)
item_cate = dict()
for sample in [train_sample, test_sample]:
iterobj = zip(sample["ITEMID"], sample["CATID"])
for item_id, cat_id in iterobj:
if item_id not in item_cate:
item_cate[item_id] = cat_id
with open(args.item_cate_filename, 'w') as f:
for key in item_cate:
f.write("{}\t{}\n".format(key, item_cate[key]))
prepare_sample_set(
args.train_dir,
args.sample_dir,
args.parall,
feature_num=args.seq_len - 1)
|
third_party/liblouis/copy_tables.py | google-ar/chromium | 2,151 | 26018 | #!/usr/bin/env python
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''Copies the liblouis braille translation tables to a destination.'''
import liblouis_list_tables
import optparse
import os
import shutil
def LinkOrCopyFiles(sources, dest_dir):
def LinkOrCopyOneFile(src, dst):
if os.path.exists(dst):
os.unlink(dst)
try:
os.link(src, dst)
except:
shutil.copy(src, dst)
if not os.path.exists(dest_dir):
os.makedirs(dest_dir)
for source in sources:
LinkOrCopyOneFile(source, os.path.join(dest_dir, os.path.basename(source)))
def WriteDepfile(depfile, infiles):
stampfile = depfile + '.stamp'
with open(stampfile, 'w'):
os.utime(stampfile, None)
content = '%s: %s' % (stampfile, ' '.join(infiles))
open(depfile, 'w').write(content)
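# Example (illustrative): WriteDepfile('tables.d', ['en-us-g1.ctb', 'braille-patterns.cti'])
# touches 'tables.d.stamp' and writes a gcc-style dependency line:
#
# tables.d.stamp: en-us-g1.ctb braille-patterns.cti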
def main():
parser = optparse.OptionParser(description=__doc__)
parser.add_option('-D', '--directory', dest='directories',
action='append', help='Where to search for table files')
parser.add_option('-e', '--extra_file', dest='extra_files', action='append',
default=[], help='Extra liblouis table file to process')
parser.add_option('-d', '--dest_dir', action='store', metavar='DIR',
help=('Destination directory. Used when translating ' +
'input paths to output paths and when copying '
'files.'))
parser.add_option('--depfile', metavar='FILENAME',
help=('Store .d style dependencies in FILENAME and touch '
'FILENAME.stamp after copying the files'))
options, args = parser.parse_args()
if len(args) != 1:
parser.error('Expecting exactly one argument')
if not options.directories:
parser.error('At least one --directory option must be specified')
if not options.dest_dir:
parser.error('At least one --dest_dir option must be specified')
files = liblouis_list_tables.GetTableFiles(args[0], options.directories,
options.extra_files)
LinkOrCopyFiles(files, options.dest_dir)
if options.depfile:
WriteDepfile(options.depfile, files)
if __name__ == '__main__':
main()
|
netbox/dcim/migrations/0133_port_colors.py | TheFlyingCorpse/netbox | 4,994 | 26022 | <reponame>TheFlyingCorpse/netbox
from django.db import migrations
import utilities.fields
class Migration(migrations.Migration):
dependencies = [
('dcim', '0132_cable_length'),
]
operations = [
migrations.AddField(
model_name='frontport',
name='color',
field=utilities.fields.ColorField(blank=True, max_length=6),
),
migrations.AddField(
model_name='frontporttemplate',
name='color',
field=utilities.fields.ColorField(blank=True, max_length=6),
),
migrations.AddField(
model_name='rearport',
name='color',
field=utilities.fields.ColorField(blank=True, max_length=6),
),
migrations.AddField(
model_name='rearporttemplate',
name='color',
field=utilities.fields.ColorField(blank=True, max_length=6),
),
]
|
reppy/util.py | PLPeeters/reppy | 137 | 26032 | <reponame>PLPeeters/reppy
'''Utility functions.'''
import email
def parse_date(string):
'''Return a timestamp for the provided datestring, described by RFC 7231.'''
parsed = email.utils.parsedate_tz(string)
if parsed is None:
raise ValueError("Invalid time.")
parsed = list(parsed)
# Default time zone is GMT/UTC
parsed[9] = 0 if parsed[9] is None else parsed[9]
return email.utils.mktime_tz(parsed)
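# Example (illustrative): an RFC 7231 HTTP-date parses to a Unix timestamp,
# with a missing time zone treated as GMT/UTC.
#
# parse_date('Sun, 06 Nov 1994 08:49:37 GMT')  # -> 784111777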
|
autograd/__init__.py | mattjj/autograd_tutorial | 704 | 26034 | <gh_stars>100-1000
from .differential_operators import make_vjp, grad
|
data_filter_azure/data_filter_azure/documentdb_server.py | abhushansahu/contrib | 238 | 26054 | <filename>data_filter_azure/data_filter_azure/documentdb_server.py
#!/usr/bin/env python
import requests
import base64
import json
from flask import Flask, redirect, jsonify, abort, make_response, g
import config
from flask_bootstrap import Bootstrap
import azure.common
from data_filter_azure import opa
import azure.cosmos.cosmos_client as cosmos_client
app = Flask(__name__)
Bootstrap(app)
def check_access_opa(registry_id, user_id, type, resource_name, action):
decision = query_opa(registry_id, user_id, type, resource_name, action)
if not decision.defined:
raise abort(403)
sql = opa.splice(SELECT='permissions.id', FROM='permissions JOIN map in permissions.map', WHERE=None, decision=decision)
print(sql)
result = query_cosmosdb(sql, args=None, one=True)
if len(result) == 0:
return False
return True
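# Illustrative note (exact formatting depends on opa.splice): OPA partially
# evaluates the policy with 'permissions' left unknown, and the residual
# conditions become the WHERE clause of the Cosmos DB SQL query, e.g. roughly:
#
#   SELECT permissions.id FROM permissions JOIN map in permissions.map
#   WHERE ((permissions.user = "bob") AND (map.type = "repositories") AND ...)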
@app.route('/api/registries/<registry_id>/users/<user_id>/<type>/<resource_name>/<action>', methods=["GET"])
def api_check_access(registry_id, user_id, type, resource_name, action):
return jsonify(check_access_opa(registry_id, user_id, type, resource_name, action))
@app.route('/')
def index():
return redirect('https://docs.microsoft.com/en-us/azure/cosmos-db/introduction', code = 302)
def query_cosmosdb(query, args=[], one=False):
dbinfo = get_cosmosdb()
cosmosdbquery = {
"query": query
}
options = {}
options['enableCrossPartitionQuery'] = True
options['maxItemCount'] = 2
client = dbinfo['client']
container = dbinfo['container']
result_iterable = client.QueryItems(container['_self'], cosmosdbquery, options)
values = []
for item in iter(result_iterable):
return item
values.append(item)
return values
def query_opa(registry_id, user_id, type, resourceName, action):
input = {
'registry': registry_id,
'user': user_id,
'type': type,
'resourceName': resourceName,
'action': action
}
return opa.compile(q='data.documentdb.example.allow==true',
input=input,
unknowns=['permissions'])
def get_cosmosdb():
dbinfo = dict();
client = cosmos_client.CosmosClient(url_connection=config.COSMOSDB_ENDPOINT, auth={
'masterKey': config.COSMOSDB_PRIMARYKEY})
dbinfo['client'] = client
id = config.COSMOSDB_DATABASE
databases = list(client.QueryDatabases({
"query": "SELECT * FROM r WHERE r.id=@id",
"parameters": [
{ "name":"@id", "value": id }
]
}))
if len(databases) > 0:
db = databases[0]
else:
db = client.CreateDatabase({'id': id})
dbinfo['db'] = db
containerid = 'permissions'
database_link = 'dbs/' + id
collections = list(client.QueryContainers(
database_link,
{
"query": "SELECT * FROM r WHERE r.id=@id",
"parameters": [
{ "name":"@id", "value": containerid }
]
}
))
if len(collections) > 0:
container = collections[0]
else:
options = {
'offerThroughput': 400
}
container_definition = {
'id': containerid,
'partitionKey': {'paths': ['/registry']}
}
container = client.CreateContainer(db['_self'], container_definition, options)
dbinfo['container'] = container
return dbinfo
def add_documents():
dbinfo = get_cosmosdb()
client = dbinfo['client']
container = dbinfo['container']
for document in DOCUMENTS:
client.UpsertItem(container['_self'], document)
def init_db():
with app.app_context():
add_documents()
DOCUMENTS = [
{
'registry' : 'registry1',
'user': 'bob',
'id': 'blob',
'map': [
{
"type": "repositories",
"name": "repo1",
"actions": ["read", "write"]
},
{
"type": "repositories",
"name": "repo2",
"actions": ["*"]
},
{
"type": "charts",
"name": "chart1",
"actions": ["read", "write"]
},
{
"type": "pipelines",
"name": "*",
"actions": ["read"]
}
]
},
{
'registry' : 'registry1',
'user': 'alice',
'id': 'alice',
'map': [
{
"type": "repositories",
"name": "*",
"actions": ["*"]
},
{
"type": "charts",
"name": "chart1",
"actions": ["read"]
}
]
}
]
if __name__ == '__main__':
init_db()
app.jinja_env.auto_reload = True
app.config['TEMPLATES_AUTO_RELOAD'] = True
app.run(debug=True)
|
absl/flags/tests/argparse_flags_test_helper.py | alexhagen/abseil-py | 1,969 | 26089 | <reponame>alexhagen/abseil-py
# Copyright 2018 The Abseil Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test helper for argparse_flags_test."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import random
from absl import app
from absl import flags
from absl.flags import argparse_flags
FLAGS = flags.FLAGS
flags.DEFINE_string('absl_echo', None, 'The echo message from absl.flags.')
def parse_flags_simple(argv):
"""Simple example for absl.flags + argparse."""
parser = argparse_flags.ArgumentParser(
description='A simple example of argparse_flags.')
parser.add_argument(
'--argparse_echo', help='The echo message from argparse_flags')
return parser.parse_args(argv[1:])
def main_simple(args):
print('--absl_echo is', FLAGS.absl_echo)
print('--argparse_echo is', args.argparse_echo)
def roll_dice(args):
print('Rolled a dice:', random.randint(1, args.num_faces))
def shuffle(args):
inputs = list(args.inputs)
random.shuffle(inputs)
print('Shuffled:', ' '.join(inputs))
def parse_flags_subcommands(argv):
"""Subcommands example for absl.flags + argparse."""
parser = argparse_flags.ArgumentParser(
description='A subcommands example of argparse_flags.')
parser.add_argument('--argparse_echo',
help='The echo message from argparse_flags')
subparsers = parser.add_subparsers(help='The command to execute.')
roll_dice_parser = subparsers.add_parser(
'roll_dice', help='Roll a dice.')
roll_dice_parser.add_argument('--num_faces', type=int, default=6)
roll_dice_parser.set_defaults(command=roll_dice)
shuffle_parser = subparsers.add_parser(
'shuffle', help='Shuffle inputs.')
shuffle_parser.add_argument(
'inputs', metavar='I', nargs='+', help='Inputs to shuffle.')
shuffle_parser.set_defaults(command=shuffle)
return parser.parse_args(argv[1:])
def main_subcommands(args):
main_simple(args)
args.command(args)
if __name__ == '__main__':
main_func_name = os.environ['MAIN_FUNC']
flags_parser_func_name = os.environ['FLAGS_PARSER_FUNC']
app.run(main=globals()[main_func_name],
flags_parser=globals()[flags_parser_func_name])
|
test/win/compiler-flags/calling-convention.gyp | chlorm-forks/gyp | 2,151 | 26106 | <reponame>chlorm-forks/gyp
# Copyright (c) 2014 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'targets': [
{
'target_name': 'test_cdecl',
'type': 'loadable_module',
'msvs_settings': {
'VCCLCompilerTool': {
'CallingConvention': 0,
},
},
'sources': [
'calling-convention.cc',
'calling-convention-cdecl.def',
],
},
{
'target_name': 'test_fastcall',
'type': 'loadable_module',
'msvs_settings': {
'VCCLCompilerTool': {
'CallingConvention': 1,
},
},
'sources': [
'calling-convention.cc',
'calling-convention-fastcall.def',
],
},
{
'target_name': 'test_stdcall',
'type': 'loadable_module',
'msvs_settings': {
'VCCLCompilerTool': {
'CallingConvention': 2,
},
},
'sources': [
'calling-convention.cc',
'calling-convention-stdcall.def',
],
},
],
'conditions': [
['MSVS_VERSION[0:4]>="2013"', {
'targets': [
{
'target_name': 'test_vectorcall',
'type': 'loadable_module',
'msvs_settings': {
'VCCLCompilerTool': {
'CallingConvention': 3,
},
},
'sources': [
'calling-convention.cc',
'calling-convention-vectorcall.def',
],
},
],
}],
],
}
|
RecoEgamma/EgammaPhotonProducers/python/propOppoMomentumWithMaterialForElectrons_cfi.py | ckamtsikis/cmssw | 852 | 26129 | import FWCore.ParameterSet.Config as cms
import TrackingTools.MaterialEffects.OppositeMaterialPropagator_cfi
#PropagatorWithMaterialESProducer
oppositeToMomElePropagator = TrackingTools.MaterialEffects.OppositeMaterialPropagator_cfi.OppositeMaterialPropagator.clone(
Mass = 0.000511,
ComponentName = 'oppositeToMomElePropagator'
)
|
test/__init__.py | gjhiggins/rdflib-sqlalchemy | 112 | 26130 | from rdflib import plugin
from rdflib import store
plugin.register(
"SQLAlchemy",
store.Store,
"rdflib_sqlalchemy.store",
"SQLAlchemy",
)
|
isc_dhcp_leases/test_lease6.py | dholl/python-isc-dhcp-leases | 111 | 26137 | <gh_stars>100-1000
import datetime
from unittest import TestCase
from isc_dhcp_leases.iscdhcpleases import Lease6, utc
from freezegun import freeze_time
__author__ = '<NAME> <<EMAIL>>'
class TestLease6(TestCase):
def setUp(self):
self.lease_time = datetime.datetime(2015, 8, 18, 16, 55, 37, tzinfo=utc)
self.lease_data = {
'binding': 'state active',
'ends': 'never',
'preferred-life': '375',
'max-life': '600'
}
def test_init(self):
lease = Lease6("fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b", self.lease_data, self.lease_time,
"4dv\\352\\000\\001\\000\\001\\035f\\037\\342\\012\\000'\\000\\000\\000", "na")
self.assertEqual(lease.ip, "fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b")
self.assertEqual(lease.host_identifier, b"4dv\xea\x00\x01\x00\x01\x1df\x1f\xe2\n\x00'\x00\x00\x00")
self.assertEqual(lease.valid, True)
self.assertEqual(lease.iaid, 3933627444)
self.assertEqual(lease.duid, b"\x00\x01\x00\x01\x1df\x1f\xe2\n\x00'\x00\x00\x00")
self.assertEqual(lease.active, True)
self.assertEqual(lease.binding_state, 'active')
self.assertEqual(lease.preferred_life, 375)
self.assertEqual(lease.max_life, 600)
self.assertEqual(lease.last_communication, self.lease_time)
self.assertEqual(lease.type, Lease6.NON_TEMPORARY)
def test_repr(self):
lease = Lease6("fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b", self.lease_data, self.lease_time,
"4dv\\352\\000\\001\\000\\001\\035f\\037\\342\\012\\000'\\000\\000\\000", "na")
self.assertEqual(repr(lease), '<Lease6 fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b>')
def _test_valid(self, now=None):
lease = Lease6("fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b", self.lease_data, self.lease_time,
"4dv\\352\\000\\001\\000\\001\\035f\\037\\342\\012\\000'\\000\\000\\000", "na",
now=now)
self.assertTrue(lease.valid) # Lease is forever
lease.end = datetime.datetime(2015, 7, 6, 13, 57, 4, tzinfo=utc)
self.assertTrue(lease.valid) # Lease is before end
lease.end = lease.end - datetime.timedelta(hours=7)
self.assertFalse(lease.valid) # Lease is ended
@freeze_time("2015-07-6 8:15:0")
def test_valid_frozen(self):
self._test_valid()
def test_valid_historical(self):
self._test_valid(
now=datetime.datetime(2015, 7, 6, 8, 15, 0, tzinfo=utc))
def test_eq(self):
lease_a = Lease6("2fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b", self.lease_data, self.lease_time,
"4dv\\352\\000\\001\\000\\001\\035f\\037\\342\\012\\000'\\000\\000\\000", "na")
lease_b = Lease6("fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b", self.lease_data, self.lease_time,
"4dv\\352\\000\\001\\000\\001\\035f\\037\\342\\012\\000'\\000\\000\\000", "na")
self.assertEqual(lease_a, lease_b)
lease_b.ip = "fc00:e968:6179::de52:7100"
self.assertNotEqual(lease_a, lease_b)
lease_b.ip = "fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b"
lease_b.host_identifier = "gd4\352\000\001\000\001\035b\037\322\012\000'\000\000\000"
self.assertNotEqual(lease_a, lease_b)
def test_naive_time(self):
with self.assertRaises(ValueError):
Lease6("fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b", self.lease_data, self.lease_time,
"4dv\\352\\000\\001\\000\\001\\035f\\037\\342\\012\\000'\\000\\000\\000", "na",
now=datetime.datetime.now())
|
spectacles/validators/validator.py | felipefrancisco/spectacles | 150 | 26143 | <reponame>felipefrancisco/spectacles
from typing import Optional, List
from abc import ABC, abstractmethod
from spectacles.client import LookerClient
from spectacles.lookml import Project, Model, Dimension
from spectacles.select import is_selected
from spectacles.exceptions import LookMlNotFound
class Validator(ABC): # pragma: no cover
"""Defines abstract base interface for validators.
Not intended to be used directly, only inherited.
Attributes:
client: Looker API client.
"""
def __init__(self, client: LookerClient, project: str):
self.client = client
self.project = Project(project, models=[])
@abstractmethod
def validate(self):
raise NotImplementedError
def build_project(
self,
selectors: Optional[List[str]] = None,
exclusions: Optional[List[str]] = None,
build_dimensions: bool = False,
) -> None:
"""Creates an object representation of the project's LookML.
Args:
selectors: List of selector strings in 'model_name/explore_name' format.
The '*' wildcard selects all models or explores. For instance,
'model_name/*' would select all explores in the 'model_name' model.
"""
# Assign default values for selectors and exclusions
if selectors is None:
selectors = ["*/*"]
if exclusions is None:
exclusions = []
all_models = [
Model.from_json(model)
for model in self.client.get_lookml_models(
fields=["name", "project_name", "explores"]
)
]
project_models = [
model for model in all_models if model.project_name == self.project.name
]
if not project_models:
raise LookMlNotFound(
name="project-models-not-found",
title="No configured models found for the specified project.",
detail=(
f"Go to {self.client.base_url}/projects and confirm "
"a) at least one model exists for the project and "
"b) it has an active configuration."
),
)
for model in project_models:
model.explores = [
explore
for explore in model.explores
if is_selected(model.name, explore.name, selectors, exclusions)
]
if build_dimensions:
for explore in model.explores:
dimensions_json = self.client.get_lookml_dimensions(
model.name, explore.name
)
for dimension_json in dimensions_json:
dimension = Dimension.from_json(
dimension_json, model.name, explore.name
)
dimension.url = self.client.base_url + dimension.url
if not dimension.ignore:
explore.add_dimension(dimension)
self.project.models = [
model for model in project_models if len(model.explores) > 0
]
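# Usage sketch (illustrative; the concrete subclass and the `client` instance are
# assumptions, not part of this module):
#
# class NoopValidator(Validator):
#     def validate(self):
#         return self.project
#
# validator = NoopValidator(client, project="my_project")
# validator.build_project(
#     selectors=["orders_model/*"],  # '*' wildcard selects all explores
#     exclusions=["orders_model/deprecated_explore"],
#     build_dimensions=True,
# )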
|
saas/aiops/api/anomalydetection/main/anomaly_detection.py | iuskye/SREWorks | 407 | 26179 | <filename>saas/aiops/api/anomalydetection/main/anomaly_detection.py
import pandas as pd
import json
import time
from bentoml import env, artifacts, api, BentoService
from bentoml.adapters import DataframeInput, JsonInput, StringInput
from bentoml.frameworks.sklearn import SklearnModelArtifact
@env(infer_pip_packages=True)
@artifacts([SklearnModelArtifact('model')])
class AnomalyDetection(BentoService):
"""
A minimum prediction service exposing a Scikit-learn model
"""
@api(input=JsonInput())
def analyse(self, param: json):
"""
An inference API named `analyse` with a JSON input adapter, which codifies
how the HTTP request body is converted to a parsed JSON object passed in as
the inference API function input
"""
dic = {}
if param['taskType']=='async':
time.sleep(30)
try:
if len(param['seriesList'])<2:
raise Exception()
else:
series = []
series.append([1635216096000, 23.541])
dic['predictSeriesList'] = series
except Exception as ex:
dic['code'] = 'detectorError'
dic['message'] = 'some error in detector internal!'
return dic
@api(input=DataframeInput(), batch=True)
def predict(self, df: pd.DataFrame):
"""
An inference API named `predict` with Dataframe input adapter, which codifies
how HTTP requests or CSV files are converted to a pandas Dataframe object as the
inference API function input
"""
return self.artifacts.model.predict(df)
@api(input=JsonInput())
def analyze(self, param: json):
"""
An inference API named `analyze` with a JSON input adapter, which codifies
how the HTTP request body is converted to a parsed JSON object passed in as
the inference API function input
"""
return "good"
@api(input=StringInput())
def doc(self, message: str):
"""
get README.md
"""
f = open("README.md")
doc = f.read()
f.close()
return doc
|
test cases/windows/10 vs module defs generated custom target/subdir/make_def.py | kira78/meson | 4,047 | 26180 | <reponame>kira78/meson
#!/usr/bin/env python3
import sys
with open(sys.argv[1], 'w') as f:
print('EXPORTS', file=f)
print(' somedllfunc', file=f)
|