max_stars_repo_path (stringlengths 4-245) | max_stars_repo_name (stringlengths 7-115) | max_stars_count (int64 101-368k) | id (stringlengths 2-8) | content (stringlengths 6-1.03M) |
---|---|---|---|---|
f5/bigip/tm/gtm/test/functional/test_listener.py | nghia-tran/f5-common-python | 272 | 12755606 | <gh_stars>100-1000
# Copyright 2014-2017 F5 Networks Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import copy
import pytest
from distutils.version import LooseVersion
from f5.bigip.tm.gtm.listener import Listener
from f5.sdk_exception import MissingRequiredCreationParameter
from f5.sdk_exception import MissingRequiredReadParameter
from f5.sdk_exception import UnsupportedOperation
from pytest import symbols
from requests.exceptions import HTTPError
from six import iteritems
pytestmark = pytest.mark.skipif(
symbols
and hasattr(symbols, 'modules')
and not symbols.modules['gtm'],
reason='The modules symbol for GTM is set to False.'
)
def delete_listener(mgmt_root, name, partition):
try:
foo = mgmt_root.tm.gtm.listeners.listener.load(name=name,
partition=partition)
except HTTPError as err:
if err.response.status_code != 404:
raise
return
foo.delete()
def setup_create_test(request, mgmt_root, name, partition):
def teardown():
delete_listener(mgmt_root, name, partition)
request.addfinalizer(teardown)
def setup_basic_test(request, mgmt_root, name, address, partition):
def teardown():
delete_listener(mgmt_root, name, partition)
reg1 = mgmt_root.tm.gtm.listeners.listener.create(name=name,
address=address,
partition=partition)
request.addfinalizer(teardown)
return reg1
class TestCreate(object):
def test_create_no_args(self, mgmt_root):
with pytest.raises(MissingRequiredCreationParameter):
mgmt_root.tm.gtm.listeners.listener.create()
def test_create(self, request, mgmt_root):
setup_create_test(request, mgmt_root, 'fake_listener', 'Common')
reg1 = mgmt_root.tm.gtm.listeners.listener.create(
name='fake_listener', partition='Common', address='10.10.10.10')
assert reg1.name == 'fake_listener'
assert reg1.partition == 'Common'
assert reg1.address == '10.10.10.10'
assert reg1.generation and isinstance(reg1.generation, int)
assert reg1.kind == 'tm:gtm:listener:listenerstate'
assert reg1.selfLink.startswith(
'https://localhost/mgmt/tm/gtm/listener/~Common~fake_listener')
def test_create_optional_args(self, request, mgmt_root):
setup_create_test(request, mgmt_root, 'fake_listener', 'Common')
reg1 = mgmt_root.tm.gtm.listeners.listener.create(
name='fake_listener', partition='Common', address='10.10.10.10',
description='NewListener')
assert hasattr(reg1, 'description')
assert reg1.description == 'NewListener'
def test_create_duplicate(self, request, mgmt_root):
setup_basic_test(request, mgmt_root, 'fake_listener',
'10.10.10.10', 'Common')
with pytest.raises(HTTPError) as err:
mgmt_root.tm.gtm.listeners.listener.create(
name='fake_listener', partition='Common',
address='10.10.10.10')
assert err.value.response.status_code == 409
class TestRefresh(object):
def test_refresh(self, request, mgmt_root):
setup_basic_test(request, mgmt_root, 'fake_listener',
'10.10.10.10', 'Common')
r1 = mgmt_root.tm.gtm.listeners.listener.load(
name='fake_listener', partition='Common')
r2 = mgmt_root.tm.gtm.listeners.listener.load(
name='fake_listener', partition='Common')
assert r1.name == 'fake_listener'
assert r2.name == 'fake_listener'
r2.update(description='NewListener')
assert hasattr(r2, 'description')
assert not hasattr(r1, 'description')
assert r2.description == 'NewListener'
r1.refresh()
assert hasattr(r1, 'description')
assert r1.description == 'NewListener'
class TestLoad(object):
def test_load_no_object(self, mgmt_root):
with pytest.raises(HTTPError) as err:
mgmt_root.tm.gtm.listeners.listener.load(
name='fake_listener', partition='Common')
if LooseVersion(pytest.config.getoption('--release')) >= \
LooseVersion('12.0.0'):
assert err.value.response.status_code == 400
else:
assert err.value.response.status_code == 500
def test_load(self, request, mgmt_root):
setup_basic_test(request, mgmt_root, 'fake_listener',
'10.10.10.10', 'Common')
r1 = mgmt_root.tm.gtm.listeners.listener.load(
name='fake_listener', partition='Common')
assert r1.name == 'fake_listener'
assert not hasattr(r1, 'description')
r1.update(description='NewListener')
assert hasattr(r1, 'description')
assert r1.description == 'NewListener'
r2 = mgmt_root.tm.gtm.listeners.listener.load(
name='fake_listener', partition='Common')
assert hasattr(r2, 'description')
assert r2.description == 'NewListener'
class TestExists(object):
def test_not_exists(self, request, mgmt_root):
result = mgmt_root.tm.gtm.listeners.listener.exists(
name='my_listener', partition='Common'
)
assert result is False
def test_exists(self, request, mgmt_root):
r1 = setup_basic_test(
request, mgmt_root, 'fake_listener', '10.10.10.10', 'Common'
)
result = mgmt_root.tm.gtm.listeners.listener.exists(
name='fake_listener', partition='Common'
)
assert r1.name == 'fake_listener'
assert result is True
class TestUpdate(object):
def test_update(self, request, mgmt_root):
r1 = setup_basic_test(request, mgmt_root, 'fake_listener',
'10.10.10.10', 'Common')
assert r1.name == 'fake_listener'
assert not hasattr(r1, 'description')
r1.update(description='NewListener')
assert hasattr(r1, 'description')
assert r1.description == 'NewListener'
class TestModify(object):
def test_modify(self, request, mgmt_root):
r1 = setup_basic_test(request, mgmt_root, 'fake_listener',
'10.10.10.10', 'Common')
original_dict = copy.copy(r1.__dict__)
value = 'description'
r1.modify(description='NewListener')
for k, v in iteritems(original_dict):
if k != value:
original_dict[k] = r1.__dict__[k]
elif k == value:
assert r1.__dict__[k] == 'NewListener'
class TestDelete(object):
def test_delete(self, request, mgmt_root):
r1 = mgmt_root.tm.gtm.listeners.listener.create(
name='fake_listener', address='10.10.10.10')
r1.delete()
with pytest.raises(HTTPError) as err:
mgmt_root.tm.gtm.listeners.listener.load(
                name='fake_listener', partition='Common')
if LooseVersion(pytest.config.getoption('--release')) >= \
LooseVersion('12.0.0'):
assert err.value.response.status_code == 400
else:
assert err.value.response.status_code == 500
class TestListenerCollection(object):
def test_listener_collection(self, request, mgmt_root):
reg1 = setup_basic_test(request, mgmt_root, 'fake_listener',
'10.10.10.10', 'Common')
assert reg1.name == 'fake_listener'
assert reg1.partition == 'Common'
assert reg1.address == '10.10.10.10'
assert reg1.generation and isinstance(reg1.generation, int)
assert reg1.kind == 'tm:gtm:listener:listenerstate'
assert reg1.selfLink.startswith(
'https://localhost/mgmt/tm/gtm/listener/~Common~fake_listener')
rc = mgmt_root.tm.gtm.listeners.get_collection()
assert isinstance(rc, list)
assert len(rc)
assert isinstance(rc[0], Listener)
class TestProfile(object):
def test_load_missing_args(self, request, mgmt_root):
reg1 = setup_basic_test(request, mgmt_root, 'fake_listener',
'10.10.10.10', 'Common')
profcol = reg1.profiles_s.get_collection()
prname = str(profcol[0].name)
with pytest.raises(MissingRequiredReadParameter):
reg1.profiles_s.profile.load(name=prname)
def test_load(self, request, mgmt_root):
reg1 = setup_basic_test(request, mgmt_root, 'fake_listener',
'10.10.10.10', 'Common')
profcol = reg1.profiles_s.get_collection()
prname = str(profcol[0].name)
prpart = str(profcol[0].partition)
pr1 = reg1.profiles_s.profile.load(name=prname, partition=prpart)
assert pr1.kind == 'tm:gtm:listener:profiles:profilesstate'
assert pr1.selfLink.startswith('https://localhost/mgmt/tm/gtm/listener'
'/~Common~fake_listener/profiles/'
'~Common~dns')
def test_refresh(self, request, mgmt_root):
reg1 = setup_basic_test(request, mgmt_root, 'fake_listener',
'10.10.10.10', 'Common')
profcol = reg1.profiles_s.get_collection()
prname = str(profcol[0].name)
prpart = str(profcol[0].partition)
pr1 = reg1.profiles_s.profile.load(name=prname, partition=prpart)
assert pr1.kind == 'tm:gtm:listener:profiles:profilesstate'
assert pr1.selfLink.startswith('https://localhost/mgmt/tm/gtm/listener'
'/~Common~fake_listener/profiles/'
'~Common~dns')
pr2 = reg1.profiles_s.profile.load(name=prname, partition=prpart)
pr1.refresh()
assert pr1.kind == pr2.kind
assert pr1.selfLink == pr2.selfLink
pr2.refresh()
assert pr2.kind == pr1.kind
assert pr2.selfLink == pr1.selfLink
def test_create_raises(self, request, mgmt_root):
reg1 = setup_basic_test(request, mgmt_root, 'fake_listener',
'10.10.10.10', 'Common')
with pytest.raises(UnsupportedOperation):
reg1.profiles_s.profile.create()
def test_modify_raises(self, request, mgmt_root):
reg1 = setup_basic_test(request, mgmt_root, 'fake_listener',
'10.10.10.10', 'Common')
with pytest.raises(UnsupportedOperation):
reg1.profiles_s.profile.modify()
def test_update_raises(self, request, mgmt_root):
reg1 = setup_basic_test(request, mgmt_root, 'fake_listener',
'10.10.10.10', 'Common')
with pytest.raises(UnsupportedOperation):
reg1.profiles_s.profile.update()
def test_delete_raises(self, request, mgmt_root):
reg1 = setup_basic_test(request, mgmt_root, 'fake_listener',
'10.10.10.10', 'Common')
with pytest.raises(UnsupportedOperation):
reg1.profiles_s.profile.delete()
|
QUANTAXIS/QASU/save_jq.py | vensentzhou/QUANTAXIS | 6,322 | 12755612 | <reponame>vensentzhou/QUANTAXIS
import concurrent.futures
import datetime
import os
from concurrent.futures import ProcessPoolExecutor, ThreadPoolExecutor
import pandas as pd
import pymongo
import QUANTAXIS as QA
from QUANTAXIS.QAFetch.QATdx import QA_fetch_get_stock_list
from QUANTAXIS.QAUtil import (DATABASE, QA_util_date_stamp,
QA_util_get_real_date, QA_util_log_info,
QA_util_time_stamp, QA_util_to_json_from_pandas,
trade_date_sse)
TRADE_HOUR_END = 17
def now_time():
"""
    1. If today is a trading day and the current time is before 17:00, quotes are fetched up to the previous trading day's close by default.
    2. If today is a trading day and the current time is 17:00 or later, quotes are fetched up to the current trading day's close by default.
"""
return (str(
QA_util_get_real_date(
str(datetime.date.today() - datetime.timedelta(days=1)),
trade_date_sse,
-1,
)) + " 17:00:00" if datetime.datetime.now().hour < TRADE_HOUR_END else str(
QA_util_get_real_date(
str(datetime.date.today()), trade_date_sse, -1)) + " 17:00:00")
def QA_SU_save_stock_min(client=DATABASE, ui_log=None, ui_progress=None):
"""
    JQData (JoinQuant) implementation:
    save the current day's stock_min data
"""
    # Import the JQData (jqdatasdk) module and log in
    try:
        import jqdatasdk
        # Replace JQUSERNAME and JQUSERPASSWD with your own account credentials
        jqdatasdk.auth("JQUSERNAME", "JQUSERPASSWD")
    except Exception:
        raise ModuleNotFoundError("jqdatasdk is not installed or authentication failed")
    # Format stock codes for JQData (append the exchange suffix)
code_list = list(
map(
lambda x: x + ".XSHG" if x[0] == "6" else x + ".XSHE",
QA_fetch_get_stock_list().code.unique().tolist(),
))
coll = client.stock_min
coll.create_index([
("code", pymongo.ASCENDING),
("time_stamp", pymongo.ASCENDING),
("date_stamp", pymongo.ASCENDING),
])
err = []
def __transform_jq_to_qa(df, code, type_):
"""
        Convert jqdata minute data to the QA format and store it in the database.
        1. jqdatasdk data format:
        open close high low volume money
        2018-12-03 09:31:00 10.59 10.61 10.61 10.59 8339100.0 88377836.0
        2. Matched against the data returned by QUANTAXIS.QAFetch.QATdx.QA_fetch_get_stock_min; see that source for the detailed processing.
open close high low vol amount ...
datetime
2018-12-03 09:31:00 10.99 10.90 10.99 10.90 2.211700e+06 2.425626e+07 ...
"""
if df is None or len(df) == 0:
raise ValueError("没有聚宽数据")
df = df.reset_index().rename(columns={
"index": "datetime",
"volume": "vol",
"money": "amount"
})
df["code"] = code
df["date"] = df.datetime.map(str).str.slice(0, 10)
df = df.set_index("datetime", drop=False)
df["date_stamp"] = df["date"].apply(lambda x: QA_util_date_stamp(x))
df["time_stamp"] = (
df["datetime"].map(str).apply(lambda x: QA_util_time_stamp(x)))
df["type"] = type_
return df[[
"open",
"close",
"high",
"low",
"vol",
"amount",
"datetime",
"code",
"date",
"date_stamp",
"time_stamp",
"type",
]]
def __saving_work(code, coll):
QA_util_log_info(
"##JOB03 Now Saving STOCK_MIN ==== {}".format(code), ui_log=ui_log)
try:
for type_ in ["1min", "5min", "15min", "30min", "60min"]:
col_filter = {"code": str(code)[0:6], "type": type_}
ref_ = coll.find(col_filter)
end_time = str(now_time())[0:19]
if coll.count_documents(col_filter) > 0:
start_time = ref_[coll.count_documents(
col_filter) - 1]["datetime"]
QA_util_log_info(
"##JOB03.{} Now Saving {} from {} to {} == {}".format(
["1min",
"5min",
"15min",
"30min",
"60min"].index(type_),
str(code)[0:6],
start_time,
end_time,
type_,
),
ui_log=ui_log,
)
if start_time != end_time:
df = jqdatasdk.get_price(
security=code,
start_date=start_time,
end_date=end_time,
frequency=type_.split("min")[0]+"m",
)
__data = __transform_jq_to_qa(
df, code=code[:6], type_=type_)
if len(__data) > 1:
coll.insert_many(
QA_util_to_json_from_pandas(__data)[1::])
else:
start_time = "2015-01-01 09:30:00"
QA_util_log_info(
"##JOB03.{} Now Saving {} from {} to {} == {}".format(
["1min",
"5min",
"15min",
"30min",
"60min"].index(type_),
str(code)[0:6],
start_time,
end_time,
type_,
),
ui_log=ui_log,
)
if start_time != end_time:
                        __data = __transform_jq_to_qa(
jqdatasdk.get_price(
security=code,
start_date=start_time,
end_date=end_time,
frequency=type_.split("min")[0]+"m",
),
code=code[:6],
type_=type_
)
if len(__data) > 1:
coll.insert_many(
QA_util_to_json_from_pandas(__data)[1::])
except Exception as e:
QA_util_log_info(e, ui_log=ui_log)
err.append(code)
QA_util_log_info(err, ui_log=ui_log)
    # JQData allows at most three concurrent connections
executor = ThreadPoolExecutor(max_workers=2)
res = {
executor.submit(__saving_work, code_list[i_], coll)
for i_ in range(len(code_list))
}
count = 0
for i_ in concurrent.futures.as_completed(res):
QA_util_log_info(
'The {} of Total {}'.format(count,
len(code_list)),
ui_log=ui_log
)
strProgress = "DOWNLOAD PROGRESS {} ".format(
str(float(count / len(code_list) * 100))[0:4] + "%")
intProgress = int(count / len(code_list) * 10000.0)
QA_util_log_info(
strProgress,
ui_log,
ui_progress=ui_progress,
ui_progress_int_value=intProgress
)
count = count + 1
if len(err) < 1:
QA_util_log_info("SUCCESS", ui_log=ui_log)
else:
QA_util_log_info(" ERROR CODE \n ", ui_log=ui_log)
QA_util_log_info(err, ui_log=ui_log)
if __name__ == "__main__":
QA_SU_save_stock_min()
|
tests/generators/forks/main.py | sifraitech/eth2.0-specs | 497 | 12755643 | from typing import Iterable
from eth2spec.test.helpers.constants import PHASE0, ALTAIR, BELLATRIX, MINIMAL, MAINNET
from eth2spec.test.helpers.typing import SpecForkName, PresetBaseName
from eth2spec.test.altair.fork import test_altair_fork_basic, test_altair_fork_random
from eth2spec.test.bellatrix.fork import test_bellatrix_fork_basic, test_bellatrix_fork_random
from eth2spec.gen_helpers.gen_base import gen_runner, gen_typing
from eth2spec.gen_helpers.gen_from_tests.gen import generate_from_tests
def create_provider(tests_src, preset_name: PresetBaseName,
phase: SpecForkName, fork_name: SpecForkName) -> gen_typing.TestProvider:
def prepare_fn() -> None:
return
def cases_fn() -> Iterable[gen_typing.TestCase]:
return generate_from_tests(
runner_name='fork',
handler_name='fork',
src=tests_src,
fork_name=fork_name,
preset_name=preset_name,
phase=phase,
)
return gen_typing.TestProvider(prepare=prepare_fn, make_cases=cases_fn)
def _get_fork_tests_providers():
for preset in [MINIMAL, MAINNET]:
yield create_provider(test_altair_fork_basic, preset, PHASE0, ALTAIR)
yield create_provider(test_altair_fork_random, preset, PHASE0, ALTAIR)
yield create_provider(test_bellatrix_fork_basic, preset, ALTAIR, BELLATRIX)
yield create_provider(test_bellatrix_fork_random, preset, ALTAIR, BELLATRIX)
if __name__ == "__main__":
gen_runner.run_generator("forks", list(_get_fork_tests_providers()))
|
blendergltf/pbr_utils.py | silentorb/blendergltf | 343 | 12755646 | <reponame>silentorb/blendergltf
import math
import bpy
import mathutils
ALPHA_MODE_ITEMS = [
('OPAQUE', 'Opaque', 'The alpha value is ignored and the rendered output is fully opaque'),
('MASK', 'Mask', (
'The rendered output is either fully opaque or fully transparent depending on the '
'alpha value and the specified alpha cutoff value'
)),
('BLEND', 'Blend', 'The alpha value is used to composite the source and destination areas'),
]
def get_base_color_factor(self):
material = self.id_data
diffuse = mathutils.Vector(material.diffuse_color)
diffuse *= material.diffuse_intensity
return [*diffuse, material.alpha]
def set_base_color_factor(self, value):
material = self.id_data
material.diffuse_color = value[:3]
material.diffuse_intensity = 1.0
alpha = value[3]
material.alpha = alpha
if alpha < 1.0:
material.use_transparency = True
material.transparency_method = 'Z_TRANSPARENCY'
else:
material.use_transparency = False
def get_emissive_factor(self):
material = self.id_data
return [min(material.emit, 2.0) * 0.5] * 3
def set_emissive_factor(self, value):
material = self.id_data
material.emit = mathutils.Color(value).v * 2.0
def get_alpha_mode(self):
material = self.id_data
alpha_mode = 'OPAQUE'
if material.use_transparency:
gs_alpha = material.game_settings.alpha_blend
if gs_alpha == 'CLIP':
alpha_mode = 'MASK'
else:
alpha_mode = 'BLEND'
for i, mode in enumerate(ALPHA_MODE_ITEMS):
if mode[0] == alpha_mode:
alpha_mode = i
break
else:
alpha_mode = 0
return alpha_mode
def set_alpha_mode(self, value):
material = self.id_data
value = ALPHA_MODE_ITEMS[value][0]
if value == 'OPAQUE':
material.use_transparency = False
material.game_settings.alpha_blend = 'OPAQUE'
elif value == 'MASK':
material.use_transparency = True
material.game_settings.alpha_blend = 'CLIP'
elif value == 'BLEND':
material.use_transparency = True
material.game_settings.alpha_blend = 'ALPHA'
def get_roughness_factor(self):
material = self.id_data
hardness = material.specular_hardness
if 1.0 < self.hardness_float < 511.0 and not hardness < self.hardness_float < hardness + 1:
self.hardness_float = material.specular_hardness
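    # Invert the hardness mapping used in set_roughness_factor
    # (hardness = 2 / roughness**4 - 2) to recover a [0, 1] roughness value.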
roughness = pow(2.0 / (self.hardness_float + 2.0), 0.25)
return max(min(roughness, 1.0), 0.0)
def set_roughness_factor(self, value):
material = self.id_data
if value <= 0:
value = 0.00001
roughness_texture = self.metal_roughness_texture
if roughness_texture:
slot = material.texture_slots[roughness_texture]
slot.hardness_factor = value
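    # Derive Blinn-Phong specular settings from the roughness value; 0.04 is
    # assumed here to play the role of a typical dielectric base reflectance.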
material.specular_intensity = 0.04 / (math.pi * pow(value, 4.0))
material.specular_color = (1.0, 1.0, 1.0)
self.hardness_float = (2.0 / pow(value, 4.0)) - 2.0
material.specular_hardness = min(math.floor(self.hardness_float), 511)
def get_texture(self, search_func, index_prop):
material = self.id_data
slots = [
t for t in material.texture_slots
if t and t.texture and t.texture_coords == 'UV'
]
slot = None
for slot in slots[::-1]:
if search_func(slot):
break
else:
return ''
if (
bpy.context.space_data
and bpy.context.space_data.type == 'PROPERTIES'
and bpy.context.object
):
uv_layers = bpy.context.object.data.uv_layers
setattr(self, index_prop, uv_layers.find(slot.uv_layer) if slot.uv_layer else 0)
return slot.texture.name
def _clear_slot_settings(slot):
slot.use_map_diffuse = False
slot.use_map_color_diffuse = False
slot.use_map_alpha = False
slot.use_map_translucency = False
slot.use_map_ambient = False
slot.use_map_emit = False
slot.use_map_mirror = False
slot.use_map_raymir = False
slot.use_map_specular = False
slot.use_map_color_spec = False
slot.use_map_hardness = False
slot.use_map_normal = False
slot.use_map_warp = False
slot.use_map_displacement = False
slot.blend_type = 'MIX'
def set_texture(self, value, current_value, update_func):
material = self.id_data
current_index = material.texture_slots.find(current_value)
slot_index = material.texture_slots.find(value)
# Clear slot
if not value:
if current_index != -1:
material.texture_slots.clear(current_index)
return
# Don't do anything if the correct texture is already set
if value == current_value:
return
bl_texture = bpy.data.textures[value]
# Texture is not already in a slot on this material
if current_index == -1 and slot_index == -1:
slot = material.texture_slots.add()
slot.texture = bl_texture
_clear_slot_settings(slot)
update_func(slot)
return
# Adjust existing slot to meet texture criteria
slot = material.texture_slots[slot_index]
_clear_slot_settings(slot)
update_func(slot)
if slot_index < current_index:
material.active_texture_index = slot_index
for _ in range(current_index - slot_index):
bpy.ops.texture.slot_move(type='DOWN')
material.active_texture_index -= 1
def get_base_color_texture(self):
return get_texture(self, lambda t: t.use_map_color_diffuse, 'base_color_text_index')
def set_base_color_texture(self, value):
def update(slot):
slot.use_map_color_diffuse = True
slot.blend_type = 'MULTIPLY'
set_texture(self, value, get_base_color_texture(self), update)
def get_metal_roughness_texture(self):
return get_texture(self, lambda t: t.use_map_hardness, 'metal_rough_text_index')
def set_metal_roughness_texture(self, value):
def update(slot):
slot.use_map_hardness = True
slot.hardness_factor = self.roughness_factor
set_texture(self, value, get_metal_roughness_texture(self), update)
def get_normal_texture(self):
return get_texture(self, lambda t: t.use_map_normal, 'normal_text_index')
def set_normal_texture(self, value):
def update(slot):
slot.use_map_normal = True
set_texture(self, value, get_normal_texture(self), update)
def get_emissive_texture(self):
return get_texture(self, lambda t: t.use_map_emit, 'emissive_text_index')
def set_emissive_texture(self, value):
def update(slot):
slot.use_map_emit = True
set_texture(self, value, get_emissive_texture(self), update)
class PbrSettings(bpy.types.PropertyGroup):
hardness_float = bpy.props.FloatProperty()
base_color_text_index = 0
metal_rough_text_index = 0
normal_text_index = 0
occlusion_text_index = 0
emissive_text_index = 0
base_color_factor = bpy.props.FloatVectorProperty(
name='Base Color Factor',
size=4,
subtype='COLOR',
min=0.0,
max=1.0,
get=get_base_color_factor,
set=set_base_color_factor,
)
base_color_texture = bpy.props.StringProperty(
name='Texture',
get=get_base_color_texture,
set=set_base_color_texture,
)
alpha_mode = bpy.props.EnumProperty(
items=ALPHA_MODE_ITEMS,
name='Alpha Mode',
default='OPAQUE',
get=get_alpha_mode,
set=set_alpha_mode,
)
alpha_cutoff = bpy.props.FloatProperty(
name='Alpha Cutoff',
min=0.0,
max=1.0,
default=0.5,
)
metallic_factor = bpy.props.FloatProperty(
name='Metallic Factor',
min=0.0,
max=1.0,
)
roughness_factor = bpy.props.FloatProperty(
name='Roughness Factor',
min=0.0,
max=1.0,
get=get_roughness_factor,
set=set_roughness_factor,
)
metal_roughness_texture = bpy.props.StringProperty(
name='Texture',
get=get_metal_roughness_texture,
set=set_metal_roughness_texture,
)
normal_texture = bpy.props.StringProperty(
name='Normal',
get=get_normal_texture,
set=set_normal_texture,
)
occlusion_texture = bpy.props.StringProperty(
name='Occlusion',
)
emissive_factor = bpy.props.FloatVectorProperty(
name='Emissive Factor',
size=3,
subtype='COLOR',
min=0.0,
max=1.0,
get=get_emissive_factor,
set=set_emissive_factor,
)
emissive_texture = bpy.props.StringProperty(
name='Texture',
get=get_emissive_texture,
set=set_emissive_texture,
)
class PbrExportPanel(bpy.types.Panel):
bl_idname = 'MATERIAL_PT_pbr_export'
bl_label = 'PBR Export'
bl_space_type = 'PROPERTIES'
bl_region_type = 'WINDOW'
bl_context = 'material'
@classmethod
def poll(cls, context):
return context.material is not None and hasattr(context.material, 'pbr_export_settings')
def draw(self, context):
settings = context.material.pbr_export_settings
self.layout.label('Base Color:')
box = self.layout.box()
box.prop(settings, 'base_color_factor', text='Factor')
box.prop_search(settings, 'base_color_texture', bpy.data, 'textures')
box.prop(settings, 'alpha_mode')
box.prop(settings, 'alpha_cutoff')
self.layout.label('Roughness:')
box = self.layout.box()
box.prop(settings, 'metallic_factor', text='Metallic')
box.prop(settings, 'roughness_factor', text='Factor')
box.prop_search(settings, 'metal_roughness_texture', bpy.data, 'textures')
self.layout.label('Emissive:')
box = self.layout.box()
box.prop(settings, 'emissive_factor', text='Factor')
box.prop_search(settings, 'emissive_texture', bpy.data, 'textures')
self.layout.prop_search(settings, 'normal_texture', bpy.data, 'textures')
self.layout.prop_search(settings, 'occlusion_texture', bpy.data, 'textures')
self.layout.prop(context.material.game_settings, 'use_backface_culling')
|
stubs/aioredis/__init__.py | rboixaderg/guillotina | 173 | 12755651 | <reponame>rboixaderg/guillotina<filename>stubs/aioredis/__init__.py
from typing import AsyncIterator
class Channel:
async def wait_message(self) -> AsyncIterator[bool]:
...
async def get(self) -> bytes:
...
class Redis:
def __init__(self, conn):
...
|
bin/get-phoneme-examples.py | dbm01z/rhasspy | 942 | 12755670 | #!/usr/bin/env python3
import sys
import re
import argparse
from collections import defaultdict
# This script loads frequently used words in a language, looks up their
# pronunciations in a CMU dictionary, then prints an example word +
# pronunciation for each phoneme.
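# Example invocation (file names are illustrative):
#   python3 get-phoneme-examples.py frequent_words.txt cmudict-en-us.dict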
def main():
parser = argparse.ArgumentParser()
parser.add_argument("frequent_words", help="Path to text file with frequent words")
parser.add_argument("dictionary", help="Path to CMU dictionary")
args = parser.parse_args()
# Download frequently used words in the given language
with open(args.frequent_words, "r") as word_file:
words = set([w.strip().upper() for w in word_file.read().splitlines()])
# phoneme -> [(word, pronunciation), ...]
examples = defaultdict(list)
# Find pronunciations for each frequently used word
with open(args.dictionary, "r") as dict_file:
for line in dict_file:
line = line.strip()
if not line:
continue
parts = re.split(r"[\t ]+", line)
word = parts[0]
if "(" in word:
word = word[: word.index("(")]
# Record example words for each phoneme
upper_word = word.upper()
if upper_word in words:
pronunciation = parts[1:]
for phoneme in pronunciation:
examples[phoneme].append((word, pronunciation))
# Pick unique example words for every phoneme
used_words = set()
for phoneme in sorted(examples):
# Choose the shortest, unused example word for this phoneme.
# Exclude words with 3 or fewer letters.
for word, pron in sorted(examples[phoneme], key=lambda kv: len(kv[0])):
if len(word) > 3 and (word not in used_words):
# Output format is:
# phoneme word pronunciation
print(phoneme, word, " ".join(pron))
used_words.add(word)
break
# -----------------------------------------------------------------------------
if __name__ == "__main__":
main()
|
fnss/netconfig/capacities.py | brucespang/fnss | 114 | 12755703 | """Functions to assign and manipulate link capacities of a topology.
Link capacities can be assigned either deterministically or randomly, according
to various models.
"""
from distutils.version import LooseVersion
import networkx as nx
from fnss.util import random_from_pdf
from fnss.units import capacity_units
__all__ = [
'set_capacities_constant',
'set_capacities_random',
'set_capacities_random_uniform',
'set_capacities_random_power_law',
'set_capacities_random_zipf',
'set_capacities_random_zipf_mandelbrot',
'set_capacities_degree_gravity',
'set_capacities_betweenness_gravity',
'set_capacities_eigenvector_gravity',
'set_capacities_communicability_gravity',
'set_capacities_pagerank_gravity',
'set_capacities_edge_betweenness',
'set_capacities_edge_communicability',
'get_capacities',
'clear_capacities'
]
def set_capacities_constant(topology, capacity, capacity_unit='Mbps',
links=None):
"""
Set constant link capacities
Parameters
----------
topology : Topology
The topology to which link capacities will be set
capacity : float
The value of capacity to set
links : iterable, optional
Iterable container of links, represented as (u, v) tuples to which
capacity will be set. If None or not specified, the capacity will be
applied to all links.
capacity_unit : str, optional
The unit in which capacity value is expressed (e.g. Mbps, Gbps etc..)
Examples
--------
>>> import fnss
>>> topology = fnss.erdos_renyi_topology(50, 0.1)
>>> fnss.set_capacities_constant(topology, 10, 'Mbps')
"""
if capacity <= 0:
raise ValueError('Capacity must be positive')
if not capacity_unit in capacity_units:
raise ValueError("The capacity_unit argument is not valid")
conversion_factor = 1
if 'capacity_unit' in topology.graph and links is not None:
# If a capacity_unit is set, that means that some links have already
# been assigned capacities, so set these capacity using the same unit
# already used
curr_capacity_unit = topology.graph['capacity_unit']
if curr_capacity_unit != capacity_unit:
conversion_factor = float(capacity_units[capacity_unit]) \
/ capacity_units[curr_capacity_unit]
else:
topology.graph['capacity_unit'] = capacity_unit
edges = links or topology.edges()
for u, v in edges:
topology.adj[u][v]['capacity'] = capacity * conversion_factor
return
def set_capacities_random(topology, capacity_pdf, capacity_unit='Mbps'):
"""
Set random link capacities according to a given probability density
function
Parameters
----------
topology : Topology
The topology to which link capacities will be set
capacity_pdf : dict
A dictionary representing the probability that a capacity value is
assigned to a link
capacity_unit : str, optional
The unit in which capacity value is expressed (e.g. Mbps, Gbps etc..)
Examples
--------
>>> import fnss
>>> topology = fnss.erdos_renyi_topology(50, 0.1)
>>> pdf = {10: 0.5, 100: 0.2, 1000: 0.3}
>>> fnss.set_capacities_constant(topology, pdf, 'Mbps')
"""
if not capacity_unit in capacity_units:
raise ValueError("The capacity_unit argument is not valid")
if any((capacity < 0 for capacity in capacity_pdf.keys())):
raise ValueError('All capacities in capacity_pdf must be positive')
topology.graph['capacity_unit'] = capacity_unit
for u, v in topology.edges():
topology.adj[u][v]['capacity'] = random_from_pdf(capacity_pdf)
return
def set_capacities_random_power_law(topology, capacities, capacity_unit='Mbps',
alpha=1.1):
"""
Set random link capacities according to a power-law probability density
function.
The probability that a capacity :math:`c_i` is assigned to a link is:
.. math::
p(c_i) = \\frac{{c_i}^{-\\alpha}}{\\sum_{c_k \\in C}{{c_k}^{-\\alpha}}}.
Where :math:`C` is the set of allowed capacity, i.e. the ``capacities``
argument
Note that this capacity assignment differs from
``set_capacities_random_zipf`` because, while in Zipf assignment the power
law relationship is between the rank of a capacity and the probability of
being assigned to a link, in this assignment, the power law is between the
value of the capacity and the probability of being assigned to a link.
Parameters
----------
topology : Topology
The topology to which link capacities will be set
capacities : list
A list of all possible capacity values
capacity_unit : str, optional
The unit in which capacity value is expressed (e.g. Mbps, Gbps etc..)
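    Examples
    --------
    A minimal illustration; the topology and capacity values are arbitrary:

    >>> import fnss
    >>> topology = fnss.erdos_renyi_topology(50, 0.1)
    >>> fnss.set_capacities_random_power_law(topology, [10, 100, 1000], alpha=1.1)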
"""
if alpha <= 0.0:
raise ValueError('alpha must be positive')
capacities = sorted(capacities)
pdf = [capacities[i] ** (-alpha) for i in range(len(capacities))]
norm_factor = sum(pdf)
norm_pdf = {cap: pdf[i] / norm_factor for i, cap in enumerate(capacities)}
set_capacities_random(topology, norm_pdf, capacity_unit=capacity_unit)
def set_capacities_random_zipf_mandelbrot(topology, capacities,
capacity_unit='Mbps', alpha=1.1,
q=0.0, reverse=False):
"""
Set random link capacities according to a Zipf-Mandelbrot probability
density function.
This capacity allocation consists in the following steps:
1. All capacities are sorted in descending or order (or ascending if
reverse is True)
2. The i-th value of the sorted capacities list is then assigned to a link
with probability
.. math::
p(i) = \\frac{1/(i + q)^\\alpha}{\\sum_{i = 1}^{N}{1/(i + q)^\\alpha}}.
Parameters
----------
topology : Topology
The topology to which link capacities will be set
capacities : list
A list of all possible capacity values
capacity_unit : str, optional
The unit in which capacity value is expressed (e.g. Mbps, Gbps etc..)
alpha : float, default 1.1
        The :math:`\\alpha` parameter of the Zipf-Mandelbrot density function
    q : float, default 0
        The :math:`q` parameter of the Zipf-Mandelbrot density function
reverse : bool, optional
If False, lower capacity links are the most frequent, if True, higher
capacity links are more frequent
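    Examples
    --------
    An illustrative assignment; the topology, capacity values and parameters
    are arbitrary:

    >>> import fnss
    >>> topology = fnss.erdos_renyi_topology(50, 0.1)
    >>> fnss.set_capacities_random_zipf_mandelbrot(topology, [10, 100, 1000],
    ...                                            alpha=1.2, q=5.0)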
"""
if alpha <= 0.0:
raise ValueError('alpha must be positive')
if q < 0.0:
raise ValueError('q must be >= 0')
capacities = sorted(capacities, reverse=reverse)
pdf = {cap: 1.0 / (i + 1.0 + q) ** alpha for i, cap in enumerate(capacities)}
norm_factor = sum(pdf.values())
norm_pdf = {capacity: pdf[capacity] / norm_factor for capacity in pdf}
set_capacities_random(topology, norm_pdf, capacity_unit=capacity_unit)
def set_capacities_random_zipf(topology, capacities, capacity_unit='Mbps',
alpha=1.1, reverse=False):
"""
Set random link capacities according to a Zipf probability density
function.
The same objective can be achieved by invoking the function
    ``set_capacities_random_zipf_mandelbrot`` with parameter q set to 0.
This capacity allocation consists in the following steps:
1. All capacities are sorted in descending or order (or ascending if
reverse is True)
2. The i-th value of the sorted capacities list is then assigned to a link
with probability
.. math::
p(i) = \\frac{1/i^\\alpha}{\\sum_{i = 1}^{N}{1/i^\\alpha}}.
Parameters
----------
topology : Topology
The topology to which link capacities will be set
capacities : list
A list of all possible capacity values
capacity_unit : str, optional
The unit in which capacity value is expressed (e.g. Mbps, Gbps etc..)
alpha : float, default 1.1
        The :math:`\\alpha` parameter of the Zipf density function
reverse : bool, optional
If False, lower capacity links are the most frequent, if True, higher
capacity links are more frequent
"""
set_capacities_random_zipf_mandelbrot(topology, capacities, alpha=alpha,
q=0.0, reverse=reverse,
capacity_unit=capacity_unit)
def set_capacities_random_uniform(topology, capacities, capacity_unit='Mbps'):
"""
Set random link capacities according to a uniform probability density
function.
Parameters
----------
topology : Topology
The topology to which link capacities will be set
capacities : list
A list of all possible capacity values
capacity_unit : str, optional
The unit in which capacity value is expressed (e.g. Mbps, Gbps etc..)
"""
capacity_pdf = {capacity: 1.0 / len(capacities) for capacity in capacities}
set_capacities_random(topology, capacity_pdf, capacity_unit=capacity_unit)
def set_capacities_degree_gravity(topology, capacities, capacity_unit='Mbps'):
"""
Set link capacities proportionally to the product of the degrees of the
two end-points of the link
Parameters
----------
topology : Topology
The topology to which link capacities will be set
capacities : list
A list of all possible capacity values
capacity_unit : str, optional
The unit in which capacity value is expressed (e.g. Mbps, Gbps etc..)
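    Examples
    --------
    An illustrative assignment; the topology and capacity values are arbitrary:

    >>> import fnss
    >>> topology = fnss.erdos_renyi_topology(50, 0.1)
    >>> fnss.set_capacities_degree_gravity(topology, [10, 100, 1000])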
"""
if topology.is_directed():
in_degree = nx.in_degree_centrality(topology)
out_degree = nx.out_degree_centrality(topology)
gravity = {(u, v): out_degree[u] * in_degree[v]
for (u, v) in topology.edges()}
else:
degree = nx.degree_centrality(topology)
gravity = {(u, v): degree[u] * degree[v]
for (u, v) in topology.edges()}
_set_capacities_proportionally(topology, capacities, gravity,
capacity_unit=capacity_unit)
def set_capacities_betweenness_gravity(topology, capacities,
capacity_unit='Mbps', weighted=True):
"""
Set link capacities proportionally to the product of the betweenness
centralities of the two end-points of the link
Parameters
----------
topology : Topology
The topology to which link capacities will be set
capacities : list
A list of all possible capacity values
capacity_unit : str, optional
The unit in which capacity value is expressed (e.g. Mbps, Gbps etc..)
weighted : bool, optional
Indicate whether link weights need to be used to compute shortest
paths. If links do not have link weights or this parameter is False,
shortest paths are calculated based on hop count.
"""
weight = 'weight' if weighted else None
centrality = nx.betweenness_centrality(topology, normalized=False,
weight=weight)
_set_capacities_gravity(topology, capacities, centrality, capacity_unit)
def set_capacities_eigenvector_gravity(topology, capacities,
capacity_unit='Mbps', max_iter=1000):
"""
Set link capacities proportionally to the product of the eigenvector
centralities of the two end-points of the link
Parameters
----------
topology : Topology
The topology to which link capacities will be set
capacities : list
A list of all possible capacity values
capacity_unit : str, optional
The unit in which capacity value is expressed (e.g. Mbps, Gbps etc..)
max_iter : int, optional
The max number of iteration of the algorithm allowed. If a solution is
not found within this period
Raises
------
RuntimeError : if the algorithm does not converge in max_iter iterations
"""
try:
centrality = nx.eigenvector_centrality(topology, max_iter=max_iter)
except nx.NetworkXError:
raise RuntimeError('Algorithm did not converge in %d iterations'
% max_iter)
_set_capacities_gravity(topology, capacities, centrality, capacity_unit)
def set_capacities_pagerank_gravity(topology, capacities, capacity_unit='Mbps',
alpha=0.85, weight=None):
"""
Set link capacities proportionally to the product of the Pagerank
centralities of the two end-points of the link
Parameters
----------
topology : Topology
The topology to which link capacities will be set
capacities : list
A list of all possible capacity values
capacity_unit : str, optional
The unit in which capacity value is expressed (e.g. Mbps, Gbps etc..)
alpha : float, optional
        The alpha parameter of the PageRank algorithm
weight : str, optional
The name of the link attribute to use for the PageRank algorithm. Valid
attributes include *capacity* *delay* and *weight*. If ``None``, all
links are assigned the same weight.
"""
centrality = nx.pagerank_numpy(topology, alpha=alpha, personalization=None,
weight=weight)
_set_capacities_gravity(topology, capacities, centrality, capacity_unit)
def set_capacities_communicability_gravity(topology, capacities,
capacity_unit='Mbps'):
"""
Set link capacities proportionally to the product of the communicability
centralities of the two end-points of the link
Parameters
----------
topology : Topology
The topology to which link capacities will be set
capacities : list
A list of all possible capacity values
capacity_unit : str, optional
The unit in which capacity value is expressed (e.g. Mbps, Gbps etc..)
"""
if LooseVersion(nx.__version__) < LooseVersion("2.0"):
centrality = nx.communicability_centrality(topology)
else:
centrality = nx.subgraph_centrality(topology)
_set_capacities_gravity(topology, capacities, centrality, capacity_unit)
def set_capacities_edge_betweenness(topology, capacities, capacity_unit='Mbps',
weighted=True):
"""
Set link capacities proportionally to edge betweenness centrality of the
link.
Parameters
----------
topology : Topology
The topology to which link capacities will be set
capacities : list
A list of all possible capacity values
capacity_unit : str, optional
The unit in which capacity value is expressed (e.g. Mbps, Gbps etc..)
weighted : bool, optional
Indicate whether link weights need to be used to compute shortest
paths. If links do not have link weights or this parameter is False,
shortest paths are calculated based on hop count.
"""
weight = 'weight' if weighted else None
centrality = nx.edge_betweenness_centrality(topology, normalized=False,
weight=weight)
_set_capacities_proportionally(topology, capacities, centrality,
capacity_unit=capacity_unit)
def set_capacities_edge_communicability(topology, capacities,
capacity_unit='Mbps'):
"""
Set link capacities proportionally to edge communicability centrality of
the link.
Parameters
----------
topology : Topology
The topology to which link capacities will be set
capacities : list
A list of all possible capacity values
capacity_unit : str, optional
The unit in which capacity value is expressed (e.g. Mbps, Gbps etc..)
"""
communicability = nx.communicability(topology)
centrality = {(u, v): communicability[u][v]
for (u, v) in topology.edges()}
_set_capacities_proportionally(topology, capacities, centrality,
capacity_unit=capacity_unit)
def _set_capacities_gravity(topology, capacities, node_metric,
capacity_unit='Mbps'):
"""
Set link capacities proportionally to the product of the values of a given
node metric of the two end-points of the link
Parameters
----------
topology : Topology
The topology to which link capacities will be set
capacities : list
A list of all possible capacity values
node_metric : dict
A dictionary with all values of the given node metric, keyed by node
name
capacity_unit : str, optional
The unit in which capacity value is expressed (e.g. Mbps, Gbps etc..)
"""
gravity = {(u, v): node_metric[u] * node_metric[v]
for (u, v) in topology.edges()}
_set_capacities_proportionally(topology, capacities, gravity,
capacity_unit=capacity_unit)
def _set_capacities_proportionally(topology, capacities, metric,
capacity_unit='Mbps'):
"""
Set link capacities proportionally to the value of a given edge metric.
Parameters
----------
topology : Topology
The topology to which link capacities will be set
capacities : list
A list of all possible capacity values
metric : dict
A dictionary with all values of the given edge metric, keyed by edge
name
capacity_unit : str, optional
The unit in which capacity value is expressed (e.g. Mbps, Gbps etc..)
"""
if not capacity_unit in capacity_units:
raise ValueError("The capacity_unit argument is not valid")
if any((capacity < 0 for capacity in capacities)):
raise ValueError('All capacities must be positive')
if len(capacities) == 0:
raise ValueError('The list of capacities cannot be empty')
topology.graph['capacity_unit'] = capacity_unit
# If there is only one capacity the capacities list then all links are
# assigned the same capacity
if len(capacities) == 1:
set_capacities_constant(topology, capacities[0], capacity_unit)
return
# get min and max of selected edge metric
min_metric = min(metric.values())
max_metric = max(metric.values())
capacities = sorted(capacities)
min_capacity = capacities[0] - 0.5 * (capacities[1] - capacities[0])
max_capacity = capacities[-1] + 0.5 * (capacities[-1] - capacities[-2])
capacity_boundaries = [0.5 * (capacities[i] + capacities[i + 1])
for i in range(len(capacities) - 1)]
capacity_boundaries.append(max_capacity)
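    # Map capacity boundaries linearly onto the metric range, so that each
    # observed metric value falls into exactly one capacity bin below.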
metric_boundaries = [(capacity_boundary - min_capacity) *
((max_metric - min_metric) /
(max_capacity - min_capacity)) + min_metric
for capacity_boundary in capacity_boundaries]
# to prevent float rounding errors
metric_boundaries[-1] = max_metric + 0.1
for (u, v), metric_value in metric.items():
for i, boundary in enumerate(metric_boundaries):
if metric_value <= boundary:
capacity = capacities[i]
topology.adj[u][v]['capacity'] = capacity
break
# if the loop is not stopped yet, it means that because of float
# rounding error, max_capacity < metric_boundaries[-1], so we set the
# greatest capacity value.
# Anyway, the code should never reach this point, because before the
# for loop we are already adjusting the value of metric_boundaries[-1]
# to make it > max_capacity
else: topology.adj[u][v]['capacity'] = capacities[-1]
def get_capacities(topology):
"""
Returns a dictionary with all link capacities.
Parameters
----------
topology : Topology
The topology whose link delays are requested
Returns
-------
capacities : dict
Dictionary of link capacities keyed by link.
Examples
--------
>>> import fnss
>>> topology = fnss.Topology()
>>> topology.add_path([1,2,3])
>>> fnss.set_capacities_constant(topology, 10, 'Mbps')
>>> capacity = get_capacities(topology)
>>> capacity[(1,2)]
10
"""
return nx.get_edge_attributes(topology, 'capacity')
def clear_capacities(topology):
"""
Remove all capacities from the topology.
Parameters
----------
topology : Topology
"""
topology.graph.pop('capacity_unit', None)
for u, v in topology.edges():
topology.adj[u][v].pop('capacity', None)
|
tests/basics/Inspection_36.py | roired/Nuitka | 1,228 | 12755730 | # Copyright 2021, <NAME>, mailto:<EMAIL>
#
# Python tests originally created or extracted from other peoples work. The
# parts were too small to be protected.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Tests uncompiled functions and compiled functions responses to inspect and isistance. """
import inspect
import types
# nuitka-project: --python-flag=no_warnings
async def compiledAsyncgen():
yield 1
print(type(compiledAsyncgen()))
assert inspect.isfunction(compiledAsyncgen) is True
assert inspect.isgeneratorfunction(compiledAsyncgen) is False
assert inspect.iscoroutinefunction(compiledAsyncgen) is False
assert inspect.isasyncgenfunction(compiledAsyncgen) is True
assert isinstance(compiledAsyncgen(), types.GeneratorType) is False
assert isinstance(compiledAsyncgen(), types.CoroutineType) is False
assert isinstance(compiledAsyncgen(), types.AsyncGeneratorType) is True
assert type(compiledAsyncgen()) == types.AsyncGeneratorType, type(compiledAsyncgen())
assert isinstance(compiledAsyncgen, types.AsyncGeneratorType) is False
|
trashcode/config/constants.py | paultheron-X/INF581-Trading-agent | 671 | 12755736 | <filename>trashcode/config/constants.py
#Global
PARENT_DIR = "PARENT_DIR"
#Logging
LOG_FILE = "LOG_FILE"
SAVE_DIR = "SAVE_DIR"
TENSORBOARD_LOG_DIR = "TENSORBOARD_LOG_DIR"
#Preprocessing Dataset
DATASET_PATH = "DATASET_PATH"
#DeepSense Parameters
##Dataset Parameters
BATCH_SIZE = "BATCH_SIZE"
HISTORY_LENGTH = "HISTORY_LENGTH"
HORIZON = "HORIZON"
MEMORY_SIZE = "MEMORY_SIZE"
NUM_ACTIONS = "NUM_ACTIONS"
NUM_CHANNELS = "NUM_CHANNELS"
SPLIT_SIZE = "SPLIT_SIZE"
WINDOW_SIZE = "WINDOW_SIZE"
##Dropout Layer Parameters
CONV_KEEP_PROB = "CONV_KEEP_PROB"
DENSE_KEEP_PROB = "DENSE_KEEP_PROB"
GRU_KEEP_PROB = "GRU_KEEP_PROB"
## Convolution Layer Parameters
FILTER_SIZES = "FILTER_SIZES"
KERNEL_SIZES = "KERNEL_SIZES"
PADDING = "PADDING"
SAME = "SAME"
VALID = "VALID"
## GRU Parameters
GRU_CELL_SIZE = "GRU_CELL_SIZE"
GRU_NUM_CELLS = "GRU_NUM_CELLS"
##FullyConnected Layer Parameters
DENSE_LAYER_SIZES = "DENSE_LAYER_SIZES"
#configuration section names
CONVOLUTION = "convolution"
DATASET = "dataset"
DENSE = "dense"
DROPOUT = "dropout"
GLOBAL = "global"
GRU = "gru"
LOGGING = "logging"
PREPROCESSING = "preprocessing" |
app/server.py | hailiang-wang/aclweb-I17-1074 | 361 | 12755738 | ######################################################################
######################################################################
# Copyright <NAME>, Cambridge Dialogue Systems Group, 2017 #
######################################################################
######################################################################
from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
from urlparse import urlparse, parse_qs
from utils.commandparser import NNSDSOptParser
from nn.NNDialogue import NNDial
import json
import cgi
import ssl
import numpy as np
class ChatHandler(BaseHTTPRequestHandler):
model = None
def do_OPTIONS(self):
self.send_response(200, "ok")
self.send_header('Access-Control-Allow-Origin', '*')
self.send_header('Access-Control-Allow-Methods', 'GET, POST, OPTIONS')
self.send_header("Access-Control-Allow-Headers", "X-Requested-With")
self.send_header("Access-Control-Allow-Headers", "Content-Type")
self.send_header("Access-Control-Allow-Credentials", "false")
def do_GET(self):
self.send_response(200)
self.send_header('Access-Control-Allow-Origin', '*')
self.send_header("Access-Control-Allow-Credentials", "false")
self.send_header('Content-type','application/json')
self.end_headers()
q = parse_qs(urlparse(self.path).query,\
keep_blank_values=True)
#print q
response = model.reply(
q['user_utt_t'][0], q['generated'][0],
q['selected_venue'][0],q['venue_offered'][0],
q['belief_t'][0] )
self.request.sendall(json.dumps(response))
def do_POST(self):
self.send_response(200)
self.send_header('Access-Control-Allow-Origin', '*')
self.send_header("Access-Control-Allow-Credentials", "false")
self.send_header('Content-type','application/json')
self.end_headers()
data = self.rfile.read(int(self.headers['Content-Length']))
q = json.loads(data)
response = model.reply(
q['user_utt_t'], q['generated'],int(q['selected_venue']),
json.loads(q['venue_offered']), q['belief_t'] )
self.wfile.write(json.dumps(response))
self.wfile.close()
return
if __name__ == '__main__':
# TODO: IP address and port
hostname = 'xxx.xxx.xxx.xxx'
port = ????
# loading neural dialog model
args = NNSDSOptParser()
config = args.config
model = NNDial(config,args)
# handler
ChatHandler.model = model
# launch server
httpd = HTTPServer((hostname,port), ChatHandler)
print 'Server is on - %s:%d' % (hostname,port)
httpd.serve_forever()
|
etc/scripts/docenizers/docenizer6502.py | OfekShilon/compiler-explorer | 4,668 | 12755805 | #!/usr/bin/env python3
import argparse
import enum
import json
import os.path
import re
import urllib.request
DOC_URL_BASE = "https://raw.githubusercontent.com/mist64/c64ref/master/6502/"
doc_files = {f"{DOC_URL_BASE}{filename}":cpu_type for filename, cpu_type in {
"cpu_6502.txt" : "6502",
"cpu_65c02.txt" : "65c02",
}.items()
}
mode_change_regex = re.compile(r"\[(?P<mode_name>.*)\]")
comment_regex = re.compile(r"##")
mnemonic_regex = re.compile(r"(?P<mnemonic>\S+)\s+(?P<name>.*)")
description_start_regex = re.compile(r"(?P<mnemonic>\S+)\s+(?P<long_name>.*)")
description_continue_regex = re.compile(r"\s+(?P<description>.*)")
class ParseMode(enum.Enum):
IGNORE = enum.auto()
MNEMONICS = enum.auto()
DESCRIPTIONS = enum.auto()
class Instruction:
def __init__(self, mnemonic, cpu_type):
self.mnemonic = mnemonic
self.cpu_type = cpu_type
self.name = ""
self.long_name = ""
self.description = []
def html_description(self):
if self.description:
html = ""
for desc_line in self.description:
html += f"<p>{escape_quotes(desc_line)}</p>"
return html
elif self.long_name:
return f"<p>{escape_quotes(self.long_name)}</p>"
elif self.name:
return f"<p>{escape_quotes(self.name)}</p>"
else:
return f"<p>{self.mnemonic}</p>"
def get_instructions():
"""Gathers all instruction data and returns it in a dictionary."""
instructions = {}
for f, t in doc_files.items():
instructions_from_file(f, t, instructions)
return instructions
def instructions_from_file(filename, cpu_type, instructions):
"""Gathers instruction data from a file and adds it to the dictionary."""
with open_file(filename) as response:
print(f"Reading from {filename}...")
parse_mode = ParseMode.IGNORE
parse_funcs = {ParseMode.MNEMONICS: parse_mnemonics,
ParseMode.DESCRIPTIONS: parse_descriptions}
for line_num, line in enumerate(response_to_lines(response), start=1):
#print(str(line_num) + "\t" + str(line))
line = remove_comments(line)
if not line or line.isspace():
continue
regex_match = mode_change_regex.match(line)
if regex_match:
parse_mode = mode_change(regex_match.group("mode_name"))
continue
if parse_mode == ParseMode.IGNORE:
continue
parse_funcs[parse_mode](line, line_num, cpu_type, instructions)
def open_file(filename):
"""Opens a documentation file from the internet."""
return urllib.request.urlopen(filename)
def response_to_lines(response):
"""Converts an HTTP response to a list containing each line of text."""
return response.read().decode("utf-8").replace("\xad", "").split("\n")
def remove_comments(line):
"""Removes comments from a line of a documentation file."""
regex_match = comment_regex.search(line)
if regex_match:
return line[:regex_match.start()]
else:
return line
def mode_change(mode_name):
if mode_name == "mnemos":
return ParseMode.MNEMONICS
elif mode_name == "documentation-mnemos":
return ParseMode.DESCRIPTIONS
else:
return ParseMode.IGNORE
def parse_mnemonics(line, line_num, cpu_type, instructions):
regex_match = mnemonic_regex.match(line)
if regex_match:
mnemonic = regex_match.group("mnemonic")
name = regex_match.group("name")
if mnemonic not in instructions:
instructions[mnemonic] = Instruction(mnemonic, cpu_type)
instructions[mnemonic].name = name
else:
print(f"Mnemonic parsing: Match failure on line {str(line_num)}")
print(" " + line)
def parse_descriptions(line, line_num, cpu_type, instructions):
start_match = description_start_regex.match(line)
continue_match = description_continue_regex.match(line)
if start_match:
mnemonic = start_match.group("mnemonic")
parse_descriptions.last_mnemonic = mnemonic
long_name = start_match.group("long_name")
if mnemonic not in instructions:
instructions[mnemonic] = Instruction(mnemonic, cpu_type)
instructions[mnemonic].long_name = long_name
elif continue_match:
mnemonic = parse_descriptions.last_mnemonic
description = continue_match.group("description")
instructions[mnemonic].description.append(description)
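# For reference, each instruction is emitted as a JavaScript case of the
# following shape (mnemonic and tooltip text here are only illustrative):
#
#     case "LDA":
#         return {
#             "html": "<p>...</p>",
#             "tooltip": "...",
#             "url": "https://www.pagetable.com/c64ref/6502/?cpu=6502&tab=2#LDA",
#         };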
def write_script(filename, instructions):
script = ["export function getAsmOpcode(opcode) {",
" if (!opcode) return;",
" switch (opcode.toUpperCase()) {"]
for inst in instructions.values():
script.append(f" case \"{inst.mnemonic}\":")
script.append(" return {")
html = f"{16 * ' '}\"html\": \""
html += inst.html_description()
html += "\","
script.append(html)
if inst.long_name:
safe_ln = escape_quotes(inst.long_name)
script.append(f"{16 * ' '}\"tooltip\": \"{safe_ln}\",")
elif inst.name:
safe_n = escape_quotes(inst.name)
script.append(f"{16 * ' '}\"tooltip\": \"{safe_n}\",")
else:
script.append(f"{16 * ' '}\"tooltip\": \"{inst.mnemonic}\",")
# Will need to be replaced when other 65xx CPUs are added
s = "https://www.pagetable.com/c64ref/6502/?cpu="
e = "&tab=2#"
t = inst.cpu_type
m = inst.mnemonic
script.append(f"{16 * ' '}\"url\": \"{s}{t}{e}{m}\",")
script.append(12 * " " + "};")
script.append("")
script.append(" }")
script.append("}")
with open(filename, "w") as f:
print(f"Writing output to {filename}...")
f.write("\n".join(script))
#print("\n".join(script))
def escape_quotes(string):
return string.replace("\"", "\\\"")
def get_arguments():
parser = argparse.ArgumentParser()
help_text = "the location to which the script will be written"
relative_path = "/../../../lib/handlers/asm-docs-6502.js"
script_path = os.path.realpath(__file__)
script_dir = os.path.dirname(script_path)
default_path = os.path.normpath(script_dir + relative_path)
parser.add_argument("-o", "--output", help=help_text, default=default_path)
return parser.parse_args()
def main():
args = get_arguments()
instructions = get_instructions()
#for inst in instructions.values():
#print(inst.__dict__)
write_script(args.output, instructions)
if __name__ == "__main__":
main()
|
docs/examples/container/joyent/instantiate_driver.py | dupontz/libcloud | 1,435 | 12755817 | from libcloud.container.types import Provider
from libcloud.container.providers import get_driver
cls = get_driver(Provider.JOYENT)
conn = cls(host='us-east-1.docker.joyent.com', port=2376,
key_file='key.pem', cert_file='~/.sdc/docker/admin/ca.pem')
conn.list_images()
|
tika-parsers/src/main/resources/org/apache/tika/parser/captioning/tf/im2txtapi.py | dedabob/tika | 1,299 | 12755818 | <reponame>dedabob/tika<filename>tika-parsers/src/main/resources/org/apache/tika/parser/captioning/tf/im2txtapi.py
#!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
This script exposes image captioning service over a REST API. Image captioning implementation based on the paper,
"Show and Tell: A Neural Image Caption Generator"
<NAME>, <NAME>, <NAME>, <NAME>
For more details, please visit :
http://arxiv.org/abs/1411.4555
Requirements :
Flask
tensorflow
numpy
requests
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import logging
import math
import requests
import sys
from flask import Flask, request, Response, jsonify
from io import BytesIO
from PIL import Image
from time import time
import tensorflow as tf
import xml.etree.ElementTree as ET
import model_wrapper
import vocabulary
import caption_generator
# turning off the traceback by limiting its depth
sys.tracebacklimit = 0
# informative log messages for advanced users to troubleshoot errors when modifying model_info.xml
try:
info = ET.parse('/usr/share/apache-tika/models/dl/image/caption/model_info.xml').getroot()
except IOError:
logging.exception('model_info.xml is not found')
sys.exit(1)
model_main = info.find('model_main')
if model_main is None:
    logging.exception('<model_main> tag in model_info.xml is not found')
sys.exit(1)
checkpoint_path = model_main.find('checkpoint_path')
if checkpoint_path is None:
logging.exception('<checkpoint_path> tag under <model_main> tag in model_info.xml is not found')
sys.exit(1)
else:
checkpoint_path = checkpoint_path.text
vocab_file = model_main.find('vocab_file')
if vocab_file is None:
logging.exception('<vocab_file> tag under <model_main> tag in model_info.xml is not found')
sys.exit(1)
else:
vocab_file = vocab_file.text
port = info.get('port')
if port is None:
logging.exception('port attribute in <service> tag in model_info.xml is not found')
sys.exit(1)
# turning on the traceback by setting it to default
sys.tracebacklimit = 1000
FLAGS = tf.flags.FLAGS
tf.flags.DEFINE_string("checkpoint_path", checkpoint_path, """Directory containing the model checkpoint file.""")
tf.flags.DEFINE_string('vocab_file', vocab_file, """Text file containing the vocabulary.""")
tf.flags.DEFINE_integer('port', port, """Server PORT, default:8764""")
tf.logging.set_verbosity(tf.logging.INFO)
class Initializer(Flask):
"""
Class to initialize the REST API, this class loads the model from the given checkpoint path in model_info.xml
and prepares a caption_generator object
"""
def __init__(self, name):
super(Initializer, self).__init__(name)
# build the inference graph
g = tf.Graph()
with g.as_default():
model = model_wrapper.ModelWrapper()
restore_fn = model.build_graph(FLAGS.checkpoint_path)
g.finalize()
# make the model globally available
self.model = model
# create the vocabulary
self.vocab = vocabulary.Vocabulary(FLAGS.vocab_file)
self.sess = tf.Session(graph=g)
# load the model from checkpoint
restore_fn(self.sess)
def current_time():
"""Returns current time in milli seconds"""
return int(1000 * time())
app = Initializer(__name__)
def get_remote_file(url, success=200, timeout=10):
"""
Given HTTP URL, this api gets the content of it
returns (Content-Type, image_content)
"""
try:
app.logger.info("GET: %s" % url)
auth = None
res = requests.get(url, stream=True, timeout=timeout, auth=auth)
if res.status_code == success:
return res.headers.get('Content-Type', 'application/octet-stream'), res.raw.data
except:
pass
return None, None
@app.route("/")
def index():
"""The index page which provide information about other API end points"""
return """
<div>
<h1> Image Captioning REST API </h1>
<h3> The following API end points are valid </h3>
<ul>
<h4> Inception V3 </h4>
<li> <code>/inception/v3/ping </code> - <br/>
<b> Description : </b> checks availability of the service. returns "pong" with status 200 when it is available
</li>
<li> <code>/inception/v3/caption/image</code> - <br/>
<table>
<tr><th align="left"> Description </th><td> This is a service that can caption images</td></tr>
<tr><th align="left"> How to supply Image Content </th></tr>
<tr><th align="left"> With HTTP GET : </th> <td>
Include a query parameter <code>url </code> which is an http url of JPEG image <br/>
Example: <code> curl "localhost:8764/inception/v3/caption/image?url=http://xyz.com/example.jpg"</code>
</td></tr>
<tr><th align="left"> With HTTP POST :</th><td>
POST JPEG image content as binary data in request body. <br/>
Example: <code> curl -X POST "localhost:8764/inception/v3/caption/image" --data-binary @example.jpg </code>
</td></tr>
</table>
</li>
    </ul>
</div>
"""
@app.route("/inception/v3/ping", methods=["GET"])
def ping_pong():
"""API to do health check. If this says status code 200, then healthy"""
return "pong"
@app.route("/inception/v3/caption/image", methods=["GET", "POST"])
def caption_image():
"""API to caption images"""
image_format = "not jpeg"
st = current_time()
# get beam_size
beam_size = int(request.args.get("beam_size", "3"))
# get max_caption_length
max_caption_length = int(request.args.get("max_caption_length", "20"))
# get image_data
if request.method == 'POST':
image_data = request.get_data()
else:
url = request.args.get("url")
c_type, image_data = get_remote_file(url)
if not image_data:
return Response(status=400, response=jsonify(error="Could not HTTP GET %s" % url))
if 'image/jpeg' in c_type:
image_format = "jpeg"
# use c_type to find whether image_format is jpeg or not
# if jpeg, don't convert
if image_format == "jpeg":
jpg_image = image_data
# if not jpeg
else:
# open the image from raw bytes
image = Image.open(BytesIO(image_data))
# convert the image to RGB format, otherwise will give errors when converting to jpeg, if the image isn't RGB
rgb_image = image.convert("RGB")
# convert the RGB image to jpeg
image_bytes = BytesIO()
rgb_image.save(image_bytes, format="jpeg", quality=95)
jpg_image = image_bytes.getvalue()
image_bytes.close()
read_time = current_time() - st
# restart counter
st = current_time()
generator = caption_generator.CaptionGenerator(app.model,
app.vocab,
beam_size=beam_size,
max_caption_length=max_caption_length)
captions = generator.beam_search(app.sess, jpg_image)
captioning_time = current_time() - st
app.logger.info("Captioning time : %d" % captioning_time)
array_captions = []
for caption in captions:
sentence = [app.vocab.id_to_word(w) for w in caption.sentence[1:-1]]
sentence = " ".join(sentence)
array_captions.append({
'sentence': sentence,
'confidence': math.exp(caption.logprob)
})
response = {
'beam_size': beam_size,
'max_caption_length': max_caption_length,
'captions': array_captions,
'time': {
'read': read_time,
'captioning': captioning_time,
'units': 'ms'
}
}
return Response(response=json.dumps(response), status=200, mimetype="application/json")
def main(_):
if not app.debug:
print("Serving on port %d" % FLAGS.port)
app.run(host="0.0.0.0", port=FLAGS.port)
if __name__ == '__main__':
tf.app.run()
|
tests/core/usage/audit_usage_test.py | paulo-sampaio/detect-secrets | 2,212 | 12755819 | <filename>tests/core/usage/audit_usage_test.py
import pytest
from detect_secrets.core.usage import ParserBuilder
@pytest.fixture
def parser():
return ParserBuilder().add_console_use_arguments()
def test_normal_mode_requires_single_file(parser):
with pytest.raises(SystemExit):
parser.parse_args(['audit', 'fileA', 'fileB'])
def test_diff_mode_requires_two_files(parser):
with pytest.raises(SystemExit):
parser.parse_args(['audit', 'fileA', '--diff'])
|
tests/DataIntegrityTest.py | marcodafonseca/PyOpenWorm | 118 | 12755853 | from __future__ import print_function
from __future__ import absolute_import
import unittest
import csv
from owmeta_core.context import Context
from owmeta_core.command import OWM
from owmeta_core.bundle import Bundle
from owmeta.worm import Worm
from owmeta.cell import Cell
from owmeta.neuron import Neuron
from owmeta.connection import Connection
import rdflib as R
import pytest
@pytest.mark.inttest
@pytest.mark.data_bundle
class DataIntegrityTest(unittest.TestCase):
""" Integration tests that read from the database and ensure that basic
queries have expected answers, as a way to keep data quality high.
"""
@classmethod
def setUpClass(cls):
# grab the list of the names of the 302 neurons
csvfile = open('tests/neurons.csv', 'r')
reader = csv.reader(csvfile, delimiter=';', quotechar='|')
# array that holds the names of the 302 neurons at class-level scope
cls.neurons = []
for row in reader:
if len(row[0]) > 0: # Only saves valid neuron names
cls.neurons.append(row[0])
def setUp(self):
self.bnd = Bundle('openworm/owmeta-data')
self.bnd.initdb()
self.conn = self.bnd.connection
self.conf = self.conn.conf
self.g = self.conf["rdf.graph"]
self.context = self.conn(Context)(ident="http://openworm.org/data")
self.qctx = self.context.stored
def tearDown(self):
self.conn.disconnect()
def test_correct_neuron_number(self):
"""
This test verifies that the worm model has exactly 302 neurons.
"""
# FIXME: Test execution is not properly isolated -- it fails if
# test_compare_to_xls fails. Other conditions may cause
# it to pass
net = self.qctx(Worm).query().get_neuron_network()
self.assertEqual(302, net.neuron.count())
def test_correct_muscle_number(self):
"""
This test verifies that the worm model has exactly 158 muscles.
95 body wall muscles, 37 Pharynx muscles, 26 other muscles
See counts on row 3 here:
https://docs.google.com/spreadsheets/d/1NDx9LRF_B2phR5w4HlEtxJzxx1ZIPT2gA0ZmNmozjos/edit#gid=1
"""
self.assertEqual(158, self.qctx(Worm).query().muscle.count())
def test_INS_26_neuropeptide_neuron_list(self):
"""
This test verifies that the set of neurons which contain the
neuropeptide INS-26 is correct (the list is given below).
"""
neuronlist = self.qctx(Neuron)()
neuronlist.neuropeptide("INS-26")
thlist = set(x.name() for x in neuronlist.load())
self.assertEqual({'ASEL', 'ASER', 'ASIL', 'ASIR'}, thlist)
def test_bentley_expr_data(self):
"""
        This verifies that the receptor expression data in Bentley et al. (2016)
        has been incorporated, by checking that one of the novel receptor
expression patterns is in the worm.
"""
va9 = self.qctx(Neuron).query('VA9')
self.assertIn('LGC-53', va9.receptors())
def test_unique_neuron_node(self):
"""
There should one and only one unique RDF node for every neuron. If
more than one is present for a given cell name, then our data is
inconsistent. If there is not at least one present, then we are
missing neurons.
"""
results = {}
for n in self.neurons:
# Create a SPARQL query per neuron that looks for all RDF nodes
# that have text matching the name of the neuron
qres = self.g.query(
f"""
SELECT distinct ?n WHERE
{{
?n <{Cell.name.link}> {R.Literal(n).n3()}
}} LIMIT 5
""")
results[n] = (len(qres), [x[0] for x in qres])
# If there is not only one result back, then there is more than one RDF
# node.
more_than_one = [(x, results[x]) for x in results if results[x][0] > 1]
less_than_one = [(x, results[x]) for x in results if results[x][0] < 1]
self.assertEqual(
0,
len(more_than_one),
"Some neurons have more than 1 node: " +
"\n".join(
str(x) for x in more_than_one))
self.assertEqual(
0,
len(less_than_one),
"Some neurons have no node: " +
"\n".join(
str(x) for x in less_than_one))
def test_neurons_have_types(self):
"""
Every Neuron should have a non-blank type
"""
results = set()
for n in self.neurons:
s = f'''SELECT ?v WHERE {{
?k <{Cell.name.link}> {R.Literal(n).n3()} .
?k <http://www.w3.org/1999/02/22-rdf-syntax-ns#type> <{Neuron.rdf_type}> .
?k <{Neuron.type.link}> ?v .
}}'''
qres = self.g.query(s)
for x in qres:
v = x[0]
if isinstance(v, R.Literal):
results.add(n)
self.assertEqual(len(results),
len(self.neurons),
"Some neurons are missing a type: {}".format(set(self.neurons) - results))
def test_neuron_GJ_degree(self):
""" Get the number of gap junctions from a representation """
        # was 81 -- now returning 44 -- are we sure this is correct?
self.assertEqual(self.qctx(Neuron).query(name='AVAL').GJ_degree(), 44)
def test_neuron_Syn_degree(self):
""" Get the number of chemical synapses from a representation """
# was 187 -- now returning 105 -- are we sure this is correct?
self.assertEqual(self.qctx(Neuron).query(name='AVAL').Syn_degree(), 105)
@unittest.skip("have not yet defined asserts")
def test_what_nodes_get_type_info(self):
qres = self.g.query("""SELECT ?o ?p ?s WHERE {{
?o <http://openworm.org/entities/SimpleProperty/value> "motor".
?o ?p ?s # for that type ?o, get its value ?v
}} LIMIT 10
""")
for row in qres:
print(row)
def test_all_cells_have_wormbaseID(self):
""" This test verifies that every cell has a Wormbase ID. """
cells = set(self.qctx(Cell)().load())
for cell in cells:
assert cell.wormbaseID() is not None
def test_all_neurons_have_wormbaseID(self):
""" This test verifies that every neuron has a Wormbase ID. """
net = self.qctx(Worm).query().get_neuron_network()
for neuron_object in net.neurons():
assert neuron_object.wormbaseID() is not None
def test_all_muscles_have_wormbaseID(self):
""" This test verifies that every muscle has a Wormbase ID. """
muscles = self.qctx(Worm).query().muscles()
for muscle_object in muscles:
assert muscle_object.wormbaseID() is not None
def test_all_neurons_are_cells(self):
""" This test verifies that all Neuron objects are also Cell objects. """
net = self.qctx(Worm).query().get_neuron_network()
for neuron_object in net.neurons():
self.assertIsInstance(neuron_object, Cell)
def test_all_muscles_are_cells(self):
""" This test verifies that all Muscle objects are also Cell objects. """
muscles = self.qctx(Worm).query().muscles()
for muscle_object in muscles:
self.assertIsInstance(muscle_object, Cell)
def test_correct_connections_number(self):
""" This test verifies that there are exactly 7319 connections. """
net = self.qctx(Worm).query().get_neuron_network()
# XXX: The synapses contain some cells that aren't neurons
self.assertEqual(7319, net.synapses.count())
def test_number_neuron_to_neuron(self):
"""
This test verifies that the worm model has exactly 5805 neuron to neuron
connections.
"""
synapse = self.qctx(Connection)()
synapse.termination('neuron')
self.qctx(Worm).query().get_neuron_network().synapse(synapse)
self.assertEqual(5805, synapse.count())
def test_number_neuron_to_muscle(self):
"""
This test verifies that the worm model has exactly 1111 neuron to muscle
connections.
"""
synapse = self.qctx(Connection)()
synapse.termination('muscle')
self.qctx(Worm).query().get_neuron_network().synapse(synapse)
self.assertEqual(1111, synapse.count())
def test_correct_number_unique_neurons(self):
"""
This test verifies that the worm model has exactly 300 unique neurons
making connections.
"""
synapse = self.qctx(Connection)()
pre = self.qctx(Neuron)()
synapse.pre_cell(pre)
self.qctx(Worm).query().get_neuron_network().synapse(synapse)
self.assertEqual(300, pre.count())
def test_unconnected_neurons(self):
"""
This test verifies that there are exactly 2 unconnected neurons,
i.e., CANL and CANR, in the new connectome.
"""
# In previous tests, there is a check for exactly 302 neurons in total.
# There is also a test for exactly 300 unique neurons making connections.
# That means it should be enough to check that the set {CANL, CANR} and
# the set of neurons making connections are disjoint.
neuron = self.qctx(Neuron)()
synapse = self.qctx(Connection)()
synapse.pre_cell(neuron)
self.qctx(Worm).query().get_neuron_network().synapse(synapse)
connected_neurons = set()
unconnected_neurons = {'CANL', 'CANR'}
for name in neuron.name.get():
connected_neurons.add(name)
self.assertTrue(connected_neurons.isdisjoint(unconnected_neurons))
def test_neuron_lineage_names(self):
"""
Neurons should have lineage names in the bundle
"""
neuron = self.qctx(Neuron)()
self.qctx(Worm).query().get_neuron_network().neuron(neuron)
for n in neuron.load():
assert set(n.lineageName.get())
|
examples/create_new_document.py | MrTeferi/photoshop-python-api | 270 | 12755865 | """Create a new document."""
# Import local modules
from photoshop import Session
with Session() as ps:
ps.app.preferences.rulerUnits = ps.Units.Pixels
ps.app.documents.add(1920, 1080, name="my_new_document")
|
examples/shap/multiclass_classification.py | PeterSulcs/mlflow | 10,351 | 12755866 | import os
import numpy as np
from sklearn.datasets import load_iris
from sklearn.ensemble import RandomForestClassifier
import shap
import mlflow
from utils import to_pandas_Xy
# prepare training data
X, y = to_pandas_Xy(load_iris())
# train a model
model = RandomForestClassifier()
model.fit(X, y)
# log an explanation
with mlflow.start_run() as run:
mlflow.shap.log_explanation(model.predict_proba, X)
# list artifacts
client = mlflow.tracking.MlflowClient()
artifact_path = "model_explanations_shap"
artifacts = [x.path for x in client.list_artifacts(run.info.run_id, artifact_path)]
print("# artifacts:")
print(artifacts)
# load back the logged explanation
dst_path = client.download_artifacts(run.info.run_id, artifact_path)
base_values = np.load(os.path.join(dst_path, "base_values.npy"))
shap_values = np.load(os.path.join(dst_path, "shap_values.npy"))
# show a force plot
shap.force_plot(base_values[0], shap_values[0, 0, :], X.iloc[0, :], matplotlib=True)
|
Validation/HcalHits/python/pion100GeV_HF_cfg.py | ckamtsikis/cmssw | 852 | 12755893 | import FWCore.ParameterSet.Config as cms
process = cms.Process("GEN")
# this will run plug-in energy-flat random particle gun
# and puts particles (HepMCPRoduct) into edm::Event
process.load("SimGeneral.HepPDTESSource.pdt_cfi")
process.RandomNumberGeneratorService = cms.Service("RandomNumberGeneratorService",
moduleSeeds = cms.PSet(
generator = cms.untracked.uint32(456789)
),
sourceSeed = cms.untracked.uint32(54321)
)
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(2000)
)
process.source = cms.Source("EmptySource")
process.generator = cms.EDProducer("FlatRandomEGunProducer",
PGunParameters = cms.PSet(
PartID = cms.vint32(211),
MinEta = cms.double(3.5765),
MaxEta = cms.double(3.5765),
MinPhi = cms.double(0.6109),
MaxPhi = cms.double(0.6109),
MinE = cms.double(100.0),
MaxE = cms.double(100.0)
),
AddAntiParticle = cms.bool(False),
psethack = cms.string('single pion 100GeV on fwd hcal'),
Verbosity = cms.untracked.int32(0), ## for printouts, set it to 1 (or greater)
firstRun = cms.untracked.uint32(1)
)
process.GEN = cms.OutputModule("PoolOutputModule",
fileName = cms.untracked.string('mc_pi+100_etaphi344.root')
)
process.p1 = cms.Path(process.generator)
process.p2 = cms.EndPath(process.GEN)
|
actions/lib/comments.py | userlocalhost/stackstorm-datadog | 164 | 12755899 | from base import DatadogBaseAction
from datadog import api
class DatadogCreateComment(DatadogBaseAction):
def _run(self, **kwargs):
return api.Comment.create(**kwargs)
class DatadogDeleteComment(DatadogBaseAction):
def _run(self, **kwargs):
return api.Comment.delete(kwargs.pop("comment_id"))
class DatadogEditComment(DatadogBaseAction):
def _run(self, **kwargs):
return api.Comment.update(kwargs.pop("comment_id"), **kwargs)
|
Chapter 09/ch09_r10.py | PacktPublishing/Modern-Python-Cookbook | 107 | 12755931 | """Python Cookbook
Chapter 9, recipe 10.
"""
import logging
import sys
from logging import Formatter
from pathlib import Path
def create_log():
PROD_LOG_FORMAT = ('[{asctime}]'
' {levelname} in {module}: {message}'
)
with Path('sample.log').open('w') as sample_log_file:
logging.basicConfig( stream=sample_log_file, level=logging.DEBUG )
logger = logging.getLogger()
for handler in logger.handlers:
handler.setFormatter(Formatter(PROD_LOG_FORMAT, style='{'))
logger.info("Sample Message One")
logger.debug("Debugging")
logger.warn("Something might have gone wrong")
import re
from pathlib import Path
import csv
log_pattern = re.compile(
r"\[(?P<timestamp>.*?)\]"
r"\s(?P<levelname>\w+)"
r"\sin\s(?P<module>[\w\._]+):"
r"\s(?P<message>.*)")
def extract_row_iter(source_log_file):
for line in source_log_file:
match = log_pattern.match(line)
if match is None: continue
yield match.groupdict()
def parse_log():
summary_path = Path('summary_log.csv')
with summary_path.open('w') as summary_file:
writer = csv.DictWriter(summary_file,
['timestamp', 'levelname', 'module', 'message'])
writer.writeheader()
source_log_dir = Path('.')
for source_log_path in source_log_dir.glob('*.log'):
with source_log_path.open() as source_log_file:
writer.writerows(
extract_row_iter(source_log_file)
)
print('Converted', source_log_path, 'to', summary_path)
def counting_extract_row_iter(counts, source_log_file):
for line in source_log_file:
match = log_pattern.match(line)
if match is None:
counts['non-match'] += 1
continue
counts['valid'] += 1
yield match.groupdict()
from collections import Counter
def parse_log2():
summary_path = Path('summary_log.csv')
with summary_path.open('w') as summary_file:
writer = csv.DictWriter(summary_file,
['timestamp', 'levelname', 'module', 'message'])
writer.writeheader()
source_log_dir = Path('.')
for source_log_path in source_log_dir.glob('*.log'):
counts = Counter()
with source_log_path.open() as source_log_file:
writer.writerows(
counting_extract_row_iter(counts, source_log_file)
)
print('Converted', source_log_path, 'to', summary_path)
print(counts)
if __name__ == "__main__":
create_log()
parse_log2()
|
intake/__init__.py | mattkram/intake | 149 | 12755935 | #-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2018, Anaconda, Inc. and Intake contributors
# All rights reserved.
#
# The full license is in the LICENSE file, distributed with this software.
#-----------------------------------------------------------------------------
import importlib
import re
import logging
import os
import warnings
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
from .source import registry
from .catalog.base import Catalog
imports = {
"DataSource": "intake.source.base:DataSource",
'Schema': "intake.source.base:Schema",
"load_combo_catalog": "intake.catalog.default:load_combo_catalog",
"upload": "intake.container:upload",
"gui": "intake.interface:instance",
"cat": "intake.catalog:builtin",
"output_notebook": "intake.interface:output_notebook",
"register_driver": "intake.source:register_driver",
"unregister_driver": "intake.source:unregister_driver",
}
openers = set()
logger = logging.getLogger('intake')
def __getattr__(attr):
"""Lazy attribute propagator
    Defers imports of functions until they are needed, according to the
    contents of the ``imports`` (submodules and classes) and ``openers``
    (functions which instantiate data sources directly) dicts. All keys
    in ``openers`` must start with "open_", else they will be ignored.
"""
gl = globals()
if attr in openers and attr[:5] == "open_":
driver = registry[attr[5:]] # "open_..."
gl[attr] = driver
else:
if attr in gl:
return gl[attr]
elif attr in imports:
dest = imports[attr]
modname = dest.split(":", 1)[0]
logger.debug("Importing: %s" % modname)
mod = importlib.import_module(modname)
if ":" in dest:
gl[attr] = getattr(mod, dest.split(":")[1])
else:
gl[attr] = mod
if attr == "__all__":
return __dir__()
try:
return gl[attr]
except KeyError:
raise AttributeError(attr)
def __dir__(*_, **__):
return sorted(list(globals()) + list(openers) + list(imports))
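# Illustration of the lazy mechanism above (a sketch, not executed here):
# the names come from the ``imports`` dict and nothing is imported until the
# attribute is first touched, e.g.
#
#   import intake
#   intake.DataSource   # first access imports ``intake.source.base``
#   intake.cat          # first access imports ``intake.catalog`` and returns ``builtin``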
def make_open_functions():
"""From the current state of ``registry``, create open_* functions"""
from .source.discovery import drivers
for name in drivers.enabled_plugins():
func_name = 'open_' + name
if not func_name.isidentifier():
# primitive name normalization
func_name = re.sub('[-=~^&|@+]', '_', func_name)
if func_name.isidentifier():
# stash name for dir() and later fetch
openers.add(func_name)
else:
            warnings.warn('Invalid Intake plugin name "%s" found.' % name, stacklevel=2)
make_open_functions()
def open_catalog(uri=None, **kwargs):
"""Create a Catalog object
Can load YAML catalog files, connect to an intake server, or create any
arbitrary Catalog subclass instance. In the general case, the user should
supply ``driver=`` with a value from the plugins registry which has a
container type of catalog. File locations can generally be remote, if
specifying a URL protocol.
The default behaviour if not specifying the driver is as follows:
    - if ``uri`` is a single string ending in "yml" or "yaml", open it as a
catalog file
- if ``uri`` is a list of strings, a string containing a glob character
("*") or a string not ending in "y(a)ml", open as a set of catalog
files. In the latter case, assume it is a directory.
    - if ``uri`` begins with protocol ``"intake:"``, connect to a remote
Intake server
- if ``uri`` is ``None`` or missing, create a base Catalog object without entries.
Parameters
----------
uri: str or pathlib.Path
Designator for the location of the catalog.
kwargs:
passed to subclass instance, see documentation of the individual
catalog classes. For example, ``yaml_files_cat`` (when specifying
multiple uris or a glob string) takes the additional
parameter ``flatten=True|False``, specifying whether all data sources
are merged in a single namespace, or each file becomes
a sub-catalog.
See also
--------
intake.open_yaml_files_cat, intake.open_yaml_file_cat,
intake.open_intake_remote
"""
driver = kwargs.pop('driver', None)
if isinstance(uri, os.PathLike):
uri = os.fspath(uri)
if driver is None:
if uri:
if ((isinstance(uri, str) and "*" in uri)
or ((isinstance(uri, (list, tuple))) and len(uri) > 1)):
# glob string or list of files/globs
driver = 'yaml_files_cat'
elif isinstance(uri, (list, tuple)) and len(uri) == 1:
uri = uri[0]
if "*" in uri[0]:
# single glob string in a list
driver = 'yaml_files_cat'
else:
# single filename in a list
driver = 'yaml_file_cat'
elif isinstance(uri, str):
# single URL
if uri.startswith('intake:'):
# server
driver = 'intake_remote'
else:
if uri.endswith(('.yml', '.yaml')):
driver = 'yaml_file_cat'
else:
uri = uri.rstrip('/') + '/*.y*ml'
driver = 'yaml_files_cat'
else:
raise ValueError("URI not understood: %s" % uri)
else:
# empty cat
driver = 'catalog'
if '_file' not in driver:
kwargs.pop('fs', None)
if driver not in registry:
raise ValueError('Unknown catalog driver (%s), supply one of: %s'
% (driver, list(sorted(registry))))
return registry[driver](uri, **kwargs)
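# Usage sketch for ``open_catalog`` (file and server names below are
# placeholders, not real locations):
#
#   cat = open_catalog("catalog.yml")              # single YAML file -> yaml_file_cat
#   cat = open_catalog("catalogs/*.yaml")          # glob -> yaml_files_cat
#   cat = open_catalog("intake://localhost:5000")  # remote server -> intake_remote
#   cat = open_catalog()                           # empty base Catalog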
|
cmake/macros/compilePython.py | chunkified/usd-qt | 124 | 12755954 | #
# Copyright 2016 Pixar
#
# Licensed under the Apache License, Version 2.0 (the "Apache License")
# with the following modification; you may not use this file except in
# compliance with the Apache License and the following modification to it:
# Section 6. Trademarks. is deleted and replaced with:
#
# 6. Trademarks. This License does not grant permission to use the trade
# names, trademarks, service marks, or product names of the Licensor
# and its affiliates, except as required to comply with Section 4(c) of
# the License and to reproduce the content of the NOTICE file.
#
# You may obtain a copy of the Apache License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the Apache License with the above modification is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the Apache License for the specific
# language governing permissions and limitations under the Apache License.
#
#
# Usage: compilePython src.py file.py file.pyc
#
# This program compiles python code, providing a reasonable
# gcc-esque error message if errors occur.
#
# parameters:
# src.py - the source file to report errors for
# file.py - the installed location of the file
# file.pyc - the precompiled python file
import sys
import py_compile
if len(sys.argv) < 4:
print "Usage: %s src.py file.py file.pyc" % sys.argv[0]
sys.exit(1)
try:
py_compile.compile(sys.argv[2], sys.argv[3], sys.argv[1], doraise=True)
except py_compile.PyCompileError as compileError:
exc_value = compileError.exc_value
if compileError.exc_type_name == SyntaxError.__name__:
# py_compile.compile stashes the type name and args of the exception
# in the raised PyCompileError rather than the exception itself. This
# is especially annoying because the args member of some SyntaxError
# instances are lacking the source information tuple, but do have a
# usable lineno.
error = exc_value[0]
try:
linenumber = exc_value[1][1]
line = exc_value[1][3]
print '%s:%s: %s: "%s"' % (sys.argv[1], linenumber, error, line)
except IndexError:
print '%s: Syntax error: "%s"' % (sys.argv[1], error)
else:
print "%s: Unhandled compile error: (%s) %s" % (
sys.argv[1], compileError.exc_type_name, exc_value)
sys.exit(1)
except:
exc_type, exc_value, exc_traceback = sys.exc_info()
print "%s: Unhandled exception: %s" % (sys.argv[1], exc_value)
sys.exit(1)
|
simpleeval__examples__calc/get_value_from__url_http_request__call_function.py | DazEB2/SimplePyScripts | 117 | 12755972 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
# pip install simpleeval
from simpleeval import SimpleEval
def get_from_url(value):
import requests
rs = requests.get('https://httpbin.org/get', params={'value': value})
return rs.json()['args']['value']
my_eval = SimpleEval()
my_eval.functions['get'] = get_from_url
print(my_eval.eval("get('123') + get('45')")) # '12345'
print(my_eval.eval("int(get('123')) + int(get('45'))")) # 168
|
tests/query/bugs/fixed_bigint_2031.py | liuqian1990/nebula | 8,586 | 12756011 | # --coding:utf-8--
#
# Copyright (c) 2020 vesoft inc. All rights reserved.
#
# This source code is licensed under Apache 2.0 License.
import time
from tests.common.nebula_test_suite import NebulaTestSuite
class TestBigInt(NebulaTestSuite):
@classmethod
def prepare(self):
resp = self.execute(
'CREATE SPACE IF NOT EXISTS BigInt2031(partition_num={partition_num}, replica_factor={replica_factor})'
.format(partition_num=self.partition_num,
replica_factor=self.replica_factor))
self.check_resp_succeeded(resp)
time.sleep(self.delay)
resp = self.execute('USE BigInt2031')
self.check_resp_succeeded(resp)
def test_issue2031(self):
time.sleep(self.delay)
resp = self.execute(
'CREATE TAG person1(name string, age bigint)')
self.check_resp_failed(resp)
resp = self.execute(
'CREATE TAG person2(name string, age bigint DEFAULT 100)')
self.check_resp_failed(resp)
resp = self.execute(
'CREATE TAG person3(name string, age Bigint)')
self.check_resp_failed(resp)
resp = self.execute(
'CREATE TAG person4(name string, age BIGINT)')
self.check_resp_failed(resp)
@classmethod
def cleanup(self):
resp = self.execute('drop space BigInt2031')
self.check_resp_succeeded(resp)
|
tests/blocks/signal/iirfilter_spec.py | telent/luaradio | 559 | 12756036 | import numpy
import scipy.signal
from generate import *
def generate():
def gentaps(n):
b, a = scipy.signal.butter(n - 1, 0.5)
return b.astype(numpy.float32), a.astype(numpy.float32)
def process(b_taps, a_taps, x):
return [scipy.signal.lfilter(b_taps, a_taps, x).astype(type(x[0]))]
vectors = []
x = random_complex64(256)
b_taps, a_taps = gentaps(3)
vectors.append(TestVector([b_taps, a_taps], [x], process(b_taps, a_taps, x), "3 Float32 b taps, 3 Float32 a taps, 256 ComplexFloat32 input, 256 ComplexFloat32 output"))
b_taps, a_taps = gentaps(5)
vectors.append(TestVector([b_taps, a_taps], [x], process(b_taps, a_taps, x), "5 Float32 b taps, 5 Float32 a taps, 256 ComplexFloat32 input, 256 ComplexFloat32 output"))
b_taps, a_taps = gentaps(10)
vectors.append(TestVector([b_taps, a_taps], [x], process(b_taps, a_taps, x), "10 Float32 b taps, 10 Float32 a taps, 256 ComplexFloat32 input, 256 ComplexFloat32 output"))
x = random_float32(256)
b_taps, a_taps = gentaps(3)
vectors.append(TestVector([b_taps, a_taps], [x], process(b_taps, a_taps, x), "3 Float32 b taps, 3 Float32 a taps, 256 Float32 input, 256 Float32 output"))
b_taps, a_taps = gentaps(5)
vectors.append(TestVector([b_taps, a_taps], [x], process(b_taps, a_taps, x), "5 Float32 b taps, 5 Float32 a taps, 256 Float32 input, 256 Float32 output"))
b_taps, a_taps = gentaps(10)
vectors.append(TestVector([b_taps, a_taps], [x], process(b_taps, a_taps, x), "10 Float32 b taps, 10 Float32 a taps, 256 Float32 input, 256 Float32 output"))
return BlockSpec("IIRFilterBlock", vectors, 1e-6)
|
tools/make_patches.py | jiskra/openmv | 1,761 | 12756050 | #!/usr/bin/env python2
# This file is part of the OpenMV project.
#
# Copyright (c) 2013-2021 <NAME> <<EMAIL>>
# Copyright (c) 2013-2021 <NAME> <<EMAIL>>
#
# This work is licensed under the MIT license, see the file LICENSE for details.
#
# This script creates smaller patches from images.
import os, sys
import argparse
import random
import numpy as np
from skimage import io
from skimage import exposure
from sklearn.feature_extraction import image
def main():
# CMD args parser
parser = argparse.ArgumentParser(description='Generate smaller patches from images')
parser.add_argument("--input", action = "store", help = "Input images dir")
parser.add_argument("--output", action = "store", help = "Output images dir")
parser.add_argument("--width", action = "store", help = "Patch width", type=int, default = 32)
parser.add_argument("--height", action = "store", help = "Patch height", type=int, default = 32)
parser.add_argument("--patches", action = "store", help = "Number of patches", type=int, default = 10)
# Parse CMD args
args = parser.parse_args()
if (args.input == None or args.output == None):
parser.print_help()
sys.exit(1)
count = 0
images = os.listdir(args.input)
while (count < args.patches):
random.shuffle(images)
for i in xrange(len(images)):
img = io.imread(args.input+'/'+images[i])
patches = image.extract_patches_2d(img,
patch_size=(args.width, args.height),
max_patches=100, random_state=np.random.RandomState(0))
random.shuffle(patches)
for p in patches:
                # Save only patches that are not low contrast
if (exposure.is_low_contrast(p) == False):
io.imsave(args.output+'/patch_%.4d.ppm'%(count), p)
count += 1
break
if (count == args.patches):
break
if __name__ == '__main__':
main()
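# Example invocation (directory names are placeholders):
#   python make_patches.py --input ./images --output ./patches --width 32 --height 32 --patches 100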
|
notebooks/solutions/03A_faces_plot.py | agramfort/scipy-2017-sklearn | 659 | 12756051 | import matplotlib.pyplot as plt
from sklearn.datasets import fetch_olivetti_faces
faces = fetch_olivetti_faces()
# set up the figure
fig = plt.figure(figsize=(6, 6)) # figure size in inches
fig.subplots_adjust(left=0, right=1, bottom=0, top=1, hspace=0.05, wspace=0.05)
# plot the faces:
for i in range(64):
ax = fig.add_subplot(8, 8, i + 1, xticks=[], yticks=[])
ax.imshow(faces.images[i], cmap=plt.cm.bone, interpolation='nearest')
|
lib/galaxy/model/migrate/versions/0164_page_format.py | rikeshi/galaxy | 1,085 | 12756063 | """
Adds page content format.
"""
import datetime
import logging
from sqlalchemy import Column, MetaData
from galaxy.model.custom_types import TrimmedString
from galaxy.model.migrate.versions.util import add_column, drop_column
now = datetime.datetime.utcnow
log = logging.getLogger(__name__)
metadata = MetaData()
def upgrade(migrate_engine):
metadata.bind = migrate_engine
print(__doc__)
metadata.reflect()
content_format_column = Column('content_format', TrimmedString(32), default='html', server_default="html", nullable=False)
add_column(content_format_column, 'page_revision', metadata)
def downgrade(migrate_engine):
metadata.bind = migrate_engine
metadata.reflect()
drop_column('content_format', 'page_revision', metadata)
|
examples/python/scripted_step.py | nathawes/swift-lldb | 427 | 12756110 | #############################################################################
# This script contains two trivial examples of simple "scripted step" classes.
# To fully understand how the lldb "Thread Plan" architecture works, read the
# comments at the beginning of ThreadPlan.h in the lldb sources. The python
# interface is a reduced version of the full internal mechanism, but captures
# most of the power with a much simpler interface.
#
# But I'll attempt a brief summary here.
# Stepping in lldb is done independently for each thread. Moreover, the stepping
# operations are stackable. So for instance if you did a "step over", and in
# the course of stepping over you hit a breakpoint, stopped and stepped again,
# the first "step-over" would be suspended, and the new step operation would
# be enqueued. Then if that step over caused the program to hit another breakpoint,
# lldb would again suspend the second step and return control to the user, so
# now there are two pending step overs. Etc. with all the other stepping
# operations. Then if you hit "continue" the bottom-most step-over would complete,
# and another continue would complete the first "step-over".
#
# lldb represents this system with a stack of "Thread Plans". Each time a new
# stepping operation is requested, a new plan is pushed on the stack. When the
# operation completes, it is pushed off the stack.
#
# The bottom-most plan in the stack is the immediate controller of stepping,
# most importantly, when the process resumes, the bottom most plan will get
# asked whether to set the program running freely, or to instruction-single-step
# the current thread. In the scripted interface, you indicate this by returning
# False or True respectively from the should_step method.
#
# Each time the process stops the thread plan stack for each thread that stopped
# "for a reason", Ii.e. a single-step completed on that thread, or a breakpoint
# was hit), is queried to determine how to proceed, starting from the most
# recently pushed plan, in two stages:
#
# 1) Each plan is asked if it "explains" the stop. The first plan to claim the
# stop wins. In scripted Thread Plans, this is done by returning True from
# the "explains_stop method. This is how, for instance, control is returned
# to the User when the "step-over" plan hits a breakpoint. The step-over
# plan doesn't explain the breakpoint stop, so it returns false, and the
# breakpoint hit is propagated up the stack to the "base" thread plan, which
# is the one that handles random breakpoint hits.
#
# 2) Then the plan that won the first round is asked if the process should stop.
# This is done in the "should_stop" method. The scripted plans actually do
# three jobs in should_stop:
# a) They determine if they have completed their job or not. If they have
# they indicate that by calling SetPlanComplete on their thread plan.
# b) They decide whether they want to return control to the user or not.
# They do this by returning True or False respectively.
# c) If they are not done, they set up whatever machinery they will use
# the next time the thread continues.
#
# Note that deciding to return control to the user, and deciding your plan
#     is done, are orthogonal operations. You could set up the next phase of
# stepping, and then return True from should_stop, and when the user next
# "continued" the process your plan would resume control. Of course, the
# user might also "step-over" or some other operation that would push a
# different plan, which would take control till it was done.
#
# One other detail you should be aware of, if the plan below you on the
# stack was done, then it will be popped and the next plan will take control
# and its "should_stop" will be called.
#
# Note also, there should be another method called when your plan is popped,
# to allow you to do whatever cleanup is required. I haven't gotten to that
# yet. For now you should do that at the same time you mark your plan complete.
#
# 3) After the round of negotiation over whether to stop or not is done, all the
# plans get asked if they are "stale". If they are say they are stale
# then they will get popped. This question is asked with the "is_stale" method.
#
# This is useful, for instance, in the FinishPrintAndContinue plan. What might
# happen here is that after continuing but before the finish is done, the program
# could hit another breakpoint and stop. Then the user could use the step
# command repeatedly until they leave the frame of interest by stepping.
# In that case, the step plan is the one that will be responsible for stopping,
# and the finish plan won't be asked should_stop, it will just be asked if it
# is stale. In this case, if the step_out plan that the FinishPrintAndContinue
# plan is driving is stale, so is ours, and it is time to do our printing.
#
# Both examples show stepping through an address range for 20 bytes from the
# current PC. The first one does it by single stepping and checking a condition.
# It doesn't, however handle the case where you step into another frame while
# still in the current range in the starting frame.
#
# That is better handled in the second example by using the built-in StepOverRange
# thread plan.
#
# To use these stepping modes, you would do:
#
# (lldb) command script import scripted_step.py
# (lldb) thread step-scripted -C scripted_step.SimpleStep
# or
#
# (lldb) thread step-scripted -C scripted_step.StepWithPlan
import lldb
class SimpleStep:
def __init__(self, thread_plan, dict):
self.thread_plan = thread_plan
self.start_address = thread_plan.GetThread().GetFrameAtIndex(0).GetPC()
def explains_stop(self, event):
# We are stepping, so if we stop for any other reason, it isn't
# because of us.
if self.thread_plan.GetThread().GetStopReason() == lldb.eStopReasonTrace:
return True
else:
return False
def should_stop(self, event):
cur_pc = self.thread_plan.GetThread().GetFrameAtIndex(0).GetPC()
if cur_pc < self.start_address or cur_pc >= self.start_address + 20:
self.thread_plan.SetPlanComplete(True)
return True
else:
return False
def should_step(self):
return True
class StepWithPlan:
def __init__(self, thread_plan, dict):
self.thread_plan = thread_plan
self.start_address = thread_plan.GetThread().GetFrameAtIndex(0).GetPCAddress()
self.step_thread_plan = thread_plan.QueueThreadPlanForStepOverRange(
self.start_address, 20)
def explains_stop(self, event):
        # Since all I'm doing is running a plan, I will only ever get asked this
        # if my plan doesn't explain the stop, and in that case I don't either.
return False
def should_stop(self, event):
if self.step_thread_plan.IsPlanComplete():
self.thread_plan.SetPlanComplete(True)
return True
else:
return False
def should_step(self):
return False
# Here's another example which does "step over" through the current function,
# and when it stops at each line, it checks some condition (in this example the
# value of a variable) and stops if that condition is true.
class StepCheckingCondition:
def __init__(self, thread_plan, dict):
self.thread_plan = thread_plan
self.start_frame = thread_plan.GetThread().GetFrameAtIndex(0)
self.queue_next_plan()
def queue_next_plan(self):
cur_frame = self.thread_plan.GetThread().GetFrameAtIndex(0)
cur_line_entry = cur_frame.GetLineEntry()
start_address = cur_line_entry.GetStartAddress()
end_address = cur_line_entry.GetEndAddress()
line_range = end_address.GetFileAddress() - start_address.GetFileAddress()
self.step_thread_plan = self.thread_plan.QueueThreadPlanForStepOverRange(
start_address, line_range)
def explains_stop(self, event):
# We are stepping, so if we stop for any other reason, it isn't
# because of us.
return False
def should_stop(self, event):
if not self.step_thread_plan.IsPlanComplete():
return False
frame = self.thread_plan.GetThread().GetFrameAtIndex(0)
if not self.start_frame.IsEqual(frame):
self.thread_plan.SetPlanComplete(True)
return True
# This part checks the condition. In this case we are expecting
# some integer variable called "a", and will stop when it is 20.
a_var = frame.FindVariable("a")
if not a_var.IsValid():
print "A was not valid."
return True
error = lldb.SBError()
a_value = a_var.GetValueAsSigned(error)
if not error.Success():
print "A value was not good."
return True
if a_value == 20:
self.thread_plan.SetPlanComplete(True)
return True
else:
self.queue_next_plan()
return False
def should_step(self):
return True
# Here's an example that steps out of the current frame, gathers some information
# and then continues. The information in this case is rax. Currently the thread
# plans are not a safe place to call lldb command-line commands, so the information
# is gathered through SB API calls.
class FinishPrintAndContinue:
def __init__(self, thread_plan, dict):
self.thread_plan = thread_plan
self.step_out_thread_plan = thread_plan.QueueThreadPlanForStepOut(
0, True)
self.thread = self.thread_plan.GetThread()
def is_stale(self):
if self.step_out_thread_plan.IsPlanStale():
self.do_print()
return True
else:
return False
def explains_stop(self, event):
return False
def should_stop(self, event):
if self.step_out_thread_plan.IsPlanComplete():
self.do_print()
self.thread_plan.SetPlanComplete(True)
return False
def do_print(self):
frame_0 = self.thread.frames[0]
rax_value = frame_0.FindRegister("rax")
if rax_value.GetError().Success():
print "RAX on exit: ", rax_value.GetValue()
else:
print "Couldn't get rax value:", rax_value.GetError().GetCString()
|
zfit/constraint.py | nsahoo/zfit | 129 | 12756131 | # Copyright (c) 2021 zfit
import tensorflow as tf
from .core.constraint import (GaussianConstraint, PoissonConstraint,
SimpleConstraint, LogNormalConstraint)
from .util import ztyping
__all__ = ["nll_gaussian", "SimpleConstraint", "GaussianConstraint", "PoissonConstraint", "LogNormalConstraint"]
def nll_gaussian(params: ztyping.ParamTypeInput, observation: ztyping.NumericalScalarType,
uncertainty: ztyping.NumericalScalarType) -> tf.Tensor:
"""Return negative log likelihood graph for gaussian constraints on a list of parameters.
Args:
params: The parameters to constraint.
observation: observed values of the parameter.
uncertainty: Uncertainties or covariance/error.
matrix of the observed values. Can either be a single value, a list of values, an array or a tensor.
Returns:
The constraint object.
Raises:
ShapeIncompatibleError: if params, mu and sigma don't have the same size.
"""
return GaussianConstraint(params=params, observation=observation, uncertainty=uncertainty)
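# Usage sketch (illustrative only; assumes standard zfit ``Parameter`` objects
# and made-up numbers, not values taken from the zfit documentation):
#
#   import zfit
#   mu = zfit.Parameter("mu", 1.2)
#   sigma = zfit.Parameter("sigma", 0.4)
#   constraint = nll_gaussian(params=[mu, sigma],
#                             observation=[1.0, 0.5],
#                             uncertainty=[0.1, 0.05])
#   # the returned GaussianConstraint can then be passed to a zfit loss via
#   # its ``constraints=`` argument.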
|
aliyun-python-sdk-eipanycast/aliyunsdkeipanycast/request/v20200309/AllocateAnycastEipAddressRequest.py | yndu13/aliyun-openapi-python-sdk | 1,001 | 12756144 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkeipanycast.endpoint import endpoint_data
class AllocateAnycastEipAddressRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Eipanycast', '2020-03-09', 'AllocateAnycastEipAddress','eipanycast')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_Bandwidth(self):
return self.get_query_params().get('Bandwidth')
def set_Bandwidth(self,Bandwidth):
self.add_query_param('Bandwidth',Bandwidth)
def get_ServiceLocation(self):
return self.get_query_params().get('ServiceLocation')
def set_ServiceLocation(self,ServiceLocation):
self.add_query_param('ServiceLocation',ServiceLocation)
def get_ClientToken(self):
return self.get_query_params().get('ClientToken')
def set_ClientToken(self,ClientToken):
self.add_query_param('ClientToken',ClientToken)
def get_Description(self):
return self.get_query_params().get('Description')
def set_Description(self,Description):
self.add_query_param('Description',Description)
def get_InternetChargeType(self):
return self.get_query_params().get('InternetChargeType')
def set_InternetChargeType(self,InternetChargeType):
self.add_query_param('InternetChargeType',InternetChargeType)
def get_Name(self):
return self.get_query_params().get('Name')
def set_Name(self,Name):
self.add_query_param('Name',Name)
def get_InstanceChargeType(self):
return self.get_query_params().get('InstanceChargeType')
def set_InstanceChargeType(self,InstanceChargeType):
self.add_query_param('InstanceChargeType',InstanceChargeType) |
scripts/external_libs/scapy-2.4.3/scapy/arch/windows/native.py | timgates42/trex-core | 956 | 12756162 | # This file is part of Scapy
# See http://www.secdev.org/projects/scapy for more information
# Copyright (C) <NAME> <<EMAIL>>
# Copyright (C) <NAME> <<EMAIL>>
# This program is published under a GPLv2 license
"""
Native Microsoft Windows sockets (L3 only)
## Notice: ICMP packets
DISCLAIMER: Please use Npcap/Winpcap to send/receive ICMP. It is going to work.
Below is some additional information, mainly implemented in a testing purpose.
When in native mode, everything goes through the Windows kernel.
This firstly requires that the Firewall is open. Be sure it allows ICMPv4/6
packets in and out.
Windows may drop packets that it finds wrong. for instance, answers to
ICMP packets with id=0 or seq=0 may be dropped. It means that sent packets
should (most of the time) be perfectly built.
A perfectly built ICMP req packet on Windows means that its id is 1, its
checksum (IP and ICMP) are correctly built, but also that its seq number is
in the "allowed range".
In fact, every time an ICMP packet is sent on Windows, a global sequence
number is increased, which is only reset at boot time. The seq number of the
received ICMP packet must be in the range [current, current + 3] to be valid,
and received by the socket. The current number is quite hard to get, thus we
provide in this module the get_current_icmp_seq() function.
Example:
>>> conf.use_pcap = False
>>> a = conf.L3socket()
# This will (most likely) work:
>>> current = get_current_icmp_seq()
>>> a.sr(IP(dst="www.google.com", ttl=128)/ICMP(id=1, seq=current))
# This won't:
>>> a.sr(IP(dst="www.google.com", ttl=128)/ICMP())
PS: on computers where the firewall isn't open, Windows temporarily opens it
when using the `ping` util from cmd.exe. One can first call a ping on cmd,
then do custom calls through the socket using get_current_icmp_seq(). See
the tests (windows.uts) for an example.
"""
import io
import os
import socket
import subprocess
import time
from scapy.automaton import SelectableObject
from scapy.arch.common import _select_nonblock
from scapy.arch.windows.structures import GetIcmpStatistics
from scapy.compat import raw
from scapy.config import conf
from scapy.data import MTU
from scapy.error import Scapy_Exception, warning
from scapy.supersocket import SuperSocket
# Watch out for import loops (inet...)
class L3WinSocket(SuperSocket, SelectableObject):
desc = "a native Layer 3 (IPv4) raw socket under Windows"
nonblocking_socket = True
__slots__ = ["promisc", "cls", "ipv6", "proto"]
def __init__(self, iface=None, proto=socket.IPPROTO_IP,
ttl=128, ipv6=False, promisc=True, **kwargs):
from scapy.layers.inet import IP
from scapy.layers.inet6 import IPv6
for kwarg in kwargs:
warning("Dropping unsupported option: %s" % kwarg)
af = socket.AF_INET6 if ipv6 else socket.AF_INET
self.proto = proto
if ipv6:
from scapy.arch import get_if_addr6
self.host_ip6 = get_if_addr6(conf.iface) or "::1"
if proto == socket.IPPROTO_IP:
# We'll restrict ourselves to UDP, as TCP isn't bindable
# on AF_INET6
self.proto = socket.IPPROTO_UDP
# On Windows, with promisc=False, you won't get much
self.ipv6 = ipv6
self.cls = IPv6 if ipv6 else IP
self.promisc = promisc
# Notes:
# - IPPROTO_RAW only works to send packets.
# - IPPROTO_IPV6 exists in MSDN docs, but using it will result in
# no packets being received. Same for its options (IPV6_HDRINCL...)
# However, using IPPROTO_IP with AF_INET6 will still receive
# the IPv6 packets
try:
self.ins = socket.socket(af,
socket.SOCK_RAW,
self.proto)
self.outs = socket.socket(af,
socket.SOCK_RAW,
socket.IPPROTO_RAW)
except OSError as e:
if e.errno == 10013:
raise OSError("Windows native L3 Raw sockets are only "
"usable as administrator ! "
"Install Winpcap/Npcap to workaround !")
raise
self.ins.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.outs.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.ins.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, 2**30)
self.outs.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, 2**30)
# IOCTL Include IP headers
self.ins.setsockopt(socket.IPPROTO_IP, socket.IP_HDRINCL, 1)
self.outs.setsockopt(socket.IPPROTO_IP, socket.IP_HDRINCL, 1)
# set TTL
self.ins.setsockopt(socket.IPPROTO_IP, socket.IP_TTL, ttl)
self.outs.setsockopt(socket.IPPROTO_IP, socket.IP_TTL, ttl)
# Bind on all ports
iface = iface or conf.iface
host = iface.ip if iface.ip else socket.gethostname()
self.ins.bind((host, 0))
self.ins.setblocking(False)
# Get as much data as possible: reduce what is cropped
if ipv6:
try: # Not all Windows versions
self.ins.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVTCLASS, 1)
self.ins.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_HOPLIMIT, 1)
except (OSError, socket.error):
pass
else:
try: # Not Windows XP
self.ins.setsockopt(socket.IPPROTO_IP,
socket.IP_RECVDSTADDR, 1)
except (OSError, socket.error):
pass
try: # Windows 10+ recent builds only
self.ins.setsockopt(socket.IPPROTO_IP, socket.IP_RECVTTL, 1)
except (OSError, socket.error):
pass
if promisc:
# IOCTL Receive all packets
self.ins.ioctl(socket.SIO_RCVALL, socket.RCVALL_ON)
def send(self, x):
data = raw(x)
if self.cls not in x:
raise Scapy_Exception("L3WinSocket can only send IP/IPv6 packets !"
" Install Npcap/Winpcap to send more")
dst_ip = str(x[self.cls].dst)
self.outs.sendto(data, (dst_ip, 0))
def nonblock_recv(self, x=MTU):
return self.recv()
# https://docs.microsoft.com/en-us/windows/desktop/winsock/tcp-ip-raw-sockets-2 # noqa: E501
# - For IPv4 (address family of AF_INET), an application receives the IP
# header at the front of each received datagram regardless of the
# IP_HDRINCL socket option.
# - For IPv6 (address family of AF_INET6), an application receives
# everything after the last IPv6 header in each received datagram
# regardless of the IPV6_HDRINCL socket option. The application does
# not receive any IPv6 headers using a raw socket.
def recv_raw(self, x=MTU):
try:
data, address = self.ins.recvfrom(x)
except io.BlockingIOError:
return None, None, None
from scapy.layers.inet import IP
from scapy.layers.inet6 import IPv6
if self.ipv6:
# AF_INET6 does not return the IPv6 header. Let's build it
# (host, port, flowinfo, scopeid)
host, _, flowinfo, _ = address
header = raw(IPv6(src=host,
dst=self.host_ip6,
fl=flowinfo,
nh=self.proto, # fixed for AF_INET6
plen=len(data)))
return IPv6, header + data, time.time()
else:
return IP, data, time.time()
def check_recv(self):
return True
def close(self):
if not self.closed and self.promisc:
self.ins.ioctl(socket.SIO_RCVALL, socket.RCVALL_OFF)
super(L3WinSocket, self).close()
@staticmethod
def select(sockets, remain=None):
return _select_nonblock(sockets, remain=remain)
class L3WinSocket6(L3WinSocket):
desc = "a native Layer 3 (IPv6) raw socket under Windows"
def __init__(self, **kwargs):
super(L3WinSocket6, self).__init__(ipv6=True, **kwargs)
def open_icmp_firewall(host):
"""Temporarily open the ICMP firewall. Tricks Windows into allowing
ICMP packets for a short period of time (~ 1 minute)"""
# We call ping with a timeout of 1ms: will return instantly
with open(os.devnull, 'wb') as DEVNULL:
return subprocess.Popen("ping -4 -w 1 -n 1 %s" % host,
shell=True,
stdout=DEVNULL,
stderr=DEVNULL).wait()
def get_current_icmp_seq():
"""See help(scapy.arch.windows.native) for more information.
Returns the current ICMP seq number."""
return GetIcmpStatistics()['stats']['icmpOutStats']['dwEchos']
|
nose2/tests/functional/support/such/test_such_timing.py | deeplow/nose2 | 637 | 12756179 | import time
from nose2.tools import such
def slow_blocking_init():
print("YEAH2")
time.sleep(1)
print("a second elapsed")
time.sleep(1)
print("a second elapsed")
return True
class Layer1(object):
description = "Layer1 description"
@classmethod
def setUp(cls):
print("YEAH")
it.obj = False
class Layer2(object):
description = "Layer2 description"
@classmethod
def setUp(cls):
it.obj = slow_blocking_init()
with such.A("system with a fast initial setup layer") as it:
it.uses(Layer1)
@it.should("not have obj initialized")
def test():
assert not it.obj
with it.having("a second slow setup layer"):
it.uses(Layer2)
@it.should("have obj initialized")
def test2():
assert it.obj
it.createTests(globals())
|
scratchpad/issues_streamlit/issue_cannot_cache_class_method.py | R-fred/awesome-streamlit | 1,194 | 12756185 | import streamlit as st
class App:
def run(self):
st.title("Cannot st.cache classmethod issue")
App.get_data1()
st.info("data1 loaded")
self.get_data2()
st.info("data2 loaded")
@classmethod
@st.cache
def get_data1(cls):
pass
@st.cache
@classmethod
def get_data2(cls):
pass
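# The two methods differ only in decorator order: in get_data1, st.cache wraps the
# plain function before classmethod is applied; in get_data2, st.cache receives the
# classmethod object itself. The repro exercises both orderings of the decorators.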
App().run()
|
keras/legacy_tf_layers/migration_utils_test.py | tsheaff/keras | 37,222 | 12756192 | """Tests for migration_utils."""
from keras.initializers import GlorotUniform as V2GlorotUniform
from keras.legacy_tf_layers import migration_utils
import tensorflow as tf
class DeterministicRandomTestToolTest(tf.test.TestCase):
def test_constant_mode_no_seed(self):
"""Test random tensor generation consistancy in constant mode.
Verify that the random tensor generated without using the seed is
consistant between graph and eager mode
"""
# Generate three random tensors to show how the stateful random number
# generation and glorot_uniform_initializer match between sessions and
# eager execution.
random_tool = migration_utils.DeterministicRandomTestTool()
with random_tool.scope():
graph = tf.Graph()
with graph.as_default(), tf.compat.v1.Session(graph=graph) as sess:
a = tf.compat.v1.random.uniform(shape=(3, 1))
        # adding additional computation/ops to the graph and ensuring consistent
# random number generation
a = a * 3
b = tf.compat.v1.random.uniform(shape=(3, 3))
b = b * 3
c = tf.compat.v1.random.uniform(shape=(3, 3))
c = c * 3
d = tf.compat.v1.glorot_uniform_initializer()(
shape=(6, 6), dtype=tf.float32)
graph_a, graph_b, graph_c, graph_d = sess.run([a, b, c, d])
a = tf.compat.v2.random.uniform(shape=(3, 1))
a = a * 3
b = tf.compat.v2.random.uniform(shape=(3, 3))
b = b * 3
c = tf.compat.v2.random.uniform(shape=(3, 3))
c = c * 3
d = V2GlorotUniform()(shape=(6, 6), dtype=tf.float32)
# validate that the generated random tensors match
self.assertAllClose(graph_a, a)
self.assertAllClose(graph_b, b)
self.assertAllClose(graph_c, c)
self.assertAllClose(graph_d, d)
# In constant mode, because b and c were generated with the same seed within
# the same scope and have the same shape, they will have exactly the same
# values.
# validate that b and c are the same, also graph_b and graph_c
self.assertAllClose(b, c)
self.assertAllClose(graph_b, graph_c)
def test_constant_mode_seed_argument(self):
"""Test random tensor generation consistancy in constant mode.
Verify that the random tensor generated by setting the global seeed
in the args is consistant between graph and eager mode.
"""
random_tool = migration_utils.DeterministicRandomTestTool()
with random_tool.scope():
graph = tf.Graph()
with graph.as_default(), tf.compat.v1.Session(graph=graph) as sess:
        # adding additional computation/ops to the graph and ensuring consistent
# random number generation
a = tf.compat.v1.random.uniform(shape=(3, 1), seed=1234)
a = a * 3
b = tf.compat.v1.random.uniform(shape=(3, 3), seed=1234)
b = b * 3
c = tf.compat.v1.glorot_uniform_initializer(seed=1234)(
shape=(6, 6), dtype=tf.float32)
graph_a, graph_b, graph_c = sess.run([a, b, c])
a = tf.compat.v2.random.uniform(shape=(3, 1), seed=1234)
a = a * 3
b = tf.compat.v2.random.uniform(shape=(3, 3), seed=1234)
b = b * 3
c = V2GlorotUniform(seed=1234)(shape=(6, 6), dtype=tf.float32)
# validate that the generated random tensors match
self.assertAllClose(graph_a, a)
self.assertAllClose(graph_b, b)
self.assertAllClose(graph_c, c)
def test_num_rand_ops(self):
"""Test random tensor generation consistancy in num_random_ops mode.
Verify that the random tensor generated without using the seed is
consistant between graph and eager mode.
Random tensor generated should be different based on random ops ordering
"""
random_tool = migration_utils.DeterministicRandomTestTool(
mode="num_random_ops")
with random_tool.scope():
graph = tf.Graph()
with graph.as_default(), tf.compat.v1.Session(graph=graph) as sess:
        # adding additional computation/ops to the graph and ensuring consistent
# random number generation
a = tf.compat.v1.random.uniform(shape=(3, 1))
a = a * 3
b = tf.compat.v1.random.uniform(shape=(3, 3))
b = b * 3
c = tf.compat.v1.random.uniform(shape=(3, 3))
c = c * 3
d = tf.compat.v1.glorot_uniform_initializer()(
shape=(6, 6), dtype=tf.float32)
graph_a, graph_b, graph_c, graph_d = sess.run([a, b, c, d])
random_tool = migration_utils.DeterministicRandomTestTool(
mode="num_random_ops")
with random_tool.scope():
a = tf.compat.v2.random.uniform(shape=(3, 1))
a = a * 3
b = tf.compat.v2.random.uniform(shape=(3, 3))
b = b * 3
c = tf.compat.v2.random.uniform(shape=(3, 3))
c = c * 3
d = V2GlorotUniform()(shape=(6, 6), dtype=tf.float32)
# validate that the generated random tensors match
self.assertAllClose(graph_a, a)
self.assertAllClose(graph_b, b)
self.assertAllClose(graph_c, c)
self.assertAllClose(graph_d, d)
# validate that the tensors differ based on ops ordering
self.assertNotAllClose(b, c)
self.assertNotAllClose(graph_b, graph_c)
def test_num_rand_ops_program_order(self):
"""Test random tensor generation consistancy in num_random_ops mode.
validate that in this mode random number generation is sensitive to program
order, so the generated random tesnors should not match.
"""
random_tool = migration_utils.DeterministicRandomTestTool(
mode="num_random_ops")
with random_tool.scope():
a = tf.random.uniform(shape=(3, 1))
      # adding additional computation/ops to the graph and ensuring consistent
# random number generation
a = a * 3
b = tf.random.uniform(shape=(3, 3))
b = b * 3
random_tool = migration_utils.DeterministicRandomTestTool(
mode="num_random_ops")
with random_tool.scope():
b_prime = tf.random.uniform(shape=(3, 3))
      # adding additional computation/ops to the graph and ensuring consistent
# random number generation
b_prime = b_prime * 3
a_prime = tf.random.uniform(shape=(3, 1))
a_prime = a_prime * 3
# validate that the tensors are different
self.assertNotAllClose(a, a_prime)
self.assertNotAllClose(b, b_prime)
def test_num_rand_ops_operation_seed(self):
"""Test random tensor generation consistancy in num_random_ops mode.
validate if random number generation match across two different program
orders.
"""
random_tool = migration_utils.DeterministicRandomTestTool(
mode="num_random_ops")
with random_tool.scope():
# operation seed = 0
a = tf.random.uniform(shape=(3, 1))
a = a * 3
# operation seed = 1
b = tf.random.uniform(shape=(3, 3))
b = b * 3
random_tool = migration_utils.DeterministicRandomTestTool(
mode="num_random_ops")
with random_tool.scope():
random_tool.operation_seed = 1
b_prime = tf.random.uniform(shape=(3, 3))
b_prime = b_prime * 3
random_tool.operation_seed = 0
a_prime = tf.random.uniform(shape=(3, 1))
a_prime = a_prime * 3
self.assertAllClose(a, a_prime)
self.assertAllClose(b, b_prime)
def test_num_rand_ops_disallow_repeated_ops_seed(self):
"""Test random tensor generation consistancy in num_random_ops mode.
validate if DeterministicRandomTestTool disallows reusing already-used
operation seeds.
"""
random_tool = migration_utils.DeterministicRandomTestTool(
mode="num_random_ops")
with random_tool.scope():
random_tool.operation_seed = 1
b_prime = tf.random.uniform(shape=(3, 3))
b_prime = b_prime * 3
random_tool.operation_seed = 0
a_prime = tf.random.uniform(shape=(3, 1))
a_prime = a_prime * 3
error_string = "An exception should have been raised before this"
error_raised = "An exception should have been raised before this"
try:
c = tf.random.uniform(shape=(3, 1))
raise RuntimeError(error_string)
except ValueError as err:
err_raised = err
self.assertNotEqual(err_raised, error_string)
if __name__ == "__main__":
tf.test.main()
|
build/python/tests/lib/lib.py | allansrc/fuchsia | 210 | 12756252 | #!/usr/bin/env python3.8
# Copyright 2021 The Fuchsia Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
def f():
print('lib.f')
def truthy():
return True
def falsy():
return False
|
tf2_utils/main.py | NoAchache/aster | 704 | 12756254 | <filename>tf2_utils/main.py
import argparse
import os
from typing import List
import cv2
import numpy as np
import tensorflow as tf
from tensorflow.keras.preprocessing.text import Tokenizer
from tf2_utils.inferer import Inferer
TARGET_IMAGE_HEIGHT = 64
TARGET_IMAGE_WIDTH = 256
CHAR_VECTOR = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~"
def infere_images(images_path: List[str]):
inferer = Inferer()
tokenizer = get_tokenizer()
for image_path in images_path:
image = load_image(image_path)
logits = inferer(image)
sequence_length = [logits.shape[1]]
sequences_decoded = tf.nn.ctc_greedy_decoder(
tf.transpose(logits, [1, 0, 2]), sequence_length, merge_repeated=False
)[0][0]
sequences_decoded = tf.sparse.to_dense(sequences_decoded).numpy()
word = tokenizer.sequences_to_texts(sequences_decoded)[0]
print(word)
def get_tokenizer():
tokenizer = Tokenizer(char_level=True, lower=False, oov_token="<OOV>")
tokenizer.fit_on_texts(CHAR_VECTOR)
return tokenizer
def load_image(image_path: str):
image = cv2.imread(os.path.join(image_path))
image = cv2.resize(
image, (TARGET_IMAGE_WIDTH, TARGET_IMAGE_HEIGHT)
)
image = image.astype(np.float32) / 127.5 - 1.0
return tf.expand_dims(tf.constant(image), 0)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
"--images_path",
nargs="+",
type=str,
help="Path to the images to infere",
)
args = parser.parse_args()
infere_images(args.images_path)
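    # Example invocation (paths are placeholders):
    #   python tf2_utils/main.py --images_path word1.png word2.png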
|
tests/test_global.py | gitmoneyyy123/VintageEx | 134 | 12756255 | <filename>tests/test_global.py
import unittest
from vex.parsers.g_cmd import GlobalLexer
class TestGlobalLexer(unittest.TestCase):
def setUp(self):
self.lexer = GlobalLexer()
def testCanMatchFullPattern(self):
actual = self.lexer.parse(r'/foo/p#')
self.assertEqual(actual, ['foo', 'p#'])
def testCanMatchEmtpySearch(self):
actual = self.lexer.parse(r'//p#')
self.assertEqual(actual, ['', 'p#'])
def testCanEscapeCharactersInSearchPattern(self):
actual = self.lexer.parse(r'/\/foo\//p#')
self.assertEqual(actual, ['/foo/', 'p#'])
def testCanEscapeBackSlashes(self):
actual = self.lexer.parse(r'/\\/p#')
self.assertEqual(actual, ['\\', 'p#'])
if __name__ == '__main__':
unittest.main()
|
test/hummingbot/client/ui/test_hummingbot_cli.py | BGTCapital/hummingbot | 3,027 | 12756256 | import unittest
from prompt_toolkit.widgets import Button
from unittest.mock import patch, MagicMock
from hummingbot.client.tab.data_types import CommandTab
from hummingbot.client.ui.hummingbot_cli import HummingbotCLI
from hummingbot.client.ui.custom_widgets import CustomTextArea
class HummingbotCLITest(unittest.TestCase):
command_name = "command_1"
def setUp(self) -> None:
super().setUp()
tabs = {self.command_name: CommandTab(self.command_name, None, None, None, MagicMock())}
self.mock_hb = MagicMock()
self.app = HummingbotCLI(None, None, None, tabs)
self.app.app = MagicMock()
def test_handle_tab_command_on_close_argument(self):
tab = self.app.command_tabs[self.command_name]
tab.close_button = MagicMock()
tab.button = MagicMock()
tab.output_field = MagicMock()
self.app.handle_tab_command(self.mock_hb, self.command_name, {"close": True})
self.assertIsNone(tab.button)
self.assertIsNone(tab.close_button)
self.assertIsNone(tab.output_field)
self.assertFalse(tab.is_selected)
self.assertEqual(tab.tab_index, 0)
def test_handle_tab_command_create_new_tab_and_display(self):
tab = self.app.command_tabs[self.command_name]
self.app.handle_tab_command(self.mock_hb, self.command_name, {"close": False})
self.assertIsInstance(tab.button, Button)
self.assertIsInstance(tab.close_button, Button)
self.assertIsInstance(tab.output_field, CustomTextArea)
self.assertEqual(tab.tab_index, 1)
self.assertTrue(tab.is_selected)
self.assertTrue(tab.tab_class.display.called)
@patch("hummingbot.client.ui.layout.Layout")
@patch("hummingbot.client.ui.layout.FloatContainer")
@patch("hummingbot.client.ui.layout.ConditionalContainer")
@patch("hummingbot.client.ui.layout.Box")
@patch("hummingbot.client.ui.layout.HSplit")
@patch("hummingbot.client.ui.layout.VSplit")
def test_handle_tab_command_on_existing_tab(self, mock_vsplit, mock_hsplit, mock_box, moc_cc, moc_fc, mock_layout):
tab = self.app.command_tabs[self.command_name]
tab.button = MagicMock()
tab.output_field = MagicMock()
tab.close_button = MagicMock()
tab.is_selected = False
self.app.handle_tab_command(self.mock_hb, self.command_name, {"close": False})
self.assertTrue(tab.is_selected)
self.assertTrue(tab.tab_class.display.call_count == 1)
# Test display not called if there is a running task
tab.is_selected = False
tab.task = MagicMock()
tab.task.done.return_value = False
self.app.handle_tab_command(self.mock_hb, self.command_name, {"close": False})
self.assertTrue(tab.is_selected)
self.assertTrue(tab.tab_class.display.call_count == 1)
@patch("hummingbot.client.ui.layout.Layout")
@patch("hummingbot.client.ui.layout.FloatContainer")
@patch("hummingbot.client.ui.layout.ConditionalContainer")
@patch("hummingbot.client.ui.layout.Box")
@patch("hummingbot.client.ui.layout.HSplit")
@patch("hummingbot.client.ui.layout.VSplit")
def test_tab_navigation(self, mock_vsplit, mock_hsplit, mock_box, moc_cc, moc_fc, mock_layout):
tab2 = CommandTab("command_2", None, None, None, MagicMock(), False)
self.app.command_tabs["command_2"] = tab2
tab1 = self.app.command_tabs[self.command_name]
self.app.handle_tab_command(self.mock_hb, self.command_name, {"close": False})
self.app.handle_tab_command(self.mock_hb, "command_2", {"close": False})
self.assertTrue(tab2.is_selected)
self.app.tab_navigate_left()
self.assertTrue(tab1.is_selected)
self.assertFalse(tab2.is_selected)
self.app.tab_navigate_left()
self.assertTrue(all(not t.is_selected for t in self.app.command_tabs.values()))
self.app.tab_navigate_left()
self.assertTrue(all(not t.is_selected for t in self.app.command_tabs.values()))
self.app.tab_navigate_right()
self.assertTrue(tab1.is_selected)
self.app.tab_navigate_right()
self.assertFalse(tab1.is_selected)
self.assertTrue(tab2.is_selected)
self.app.tab_navigate_right()
self.assertFalse(tab1.is_selected)
self.assertTrue(tab2.is_selected)
|
src/pytorch_metric_learning/regularizers/sparse_centers_regularizer.py | cwkeam/pytorch-metric-learning | 4,357 | 12756265 | import torch
from ..distances import CosineSimilarity
from ..reducers import DivisorReducer
from ..utils import common_functions as c_f
from .base_regularizer import BaseRegularizer
class SparseCentersRegularizer(BaseRegularizer):
def __init__(self, num_classes, centers_per_class, **kwargs):
super().__init__(**kwargs)
assert centers_per_class > 1
c_f.assert_distance_type(self, CosineSimilarity)
self.set_class_masks(num_classes, centers_per_class)
self.add_to_recordable_attributes(
list_of_names=["num_classes", "centers_per_class"], is_stat=False
)
self.add_to_recordable_attributes(
list_of_names=["same_class_center_sim", "diff_class_center_sim"],
is_stat=True,
)
def compute_loss(self, weights):
center_similarities = self.distance(weights)
small_val = c_f.small_val(weights.dtype)
center_similarities_masked = torch.clamp(
2.0 * center_similarities[self.same_class_mask], max=2
)
divisor = 2 * torch.sum(self.same_class_mask)
reg = torch.sqrt(2.0 + small_val - center_similarities_masked)
self.set_stats(center_similarities)
return {
"loss": {
"losses": reg,
"indices": c_f.torch_arange_from_size(reg),
"reduction_type": "element",
"divisor": divisor,
}
}
def set_class_masks(self, num_classes, centers_per_class):
total_num_centers = num_classes * centers_per_class
self.diff_class_mask = torch.ones(
total_num_centers, total_num_centers, dtype=torch.bool
)
self.same_class_mask = torch.zeros(
total_num_centers, total_num_centers, dtype=torch.bool
)
for i in range(num_classes):
s, e = i * centers_per_class, (i + 1) * centers_per_class
curr_block = torch.ones(centers_per_class, centers_per_class)
curr_block = torch.triu(curr_block, diagonal=1)
self.same_class_mask[s:e, s:e] = curr_block
self.diff_class_mask[s:e, s:e] = 0
def set_stats(self, center_similarities):
if self.collect_stats:
with torch.no_grad():
self.same_class_center_sim = torch.mean(
center_similarities[self.same_class_mask]
).item()
self.diff_class_center_sim = torch.mean(
center_similarities[self.diff_class_mask]
).item()
def get_default_distance(self):
return CosineSimilarity()
def get_default_reducer(self):
return DivisorReducer()
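# Usage sketch (editor's illustration, assuming the regularizer is invoked like other
# pytorch-metric-learning regularizers, i.e. called directly on a weight matrix):
#   reg = SparseCentersRegularizer(num_classes=10, centers_per_class=3)
#   loss = reg(weights)  # weights: (10 * 3) x embedding_dim tensor of class centers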
|
tha2/nn/backcomp/nn/conv.py | luuil/talking-head-anime-2-demo | 626 | 12756287 | <gh_stars>100-1000
from torch.nn import Conv2d, Module, Sequential, InstanceNorm2d, ReLU, ConvTranspose2d
from tha2.nn.backcomp.nn.init_function import create_init_function
def Conv7(in_channels: int, out_channels: int, initialization_method='he') -> Module:
init = create_init_function(initialization_method)
return init(Conv2d(in_channels, out_channels, kernel_size=7, stride=1, padding=3, bias=False))
def Conv3(in_channels: int, out_channels: int, initialization_method='he') -> Module:
init = create_init_function(initialization_method)
return init(Conv2d(in_channels, out_channels, kernel_size=3, stride=1, padding=1, bias=False))
def Conv7Block(in_channels: int, out_channels: int, initialization_method='he') -> Module:
return Sequential(
Conv7(in_channels, out_channels, initialization_method),
InstanceNorm2d(out_channels, affine=True),
ReLU(inplace=True))
def DownsampleBlock(in_channels: int, initialization_method='he') -> Module:
init = create_init_function(initialization_method)
return Sequential(
init(Conv2d(in_channels, in_channels * 2, kernel_size=4, stride=2, padding=1, bias=False)),
InstanceNorm2d(in_channels * 2, affine=True),
ReLU(inplace=True))
def UpsampleBlock(in_channels: int, out_channels: int, initialization_method='he') -> Module:
init = create_init_function(initialization_method)
return Sequential(
init(ConvTranspose2d(in_channels, out_channels, kernel_size=4, stride=2, padding=1, bias=False)),
InstanceNorm2d(out_channels, affine=True),
ReLU(inplace=True))
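# How the helpers compose (reading the definitions above): Conv7Block keeps the
# spatial size (7x7 kernel, padding 3), DownsampleBlock halves it while doubling the
# channels, and UpsampleBlock doubles it back, e.g.:
#   Sequential(Conv7Block(3, 16), DownsampleBlock(16), UpsampleBlock(32, 16))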
|
test/python/algorithms/test_amplitude_estimators.py | TheGupta2012/qiskit-terra | 1,599 | 12756290 | <filename>test/python/algorithms/test_amplitude_estimators.py
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Test the quantum amplitude estimation algorithm."""
import unittest
from test.python.algorithms import QiskitAlgorithmsTestCase
import numpy as np
from ddt import ddt, idata, data, unpack
from qiskit import QuantumRegister, QuantumCircuit, BasicAer
from qiskit.circuit.library import QFT, GroverOperator
from qiskit.utils import QuantumInstance
from qiskit.algorithms import (
AmplitudeEstimation,
MaximumLikelihoodAmplitudeEstimation,
IterativeAmplitudeEstimation,
FasterAmplitudeEstimation,
EstimationProblem,
)
from qiskit.quantum_info import Operator, Statevector
class BernoulliStateIn(QuantumCircuit):
"""A circuit preparing sqrt(1 - p)|0> + sqrt(p)|1>."""
def __init__(self, probability):
super().__init__(1)
angle = 2 * np.arcsin(np.sqrt(probability))
self.ry(angle, 0)
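        # RY(2*arcsin(sqrt(p))) maps |0> to sqrt(1-p)|0> + sqrt(p)|1>, as the docstring states.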
class BernoulliGrover(QuantumCircuit):
"""The Grover operator corresponding to the Bernoulli A operator."""
def __init__(self, probability):
super().__init__(1, global_phase=np.pi)
self.angle = 2 * np.arcsin(np.sqrt(probability))
self.ry(2 * self.angle, 0)
def power(self, power, matrix_power=False):
if matrix_power:
return super().power(power, True)
powered = QuantumCircuit(1)
powered.ry(power * 2 * self.angle, 0)
return powered
class SineIntegral(QuantumCircuit):
r"""Construct the A operator to approximate the integral
\int_0^1 \sin^2(x) d x
with a specified number of qubits.
"""
def __init__(self, num_qubits):
qr_state = QuantumRegister(num_qubits, "state")
qr_objective = QuantumRegister(1, "obj")
super().__init__(qr_state, qr_objective)
# prepare 1/sqrt{2^n} sum_x |x>_n
self.h(qr_state)
# apply the sine/cosine term
self.ry(2 * 1 / 2 / 2 ** num_qubits, qr_objective[0])
for i, qubit in enumerate(qr_state):
self.cry(2 * 2 ** i / 2 ** num_qubits, qubit, qr_objective[0])
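        # For basis state |k> the accumulated rotation is 2*x with x = (k + 1/2) / 2**n,
        # so the objective qubit reads |1> with probability sin(x)^2; averaging over the
        # uniform superposition yields a midpoint Riemann sum of the integral above.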
@ddt
class TestBernoulli(QiskitAlgorithmsTestCase):
"""Tests based on the Bernoulli A operator.
This class tests
* the estimation result
* the constructed circuits
"""
def setUp(self):
super().setUp()
self._statevector = QuantumInstance(
backend=BasicAer.get_backend("statevector_simulator"),
seed_simulator=2,
seed_transpiler=2,
)
self._unitary = QuantumInstance(
backend=BasicAer.get_backend("unitary_simulator"),
shots=1,
seed_simulator=42,
seed_transpiler=91,
)
def qasm(shots=100):
return QuantumInstance(
backend=BasicAer.get_backend("qasm_simulator"),
shots=shots,
seed_simulator=2,
seed_transpiler=2,
)
self._qasm = qasm
@idata(
[
[0.2, AmplitudeEstimation(2), {"estimation": 0.5, "mle": 0.2}],
[0.49, AmplitudeEstimation(3), {"estimation": 0.5, "mle": 0.49}],
[0.2, MaximumLikelihoodAmplitudeEstimation([0, 1, 2]), {"estimation": 0.2}],
[0.49, MaximumLikelihoodAmplitudeEstimation(3), {"estimation": 0.49}],
[0.2, IterativeAmplitudeEstimation(0.1, 0.1), {"estimation": 0.2}],
[0.49, IterativeAmplitudeEstimation(0.001, 0.01), {"estimation": 0.49}],
[0.2, FasterAmplitudeEstimation(0.1, 3, rescale=False), {"estimation": 0.2}],
[0.12, FasterAmplitudeEstimation(0.1, 2, rescale=False), {"estimation": 0.12}],
]
)
@unpack
def test_statevector(self, prob, qae, expect):
"""statevector test"""
qae.quantum_instance = self._statevector
problem = EstimationProblem(BernoulliStateIn(prob), 0, BernoulliGrover(prob))
result = qae.estimate(problem)
self.assertGreaterEqual(self._statevector.time_taken, 0.0)
self._statevector.reset_execution_results()
for key, value in expect.items():
self.assertAlmostEqual(
value, getattr(result, key), places=3, msg=f"estimate `{key}` failed"
)
@idata(
[
[0.2, 100, AmplitudeEstimation(4), {"estimation": 0.14644, "mle": 0.193888}],
[0.0, 1000, AmplitudeEstimation(2), {"estimation": 0.0, "mle": 0.0}],
[
0.2,
100,
MaximumLikelihoodAmplitudeEstimation([0, 1, 2, 4, 8]),
{"estimation": 0.199606},
],
[0.8, 10, IterativeAmplitudeEstimation(0.1, 0.05), {"estimation": 0.811711}],
[0.2, 1000, FasterAmplitudeEstimation(0.1, 3, rescale=False), {"estimation": 0.198640}],
[
0.12,
100,
FasterAmplitudeEstimation(0.01, 3, rescale=False),
{"estimation": 0.119037},
],
]
)
@unpack
def test_qasm(self, prob, shots, qae, expect):
"""qasm test"""
qae.quantum_instance = self._qasm(shots)
problem = EstimationProblem(BernoulliStateIn(prob), [0], BernoulliGrover(prob))
result = qae.estimate(problem)
for key, value in expect.items():
self.assertAlmostEqual(
value, getattr(result, key), places=3, msg=f"estimate `{key}` failed"
)
@data(True, False)
def test_qae_circuit(self, efficient_circuit):
"""Test circuits resulting from canonical amplitude estimation.
Build the circuit manually and from the algorithm and compare the resulting unitaries.
"""
prob = 0.5
problem = EstimationProblem(BernoulliStateIn(prob), objective_qubits=[0])
for m in [2, 5]:
qae = AmplitudeEstimation(m)
angle = 2 * np.arcsin(np.sqrt(prob))
# manually set up the inefficient AE circuit
qr_eval = QuantumRegister(m, "a")
qr_objective = QuantumRegister(1, "q")
circuit = QuantumCircuit(qr_eval, qr_objective)
# initial Hadamard gates
for i in range(m):
circuit.h(qr_eval[i])
# A operator
circuit.ry(angle, qr_objective)
if efficient_circuit:
qae.grover_operator = BernoulliGrover(prob)
for power in range(m):
circuit.cry(2 * 2 ** power * angle, qr_eval[power], qr_objective[0])
else:
oracle = QuantumCircuit(1)
oracle.z(0)
state_preparation = QuantumCircuit(1)
state_preparation.ry(angle, 0)
grover_op = GroverOperator(oracle, state_preparation)
for power in range(m):
circuit.compose(
grover_op.power(2 ** power).control(),
qubits=[qr_eval[power], qr_objective[0]],
inplace=True,
)
# fourier transform
iqft = QFT(m, do_swaps=False).inverse().reverse_bits()
circuit.append(iqft.to_instruction(), qr_eval)
actual_circuit = qae.construct_circuit(problem, measurement=False)
self.assertEqual(Operator(circuit), Operator(actual_circuit))
@data(True, False)
def test_iqae_circuits(self, efficient_circuit):
"""Test circuits resulting from iterative amplitude estimation.
Build the circuit manually and from the algorithm and compare the resulting unitaries.
"""
prob = 0.5
problem = EstimationProblem(BernoulliStateIn(prob), objective_qubits=[0])
for k in [2, 5]:
qae = IterativeAmplitudeEstimation(0.01, 0.05)
angle = 2 * np.arcsin(np.sqrt(prob))
# manually set up the inefficient AE circuit
q_objective = QuantumRegister(1, "q")
circuit = QuantumCircuit(q_objective)
# A operator
circuit.ry(angle, q_objective)
if efficient_circuit:
qae.grover_operator = BernoulliGrover(prob)
circuit.ry(2 * k * angle, q_objective[0])
else:
oracle = QuantumCircuit(1)
oracle.z(0)
state_preparation = QuantumCircuit(1)
state_preparation.ry(angle, 0)
grover_op = GroverOperator(oracle, state_preparation)
for _ in range(k):
circuit.compose(grover_op, inplace=True)
actual_circuit = qae.construct_circuit(problem, k, measurement=False)
self.assertEqual(Operator(circuit), Operator(actual_circuit))
@data(True, False)
def test_mlae_circuits(self, efficient_circuit):
"""Test the circuits constructed for MLAE"""
prob = 0.5
problem = EstimationProblem(BernoulliStateIn(prob), objective_qubits=[0])
for k in [2, 5]:
qae = MaximumLikelihoodAmplitudeEstimation(k)
angle = 2 * np.arcsin(np.sqrt(prob))
# compute all the circuits used for MLAE
circuits = []
# 0th power
q_objective = QuantumRegister(1, "q")
circuit = QuantumCircuit(q_objective)
circuit.ry(angle, q_objective)
circuits += [circuit]
# powers of 2
for power in range(k):
q_objective = QuantumRegister(1, "q")
circuit = QuantumCircuit(q_objective)
# A operator
circuit.ry(angle, q_objective)
# Q^(2^j) operator
if efficient_circuit:
qae.grover_operator = BernoulliGrover(prob)
circuit.ry(2 * 2 ** power * angle, q_objective[0])
else:
oracle = QuantumCircuit(1)
oracle.z(0)
state_preparation = QuantumCircuit(1)
state_preparation.ry(angle, 0)
grover_op = GroverOperator(oracle, state_preparation)
for _ in range(2 ** power):
circuit.compose(grover_op, inplace=True)
circuits += [circuit]
actual_circuits = qae.construct_circuits(problem, measurement=False)
for actual, expected in zip(actual_circuits, circuits):
self.assertEqual(Operator(actual), Operator(expected))
@ddt
class TestSineIntegral(QiskitAlgorithmsTestCase):
"""Tests based on the A operator to integrate sin^2(x).
This class tests
* the estimation result
* the confidence intervals
"""
def setUp(self):
super().setUp()
self._statevector = QuantumInstance(
backend=BasicAer.get_backend("statevector_simulator"),
seed_simulator=123,
seed_transpiler=41,
)
def qasm(shots=100):
return QuantumInstance(
backend=BasicAer.get_backend("qasm_simulator"),
shots=shots,
seed_simulator=7192,
seed_transpiler=90000,
)
self._qasm = qasm
@idata(
[
[2, AmplitudeEstimation(2), {"estimation": 0.5, "mle": 0.270290}],
[4, MaximumLikelihoodAmplitudeEstimation(4), {"estimation": 0.272675}],
[3, IterativeAmplitudeEstimation(0.1, 0.1), {"estimation": 0.272082}],
[3, FasterAmplitudeEstimation(0.01, 1), {"estimation": 0.272082}],
]
)
@unpack
def test_statevector(self, n, qae, expect):
"""Statevector end-to-end test"""
# construct factories for A and Q
# qae.state_preparation = SineIntegral(n)
qae.quantum_instance = self._statevector
estimation_problem = EstimationProblem(SineIntegral(n), objective_qubits=[n])
# result = qae.run(self._statevector)
result = qae.estimate(estimation_problem)
self.assertGreaterEqual(self._statevector.time_taken, 0.0)
self._statevector.reset_execution_results()
for key, value in expect.items():
self.assertAlmostEqual(
value, getattr(result, key), places=3, msg=f"estimate `{key}` failed"
)
@idata(
[
[4, 10, AmplitudeEstimation(2), {"estimation": 0.5, "mle": 0.333333}],
[3, 10, MaximumLikelihoodAmplitudeEstimation(2), {"estimation": 0.256878}],
[3, 1000, IterativeAmplitudeEstimation(0.01, 0.01), {"estimation": 0.271790}],
[3, 1000, FasterAmplitudeEstimation(0.1, 4), {"estimation": 0.274168}],
]
)
@unpack
def test_qasm(self, n, shots, qae, expect):
"""QASM simulator end-to-end test."""
# construct factories for A and Q
qae.quantum_instance = self._qasm(shots)
estimation_problem = EstimationProblem(SineIntegral(n), objective_qubits=[n])
result = qae.estimate(estimation_problem)
for key, value in expect.items():
self.assertAlmostEqual(
value, getattr(result, key), places=3, msg=f"estimate `{key}` failed"
)
@idata(
[
[
AmplitudeEstimation(3),
"mle",
{
"likelihood_ratio": (0.2494734, 0.3003771),
"fisher": (0.2486176, 0.2999286),
"observed_fisher": (0.2484562, 0.3000900),
},
],
[
MaximumLikelihoodAmplitudeEstimation(3),
"estimation",
{
"likelihood_ratio": (0.2598794, 0.2798536),
"fisher": (0.2584889, 0.2797018),
"observed_fisher": (0.2659279, 0.2722627),
},
],
]
)
@unpack
def test_confidence_intervals(self, qae, key, expect):
"""End-to-end test for all confidence intervals."""
n = 3
qae.quantum_instance = self._statevector
estimation_problem = EstimationProblem(SineIntegral(n), objective_qubits=[n])
# statevector simulator
result = qae.estimate(estimation_problem)
self.assertGreater(self._statevector.time_taken, 0.0)
self._statevector.reset_execution_results()
methods = ["lr", "fi", "oi"] # short for likelihood_ratio, fisher, observed_fisher
alphas = [0.1, 0.00001, 0.9] # alpha shouldn't matter in statevector
for alpha, method in zip(alphas, methods):
confint = qae.compute_confidence_interval(result, alpha, method)
# confidence interval based on statevector should be empty, as we are sure of the result
self.assertAlmostEqual(confint[1] - confint[0], 0.0)
self.assertAlmostEqual(confint[0], getattr(result, key))
# qasm simulator
shots = 100
alpha = 0.01
qae.quantum_instance = self._qasm(shots)
result = qae.estimate(estimation_problem)
for method, expected_confint in expect.items():
confint = qae.compute_confidence_interval(result, alpha, method)
np.testing.assert_array_almost_equal(confint, expected_confint)
self.assertTrue(confint[0] <= getattr(result, key) <= confint[1])
def test_iqae_confidence_intervals(self):
"""End-to-end test for the IQAE confidence interval."""
n = 3
qae = IterativeAmplitudeEstimation(0.1, 0.01, quantum_instance=self._statevector)
expected_confint = (0.1984050, 0.3511015)
estimation_problem = EstimationProblem(SineIntegral(n), objective_qubits=[n])
# statevector simulator
result = qae.estimate(estimation_problem)
self.assertGreaterEqual(self._statevector.time_taken, 0.0)
self._statevector.reset_execution_results()
confint = result.confidence_interval
# confidence interval based on statevector should be empty, as we are sure of the result
self.assertAlmostEqual(confint[1] - confint[0], 0.0)
self.assertAlmostEqual(confint[0], result.estimation)
# qasm simulator
shots = 100
qae.quantum_instance = self._qasm(shots)
result = qae.estimate(estimation_problem)
confint = result.confidence_interval
np.testing.assert_array_almost_equal(confint, expected_confint)
self.assertTrue(confint[0] <= result.estimation <= confint[1])
@ddt
class TestFasterAmplitudeEstimation(QiskitAlgorithmsTestCase):
"""Specific tests for Faster AE."""
def test_rescaling(self):
"""Test the rescaling."""
amplitude = 0.8
scaling = 0.25
circuit = QuantumCircuit(1)
circuit.ry(2 * np.arcsin(amplitude), 0)
problem = EstimationProblem(circuit, objective_qubits=[0])
rescaled = problem.rescale(scaling)
rescaled_amplitude = Statevector.from_instruction(rescaled.state_preparation).data[3]
self.assertAlmostEqual(scaling * amplitude, rescaled_amplitude)
def test_run_without_rescaling(self):
"""Run Faster AE without rescaling if the amplitude is in [0, 1/4]."""
# construct estimation problem
prob = 0.11
a_op = QuantumCircuit(1)
a_op.ry(2 * np.arcsin(np.sqrt(prob)), 0)
problem = EstimationProblem(a_op, objective_qubits=[0])
# construct algo without rescaling
backend = BasicAer.get_backend("statevector_simulator")
fae = FasterAmplitudeEstimation(0.1, 1, rescale=False, quantum_instance=backend)
# run the algo
result = fae.estimate(problem)
# assert the result is correct
self.assertAlmostEqual(result.estimation, prob)
# assert no rescaling was used
theta = np.mean(result.theta_intervals[-1])
value_without_scaling = np.sin(theta) ** 2
self.assertAlmostEqual(result.estimation, value_without_scaling)
def test_rescaling_with_custom_grover_raises(self):
"""Test that the rescaling option fails if a custom Grover operator is used."""
prob = 0.8
a_op = BernoulliStateIn(prob)
q_op = BernoulliGrover(prob)
problem = EstimationProblem(a_op, objective_qubits=[0], grover_operator=q_op)
# construct algo without rescaling
backend = BasicAer.get_backend("statevector_simulator")
fae = FasterAmplitudeEstimation(0.1, 1, quantum_instance=backend)
# run the algo
with self.assertWarns(Warning):
_ = fae.estimate(problem)
@data(("statevector_simulator", 0.2), ("qasm_simulator", 0.199440))
@unpack
def test_good_state(self, backend_str, expect):
"""Test with a good state function."""
def is_good_state(bitstr):
return bitstr[1] == "1"
# construct the estimation problem where the second qubit is ignored
a_op = QuantumCircuit(2)
a_op.ry(2 * np.arcsin(np.sqrt(0.2)), 0)
# oracle only affects first qubit
oracle = QuantumCircuit(2)
oracle.z(0)
# reflect only on first qubit
q_op = GroverOperator(oracle, a_op, reflection_qubits=[0])
# but we measure both qubits (hence both are objective qubits)
problem = EstimationProblem(
a_op, objective_qubits=[0, 1], grover_operator=q_op, is_good_state=is_good_state
)
# construct algo
backend = QuantumInstance(
BasicAer.get_backend(backend_str), seed_simulator=2, seed_transpiler=2
)
# cannot use rescaling with a custom grover operator
fae = FasterAmplitudeEstimation(0.01, 5, rescale=False, quantum_instance=backend)
# run the algo
result = fae.estimate(problem)
# assert the result is correct
self.assertAlmostEqual(result.estimation, expect, places=5)
if __name__ == "__main__":
unittest.main()
|
examples/tensorflow/NTM/main.py | 5m477/samples-for-ai | 443 | 12756302 | <reponame>5m477/samples-for-ai
from __future__ import absolute_import
import importlib
import tensorflow as tf
from ntm_cell import NTMCell
from ntm import NTM
import os
import re
# import sh
# from smart_open import smart_open
import shutil
from utils import pp
flags = tf.app.flags
flags.DEFINE_string("task", "copy", "Task to run [copy, recall]")
flags.DEFINE_integer("epoch", 100000, "Epoch to train [100000]")
flags.DEFINE_integer("input_dim", 10, "Dimension of input [10]")
flags.DEFINE_integer("output_dim", 10, "Dimension of output [10]")
flags.DEFINE_integer("min_length", 1, "Minimum length of input sequence [1]")
flags.DEFINE_integer("max_length", 10, "Maximum length of output sequence [10]")
flags.DEFINE_integer("controller_layer_size", 1, "The size of LSTM controller [1]")
flags.DEFINE_integer("controller_dim", 100, "Dimension of LSTM controller [100]")
flags.DEFINE_integer("write_head_size", 1, "The number of write head [1]")
flags.DEFINE_integer("read_head_size", 1, "The number of read head [1]")
flags.DEFINE_integer("test_max_length", 120, "Maximum length of output sequence [120]")
flags.DEFINE_string("checkpoint_dir", "checkpoint", "Directory name to save the checkpoints [checkpoint]")
flags.DEFINE_boolean("is_train", False, "True for training, False for testing [False]")
flags.DEFINE_boolean("continue_train", None, "True to continue training from saved checkpoint. False for restarting. None for automatic [None]")
# Submit job to microsoft PAI cluster
# Read/Write WebHDFS
#flags.DEFINE_string("pai_data_dir", "", "PAI data directory")
#flags.DEFINE_boolean("hdfs", False, "True if read/write files on webhdfs")
FLAGS = flags.FLAGS
def create_ntm(config, sess, **ntm_args):
cell = NTMCell(
input_dim=config.input_dim,
output_dim=config.output_dim,
controller_layer_size=config.controller_layer_size,
controller_dim=config.controller_dim,
write_head_size=config.write_head_size,
read_head_size=config.read_head_size)
scope = ntm_args.pop('scope', 'NTM-%s' % config.task)
ntm = NTM(
cell, sess, config.min_length, config.max_length,
test_max_length=config.test_max_length, scope=scope, **ntm_args)
return cell, ntm
# Change hdfs url to webhdfs and change port
def UrlConvert(hdfspath):
regex=re.compile('^hdfs://')
if re.match(regex, hdfspath):
webhdfs = hdfspath.replace('hdfs', 'webhdfs', 1).replace(':9000', ':50070', 1)
return webhdfs
#def write_file_to_local(hdfspath, localpath):
# lines = list()
# for line in smart_open(UrlConvert(hdfspath)):
# lines.append(line)
# with open(localpath, 'wb+') as f:
# f.writelines(lines)
#def write_data_to_local(src, dest):
# if not os.path.exists(dest):
# os.makedirs(dest)
# files = [line.rsplit(None,1)[-1] for line in sh.hdfs('dfs','-ls',src).split('\n') if len(line.rsplit(None,1))][1:]
# for f in files:
# print(f)
# write_file_to_local(f, os.path.join(dest, f.split('/')[-1]))
def main(_):
pp.pprint(flags.FLAGS.__flags)
with tf.device('/cpu:0'), tf.Session() as sess:
try:
task = importlib.import_module('tasks.%s' % FLAGS.task)
except ImportError:
print("task '%s' does not have implementation" % FLAGS.task)
raise
if FLAGS.is_train:
cell, ntm = create_ntm(FLAGS, sess)
task.train(ntm, FLAGS, sess)
else:
cell, ntm = create_ntm(FLAGS, sess, forward_only=True)
#if FLAGS.hdfs:
# hdfspath = "%s/%s/%s_%s" % (FLAGS.pai_data_dir, FLAGS.checkpoint_dir, FLAGS.task, FLAGS.max_length)
# localpath = "%s/%s_%s" % (FLAGS.checkpoint_dir, FLAGS.task, FLAGS.max_length)
# write_data_to_local(hdfspath, localpath)
ntm.load(FLAGS.checkpoint_dir, FLAGS.task)
if FLAGS.task == 'copy':
task.run(ntm, int(FLAGS.test_max_length * 1 / 3), sess)
                print()
                task.run(ntm, int(FLAGS.test_max_length * 2 / 3), sess)
                print()
task.run(ntm, int(FLAGS.test_max_length * 3 / 3), sess)
else:
task.run(ntm, int(FLAGS.test_max_length), sess)
if __name__ == '__main__':
tf.app.run()
|
library/source2/data_blocks/base_block.py | BlenderAddonsArchive/SourceIO | 199 | 12756342 | <reponame>BlenderAddonsArchive/SourceIO<filename>library/source2/data_blocks/base_block.py<gh_stars>100-1000
from ...utils.byte_io_mdl import ByteIO
class DataBlock:
def __init__(self, valve_file, info_block):
from ..resource_types import ValveCompiledResource
from .compiled_file_header import InfoBlock
self._valve_file: ValveCompiledResource = valve_file
self.info_block: InfoBlock = info_block
with self._valve_file.reader.save_current_pos():
self._valve_file.reader.seek(self.info_block.absolute_offset)
self.reader = ByteIO(self._valve_file.reader.read(self.info_block.block_size))
self.data = {}
self.parsed = False
def read(self):
self.parsed = True
raise NotImplementedError()
def __repr__(self):
template = '<{} {}>'
return template.format(type(self).__name__, self.info_block.block_name)
|
tests/sphinx_docstrings.py | s-weigand/darglint | 405 | 12756344 | """A collection of sphinx docstrings from the wild."""
import ast
FunctionDef = ast.FunctionDef
if hasattr(ast, 'AsyncFunctionDef'):
FunctionDef = (ast.FunctionDef, ast.AsyncFunctionDef)
def publish_msgstr(app, source, source_path, source_line, config, settings):
# From https://github.com/sphinx-doc/sphinx
    # File: sphinx/transforms/i18n.py
"""Publish msgstr (single line) into docutils document
:param sphinx.application.Sphinx app: sphinx application
:param unicode source: source text
:param unicode source_path: source path for warning indication
:param source_line: source line for warning indication
:param sphinx.config.Config config: sphinx config
:param docutils.frontend.Values settings: docutils settings
:return: document
:rtype: docutils.nodes.document
"""
...
# Expected item head to end with TokenType.COLON but was TokenType.WORD 'app' # noqa
def _strip_basic_auth(url):
# From https://github.com/sphinx-doc/sphinx
# File: sphinx/ext/intersphinx.py
"""Returns *url* with basic auth credentials removed. Also returns the
basic auth username and password if they're present in *url*.
E.g.: https://user:[email protected] => https://example.com
*url* need not include basic auth credentials.
:param url: url which may or may not contain basic auth credentials
:type url: ``str``
:return: *url* with any basic auth creds removed
:rtype: ``str``
"""
...
def extract_original_messages(self):
# From https://github.com/sphinx-doc/sphinx
# File: sphinx/addnodes.py
"""Extract translation messages.
:returns: list of extracted messages or messages generator
"""
...
def read_requirements(fh, resolve=False):
# From https://github.com/pypa/pipenv
# File: pipenv/patched/safety/util.py
"""
Reads requirements from a file like object and (optionally) from referenced files.
:param fh: file like object to read from
:param resolve: boolean. resolves referenced files.
:return: generator
""" # noqa
...
def copytree(self, destination, symlinks=False):
# File: sphinx/testing/path.py
"""
Recursively copy a directory to the given `destination`. If the given
`destination` does not exist it will be created.
:param symlinks:
If ``True`` symbolic links in the source tree result in symbolic
links in the destination tree otherwise the contents of the files
pointed to by the symbolic links are copied.
"""
# Expected item to start with TokenType.COLON but was TokenType.INDENT
def rmtree(self, ignore_errors=False, onerror=None):
# File: sphinx/testing/path.py
"""
Removes the file or directory and any files or directories it may
contain.
:param ignore_errors:
If ``True`` errors are silently ignored, otherwise an exception
is raised in case an error occurs.
:param onerror:
A callback which gets called with the arguments `func`, `path` and
`exc_info`. `func` is one of :func:`os.listdir`, :func:`os.remove`
or :func:`os.rmdir`. `path` is the argument to the function which
caused it to fail and `exc_info` is a tuple as returned by
:func:`sys.exc_info`.
"""
# Expected item to start with TokenType.COLON but was TokenType.INDENT
def test_params(request):
# File: sphinx/testing/fixtures.py
"""
test parameters that is specified by 'pytest.mark.test_params'
:param Union[str] shared_result:
If the value is provided, app._status and app._warning objects will be
shared in the parametrized test functions and/or test functions that
have same 'shared_result' value.
**NOTE**: You can not specify shared_result and srcdir in same time.
"""
# Expected item head to end with TokenType.COLON but was TokenType.WORD 'shared_result'
def add_uids(doctree, condition):
# File: sphinx/versioning.py
"""Add a unique id to every node in the `doctree` which matches the
condition and yield the nodes.
:param doctree:
A :class:`docutils.nodes.document` instance.
:param condition:
A callable which returns either ``True`` or ``False`` for a given node.
"""
# Expected item to start with TokenType.COLON but was TokenType.INDENT
def merge_doctrees(old, new, condition):
# File: sphinx/versioning.py
"""Merge the `old` doctree with the `new` one while looking at nodes
matching the `condition`.
Each node which replaces another one or has been added to the `new` doctree
will be yielded.
:param condition:
A callable which returns either ``True`` or ``False`` for a given node.
"""
# Expected item to start with TokenType.COLON but was TokenType.INDENT
def _read_from_url(url, config=None):
# File: sphinx/ext/intersphinx.py
"""Reads data from *url* with an HTTP *GET*.
This function supports fetching from resources which use basic HTTP auth as
laid out by RFC1738 § 3.1. See § 5 for grammar definitions for URLs.
.. seealso:
https://www.ietf.org/rfc/rfc1738.txt
:param url: URL of an HTTP resource
:type url: ``str``
:return: data read from resource described by *url*
:rtype: ``file``-like object
"""
# Expected item to start with TokenType.COLON but was TokenType.NEWLINE
def _get_safe_url(url):
# File: sphinx/ext/intersphinx.py
"""Gets version of *url* with basic auth passwords obscured. This function
returns results suitable for printing and logging.
E.g.: https://user:[email protected] => https://[email protected]
:param url: a url
:type url: ``str``
:return: *url* with password removed
:rtype: ``str``
"""
# Expected item to start with TokenType.COLON but was TokenType.NEWLINE
def find_catalog_source_files(*args):
# File: sphinx/util/i18n.py
"""
:param list locale_dirs:
list of path as `['locale_dir1', 'locale_dir2', ...]` to find
translation catalogs. Each path contains a structure such as
`<locale>/LC_MESSAGES/domain.po`.
:param str locale: a language as `'en'`
:param list domains: list of domain names to get. If empty list or None
is specified, get all domain names. default is None.
:param boolean gettext_compact:
* False: keep domains directory structure (default).
* True: domains in the sub directory will be merged into 1 file.
:param boolean force_all:
Set True if you want to get all catalogs rather than updated catalogs.
default is False.
:return: [CatalogInfo(), ...]
"""
# Expected item head to end with TokenType.COLON but was TokenType.WORD 'locale'
def get_full_module_name(node):
# File: sphinx/util/nodes.py
"""
return full module dotted path like: 'docutils.nodes.paragraph'
:param nodes.Node node: target node
:return: full module dotted path
"""
# Expected item head to end with TokenType.COLON but was TokenType.WORD 'node'
def set_application(self, app):
# File: sphinx/parsers.py
"""set_application will be called from Sphinx to set app and other instance variables
:param sphinx.application.Sphinx app: Sphinx application object
"""
# Expected item head to end with TokenType.COLON but was TokenType.WORD 'app'
def write_bytes(sef, bytes, append=False):
# File: sphinx/testing/path.py
"""
Writes the given `bytes` to the file.
:param append:
If ``True`` given `bytes` are added at the end of the file.
"""
# Expected item to start with TokenType.COLON but was TokenType.INDENT
def repr_domxml(node, length=80):
# File: sphinx/util/nodes.py
"""
return DOM XML representation of the specified node like:
'<paragraph translatable="False"><inline classes="versionmodified">New in version...'
:param nodes.Node node: target node
:param int length:
length of return value to be striped. if false-value is specified, repr_domxml
returns full of DOM XML representation.
:return: DOM XML representation
"""
# Expected item head to end with TokenType.COLON but was TokenType.WORD 'node'
def docstrings():
"""Get all of the docstrings in this file (including this one.)
:return: The docstrings in this file.
:rtype: List[str]
"""
with open(__file__, 'r') as fin:
data = fin.read()
this_script = ast.parse(data)
functions = [x for x in this_script.body
if isinstance(x, FunctionDef)]
return list(map(ast.get_docstring, functions))
|
data/transcoder_evaluation_gfg/python/DECIMAL_BINARY_CONVERSION_WITHOUT_USING_ARITHMETIC_OPERATORS.py | mxl1n/CodeGen | 241 | 12756347 | <gh_stars>100-1000
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
def f_gold ( n ) :
if ( n == 0 ) :
return "0" ;
bin = "" ;
while ( n > 0 ) :
if ( n & 1 == 0 ) :
bin = '0' + bin ;
else :
bin = '1' + bin ;
n = n >> 1 ;
return bin ;
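# f_gold builds the binary string with shifts and bitwise AND only, matching the task
# name (no arithmetic division or modulo is used for the conversion itself).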
#TOFILL
if __name__ == '__main__':
param = [
(35,),
(17,),
(8,),
(99,),
(57,),
(39,),
(99,),
(14,),
(22,),
(7,)
]
n_success = 0
for i, parameters_set in enumerate(param):
if f_filled(*parameters_set) == f_gold(*parameters_set):
n_success+=1
print("#Results: %i, %i" % (n_success, len(param))) |
kitsune/questions/management/commands/auto_archive_old_questions.py | erdal-pb/kitsune | 929 | 12756381 | import logging
from datetime import datetime, timedelta
from django.conf import settings
from django.core.management.base import BaseCommand
from django.db import connection, transaction
from kitsune.questions.models import Question, Answer
from kitsune.search.es7_utils import index_objects_bulk
log = logging.getLogger("k.cron")
class Command(BaseCommand):
help = "Archive all questions that were created over 180 days ago."
def handle(self, **options):
# Set up logging so it doesn't send Ricky email.
logging.basicConfig(level=logging.ERROR)
# Get a list of ids of questions we're going to go change. We need
# a list of ids so that we can feed it to the update, but then
# also know what we need to update in the index.
days_180 = datetime.now() - timedelta(days=180)
q_ids = list(
Question.objects.filter(is_archived=False)
.filter(created__lte=days_180)
.values_list("id", flat=True)
)
if q_ids:
log.info("Updating %d questions", len(q_ids))
sql = """
UPDATE questions_question
SET is_archived = 1
WHERE id IN (%s)
""" % ",".join(
map(str, q_ids)
)
cursor = connection.cursor()
cursor.execute(sql)
if not transaction.get_connection().in_atomic_block:
transaction.commit()
if settings.ES_LIVE_INDEXING:
# elastic v7 code:
answer_ids = list(
Answer.objects.filter(question_id__in=q_ids).values_list("id", flat=True)
)
index_objects_bulk.delay("QuestionDocument", q_ids)
index_objects_bulk.delay("AnswerDocument", answer_ids)
|
utils/extras.py | ovichiro/sublime-boxy-theme | 102 | 12756401 | # -*- coding: utf-8 -*-
"""
Boxy Theme Extras
"""
import sublime
import sublime_plugin
from collections import OrderedDict
NO_SELECTION = -1
SUBLIME_LINTER = 'SublimeLinter'
PLAIN_TASKS = 'PlainTasks'
PLAIN_NOTES = 'PlainNotes'
EXTRAS = OrderedDict(
[
(
'PlainNotes',
{
'name': 'Plain Notes',
'settings': 'Note.sublime-settings',
'desc': 'Choose a color scheme'
}
),
(
'PlainTasks',
{
'name': 'Plain Tasks',
'settings': 'PlainTasks.sublime-settings',
'desc': 'Choose a color scheme'
}
),
(
'SublimeLinter',
{
'name': 'Sublime Linter',
'settings': 'SublimeLinter.sublime-settings',
'desc': 'Activate a gutter theme',
'revert': 'Revert the gutter theme to the defaults',
'boxy': 'Packages/Boxy Theme/extras/SublimeLinter/Boxy.gutter-theme',
'default': 'Packages/SublimeLinter/gutter-themes/Default/Default.gutter-theme'
}
)
]
)
THEMES = [
'Boxy Monokai',
'Boxy Nova',
'Boxy Ocean',
'Boxy Solarized Dark',
'Boxy Solarized Light',
'Boxy Tomorrow',
'Boxy Yesterday'
]
def get_settings(pkg):
return sublime.load_settings(EXTRAS[pkg].get('settings'))
def save_settings(pkg):
return sublime.save_settings(EXTRAS[pkg].get('settings'))
def get_theme(pkg):
settings = get_settings(pkg)
if pkg is SUBLIME_LINTER:
items = settings.get('user', '')
if items != '':
return items.get('gutter_theme', '')
if pkg in (PLAIN_TASKS, PLAIN_NOTES):
return settings.get('color_scheme', '')
def set_theme(pkg, path):
settings = get_settings(pkg)
if pkg is SUBLIME_LINTER:
items = settings.get('user', '')
if items != '':
items['gutter_theme'] = path
return settings.set('user', items)
if pkg in (PLAIN_TASKS, PLAIN_NOTES):
return settings.set('color_scheme', path)
def activate_theme(pkg, path):
set_theme(pkg, path)
return save_settings(pkg)
def revert_theme(pkg, path):
    if path == '':
get_settings(pkg).erase('color_scheme')
else:
set_theme(pkg, path)
return save_settings(pkg)
class BoxyExtrasCommand(sublime_plugin.WindowCommand):
def display_list(self, extras):
self.extras = extras
self.quick_list = []
name = ''
desc = ''
for extra in self.extras:
name = self.extras[extra].get('name')
desc = self.extras[extra].get('desc')
if extra is SUBLIME_LINTER:
if get_theme(SUBLIME_LINTER) == self.extras[
SUBLIME_LINTER
].get('boxy'):
desc = self.extras[SUBLIME_LINTER].get('revert')
self.quick_list.append([name, desc])
self.window.show_quick_panel(self.quick_list, self.on_done)
def on_done(self, index):
if index is NO_SELECTION:
return
if index is 0:
self.window.run_command('boxy_plain_notes')
if index is 1:
self.window.run_command('boxy_plain_tasks')
if index is 2:
current = get_theme(SUBLIME_LINTER)
boxy = self.extras[SUBLIME_LINTER].get('boxy')
default = self.extras[SUBLIME_LINTER].get('default')
if current == boxy:
return revert_theme(SUBLIME_LINTER, default)
else:
return activate_theme(SUBLIME_LINTER, boxy)
def run(self):
self.display_list(EXTRAS)
class BoxyPlainTasksCommand(sublime_plugin.WindowCommand):
def display_list(self, themes):
self.themes = themes
self.initial_theme = get_theme(PLAIN_TASKS)
quick_list = [theme for theme in self.themes]
self.quick_list = quick_list
self.window.show_quick_panel(quick_list, self.on_done,
on_highlight=self.on_highlighted)
def on_highlighted(self, index):
set_theme(PLAIN_TASKS, self._quick_list_to_theme(index))
def on_done(self, index):
        if index == NO_SELECTION:
revert_theme(PLAIN_TASKS, self.initial_theme)
return
activate_theme(PLAIN_TASKS, self._quick_list_to_theme(index))
def _quick_list_to_theme(self, index):
return ('Packages/Boxy Theme/extras/PlainTasks/%s.hidden-tmTheme' %
self.quick_list[index])
def run(self):
self.display_list(THEMES)
class BoxyPlainNotesCommand(sublime_plugin.WindowCommand):
def display_list(self, themes):
self.themes = themes
self.initial_theme = get_theme(PLAIN_NOTES)
quick_list = [theme for theme in self.themes]
self.quick_list = quick_list
self.window.show_quick_panel(quick_list, self.on_done,
on_highlight=self.on_highlighted)
def on_highlighted(self, index):
set_theme(PLAIN_NOTES, self._quick_list_to_theme(index))
def on_done(self, index):
        if index == NO_SELECTION:
revert_theme(PLAIN_NOTES, self.initial_theme)
return
activate_theme(PLAIN_NOTES, self._quick_list_to_theme(index))
def _quick_list_to_theme(self, index):
return ('Packages/Boxy Theme/schemes/%s.tmTheme' %
self.quick_list[index])
def run(self):
self.display_list(THEMES)
|
phi/jax/__init__.py | eliasdjo/PhiFlow | 556 | 12756415 | """
Jax integration.
Importing this module registers the Jax backend with `phi.math`.
Without this, Jax tensors cannot be handled by `phi.math` functions.
To make Jax the default backend, import `phi.jax.flow`.
"""
from phi import math as _math
from ._jax_backend import JaxBackend as _JaxBackend
JAX = _JaxBackend()
"""Backend for Jax operations."""
_math.backend.BACKENDS.append(JAX)
__all__ = [key for key in globals().keys() if not key.startswith('_')]
|
demo/cict_demo/collect_pm.py | timothijoe/DI-drive | 219 | 12756478 | <reponame>timothijoe/DI-drive
import numpy as np
import cv2
import carla
from camera.parameters import CameraParams, IntrinsicParams, ExtrinsicParams
from camera.coordinate_transformation import CoordinateTransformation, rotationMatrix3D
def rad_lim(rad):
while (rad > np.pi):
rad -= (2 * np.pi)
while (rad < -np.pi):
rad += (2 * np.pi)
return rad
def getLinearPose(pose1, pose2, min_dist):
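    # Linearly interpolate between two carla.Transform poses: positions are blended
    # directly, Euler angles along the shortest angular difference (via rad_lim), and
    # one intermediate Transform is emitted roughly every `min_dist` meters.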
x1, x2 = pose1.location.x, pose2.location.x
y1, y2 = pose1.location.y, pose2.location.y
z1, z2 = pose1.location.z, pose2.location.z
roll1, roll2 = np.deg2rad(pose1.rotation.roll), np.deg2rad(pose2.rotation.roll)
pitch1, pitch2, = np.deg2rad(pose1.rotation.pitch), np.deg2rad(pose2.rotation.pitch)
yaw1, yaw2, = np.deg2rad(pose1.rotation.yaw), np.deg2rad(pose2.rotation.yaw)
distance = pose1.location.distance(pose2.location)
total = int(distance / min_dist)
result_list = []
tt = np.arange(total) / total
x, y, z = tt * x2 + (1 - tt) * x1, tt * y2 + (1 - tt) * y1, tt * z2 + (1 - tt) * z1
roll = np.rad2deg(rad_lim(roll2 - roll1) * tt + roll1)
pitch = np.rad2deg(rad_lim(pitch2 - pitch1) * tt + pitch1)
yaw = np.rad2deg(rad_lim(yaw2 - yaw1) * tt + yaw1)
for i in range(total):
location = carla.Location(x=x[i], y=y[i], z=z[i])
rotation = carla.Rotation(roll=roll[i], pitch=pitch[i], yaw=yaw[i])
result_list.append(carla.Transform(location, rotation))
return result_list
class CollectPerspectiveImage(object):
def __init__(self, param, sensor):
self.longitudinal_sample_number_near = param.longitudinal_sample_number_near
self.longitudinal_sample_number_far = param.longitudinal_sample_number_far
self.vehicle_half_width = param.vehicle_width / 2
self.lateral_step_factor = param.lateral_step_factor
self.lateral_sample_array = np.linspace(
-self.vehicle_half_width, self.vehicle_half_width, param.lateral_sample_number
)
self.sensor = sensor
self.camera_params = CameraParams(IntrinsicParams(sensor), ExtrinsicParams(sensor))
self.img_width = eval(sensor.attributes['image_size_x'])
self.img_height = eval(sensor.attributes['image_size_y'])
self.max_pixel = np.array([self.img_height, self.img_width]).reshape([2, 1])
self.min_pixel = np.zeros((2, 1))
self.empty_image = np.zeros((self.img_height // 2, self.img_width // 2), dtype=np.dtype("uint8"))
def data_augmentation(self, traj_pose_list):
result_list = []
for i in range(len(traj_pose_list) - 1):
p1 = traj_pose_list[i][1]
p2 = traj_pose_list[i + 1][1]
if float(i) / len(traj_pose_list) < 0.4:
min_dist = 0.04
elif float(i) / len(traj_pose_list) < 0.6:
min_dist = 0.08
else:
min_dist = 0.12
result_list.extend(getLinearPose(p1, p2, min_dist))
return result_list
def drawDestInImage(self, dest_vec, location, rotation):
empty_image = np.zeros((self.img_height // 2, self.img_width // 2, 3), dtype=np.dtype("uint8"))
R = rotationMatrix3D(np.deg2rad(rotation[2]), np.deg2rad(rotation[0]), np.deg2rad(rotation[1]))
t = location.reshape(3, 1)
vehicle_vec = CoordinateTransformation.world3DToCamera3D(dest_vec, R, t)
pixel_vec = CoordinateTransformation.world3DToImage2D(
vehicle_vec, self.camera_params.K, self.camera_params.R, self.camera_params.t
)
pixel_vec = pixel_vec[::-1, :]
x_pixel = pixel_vec.astype(int)[0, 0]
y_pixel = pixel_vec.astype(int)[1, 0]
#print(dest_vec,pixel_vec)
x_pixel = np.clip(x_pixel, 10, self.img_height - 10)
y_pixel = np.clip(y_pixel, 10, self.img_width - 10)
x_pixel, y_pixel = np.meshgrid(
np.arange(max(0, x_pixel // 2 - 5), min(self.img_height // 2 - 1, x_pixel // 2 + 5)),
np.arange(max(0, y_pixel // 2 - 5), min(self.img_width // 2 - 1, y_pixel // 2 + 5)),
indexing='ij'
)
empty_image[x_pixel, y_pixel, 2] = 255
return cv2.resize(empty_image, (self.img_width, self.img_height), interpolation=cv2.INTER_CUBIC)
def drawLineInImage(self, traj_pose, vehicle_transform):
#traj_position = traj_pose.location
traj_vec = np.array([traj_pose.location.x, traj_pose.location.y, traj_pose.location.z]).reshape(3, 1)
rotation = vehicle_transform.rotation
location = vehicle_transform.location
R = rotationMatrix3D(np.deg2rad(rotation.roll), np.deg2rad(rotation.pitch), np.deg2rad(rotation.yaw))
t = np.array([location.x, location.y, location.z]).reshape(3, 1)
# along lateral
theta = np.deg2rad(traj_pose.rotation.yaw + 90)
start_vec = np.array([self.vehicle_half_width * np.cos(theta), self.vehicle_half_width * np.sin(theta), 0]
).reshape(3, 1) + traj_vec
start_vehicle_vec = CoordinateTransformation.world3DToCamera3D(start_vec, R, t)
start_pixel_vec = CoordinateTransformation.world3DToImage2D(
start_vehicle_vec, self.camera_params.K, self.camera_params.R, self.camera_params.t
)
start_pixel_vec = start_pixel_vec[::-1, :]
theta = np.deg2rad(traj_pose.rotation.yaw - 90)
end_vec = np.array([self.vehicle_half_width * np.cos(theta), self.vehicle_half_width * np.sin(theta), 0]
).reshape(3, 1) + traj_vec
end_vehicle_vec = CoordinateTransformation.world3DToCamera3D(end_vec, R, t)
end_pixel_vec = CoordinateTransformation.world3DToImage2D(
end_vehicle_vec, self.camera_params.K, self.camera_params.R, self.camera_params.t
)
end_pixel_vec = end_pixel_vec[::-1, :]
flag1 = (start_pixel_vec >= self.min_pixel).all() and (start_pixel_vec < self.max_pixel).all()
flag2 = (end_pixel_vec >= self.min_pixel).all() and (end_pixel_vec < self.max_pixel).all()
if not flag1 and not flag2:
return
length = np.linalg.norm(end_pixel_vec - start_pixel_vec)
direction = (end_pixel_vec - start_pixel_vec) / length
lateral_sample_number = round(length / self.lateral_step_factor) + 1
distance_array = np.linspace(0, length, lateral_sample_number)
pixel_vec = start_pixel_vec + distance_array * direction
x_pixel = pixel_vec.astype(int)[0]
y_pixel = pixel_vec.astype(int)[1]
mask = np.where((x_pixel >= 0) & (x_pixel < self.img_height))[0]
x_pixel = x_pixel[mask]
y_pixel = y_pixel[mask]
mask = np.where((y_pixel >= 0) & (y_pixel < self.img_width))[0]
x_pixel = x_pixel[mask]
y_pixel = y_pixel[mask]
self.empty_image[x_pixel // 2, y_pixel // 2] = 255
self.empty_image[np.clip(x_pixel // 2 + 1, 0, self.img_height // 2 - 1), y_pixel // 2] = 255
        self.empty_image[np.maximum(x_pixel // 2 - 1, 0), y_pixel // 2] = 255
return
def getPM(self, traj_pose_list, vehicle_transform):
self.empty_image = np.zeros((self.img_height // 2, self.img_width // 2), dtype=np.dtype("uint8"))
aug_traj_pose_list = self.data_augmentation(traj_pose_list)
for traj_pose in aug_traj_pose_list:
self.drawLineInImage(traj_pose, vehicle_transform)
kernel = np.ones((
5,
5,
), np.uint8)
self.empty_image = cv2.dilate(self.empty_image, kernel, iterations=1)
self.empty_image = cv2.erode(self.empty_image, kernel, iterations=1)
return cv2.resize(self.empty_image, (self.img_width, self.img_height), interpolation=cv2.INTER_CUBIC)
class InversePerspectiveMapping(object):
def __init__(self, param, sensor):
self.sensor = sensor
self.camera_params = CameraParams(IntrinsicParams(sensor), ExtrinsicParams(sensor))
self.img_width = 400
self.img_height = 200
self.empty_image = np.zeros((self.img_height, self.img_width), dtype=np.uint8)
        self.longitudinal_length = param.longitudinal_length
        self.ksize = param.ksize
        f = float(self.img_height) / self.longitudinal_length
        self.pseudo_K = np.array([[f, 0, self.img_width / 2], [0, f, self.img_height], [0, 0, 1]])
self.reverseXY = rotationMatrix3D(0, 0, -np.pi / 2)
def getIPM(self, image):
self.empty_image = np.zeros((self.img_height, self.img_width), dtype=np.uint8)
index_array = np.argwhere(image > 200)
index_array = index_array[:, :2]
index_array = np.unique(index_array, axis=0)
index_array = np.array([index_array[:, 1], index_array[:, 0]])
vehicle_vec = CoordinateTransformation.image2DToWorld3D2(
index_array, self.camera_params.K, self.camera_params.R, self.camera_params.t
)
vehicle_vec[:, 2, 0] = 1.0
        temp = np.dot(self.pseudo_K, self.reverseXY)
vehicle_vec = np.squeeze(vehicle_vec, axis=2)
new_image_vec = np.dot(temp, vehicle_vec.T)
new_image_vec = new_image_vec[:2, :]
new_image_vec = new_image_vec[::-1, :]
new_image_y_pixel = new_image_vec[0, :].astype(int)
new_image_x_pixel = new_image_vec[1, :].astype(int)
#self.empty_image[new_image_y_pixel, new_image_x_pixel] = 255
mask = np.where((new_image_x_pixel >= 0) & (new_image_x_pixel < self.img_width))[0]
new_image_x_pixel = new_image_x_pixel[mask]
new_image_y_pixel = new_image_y_pixel[mask]
mask = np.where((new_image_y_pixel >= 0) & (new_image_y_pixel < self.img_height))[0]
new_image_x_pixel = new_image_x_pixel[mask]
new_image_y_pixel = new_image_y_pixel[mask]
self.empty_image[new_image_y_pixel, new_image_x_pixel] = 255
self.empty_image[np.clip(new_image_y_pixel + 1, 0, self.img_height - 1), new_image_x_pixel] = 255
self.empty_image[np.clip(new_image_y_pixel - 1, 0, self.img_height - 1), new_image_x_pixel] = 255
#self.empty_image = cv2.GaussianBlur(self.empty_image, (self.ksize, self.ksize), 25)
return self.empty_image
def get_cost_map(self, ipm, lidar):
lidar = -lidar
mask = np.where((lidar[:, 0] > 1.2) | (lidar[:, 0] < -1.2) | (lidar[:, 1] > 2.0) | (lidar[:, 1] < -4.0))[0]
lidar = lidar[mask, :]
mask = np.where(lidar[:, 2] > -1.95)[0]
lidar = lidar[mask, :]
img2 = np.zeros((self.img_height, self.img_width), np.uint8)
img2.fill(255)
        pixel_per_meter = float(self.img_height) / self.longitudinal_length
u = (self.img_height - lidar[:, 1] * pixel_per_meter).astype(int)
v = (-lidar[:, 0] * pixel_per_meter + self.img_width // 2).astype(int)
mask = np.where((u >= 0) & (u < self.img_height))[0]
u = u[mask]
v = v[mask]
mask = np.where((v >= 0) & (v < self.img_width))[0]
u = u[mask]
v = v[mask]
img2[u, v] = 0
#print(u,v)
kernel = np.ones((17, 17), np.uint8)
img2 = cv2.erode(img2, kernel, iterations=1)
        dilate_kernel = np.ones((3, 3), np.uint8)
        img = cv2.dilate(ipm, dilate_kernel, iterations=3)
img = cv2.addWeighted(img, 0.5, img2, 0.5, 0)
mask = np.where((img2 < 50))
u = mask[0]
v = mask[1]
img[u, v] = 0
#kernel_size = (17, 17)
#kernel_size = (9, 9)
#sigma = 9#21
#img = cv2.GaussianBlur(img, kernel_size, sigma)
return img
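# Illustrative end-to-end sketch (not part of the original file). The attribute names on
# `param` are taken from the constructors above; the concrete values, the `camera_sensor`
# and `lidar_points` objects, and the trajectory list are assumptions that would come from
# a running CARLA simulation.
#
#   from types import SimpleNamespace
#   param = SimpleNamespace(
#       longitudinal_sample_number_near=8, longitudinal_sample_number_far=0.5,
#       vehicle_width=2.0, lateral_step_factor=1.0, lateral_sample_number=20,
#       longitudinal_length=25.0, ksize=21)
#   collector = CollectPerspectiveImage(param, camera_sensor)
#   pm = collector.getPM(traj_pose_list, vehicle_transform)   # trajectory corridor rendered in the camera image
#   ipm_builder = InversePerspectiveMapping(param, camera_sensor)
#   ipm = ipm_builder.getIPM(pm)                              # bird's-eye-view projection of that corridor
#   cost_map = ipm_builder.get_cost_map(ipm, lidar_points)    # fuse with LiDAR obstacle mask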
|
main.py | cyy0523xc/chineseocr | 5,049 | 12756486
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Aug 4 01:01:37 2019
main
@author: chineseocr
"""
from text.detector.detectors import TextDetector
from apphelper.image import rotate_cut_img,sort_box
import numpy as np
from PIL import Image
class TextOcrModel(object):
def __init__(self,ocrModel,textModel,angleModel):
self.ocrModel = ocrModel
self.textModel = textModel
self.angleModel = angleModel
def detect_angle(self,img):
"""
detect text angle in [0,90,180,270]
@@img:np.array
"""
angle = self.angleModel(img)
if angle==90:
im = Image.fromarray(img).transpose(Image.ROTATE_90)
img = np.array(im)
elif angle==180:
im = Image.fromarray(img).transpose(Image.ROTATE_180)
img = np.array(im)
elif angle==270:
im = Image.fromarray(img).transpose(Image.ROTATE_270)
img = np.array(im)
return img,angle
def detect_box(self,img,scale=600,maxScale=900):
"""
        detect text boxes and their scores
@@img:np.array
"""
boxes,scores = self.textModel(img,scale,maxScale)
return boxes,scores
def box_cluster(self,img,boxes,scores,**args):
MAX_HORIZONTAL_GAP= args.get('MAX_HORIZONTAL_GAP',100)
MIN_V_OVERLAPS = args.get('MIN_V_OVERLAPS',0.6)
MIN_SIZE_SIM = args.get('MIN_SIZE_SIM',0.6)
textdetector = TextDetector(MAX_HORIZONTAL_GAP,MIN_V_OVERLAPS,MIN_SIZE_SIM)
shape = img.shape[:2]
TEXT_PROPOSALS_MIN_SCORE = args.get('TEXT_PROPOSALS_MIN_SCORE',0.7)
TEXT_PROPOSALS_NMS_THRESH = args.get('TEXT_PROPOSALS_NMS_THRESH',0.3)
TEXT_LINE_NMS_THRESH = args.get('TEXT_LINE_NMS_THRESH',0.3)
LINE_MIN_SCORE = args.get('LINE_MIN_SCORE',0.8)
boxes,scores = textdetector.detect(boxes,
scores[:, np.newaxis],
shape,
TEXT_PROPOSALS_MIN_SCORE,
TEXT_PROPOSALS_NMS_THRESH,
TEXT_LINE_NMS_THRESH,
LINE_MIN_SCORE
)
return boxes,scores
def ocr_batch(self,img,boxes,leftAdjustAlph=0.0,rightAdjustAlph=0.0):
"""
batch for ocr
"""
im = Image.fromarray(img)
newBoxes = []
for index,box in enumerate(boxes):
partImg,box = rotate_cut_img(im,box,leftAdjustAlph,rightAdjustAlph)
box['img'] = partImg.convert('L')
newBoxes.append(box)
res = self.ocrModel(newBoxes)
return res
def model(self,img,**args):
detectAngle = args.get('detectAngle',False)
if detectAngle:
img,angle = self.detect_angle(img)
else:
angle = 0
scale = args.get('scale',608)
maxScale = args.get('maxScale',608)
        boxes,scores = self.detect_box(img,scale,maxScale)  ## text detection
boxes,scores = self.box_cluster(img,boxes,scores,**args)
boxes = sort_box(boxes)
leftAdjustAlph = args.get('leftAdjustAlph',0)
rightAdjustAlph = args.get('rightAdjustAlph',0)
res = self.ocr_batch(img,boxes,leftAdjustAlph,rightAdjustAlph)
return res,angle
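# Illustrative usage sketch (not part of the original file). `ocr_model`, `text_model` and
# `angle_model` are assumptions: any callables with the interfaces used above (batched OCR
# over box dicts, detection returning (boxes, scores), and 0/90/180/270 angle
# classification) will do, and the keyword values shown are just plausible defaults.
#
#   import cv2
#   model = TextOcrModel(ocr_model, text_model, angle_model)
#   img = cv2.imread('test.jpg')[..., ::-1]       # BGR -> RGB
#   result, angle = model.model(
#       img,
#       detectAngle=True,                         # undo coarse page rotation first
#       scale=608, maxScale=2048,
#       MAX_HORIZONTAL_GAP=100, MIN_V_OVERLAPS=0.6, MIN_SIZE_SIM=0.6,
#       TEXT_PROPOSALS_MIN_SCORE=0.1, TEXT_PROPOSALS_NMS_THRESH=0.3,
#       TEXT_LINE_NMS_THRESH=0.99, LINE_MIN_SCORE=0.1,
#       leftAdjustAlph=0.01, rightAdjustAlph=0.01)
#   # `result` holds whatever the OCR callable returns for each detected line box;
#   # `angle` is the detected page rotation in degrees.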
|
loss/SoftTriple.py | ZhenGong1997/SoftTriple | 181 | 12756501
# Implementation of SoftTriple Loss
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.parameter import Parameter
from torch.nn import init
class SoftTriple(nn.Module):
def __init__(self, la, gamma, tau, margin, dim, cN, K):
super(SoftTriple, self).__init__()
self.la = la
self.gamma = 1./gamma
self.tau = tau
self.margin = margin
self.cN = cN
self.K = K
self.fc = Parameter(torch.Tensor(dim, cN*K))
self.weight = torch.zeros(cN*K, cN*K, dtype=torch.bool).cuda()
for i in range(0, cN):
for j in range(0, K):
self.weight[i*K+j, i*K+j+1:(i+1)*K] = 1
init.kaiming_uniform_(self.fc, a=math.sqrt(5))
return
def forward(self, input, target):
centers = F.normalize(self.fc, p=2, dim=0)
simInd = input.matmul(centers)
simStruc = simInd.reshape(-1, self.cN, self.K)
prob = F.softmax(simStruc*self.gamma, dim=2)
simClass = torch.sum(prob*simStruc, dim=2)
marginM = torch.zeros(simClass.shape).cuda()
marginM[torch.arange(0, marginM.shape[0]), target] = self.margin
lossClassify = F.cross_entropy(self.la*(simClass-marginM), target)
if self.tau > 0 and self.K > 1:
simCenter = centers.t().matmul(centers)
reg = torch.sum(torch.sqrt(2.0+1e-5-2.*simCenter[self.weight]))/(self.cN*self.K*(self.K-1.))
return lossClassify+self.tau*reg
else:
return lossClassify
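# Illustrative usage sketch (not part of the original file). The backbone network and the
# concrete hyperparameter values below are assumptions, not prescriptions from this repo.
# `input` is typically an L2-normalized embedding batch of shape [B, dim] (only the centers
# are normalized inside forward()), and `target` holds class indices in [0, cN). The tau
# term pulls the K centers of each class together: for unit-norm centers u, v the expression
# sqrt(2 - 2*u.v) is their Euclidean distance.
#
#   import torch.nn.functional as F
#   criterion = SoftTriple(la=20, gamma=0.1, tau=0.2, margin=0.01,
#                          dim=64, cN=98, K=10).cuda()
#   embeddings = F.normalize(backbone(images.cuda()), p=2, dim=1)   # [B, 64]
#   loss = criterion(embeddings, labels.cuda())
#   loss.backward()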
|
crabageprediction/venv/Lib/site-packages/pandas/tests/indexes/timedeltas/test_pickle.py | 13rianlucero/CrabAgePrediction | 28,899 | 12756513
from pandas import timedelta_range
import pandas._testing as tm
class TestPickle:
def test_pickle_after_set_freq(self):
tdi = timedelta_range("1 day", periods=4, freq="s")
tdi = tdi._with_freq(None)
res = tm.round_trip_pickle(tdi)
tm.assert_index_equal(res, tdi)
|
var/spack/repos/builtin/packages/ply/package.py | kkauder/spack | 2,360 | 12756527
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Ply(AutotoolsPackage):
"""A light-weight dynamic tracer for Linux that leverages the
kernel's BPF VM in concert with kprobes and tracepoints to attach
probes to arbitrary points in the kernel."""
homepage = "https://github.com/iovisor/ply"
git = "https://github.com/iovisor/ply.git"
version('2.1.1', commit='<PASSWORD>')
depends_on('autoconf', type='build')
depends_on('automake', type='build')
depends_on('libtool', type='build')
depends_on('m4', type='build')
def autoreconf(self, spec, prefix):
bash = which("bash")
bash('./autogen.sh')
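# Typical Spack CLI usage once this recipe is available in a repository (standard Spack
# commands, shown here for illustration only):
#
#   spack info ply       # prints the description and version declared above
#   spack install ply    # builds ply together with its autotools build dependencies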
|
tests/unit/v1/test_base_query.py | anna-hope/python-firestore | 140 | 12756536
# Copyright 2017 Google LLC All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import mock
import pytest
def _make_base_query(*args, **kwargs):
from google.cloud.firestore_v1.base_query import BaseQuery
return BaseQuery(*args, **kwargs)
def _make_base_query_all_fields(
limit=9876, offset=12, skip_fields=(), parent=None, all_descendants=True,
):
kwargs = {
"projection": mock.sentinel.projection,
"field_filters": mock.sentinel.filters,
"orders": mock.sentinel.orders,
"limit": limit,
"offset": offset,
"start_at": mock.sentinel.start_at,
"end_at": mock.sentinel.end_at,
"all_descendants": all_descendants,
}
for field in skip_fields:
kwargs.pop(field)
if parent is None:
parent = mock.sentinel.parent
return _make_base_query(parent, **kwargs)
def test_basequery_constructor_defaults():
query = _make_base_query(mock.sentinel.parent)
assert query._parent is mock.sentinel.parent
assert query._projection is None
assert query._field_filters == ()
assert query._orders == ()
assert query._limit is None
assert query._offset is None
assert query._start_at is None
assert query._end_at is None
assert not query._all_descendants
def test_basequery_constructor_explicit():
limit = 234
offset = 56
query = _make_base_query_all_fields(limit=limit, offset=offset)
assert query._parent is mock.sentinel.parent
assert query._projection is mock.sentinel.projection
assert query._field_filters is mock.sentinel.filters
assert query._orders == mock.sentinel.orders
assert query._limit == limit
assert query._offset == offset
assert query._start_at is mock.sentinel.start_at
assert query._end_at is mock.sentinel.end_at
assert query._all_descendants
def test_basequery__client_property():
parent = mock.Mock(_client=mock.sentinel.client, spec=["_client"])
query = _make_base_query(parent)
assert query._client is mock.sentinel.client
def test_basequery___eq___other_type():
query = _make_base_query_all_fields()
other = object()
assert not (query == other)
def test_basequery___eq___different_parent():
parent = mock.sentinel.parent
other_parent = mock.sentinel.other_parent
query = _make_base_query_all_fields(parent=parent)
other = _make_base_query_all_fields(parent=other_parent)
assert not (query == other)
def test_basequery___eq___different_projection():
parent = mock.sentinel.parent
query = _make_base_query_all_fields(parent=parent, skip_fields=("projection",))
query._projection = mock.sentinel.projection
other = _make_base_query_all_fields(parent=parent, skip_fields=("projection",))
other._projection = mock.sentinel.other_projection
assert not (query == other)
def test_basequery___eq___different_field_filters():
parent = mock.sentinel.parent
query = _make_base_query_all_fields(parent=parent, skip_fields=("field_filters",))
query._field_filters = mock.sentinel.field_filters
other = _make_base_query_all_fields(parent=parent, skip_fields=("field_filters",))
other._field_filters = mock.sentinel.other_field_filters
assert not (query == other)
def test_basequery___eq___different_orders():
parent = mock.sentinel.parent
query = _make_base_query_all_fields(parent=parent, skip_fields=("orders",))
query._orders = mock.sentinel.orders
other = _make_base_query_all_fields(parent=parent, skip_fields=("orders",))
other._orders = mock.sentinel.other_orders
assert not (query == other)
def test_basequery___eq___different_limit():
parent = mock.sentinel.parent
query = _make_base_query_all_fields(parent=parent, limit=10)
other = _make_base_query_all_fields(parent=parent, limit=20)
assert not (query == other)
def test_basequery___eq___different_offset():
parent = mock.sentinel.parent
query = _make_base_query_all_fields(parent=parent, offset=10)
other = _make_base_query_all_fields(parent=parent, offset=20)
assert not (query == other)
def test_basequery___eq___different_start_at():
parent = mock.sentinel.parent
query = _make_base_query_all_fields(parent=parent, skip_fields=("start_at",))
query._start_at = mock.sentinel.start_at
other = _make_base_query_all_fields(parent=parent, skip_fields=("start_at",))
other._start_at = mock.sentinel.other_start_at
assert not (query == other)
def test_basequery___eq___different_end_at():
parent = mock.sentinel.parent
query = _make_base_query_all_fields(parent=parent, skip_fields=("end_at",))
query._end_at = mock.sentinel.end_at
other = _make_base_query_all_fields(parent=parent, skip_fields=("end_at",))
other._end_at = mock.sentinel.other_end_at
assert not (query == other)
def test_basequery___eq___different_all_descendants():
parent = mock.sentinel.parent
query = _make_base_query_all_fields(parent=parent, all_descendants=True)
other = _make_base_query_all_fields(parent=parent, all_descendants=False)
assert not (query == other)
def test_basequery___eq___hit():
query = _make_base_query_all_fields()
other = _make_base_query_all_fields()
assert query == other
def _compare_queries(query1, query2, *attr_names):
attrs1 = query1.__dict__.copy()
attrs2 = query2.__dict__.copy()
assert len(attrs1) == len(attrs2)
    # The only differences should be in ``attr_names``.
for attr_name in attr_names:
attrs1.pop(attr_name)
attrs2.pop(attr_name)
for key, value in attrs1.items():
assert value is attrs2[key]
def test_basequery_select_invalid_path():
query = _make_base_query(mock.sentinel.parent)
with pytest.raises(ValueError):
query.select(["*"])
def test_basequery_select():
from google.cloud.firestore_v1.base_query import BaseQuery
query1 = _make_base_query_all_fields(all_descendants=True)
field_paths2 = ["foo", "bar"]
query2 = query1.select(field_paths2)
assert query2 is not query1
assert isinstance(query2, BaseQuery)
assert query2._projection == _make_projection_for_select(field_paths2)
_compare_queries(query1, query2, "_projection")
# Make sure it overrides.
field_paths3 = ["foo.baz"]
query3 = query2.select(field_paths3)
assert query3 is not query2
assert isinstance(query3, BaseQuery)
assert query3._projection == _make_projection_for_select(field_paths3)
_compare_queries(query2, query3, "_projection")
def test_basequery_where_invalid_path():
query = _make_base_query(mock.sentinel.parent)
with pytest.raises(ValueError):
query.where("*", "==", 1)
def test_basequery_where():
from google.cloud.firestore_v1.base_query import BaseQuery
from google.cloud.firestore_v1.types import StructuredQuery
from google.cloud.firestore_v1.types import document
from google.cloud.firestore_v1.types import query
query_inst = _make_base_query_all_fields(
skip_fields=("field_filters",), all_descendants=True
)
new_query = query_inst.where("power.level", ">", 9000)
assert query_inst is not new_query
assert isinstance(new_query, BaseQuery)
assert len(new_query._field_filters) == 1
field_pb = new_query._field_filters[0]
expected_pb = query.StructuredQuery.FieldFilter(
field=query.StructuredQuery.FieldReference(field_path="power.level"),
op=StructuredQuery.FieldFilter.Operator.GREATER_THAN,
value=document.Value(integer_value=9000),
)
assert field_pb == expected_pb
_compare_queries(query_inst, new_query, "_field_filters")
def _where_unary_helper(value, op_enum, op_string="=="):
from google.cloud.firestore_v1.base_query import BaseQuery
from google.cloud.firestore_v1.types import StructuredQuery
query_inst = _make_base_query_all_fields(skip_fields=("field_filters",))
field_path = "feeeld"
new_query = query_inst.where(field_path, op_string, value)
assert query_inst is not new_query
assert isinstance(new_query, BaseQuery)
assert len(new_query._field_filters) == 1
field_pb = new_query._field_filters[0]
expected_pb = StructuredQuery.UnaryFilter(
field=StructuredQuery.FieldReference(field_path=field_path), op=op_enum
)
assert field_pb == expected_pb
_compare_queries(query_inst, new_query, "_field_filters")
def test_basequery_where_eq_null():
from google.cloud.firestore_v1.types import StructuredQuery
op_enum = StructuredQuery.UnaryFilter.Operator.IS_NULL
_where_unary_helper(None, op_enum)
def test_basequery_where_gt_null():
with pytest.raises(ValueError):
_where_unary_helper(None, 0, op_string=">")
def test_basequery_where_eq_nan():
from google.cloud.firestore_v1.types import StructuredQuery
op_enum = StructuredQuery.UnaryFilter.Operator.IS_NAN
_where_unary_helper(float("nan"), op_enum)
def test_basequery_where_le_nan():
with pytest.raises(ValueError):
_where_unary_helper(float("nan"), 0, op_string="<=")
def test_basequery_where_w_delete():
from google.cloud.firestore_v1 import DELETE_FIELD
with pytest.raises(ValueError):
_where_unary_helper(DELETE_FIELD, 0)
def test_basequery_where_w_server_timestamp():
from google.cloud.firestore_v1 import SERVER_TIMESTAMP
with pytest.raises(ValueError):
_where_unary_helper(SERVER_TIMESTAMP, 0)
def test_basequery_where_w_array_remove():
from google.cloud.firestore_v1 import ArrayRemove
with pytest.raises(ValueError):
_where_unary_helper(ArrayRemove([1, 3, 5]), 0)
def test_basequery_where_w_array_union():
from google.cloud.firestore_v1 import ArrayUnion
with pytest.raises(ValueError):
_where_unary_helper(ArrayUnion([2, 4, 8]), 0)
def test_basequery_order_by_invalid_path():
query = _make_base_query(mock.sentinel.parent)
with pytest.raises(ValueError):
query.order_by("*")
def test_basequery_order_by():
from google.cloud.firestore_v1.types import StructuredQuery
from google.cloud.firestore_v1.base_query import BaseQuery
query1 = _make_base_query_all_fields(skip_fields=("orders",), all_descendants=True)
field_path2 = "a"
query2 = query1.order_by(field_path2)
assert query2 is not query1
assert isinstance(query2, BaseQuery)
order = _make_order_pb(field_path2, StructuredQuery.Direction.ASCENDING)
assert query2._orders == (order,)
_compare_queries(query1, query2, "_orders")
# Make sure it appends to the orders.
field_path3 = "b"
query3 = query2.order_by(field_path3, direction=BaseQuery.DESCENDING)
assert query3 is not query2
assert isinstance(query3, BaseQuery)
order_pb3 = _make_order_pb(field_path3, StructuredQuery.Direction.DESCENDING)
assert query3._orders == (order, order_pb3)
_compare_queries(query2, query3, "_orders")
def test_basequery_limit():
from google.cloud.firestore_v1.base_query import BaseQuery
query1 = _make_base_query_all_fields(all_descendants=True)
limit2 = 100
query2 = query1.limit(limit2)
assert not query2._limit_to_last
assert query2 is not query1
assert isinstance(query2, BaseQuery)
assert query2._limit == limit2
_compare_queries(query1, query2, "_limit")
# Make sure it overrides.
limit3 = 10
query3 = query2.limit(limit3)
assert query3 is not query2
assert isinstance(query3, BaseQuery)
assert query3._limit == limit3
_compare_queries(query2, query3, "_limit")
def test_basequery_limit_to_last():
from google.cloud.firestore_v1.base_query import BaseQuery
query1 = _make_base_query_all_fields(all_descendants=True)
limit2 = 100
query2 = query1.limit_to_last(limit2)
assert query2._limit_to_last
assert query2 is not query1
assert isinstance(query2, BaseQuery)
assert query2._limit == limit2
_compare_queries(query1, query2, "_limit", "_limit_to_last")
# Make sure it overrides.
limit3 = 10
query3 = query2.limit(limit3)
assert query3 is not query2
assert isinstance(query3, BaseQuery)
assert query3._limit == limit3
_compare_queries(query2, query3, "_limit", "_limit_to_last")
def test_basequery__resolve_chunk_size():
# With a global limit
query = _make_client().collection("asdf").limit(5)
assert query._resolve_chunk_size(3, 10) == 2
assert query._resolve_chunk_size(3, 1) == 1
assert query._resolve_chunk_size(3, 2) == 2
# With no limit
query = _make_client().collection("asdf")._query()
assert query._resolve_chunk_size(3, 10) == 10
assert query._resolve_chunk_size(3, 1) == 1
assert query._resolve_chunk_size(3, 2) == 2
def test_basequery_offset():
from google.cloud.firestore_v1.base_query import BaseQuery
query1 = _make_base_query_all_fields(all_descendants=True)
offset2 = 23
query2 = query1.offset(offset2)
assert query2 is not query1
assert isinstance(query2, BaseQuery)
assert query2._offset == offset2
_compare_queries(query1, query2, "_offset")
# Make sure it overrides.
offset3 = 35
query3 = query2.offset(offset3)
assert query3 is not query2
assert isinstance(query3, BaseQuery)
assert query3._offset == offset3
_compare_queries(query2, query3, "_offset")
def test_basequery__cursor_helper_w_dict():
values = {"a": 7, "b": "foo"}
query1 = _make_base_query(mock.sentinel.parent)
query1._all_descendants = True
query2 = query1._cursor_helper(values, True, True)
assert query2._parent is mock.sentinel.parent
assert query2._projection is None
assert query2._field_filters == ()
assert query2._orders == query1._orders
assert query2._limit is None
assert query2._offset is None
assert query2._end_at is None
assert query2._all_descendants
cursor, before = query2._start_at
assert cursor == values
assert before
def test_basequery__cursor_helper_w_tuple():
values = (7, "foo")
query1 = _make_base_query(mock.sentinel.parent)
query2 = query1._cursor_helper(values, False, True)
assert query2._parent is mock.sentinel.parent
assert query2._projection is None
assert query2._field_filters == ()
assert query2._orders == query1._orders
assert query2._limit is None
assert query2._offset is None
assert query2._end_at is None
cursor, before = query2._start_at
assert cursor == list(values)
assert not before
def test_basequery__cursor_helper_w_list():
values = [7, "foo"]
query1 = _make_base_query(mock.sentinel.parent)
query2 = query1._cursor_helper(values, True, False)
assert query2._parent is mock.sentinel.parent
assert query2._projection is None
assert query2._field_filters == ()
assert query2._orders == query1._orders
assert query2._limit is None
assert query2._offset is None
assert query2._start_at is None
cursor, before = query2._end_at
assert cursor == values
assert before
def test_basequery__cursor_helper_w_snapshot_wrong_collection():
values = {"a": 7, "b": "foo"}
docref = _make_docref("there", "doc_id")
snapshot = _make_snapshot(docref, values)
collection = _make_collection("here")
query = _make_base_query(collection)
with pytest.raises(ValueError):
query._cursor_helper(snapshot, False, False)
def test_basequery__cursor_helper_w_snapshot_other_collection_all_descendants():
values = {"a": 7, "b": "foo"}
docref = _make_docref("there", "doc_id")
snapshot = _make_snapshot(docref, values)
collection = _make_collection("here")
query1 = _make_base_query(collection, all_descendants=True)
query2 = query1._cursor_helper(snapshot, False, False)
assert query2._parent is collection
assert query2._projection is None
assert query2._field_filters == ()
assert query2._orders == ()
assert query2._limit is None
assert query2._offset is None
assert query2._start_at is None
cursor, before = query2._end_at
assert cursor is snapshot
assert not before
def test_basequery__cursor_helper_w_snapshot():
values = {"a": 7, "b": "foo"}
docref = _make_docref("here", "doc_id")
snapshot = _make_snapshot(docref, values)
collection = _make_collection("here")
query1 = _make_base_query(collection)
query2 = query1._cursor_helper(snapshot, False, False)
assert query2._parent is collection
assert query2._projection is None
assert query2._field_filters == ()
assert query2._orders == ()
assert query2._limit is None
assert query2._offset is None
assert query2._start_at is None
cursor, before = query2._end_at
assert cursor is snapshot
assert not before
def test_basequery_start_at():
from google.cloud.firestore_v1.base_query import BaseQuery
collection = _make_collection("here")
query1 = _make_base_query_all_fields(
parent=collection, skip_fields=("orders",), all_descendants=True
)
query2 = query1.order_by("hi")
document_fields3 = {"hi": "mom"}
query3 = query2.start_at(document_fields3)
assert query3 is not query2
assert isinstance(query3, BaseQuery)
assert query3._start_at == (document_fields3, True)
_compare_queries(query2, query3, "_start_at")
# Make sure it overrides.
query4 = query3.order_by("bye")
values5 = {"hi": "zap", "bye": 88}
docref = _make_docref("here", "doc_id")
document_fields5 = _make_snapshot(docref, values5)
query5 = query4.start_at(document_fields5)
assert query5 is not query4
assert isinstance(query5, BaseQuery)
assert query5._start_at == (document_fields5, True)
_compare_queries(query4, query5, "_start_at")
def test_basequery_start_after():
from google.cloud.firestore_v1.base_query import BaseQuery
collection = _make_collection("here")
query1 = _make_base_query_all_fields(parent=collection, skip_fields=("orders",))
query2 = query1.order_by("down")
document_fields3 = {"down": 99.75}
query3 = query2.start_after(document_fields3)
assert query3 is not query2
assert isinstance(query3, BaseQuery)
assert query3._start_at == (document_fields3, False)
_compare_queries(query2, query3, "_start_at")
# Make sure it overrides.
query4 = query3.order_by("out")
values5 = {"down": 100.25, "out": b"\x00\x01"}
docref = _make_docref("here", "doc_id")
document_fields5 = _make_snapshot(docref, values5)
query5 = query4.start_after(document_fields5)
assert query5 is not query4
assert isinstance(query5, BaseQuery)
assert query5._start_at == (document_fields5, False)
_compare_queries(query4, query5, "_start_at")
def test_basequery_end_before():
from google.cloud.firestore_v1.base_query import BaseQuery
collection = _make_collection("here")
query1 = _make_base_query_all_fields(parent=collection, skip_fields=("orders",))
query2 = query1.order_by("down")
document_fields3 = {"down": 99.75}
query3 = query2.end_before(document_fields3)
assert query3 is not query2
assert isinstance(query3, BaseQuery)
assert query3._end_at == (document_fields3, True)
_compare_queries(query2, query3, "_end_at")
# Make sure it overrides.
query4 = query3.order_by("out")
values5 = {"down": 100.25, "out": b"\x00\x01"}
docref = _make_docref("here", "doc_id")
document_fields5 = _make_snapshot(docref, values5)
query5 = query4.end_before(document_fields5)
assert query5 is not query4
assert isinstance(query5, BaseQuery)
assert query5._end_at == (document_fields5, True)
_compare_queries(query4, query5, "_end_at")
def test_basequery_end_at():
from google.cloud.firestore_v1.base_query import BaseQuery
collection = _make_collection("here")
query1 = _make_base_query_all_fields(parent=collection, skip_fields=("orders",))
query2 = query1.order_by("hi")
document_fields3 = {"hi": "mom"}
query3 = query2.end_at(document_fields3)
assert query3 is not query2
assert isinstance(query3, BaseQuery)
assert query3._end_at == (document_fields3, False)
_compare_queries(query2, query3, "_end_at")
# Make sure it overrides.
query4 = query3.order_by("bye")
values5 = {"hi": "zap", "bye": 88}
docref = _make_docref("here", "doc_id")
document_fields5 = _make_snapshot(docref, values5)
query5 = query4.end_at(document_fields5)
assert query5 is not query4
assert isinstance(query5, BaseQuery)
assert query5._end_at == (document_fields5, False)
_compare_queries(query4, query5, "_end_at")
def test_basequery__filters_pb_empty():
query = _make_base_query(mock.sentinel.parent)
assert len(query._field_filters) == 0
assert query._filters_pb() is None
def test_basequery__filters_pb_single():
from google.cloud.firestore_v1.types import StructuredQuery
from google.cloud.firestore_v1.types import document
from google.cloud.firestore_v1.types import query
query1 = _make_base_query(mock.sentinel.parent)
query2 = query1.where("x.y", ">", 50.5)
filter_pb = query2._filters_pb()
expected_pb = query.StructuredQuery.Filter(
field_filter=query.StructuredQuery.FieldFilter(
field=query.StructuredQuery.FieldReference(field_path="x.y"),
op=StructuredQuery.FieldFilter.Operator.GREATER_THAN,
value=document.Value(double_value=50.5),
)
)
assert filter_pb == expected_pb
def test_basequery__filters_pb_multi():
from google.cloud.firestore_v1.types import StructuredQuery
from google.cloud.firestore_v1.types import document
from google.cloud.firestore_v1.types import query
query1 = _make_base_query(mock.sentinel.parent)
query2 = query1.where("x.y", ">", 50.5)
query3 = query2.where("ABC", "==", 123)
filter_pb = query3._filters_pb()
op_class = StructuredQuery.FieldFilter.Operator
expected_pb = query.StructuredQuery.Filter(
composite_filter=query.StructuredQuery.CompositeFilter(
op=StructuredQuery.CompositeFilter.Operator.AND,
filters=[
query.StructuredQuery.Filter(
field_filter=query.StructuredQuery.FieldFilter(
field=query.StructuredQuery.FieldReference(field_path="x.y"),
op=op_class.GREATER_THAN,
value=document.Value(double_value=50.5),
)
),
query.StructuredQuery.Filter(
field_filter=query.StructuredQuery.FieldFilter(
field=query.StructuredQuery.FieldReference(field_path="ABC"),
op=op_class.EQUAL,
value=document.Value(integer_value=123),
)
),
],
)
)
assert filter_pb == expected_pb
def test_basequery__normalize_projection_none():
query = _make_base_query(mock.sentinel.parent)
assert query._normalize_projection(None) is None
def test_basequery__normalize_projection_empty():
projection = _make_projection_for_select([])
query = _make_base_query(mock.sentinel.parent)
normalized = query._normalize_projection(projection)
field_paths = [field_ref.field_path for field_ref in normalized.fields]
assert field_paths == ["__name__"]
def test_basequery__normalize_projection_non_empty():
projection = _make_projection_for_select(["a", "b"])
query = _make_base_query(mock.sentinel.parent)
assert query._normalize_projection(projection) is projection
def test_basequery__normalize_orders_wo_orders_wo_cursors():
query = _make_base_query(mock.sentinel.parent)
expected = []
assert query._normalize_orders() == expected
def test_basequery__normalize_orders_w_orders_wo_cursors():
query = _make_base_query(mock.sentinel.parent).order_by("a")
expected = [query._make_order("a", "ASCENDING")]
assert query._normalize_orders() == expected
def test_basequery__normalize_orders_wo_orders_w_snapshot_cursor():
values = {"a": 7, "b": "foo"}
docref = _make_docref("here", "doc_id")
snapshot = _make_snapshot(docref, values)
collection = _make_collection("here")
query = _make_base_query(collection).start_at(snapshot)
expected = [query._make_order("__name__", "ASCENDING")]
assert query._normalize_orders() == expected
def test_basequery__normalize_orders_w_name_orders_w_snapshot_cursor():
values = {"a": 7, "b": "foo"}
docref = _make_docref("here", "doc_id")
snapshot = _make_snapshot(docref, values)
collection = _make_collection("here")
query = (
_make_base_query(collection)
.order_by("__name__", "DESCENDING")
.start_at(snapshot)
)
expected = [query._make_order("__name__", "DESCENDING")]
assert query._normalize_orders() == expected
def test_basequery__normalize_orders_wo_orders_w_snapshot_cursor_w_neq_exists():
values = {"a": 7, "b": "foo"}
docref = _make_docref("here", "doc_id")
snapshot = _make_snapshot(docref, values)
collection = _make_collection("here")
query = (
_make_base_query(collection)
.where("c", "<=", 20)
.order_by("c", "DESCENDING")
.start_at(snapshot)
)
expected = [
query._make_order("c", "DESCENDING"),
query._make_order("__name__", "DESCENDING"),
]
assert query._normalize_orders() == expected
def test_basequery__normalize_orders_wo_orders_w_snapshot_cursor_w_neq_where():
values = {"a": 7, "b": "foo"}
docref = _make_docref("here", "doc_id")
snapshot = _make_snapshot(docref, values)
collection = _make_collection("here")
query = _make_base_query(collection).where("c", "<=", 20).end_at(snapshot)
expected = [
query._make_order("c", "ASCENDING"),
query._make_order("__name__", "ASCENDING"),
]
assert query._normalize_orders() == expected
def test_basequery__normalize_orders_wo_orders_w_snapshot_cursor_w_isnull_where():
values = {"a": 7, "b": "foo"}
docref = _make_docref("here", "doc_id")
snapshot = _make_snapshot(docref, values)
collection = _make_collection("here")
query = _make_base_query(collection).where("c", "==", None).end_at(snapshot)
expected = [
query._make_order("__name__", "ASCENDING"),
]
assert query._normalize_orders() == expected
def test_basequery__normalize_orders_w_name_orders_w_none_cursor():
collection = _make_collection("here")
query = (
_make_base_query(collection).order_by("__name__", "DESCENDING").start_at(None)
)
expected = [query._make_order("__name__", "DESCENDING")]
assert query._normalize_orders() == expected
def test_basequery__normalize_cursor_none():
query = _make_base_query(mock.sentinel.parent)
assert query._normalize_cursor(None, query._orders) is None
def test_basequery__normalize_cursor_no_order():
cursor = ([1], True)
query = _make_base_query(mock.sentinel.parent)
with pytest.raises(ValueError):
query._normalize_cursor(cursor, query._orders)
def test_basequery__normalize_cursor_as_list_mismatched_order():
cursor = ([1, 2], True)
query = _make_base_query(mock.sentinel.parent).order_by("b", "ASCENDING")
with pytest.raises(ValueError):
query._normalize_cursor(cursor, query._orders)
def test_basequery__normalize_cursor_as_dict_mismatched_order():
cursor = ({"a": 1}, True)
query = _make_base_query(mock.sentinel.parent).order_by("b", "ASCENDING")
with pytest.raises(ValueError):
query._normalize_cursor(cursor, query._orders)
def test_basequery__normalize_cursor_as_dict_extra_orders_ok():
cursor = ({"name": "Springfield"}, True)
query = _make_base_query(mock.sentinel.parent).order_by("name").order_by("state")
normalized = query._normalize_cursor(cursor, query._orders)
assert normalized == (["Springfield"], True)
def test_basequery__normalize_cursor_extra_orders_ok():
cursor = (["Springfield"], True)
query = _make_base_query(mock.sentinel.parent).order_by("name").order_by("state")
query._normalize_cursor(cursor, query._orders)
def test_basequery__normalize_cursor_w_delete():
from google.cloud.firestore_v1 import DELETE_FIELD
cursor = ([DELETE_FIELD], True)
query = _make_base_query(mock.sentinel.parent).order_by("b", "ASCENDING")
with pytest.raises(ValueError):
query._normalize_cursor(cursor, query._orders)
def test_basequery__normalize_cursor_w_server_timestamp():
from google.cloud.firestore_v1 import SERVER_TIMESTAMP
cursor = ([SERVER_TIMESTAMP], True)
query = _make_base_query(mock.sentinel.parent).order_by("b", "ASCENDING")
with pytest.raises(ValueError):
query._normalize_cursor(cursor, query._orders)
def test_basequery__normalize_cursor_w_array_remove():
from google.cloud.firestore_v1 import ArrayRemove
cursor = ([ArrayRemove([1, 3, 5])], True)
query = _make_base_query(mock.sentinel.parent).order_by("b", "ASCENDING")
with pytest.raises(ValueError):
query._normalize_cursor(cursor, query._orders)
def test_basequery__normalize_cursor_w_array_union():
from google.cloud.firestore_v1 import ArrayUnion
cursor = ([ArrayUnion([2, 4, 8])], True)
query = _make_base_query(mock.sentinel.parent).order_by("b", "ASCENDING")
with pytest.raises(ValueError):
query._normalize_cursor(cursor, query._orders)
def test_basequery__normalize_cursor_as_list_hit():
cursor = ([1], True)
query = _make_base_query(mock.sentinel.parent).order_by("b", "ASCENDING")
assert query._normalize_cursor(cursor, query._orders) == ([1], True)
def test_basequery__normalize_cursor_as_dict_hit():
cursor = ({"b": 1}, True)
query = _make_base_query(mock.sentinel.parent).order_by("b", "ASCENDING")
assert query._normalize_cursor(cursor, query._orders) == ([1], True)
def test_basequery__normalize_cursor_as_dict_with_dot_key_hit():
cursor = ({"b.a": 1}, True)
query = _make_base_query(mock.sentinel.parent).order_by("b.a", "ASCENDING")
assert query._normalize_cursor(cursor, query._orders) == ([1], True)
def test_basequery__normalize_cursor_as_dict_with_inner_data_hit():
cursor = ({"b": {"a": 1}}, True)
query = _make_base_query(mock.sentinel.parent).order_by("b.a", "ASCENDING")
assert query._normalize_cursor(cursor, query._orders) == ([1], True)
def test_basequery__normalize_cursor_as_snapshot_hit():
values = {"b": 1}
docref = _make_docref("here", "doc_id")
snapshot = _make_snapshot(docref, values)
cursor = (snapshot, True)
collection = _make_collection("here")
query = _make_base_query(collection).order_by("b", "ASCENDING")
assert query._normalize_cursor(cursor, query._orders) == ([1], True)
def test_basequery__normalize_cursor_w___name___w_reference():
db_string = "projects/my-project/database/(default)"
client = mock.Mock(spec=["_database_string"])
client._database_string = db_string
parent = mock.Mock(spec=["_path", "_client"])
parent._client = client
parent._path = ["C"]
query = _make_base_query(parent).order_by("__name__", "ASCENDING")
docref = _make_docref("here", "doc_id")
values = {"a": 7}
snapshot = _make_snapshot(docref, values)
expected = docref
cursor = (snapshot, True)
assert query._normalize_cursor(cursor, query._orders) == ([expected], True)
def test_basequery__normalize_cursor_w___name___wo_slash():
db_string = "projects/my-project/database/(default)"
client = mock.Mock(spec=["_database_string"])
client._database_string = db_string
parent = mock.Mock(spec=["_path", "_client", "document"])
parent._client = client
parent._path = ["C"]
document = parent.document.return_value = mock.Mock(spec=[])
query = _make_base_query(parent).order_by("__name__", "ASCENDING")
cursor = (["b"], True)
expected = document
assert query._normalize_cursor(cursor, query._orders) == ([expected], True)
parent.document.assert_called_once_with("b")
def test_basequery__to_protobuf_all_fields():
from google.protobuf import wrappers_pb2
from google.cloud.firestore_v1.types import StructuredQuery
from google.cloud.firestore_v1.types import document
from google.cloud.firestore_v1.types import query
parent = mock.Mock(id="cat", spec=["id"])
query1 = _make_base_query(parent)
query2 = query1.select(["X", "Y", "Z"])
query3 = query2.where("Y", ">", 2.5)
query4 = query3.order_by("X")
query5 = query4.limit(17)
query6 = query5.offset(3)
query7 = query6.start_at({"X": 10})
query8 = query7.end_at({"X": 25})
structured_query_pb = query8._to_protobuf()
query_kwargs = {
"from_": [query.StructuredQuery.CollectionSelector(collection_id=parent.id)],
"select": query.StructuredQuery.Projection(
fields=[
query.StructuredQuery.FieldReference(field_path=field_path)
for field_path in ["X", "Y", "Z"]
]
),
"where": query.StructuredQuery.Filter(
field_filter=query.StructuredQuery.FieldFilter(
field=query.StructuredQuery.FieldReference(field_path="Y"),
op=StructuredQuery.FieldFilter.Operator.GREATER_THAN,
value=document.Value(double_value=2.5),
)
),
"order_by": [_make_order_pb("X", StructuredQuery.Direction.ASCENDING)],
"start_at": query.Cursor(
values=[document.Value(integer_value=10)], before=True
),
"end_at": query.Cursor(values=[document.Value(integer_value=25)]),
"offset": 3,
"limit": wrappers_pb2.Int32Value(value=17),
}
expected_pb = query.StructuredQuery(**query_kwargs)
assert structured_query_pb == expected_pb
def test_basequery__to_protobuf_select_only():
from google.cloud.firestore_v1.types import query
parent = mock.Mock(id="cat", spec=["id"])
query1 = _make_base_query(parent)
field_paths = ["a.b", "a.c", "d"]
query2 = query1.select(field_paths)
structured_query_pb = query2._to_protobuf()
query_kwargs = {
"from_": [query.StructuredQuery.CollectionSelector(collection_id=parent.id)],
"select": query.StructuredQuery.Projection(
fields=[
query.StructuredQuery.FieldReference(field_path=field_path)
for field_path in field_paths
]
),
}
expected_pb = query.StructuredQuery(**query_kwargs)
assert structured_query_pb == expected_pb
def test_basequery__to_protobuf_where_only():
from google.cloud.firestore_v1.types import StructuredQuery
from google.cloud.firestore_v1.types import document
from google.cloud.firestore_v1.types import query
parent = mock.Mock(id="dog", spec=["id"])
query1 = _make_base_query(parent)
query2 = query1.where("a", "==", u"b")
structured_query_pb = query2._to_protobuf()
query_kwargs = {
"from_": [query.StructuredQuery.CollectionSelector(collection_id=parent.id)],
"where": query.StructuredQuery.Filter(
field_filter=query.StructuredQuery.FieldFilter(
field=query.StructuredQuery.FieldReference(field_path="a"),
op=StructuredQuery.FieldFilter.Operator.EQUAL,
value=document.Value(string_value=u"b"),
)
),
}
expected_pb = query.StructuredQuery(**query_kwargs)
assert structured_query_pb == expected_pb
def test_basequery__to_protobuf_order_by_only():
from google.cloud.firestore_v1.types import StructuredQuery
from google.cloud.firestore_v1.types import query
parent = mock.Mock(id="fish", spec=["id"])
query1 = _make_base_query(parent)
query2 = query1.order_by("abc")
structured_query_pb = query2._to_protobuf()
query_kwargs = {
"from_": [query.StructuredQuery.CollectionSelector(collection_id=parent.id)],
"order_by": [_make_order_pb("abc", StructuredQuery.Direction.ASCENDING)],
}
expected_pb = query.StructuredQuery(**query_kwargs)
assert structured_query_pb == expected_pb
def test_basequery__to_protobuf_start_at_only():
# NOTE: "only" is wrong since we must have ``order_by`` as well.
from google.cloud.firestore_v1.types import StructuredQuery
from google.cloud.firestore_v1.types import document
from google.cloud.firestore_v1.types import query
parent = mock.Mock(id="phish", spec=["id"])
query_inst = (
_make_base_query(parent).order_by("X.Y").start_after({"X": {"Y": u"Z"}})
)
structured_query_pb = query_inst._to_protobuf()
query_kwargs = {
"from_": [StructuredQuery.CollectionSelector(collection_id=parent.id)],
"order_by": [_make_order_pb("X.Y", StructuredQuery.Direction.ASCENDING)],
"start_at": query.Cursor(values=[document.Value(string_value=u"Z")]),
}
expected_pb = StructuredQuery(**query_kwargs)
assert structured_query_pb == expected_pb
def test_basequery__to_protobuf_end_at_only():
# NOTE: "only" is wrong since we must have ``order_by`` as well.
from google.cloud.firestore_v1.types import StructuredQuery
from google.cloud.firestore_v1.types import document
from google.cloud.firestore_v1.types import query
parent = mock.Mock(id="ghoti", spec=["id"])
query_inst = _make_base_query(parent).order_by("a").end_at({"a": 88})
structured_query_pb = query_inst._to_protobuf()
query_kwargs = {
"from_": [query.StructuredQuery.CollectionSelector(collection_id=parent.id)],
"order_by": [_make_order_pb("a", StructuredQuery.Direction.ASCENDING)],
"end_at": query.Cursor(values=[document.Value(integer_value=88)]),
}
expected_pb = query.StructuredQuery(**query_kwargs)
assert structured_query_pb == expected_pb
def test_basequery__to_protobuf_offset_only():
from google.cloud.firestore_v1.types import query
parent = mock.Mock(id="cartt", spec=["id"])
query1 = _make_base_query(parent)
offset = 14
query2 = query1.offset(offset)
structured_query_pb = query2._to_protobuf()
query_kwargs = {
"from_": [query.StructuredQuery.CollectionSelector(collection_id=parent.id)],
"offset": offset,
}
expected_pb = query.StructuredQuery(**query_kwargs)
assert structured_query_pb == expected_pb
def test_basequery__to_protobuf_limit_only():
from google.protobuf import wrappers_pb2
from google.cloud.firestore_v1.types import query
parent = mock.Mock(id="donut", spec=["id"])
query1 = _make_base_query(parent)
limit = 31
query2 = query1.limit(limit)
structured_query_pb = query2._to_protobuf()
query_kwargs = {
"from_": [query.StructuredQuery.CollectionSelector(collection_id=parent.id)],
"limit": wrappers_pb2.Int32Value(value=limit),
}
expected_pb = query.StructuredQuery(**query_kwargs)
assert structured_query_pb == expected_pb
def test_basequery_comparator_no_ordering():
query = _make_base_query(mock.sentinel.parent)
query._orders = []
doc1 = mock.Mock()
doc1.reference._path = ("col", "adocument1")
doc2 = mock.Mock()
doc2.reference._path = ("col", "adocument2")
sort = query._comparator(doc1, doc2)
assert sort == -1
def test_basequery_comparator_no_ordering_same_id():
query = _make_base_query(mock.sentinel.parent)
query._orders = []
doc1 = mock.Mock()
doc1.reference._path = ("col", "adocument1")
doc2 = mock.Mock()
doc2.reference._path = ("col", "adocument1")
sort = query._comparator(doc1, doc2)
assert sort == 0
def test_basequery_comparator_ordering():
query = _make_base_query(mock.sentinel.parent)
orderByMock = mock.Mock()
orderByMock.field.field_path = "last"
orderByMock.direction = 1 # ascending
query._orders = [orderByMock]
doc1 = mock.Mock()
doc1.reference._path = ("col", "adocument1")
doc1._data = {
"first": {"stringValue": "Ada"},
"last": {"stringValue": "secondlovelace"},
}
doc2 = mock.Mock()
doc2.reference._path = ("col", "adocument2")
doc2._data = {
"first": {"stringValue": "Ada"},
"last": {"stringValue": "lovelace"},
}
sort = query._comparator(doc1, doc2)
assert sort == 1
def test_basequery_comparator_ordering_descending():
query = _make_base_query(mock.sentinel.parent)
orderByMock = mock.Mock()
orderByMock.field.field_path = "last"
orderByMock.direction = -1 # descending
query._orders = [orderByMock]
doc1 = mock.Mock()
doc1.reference._path = ("col", "adocument1")
doc1._data = {
"first": {"stringValue": "Ada"},
"last": {"stringValue": "secondlovelace"},
}
doc2 = mock.Mock()
doc2.reference._path = ("col", "adocument2")
doc2._data = {
"first": {"stringValue": "Ada"},
"last": {"stringValue": "lovelace"},
}
sort = query._comparator(doc1, doc2)
assert sort == -1
def test_basequery_comparator_missing_order_by_field_in_data_raises():
query = _make_base_query(mock.sentinel.parent)
orderByMock = mock.Mock()
orderByMock.field.field_path = "last"
orderByMock.direction = 1 # ascending
query._orders = [orderByMock]
doc1 = mock.Mock()
doc1.reference._path = ("col", "adocument1")
doc1._data = {}
doc2 = mock.Mock()
doc2.reference._path = ("col", "adocument2")
doc2._data = {
"first": {"stringValue": "Ada"},
"last": {"stringValue": "lovelace"},
}
with pytest.raises(ValueError) as exc_info:
query._comparator(doc1, doc2)
(message,) = exc_info.value.args
assert message.startswith("Can only compare fields ")
def test_basequery_recursive_multiple():
from google.cloud.firestore_v1.collection import CollectionReference
from google.cloud.firestore_v1.base_query import BaseQuery
class DerivedQuery(BaseQuery):
@staticmethod
def _get_collection_reference_class():
return CollectionReference
query = DerivedQuery(_make_client().collection("asdf"))
assert isinstance(query.recursive().recursive(), DerivedQuery)
def _get_op_class():
from google.cloud.firestore_v1.types import StructuredQuery
return StructuredQuery.FieldFilter.Operator
def test__enum_from_op_string_lt():
from google.cloud.firestore_v1.base_query import _enum_from_op_string
op_class = _get_op_class()
assert _enum_from_op_string("<") == op_class.LESS_THAN
def test__enum_from_op_string_le():
from google.cloud.firestore_v1.base_query import _enum_from_op_string
op_class = _get_op_class()
assert _enum_from_op_string("<=") == op_class.LESS_THAN_OR_EQUAL
def test__enum_from_op_string_eq():
from google.cloud.firestore_v1.base_query import _enum_from_op_string
op_class = _get_op_class()
assert _enum_from_op_string("==") == op_class.EQUAL
def test__enum_from_op_string_ge():
from google.cloud.firestore_v1.base_query import _enum_from_op_string
op_class = _get_op_class()
assert _enum_from_op_string(">=") == op_class.GREATER_THAN_OR_EQUAL
def test__enum_from_op_string_gt():
from google.cloud.firestore_v1.base_query import _enum_from_op_string
op_class = _get_op_class()
assert _enum_from_op_string(">") == op_class.GREATER_THAN
def test__enum_from_op_string_array_contains():
from google.cloud.firestore_v1.base_query import _enum_from_op_string
op_class = _get_op_class()
assert _enum_from_op_string("array_contains") == op_class.ARRAY_CONTAINS
def test__enum_from_op_string_in():
from google.cloud.firestore_v1.base_query import _enum_from_op_string
op_class = _get_op_class()
assert _enum_from_op_string("in") == op_class.IN
def test__enum_from_op_string_array_contains_any():
from google.cloud.firestore_v1.base_query import _enum_from_op_string
op_class = _get_op_class()
assert _enum_from_op_string("array_contains_any") == op_class.ARRAY_CONTAINS_ANY
def test__enum_from_op_string_not_in():
from google.cloud.firestore_v1.base_query import _enum_from_op_string
op_class = _get_op_class()
assert _enum_from_op_string("not-in") == op_class.NOT_IN
def test__enum_from_op_string_not_eq():
from google.cloud.firestore_v1.base_query import _enum_from_op_string
op_class = _get_op_class()
assert _enum_from_op_string("!=") == op_class.NOT_EQUAL
def test__enum_from_op_string_invalid():
from google.cloud.firestore_v1.base_query import _enum_from_op_string
with pytest.raises(ValueError):
_enum_from_op_string("?")
def test__isnan_valid():
from google.cloud.firestore_v1.base_query import _isnan
assert _isnan(float("nan"))
def test__isnan_invalid():
from google.cloud.firestore_v1.base_query import _isnan
assert not _isnan(51.5)
assert not _isnan(None)
assert not _isnan("str")
assert not _isnan(int)
assert not _isnan(1.0 + 1.0j)
def test__enum_from_direction_success():
from google.cloud.firestore_v1.types import StructuredQuery
from google.cloud.firestore_v1.base_query import _enum_from_direction
from google.cloud.firestore_v1.query import Query
dir_class = StructuredQuery.Direction
assert _enum_from_direction(Query.ASCENDING) == dir_class.ASCENDING
assert _enum_from_direction(Query.DESCENDING) == dir_class.DESCENDING
# Ints pass through
assert _enum_from_direction(dir_class.ASCENDING) == dir_class.ASCENDING
assert _enum_from_direction(dir_class.DESCENDING) == dir_class.DESCENDING
def test__enum_from_direction_failure():
from google.cloud.firestore_v1.base_query import _enum_from_direction
with pytest.raises(ValueError):
_enum_from_direction("neither-ASCENDING-nor-DESCENDING")
def test__filter_pb_unary():
from google.cloud.firestore_v1.types import StructuredQuery
from google.cloud.firestore_v1.base_query import _filter_pb
from google.cloud.firestore_v1.types import query
unary_pb = query.StructuredQuery.UnaryFilter(
field=query.StructuredQuery.FieldReference(field_path="a.b.c"),
op=StructuredQuery.UnaryFilter.Operator.IS_NULL,
)
filter_pb = _filter_pb(unary_pb)
expected_pb = query.StructuredQuery.Filter(unary_filter=unary_pb)
assert filter_pb == expected_pb
def test__filter_pb_field():
from google.cloud.firestore_v1.types import StructuredQuery
from google.cloud.firestore_v1.types import document
from google.cloud.firestore_v1.types import query
from google.cloud.firestore_v1.base_query import _filter_pb
field_filter_pb = query.StructuredQuery.FieldFilter(
field=query.StructuredQuery.FieldReference(field_path="XYZ"),
op=StructuredQuery.FieldFilter.Operator.GREATER_THAN,
value=document.Value(double_value=90.75),
)
filter_pb = _filter_pb(field_filter_pb)
expected_pb = query.StructuredQuery.Filter(field_filter=field_filter_pb)
assert filter_pb == expected_pb
def test__filter_pb_bad_type():
from google.cloud.firestore_v1.base_query import _filter_pb
with pytest.raises(ValueError):
_filter_pb(None)
def test__cursor_pb_no_pair():
from google.cloud.firestore_v1.base_query import _cursor_pb
assert _cursor_pb(None) is None
def test__cursor_pb_success():
from google.cloud.firestore_v1.types import query
from google.cloud.firestore_v1 import _helpers
from google.cloud.firestore_v1.base_query import _cursor_pb
data = [1.5, 10, True]
cursor_pair = data, True
cursor_pb = _cursor_pb(cursor_pair)
expected_pb = query.Cursor(
values=[_helpers.encode_value(value) for value in data], before=True
)
assert cursor_pb == expected_pb
def test__query_response_to_snapshot_empty():
from google.cloud.firestore_v1.base_query import _query_response_to_snapshot
response_pb = _make_query_response()
snapshot = _query_response_to_snapshot(response_pb, None, None)
assert snapshot is None
def test__query_response_to_snapshot_after_offset():
from google.cloud.firestore_v1.base_query import _query_response_to_snapshot
skipped_results = 410
response_pb = _make_query_response(skipped_results=skipped_results)
snapshot = _query_response_to_snapshot(response_pb, None, None)
assert snapshot is None
def test__query_response_to_snapshot_response():
from google.cloud.firestore_v1.base_query import _query_response_to_snapshot
from google.cloud.firestore_v1.document import DocumentSnapshot
client = _make_client()
collection = client.collection("a", "b", "c")
_, expected_prefix = collection._parent_info()
# Create name for the protobuf.
doc_id = "gigantic"
name = "{}/{}".format(expected_prefix, doc_id)
data = {"a": 901, "b": True}
response_pb = _make_query_response(name=name, data=data)
snapshot = _query_response_to_snapshot(response_pb, collection, expected_prefix)
assert isinstance(snapshot, DocumentSnapshot)
expected_path = collection._path + (doc_id,)
assert snapshot.reference._path == expected_path
assert snapshot.to_dict() == data
assert snapshot.exists
assert snapshot.read_time == response_pb.read_time
assert snapshot.create_time == response_pb.document.create_time
assert snapshot.update_time == response_pb.document.update_time
def test__collection_group_query_response_to_snapshot_empty():
from google.cloud.firestore_v1.base_query import (
_collection_group_query_response_to_snapshot,
)
response_pb = _make_query_response()
snapshot = _collection_group_query_response_to_snapshot(response_pb, None)
assert snapshot is None
def test__collection_group_query_response_to_snapshot_after_offset():
from google.cloud.firestore_v1.base_query import (
_collection_group_query_response_to_snapshot,
)
skipped_results = 410
response_pb = _make_query_response(skipped_results=skipped_results)
snapshot = _collection_group_query_response_to_snapshot(response_pb, None)
assert snapshot is None
def test__collection_group_query_response_to_snapshot_response():
from google.cloud.firestore_v1.document import DocumentSnapshot
from google.cloud.firestore_v1.base_query import (
_collection_group_query_response_to_snapshot,
)
client = _make_client()
collection = client.collection("a", "b", "c")
other_collection = client.collection("a", "b", "d")
to_match = other_collection.document("gigantic")
data = {"a": 901, "b": True}
response_pb = _make_query_response(name=to_match._document_path, data=data)
snapshot = _collection_group_query_response_to_snapshot(response_pb, collection)
assert isinstance(snapshot, DocumentSnapshot)
assert snapshot.reference._document_path == to_match._document_path
assert snapshot.to_dict() == data
assert snapshot.exists
assert snapshot.read_time == response_pb._pb.read_time
assert snapshot.create_time == response_pb._pb.document.create_time
assert snapshot.update_time == response_pb._pb.document.update_time
def _make_credentials():
import google.auth.credentials
return mock.Mock(spec=google.auth.credentials.Credentials)
def _make_client(project="project-project"):
from google.cloud.firestore_v1.client import Client
credentials = _make_credentials()
return Client(project=project, credentials=credentials)
def _make_order_pb(field_path, direction):
from google.cloud.firestore_v1.types import query
return query.StructuredQuery.Order(
field=query.StructuredQuery.FieldReference(field_path=field_path),
direction=direction,
)
def _make_query_response(**kwargs):
# kwargs supported are ``skipped_results``, ``name`` and ``data``
from google.cloud.firestore_v1.types import document
from google.cloud.firestore_v1.types import firestore
from google.cloud._helpers import _datetime_to_pb_timestamp
from google.cloud.firestore_v1 import _helpers
now = datetime.datetime.utcnow()
read_time = _datetime_to_pb_timestamp(now)
kwargs["read_time"] = read_time
name = kwargs.pop("name", None)
data = kwargs.pop("data", None)
if name is not None and data is not None:
document_pb = document.Document(name=name, fields=_helpers.encode_dict(data))
delta = datetime.timedelta(seconds=100)
update_time = _datetime_to_pb_timestamp(now - delta)
create_time = _datetime_to_pb_timestamp(now - 2 * delta)
document_pb._pb.update_time.CopyFrom(update_time)
document_pb._pb.create_time.CopyFrom(create_time)
kwargs["document"] = document_pb
return firestore.RunQueryResponse(**kwargs)
def _make_cursor_pb(pair):
from google.cloud.firestore_v1 import _helpers
from google.cloud.firestore_v1.types import query
values, before = pair
value_pbs = [_helpers.encode_value(value) for value in values]
return query.Cursor(values=value_pbs, before=before)
def _make_query_partition(*args, **kwargs):
from google.cloud.firestore_v1.base_query import QueryPartition
return QueryPartition(*args, **kwargs)
def test_constructor():
partition = _make_query_partition(mock.sentinel.query, "start", "end")
assert partition._query is mock.sentinel.query
assert partition.start_at == "start"
assert partition.end_at == "end"
def test_query_begin():
partition = _make_query_partition(DummyQuery("PARENT"), None, "end")
query = partition.query()
assert query._parent == "PARENT"
assert query.all_descendants == "YUP"
assert query.orders == "ORDER"
assert query.start_at is None
assert query.end_at == (["end"], True)
def test_query_middle():
partition = _make_query_partition(DummyQuery("PARENT"), "start", "end")
query = partition.query()
assert query._parent == "PARENT"
assert query.all_descendants == "YUP"
assert query.orders == "ORDER"
assert query.start_at == (["start"], True)
assert query.end_at == (["end"], True)
def test_query_end():
partition = _make_query_partition(DummyQuery("PARENT"), "start", None)
query = partition.query()
assert query._parent == "PARENT"
assert query.all_descendants == "YUP"
assert query.orders == "ORDER"
assert query.start_at == (["start"], True)
assert query.end_at is None
class DummyQuery:
_all_descendants = "YUP"
_PARTITION_QUERY_ORDER = "ORDER"
def __init__(
self, parent, *, all_descendants=None, orders=None, start_at=None, end_at=None
):
self._parent = parent
self.all_descendants = all_descendants
self.orders = orders
self.start_at = start_at
self.end_at = end_at
def _make_projection_for_select(field_paths):
from google.cloud.firestore_v1.types import query
return query.StructuredQuery.Projection(
fields=[
query.StructuredQuery.FieldReference(field_path=field_path)
for field_path in field_paths
]
)
def _make_collection(*path, **kw):
from google.cloud.firestore_v1 import collection
return collection.CollectionReference(*path, **kw)
def _make_docref(*path, **kw):
from google.cloud.firestore_v1 import document
return document.DocumentReference(*path, **kw)
def _make_snapshot(docref, values):
from google.cloud.firestore_v1 import document
return document.DocumentSnapshot(docref, values, True, None, None, None)
|
tools/save_pretrained_model.py | gcunhase/tensorflow-onnx | 1,473 | 12756537 | <gh_stars>1000+
# SPDX-License-Identifier: Apache-2.0
"""
Save pre-trained model.
"""
import tensorflow as tf
import numpy as np
# pylint: disable=redefined-outer-name,reimported,import-outside-toplevel
def save_pretrained_model(sess, outputs, feeds, out_dir, model_name="pretrained"):
"""Save pretrained model and config"""
try:
import os
import sys
import tensorflow as tf
import subprocess
to_onnx_path = "{}/to_onnx".format(out_dir)
if not os.path.isdir(to_onnx_path):
os.makedirs(to_onnx_path)
saved_model = "{}/saved_model".format(to_onnx_path)
inputs_path = "{}/inputs.npy".format(to_onnx_path)
pretrained_model_yaml_path = "{}/pretrained.yaml".format(to_onnx_path)
envars_path = "{}/environment.txt".format(to_onnx_path)
pip_requirement_path = "{}/requirements.txt".format(to_onnx_path)
print("===============Save Saved Model========================")
if os.path.exists(saved_model):
print("{} already exists, SKIP".format(saved_model))
return
print("Save tf version, python version and installed packages")
tf_version = tf.__version__
py_version = sys.version
pip_packages = subprocess.check_output([sys.executable, "-m", "pip", "freeze", "--all"])
pip_packages = pip_packages.decode("UTF-8")
with open(envars_path, "w") as fp:
fp.write(tf_version + os.linesep)
fp.write(py_version)
with open(pip_requirement_path, "w") as fp:
fp.write(pip_packages)
print("Save model for tf2onnx: {}".format(to_onnx_path))
# save inputs
inputs = {}
for inp, value in feeds.items():
if isinstance(inp, str):
inputs[inp] = value
else:
inputs[inp.name] = value
np.save(inputs_path, inputs)
print("Saved inputs to {}".format(inputs_path))
# save graph and weights
from tensorflow.saved_model import simple_save
# pylint: disable=unnecessary-comprehension
simple_save(sess, saved_model,
{n: i for n, i in zip(inputs.keys(), feeds.keys())},
{op.name: op for op in outputs})
print("Saved model to {}".format(saved_model))
# generate config
pretrained_model_yaml = '''
{}:
model: ./saved_model
model_type: saved_model
input_get: get_ramp
'''.format(model_name)
pretrained_model_yaml += " inputs:\n"
for inp, _ in inputs.items():
pretrained_model_yaml += \
" \"{input}\": np.array(np.load(\"./inputs.npy\")[()][\"{input}\"])\n".format(input=inp)
outputs = [op.name for op in outputs]
pretrained_model_yaml += " outputs:\n"
for out in outputs:
pretrained_model_yaml += " - {}\n".format(out)
with open(pretrained_model_yaml_path, "w") as f:
f.write(pretrained_model_yaml)
print("Saved pretrained model yaml to {}".format(pretrained_model_yaml_path))
print("=========================================================")
except Exception as ex: # pylint: disable=broad-except
print("Error: {}".format(ex))
def test():
"""Test sample."""
x_val = np.random.rand(5, 20).astype(np.float32)
y_val = np.random.rand(20, 10).astype(np.float32)
x = tf.placeholder(tf.float32, x_val.shape, name="x")
y = tf.placeholder(tf.float32, y_val.shape, name="y")
z = tf.matmul(x, y)
w = tf.get_variable("weight", [5, 10], dtype=tf.float32)
init = tf.global_variables_initializer()
outputs = [z + w]
feeds = {x: x_val, y: y_val}
with tf.Session() as sess:
sess.run(init)
sess.run(outputs, feeds)
# NOTE: NOT override the saved model, so put below snippet after testing the BEST model.
# if you perform testing several times.
save_pretrained_model(sess, outputs, feeds, "./tests", model_name="test")
if __name__ == "__main__":
test()
|
tests/enterprise/test_malware.py | frbor/pyattck | 377 | 12756551 | def test_malware_have_actors(attck_fixture):
"""
All MITRE Enterprise ATT&CK Malware should have Actors
Args:
attck_fixture ([type]): our default MITRE Enterprise ATT&CK JSON fixture
"""
for malware in attck_fixture.enterprise.malwares:
if malware.actors:
assert getattr(malware,'actors')
def test_malware_have_techniques(attck_fixture):
"""
    All MITRE Enterprise ATT&CK Malware should have techniques
Args:
attck_fixture ([type]): our default MITRE Enterprise ATT&CK JSON fixture
"""
for malware in attck_fixture.enterprise.malwares:
if malware.techniques:
assert getattr(malware,'techniques')
|
gluoncv/torch/data/transforms/instance_transforms/augmentation.py | RafLit/gluon-cv | 5,447 | 12756556 | # -*- coding: utf-8 -*-
# adapted from https://github.com/facebookresearch/detectron2/blob/master/detectron2/data/transforms/augmentation.py
import sys
import inspect
import random
import numpy as np
import pprint
from abc import ABCMeta, abstractmethod
from typing import List, Optional, Tuple, Union
from PIL import Image
from .transform import (
Transform,
TransformList,
BlendTransform,
CropTransform,
HFlipTransform,
NoOpTransform,
Transform,
VFlipTransform,
ExtentTransform,
ResizeTransform,
RotationTransform
)
__all__ = [
"Augmentation",
"TransformGen",
"apply_transform_gens",
"AugInput",
"StandardAugInput",
"apply_augmentations",
"RandomApply",
"RandomBrightness",
"RandomContrast",
"RandomCrop",
"RandomExtent",
"RandomFlip",
"RandomSaturation",
"RandomLighting",
"RandomRotation",
"Resize",
"ResizeShortestEdge",
"RandomCropWithInstance",
"PadAugmentation",
]
"""
Overview of the augmentation system:
We have a design goal that aims at allowing:
(1) Arbitrary structures of input data (e.g. list[list[boxes]], dict[str, boxes],
multiple semantic segmentations for each image, etc) and arbitrary new data types
(rotated boxes, 3D meshes, densepose, etc)
(2) A list of augmentations to be applied sequentially
`Augmentation` defines policies to create deterministic transforms from input data.
An augmentation policy may need to access arbitrary input data, so it declares the input
data needed, to be provided by users when calling its `get_transform` method.
`Augmentation` is not able to apply transforms to data: data associated with
one sample may be much more than what `Augmentation` gets. For example, most
augmentation policies only need an image, but the actual input samples can be
much more complicated.
`AugInput` manages all inputs needed by `Augmentation` and implements the logic
to apply a sequence of augmentations. It has to define how the inputs are transformed,
because arguments needed by one `Augmentation` need to be transformed to become arguments
of the next `Augmentation` in the sequence.
`AugInput` does not need to contain all input data, because most augmentation policies
only need very few fields (e.g., most only need "image"). We provide `StandardAugInput`
that only contains "images", "boxes", "sem_seg", that are enough to create transforms
for most cases. In this way, users keep the responsibility to apply transforms to other
potentially new data types and structures, e.g. keypoints, proposals boxes.
To extend the system, one can do:
1. To add a new augmentation policy that only needs to use standard inputs
("image", "boxes", "sem_seg"), writing a subclass of `Augmentation` is sufficient.
2. To use new data types or custom data structures, `StandardAugInput` can still be used as long
as the new data types or custom data structures are not needed by any augmentation policy.
The new data types or data structures can be transformed using the
transforms returned by `AugInput.apply_augmentations`.
3. To add new augmentation policies that need new data types or data structures, in addition to
implementing new `Augmentation`, a new `AugInput` is needed as well.
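A minimal sketch of case (1) above (hedged: `RandomNoOp` and the `image` array are made-up
illustrations, not part of this module)::
    class RandomNoOp(Augmentation):
        input_args = ("image",)
        def get_transform(self, image):
            # a real policy would inspect `image` and build a meaningful Transform here
            return NoOpTransform()
    # For an image-only input, apply_augmentations returns the transformed image and the
    # list of transforms that were applied.
    new_image, tfms = apply_augmentations([RandomNoOp(), RandomFlip(prob=0.5)], image)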
"""
def _check_img_dtype(img):
    assert isinstance(img, np.ndarray), "[Augmentation] Needs a numpy array, but got a {}!".format(
type(img)
)
assert not isinstance(img.dtype, np.integer) or (
img.dtype == np.uint8
), "[Augmentation] Got image of type {}, use uint8 or floating points instead!".format(
img.dtype
)
assert img.ndim in [2, 3], img.ndim
class Augmentation(metaclass=ABCMeta):
"""
Augmentation defines policies/strategies to generate :class:`Transform` from data.
It is often used for pre-processing of input data. A policy typically contains
randomness, but it can also choose to deterministically generate a :class:`Transform`.
A "policy" that generates a :class:`Transform` may, in the most general case,
need arbitrary information from input data in order to determine what transforms
to apply. Therefore, each :class:`Augmentation` instance defines the arguments
needed by its :meth:`get_transform` method with the :attr:`input_args` attribute.
When called with the positional arguments defined by the :attr:`input_args`,
the :meth:`get_transform` method executes the policy.
Examples:
::
# if a policy needs to know both image and semantic segmentation
assert aug.input_args == ("image", "sem_seg")
tfm: Transform = aug.get_transform(image, sem_seg)
new_image = tfm.apply_image(image)
To implement a custom :class:`Augmentation`, define its :attr:`input_args` and
implement :meth:`get_transform`.
Note that :class:`Augmentation` defines the policies to create a :class:`Transform`,
but not how to apply the actual transform to those data.
"""
input_args: Tuple[str] = ("image",)
"""
Attribute of class instances that defines the argument(s) needed by
:meth:`get_transform`. Default to only "image", because most policies only
require knowing the image in order to determine the transform.
Users can freely define arbitrary new args and their types in custom
:class:`Augmentation`. In detectron2 we use the following convention:
* image: (H,W) or (H,W,C) ndarray of type uint8 in range [0, 255], or
floating point in range [0, 1] or [0, 255].
* boxes: (N,4) ndarray of float32. It represents the instance bounding boxes
of N instances. Each is in XYXY format in unit of absolute coordinates.
* sem_seg: (H,W) ndarray of type uint8. Each element is an integer label of pixel.
We do not specify convention for other types and do not include builtin
:class:`Augmentation` that uses other types in detectron2.
"""
def _init(self, params=None):
if params:
for k, v in params.items():
if k != "self" and not k.startswith("_"):
setattr(self, k, v)
# NOTE: in the future, can allow it to return list[Augmentation],
# to delegate augmentation to others
@abstractmethod
def get_transform(self, *args) -> Transform:
"""
Execute the policy to use input data to create transform(s).
Args:
arguments must follow what's defined in :attr:`input_args`.
Returns:
Return a :class:`Transform` instance, which is the transform to apply to inputs.
"""
pass
def _rand_range(self, low=1.0, high=None, size=None):
"""
Uniform float random number between low and high.
"""
if high is None:
low, high = 0, low
if size is None:
size = []
return np.random.uniform(low, high, size)
def __repr__(self):
"""
Produce something like:
"MyAugmentation(field1={self.field1}, field2={self.field2})"
"""
try:
sig = inspect.signature(self.__init__)
classname = type(self).__name__
argstr = []
for name, param in sig.parameters.items():
assert (
param.kind != param.VAR_POSITIONAL and param.kind != param.VAR_KEYWORD
), "The default __repr__ doesn't support *args or **kwargs"
assert hasattr(self, name), (
"Attribute {} not found! "
"Default __repr__ only works if attributes match the constructor.".format(name)
)
attr = getattr(self, name)
default = param.default
if default is attr:
continue
argstr.append("{}={}".format(name, pprint.pformat(attr)))
return "{}({})".format(classname, ", ".join(argstr))
except AssertionError:
return super().__repr__()
__str__ = __repr__
TransformGen = Augmentation
"""
Alias for Augmentation, since it is something that generates :class:`Transform`s
"""
class AugInput:
"""
A base class for anything on which a list of :class:`Augmentation` can be applied.
This class provides input arguments for :class:`Augmentation` to use, and defines how
to apply transforms to these data.
An instance of this class must satisfy the following:
* :class:`Augmentation` declares some data it needs as arguments. A :class:`AugInput`
must provide access to these data in the form of attribute access (``getattr``).
For example, if a :class:`Augmentation` to be applied needs "image" and "sem_seg"
arguments, this class must have the attribute "image" and "sem_seg" whose content
is as required by the :class:`Augmentation`s.
* This class must have a :meth:`transform(tfm: Transform) -> None` method which
in-place transforms all attributes stored in the class.
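    A hedged, minimal sketch of a custom input (the ``heatmap`` field is an assumption made
    for illustration; it is not required by any builtin policy):
    ::
        class HeatmapAugInput(AugInput):
            def __init__(self, image, heatmap):
                self.image = image
                self.heatmap = heatmap
            def transform(self, tfm: Transform) -> None:
                self.image = tfm.apply_image(self.image)
                # dense per-pixel data can reuse the segmentation pathway
                self.heatmap = tfm.apply_segmentation(self.heatmap)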
"""
def transform(self, tfm: Transform) -> None:
raise NotImplementedError
def apply_augmentations(
self, augmentations: List[Union[Augmentation, Transform]]
) -> TransformList:
"""
Apply a list of Transform/Augmentation in-place and returned the applied transform.
Attributes of this class will be modified.
Returns:
TransformList:
returns transformed inputs and the list of transforms applied.
The TransformList can then be applied to other data associated with the inputs.
"""
tfms = []
for aug in augmentations:
if isinstance(aug, Augmentation):
args = []
for f in aug.input_args:
try:
args.append(getattr(self, f))
except AttributeError:
raise AttributeError(
f"Augmentation {aug} needs '{f}', which is not an attribute of {self}!"
)
                tfm = aug.get_transform(*args)
                assert isinstance(tfm, Transform), (
                    f"{type(aug)}.get_transform must return an instance of Transform! "
                    f"Got {type(tfm)} instead."
                )
else:
tfm = aug
self.transform(tfm)
tfms.append(tfm)
return TransformList(tfms)
class StandardAugInput(AugInput):
"""
A standard implementation of :class:`AugInput` for the majority of use cases.
This class provides the following standard attributes that are common to use by
Augmentation (augmentation policies). These are chosen because most
:class:`Augmentation` won't need anything more to define a augmentation policy.
After applying augmentations to these special attributes, the returned transforms
can then be used to transform other data structures that users have.
Attributes:
image (ndarray): image in HW or HWC format. The meaning of C is up to users
boxes (ndarray or None): Nx4 boxes in XYXY_ABS mode
sem_seg (ndarray or None): HxW semantic segmentation mask
Examples:
::
input = StandardAugInput(image, boxes=boxes)
tfms = input.apply_augmentations(list_of_augmentations)
transformed_image = input.image
transformed_boxes = input.boxes
transformed_other_data = tfms.apply_other(other_data)
An extended project that works with new data types may require augmentation
policies that need more inputs. An algorithm may need to transform inputs
in a way different from the standard approach defined in this class. In those
    situations, users can implement new subclasses of :class:`AugInput` with different
attributes and the :meth:`transform` method.
"""
def __init__(
self,
image: np.ndarray,
*,
boxes: Optional[np.ndarray] = None,
sem_seg: Optional[np.ndarray] = None,
):
"""
Args:
image: (H,W) or (H,W,C) ndarray of type uint8 in range [0, 255], or
floating point in range [0, 1] or [0, 255].
boxes: (N,4) ndarray of float32. It represents the instance bounding boxes
of N instances. Each is in XYXY format in unit of absolute coordinates.
sem_seg: (H,W) ndarray of type uint8. Each element is an integer label of pixel.
"""
_check_img_dtype(image)
self.image = image
self.boxes = boxes
self.sem_seg = sem_seg
def transform(self, tfm: Transform) -> None:
"""
In-place transform all attributes of this class.
"""
self.image = tfm.apply_image(self.image)
if self.boxes is not None:
self.boxes = tfm.apply_box(self.boxes)
if self.sem_seg is not None:
self.sem_seg = tfm.apply_segmentation(self.sem_seg)
def apply_augmentations(augmentations: List[Union[Transform, Augmentation]], inputs):
"""
Use :meth:`AugInput.apply_augmentations` instead.
"""
if isinstance(inputs, np.ndarray):
# handle the common case of image-only Augmentation, also for backward compatibility
image_only = True
inputs = StandardAugInput(inputs)
else:
image_only = False
tfms = inputs.apply_augmentations(augmentations)
return inputs.image if image_only else inputs, tfms
apply_transform_gens = apply_augmentations
"""
Alias for backward-compatibility.
"""
class RandomApply(Augmentation):
"""
    Randomly apply the wrapped transformation with a given probability.
"""
def __init__(self, transform, prob=0.5):
"""
Args:
transform (Transform, Augmentation): the transform to be wrapped
by the `RandomApply`. The `transform` can either be a
`Transform` or `Augmentation` instance.
prob (float): probability between 0.0 and 1.0 that
                the wrapped transformation is applied
"""
super().__init__()
assert isinstance(transform, (Transform, Augmentation)), (
f"The given transform must either be a Transform or Augmentation instance. "
f"Not {type(transform)}"
)
        assert 0.0 <= prob <= 1.0, f"Probability must be between 0.0 and 1.0 (given: {prob})"
self.prob = prob
self.transform = transform
if isinstance(transform, Augmentation):
self.input_args = transform.input_args
def get_transform(self, img):
do = self._rand_range() < self.prob
if do:
if isinstance(self.transform, Augmentation):
return self.transform.get_transform(img)
else:
return self.transform
else:
return NoOpTransform()
class RandomFlip(Augmentation):
"""
Flip the image horizontally or vertically with the given probability.
"""
def __init__(self, prob=0.5, *, horizontal=True, vertical=False):
"""
Args:
prob (float): probability of flip.
horizontal (boolean): whether to apply horizontal flipping
vertical (boolean): whether to apply vertical flipping
"""
super().__init__()
if horizontal and vertical:
raise ValueError("Cannot do both horiz and vert. Please use two Flip instead.")
if not horizontal and not vertical:
raise ValueError("At least one of horiz or vert has to be True!")
self._init(locals())
def get_transform(self, img):
h, w = img.shape[:2]
do = self._rand_range() < self.prob
if do:
if self.horizontal:
return HFlipTransform(w)
elif self.vertical:
return VFlipTransform(h)
else:
return NoOpTransform()
class Resize(Augmentation):
""" Resize image to a fixed target size"""
def __init__(self, shape, interp=Image.BILINEAR):
"""
Args:
shape: (h, w) tuple or a int
interp: PIL interpolation method
"""
if isinstance(shape, int):
shape = (shape, shape)
shape = tuple(shape)
self._init(locals())
def get_transform(self, img):
return ResizeTransform(
img.shape[0], img.shape[1], self.shape[0], self.shape[1], self.interp
)
class ResizeShortestEdge(Augmentation):
"""
Scale the shorter edge to the given size, with a limit of `max_size` on the longer edge.
If `max_size` is reached, then downscale so that the longer edge does not exceed max_size.
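    Example (hedged, with illustrative numbers only)::
        # With a 480x1280 input, short_edge_length=640 first scales by 640/480, and the
        # max_size=1000 cap then rescales so that the longer edge ends up at 1000.
        aug = ResizeShortestEdge(short_edge_length=640, max_size=1000, sample_style="choice")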
"""
def __init__(
self, short_edge_length, max_size=sys.maxsize, sample_style="range", interp=Image.BILINEAR
):
"""
Args:
short_edge_length (list[int]): If ``sample_style=="range"``,
a [min, max] interval from which to sample the shortest edge length.
If ``sample_style=="choice"``, a list of shortest edge lengths to sample from.
max_size (int): maximum allowed longest edge length.
sample_style (str): either "range" or "choice".
"""
super().__init__()
assert sample_style in ["range", "choice"], sample_style
self.is_range = sample_style == "range"
if isinstance(short_edge_length, int):
short_edge_length = (short_edge_length, short_edge_length)
self._init(locals())
def get_transform(self, img):
h, w = img.shape[:2]
if self.is_range:
size = np.random.randint(self.short_edge_length[0], self.short_edge_length[1] + 1)
else:
size = np.random.choice(self.short_edge_length)
if size == 0:
return NoOpTransform()
scale = size * 1.0 / min(h, w)
if h < w:
newh, neww = size, scale * w
else:
newh, neww = scale * h, size
if max(newh, neww) > self.max_size:
scale = self.max_size * 1.0 / max(newh, neww)
newh = newh * scale
neww = neww * scale
neww = int(neww + 0.5)
newh = int(newh + 0.5)
return ResizeTransform(h, w, newh, neww, self.interp)
class RandomRotation(Augmentation):
"""
    This augmentation returns a copy of the image, rotated by a randomly sampled
    number of degrees counter clockwise around the given center.
"""
def __init__(self, angle, expand=True, center=None, sample_style="range", interp=None):
"""
Args:
angle (list[float]): If ``sample_style=="range"``,
a [min, max] interval from which to sample the angle (in degrees).
If ``sample_style=="choice"``, a list of angles to sample from
expand (bool): choose if the image should be resized to fit the whole
rotated image (default), or simply cropped
center (list[[float, float]]): If ``sample_style=="range"``,
a [[minx, miny], [maxx, maxy]] relative interval from which to sample the center,
[0, 0] being the top left of the image and [1, 1] the bottom right.
If ``sample_style=="choice"``, a list of centers to sample from
Default: None, which means that the center of rotation is the center of the image
center has no effect if expand=True because it only affects shifting
"""
super().__init__()
assert sample_style in ["range", "choice"], sample_style
self.is_range = sample_style == "range"
if isinstance(angle, (float, int)):
angle = (angle, angle)
if center is not None and isinstance(center[0], (float, int)):
center = (center, center)
self._init(locals())
def get_transform(self, img):
h, w = img.shape[:2]
center = None
if self.is_range:
angle = np.random.uniform(self.angle[0], self.angle[1])
if self.center is not None:
center = (
np.random.uniform(self.center[0][0], self.center[1][0]),
np.random.uniform(self.center[0][1], self.center[1][1]),
)
else:
angle = np.random.choice(self.angle)
if self.center is not None:
center = np.random.choice(self.center)
if center is not None:
center = (w * center[0], h * center[1]) # Convert to absolute coordinates
if angle % 360 == 0:
return NoOpTransform()
return RotationTransform(h, w, angle, expand=self.expand, center=center, interp=self.interp)
class RandomCrop(Augmentation):
"""
Randomly crop a subimage out of an image.
"""
def __init__(self, crop_type: str, crop_size):
"""
Args:
crop_type (str): one of "relative_range", "relative", "absolute", "absolute_range".
See `config/defaults.py` for explanation.
crop_size (tuple[float]): the relative ratio or absolute pixels of
height and width
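        Example (hedged, illustrative values only)::
            # "relative" with crop_size=(0.5, 0.5) keeps half of the height and width;
            # "absolute" with crop_size=(640, 640) crops a fixed window, clamped to the image size.
            aug = RandomCrop("relative_range", (0.7, 0.7))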
"""
super().__init__()
assert crop_type in ["relative_range", "relative", "absolute", "absolute_range"]
self._init(locals())
def get_transform(self, img):
h, w = img.shape[:2]
croph, cropw = self.get_crop_size((h, w))
assert h >= croph and w >= cropw, "Shape computation in {} has bugs.".format(self)
h0 = np.random.randint(h - croph + 1)
w0 = np.random.randint(w - cropw + 1)
return CropTransform(w0, h0, cropw, croph)
def get_crop_size(self, image_size):
"""
Args:
image_size (tuple): height, width
Returns:
crop_size (tuple): height, width in absolute pixels
"""
h, w = image_size
if self.crop_type == "relative":
ch, cw = self.crop_size
return int(h * ch + 0.5), int(w * cw + 0.5)
elif self.crop_type == "relative_range":
crop_size = np.asarray(self.crop_size, dtype=np.float32)
ch, cw = crop_size + np.random.rand(2) * (1 - crop_size)
return int(h * ch + 0.5), int(w * cw + 0.5)
elif self.crop_type == "absolute":
return (min(self.crop_size[0], h), min(self.crop_size[1], w))
elif self.crop_type == "absolute_range":
assert self.crop_size[0] <= self.crop_size[1]
ch = np.random.randint(min(h, self.crop_size[0]), min(h, self.crop_size[1]) + 1)
cw = np.random.randint(min(w, self.crop_size[0]), min(w, self.crop_size[1]) + 1)
return ch, cw
else:
            raise NotImplementedError("Unknown crop type {}".format(self.crop_type))
class RandomExtent(Augmentation):
"""
Outputs an image by cropping a random "subrect" of the source image.
The subrect can be parameterized to include pixels outside the source image,
in which case they will be set to zeros (i.e. black). The size of the output
image will vary with the size of the random subrect.
"""
def __init__(self, scale_range, shift_range):
"""
Args:
scale_range (l, h): Range of input-to-output size scaling factor
shift_range (x, y): Range of shifts of the cropped subrect. The rect
is shifted by [w / 2 * Uniform(-x, x), h / 2 * Uniform(-y, y)],
where (w, h) is the (width, height) of the input image. Set each
component to zero to crop at the image's center.
"""
super().__init__()
self._init(locals())
def get_transform(self, img):
img_h, img_w = img.shape[:2]
# Initialize src_rect to fit the input image.
src_rect = np.array([-0.5 * img_w, -0.5 * img_h, 0.5 * img_w, 0.5 * img_h])
# Apply a random scaling to the src_rect.
src_rect *= np.random.uniform(self.scale_range[0], self.scale_range[1])
# Apply a random shift to the coordinates origin.
src_rect[0::2] += self.shift_range[0] * img_w * (np.random.rand() - 0.5)
src_rect[1::2] += self.shift_range[1] * img_h * (np.random.rand() - 0.5)
# Map src_rect coordinates into image coordinates (center at corner).
src_rect[0::2] += 0.5 * img_w
src_rect[1::2] += 0.5 * img_h
return ExtentTransform(
src_rect=(src_rect[0], src_rect[1], src_rect[2], src_rect[3]),
output_size=(int(src_rect[3] - src_rect[1]), int(src_rect[2] - src_rect[0])),
)
class RandomContrast(Augmentation):
"""
Randomly transforms image contrast.
Contrast intensity is uniformly sampled in (intensity_min, intensity_max).
- intensity < 1 will reduce contrast
- intensity = 1 will preserve the input image
- intensity > 1 will increase contrast
See: https://pillow.readthedocs.io/en/3.0.x/reference/ImageEnhance.html
"""
def __init__(self, intensity_min, intensity_max):
"""
Args:
intensity_min (float): Minimum augmentation
intensity_max (float): Maximum augmentation
"""
super().__init__()
self._init(locals())
def get_transform(self, img):
w = np.random.uniform(self.intensity_min, self.intensity_max)
return BlendTransform(src_image=img.mean(), src_weight=1 - w, dst_weight=w)
class RandomBrightness(Augmentation):
"""
Randomly transforms image brightness.
Brightness intensity is uniformly sampled in (intensity_min, intensity_max).
- intensity < 1 will reduce brightness
- intensity = 1 will preserve the input image
- intensity > 1 will increase brightness
See: https://pillow.readthedocs.io/en/3.0.x/reference/ImageEnhance.html
"""
def __init__(self, intensity_min, intensity_max):
"""
Args:
intensity_min (float): Minimum augmentation
intensity_max (float): Maximum augmentation
"""
super().__init__()
self._init(locals())
def get_transform(self, img):
w = np.random.uniform(self.intensity_min, self.intensity_max)
return BlendTransform(src_image=0, src_weight=1 - w, dst_weight=w)
class RandomSaturation(Augmentation):
"""
Randomly transforms saturation of an RGB image.
Input images are assumed to have 'RGB' channel order.
Saturation intensity is uniformly sampled in (intensity_min, intensity_max).
- intensity < 1 will reduce saturation (make the image more grayscale)
- intensity = 1 will preserve the input image
- intensity > 1 will increase saturation
See: https://pillow.readthedocs.io/en/3.0.x/reference/ImageEnhance.html
"""
def __init__(self, intensity_min, intensity_max):
"""
Args:
intensity_min (float): Minimum augmentation (1 preserves input).
intensity_max (float): Maximum augmentation (1 preserves input).
"""
super().__init__()
self._init(locals())
def get_transform(self, img):
assert img.shape[-1] == 3, "RandomSaturation only works on RGB images"
w = np.random.uniform(self.intensity_min, self.intensity_max)
grayscale = img.dot([0.299, 0.587, 0.114])[:, :, np.newaxis]
return BlendTransform(src_image=grayscale, src_weight=1 - w, dst_weight=w)
class RandomLighting(Augmentation):
"""
The "lighting" augmentation described in AlexNet, using fixed PCA over ImageNet.
Input images are assumed to have 'RGB' channel order.
The degree of color jittering is randomly sampled via a normal distribution,
with standard deviation given by the scale parameter.
"""
def __init__(self, scale):
"""
Args:
scale (float): Standard deviation of principal component weighting.
"""
super().__init__()
self._init(locals())
self.eigen_vecs = np.array(
[[-0.5675, 0.7192, 0.4009], [-0.5808, -0.0045, -0.8140], [-0.5836, -0.6948, 0.4203]]
)
self.eigen_vals = np.array([0.2175, 0.0188, 0.0045])
def get_transform(self, img):
assert img.shape[-1] == 3, "RandomLighting only works on RGB images"
weights = np.random.normal(scale=self.scale, size=3)
return BlendTransform(
src_image=self.eigen_vecs.dot(weights * self.eigen_vals), src_weight=1.0, dst_weight=1.0
)
def _gen_crop_transform_with_instance(crop_size, image_size, instances, crop_box=True):
"""
Generate a CropTransform so that the cropping region contains
the center of the given instance.
Args:
crop_size (tuple): h, w in pixels
image_size (tuple): h, w
        instances: bounding boxes (XYXY, in absolute coordinates) of the instances; the crop
            is chosen so that it contains the center of one of them.
        crop_box (bool): if False, extend the crop region so that no instance box is cut.
"""
bbox = random.choice(instances)
crop_size = np.asarray(crop_size, dtype=np.int32)
center_yx = (bbox[1] + bbox[3]) * 0.5, (bbox[0] + bbox[2]) * 0.5
assert (
image_size[0] >= center_yx[0] and image_size[1] >= center_yx[1]
), "The annotation bounding box is outside of the image!"
assert (
image_size[0] >= crop_size[0] and image_size[1] >= crop_size[1]
), "Crop size is larger than image size!"
min_yx = np.maximum(np.floor(center_yx).astype(np.int32) - crop_size, 0)
max_yx = np.maximum(np.asarray(image_size, dtype=np.int32) - crop_size, 0)
max_yx = np.minimum(max_yx, np.ceil(center_yx).astype(np.int32))
y0 = np.random.randint(min_yx[0], max_yx[0] + 1)
x0 = np.random.randint(min_yx[1], max_yx[1] + 1)
# if some instance is cropped extend the box
if not crop_box:
num_modifications = 0
modified = True
# convert crop_size to float
crop_size = crop_size.astype(np.float32)
while modified:
modified, x0, y0, crop_size = adjust_crop(x0, y0, crop_size, instances)
num_modifications += 1
if num_modifications > 100:
                raise ValueError(
                    "Cannot finish cropping adjustment within 100 tries (#instances {}).".format(
                        len(instances)
                    )
                )
return CropTransform(0, 0, image_size[1], image_size[0])
return CropTransform(*map(int, (x0, y0, crop_size[1], crop_size[0])))
def adjust_crop(x0, y0, crop_size, instances, eps=1e-3):
modified = False
x1 = x0 + crop_size[1]
y1 = y0 + crop_size[0]
for bbox in instances:
if bbox[0] < x0 - eps and bbox[2] > x0 + eps:
crop_size[1] += x0 - bbox[0]
x0 = bbox[0]
modified = True
if bbox[0] < x1 - eps and bbox[2] > x1 + eps:
crop_size[1] += bbox[2] - x1
x1 = bbox[2]
modified = True
if bbox[1] < y0 - eps and bbox[3] > y0 + eps:
crop_size[0] += y0 - bbox[1]
y0 = bbox[1]
modified = True
if bbox[1] < y1 - eps and bbox[3] > y1 + eps:
crop_size[0] += bbox[3] - y1
y1 = bbox[3]
modified = True
return modified, x0, y0, crop_size
class RandomCropWithInstance(RandomCrop):
""" Instance-aware cropping.
"""
def __init__(self, crop_type, crop_size, crop_instance=True):
"""
Args:
crop_instance (bool): if False, extend cropping boxes to avoid cropping instances
"""
super().__init__(crop_type, crop_size)
self.crop_instance = crop_instance
self.input_args = ("image", "boxes")
def get_transform(self, img, boxes):
image_size = img.shape[:2]
crop_size = self.get_crop_size(image_size)
return _gen_crop_transform_with_instance(
crop_size, image_size, boxes, crop_box=self.crop_instance
)
class PadAugmentation(Augmentation):
def __init__(self, crop_size):
"""
Args:
            crop_size (tuple): target (height, width) in pixels to pad the image up to
"""
super().__init__()
self.crop_size = crop_size
def get_crop_size(self, image_size):
h, w = image_size
return (min(self.crop_size[0], h), min(self.crop_size[1], w))
def get_transform(self, img):
image_size = img.shape[:2]
image_size = self.get_crop_size(image_size)
return _PadTransform(image_size[0], image_size[1], self.crop_size[1], self.crop_size[0])
class _PadTransform(Transform):
def __init__(self, h: int, w: int, crop_h: int, crop_w: int):
super().__init__()
self._set_attributes(locals())
def apply_image(self, img: np.ndarray) -> np.ndarray:
h, w = img.shape[:2]
assert (
self.h == h and self.w == w
), "Input size mismatch h w {}:{} -> {}:{}".format(self.h, self.w, h, w)
padding = ((0, self.crop_h - h), (0, self.crop_w - w), (0, 0))
img = np.pad(img, pad_width=padding)
return img
def apply_coords(self, coords: np.ndarray) -> np.ndarray:
return coords
|
bcs-ui/backend/iam/permissions/resources/namespace.py | schneesu/bk-bcs | 599 | 12756570 | # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2021 TH<NAME>, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
from typing import Dict, List, Optional, Type
import attr
from backend.iam.permissions import decorators
from backend.iam.permissions.exceptions import AttrValidationError
from backend.iam.permissions.perm import PermCtx, Permission
from backend.iam.permissions.request import IAMResource, ResourceRequest
from backend.packages.blue_krill.data_types.enum import EnumField, StructuredEnum
from backend.utils.basic import md5_digest
from .cluster import ClusterPermission, related_cluster_perm
from .constants import ResourceType
def calc_iam_ns_id(cluster_id: str, name: str) -> Optional[str]:
"""
    Compute (compress) a unique namespace ID for registration with the IAM service.
    The current algorithm cannot fully avoid collisions, but the probability is low.
    note: IAM limits resource IDs to 32 characters at most; the longer the ID, the slower it is to process.
    :param cluster_id: cluster ID
    :param name: namespace name (k8s limits it to at most 63 characters)
    :return: iam_ns_id, the resource ID under which the namespace is registered with IAM. It is a
        compressed form of `cluster ID:namespace name`, e.g. `BCS-K8S-40000:default` becomes
        `40000:5f03d33dde`. The numeric cluster ID is kept so that it can be used by the
        fetch_instance_info method of NamespaceProvider.
"""
cluster_idx = cluster_id.split('-')[-1]
iam_ns_id = f'{cluster_idx}:{md5_digest(name)[8:16]}{name[:2]}'
if len(iam_ns_id) > 32:
raise ValueError(f'iam_ns_id({iam_ns_id}) more than 32 characters')
return iam_ns_id
class NamespaceAction(str, StructuredEnum):
CREATE = EnumField('namespace_create', label='namespace_create')
VIEW = EnumField('namespace_view', label='namespace_view')
UPDATE = EnumField('namespace_update', label='namespace_update')
DELETE = EnumField('namespace_delete', label='namespace_delete')
USE = EnumField('namespace_use', label='namespace_use')
@attr.dataclass
class NamespacePermCtx(PermCtx):
project_id: str = ''
cluster_id: str = ''
    name: Optional[str] = None  # namespace name
    iam_ns_id: Optional[str] = None  # namespace ID registered with IAM
    def __attrs_post_init__(self):
        """IAM limits resource_id to at most 32 characters"""
if self.name:
self.iam_ns_id = calc_iam_ns_id(self.cluster_id, self.name)
@property
def resource_id(self) -> str:
return self.iam_ns_id
def validate(self):
super().validate()
if not self.project_id:
raise AttrValidationError('project_id must not be empty')
if not self.cluster_id:
raise AttrValidationError('cluster_id must not be empty')
class NamespaceRequest(ResourceRequest):
resource_type: str = ResourceType.Namespace
attr = {'_bk_iam_path_': f'/project,{{project_id}}/cluster,{{cluster_id}}/'}
def _make_attribute(self, res_id: str) -> Dict:
return {
'_bk_iam_path_': self.attr['_bk_iam_path_'].format(
project_id=self.attr_kwargs['project_id'], cluster_id=self.attr_kwargs['cluster_id']
)
}
def _validate_attr_kwargs(self):
if not self.attr_kwargs.get('project_id'):
raise AttrValidationError('missing project_id or project_id is invalid')
if not self.attr_kwargs.get('cluster_id'):
raise AttrValidationError('missing cluster_id or cluster_id is invalid')
class related_namespace_perm(decorators.RelatedPermission):
module_name: str = ResourceType.Namespace
    def _convert_perm_ctx(self, instance, args, kwargs) -> PermCtx:
        """Only the case where the first argument is a PermCtx subclass instance is supported"""
if len(args) <= 0:
raise TypeError('missing NamespacePermCtx instance argument')
if isinstance(args[0], PermCtx):
return NamespacePermCtx(
username=args[0].username,
project_id=args[0].project_id,
cluster_id=args[0].cluster_id,
name=args[0].name,
)
else:
raise TypeError('missing NamespacePermCtx instance argument')
class namespace_perm(decorators.Permission):
module_name: str = ResourceType.Namespace
class NamespacePermission(Permission):
    """Namespace permissions"""
resource_type: str = ResourceType.Namespace
resource_request_cls: Type[ResourceRequest] = NamespaceRequest
parent_res_perm = ClusterPermission()
@related_cluster_perm(method_name='can_use')
def can_create(self, perm_ctx: NamespacePermCtx, raise_exception: bool = True) -> bool:
return self.can_action(perm_ctx, NamespaceAction.CREATE, raise_exception)
@related_cluster_perm(method_name='can_view')
def can_view(self, perm_ctx: NamespacePermCtx, raise_exception: bool = True) -> bool:
perm_ctx.validate_resource_id()
return self.can_action(perm_ctx, NamespaceAction.VIEW, raise_exception)
@related_cluster_perm(method_name='can_use')
def can_update(self, perm_ctx: NamespacePermCtx, raise_exception: bool = True) -> bool:
perm_ctx.validate_resource_id()
return self.can_action_with_view(perm_ctx, NamespaceAction.UPDATE, NamespaceAction.VIEW, raise_exception)
@related_cluster_perm(method_name='can_use')
def can_delete(self, perm_ctx: NamespacePermCtx, raise_exception: bool = True) -> bool:
perm_ctx.validate_resource_id()
return self.can_action_with_view(perm_ctx, NamespaceAction.DELETE, NamespaceAction.VIEW, raise_exception)
@related_cluster_perm(method_name='can_use')
def can_use(self, perm_ctx: NamespacePermCtx, raise_exception: bool = True) -> bool:
perm_ctx.validate_resource_id()
return self.can_action_with_view(perm_ctx, NamespaceAction.USE, NamespaceAction.VIEW, raise_exception)
def make_res_request(self, res_id: str, perm_ctx: NamespacePermCtx) -> ResourceRequest:
return self.resource_request_cls(res_id, project_id=perm_ctx.project_id, cluster_id=perm_ctx.cluster_id)
def get_parent_chain(self, perm_ctx: NamespacePermCtx) -> List[IAMResource]:
return [
IAMResource(ResourceType.Project, perm_ctx.project_id),
IAMResource(ResourceType.Cluster, perm_ctx.cluster_id),
]
def get_resource_id(self, perm_ctx: NamespacePermCtx) -> Optional[str]:
return perm_ctx.iam_ns_id
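# A hedged usage sketch (the username, project, cluster and namespace values below are made up):
#   perm_ctx = NamespacePermCtx(
#       username='admin', project_id='proj-id', cluster_id='BCS-K8S-40000', name='default'
#   )
#   NamespacePermission().can_use(perm_ctx, raise_exception=False)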
|
jobsapp/migrations/0010_auto_20201107_1404.py | astaqc/django-job-portal | 348 | 12756586 | # Generated by Django 3.0.7 on 2020-11-07 14:04
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('jobsapp', '0009_favorite'),
]
operations = [
migrations.AddField(
model_name='applicant',
name='comment',
field=models.TextField(blank=True, null=True),
),
migrations.AddField(
model_name='applicant',
name='status',
field=models.SmallIntegerField(default=1),
),
]
|
smarts/core/lidar.py | idsc-frazzoli/SMARTS | 554 | 12756590 | # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import itertools
import random
import numpy as np
import psutil
from .lidar_sensor_params import SensorParams
from .utils import pybullet
from .utils.math import batches, rotate_quat
from .utils.pybullet import bullet_client as bc
class Lidar:
def __init__(
self, origin, sensor_params: SensorParams, bullet_client: bc.BulletClient
):
self._origin = origin
self._sensor_params = sensor_params
self._bullet_client = bullet_client
self._n_threads = psutil.cpu_count(logical=False)
        # As an optimization we compute a set of "base rays" once, translate them to
        # follow the sensor origin, and then trace them for collisions.
self._base_rays = None
self._static_lidar_noise = self._compute_static_lidar_noise()
@property
def origin(self):
return self._origin
@origin.setter
def origin(self, value):
self._origin = value
def _compute_static_lidar_noise(self):
n_rays = int(
(self._sensor_params.end_angle - self._sensor_params.start_angle)
/ self._sensor_params.angle_resolution
)
n_points = n_rays * len(self._sensor_params.laser_angles)
static_lidar_noise = []
for _ in range(n_points):
static_lidar_noise.append(
random.gauss(
self._sensor_params.noise_mu, self._sensor_params.noise_sigma
)
)
return np.array(static_lidar_noise, dtype=np.float)
def compute_point_cloud(self):
rays = self._compute_rays()
point_cloud, hits = self._trace_rays(rays)
# point_cloud = self._apply_noise(point_cloud)
assert (
len(point_cloud) == len(hits) == len(rays) == len(self._static_lidar_noise)
)
return point_cloud, hits, rays
def _compute_rays(self):
if self._base_rays is None:
self._base_rays = []
n_rays = int(
(self._sensor_params.end_angle - self._sensor_params.start_angle)
/ self._sensor_params.angle_resolution
)
yaws = -self._sensor_params.laser_angles
rolls = np.arange(n_rays) * self._sensor_params.angle_resolution
for yaw, roll in itertools.product(yaws, rolls):
rot = pybullet.getQuaternionFromEuler((roll, 0, yaw))
origin = np.array([0, 0, 0])
direction = rotate_quat(
np.asarray(rot, dtype=float),
np.asarray((0, self._sensor_params.max_distance, 0), dtype=float),
)
self._base_rays.append((origin, direction))
rays = [
(origin + self._origin, direction + self._origin)
for origin, direction in self._base_rays
]
return rays
def _trace_rays(self, rays):
results = []
for batched_rays in batches(
rays, int(pybullet.MAX_RAY_INTERSECTION_BATCH_SIZE - 1)
):
origins, directions = zip(*batched_rays)
results.extend(
self._bullet_client.rayTestBatch(origins, directions, self._n_threads)
)
hit_ids, _, _, positions, _ = zip(*results)
positions = list(positions)
hits = []
for i, position in enumerate(positions):
hit = hit_ids[i] != -1
hits.append(hit)
positions[i] = (
np.array(position) if hit else np.array([np.inf, np.inf, np.inf])
)
return positions, hits
def _apply_noise(self, point_cloud):
dynamic_noise = np.random.normal(
self._sensor_params.noise_mu,
self._sensor_params.noise_sigma,
size=len(point_cloud),
)
local_pc = point_cloud - self._origin
noise = self._static_lidar_noise + dynamic_noise
return point_cloud + (
local_pc
/ np.linalg.norm(local_pc, axis=1)[:, np.newaxis]
* noise[:, np.newaxis]
)
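# A hedged usage sketch (assumes `client` is an existing bc.BulletClient and `params` is a
# SensorParams instance, e.g. one of the presets defined in lidar_sensor_params):
#   lidar = Lidar(origin=np.array([0.0, 0.0, 1.8]), sensor_params=params, bullet_client=client)
#   point_cloud, hits, rays = lidar.compute_point_cloud()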
|
tests/macro/scripts/MPI/np_point_to_point.py | dina-fouad/pyccel | 206 | 12756594 | <reponame>dina-fouad/pyccel<filename>tests/macro/scripts/MPI/np_point_to_point.py
# pylint: disable=missing-function-docstring, missing-module-docstring/
from mpi4py import MPI
from numpy import zeros
from numpy import ones
if __name__ == '__main__':
rank = -1
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
# passing MPI datatypes explicitly
if rank == 0:
data = ones(5, 'int')
comm.Send([data, MPI.INT], dest=1, tag=77)
elif rank == 1:
data = zeros(5, 'int')
comm.Recv([data, MPI.INT], source=0, tag=77)
print(data)
# automatic MPI datatype discovery
if rank == 0:
data_ = ones(5, 'double')
        comm.Send(data_, dest=1, tag=13)
elif rank == 1:
data_ = zeros(5, 'double')
        comm.Recv(data_, source=0, tag=13)
        print(data_)
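    # To try this script locally (assuming mpi4py and an MPI runtime such as Open MPI are
    # installed), run it with exactly two processes, e.g.:
    #   mpiexec -n 2 python np_point_to_point.py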
|
spot_micro_joy/scripts/spotMicroJoystickMove.py | zoltan0907/spotMicro | 1,563 | 12756606 | #!/usr/bin/python
import rospy
from std_msgs.msg import Float32, Bool
from sensor_msgs.msg import Joy
from geometry_msgs.msg import Vector3
from geometry_msgs.msg import Twist
from math import pi
class SpotMicroJoystickControl():
BUTTON_IDLE = 0
BUTTON_WALK = 1
BUTTON_STAND = 2
BUTTON_ANGLE = 3
ANGLE_AXES_ROLL = 0
ANGLE_AXES_HEIGHT = 1
ANGLE_AXES_YAW = 2
ANGLE_AXES_PITCH = 3
WALK_AXES_FORWARD = 1
WALK_AXES_STRAFE = 0
WALK_AXES_YAW = 2
MODE_IDLE = 0
MODE_STAND = 1
MODE_ANGLE = 2
MODE_WALK = 3
MAX_ROLL_DEG = 45
MAX_YAW_DEG = 45
MAX_PATCH_DEG = 45
MAX_FORWARD_SPEED = 0.05
MAX_STRAFE_SPEED = 0.05
MAX_YAW_SPEED_DEG = 15
def __init__(self):
self._angle_cmd_msg = Vector3()
self._angle_cmd_msg.x = 0
self._angle_cmd_msg.y = 0
self._angle_cmd_msg.z = 0
self._vel_cmd_msg = Twist()
self._vel_cmd_msg.linear.x = 0
self._vel_cmd_msg.linear.y = 0
self._vel_cmd_msg.linear.z = 0
self._vel_cmd_msg.angular.x = 0
self._vel_cmd_msg.angular.y = 0
self._vel_cmd_msg.angular.z = 0
self._walk_event_cmd_msg = Bool()
self._walk_event_cmd_msg.data = True # Mostly acts as an event driven action on receipt of a true message
self._stand_event_cmd_msg = Bool()
self._stand_event_cmd_msg.data = True
self._idle_event_cmd_msg = Bool()
self._idle_event_cmd_msg.data = True
rospy.loginfo("Setting Up the Spot Micro Joystick Control Node...")
# Set up and title the ros node for this code
rospy.init_node('spot_micro_joystick_control')
# Create publishers for commanding velocity, angle, and robot states
self._ros_pub_angle_cmd = rospy.Publisher('/angle_cmd', Vector3, queue_size=1)
self._ros_pub_vel_cmd = rospy.Publisher('/cmd_vel', Twist, queue_size=1)
self._ros_pub_walk_cmd = rospy.Publisher('/walk_cmd', Bool, queue_size=1)
self._ros_pub_stand_cmd = rospy.Publisher('/stand_cmd', Bool, queue_size=1)
self._ros_pub_idle_cmd = rospy.Publisher('/idle_cmd', Bool, queue_size=1)
        rospy.loginfo("Joystick control node publishers correctly initialized")
def reset_all_motion_commands_to_zero(self):
'''Reset body motion cmd states to zero and publish zero value body motion commands'''
self._vel_cmd_msg.linear.x = 0
self._vel_cmd_msg.linear.y = 0
self._vel_cmd_msg.linear.z = 0
self._vel_cmd_msg.angular.x = 0
self._vel_cmd_msg.angular.y = 0
self._vel_cmd_msg.angular.z = 0
self._ros_pub_vel_cmd.publish(self._vel_cmd_msg)
def reset_all_angle_commands_to_zero(self):
'''Reset angle cmd states to zero and publish them'''
self._angle_cmd_msg.x = 0
self._angle_cmd_msg.y = 0
self._angle_cmd_msg.z = 0
self._ros_pub_angle_cmd.publish(self._angle_cmd_msg)
def on_joy(self, msg):
self.on_joy_buttons(msg.buttons)
self.on_joy_axes(msg.axes)
def on_joy_buttons(self, buttons):
if buttons[self.BUTTON_IDLE] == 1:
self._ros_pub_idle_cmd.publish(self._idle_event_cmd_msg)
rospy.loginfo('Idle command issued from joystick.')
self.mode = self.MODE_IDLE
elif buttons[self.BUTTON_STAND] == 1:
self._ros_pub_stand_cmd.publish(self._stand_event_cmd_msg)
rospy.loginfo('Stand command issued from joystick.')
self.mode = self.MODE_STAND
elif buttons[self.BUTTON_ANGLE] == 1:
self.reset_all_angle_commands_to_zero()
rospy.loginfo('Entering joystick angle command mode.')
self.mode = self.MODE_ANGLE
elif buttons[self.BUTTON_WALK] == 1:
self.reset_all_angle_commands_to_zero()
self._ros_pub_walk_cmd.publish(self._walk_event_cmd_msg)
rospy.loginfo('Entering joystick walk command mode.')
self.mode = self.MODE_WALK
def on_joy_axes(self, axes):
if self.mode == self.MODE_ANGLE:
self.on_joy_angle_mode(axes)
elif self.mode == self.MODE_WALK:
self.on_joy_walk_mode(axes)
def on_joy_walk_mode(self, axes):
self._vel_cmd_msg.linear.x = axes[self.WALK_AXES_FORWARD] * self.MAX_FORWARD_SPEED
self._vel_cmd_msg.linear.y = axes[self.WALK_AXES_STRAFE] * self.MAX_STRAFE_SPEED
self._vel_cmd_msg.angular.z = pi / 180 * axes[self.WALK_AXES_YAW] * self.MAX_YAW_SPEED_DEG
print('Cmd Values: x speed: %1.3f m/s, y speed: %1.3f m/s, yaw rate: %1.3f deg/s ' \
% (self._vel_cmd_msg.linear.x, self._vel_cmd_msg.linear.y, self._vel_cmd_msg.angular.z * 180 / pi))
self._ros_pub_vel_cmd.publish(self._vel_cmd_msg)
def on_joy_angle_mode(self, axes):
self._angle_cmd_msg.x = pi / 180 * axes[self.ANGLE_AXES_ROLL] * self.MAX_ROLL_DEG * -1
self._angle_cmd_msg.y = pi / 180 * axes[self.ANGLE_AXES_PITCH] * self.MAX_PATCH_DEG * -1
self._angle_cmd_msg.z = pi / 180 * axes[self.ANGLE_AXES_YAW] * self.MAX_YAW_DEG
print('Cmd Values: phi: %1.3f deg, theta: %1.3f deg, psi: %1.3f deg ' \
% (
self._angle_cmd_msg.x * 180 / pi, self._angle_cmd_msg.y * 180 / pi,
self._angle_cmd_msg.z * 180 / pi))
self._ros_pub_angle_cmd.publish(self._angle_cmd_msg)
def run(self):
print("green = idle")
print("yellow = stand")
print("blue = angle")
print("red = walk")
# Publish all body motion commands to 0
self.reset_all_motion_commands_to_zero()
rospy.Subscriber("/joy", Joy, self.on_joy)
rospy.spin()
if __name__ == "__main__":
smjc = SpotMicroJoystickControl()
smjc.run()
|
models/SemanticSegmentation/ICNet.py | Dou-Yu-xuan/deep-learning-visal | 150 | 12756627 | # !/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2020/10/28 16:41
# @Author : liumin
# @File : ICNet.py
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
__all__ = ["ICNet"]
def Conv1x1BN(in_channels,out_channels):
return nn.Sequential(
nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=1, stride=1, bias=False),
nn.BatchNorm2d(out_channels)
)
def Conv1x1BNReLU(in_channels,out_channels):
return nn.Sequential(
nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=1, stride=1, bias=False),
nn.BatchNorm2d(out_channels),
nn.ReLU(inplace=True)
)
def Conv3x3BN(in_channels,out_channels,stride,dilation=1):
return nn.Sequential(
nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=3, stride=stride, padding=dilation,dilation=dilation, bias=False),
nn.BatchNorm2d(out_channels)
)
def Conv3x3BNReLU(in_channels,out_channels,stride,dilation=1):
return nn.Sequential(
nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=3, stride=stride, padding=dilation,dilation=dilation, bias=False),
nn.BatchNorm2d(out_channels),
nn.ReLU(inplace=True)
)
class CascadeFeatureFusion(nn.Module):
def __init__(self,low_channels, high_channels, out_channels, num_classes):
super(CascadeFeatureFusion, self).__init__()
self.conv_low = Conv3x3BNReLU(low_channels,out_channels,1,dilation=2)
self.conv_high = Conv3x3BNReLU(high_channels,out_channels,1,dilation=1)
self.relu = nn.ReLU(inplace=True)
self.conv_low_cls = nn.Conv2d(out_channels, num_classes, 1, bias=False)
def forward(self, x_low, x_high):
x_low = F.interpolate(x_low, size=x_high.size()[2:], mode='bilinear', align_corners=True)
x_low = self.conv_low(x_low)
x_high = self.conv_high(x_high)
out = self.relu(x_low + x_high)
x_low_cls = self.conv_low_cls(x_low)
return out, x_low_cls
class Backbone(nn.Module):
def __init__(self, pyramids=[1,2,3,6]):
super(Backbone, self).__init__()
self.pretrained = torchvision.models.resnet50(pretrained=True)
def forward(self, x):
x = self.pretrained.conv1(x)
x = self.pretrained.bn1(x)
x = self.pretrained.relu(x)
x = self.pretrained.maxpool(x)
c1 = self.pretrained.layer1(x)
c2 = self.pretrained.layer2(c1)
c3 = self.pretrained.layer3(c2)
c4 = self.pretrained.layer4(c3)
return c1, c2, c3, c4
class PyramidPoolingModule(nn.Module):
def __init__(self, pyramids=[1,2,3,6]):
super(PyramidPoolingModule, self).__init__()
self.pyramids = pyramids
def forward(self, x):
feat = x
height, width = x.shape[2:]
for bin_size in self.pyramids:
feat_x = F.adaptive_avg_pool2d(x, output_size=bin_size)
feat_x = F.interpolate(feat_x, size=(height, width), mode='bilinear', align_corners=True)
feat = feat + feat_x
return feat
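# ICNet combines three resolution branches: a shallow convolutional stem on the
# full-resolution input (sub1) and a shared ResNet-50 backbone applied to 1/2- and
# 1/4-scale inputs (sub2 / sub4), fused coarse-to-fine by the CFF modules with
# auxiliary classification outputs for training.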
class ICNet(nn.Module):
def __init__(self, num_classes):
super(ICNet, self).__init__()
self.conv_sub1 = nn.Sequential(
Conv3x3BNReLU(3, 32, 2),
Conv3x3BNReLU(32, 32, 2),
Conv3x3BNReLU(32, 64, 2)
)
self.backbone = Backbone()
self.ppm = PyramidPoolingModule()
self.cff_12 = CascadeFeatureFusion(128, 64, 128, num_classes)
self.cff_24 = CascadeFeatureFusion(2048, 512, 128, num_classes)
self.conv_cls = nn.Conv2d(128, num_classes, 1, bias=False)
def forward(self, x):
# sub 1
x_sub1 = self.conv_sub1(x)
# sub 2
x_sub2 = F.interpolate(x, scale_factor=0.5, mode='bilinear')
_, x_sub2, _, _ = self.backbone(x_sub2)
# sub 4
x_sub4 = F.interpolate(x, scale_factor=0.25, mode='bilinear')
_, _, _, x_sub4 = self.backbone(x_sub4)
# add PyramidPoolingModule
x_sub4 = self.ppm(x_sub4)
outs = list()
x_cff_24, x_24_cls = self.cff_24(x_sub4, x_sub2)
outs.append(x_24_cls)
# x_cff_12, x_12_cls = self.cff_12(x_sub2, x_sub1)
x_cff_12, x_12_cls = self.cff_12(x_cff_24, x_sub1)
outs.append(x_12_cls)
up_x2 = F.interpolate(x_cff_12, scale_factor=2, mode='bilinear')
up_x2 = self.conv_cls(up_x2)
outs.append(up_x2)
up_x8 = F.interpolate(up_x2, scale_factor=4, mode='bilinear')
outs.append(up_x8)
# 1 -> 1/4 -> 1/8 -> 1/16
outs.reverse()
return outs
if __name__ == '__main__':
model = ICNet(num_classes=19)
print(model)
input = torch.randn(1,3,512,512)
output = model(input)
print(output[0].shape)
print(output[1].shape)
print(output[2].shape)
print(output[3].shape) |
chrome/test/security_tests/security_tests.gyp | nagineni/chromium-crosswalk | 231 | 12756629 | <filename>chrome/test/security_tests/security_tests.gyp
# Copyright (c) 2009 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'variables': {
'chromium_code': 1,
},
'targets': [
{
'target_name': 'security_tests',
'type': 'shared_library',
'sources': [
'../../../sandbox/win/tests/validation_tests/commands.cc',
'../../../sandbox/win/tests/validation_tests/commands.h',
'ipc_security_tests.cc',
'ipc_security_tests.h',
'security_tests.cc',
],
},
],
}
|
jina/types/document/helper.py | arijitdas123student/jina | 15,179 | 12756641 | import functools
from typing import Iterable, List
__all__ = ['DocGroundtruthPair']
if False:
from . import Document
class DocGroundtruthPair:
"""
    Helper class to expose a common interface to the traversal logic of the BaseExecutable Driver.
    Note that it checks that `docs` and `groundtruths` share the same matching structure, so that while
    traversing, the driver can be applied at a comparable level of granularity and adjacency.
    This does not prevent comparing, at the end, a document with 10 matches against a groundtruth with 20 matches.
:param doc: Target `Document`.
:param groundtruth: The :class:`Document` with desired state.
"""
def __init__(self, doc: 'Document', groundtruth: 'Document'):
"""Set constructor method.
:param doc: actual Document
:param groundtruth: groundtruth Document
"""
self.doc = doc
self.groundtruth = groundtruth
@property
def matches(self) -> Iterable['DocGroundtruthPair']:
"""Get the pairs between matches and Groundtruth.
:yields: DocGroundtruthPair object
"""
assert len(self.doc.matches) == len(self.groundtruth.matches)
for doc, groundtruth in zip(self.doc.matches, self.groundtruth.matches):
yield DocGroundtruthPair(doc, groundtruth)
@property
def chunks(self) -> Iterable['DocGroundtruthPair']:
"""Get the pairs between chunks and Groundtruth.
:yields: DocGroundtruthPair object
"""
assert len(self.doc.chunks) == len(self.groundtruth.chunks)
for doc, groundtruth in zip(self.doc.chunks, self.groundtruth.chunks):
yield DocGroundtruthPair(doc, groundtruth)
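# A minimal usage sketch (not part of the original module): pair a Document with its
# groundtruth and walk their matches in lockstep; `evaluate_pair` is a hypothetical
# callback, and both Documents are assumed to carry the same number of matches.
#
#   pair = DocGroundtruthPair(doc, groundtruth)
#   for match_pair in pair.matches:
#       evaluate_pair(match_pair.doc, match_pair.groundtruth)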
class VersionedMixin:
"""
Helper class to add versioning to an object. The version number is incremented each time an attribute is set.
"""
version = 0
ON_GETATTR: List = []
def _increase_version(self):
super().__setattr__('version', self.version + 1)
def __setattr__(self, attr, value):
super().__setattr__(attr, value)
self._increase_version()
def __delattr__(self, attr):
super(VersionedMixin, self).__delattr__(attr)
self._increase_version()
def versioned(fn):
"""
Decorator function that increases the version number each time the decorated method is called.
The class of the decorated method must be a subclass of :class:`VersionedMixin`
:param fn: the method to decorate
:return: decorated function
"""
@functools.wraps(fn)
def wrapper(self, *args, **kwargs):
self._increase_version()
return fn(self, *args, **kwargs)
return wrapper
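# Illustrative sketch of how VersionedMixin and @versioned interact (not part of the
# original module): attribute writes and decorated calls both bump `version`.
#
#   class Doc(VersionedMixin):
#       @versioned
#       def clear(self):
#           pass
#
#   d = Doc()          # class-level version == 0
#   d.text = 'hello'   # __setattr__ -> version == 1
#   d.clear()          # decorated call -> version == 2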
|
nlp_gym/core_components/embeddings.py | lipucky/nlp-gym | 120 | 12756657 | <gh_stars>100-1000
from flair.embeddings import DocumentPoolEmbeddings, WordEmbeddings
import flair
import torch
flair.device = torch.device('cpu')
class DocEmbeddings:
__instance = None
@staticmethod
def getInstance():
""" Static access method. """
if DocEmbeddings.__instance is None:
DocEmbeddings()
return DocEmbeddings.__instance
def __init__(self):
""" Virtually private constructor. """
if DocEmbeddings.__instance is not None:
raise Exception("This class is a singleton!")
else:
doc_embeddings = DocumentPoolEmbeddings([WordEmbeddings("glove")])
DocEmbeddings.__instance = doc_embeddings
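# Illustrative usage (assumes flair's Sentence API): every call to getInstance()
# returns the same DocumentPoolEmbeddings object, so the GloVe vectors load only once.
#
#   from flair.data import Sentence
#   embedder = DocEmbeddings.getInstance()
#   sentence = Sentence("hello world")
#   embedder.embed(sentence)                       # pooled vector in sentence.embedding
#   assert DocEmbeddings.getInstance() is embedder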
|
examples/eventget.py | hirnimeshrampuresoftware/python-tcod | 231 | 12756665 | #!/usr/bin/env python3
# To the extent possible under law, the libtcod maintainers have waived all
# copyright and related or neighboring rights for this example. This work is
# published from: United States.
# https://creativecommons.org/publicdomain/zero/1.0/
"""An demonstration of event handling using the tcod.event module.
"""
from typing import List
import tcod
WIDTH, HEIGHT = 720, 480
def main() -> None:
"""Example program for tcod.event"""
event_log: List[str] = []
motion_desc = ""
with tcod.context.new(width=WIDTH, height=HEIGHT) as context:
console = context.new_console()
while True:
# Display all event items.
console.clear()
console.print(0, console.height - 1, motion_desc)
for i, item in enumerate(event_log[::-1]):
y = console.height - 3 - i
if y < 0:
break
console.print(0, y, item)
context.present(console, integer_scaling=True)
# Handle events.
for event in tcod.event.wait():
context.convert_event(event) # Set tile coordinates for event.
print(repr(event))
if isinstance(event, tcod.event.Quit):
raise SystemExit()
if isinstance(event, tcod.event.WindowResized) and event.type == "WINDOWRESIZED":
console = context.new_console()
if isinstance(event, tcod.event.MouseMotion):
motion_desc = str(event)
else:
event_log.append(str(event))
if __name__ == "__main__":
main()
|
examples/example_network_info.py | seth586/lndmanage | 166 | 12756717 | <gh_stars>100-1000
from lndmanage.lib.network_info import NetworkAnalysis
from lndmanage.lib.node import LndNode
from lndmanage import settings
import logging.config
logging.config.dictConfig(settings.logger_config)
logger = logging.getLogger(__name__)
if __name__ == '__main__':
node = LndNode()
network_analysis = NetworkAnalysis(node)
network_analysis.print_node_overview(node.pub_key)
logger.info('-------- Nodes with highest capacity: --------')
for n in network_analysis.get_sorted_nodes_by_property():
logger.info(n)
logger.info('-------- Nodes with highest degree: --------')
for n in network_analysis.get_sorted_nodes_by_property(key='degree'):
logger.info(n)
logger.info('-------- Nodes with highest capacity/channel: --------')
for n in network_analysis.get_sorted_nodes_by_property(key='capacity_per_channel', min_degree=10):
logger.info(n)
logger.info('-------- Nodes with lowest capacity/channel: --------')
for n in network_analysis.get_sorted_nodes_by_property(key='capacity_per_channel', min_degree=20, decrementing=False):
logger.info(n)
logger.info('-------- Nodes with most user nodes: --------')
for n in network_analysis.get_sorted_nodes_by_property(key='user_nodes', min_degree=20):
logger.info(n)
network_analysis.print_find_nodes_giving_most_secondary_hops(node.pub_key)
|
CircuitPython_Heart_Sculpture/code.py | gamblor21/Adafruit_Learning_System_Guides | 665 | 12756793 | <reponame>gamblor21/Adafruit_Learning_System_Guides<filename>CircuitPython_Heart_Sculpture/code.py
import time
import adafruit_dotstar
from rainbowio import colorwheel
import board
import touchio
pixel = adafruit_dotstar.DotStar(board.APA102_SCK, board.APA102_MOSI, 1, brightness=.1)
touch = touchio.TouchIn(board.D1)
hue = 0
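# The loop below only advances the hue while the capacitive pad reads True, so the
# sculpture cycles through colorwheel()'s 0-255 rainbow only while it is touched.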
while True:
hue = hue + touch.value * 3
if hue > 255: # Wrap back around to red
hue = hue - 255
pixel[0] = colorwheel(hue)
time.sleep(.05)
|
epitran/test/test_farsi.py | dwijap/epitran | 422 | 12756863 | <gh_stars>100-1000
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import unittest
import epitran
class TestSorani(unittest.TestCase):
def setUp(self):
self.epi = epitran.Epitran(u'fas-Arab')
def test_faarsi(self):
tr = self.epi.transliterate('فارسی')
self.assertEqual(tr, 'fɒrsj')
def test_rowshan(self):
tr = self.epi.transliterate('روشن')
self.assertEqual(tr, 'rvʃn')
def test_hamaye(self):
tr = self.epi.transliterate('همهٔ')
self.assertEqual(tr, 'hmhʔ')
def test_aafraad(self):
tr = self.epi.transliterate('افراد')
self.assertEqual(tr, 'ɒfrɒd')
def test_bashar(self):
tr = self.epi.transliterate('بشر')
self.assertEqual(tr, 'bʃr')
def test_aazaad(self):
tr = self.epi.transliterate('آزاد')
self.assertEqual(tr, 'ɒzɒd')
def test_donjaa(self):
tr = self.epi.transliterate('دنیا')
self.assertEqual(tr, 'dnjɒ')
def test_miaayand(self):
tr = self.epi.transliterate('میآیند')
self.assertEqual(tr, 'mjɒjnd')
def test_heysiyaat(self):
tr = self.epi.transliterate('حیثیت')
self.assertEqual(tr, 'hjsjt')
|
python/tests/test_dump.py | JX7P/libucl | 1,301 | 12756880 | from .compat import unittest
import ucl
import sys
class DumpTest(unittest.TestCase):
def test_no_args(self):
with self.assertRaises(TypeError):
ucl.dump()
def test_none(self):
self.assertEqual(ucl.dump(None), None)
def test_null(self):
data = { "a" : None }
valid = "a = null;\n"
self.assertEqual(ucl.dump(data), valid)
def test_int(self):
data = { "a" : 1 }
valid = "a = 1;\n"
self.assertEqual(ucl.dump(data), valid)
def test_nested_int(self):
data = { "a" : { "b" : 1 } }
valid = "a {\n b = 1;\n}\n"
self.assertEqual(ucl.dump(data), valid)
def test_int_array(self):
data = { "a" : [1,2,3,4] }
valid = "a [\n 1,\n 2,\n 3,\n 4,\n]\n"
self.assertEqual(ucl.dump(data), valid)
def test_str(self):
data = { "a" : "b" }
valid = "a = \"b\";\n"
self.assertEqual(ucl.dump(data), valid)
@unittest.skipIf(sys.version_info[0] > 2, "Python3 uses unicode only")
def test_unicode(self):
data = { unicode("a") : unicode("b") }
valid = unicode("a = \"b\";\n")
self.assertEqual(ucl.dump(data), valid)
def test_float(self):
data = { "a" : 1.1 }
valid = "a = 1.100000;\n"
self.assertEqual(ucl.dump(data), valid)
def test_boolean(self):
data = { "a" : True, "b" : False }
valid = [
"a = true;\nb = false;\n",
"b = false;\na = true;\n"
]
self.assertIn(ucl.dump(data), valid)
def test_empty_ucl(self):
self.assertEqual(ucl.dump({}), "")
def test_json(self):
data = { "a" : 1, "b": "bleh;" }
valid = [
'{\n "a": 1,\n "b": "bleh;"\n}',
'{\n "b": "bleh;",\n "a": 1\n}'
]
self.assertIn(ucl.dump(data, ucl.UCL_EMIT_JSON), valid)
|
test/unit/agent/objects/nginx/filters.py | dp92987/nginx-amplify-agent | 308 | 12756883 | <reponame>dp92987/nginx-amplify-agent<filename>test/unit/agent/objects/nginx/filters.py
# -*- coding: utf-8 -*-
import re
from hamcrest import *
from amplify.agent.objects.nginx.filters import Filter
from test.base import BaseTestCase
__author__ = "<NAME>"
__copyright__ = "Copyright (C) Nginx, Inc. All rights reserved."
__license__ = ""
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
class FiltersTestCase(BaseTestCase):
def test_init(self):
filtr = Filter(
filter_rule_id='1',
metric='http.something',
data=[
['logname', '~', 'foo.txt'],
['$request_method', '~', 'post'],
['$request_uri', '~', '.*\.gif'],
['$status', '!~', '200']
]
)
assert_that(filtr.filter_rule_id, equal_to('1'))
assert_that(filtr.metric, equal_to('http.something'))
assert_that(filtr.filename, equal_to('foo.txt'))
assert_that('logname', not_(is_in(filtr.data)))
assert_that(filtr.data['request_method'], equal_to(re.compile("POST")))
assert_that(filtr.data['request_uri'], equal_to(re.compile(".*\.gif")))
assert_that(filtr.data['status'], equal_to(re.compile("200")))
assert_that('logname', not_(is_in(filtr._negated_conditions)))
assert_that(filtr._negated_conditions['request_method'], equal_to(False))
assert_that(filtr._negated_conditions['request_uri'], equal_to(False))
assert_that(filtr._negated_conditions['status'], equal_to(True))
def test_init_without_filename(self):
filtr = Filter(
filter_rule_id='1',
metric='http.something',
data=[
['$request_method', '~', 'post'],
['$request_uri', '~', '*.gif']
]
)
assert_that(filtr.filename, equal_to(None))
def test_empty(self):
filtr = Filter(
filter_rule_id='1',
metric='http.something',
data=[]
)
assert_that(filtr.empty, equal_to(True))
def test_filematch(self):
filtr = Filter(
filter_rule_id='1',
metric='http.something',
data=[
['logname', '~', 'foo.txt']
]
)
assert_that(filtr.matchfile('foo.txt'), equal_to(True))
assert_that(filtr.matchfile('foo.log'), equal_to(False))
filtr = Filter(
filter_rule_id='1',
metric='http.something',
data=[
['logname', '!~', 'foo.txt']
]
)
assert_that(filtr.matchfile('foo.txt'), equal_to(False))
assert_that(filtr.matchfile('foo.log'), equal_to(True))
filtr = Filter(
filter_rule_id='1',
metric='http.something',
data=[
['$request_method', '~', 'post'],
['$request_uri', '~', '.*\.gif'],
                ['$status', '!~', '200']
            ]
)
assert_that(filtr.matchfile('foo.txt'), equal_to(True))
assert_that(filtr.matchfile('foo.log'), equal_to(True))
|
examples/voc/evaluate.py | pazeshun/fcn | 251 | 12756902 | #!/usr/bin/env python
import argparse
import os.path as osp
import re
import chainer
from chainer import cuda
import fcn
import numpy as np
import tqdm
def main():
parser = argparse.ArgumentParser()
parser.add_argument('model_file')
parser.add_argument('-g', '--gpu', default=0, type=int,
help='if -1, use cpu only (default: 0)')
args = parser.parse_args()
dataset = fcn.datasets.VOC2011ClassSeg('seg11valid')
n_class = len(dataset.class_names)
basename = osp.basename(args.model_file).lower()
if basename.startswith('fcn8s-atonce') or \
basename.startswith('fcn8satonce'):
model_name = 'FCN8sAtOnce'
else:
match = re.match('^fcn(32|16|8)s.*$', basename)
if match is None:
print('Unsupported model filename: %s' % args.model_file)
quit(1)
model_name = 'FCN%ss' % match.groups()[0]
model_class = getattr(fcn.models, model_name)
model = model_class(n_class=n_class)
chainer.serializers.load_npz(args.model_file, model)
if args.gpu >= 0:
cuda.get_device(args.gpu).use()
model.to_gpu()
lbl_preds, lbl_trues = [], []
for i in tqdm.trange(len(dataset)):
datum, lbl_true = fcn.datasets.transform_lsvrc2012_vgg16(
dataset.get_example(i))
x_data = np.expand_dims(datum, axis=0)
if args.gpu >= 0:
x_data = cuda.to_gpu(x_data)
with chainer.no_backprop_mode():
x = chainer.Variable(x_data)
with chainer.using_config('train', False):
model(x)
lbl_pred = chainer.functions.argmax(model.score, axis=1)[0]
lbl_pred = chainer.cuda.to_cpu(lbl_pred.data)
lbl_preds.append(lbl_pred)
lbl_trues.append(lbl_true)
acc, acc_cls, mean_iu, fwavacc = \
fcn.utils.label_accuracy_score(lbl_trues, lbl_preds, n_class)
print('Accuracy: %.4f' % (100 * acc))
print('AccClass: %.4f' % (100 * acc_cls))
print('Mean IoU: %.4f' % (100 * mean_iu))
print('Fwav Acc: %.4f' % (100 * fwavacc))
if __name__ == '__main__':
main()
|
tests/test_fit_result.py | pckroon/symfit | 192 | 12756930 | # SPDX-FileCopyrightText: 2014-2020 <NAME>
#
# SPDX-License-Identifier: MIT
from __future__ import division, print_function
import pytest
import pickle
from collections import OrderedDict
import numpy as np
from symfit import (
Variable, Parameter, Fit, FitResults, Eq, Ge, CallableNumericalModel, Model
)
from symfit.distributions import BivariateGaussian
from symfit.core.minimizers import (
BaseMinimizer, MINPACK, BFGS, NelderMead, ChainedMinimizer, BasinHopping
)
from symfit.core.objectives import (
LogLikelihood, LeastSquares, VectorLeastSquares, MinimizeModel
)
def ge_constraint(a): # Has to be in the global namespace for pickle.
return a - 1
class TestTestResult():
@classmethod
def setup_class(cls):
xdata = np.linspace(1, 10, 10)
ydata = 3 * xdata ** 2
cls.a = Parameter('a')
cls.b = Parameter('b')
x = Variable('x')
y = Variable('y')
model = Model({y: cls.a * x ** cls.b})
fit = Fit(model, x=xdata, y=ydata)
cls.fit_result = fit.execute()
fit = Fit(model, x=xdata, y=ydata, minimizer=MINPACK)
cls.minpack_result = fit.execute()
fit = Fit(model, x=xdata, objective=LogLikelihood)
cls.likelihood_result = fit.execute()
fit = Fit(model, x=xdata, y=ydata, minimizer=[BFGS, NelderMead])
cls.chained_result = fit.execute()
z = Variable('z')
constraints = [
Eq(cls.a, cls.b),
CallableNumericalModel.as_constraint(
{z: ge_constraint}, connectivity_mapping={z: {cls.a}},
constraint_type=Ge, model=model
)
]
fit = Fit(model, x=xdata, y=ydata, constraints=constraints)
cls.constrained_result = fit.execute()
fit = Fit(model, x=xdata, y=ydata, constraints=constraints,
minimizer=BasinHopping)
cls.constrained_basinhopping_result = fit.execute()
def test_params_type(self):
assert isinstance(self.fit_result.params, OrderedDict)
def test_minimizer_output_type(self):
assert isinstance(self.fit_result.minimizer_output, dict)
assert isinstance(self.minpack_result.minimizer_output, dict)
assert isinstance(self.likelihood_result.minimizer_output, dict)
def test_fitting(self):
"""
Test if the fitting worked in the first place.
"""
assert isinstance(self.fit_result, FitResults)
assert self.fit_result.value(self.a) == pytest.approx(3.0)
assert self.fit_result.value(self.b) == pytest.approx(2.0)
assert isinstance(self.fit_result.stdev(self.a), float)
assert isinstance(self.fit_result.stdev(self.b), float)
assert isinstance(self.fit_result.r_squared, float)
# by definition since there's no fuzzyness
assert self.fit_result.r_squared == 1.0
def test_fitting_2(self):
np.random.seed(43)
mean = (0.62, 0.71) # x, y mean 0.7, 0.7
cov = [
[0.102**2, 0],
[0, 0.07**2]
]
data_1 = np.random.multivariate_normal(mean, cov, 10**5)
mean = (0.33, 0.28) # x, y mean 0.3, 0.3
cov = [ # rho = 0.25
[0.05 ** 2, 0.25 * 0.05 * 0.101],
[0.25 * 0.05 * 0.101, 0.101 ** 2]
]
data_2 = np.random.multivariate_normal(mean, cov, 10**5)
data = np.vstack((data_1, data_2))
# Insert them as y,x here as np fucks up cartesian conventions.
ydata, xedges, yedges = np.histogram2d(data[:, 1], data[:, 0], bins=200,
range=[[0.0, 1.0], [0.0, 1.0]],
density=True)
xcentres = (xedges[:-1] + xedges[1:]) / 2
ycentres = (yedges[:-1] + yedges[1:]) / 2
# Make a valid grid to match ydata
xx, yy = np.meshgrid(xcentres, ycentres, sparse=False)
x = Variable('x')
y = Variable('y')
x0_1 = Parameter('x0_1', value=0.6, min=0.5, max=0.7)
sig_x_1 = Parameter('sig_x_1', value=0.1, min=0.0, max=0.2)
y0_1 = Parameter('y0_1', value=0.7, min=0.6, max=0.8)
sig_y_1 = Parameter('sig_y_1', value=0.05, min=0.0, max=0.2)
rho_1 = Parameter('rho_1', value=0.0, min=-0.5, max=0.5)
A_1 = Parameter('A_1', value=0.5, min=0.3, max=0.7)
g_1 = A_1 * BivariateGaussian(x=x, y=y, mu_x=x0_1, mu_y=y0_1,
sig_x=sig_x_1, sig_y=sig_y_1, rho=rho_1)
x0_2 = Parameter('x0_2', value=0.3, min=0.2, max=0.4)
sig_x_2 = Parameter('sig_x_2', value=0.05, min=0.0, max=0.2)
y0_2 = Parameter('y0_2', value=0.3, min=0.2, max=0.4)
sig_y_2 = Parameter('sig_y_2', value=0.1, min=0.0, max=0.2)
rho_2 = Parameter('rho_2', value=0.26, min=0.0, max=0.8)
A_2 = Parameter('A_2', value=0.5, min=0.3, max=0.7)
g_2 = A_2 * BivariateGaussian(x=x, y=y, mu_x=x0_2, mu_y=y0_2,
sig_x=sig_x_2, sig_y=sig_y_2, rho=rho_2)
model = g_1 + g_2
fit = Fit(model, xx, yy, ydata)
fit_result = fit.execute()
assert fit_result.r_squared > 0.95
for param in fit.model.params:
try:
assert fit_result.stdev(param)**2 == pytest.approx(fit_result.variance(param))
except AssertionError:
assert fit_result.variance(param) <= 0.0
assert np.isnan(fit_result.stdev(param))
# Covariance matrix should be symmetric
for param_1 in fit.model.params:
for param_2 in fit.model.params:
assert fit_result.covariance(param_1, param_2) == pytest.approx(fit_result.covariance(param_2, param_1), rel=1e-3)
def test_minimizer_included(self):
""""The minimizer used should be included in the results."""
assert isinstance(self.constrained_result.minimizer, BaseMinimizer)
assert isinstance(self.constrained_basinhopping_result.minimizer, BaseMinimizer)
assert isinstance(self.likelihood_result.minimizer, BaseMinimizer)
assert isinstance(self.fit_result.minimizer, BaseMinimizer)
assert isinstance(self.chained_result.minimizer, ChainedMinimizer)
for minimizer, cls in zip(self.chained_result.minimizer.minimizers, [BFGS, NelderMead]):
assert isinstance(minimizer, cls)
def test_objective_included(self):
""""The objective used should be included in the results."""
assert isinstance(self.fit_result.objective, LeastSquares)
assert isinstance(self.minpack_result.objective, VectorLeastSquares)
assert isinstance(self.likelihood_result.objective, LogLikelihood)
assert isinstance(self.constrained_result.objective, LeastSquares)
assert isinstance(self.constrained_basinhopping_result.objective, LeastSquares)
def test_constraints_included(self):
"""
Test if the constraints have been properly fed to the results object so
we can easily print their compliance.
"""
# For a constrained fit we expect a list of MinimizeModel objectives.
for constrained_result in [self.constrained_result, self.constrained_basinhopping_result]:
assert isinstance(constrained_result.constraints, list)
for constraint in self.constrained_result.constraints:
assert isinstance(constraint, MinimizeModel)
def test_message_included(self):
"""Status message should be included."""
assert isinstance(self.fit_result.status_message, str)
assert isinstance(self.minpack_result.status_message, str)
assert isinstance(self.likelihood_result.status_message, str)
assert isinstance(self.constrained_result.status_message, str)
assert isinstance(self.constrained_basinhopping_result.status_message, str)
def test_pickle(self):
for fit_result in [self.fit_result, self.chained_result,
self.constrained_basinhopping_result,
self.constrained_result, self.likelihood_result]:
dumped = pickle.dumps(fit_result)
new_result = pickle.loads(dumped)
assert sorted(fit_result.__dict__.keys()) == sorted(new_result.__dict__.keys())
for k, v1 in fit_result.__dict__.items():
v2 = new_result.__dict__[k]
if k == 'minimizer':
assert type(v1) == type(v2)
elif k != 'minimizer_output': # Ignore minimizer_output
if isinstance(v1, np.ndarray):
assert v1 == pytest.approx(v2, nan_ok=True)
def test_gof_presence(self):
"""
Test if the expected goodness of fit estimators are present.
"""
assert hasattr(self.fit_result, 'objective_value')
assert hasattr(self.fit_result, 'r_squared')
assert hasattr(self.fit_result, 'chi_squared')
assert not hasattr(self.fit_result, 'log_likelihood')
assert not hasattr(self.fit_result, 'likelihood')
assert hasattr(self.minpack_result, 'objective_value')
assert hasattr(self.minpack_result, 'r_squared')
assert hasattr(self.minpack_result, 'chi_squared')
assert not hasattr(self.minpack_result, 'log_likelihood')
assert not hasattr(self.minpack_result, 'likelihood')
assert hasattr(self.likelihood_result, 'objective_value')
assert not hasattr(self.likelihood_result, 'r_squared')
assert not hasattr(self.likelihood_result, 'chi_squared')
assert hasattr(self.likelihood_result, 'log_likelihood')
assert hasattr(self.likelihood_result, 'likelihood')
|
tests/test_http_proxy.py | sentyaev/pact-python | 428 | 12756934 | <reponame>sentyaev/pact-python
from unittest import TestCase
from pact.http_proxy import app
from fastapi.testclient import TestClient
client = TestClient(app)
class HttpProxyTestCase(TestCase):
def test_ping(self):
res = client.get('/ping')
self.assertEqual(res.status_code, 200)
assert res.json() == {"ping": "pong"}
def test_handle_http_error(self):
res = client.get(
'/something_does_not_exist'
)
self.assertEqual(res.status_code, 404)
json_res = res.json()
        self.assertEqual(json_res.get('detail'), 'Not Found')
def test_setup(self):
payload = {'anyPayload': 'really'}
res = client.post(
'/setup',
json=payload
)
self.assertEqual(res.status_code, 201)
json_res = res.json()
assert json_res == payload
def setup_state(self, payload):
setup_res = client.post(
'/setup',
json=payload
)
self.assertEqual(setup_res.status_code, 201)
def test_home_should_return_expected_response(self):
message = {
'event': 'ObjectCreated:Put',
'bucket': 'bucket_name',
'key': 'path_to_file_in_s3.pdf',
'documentType': 'application/pdf'
}
data = {
'messageHandlers': {
'A document created successfully': message
}
}
self.setup_state(data)
payload = {
'providerStates': [{'name': 'A document created successfully'}]
}
res = client.post(
'/',
json=payload
)
self.assertEqual(res.json(), {'contents': message})
def test_home_raise_runtime_error_if_no_matched(self):
data = {
'messageHandlers': {
'A document created successfully': {
'event': 'ObjectCreated:Put'
}
}
}
self.setup_state(data)
payload = {
'providerStates': [{'name': 'New state to raise RuntimeError'}]
}
res = client.post(
'/',
json=payload
)
self.assertEqual(res.status_code, 500)
assert res.json() == {
'detail': 'No matched handler.'
}
|
imago/imago.py | jdfrid/imago-forensics | 208 | 12756944 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os,sys
import argparse
import extractor
import helper
from os import walk
def main(args=None):
print """\
##################################################
# imago.py #
# Digital evidences from images! #
# Made with <3 by <NAME> #
# Twitter: @solventred #
##################################################
"""
if args is None:
args = sys.argv[1:]
parser = argparse.ArgumentParser()
parser.add_argument('-i','--input', help='Input directory path', type=str, required=True)
parser.add_argument('-x','--exif', help='Extract exif metadata', action='store_true')
    parser.add_argument('-g','--gps', help='Extract, parse and convert to coordinates the GPS exif metadata from images (if any). It works only with JPEG.', action='store_true')
    parser.add_argument('-e','--ela', help='Extract Error Level Analysis image. It works only with JPEG. *BETA*', action='store_true')
    parser.add_argument('-n','--nude', help='Detect nudity. It works only with JPEG. *BETA*', action='store_true')
    parser.add_argument('-d','--digest', help='Calculate hash digest', type=str, choices=["md5", "sha256", "sha512", "all"])
    parser.add_argument('-p','--percentualhash', help='Calculate perceptual image hashing', type=str, choices=["ahash", "phash", "dhash","whash","all"])
parser.add_argument('-o','--output', help='Output directory path', type=str)
parser.add_argument('-s','--sqli', help='Keep SQLite file after the computation', action='store_true')
    parser.add_argument('-t','--type', help='Select the image type; this flag can be JPEG or TIFF. If this argument is not provided, imago will process all the image types (i.e. JPEG, TIFF)', type=str, choices=["jpeg","tiff"])
args = parser.parse_args()
if (args.exif or args.gps or args.ela or args.digest or args.nude or args.percentualhash):
filetype = ""
if (args.type == "jpeg"):
filetype = "image/jpeg"
elif (args.type == "tiff"):
filetype = "image/tiff"
else:
filetype = "image"
if args.output:
output_path = args.output
else:
output_path = "."
base_dir = args.input
helper.initialize_sqli()
image_list = list(helper.list_files(base_dir, filetype))
for filename in image_list:
print ("Processing %s" % (filename,))
# Creation of the SQLite row for the file
helper.image_row("evidences", filename)
extractor.basic_info(filename)
if args.nude:
extractor.detect_nudity(filename)
if args.gps:
extractor.PIL_exif_data_GPS(filename)
if args.percentualhash == "ahash":
extractor.ahash(filename)
elif args.percentualhash == "phash":
extractor.phash(filename)
elif args.percentualhash == "dhash":
extractor.dhash(filename)
elif args.percentualhash == "whash":
extractor.whash(filename)
elif args.percentualhash == "all":
extractor.ahash(filename)
extractor.phash(filename)
extractor.whash(filename)
extractor.dhash(filename)
if args.digest == "md5":
extractor.md5(filename)
elif args.digest == "sha256":
extractor.sha256(filename)
elif args.digest == "sha512":
extractor.sha512(filename)
elif args.digest == "all":
extractor.md5(filename)
extractor.sha256(filename)
extractor.sha512(filename)
if args.exif:
extractor.exif_info(filename)
if args.ela:
extractor.ela(filename,output_path)
print ("Processing of %s completed!" % (filename,))
# Creation of the file CSV
helper.create_csv(output_path)
if not args.sqli:
os.remove('metadata.db')
elif args.sqli and args.output:
os.rename("metadata.db", os.path.join(args.output,"metadata.db"))
else:
print("ERROR: Select at least one type of extraction")
if __name__ == "__main__":
main()
|
models/nerf_utils.py | tjtanaa/ml-gsn | 202 | 12757004 | <reponame>tjtanaa/ml-gsn<gh_stars>100-1000
# Adapted from https://github.com/krrish94/nerf-pytorch
import torch
from einops import repeat
def meshgrid_xy(tensor1: torch.Tensor, tensor2: torch.Tensor) -> (torch.Tensor, torch.Tensor):
"""Mimick np.meshgrid(..., indexing="xy") in pytorch. torch.meshgrid only allows "ij" indexing.
(If you're unsure what this means, safely skip trying to understand this, and run a tiny example!)
Args:
tensor1 (torch.Tensor): Tensor whose elements define the first dimension of the returned meshgrid.
tensor2 (torch.Tensor): Tensor whose elements define the second dimension of the returned meshgrid.
"""
# TESTED
ii, jj = torch.meshgrid(tensor1, tensor2)
return ii.transpose(-1, -2), jj.transpose(-1, -2)
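# Illustrative check: for tensor1 = [0, 1, 2] (x) and tensor2 = [0, 1] (y) the result
# matches np.meshgrid(x, y, indexing="xy"):
#   ii, jj = meshgrid_xy(torch.arange(3.), torch.arange(2.))
#   # ii == [[0., 1., 2.], [0., 1., 2.]]   jj == [[0., 0., 0.], [1., 1., 1.]]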
def cumprod_exclusive(tensor: torch.Tensor) -> torch.Tensor:
r"""Mimick functionality of tf.math.cumprod(..., exclusive=True), as it isn't available in PyTorch.
Args:
tensor (torch.Tensor): Tensor whose cumprod (cumulative product, see `torch.cumprod`) along dim=-1
is to be computed.
Returns:
cumprod (torch.Tensor): cumprod of Tensor along dim=-1, mimiciking the functionality of
tf.math.cumprod(..., exclusive=True) (see `tf.math.cumprod` for details).
"""
# TESTED
# Only works for the last dimension (dim=-1)
dim = -1
# Compute regular cumprod first (this is equivalent to `tf.math.cumprod(..., exclusive=False)`).
cumprod = torch.cumprod(tensor, dim)
# "Roll" the elements along dimension 'dim' by 1 element.
cumprod = torch.roll(cumprod, 1, dim)
# Replace the first element by "1" as this is what tf.cumprod(..., exclusive=True) does.
cumprod[..., 0] = 1.0
return cumprod
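# Illustrative check: for [a, b, c] along the last dim the exclusive cumprod is
# [1, a, a*b], i.e. the regular cumprod shifted right with a leading one:
#   cumprod_exclusive(torch.tensor([2., 3., 4.]))  # -> tensor([1., 2., 6.])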
def get_ray_bundle_batch(height: int, width: int, focal_length, tform_cam2world: torch.Tensor):
r"""Compute the bundle of rays passing through all pixels of a batch of image (one ray per pixel).
Args:
height (int): Height of an image (number of pixels).
width (int): Width of an image (number of pixels).
focal_length (float or torch.Tensor): Focal length (number of pixels, i.e., calibrated intrinsics).
tform_cam2world (torch.Tensor): A 6-DoF rigid-body transform (shape: :math:`(B, 4, 4)`) that
transforms a 3D point from the camera frame to the "world" frame for the current example.
Returns:
ray_origins (torch.Tensor): A tensor of shape :math:`(B, width, height, 3)` denoting the centers of
each ray. `ray_origins[B][i][j]` denotes the origin of the ray passing through pixel at batch index
`B`, row index `j`, and column index `i`.
ray_directions (torch.Tensor): A tensor of shape :math:`(B, width, height, 3)` denoting the
direction of each ray (a unit vector). `ray_directions[B][i][j]` denotes the direction of the ray
passing through the pixel at batch index `B`, row index `j`, and column index `i`.
"""
x = torch.arange(width, dtype=tform_cam2world.dtype, device=tform_cam2world.device).to(tform_cam2world)
y = torch.arange(height, dtype=tform_cam2world.dtype, device=tform_cam2world.device)
ii, jj = meshgrid_xy(x, y)
if type(focal_length) in [tuple, list]:
# if given two values, assume they are fx and fy
fx, fy = focal_length
else:
# otherwise assume fx and fy share the same magnitude, but opposing polarity
fx, fy = focal_length, -focal_length
# construct unit direction vectors
# shape [height, width, 3]
directions = torch.stack([(ii - width * 0.5) / fx, (jj - height * 0.5) / fy, -torch.ones_like(ii)], dim=-1)
B = tform_cam2world.shape[0]
# shape [B x height x width, 1, 3]
directions = directions.view(1, -1, 1, 3).repeat(B, 1, 1, 1).view(-1, 1, 3)
# shape [B x height x width, 4, 4]
tform_cam2world = tform_cam2world.unsqueeze(1).repeat(1, height * width, 1, 1).view(-1, 4, 4)
ray_directions = torch.sum(directions * tform_cam2world[:, :3, :3], dim=-1).view(B, height, width, 3)
ray_origins = tform_cam2world[:, :3, -1].view(B, height, width, 3)
return ray_origins, ray_directions
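# Illustrative shapes: with height = width = 4 and a batch of B = 2 camera poses,
# both returned tensors (ray_origins, ray_directions) have shape [2, 4, 4, 3].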
def get_sample_points(
tform_cam2world, F, H, W, samples_per_ray=32, near=0, far=1, use_viewdirs=True, perturb=False, mask=None
):
B = tform_cam2world.shape[0]
ray_origins, ray_directions = get_ray_bundle_batch(H, W, F, tform_cam2world) # [B, H, W, 3]
ro = ray_origins.view((B, -1, 3))
rd = ray_directions.view((B, -1, 3))
if mask is not None:
if len(mask.shape) == 1:
# same mask for each image in batch, mask is shape [n_patch_pixels]
ro = ro[:, mask, :]
rd = rd[:, mask, :]
elif len(mask.shape) == 2:
# different mask for each image in batch, mask is shape [B, n_patch_pixels]
mask = repeat(mask, 'b n_patch_pixels -> b n_patch_pixels 3')
# ro is shape [B, n_pixels, 3], gather along pixel dimension
ro = torch.gather(ro, dim=1, index=mask)
rd = torch.gather(rd, dim=1, index=mask)
near = near * torch.ones_like(rd[..., :1])
far = far * torch.ones_like(rd[..., :1])
num_rays = ro.shape[1]
t_vals = torch.linspace(0.0, 1.0, samples_per_ray, dtype=ro.dtype, device=ro.device)
z_vals = near * (1.0 - t_vals) + far * t_vals
if perturb:
# Get intervals between samples.
mids = 0.5 * (z_vals[..., 1:] + z_vals[..., :-1])
upper = torch.cat((mids, z_vals[..., -1:]), dim=-1)
lower = torch.cat((z_vals[..., :1], mids), dim=-1)
# Stratified samples in those intervals.
t_rand = torch.rand(z_vals.shape, dtype=ro.dtype, device=ro.device)
z_vals = lower + (upper - lower) * t_rand
# pts -> (B, H*W, N_samples, 3)
# pts are in world coordinates
pts = ro[..., None, :] + rd[..., None, :] * z_vals[..., :, None]
if use_viewdirs:
viewdirs = rd
viewdirs = viewdirs / viewdirs.norm(p=2, dim=-1).unsqueeze(-1)
viewdirs = viewdirs.view((B, -1, 1, 3))
# input_dirs -> (B, H*W, N_samples, 3)
viewdirs = viewdirs.expand(pts.shape)
else:
viewdirs = None
return pts, viewdirs, z_vals, rd, ro
def volume_render_radiance_field(
rgb,
occupancy,
depth_values,
ray_directions,
radiance_field_noise_std=0.0,
alpha_activation='relu',
activate_rgb=True,
density_bias=0,
):
one_e_10 = torch.tensor([1e10], dtype=ray_directions.dtype, device=ray_directions.device)
dists = torch.cat(
(
depth_values[..., 1:] - depth_values[..., :-1],
one_e_10.expand(depth_values[..., :1].shape),
),
dim=-1,
)
dists = dists * ray_directions[..., None, :].norm(p=2, dim=-1)
noise = 0.0
if radiance_field_noise_std > 0.0:
noise = (
torch.randn(
occupancy.shape,
dtype=occupancy.dtype,
device=occupancy.device,
)
* radiance_field_noise_std
)
if alpha_activation == 'relu':
sigma_a = torch.nn.functional.relu(occupancy + noise)
elif alpha_activation == 'softplus':
# Deformable NeRF uses softplus instead of ReLU https://arxiv.org/pdf/2011.12948.pdf
sigma_a = torch.nn.functional.softplus(occupancy + noise + density_bias)
alpha = 1.0 - torch.exp(-sigma_a * dists)
weights = alpha * cumprod_exclusive(1.0 - alpha + 1e-10)
if activate_rgb:
rgb = torch.sigmoid(rgb)
# widened sigmoid from https://github.com/google/mipnerf/blob/main/internal/models.py#L123
rgb_padding = 0.001
rgb = rgb * (1 + 2 * rgb_padding) - rgb_padding
rgb_map = weights[..., None] * rgb
rgb_map = rgb_map.sum(dim=-2)
depth_map = weights * depth_values
depth_map = depth_map.sum(dim=-1)
acc_map = weights.sum(dim=-1)
disp_map = 1.0 / torch.max(1e-10 * torch.ones_like(depth_map), depth_map / acc_map)
# occupancy prior from Neural Volumes
# https://github.com/facebookresearch/neuralvolumes/blob/master/models/neurvol1.py#L130
occupancy_prior = torch.mean(
torch.log(0.1 + alpha.view(alpha.size(0), -1)) + torch.log(0.1 + 1.0 - alpha.view(alpha.size(0), -1)) - -2.20727
)
return rgb_map, disp_map, acc_map, weights, depth_map, occupancy_prior
def sample_pdf_2(bins, weights, num_samples, det=False):
"""sample_pdf function from another concurrent pytorch implementation
by yenchenlin (https://github.com/yenchenlin/nerf-pytorch).
"""
weights = weights + 1e-5
pdf = weights / torch.sum(weights, dim=-1, keepdim=True)
cdf = torch.cumsum(pdf, dim=-1)
cdf = torch.cat([torch.zeros_like(cdf[..., :1]), cdf], dim=-1) # (batchsize, len(bins))
# Take uniform samples
if det:
u = torch.linspace(0.0, 1.0, steps=num_samples, dtype=weights.dtype, device=weights.device)
u = u.expand(list(cdf.shape[:-1]) + [num_samples])
else:
u = torch.rand(
list(cdf.shape[:-1]) + [num_samples],
dtype=weights.dtype,
device=weights.device,
)
# Invert CDF
u = u.contiguous()
cdf = cdf.contiguous()
inds = torch.searchsorted(cdf, u, right=True)
below = torch.max(torch.zeros_like(inds - 1), inds - 1)
above = torch.min((cdf.shape[-1] - 1) * torch.ones_like(inds), inds)
inds_g = torch.stack((below, above), dim=-1) # (batchsize, num_samples, 2)
matched_shape = (inds_g.shape[0], inds_g.shape[1], cdf.shape[-1])
cdf_g = torch.gather(cdf.unsqueeze(1).expand(matched_shape), 2, inds_g)
bins_g = torch.gather(bins.unsqueeze(1).expand(matched_shape), 2, inds_g)
denom = cdf_g[..., 1] - cdf_g[..., 0]
denom = torch.where(denom < 1e-5, torch.ones_like(denom), denom)
t = (u - cdf_g[..., 0]) / denom
samples = bins_g[..., 0] + t * (bins_g[..., 1] - bins_g[..., 0])
return samples
|
project/1-process_data/CSVtolabel.py | ScottAI/2016CCF_BDCI_Sougou | 214 | 12757028 | # coding=utf-8
"""
Take the CSV file produced in the previous step, strip out the search (query) text and the three attributes, and save them to their corresponding files.
Mind the file paths.
"""
import pandas as pd
#path of the train and test files
trainname = 'user_tag_query.10W.TRAIN.csv'
testname = 'user_tag_query.10W.TEST.csv'
data = pd.read_csv(trainname)
print data.info()
#generate three labels for age/gender/education
data.age.to_csv("train_age.csv", index=False)
data.Gender.to_csv("train_gender.csv", index=False)
data.Education.to_csv("train_education.csv", index=False)
#generate trainfile's text file
data.QueryList.to_csv("train_querylist.csv", index=False)
data = pd.read_csv(testname)
print data.info()
#generate testfile's text file
data.QueryList.to_csv("test_querylist.csv", index=False)
|
test/dlc_tests/container_tests/bin/security_checks.py | Elizaaaaa/deep-learning-containers | 383 | 12757041 | import sys
import logging
import os
import time
import calendar
LOGGER = logging.getLogger(__name__)
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
def main():
home_dir = os.path.expanduser('~')
check_that_cache_dir_is_removed(home_dir)
check_that_global_tmp_dir_is_empty()
check_vim_info_does_not_exists(home_dir)
check_bash_history(home_dir)
check_if_any_files_in_subfolder_with_mask_was_last_modified_before_the_boottime(home_dir,
"history",
recursive=False)
check_if_any_files_in_subfolder_with_mask_was_last_modified_before_the_boottime("/var/lib/cloud/instances/")
return 0
def check_that_cache_dir_is_removed(home_dir):
cache_dir_path = os.path.join(home_dir, ".cache")
if os.path.exists(cache_dir_path):
content_of_cache_dir = [f for f in os.listdir(cache_dir_path)]
LOGGER.info("Contents of cache directory: %s", content_of_cache_dir)
if len(content_of_cache_dir) > 1:
raise ValueError("cache dir includes more than 1 file (not only motd)")
if not content_of_cache_dir[0].startswith("pip"):
raise ValueError("cache dir include file that it probably should not have: {}"
.format(content_of_cache_dir[0]))
def check_that_global_tmp_dir_is_empty():
global_tmp_dir_path = "/tmp/"
global_tmp_dir_content = [f for f in os.listdir(global_tmp_dir_path)]
for f in global_tmp_dir_content:
if not f.startswith(".") and "system" not in f.lower() and "dkms" not in f.lower() and "ccNPSUr9.s" not in f and "hsperfdata" not in f:
raise ValueError("/tmp folder includes file that probably should not be there: {}".format(f))
def check_vim_info_does_not_exists(home_dir):
viminfo_path = os.path.join(home_dir, ".viminfo")
if os.path.exists(viminfo_path):
raise ValueError("{} still exists".format(viminfo_path))
def check_bash_history(home_dir):
bash_history_path = os.path.join(home_dir, ".bash_history")
if os.path.exists(bash_history_path):
with open(bash_history_path, "r") as bash_history_file:
if bash_history_file.read():
raise ValueError("{} contains history".format(bash_history_path))
def check_if_any_files_in_subfolder_with_mask_was_last_modified_before_the_boottime(folder, mask=None, recursive=True):
uptime_seconds = 0
if recursive:
# Recursive travel and get all files under given folder
all_files = [os.path.join(dp, f) for dp, dn, filenames in os.walk(folder) for f in filenames]
else:
all_files = [f for f in os.listdir(folder) if os.path.isfile(os.path.join(folder, f))]
# Get the bootime
with open('/proc/uptime', 'r') as uptime_process:
uptime_seconds = int(round(float(uptime_process.readline().split()[0])))
current_time_seconds = int(calendar.timegm(time.gmtime()))
boot_time_seconds = current_time_seconds - uptime_seconds
# Filter the files need to be checked
if mask is not None:
all_files = [f for f in all_files if mask in f]
for f in all_files:
last_modified_time_seconds = int(round(os.path.getmtime(f)))
if last_modified_time_seconds < boot_time_seconds:
raise ValueError("Looks like {} was modified before the current boot".format(f))
if __name__ == "__main__":
try:
sys.exit(main())
except KeyboardInterrupt:
pass
|
ivy/array/wrapping.py | VedPatwardhan/ivy | 681 | 12757066 | <reponame>VedPatwardhan/ivy
# local
import ivy
# global
from typing import Callable, Type, List, Iterable, Optional
from types import ModuleType
def _wrap_function(function_name: str) -> Callable:
"""Wraps the function called `function_name`.
Parameters
----------
function_name
the name of the function e.g. "abs", "mean" etc.
Returns
-------
new_function
the wrapped function.
Examples
--------
>>> ivy.set_backend("torch")
>>> from ivy.array.wrapping import _wrap_function
>>> absolute = _wrap_function("abs")
>>> x = ivy.array([-1])
>>> print(absolute(x))
ivy.array([1])
"""
def new_function(self, *args, **kwargs):
"""Add the data of the current array from which the instance function is invoked
as the first arg parameter or kwarg parameter. Return the new function with
the name function_name and the new args variable or kwargs as the new inputs.
"""
function = ivy.__dict__[function_name]
# gives us the position and name of the array argument
data_idx = function.array_spec[0]
if len(args) > data_idx[0][0]:
args = ivy.copy_nest(args, to_mutable=True)
data_idx = [data_idx[0][0]] + [
0 if idx is int else idx for idx in data_idx[1:]
]
ivy.insert_into_nest_at_index(args, data_idx, self._data)
else:
kwargs = ivy.copy_nest(kwargs, to_mutable=True)
data_idx = [data_idx[0][1]] + [
0 if idx is int else idx for idx in data_idx[1:]
]
ivy.insert_into_nest_at_index(kwargs, data_idx, self._data)
return function(*args, **kwargs)
return new_function
def add_ivy_array_instance_methods(
cls: Type[ivy.Array], modules: List[ModuleType], to_ignore: Optional[Iterable] = ()
):
"""Loop over all ivy modules such as activations, general, etc. and add
the module functions to ivy arrays as instance methods using _wrap_function.
Parameters
----------
cls
the class we want to add the instance methods to.
modules
the modules to loop over: activations, general etc.
to_ignore
any items we don't want to add an instance method for.
Examples
--------
As shown, `add_ivy_array_instance_methods` adds all the appropriate functions from
the activations module as instance methods to our toy `ArrayExample` class:
>>> from ivy.functional.ivy import activations
>>> class ArrayExample: \
pass
>>> ivy.add_ivy_array_instance_methods(ArrayExample, [activations])
>>> print(hasattr(ArrayExample, "relu"), hasattr(ArrayExample, "softmax"))
True True
"""
for module in modules:
for key, value in module.__dict__.items():
# we skip the cases where the function is protected, the instance
# method has already been added manually and a few other cases
if (
key.startswith("_")
or key[0].isupper()
or not callable(value)
or key in cls.__dict__
or hasattr(cls, key)
or key in to_ignore
or key not in ivy.__dict__
):
continue
try:
setattr(cls, key, _wrap_function(key))
except AttributeError:
pass
|
hayaku_get_merged_dict.py | hayaku/hayaku | 369 | 12757107 | <filename>hayaku_get_merged_dict.py
# -*- coding: utf-8 -*-
import os
def import_dir(name, fromlist=()):
PACKAGE_EXT = '.sublime-package'
dirname = os.path.basename(os.path.dirname(os.path.realpath(__file__)))
if dirname.endswith(PACKAGE_EXT):
dirname = dirname[:-len(PACKAGE_EXT)]
return __import__('{0}.{1}'.format(dirname, name), fromlist=fromlist)
try:
imp = import_dir('hayaku_dict_driver', ('parse_dict_json',))
get_css_dict, merge_dict, merge_aliases = imp.get_css_dict, imp.merge_dict, imp.merge_aliases
except ImportError:
from hayaku_dict_driver import get_css_dict, merge_dict, merge_aliases
hayaku_extra_dicts_cache = {}
hayaku_extra_aliases_cache = {}
hayaku_dict_cache = {}
hayaku_aliases_cache = {}
def get_merged_dict(options):
global hayaku_extra_dicts_cache
global hayaku_extra_aliases_cache
global hayaku_dict_cache
global hayaku_aliases_cache
settings = options.get('settings')
cache_key = 'CSS'
preprocessor = options.get('CSS_preprocessor')
if preprocessor:
cache_key = preprocessor
result_dict, result_aliases = get_css_dict(preprocessor=preprocessor)
new_dict = {}
new_aliases = {}
extra_scopes = ['user', 'syntax', 'project'] + settings.get('hayaku_extra_scopes', [])
for scope in extra_scopes:
dict_name = 'hayaku_' + scope + '_dict'
alias_name = 'hayaku_' + scope + '_aliases'
new_dict[dict_name] = settings.get(dict_name, {})
new_aliases[alias_name] = settings.get(alias_name, {})
# TODO: use a function for those two if-else noodles
if 'CSS' in new_dict[dict_name]:
if preprocessor in new_dict[dict_name]:
new_dict[dict_name] = merge_dict(new_dict[dict_name].get('CSS'), new_dict[dict_name].get(preprocessor))
else:
new_dict[dict_name] = new_dict[dict_name].get('CSS')
elif preprocessor in new_dict[dict_name]:
new_dict[dict_name] = new_dict[dict_name].get(preprocessor)
if 'CSS' in new_aliases[alias_name]:
if preprocessor in new_aliases[alias_name]:
new_aliases[alias_name] = merge_dict(new_aliases[alias_name].get('CSS'), new_aliases[alias_name].get(preprocessor))
else:
new_aliases[alias_name] = new_aliases[alias_name].get('CSS')
elif preprocessor in new_aliases[alias_name]:
new_aliases[alias_name] = new_aliases[alias_name].get(preprocessor)
if new_dict != hayaku_extra_dicts_cache.get(cache_key):
hayaku_extra_dicts_cache[cache_key] = new_dict
for dict_scope in dict(hayaku_extra_dicts_cache.get(cache_key)):
result_dict = merge_dict(result_dict, hayaku_extra_dicts_cache.get(cache_key).get(dict_scope))
hayaku_dict_cache[cache_key] = result_dict
elif cache_key in hayaku_dict_cache:
result_dict = hayaku_dict_cache[cache_key]
if new_aliases != hayaku_extra_aliases_cache.get(cache_key):
hayaku_extra_aliases_cache[cache_key] = new_aliases
for aliases_scope in dict(hayaku_extra_aliases_cache.get(cache_key)):
result_aliases = merge_aliases(result_aliases, hayaku_extra_aliases_cache.get(cache_key).get(aliases_scope))
hayaku_aliases_cache[cache_key] = result_aliases
elif cache_key in hayaku_aliases_cache:
result_aliases = hayaku_aliases_cache[cache_key]
return result_dict, result_aliases
|
RecoLocalCalo/HcalRecProducers/python/hfQIE10Reco_cfi.py | Purva-Chaudhari/cmssw | 852 | 12757129 | import FWCore.ParameterSet.Config as cms
import RecoLocalCalo.HcalRecProducers.hfsimplereco_cfi as _mod
hfQIE10Reco = _mod.hfsimplereco.clone(
digiLabel = "simHcalUnsuppressedDigis:HFQIE10DigiCollection",
Subdetector = 'HFQIE10',
firstSample = 2,
samplesToAdd = 1
)
|
drivers/phantomjs.py | RajatNair/the-endorser | 309 | 12757162 | import os
from drivers import IPHONE_UA
from selenium import webdriver
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
def get(driver_path):
if not os.path.exists(driver_path):
raise FileNotFoundError("Could not find phantomjs executable at %s. Download it for your platform at http://phantomjs.org/download.html", driver_path)
dcap = dict(DesiredCapabilities.PHANTOMJS)
dcap["phantomjs.page.settings.userAgent"] = IPHONE_UA
driver = webdriver.PhantomJS(desired_capabilities=dcap, executable_path=driver_path)
driver.set_window_size(1024, 3000)
return driver
|
mmdet/datasets/samplers/__init__.py | Karybdis/mmdetection-mini | 834 | 12757206 |
from .group_sampler import GroupSampler
__all__ = ['GroupSampler']
|
datasets/germaner/germaner.py | dkajtoch/datasets | 10,608 | 12757227 | <gh_stars>1000+
# coding=utf-8
# Copyright 2020 HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
import datasets
_DESCRIPTION = """\
GermaNER is a freely available statistical German Named Entity Tagger based on conditional random fields(CRF). The tagger is trained and evaluated on the NoSta-D Named Entity dataset, which was used in the GermEval 2014 for named entity recognition. The tagger comes close to the performance of the best (proprietary) system in the competition with 77% F-measure (this is the latest result; the one reported in the paper is 76%) test set performance on the four standard NER classes (PERson, LOCation, ORGanisation and OTHer).
We describe a range of features and their influence on German NER classification and provide a comparative evaluation and some analysis of the results. The software components, the training data and all data used for feature generation are distributed under permissive licenses, thus this tagger can be used in academic and commercial settings without restrictions or fees. The tagger is available as a command-line tool and as an Apache UIMA component.
"""
_HOMEPAGE_URL = "https://github.com/tudarmstadt-lt/GermaNER"
_URL = "https://raw.githubusercontent.com/tudarmstadt-lt/GermaNER/a206b554feca263d740302449fff0776c66d0040/data/v0.9.1/full_train.tsv"
_CITATION = """\
@inproceedings{Benikova2015GermaNERFO,
title={GermaNER: Free Open German Named Entity Recognition Tool},
author={<NAME> and <NAME> and <NAME> and <NAME>},
booktitle={GSCL},
year={2015}
}
"""
class GermaNER(datasets.GeneratorBasedBuilder):
VERSION = datasets.Version("0.9.1")
def _info(self):
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features(
{
"id": datasets.Value("string"),
"tokens": datasets.Sequence(datasets.Value("string")),
"ner_tags": datasets.Sequence(
datasets.features.ClassLabel(
names=[
"B-LOC",
"B-ORG",
"B-OTH",
"B-PER",
"I-LOC",
"I-ORG",
"I-OTH",
"I-PER",
"O",
]
)
),
},
),
supervised_keys=None,
homepage=_HOMEPAGE_URL,
citation=_CITATION,
)
def _split_generators(self, dl_manager):
path = dl_manager.download_and_extract(_URL)
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
gen_kwargs={"datapath": path},
)
]
def _generate_examples(self, datapath):
sentence_counter = 0
with open(datapath, encoding="utf-8") as f:
current_words = []
current_labels = []
for row in f:
row = row.rstrip()
row_split = row.split()
if len(row_split) == 2:
token, label = row_split
current_words.append(token)
current_labels.append(label)
else:
if not current_words:
continue
                    assert len(current_words) == len(current_labels), "word len doesn't match label length"
sentence = (
sentence_counter,
{
"id": str(sentence_counter),
"tokens": current_words,
"ner_tags": current_labels,
},
)
sentence_counter += 1
current_words = []
current_labels = []
yield sentence
# if something remains:
if current_words:
sentence = (
sentence_counter,
{
"id": str(sentence_counter),
"tokens": current_words,
"ner_tags": current_labels,
},
)
yield sentence
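# Illustrative usage once the script is registered with the `datasets` library (the
# dataset name "germaner" is an assumption based on this file's name):
#   ds = datasets.load_dataset("germaner", split="train")
#   print(ds[0]["tokens"], ds[0]["ner_tags"])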
|
venv/Lib/site-packages/nipype/interfaces/afni/tests/test_auto_Qwarp.py | richung99/digitizePlots | 585 | 12757240 | <reponame>richung99/digitizePlots
# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from ..preprocess import Qwarp
def test_Qwarp_inputs():
input_map = dict(
Qfinal=dict(
argstr="-Qfinal",
),
Qonly=dict(
argstr="-Qonly",
),
allineate=dict(
argstr="-allineate",
),
allineate_opts=dict(
argstr="-allineate_opts %s",
requires=["allineate"],
),
allsave=dict(
argstr="-allsave",
xor=["nopadWARP", "duplo", "plusminus"],
),
args=dict(
argstr="%s",
),
ballopt=dict(
argstr="-ballopt",
xor=["workhard", "boxopt"],
),
base_file=dict(
argstr="-base %s",
copyfile=False,
extensions=None,
mandatory=True,
),
baxopt=dict(
argstr="-boxopt",
xor=["workhard", "ballopt"],
),
blur=dict(
argstr="-blur %s",
),
duplo=dict(
argstr="-duplo",
xor=["gridlist", "maxlev", "inilev", "iniwarp", "plusminus", "allsave"],
),
emask=dict(
argstr="-emask %s",
copyfile=False,
extensions=None,
),
environ=dict(
nohash=True,
usedefault=True,
),
expad=dict(
argstr="-expad %d",
xor=["nopadWARP"],
),
gridlist=dict(
argstr="-gridlist %s",
copyfile=False,
extensions=None,
xor=["duplo", "plusminus"],
),
hel=dict(
argstr="-hel",
xor=["nmi", "mi", "lpc", "lpa", "pear"],
),
in_file=dict(
argstr="-source %s",
copyfile=False,
extensions=None,
mandatory=True,
),
inilev=dict(
argstr="-inilev %d",
xor=["duplo"],
),
iniwarp=dict(
argstr="-iniwarp %s",
xor=["duplo"],
),
iwarp=dict(
argstr="-iwarp",
xor=["plusminus"],
),
lpa=dict(
argstr="-lpa",
xor=["nmi", "mi", "lpc", "hel", "pear"],
),
lpc=dict(
argstr="-lpc",
position=-2,
xor=["nmi", "mi", "hel", "lpa", "pear"],
),
maxlev=dict(
argstr="-maxlev %d",
position=-1,
xor=["duplo"],
),
mi=dict(
argstr="-mi",
xor=["mi", "hel", "lpc", "lpa", "pear"],
),
minpatch=dict(
argstr="-minpatch %d",
),
nmi=dict(
argstr="-nmi",
xor=["nmi", "hel", "lpc", "lpa", "pear"],
),
noXdis=dict(
argstr="-noXdis",
),
noYdis=dict(
argstr="-noYdis",
),
noZdis=dict(
argstr="-noZdis",
),
noneg=dict(
argstr="-noneg",
),
nopad=dict(
argstr="-nopad",
),
nopadWARP=dict(
argstr="-nopadWARP",
xor=["allsave", "expad"],
),
nopenalty=dict(
argstr="-nopenalty",
),
nowarp=dict(
argstr="-nowarp",
),
noweight=dict(
argstr="-noweight",
),
num_threads=dict(
nohash=True,
usedefault=True,
),
out_file=dict(
argstr="-prefix %s",
extensions=None,
name_source=["in_file"],
name_template="ppp_%s",
),
out_weight_file=dict(
argstr="-wtprefix %s",
extensions=None,
),
outputtype=dict(),
overwrite=dict(
argstr="-overwrite",
),
pblur=dict(
argstr="-pblur %s",
),
pear=dict(
argstr="-pear",
),
penfac=dict(
argstr="-penfac %f",
),
plusminus=dict(
argstr="-plusminus",
xor=["duplo", "allsave", "iwarp"],
),
quiet=dict(
argstr="-quiet",
xor=["verb"],
),
resample=dict(
argstr="-resample",
),
verb=dict(
argstr="-verb",
xor=["quiet"],
),
wball=dict(
argstr="-wball %s",
xor=["wmask"],
),
weight=dict(
argstr="-weight %s",
extensions=None,
),
wmask=dict(
argstr="-wpass %s %f",
xor=["wball"],
),
workhard=dict(
argstr="-workhard",
xor=["boxopt", "ballopt"],
),
)
inputs = Qwarp.input_spec()
for key, metadata in list(input_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(inputs.traits()[key], metakey) == value
def test_Qwarp_outputs():
output_map = dict(
base_warp=dict(
extensions=None,
),
source_warp=dict(
extensions=None,
),
warped_base=dict(
extensions=None,
),
warped_source=dict(
extensions=None,
),
weights=dict(
extensions=None,
),
)
outputs = Qwarp.output_spec()
for key, metadata in list(output_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(outputs.traits()[key], metakey) == value
|
data/allface_dataset.py | bruinxiong/Rotate-and-Render | 397 | 12757251 | import os
import math
import numpy as np
from PIL import Image
import skimage.transform as trans
import cv2
import torch
from data import dataset_info
from data.base_dataset import BaseDataset
import util.util as util
dataset_info = dataset_info()
class AllFaceDataset(BaseDataset):
@staticmethod
def modify_commandline_options(parser, is_train):
parser.add_argument('--no_pairing_check', action='store_true',
help='If specified, skip sanity check of correct label-image file pairing')
return parser
def cv2_loader(self, img_str):
img_array = np.frombuffer(img_str, dtype=np.uint8)
return cv2.imdecode(img_array, cv2.IMREAD_COLOR)
def fill_list(self, tmp_list):
length = len(tmp_list)
if length % self.opt.batchSize != 0:
end = math.ceil(length / self.opt.batchSize) * self.opt.batchSize
tmp_list = tmp_list + tmp_list[-1 * (end - length) :]
return tmp_list
def initialize(self, opt):
self.opt = opt
dataset_num = dataset_info.get_dataset(opt)
self.prefix = [dataset_info.prefix[num] for num in dataset_num]
file_list = [dataset_info.file_list[num] for num in dataset_num]
land_mark_list = [dataset_info.land_mark_list[num] for num in dataset_num]
self.params_dir = [dataset_info.params_dir[num] for num in dataset_num]
self.folder_level = [dataset_info.folder_level[num] for num in dataset_num]
self.num_datasets = len(file_list)
assert len(land_mark_list) == self.num_datasets, \
            'num of landmark dirs should equal the num of datasets'
assert len(self.params_dir) == self.num_datasets, \
'num of params_dir should be the num of datasets'
self.dataset_lists = []
self.landmark_paths = []
self.sizes = []
for n in range(self.num_datasets):
with open(file_list[n]) as f:
img_lists = f.readlines()
img_lists = self.fill_list(img_lists)
self.sizes.append(len(img_lists))
self.dataset_lists.append(sorted(img_lists))
with open(land_mark_list[n]) as f:
landmarks = f.readlines()
landmarks = self.fill_list(landmarks)
self.landmark_paths.append(sorted(landmarks))
self.dataset_size = min(self.sizes)
self.initialized = False
def get_landmarks(self, landmark, img_list):
landmark_split = landmark.strip().split(' ')
filename1_without_ext = os.path.basename(img_list.strip())
filename2_without_ext = os.path.basename(landmark_split[0])
assert (filename1_without_ext == filename2_without_ext), \
"The image_path %s and params_path %s don't match." % \
(img_list, landmark_split[0])
label = landmark_split[1]
landmarks = landmark_split[2:]
landmarks = list(map(float, landmarks))
landmarks_array = np.array(landmarks).reshape(5, 2)
return landmarks_array, label
def get_param_file(self, img_list, dataset_num):
img_name = os.path.splitext(img_list)[0]
name_split = img_name.split("/")
folder_level = self.folder_level[dataset_num]
param_folder = os.path.join(self.params_dir[dataset_num],
"/".join([name_split[i] for i in range(len(name_split) - folder_level, len(name_split))]) + ".txt")
# params = np.loadtxt(param_folder)
return param_folder
def paths_match(self, path1, path2):
filename1_without_ext = os.path.splitext(os.path.basename(path1)[-10:])[0]
filename2_without_ext = os.path.splitext(os.path.basename(path2)[-10:])[0]
return filename1_without_ext == filename2_without_ext
def affine_align(self, img, landmark=None, **kwargs):
M = None
h, w, c = img.shape
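        # The five reference points below follow the commonly used 112x112
        # 5-point face-alignment template (left eye, right eye, nose tip,
        # left/right mouth corners); they are rescaled to the configured
        # crop size a few lines further down.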
src = np.array([
[38.2946, 51.6963],
[73.5318, 51.5014],
[56.0252, 71.7366],
[41.5493, 92.3655],
[70.7299, 92.2041]], dtype=np.float32)
src = src * 290 / 112
src[:, 0] += 50
src[:, 1] += 60
src = src / 400 * self.opt.crop_size
dst = landmark
# dst = landmark.astype(np.float32)
tform = trans.SimilarityTransform()
tform.estimate(dst, src)
M = tform.params[0:2, :]
warped = cv2.warpAffine(img, M, (self.opt.crop_size, self.opt.crop_size), borderValue=0.0)
return warped, M
def __getitem__(self, index):
# Label Image
randnum = np.random.randint(sum(self.sizes))
dataset_num = np.random.randint(self.num_datasets)
image_path = self.dataset_lists[dataset_num][index].strip()
image_path = os.path.join(self.prefix[dataset_num], image_path)
img = cv2.imread(image_path)
if img is None:
raise Exception('None Image')
param_path = self.get_param_file(image_path, dataset_num)
# img = cv2.imread(image_path)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
M = None
landmark_path = self.landmark_paths[dataset_num][index].strip()
landmarks, label = self.get_landmarks(landmark_path, image_path)
wrapped_img, M = self.affine_align(img, landmarks)
M = torch.from_numpy(M).float()
wrapped_img = wrapped_img.transpose(2, 0, 1) / 255.0
wrapped_img = torch.from_numpy(wrapped_img).float()
input_dict = {
'image': wrapped_img,
'param_path': param_path,
'M': M,
'path': image_path
}
# Give subclasses a chance to modify the final output
self.postprocess(input_dict)
return input_dict
def postprocess(self, input_dict):
return input_dict
def __len__(self):
return self.dataset_size
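# Hedged, self-contained sketch (illustrative values only): the padding rule
# used by AllFaceDataset.fill_list above, shown on a plain list. The tail of
# the list is repeated so the length becomes a multiple of the batch size.
# Only runs when this module is executed directly.
if __name__ == "__main__":
    batch_size = 4                      # assumed value for illustration
    tmp_list = list(range(10))
    if len(tmp_list) % batch_size != 0:
        end = math.ceil(len(tmp_list) / batch_size) * batch_size
        tmp_list = tmp_list + tmp_list[-1 * (end - len(tmp_list)):]
    print(len(tmp_list))                # -> 12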
|
app/YtManager/settings.py | chibicitiberiu/ytsm | 298 | 12757254 | """
Django settings for YtManager project.
Generated by 'django-admin startproject' using Django 1.11.11.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
import sys
import logging
from os.path import dirname as up
#
# Basic Django stuff
#
ALLOWED_HOSTS = ['*']
SESSION_COOKIE_AGE = 3600 * 30  # 30 hours
# Application definition
INSTALLED_APPS = [
'django.contrib.auth',
'dynamic_preferences',
'dynamic_preferences.users.apps.UserPreferencesConfig',
'YtManagerApp.apps.YtManagerAppConfig',
'crispy_forms',
'django.contrib.admin',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.humanize',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'YtManager.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.template.context_processors.media',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'dynamic_preferences.processors.global_preferences',
],
},
},
]
WSGI_APPLICATION = 'YtManager.wsgi.application'
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
LOGIN_REDIRECT_URL = '/'
LOGIN_URL = '/login'
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Thumbnails
THUMBNAIL_SIZE_VIDEO = (410, 230)
THUMBNAIL_SIZE_SUBSCRIPTION = (250, 250)
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
MEDIA_URL = '/media/'
# Misc Django stuff
CRISPY_TEMPLATE_PACK = 'bootstrap4'
LOG_FORMAT = '%(asctime)s|%(process)d|%(thread)d|%(name)s|%(filename)s|%(lineno)d|%(levelname)s|%(message)s'
CONSOLE_LOG_FORMAT = '%(asctime)s | %(name)s | %(filename)s:%(lineno)d | %(levelname)s | %(message)s'
#
# Directories
#
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
PROJECT_ROOT = up(up(os.path.dirname(__file__))) # Project root
BASE_DIR = up(os.path.dirname(__file__)) # Base dir of the application
CONFIG_DIR = os.getenv("YTSM_CONFIG_DIR", os.path.join(PROJECT_ROOT, "config"))
DATA_DIR = os.getenv("YTSM_DATA_DIR", os.path.join(PROJECT_ROOT, "data"))
STATIC_ROOT = os.path.join(PROJECT_ROOT, "static")
MEDIA_ROOT = os.path.join(DATA_DIR, 'media')
#
# Defaults
#
_DEFAULT_DEBUG = False
_DEFAULT_SECRET_KEY = '<KEY>'
_DEFAULT_DATABASE = {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(DATA_DIR, 'ytmanager.db'),
'HOST': None,
'USER': None,
'PASSWORD': None,
'PORT': None,
}
CONFIG_ERRORS = []
CONFIG_WARNINGS = []
# These are just to make inspector happy, they will be set in the load_config_ini() method
DEBUG = None
SECRET_KEY = None
DATABASES = None
LOG_LEVEL = None
#
# Config parser options
#
CFG_PARSER_OPTS = {
'PROJECT_ROOT': PROJECT_ROOT,
'BASE_DIR': BASE_DIR,
'CONFIG_DIR': CONFIG_DIR,
'DATA_DIR': DATA_DIR,
}
#
# Load globals from config.ini
#
def get_global_opt(name, cfgparser, env_variable=None, fallback=None, boolean=False, integer=False):
"""
Reads a configuration option, in the following order:
1. environment variable
2. config parser
3. fallback
:param integer:
:param cfgparser:
:param name:
:param env_variable:
:param fallback:
:param boolean:
:return:
"""
# Get from environment variable
if env_variable is not None:
value = os.getenv(env_variable)
if value is not None and boolean:
return value.lower() in ['true', 't', 'on', 'yes', 'y', '1']
elif value is not None and integer:
try:
return int(value)
except ValueError:
CONFIG_WARNINGS.append(f'Environment variable {env_variable}: value must be an integer value!')
elif value is not None:
return value
# Get from config parser
if boolean:
try:
return cfgparser.getboolean('global', name, fallback=fallback, vars=CFG_PARSER_OPTS)
except ValueError:
CONFIG_WARNINGS.append(f'config.ini file: Value set for option global.{name} is not valid! '
f'Valid options: true, false, on, off.')
return fallback
if integer:
try:
return cfgparser.getint('global', name, fallback=fallback, vars=CFG_PARSER_OPTS)
except ValueError:
CONFIG_WARNINGS.append(f'config.ini file: Value set for option global.{name} must be an integer number! ')
return fallback
return cfgparser.get('global', name, fallback=fallback, vars=CFG_PARSER_OPTS)
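# Hedged usage sketch (illustrative; the guard below means Django never runs it):
# resolution order is environment variable, then config.ini, then the fallback.
if __name__ == "__main__":
    from configparser import ConfigParser
    _demo_cfg = ConfigParser()
    # With YTSM_DEBUG unset and no [global] Debug key, the fallback is returned.
    print(get_global_opt('Debug', _demo_cfg, env_variable='YTSM_DEBUG',
                         fallback=False, boolean=True))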
def load_config_ini():
from configparser import ConfigParser
from YtManagerApp.utils.extended_interpolation_with_env import ExtendedInterpolatorWithEnv
import dj_database_url
try:
os.makedirs(DATA_DIR, exist_ok=True)
logging.info(f"Using data directory {DATA_DIR}")
except OSError as e:
print(f'CRITICAL ERROR! Cannot create data directory {DATA_DIR}! {e}', file=sys.stderr)
return
cfg = ConfigParser(allow_no_value=True, interpolation=ExtendedInterpolatorWithEnv())
cfg_file = os.path.join(CONFIG_DIR, "config.ini")
read_ok = cfg.read([cfg_file])
if cfg_file not in read_ok:
CONFIG_ERRORS.append(f'Configuration file {cfg_file} could not be read! Please make sure the file is in the '
'right place, and it has read permissions.')
# Debug
global DEBUG
DEBUG = get_global_opt('Debug', cfg, env_variable='YTSM_DEBUG', fallback=_DEFAULT_DEBUG, boolean=True)
# Secret key
# SECURITY WARNING: keep the secret key used in production secret!
global SECRET_KEY
SECRET_KEY = get_global_opt('SecretKey', cfg, env_variable='YTSM_SECRET_KEY', fallback=_DEFAULT_SECRET_KEY)
# Database
global DATABASES
DATABASES = {
'default': _DEFAULT_DATABASE
}
if cfg.has_option('global', 'DatabaseURL'):
DATABASES['default'] = dj_database_url.parse(cfg.get('global', 'DatabaseURL', vars=CFG_PARSER_OPTS),
conn_max_age=600)
else:
DATABASES['default'] = {
'ENGINE': get_global_opt('DatabaseEngine', cfg,
env_variable='YTSM_DB_ENGINE', fallback=_DEFAULT_DATABASE['ENGINE']),
'NAME': get_global_opt('DatabaseName', cfg,
env_variable='YTSM_DB_NAME', fallback=_DEFAULT_DATABASE['NAME']),
'HOST': get_global_opt('DatabaseHost', cfg,
env_variable='YTSM_DB_HOST', fallback=_DEFAULT_DATABASE['HOST']),
'USER': get_global_opt('DatabaseUser', cfg,
env_variable='YTSM_DB_USER', fallback=_DEFAULT_DATABASE['USER']),
'PASSWORD': get_global_opt('DatabasePassword', cfg,
env_variable='YTSM_DB_PASSWORD', fallback=_DEFAULT_DATABASE['PASSWORD']),
'PORT': get_global_opt('DatabasePort', cfg,
env_variable='YTSM_DB_PORT', fallback=_DEFAULT_DATABASE['PORT']),
}
# Log settings
global LOG_LEVEL
log_level_str = get_global_opt('LogLevel', cfg, env_variable='YTSM_LOG_LEVEL', fallback='INFO')
try:
LOG_LEVEL = getattr(logging, log_level_str)
except AttributeError:
CONFIG_WARNINGS.append(f'Invalid log level {log_level_str}. '
f'Valid options are: DEBUG, INFO, WARN, ERROR, CRITICAL.')
print("Invalid log level " + LOG_LEVEL)
LOG_LEVEL = logging.INFO
load_config_ini()
|
tests/test_cms_auth.py | Allen7D/mini-shop-server | 533 | 12757266 | # _*_ coding: utf-8 _*_
"""
Created by Allen7D on 2020/4/13.
"""
from app import create_app
from tests.utils import get_authorization
__author__ = 'Allen7D'
app = create_app()
def test_create_auth_list():
with app.test_client() as client:
rv = client.post('/cms/auth/append', headers={
'Authorization': get_authorization()
}, json={
'group_id': 5,
'auth_ids': [1, 2, 3]
})
json_data = rv.get_json()
print(json_data)
def test_delete_auth_list():
with app.test_client() as client:
rv = client.post('/cms/auth/remove', headers={
'Authorization': get_authorization()
}, json={
'group_id': 5,
'auth_ids': [1, 2, 3]
})
json_data = rv.get_json()
print(json_data)
test_create_auth_list()
test_delete_auth_list()
|
components/contrib/CatBoost/Train_classifier/from_CSV/component.py | Iuiu1234/pipelines | 2,860 | 12757269 | <reponame>Iuiu1234/pipelines
from kfp.components import InputPath, OutputPath, create_component_from_func
def catboost_train_classifier(
training_data_path: InputPath('CSV'),
model_path: OutputPath('CatBoostModel'),
starting_model_path: InputPath('CatBoostModel') = None,
label_column: int = 0,
loss_function: str = 'Logloss',
num_iterations: int = 500,
learning_rate: float = None,
depth: int = 6,
random_seed: int = 0,
cat_features: list = None,
text_features: list = None,
additional_training_options: dict = {},
):
'''Train a CatBoost classifier model.
Args:
training_data_path: Path for the training data in CSV format.
model_path: Output path for the trained model in binary CatBoostModel format.
starting_model_path: Path for the existing trained model to start from.
label_column: Column containing the label data.
loss_function: The metric to use in training and also selector of the machine learning
problem to solve. Default = 'Logloss'
num_iterations: Number of trees to add to the ensemble.
        learning_rate: Step size shrinkage used in update to prevent overfitting.
Default value is selected automatically for binary classification with other parameters set to default.
In all other cases default is 0.03.
depth: Depth of a tree. All trees are the same depth. Default = 6
random_seed: Random number seed. Default = 0
cat_features: A list of Categorical features (indices or names).
text_features: A list of Text features (indices or names).
additional_training_options: A dictionary with additional options to pass to CatBoostClassifier
Outputs:
model: Trained model in binary CatBoostModel format.
Annotations:
author: <NAME> <<EMAIL>>
'''
import tempfile
from pathlib import Path
from catboost import CatBoostClassifier, Pool
column_descriptions = {label_column: 'Label'}
column_description_path = tempfile.NamedTemporaryFile(delete=False).name
with open(column_description_path, 'w') as column_description_file:
for idx, kind in column_descriptions.items():
column_description_file.write('{}\t{}\n'.format(idx, kind))
train_data = Pool(
training_data_path,
column_description=column_description_path,
has_header=True,
delimiter=',',
)
model = CatBoostClassifier(
iterations=num_iterations,
depth=depth,
learning_rate=learning_rate,
loss_function=loss_function,
random_seed=random_seed,
verbose=True,
**additional_training_options,
)
model.fit(
train_data,
cat_features=cat_features,
text_features=text_features,
init_model=starting_model_path,
#verbose=False,
#plot=True,
)
Path(model_path).parent.mkdir(parents=True, exist_ok=True)
model.save_model(model_path)
if __name__ == '__main__':
catboost_train_classifier_op = create_component_from_func(
catboost_train_classifier,
output_component_file='component.yaml',
base_image='python:3.7',
packages_to_install=['catboost==0.23'],
annotations={
"author": "<NAME> <<EMAIL>>",
"canonical_location": "https://raw.githubusercontent.com/Ark-kun/pipeline_components/master/components/CatBoost/Train_classifier/from_CSV/component.yaml",
},
)
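# Hedged usage sketch (comments only; names are illustrative). Once the
# component.yaml generated above exists, the op could be wired into a Kubeflow
# Pipelines pipeline roughly like this:
#
#   import kfp
#   catboost_train_op = kfp.components.load_component_from_file('component.yaml')
#
#   @kfp.dsl.pipeline(name='catboost-train-demo')
#   def demo_pipeline(training_data):
#       catboost_train_op(
#           training_data=training_data,  # CSV input; the '_path' suffix is dropped
#           label_column=0,
#           num_iterations=100,
#       )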
|
Code/tests/test_expand_repo.py | macmule/autopkg | 855 | 12757275 | <reponame>macmule/autopkg
#!/usr/local/autopkg/python
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import imp
import os
import unittest
autopkg = imp.load_source(
"autopkg", os.path.join(os.path.dirname(__file__), "..", "autopkg")
)
class TestExpandRepo(unittest.TestCase):
"""Test cases for expansion of recipe repos for add/delete/update."""
def test_expand_single_autopkg_org_urls(self):
"""Expand single part short repo URLs in the AutoPkg org on GitHub"""
url = autopkg.expand_repo_url("recipes")
self.assertEqual(url, "https://github.com/autopkg/recipes")
url = autopkg.expand_repo_url("bogus")
self.assertEqual(url, "https://github.com/autopkg/bogus")
def test_expand_multi_autopkg_org_urls(self):
"""Expand multi part short repo URLs in the AutoPkg org on GitHub"""
url = autopkg.expand_repo_url("autopkg/recipes")
self.assertEqual(url, "https://github.com/autopkg/recipes")
url = autopkg.expand_repo_url("autopkg/bogus")
self.assertEqual(url, "https://github.com/autopkg/bogus")
def test_expand_multi_other_org_urls(self):
"""Expand multi part short repo URLs in another org on GitHub"""
url = autopkg.expand_repo_url("eth-its/autopkg-mac-recipes")
self.assertEqual(url, "https://github.com/eth-its/autopkg-mac-recipes")
url = autopkg.expand_repo_url("facebook/Recipes-For-AutoPkg")
self.assertEqual(url, "https://github.com/facebook/Recipes-For-AutoPkg")
url = autopkg.expand_repo_url("bogusorg/bogusrepo")
self.assertEqual(url, "https://github.com/bogusorg/bogusrepo")
def test_expand_full_urls(self):
"""Expand full URLs"""
url = autopkg.expand_repo_url("http://github.com/eth-its/autopkg-mac-recipes")
self.assertEqual(url, "http://github.com/eth-its/autopkg-mac-recipes")
url = autopkg.expand_repo_url("https://github.com/eth-its/autopkg-mac-recipes")
self.assertEqual(url, "https://github.com/eth-its/autopkg-mac-recipes")
url = autopkg.expand_repo_url("http://github.com/facebook/Recipes-For-AutoPkg")
self.assertEqual(url, "http://github.com/facebook/Recipes-For-AutoPkg")
url = autopkg.expand_repo_url("https://github.com/facebook/Recipes-For-AutoPkg")
self.assertEqual(url, "https://github.com/facebook/Recipes-For-AutoPkg")
url = autopkg.expand_repo_url("http://github.com/bogusorg/bogusrepo")
self.assertEqual(url, "http://github.com/bogusorg/bogusrepo")
url = autopkg.expand_repo_url("https://github.com/bogusorg/bogusrepo")
self.assertEqual(url, "https://github.com/bogusorg/bogusrepo")
# TODO: Not yet implemented.
# def test_expand_file_urls(self):
# """Expand file URLs"""
# url = autopkg.expand_repo_url("file:///private/tmp/")
# self.assertEqual(url, "/private/tmp/")
# url = autopkg.expand_repo_url("file:///foo/bar/")
# self.assertEqual(url, "/foo/bar/")
def test_expand_file_paths(self):
"""Expand file paths"""
url = autopkg.expand_repo_url("/private/tmp/")
self.assertEqual(url, "/private/tmp")
url = autopkg.expand_repo_url("/foo/bar/")
self.assertEqual(url, "/foo/bar")
url = autopkg.expand_repo_url("/foo/bar")
self.assertEqual(url, "/foo/bar")
url = autopkg.expand_repo_url(
"~/Library/AutoPkg/RecipeRepos/com.github.autopkg.recipes"
)
self.assertEqual(
url, "~/Library/AutoPkg/RecipeRepos/com.github.autopkg.recipes"
)
url = autopkg.expand_repo_url("/Users/Shared/foo")
self.assertEqual(url, "/Users/Shared/foo")
if __name__ == "__main__":
unittest.main()
|
13-protocol-abc/double/double_test.py | SeirousLee/example-code-2e | 990 | 12757320 | <filename>13-protocol-abc/double/double_test.py
from typing import TYPE_CHECKING
import pytest
from double_protocol import double
def test_double_int() -> None:
given = 2
result = double(given)
assert result == given * 2
if TYPE_CHECKING:
reveal_type(given)
reveal_type(result)
def test_double_str() -> None:
given = 'A'
result = double(given)
assert result == given * 2
if TYPE_CHECKING:
reveal_type(given)
reveal_type(result)
def test_double_fraction() -> None:
from fractions import Fraction
given = Fraction(2, 5)
result = double(given)
assert result == given * 2
if TYPE_CHECKING:
reveal_type(given)
reveal_type(result)
def test_double_array() -> None:
from array import array
given = array('d', [1.0, 2.0, 3.14])
result = double(given)
if TYPE_CHECKING:
reveal_type(given)
reveal_type(result)
def test_double_nparray() -> None:
import numpy as np # type: ignore
given = np.array([[1, 2], [3, 4]])
result = double(given)
comparison = result == given * 2
assert comparison.all()
if TYPE_CHECKING:
reveal_type(given)
reveal_type(result)
def test_double_none() -> None:
given = None
with pytest.raises(TypeError):
double(given)
|
dpkt/edp.py | Vito-Swift/dpkt | 924 | 12757325 | <gh_stars>100-1000
"""Extreme Discovery Protocol."""
from __future__ import absolute_import
import dpkt
class EDP(dpkt.Packet):
__hdr__ = (
('version', 'B', 1),
('reserved', 'B', 0),
('hlen', 'H', 0),
('sum', 'H', 0),
('seq', 'H', 0),
('mid', 'H', 0),
('mac', '6s', b'')
)
def __bytes__(self):
if not self.sum:
self.sum = dpkt.in_cksum(dpkt.Packet.__bytes__(self))
return dpkt.Packet.__bytes__(self)
class TestEDP(object):
"""
Test basic EDP functionality.
"""
@classmethod
def setup_class(cls):
from binascii import unhexlify
cls.buf = unhexlify(
'01' # version
'00' # reserved
'013c' # hlen
'9e76' # sum
'001b' # seq
'0000' # mid
'080027' # mac
'2d90ed990200240000000000000000000000000f020207000000000000000000000000000000009901010445584f532d32000000000000000'
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'
'00000000000000000000000000000000099000004'
)
cls.p = EDP(cls.buf)
def test_version(self):
assert (self.p.version == 1)
def test_reserved(self):
assert (self.p.reserved == 0)
def test_hlen(self):
assert (self.p.hlen == 316)
def test_sum(self):
assert (self.p.sum == 40566)
def test_seq(self):
assert (self.p.seq == 27)
def test_mid(self):
assert (self.p.mid == 0)
def test_mac(self):
assert (self.p.mac == b"\x08\x00'-\x90\xed")
def test_bytes(self):
assert bytes(self.p) == self.buf
# force recalculation of the checksum
edp = EDP(self.buf)
edp.sum = 0
assert edp.sum == 0
assert bytes(edp) == self.buf
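# Hedged usage sketch (field values are illustrative): building an EDP header
# from keyword arguments; __bytes__ fills in the checksum while sum is 0.
# Only runs when this module is executed directly.
if __name__ == '__main__':
    pkt = EDP(seq=1, mid=0, mac=b'\x08\x00\x27\x2d\x90\xed')
    wire = bytes(pkt)
    print(len(wire), hex(pkt.sum))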
|
scripts/setJson.py | TadeasPilar/KiKit | 784 | 12757343 | <reponame>TadeasPilar/KiKit
#!/usr/bin/env python3
import click
import json
from collections import OrderedDict
def setKey(obj, path, value):
key = path[0] if isinstance(obj, dict) else int(path[0])
if len(path) == 1:
obj[key] = value
return
setKey(obj[key], path[1:], value)
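# Hedged illustration of setKey (kept as comments so the CLI below is
# unchanged; the object and paths are made up):
#
#   >>> obj = {"board": {"size": "A4"}, "layers": [{"visible": False}]}
#   >>> setKey(obj, ["board", "size"], "A3")
#   >>> setKey(obj, ["layers", "0", "visible"], True)
#   >>> obj["board"]["size"], obj["layers"][0]["visible"]
#   ('A3', True)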
@click.command()
@click.argument("input", type=click.File("r"))
@click.argument("output", type=click.File("w"))
@click.option("--property", "-s", type=str, multiple=True, help="<path>=<value>")
def run(input, output, property):
"""
Set a key to a value in JSON.
"""
obj = json.load(input, object_pairs_hook=OrderedDict)
for p in property:
path, value = tuple(p.split("="))
path = path.split(".")
value = json.loads(value, object_pairs_hook=OrderedDict)
setKey(obj, path, value)
json.dump(obj, output, indent=4)
if __name__ == "__main__":
run()
|
test/functional/feature_mandatory_coinbase.py | nondejus/elements | 947 | 12757344 | #!/usr/bin/env python3
# Copyright (c) 2014-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test mandatory coinbase feature"""
from binascii import b2a_hex
from test_framework.blocktools import create_coinbase
from test_framework.messages import CBlock, CProof, CTxOutValue, CTxOut
from test_framework.script import CScript, OP_RETURN
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal, assert_raises_rpc_error
mandatory_privkey = "<KEY>"
mandatory_address = "XP3bwB9jSxt58frSa3cJismgGL3F57ukUy"
#mandatory_pubkey = "<KEY>"
mandatory_script = "a914804b9fd9d6939c2e960b7aa31124a5d532f4e59c87"
def b2x(b):
return b2a_hex(b).decode('ascii')
def assert_template(node, block, expect, rehash=True):
if rehash:
block.hashMerkleRoot = block.calc_merkle_root()
rsp = node.getblocktemplate({'data': b2x(block.serialize()), 'mode': 'proposal', 'rules': 'segwit'})
assert_equal(rsp, expect)
class MandatoryCoinbaseTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
self.setup_clean_chain = True
# Non-zero coinbase outputs *must* match this. Not setting it means anything is allowed
self.extra_args = [["-con_mandatorycoinbase="+mandatory_script], []]
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
node0 = self.nodes[0]
node1 = self.nodes[1]
node0.importprivkey(mandatory_privkey)
self.log.info("generatetoaddress: Making blocks of various kinds, checking for rejection")
# Create valid blocks to get out of IBD and get some funds (subsidy goes to permitted addr)
node0.generatetoaddress(101, mandatory_address)
# Generating for another address will not work
assert_raises_rpc_error(-1, "CreateNewBlock: TestBlockValidity failed: bad-coinbase-txos", node0.generatetoaddress, 1, node0.getnewaddress())
# Have non-mandatory node make a template
self.sync_all()
tmpl = node1.getblocktemplate({'rules': ['segwit']})
# We make a block with OP_TRUE coinbase output that will fail on node0
coinbase_tx = create_coinbase(height=int(tmpl["height"]))
# sequence numbers must not be max for nLockTime to have effect
coinbase_tx.vin[0].nSequence = 2 ** 32 - 2
coinbase_tx.rehash()
block = CBlock()
block.nVersion = tmpl["version"]
block.hashPrevBlock = int(tmpl["previousblockhash"], 16)
block.nTime = tmpl["curtime"]
block.nBits = int(tmpl["bits"], 16)
block.nNonce = 0
block.proof = CProof(bytearray.fromhex('51'))
block.vtx = [coinbase_tx]
block.block_height = int(tmpl["height"])
block.hashMerkleRoot = block.calc_merkle_root()
self.log.info("getblocktemplate: Test block on both nodes")
assert_template(node1, block, None)
assert_template(node0, block, 'bad-coinbase-txos')
self.log.info("getblocktemplate: Test non-subsidy block on both nodes")
        # Without a block reward, anything goes; this allows commitment outputs like segwit
coinbase_tx.vout[0].nValue = CTxOutValue(0)
coinbase_tx.vout[0].scriptPubKey = CScript([OP_RETURN, b'\xff'])
coinbase_tx.rehash()
block.vtx = [coinbase_tx]
assert_template(node0, block, None)
assert_template(node1, block, None)
#
# Also test that coinbases can't have fees.
self.sync_all()
tmpl = node1.getblocktemplate({'rules': ['segwit']})
coinbase_tx = create_coinbase(height=int(tmpl["height"]))
# sequence numbers must not be max for nLockTime to have effect
coinbase_tx.vin[0].nSequence = 2 ** 32 - 2
# Add fee output.
coinbase_tx.vout[0].nValue.setToAmount(coinbase_tx.vout[0].nValue.getAmount() - 1)
coinbase_tx.vout.append(CTxOut(1))
coinbase_tx.rehash()
block = CBlock()
block.nVersion = tmpl["version"]
block.hashPrevBlock = int(tmpl["previousblockhash"], 16)
block.nTime = tmpl["curtime"]
block.nBits = int(tmpl["bits"], 16)
block.nNonce = 0
block.proof = CProof(bytearray.fromhex('51'))
block.vtx = [coinbase_tx]
block.block_height = int(tmpl["height"])
block.hashMerkleRoot = block.calc_merkle_root()
# should not be accepted
assert_template(node0, block, "bad-cb-fee")
assert_template(node1, block, "bad-cb-fee")
if __name__ == '__main__':
MandatoryCoinbaseTest().main()
|
tools/translation/helper/sanity_check.py | zealoussnow/chromium | 14,668 | 12757348 | # Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Sanity checking for grd_helper.py. Run manually before uploading a CL."""
import io
import os
import subprocess
import sys
# Add the parent dir so that we can import from "helper".
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from helper import grd_helper
from helper import translation_helper
if sys.platform.startswith('win'):
# Use the |git.bat| in the depot_tools/ on Windows.
GIT = 'git.bat'
else:
GIT = 'git'
here = os.path.dirname(os.path.realpath(__file__))
repo_root = os.path.normpath(os.path.join(here, '..', '..', '..'))
def list_files_in_repository(repo_path, pattern):
"""Lists all files matching given pattern in the given git repository"""
# This works because git does its own glob expansion even though there is no
# shell to do it.
output = subprocess.check_output([GIT, 'ls-files', '--', pattern],
cwd=repo_path).decode('utf-8')
return output.strip().splitlines()
def read_file_as_text(path):
with io.open(path, mode='r', encoding='utf-8') as f:
return f.read()
# Sanity checks to ensure that we can parse all grd and grdp files in the repo.
# Must not fail.
def Run():
grds = list_files_in_repository(repo_root, '*.grd')
grdps = list_files_in_repository(repo_root, '*.grdp')
print('Found %d grds, %d grdps in the repo.' % (len(grds), len(grdps)))
# Make sure we can parse all .grd files in the source tree. Grd files are
# parsed via the file path.
for grd in grds:
# This file is intentionally missing an include, skip it.
if grd == os.path.join('tools', 'translation', 'testdata', 'internal.grd'):
continue
path = os.path.join(repo_root, grd)
grd_helper.GetGrdMessages(path, os.path.dirname(path))
# Make sure we can parse all .grdp files in the source tree.
# Grdp files are parsed using file contents instead of path.
for grdp in grdps:
path = os.path.join(repo_root, grdp)
# Parse grdp files using file contents.
contents = read_file_as_text(path)
grd_helper.GetGrdpMessagesFromString(contents)
print('Successfully parsed all .grd and .grdp files in the repo.')
# Additional check for translateable grds. Translateable grds are a subset
# of all grds so this checks some files twice, but it exercises the
# get_translatable_grds() path and also doesn't need to skip internal.grd.
TRANSLATION_EXPECTATIONS_PATH = os.path.join(repo_root, 'tools',
'gritsettings',
'translation_expectations.pyl')
translateable_grds = translation_helper.get_translatable_grds(
repo_root, grds, TRANSLATION_EXPECTATIONS_PATH)
print('Found %d translateable .grd files in translation expectations.' %
len(translateable_grds))
for grd in translateable_grds:
path = os.path.join(repo_root, grd.path)
grd_helper.GetGrdMessages(path, os.path.dirname(path))
print('Successfully parsed all translateable_grds .grd files in translation '
'expectations.')
print('DONE')
if __name__ == '__main__':
Run()
|
blackmamba/lib/future/moves/subprocess.py | oz90210/blackmamba | 2,151 | 12757363 | from __future__ import absolute_import
from future.utils import PY2, PY26
from subprocess import *
if PY2:
__future_module__ = True
from commands import getoutput, getstatusoutput
if PY26:
from future.backports.misc import check_output
|
cyvcf2/tests/test_hemi.py | leoisl/cyvcf2 | 307 | 12757382 | import numpy as np
from cyvcf2 import VCF, Variant, Writer
import os.path
HERE = os.path.dirname(__file__)
HEM_PATH = os.path.join(HERE, "test-hemi.vcf")
VCF_PATH = os.path.join(HERE, "test.vcf.gz")
def check_var(v):
s = [x.split(":")[0] for x in str(v).split("\t")[9:]]
lookup = {'0/0': 0, '0/1': 1, './1': 1, '1/.': 1, '0/.': 0, './0': 0, '1/1': 3, '.': 2, './.': 2}
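    # Hedged summary of the encoding exercised here: gt_types uses 0 = HOM_REF,
    # 1 = HET, 2 = unknown/missing, 3 = HOM_ALT; half-missing hemizygous calls
    # such as ./1 or 1/. count as HET, and ./0 or 0/. as HOM_REF.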
expected = np.array([lookup[ss] for ss in s])
obs = v.gt_types
assert np.all(expected == obs), zip(expected, obs)
def test_hemi():
"""
make sure that we are getting the correct gt_types
for hemizygous variants
"""
for p in (HEM_PATH, VCF_PATH):
vcf = VCF(p)
for v in vcf:
check_var(v)
|
lib/node/utils.py | AnonymouzAuthorz/RevisitingTabularDL | 298 | 12757393 | <reponame>AnonymouzAuthorz/RevisitingTabularDL<gh_stars>100-1000
# Source: https://github.com/Qwicen/node
import contextlib
import gc
import glob
import hashlib
import os
import time
import numpy as np
import requests
import torch
from tqdm import tqdm
def download(url, filename, delete_if_interrupted=True, chunk_size=4096):
""" saves file from url to filename with a fancy progressbar """
try:
with open(filename, "wb") as f:
print("Downloading {} > {}".format(url, filename))
response = requests.get(url, stream=True)
total_length = response.headers.get('content-length')
if total_length is None: # no content length header
f.write(response.content)
else:
total_length = int(total_length)
with tqdm(total=total_length) as progressbar:
for data in response.iter_content(chunk_size=chunk_size):
if data: # filter-out keep-alive chunks
f.write(data)
progressbar.update(len(data))
except Exception as e:
if delete_if_interrupted:
print("Removing incomplete download {}.".format(filename))
os.remove(filename)
raise e
return filename
def iterate_minibatches(*tensors, batch_size, shuffle=True, epochs=1,
allow_incomplete=True, callback=lambda x:x):
indices = np.arange(len(tensors[0]))
upper_bound = int((np.ceil if allow_incomplete else np.floor) (len(indices) / batch_size)) * batch_size
epoch = 0
while True:
if shuffle:
np.random.shuffle(indices)
for batch_start in callback(range(0, upper_bound, batch_size)):
batch_ix = indices[batch_start: batch_start + batch_size]
batch = [tensor[batch_ix] for tensor in tensors]
yield batch if len(tensors) > 1 else batch[0]
epoch += 1
if epoch >= epochs:
break
def process_in_chunks(function, *args, batch_size, out=None, **kwargs):
"""
Computes output by applying batch-parallel function to large data tensor in chunks
:param function: a function(*[x[indices, ...] for x in args]) -> out[indices, ...]
:param args: one or many tensors, each [num_instances, ...]
:param batch_size: maximum chunk size processed in one go
:param out: memory buffer for out, defaults to torch.zeros of appropriate size and type
:returns: function(data), computed in a memory-efficient way
"""
total_size = args[0].shape[0]
first_output = function(*[x[0: batch_size] for x in args])
output_shape = (total_size,) + tuple(first_output.shape[1:])
if out is None:
out = torch.zeros(*output_shape, dtype=first_output.dtype, device=first_output.device,
layout=first_output.layout, **kwargs)
out[0: batch_size] = first_output
for i in range(batch_size, total_size, batch_size):
batch_ix = slice(i, min(i + batch_size, total_size))
out[batch_ix] = function(*[x[batch_ix] for x in args])
return out
def check_numpy(x):
""" Makes sure x is a numpy array """
if isinstance(x, torch.Tensor):
x = x.detach().cpu().numpy()
x = np.asarray(x)
assert isinstance(x, np.ndarray)
return x
@contextlib.contextmanager
def nop_ctx():
yield None
def get_latest_file(pattern):
list_of_files = glob.glob(pattern) # * means all if need specific format then *.csv
assert len(list_of_files) > 0, "No files found: " + pattern
return max(list_of_files, key=os.path.getctime)
def md5sum(fname):
""" Computes mdp checksum of a file """
hash_md5 = hashlib.md5()
with open(fname, "rb") as f:
for chunk in iter(lambda: f.read(4096), b""):
hash_md5.update(chunk)
return hash_md5.hexdigest()
def free_memory(sleep_time=0.1):
""" Black magic function to free torch memory and some jupyter whims """
gc.collect()
torch.cuda.synchronize()
gc.collect()
torch.cuda.empty_cache()
time.sleep(sleep_time)
def to_float_str(element):
try:
return str(float(element))
except ValueError:
return element
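# Hedged, self-contained demo (toy shapes; only runs when this module is
# executed directly): minimal use of iterate_minibatches and process_in_chunks.
if __name__ == "__main__":
    xs = np.arange(10)
    ys = np.arange(10) * 2
    for bx, by in iterate_minibatches(xs, ys, batch_size=4, shuffle=False, epochs=1):
        print(bx, by)
    doubled = process_in_chunks(lambda t: t * 2,
                                torch.arange(10, dtype=torch.float32).reshape(10, 1),
                                batch_size=3)
    print(doubled.squeeze(1))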
|