max_stars_repo_path (stringlengths 4-245) | max_stars_repo_name (stringlengths 7-115) | max_stars_count (int64 101-368k) | id (stringlengths 2-8) | content (stringlengths 6-1.03M)
---|---|---|---|---|
ffmpeg-3.2.5/tools/zmqshell.py | huyu0415/FFmpeg | 3,645 | 22362 | <gh_stars>1000+
#!/usr/bin/env python2
import sys, zmq, cmd
class LavfiCmd(cmd.Cmd):
prompt = 'lavfi> '
def __init__(self, bind_address):
context = zmq.Context()
self.requester = context.socket(zmq.REQ)
self.requester.connect(bind_address)
cmd.Cmd.__init__(self)
def onecmd(self, cmd):
if cmd == 'EOF':
sys.exit(0)
print 'Sending command:[%s]' % cmd
self.requester.send(cmd)
message = self.requester.recv()
print 'Received reply:[%s]' % message
try:
bind_address = sys.argv[1] if len(sys.argv) > 1 else "tcp://localhost:5555"
LavfiCmd(bind_address).cmdloop('FFmpeg libavfilter interactive shell')
except KeyboardInterrupt:
pass
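# Minimal usage sketch (assumed setup, not part of this file): the shell only forwards
# text commands over ZeroMQ, so a player whose filtergraph contains the "zmq" filter
# must already be listening on the bind address, e.g.:
#
#   ffplay -f lavfi "color=s=200x100:c=red,zmq"
#   ./zmqshell.py tcp://localhost:5555
#   lavfi> Parsed_color_0 c blue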
|
qinhaifang/src/evalTools/script/convert_label_map_to_geojson.py | SpaceNetChallenge/BuildingFootprintDetectors | 161 | 22364 | <gh_stars>100-1000
#!/usr/bin/env python
# encoding=gbk
"""
Convert mask to geojson format
"""
import os
import os.path
import re
import logging
import logging.config
from multiprocessing import Pool
import skimage.io as sk
import numpy as np
import scipy.io as sio
import setting
from spaceNet import geoTools as gT
import spaceNet.image_util as img_util
def process_convert_mask_to_geojson():
"""docstring for process_convert_mask_to_geojson"""
if setting.CONVERT_RES == 1:
label_map_file_list = os.listdir(setting.PREDICT_LABEL_MAP_DIR)
else:
label_map_file_list = os.listdir(setting.LABEL_MAP_DIR_4X)
pool_size = 8
pool = Pool(pool_size)
case = 0
for convert_res in pool.imap_unordered(convert_worker, label_map_file_list):
case += 1
if case % 100 == 0:
logging.info('Convert {}'.format(case))
image_id, msg = convert_res
pool.close()
pool.join()
def convert_worker(mat_file):
"""docstring for convert_worker"""
image_id = mat_file  # fallback so the except clause below can always log an identifier
try:
if setting.CONVERT_RES == 1:
image_id = '_'.join(mat_file.split('.')[0].split('_')[1:])
print('image_id:{}'.format(image_id))
mat_file = os.path.join(setting.PREDICT_LABEL_MAP_DIR, mat_file)
mat = sio.loadmat(mat_file)
#print(mat.keys())
#exit(0)
label_map = mat['inst_img']
building_list = img_util.create_buildinglist_from_label_map(image_id, label_map)
geojson_file = os.path.join(setting.PREDICT_PIXEL_GEO_JSON_DIR, '{}_predict.geojson'.format(image_id))
else:
#print('{}'.format(mat_file))
image_id = '_'.join(mat_file.split('.')[0].split('_')[:])
#print('{}'.format(image_id))
mat_file = os.path.join(setting.LABEL_MAP_DIR_4X, mat_file)
mat = sio.loadmat(mat_file)
label_map = mat['GTinst']['Segmentation'][0][0]
building_list = img_util.create_buildinglist_from_label_map(image_id, label_map)
geojson_file = os.path.join(setting.PIXEL_GEO_JSON_DIR_4X, '{}_Pixel.geojson'.format(image_id))
gT.exporttogeojson(geojson_file, building_list)
return image_id, 'Done'
except Exception as e:
logging.warning('Convert Exception[{}] image_id[{}]'.format(e, image_id))
return image_id, e
def test_geojson():
"""docstring for test_geojson"""
label_map_file_list = os.listdir(setting.PREDICT_LABEL_MAP_DIR)
for mat_file in label_map_file_list:
image_id = '_'.join(mat_file.split('.')[0].split('_')[1:])
predict_geojson_file = os.path.join(setting.PREDICT_PIXEL_GEO_JSON_DIR, '{}_predict.geojson'.format(image_id))
image_name = os.path.join(setting.PIC_3BAND_DIR, '3band_{}.tif'.format(image_id))
img = sk.imread(image_name, True)
label_map = np.zeros(img.shape, dtype=np.uint8)
label_map = img_util.create_label_map_from_polygons(gT.importgeojson(predict_geojson_file),
label_map)
label_img = img_util.create_label_img(img, label_map)
save_file = os.path.join(setting.TMP_DIR, '{}_predict.png'.format(image_id))
sk.imsave(save_file, label_img)
truth_geojson_file = os.path.join(setting.PIXEL_GEO_JSON_DIR, '{}_Pixel.geojson'.format(image_id))
print('{}'.format(truth_geojson_file))
label_map = np.zeros(img.shape, dtype=np.uint8)
print('label_map shape{}'.format(label_map.shape))
label_map = img_util.create_label_map_from_polygons(gT.importgeojson(truth_geojson_file), label_map)
label_img = img_util.create_label_img(img, label_map)
save_file = os.path.join(setting.TMP_DIR, '{}_Pixel.png'.format(image_id))
sk.imsave(save_file, label_img)
if __name__ == '__main__':
process_convert_mask_to_geojson()
#test_geojson()
|
gabbi/tests/test_driver.py | scottwallacesh/gabbi | 145 | 22388 | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Test that the driver can build tests effectively."""
import os
import unittest
from gabbi import driver
TESTS_DIR = 'test_gabbits'
class DriverTest(unittest.TestCase):
def setUp(self):
super(DriverTest, self).setUp()
self.loader = unittest.defaultTestLoader
self.test_dir = os.path.join(os.path.dirname(__file__), TESTS_DIR)
def test_driver_loads_three_tests(self):
suite = driver.build_tests(self.test_dir, self.loader,
host='localhost', port=8001)
self.assertEqual(1, len(suite._tests),
'top level suite contains one suite')
self.assertEqual(3, len(suite._tests[0]._tests),
'contained suite contains three tests')
the_one_test = suite._tests[0]._tests[0]
self.assertEqual('test_driver_sample_one',
the_one_test.__class__.__name__,
'test class name maps')
self.assertEqual('one',
the_one_test.test_data['name'])
self.assertEqual('/', the_one_test.test_data['url'])
def test_driver_prefix(self):
suite = driver.build_tests(self.test_dir, self.loader,
host='localhost', port=8001,
prefix='/mountpoint')
the_one_test = suite._tests[0]._tests[0]
the_two_test = suite._tests[0]._tests[1]
self.assertEqual('/mountpoint', the_one_test.prefix)
self.assertEqual('/mountpoint', the_two_test.prefix)
def test_build_requires_host_or_intercept(self):
with self.assertRaises(AssertionError):
driver.build_tests(self.test_dir, self.loader)
def test_build_with_url_provides_host(self):
"""This confirms that url provides the required host."""
suite = driver.build_tests(self.test_dir, self.loader,
url='https://foo.example.com')
first_test = suite._tests[0]._tests[0]
full_url = first_test._parse_url(first_test.test_data['url'])
ssl = first_test.test_data['ssl']
self.assertEqual('https://foo.example.com/', full_url)
self.assertTrue(ssl)
def test_build_require_ssl(self):
suite = driver.build_tests(self.test_dir, self.loader,
host='localhost',
require_ssl=True)
first_test = suite._tests[0]._tests[0]
full_url = first_test._parse_url(first_test.test_data['url'])
self.assertEqual('https://localhost:8001/', full_url)
suite = driver.build_tests(self.test_dir, self.loader,
host='localhost',
require_ssl=False)
first_test = suite._tests[0]._tests[0]
full_url = first_test._parse_url(first_test.test_data['url'])
self.assertEqual('http://localhost:8001/', full_url)
def test_build_url_target(self):
suite = driver.build_tests(self.test_dir, self.loader,
host='localhost', port='999',
url='https://example.com:1024/theend')
first_test = suite._tests[0]._tests[0]
full_url = first_test._parse_url(first_test.test_data['url'])
self.assertEqual('https://example.com:1024/theend/', full_url)
def test_build_url_target_forced_ssl(self):
suite = driver.build_tests(self.test_dir, self.loader,
host='localhost', port='999',
url='http://example.com:1024/theend',
require_ssl=True)
first_test = suite._tests[0]._tests[0]
full_url = first_test._parse_url(first_test.test_data['url'])
self.assertEqual('https://example.com:1024/theend/', full_url)
def test_build_url_use_prior_test(self):
suite = driver.build_tests(self.test_dir, self.loader,
host='localhost',
use_prior_test=True)
for test in suite._tests[0]._tests:
if test.test_data['name'] != 'use_prior_false':
expected_use_prior = True
else:
expected_use_prior = False
self.assertEqual(expected_use_prior,
test.test_data['use_prior_test'])
suite = driver.build_tests(self.test_dir, self.loader,
host='localhost',
use_prior_test=False)
for test in suite._tests[0]._tests:
self.assertEqual(False, test.test_data['use_prior_test'])
|
mayan/apps/web_links/migrations/0004_make_labes_unique.py | nattangwiwat/Mayan-EDMS-recitation | 343 | 22404 | <reponame>nattangwiwat/Mayan-EDMS-recitation<filename>mayan/apps/web_links/migrations/0004_make_labes_unique.py
from django.db import migrations
def operation_make_labels_unique(apps, schema_editor):
WebLink = apps.get_model(app_label='web_links', model_name='WebLink')
for web_link in WebLink.objects.using(schema_editor.connection.alias).all():
# Look for instances with the same label
duplicate_queryset = WebLink.objects.using(
schema_editor.connection.alias
).filter(label=web_link.label).exclude(pk=web_link.pk)
if duplicate_queryset:
# If a duplicate is found, append the id to the original instance
# label
web_link.label = '{}__{}'.format(web_link.label, web_link.pk)
web_link.save()
def operation_make_labels_unique_reverse(apps, schema_editor):
WebLink = apps.get_model(app_label='web_links', model_name='WebLink')
for web_link in WebLink.objects.using(schema_editor.connection.alias).all():
if web_link.label.endswith('__{}'.format(web_link.pk)):
web_link.label = web_link.label.replace(
'__{}'.format(web_link.pk), ''
)
web_link.save()
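# Illustrative example (hypothetical data): for two web links labelled 'Docs' with pks 3
# and 7, the first one processed is renamed to 'Docs__3', after which the other no longer
# has a duplicate and keeps 'Docs'; the reverse operation strips the '__<pk>' suffix.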
class Migration(migrations.Migration):
dependencies = [
('web_links', '0003_auto_20191211_0233'),
]
operations = [
migrations.RunPython(
code=operation_make_labels_unique,
reverse_code=operation_make_labels_unique_reverse
),
]
|
DeepRTS/__init__.py | cair/deep-rts | 144 | 22424 | try:
from DeepRTS import Engine
except ImportError:
import Engine
try:
from DeepRTS.Engine import Map, UnitManager, Constants, Player
except ImportError:
from Engine import Map, UnitManager, Constants, Player
|
rlzoo/common/build_rlbench_env.py | tensorlayer/RLzoo | 750 | 22427 | <reponame>tensorlayer/RLzoo
import sys
from collections import OrderedDict
import numpy as np
from gym import spaces
from pyrep.const import RenderMode
from pyrep.objects.dummy import Dummy
from pyrep.objects.vision_sensor import VisionSensor
from rlbench.environment import Environment
from rlbench.action_modes import ArmActionMode, ActionMode
from rlbench.observation_config import ObservationConfig
from rlbench.tasks import *
# Don't forget to add: export PYTHONPATH=PATH_TO_YOUR_LOCAL_RLBENCH_REPO
# list of state types
state_types = ['left_shoulder_rgb',
'left_shoulder_depth',
'left_shoulder_mask',
'right_shoulder_rgb',
'right_shoulder_depth',
'right_shoulder_mask',
'wrist_rgb',
'wrist_depth',
'wrist_mask',
'joint_velocities',
'joint_velocities_noise',
'joint_positions',
'joint_positions_noise',
'joint_forces',
'joint_forces_noise',
'gripper_pose',
'gripper_touch_forces',
'task_low_dim_state']
class RLBenchEnv():
""" make RLBench env to have same interfaces as openai.gym """
def __init__(self, task_name: str, state_type: list = 'state', ):
# render_mode=None):
"""
create RL Bench environment
:param task_name: task names can be found in rlbench.tasks
:param state_type: 'state', 'vision', or a sub-list of the state_types list above, e.g. ['left_shoulder_rgb']
"""
if state_type == 'state' or state_type == 'vision' or isinstance(state_type, list):
self._state_type = state_type
else:
raise ValueError('State type value error, your value is {}'.format(state_type))
# self._render_mode = render_mode
self._render_mode = None
obs_config = ObservationConfig()
obs_config.set_all(True)
action_mode = ActionMode(ArmActionMode.ABS_JOINT_VELOCITY)
self.env = Environment(
action_mode, obs_config=obs_config, headless=True)
self.env.launch()
try:
self.task = self.env.get_task(getattr(sys.modules[__name__], task_name))
except:
raise NotImplementedError
_, obs = self.task.reset()
self.spec = Spec(task_name)
if self._state_type == 'state':
self.observation_space = spaces.Box(
low=-np.inf, high=np.inf, shape=obs.get_low_dim_data().shape)
elif self._state_type == 'vision':
space_dict = OrderedDict()
space_dict["state"] = spaces.Box(
low=-np.inf, high=np.inf, shape=obs.get_low_dim_data().shape)
for i in ["left_shoulder_rgb", "right_shoulder_rgb", "wrist_rgb", "front_rgb"]:
space_dict[i] = spaces.Box(
low=0, high=1, shape=getattr(obs, i).shape)
self.observation_space = spaces.Dict(space_dict)
else:
space_dict = OrderedDict()
for name in self._state_type:
if name.split('_')[-1] in ('rgb', 'depth', 'mask'):
space_dict[name] = spaces.Box(
low=0, high=1, shape=getattr(obs, name).shape)
else:
space_dict[name] = spaces.Box(
low=-np.inf, high=np.inf,
shape=getattr(obs, name).shape)
self.observation_space = spaces.Dict(space_dict)
self.action_space = spaces.Box(low=-1.0, high=1.0, shape=(self.env.action_size,), dtype=np.float32)
# if render_mode is not None:
# # Add the camera to the scene
# cam_placeholder = Dummy('cam_cinematic_placeholder')
# self._gym_cam = VisionSensor.create([640, 360])
# self._gym_cam.set_pose(cam_placeholder.get_pose())
# if render_mode == 'human':
# self._gym_cam.set_render_mode(RenderMode.OPENGL3_WINDOWED)
# else:
# self._gym_cam.set_render_mode(RenderMode.OPENGL3)
def _extract_obs(self, obs):
if self._state_type == 'state':
return np.array(obs.get_low_dim_data(), np.float32)
elif self._state_type == 'vision':
return np.array([np.array(obs.get_low_dim_data(), np.float32),
np.array(obs.left_shoulder_rgb, np.float32),
np.array(obs.right_shoulder_rgb, np.float32),
np.array(obs.wrist_rgb, np.float32),
np.array(obs.front_rgb, np.float32), ])
else:
result = ['tag']
for name in self._state_type:
result.append(np.array(getattr(obs, name), np.float32))
return np.delete(np.array(result,), 0, 0)
def seed(self, seed_value):
# set seed as in openai.gym env
pass
def render(self, mode='human'):
# todo render available at any time
if self._render_mode is None:
self._render_mode = mode
# Add the camera to the scene
cam_placeholder = Dummy('cam_cinematic_placeholder')
self._gym_cam = VisionSensor.create([640, 360])
self._gym_cam.set_pose(cam_placeholder.get_pose())
if mode == 'human':
self._gym_cam.set_render_mode(RenderMode.OPENGL3_WINDOWED)
else:
self._gym_cam.set_render_mode(RenderMode.OPENGL3)
if mode != self._render_mode:
raise ValueError(
'The render mode must match the render mode selected in the '
'constructor. \nI.e. if you want "human" render mode, then '
'create the env by calling: '
'gym.make("reach_target-state-v0", render_mode="human").\n'
'You passed in mode %s, but expected %s.' % (
mode, self._render_mode))
if mode == 'rgb_array':
return self._gym_cam.capture_rgb()
def reset(self):
descriptions, obs = self.task.reset()
return self._extract_obs(obs)
def step(self, action):
obs, reward, terminate = self.task.step(action)
return self._extract_obs(obs), reward, terminate, None
def close(self):
self.env.shutdown()
class Spec():
""" a fake spec """
def __init__(self, id_name):
self.id = id_name
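# Minimal usage sketch (assumes a working RLBench/PyRep/CoppeliaSim install; 'ReachTarget'
# is one of the task classes exported by rlbench.tasks):
#
#   env = RLBenchEnv('ReachTarget', state_type='state')
#   obs = env.reset()
#   obs, reward, done, _ = env.step(env.action_space.sample())
#   env.close()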
|
habitat_baselines/utils/gym_adapter.py | srama2512/habitat-api | 355 | 22439 | <gh_stars>100-1000
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, Dict, Optional, Union
import gym
import numpy as np
from gym import spaces
from habitat.core.simulator import Observations
from habitat.utils.visualizations.utils import observations_to_image
def flatten_dict(d, parent_key=""):
# From https://stackoverflow.com/questions/6027558/flatten-nested-dictionaries-compressing-keys
items = []
for k, v in d.items():
new_key = parent_key + str(k) if parent_key else str(k)
if isinstance(v, dict):
items.extend(flatten_dict(v, new_key).items())
else:
items.append((new_key, v))
return dict(items)
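# Illustrative example: flatten_dict({'a': {'b': 1}, 'c': 2}) -> {'ab': 1, 'c': 2}
# (sub-keys are concatenated to parent keys without a separator).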
def smash_observation_space(obs_space, limit_keys):
obs_shapes = [obs_space.spaces[k].shape for k in limit_keys]
def transform_shape(shape):
if len(shape) == 2:
return (np.prod(shape),)
return shape
obs_shapes = [transform_shape(shape) for shape in obs_shapes]
obs_dims = [len(shape) for shape in obs_shapes]
if len(set(obs_dims)) == 1 and obs_dims[0] == 1:
# Smash together
total_dim = sum([shape[0] for shape in obs_shapes])
return spaces.Box(
shape=(total_dim,), low=-1.0, high=1.0, dtype=np.float32
)
return obs_space
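# Illustrative example: if limit_keys selects two 1-D Box spaces of shapes (3,) and (4,),
# the result is a single Box of shape (7,); 2-D spaces are flattened first, while any
# higher-dimensional space (e.g. an RGB image) leaves the observation space unchanged.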
class HabGymWrapper(gym.Env):
"""
Wraps a Habitat RLEnv into a format compatible with the standard OpenAI Gym
interface. Currently does not support discrete actions. This wrapper
therefore changes the behavior so that:
- The action input to `.step(...)` is always a numpy array
- The returned value of `.step(...)` and `.reset()` is either a numpy array or a
dictionary consisting of string keys and numpy array values.
- The action space is converted to a `gym.spaces.Box`; action spaces from the RLEnv are
flattened into one Box space.
- The observation space is either a `gym.spaces.Box` or a `gym.spaces.Dict`
where the spaces of the Dict are `gym.spaces.Box`.
Configuration allows filtering the included observations, specifying goals,
or filtering actions. Listed below are the
config keys:
- `RL.GYM_OBS_KEYS`: Which observation names from the wrapped environment
to include. The order of the key names is kept in the output observation array.
- `RL.GYM_DESIRED_GOAL_KEYS`: By default an empty list. If not empty,
the listed observations are returned under the `desired_goal` key of the
returned observation.
- `RL.GYM_FIX_INFO_DICT`: By default False, but if specified as true, this
flattens the returned info dictionary to have depth 1 where sub-keys are
concatenated to parent keys.
- `RL.GYM_ACTION_KEYS`: Include a subset of the allowed actions in the
wrapped environment. If not specified or empty, all actions are included.
Example usage:
```
config = baselines_get_config(hab_cfg_path)
env_class = get_env_class(config.ENV_NAME)
env = habitat_baselines.utils.env_utils.make_env_fn(
env_class=env_class, config=config
)
env = HabGymWrapper(env)
env = HabRenderWrapper(env)
```
"""
def __init__(self, env, save_orig_obs: bool = False):
self._gym_goal_keys = env._rl_config.get("GYM_DESIRED_GOAL_KEYS", [])
self._gym_achieved_goal_keys = env._rl_config.get(
"GYM_ACHIEVED_GOAL_KEYS", []
)
self._fix_info_dict = env._rl_config.get("GYM_FIX_INFO_DICT", False)
self._gym_action_keys = env._rl_config.get("GYM_ACTION_KEYS", None)
self._gym_obs_keys = env._rl_config.get("GYM_OBS_KEYS", None)
action_space = env.action_space
action_space = spaces.Dict(
{
k: v
for k, v in action_space.spaces.items()
if (
(self._gym_action_keys is None)
or (k in self._gym_action_keys)
)
}
)
self._last_obs: Optional[Observations] = None
self.action_mapping = {}
self._save_orig_obs = save_orig_obs
self.orig_obs = None
if len(action_space.spaces) != 1:
raise ValueError(
"Cannot convert this action space, more than one action"
)
self.orig_action_name = list(action_space.spaces.keys())[0]
action_space = action_space.spaces[self.orig_action_name]
if not isinstance(action_space, spaces.Dict):
raise ValueError("Cannot convert this action space")
all_box = True
for sub_space in action_space.spaces.values():
if not isinstance(sub_space, spaces.Box):
all_box = False
break
if not all_box:
raise ValueError("Cannot convert this action space")
start_i = 0
for name, sub_space in action_space.spaces.items():
end_i = start_i + sub_space.shape[0]
self.action_mapping[name] = (start_i, end_i)
self.action_space = spaces.Box(
shape=(end_i,), low=-1.0, high=1.0, dtype=np.float32
)
self.observation_space = smash_observation_space(
env.observation_space, self._gym_obs_keys
)
dict_space = {
"observation": self.observation_space,
}
if len(self._gym_goal_keys) > 0:
dict_space["desired_goal"] = smash_observation_space(
env.observation_space, self._gym_goal_keys
)
if len(self._gym_achieved_goal_keys) > 0:
dict_space["achieved_goal"] = smash_observation_space(
env.observation_space, self._gym_achieved_goal_keys
)
if len(dict_space) > 1:
self.observation_space = spaces.Dict(dict_space)
self._env = env
def step(self, action: np.ndarray):
action_args = {}
for k, (start_i, end_i) in self.action_mapping.items():
action_args[k] = action[start_i:end_i]
action = {
"action": self.orig_action_name,
"action_args": action_args,
}
return self.direct_hab_step(action)
def direct_hab_step(self, action: Union[int, str, Dict[str, Any]]):
obs, reward, done, info = self._env.step(action=action)
self._last_obs = obs
obs = self._transform_obs(obs)
if self._fix_info_dict:
info = flatten_dict(info)
info = {k: float(v) for k, v in info.items()}
return obs, reward, done, info
def _is_space_flat(self, space_name):
if isinstance(self.observation_space, spaces.Box):
return True
return isinstance(
self.observation_space.spaces[space_name], spaces.Box
)
def _transform_obs(self, obs):
if self._save_orig_obs:
self.orig_obs = obs
observation = {"observation": [obs[k] for k in self._gym_obs_keys]}
if len(self._gym_goal_keys) > 0:
observation["desired_goal"] = [obs[k] for k in self._gym_goal_keys]
if len(self._gym_achieved_goal_keys) > 0:
observation["achieved_goal"] = [
obs[k] for k in self._gym_achieved_goal_keys
]
for k, v in observation.items():
if self._is_space_flat(k):
observation[k] = np.concatenate(v)
if len(observation) == 1:
return observation["observation"]
return observation
def reset(self) -> Union[np.ndarray, Dict[str, np.ndarray]]:
obs = self._env.reset()
self._last_obs = obs
return self._transform_obs(obs)
def render(self, mode: str = "rgb_array") -> np.ndarray:
frame = None
if mode == "rgb_array":
frame = observations_to_image(
self._last_obs, self._env._env.get_metrics()
)
else:
raise ValueError(f"Render mode {mode} not currently supported.")
return frame
|
python/akg/ms/utils.py | tianjiashuo/akg | 286 | 22442 | <filename>python/akg/ms/utils.py
# Copyright 2019-2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""utils"""
# input format begin
DEFAULT = "DefaultFormat"
NCHW = "NCHW"
NHWC = "NHWC"
HWCN = "HWCN"
NC1HWC0 = "NC1HWC0"
FRAC_Z = "FracZ"
# input format end
# fusion type begin
ELEMWISE = "ELEMWISE"
CONVLUTION = "CONVLUTION"
COMMREDUCE = "COMMREDUCE"
SEGMENT = "SEGMENT"
OPAQUE = "OPAQUE"
# fusion type end
BINDS = "binds" |
visualize/usecases/get_user_info.py | RevanthRyo/Alize | 160 | 22446 | <filename>visualize/usecases/get_user_info.py
import requests
from django.conf import settings
from visualize.utils.api import Client
class GetUserInfo(object):
"""
GetUserInfo :
params : username
response :
{
"login": "torvalds",
"id": 1024025,
"avatar_url": "https://avatars0.githubusercontent.com/u/1024025?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/torvalds",
"html_url": "https://github.com/torvalds",
"followers_url": "https://api.github.com/users/torvalds/followers",
"following_url": "https://api.github.com/users/torvalds/following{/other_user}",
"gists_url": "https://api.github.com/users/torvalds/gists{/gist_id}",
"starred_url": "https://api.github.com/users/torvalds/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/torvalds/subscriptions",
"organizations_url": "https://api.github.com/users/torvalds/orgs",
"repos_url": "https://api.github.com/users/torvalds/repos",
"events_url": "https://api.github.com/users/torvalds/events{/privacy}",
"received_events_url": "https://api.github.com/users/torvalds/received_events",
"type": "User",
"site_admin": false,
"name": "<NAME>",
"company": "Linux Foundation",
"blog": "",
"location": "Portland, OR",
"email": null,
"hireable": null,
"bio": null,
"public_repos": 6,
"public_gists": 0,
"followers": 72049,
"following": 0,
"created_at": "2011-09-03T15:26:22Z",
"updated_at": "2017-11-14T16:54:03Z"
}
"""
def _extract_infos(self, data):
return {
"id": data["id"],
"name": data["name"],
"username": data["login"],
"html_url": data["html_url"],
"url": data["url"],
"avatar": data["avatar_url"],
"total_repos": data["public_repos"],
"followers": data["followers"],
"following": data["following"],
"created_at": data["created_at"],
"company": data["company"],
"bio": data["bio"],
"email": data["email"],
"location": data["location"],
}
def validate(self, username):
if not username:
raise Exception("Invalid username")
def execute(self, username):
self.validate(username)
api_response = Client().user_info(url_params={"username": username})
if "message" in api_response:
return False
response = self._extract_infos(api_response)
return response
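# Minimal usage sketch (assumes the GitHub API configuration expected by
# visualize.utils.api.Client is in place):
#
#   info = GetUserInfo().execute('torvalds')
#   if info:
#       print(info['username'], info['followers'])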
|
blocks/bricks/sequence_generators.py | KIKOcaoyue/blocks | 1,067 | 22558 | """Sequence generation framework.
Recurrent networks are often used to generate/model sequences.
Examples include language modelling, machine translation, handwriting
synthesis, etc. A typical pattern in this context is that
sequence elements are generated one after another, and every generated
element is fed back into the recurrent network state. Sometimes
also an attention mechanism is used to condition sequence generation
on some structured input like another sequence or an image.
This module provides :class:`SequenceGenerator` that builds a sequence
generating network from three main components:
* a core recurrent transition, e.g. :class:`~blocks.bricks.recurrent.LSTM`
or :class:`~blocks.bricks.recurrent.GatedRecurrent`
* a readout component that can produce sequence elements using
the network state and the information from the attention mechanism
* an attention mechanism (see :mod:`~blocks.bricks.attention` for
more information)
Implementation-wise :class:`SequenceGenerator` fully relies on
:class:`BaseSequenceGenerator`. At the level of the latter an
attention is mandatory, moreover it must be a part of the recurrent
transition (see :class:`~blocks.bricks.attention.AttentionRecurrent`).
To simulate optional attention, :class:`SequenceGenerator` wraps the
pure recurrent network in :class:`FakeAttentionRecurrent`.
"""
from abc import ABCMeta, abstractmethod
from six import add_metaclass
from theano import tensor
from blocks.bricks import Initializable, Random, Bias, NDimensionalSoftmax
from blocks.bricks.base import application, Brick, lazy
from blocks.bricks.parallel import Fork, Merge
from blocks.bricks.lookup import LookupTable
from blocks.bricks.recurrent import recurrent
from blocks.bricks.attention import (
AbstractAttentionRecurrent, AttentionRecurrent)
from blocks.roles import add_role, COST
from blocks.utils import dict_union, dict_subset
class BaseSequenceGenerator(Initializable):
r"""A generic sequence generator.
This class combines two components, a readout network and an
attention-equipped recurrent transition, into a context-dependent
sequence generator. Third component must be also given which
forks feedback from the readout network to obtain inputs for the
transition.
The class provides two methods: :meth:`generate` and :meth:`cost`. The
former is to actually generate sequences and the latter is to compute
the cost of generating given sequences.
The generation algorithm description follows.
**Definitions and notation:**
* States :math:`s_i` of the generator are the states of the transition
as specified in `transition.state_names`.
* Contexts of the generator are the contexts of the
transition as specified in `transition.context_names`.
* Glimpses :math:`g_i` are intermediate entities computed at every
generation step from states, contexts and the previous step glimpses.
They are computed in the transition's `apply` method when not given
or by explicitly calling the transition's `take_glimpses` method. The
set of glimpses considered is specified in
`transition.glimpse_names`.
* Outputs :math:`y_i` are produced at every step and form the output
sequence. A generation cost :math:`c_i` is assigned to each output.
**Algorithm:**
1. Initialization.
.. math::
y_0 = readout.initial\_outputs(contexts)\\
s_0, g_0 = transition.initial\_states(contexts)\\
i = 1\\
By default all recurrent bricks from :mod:`~blocks.bricks.recurrent`
have trainable initial states initialized with zeros. Subclass them
or :class:`~blocks.bricks.recurrent.BaseRecurrent` directly to get
custom initial states.
2. New glimpses are computed:
.. math:: g_i = transition.take\_glimpses(
s_{i-1}, g_{i-1}, contexts)
3. A new output is generated by the readout and its cost is
computed:
.. math::
f_{i-1} = readout.feedback(y_{i-1}) \\
r_i = readout.readout(f_{i-1}, s_{i-1}, g_i, contexts) \\
y_i = readout.emit(r_i) \\
c_i = readout.cost(r_i, y_i)
Note that the *new* glimpses and the *old* states are used at this
step. The reason for not merging all readout methods into one is
to make an efficient implementation of :meth:`cost` possible.
4. New states are computed and iteration is done:
.. math::
f_i = readout.feedback(y_i) \\
s_i = transition.compute\_states(s_{i-1}, g_i,
fork.apply(f_i), contexts) \\
i = i + 1
5. Back to step 2 if the desired sequence
length has not been yet reached.
| A scheme of the algorithm described above follows.
.. image:: /_static/sequence_generator_scheme.png
:height: 500px
:width: 500px
..
Parameters
----------
readout : instance of :class:`AbstractReadout`
The readout component of the sequence generator.
transition : instance of :class:`AbstractAttentionRecurrent`
The transition component of the sequence generator.
fork : :class:`~.bricks.Brick`
The brick to compute the transition's inputs from the feedback.
See Also
--------
:class:`.Initializable` : for initialization parameters
:class:`SequenceGenerator` : more user friendly interface to this\
brick
"""
@lazy()
def __init__(self, readout, transition, fork, **kwargs):
self.readout = readout
self.transition = transition
self.fork = fork
children = [self.readout, self.fork, self.transition]
kwargs.setdefault('children', []).extend(children)
super(BaseSequenceGenerator, self).__init__(**kwargs)
@property
def _state_names(self):
return self.transition.compute_states.outputs
@property
def _context_names(self):
return self.transition.apply.contexts
@property
def _glimpse_names(self):
return self.transition.take_glimpses.outputs
def _push_allocation_config(self):
# Configure readout. That involves `get_dim` requests
# to the transition. To make sure that it answers
# correctly we should finish its configuration first.
self.transition.push_allocation_config()
transition_sources = (self._state_names + self._context_names +
self._glimpse_names)
self.readout.source_dims = [self.transition.get_dim(name)
if name in transition_sources
else self.readout.get_dim(name)
for name in self.readout.source_names]
# Configure fork. For similar reasons as outlined above,
# first push `readout` configuration.
self.readout.push_allocation_config()
feedback_name, = self.readout.feedback.outputs
self.fork.input_dim = self.readout.get_dim(feedback_name)
self.fork.output_dims = self.transition.get_dims(
self.fork.apply.outputs)
@application
def cost(self, application_call, outputs, mask=None, **kwargs):
"""Returns the average cost over the minibatch.
The cost is computed by averaging the sum of per token costs for
each sequence over the minibatch.
.. warning::
Note that, the computed cost can be problematic when batches
consist of vastly different sequence lengths.
Parameters
----------
outputs : :class:`~tensor.TensorVariable`
The 3(2) dimensional tensor containing output sequences.
The axis 0 must stand for time, the axis 1 for the
position in the batch.
mask : :class:`~tensor.TensorVariable`
The binary matrix identifying fake outputs.
Returns
-------
cost : :class:`~tensor.Variable`
Theano variable for cost, computed by summing over timesteps
and then averaging over the minibatch.
Notes
-----
The contexts are expected as keyword arguments.
Adds average cost per sequence element `AUXILIARY` variable to
the computational graph with name ``per_sequence_element``.
"""
# Compute the sum of costs
costs = self.cost_matrix(outputs, mask=mask, **kwargs)
cost = tensor.mean(costs.sum(axis=0))
add_role(cost, COST)
# Add auxiliary variable for per sequence element cost
application_call.add_auxiliary_variable(
(costs.sum() / mask.sum()) if mask is not None else costs.mean(),
name='per_sequence_element')
return cost
@application
def cost_matrix(self, application_call, outputs, mask=None, **kwargs):
"""Returns generation costs for output sequences.
See Also
--------
:meth:`cost` : Scalar cost.
"""
# We assume the data has axes (time, batch, features, ...)
batch_size = outputs.shape[1]
# Prepare input for the iterative part
states = dict_subset(kwargs, self._state_names, must_have=False)
# masks in context are optional (e.g. `attended_mask`)
contexts = dict_subset(kwargs, self._context_names, must_have=False)
feedback = self.readout.feedback(outputs)
inputs = self.fork.apply(feedback, as_dict=True)
# Run the recurrent network
results = self.transition.apply(
mask=mask, return_initial_states=True, as_dict=True,
**dict_union(inputs, states, contexts))
# Separate the deliverables. The last states are discarded: they
# are not used to predict any output symbol. The initial glimpses
# are discarded because they are not used for prediction.
# Remember, glimpses are computed _before_ output stage, states are
# computed after.
states = {name: results[name][:-1] for name in self._state_names}
glimpses = {name: results[name][1:] for name in self._glimpse_names}
# Compute the cost
feedback = tensor.roll(feedback, 1, 0)
feedback = tensor.set_subtensor(
feedback[0],
self.readout.feedback(self.readout.initial_outputs(batch_size)))
readouts = self.readout.readout(
feedback=feedback, **dict_union(states, glimpses, contexts))
costs = self.readout.cost(readouts, outputs)
if mask is not None:
costs *= mask
for name, variable in list(glimpses.items()) + list(states.items()):
application_call.add_auxiliary_variable(
variable.copy(), name=name)
# These variables can be used to initialize the initial states of the
# next batch using the last states of the current batch.
for name in self._state_names + self._glimpse_names:
application_call.add_auxiliary_variable(
results[name][-1].copy(), name=name+"_final_value")
return costs
@recurrent
def generate(self, outputs, **kwargs):
"""A sequence generation step.
Parameters
----------
outputs : :class:`~tensor.TensorVariable`
The outputs from the previous step.
Notes
-----
The contexts, previous states and glimpses are expected as keyword
arguments.
"""
states = dict_subset(kwargs, self._state_names)
# masks in context are optional (e.g. `attended_mask`)
contexts = dict_subset(kwargs, self._context_names, must_have=False)
glimpses = dict_subset(kwargs, self._glimpse_names)
next_glimpses = self.transition.take_glimpses(
as_dict=True, **dict_union(states, glimpses, contexts))
next_readouts = self.readout.readout(
feedback=self.readout.feedback(outputs),
**dict_union(states, next_glimpses, contexts))
next_outputs = self.readout.emit(next_readouts)
next_costs = self.readout.cost(next_readouts, next_outputs)
next_feedback = self.readout.feedback(next_outputs)
next_inputs = (self.fork.apply(next_feedback, as_dict=True)
if self.fork else {'feedback': next_feedback})
next_states = self.transition.compute_states(
as_list=True,
**dict_union(next_inputs, states, next_glimpses, contexts))
return (next_states + [next_outputs] +
list(next_glimpses.values()) + [next_costs])
@generate.delegate
def generate_delegate(self):
return self.transition.apply
@generate.property('states')
def generate_states(self):
return self._state_names + ['outputs'] + self._glimpse_names
@generate.property('outputs')
def generate_outputs(self):
return (self._state_names + ['outputs'] +
self._glimpse_names + ['costs'])
def get_dim(self, name):
if name in (self._state_names + self._context_names +
self._glimpse_names):
return self.transition.get_dim(name)
elif name == 'outputs':
return self.readout.get_dim(name)
return super(BaseSequenceGenerator, self).get_dim(name)
@application
def initial_states(self, batch_size, *args, **kwargs):
# TODO: support dict of outputs for application methods
# to simplify this code.
state_dict = dict(
self.transition.initial_states(
batch_size, as_dict=True, *args, **kwargs),
outputs=self.readout.initial_outputs(batch_size))
return [state_dict[state_name]
for state_name in self.generate.states]
@initial_states.property('outputs')
def initial_states_outputs(self):
return self.generate.states
@add_metaclass(ABCMeta)
class AbstractReadout(Initializable):
"""The interface for the readout component of a sequence generator.
The readout component of a sequence generator is a bridge between
the core recurrent network and the output sequence.
Parameters
----------
source_names : list
A list of the source names (outputs) that are needed for the
readout part e.g. ``['states']`` or
``['states', 'weighted_averages']`` or ``['states', 'feedback']``.
readout_dim : int
The dimension of the readout.
Attributes
----------
source_names : list
readout_dim : int
See Also
--------
:class:`BaseSequenceGenerator` : see how exactly a readout is used
:class:`Readout` : the typically used readout brick
"""
@lazy(allocation=['source_names', 'readout_dim'])
def __init__(self, source_names, readout_dim, **kwargs):
self.source_names = source_names
self.readout_dim = readout_dim
super(AbstractReadout, self).__init__(**kwargs)
@abstractmethod
def emit(self, readouts):
"""Produce outputs from readouts.
Parameters
----------
readouts : :class:`~theano.Variable`
Readouts produced by the :meth:`readout` method of
a `(batch_size, readout_dim)` shape.
"""
pass
@abstractmethod
def cost(self, readouts, outputs):
"""Compute generation cost of outputs given readouts.
Parameters
----------
readouts : :class:`~theano.Variable`
Readouts produced by the :meth:`readout` method
of a `(..., readout dim)` shape.
outputs : :class:`~theano.Variable`
Outputs whose cost should be computed. Should have as many
dimensions as `readouts`, or one fewer. If `readouts` has
`n` dimensions, the first `n - 1` dimensions of `outputs` should
match those of `readouts`.
"""
pass
@abstractmethod
def initial_outputs(self, batch_size):
"""Compute initial outputs for the generator's first step.
In the notation from the :class:`BaseSequenceGenerator`
documentation this method should compute :math:`y_0`.
"""
pass
@abstractmethod
def readout(self, **kwargs):
r"""Compute the readout vector from states, glimpses, etc.
Parameters
----------
\*\*kwargs: dict
Contains sequence generator states, glimpses,
contexts and feedback from the previous outputs.
"""
pass
@abstractmethod
def feedback(self, outputs):
"""Feeds outputs back to be used as inputs of the transition."""
pass
class Readout(AbstractReadout):
r"""Readout brick with separated emitter and feedback parts.
:class:`Readout` combines a few bits and pieces into an object
that can be used as the readout component in
:class:`BaseSequenceGenerator`. This includes an emitter brick,
to which :meth:`emit`, :meth:`cost` and :meth:`initial_outputs`
calls are delegated, a feedback brick to which :meth:`feedback`
functionality is delegated, and a pipeline to actually compute
readouts from all the sources (see the `source_names` attribute
of :class:`AbstractReadout`).
The readout computation pipeline is constructed from `merge` and
`post_merge` bricks, whose responsibilities are described in the
respective docstrings.
Parameters
----------
emitter : an instance of :class:`AbstractEmitter`
The emitter component.
feedback_brick : an instance of :class:`AbstractFeedback`
The feedback component.
merge : :class:`~.bricks.Brick`, optional
A brick that takes the sources given in `source_names` as an input
and combines them into a single output. If given, `merge_prototype`
cannot be given.
merge_prototype : :class:`.FeedForward`, optional
If `merge` isn't given, the transformation given by
`merge_prototype` is applied to each input before being summed. By
default a :class:`.Linear` transformation without biases is used.
If given, `merge` cannot be given.
post_merge : :class:`.Feedforward`, optional
This transformation is applied to the merged inputs. By default
:class:`.Bias` is used.
merged_dim : int, optional
The input dimension of `post_merge` i.e. the output dimension of
`merge` (or `merge_prototype`). If not given, it is assumed to be
the same as `readout_dim` (i.e. `post_merge` is assumed to not
change dimensions).
\*\*kwargs : dict
Passed to the parent's constructor.
See Also
--------
:class:`BaseSequenceGenerator` : see how exactly a readout is used
:class:`AbstractEmitter`, :class:`AbstractFeedback`
"""
def __init__(self, emitter=None, feedback_brick=None,
merge=None, merge_prototype=None,
post_merge=None, merged_dim=None, **kwargs):
if not emitter:
emitter = TrivialEmitter(kwargs['readout_dim'])
if not feedback_brick:
feedback_brick = TrivialFeedback(kwargs['readout_dim'])
if not merge:
merge = Merge(input_names=kwargs['source_names'],
prototype=merge_prototype)
if not post_merge:
post_merge = Bias(dim=kwargs['readout_dim'])
if not merged_dim:
merged_dim = kwargs['readout_dim']
self.emitter = emitter
self.feedback_brick = feedback_brick
self.merge = merge
self.post_merge = post_merge
self.merged_dim = merged_dim
children = [self.emitter, self.feedback_brick, self.merge,
self.post_merge]
kwargs.setdefault('children', []).extend(children)
super(Readout, self).__init__(**kwargs)
def _push_allocation_config(self):
self.emitter.readout_dim = self.get_dim('readouts')
self.feedback_brick.output_dim = self.get_dim('outputs')
self.merge.input_names = self.source_names
self.merge.input_dims = self.source_dims
self.merge.output_dim = self.merged_dim
self.post_merge.input_dim = self.merged_dim
self.post_merge.output_dim = self.readout_dim
@application
def readout(self, **kwargs):
merged = self.merge.apply(**{name: kwargs[name]
for name in self.merge.input_names})
merged = self.post_merge.apply(merged)
return merged
@application
def emit(self, readouts):
return self.emitter.emit(readouts)
@application
def cost(self, readouts, outputs):
return self.emitter.cost(readouts, outputs)
@application
def initial_outputs(self, batch_size):
return self.emitter.initial_outputs(batch_size)
@application(outputs=['feedback'])
def feedback(self, outputs):
return self.feedback_brick.feedback(outputs)
def get_dim(self, name):
if name == 'outputs':
return self.emitter.get_dim(name)
elif name == 'feedback':
return self.feedback_brick.get_dim(name)
elif name == 'readouts':
return self.readout_dim
return super(Readout, self).get_dim(name)
@add_metaclass(ABCMeta)
class AbstractEmitter(Brick):
"""The interface for the emitter component of a readout.
Attributes
----------
readout_dim : int
The dimension of the readout. Is given by the
:class:`Readout` brick when allocation configuration
is pushed.
See Also
--------
:class:`Readout`
:class:`SoftmaxEmitter` : for integer outputs
Notes
-----
An important detail about the emitter cost is that it will be
evaluated with inputs of different dimensions so it has to be
flexible enough to handle this. The two ways in which it can be
applied are:
1. In :meth:`BaseSequenceGenerator.cost_matrix` where it will
be applied to the whole sequence at once.
2. In :meth:`BaseSequenceGenerator.generate` where it will be
applied to only one step of the sequence.
"""
@abstractmethod
def emit(self, readouts):
"""Implements the respective method of :class:`Readout`."""
pass
@abstractmethod
def cost(self, readouts, outputs):
"""Implements the respective method of :class:`Readout`."""
pass
@abstractmethod
def initial_outputs(self, batch_size):
"""Implements the respective method of :class:`Readout`."""
pass
@add_metaclass(ABCMeta)
class AbstractFeedback(Brick):
"""The interface for the feedback component of a readout.
See Also
--------
:class:`Readout`
:class:`LookupFeedback` for integer outputs
"""
@abstractmethod
def feedback(self, outputs):
"""Implements the respective method of :class:`Readout`."""
pass
class TrivialEmitter(AbstractEmitter):
"""An emitter for the trivial case when readouts are outputs.
Parameters
----------
readout_dim : int
The dimension of the readout.
Notes
-----
By default :meth:`cost` always returns zero tensor.
"""
@lazy(allocation=['readout_dim'])
def __init__(self, readout_dim, **kwargs):
super(TrivialEmitter, self).__init__(**kwargs)
self.readout_dim = readout_dim
@application
def emit(self, readouts):
return readouts
@application
def cost(self, readouts, outputs):
return tensor.zeros_like(outputs)
@application
def initial_outputs(self, batch_size):
return tensor.zeros((batch_size, self.readout_dim))
def get_dim(self, name):
if name == 'outputs':
return self.readout_dim
return super(TrivialEmitter, self).get_dim(name)
class SoftmaxEmitter(AbstractEmitter, Initializable, Random):
"""A softmax emitter for the case of integer outputs.
Interprets readout elements as energies corresponding to their indices.
Parameters
----------
initial_output : int or a scalar :class:`~theano.Variable`
The initial output.
"""
def __init__(self, initial_output=0, **kwargs):
self.initial_output = initial_output
self.softmax = NDimensionalSoftmax()
children = [self.softmax]
kwargs.setdefault('children', []).extend(children)
super(SoftmaxEmitter, self).__init__(**kwargs)
@application
def probs(self, readouts):
return self.softmax.apply(readouts, extra_ndim=readouts.ndim - 2)
@application
def emit(self, readouts):
probs = self.probs(readouts)
batch_size = probs.shape[0]
pvals_flat = probs.reshape((batch_size, -1))
generated = self.theano_rng.multinomial(pvals=pvals_flat)
return generated.reshape(probs.shape).argmax(axis=-1)
@application
def cost(self, readouts, outputs):
# WARNING: unfortunately this application method works
# just fine when `readouts` and `outputs` have
# different dimensions. Be careful!
return self.softmax.categorical_cross_entropy(
outputs, readouts, extra_ndim=readouts.ndim - 2)
@application
def initial_outputs(self, batch_size):
return self.initial_output * tensor.ones((batch_size,), dtype='int64')
def get_dim(self, name):
if name == 'outputs':
return 0
return super(SoftmaxEmitter, self).get_dim(name)
class TrivialFeedback(AbstractFeedback):
"""A feedback brick for the case when readout are outputs."""
@lazy(allocation=['output_dim'])
def __init__(self, output_dim, **kwargs):
super(TrivialFeedback, self).__init__(**kwargs)
self.output_dim = output_dim
@application(outputs=['feedback'])
def feedback(self, outputs):
return outputs
def get_dim(self, name):
if name == 'feedback':
return self.output_dim
return super(TrivialFeedback, self).get_dim(name)
class LookupFeedback(AbstractFeedback, Initializable):
"""A feedback brick for the case when readout are integers.
Stores and retrieves distributed representations of integers.
"""
def __init__(self, num_outputs=None, feedback_dim=None, **kwargs):
self.num_outputs = num_outputs
self.feedback_dim = feedback_dim
self.lookup = LookupTable(num_outputs, feedback_dim)
children = [self.lookup]
kwargs.setdefault('children', []).extend(children)
super(LookupFeedback, self).__init__(**kwargs)
def _push_allocation_config(self):
self.lookup.length = self.num_outputs
self.lookup.dim = self.feedback_dim
@application
def feedback(self, outputs):
assert self.output_dim == 0
return self.lookup.apply(outputs)
def get_dim(self, name):
if name == 'feedback':
return self.feedback_dim
return super(LookupFeedback, self).get_dim(name)
class FakeAttentionRecurrent(AbstractAttentionRecurrent, Initializable):
"""Adds fake attention interface to a transition.
:class:`BaseSequenceGenerator` requires its transition brick to support
:class:`~blocks.bricks.attention.AbstractAttentionRecurrent` interface,
that is to have an embedded attention mechanism. For the cases when no
attention is required (e.g. language modeling or encoder-decoder
models), :class:`FakeAttentionRecurrent` is used to wrap a usual
recurrent brick. The resulting brick has no glimpses and simply
passes all states and contexts to the wrapped one.
.. todo::
Get rid of this brick and support attention-less transitions
in :class:`BaseSequenceGenerator`.
"""
def __init__(self, transition, **kwargs):
self.transition = transition
self.state_names = transition.apply.states
self.context_names = transition.apply.contexts
self.glimpse_names = []
children = [self.transition]
kwargs.setdefault('children', []).extend(children)
super(FakeAttentionRecurrent, self).__init__(**kwargs)
@application
def apply(self, *args, **kwargs):
return self.transition.apply(*args, **kwargs)
@apply.delegate
def apply_delegate(self):
return self.transition.apply
@application
def compute_states(self, *args, **kwargs):
return self.transition.apply(iterate=False, *args, **kwargs)
@compute_states.delegate
def compute_states_delegate(self):
return self.transition.apply
@application(outputs=[])
def take_glimpses(self, *args, **kwargs):
return None
@application
def initial_states(self, batch_size, *args, **kwargs):
return self.transition.initial_states(batch_size,
*args, **kwargs)
@initial_states.property('outputs')
def initial_states_outputs(self):
return self.transition.apply.states
def get_dim(self, name):
return self.transition.get_dim(name)
class SequenceGenerator(BaseSequenceGenerator):
r"""A more user-friendly interface for :class:`BaseSequenceGenerator`.
Parameters
----------
readout : instance of :class:`AbstractReadout`
The readout component for the sequence generator.
transition : instance of :class:`.BaseRecurrent`
The recurrent transition to be used in the sequence generator.
Will be combined with `attention`, if that one is given.
attention : object, optional
The attention mechanism to be added to ``transition``,
an instance of
:class:`~blocks.bricks.attention.AbstractAttention`.
add_contexts : bool
If ``True``, the
:class:`.AttentionRecurrent` wrapping the
`transition` will add additional contexts for the attended and its
mask.
\*\*kwargs : dict
All keywords arguments are passed to the base class. If `fork`
keyword argument is not provided, :class:`.Fork` is created
that forks all transition sequential inputs without a "mask"
substring in them.
"""
def __init__(self, readout, transition, attention=None,
add_contexts=True, **kwargs):
normal_inputs = [name for name in transition.apply.sequences
if 'mask' not in name]
kwargs.setdefault('fork', Fork(normal_inputs))
if attention:
transition = AttentionRecurrent(
transition, attention,
add_contexts=add_contexts, name="att_trans")
else:
transition = FakeAttentionRecurrent(transition,
name="with_fake_attention")
super(SequenceGenerator, self).__init__(
readout, transition, **kwargs)
|
buildroot/support/testing/tests/init/test_busybox.py | rbrenton/hassos | 617 | 22609 | <gh_stars>100-1000
import infra.basetest
from tests.init.base import InitSystemBase as InitSystemBase
class InitSystemBusyboxBase(InitSystemBase):
config = infra.basetest.BASIC_TOOLCHAIN_CONFIG + \
"""
# BR2_TARGET_ROOTFS_TAR is not set
"""
def check_init(self):
super(InitSystemBusyboxBase, self).check_init("/bin/busybox")
class TestInitSystemBusyboxRo(InitSystemBusyboxBase):
config = InitSystemBusyboxBase.config + \
"""
# BR2_TARGET_GENERIC_REMOUNT_ROOTFS_RW is not set
BR2_TARGET_ROOTFS_SQUASHFS=y
"""
def test_run(self):
self.start_emulator("squashfs")
self.check_init()
self.check_network("eth0", 1)
class TestInitSystemBusyboxRw(InitSystemBusyboxBase):
config = InitSystemBusyboxBase.config + \
"""
BR2_TARGET_ROOTFS_EXT2=y
"""
def test_run(self):
self.start_emulator("ext2")
self.check_init()
self.check_network("eth0", 1)
class TestInitSystemBusyboxRoNet(InitSystemBusyboxBase):
config = InitSystemBusyboxBase.config + \
"""
BR2_SYSTEM_DHCP="eth0"
# BR2_TARGET_GENERIC_REMOUNT_ROOTFS_RW is not set
BR2_TARGET_ROOTFS_SQUASHFS=y
"""
def test_run(self):
self.start_emulator("squashfs")
self.check_init()
self.check_network("eth0")
class TestInitSystemBusyboxRwNet(InitSystemBusyboxBase):
config = InitSystemBusyboxBase.config + \
"""
BR2_SYSTEM_DHCP="eth0"
BR2_TARGET_ROOTFS_EXT2=y
"""
def test_run(self):
self.start_emulator("ext2")
self.check_init()
self.check_network("eth0")
|
download_stats.py | zhengsipeng/kinetics-downloader | 263 | 22611 | import argparse, os
import lib.config as config
import lib.utils as utils
def count_present_and_missing(cls, directory, metadata):
"""
Count present and missing videos for a class based on metadata.
:param cls: The class. If None, count all videos (used for testing videos - no classes).
:param directory: Directory containing the videos.
:param metadata: Kinetics metadata json.
:return: Tuple: number of present videos, number of missing videos
"""
present = 0
missing = 0
for key in metadata:
if cls is None or metadata[key]["annotations"]["label"] == cls:
if os.path.isfile(os.path.join(directory, "{}.mp4".format(key))):
present += 1
else:
missing += 1
return present, missing
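# Illustrative call (hypothetical class name): videos for 'playing guitar' live under
# config.TRAIN_ROOT/playing_guitar, so
#   present, missing = count_present_and_missing(
#       'playing guitar', os.path.join(config.TRAIN_ROOT, 'playing_guitar'), train_metadata)
# counts how many '<video_id>.mp4' files from the metadata exist on disk.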
def main(args):
# load video classes
classes = utils.load_json(config.CLASSES_PATH)
# load lists of videos
train_metadata = utils.load_json(config.TRAIN_METADATA_PATH)
val_metadata = utils.load_json(config.VAL_METADATA_PATH)
test_metadata = utils.load_json(config.TEST_METADATA_PATH)
num_found = 0
total = 0
total_train_present = 0
total_train_missing = 0
total_val_present = 0
total_val_missing = 0
# load subset
subset = None
if args.subset:
subset = utils.load_json(args.subset)
# count train and validation videos
for cls in classes:
if subset is not None and cls not in subset:
continue
total += 1
cls_train_path = os.path.join(config.TRAIN_ROOT, cls.replace(" ", "_"))
cls_valid_path = os.path.join(config.VALID_ROOT, cls.replace(" ", "_"))
train_found = False
valid_found = False
if os.path.isdir(cls_train_path):
train_present, train_missing = count_present_and_missing(cls, cls_train_path, train_metadata)
train_found = True
total_train_present += train_present
total_train_missing += train_missing
if os.path.isdir(cls_valid_path):
valid_present, valid_missing = count_present_and_missing(cls, cls_valid_path, val_metadata)
valid_found = True
total_val_present += valid_present
total_val_missing += valid_missing
if train_found or valid_found:
num_found += 1
if args.details:
print("class {}".format(cls))
if train_found:
print("train: {} / {}".format(train_present, train_present + train_missing))
if valid_found:
print("valid: {} / {}".format(valid_present, valid_present + valid_missing))
print()
# count test videos
test_present, test_missing = count_present_and_missing(None, config.TEST_ROOT, test_metadata)
# print
train_percent_found = 0
if total_train_present > 0:
train_percent_found = (total_train_present * 100) / (total_train_present + total_train_missing)
valid_percent_found = 0
if total_val_present > 0:
valid_percent_found = (total_val_present * 100) / (total_val_present + total_val_missing)
test_percent_found = 0
if test_present > 0:
test_percent_found = (test_present * 100) / (test_present + test_missing)
print("class stats:")
print("\t{:d} / {:d} classes found".format(num_found, total))
print()
print("video stats (only for found classes):")
print("\t{:d} / {:d} ({:.2f}%) train videos found".format(
total_train_present, total_train_present + total_train_missing, train_percent_found))
print("\t{:d} / {:d} ({:.2f}%) valid videos found".format(
total_val_present, total_val_present + total_val_missing, valid_percent_found))
print("\t{:d} / {:d} ({:.2f}%) test videos found".format(
test_present, test_present + test_missing, test_percent_found))
if __name__ == "__main__":
parser = argparse.ArgumentParser("Print statistics about downloaded videos.")
parser.add_argument("-d", "--details", action="store_true", default=False, help="detailed stats for each found class")
parser.add_argument("-s", "--subset", help="path to a JSON file containing a subset of Kinetics classes")
parsed = parser.parse_args()
main(parsed) |
cdlib/evaluation/comparison.py | xing-lab-pitt/cdlib | 248 | 22620 | import numpy as np
from cdlib.evaluation.internal import onmi
from cdlib.evaluation.internal.omega import Omega
from nf1 import NF1
from collections import namedtuple, defaultdict
__all__ = [
"MatchingResult",
"normalized_mutual_information",
"overlapping_normalized_mutual_information_LFK",
"overlapping_normalized_mutual_information_MGH",
"omega",
"f1",
"nf1",
"adjusted_rand_index",
"adjusted_mutual_information",
"variation_of_information",
"partition_closeness_simple",
]
# MatchingResult = namedtuple("MatchingResult", ['mean', 'std'])
MatchingResult = namedtuple("MatchingResult", "score std")
MatchingResult.__new__.__defaults__ = (None,) * len(MatchingResult._fields)
def __check_partition_coverage(first_partition: object, second_partition: object):
nodes_first = {
node: None for community in first_partition.communities for node in community
}
nodes_second = {
node: None for community in second_partition.communities for node in community
}
if len(set(nodes_first.keys()) ^ set(nodes_second.keys())) != 0:
raise ValueError("Both partitions should cover the same node set")
def __check_partition_overlap(first_partition: object, second_partition: object):
if first_partition.overlap or second_partition.overlap:
raise ValueError("Not defined for overlapping partitions")
def normalized_mutual_information(
first_partition: object, second_partition: object
) -> MatchingResult:
"""
Normalized Mutual Information between two clusterings.
    Normalized Mutual Information (NMI) is a normalization of the Mutual
Information (MI) score to scale the results between 0 (no mutual
information) and 1 (perfect correlation). In this function, mutual
information is normalized by ``sqrt(H(labels_true) * H(labels_pred))``
:param first_partition: NodeClustering object
:param second_partition: NodeClustering object
:return: MatchingResult object
:Example:
>>> from cdlib import evaluation, algorithms
>>> g = nx.karate_club_graph()
>>> louvain_communities = algorithms.louvain(g)
>>> leiden_communities = algorithms.leiden(g)
>>> evaluation.normalized_mutual_information(louvain_communities,leiden_communities)
"""
__check_partition_coverage(first_partition, second_partition)
__check_partition_overlap(first_partition, second_partition)
first_partition_c = [
x[1]
for x in sorted(
[
(node, nid)
for nid, cluster in enumerate(first_partition.communities)
for node in cluster
],
key=lambda x: x[0],
)
]
second_partition_c = [
x[1]
for x in sorted(
[
(node, nid)
for nid, cluster in enumerate(second_partition.communities)
for node in cluster
],
key=lambda x: x[0],
)
]
from sklearn.metrics import normalized_mutual_info_score
return MatchingResult(
score=normalized_mutual_info_score(first_partition_c, second_partition_c)
)
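# Illustrative sketch, not part of the cdlib API: normalized_mutual_information,
# adjusted_rand_index and adjusted_mutual_information all flatten a NodeClustering
# into a label vector ordered by node id before calling sklearn. The hypothetical
# helper below isolates that shared step; it is only meant as documentation.
def __communities_to_label_vector(partition: object) -> list:
    # pair each node with the index of its community, then order by node id
    pairs = [
        (node, nid)
        for nid, cluster in enumerate(partition.communities)
        for node in cluster
    ]
    return [nid for _, nid in sorted(pairs, key=lambda x: x[0])]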
def overlapping_normalized_mutual_information_LFK(
first_partition: object, second_partition: object
) -> MatchingResult:
"""
Overlapping Normalized Mutual Information between two clusterings.
Extension of the Normalized Mutual Information (NMI) score to cope with overlapping partitions.
This is the version proposed by Lancichinetti et al. (1)
:param first_partition: NodeClustering object
:param second_partition: NodeClustering object
:return: MatchingResult object
:Example:
>>> from cdlib import evaluation, algorithms
>>> g = nx.karate_club_graph()
>>> louvain_communities = algorithms.louvain(g)
>>> leiden_communities = algorithms.leiden(g)
>>> evaluation.overlapping_normalized_mutual_information_LFK(louvain_communities,leiden_communities)
:Reference:
1. <NAME>., <NAME>., & <NAME>. (2009). Detecting the overlapping and hierarchical community structure in complex networks. New Journal of Physics, 11(3), 033015.
"""
return MatchingResult(
score=onmi.onmi(
[set(x) for x in first_partition.communities],
[set(x) for x in second_partition.communities],
)
)
def overlapping_normalized_mutual_information_MGH(
first_partition: object, second_partition: object, normalization: str = "max"
) -> MatchingResult:
"""
Overlapping Normalized Mutual Information between two clusterings.
Extension of the Normalized Mutual Information (NMI) score to cope with overlapping partitions.
This is the version proposed by McDaid et al. using a different normalization than the original LFR one. See ref.
for more details.
:param first_partition: NodeClustering object
:param second_partition: NodeClustering object
:param normalization: one of "max" or "LFK". Default "max" (corresponds to the main method described in the article)
:return: MatchingResult object
:Example:
>>> from cdlib import evaluation, algorithms
>>> g = nx.karate_club_graph()
>>> louvain_communities = algorithms.louvain(g)
>>> leiden_communities = algorithms.leiden(g)
>>> evaluation.overlapping_normalized_mutual_information_MGH(louvain_communities,leiden_communities)
:Reference:
1. <NAME>., <NAME>., & <NAME>. (2011). Normalized mutual information to evaluate overlapping community finding algorithms. arXiv preprint arXiv:1110.2515. Chicago
"""
if normalization == "max":
variant = "MGH"
elif normalization == "LFK":
variant = "MGH_LFK"
else:
raise ValueError(
"Wrong 'normalization' value. Please specify one among [max, LFK]."
)
return MatchingResult(
score=onmi.onmi(
[set(x) for x in first_partition.communities],
[set(x) for x in second_partition.communities],
variant=variant,
)
)
def omega(first_partition: object, second_partition: object) -> MatchingResult:
"""
Index of resemblance for overlapping, complete coverage, network clusterings.
:param first_partition: NodeClustering object
:param second_partition: NodeClustering object
:return: MatchingResult object
:Example:
>>> from cdlib import evaluation, algorithms
>>> g = nx.karate_club_graph()
>>> louvain_communities = algorithms.louvain(g)
>>> leiden_communities = algorithms.leiden(g)
>>> evaluation.omega(louvain_communities,leiden_communities)
:Reference:
    1. <NAME>, <NAME>, and <NAME>. 2012. `Using the omega index for evaluating abstractive community detection. <https://pdfs.semanticscholar.org/59d6/5d5aa09d789408fd9fd3c009a1b070ff5859.pdf/>`_ In Proceedings of Workshop on Evaluation Metrics and System Comparison for Automatic Summarization. Association for Computational Linguistics, Stroudsburg, PA, USA, 10-18.
"""
__check_partition_coverage(first_partition, second_partition)
first_partition = {k: v for k, v in enumerate(first_partition.communities)}
second_partition = {k: v for k, v in enumerate(second_partition.communities)}
om_idx = Omega(first_partition, second_partition)
return MatchingResult(score=om_idx.omega_score)
def f1(first_partition: object, second_partition: object) -> MatchingResult:
"""
    Compute the average F1 score of the optimal community matches among the partitions in input.
Works on overlapping/non-overlapping complete/partial coverage partitions.
:param first_partition: NodeClustering object
:param second_partition: NodeClustering object
:return: MatchingResult object
:Example:
>>> from cdlib import evaluation, algorithms
>>> g = nx.karate_club_graph()
>>> louvain_communities = algorithms.louvain(g)
>>> leiden_communities = algorithms.leiden(g)
>>> evaluation.f1(louvain_communities,leiden_communities)
:Reference:
    1. <NAME>., <NAME>., & <NAME>. (2016). `A novel approach to evaluate community detection algorithms on ground truth. <https://www.researchgate.net/publication/287204505_A_novel_approach_to_evaluate_community_detection_algorithms_on_ground_truth/>`_ In Complex Networks VII (pp. 133-144). Springer, Cham.
"""
nf = NF1(first_partition.communities, second_partition.communities)
results = nf.summary()
return MatchingResult(
score=results["details"]["F1 mean"][0], std=results["details"]["F1 std"][0]
)
def nf1(first_partition: object, second_partition: object) -> MatchingResult:
"""
    Compute the Normalized F1 score of the optimal community matches among the partitions in input.
Works on overlapping/non-overlapping complete/partial coverage partitions.
:param first_partition: NodeClustering object
:param second_partition: NodeClustering object
:return: MatchingResult object
:Example:
>>> from cdlib import evaluation, algorithms
>>> g = nx.karate_club_graph()
>>> louvain_communities = algorithms.louvain(g)
>>> leiden_communities = algorithms.leiden(g)
>>> evaluation.nf1(louvain_communities,leiden_communities)
:Reference:
    1. <NAME>., <NAME>., & <NAME>. (2016). `A novel approach to evaluate community detection algorithms on ground truth. <https://www.researchgate.net/publication/287204505_A_novel_approach_to_evaluate_community_detection_algorithms_on_ground_truth/>`_
    2. <NAME>. (2017). `RDyn: graph benchmark handling community dynamics. Journal of Complex Networks. <https://academic.oup.com/comnet/article-abstract/5/6/893/3925036?redirectedFrom=PDF/>`_ 5(6), 893-912.
"""
nf = NF1(first_partition.communities, second_partition.communities)
results = nf.summary()
return MatchingResult(score=results["scores"].loc["NF1"][0])
def adjusted_rand_index(
first_partition: object, second_partition: object
) -> MatchingResult:
"""Rand index adjusted for chance.
The Rand Index computes a similarity measure between two clusterings
by considering all pairs of samples and counting pairs that are
assigned in the same or different clusters in the predicted and
true clusterings.
The raw RI score is then "adjusted for chance" into the ARI score
using the following scheme::
ARI = (RI - Expected_RI) / (max(RI) - Expected_RI)
The adjusted Rand index is thus ensured to have a value close to
0.0 for random labeling independently of the number of clusters and
samples and exactly 1.0 when the clusterings are identical (up to
a permutation).
ARI is a symmetric measure::
adjusted_rand_index(a, b) == adjusted_rand_index(b, a)
:param first_partition: NodeClustering object
:param second_partition: NodeClustering object
:return: MatchingResult object
:Example:
>>> from cdlib import evaluation, algorithms
>>> g = nx.karate_club_graph()
>>> louvain_communities = algorithms.louvain(g)
>>> leiden_communities = algorithms.leiden(g)
>>> evaluation.adjusted_rand_index(louvain_communities,leiden_communities)
:Reference:
1. <NAME>., & <NAME>. (1985). `Comparing partitions. <https://link.springer.com/article/10.1007/BF01908075/>`_ Journal of classification, 2(1), 193-218.
"""
__check_partition_coverage(first_partition, second_partition)
__check_partition_overlap(first_partition, second_partition)
first_partition_c = [
x[1]
for x in sorted(
[
(node, nid)
for nid, cluster in enumerate(first_partition.communities)
for node in cluster
],
key=lambda x: x[0],
)
]
second_partition_c = [
x[1]
for x in sorted(
[
(node, nid)
for nid, cluster in enumerate(second_partition.communities)
for node in cluster
],
key=lambda x: x[0],
)
]
from sklearn.metrics import adjusted_rand_score
return MatchingResult(
score=adjusted_rand_score(first_partition_c, second_partition_c)
)
def adjusted_mutual_information(
first_partition: object, second_partition: object
) -> MatchingResult:
"""Adjusted Mutual Information between two clusterings.
Adjusted Mutual Information (AMI) is an adjustment of the Mutual
Information (MI) score to account for chance. It accounts for the fact that
the MI is generally higher for two clusterings with a larger number of
clusters, regardless of whether there is actually more information shared.
For two clusterings :math:`U` and :math:`V`, the AMI is given as::
AMI(U, V) = [MI(U, V) - E(MI(U, V))] / [max(H(U), H(V)) - E(MI(U, V))]
This metric is independent of the absolute values of the labels:
a permutation of the class or cluster label values won't change the
score value in any way.
This metric is furthermore symmetric: switching ``label_true`` with
``label_pred`` will return the same score value. This can be useful to
measure the agreement of two independent label assignments strategies
on the same dataset when the real ground truth is not known.
Be mindful that this function is an order of magnitude slower than other
metrics, such as the Adjusted Rand Index.
:param first_partition: NodeClustering object
:param second_partition: NodeClustering object
:return: MatchingResult object
:Example:
>>> from cdlib import evaluation, algorithms
>>> g = nx.karate_club_graph()
>>> louvain_communities = algorithms.louvain(g)
>>> leiden_communities = algorithms.leiden(g)
>>> evaluation.adjusted_mutual_information(louvain_communities,leiden_communities)
:Reference:
1. <NAME>., <NAME>., & <NAME>. (2010). `Information theoretic measures for clusterings comparison: Variants, properties, normalization and correction for chance. <http://jmlr.csail.mit.edu/papers/volume11/vinh10a/vinh10a.pdf/>`_ Journal of Machine Learning Research, 11(Oct), 2837-2854.
"""
__check_partition_coverage(first_partition, second_partition)
__check_partition_overlap(first_partition, second_partition)
first_partition_c = [
x[1]
for x in sorted(
[
(node, nid)
for nid, cluster in enumerate(first_partition.communities)
for node in cluster
],
key=lambda x: x[0],
)
]
second_partition_c = [
x[1]
for x in sorted(
[
(node, nid)
for nid, cluster in enumerate(second_partition.communities)
for node in cluster
],
key=lambda x: x[0],
)
]
from sklearn.metrics import adjusted_mutual_info_score
return MatchingResult(
score=adjusted_mutual_info_score(first_partition_c, second_partition_c)
)
def variation_of_information(
first_partition: object, second_partition: object
) -> MatchingResult:
"""Variation of Information among two nodes partitions.
$$ H(p)+H(q)-2MI(p, q) $$
    where MI is the mutual information, H the partition entropy, and p, q are the two community partitions being compared
:param first_partition: NodeClustering object
:param second_partition: NodeClustering object
:return: MatchingResult object
:Example:
>>> from cdlib import evaluation, algorithms
>>> g = nx.karate_club_graph()
>>> louvain_communities = algorithms.louvain(g)
>>> leiden_communities = algorithms.leiden(g)
>>> evaluation.variation_of_information(louvain_communities,leiden_communities)
:Reference:
1. Meila, M. (2007). `Comparing clusterings - an information based distance. <https://www.sciencedirect.com/science/article/pii/S0047259X06002016/>`_ Journal of Multivariate Analysis, 98, 873-895. doi:10.1016/j.jmva.2006.11.013
"""
__check_partition_coverage(first_partition, second_partition)
__check_partition_overlap(first_partition, second_partition)
n = float(sum([len(c1) for c1 in first_partition.communities]))
sigma = 0.0
for c1 in first_partition.communities:
p = len(c1) / n
for c2 in second_partition.communities:
q = len(c2) / n
r = len(set(c1) & set(c2)) / n
if r > 0.0:
sigma += r * (np.log2(r / p) + np.log2(r / q))
return MatchingResult(score=abs(sigma))
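# Illustrative sketch, not part of the cdlib API: the same Meila formula applied
# to two toy partitions given as plain lists of node lists, without NodeClustering
# wrappers, to make the computation above concrete. The helper name is made up;
# np is the module-level numpy import.
def _variation_of_information_on_raw_communities(first: list, second: list) -> float:
    n = float(sum(len(c) for c in first))
    sigma = 0.0
    for c1 in first:
        p = len(c1) / n
        for c2 in second:
            q = len(c2) / n
            r = len(set(c1) & set(c2)) / n
            if r > 0.0:
                sigma += r * (np.log2(r / p) + np.log2(r / q))
    return abs(sigma)
# Example: splitting one 4-node community into two halves costs exactly one bit:
# _variation_of_information_on_raw_communities([[1, 2, 3, 4]], [[1, 2], [3, 4]]) == 1.0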
def partition_closeness_simple(
first_partition: object, second_partition: object
) -> MatchingResult:
"""Community size density closeness.
Simple implementation that does not leverage kernel density estimator.
    $$ S_G(A,B) = \frac{1}{2} \sum_{i=1}^{r}\sum_{j=1}^{s} \min\left(\frac{n^a(x^a_i)}{N^a}, \frac{n^b(x^b_j)}{N^b}\right) \delta(x_i^a,x_j^b) $$
where:
$$ N^a $$ total number of communities in A of any size;
$$ x^a $$ ordered list of community sizes for A;
$$ n^a $$ multiplicity of community sizes for A.
(symmetrically for B)
:param first_partition: NodeClustering object
:param second_partition: NodeClustering object
:return: MatchingResult object
:Example:
>>> from cdlib import evaluation, algorithms
>>> g = nx.karate_club_graph()
>>> louvain_communities = algorithms.louvain(g)
>>> leiden_communities = algorithms.leiden(g)
>>> evaluation.partition_closeness_simple(louvain_communities,leiden_communities)
:Reference:
1. Dao, Vinh-Loc, <NAME>, and <NAME>. "Estimating the similarity of community detection methods based on cluster size distribution." International Conference on Complex Networks and their Applications. Springer, Cham, 2018.
"""
coms_a = sorted(list(set([len(c) for c in first_partition.communities])))
freq_a = defaultdict(int)
for a in coms_a:
freq_a[a] += 1
freq_a = [freq_a[a] for a in sorted(freq_a)]
n_a = sum([coms_a[i] * freq_a[i] for i in range(0, len(coms_a))])
coms_b = sorted(list(set([len(c) for c in second_partition.communities])))
freq_b = defaultdict(int)
for b in coms_b:
freq_b[b] += 1
freq_b = [freq_b[b] for b in sorted(freq_b)]
n_b = sum([coms_b[i] * freq_b[i] for i in range(0, len(coms_b))])
closeness = 0
for i in range(0, len(coms_a)):
for j in range(0, len(coms_b)):
if coms_a[i] == coms_b[j]:
closeness += min(
(coms_a[i] * freq_a[i]) / n_a, (coms_b[j] * freq_b[j]) / n_b
)
closeness *= 0.5
return MatchingResult(score=closeness)
|
textclf/tester/dl_tester.py | lswjkllc/textclf | 146 | 22644 | <reponame>lswjkllc/textclf<gh_stars>100-1000
import torch
from transformers import BertTokenizer
from .base_tester import Tester
from textclf.utils.raw_data import create_tokenizer
from textclf.utils.create import create_instance
from textclf.config import DLTesterConfig
from textclf.data.dictionary import Dictionary
class DLTester(Tester):
"""负责Deep Learning model的测试"""
def __init__(self, config: DLTesterConfig):
super().__init__(config)
self.tokenizer = create_tokenizer(self.config.tokenizer)
self.use_cuda = self.config.use_cuda and torch.cuda.is_available()
print(f"Load checkpoint from {self.config.model_path}..")
checkpoint = torch.load(self.config.model_path)
self.model_conf, self.dictionary, self.label2id = \
checkpoint["info_for_test"]
self.model = create_instance(self.model_conf)
self.model.load_state_dict(checkpoint['model_state_dict'])
self.classes = sorted(self.label2id, key=self.label2id.get)
def _preprocess(self, text):
text_tokenized = self.tokenizer(text)
if isinstance(self.dictionary, Dictionary):
text_processed = self.dictionary.tokens_to_tensor(
text_tokenized, max_len=self.config.max_len
)
text_len = (text_processed != self.dictionary.pad()).sum()
elif isinstance(self.dictionary, BertTokenizer):
text_processed = torch.LongTensor(
self.dictionary.encode(text_tokenized, add_special_tokens=True)[:-1])
max_len = self.config.max_len
pad_id = self.dictionary.pad_token_id
if len(text_processed) >= max_len:
text_processed = text_processed[:max_len]
else:
text_processed = torch.cat([
text_processed,
torch.ones(max_len-len(text_processed)).long()*pad_id
])
text_len = (text_processed != pad_id).sum()
if self.use_cuda:
text_processed = text_processed.cuda()
text_len = text_len.cuda()
return text_processed.unsqueeze(0), text_len.unsqueeze(0)
def predict_label(self, text):
text_processed, text_len = self._preprocess(text)
self.model.eval()
with torch.no_grad():
logits = self.model(text_processed, text_len)
label_id = torch.argmax(logits)
return self.classes[label_id]
def predict_prob(self, text):
text_processed, text_len = self._preprocess(text)
self.model.eval()
with torch.no_grad():
logits = self.model(text_processed, text_len)[0]
return torch.softmax(logits, dim=0).tolist()
def get_all_labels(self):
return self.classes
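# Illustrative usage sketch, not part of textclf: it only shows the intended call
# sequence. Building a DLTesterConfig is project-specific (it normally comes from
# the config loader), so the `config` argument here is assumed to be provided.
def _demo_dl_tester(config: DLTesterConfig, text: str = "this movie is great"):
    tester = DLTester(config)              # loads the checkpoint named in config.model_path
    print(tester.get_all_labels())         # label set stored alongside the checkpoint
    print(tester.predict_label(text))      # single best label for the input text
    print(tester.predict_prob(text))       # per-class probabilities for the input text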
|
ch10/myproject_virtualenv/src/django-myproject/myproject/settings/production.py | PacktPublishing/Django-3-Web-Development-Cookbook | 159 | 22663 | from ._base import *
DEBUG = False
WEBSITE_URL = "https://example.com" # without trailing slash
MEDIA_URL = f"{WEBSITE_URL}/media/"
|
tools/generate_things/generate_navigation.py | akalenuk/wordsandbuttons | 367 | 22675 | <reponame>akalenuk/wordsandbuttons
import os
import subprocess
PAGES_DIR = "../../pages"
keyword_note = {
'tutorials': '',
'demos': '',
'quizzes': '',
'mathematics': '',
'algorithms': '',
'programming': 'By the way, if you prefer books to blogs, <a href="https://wordsandbuttons.online/SYTYKC.pdf">there is a free book</a> that was originally made from this section.'
}
index_title = 'Hello, world!'
index_description = 'This is <i>Words and Buttons Online</i> — a growing collection of interactive tutorials, demos, and quizzes about maths, algorithms, and programming.'
all_span_ids = []
def read_index_spans(path):
global all_span_ids
index_spans = []
for file_name in os.listdir(path):
if os.path.isfile(path + '/' + file_name):
if file_name.endswith('.html'):
html = open(path + '/' + file_name, 'r')
text = html.read()
html.close()
spans = text.split('<span id="index_')
if spans != []:
spans = spans[1:]
Spans = text.split('<Span id="index_')
if Spans != []:
Spans = Spans[1:]
span_ids = ['index_' + s.split('"')[0] for s in spans]
span_titles = [s.split('>')[1].split('<')[0].lower() for s in spans]
span_ids += ['index_' + s.split('"')[0] for s in Spans]
span_titles += [s.split('>')[1].split('<')[0] for s in Spans]
for i in range(0, len(span_ids)):
index_spans += [ (file_name, span_ids[i], span_titles[i]) ]
for span_id in span_ids:
if span_id in all_span_ids:
print('Duplicated index span id: ' + span_id + " in " + file_name)
all_span_ids += [span_id]
return index_spans
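# Illustrative sketch, not part of the original generator: the same split-based
# extraction applied to one hand-written HTML fragment, to make the parsing in
# read_index_spans concrete. The sample markup below is made up for the example.
def _demo_index_span_parsing():
    text = 'before <span id="index_polynomials">Polynomials</span> after'
    spans = text.split('<span id="index_')[1:]
    span_ids = ['index_' + s.split('"')[0] for s in spans]
    span_titles = [s.split('>')[1].split('<')[0].lower() for s in spans]
    return list(zip(span_ids, span_titles))  # [('index_polynomials', 'polynomials')]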
date_link_title_description_keywords = []
all_keywords = set()
for filename in os.listdir(PAGES_DIR):
if filename == 'index.html':
continue
if filename == 'faq.html':
continue
if filename.endswith(".html"):
f = open(PAGES_DIR + "/" + filename, 'rt')
content = f.read()
        f.close()
if content.find("meta name=\"keywords\"") == -1:
continue
date_from_git = subprocess.run(["git", "log", "--reverse", "--date=iso", "--format=%cd", "--", filename], \
cwd=PAGES_DIR, \
stdout=subprocess.PIPE)
full_date = date_from_git.stdout.decode('utf-8')
date = full_date.split(' ')[0]
title = content.split("<title>")[1].split("</title>")[0]
description = content.split('<meta name="description" content="')[1].split('">')[0]
keywords = content.split('<meta name="keywords" content="')[1].split('">')[0].split(', ')
if keywords[0] == "":
continue
date_link_title_description_keywords += [(date, filename, title, description, keywords)]
all_keywords.update(keywords)
date_link_title_description_keywords.sort()
# index
f = open('index.template')
template = f.read()
f.close()
index = '%s' % template
f = open('links.txt')
links = f.readlines()
f.close()
links_html = '<h1>More interactive learning</h1>'
for link in links:
if link.strip().find(' ') != -1:
url = link.split(' ')[0]
title_chunks = link.split(' ')[1:]
title = title_chunks[0]
for chunk in title_chunks[1:]: # no hanging short words
if len(chunk) < 2:
                title += '&nbsp;' + chunk  # non-breaking space so short words do not hang alone
else:
title += ' ' + chunk
links_html += '<p style="margin-bottom: 12pt;">'+title+'<br><a href="'+url+'">'+url+'</a></p>\n'
menu = '<p class="links" style="width: 555pt;">'
for (kw, _) in keyword_note.items():
menu += '<nobr><a style="padding-right: 4pt;" href="all_' + kw + '.html">#' + kw + '</a></nobr> '
menu += '</p>'
# index is now real index not a timeline
the_index = '<h1 title="A real index on index.html! How cool is that!">Index</h1>'
spans = read_index_spans(PAGES_DIR)
cur_letter = ''
for (f, i, t) in sorted(spans, key = lambda fit: fit[2].upper()):
letter = t[0].upper()
if cur_letter != letter:
if cur_letter != '':
the_index += '</p>\n'
the_index += '<h2>'+letter+'</h2>\n'
the_index += '<p class="index_items">\n'
cur_letter = letter
the_index += '<nobr><a style="padding-right: 24pt;" href="' + f + '#' + i + '">' + t + '</a></nobr>\n'
the_index += '</p>\n'
index = index.replace('<h1>Title</h1>', '<h1>' + index_title + '</h1>')
index = index.replace('<p>Description</p>', '<p style="width: 555pt;">' + index_description + '</p>')
index = index.replace('<div id="menu"></div>', '\n' + menu + '\n')
index = index.replace('<p>Note</p>', '')
index = index.replace('<div id="timeline"></div>', '\n' + the_index + '\n')
index = index.replace('<div id="links"></div>', '\n' + links_html + '\n')
f = open('../../pages/' + 'index.html', 'w')
f.write(index)
f.close()
# tag's all_* pages
for title in list(all_keywords):
page = '%s' % template
timeline = ''
menu = '<p class="links" style="width: 555pt;">'
for (kw, _) in keyword_note.items():
if kw == title:
menu += '<nobr><span style="padding-right: 4pt; color: #999;">#' + kw + '</span></nobr> '
else:
menu += '<nobr><a style="padding-right: 4pt;" href="all_' + kw + '.html">#' + kw + '</a></nobr> '
menu += '</p>'
for (d, l, t, desc, kwds) in date_link_title_description_keywords[::-1]:
if not title in kwds:
continue
timeline += '<p class="title">' + '<a href="' + l + '">' + t + '</a></p>\n'
timeline += '<p class="description">' + desc + '</p>\n'
timeline += '<p class="links">'
for kw in sorted(list(kwds)):
if kw == title:
timeline += '<span style="padding-right: 8pt; color: #999;">#' + kw + '</span> '
else:
timeline += '<a style="padding-right: 8pt;" href="all_' + kw + '.html">#' + kw + '</a> '
timeline += '</p>\n'
page = page.replace('<h1>Title</h1>', '<h1><a href="index.html">Words and Buttons</a>: ' + title + '</h1>')
page = page.replace('<p>Description</p>', '')
page = page.replace('<div id="menu"></div>', '\n' + menu + '\n')
page = page.replace('<p>Note</p>', '<p style="width: 555pt;">' + keyword_note[title] + '</p>')
page = page.replace('<div id="timeline"></div>', '\n' + timeline + '\n')
page = page.replace('<div id="links"></div>', '')
f = open('../../pages/all_' + title + '.html', 'w')
f.write(page)
    f.close()
|
test-framework/test-suites/integration/tests/add/test_add_host_bonded.py | knutsonchris/stacki | 123 | 22691 | <filename>test-framework/test-suites/integration/tests/add/test_add_host_bonded.py
import json
from textwrap import dedent
import pytest
@pytest.mark.usefixtures("add_host_with_interface")
class TestAddHostBonded:
def test_no_hosts(self, host):
result = host.run('stack add host bonded')
assert result.rc == 255
assert result.stderr == dedent('''\
error - "host" argument is required
{host} [channel=string] [interfaces=string] [ip=string] [name=string] [network=string] [options=string]
''')
def test_no_matching_hosts(self, host):
result = host.run('stack add host bonded a:test')
assert result.rc == 255
assert result.stderr == dedent('''\
error - "host" argument is required
{host} [channel=string] [interfaces=string] [ip=string] [name=string] [network=string] [options=string]
''')
def test_multiple_hosts(self, host):
result = host.run('stack add host bonded frontend-0-0 backend-0-0')
assert result.rc == 255
assert result.stderr == dedent('''\
error - "host" argument must be unique
{host} [channel=string] [interfaces=string] [ip=string] [name=string] [network=string] [options=string]
''')
def test_no_channel(self, host):
result = host.run('stack add host bonded backend-0-0')
assert result.rc == 255
assert result.stderr == dedent('''\
error - "channel" parameter is required
{host} [channel=string] [interfaces=string] [ip=string] [name=string] [network=string] [options=string]
''')
def test_no_interfaces(self, host):
result = host.run('stack add host bonded backend-0-0 channel=bond0')
assert result.rc == 255
assert result.stderr == dedent('''\
error - "interfaces" parameter is required
{host} [channel=string] [interfaces=string] [ip=string] [name=string] [network=string] [options=string]
''')
def test_no_ip(self, host):
result = host.run('stack add host bonded backend-0-0 channel=bond0 '
'interfaces=eth0,eth1')
assert result.rc == 255
assert result.stderr == dedent('''\
error - "ip" parameter is required
{host} [channel=string] [interfaces=string] [ip=string] [name=string] [network=string] [options=string]
''')
def test_no_network(self, host):
result = host.run('stack add host bonded backend-0-0 channel=bond0 '
'interfaces=eth0,eth1 ip=192.168.0.1')
assert result.rc == 255
assert result.stderr == dedent('''\
error - "network" parameter is required
{host} [channel=string] [interfaces=string] [ip=string] [name=string] [network=string] [options=string]
''')
def test_invalid_network(self, host):
# Add a second interface to our backend
result = host.run('stack add host interface backend-0-0 interface=eth1')
assert result.rc == 0
# Add the bonded interface
result = host.run('stack add host bonded backend-0-0 channel=bond0 '
'interfaces=eth0,eth1 ip=192.168.0.1 network=test')
assert result.rc == 255
assert result.stderr == 'error - network "test" does not exist\n'
def test_missing_interface(self, host):
result = host.run('stack add host bonded backend-0-0 channel=bond0 '
'interfaces=eth0,eth1 ip=192.168.0.1 network=private')
assert result.rc == 255
assert result.stderr == 'error - interface "eth1" does not exist for host "backend-0-0"\n'
def test_comma_seperated_interfaces(self, host):
# Add a second interface to our backend
result = host.run('stack add host interface backend-0-0 interface=eth1')
assert result.rc == 0
# Add the bonded interface
result = host.run('stack add host bonded backend-0-0 channel=bond0 '
'interfaces=eth0,eth1 ip=192.168.0.1 network=private')
assert result.rc == 0
# Check the interface is in the database now
result = host.run('stack list host interface backend-0-0 output-format=json')
assert result.rc == 0
assert json.loads(result.stdout) == [
{
'channel': None,
'default': None,
'host': 'backend-0-0',
'interface': 'bond0',
'ip': '192.168.0.1',
'mac': None,
'module': 'bonding',
'name': 'backend-0-0',
'network': 'private',
'options': None,
'vlan': None
},
{
'channel': 'bond0',
'default': None,
'host': 'backend-0-0',
'interface': 'eth0',
'ip': None,
'mac': None,
'module': None,
'name': 'backend-0-0',
'network': None,
'options': None,
'vlan': None
},
{
'channel': 'bond0',
'default': None,
'host': 'backend-0-0',
'interface': 'eth1',
'ip': None,
'mac': None,
'module': None,
'name': 'backend-0-0',
'network': None,
'options': None,
'vlan': None
}
]
def test_space_seperated_interfaces(self, host):
# Add a second interface to our backend
result = host.run('stack add host interface backend-0-0 interface=eth1')
assert result.rc == 0
# Add the bonded interface
result = host.run('stack add host bonded backend-0-0 channel=bond0 '
'interfaces="eth0 eth1" ip=192.168.0.1 network=private')
assert result.rc == 0
# Check the interface is in the database now
result = host.run('stack list host interface backend-0-0 output-format=json')
assert result.rc == 0
assert json.loads(result.stdout) == [
{
'channel': None,
'default': None,
'host': 'backend-0-0',
'interface': 'bond0',
'ip': '192.168.0.1',
'mac': None,
'module': 'bonding',
'name': 'backend-0-0',
'network': 'private',
'options': None,
'vlan': None
},
{
'channel': 'bond0',
'default': None,
'host': 'backend-0-0',
'interface': 'eth0',
'ip': None,
'mac': None,
'module': None,
'name': 'backend-0-0',
'network': None,
'options': None,
'vlan': None
},
{
'channel': 'bond0',
'default': None,
'host': 'backend-0-0',
'interface': 'eth1',
'ip': None,
'mac': None,
'module': None,
'name': 'backend-0-0',
'network': None,
'options': None,
'vlan': None
}
]
def test_default_with_options(self, host):
# Add a second interface to our backend
result = host.run('stack add host interface backend-0-0 interface=eth1 default=true')
assert result.rc == 0
# Add the bonded interface
result = host.run('stack add host bonded backend-0-0 channel=bond0 '
'interfaces=eth0,eth1 ip=192.168.0.1 network=private options=test_options')
assert result.rc == 0
# Check the interface is in the database now
result = host.run('stack list host interface backend-0-0 output-format=json')
assert result.rc == 0
assert json.loads(result.stdout) == [
{
'channel': None,
'default': True,
'host': 'backend-0-0',
'interface': 'bond0',
'ip': '192.168.0.1',
'mac': None,
'module': 'bonding',
'name': 'backend-0-0',
'network': 'private',
'options': 'bonding-opts="test_options"',
'vlan': None
},
{
'channel': 'bond0',
'default': None,
'host': 'backend-0-0',
'interface': 'eth0',
'ip': None,
'mac': None,
'module': None,
'name': 'backend-0-0',
'network': None,
'options': None,
'vlan': None
},
{
'channel': 'bond0',
'default': None,
'host': 'backend-0-0',
'interface': 'eth1',
'ip': None,
'mac': None,
'module': None,
'name': 'backend-0-0',
'network': None,
'options': None,
'vlan': None
}
]
|
tests/test_connect.py | mkniewallner/edgedb-python | 214 | 22706 | #
# This source file is part of the EdgeDB open source project.
#
# Copyright 2016-present MagicStack Inc. and the EdgeDB authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import socket
import edgedb
from edgedb import _testbase as tb
class TestConnect(tb.AsyncQueryTestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.port = cls._get_free_port()
@classmethod
def _get_free_port(cls):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
sock.bind(('127.0.0.1', 0))
return sock.getsockname()[1]
except Exception:
return None
finally:
sock.close()
async def test_connect_async_01(self):
orig_conn_args = self.get_connect_args()
conn_args = orig_conn_args.copy()
conn_args['port'] = self.port
conn_args['wait_until_available'] = 0
with self.assertRaisesRegex(
edgedb.ClientConnectionError,
f'(?s).*Is the server running.*port {self.port}.*'):
conn_args['host'] = '127.0.0.1'
await edgedb.async_connect(**conn_args)
with self.assertRaisesRegex(
edgedb.ClientConnectionError,
f'(?s).*Is the server running.*port {self.port}.*'):
conn_args['host'] = orig_conn_args['host']
await edgedb.async_connect(**conn_args)
def test_connect_sync_01(self):
orig_conn_args = self.get_connect_args()
conn_args = orig_conn_args.copy()
conn_args['port'] = self.port
conn_args['wait_until_available'] = 0
with self.assertRaisesRegex(
edgedb.ClientConnectionError,
f'(?s).*Is the server running.*port {self.port}.*'):
conn_args['host'] = '127.0.0.1'
edgedb.connect(**conn_args)
with self.assertRaisesRegex(
edgedb.ClientConnectionError,
f'(?s).*Is the server running.*port {self.port}.*'):
conn_args['host'] = orig_conn_args['host']
edgedb.connect(**conn_args)
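# Illustrative sketch, not part of the test suite: the port grabbed by
# _get_free_port() is only *likely* to stay free -- binding to port 0 and closing
# the socket just asks the OS for a currently unused port. A minimal standalone
# version of the same trick (the function name is made up):
def _find_probably_free_port(host: str = "127.0.0.1") -> int:
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        sock.bind((host, 0))
        return sock.getsockname()[1]
    finally:
        sock.close()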
|
anchore/anchore-modules/queries/show-familytree.py | berez23/anchore | 401 | 22729 | #!/usr/bin/env python
import sys
import os
import re
import json
import traceback
import anchore.anchore_utils
# main routine
try:
    config = anchore.anchore_utils.init_query_cmdline(sys.argv, "params: all\nhelp: shows the image family tree.")
except Exception as err:
print str(err)
sys.exit(1)
if not config:
sys.exit(0)
if len(config['params']) <= 0:
print "Query requires input: all"
warns = list()
outlist = list()
outlist.append(["Image_Id", "Repo_Tags", "Image Type"])
try:
idata = anchore.anchore_utils.load_image_report(config['imgid'])
ftree = idata['familytree']
for fid in ftree:
tags = "unknown"
itype = "unknown"
try:
fdata = anchore.anchore_utils.load_image_report(fid)
tags = ','.join(fdata['anchore_all_tags'])
if not tags:
tags = "none"
itype = fdata['meta']['usertype']
if not itype:
itype = "intermediate"
except:
warns.append("family tree id ("+str(fid)+") does not appear to have been analyzed, no data for this member of the tree")
outlist.append([fid, str(tags), str(itype)])
except Exception as err:
# handle the case where something wrong happened
import traceback
traceback.print_exc()
warns.append("query error: "+str(err))
pass
anchore.anchore_utils.write_kvfile_fromlist(config['output'], outlist)
if len(warns) > 0:
anchore.anchore_utils.write_plainfile_fromlist(config['output_warns'], warns)
sys.exit(0)
|
h2o-py/tests/testdir_jira/pyunit_pubdev_7353_reset_threshold.py | vishalbelsare/h2o-3 | 6,098 | 22741 | import sys
sys.path.insert(1,"../../")
import h2o
from tests import pyunit_utils
from h2o.estimators.gbm import H2OGradientBoostingEstimator
from h2o.utils.model_utils import reset_model_threshold
def test_reset_threshold():
"""
Test the model threshold can be reset.
Performance metric should be recalculated and also predictions should be changed based on the new threshold.
"""
# import data
airlines = h2o.import_file(path=pyunit_utils.locate("smalldata/airlines/modified_airlines.csv"))
# convert columns to factors
airlines["Year"] = airlines["Year"].asfactor()
airlines["Month"] = airlines["Month"].asfactor()
airlines["DayOfWeek"] = airlines["DayOfWeek"].asfactor()
airlines["Cancelled"] = airlines["Cancelled"].asfactor()
airlines['FlightNum'] = airlines['FlightNum'].asfactor()
# set the predictor names and the response column name
predictors = ["Origin", "Dest", "Year", "UniqueCarrier", "DayOfWeek", "Month", "Distance", "FlightNum"]
response = "IsDepDelayed"
# split into train and validation sets
train, valid = airlines.split_frame(ratios = [.8], seed = 1234)
# initialize the estimator
model = H2OGradientBoostingEstimator(seed = 1234, ntrees=5)
# train the model
model.train(x=predictors, y=response, training_frame=train)
old_threshold = model._model_json['output']['default_threshold']
# predict
preds = model.predict(airlines)
# reset the threshold and get the old one
new_threshold = 0.6917189903082518
old_returned = reset_model_threshold(model, new_threshold)
reset_model = h2o.get_model(model.model_id)
reset_threshold = reset_model._model_json['output']['default_threshold']
# predict with reset model
preds_reset = reset_model.predict(airlines)
# compare thresholds
assert old_threshold == old_returned
assert new_threshold == reset_threshold
assert reset_threshold != old_threshold
# compare predictions
preds_local = preds.as_data_frame()
preds_reset_local = preds_reset.as_data_frame()
print("old threshold:", old_threshold, "new_threshold:", new_threshold)
for i in range(airlines.nrow):
if old_threshold <= preds_local.iloc[i, 2] < new_threshold:
assert preds_local.iloc[i, 0] != preds_reset_local.iloc[i, 0]
else:
assert preds_local.iloc[i, 0] == preds_reset_local.iloc[i, 0]
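# Illustrative sketch (hypothetical helper, not part of h2o): the assertion loop
# above relies on one fact -- raising the threshold from old_threshold to
# new_threshold flips the predicted label only for rows whose positive-class
# probability lies in [old_threshold, new_threshold).
def _label_flips(p1, old_t, new_t):
    """Return True if a row with positive probability p1 changes label when the threshold is raised from old_t to new_t."""
    return old_t <= p1 < new_t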
if __name__ == "__main__":
pyunit_utils.standalone_test(test_reset_threshold)
else:
test_reset_threshold()
|
tests/mixins.py | jarkkorantala/sqlalchemy-utils | 879 | 22758 | import pytest
import sqlalchemy as sa
class ThreeLevelDeepOneToOne(object):
@pytest.fixture
def Catalog(self, Base, Category):
class Catalog(Base):
__tablename__ = 'catalog'
id = sa.Column('_id', sa.Integer, primary_key=True)
category = sa.orm.relationship(
Category,
uselist=False,
backref='catalog'
)
return Catalog
@pytest.fixture
def Category(self, Base, SubCategory):
class Category(Base):
__tablename__ = 'category'
id = sa.Column('_id', sa.Integer, primary_key=True)
catalog_id = sa.Column(
'_catalog_id',
sa.Integer,
sa.ForeignKey('catalog._id')
)
sub_category = sa.orm.relationship(
SubCategory,
uselist=False,
backref='category'
)
return Category
@pytest.fixture
def SubCategory(self, Base, Product):
class SubCategory(Base):
__tablename__ = 'sub_category'
id = sa.Column('_id', sa.Integer, primary_key=True)
category_id = sa.Column(
'_category_id',
sa.Integer,
sa.ForeignKey('category._id')
)
product = sa.orm.relationship(
Product,
uselist=False,
backref='sub_category'
)
return SubCategory
@pytest.fixture
def Product(self, Base):
class Product(Base):
__tablename__ = 'product'
id = sa.Column('_id', sa.Integer, primary_key=True)
price = sa.Column(sa.Integer)
sub_category_id = sa.Column(
'_sub_category_id',
sa.Integer,
sa.ForeignKey('sub_category._id')
)
return Product
@pytest.fixture
def init_models(self, Catalog, Category, SubCategory, Product):
pass
class ThreeLevelDeepOneToMany(object):
@pytest.fixture
def Catalog(self, Base, Category):
class Catalog(Base):
__tablename__ = 'catalog'
id = sa.Column('_id', sa.Integer, primary_key=True)
categories = sa.orm.relationship(Category, backref='catalog')
return Catalog
@pytest.fixture
def Category(self, Base, SubCategory):
class Category(Base):
__tablename__ = 'category'
id = sa.Column('_id', sa.Integer, primary_key=True)
catalog_id = sa.Column(
'_catalog_id',
sa.Integer,
sa.ForeignKey('catalog._id')
)
sub_categories = sa.orm.relationship(
SubCategory, backref='category'
)
return Category
@pytest.fixture
def SubCategory(self, Base, Product):
class SubCategory(Base):
__tablename__ = 'sub_category'
id = sa.Column('_id', sa.Integer, primary_key=True)
category_id = sa.Column(
'_category_id',
sa.Integer,
sa.ForeignKey('category._id')
)
products = sa.orm.relationship(
Product,
backref='sub_category'
)
return SubCategory
@pytest.fixture
def Product(self, Base):
class Product(Base):
__tablename__ = 'product'
id = sa.Column('_id', sa.Integer, primary_key=True)
price = sa.Column(sa.Numeric)
sub_category_id = sa.Column(
'_sub_category_id',
sa.Integer,
sa.ForeignKey('sub_category._id')
)
def __repr__(self):
return '<Product id=%r>' % self.id
return Product
@pytest.fixture
def init_models(self, Catalog, Category, SubCategory, Product):
pass
class ThreeLevelDeepManyToMany(object):
@pytest.fixture
def Catalog(self, Base, Category):
catalog_category = sa.Table(
'catalog_category',
Base.metadata,
sa.Column('catalog_id', sa.Integer, sa.ForeignKey('catalog._id')),
sa.Column('category_id', sa.Integer, sa.ForeignKey('category._id'))
)
class Catalog(Base):
__tablename__ = 'catalog'
id = sa.Column('_id', sa.Integer, primary_key=True)
categories = sa.orm.relationship(
Category,
backref='catalogs',
secondary=catalog_category
)
return Catalog
@pytest.fixture
def Category(self, Base, SubCategory):
category_subcategory = sa.Table(
'category_subcategory',
Base.metadata,
sa.Column(
'category_id',
sa.Integer,
sa.ForeignKey('category._id')
),
sa.Column(
'subcategory_id',
sa.Integer,
sa.ForeignKey('sub_category._id')
)
)
class Category(Base):
__tablename__ = 'category'
id = sa.Column('_id', sa.Integer, primary_key=True)
sub_categories = sa.orm.relationship(
SubCategory,
backref='categories',
secondary=category_subcategory
)
return Category
@pytest.fixture
def SubCategory(self, Base, Product):
subcategory_product = sa.Table(
'subcategory_product',
Base.metadata,
sa.Column(
'subcategory_id',
sa.Integer,
sa.ForeignKey('sub_category._id')
),
sa.Column(
'product_id',
sa.Integer,
sa.ForeignKey('product._id')
)
)
class SubCategory(Base):
__tablename__ = 'sub_category'
id = sa.Column('_id', sa.Integer, primary_key=True)
products = sa.orm.relationship(
Product,
backref='sub_categories',
secondary=subcategory_product
)
return SubCategory
@pytest.fixture
def Product(self, Base):
class Product(Base):
__tablename__ = 'product'
id = sa.Column('_id', sa.Integer, primary_key=True)
price = sa.Column(sa.Numeric)
return Product
@pytest.fixture
def init_models(self, Catalog, Category, SubCategory, Product):
pass
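# Illustrative usage sketch, not part of sqlalchemy-utils' test suite: a test class
# opts into one of the mixins above simply by inheriting it, and pytest then wires
# up Catalog/Category/SubCategory/Product through the fixture chain. The Base and
# session fixtures are assumed to come from the project's conftest.py.
class ExampleDeepOneToManyUsage(ThreeLevelDeepOneToMany):
    def test_fixture_chain(self, Catalog, Category, SubCategory, Product):
        # build a plain in-memory object graph through the declared relationships
        catalog = Catalog(categories=[
            Category(sub_categories=[
                SubCategory(products=[Product(price=10)])
            ])
        ])
        assert catalog.categories[0].sub_categories[0].products[0].price == 10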
|
src/triage/experiments/singlethreaded.py | josephbajor/triage_NN | 160 | 22777 | from triage.experiments import ExperimentBase
class SingleThreadedExperiment(ExperimentBase):
def process_query_tasks(self, query_tasks):
self.feature_generator.process_table_tasks(query_tasks)
def process_matrix_build_tasks(self, matrix_build_tasks):
self.matrix_builder.build_all_matrices(matrix_build_tasks)
def process_train_test_batches(self, batches):
self.model_train_tester.process_all_batches(batches)
def process_subset_tasks(self, subset_tasks):
self.subsetter.process_all_tasks(subset_tasks)
|
mmtbx/bulk_solvent/f_model_all_scales.py | dperl-sol/cctbx_project | 155 | 22809 | <reponame>dperl-sol/cctbx_project
from __future__ import absolute_import, division, print_function
from cctbx.array_family import flex
from cctbx import adptbx
from mmtbx import bulk_solvent
import mmtbx
from libtbx import group_args
import mmtbx.arrays
import mmtbx.bulk_solvent.scaler
from libtbx.test_utils import approx_equal
from libtbx.math_utils import ifloor, iceil
import mmtbx.f_model
import mmtbx.bulk_solvent.bulk_solvent_and_scaling as bss
from six.moves import zip, range
class run(mmtbx.f_model.manager):
"""
This is a very specialized routine to perform complex protocols of updating
  all scales of fmodel, including the case of twinning, presence of H and likely
more. Inside it pretends to be fmodel proper (done by dictionary updates
before and after - any better ideas of how to do it nicer are welcome!).
"""
def __init__(self,
fmodel,
apply_back_trace,
remove_outliers,
fast,
params,
refine_hd_scattering,
log):
### Must be first thing here
self.__dict__.update(fmodel.__dict__)
# From this point on: self = fmodel
###
russ = self.compute(apply_back_trace = apply_back_trace, remove_outliers =
remove_outliers, fast = fast, params = params,
refine_hd_scattering = refine_hd_scattering, log = log)
### Must be next to last...
fmodel.__dict__.update(self.__dict__)
### ...and this one is last
self.russ = russ
def compute(self, apply_back_trace, remove_outliers, fast,
params, refine_hd_scattering, log):
assert [self.arrays.core_twin, self.twin_law].count(None) in [0,2]
self.show(prefix = "start", log = log)
self.reset_all_scales()
self.show(prefix = "re-set all scales", log = log)
if(remove_outliers and not self.twinned()):
for iii in range(5):
self.remove_outliers(use_model = False, log = None) # XXX
self.show(prefix = "remove outliers", log = log)
result = None
if(self.twinned()):
for cycle in range(2):
if(log is not None): print("cycle %d:"%cycle, file=log)
self.update_twin_fraction()
self.show(prefix = "update twin fraction", log = log)
result = self.update_solvent_and_scale_twin(log = log,
refine_hd_scattering = refine_hd_scattering)
else:
result = self.update_solvent_and_scale_2(
fast = fast,
params = params,
apply_back_trace = apply_back_trace,
refine_hd_scattering = refine_hd_scattering,
log = log)
#XXX if(remove_outliers and not self.twinned()):
#XXX self.remove_outliers(use_model = True, log = None) # XXX
if(remove_outliers and not self.twinned()):
for iii in range(5):
self.remove_outliers(use_model = True, log = None) # XXX
self.show(prefix = "remove outliers", log = log)
return result
def reset_all_scales(self):
size = self.f_obs().data().size()
zero_c = flex.complex_double(size,0)
zero_d = flex.double(size,0)
one_d = flex.double(size,1)
f_part1_twin = self.f_calc_twin()
f_part2_twin = self.f_calc_twin()
if(f_part1_twin is not None):
f_part1_twin = self.f_calc_twin().array(data=zero_c)
f_part2_twin = self.f_calc_twin().array(data=zero_c)
self.update_core(
f_part1 = self.f_calc().array(data=zero_c),
f_part2 = self.f_calc().array(data=zero_c),
f_part1_twin = f_part1_twin,
f_part2_twin = f_part2_twin,
k_isotropic = one_d,
k_anisotropic = one_d,
k_mask = [zero_d]*len(self.k_masks()))
def show(self, prefix, log, r=None):
if(log is None): return
if(r is None): r = self.r_all()
m = "%24s: r(all,work,free)=%6.4f %6.4f %6.4f n_refl.: %d"%(prefix, r,
self.r_work(), self.r_free(), self.f_obs().data().size())
if(not self.twinned()):
print(m, file=log)
else:
print(m+" twin_fraction=%4.2f"%self.twin_fraction, file=log)
def need_to_refine_hd_scattering_contribution(self):
if(self.xray_structure is None): return False
refine_hd_scattering = True
hd_selection = self.xray_structure.hd_selection()
occ_h_all_zero = self.xray_structure.select(
hd_selection).scatterers().extract_occupancies().all_eq(0.0) # riding H
if(self.xray_structure.guess_scattering_type_neutron() or
hd_selection.count(True)==0 or
not occ_h_all_zero):
refine_hd_scattering = False
return refine_hd_scattering
def update_solvent_and_scale_2(self, fast, params, apply_back_trace,
refine_hd_scattering, log):
if(params is None): params = bss.master_params.extract()
if(self.xray_structure is not None):
# Figure out Fcalc and Fmask based on presence of H
hd_selection = self.xray_structure.hd_selection()
xrs_no_h = self.xray_structure.select(~hd_selection)
xrs_h = self.xray_structure.select(hd_selection)
# Create data container for scalers. If H scattering is refined then it is
# assumed that self.f_calc() does not contain H contribution at all.
fmodel_kbu = mmtbx.f_model.manager_kbu(
f_obs = self.f_obs(),
f_calc = self.f_calc(),
f_masks = self.f_masks(),
ss = self.ss)
# Compute k_total and k_mask using one of the two methods (anal or min).
# Note: this intentionally ignores previously existing f_part1 and f_part2.
#
k_sol, b_sol, b_cart, b_adj = [None,]*4
if(fast): # analytical
assert len(fmodel_kbu.f_masks)==1
result = mmtbx.bulk_solvent.scaler.run_simple(
fmodel_kbu = fmodel_kbu,
r_free_flags = self.r_free_flags(),
bulk_solvent = params.bulk_solvent,
aniso_scale = params.anisotropic_scaling,
bin_selections = self.bin_selections)
r_all_from_scaler = result.r_all() # must be here, before apply_back_trace
else: # using minimization: exp solvent and scale model (k_sol,b_sol,b_cart)
result = bss.bulk_solvent_and_scales(
fmodel_kbu = fmodel_kbu,
params = params)
k_sol, b_sol, b_cart = result.k_sols(), result.b_sols(), result.b_cart()
r_all_from_scaler = result.r_all() # must be here, before apply_back_trace
if(apply_back_trace and len(fmodel_kbu.f_masks)==1 and
self.xray_structure is not None):
o = result.apply_back_trace_of_overall_exp_scale_matrix(
xray_structure = self.xray_structure)
b_adj = o.b_adj
if(not fast): b_sol, b_cart = [o.b_sol], o.b_cart
self.update_xray_structure(
xray_structure = o.xray_structure,
update_f_calc = True)
fmodel_kbu = fmodel_kbu.update(f_calc = self.f_calc())
self.show(prefix = "overall B=%s to atoms"%str("%7.2f"%o.b_adj).strip(),
log = log)
# Update self with new arrays so that H correction knows current R factor.
# If no H to account for, then this is the final result.
k_masks = result.k_masks()
k_anisotropic = result.k_anisotropic()
k_isotropic = result.k_isotropic()
self.update_core(
k_mask = k_masks,
k_anisotropic = k_anisotropic,
k_isotropic = k_isotropic)
self.show(prefix = "bulk-solvent and scaling", log = log)
# Consistency check
if(not apply_back_trace):
assert approx_equal(self.r_all(), r_all_from_scaler)
# Add contribution from H (if present and riding). This goes to f_part2.
kh, bh = 0, 0
if(refine_hd_scattering and
self.need_to_refine_hd_scattering_contribution()):
# Obsolete previous contribution f_part2
f_part2 = fmodel_kbu.f_calc.array(data=fmodel_kbu.f_calc.data()*0)
self.update_core(f_part2 = f_part2)
xrs_h = xrs_h.set_occupancies(value=1).set_b_iso(value = 0)
f_h = self.compute_f_calc(xray_structure = xrs_h)
# Accumulate all mask contributions: Fcalc_atoms+Fbulk_1+...+Fbulk_N
data = fmodel_kbu.f_calc.data()
for k_mask_, f_mask_ in zip(k_masks, fmodel_kbu.f_masks):
data = data + k_mask_*f_mask_.data()
f_calc_plus_f_bulk_no_scales = fmodel_kbu.f_calc.array(data = data)
# Consistency check
assert approx_equal(self.f_model().data(),
f_calc_plus_f_bulk_no_scales.data()*k_isotropic*k_anisotropic)
assert approx_equal(self.f_model_no_scales().data(),
f_calc_plus_f_bulk_no_scales.data())
#
# Compute contribution from H (F_H)
#
# Coarse sampling
b_mean = flex.mean(xrs_no_h.extract_u_iso_or_u_equiv())*adptbx.u_as_b(1.)
b_min = int(max(0,b_mean)*0.5)
b_max = int(b_mean*1.5)
sc = 1000.
kr=[i/sc for i in range(ifloor(0*sc), iceil(1.5*sc)+1, int(0.1*sc))]
br=[i/sc for i in range(ifloor(b_min*sc), iceil(b_max*sc)+1, int(5.*sc))]
o = bulk_solvent.k_sol_b_sol_k_anisotropic_scaler_twin(
f_obs = fmodel_kbu.f_obs.data(),
f_calc = f_calc_plus_f_bulk_no_scales.data(),
f_mask = f_h.data(),
k_total = k_isotropic*k_anisotropic,
ss = fmodel_kbu.ss,
k_sol_range = flex.double(kr),
b_sol_range = flex.double(br),
r_ref = self.r_work())
if(o.updated()):
f_part2 = f_h.array(data = o.k_mask()*f_h.data())
kh, bh = o.k_sol(), o.b_sol()
self.show(prefix = "add H (%4.2f, %6.2f)"%(kh, bh), log = log, r=o.r())
# Fine sampling
k_min = max(0,o.k_sol()-0.1)
k_max = o.k_sol()+0.1
b_min = max(0,o.b_sol()-5.)
b_max = o.b_sol()+5.
kr=[i/sc for i in range(ifloor(k_min*sc),iceil(k_max*sc)+1,int(0.01*sc))]
br=[i/sc for i in range(ifloor(b_min*sc),iceil(b_max*sc)+1,int(1.*sc))]
o = bulk_solvent.k_sol_b_sol_k_anisotropic_scaler_twin(
f_obs = fmodel_kbu.f_obs.data(),
f_calc = f_calc_plus_f_bulk_no_scales.data(),
f_mask = f_h.data(),
k_total = k_isotropic*k_anisotropic,
ss = fmodel_kbu.ss,
k_sol_range = flex.double(kr),
b_sol_range = flex.double(br),
r_ref = o.r())
if(o.updated()):
f_part2 = f_h.array(data = o.k_mask()*f_h.data())
kh, bh = o.k_sol(), o.b_sol()
self.show(prefix = "add H (%4.2f, %6.2f)"%(kh, bh), log = log, r=o.r())
# THIS HELPS if fast=true is used, see how it works in reality
#
if(fast):
fmodel_kbu_ = mmtbx.f_model.manager_kbu(
f_obs = self.f_obs(),
f_calc = f_calc_plus_f_bulk_no_scales,
f_masks = [f_part2],
ss = self.ss)
result = mmtbx.bulk_solvent.scaler.run_simple(
fmodel_kbu = fmodel_kbu_,
r_free_flags = self.r_free_flags(),
bulk_solvent = params.bulk_solvent,
aniso_scale = params.anisotropic_scaling,
bin_selections = self.bin_selections)
f_part2 = f_part2.array(data = result.core.k_mask()*f_part2.data())
k_isotropic = result.core.k_isotropic*result.core.k_isotropic_exp
k_anisotropic = result.core.k_anisotropic
# Update self with final scales
self.update_core(
k_mask = k_masks,
k_anisotropic = k_anisotropic,
k_isotropic = k_isotropic,
f_part2 = f_part2)
# Make sure what came out of scaling matches what self thinks it really is
# It must match at least up to 1.e-6.
self.show(prefix = "add H (%4.2f, %6.2f)"%(kh, bh), log = log)
if(fast):
assert approx_equal(result.r_work(), self.r_work(), 1.e-4)
else:
assert approx_equal(self.r_all(), o.r()), [self.r_all(), o.r()]
return group_args(
k_sol = k_sol,
b_sol = b_sol,
b_cart = b_cart,
k_h = kh,
b_h = bh,
b_adj = b_adj)
def update_solvent_and_scale_twin(self, refine_hd_scattering, log):
if(not self.twinned()): return
assert len(self.f_masks()) == 1
# Re-set all scales to unit or zero
self.show(prefix = "update scales twin start", log = log)
self.reset_all_scales()
self.show(prefix = "reset f_part, k_(total,mask)", log = log)
f_calc_data = self.f_calc().data()
f_calc_data_twin = self.f_calc_twin().data()
# Initial trial set
sc = 1000.
ksr = [i/sc for i in range(ifloor(0*sc), iceil(0.6*sc)+1, int(0.05*sc))]
bsr = [i/sc for i in range(ifloor(0*sc), iceil(150.*sc)+1, int(10.*sc))]
o_kbu_sol = bulk_solvent.k_sol_b_sol_k_anisotropic_scaler_twin(
f_obs = self.f_obs().data(),
f_calc_1 = f_calc_data,
f_calc_2 = f_calc_data_twin,
f_mask_1 = self.arrays.core.f_masks[0].data(),
f_mask_2 = self.arrays.core_twin.f_masks[0].data(),
ss = self.ss,
twin_fraction = self.twin_fraction,
k_sol_range = flex.double(ksr),
b_sol_range = flex.double(bsr),
miller_indices = self.f_obs().indices(), #XXX ??? What about twin-related?
unit_cell = self.f_obs().unit_cell(),
r_ref = self.r_all())
if(o_kbu_sol.updated()):
self.update(
k_mask = o_kbu_sol.k_mask(),
k_anisotropic = o_kbu_sol.k_anisotropic())
# Second (finer) trial set
k_min = max(o_kbu_sol.k_sol()-0.05, 0)
k_max = min(o_kbu_sol.k_sol()+0.05, 0.6)
ksr = [i/sc for i in range(ifloor(k_min*sc), iceil(k_max*sc)+1, int(0.01*sc))]
b_min = max(o_kbu_sol.b_sol()-10, 0)
b_max = min(o_kbu_sol.b_sol()+10, 150)
bsr = [i/sc for i in range(ifloor(b_min*sc), iceil(b_max*sc)+1, int(1.*sc))]
o_kbu_sol = bulk_solvent.k_sol_b_sol_k_anisotropic_scaler_twin(
f_obs = self.f_obs().data(),
f_calc_1 = f_calc_data,
f_calc_2 = f_calc_data_twin,
f_mask_1 = self.arrays.core.f_masks[0].data(),
f_mask_2 = self.arrays.core_twin.f_masks[0].data(),
ss = self.ss,
twin_fraction = self.twin_fraction,
k_sol_range = flex.double(ksr),
b_sol_range = flex.double(bsr),
miller_indices = self.f_obs().indices(), #XXX ??? What about twin-related?
unit_cell = self.f_obs().unit_cell(),
r_ref = o_kbu_sol.r())
if(o_kbu_sol.updated()):
self.update(
k_mask = o_kbu_sol.k_mask(),
k_anisotropic = o_kbu_sol.k_anisotropic())
# Disable due to rare failures. Technically they should always match. But
# since different routines are used tiny disagreements are possible.
# See examples in : /net/anaconda/raid1/afonine/work/bugs/twin_refinement
#assert approx_equal(self.r_all(), o_kbu_sol.r(), 1.e-5)
##############
# use apply_back_trace in if below
if(self.xray_structure is not None):
o = mmtbx.bulk_solvent.scaler.tmp(
xray_structure = self.xray_structure,
k_anisotropic = o_kbu_sol.k_anisotropic(),
k_masks = [o_kbu_sol.k_mask()],
ss = self.ss)
self.update_xray_structure(
xray_structure = o.xray_structure,
update_f_calc = True)
#############
self.update(
k_mask = o.k_masks,
k_anisotropic = o.k_anisotropic)
self.show(prefix = "bulk-solvent and scaling", log = log)
#
# Add contribution from H (if present and riding). This goes to f_part2.
#
kh, bh = 0, 0
if(refine_hd_scattering and
self.need_to_refine_hd_scattering_contribution()):
hd_selection = self.xray_structure.hd_selection()
xrs_no_h = self.xray_structure.select(~hd_selection)
xrs_h = self.xray_structure.select(hd_selection)
# Accumulate all mask contributions: Fcalc_atoms+Fbulk_1+...+Fbulk_N
data = self.f_calc().data()+self.f_masks()[0].data()*self.k_masks()[0]
f_calc_plus_f_bulk_no_scales = self.f_calc().array(data = data)
data = self.f_calc_twin().data()+\
self.f_masks_twin()[0].data()*self.k_masks_twin()[0]
f_calc_plus_f_bulk_no_scales_twin = self.f_calc_twin().array(data = data)
# Initial FH contribution
xrs_h = xrs_h.set_occupancies(value=1).set_b_iso(value = 0)
f_h = self.compute_f_calc(xray_structure = xrs_h)
f_h_twin = self.compute_f_calc(xray_structure = xrs_h,
miller_array = self.f_calc_twin())
# Coarse sampling
b_mean = flex.mean(xrs_no_h.extract_u_iso_or_u_equiv())*adptbx.u_as_b(1.)
b_min = int(max(0,b_mean)*0.5)
b_max = int(b_mean*1.5)
sc = 1000.
kr=[i/sc for i in range(ifloor(0*sc), iceil(1.5*sc)+1, int(0.1*sc))]
br=[i/sc for i in range(ifloor(b_min*sc), iceil(b_max*sc)+1, int(5.*sc))]
obj = bulk_solvent.k_sol_b_sol_k_anisotropic_scaler_twin(
f_obs = self.f_obs().data(),
f_calc_1 = f_calc_plus_f_bulk_no_scales.data(),
f_calc_2 = f_calc_plus_f_bulk_no_scales_twin.data(),
f_mask_1 = f_h.data(),
f_mask_2 = f_h_twin.data(),
ss = self.ss,
twin_fraction = self.twin_fraction,
k_sol_range = flex.double(kr),
b_sol_range = flex.double(br),
miller_indices = self.f_obs().indices(), # XXX What about twin-related?
unit_cell = self.f_obs().unit_cell(),
r_ref = self.r_work())
if(obj.updated()):
f_part2 = f_h.array( data = obj.k_mask()*f_h.data())
f_part2_twin = f_h_twin.array(data = obj.k_mask()*f_h_twin.data())
kh, bh = obj.k_sol(), obj.b_sol()
# Fine sampling
k_min = max(0,obj.k_sol()-0.1)
k_max = obj.k_sol()+0.1
b_min = max(0,obj.b_sol()-5.)
b_max = obj.b_sol()+5.
kr=[i/sc for i in range(ifloor(k_min*sc),iceil(k_max*sc)+1,int(0.01*sc))]
br=[i/sc for i in range(ifloor(b_min*sc),iceil(b_max*sc)+1,int(5.*sc))]
obj = bulk_solvent.k_sol_b_sol_k_anisotropic_scaler_twin(
f_obs = self.f_obs().data(),
f_calc_1 = f_calc_plus_f_bulk_no_scales.data(),
f_calc_2 = f_calc_plus_f_bulk_no_scales_twin.data(),
f_mask_1 = f_h.data(),
f_mask_2 = f_h_twin.data(),
ss = self.ss,
twin_fraction = self.twin_fraction,
k_sol_range = flex.double(kr),
b_sol_range = flex.double(br),
miller_indices = self.f_obs().indices(), # XXX What about twin-related?
unit_cell = self.f_obs().unit_cell(),
r_ref = obj.r())
if(obj.updated()):
f_part2 = f_h.array( data = obj.k_mask()*f_h.data())
f_part2_twin = f_h_twin.array(data = obj.k_mask()*f_h_twin.data())
kh, bh = obj.k_sol(), obj.b_sol()
self.update_core(
f_part2 = f_part2,
f_part2_twin = f_part2_twin,
k_anisotropic = obj.k_anisotropic())
self.show(prefix = "add H (%4.2f, %6.2f)"%(kh, bh), log = log)
b_cart = adptbx.u_as_b(adptbx.u_star_as_u_cart(
self.f_obs().unit_cell(), o_kbu_sol.u_star()))
return group_args(
k_sol = o_kbu_sol.k_sol(),
b_sol = o_kbu_sol.b_sol(),
b_cart = b_cart,
k_h = kh,
b_h = bh)
|
notebooks/shared/ipypublish/export_plugins/html_standard.py | leonbett/debuggingbook | 728 | 22813 | #!/usr/bin/env python
"""html in standard nbconvert format
"""
from ipypublish.html.create_tpl import create_tpl
from ipypublish.html.standard import content
from ipypublish.html.standard import content_tagging
from ipypublish.html.standard import document
from ipypublish.html.standard import inout_prompt
from ipypublish.html.standard import mathjax
from ipypublish.html.standard import widgets
oformat = 'HTML'
config = {}
template = create_tpl([
document.tpl_dict,
content.tpl_dict, content_tagging.tpl_dict,
mathjax.tpl_dict, widgets.tpl_dict,
inout_prompt.tpl_dict
])
|
scripts/issues/issue6.py | slamer59/awesome-panel | 179 | 22840 | <filename>scripts/issues/issue6.py
import panel as pn
def main():
text_error = """
This is not formatted correctly by Markdown due to the indentation!"""
text_ok = """
This is formatted correctly by Markdown!
"""
app = pn.Column(
pn.pane.Markdown(text_error),
pn.pane.HTML(
"<hr>",
sizing_mode="stretch_width",
),
pn.pane.Markdown(text_ok),
)
app.servable()
main()
|
pycon/tutorials/urls.py | azkarmoulana/pycon | 154 | 22850 | <gh_stars>100-1000
from django.conf.urls import url, patterns
from .views import tutorial_email, tutorial_message
urlpatterns = patterns("", # flake8: noqa
url(r"^mail/(?P<pk>\d+)/(?P<pks>[0-9,]+)/$", tutorial_email, name="tutorial_email"),
url(r"^message/(?P<pk>\d+)/$", tutorial_message, name="tutorial_message"),
)
|
src/past/types/oldstr.py | kianmeng/python-future | 908 | 22899 | """
Pure-Python implementation of a Python 2-like str object for Python 3.
"""
from numbers import Integral
from past.utils import PY2, with_metaclass
if PY2:
from collections import Iterable
else:
from collections.abc import Iterable
_builtin_bytes = bytes
class BaseOldStr(type):
def __instancecheck__(cls, instance):
return isinstance(instance, _builtin_bytes)
def unescape(s):
r"""
Interprets strings with escape sequences
Example:
>>> s = unescape(r'abc\\def') # i.e. 'abc\\\\def'
>>> print(s)
'abc\def'
>>> s2 = unescape('abc\\ndef')
>>> len(s2)
8
>>> print(s2)
abc
def
"""
return s.encode().decode('unicode_escape')
class oldstr(with_metaclass(BaseOldStr, _builtin_bytes)):
"""
A forward port of the Python 2 8-bit string object to Py3
"""
# Python 2 strings have no __iter__ method:
@property
def __iter__(self):
raise AttributeError
def __dir__(self):
return [thing for thing in dir(_builtin_bytes) if thing != '__iter__']
# def __new__(cls, *args, **kwargs):
# """
# From the Py3 bytes docstring:
# bytes(iterable_of_ints) -> bytes
# bytes(string, encoding[, errors]) -> bytes
# bytes(bytes_or_buffer) -> immutable copy of bytes_or_buffer
# bytes(int) -> bytes object of size given by the parameter initialized with null bytes
# bytes() -> empty bytes object
#
# Construct an immutable array of bytes from:
# - an iterable yielding integers in range(256)
# - a text string encoded using the specified encoding
# - any object implementing the buffer API.
# - an integer
# """
#
# if len(args) == 0:
# return super(newbytes, cls).__new__(cls)
# # Was: elif isinstance(args[0], newbytes):
# # We use type() instead of the above because we're redefining
# # this to be True for all unicode string subclasses. Warning:
# # This may render newstr un-subclassable.
# elif type(args[0]) == newbytes:
# return args[0]
# elif isinstance(args[0], _builtin_bytes):
# value = args[0]
# elif isinstance(args[0], unicode):
# if 'encoding' not in kwargs:
# raise TypeError('unicode string argument without an encoding')
# ###
# # Was: value = args[0].encode(**kwargs)
# # Python 2.6 string encode() method doesn't take kwargs:
# # Use this instead:
# newargs = [kwargs['encoding']]
# if 'errors' in kwargs:
# newargs.append(kwargs['errors'])
# value = args[0].encode(*newargs)
# ###
# elif isinstance(args[0], Iterable):
# if len(args[0]) == 0:
# # What is this?
# raise ValueError('unknown argument type')
# elif len(args[0]) > 0 and isinstance(args[0][0], Integral):
# # It's a list of integers
# value = b''.join([chr(x) for x in args[0]])
# else:
# raise ValueError('item cannot be interpreted as an integer')
# elif isinstance(args[0], Integral):
# if args[0] < 0:
# raise ValueError('negative count')
# value = b'\x00' * args[0]
# else:
# value = args[0]
# return super(newbytes, cls).__new__(cls, value)
def __repr__(self):
        s = super(oldstr, self).__repr__()  # e.g. "b'abc'" on Py3; the leading 'b' is stripped below
return s[1:]
def __str__(self):
        s = super(oldstr, self).__str__()  # e.g. "b'abc'" or "b'abc\\ndef'"
# TODO: fix this:
assert s[:2] == "b'" and s[-1] == "'"
return unescape(s[2:-1]) # e.g. 'abc' or 'abc\ndef'
def __getitem__(self, y):
if isinstance(y, Integral):
return super(oldstr, self).__getitem__(slice(y, y+1))
else:
return super(oldstr, self).__getitem__(y)
def __getslice__(self, *args):
return self.__getitem__(slice(*args))
def __contains__(self, key):
        if isinstance(key, int):
            return False
        # Delegate non-int membership tests to the underlying bytes value.
        return super(oldstr, self).__contains__(key)
def __native__(self):
return bytes(self)
__all__ = ['oldstr']
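# A minimal usage sketch (illustrative only; the import path is assumed from
# this file's location, and the shown results assume Python 3, where plain
# bytes indexing would otherwise yield ints):
#
#   from past.types.oldstr import oldstr
#   s = oldstr(b'abc\ndef')
#   s[0]           # b'a'   (Py2-style one-character slice, not the int 97)
#   s[0:2]         # b'ab'
#   print(str(s))  # prints "abc" and "def" on two lines, escapes interpreted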
|
src/falconpy/quick_scan.py | CrowdStrike/falconpy | 111 | 22906 | """Falcon Quick Scan API Interface Class
_______ __ _______ __ __ __
| _ .----.-----.--.--.--.--| | _ | |_.----|__| |--.-----.
|. 1___| _| _ | | | | _ | 1___| _| _| | <| -__|
|. |___|__| |_____|________|_____|____ |____|__| |__|__|__|_____|
|: 1 | |: 1 |
|::.. . | CROWDSTRIKE FALCON |::.. . | FalconPy
`-------' `-------'
OAuth2 API - Customer SDK
This is free and unencumbered software released into the public domain.
Anyone is free to copy, modify, publish, use, compile, sell, or
distribute this software, either in source code form or as a compiled
binary, for any purpose, commercial or non-commercial, and by any
means.
In jurisdictions that recognize copyright laws, the author or authors
of this software dedicate any and all copyright interest in the
software to the public domain. We make this dedication for the benefit
of the public at large and to the detriment of our heirs and
successors. We intend this dedication to be an overt act of
relinquishment in perpetuity of all present and future rights to this
software under copyright law.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
For more information, please refer to <https://unlicense.org>
"""
from ._util import force_default, process_service_request, handle_single_argument
from ._payload import generic_payload_list, aggregate_payload
from ._service_class import ServiceClass
from ._endpoint._quick_scan import _quick_scan_endpoints as Endpoints
class QuickScan(ServiceClass):
"""The only requirement to instantiate an instance of this class is one of the following:
- a valid client_id and client_secret provided as keywords.
- a credential dictionary with client_id and client_secret containing valid API credentials
{
"client_id": "CLIENT_ID_HERE",
"client_secret": "CLIENT_SECRET_HERE"
}
- a previously-authenticated instance of the authentication service class (oauth2.py)
- a valid token provided by the authentication service class (oauth2.py)
"""
@force_default(defaults=["body"], default_types=["dict"])
def get_scans_aggregates(self: object, body: dict = None, **kwargs) -> dict:
"""Get scans aggregations as specified via json in request body.
Keyword arguments:
body -- full body payload, not required when using other keywords.
{
"date_ranges": [
{
"from": "string",
"to": "string"
}
],
"field": "string",
"filter": "string",
"interval": "string",
"min_doc_count": 0,
"missing": "string",
"name": "string",
"q": "string",
"ranges": [
{
"From": 0,
"To": 0
}
],
"size": 0,
"sort": "string",
"sub_aggregates": [
null
],
"time_zone": "string",
"type": "string"
}
date_ranges -- List of dictionaries.
field -- String.
filter -- FQL syntax. String.
interval -- String.
min_doc_count -- Minimum number of documents required to match. Integer.
missing -- String.
name -- Scan name. String.
q -- FQL syntax. String.
ranges -- List of dictionaries.
size -- Integer.
sort -- FQL syntax. String.
sub_aggregates -- List of strings.
time_zone -- String.
type -- String.
This method only supports keywords for providing arguments.
This method does not support body payload validation.
Returns: dict object containing API response.
HTTP Method: POST
Swagger URL
https://assets.falcon.crowdstrike.com/support/api/swagger.html#/quick-scan/GetScansAggregates
"""
if not body:
body = aggregate_payload(submitted_keywords=kwargs)
return process_service_request(
calling_object=self,
endpoints=Endpoints,
operation_id="GetScansAggregates",
body=body
)
@force_default(defaults=["parameters"], default_types=["dict"])
def get_scans(self: object, *args, parameters: dict = None, **kwargs) -> dict:
"""Check the status of a volume scan. Time required for
analysis increases with the number of samples in a volume
but usually it should take less than 1 minute.
Keyword arguments:
ids -- One or more remediation IDs. String or list of strings.
parameters - full parameters payload, not required if ids is provided as a keyword.
Arguments: When not specified, the first argument to this method is assumed to be 'ids'.
All others are ignored.
Returns: dict object containing API response.
HTTP Method: GET
Swagger URL
https://assets.falcon.crowdstrike.com/support/api/swagger.html#/quick-scan/GetScans
"""
return process_service_request(
calling_object=self,
endpoints=Endpoints,
operation_id="GetScans",
keywords=kwargs,
params=handle_single_argument(args, parameters, "ids")
)
@force_default(defaults=["body"], default_types=["dict"])
def scan_samples(self: object, *args, body: dict = None, **kwargs) -> dict:
"""Get scans aggregations as specified via json in request body.
Keyword arguments:
body -- full body payload, not required when samples keyword is provided.
{
"samples": [
"string"
]
}
samples -- SHA256(s) of the samples to scan. Must have been previously submitted using
SampleUploadV3 (SampleUploads class). String or list of strings.
Arguments: When not specified, the first argument to this method is assumed to be
'samples'. All others are ignored.
Returns: dict object containing API response.
HTTP Method: POST
Swagger URL
https://assets.falcon.crowdstrike.com/support/api/swagger.html#/quick-scan/ScanSamples
"""
if not body:
body = generic_payload_list(submitted_arguments=args,
submitted_keywords=kwargs,
payload_value="samples"
)
return process_service_request(
calling_object=self,
endpoints=Endpoints,
operation_id="ScanSamples",
body=body,
body_validator={"samples": list} if self.validate_payloads else None,
body_required=["samples"] if self.validate_payloads else None
)
@force_default(defaults=["parameters"], default_types=["dict"])
def query_submissions(self: object, parameters: dict = None, **kwargs) -> dict:
"""Find IDs for submitted scans by providing an FQL filter and paging details.
Returns a set of volume IDs that match your criteria.
Keyword arguments:
filter -- The filter expression that should be used to limit the results. FQL syntax.
limit -- The maximum number of records to return. [integer, 1-5000]
offset -- The integer offset to start retrieving records from.
parameters - full parameters payload, not required if using other keywords.
sort -- The property to sort by. FQL syntax.
This method only supports keywords for providing arguments.
Returns: dict object containing API response.
HTTP Method: GET
Swagger URL
https://assets.falcon.crowdstrike.com/support/api/swagger.html#/quick-scan/QuerySubmissionsMixin0
"""
return process_service_request(
calling_object=self,
endpoints=Endpoints,
operation_id="QuerySubmissionsMixin0",
keywords=kwargs,
params=parameters
)
# These method names align to the operation IDs in the API but
# do not conform to snake_case / PEP8 and are defined here for
# backwards compatibility / ease of use purposes
GetScansAggregates = get_scans_aggregates
GetScans = get_scans
ScanSamples = scan_samples
QuerySubmissionsMixin0 = query_submissions
# The legacy name for this class does not conform to PascalCase / PEP8
# It is defined here for backwards compatibility purposes only.
Quick_Scan = QuickScan # pylint: disable=C0103
|
scripts/lc/ARES/testing/run_rose_tool.py | ouankou/rose | 488 | 22914 | <gh_stars>100-1000
#!/usr/bin/env python
"""Runs a ROSE tool. If the tool does not return status 0, then runs the
corresponding non-ROSE compiler. Records whether the tool succeeded, in
passed.txt and failed.txt, but always returns status 0.
"""
import argparse
import inspect
import os
from support.local_logging import Logger
from support.runner import Runner
_SEPARATOR = "================================================================================"
class ROSERunner (object):
def __init__(self):
# Will be a Namespace (e.g. can refer to self._args_defined.command_args):
self._args_defined = None
# Will be a list:
self._args_remaining = None
self._current_dir = ""
self._failed_file = None
self._failed_file_path = ""
self._logger = Logger("run_rose_tool.ROSERunner")
self._parser = None
self._passed_file = None
self._passed_file_path = ""
self._primary_command = ""
self._runner = Runner()
self._script_dir = ""
self._secondary_command = ""
self._define_args()
def _define_args(self):
""" This script passes all its arguments on to the called
        programs, so no arguments are defined here.
"""
parser = argparse.ArgumentParser(
description="""Runs a ROSE tool. If the tool does not return status 0, then runs the
corresponding non-ROSE compiler. Records whether the tool succeeded, in
passed.txt and failed.txt, but always returns status 0.
""")
# We want ALL the arguments, so, we are using parse_known_arguments
# below instead and commenting this out for now:
## This matches the first positional and all remaining/following args:
#parser.add_argument('command_args', nargs=argparse.REMAINDER)
self._parser = parser
def _process_args(self):
self._args_defined, self._args_remaining = self._parser.parse_known_args()
self._logger.debug("defined args\n" + str(self._args_defined))
self._logger.debug("remaining args\n" + str(self._args_remaining))
self._current_dir = os.getcwd()
#self._script_dir = os.path.dirname(os.path.abspath(__file__))
# Robustly get this script's directory, even when started by exec or execfiles:
script_rel_path = inspect.getframeinfo(inspect.currentframe()).filename
self._script_dir = os.path.dirname(os.path.abspath(script_rel_path))
self._primary_command = "/g/g17/charles/code/ROSE/rose-0.9.10.64-intel-18.0.1.mpi/tutorial/identityTranslator"
self._secondary_command = "/usr/tce/packages/mvapich2/mvapich2-2.2-intel-18.0.1/bin/mpicxx"
self._passed_file_path = os.path.join (self._script_dir, "passed.txt")
self._failed_file_path = os.path.join (self._script_dir, "failed.txt")
def _log_success(self, args):
self._logger.success("\n" + _SEPARATOR + "\nPASSED")
self._logger.debug("Will log to passed file:")
self._logger.debug(args)
self._passed_file.write(str(args) + '\n')
def _log_failure(self, args):
self._logger.problem("\n" + _SEPARATOR + "\nFAILED")
self._logger.debug("Will log to failed file:")
self._logger.debug(args)
self._failed_file.write(str(args) + '\n')
def _run_command (self, args, dir):
self._logger.info("\n" + _SEPARATOR)
self._runner.callOrLog(args, dir)
def run(self):
""" Run the primary command. If it fails, run the secondary command. If
that fails, let the exception (Runner.Failed) propagate.
"""
self._logger.set_debug_off()
#self._logger._logger.setLevel(Logger.ERROR)
self._process_args()
self._passed_file = open(self._passed_file_path, 'a')
self._failed_file = open(self._failed_file_path, 'a')
try:
primary_args = [self._primary_command] + self._args_remaining
self._run_command(primary_args, self._current_dir)
self._log_success(primary_args)
        except Runner.Failed as e:
self._log_failure(primary_args)
secondary_args = [self._secondary_command] + self._args_remaining
self._run_command(secondary_args, self._current_dir)
def main():
ROSERunner().run()
if __name__ == '__main__':
main()
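# Example invocation (hypothetical flags, standing in for whatever the build
# system would normally hand to mpicxx):
#
#   python run_rose_tool.py -c -O2 -o foo.o foo.cpp
#
# The ROSE identityTranslator is tried first; on failure the same arguments
# are replayed through the configured mpicxx, and the outcome is appended to
# passed.txt or failed.txt next to this script.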
|
test/auth/test_client_credentials.py | membranepotential/mendeley-python-sdk | 103 | 22916 | from oauthlib.oauth2 import InvalidClientError, MissingTokenError
import pytest
from test import configure_mendeley, cassette
def test_should_get_authenticated_session():
mendeley = configure_mendeley()
auth = mendeley.start_client_credentials_flow()
with cassette('fixtures/auth/client_credentials/get_authenticated_session.yaml'):
session = auth.authenticate()
assert session.token['access_token']
assert session.host == 'https://api.mendeley.com'
def test_should_throw_exception_on_incorrect_credentials():
mendeley = configure_mendeley()
mendeley.client_secret += '-invalid'
auth = mendeley.start_client_credentials_flow()
# We should never get an access token back
# and the OAuth library should be unhappy about that
with cassette('fixtures/auth/client_credentials/incorrect_credentials.yaml'), pytest.raises(MissingTokenError):
auth.authenticate()
|
tests/settings.py | matrixorz/firefly | 247 | 22951 | <reponame>matrixorz/firefly
# coding=utf-8
DEBUG = True
TESTING = True
SECRET_KEY = 'secret_key for test'
# mongodb
MONGODB_SETTINGS = {
'db': 'firefly_test',
'username': '',
'password': '',
'host': '127.0.0.1',
'port': 27017
}
# redis cache
CACHE_TYPE = 'redis'
CACHE_REDIS_HOST = '127.0.0.1'
CACHE_REDIS_PORT = 6379
CACHE_REDIS_DB = 9
CACHE_REDIS_PASSWORD = ''
# mail sender
MAIL_SERVER = 'smtp.googlemail.com'
MAIL_PORT = 587
MAIL_USE_TLS = True
MAIL_USERNAME = 'MAIL_USERNAME'
MAIL_PASSWORD = '<PASSWORD>'
MAIL_DEFAULT_SENDER = '<EMAIL>'
SECURITY_PASSWORD_SALT = "abc"
SECURITY_PASSWORD_HASH = "<PASSWORD>"
# SECURITY_PASSWORD_HASH = "<PASSWORD>"
SECURITY_EMAIL_SENDER = "<EMAIL>"
SECURITY_CONFIRM_SALT = "570be5f24e690ce5af208244f3e539a93b6e4f05"
SECURITY_REMEMBER_SALT = "de154140385c591ea771dcb3b33f374383e6ea47"
# Set secret keys for CSRF protection
CSRF_ENABLED = False
WTF_CSRF_ENABLED = False
SERVER_EMAIL = 'Python-China <<EMAIL>>'
# Flask-SocialBlueprint
SOCIAL_BLUEPRINT = {
# https://developers.facebook.com/apps/
"flask_social_blueprint.providers.Facebook": {
# App ID
'consumer_key': '197…',
# App Secret
'consumer_secret': 'c956c1…'
},
# https://apps.twitter.com/app/new
"flask_social_blueprint.providers.Twitter": {
# Your access token from API Keys tab
'consumer_key': 'bkp…',
# access token secret
'consumer_secret': 'pHUx…'
},
# https://console.developers.google.com/project
"flask_social_blueprint.providers.Google": {
# Client ID
'consumer_key': '797….apps.googleusercontent.com',
# Client secret
'consumer_secret': 'bDG…'
},
# https://github.com/settings/applications/new
"flask_social_blueprint.providers.Github": {
# Client ID
'consumer_key': '6f6…',
# Client Secret
'consumer_secret': '1a9…'
},
}
|
gan/kdd_utilities.py | mesarcik/Efficient-GAN-Anomaly-Detection | 408 | 22973 | import tensorflow as tf
"""Class for KDD10 percent GAN architecture.
Generator and discriminator.
"""
learning_rate = 0.00001
batch_size = 50
layer = 1
latent_dim = 32
dis_inter_layer_dim = 128
init_kernel = tf.contrib.layers.xavier_initializer()
def generator(z_inp, is_training=False, getter=None, reuse=False):
""" Generator architecture in tensorflow
Generates data from the latent space
Args:
z_inp (tensor): variable in the latent space
reuse (bool): sharing variables or not
Returns:
(tensor): last activation layer of the generator
"""
with tf.variable_scope('generator', reuse=reuse, custom_getter=getter):
name_net = 'layer_1'
with tf.variable_scope(name_net):
net = tf.layers.dense(z_inp,
units=64,
kernel_initializer=init_kernel,
name='fc')
net = tf.nn.relu(net, name='relu')
name_net = 'layer_2'
with tf.variable_scope(name_net):
net = tf.layers.dense(net,
units=128,
kernel_initializer=init_kernel,
name='fc')
net = tf.nn.relu(net, name='relu')
name_net = 'layer_4'
with tf.variable_scope(name_net):
net = tf.layers.dense(net,
units=121,
kernel_initializer=init_kernel,
name='fc')
return net
def discriminator(x_inp, is_training=False, getter=None, reuse=False):
""" Discriminator architecture in tensorflow
Discriminates between real data and generated data
Args:
x_inp (tensor): input data for the encoder.
reuse (bool): sharing variables or not
Returns:
logits (tensor): last activation layer of the discriminator (shape 1)
intermediate_layer (tensor): intermediate layer for feature matching
"""
with tf.variable_scope('discriminator', reuse=reuse, custom_getter=getter):
name_net = 'layer_1'
with tf.variable_scope(name_net):
net = tf.layers.dense(x_inp,
units=256,
kernel_initializer=init_kernel,
name='fc')
net = leakyReLu(net)
net = tf.layers.dropout(net, rate=0.2, name='dropout',
training=is_training)
name_net = 'layer_2'
with tf.variable_scope(name_net):
net = tf.layers.dense(net,
units=128,
kernel_initializer=init_kernel,
name='fc')
net = leakyReLu(net)
net = tf.layers.dropout(net, rate=0.2, name='dropout',
training=is_training)
name_net = 'layer_3'
with tf.variable_scope(name_net):
net = tf.layers.dense(net,
units=dis_inter_layer_dim,
kernel_initializer=init_kernel,
name='fc')
net = leakyReLu(net)
net = tf.layers.dropout(net,
rate=0.2,
name='dropout',
training=is_training)
intermediate_layer = net
name_net = 'layer_4'
with tf.variable_scope(name_net):
net = tf.layers.dense(net,
units=1,
kernel_initializer=init_kernel,
name='fc')
net = tf.squeeze(net)
return net, intermediate_layer
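# A minimal wiring sketch for these two networks (illustrative only; the
# placeholder shapes assume the 121-dimensional KDD feature vectors used above):
#
#   z_pl = tf.placeholder(tf.float32, shape=[None, latent_dim])
#   x_pl = tf.placeholder(tf.float32, shape=[None, 121])
#   training_pl = tf.placeholder(tf.bool)
#
#   x_gen = generator(z_pl, is_training=training_pl)
#   d_real, feat_real = discriminator(x_pl, is_training=training_pl)
#   d_fake, feat_fake = discriminator(x_gen, is_training=training_pl, reuse=True)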
def leakyReLu(x, alpha=0.1, name=None):
if name:
with tf.variable_scope(name):
return _leakyReLu_impl(x, alpha)
else:
return _leakyReLu_impl(x, alpha)
def _leakyReLu_impl(x, alpha):
return tf.nn.relu(x) - (alpha * tf.nn.relu(-x)) |
cocotb/_py_compat.py | lavanyajagan/cocotb | 350 | 22980 | # Copyright (c) cocotb contributors
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL POTENTIAL VENTURES LTD BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
Backports and compatibility shims for newer python features.
These are for internal use - users should use a third party library like `six`
if they want to use these shims in their own code
"""
import sys
# backport of Python 3.7's contextlib.nullcontext
class nullcontext:
"""Context manager that does no additional processing.
Used as a stand-in for a normal context manager, when a particular
block of code is only sometimes used with a normal context manager:
cm = optional_cm if condition else nullcontext()
with cm:
# Perform operation, using optional_cm if condition is True
"""
def __init__(self, enter_result=None):
self.enter_result = enter_result
def __enter__(self):
return self.enter_result
def __exit__(self, *excinfo):
pass
# On python 3.7 onwards, `dict` is guaranteed to preserve insertion order.
# Since `OrderedDict` is a little slower that `dict`, we prefer the latter
# when possible.
if sys.version_info[:2] >= (3, 7):
insertion_ordered_dict = dict
else:
import collections
insertion_ordered_dict = collections.OrderedDict
|
digits/inference/__init__.py | PhysicsTeacher13/Digits-NVIDIA | 111 | 22992 | <filename>digits/inference/__init__.py
# Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
from __future__ import absolute_import
from .images import ImageInferenceJob
from .job import InferenceJob
__all__ = [
'InferenceJob',
'ImageInferenceJob',
]
|
app/cli/plugin/__init__.py | lonless0/flask_project | 786 | 23011 | <reponame>lonless0/flask_project<gh_stars>100-1000
from .generator import generate
from .init import init
|
examples/panflute/myemph.py | jacobwhall/panflute | 361 | 23020 | #!/usr/bin/env python
import panflute as pf
"""
Pandoc filter that causes emphasis to be rendered using
the custom macro '\myemph{...}' rather than '\emph{...}'
in latex. Other output formats are unaffected.
"""
def latex(s):
return pf.RawInline(s, format='latex')
def myemph(e, doc):
if type(e)==pf.Emph and doc.format=='latex':
return pf.Span(latex('\\myemph{'), *e.items, latex('}'))
if __name__ == "__main__":
pf.toJSONFilter(myemph)
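# Typical use (assuming pandoc and panflute are installed, and that the macro
# is defined in the LaTeX template or via --include-in-header, e.g.
# \newcommand{\myemph}[1]{\textbf{#1}}):
#
#   pandoc input.md --filter ./myemph.py -o output.tex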
|
src/python/grpcio_tests/tests/interop/_intraop_test_case.py | txl0591/grpc | 117 | 23038 | <reponame>txl0591/grpc<filename>src/python/grpcio_tests/tests/interop/_intraop_test_case.py
# Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common code for unit tests of the interoperability test code."""
from tests.interop import methods
class IntraopTestCase(object):
"""Unit test methods.
This class must be mixed in with unittest.TestCase and a class that defines
setUp and tearDown methods that manage a stub attribute.
"""
def testEmptyUnary(self):
methods.TestCase.EMPTY_UNARY.test_interoperability(self.stub, None)
def testLargeUnary(self):
methods.TestCase.LARGE_UNARY.test_interoperability(self.stub, None)
def testServerStreaming(self):
methods.TestCase.SERVER_STREAMING.test_interoperability(self.stub, None)
def testClientStreaming(self):
methods.TestCase.CLIENT_STREAMING.test_interoperability(self.stub, None)
def testPingPong(self):
methods.TestCase.PING_PONG.test_interoperability(self.stub, None)
def testCancelAfterBegin(self):
methods.TestCase.CANCEL_AFTER_BEGIN.test_interoperability(self.stub,
None)
def testCancelAfterFirstResponse(self):
methods.TestCase.CANCEL_AFTER_FIRST_RESPONSE.test_interoperability(
self.stub, None)
def testTimeoutOnSleepingServer(self):
methods.TestCase.TIMEOUT_ON_SLEEPING_SERVER.test_interoperability(
self.stub, None)
|
tests/test_sagemaker/test_sagemaker_processing.py | gtourkas/moto | 5,460 | 23047 | import boto3
from botocore.exceptions import ClientError
import datetime
import pytest
from moto import mock_sagemaker
from moto.sts.models import ACCOUNT_ID
FAKE_ROLE_ARN = "arn:aws:iam::{}:role/FakeRole".format(ACCOUNT_ID)
TEST_REGION_NAME = "us-east-1"
class MyProcessingJobModel(object):
def __init__(
self,
processing_job_name,
role_arn,
container=None,
bucket=None,
prefix=None,
app_specification=None,
network_config=None,
processing_inputs=None,
processing_output_config=None,
processing_resources=None,
stopping_condition=None,
):
self.processing_job_name = processing_job_name
self.role_arn = role_arn
self.container = (
container
or "683313688378.dkr.ecr.us-east-1.amazonaws.com/sagemaker-scikit-learn:0.23-1-cpu-py3"
)
self.bucket = bucket or "my-bucket"
self.prefix = prefix or "sagemaker"
self.app_specification = app_specification or {
"ImageUri": self.container,
"ContainerEntrypoint": ["python3",],
}
self.network_config = network_config or {
"EnableInterContainerTrafficEncryption": False,
"EnableNetworkIsolation": False,
}
self.processing_inputs = processing_inputs or [
{
"InputName": "input",
"AppManaged": False,
"S3Input": {
"S3Uri": "s3://{}/{}/processing/".format(self.bucket, self.prefix),
"LocalPath": "/opt/ml/processing/input",
"S3DataType": "S3Prefix",
"S3InputMode": "File",
"S3DataDistributionType": "FullyReplicated",
"S3CompressionType": "None",
},
}
]
self.processing_output_config = processing_output_config or {
"Outputs": [
{
"OutputName": "output",
"S3Output": {
"S3Uri": "s3://{}/{}/processing/".format(
self.bucket, self.prefix
),
"LocalPath": "/opt/ml/processing/output",
"S3UploadMode": "EndOfJob",
},
"AppManaged": False,
}
]
}
self.processing_resources = processing_resources or {
"ClusterConfig": {
"InstanceCount": 1,
"InstanceType": "ml.m5.large",
"VolumeSizeInGB": 10,
},
}
self.stopping_condition = stopping_condition or {
"MaxRuntimeInSeconds": 3600,
}
def save(self):
sagemaker = boto3.client("sagemaker", region_name=TEST_REGION_NAME)
params = {
"AppSpecification": self.app_specification,
"NetworkConfig": self.network_config,
"ProcessingInputs": self.processing_inputs,
"ProcessingJobName": self.processing_job_name,
"ProcessingOutputConfig": self.processing_output_config,
"ProcessingResources": self.processing_resources,
"RoleArn": self.role_arn,
"StoppingCondition": self.stopping_condition,
}
return sagemaker.create_processing_job(**params)
@mock_sagemaker
def test_create_processing_job():
sagemaker = boto3.client("sagemaker", region_name=TEST_REGION_NAME)
processing_job_name = "MyProcessingJob"
role_arn = "arn:aws:iam::{}:role/FakeRole".format(ACCOUNT_ID)
container = "382416733822.dkr.ecr.us-east-1.amazonaws.com/linear-learner:1"
bucket = "my-bucket"
prefix = "my-prefix"
app_specification = {
"ImageUri": container,
"ContainerEntrypoint": ["python3", "app.py"],
}
processing_resources = {
"ClusterConfig": {
"InstanceCount": 2,
"InstanceType": "ml.m5.xlarge",
"VolumeSizeInGB": 20,
},
}
stopping_condition = {"MaxRuntimeInSeconds": 60 * 60}
job = MyProcessingJobModel(
processing_job_name,
role_arn,
container=container,
bucket=bucket,
prefix=prefix,
app_specification=app_specification,
processing_resources=processing_resources,
stopping_condition=stopping_condition,
)
resp = job.save()
resp["ProcessingJobArn"].should.match(
r"^arn:aws:sagemaker:.*:.*:processing-job/{}$".format(processing_job_name)
)
resp = sagemaker.describe_processing_job(ProcessingJobName=processing_job_name)
resp["ProcessingJobName"].should.equal(processing_job_name)
resp["ProcessingJobArn"].should.match(
r"^arn:aws:sagemaker:.*:.*:processing-job/{}$".format(processing_job_name)
)
assert "python3" in resp["AppSpecification"]["ContainerEntrypoint"]
assert "app.py" in resp["AppSpecification"]["ContainerEntrypoint"]
assert resp["RoleArn"] == role_arn
assert resp["ProcessingJobStatus"] == "Completed"
assert isinstance(resp["CreationTime"], datetime.datetime)
assert isinstance(resp["LastModifiedTime"], datetime.datetime)
@mock_sagemaker
def test_list_processing_jobs():
client = boto3.client("sagemaker", region_name="us-east-1")
name = "blah"
arn = "arn:aws:sagemaker:us-east-1:000000000000:x-x/foobar"
test_processing_job = MyProcessingJobModel(processing_job_name=name, role_arn=arn)
test_processing_job.save()
processing_jobs = client.list_processing_jobs()
assert len(processing_jobs["ProcessingJobSummaries"]).should.equal(1)
assert processing_jobs["ProcessingJobSummaries"][0][
"ProcessingJobName"
].should.equal(name)
assert processing_jobs["ProcessingJobSummaries"][0][
"ProcessingJobArn"
].should.match(r"^arn:aws:sagemaker:.*:.*:processing-job/{}$".format(name))
assert processing_jobs.get("NextToken") is None
@mock_sagemaker
def test_list_processing_jobs_multiple():
client = boto3.client("sagemaker", region_name="us-east-1")
name_job_1 = "blah"
arn_job_1 = "arn:aws:sagemaker:us-east-1:000000000000:x-x/foobar"
test_processing_job_1 = MyProcessingJobModel(
processing_job_name=name_job_1, role_arn=arn_job_1
)
test_processing_job_1.save()
name_job_2 = "blah2"
arn_job_2 = "arn:aws:sagemaker:us-east-1:000000000000:x-x/foobar2"
test_processing_job_2 = MyProcessingJobModel(
processing_job_name=name_job_2, role_arn=arn_job_2
)
test_processing_job_2.save()
processing_jobs_limit = client.list_processing_jobs(MaxResults=1)
assert len(processing_jobs_limit["ProcessingJobSummaries"]).should.equal(1)
processing_jobs = client.list_processing_jobs()
assert len(processing_jobs["ProcessingJobSummaries"]).should.equal(2)
assert processing_jobs.get("NextToken").should.be.none
@mock_sagemaker
def test_list_processing_jobs_none():
client = boto3.client("sagemaker", region_name="us-east-1")
processing_jobs = client.list_processing_jobs()
assert len(processing_jobs["ProcessingJobSummaries"]).should.equal(0)
@mock_sagemaker
def test_list_processing_jobs_should_validate_input():
client = boto3.client("sagemaker", region_name="us-east-1")
junk_status_equals = "blah"
with pytest.raises(ClientError) as ex:
client.list_processing_jobs(StatusEquals=junk_status_equals)
expected_error = f"1 validation errors detected: Value '{junk_status_equals}' at 'statusEquals' failed to satisfy constraint: Member must satisfy enum value set: ['Completed', 'Stopped', 'InProgress', 'Stopping', 'Failed']"
assert ex.value.response["Error"]["Code"] == "ValidationException"
assert ex.value.response["Error"]["Message"] == expected_error
junk_next_token = "<PASSWORD>"
with pytest.raises(ClientError) as ex:
client.list_processing_jobs(NextToken=junk_next_token)
assert ex.value.response["Error"]["Code"] == "ValidationException"
assert (
ex.value.response["Error"]["Message"]
== 'Invalid pagination token because "{0}".'
)
@mock_sagemaker
def test_list_processing_jobs_with_name_filters():
client = boto3.client("sagemaker", region_name="us-east-1")
for i in range(5):
name = "xgboost-{}".format(i)
arn = "arn:aws:sagemaker:us-east-1:000000000000:x-x/foobar-{}".format(i)
MyProcessingJobModel(processing_job_name=name, role_arn=arn).save()
for i in range(5):
name = "vgg-{}".format(i)
arn = "arn:aws:sagemaker:us-east-1:000000000000:x-x/barfoo-{}".format(i)
MyProcessingJobModel(processing_job_name=name, role_arn=arn).save()
xgboost_processing_jobs = client.list_processing_jobs(NameContains="xgboost")
assert len(xgboost_processing_jobs["ProcessingJobSummaries"]).should.equal(5)
processing_jobs_with_2 = client.list_processing_jobs(NameContains="2")
assert len(processing_jobs_with_2["ProcessingJobSummaries"]).should.equal(2)
@mock_sagemaker
def test_list_processing_jobs_paginated():
client = boto3.client("sagemaker", region_name="us-east-1")
for i in range(5):
name = "xgboost-{}".format(i)
arn = "arn:aws:sagemaker:us-east-1:000000000000:x-x/foobar-{}".format(i)
MyProcessingJobModel(processing_job_name=name, role_arn=arn).save()
xgboost_processing_job_1 = client.list_processing_jobs(
NameContains="xgboost", MaxResults=1
)
assert len(xgboost_processing_job_1["ProcessingJobSummaries"]).should.equal(1)
assert xgboost_processing_job_1["ProcessingJobSummaries"][0][
"ProcessingJobName"
].should.equal("xgboost-0")
assert xgboost_processing_job_1.get("NextToken").should_not.be.none
xgboost_processing_job_next = client.list_processing_jobs(
NameContains="xgboost",
MaxResults=1,
NextToken=xgboost_processing_job_1.get("NextToken"),
)
assert len(xgboost_processing_job_next["ProcessingJobSummaries"]).should.equal(1)
assert xgboost_processing_job_next["ProcessingJobSummaries"][0][
"ProcessingJobName"
].should.equal("xgboost-1")
assert xgboost_processing_job_next.get("NextToken").should_not.be.none
@mock_sagemaker
def test_list_processing_jobs_paginated_with_target_in_middle():
client = boto3.client("sagemaker", region_name="us-east-1")
for i in range(5):
name = "xgboost-{}".format(i)
arn = "arn:aws:sagemaker:us-east-1:000000000000:x-x/foobar-{}".format(i)
MyProcessingJobModel(processing_job_name=name, role_arn=arn).save()
for i in range(5):
name = "vgg-{}".format(i)
arn = "arn:aws:sagemaker:us-east-1:000000000000:x-x/barfoo-{}".format(i)
MyProcessingJobModel(processing_job_name=name, role_arn=arn).save()
vgg_processing_job_1 = client.list_processing_jobs(NameContains="vgg", MaxResults=1)
assert len(vgg_processing_job_1["ProcessingJobSummaries"]).should.equal(0)
assert vgg_processing_job_1.get("NextToken").should_not.be.none
vgg_processing_job_6 = client.list_processing_jobs(NameContains="vgg", MaxResults=6)
assert len(vgg_processing_job_6["ProcessingJobSummaries"]).should.equal(1)
assert vgg_processing_job_6["ProcessingJobSummaries"][0][
"ProcessingJobName"
].should.equal("vgg-0")
assert vgg_processing_job_6.get("NextToken").should_not.be.none
vgg_processing_job_10 = client.list_processing_jobs(
NameContains="vgg", MaxResults=10
)
assert len(vgg_processing_job_10["ProcessingJobSummaries"]).should.equal(5)
assert vgg_processing_job_10["ProcessingJobSummaries"][-1][
"ProcessingJobName"
].should.equal("vgg-4")
assert vgg_processing_job_10.get("NextToken").should.be.none
@mock_sagemaker
def test_list_processing_jobs_paginated_with_fragmented_targets():
client = boto3.client("sagemaker", region_name="us-east-1")
for i in range(5):
name = "xgboost-{}".format(i)
arn = "arn:aws:sagemaker:us-east-1:000000000000:x-x/foobar-{}".format(i)
MyProcessingJobModel(processing_job_name=name, role_arn=arn).save()
for i in range(5):
name = "vgg-{}".format(i)
arn = "arn:aws:sagemaker:us-east-1:000000000000:x-x/barfoo-{}".format(i)
MyProcessingJobModel(processing_job_name=name, role_arn=arn).save()
processing_jobs_with_2 = client.list_processing_jobs(NameContains="2", MaxResults=8)
assert len(processing_jobs_with_2["ProcessingJobSummaries"]).should.equal(2)
assert processing_jobs_with_2.get("NextToken").should_not.be.none
processing_jobs_with_2_next = client.list_processing_jobs(
NameContains="2",
MaxResults=1,
NextToken=processing_jobs_with_2.get("NextToken"),
)
assert len(processing_jobs_with_2_next["ProcessingJobSummaries"]).should.equal(0)
assert processing_jobs_with_2_next.get("NextToken").should_not.be.none
processing_jobs_with_2_next_next = client.list_processing_jobs(
NameContains="2",
MaxResults=1,
NextToken=processing_jobs_with_2_next.get("NextToken"),
)
assert len(processing_jobs_with_2_next_next["ProcessingJobSummaries"]).should.equal(
0
)
assert processing_jobs_with_2_next_next.get("NextToken").should.be.none
|
descarteslabs/workflows/models/tests/test_tile_url.py | descarteslabs/descarteslabs-python | 167 | 23057 | import pytest
import datetime
import json
import functools
from urllib.parse import urlencode, parse_qs
from descarteslabs.common.graft import client as graft_client
from ... import types
from .. import tile_url
def test_url():
base = "foo"
base_q = base + "?"
url = functools.partial(tile_url.tile_url, base, types.Image.from_id(""))
assert url() == base
assert url(session_id="foo") == base_q + urlencode({"session_id": "foo"})
assert url(colormap="foo") == base_q + urlencode({"colormap": "foo"})
assert url(colormap="") == base_q + urlencode({"colormap": ""})
assert url(reduction="mean") == base_q + urlencode({"reduction": "mean"})
assert url(checkerboard=True) == base_q + urlencode({"checkerboard": "true"})
assert url(checkerboard=False) == base_q + urlencode({"checkerboard": "false"})
assert url(bands=["red"]) == base_q + urlencode({"band": "red"})
assert url(bands=["red", "green"]) == base_q + urlencode(
{"band": ["red", "green"]}, doseq=True
)
with pytest.raises(ValueError, match="Up to 3 bands may be specified, not 4"):
url(bands=["a", "b", "c", "d"])
# 1-band scales are normalized
assert url(scales=[0, 1]) == base_q + urlencode({"scales": "[[0.0, 1.0]]"})
# If all none scales, not included
assert url(scales=[None, None]) == base_q + urlencode({"scales": "null"})
# test everything gets added together correctly
got_base, params = url(
session_id="foo", colormap="bar", bands=["red", "green"]
).split("?")
assert got_base == base
query = parse_qs(params, strict_parsing=True, keep_blank_values=True)
assert query == {
# `parse_qs` returns all values wrapped in lists
"session_id": ["foo"],
"colormap": ["bar"],
"band": ["red", "green"],
}
@pytest.mark.parametrize(
"args",
[
{
"p1": "2021-01-20",
"p2": 2.2,
"p3": 1,
},
{
"p1": datetime.datetime(2020, 1, 20),
"p2": types.Float(1.1) + 1,
"p3": 1,
},
{
"p1": types.Datetime(2021, 1, 20),
"p2": types.Float(1.1) + 1,
"p3": types.Int(1),
},
],
)
def test_url_arguments(args):
func = types.Function[
dict(p1=types.Datetime, p2=types.Float, p3=types.Int), types.Image
]("x")
base = "http://base.net"
url = functools.partial(tile_url.tile_url, base, func)
with pytest.raises(TypeError, match="missing a required argument"):
url()
with pytest.raises(TypeError, match="got an unexpected keyword argument 'blah'"):
url(**args, blah="bad")
with graft_client.consistent_guid():
got_base, params = url(**args).split("?")
assert got_base == base
query = parse_qs(params, strict_parsing=True, keep_blank_values=True)
assert query.keys() == args.keys()
with graft_client.consistent_guid():
p1_graft = types.Datetime._promote(args["p1"]).graft
assert query["p1"] == [json.dumps(p1_graft)]
if isinstance(args["p2"], float):
assert query["p2"] == ["2.2"]
else:
assert query["p2"] == [json.dumps(args["p2"].graft)]
assert query["p3"] == ["1"]
def test_no_url_for_positional_only_function():
with pytest.raises(
TypeError, match="cannot use Functions with positional-only arguments"
):
tile_url.tile_url("", types.Function[types.Int, {}, types.Image]("x"))
def test_validate_scales():
assert tile_url.validate_scales([[0.0, 1.0], [0.0, 2.0], [-1.0, 1.0]]) == [
[0.0, 1.0],
[0.0, 2.0],
[-1.0, 1.0],
]
assert tile_url.validate_scales([[0.0, 1.0]]) == [[0.0, 1.0]]
# ints -> floats
assert tile_url.validate_scales([[0, 1]]) == [[0.0, 1.0]]
# 1-band convenience
assert tile_url.validate_scales([0, 1]) == [[0.0, 1.0]]
# no scalings
assert tile_url.validate_scales(None) == []
assert tile_url.validate_scales([]) == []
with pytest.raises(TypeError, match="Expected a list or tuple of scales"):
tile_url.validate_scales(0)
with pytest.raises(TypeError, match="Expected a list or tuple of scales"):
tile_url.validate_scales("foo")
with pytest.raises(TypeError, match="Scaling 0: expected a 2-item list or tuple"):
tile_url.validate_scales([1, 2, 3])
with pytest.raises(TypeError, match="Scaling 0: items in scaling must be numbers"):
tile_url.validate_scales([1, "foo"])
with pytest.raises(ValueError, match="expected up to 3 scales, but got 4"):
tile_url.validate_scales([[0.0, 1.0], [0.0, 1.0], [0.0, 1.0], [0.0, 1.0]])
with pytest.raises(ValueError, match="but length was 3"):
tile_url.validate_scales([[0.0, 1.0, 2.0]])
with pytest.raises(ValueError, match="but length was 1"):
tile_url.validate_scales([[0.0]])
with pytest.raises(ValueError, match="one number and one None in scales"):
tile_url.validate_scales([[None, 1.0]])
|
census_data_downloader/tables/medianage.py | JoeGermuska/census-data-downloader | 170 | 23091 | #! /usr/bin/env python
# -*- coding: utf-8 -*
import collections
from census_data_downloader.core.tables import BaseTableConfig
from census_data_downloader.core.decorators import register
@register
class MedianAgeDownloader(BaseTableConfig):
PROCESSED_TABLE_NAME = 'medianage'
UNIVERSE = "total population"
RAW_TABLE_NAME = 'B01002'
RAW_FIELD_CROSSWALK = collections.OrderedDict({
"001": "median",
"002": "male",
"003": "female"
})
|
HetSANN_MRV/execute_sparse.py | xhhszc/hetsann | 116 | 23097 | import os
import time
import random
import scipy.sparse as sp
import numpy as np
import tensorflow as tf
import argparse
from models import SpHGAT
from utils import process
parser = argparse.ArgumentParser()
parser.add_argument('--dataset', help='Dataset.', default='imdb', type=str)
parser.add_argument('--epochs', help='Epochs.', default=100000, type=int)
parser.add_argument('--patience', help='Patience for early stopping.', default=100, type=int)
parser.add_argument('--lr', help='Learning rate.', default=0.005, type=float)
parser.add_argument('--l2_coef', help='Weight decay.', default=0.0005, type=float)
parser.add_argument('--dropout', help='Dropout.', default=0.6, type=float)
parser.add_argument('--train_rate', help='Label rate for training.', default=0.1, type=float)
parser.add_argument('--seed', help='Random seed for data splitting.', default=None, type=int)
parser.add_argument('--layers', help='Number of layers.', default=2, type=int)
parser.add_argument('--hid', help='Number of hidden units per head in each layer.',
nargs='*', default=[8, 8], type=int)
parser.add_argument('--heads', help='Number of attention heads in each layer.',
nargs='*', default=[8, 1], type=int)
parser.add_argument('--residue', help='Use residual connections.', action='store_true')
parser.add_argument('--repeat', help='Repeat.', default=10, type=int)
parser.add_argument('--random_feature', help='Random features', action='store_true')
parser.add_argument('--target_node', help='Indices of the target node types for classification.',
nargs='*', default=[0, 1], type=int)
parser.add_argument('--target_is_multilabels', help='Whether each target node type is multi-label (0 = single-label, any other value = multi-label).',
nargs='*', default=[0, 1], type=int)
parser.add_argument('--saved_model_suffix', help='Suffix used to separate checkpoint directories.', default="", type=str)
parser.add_argument('--no_attn_reg', help='Do not use edge direction regularization', action='store_true')
parser.add_argument('--simple_inner', help='Use original inner product', action='store_true')
parser.add_argument('--loop_coef', help='Coefficient for regularization.', default=1e-3, type=float)
parser.add_argument('--inv_coef', help='Coefficient for regularization.', default=1e-3, type=float)
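# Example invocation (hypothetical; the values simply mirror the defaults above):
#   python execute_sparse.py --dataset imdb --train_rate 0.1 --layers 2 \
#       --hid 8 8 --heads 8 1 --target_node 0 1 --target_is_multilabels 0 1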
config = tf.ConfigProto(allow_soft_placement=True)
config.gpu_options.allow_growth = True
args= parser.parse_args()
dataset = args.dataset
checkpt_file = 'pre_trained/{}/{}/{}.ckpt'.format(dataset, args.saved_model_suffix, dataset)
checkpt_file = checkpt_file.replace('//', '/')
process.mkdir(os.path.split(checkpt_file)[0])
# training params
batch_size = 1
train_rate = args.train_rate
seed = args.seed
nb_epochs = args.epochs
patience = args.patience
lr = args.lr # learning rate
l2_coef = args.l2_coef # weight decay
dropout = args.dropout
repeat = args.repeat
random_feature = args.random_feature
target_node = args.target_node
is_multilabel = [False if t==0 else True for t in args.target_is_multilabels]
loop_coef = args.loop_coef
inv_coef = args.inv_coef
layers = args.layers
hid = args.hid
if len(hid) == 1:
hid_units = hid * layers
elif len(hid) == layers:
hid_units = hid
heads = args.heads
if len(heads) == 1:
n_heads = heads * layers
elif len(heads) == 2:
n_heads = [heads[0]] * (layers - 1) + [heads[1]]
elif len(heads) == layers:
n_heads = heads
residual = args.residue # False
nonlinearity = tf.nn.elu
model = SpHGAT
no_attn_reg = args.no_attn_reg
simple_inner = args.simple_inner
random.seed(seed) # random seed for random data split only
print('Dataset: ' + dataset)
print('Train rate: ' + str(train_rate))
print('----- Opt. hyperparams -----')
print('lr: ' + str(lr))
print('l2_coef: ' + str(l2_coef))
print('----- Archi. hyperparams -----')
print('nb. layers: ' + str(len(hid_units)))
print('nb. units per layer: ' + str(hid_units))
print('nb. attention heads: ' + str(n_heads))
print('residual: ' + str(residual))
print('nonlinearity: ' + str(nonlinearity))
print('model: ' + str(model))
print('target nodes: ', target_node)
print('is_multilabel: ', is_multilabel)
print('loop_coef:', loop_coef)
print('inv_coef:', inv_coef)
sparse = True
metr_num = 2
total_vl_acc = np.array([0.]*(len(target_node)*metr_num))  # accumulated validation metrics over repeats
total_ts_acc = np.array([0.]*(len(target_node)*metr_num))  # accumulated test metrics over repeats
def get_loss_acc(logits, labels, msk, is_multilabel=False):
global model
class_num = labels.shape[-1]
log_resh = tf.reshape(logits, [-1, class_num])
lab_resh = tf.reshape(labels, [-1, class_num])
msk_resh = tf.reshape(msk, [-1])
if is_multilabel:
loss = model.masked_sigmoid_cross_entropy(log_resh, lab_resh, msk_resh)
accuracy = [model.micro_f1(log_resh, lab_resh, msk_resh), model.macro_f1(log_resh, lab_resh, msk_resh)]
acc_name = ['if1', 'af1']
acc_full_name = ['micro f1', 'macro f1']
else:
loss = model.masked_softmax_cross_entropy(log_resh, lab_resh, msk_resh)
accuracy = [model.micro_f1_onelabel(log_resh, lab_resh, msk_resh), model.macro_f1_onelabel(log_resh, lab_resh, msk_resh)]
acc_name = ['if1', 'af1']
acc_full_name = ['micro f1', 'macro f1']
return loss, accuracy, acc_name, acc_full_name
def print_eachclass_info(train_loss_each, train_acc_each, val_loss_each, val_acc_each, acc_name):
tl_average = np.mean(np.array(train_loss_each), axis=0)
ta_average = np.mean(np.array(train_acc_each), axis=0)
vl_average = np.mean(np.array(val_loss_each), axis=0)
va_average = np.mean(np.array(val_acc_each), axis=0)
metric_num = int(len(ta_average)/len(tl_average))
for i in range(len(tl_average)):
line = '\t\t target %s: loss = %.3f, ' % (i, tl_average[i])
for j in range(metric_num):
line += '%s = %.5f, ' % (acc_name[i*metric_num+j], ta_average[i*metric_num+j])
line += '| Val: loss = %.3f, ' % (vl_average[i])
for j in range(metric_num):
line += '%s = %.5f, ' % (acc_name[i*metric_num+j], va_average[i*metric_num+j])
print(line)
for repeat_i in range(repeat):
print('Run #' + str(repeat_i) + ':')
adj, adj_type, edge_list, features, y_train, y_val, y_test,\
train_mask, val_mask, test_mask = process.load_heterogeneous_data(dataset, train_rate=train_rate, target_node=target_node)
features = [process.preprocess_features(feature)[0] for feature in features]
nb_nodes = [feature.shape[0] for feature in features]
ft_size = [feature.shape[1] for feature in features]
nb_classes = [y.shape[1] for y in y_train]
features = [feature[np.newaxis] for feature in features]
y_train = [y[np.newaxis] for y in y_train]
y_val = [y[np.newaxis] for y in y_val]
y_test = [y[np.newaxis] for y in y_test]
train_mask = [m[np.newaxis] for m in train_mask]
val_mask = [m[np.newaxis] for m in val_mask]
test_mask = [m[np.newaxis] for m in test_mask]
if random_feature:
features[0] = np.random.standard_normal(features[0].shape)
if sparse:
biases = [process.preprocess_adj_hete(a) for a in adj] # transposed here
else:
biases = []
for a in adj:
a = a.todense()
a = a[np.newaxis]
if no_attn_reg:
edge_list = [(i,) for i in range(len(adj_type))]
if simple_inner:
edge_list = []
with tf.Graph().as_default():
with tf.name_scope('input'):
ftr_in = [tf.placeholder(dtype=tf.float32,
shape=(batch_size, nb, ft)) for nb, ft in zip(nb_nodes, ft_size)]
if sparse:
bias_in = [tf.sparse_placeholder(dtype=tf.float32) for _ in biases]
else:
bias_in = None
lbl_in = [tf.placeholder(dtype=tf.int32, shape=(batch_size, nb_nodes[target_node[i]], nb_classes[i])) for i in range(len(nb_classes))]
msk_in = [tf.placeholder(dtype=tf.int32, shape=(batch_size, nb_nodes[target_node[i]])) for i in range(len(nb_classes))]
attn_drop = tf.placeholder(dtype=tf.float32, shape=())
ffd_drop = tf.placeholder(dtype=tf.float32, shape=())
is_train = tf.placeholder(dtype=tf.bool, shape=())
logits = model.inference(ftr_in, nb_classes, nb_nodes, is_train,
attn_drop, ffd_drop, target_nodes=target_node,
bias_mat=bias_in, adj_type=adj_type,
edge_list=edge_list,
hid_units=hid_units, n_heads=n_heads,
residual=residual, activation=nonlinearity)
with tf.name_scope('loss_acc'):
loss, accuracy, acc_name, acc_full_name = [], [], [], []
all_class_loss = 0.0
for tn in range(len(target_node)):
tn_logits = logits[tn]
tn_labels = lbl_in[tn]
tn_masks = msk_in[tn]
tn_is_multilabel = is_multilabel[tn]
tn_loss, tn_accuracy, tn_acc_name, tn_acc_full_name = get_loss_acc(tn_logits, tn_labels, tn_masks, is_multilabel=tn_is_multilabel)
loss.append(tn_loss)
accuracy.extend(tn_accuracy)
acc_name.extend(tn_acc_name)
acc_full_name.extend(tn_acc_full_name)
all_class_loss += tn_loss
loss_loop = tf.add_n(tf.get_collection('loss_loop')) * loop_coef
loss_inv= tf.add_n(tf.get_collection('loss_inv')) * inv_coef
train_op = model.training(all_class_loss + loss_loop + loss_inv, lr, l2_coef)
saver = tf.train.Saver()
init_op = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())
vlss_mn = np.inf
vacc_mx = 0.0
curr_step = 0
with tf.Session(config=config) as sess:
sess.run(init_op)
vacc_early_model = 0.0
vlss_early_model = 0.0
vacc_each_early_model = np.array([0.]*(len(target_node)*metr_num))
for epoch in range(nb_epochs):
# summary information
train_loss_avg = 0
train_acc_avg = 0
val_loss_avg = 0
val_acc_avg = 0
# for each class information
train_loss_each = []
train_acc_each = []
val_loss_each = []
val_acc_each = []
tr_step = 0
tr_size = features[0].shape[0]
while tr_step * batch_size < tr_size:
if sparse:
fd = {i: d for i, d in zip(ftr_in, features)}
fd.update({i: d for i, d in zip(bias_in, biases)})
else:
fd = {i: d[tr_step * batch_size:(tr_step + 1) * batch_size]
for i, d in zip(ftr_in, features)}
fd.update({i: d[tr_step * batch_size:(tr_step + 1) * batch_size]
for i, d in zip(bias_in, biases)})
fd.update({i:d[tr_step*batch_size:(tr_step+1)*batch_size] for i, d in zip(lbl_in, y_train)})
fd.update({i:d[tr_step*batch_size:(tr_step+1)*batch_size] for i, d in zip(msk_in, train_mask)})
fd.update({is_train: True})
fd.update({attn_drop: dropout, ffd_drop:dropout})
_, loss_list_tr, acc_list_tr, loss_loop_tr, loss_inv_tr = sess.run([train_op, loss, accuracy, loss_loop, loss_inv], feed_dict=fd)
train_loss_each.append(np.array(loss_list_tr))
train_acc_each.append(np.array(acc_list_tr))
train_loss_avg += np.sum(np.array(loss_list_tr))
train_acc_avg += np.sum(np.array(acc_list_tr))
tr_step += 1
vl_step = 0
vl_size = features[0].shape[0]
while vl_step * batch_size < vl_size:
if sparse:
fd = {i: d for i, d in zip(ftr_in, features)}
fd.update({i: d for i, d in zip(bias_in, biases)})
else:
fd = {i: d[vl_step * batch_size:(vl_step + 1) * batch_size]
for i, d in zip(ftr_in, features)}
fd.update({i: d[vl_step * batch_size:(vl_step + 1) * batch_size]
for i, d in zip(bias_in, biases)})
fd.update({i:d[vl_step*batch_size:(vl_step+1)*batch_size] for i, d in zip(lbl_in, y_val)})
fd.update({i:d[vl_step*batch_size:(vl_step+1)*batch_size] for i, d in zip(msk_in, val_mask)})
fd.update({is_train: False})
fd.update({attn_drop: 0.0, ffd_drop:0.0})
loss_list_vl, acc_list_vl = sess.run([loss, accuracy], feed_dict=fd)
acc_list_vl = [0. if np.isnan(acc_vl) else acc_vl for acc_vl in acc_list_vl]
val_loss_each.append(np.array(loss_list_vl))
val_acc_each.append(np.array(acc_list_vl))
val_loss_avg += np.sum(np.array(loss_list_vl))
val_acc_avg += np.sum(np.array(acc_list_vl))
vl_step += 1
print('Training %s: loss = %.5f, %s = %.5f, loss_loop = %.5f, loss_inv = %.5f | Val: loss = %.5f, %s = %.5f' %
(epoch, train_loss_avg/tr_step, 'acc/F1', train_acc_avg/tr_step,
loss_loop_tr, loss_inv_tr,
val_loss_avg/vl_step, 'acc/F1', val_acc_avg/vl_step))
print_eachclass_info(train_loss_each, train_acc_each, val_loss_each, val_acc_each, acc_name)
if val_acc_avg/vl_step > vacc_mx or val_loss_avg/vl_step < vlss_mn:
if val_acc_avg/vl_step > vacc_mx and val_loss_avg/vl_step < vlss_mn:
vacc_early_model = val_acc_avg/vl_step
vlss_early_model = val_loss_avg/vl_step
vacc_each_early_model = np.mean(np.array(val_acc_each), axis=0)
saver.save(sess, checkpt_file)
print("saved model as %s" % checkpt_file)
vacc_mx = np.max((val_acc_avg/vl_step, vacc_mx))
vlss_mn = np.min((val_loss_avg/vl_step, vlss_mn))
curr_step = 0
else:
curr_step += 1
if curr_step == patience:
print('Early stop! Min loss: ', vlss_mn,
', Max', 'acc/F1', ': ', vacc_mx)
print('Early stop model validation loss: ', vlss_early_model,
', ', 'acc/F1', ': ', vacc_early_model)
total_vl_acc += vacc_each_early_model
break
if curr_step < patience:
print('Min loss: ', vlss_mn, ', Max', 'acc/F1', ': ', vacc_mx)
print('model validation loss: ', vlss_early_model, ', ', 'acc/F1', ': ', vacc_early_model)
total_vl_acc += vacc_each_early_model
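# Restore the best checkpoint found during training and evaluate it on the test split.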
saver.restore(sess, checkpt_file)
ts_size = features[0].shape[0]
ts_step = 0
test_loss_each = []
test_acc_each = []
while ts_step * batch_size < ts_size:
if sparse:
fd = {i: d for i, d in zip(ftr_in, features)}
fd.update({i: d for i, d in zip(bias_in, biases)})
else:
fd = {i: d[ts_step * batch_size:(ts_step + 1) * batch_size]
for i, d in zip(ftr_in, features)}
fd.update({i: d[ts_step * batch_size:(ts_step + 1) * batch_size]
for i, d in zip(bias_in, biases)})
fd.update({i:d[ts_step*batch_size:(ts_step+1)*batch_size] for i, d in zip(lbl_in, y_test)})
fd.update({i:d[ts_step*batch_size:(ts_step+1)*batch_size] for i, d in zip(msk_in, test_mask)})
fd.update({is_train: False})
fd.update({attn_drop: 0.0, ffd_drop:0.0})
loss_list_ts, acc_list_ts = sess.run([loss, accuracy], feed_dict=fd)
test_loss_each.append(np.array(loss_list_ts))
test_acc_each.append(np.array(acc_list_ts))
ts_step += 1
test_loss_each = np.mean(np.array(test_loss_each), axis=0)
test_acc_each = np.mean(np.array(test_acc_each), axis=0)
print('*'*10,'Test information:', '*'*10)
for e in range(len(target_node)):
print('target %s: loss: %.3f, %s:%.5f, %s:%.5f' % (e, test_loss_each[e], acc_full_name[e*metr_num], test_acc_each[e*metr_num], acc_full_name[e*metr_num+1], test_acc_each[e*metr_num+1]))
total_ts_acc += test_acc_each
sess.close()
print('Validation:', total_vl_acc/repeat, 'Test:', total_ts_acc/repeat)
|
exercises/de/test_01_07.py | Jette16/spacy-course | 2,085 | 23147 | def test():
assert "spacy.load" in __solution__, "Rufst du spacy.load auf?"
assert nlp.meta["lang"] == "de", "Lädst du das korrekte Modell?"
assert nlp.meta["name"] == "core_news_sm", "Lädst du das korrekte Modell?"
assert "nlp(text)" in __solution__, "Verarbeitest du den Text korrekt?"
assert "print(doc.text)" in __solution__, "Druckst du den Text des Doc?"
__msg__.good(
"Gut gemacht! Jetzt wo du das Laden von Modellen geübt hast, lass uns "
"mal ein paar ihrer Vorhersagen anschauen."
)
|
abm-predator-prey.py | RachidStat/PyCX | 176 | 23148 | import pycxsimulator
from pylab import *
import copy as cp
nr = 500. # carrying capacity of rabbits
r_init = 100 # initial rabbit population
mr = 0.03 # magnitude of movement of rabbits
dr = 1.0 # death rate of rabbits when it faces foxes
rr = 0.1 # reproduction rate of rabbits
f_init = 30 # initial fox population
mf = 0.05 # magnitude of movement of foxes
df = 0.1 # death rate of foxes when there is no food
rf = 0.5 # reproduction rate of foxes
cd = 0.02 # radius for collision detection
cdsq = cd ** 2
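# comparing squared distances against cdsq avoids a sqrt per pair check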
class agent:
pass
def initialize():
global agents
agents = []
for i in range(r_init + f_init):
ag = agent()
ag.type = 'r' if i < r_init else 'f'
ag.x = random()
ag.y = random()
agents.append(ag)
def observe():
global agents
cla()
rabbits = [ag for ag in agents if ag.type == 'r']
if len(rabbits) > 0:
x = [ag.x for ag in rabbits]
y = [ag.y for ag in rabbits]
plot(x, y, 'b.')
foxes = [ag for ag in agents if ag.type == 'f']
if len(foxes) > 0:
x = [ag.x for ag in foxes]
y = [ag.y for ag in foxes]
plot(x, y, 'ro')
axis('image')
axis([0, 1, 0, 1])
def update_one_agent():
global agents
if agents == []:
return
ag = choice(agents)
# simulating random movement
m = mr if ag.type == 'r' else mf
ag.x += uniform(-m, m)
ag.y += uniform(-m, m)
ag.x = 1 if ag.x > 1 else 0 if ag.x < 0 else ag.x
ag.y = 1 if ag.y > 1 else 0 if ag.y < 0 else ag.y
# detecting collision and simulating death or birth
neighbors = [nb for nb in agents if nb.type != ag.type
and (ag.x - nb.x)**2 + (ag.y - nb.y)**2 < cdsq]
if ag.type == 'r':
if len(neighbors) > 0: # if there are foxes nearby
if random() < dr:
agents.remove(ag)
return
if random() < rr*(1-sum([1 for x in agents if x.type == 'r'])/nr):
agents.append(cp.copy(ag))
else:
if len(neighbors) == 0: # if there are no rabbits nearby
if random() < df:
agents.remove(ag)
return
else: # if there are rabbits nearby
if random() < rf:
agents.append(cp.copy(ag))
def update():
global agents
t = 0.
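# Asynchronous updating: each iteration updates one randomly chosen agent and
# advances time by 1/N, so one unit of time corresponds to about N agent updates.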
while t < 1. and len(agents) > 0:
t += 1. / len(agents)
update_one_agent()
pycxsimulator.GUI().start(func=[initialize, observe, update])
|
ml_collections/config_dict/tests/frozen_config_dict_test.py | wyddmw/ViT-pytorch-1 | 311 | 23152 | # Copyright 2021 The ML Collections Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Tests for ml_collections.FrozenConfigDict."""
from collections import abc as collections_abc
import copy
import pickle
from absl.testing import absltest
import ml_collections
_TEST_DICT = {
'int': 2,
'list': [1, 2],
'nested_list': [[1, [2]]],
'set': {1, 2},
'tuple': (1, 2),
'frozenset': frozenset({1, 2}),
'dict': {
'float': -1.23,
'list': [1, 2],
'dict': {},
'tuple_containing_list': (1, 2, (3, [4, 5], (6, 7))),
'list_containing_tuple': [1, 2, [3, 4], (5, 6)],
},
'ref': ml_collections.FieldReference({'int': 0})
}
def _test_dict_deepcopy():
return copy.deepcopy(_TEST_DICT)
def _test_configdict():
return ml_collections.ConfigDict(_TEST_DICT)
def _test_frozenconfigdict():
return ml_collections.FrozenConfigDict(_TEST_DICT)
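# A minimal usage sketch of the class under test: FrozenConfigDict recursively
# converts mutable containers (lists become tuples, sets become frozensets) and
# exposes fields as attributes, e.g.:
#   fcd = ml_collections.FrozenConfigDict({'a': [1, 2]})
#   assert fcd.a == (1, 2)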
class FrozenConfigDictTest(absltest.TestCase):
"""Tests FrozenConfigDict in config flags library."""
def assertFrozenRaisesValueError(self, input_list):
"""Assert initialization on all elements of input_list raise ValueError."""
for initial_dictionary in input_list:
with self.assertRaises(ValueError):
_ = ml_collections.FrozenConfigDict(initial_dictionary)
def testBasicEquality(self):
"""Tests basic equality with different types of initialization."""
fcd = _test_frozenconfigdict()
fcd_cd = ml_collections.FrozenConfigDict(_test_configdict())
fcd_fcd = ml_collections.FrozenConfigDict(fcd)
self.assertEqual(fcd, fcd_cd)
self.assertEqual(fcd, fcd_fcd)
def testImmutability(self):
"""Tests immutability of frozen config."""
fcd = _test_frozenconfigdict()
self.assertEqual(fcd.list, tuple(_TEST_DICT['list']))
self.assertEqual(fcd.tuple, _TEST_DICT['tuple'])
self.assertEqual(fcd.set, frozenset(_TEST_DICT['set']))
self.assertEqual(fcd.frozenset, _TEST_DICT['frozenset'])
# Must manually check set to frozenset conversion, since Python == does not
self.assertIsInstance(fcd.set, frozenset)
self.assertEqual(fcd.dict.list, tuple(_TEST_DICT['dict']['list']))
self.assertNotEqual(fcd.dict.tuple_containing_list,
_TEST_DICT['dict']['tuple_containing_list'])
self.assertEqual(fcd.dict.tuple_containing_list[2][1],
tuple(_TEST_DICT['dict']['tuple_containing_list'][2][1]))
self.assertIsInstance(fcd.dict, ml_collections.FrozenConfigDict)
with self.assertRaises(AttributeError):
fcd.newitem = 0
with self.assertRaises(AttributeError):
fcd.dict.int = 0
with self.assertRaises(AttributeError):
fcd['newitem'] = 0
with self.assertRaises(AttributeError):
del fcd.int
with self.assertRaises(AttributeError):
del fcd['int']
def testLockAndFreeze(self):
"""Ensures .lock() and .freeze() raise errors."""
fcd = _test_frozenconfigdict()
self.assertFalse(fcd.is_locked)
self.assertFalse(fcd.as_configdict().is_locked)
with self.assertRaises(AttributeError):
fcd.lock()
with self.assertRaises(AttributeError):
fcd.unlock()
with self.assertRaises(AttributeError):
fcd.freeze()
with self.assertRaises(AttributeError):
fcd.unfreeze()
def testInitConfigDict(self):
"""Tests that ConfigDict initialization handles FrozenConfigDict.
Initializing a ConfigDict on a dictionary with FrozenConfigDict values
should unfreeze these values.
"""
dict_without_fcd_node = _test_dict_deepcopy()
dict_without_fcd_node.pop('ref')
dict_with_fcd_node = copy.deepcopy(dict_without_fcd_node)
dict_with_fcd_node['dict'] = ml_collections.FrozenConfigDict(
dict_with_fcd_node['dict'])
cd_without_fcd_node = ml_collections.ConfigDict(dict_without_fcd_node)
cd_with_fcd_node = ml_collections.ConfigDict(dict_with_fcd_node)
fcd_without_fcd_node = ml_collections.FrozenConfigDict(
dict_without_fcd_node)
fcd_with_fcd_node = ml_collections.FrozenConfigDict(dict_with_fcd_node)
self.assertEqual(cd_without_fcd_node, cd_with_fcd_node)
self.assertEqual(fcd_without_fcd_node, fcd_with_fcd_node)
def testInitCopying(self):
"""Tests that initialization copies when and only when necessary.
Ensures copying only occurs when converting mutable type to immutable type,
regardless of whether the FrozenConfigDict is initialized by a dict or a
FrozenConfigDict. Also ensures no copying occurs when converting from
FrozenConfigDict back to ConfigDict.
"""
fcd = _test_frozenconfigdict()
# These should be uncopied when creating fcd
fcd_unchanged_from_test_dict = [
(_TEST_DICT['tuple'], fcd.tuple),
(_TEST_DICT['frozenset'], fcd.frozenset),
(_TEST_DICT['dict']['tuple_containing_list'][2][2],
fcd.dict.tuple_containing_list[2][2]),
(_TEST_DICT['dict']['list_containing_tuple'][3],
fcd.dict.list_containing_tuple[3])
]
# These should be copied when creating fcd
fcd_different_from_test_dict = [
(_TEST_DICT['list'], fcd.list),
(_TEST_DICT['dict']['tuple_containing_list'][2][1],
fcd.dict.tuple_containing_list[2][1])
]
for (x, y) in fcd_unchanged_from_test_dict:
self.assertEqual(id(x), id(y))
for (x, y) in fcd_different_from_test_dict:
self.assertNotEqual(id(x), id(y))
# Also make sure that converting back to ConfigDict makes no copies
self.assertEqual(
id(_TEST_DICT['dict']['tuple_containing_list']),
id(ml_collections.ConfigDict(fcd).dict.tuple_containing_list))
def testAsConfigDict(self):
"""Tests that converting FrozenConfigDict to ConfigDict works correctly.
In particular, ensures that FrozenConfigDict does the inverse of ConfigDict
regarding type_safe, lock, and attribute mutability.
"""
# First ensure conversion to ConfigDict works on empty FrozenConfigDict
self.assertEqual(
ml_collections.ConfigDict(ml_collections.FrozenConfigDict()),
ml_collections.ConfigDict())
cd = _test_configdict()
cd_fcd_cd = ml_collections.ConfigDict(ml_collections.FrozenConfigDict(cd))
self.assertEqual(cd, cd_fcd_cd)
# Make sure locking is respected
cd.lock()
self.assertEqual(
cd, ml_collections.ConfigDict(ml_collections.FrozenConfigDict(cd)))
# Make sure type_safe is respected
cd = ml_collections.ConfigDict(_TEST_DICT, type_safe=False)
self.assertEqual(
cd, ml_collections.ConfigDict(ml_collections.FrozenConfigDict(cd)))
def testInitSelfReferencing(self):
"""Ensure initialization fails on self-referencing dicts."""
self_ref = {}
self_ref['self'] = self_ref
parent_ref = {'dict': {}}
parent_ref['dict']['parent'] = parent_ref
tuple_parent_ref = {'dict': {}}
tuple_parent_ref['dict']['tuple'] = (1, 2, tuple_parent_ref)
attribute_cycle = {'dict': copy.deepcopy(self_ref)}
self.assertFrozenRaisesValueError(
[self_ref, parent_ref, tuple_parent_ref, attribute_cycle])
def testInitCycles(self):
"""Ensure initialization fails if an attribute of input is cyclic."""
inner_cyclic_list = [1, 2]
cyclic_list = [3, inner_cyclic_list]
inner_cyclic_list.append(cyclic_list)
cyclic_tuple = tuple(cyclic_list)
test_dict_cyclic_list = _test_dict_deepcopy()
test_dict_cyclic_tuple = _test_dict_deepcopy()
test_dict_cyclic_list['cyclic_list'] = cyclic_list
test_dict_cyclic_tuple['dict']['cyclic_tuple'] = cyclic_tuple
self.assertFrozenRaisesValueError(
[test_dict_cyclic_list, test_dict_cyclic_tuple])
def testInitDictInList(self):
"""Ensure initialization fails on dict and ConfigDict in lists/tuples."""
list_containing_dict = {'list': [1, 2, 3, {'a': 4, 'b': 5}]}
tuple_containing_dict = {'tuple': (1, 2, 3, {'a': 4, 'b': 5})}
list_containing_cd = {'list': [1, 2, 3, _test_configdict()]}
tuple_containing_cd = {'tuple': (1, 2, 3, _test_configdict())}
fr_containing_list_containing_dict = {
'fr': ml_collections.FieldReference([1, {
'a': 2
}])
}
self.assertFrozenRaisesValueError([
list_containing_dict, tuple_containing_dict, list_containing_cd,
tuple_containing_cd, fr_containing_list_containing_dict
])
def testInitFieldReferenceInList(self):
"""Ensure initialization fails on FieldReferences in lists/tuples."""
list_containing_fr = {'list': [1, 2, 3, ml_collections.FieldReference(4)]}
tuple_containing_fr = {
'tuple': (1, 2, 3, ml_collections.FieldReference('a'))
}
self.assertFrozenRaisesValueError([list_containing_fr, tuple_containing_fr])
def testInitInvalidAttributeName(self):
"""Ensure initialization fails on attributes with invalid names."""
dot_name = {'dot.name': None}
immutable_name = {'__hash__': None}
with self.assertRaises(ValueError):
ml_collections.FrozenConfigDict(dot_name)
with self.assertRaises(AttributeError):
ml_collections.FrozenConfigDict(immutable_name)
def testFieldReferenceResolved(self):
"""Tests that FieldReferences are resolved."""
cfg = ml_collections.ConfigDict({'fr': ml_collections.FieldReference(1)})
frozen_cfg = ml_collections.FrozenConfigDict(cfg)
self.assertNotIsInstance(frozen_cfg._fields['fr'],
ml_collections.FieldReference)
hash(frozen_cfg) # with FieldReference resolved, frozen_cfg is hashable
def testFieldReferenceCycle(self):
"""Tests that FieldReferences may not contain reference cycles."""
frozenset_fr = {'frozenset': frozenset({1, 2})}
frozenset_fr['fr'] = ml_collections.FieldReference(
frozenset_fr['frozenset'])
list_fr = {'list': [1, 2]}
list_fr['fr'] = ml_collections.FieldReference(list_fr['list'])
cyclic_fr = {'a': 1}
cyclic_fr['fr'] = ml_collections.FieldReference(cyclic_fr)
cyclic_fr_parent = {'dict': {}}
cyclic_fr_parent['dict']['fr'] = ml_collections.FieldReference(
cyclic_fr_parent)
# FieldReference is allowed to point to non-cyclic objects:
_ = ml_collections.FrozenConfigDict(frozenset_fr)
_ = ml_collections.FrozenConfigDict(list_fr)
# But not cycles:
self.assertFrozenRaisesValueError([cyclic_fr, cyclic_fr_parent])
def testDeepCopy(self):
"""Ensure deepcopy works and does not affect equality."""
fcd = _test_frozenconfigdict()
fcd_deepcopy = copy.deepcopy(fcd)
self.assertEqual(fcd, fcd_deepcopy)
def testEquals(self):
"""Tests that __eq__() respects hidden mutability."""
fcd = _test_frozenconfigdict()
# First, ensure __eq__() returns False when comparing to other types
self.assertNotEqual(fcd, (1, 2))
self.assertNotEqual(fcd, fcd.as_configdict())
list_to_tuple = _test_dict_deepcopy()
list_to_tuple['list'] = tuple(list_to_tuple['list'])
fcd_list_to_tuple = ml_collections.FrozenConfigDict(list_to_tuple)
set_to_frozenset = _test_dict_deepcopy()
set_to_frozenset['set'] = frozenset(set_to_frozenset['set'])
fcd_set_to_frozenset = ml_collections.FrozenConfigDict(set_to_frozenset)
self.assertNotEqual(fcd, fcd_list_to_tuple)
# Because set == frozenset in Python:
self.assertEqual(fcd, fcd_set_to_frozenset)
# Items are not affected by hidden mutability
self.assertCountEqual(fcd.items(), fcd_list_to_tuple.items())
self.assertCountEqual(fcd.items(), fcd_set_to_frozenset.items())
def testEqualsAsConfigDict(self):
"""Tests that eq_as_configdict respects hidden mutability but not type."""
fcd = _test_frozenconfigdict()
# First, ensure eq_as_configdict() returns True with an equal ConfigDict but
# False for other types.
self.assertFalse(fcd.eq_as_configdict([1, 2]))
self.assertTrue(fcd.eq_as_configdict(fcd.as_configdict()))
empty_fcd = ml_collections.FrozenConfigDict()
self.assertTrue(empty_fcd.eq_as_configdict(ml_collections.ConfigDict()))
# Now, ensure it has the same immutability detection as __eq__().
list_to_tuple = _test_dict_deepcopy()
list_to_tuple['list'] = tuple(list_to_tuple['list'])
fcd_list_to_tuple = ml_collections.FrozenConfigDict(list_to_tuple)
set_to_frozenset = _test_dict_deepcopy()
set_to_frozenset['set'] = frozenset(set_to_frozenset['set'])
fcd_set_to_frozenset = ml_collections.FrozenConfigDict(set_to_frozenset)
self.assertFalse(fcd.eq_as_configdict(fcd_list_to_tuple))
# Because set == frozenset in Python:
self.assertTrue(fcd.eq_as_configdict(fcd_set_to_frozenset))
def testHash(self):
"""Ensures __hash__() respects hidden mutability."""
list_to_tuple = _test_dict_deepcopy()
list_to_tuple['list'] = tuple(list_to_tuple['list'])
self.assertEqual(
hash(_test_frozenconfigdict()),
hash(ml_collections.FrozenConfigDict(_test_dict_deepcopy())))
self.assertNotEqual(
hash(_test_frozenconfigdict()),
hash(ml_collections.FrozenConfigDict(list_to_tuple)))
# Ensure Python realizes FrozenConfigDict is hashable
self.assertIsInstance(_test_frozenconfigdict(), collections_abc.Hashable)
def testUnhashableType(self):
"""Ensures __hash__() fails if FrozenConfigDict has unhashable value."""
unhashable_fcd = ml_collections.FrozenConfigDict(
{'unhashable': bytearray()})
with self.assertRaises(TypeError):
hash(unhashable_fcd)
def testToDict(self):
"""Ensure to_dict() does not care about hidden mutability."""
list_to_tuple = _test_dict_deepcopy()
list_to_tuple['list'] = tuple(list_to_tuple['list'])
self.assertEqual(_test_frozenconfigdict().to_dict(),
ml_collections.FrozenConfigDict(list_to_tuple).to_dict())
def testPickle(self):
"""Make sure FrozenConfigDict can be dumped and loaded with pickle."""
fcd = _test_frozenconfigdict()
locked_fcd = ml_collections.FrozenConfigDict(_test_configdict().lock())
unpickled_fcd = pickle.loads(pickle.dumps(fcd))
unpickled_locked_fcd = pickle.loads(pickle.dumps(locked_fcd))
self.assertEqual(fcd, unpickled_fcd)
self.assertEqual(locked_fcd, unpickled_locked_fcd)
if __name__ == '__main__':
absltest.main()
|
tests/test_utils.py | Guillerbr/python-pagseguro | 115 | 23199 | # -*- coding: utf-8 -*-
import datetime
from pagseguro.utils import (is_valid_cpf, is_valid_cnpj, is_valid_email,
parse_date)
from pagseguro.exceptions import PagSeguroValidationError
import pytest
from dateutil.tz import tzutc
def test_is_valid_email():
valid = '<EMAIL>'
valid2 = u'<EMAIL>'
not_valid = '@asd.com'
not_valid2 = 'bad'
not_valid3 = u'user@росси́я'
with pytest.raises(PagSeguroValidationError):
is_valid_email(not_valid)
with pytest.raises(PagSeguroValidationError):
is_valid_email(not_valid2)
with pytest.raises(PagSeguroValidationError):
is_valid_email(not_valid3)
assert is_valid_email(valid) == '<EMAIL>'
assert is_valid_email(valid2) == u'<EMAIL>'
def test_parse_date():
# DATETIME_FORMAT = '%Y-%m-%dT%H:%M:%S'
date_str = '2016-10-10T10:10:10'
assert parse_date(date_str) == datetime.datetime(2016, 10, 10, 10, 10, 10,
tzinfo=tzutc())
def test_is_valid_cpf():
valid = '041.684.826-50'
valid2 = '04168482650'
bad = 'bla///'
max_digits = '1111111111111111111111111'
invalid_cpf = '040.684.826-50'
with pytest.raises(PagSeguroValidationError):
is_valid_cpf(bad)
with pytest.raises(PagSeguroValidationError):
is_valid_cpf(max_digits)
with pytest.raises(PagSeguroValidationError):
is_valid_cpf(invalid_cpf)
assert is_valid_cpf(valid) == valid
assert is_valid_cpf(valid2) == '04168482650'
def test_is_valid_cnpj():
valid = '31331052000174'
valid2 = '72.168.117/0001-90'
invalid = '///'
digits = '1111111'
wrong_number = '31331052000175'
with pytest.raises(PagSeguroValidationError):
is_valid_cnpj(invalid)
with pytest.raises(PagSeguroValidationError):
is_valid_cnpj(digits)
with pytest.raises(PagSeguroValidationError):
is_valid_cnpj(wrong_number)
assert is_valid_cnpj(valid) == '31331052000174'
assert is_valid_cnpj(valid2) == '72168117000190'
|
tests/perf/test-prop-write.py | wenq1/duktape | 4,268 | 23201 | <reponame>wenq1/duktape
def test():
obj = { 'xxx1': 1, 'xxx2': 2, 'xxx3': 4, 'xxx4': 4, 'foo': 123 }
i = 0
while i < 1e7:
obj['foo'] = 234
obj['foo'] = 234
obj['foo'] = 234
obj['foo'] = 234
obj['foo'] = 234
obj['foo'] = 234
obj['foo'] = 234
obj['foo'] = 234
obj['foo'] = 234
obj['foo'] = 234
i += 1
test()
|
clickhouse_driver/numpy/result.py | fasttrack-solutions/clickhouse-driver | 823 | 23202 | <reponame>fasttrack-solutions/clickhouse-driver<filename>clickhouse_driver/numpy/result.py<gh_stars>100-1000
from itertools import chain
import numpy as np
import pandas as pd
from pandas.api.types import union_categoricals
from ..progress import Progress
from ..result import QueryResult
class NumpyQueryResult(QueryResult):
"""
Stores query result from multiple blocks as numpy arrays.
"""
def store(self, packet):
block = getattr(packet, 'block', None)
if block is None:
return
# Header block contains no rows. Pick columns from it.
if block.num_rows:
if self.columnar:
self.data.append(block.get_columns())
else:
self.data.extend(block.get_rows())
elif not self.columns_with_types:
self.columns_with_types = block.columns_with_types
def get_result(self):
"""
:return: stored query result.
"""
for packet in self.packet_generator:
self.store(packet)
if self.columnar:
data = []
# Transpose to a list of columns, each column is list of chunks
for column_chunks in zip(*self.data):
# Concatenate chunks for each column
if isinstance(column_chunks[0], np.ndarray):
column = np.concatenate(column_chunks)
elif isinstance(column_chunks[0], pd.Categorical):
column = union_categoricals(column_chunks)
else:
column = tuple(chain.from_iterable(column_chunks))
data.append(column)
else:
data = self.data
if self.with_column_types:
return data, self.columns_with_types
else:
return data
class NumpyProgressQueryResult(NumpyQueryResult):
"""
Stores query result and progress information from multiple blocks.
Provides iteration over query progress.
"""
def __init__(self, *args, **kwargs):
self.progress_totals = Progress()
super(NumpyProgressQueryResult, self).__init__(*args, **kwargs)
def __iter__(self):
return self
def __next__(self):
while True:
packet = next(self.packet_generator)
progress_packet = getattr(packet, 'progress', None)
if progress_packet:
self.progress_totals.increment(progress_packet)
return (
self.progress_totals.rows, self.progress_totals.total_rows
)
else:
self.store(packet)
def get_result(self):
# Read all progress packets.
for _ in self:
pass
return super(NumpyProgressQueryResult, self).get_result()
class NumpyIterQueryResult(object):
"""
Provides iteration over returned data by chunks (streaming by chunks).
"""
def __init__(
self, packet_generator,
with_column_types=False):
self.packet_generator = packet_generator
self.with_column_types = with_column_types
self.first_block = True
super(NumpyIterQueryResult, self).__init__()
def __iter__(self):
return self
def __next__(self):
packet = next(self.packet_generator)
block = getattr(packet, 'block', None)
if block is None:
return []
if self.first_block and self.with_column_types:
self.first_block = False
rv = [block.columns_with_types]
rv.extend(block.get_rows())
return rv
else:
return block.get_rows()
|
micropsi_core/world/island/__init__.py | brucepro/micropsi2 | 119 | 23207 | #!/usr/local/bin/python
# -*- coding: utf-8 -*-
"""
"""
__author__ = 'joscha'
__date__ = '03.08.12'
|
src/tests/t_kadm5_hook.py | tizenorg/platform.upstream.krb5 | 372 | 23231 | <reponame>tizenorg/platform.upstream.krb5<gh_stars>100-1000
#!/usr/bin/python
from k5test import *
plugin = os.path.join(buildtop, "plugins", "kadm5_hook", "test",
"kadm5_hook_test.so")
hook_krb5_conf = {
'all' : {
"plugins" : {
"kadm5_hook" : {
"module" : "test:" + plugin
}
}
}
}
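# The config above registers the test plugin as a kadm5_hook module, so kadmin
# principal operations should trigger its hook stages (checked below).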
realm = K5Realm(krb5_conf=hook_krb5_conf, create_user=False, create_host=False)
output = realm.run_kadminl('addprinc -randkey test')
if "create: stage precommit" not in output:
fail('kadm5_hook test output not found')
success('kadm5_hook')
|
recipes/LibriParty/generate_dataset/get_dataset_from_metadata.py | JasonSWFu/speechbrain | 3,913 | 23244 | """
LibriParty dataset creation using the official metadata.
Author
------
<NAME>, 2020
<NAME>, 2020
"""
import os
import sys
import speechbrain as sb
from hyperpyyaml import load_hyperpyyaml
from speechbrain.utils.data_utils import download_file
from local.create_mixtures_from_metadata import create_mixture
import json
from tqdm import tqdm
URL_METADATA = (
"https://www.dropbox.com/s/0u6x6ndyedb4rl7/LibriParty_metadata.zip?dl=1"
)
# Load hyperparameters file with command-line overrides
params_file, run_opts, overrides = sb.core.parse_arguments(sys.argv[1:])
with open(params_file) as fin:
params = load_hyperpyyaml(fin, overrides)
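# Hyperparameters come from the YAML file given on the command line, with any
# command-line overrides applied by SpeechBrain's argument parser.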
metadata_folder = params["metadata_folder"]
if not os.path.exists(metadata_folder):
os.makedirs(metadata_folder)
# Download meta data from the web
download_file(
URL_METADATA,
metadata_folder + "/meta.zip",
unpack=True,
dest_unpack=metadata_folder,
)
for data_split in ["train", "dev", "eval"]:
with open(os.path.join(metadata_folder, data_split + ".json"), "r") as f:
metadata = json.load(f)
print("Creating data for {} set".format(data_split))
c_folder = os.path.join(params["out_folder"], data_split)
os.makedirs(c_folder, exist_ok=True)
for sess in tqdm(metadata.keys()):
create_mixture(sess, c_folder, params, metadata[sess])
|
fugue/column/functions.py | kvnkho/fugue | 547 | 23273 | from typing import Any, Optional
import pyarrow as pa
from fugue.column.expressions import (
ColumnExpr,
_FuncExpr,
_to_col,
function,
)
from triad import Schema
def coalesce(*args: Any) -> ColumnExpr:
"""SQL ``COALESCE`` function
:param args: If a value is not :class:`~fugue.column.expressions.ColumnExpr`
then it's converted to a literal column by
:func:`~fugue.column.expressions.col`
.. note::
this function can infer neither type nor alias
.. admonition:: New Since
:class: hint
**0.6.0**
.. admonition:: Examples
.. code-block:: python
import fugue.column.functions as f
f.coalesce(col("a"), col("b")+col("c"), 1)
"""
return function("COALESCE", *[_to_col(x) for x in args])
def min(col: ColumnExpr) -> ColumnExpr: # pylint: disable=redefined-builtin
"""SQL ``MIN`` function (aggregation)
:param col: the column to find min
.. note::
* this function can infer type from ``col`` type
* this function can infer alias from ``col``'s inferred alias
.. admonition:: New Since
:class: hint
**0.6.0**
.. admonition:: Examples
.. code-block:: python
import fugue.column.functions as f
# assume col a has type double
f.min(col("a")) # CAST(MIN(a) AS double) AS a
f.min(-col("a")) # CAST(MIN(-a) AS double) AS a
# neither type nor alias can be inferred in the following cases
f.min(col("a")+1)
f.min(col("a")+col("b"))
# you can specify explicitly
# CAST(MIN(a+b) AS int) AS x
f.min(col("a")+col("b")).cast(int).alias("x")
"""
assert isinstance(col, ColumnExpr)
return _SameTypeUnaryAggFuncExpr("MIN", col)
def max(col: ColumnExpr) -> ColumnExpr: # pylint: disable=redefined-builtin
"""SQL ``MAX`` function (aggregation)
:param col: the column to find max
.. note::
* this function can infer type from ``col`` type
* this function can infer alias from ``col``'s inferred alias
.. admonition:: New Since
:class: hint
**0.6.0**
.. admonition:: Examples
.. code-block:: python
import fugue.column.functions as f
# assume col a has type double
f.max(col("a")) # CAST(MAX(a) AS double) AS a
f.max(-col("a")) # CAST(MAX(-a) AS double) AS a
# neither type nor alias can be inferred in the following cases
f.max(col("a")+1)
f.max(col("a")+col("b"))
# you can specify explicitly
# CAST(MAX(a+b) AS int) AS x
f.max(col("a")+col("b")).cast(int).alias("x")
"""
assert isinstance(col, ColumnExpr)
return _SameTypeUnaryAggFuncExpr("MAX", col)
def count(col: ColumnExpr) -> ColumnExpr:
"""SQL ``COUNT`` function (aggregation)
:param col: the column to find count
.. note::
* this function cannot infer type from ``col`` type
* this function can infer alias from ``col``'s inferred alias
.. admonition:: New Since
:class: hint
**0.6.0**
.. admonition:: Examples
.. code-block:: python
import fugue.column.functions as f
f.count(col("*")) # COUNT(*)
f.count(col("a")) # COUNT(a) AS a
# you can specify explicitly
# CAST(COUNT(a) AS double) AS a
f.count(col("a")).cast(float)
"""
assert isinstance(col, ColumnExpr)
return _UnaryAggFuncExpr("COUNT", col)
def count_distinct(col: ColumnExpr) -> ColumnExpr:
"""SQL ``COUNT DISTINCT`` function (aggregation)
:param col: the column to find distinct element count
.. note::
* this function cannot infer type from ``col`` type
* this function can infer alias from ``col``'s inferred alias
.. admonition:: New Since
:class: hint
**0.6.0**
.. admonition:: Examples
.. code-block:: python
import fugue.column.functions as f
f.count_distinct(col("*")) # COUNT(DISTINCT *)
f.count_distinct(col("a")) # COUNT(DISTINCT a) AS a
# you can specify explicitly
# CAST(COUNT(DISTINCT a) AS double) AS a
f.count_distinct(col("a")).cast(float)
"""
assert isinstance(col, ColumnExpr)
return _UnaryAggFuncExpr("COUNT", col, arg_distinct=True)
def avg(col: ColumnExpr) -> ColumnExpr:
"""SQL ``AVG`` function (aggregation)
:param col: the column to find average
.. note::
* this function cannot infer type from ``col`` type
* this function can infer alias from ``col``'s inferred alias
.. admonition:: New Since
:class: hint
**0.6.0**
.. admonition:: Examples
.. code-block:: python
import fugue.column.functions as f
f.avg(col("a")) # AVG(a) AS a
# you can specify explicitly
# CAST(AVG(a) AS double) AS a
f.avg(col("a")).cast(float)
"""
assert isinstance(col, ColumnExpr)
return _UnaryAggFuncExpr("AVG", col)
def sum(col: ColumnExpr) -> ColumnExpr: # pylint: disable=redefined-builtin
"""SQL ``SUM`` function (aggregation)
:param col: the column to find sum
.. note::
* this function cannot infer type from ``col`` type
* this function can infer alias from ``col``'s inferred alias
.. admonition:: New Since
:class: hint
**0.6.0**
.. admonition:: Examples
.. code-block:: python
import fugue.column.functions as f
f.sum(col("a")) # SUM(a) AS a
# you can specify explicitly
# CAST(SUM(a) AS double) AS a
f.sum(col("a")).cast(float)
"""
assert isinstance(col, ColumnExpr)
return _UnaryAggFuncExpr("SUM", col)
def first(col: ColumnExpr) -> ColumnExpr:
"""SQL ``FIRST`` function (aggregation)
:param col: the column to find first
.. note::
* this function can infer type from ``col`` type
* this function can infer alias from ``col``'s inferred alias
.. admonition:: New Since
:class: hint
**0.6.0**
.. admonition:: Examples
.. code-block:: python
import fugue.column.functions as f
# assume col a has type double
f.first(col("a")) # CAST(FIRST(a) AS double) AS a
f.first(-col("a")) # CAST(FIRST(-a) AS double) AS a
# neither type nor alias can be inferred in the following cases
f.first(col("a")+1)
f.first(col("a")+col("b"))
# you can specify explicitly
# CAST(FIRST(a+b) AS int) AS x
f.first(col("a")+col("b")).cast(int).alias("x")
"""
assert isinstance(col, ColumnExpr)
return _SameTypeUnaryAggFuncExpr("FIRST", col)
def last(col: ColumnExpr) -> ColumnExpr:
"""SQL ``LAST`` function (aggregation)
:param col: the column to find last
.. note::
* this function can infer type from ``col`` type
* this function can infer alias from ``col``'s inferred alias
.. admonition:: New Since
:class: hint
**0.6.0**
.. admonition:: Examples
.. code-block:: python
import fugue.column.functions as f
# assume col a has type double
f.last(col("a")) # CAST(LAST(a) AS double) AS a
f.last(-col("a")) # CAST(LAST(-a) AS double) AS a
# neither type nor alias can be inferred in the following cases
f.last(col("a")+1)
f.last(col("a")+col("b"))
# you can specify explicitly
# CAST(LAST(a+b) AS int) AS x
f.last(col("a")+col("b")).cast(int).alias("x")
"""
assert isinstance(col, ColumnExpr)
return _SameTypeUnaryAggFuncExpr("LAST", col)
def is_agg(column: Any) -> bool:
"""Check if a column contains aggregation operation
:param col: the column to check
:return: whether the column is :class:`~fugue.column.expressions.ColumnExpr`
and contains aggregation operations
.. admonition:: New Since
:class: hint
**0.6.0**
.. admonition:: Examples
.. code-block:: python
import fugue.column.functions as f
assert not f.is_agg(1)
assert not f.is_agg(col("a"))
assert not f.is_agg(col("a")+lit(1))
assert f.is_agg(f.max(col("a")))
assert f.is_agg(-f.max(col("a")))
assert f.is_agg(f.max(col("a")+1))
assert f.is_agg(f.max(col("a"))+f.min(col("a")))
"""
if isinstance(column, _UnaryAggFuncExpr):
return True
if isinstance(column, _FuncExpr):
return any(is_agg(x) for x in column.args) or any(
is_agg(x) for x in column.kwargs.values()
)
return False
class _UnaryAggFuncExpr(_FuncExpr):
def __init__(self, func: str, col: ColumnExpr, arg_distinct: bool = False):
super().__init__(func, col, arg_distinct=arg_distinct)
def infer_alias(self) -> ColumnExpr:
return (
self
if self.output_name != ""
else self.alias(self.args[0].infer_alias().output_name)
)
def _copy(self) -> _FuncExpr:
return _UnaryAggFuncExpr(self.func, *self.args, **self.kwargs)
class _SameTypeUnaryAggFuncExpr(_UnaryAggFuncExpr):
def _copy(self) -> _FuncExpr:
return _SameTypeUnaryAggFuncExpr(self.func, *self.args, **self.kwargs)
def infer_type(self, schema: Schema) -> Optional[pa.DataType]:
return self.as_type or self.args[0].infer_type(schema)
|
packages/core/minos-microservice-networks/tests/test_networks/test_exceptions.py | sorasful/minos-python | 247 | 23285 | import unittest
from minos.common import (
MinosException,
)
from minos.networks import (
MinosNetworkException,
)
class TestExceptions(unittest.TestCase):
def test_type(self):
self.assertTrue(issubclass(MinosNetworkException, MinosException))
if __name__ == "__main__":
unittest.main()
|
python/caliper-reader/setup.py | slabasan/Caliper | 220 | 23296 | <filename>python/caliper-reader/setup.py<gh_stars>100-1000
# Copyright (c) 2020-20201, Lawrence Livermore National Security, LLC.
# See top-level LICENSE file for details.
#
# SPDX-License-Identifier: BSD-3-Clause
import setuptools
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, "README.md"), encoding="utf-8") as f:
long_description = f.read()
# Get the version in a safe way which does not reference the `__init__` file
# per python docs: https://packaging.python.org/guides/single-sourcing-package-version/
version = {}
with open("./caliperreader/version.py") as fp:
exec(fp.read(), version)
setuptools.setup(
name="caliper-reader",
version=version["__version__"],
description="A Python library for reading Caliper .cali files",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/LLNL/Caliper",
author="<NAME>",
author_email="<EMAIL>",
license="BSD-3-Clause",
classifiers=[
"Development Status :: 5 - Production/Stable",
"License :: OSI Approved :: BSD License",
],
packages=setuptools.find_packages()
)
|
programming_fundamentals/python_part_2/common_vars.py | tobaidullah/2 | 629 | 23297 | #! /usr/bin/env python
"""
Learning Series: Network Programmability Basics
Module: Programming Fundamentals
Lesson: Python Part 2
Author: <NAME> <<EMAIL>>
common_vars.py
Illustrate the following concepts:
- Code reuse: imported into other examples
"""
shapes = ["square", "triangle", "circle"]
books = [
{
"title": "War and Peace",
"shelf": 3,
"available": True
},
{
"title": "Hamlet",
"shelf": 1,
"available": False
},
{
"title": "Harold and the Purple Crayon",
"shelf": 2,
"available": True
}
]
colors = ["blue", "green", "red"]
|
contrib/stack/stripmapStack/unpackFrame_risat_raw.py | vincentschut/isce2 | 1,133 | 23311 | #!/usr/bin/env python3
import isce
from isceobj.Sensor import createSensor
import shelve
import argparse
import os
from isceobj.Util import Poly1D
from isceobj.Planet.AstronomicalHandbook import Const
from mroipac.dopiq.DopIQ import DopIQ
import copy
def cmdLineParse():
'''
Command line parser.
'''
parser = argparse.ArgumentParser(description='Unpack RISAT raw data and store metadata in pickle file.')
parser.add_argument('-i','--input', dest='indir', type=str,
required=True, help='Input RISAT frame')
parser.add_argument('-o', '--output', dest='slc', type=str,
required=True, help='Output SLC file')
parser.add_argument('-p', '--polar', dest='polar', type=str,
default='RH', help='Polarization to extract')
return parser.parse_args()
def unpack(hdf5, slcname, polar='RH'):
'''
Unpack RISAT raw data to a binary raw file and save frame metadata.
'''
obj = createSensor('RISAT1')
obj._imageFile = os.path.join(hdf5, 'scene_'+polar, 'dat_01.001')
obj._leaderFile = os.path.join(hdf5, 'scene_'+polar,'lea_01.001')
if not os.path.isdir(slcname):
os.mkdir(slcname)
date = os.path.basename(slcname)
obj.output = os.path.join(slcname, date + '.raw')
obj.extractImage()
obj.frame.getImage().renderHdr()
#####Estimate doppler
dop = DopIQ()
dop.configure()
img = copy.deepcopy(obj.frame.getImage())
img.setAccessMode('READ')
dop.wireInputPort('frame', object=obj.frame)
dop.wireInputPort('instrument', object=obj.frame.instrument)
dop.wireInputPort('image', object=img)
dop.calculateDoppler()
dop.fitDoppler()
fit = dop.quadratic
coef = [fit['a'], fit['b'], fit['c']]
print(coef)
obj.frame._dopplerVsPixel = [x*obj.frame.PRF for x in coef]
pickName = os.path.join(slcname, 'raw')
with shelve.open(pickName) as db:
db['frame'] = obj.frame
if __name__ == '__main__':
'''
Main driver.
'''
inps = cmdLineParse()
unpack(inps.indir, inps.slc, polar=inps.polar)
|
test/unit/agent/common/util/text.py | dp92987/nginx-amplify-agent | 308 | 23340 | <reponame>dp92987/nginx-amplify-agent
# -*- coding: utf-8 -*-
from hamcrest import *
from test.base import BaseTestCase
from amplify.agent.common.util.text import (
decompose_format, parse_line, parse_line_split
)
__author__ = "<NAME>"
__copyright__ = "Copyright (C) Nginx, Inc. All rights reserved."
__license__ = ""
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
COMBINED_FORMAT = '$remote_addr - $remote_user [$time_local] "$request" ' + \
'$status $body_bytes_sent "$http_referer" "$http_user_agent"'
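# This mirrors nginx's default "combined" access-log format.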
class UtilTextTestCase(BaseTestCase):
def test_decompose_format_regular(self):
keys, trie, non_key_patterns, first_value_is_key = decompose_format(
COMBINED_FORMAT, full=True
)
assert_that(keys, not_none())
assert_that(trie, not_none())
assert_that(non_key_patterns, not_none())
assert_that(first_value_is_key, equal_to(True))
assert_that(keys, equal_to([
'remote_addr', 'remote_user', 'time_local', 'request', 'status',
'body_bytes_sent', 'http_referer', 'http_user_agent'
]))
assert_that(non_key_patterns, equal_to([
' - ', ' [', '] "', '" ', ' ', ' "', '" "', '"'
]))
def test_decompose_format_different(self):
log_format = '$remote_addr - $remote_user [$time_local] ' + \
'"$request" $status $body_bytes_sent "$http_referer" ' + \
'"$http_user_agent" rt=$request_time ' + \
'ut="$upstream_response_time" cs=$upstream_cache_status'
keys, trie, non_key_patterns, first_value_is_key = decompose_format(log_format, full=True)
assert_that(keys, not_none())
assert_that(trie, not_none())
assert_that(non_key_patterns, not_none())
assert_that(first_value_is_key, equal_to(True))
assert_that(keys, equal_to([
'remote_addr', 'remote_user', 'time_local', 'request', 'status',
'body_bytes_sent', 'http_referer', 'http_user_agent',
'request_time', 'upstream_response_time', 'upstream_cache_status'
]))
assert_that(non_key_patterns, equal_to([
' - ', ' [', '] "', '" ', ' ', ' "', '" "', '" rt=', ' ut="',
'" cs='
]))
def test_parse_line(self):
keys, trie = decompose_format(COMBINED_FORMAT)
line = '127.0.0.1 - - [02/Jul/2015:14:49:48 +0000] "GET /basic_status HTTP/1.1" 200 110 "-" ' + \
'"python-requests/2.2.1 CPython/2.7.6 Linux/3.13.0-48-generic"'
results = parse_line(line, keys=keys, trie=trie)
assert_that(results, not_none())
for key in keys:
assert_that(results, has_item(key))
assert_that(results[key], not_none())
# check the last value to make sure complete parse
assert_that(results['http_user_agent'], equal_to(
'python-requests/2.2.1 CPython/2.7.6 Linux/3.13.0-48-generic'
))
def test_parse_line_split(self):
keys, _, non_key_patterns, first_value_is_key = decompose_format(COMBINED_FORMAT, full=True)
line = '127.0.0.1 - - [02/Jul/2015:14:49:48 +0000] "GET /basic_status HTTP/1.1" 200 110 "-" ' + \
'"python-requests/2.2.1 CPython/2.7.6 Linux/3.13.0-48-generic"'
results = parse_line_split(
line,
keys=keys,
non_key_patterns=non_key_patterns,
first_value_is_key=first_value_is_key
)
assert_that(results, not_none())
for key in keys:
assert_that(results, has_item(key))
assert_that(results[key], not_none())
# check the last value to make sure complete parse
assert_that(results['http_user_agent'], equal_to(
'python-requests/2.2.1 CPython/2.7.6 Linux/3.13.0-48-generic'
))
def test_parse_line_non_standard_http_method(self):
keys, trie = decompose_format(COMBINED_FORMAT)
line = '127.0.0.1 - - [02/Jul/2015:14:49:48 +0000] "PROPFIND /basic_status HTTP/1.1" 200 110 "-" ' + \
'"python-requests/2.2.1 CPython/2.7.6 Linux/3.13.0-48-generic"'
results = parse_line(line, keys=keys, trie=trie)
assert_that(results, not_none())
for key in keys:
assert_that(results, has_item(key))
assert_that(results[key], not_none())
# check the last value to make sure complete parse
assert_that(results['http_user_agent'], equal_to(
'python-requests/2.2.1 CPython/2.7.6 Linux/3.13.0-48-generic'
))
def test_parse_line_split_non_standard_http_method(self):
keys, _, non_key_patterns, first_value_is_key = decompose_format(
COMBINED_FORMAT, full=True
)
line = '127.0.0.1 - - [02/Jul/2015:14:49:48 +0000] "PROPFIND /basic_status HTTP/1.1" 200 110 "-" ' + \
'"python-requests/2.2.1 CPython/2.7.6 Linux/3.13.0-48-generic"'
results = parse_line_split(
line,
keys=keys,
non_key_patterns=non_key_patterns,
first_value_is_key=first_value_is_key
)
assert_that(results, not_none())
for key in keys:
assert_that(results, has_item(key))
assert_that(results[key], not_none())
# check the last value to make sure complete parse
assert_that(results['http_user_agent'], equal_to(
'python-requests/2.2.1 CPython/2.7.6 Linux/3.13.0-48-generic'
))
def test_parse_line_upstream_log_format(self):
log_format = '$remote_addr - $remote_user [$time_local] ' + \
'"$request" $status $body_bytes_sent "$http_referer" "$http_user_agent" ' + \
'rt=$request_time ut="$upstream_response_time" cs=$upstream_cache_status'
keys, trie = decompose_format(log_format)
line = \
'1.2.3.4 - - [22/Jan/2010:19:34:21 +0300] "GET /foo/ HTTP/1.1" 200 11078 ' + \
'"http://www.rambler.ru/" "Mozilla/5.0 (Windows; U; Windows NT 5.1" rt=0.010 ut="2.001, 0.345" cs=MISS'
results = parse_line(line, keys=keys, trie=trie)
assert_that(results, not_none())
for key in keys:
assert_that(results, has_item(key))
assert_that(results[key], not_none())
# check the last value to make sure complete parse
assert_that(results['upstream_cache_status'], equal_to('MISS'))
# check some complicated values
assert_that(results['request_time'], equal_to('0.010'))
assert_that(results['upstream_response_time'], equal_to('2.001, 0.345'))
def test_parse_line_split_upstream_log_format(self):
log_format = '$remote_addr - $remote_user [$time_local] ' + \
'"$request" $status $body_bytes_sent "$http_referer" "$http_user_agent" ' + \
'rt=$request_time ut="$upstream_response_time" cs=$upstream_cache_status'
keys, _, non_key_patterns, first_value_is_key = decompose_format(log_format, full=True)
line = \
'1.2.3.4 - - [22/Jan/2010:19:34:21 +0300] "GET /foo/ HTTP/1.1" 200 11078 ' + \
'"http://www.rambler.ru/" "Mozilla/5.0 (Windows; U; Windows NT 5.1" rt=0.010 ut="2.001, 0.345" cs=MISS'
results = parse_line_split(
line,
keys=keys,
non_key_patterns=non_key_patterns,
first_value_is_key=first_value_is_key
)
assert_that(results, not_none())
for key in keys:
assert_that(results, has_item(key))
assert_that(results[key], not_none())
# check the last value to make sure complete parse
assert_that(results['upstream_cache_status'], equal_to('MISS'))
# check some complicated values
assert_that(results['request_time'], equal_to('0.010'))
assert_that(results['upstream_response_time'], equal_to('2.001, 0.345'))
def test_parse_line_upstream_log_format_empty_upstreams(self):
log_format = '$remote_addr - $remote_user [$time_local] ' + \
'"$request" $status $body_bytes_sent "$http_referer" "$http_user_agent" ' + \
'rt=$request_time cs=$upstream_cache_status ut="$upstream_response_time"'
keys, trie = decompose_format(log_format)
line = \
'1.2.3.4 - - [22/Jan/2010:19:34:21 +0300] "GET /foo/ HTTP/1.1" 200 11078 ' + \
'"http://www.rambler.ru/" "Mozilla/5.0 (Windows; U; Windows NT 5.1" rt=0.010 cs=- ut="-"'
results = parse_line(line, keys=keys, trie=trie)
assert_that(results, not_none())
for key in keys:
assert_that(results, has_item(key))
assert_that(results[key], not_none())
# check the last value to make sure complete parse
assert_that(results['upstream_response_time'], equal_to('-'))
assert_that(results['upstream_cache_status'], equal_to('-'))
def test_parse_line_split_upstream_log_format_empty_upstreams(self):
log_format = '$remote_addr - $remote_user [$time_local] ' + \
'"$request" $status $body_bytes_sent "$http_referer" "$http_user_agent" ' + \
'rt=$request_time cs=$upstream_cache_status ut="$upstream_response_time"'
keys, _, non_key_patterns, first_value_is_key = decompose_format(
log_format, full=True
)
line = \
'1.2.3.4 - - [22/Jan/2010:19:34:21 +0300] "GET /foo/ HTTP/1.1" 200 11078 ' + \
'"http://www.rambler.ru/" "Mozilla/5.0 (Windows; U; Windows NT 5.1" rt=0.010 cs=- ut="-"'
results = parse_line_split(
line,
keys=keys,
non_key_patterns=non_key_patterns,
first_value_is_key=first_value_is_key
)
assert_that(results, not_none())
for key in keys:
assert_that(results, has_item(key))
assert_that(results[key], not_none())
# check the last value to make sure complete parse
assert_that(results['upstream_response_time'], equal_to('-'))
assert_that(results['upstream_cache_status'], equal_to('-'))
def test_parse_line_upstream_log_format_part_empty_upstreams(self):
log_format = '$remote_addr - $remote_user [$time_local] ' + \
'"$request" $status $body_bytes_sent "$http_referer" "$http_user_agent" ' + \
'rt=$request_time ut="$upstream_response_time" cs=$upstream_cache_status'
keys, trie = decompose_format(log_format)
line = \
'1.2.3.4 - - [22/Jan/2010:19:34:21 +0300] "GET /foo/ HTTP/1.1" 200 11078 ' + \
'"http://www.rambler.ru/" "Mozilla/5.0 (Windows; U; Windows NT 5.1" rt=0.010 ut="-" cs=MISS'
results = parse_line(line, keys=keys, trie=trie)
assert_that(results, not_none())
for key in keys:
assert_that(results, has_item(key))
assert_that(results[key], not_none())
# check the last value to make sure complete parse
assert_that(results['upstream_cache_status'], equal_to('MISS'))
def test_parse_line_split_upstream_log_format_part_empty_upstreams(self):
log_format = '$remote_addr - $remote_user [$time_local] ' + \
'"$request" $status $body_bytes_sent "$http_referer" "$http_user_agent" ' + \
'rt=$request_time ut="$upstream_response_time" cs=$upstream_cache_status'
keys, _, non_key_patterns, first_value_is_key = decompose_format(log_format, full=True)
line = \
'1.2.3.4 - - [22/Jan/2010:19:34:21 +0300] "GET /foo/ HTTP/1.1" 200 11078 ' + \
'"http://www.rambler.ru/" "Mozilla/5.0 (Windows; U; Windows NT 5.1" rt=0.010 ut="-" cs=MISS'
results = parse_line_split(
line,
keys=keys,
non_key_patterns=non_key_patterns,
first_value_is_key=first_value_is_key
)
assert_that(results, not_none())
for key in keys:
assert_that(results, has_item(key))
assert_that(results[key], not_none())
# check the last value to make sure complete parse
assert_that(results['upstream_cache_status'], equal_to('MISS'))
|
test/adb_test.py | bugobliterator/python-adb | 1,549 | 23341 | <reponame>bugobliterator/python-adb
#!/usr/bin/env python
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for adb."""
from io import BytesIO
import struct
import unittest
from mock import mock
from adb import common
from adb import adb_commands
from adb import adb_protocol
from adb.usb_exceptions import TcpTimeoutException, DeviceNotFoundError
import common_stub
BANNER = b'blazetest'
LOCAL_ID = 1
REMOTE_ID = 2
class BaseAdbTest(unittest.TestCase):
@classmethod
def _ExpectWrite(cls, usb, command, arg0, arg1, data):
usb.ExpectWrite(cls._MakeHeader(command, arg0, arg1, data))
usb.ExpectWrite(data)
if command == b'WRTE':
cls._ExpectRead(usb, b'OKAY', 0, 0)
@classmethod
def _ExpectRead(cls, usb, command, arg0, arg1, data=b''):
usb.ExpectRead(cls._MakeHeader(command, arg0, arg1, data))
if data:
usb.ExpectRead(data)
if command == b'WRTE':
cls._ExpectWrite(usb, b'OKAY', LOCAL_ID, REMOTE_ID, b'')
@classmethod
def _ConvertCommand(cls, command):
return sum(c << (i * 8) for i, c in enumerate(bytearray(command)))
@classmethod
def _MakeHeader(cls, command, arg0, arg1, data):
command = cls._ConvertCommand(command)
magic = command ^ 0xFFFFFFFF
checksum = adb_protocol.AdbMessage.CalculateChecksum(data)
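# ADB message header: little-endian command, arg0, arg1, payload length,
# checksum and magic (command XOR 0xFFFFFFFF).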
return struct.pack(b'<6I', command, arg0, arg1, len(data), checksum, magic)
@classmethod
def _ExpectConnection(cls, usb):
cls._ExpectWrite(usb, b'CNXN', 0x01000000, 4096, b'host::%s\0' % BANNER)
cls._ExpectRead(usb, b'CNXN', 0, 0, b'device::\0')
@classmethod
def _ExpectOpen(cls, usb, service):
cls._ExpectWrite(usb, b'OPEN', LOCAL_ID, 0, service)
cls._ExpectRead(usb, b'OKAY', REMOTE_ID, LOCAL_ID)
@classmethod
def _ExpectClose(cls, usb):
cls._ExpectRead(usb, b'CLSE', REMOTE_ID, 0)
cls._ExpectWrite(usb, b'CLSE', LOCAL_ID, REMOTE_ID, b'')
@classmethod
def _Connect(cls, usb):
return adb_commands.AdbCommands.Connect(usb, BANNER)
class AdbTest(BaseAdbTest):
@classmethod
def _ExpectCommand(cls, service, command, *responses):
usb = common_stub.StubUsb(device=None, setting=None)
cls._ExpectConnection(usb)
cls._ExpectOpen(usb, b'%s:%s\0' % (service, command))
for response in responses:
cls._ExpectRead(usb, b'WRTE', REMOTE_ID, 0, response)
cls._ExpectClose(usb)
return usb
def testConnect(self):
usb = common_stub.StubUsb(device=None, setting=None)
self._ExpectConnection(usb)
dev = adb_commands.AdbCommands()
dev.ConnectDevice(handle=usb, banner=BANNER)
def testConnectSerialString(self):
dev = adb_commands.AdbCommands()
with mock.patch.object(common.UsbHandle, 'FindAndOpen', return_value=None):
with mock.patch.object(adb_commands.AdbCommands, '_Connect', return_value=None):
dev.ConnectDevice(serial='/dev/invalidHandle')
def testSmallResponseShell(self):
command = b'keepin it real'
response = 'word.'
usb = self._ExpectCommand(b'shell', command, response)
dev = adb_commands.AdbCommands()
dev.ConnectDevice(handle=usb, banner=BANNER)
self.assertEqual(response, dev.Shell(command))
def testBigResponseShell(self):
command = b'keepin it real big'
# The data doesn't have to be big, the point is that it just concatenates
# the data from different WRTEs together.
responses = [b'other stuff, ', b'and some words.']
usb = self._ExpectCommand(b'shell', command, *responses)
dev = adb_commands.AdbCommands()
dev.ConnectDevice(handle=usb, banner=BANNER)
self.assertEqual(b''.join(responses).decode('utf8'),
dev.Shell(command))
def testUninstall(self):
package_name = "com.test.package"
response = 'Success'
usb = self._ExpectCommand(b'shell', ('pm uninstall "%s"' % package_name).encode('utf8'), response)
dev = adb_commands.AdbCommands()
dev.ConnectDevice(handle=usb, banner=BANNER)
self.assertEqual(response, dev.Uninstall(package_name))
def testStreamingResponseShell(self):
command = b'keepin it real big'
# expect multiple lines
responses = ['other stuff, ', 'and some words.']
usb = self._ExpectCommand(b'shell', command, *responses)
dev = adb_commands.AdbCommands()
dev.ConnectDevice(handle=usb, banner=BANNER)
response_count = 0
for (expected,actual) in zip(responses, dev.StreamingShell(command)):
self.assertEqual(expected, actual)
response_count = response_count + 1
self.assertEqual(len(responses), response_count)
def testReboot(self):
usb = self._ExpectCommand(b'reboot', b'', b'')
dev = adb_commands.AdbCommands()
dev.ConnectDevice(handle=usb, banner=BANNER)
dev.Reboot()
def testRebootBootloader(self):
usb = self._ExpectCommand(b'reboot', b'bootloader', b'')
dev = adb_commands.AdbCommands()
dev.ConnectDevice(handle=usb, banner=BANNER)
dev.RebootBootloader()
def testRemount(self):
usb = self._ExpectCommand(b'remount', b'', b'')
dev = adb_commands.AdbCommands()
dev.ConnectDevice(handle=usb, banner=BANNER)
dev.Remount()
def testRoot(self):
usb = self._ExpectCommand(b'root', b'', b'')
dev = adb_commands.AdbCommands()
dev.ConnectDevice(handle=usb, banner=BANNER)
dev.Root()
def testEnableVerity(self):
usb = self._ExpectCommand(b'enable-verity', b'', b'')
dev = adb_commands.AdbCommands()
dev.ConnectDevice(handle=usb, banner=BANNER)
dev.EnableVerity()
def testDisableVerity(self):
usb = self._ExpectCommand(b'disable-verity', b'', b'')
dev = adb_commands.AdbCommands()
dev.ConnectDevice(handle=usb, banner=BANNER)
dev.DisableVerity()
class FilesyncAdbTest(BaseAdbTest):
@classmethod
def _MakeSyncHeader(cls, command, *int_parts):
command = cls._ConvertCommand(command)
return struct.pack(b'<%dI' % (len(int_parts) + 1), command, *int_parts)
@classmethod
def _MakeWriteSyncPacket(cls, command, data=b'', size=None):
if not isinstance(data, bytes):
data = data.encode('utf8')
return cls._MakeSyncHeader(command, size or len(data)) + data
@classmethod
def _ExpectSyncCommand(cls, write_commands, read_commands):
usb = common_stub.StubUsb(device=None, setting=None)
cls._ExpectConnection(usb)
cls._ExpectOpen(usb, b'sync:\0')
while write_commands or read_commands:
if write_commands:
command = write_commands.pop(0)
cls._ExpectWrite(usb, b'WRTE', LOCAL_ID, REMOTE_ID, command)
if read_commands:
command = read_commands.pop(0)
cls._ExpectRead(usb, b'WRTE', REMOTE_ID, LOCAL_ID, command)
cls._ExpectClose(usb)
return usb
def testPush(self):
filedata = b'alo there, govnah'
mtime = 100
send = [
self._MakeWriteSyncPacket(b'SEND', b'/data,33272'),
self._MakeWriteSyncPacket(b'DATA', filedata),
self._MakeWriteSyncPacket(b'DONE', size=mtime),
]
data = b'OKAY\0\0\0\0'
usb = self._ExpectSyncCommand([b''.join(send)], [data])
dev = adb_commands.AdbCommands()
dev.ConnectDevice(handle=usb, banner=BANNER)
dev.Push(BytesIO(filedata), '/data', mtime=mtime)
def testPull(self):
filedata = b"g'ddayta, govnah"
recv = self._MakeWriteSyncPacket(b'RECV', b'/data')
data = [
self._MakeWriteSyncPacket(b'DATA', filedata),
self._MakeWriteSyncPacket(b'DONE'),
]
usb = self._ExpectSyncCommand([recv], [b''.join(data)])
dev = adb_commands.AdbCommands()
dev.ConnectDevice(handle=usb, banner=BANNER)
self.assertEqual(filedata, dev.Pull('/data'))
class TcpTimeoutAdbTest(BaseAdbTest):
@classmethod
def _ExpectCommand(cls, service, command, *responses):
tcp = common_stub.StubTcp('10.0.0.123')
cls._ExpectConnection(tcp)
cls._ExpectOpen(tcp, b'%s:%s\0' % (service, command))
for response in responses:
cls._ExpectRead(tcp, b'WRTE', REMOTE_ID, 0, response)
cls._ExpectClose(tcp)
return tcp
def _run_shell(self, cmd, timeout_ms=None):
tcp = self._ExpectCommand(b'shell', cmd)
dev = adb_commands.AdbCommands()
dev.ConnectDevice(handle=tcp, banner=BANNER)
dev.Shell(cmd, timeout_ms=timeout_ms)
def testConnect(self):
tcp = common_stub.StubTcp('10.0.0.123')
self._ExpectConnection(tcp)
dev = adb_commands.AdbCommands()
dev.ConnectDevice(handle=tcp, banner=BANNER)
def testTcpTimeout(self):
timeout_ms = 1
command = b'i_need_a_timeout'
self.assertRaises(
TcpTimeoutException,
self._run_shell,
command,
timeout_ms=timeout_ms)
class TcpHandleTest(unittest.TestCase):
def testInitWithHost(self):
tcp = common_stub.StubTcp('10.11.12.13')
self.assertEqual('10.11.12.13:5555', tcp._serial_number)
self.assertEqual(None, tcp._timeout_ms)
def testInitWithHostAndPort(self):
tcp = common_stub.StubTcp('10.11.12.13:5678')
self.assertEqual('10.11.12.13:5678', tcp._serial_number)
self.assertEqual(None, tcp._timeout_ms)
def testInitWithTimeout(self):
tcp = common_stub.StubTcp('10.0.0.2', timeout_ms=234.5)
self.assertEqual('10.0.0.2:5555', tcp._serial_number)
self.assertEqual(234.5, tcp._timeout_ms)
def testInitWithTimeoutInt(self):
tcp = common_stub.StubTcp('10.0.0.2', timeout_ms=234)
self.assertEqual('10.0.0.2:5555', tcp._serial_number)
self.assertEqual(234.0, tcp._timeout_ms)
if __name__ == '__main__':
unittest.main()
|
memory/test/test_memory.py | MaxGreil/hail | 789 | 23374 | import unittest
import uuid
from memory.client import MemoryClient
from hailtop.aiocloud.aiogoogle import GoogleStorageAsyncFS
from hailtop.config import get_user_config
from hailtop.utils import async_to_blocking
from gear.cloud_config import get_gcp_config
PROJECT = get_gcp_config().project
class BlockingMemoryClient:
def __init__(self, gcs_project=None, fs=None, deploy_config=None, session=None, headers=None, _token=None):
self._client = MemoryClient(gcs_project, fs, deploy_config, session, headers, _token)
async_to_blocking(self._client.async_init())
def _get_file_if_exists(self, filename):
return async_to_blocking(self._client._get_file_if_exists(filename))
def read_file(self, filename):
return async_to_blocking(self._client.read_file(filename))
def write_file(self, filename, data):
return async_to_blocking(self._client.write_file(filename, data))
def close(self):
return async_to_blocking(self._client.close())
class Tests(unittest.TestCase):
def setUp(self):
bucket_name = get_user_config().get('batch', 'bucket')
token = uuid.uuid4()
self.test_path = f'gs://{bucket_name}/memory-tests/{token}'
self.fs = GoogleStorageAsyncFS(project=PROJECT)
self.client = BlockingMemoryClient(fs=self.fs)
self.temp_files = set()
def tearDown(self):
async_to_blocking(self.fs.rmtree(None, self.test_path))
self.client.close()
async def add_temp_file_from_string(self, name: str, str_value: bytes):
handle = f'{self.test_path}/{name}'
async with await self.fs.create(handle) as f:
await f.write(str_value)
return handle
def test_non_existent(self):
for _ in range(3):
self.assertIsNone(self.client._get_file_if_exists(f'{self.test_path}/nonexistent'))
def test_small_write_around(self):
async def read(url):
async with await self.fs.open(url) as f:
return await f.read()
cases = [('empty_file', b''), ('null', b'\0'), ('small', b'hello world')]
for file, data in cases:
handle = async_to_blocking(self.add_temp_file_from_string(file, data))
expected = async_to_blocking(read(handle))
self.assertEqual(expected, data)
i = 0
cached = self.client._get_file_if_exists(handle)
while cached is None and i < 10:
cached = self.client._get_file_if_exists(handle)
i += 1
self.assertEqual(cached, expected)
def test_small_write_through(self):
cases = [('empty_file2', b''), ('null2', b'\0'), ('small2', b'hello world')]
for file, data in cases:
filename = f'{self.test_path}/{file}'
self.client.write_file(filename, data)
cached = self.client._get_file_if_exists(filename)
self.assertEqual(cached, data)
|
audiomate/processing/pipeline/onset.py | CostanzoPablo/audiomate | 133 | 23377 | import librosa
import numpy as np
from . import base
from . import spectral
class OnsetStrength(base.Computation):
"""
Compute a spectral flux onset strength envelope.
Based on http://librosa.github.io/librosa/generated/librosa.onset.onset_strength.html
Args:
n_mels (int): Number of mel bands to generate.
"""
def __init__(self, n_mels=128, parent=None, name=None):
super(OnsetStrength, self).__init__(left_context=1, right_context=0, parent=parent, name=name)
self.n_mels = n_mels
def compute(self, chunk, sampling_rate, corpus=None, utterance=None):
        # Compute mel-spectrogram
power_spec = np.abs(spectral.stft_from_frames(chunk.data.T)) ** 2
mel = np.abs(librosa.feature.melspectrogram(S=power_spec, n_mels=self.n_mels, sr=sampling_rate))
mel_power = librosa.power_to_db(mel)
# Compute onset strengths
oenv = librosa.onset.onset_strength(S=mel_power, center=False)
# Switch dimensions and add dimension to have frames
oenv = oenv.T.reshape(oenv.shape[0], -1)
# Remove context
oenv = oenv[chunk.left_context:oenv.shape[0] - chunk.right_context]
return oenv
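

# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module). OnsetStrength
# is meant to be plugged into an audiomate processing pipeline, where each
# incoming chunk of frames is reduced to one onset-strength value per frame;
# the exact pipeline wiring depends on the surrounding `base` module.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    # The step keeps one frame of left context and none on the right (see the
    # super().__init__ call above); only the mel-band count is chosen here.
    step = OnsetStrength(n_mels=64)
    print(step.n_mels)  # 64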
|
vdvae_flax/blocks.py | shaun95/google-research | 23,901 | 23395 | <reponame>shaun95/google-research
# coding=utf-8
# Copyright 2021 DeepMind Technologies Limited and the Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Building blocks for VDVAE."""
from typing import Optional, Tuple
import chex
from flax import linen as nn
import jax
_NUM_CONV_LAYER_PER_BLOCK = 4
def get_vdvae_convolution(output_channels,
kernel_shape,
weights_scale = 1.,
name = None,
precision = None):
"""Builds a 2D convolution.
Args:
output_channels: number of output channels.
kernel_shape: shape of convolutional kernel.
weights_scale: scale of initial weights in the convolution.
name: name of the module.
precision: jax precision.
Returns:
a nn.Conv2D.
"""
kernel_init = nn.initializers.variance_scaling(
scale=weights_scale, mode='fan_in', distribution='truncated_normal')
return nn.Conv(
features=output_channels,
kernel_size=kernel_shape,
strides=(1, 1),
padding='SAME',
use_bias=True,
kernel_init=kernel_init,
name=name,
precision=precision)
class ResBlock(nn.Module):
"""Residual block from the VDVAE paper.
This block is made of four convolutions, followed by an optional residual
connection and an optional average pooling to downsample the image.
Compared to the paper, it uses the same gelu non-linearity but no batch
normalization.
It also accepts as an optional input an auxiliary batch of context vectors to
be processed by 1x1 convolutions. This is typically useful to condition a VAE
on an embedded context.
"""
internal_channels: int
output_channels: int
downsampling_rate: int = 1
use_residual_connection: bool = False
last_weights_scale: float = 1.
precision: Optional[jax.lax.Precision] = None
@nn.compact
def __call__(
self,
inputs,
context_vectors = None,
):
"""Applies the res block to input images.
Args:
inputs: a rank-4 array of input images of shape (B, H, W, C).
context_vectors: optional auxiliary inputs, typically used for
conditioning. If set, they should be of rank 2, and their first (batch)
dimension should match that of `inputs`. Their number of features is
arbitrary. They will be reshaped from (B, D) to (B, 1, 1, D) and a 1x1
convolution will be applied to them.
Returns:
a the rank-4 output of the block.
"""
if self.downsampling_rate < 1:
raise ValueError('downsampling_rate should be >= 1, but got '
f'{self.downsampling_rate}.')
def build_layers(inputs):
"""Build layers of the ResBlock given a batch of inputs."""
resolution = inputs.shape[1]
if resolution > 2:
kernel_shapes = ((1, 1), (3, 3), (3, 3), (1, 1))
else:
kernel_shapes = ((1, 1), (1, 1), (1, 1), (1, 1))
conv_layers = []
aux_conv_layers = []
for layer_idx, kernel_shape in enumerate(kernel_shapes):
is_last = layer_idx == _NUM_CONV_LAYER_PER_BLOCK - 1
num_channels = self.output_channels if is_last else self.internal_channels
weights_scale = self.last_weights_scale if is_last else 1.
conv_layers.append(
get_vdvae_convolution(
num_channels,
kernel_shape,
weights_scale,
name=f'c{layer_idx}',
precision=self.precision))
aux_conv_layers.append(
get_vdvae_convolution(
num_channels, (1, 1),
0.,
name=f'aux_c{layer_idx}',
precision=self.precision))
return conv_layers, aux_conv_layers
chex.assert_rank(inputs, 4)
if inputs.shape[1] != inputs.shape[2]:
raise ValueError('VDVAE only works with square images, but got '
f'rectangular images of shape {inputs.shape[1:3]}.')
if context_vectors is not None:
chex.assert_rank(context_vectors, 2)
inputs_batch_dim = inputs.shape[0]
aux_batch_dim = context_vectors.shape[0]
if inputs_batch_dim != aux_batch_dim:
raise ValueError('Context vectors batch dimension is incompatible '
'with inputs batch dimension. Got '
f'{aux_batch_dim} vs {inputs_batch_dim}.')
context_vectors = context_vectors[:, None, None, :]
conv_layers, aux_conv_layers = build_layers(inputs)
outputs = inputs
for conv, auxiliary_conv in zip(conv_layers, aux_conv_layers):
outputs = conv(jax.nn.gelu(outputs))
if context_vectors is not None:
outputs += auxiliary_conv(context_vectors)
if self.use_residual_connection:
in_channels = inputs.shape[-1]
out_channels = outputs.shape[-1]
if in_channels != out_channels:
raise AssertionError('Cannot apply residual connection because the '
'number of output channels differs from the '
'number of input channels: '
f'{out_channels} vs {in_channels}.')
outputs += inputs
if self.downsampling_rate > 1:
shape = (self.downsampling_rate, self.downsampling_rate)
outputs = nn.avg_pool(
outputs, window_shape=shape, strides=shape, padding='VALID')
return outputs
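

# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module): run a dummy
# batch through a ResBlock. The channel counts and the input shape below are
# arbitrary placeholder choices.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    import jax.numpy as jnp

    block = ResBlock(internal_channels=32, output_channels=64)
    dummy = jnp.zeros((2, 8, 8, 64))
    variables = block.init(jax.random.PRNGKey(0), dummy)
    out = block.apply(variables, dummy)
    print(out.shape)  # (2, 8, 8, 64): spatial size unchanged, downsampling_rate=1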
|
play-1.2.4/python/Lib/site-packages/Rpyc/Utils/Discovery.py | AppSecAI-TEST/restcommander | 550 | 23416 | <reponame>AppSecAI-TEST/restcommander<gh_stars>100-1000
"""
Discovery: broadcasts a query, attempting to discover all running RPyC servers
over the local network/specific subnet.
"""
import socket
import select
import struct
__all__ = ["discover_servers"]
UDP_DISCOVERY_PORT = 18813
QUERY_MAGIC = "RPYC_QUERY"
MAX_DGRAM_SIZE = 100
def discover_servers(subnet = "255.255.255.255", timeout = 1):
"""broadcasts a query and returns a list of (addr, port) of running servers"""
# broadcast
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
s.sendto(QUERY_MAGIC, (subnet, UDP_DISCOVERY_PORT))
# wait for replies
replies = []
while True:
rlist, dummy, dummy = select.select([s], [], [], timeout)
if not rlist:
break
data, (addr, port) = s.recvfrom(MAX_DGRAM_SIZE)
rpyc_port, = struct.unpack("<H", data)
replies.append((addr, rpyc_port))
return list(set(replies))
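

# Illustrative usage sketch (not part of the original module): broadcast on the
# default subnet and print any RPyC servers that answered. Requires a network
# that permits UDP broadcast.
if __name__ == "__main__":
    for host, port in discover_servers(timeout=2):
        print "found RPyC server at %s:%d" % (host, port)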
|
tests/python/benchmarks/two_neighborhood_bench.py | sid17/weaver | 163 | 23453 | <gh_stars>100-1000
#! /usr/bin/env python
#
# ===============================================================
# Description: Two neighborhood benchmark
#
# Created: 2014-03-21 13:39:06
#
# Author: <NAME>, <EMAIL>
#
# Copyright (C) 2013-2014, Cornell University, see the LICENSE
# file for licensing agreement
# ===============================================================
#
import random
import sys
import time
import threading
import weaver.client as client
import simple_client
random.seed(42)
num_edges = 1768149
edge_sources = [None] * num_edges
def choose_random_pair():
global edge_sources
return (edge_sources[random.randint(0, num_edges-1)], edge_sources[random.randint(0, num_edges-1)])
if (len(sys.argv) != 2):
print "want single extra arg for file to open"
assert(False)
f = open(sys.argv[1])
i = 0
for line in f:
    if (line[0] == '#'):  # skip comment lines in the edge list
continue
edge_sources[i] = int(line.split(" ")[0])
i += 1
print "done loading file"
num_started = 0
num_finished = 0
cv = threading.Condition()
num_nodes = 81306 # snap twitter-combined
read_percent = 95
# node handles are range(0, num_nodes)
num_vts = 1
num_clients = 100
requests_per_client = 200
def add_labels(c, idx):
global num_nodes
tx_id = c.begin_tx()
for i in range(num_nodes):
        if i % num_clients == idx:
c.set_node_property(tx_id, i, 'name', str(i))
assert(c.end_tx(tx_id))
print "writing labels finished for client " + str(idx)
def exec_reads(reqs, sc, c, exec_time, idx):
global num_started
global cv
global num_clients
global num_finished
with cv:
while num_started < num_clients:
cv.wait()
start = time.time()
cnt = 0
for pair in reqs:
cnt += 1
if (random.randint(1,100) > read_percent) :
tx_id = c.begin_tx()
c.create_edge(tx_id, pair[0], pair[1])
assert(c.end_tx(tx_id))
else:
two_neighborhood = sc.two_neighborhood(pair[0], "name", caching = True)
end = time.time()
with cv:
num_finished += 1
cv.notify_all()
exec_time[idx] = end - start
clients = []
simple_clients = []
for i in range(num_clients):
clients.append(client.Client(client._CLIENT_ID + i, i % num_vts))
simple_clients.append(simple_client.simple_client(clients[i]))
reqs = []
for i in range(num_clients):
cl_reqs = []
for _ in range(requests_per_client):
cl_reqs.append(choose_random_pair())
reqs.append(cl_reqs)
exec_time = [0] * num_clients
threads = []
print "starting writes"
for i in range(num_clients):
thr = threading.Thread(target=add_labels, args=(clients[i], i))
thr.start()
threads.append(thr)
for thr in threads:
thr.join()
print "starting requests"
for i in range(num_clients):
thr = threading.Thread(target=exec_reads, args=(reqs[i], simple_clients[i], clients[i], exec_time, i))
thr.start()
threads.append(thr)
start_time = time.time()
with cv:
num_started = num_clients
cv.notify_all()
while num_finished < num_clients:
cv.wait()
end_time = time.time()
total_time = end_time-start_time
for thr in threads:
thr.join()
print 'Total time for ' + str(num_clients * requests_per_client) + ' requests = ' + str(total_time)
throughput = (num_clients * requests_per_client) / total_time
print 'Throughput = ' + str(throughput)
|
src/warp/yul/AstTools.py | sambarnes/warp | 414 | 23485 | from __future__ import annotations
import re
from typing import Union
import warp.yul.ast as ast
from warp.yul.AstVisitor import AstVisitor
from warp.yul.WarpException import WarpException
class AstParser:
def __init__(self, text: str):
self.lines = text.splitlines()
if len(self.lines) == 0:
raise WarpException("Text should not be empty")
self.pos = 0
def parse_typed_name(self) -> ast.TypedName:
tabs = self.get_tabs()
node_type_name = self.get_word(tabs)
        assert node_type_name == "TypedName:", "This node should be of type TypedName"
self.pos += 1
assert self.get_tabs() == tabs + 1, "Wrong indentation"
node_name, node_type = self.get_word(tabs + 1).split(":")
self.pos += 1
return ast.TypedName(name=node_name, type=node_type)
def parse_literal(self) -> ast.Literal:
tabs = self.get_tabs()
assert self.get_word(tabs).startswith(
"Literal:"
), "This node should be of type Literal"
value = self.get_word(tabs + 8)
self.pos += 1
try:
value = int(value)
except ValueError:
pass
return ast.Literal(value=value)
def parse_identifier(self) -> ast.Identifier:
tabs = self.get_tabs()
assert self.get_word(tabs).startswith(
"Identifier:"
), "This node should be of type Identifier"
name = self.get_word(tabs + 11)
self.pos += 1
return ast.Identifier(name=name)
def parse_assignment(self) -> ast.Assignment:
tabs = self.get_tabs()
assert (
self.get_word(tabs) == "Assignment:"
), "This node should be of type Assignment"
self.pos += 1
assert self.get_word(tabs + 1) == "Variables:"
self.pos += 1
variables_list = self.parse_list(tabs + 1, self.parse_identifier)
assert self.get_word(tabs + 1) == "Value:"
self.pos += 1
return ast.Assignment(
variable_names=variables_list, value=self.parse_expression()
)
def parse_function_call(self) -> ast.FunctionCall:
tabs = self.get_tabs()
assert (
self.get_word(tabs) == "FunctionCall:"
), "This node should be of type FunctionCall"
self.pos += 1
return ast.FunctionCall(
function_name=self.parse_identifier(),
arguments=self.parse_list(tabs, self.parse_expression),
)
def parse_expression_statement(self) -> ast.Statement:
tabs = self.get_tabs()
assert (
self.get_word(tabs) == "ExpressionStatement:"
), "This node should be of type ExpressionStatement"
self.pos += 1
return ast.ExpressionStatement(expression=self.parse_expression())
def parse_variable_declaration(self) -> ast.VariableDeclaration:
tabs = self.get_tabs()
assert (
self.get_word(tabs) == "VariableDeclaration:"
), "This node should be of type VariableDeclaration"
self.pos += 1
assert self.get_tabs() == tabs + 1
assert self.get_word(tabs + 1) == "Variables:"
self.pos += 1
variables = self.parse_list(tabs + 1, self.parse_typed_name)
assert self.get_tabs() == tabs + 1
word = self.get_word(tabs + 1)
self.pos += 1
assert word.startswith("Value")
if word.endswith("None"):
value = None
else:
value = self.parse_expression()
return ast.VariableDeclaration(variables=variables, value=value)
def parse_block(self) -> ast.Block:
tabs = self.get_tabs()
assert self.get_word(tabs) == "Block:", "This node should be of type Block"
self.pos += 1
return ast.Block(statements=tuple(self.parse_list(tabs, self.parse_statement)))
def parse_function_definition(self) -> ast.FunctionDefinition:
tabs = self.get_tabs()
assert (
self.get_word(tabs) == "FunctionDefinition:"
), "This node should be of type FunctionDefinition"
self.pos += 1
assert self.get_tabs() == tabs + 1 and self.get_word(tabs + 1).startswith(
"Name:"
)
fun_name = self.get_word(tabs + 7)
self.pos += 1
assert self.get_tabs() == tabs + 1 and self.get_word(tabs + 1) == "Parameters:"
self.pos += 1
params = self.parse_list(tabs + 1, self.parse_typed_name)
assert (
self.get_tabs() == tabs + 1
and self.get_word(tabs + 1) == "Return Variables:"
)
self.pos += 1
returns = self.parse_list(tabs + 1, self.parse_typed_name)
assert self.get_tabs() == tabs + 1 and self.get_word(tabs + 1) == "Body:"
self.pos += 1
body = self.parse_block()
return ast.FunctionDefinition(
name=fun_name, parameters=params, return_variables=returns, body=body
)
def parse_if(self) -> ast.If:
tabs = self.get_tabs()
assert self.get_word(tabs) == "If:", "This node should be of type If"
self.pos += 1
condition = self.parse_expression()
body = self.parse_block()
else_body = None
if self.get_tabs() > tabs:
else_body = self.parse_block()
return ast.If(condition=condition, body=body, else_body=else_body)
def parse_case(self) -> ast.Case:
tabs = self.get_tabs()
assert self.get_word(tabs) == "Case:", "This node should be of type Case"
self.pos += 1
try:
value = self.parse_literal()
except AssertionError:
assert (
self.get_tabs() == tabs + 1 and self.get_word(tabs + 1) == "Default"
), "The value must be a literal or None (when it's the default case)"
value = None
self.pos += 1
return ast.Case(value=value, body=self.parse_block())
def parse_switch(self) -> ast.Switch:
tabs = self.get_tabs()
assert self.get_word(tabs) == "Switch:", "This node should be of type Switch"
self.pos += 1
return ast.Switch(
expression=self.parse_expression(),
cases=self.parse_list(tabs, self.parse_case),
)
def parse_for_loop(self) -> ast.ForLoop:
tabs = self.get_tabs()
assert self.get_word(tabs) == "ForLoop:", "This node should be of type ForLoop"
self.pos += 1
return ast.ForLoop(
pre=self.parse_block(),
condition=self.parse_expression(),
post=self.parse_block(),
body=self.parse_block(),
)
def parse_break(self) -> ast.Break:
tabs = self.get_tabs()
assert self.get_word(tabs) == "Break", "This node should be of type Break"
self.pos += 1
return ast.Break()
def parse_continue(self) -> ast.Continue:
tabs = self.get_tabs()
assert self.get_word(tabs) == "Continue", "This node should be of type Continue"
self.pos += 1
return ast.Continue()
def parse_leave(self) -> ast.Leave:
tabs = self.get_tabs()
assert self.get_word(tabs) == "Leave", "This node should be of type Leave"
self.pos += 1
return ast.LEAVE
def parse_node(self) -> ast.Node:
tabs = self.get_tabs()
node_type_name = self.get_word(tabs).split(":")[0]
parser_name = f"parse_{self.get_name(node_type_name)}"
parser = getattr(self, parser_name, None)
if parser is None:
raise WarpException("Wrong node type name!")
return parser()
def parse_statement(self) -> ast.Statement:
statements = [
"ExpressionStatement",
"Assignment",
"VariableDeclaration",
"FunctionDefinition",
"If",
"Switch",
"ForLoop",
"Break",
"Continue",
"Leave",
"Block",
]
tabs = self.get_tabs()
node_type_name = self.get_word(tabs).split(":")[0]
assert node_type_name in statements, "Not a valid statement"
return ast.assert_statement(self.parse_node())
def parse_expression(self) -> ast.Expression:
tabs = self.get_tabs()
node_type_name = self.get_word(tabs).split(":")[0]
assert node_type_name in [
"Literal",
"Identifier",
"FunctionCall",
], "Node type must be an expression"
return ast.assert_expression(self.parse_node())
def parse_list(self, tabs, parser):
items = []
while self.pos < len(self.lines) and self.get_tabs() > tabs:
item = parser()
items.append(item)
return items
def get_tabs(self):
tabs = 0
if self.pos < len(self.lines):
for c in self.lines[self.pos]:
if not c == "\t":
break
tabs += 1
else:
raise WarpException(
"Lines are not supposed to be filled only with tabs"
)
return tabs
def get_word(self, start: int) -> str:
return self.lines[self.pos][start:]
def get_name(self, name):
name = "_".join(re.findall("[A-Z][^A-Z]*", name))
return name.lower()
class YulPrinter(AstVisitor):
def format(self, node: ast.Node, tabs: int = 0) -> str:
return self.visit(node, tabs)
def visit_typed_name(self, node: ast.TypedName, tabs: int = 0) -> str:
return f"{node.name}"
def visit_literal(self, node: ast.Literal, tabs: int = 0) -> str:
return f"{node.value}"
def visit_identifier(self, node: ast.Identifier, tabs: int = 0) -> str:
return f"{node.name}"
def visit_assignment(self, node: ast.Assignment, tabs: int = 0) -> str:
variables = ", ".join(self.visit_list(node.variable_names))
value = self.visit(node.value, 0)
return f"{variables} := {value}"
def visit_function_call(self, node: ast.FunctionCall, tabs: int = 0) -> str:
name = self.visit(node.function_name)
args = ", ".join(self.visit_list(node.arguments))
return f"{name}({args})"
def visit_expression_statement(
self, node: ast.ExpressionStatement, tabs: int = 0
) -> str:
return self.visit(node.expression, tabs)
def visit_variable_declaration(
self, node: ast.VariableDeclaration, tabs: int = 0
) -> str:
variables = ", ".join(self.visit_list(node.variables))
value = ""
if node.value is not None:
value = f" := {self.visit(node.value)}"
return f"let {variables}{value}"
def visit_block(self, node: ast.Block, tabs: int = 0) -> str:
open_block = "{"
close_block = "}"
if self.is_short(node.statements):
statements = "".join(self.visit_list(node.statements))
return " ".join([open_block, statements, close_block])
statements = self.visit_list(node.statements, tabs + 1)
statements = ["\t" * (tabs + 1) + stmt for stmt in statements]
statements = "\n".join(statements)
close_block = "\t" * tabs + close_block
res = "\n".join([open_block, statements, close_block])
return res
def visit_function_definition(
self, node: ast.FunctionDefinition, tabs: int = 0
) -> str:
parameters = ", ".join(self.visit_list(node.parameters, 0))
ret_vars = ", ".join(self.visit_list(node.return_variables, 0))
body = self.visit(node.body, tabs)
res = f"function {node.name}({parameters})"
if len(node.return_variables) > 0:
res += f" -> {ret_vars}"
res += f" {body}"
return res
def visit_if(self, node: ast.If, tabs: int = 0) -> str:
res = f"if {self.visit(node.condition)} "
res += self.visit(node.body, tabs)
if node.else_body is not None:
res += "\n" + "\t" * tabs + "else "
res += self.visit(node.else_body, tabs)
return res
def visit_case(self, node: ast.Case, tabs: int = 0) -> str:
res = "\t" * tabs
if node.value is not None:
res += f"case {self.visit(node.value)} "
else:
res += "default "
res += self.visit(node.body, tabs)
return res
def visit_switch(self, node: ast.Switch, tabs: int = 0) -> str:
res = f"switch {self.visit(node.expression)}\n"
res += "\n".join(self.visit_list(node.cases, tabs))
return res
def visit_for_loop(self, node: ast.ForLoop, tabs: int = 0) -> str:
res = "for "
res += self.visit(node.pre, tabs)
res += f" {self.visit(node.condition)} "
res += self.visit(node.post, tabs)
res += f"\n{self.visit(node.body, tabs)}"
return res
def visit_break(self, node: ast.Break, tabs: int = 0) -> str:
return "break"
def visit_continue(self, node: ast.Continue, tabs: int = 0) -> str:
return "continue"
def visit_leave(self, node: ast.Leave, tabs: int = 0) -> str:
return "leave"
def is_short(self, stmts: tuple) -> bool:
if len(stmts) == 0:
return True
return len(stmts) == 1 and type(stmts[0]).__name__ not in [
"Block",
"FunctionDefinition",
"If",
"Switch",
"ForLoop",
]
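

# ---------------------------------------------------------------------------
# Illustrative round-trip sketch (not part of the original module): parse a
# tiny serialized AST dump and pretty-print it back as Yul-style source. The
# input string follows the tab-indented format expected by AstParser.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    serialized = "Block:\n\tExpressionStatement:\n\t\tLiteral:42"
    block = AstParser(serialized).parse_block()
    print(YulPrinter().format(block))  # prints: { 42 }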
|
models/ffn_ace.py | MilesQLi/Theano-Lights | 313 | 23495 | import theano
import theano.tensor as T
from theano.sandbox.rng_mrg import MRG_RandomStreams
from theano.tensor.nnet.conv import conv2d
from theano.tensor.signal.downsample import max_pool_2d
from theano.tensor.shared_randomstreams import RandomStreams
import numpy as np
from toolbox import *
from modelbase import *
import itertools
class FFN_ace(ModelSLBase):
"""
Auto-classifier-encoder (Georgiev, 2015)
"""
def save(self):
if not os.path.exists('savedmodels\\'):
os.makedirs('savedmodels\\')
self.params.save(self.filename)
def __init__(self, data, hp):
super(FFN_ace, self).__init__(self.__class__.__name__, data, hp)
# batch_size: 10000; learning_rate = 0.0015; lr_halflife = 200, 500
self.epsilon = 0.0001
self.params = Parameters()
self.shared_vars = Parameters()
n_x = self.data['n_x']
n_y = self.data['n_y']
n_h1 = 1200
n_h2 = 1000
n_h3 = 800
n_h4 = 800
scale = hp.init_scale
if hp.load_model and os.path.isfile(self.filename):
self.params.load(self.filename)
else:
with self.params:
w_h = shared_normal((n_x, n_h1), scale=scale)
b_h = shared_zeros((n_h1,))
w_h2 = shared_normal((n_h1, n_h2), scale=scale)
b_h2 = shared_zeros((n_h2,))
w_h3 = shared_normal((n_h2, n_h3), scale=scale)
b_h3 = shared_zeros((n_h3,))
w_h4 = shared_normal((n_h3, n_h4), scale=scale)
b_h4 = shared_zeros((n_h4,))
w_o = shared_normal((n_h4, n_y), scale=scale)
def batch_norm(h):
m = T.mean(h, axis=0, keepdims=True)
std = T.sqrt(T.var(h, axis=0, keepdims=True) + self.epsilon)
h = (h - m) / std
return h
def model(X, params, p_drop_input, p_drop_hidden):
X_noise = X + gaussian(X.shape, p_drop_input)
h = batch_norm(dropout(rectify(T.dot(X_noise, params.w_h) + params.b_h), p_drop_hidden))
# Dual reconstruction error
phx = T.nnet.sigmoid(T.dot(h, T.dot(h.T, X_noise)) / self.hp.batch_size)
log_phx = T.nnet.binary_crossentropy(phx, X_noise).sum()
h2 = dropout(rectify(T.dot(h, params.w_h2) + params.b_h2), p_drop_hidden)
h3 = batch_norm(dropout(rectify(T.dot(h2, params.w_h3) + params.b_h3), p_drop_hidden))
h4 = dropout(rectify(T.dot(h3, params.w_h4) + params.b_h4), p_drop_hidden)
py_x = softmax(T.dot(h4, params.w_o))
return [py_x, log_phx]
noise_py_x, cost_recon = model(self.X, self.params, 0.2, 0.5)
cost_y2 = -T.sum(self.Y * T.log(noise_py_x))
cost = cost_y2 + cost_recon
pyx, _ = model(self.X, self.params, 0., 0.)
map_pyx = T.argmax(pyx, axis=1)
error_map_pyx = T.sum(T.neq(map_pyx, T.argmax(self.Y, axis=1)))
self.compile(cost, error_map_pyx)
|
Extensions/BabaGUI/config.py | siva-msft/baba-is-auto | 108 | 23498 | import pygame
FPS = 60
BLOCK_SIZE = 48
COLOR_BACKGROUND = pygame.Color(0, 0, 0)
|
examples/pincell_depletion/restart_depletion.py | norberto-schmidt/openmc | 262 | 23512 | <reponame>norberto-schmidt/openmc<filename>examples/pincell_depletion/restart_depletion.py
import openmc
import openmc.deplete
import matplotlib.pyplot as plt
###############################################################################
# Load previous simulation results
###############################################################################
# Load geometry from statepoint
statepoint = 'statepoint.100.h5'
with openmc.StatePoint(statepoint) as sp:
geometry = sp.summary.geometry
# Load previous depletion results
previous_results = openmc.deplete.ResultsList.from_hdf5("depletion_results.h5")
###############################################################################
# Transport calculation settings
###############################################################################
# Instantiate a Settings object, set all runtime parameters
settings = openmc.Settings()
settings.batches = 100
settings.inactive = 10
settings.particles = 10000
# Create an initial uniform spatial source distribution over fissionable zones
bounds = [-0.62992, -0.62992, -1, 0.62992, 0.62992, 1]
uniform_dist = openmc.stats.Box(bounds[:3], bounds[3:], only_fissionable=True)
settings.source = openmc.source.Source(space=uniform_dist)
entropy_mesh = openmc.RegularMesh()
entropy_mesh.lower_left = [-0.39218, -0.39218, -1.e50]
entropy_mesh.upper_right = [0.39218, 0.39218, 1.e50]
entropy_mesh.dimension = [10, 10, 1]
settings.entropy_mesh = entropy_mesh
###############################################################################
# Initialize and run depletion calculation
###############################################################################
# Create depletion "operator"
chain_file = './chain_simple.xml'
op = openmc.deplete.Operator(geometry, settings, chain_file, previous_results)
# Perform simulation using the predictor algorithm
time_steps = [1.0, 1.0, 1.0, 1.0, 1.0] # days
power = 174 # W/cm, for 2D simulations only (use W for 3D)
integrator = openmc.deplete.PredictorIntegrator(op, time_steps, power, timestep_units='d')
integrator.integrate()
###############################################################################
# Read depletion calculation results
###############################################################################
# Open results file
results = openmc.deplete.ResultsList.from_hdf5("depletion_results.h5")
# Obtain K_eff as a function of time
time, keff = results.get_eigenvalue()
# Obtain U235 concentration as a function of time
time, n_U235 = results.get_atoms('1', 'U235')
# Obtain Xe135 capture reaction rate as a function of time
time, Xe_capture = results.get_reaction_rate('1', 'Xe135', '(n,gamma)')
###############################################################################
# Generate plots
###############################################################################
days = 24*60*60
plt.figure()
plt.plot(time/days, keff, label="K-effective")
plt.xlabel("Time (days)")
plt.ylabel("Keff")
plt.show()
plt.figure()
plt.plot(time/days, n_U235, label="U 235")
plt.xlabel("Time (days)")
plt.ylabel("n U5 (-)")
plt.show()
plt.figure()
plt.plot(time/days, Xe_capture, label="Xe135 capture")
plt.xlabel("Time (days)")
plt.ylabel("RR (-)")
plt.show()
plt.close('all')
|
001-050/029-divide-two-integers.py | bbram10/leetcode-master | 134 | 23549 | <reponame>bbram10/leetcode-master
"""
STATEMENT
Divide two integers without using multiplication, division and mod operator.
CLARIFICATIONS
- Do I have to handle 32-bit integer overflow? Yes, return the MAX_INT in that case.
- Can the divisor be zero? Yes, return the MAX_INT.
EXAMPLES
34/3 -> 11
COMMENTS
- This solution is by tusizi in Leetcode (picked up from https://discuss.leetcode.com/topic/8714/clear-python-code)
"""
def divide(dividend, divisor):
"""
:type dividend: int
:type divisor: int
:rtype: int
"""
sign = (dividend < 0) is (divisor < 0)
dividend, divisor = abs(dividend), abs(divisor)
INT_MIN, INT_MAX = -2147483648, 2147483647
if (not divisor) or (dividend < INT_MIN and divisor == -1):
return INT_MAX
to_return = 0
while dividend >= divisor:
temp, i = divisor, 1
while dividend >= temp:
dividend -= temp
to_return += i
i <<= 1
temp <<= 1
if not sign:
to_return = -to_return
return min(max(INT_MIN, to_return), INT_MAX)
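

# Illustrative checks (not part of the original solution):
if __name__ == "__main__":
    assert divide(34, 3) == 11                     # truncates toward zero
    assert divide(7, -3) == -2                     # opposite signs -> negative result
    assert divide(-2147483648, -1) == 2147483647   # overflow clamps to INT_MAX
    print("all divide() checks passed")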
|
saleor/app/tests/test_models.py | fairhopeweb/saleor | 15,337 | 23570 | from ...app.models import App
from ...webhook.event_types import WebhookEventType
def test_qs_for_event_type(payment_app):
qs = App.objects.for_event_type(WebhookEventType.PAYMENT_AUTHORIZE)
assert len(qs) == 1
assert qs[0] == payment_app
def test_qs_for_event_type_no_payment_permissions(payment_app):
payment_app.permissions.first().delete()
qs = App.objects.for_event_type(WebhookEventType.PAYMENT_AUTHORIZE)
assert len(qs) == 0
def test_qs_for_event_type_inactive_app(payment_app):
payment_app.is_active = False
payment_app.save()
qs = App.objects.for_event_type(WebhookEventType.PAYMENT_AUTHORIZE)
assert len(qs) == 0
def test_qs_for_event_type_no_webhook_event(payment_app):
webhook = payment_app.webhooks.first()
event = webhook.events.filter(event_type=WebhookEventType.PAYMENT_AUTHORIZE).first()
event.delete()
qs = App.objects.for_event_type(WebhookEventType.PAYMENT_AUTHORIZE)
assert len(qs) == 0
def test_qs_for_event_type_inactive_webhook(payment_app):
webhook = payment_app.webhooks.first()
webhook.is_active = False
webhook.save()
qs = App.objects.for_event_type(WebhookEventType.PAYMENT_AUTHORIZE)
assert len(qs) == 0
|
extra_tests/ctypes_tests/test_unions.py | nanjekyejoannah/pypy | 333 | 23607 | import sys
from ctypes import *
def test_getattr():
class Stuff(Union):
_fields_ = [('x', c_char), ('y', c_int)]
stuff = Stuff()
stuff.y = ord('x') | (ord('z') << 24)
if sys.byteorder == 'little':
assert stuff.x == b'x'
else:
assert stuff.x == b'z'
def test_union_of_structures():
class Stuff(Structure):
_fields_ = [('x', c_int)]
class Stuff2(Structure):
_fields_ = [('x', c_int)]
class UnionofStuff(Union):
_fields_ = [('one', Stuff),
('two', Stuff2)]
u = UnionofStuff()
u.one.x = 3
assert u.two.x == 3
|
helios/workflows/__init__.py | thiagosfs/helios-server | 525 | 23617 | """
Helios Election Workflows
"""
from helios.datatypes import LDObjectContainer
class WorkflowObject(LDObjectContainer):
pass
|
src/genie/libs/parser/linux/route.py | balmasea/genieparser | 204 | 23633 | """route.py
Linux parsers for the following commands:
* route
"""
# python
import re
# metaparser
from genie.metaparser import MetaParser
from genie.metaparser.util.schemaengine import Schema, Any, Optional
from netaddr import IPAddress, IPNetwork
# =======================================================
# Schema for 'route'
# =======================================================
class RouteSchema(MetaParser):
"""Schema for route"""
# Destination Gateway Genmask Flags Metric Ref Use Iface
# 0.0.0.0 192.168.1.1 0.0.0.0 UG 0 0 0 wlo1
schema = {
'routes': {
Any(): { # 'destination'
'mask': {
Any(): {
'nexthop': {
Any(): { # index: 1, 2, 3, etc
'interface': str,
Optional('flags'): str,
Optional('gateway'): str,
Optional('metric'): int,
Optional('ref'): int,
Optional('use'): int,
Optional('scope'): str,
Optional('proto'): str,
Optional('src'): str,
Optional('broadcast'): bool,
Optional('table'): str,
Optional('local'): bool
}
}
}
}
}
}
}
# =======================================================
# Parser for 'route'
# =======================================================
class Route(RouteSchema):
"""Parser for
* route
* route -4 -n
* route -4n
* route -n4
* route -n -4
"""
cli_command = ['route', 'route {flag}']
def cli(self, flag=None, output=None):
if output is None:
cmd = self.cli_command[0]
if flag in ['-4 -n', '-4n', '-n4']:
                cmd = self.cli_command[1].replace('{flag}', flag)
out = self.device.execute(cmd)
else:
out = output
# Destination Gateway Genmask Flags Metric Ref Use Iface
# 192.168.1.0 0.0.0.0 255.255.255.0 U 600 0 0 wlo1
p1 = re.compile(r'(?P<destination>[a-z0-9\.\:]+)'
' +(?P<gateway>[a-z0-9\.\:_]+)'
' +(?P<mask>[a-z0-9\.\:]+)'
' +(?P<flags>[a-zA-Z]+)'
' +(?P<metric>(\d+))'
' +(?P<ref>(\d+))'
' +(?P<use>(\d+))'
' +(?P<interface>\S+)'
)
# Initializes the Python dictionary variable
parsed_dict = {}
# Defines the "for" loop, to pattern match each line of output
for line in out.splitlines():
line = line.strip()
# 192.168.1.0 0.0.0.0 255.255.255.0 U 600 0 0 wlo1
m = p1.match(line)
if m:
if 'routes' not in parsed_dict:
parsed_dict.setdefault('routes', {})
group = m.groupdict()
destination = group['destination']
mask = group['mask']
index_dict = {}
for str_k in ['interface', 'flags', 'gateway']:
index_dict[str_k] = group[str_k]
for int_k in ['metric', 'ref', 'use']:
index_dict[int_k] = int(group[int_k])
if destination in parsed_dict['routes']:
if mask in parsed_dict['routes'][destination]['mask']:
parsed_dict['routes'][destination]['mask'][mask].\
setdefault('nexthop', {index+1: index_dict})
else:
index = 1
parsed_dict['routes'][destination]['mask'].\
setdefault(mask, {}).\
setdefault('nexthop', {index: index_dict})
else:
index = 1
parsed_dict['routes'].setdefault(destination, {}).\
setdefault('mask', {}).\
setdefault(mask, {}).\
setdefault('nexthop', {index: index_dict})
continue
return parsed_dict
# =======================================================
# Parser for 'netstat -rn'
# =======================================================
class ShowNetworkStatusRoute(Route, RouteSchema):
"""Parser for
* netstat -rn
"""
cli_command = ['netstat -rn']
def cli(self, output=None):
if output is None:
cmd = self.cli_command[0]
out = self.device.execute(cmd)
else:
out = output
return super().cli(output=out)
# =====================================================
# Parser for ip route show table all
# =====================================================
class IpRouteShowTableAll(RouteSchema):
"""
Parser for
* ip route show table all
"""
cli_command = ['ip route show table all']
def cli(self, output=None):
if output is None:
cmd = self.cli_command[0]
out = self.device.execute(cmd)
else:
out = output
# default via 192.168.1.1 dev enp7s0 proto dhcp metric 100
p1 = re.compile(r'default via (?P<gateway>[a-z0-9\.\:]+)'
' dev (?P<device>[a-z0-9\.\-]+)'
' proto (?P<proto>[a-z]+)'
' metric (?P<metric>[\d]+)'
)
# 169.254.0.0/16 dev enp7s0 scope link metric 1000
p2 = re.compile(r'(?P<destination>[a-z0-9\.\:\/]+)'
' dev (?P<device>[a-z0-9\.\-]+)'
' scope (?P<scope>\w+)'
' metric (?P<metric>[\d]+)'
)
# 172.17.0.0/16 dev docker0 proto kernel scope link src 172.17.0.1
p3 = re.compile(r'(?P<destination>[a-z0-9\.\:\/]+)'
' dev (?P<device>[a-z0-9\.\-]+)'
' proto (?P<proto>\w+)'
' scope (?P<scope>\w+)'
' src (?P<src>[a-z0-9\.\:\/]+)'
)
# 172.18.0.0/16 dev br-d19b23fac393 proto kernel scope link src 172.18.0.1 linkdown
p4 = re.compile(r'(?P<destination>[a-z0-9\.\:\/]+)'
' dev (?P<device>[a-z0-9\.\-]+)'
' proto (?P<proto>\w+)'
' scope (?P<scope>\w+)'
' src (?P<src>[a-z0-9\.\:\/]+)'
' linkdown '
)
# 192.168.1.0/24 dev enp7s0 proto kernel scope link src 192.168.1.212 metric 100
p5 = re.compile(r'(?P<destination>[a-z0-9\.\:\/]+)'
' dev (?P<device>[a-z0-9\.\-]+)'
' proto (?P<proto>\w+)'
' scope (?P<scope>\w+)'
' src (?P<src>[a-z0-9\.\:\/]+)'
' metric (?P<metric>[\d]+)'
)
# broadcast 127.0.0.0 dev lo table local proto kernel scope link src 127.0.0.1
p6 = re.compile(r'broadcast (?P<destination>[a-z0-9\.\:\/]+)'
' dev (?P<device>[a-z0-9\.\-]+)'
' table (?P<table>\w+)'
' proto (?P<proto>\w+)'
' scope (?P<scope>\w+)'
' src (?P<src>[a-z0-9\.\:\/]+)'
)
# local 10.233.44.70 dev kube-ipvs0 table local proto kernel scope host src 10.233.44.70
p7 = re.compile(r'local (?P<destination>[a-z0-9\.\:\/]+)'
' dev (?P<device>[a-z0-9\.\-]+)'
' table (?P<table>\w+)'
' proto (?P<proto>\w+)'
' scope (?P<scope>\w+)'
' src (?P<src>[a-z0-9\.\:\/]+)'
)
# Initializes the Python dictionary variable
parsed_dict = {}
# Defines the "for" loop, to pattern match each line of output
for line in out.splitlines():
line = line.strip()
# default via 192.168.1.1 dev enp7s0 proto dhcp metric 100
m = p1.match(line)
if m:
if 'routes' not in parsed_dict:
parsed_dict.setdefault('routes', {})
group = m.groupdict()
gateway = group['gateway']
interface = group['device']
metric = int(group['metric'])
if gateway:
parsed_dict['routes'] = { '0.0.0.0': {
'mask': {
'0.0.0.0': {
'nexthop': {
1:{
'gateway': gateway,
'interface': interface,
'metric': metric
}
}
}
}
}
}
# 169.254.0.0/16 dev enp7s0 scope link metric 1000
m = p2.match(line)
if m:
group = m.groupdict()
destination = IPNetwork(group['destination'])
mask = str(destination.netmask)
destination_addr = str(destination.ip)
interface = group['device']
metric = int(group['metric'])
scope = group['scope']
index_dict = {'interface' : interface,
'scope' : scope,
'metric': metric
}
index = 1
parsed_dict['routes'].setdefault(destination_addr, {}).\
setdefault('mask', {}).\
setdefault(mask, {}).\
setdefault('nexthop', {index: index_dict})
# 172.17.0.0/16 dev docker0 proto kernel scope link src 172.17.0.1
m = p3.match(line)
if m:
group = m.groupdict()
destination = IPNetwork(group['destination'])
mask = str(destination.netmask)
destination_addr = str(destination.ip)
interface = group['device']
scope = group['scope']
proto = group['proto']
src = group['src']
index_dict = {'interface' : interface,
'scope' : scope,
'proto' : proto ,
'src' : src
}
index = 1
parsed_dict['routes'].setdefault(destination_addr, {}).\
setdefault('mask', {}).\
setdefault(mask, {}).\
setdefault('nexthop', {index: index_dict})
# 172.18.0.0/16 dev br-d19b23fac393 proto kernel scope link src 172.18.0.1 linkdown
m = p4.match(line)
if m:
group = m.groupdict()
destination = IPNetwork(group['destination'])
mask = str(destination.netmask)
destination_addr = str(destination.ip)
interface = group['device']
scope = group['scope']
proto = group['proto']
src = group['src']
index_dict = {'interface' : interface,
'scope' : scope,
'proto' : proto ,
'src' : src
}
index = 1
parsed_dict['routes'].setdefault(destination_addr, {}).\
setdefault('mask', {}).\
setdefault(mask, {}).\
setdefault('nexthop', {index: index_dict})
# 192.168.1.0/24 dev enp7s0 proto kernel scope link src 192.168.1.212 metric 100
m = p5.match(line)
if m:
group = m.groupdict()
destination = IPNetwork(group['destination'])
mask = str(destination.netmask)
destination_addr = str(destination.ip)
interface = group['device']
scope = group['scope']
proto = group['proto']
metric = group['metric']
src = group['src']
index_dict = {'interface' : interface,
'scope' : scope,
'proto' : proto ,
'src' : src,
'metric': metric
}
index = 1
parsed_dict['routes'].setdefault(destination_addr, {}).\
setdefault('mask', {}).\
setdefault(mask, {}).\
setdefault('nexthop', {index: index_dict})
# broadcast 127.0.0.0 dev lo table local proto kernel scope link src 127.0.0.1
m = p6.match(line)
if m:
group = m.groupdict()
destination = IPNetwork(group['destination'])
mask = str(destination.netmask)
destination_addr = str(destination.ip)
interface = group['device']
scope = group['scope']
proto = group['proto']
src = group['src']
table = group['table']
index_dict = {'interface' : interface,
'scope' : scope,
'proto' : proto ,
'src' : src,
'broadcast': True,
'table': table
}
index = 1
parsed_dict['routes'].setdefault(destination_addr, {}).\
setdefault('mask', {}).\
setdefault(mask, {}).\
setdefault('nexthop', {index: index_dict})
# local 10.233.44.70 dev kube-ipvs0 table local proto kernel scope host src 10.233.44.70
m = p7.match(line)
if m:
group = m.groupdict()
destination = IPNetwork(group['destination'])
mask = str(destination.netmask)
destination_addr = str(destination.ip)
interface = group['device']
scope = group['scope']
proto = group['proto']
src = group['src']
table = group['table']
index_dict = {'interface' : interface,
'scope' : scope,
'proto' : proto ,
'src' : src,
'local': True,
'table': table
}
index = 1
parsed_dict['routes'].setdefault(destination_addr, {}).\
setdefault('mask', {}).\
setdefault(mask, {}).\
setdefault('nexthop', {index: index_dict})
return parsed_dict
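

# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the parser package). It assumes the
# parser can be instantiated with device=None when the raw command output is
# supplied directly through the `output` argument; the sample output below is
# made up for demonstration.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    import pprint
    sample_output = '''
Kernel IP routing table
Destination     Gateway         Genmask         Flags Metric Ref    Use Iface
0.0.0.0         192.168.1.1     0.0.0.0         UG    600    0        0 wlo1
192.168.1.0     0.0.0.0         255.255.255.0   U     600    0        0 wlo1
'''
    pprint.pprint(Route(device=None).cli(output=sample_output))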
|
chapter-7/chassis/demo.py | wallacei/microservices-in-action-copy | 115 | 23634 | import json
import datetime
import requests
from nameko.web.handlers import http
from nameko.timer import timer
from statsd import StatsClient
from circuitbreaker import circuit
class DemoChassisService:
name = "demo_chassis_service"
statsd = StatsClient('localhost', 8125, prefix='simplebank-demo')
@http('GET', '/health')
@statsd.timer('health')
def health(self, _request):
return json.dumps({'ok': datetime.datetime.utcnow().__str__()})
@http('GET', '/external')
@circuit(failure_threshold=5, expected_exception=ConnectionError)
@statsd.timer('external')
def external_request(self, _request):
response = requests.get('https://jsonplaceholder.typicode.com/posts/1')
return json.dumps({'code': response.status_code, 'body': response.text})
@http('GET', '/error')
@circuit(failure_threshold=5, expected_exception=ZeroDivisionError)
@statsd.timer('http_error')
    def error_http_request(self, _request):
return json.dumps({1 / 0})
class HealthCheckService:
name = "health_check_service"
statsd = StatsClient('localhost', 8125, prefix='simplebank-demo')
@timer(interval=10)
@statsd.timer('check_demo_service')
def check_demo_service(self):
response = requests.get('http://0.0.0.0:8000/health')
print("DemoChassisService HEALTH CHECK: status_code {}, response: {}".format(
response.status_code, response.text))
|
tests/frontend/detector/test_fast.py | swershrimpy/gtsfm | 122 | 23641 | <reponame>swershrimpy/gtsfm<gh_stars>100-1000
"""Tests for frontend's FAST detector class.
Authors: <NAME>
"""
import unittest
import tests.frontend.detector.test_detector_base as test_detector_base
from gtsfm.frontend.detector.fast import Fast
class TestFast(test_detector_base.TestDetectorBase):
"""Test class for FAST detector class in frontend.
All unit test functions defined in TestDetectorBase are run automatically.
"""
def setUp(self):
super().setUp()
self.detector = Fast()
if __name__ == "__main__":
unittest.main()
|
tests/test_cli.py | KoichiYasuoka/pynlpir | 537 | 23643 | """Unit tests for pynlpir's cli.py file."""
import os
import shutil
import stat
import unittest
try:
from urllib.error import URLError
from urllib.request import urlopen
except ImportError:
from urllib2 import URLError, urlopen
from click.testing import CliRunner
from pynlpir import cli
TEST_DIR = os.path.abspath(os.path.dirname(__file__))
LICENSE_FILE = os.path.join(TEST_DIR, 'data', 'NLPIR.user')
def can_reach_github():
"""Check if we can reach GitHub's website."""
try:
urlopen('http://github.com')
return True
except URLError:
return False
@unittest.skipIf(can_reach_github() is False, 'Unable to reach GitHub')
class TestCLI(unittest.TestCase):
"""Unit tests for the PyNLPIR CLI."""
def setUp(self):
self.runner = CliRunner()
def tearDown(self):
self.runner = None
def test_initial_license_download(self):
"""Tests that an initial license download works correctly."""
with self.runner.isolated_filesystem():
result = self.runner.invoke(cli.cli, ('update', '-d.'))
self.assertEqual(0, result.exit_code)
self.assertEqual('License updated.\n', result.output)
def test_license_update(self):
"Test that a regular license update works correctly."""
with self.runner.isolated_filesystem():
shutil.copyfile(LICENSE_FILE, os.path.basename(LICENSE_FILE))
result = self.runner.invoke(cli.cli, ('update', '-d.'))
self.assertEqual(0, result.exit_code)
self.assertEqual('License updated.\n', result.output)
result = self.runner.invoke(cli.cli, ('update', '-d.'))
self.assertEqual(0, result.exit_code)
self.assertEqual('Your license is already up-to-date.\n',
result.output)
def test_license_write_fail(self):
"""Test tha writing a license file fails appropriately."""
with self.runner.isolated_filesystem():
cwd = os.getcwd()
os.chmod(cwd, stat.S_IREAD)
with self.assertRaises((IOError, OSError)):
cli.update_license_file(cwd)
|
tutorials/W0D1_PythonWorkshop1/solutions/W0D1_Tutorial1_Solution_93456241.py | eduardojdiniz/CompNeuro | 2,294 | 23683 |
# Set random number generator
np.random.seed(2020)
# Initialize step_end, n, t_range, v and i
step_end = int(t_max / dt)
n = 50
t_range = np.linspace(0, t_max, num=step_end)
v_n = el * np.ones([n, step_end])
i = i_mean * (1 + 0.1 * (t_max / dt)**(0.5) * (2 * np.random.random([n, step_end]) - 1))
# Loop for step_end - 1 steps
for step in range(1, step_end):
# Compute v_n
v_n[:, step] = v_n[:, step - 1] + (dt / tau) * (el - v_n[:, step - 1] + r * i[:, step])
# Plot figure
with plt.xkcd():
plt.figure()
plt.title('Multiple realizations of $V_m$')
plt.xlabel('time (s)')
plt.ylabel('$V_m$ (V)')
plt.plot(t_range, v_n.T, 'k', alpha=0.3)
plt.show() |
mlcomp/db/core/options.py | sUeharaE4/mlcomp | 166 | 23714 | class PaginatorOptions:
def __init__(
self,
page_number: int,
page_size: int,
sort_column: str = None,
sort_descending: bool = None
):
self.sort_column = sort_column
self.sort_descending = sort_descending
self.page_number = page_number
self.page_size = page_size
assert (page_number is not None and page_size) \
or (page_number is not None and not page_size), \
'Specify both page_number and page_size'
if not sort_column:
self.sort_column = 'id'
self.sort_descending = True
__all__ = ['PaginatorOptions']
|
api/client/src/pcluster_client/sigv4_auth.py | maclema/aws-parallelcluster | 415 | 23726 | """Sigv4 Signing Support"""
# Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy
# of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "LICENSE.txt" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
# OF ANY KIND, express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import boto3
import botocore
import json
def sigv4_auth(method, host, path, querys, body, headers):
"Adds authorization headers for sigv4 to headers parameter."
endpoint = host.replace('https://', '').replace('http://', '')
_api_id, _service, region, _domain = endpoint.split('.', maxsplit=3)
request_parameters = '&'.join([f"{k}={v}" for k, v in querys])
url = f"{host}{path}?{request_parameters}"
session = botocore.session.Session()
request = botocore.awsrequest.AWSRequest(method=method,
url=url,
data=json.dumps(body) if body else None)
botocore.auth.SigV4Auth(session.get_credentials(),
"execute-api", region).add_auth(request)
prepared_request = request.prepare()
headers['host'] = endpoint.split('/', maxsplit=1)[0]
for k, value in prepared_request.headers.items():
headers[k] = value
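

# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the module). The API id, region and
# path below are placeholders, and the call assumes AWS credentials are
# already configured in the environment (otherwise botocore raises
# NoCredentialsError).
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    request_headers = {}
    sigv4_auth(
        method="GET",
        host="https://abcde12345.execute-api.us-east-1.amazonaws.com",
        path="/prod/v3/clusters",
        querys=[("region", "us-east-2")],
        body=None,
        headers=request_headers,
    )
    print(request_headers)  # now carries Authorization, X-Amz-Date, host, ...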
|
rules/starlark_configurations/cc_test/defs.bzl | CyberFlameGO/examples | 572 | 23772 | # We can transition on native options using this
# //command_line_option:<option-name> syntax
_BUILD_SETTING = "//command_line_option:test_arg"
def _test_arg_transition_impl(settings, attr):
_ignore = (settings, attr)
return {_BUILD_SETTING: ["new arg"]}
_test_arg_transition = transition(
implementation = _test_arg_transition_impl,
inputs = [],
outputs = [_BUILD_SETTING],
)
def _test_transition_rule_impl(ctx):
# We need to copy the executable because starlark doesn't allow
# providing an executable not created by the rule
executable_src = ctx.executable.actual_test
executable_dst = ctx.actions.declare_file(ctx.label.name)
ctx.actions.run_shell(
tools = [executable_src],
outputs = [executable_dst],
command = "cp %s %s" % (executable_src.path, executable_dst.path),
)
runfiles = ctx.attr.actual_test[0][DefaultInfo].default_runfiles
return [DefaultInfo(runfiles = runfiles, executable = executable_dst)]
transition_rule_test = rule(
implementation = _test_transition_rule_impl,
attrs = {
"actual_test": attr.label(cfg = _test_arg_transition, executable = True),
"_allowlist_function_transition": attr.label(
default = "@bazel_tools//tools/allowlists/function_transition_allowlist",
),
},
test = True,
)
def test_arg_cc_test(name, **kwargs):
cc_test_name = name + "_native_test"
transition_rule_test(
name = name,
actual_test = ":%s" % cc_test_name,
)
native.cc_test(name = cc_test_name, **kwargs)
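
# Illustrative BUILD usage (the load path and target names are placeholders):
#
#   load("//rules/starlark_configurations/cc_test:defs.bzl", "test_arg_cc_test")
#
#   test_arg_cc_test(
#       name = "transitioned_test",
#       srcs = ["transitioned_test.cc"],
#   )
#
# The macro declares a native cc_test named "transitioned_test_native_test" and
# wraps it in transition_rule_test, so the wrapped test runs with --test_arg set
# to ["new arg"] by the transition defined above.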
|
db_pool/mysql/base.py | GiftLee/devops | 300 | 23860 | # -*- coding: utf-8 -*-
"""
Reading the source of django.db.backends.mysql.base.py shows that Django does not use a
connection pool when talking to MySQL: every database operation opens a brand-new
connection and closes it once the query finishes. Worse, setting the CONN_MAX_AGE
parameter as the official Django docs suggest (it is meant to reuse connections) means
each new connection is no longer closed after its query but keeps occupying a slot.
Under high concurrency this easily triggers "too many connections" errors, so this
module rewrites the MySQL connection backend to provide connection pooling.
"""
from django.core.exceptions import ImproperlyConfigured
import queue
import threading
try:
import MySQLdb as Database
except ImportError as err:
raise ImproperlyConfigured(
'Error loading MySQLdb module.\n'
'Did you install mysqlclient?'
) from err
from django.db.backends.mysql.base import *
from django.db.backends.mysql.base import DatabaseWrapper as _DatabaseWrapper
DEFAULT_DB_POOL_SIZE = 5
class DatabaseWrapper(_DatabaseWrapper):
"""
    Never set the CONN_MAX_AGE connection parameter when using this backend; otherwise
    connections are not released back to the pool promptly after use, and the pool ends
    up blocked.
"""
connect_pools = {}
pool_size = None
mutex = threading.Lock()
def get_new_connection(self, conn_params):
with self.mutex:
            # Read the DB_POOL_SIZE parameter from the DATABASES settings dict
if not self.pool_size:
self.pool_size = self.settings_dict.get('DB_POOL_SIZE') or DEFAULT_DB_POOL_SIZE
if self.alias not in self.connect_pools:
self.connect_pools[self.alias] = ConnectPool(conn_params, self.pool_size)
return self.connect_pools[self.alias].get_connection()
def _close(self):
with self.mutex:
            # Override the original close: release the connection back to the pool once the query finishes
if self.connection is not None:
with self.wrap_database_errors:
return self.connect_pools[self.alias].release_connection(self.connection)
class ConnectPool(object):
def __init__(self, conn_params, pool_size):
self.conn_params = conn_params
self.pool_size = pool_size
self.connect_count = 0
self.connects = queue.Queue()
def get_connection(self):
if self.connect_count < self.pool_size:
self.connect_count = self.connect_count + 1
return Database.connect(**self.conn_params)
conn = self.connects.get()
try:
            # Check that the connection is still alive; removing this improves performance, but keeping it is recommended
conn.ping()
except Exception:
conn = Database.connect(**self.conn_params)
return conn
def release_connection(self, conn):
self.connects.put(conn)
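

# ---------------------------------------------------------------------------
# Illustrative settings sketch (not part of the module). To use this pooled
# backend, point ENGINE at this package in settings.py and size the pool with
# DB_POOL_SIZE; all values below are placeholders. Do NOT set CONN_MAX_AGE
# (see the module docstring above).
#
# DATABASES = {
#     'default': {
#         'ENGINE': 'db_pool.mysql',
#         'NAME': 'mydb',
#         'USER': 'devops',
#         'PASSWORD': 'secret',
#         'HOST': '127.0.0.1',
#         'PORT': 3306,
#         'DB_POOL_SIZE': 10,
#     }
# }
# ---------------------------------------------------------------------------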
|
src/test/tests/hybrid/missingdata.py | visit-dav/vis | 226 | 23869 | # ----------------------------------------------------------------------------
# CLASSES: nightly
#
# Test Case: missingdata.py
#
# Tests: missing data
#
# Programmer: <NAME>
# Date: Thu Jan 19 09:49:15 PST 2012
#
# Modifications:
#
# ----------------------------------------------------------------------------
def SetTheView():
v = GetView2D()
v.viewportCoords = (0.02, 0.98, 0.25, 1)
SetView2D(v)
def test0(datapath):
TestSection("Missing data")
OpenDatabase(pjoin(datapath,"earth.nc"))
AddPlot("Pseudocolor", "height")
DrawPlots()
SetTheView()
Test("missingdata_0_00")
ChangeActivePlotsVar("carbon_particulates")
Test("missingdata_0_01")
ChangeActivePlotsVar("seatemp")
Test("missingdata_0_02")
ChangeActivePlotsVar("population")
Test("missingdata_0_03")
# Pick on higher zone numbers to make sure pick works.
PickByNode(domain=0, element=833621)
TestText("missingdata_0_04", GetPickOutput())
DeleteAllPlots()
def test1(datapath):
TestSection("Expressions and missing data")
OpenDatabase(pjoin(datapath,"earth.nc"))
DefineScalarExpression("meaningless", "carbon_particulates + seatemp")
AddPlot("Pseudocolor", "meaningless")
DrawPlots()
SetTheView()
Test("missingdata_1_00")
DeleteAllPlots()
DefineVectorExpression("color", "color(red,green,blue)")
AddPlot("Truecolor", "color")
DrawPlots()
ResetView()
SetTheView()
Test("missingdata_1_01")
DefineVectorExpression("color2", "color(population*0.364,green,blue)")
ChangeActivePlotsVar("color2")
v1 = GetView2D()
v1.viewportCoords = (0.02, 0.98, 0.02, 0.98)
v1.windowCoords = (259.439, 513.299, 288.93, 540) #25.466)
SetView2D(v1)
Test("missingdata_1_02")
def main():
datapath = data_path("netcdf_test_data")
test0(datapath)
test1(datapath)
main()
Exit()
|
RecoLocalCalo/HGCalRecProducers/python/HeterogeneousHEBRecHitGPUtoSoA_cfi.py | Purva-Chaudhari/cmssw | 852 | 23944 | import FWCore.ParameterSet.Config as cms
HEBRecHitGPUtoSoAProd = cms.EDProducer('HEBRecHitGPUtoSoA',
HEBRecHitGPUTok = cms.InputTag('HEBRecHitGPUProd'))
|
pythonx/lints/vim/vint.py | maralla/validator.vim | 255 | 23953 | <reponame>maralla/validator.vim
# -*- coding: utf-8 -*-
from validator import Validator
class VimVint(Validator):
__filetype__ = 'vim'
checker = 'vint'
args = '-w --no-color'
regex = r"""
.+?:
(?P<lnum>\d+):
(?P<col>\d+):
\s(?P<text>.+)"""
|
examples/python/qiskit_integration.py | CQCL/pytket | 249 | 23960 | <reponame>CQCL/pytket
# # Integrating `pytket` into Qiskit software
# In this tutorial, we will focus on:
# - Using `pytket` for compilation or providing devices/simulators within Qiskit workflows;
# - Adapting Qiskit code to use `pytket` directly.
# This example assumes some familiarity with the Qiskit algorithms library. We have chosen a small variational quantum eigensolver (VQE) for our example, but the same principles apply to a wide range of quantum algorithms.
#
# To run this example, you will need `pytket-qiskit`, as well as the separate `qiskit-optimization` package. You will also need IBMQ credentials stored on your local machine.
#
# Qiskit has risen to prominence as the most popular platform for the development of quantum software, providing an open source, full-stack solution with a large feature list and extensive examples from the developers and community. For many researchers who have already invested in building a large codebase built on top of Qiskit, the idea of switching entirely to a new platform can look like a time-sink and may require reversion to take advantage of the new tools that get regularly added to Qiskit.
#
# The interoperability provided by `pytket-qiskit` allows Qiskit users to start taking advantage of some of the unique features of `pytket` without having to completely rewrite their software.
# Let's take as an example an ansatz for computing the ground-state energy of a hydrogen molecule.
from qiskit.opflow.primitive_ops import PauliSumOp
H2_op = PauliSumOp.from_list(
[
("II", -1.052373245772859),
("IZ", 0.39793742484318045),
("ZI", -0.39793742484318045),
("ZZ", -0.01128010425623538),
("XX", 0.18093119978423156),
]
)
# First let's use qiskit's NumPyEigensolver to compute the exact answer:
from qiskit.algorithms import NumPyEigensolver
es = NumPyEigensolver(k=1)
exact_result = es.compute_eigenvalues(H2_op).eigenvalues[0].real
print("Exact result:", exact_result)
# The following function will attempt to find an approximation to this using VQE, given a qiskit QuantumInstance on which to run circuits:
from qiskit.algorithms import VQE
from qiskit.algorithms.optimizers import SPSA
from qiskit.circuit.library import EfficientSU2
def vqe_solve(op, maxiter, quantum_instance):
optimizer = SPSA(maxiter=maxiter)
ansatz = EfficientSU2(op.num_qubits, entanglement="linear")
vqe = VQE(ansatz=ansatz, optimizer=optimizer, quantum_instance=quantum_instance)
return vqe.compute_minimum_eigenvalue(op).eigenvalue
# We will run this on a pytket `IBMQEmulatorBackend`. This is a noisy simulator whose characteristics match those of the real device, in this case "ibmq_belem" (a 5-qubit machine). The characteristics are retrieved from the device when the backend is constructed, so we must first load our IBMQ account. Circuits will be compiled to match the connectivity of the device and simulated using a basic noise model [constructed from the device parameters](https://qiskit.org/documentation/apidoc/aer_noise.html).
from pytket.extensions.qiskit import IBMQEmulatorBackend
from qiskit import IBMQ
IBMQ.load_account()
b_emu = IBMQEmulatorBackend("ibmq_belem", hub="ibm-q", group="open", project="main")
# Most qiskit algorithms require a qiskit `QuantumInstance` as input; this in turn is constructed from a `qiskit.providers.Backend`. The `TketBackend` class wraps a pytket backend as a `qiskit.providers.Backend`.
from pytket.extensions.qiskit.tket_backend import TketBackend
from qiskit.utils import QuantumInstance
qis_backend = TketBackend(b_emu)
qi = QuantumInstance(qis_backend, shots=8192, wait=0.1)
# Note that we could have used any other pytket shots backend instead of `b_emu` here. The `pytket` extension modules provide an interface to a wide variety of devices and simulators from different quantum software platforms.
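# As a quick illustration (a sketch that is not used in the rest of this example), the noiseless `AerBackend` simulator shipped with the same `pytket-qiskit` extension could be wrapped in exactly the same way:
from pytket.extensions.qiskit import AerBackend
qi_noiseless = QuantumInstance(TketBackend(AerBackend()), shots=8192)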
#
# We can now run the VQE algorithm. In this example we use only 50 iterations, but greater accuracy may be achieved by increasing this number:
print("VQE result:", vqe_solve(H2_op, 50, qi))
# Another way to improve the accuracy of results is to apply optimisations to the circuit in an attempt to reduce the overall noise. When we construct our qiskit backend, we can pass in a pytket compilation pass as an additional parameter. There is a wide range of options here; we recommend the device-specific default compilation pass, provided by each tket backend. This pass will ensure that all the hardware constraints of the device are met. We can enable tket's most aggressive optimisation level by setting the parameter `optimisation_level=2`.
qis_backend2 = TketBackend(b_emu, b_emu.default_compilation_pass(optimisation_level=2))
qi2 = QuantumInstance(qis_backend2, shots=8192, wait=0.1)
# Let's run the optimisation again:
print("VQE result (with optimisation):", vqe_solve(H2_op, 50, qi2))
# These are small two-qubit circuits, so the improvement may be small, but with larger, more complex circuits, the reduction in noise from compilation will make a greater difference and allow VQE experiments to converge with fewer iterations.
|
cfgs/config.py | Pandinosaurus/yolo2-pytorch | 1,663 | 23967 | <gh_stars>1000+
import os
from .config_voc import * # noqa
from .exps.darknet19_exp1 import * # noqa
def mkdir(path, max_depth=3):
parent, child = os.path.split(path)
if not os.path.exists(parent) and max_depth > 1:
mkdir(parent, max_depth-1)
if not os.path.exists(path):
os.mkdir(path)
# input and output size
############################
multi_scale_inp_size = [np.array([320, 320], dtype=np.int),
np.array([352, 352], dtype=np.int),
np.array([384, 384], dtype=np.int),
np.array([416, 416], dtype=np.int),
np.array([448, 448], dtype=np.int),
np.array([480, 480], dtype=np.int),
np.array([512, 512], dtype=np.int),
np.array([544, 544], dtype=np.int),
np.array([576, 576], dtype=np.int),
# np.array([608, 608], dtype=np.int),
] # w, h
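# output grid sizes: the darknet19 backbone downsamples its input by a stride of 32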
multi_scale_out_size = [multi_scale_inp_size[0] / 32,
multi_scale_inp_size[1] / 32,
multi_scale_inp_size[2] / 32,
multi_scale_inp_size[3] / 32,
multi_scale_inp_size[4] / 32,
multi_scale_inp_size[5] / 32,
multi_scale_inp_size[6] / 32,
multi_scale_inp_size[7] / 32,
multi_scale_inp_size[8] / 32,
# multi_scale_inp_size[9] / 32,
] # w, h
inp_size = np.array([416, 416], dtype=np.int) # w, h
out_size = inp_size / 32
# for display
############################
def _to_color(indx, base):
""" return (b, r, g) tuple"""
base2 = base * base
b = 2 - indx / base2
r = 2 - (indx % base2) / base
g = 2 - (indx % base2) % base
return b * 127, r * 127, g * 127
base = int(np.ceil(pow(num_classes, 1. / 3)))
colors = [_to_color(x, base) for x in range(num_classes)]
# detection config
############################
thresh = 0.3
# dir config
############################
ROOT_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
DATA_DIR = os.path.join(ROOT_DIR, 'data')
MODEL_DIR = os.path.join(ROOT_DIR, 'models')
TRAIN_DIR = os.path.join(MODEL_DIR, 'training')
TEST_DIR = os.path.join(MODEL_DIR, 'testing')
trained_model = os.path.join(MODEL_DIR, h5_fname)
pretrained_model = os.path.join(MODEL_DIR, pretrained_fname)
train_output_dir = os.path.join(TRAIN_DIR, exp_name)
test_output_dir = os.path.join(TEST_DIR, imdb_test, h5_fname)
mkdir(train_output_dir, max_depth=3)
mkdir(test_output_dir, max_depth=4)
rand_seed = 1024
use_tensorboard = True
log_interval = 50
disp_interval = 10
|
cinder/tests/unit/policies/test_volume.py | arunvinodqmco/cinder | 571 | 23999 | <gh_stars>100-1000
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from http import HTTPStatus
from unittest import mock
import ddt
from cinder.api.contrib import volume_encryption_metadata
from cinder.api.contrib import volume_tenant_attribute
from cinder.api.v3 import volumes
from cinder import exception
from cinder.policies import volumes as volume_policies
from cinder.tests.unit.api import fakes as fake_api
from cinder.tests.unit import fake_constants
from cinder.tests.unit.policies import base
from cinder.tests.unit.policies import test_base
from cinder.tests.unit import utils as test_utils
from cinder.volume import api as volume_api
# TODO(yikun): The below policy test cases should be added:
# * HOST_ATTRIBUTE_POLICY
# * MIG_ATTRIBUTE_POLICY
class VolumePolicyTests(test_base.CinderPolicyTests):
def test_admin_can_create_volume(self):
admin_context = self.admin_context
path = '/v3/%(project_id)s/volumes' % {
'project_id': admin_context.project_id
}
body = {"volume": {"size": 1}}
response = self._get_request_response(admin_context, path, 'POST',
body=body)
self.assertEqual(HTTPStatus.ACCEPTED, response.status_int)
def test_nonadmin_user_can_create_volume(self):
user_context = self.user_context
path = '/v3/%(project_id)s/volumes' % {
'project_id': user_context.project_id
}
body = {"volume": {"size": 1}}
response = self._get_request_response(user_context, path, 'POST',
body=body)
self.assertEqual(HTTPStatus.ACCEPTED, response.status_int)
def test_admin_can_create_volume_from_image(self):
admin_context = self.admin_context
path = '/v3/%(project_id)s/volumes' % {
'project_id': admin_context.project_id
}
body = {"volume": {"size": 1, "image_id": fake_constants.IMAGE_ID}}
response = self._get_request_response(admin_context, path, 'POST',
body=body)
self.assertEqual(HTTPStatus.ACCEPTED, response.status_int)
def test_nonadmin_user_can_create_volume_from_image(self):
user_context = self.user_context
path = '/v3/%(project_id)s/volumes' % {
'project_id': user_context.project_id
}
body = {"volume": {"size": 1, "image_id": fake_constants.IMAGE_ID}}
response = self._get_request_response(user_context, path, 'POST',
body=body)
self.assertEqual(HTTPStatus.ACCEPTED, response.status_int)
@mock.patch.object(volume_api.API, 'get_volume')
def test_admin_can_show_volumes(self, mock_volume):
# Make sure administrators are authorized to list volumes
admin_context = self.admin_context
volume = self._create_fake_volume(admin_context)
mock_volume.return_value = volume
path = '/v3/%(project_id)s/volumes/%(volume_id)s' % {
'project_id': admin_context.project_id, 'volume_id': volume.id
}
response = self._get_request_response(admin_context, path, 'GET')
self.assertEqual(HTTPStatus.OK, response.status_int)
self.assertEqual(response.json_body['volume']['id'], volume.id)
@mock.patch.object(volume_api.API, 'get_volume')
def test_owner_can_show_volumes(self, mock_volume):
# Make sure owners are authorized to list their volumes
user_context = self.user_context
volume = self._create_fake_volume(user_context)
mock_volume.return_value = volume
path = '/v3/%(project_id)s/volumes/%(volume_id)s' % {
'project_id': user_context.project_id, 'volume_id': volume.id
}
response = self._get_request_response(user_context, path, 'GET')
self.assertEqual(HTTPStatus.OK, response.status_int)
self.assertEqual(response.json_body['volume']['id'], volume.id)
@mock.patch.object(volume_api.API, 'get_volume')
def test_owner_cannot_show_volumes_for_others(self, mock_volume):
# Make sure volumes are only exposed to their owners
owner_context = self.user_context
non_owner_context = self.other_user_context
volume = self._create_fake_volume(owner_context)
mock_volume.return_value = volume
path = '/v3/%(project_id)s/volumes/%(volume_id)s' % {
'project_id': non_owner_context.project_id, 'volume_id': volume.id
}
response = self._get_request_response(non_owner_context, path, 'GET')
# NOTE(lbragstad): Technically, this user isn't supposed to see this
# volume, because they didn't create it and it lives in a different
# project. Does cinder return a 404 in cases like this? Or is a 403
# expected?
self.assertEqual(HTTPStatus.NOT_FOUND, response.status_int)
def test_admin_can_get_all_volumes_detail(self):
# Make sure administrators are authorized to list volumes
admin_context = self.admin_context
volume = self._create_fake_volume(admin_context)
path = '/v3/%(project_id)s/volumes/detail' % {
'project_id': admin_context.project_id
}
response = self._get_request_response(admin_context, path, 'GET')
self.assertEqual(HTTPStatus.OK, response.status_int)
res_vol = response.json_body['volumes'][0]
self.assertEqual(volume.id, res_vol['id'])
def test_owner_can_get_all_volumes_detail(self):
# Make sure owners are authorized to list volumes
user_context = self.user_context
volume = self._create_fake_volume(user_context)
path = '/v3/%(project_id)s/volumes/detail' % {
'project_id': user_context.project_id
}
response = self._get_request_response(user_context, path, 'GET')
self.assertEqual(HTTPStatus.OK, response.status_int)
res_vol = response.json_body['volumes'][0]
self.assertEqual(volume.id, res_vol['id'])
@mock.patch.object(volume_api.API, 'get')
def test_admin_can_update_volumes(self, mock_volume):
admin_context = self.admin_context
volume = self._create_fake_volume(admin_context)
mock_volume.return_value = volume
path = '/v3/%(project_id)s/volumes/%(volume_id)s' % {
'project_id': admin_context.project_id, 'volume_id': volume.id
}
body = {"volume": {"name": "update_name"}}
response = self._get_request_response(admin_context, path, 'PUT',
body=body)
self.assertEqual(HTTPStatus.OK, response.status_int)
@mock.patch.object(volume_api.API, 'get')
def test_owner_can_update_volumes(self, mock_volume):
user_context = self.user_context
volume = self._create_fake_volume(user_context)
mock_volume.return_value = volume
path = '/v3/%(project_id)s/volumes/%(volume_id)s' % {
'project_id': user_context.project_id, 'volume_id': volume.id
}
body = {"volume": {"name": "update_name"}}
response = self._get_request_response(user_context, path, 'PUT',
body=body)
self.assertEqual(HTTPStatus.OK, response.status_int)
@mock.patch.object(volume_api.API, 'get')
def test_owner_cannot_update_volumes_for_others(self, mock_volume):
owner_context = self.user_context
non_owner_context = self.other_user_context
volume = self._create_fake_volume(owner_context)
mock_volume.return_value = volume
path = '/v3/%(project_id)s/volumes/%(volume_id)s' % {
'project_id': non_owner_context.project_id, 'volume_id': volume.id
}
body = {"volume": {"name": "update_name"}}
response = self._get_request_response(non_owner_context, path, 'PUT',
body=body)
self.assertEqual(HTTPStatus.FORBIDDEN, response.status_int)
@mock.patch.object(volume_api.API, 'get')
def test_owner_can_delete_volumes(self, mock_volume):
user_context = self.user_context
volume = self._create_fake_volume(user_context)
mock_volume.return_value = volume
path = '/v3/%(project_id)s/volumes/%(volume_id)s' % {
'project_id': user_context.project_id, 'volume_id': volume.id
}
response = self._get_request_response(user_context, path, 'DELETE')
self.assertEqual(HTTPStatus.ACCEPTED, response.status_int)
@mock.patch.object(volume_api.API, 'get')
def test_admin_can_delete_volumes(self, mock_volume):
admin_context = self.admin_context
volume = self._create_fake_volume(admin_context)
mock_volume.return_value = volume
path = '/v3/%(project_id)s/volumes/%(volume_id)s' % {
'project_id': admin_context.project_id, 'volume_id': volume.id
}
response = self._get_request_response(admin_context, path, 'DELETE')
self.assertEqual(HTTPStatus.ACCEPTED, response.status_int)
@mock.patch.object(volume_api.API, 'get')
def test_owner_cannot_delete_volumes_for_others(self, mock_volume):
owner_context = self.user_context
non_owner_context = self.other_user_context
volume = self._create_fake_volume(owner_context)
mock_volume.return_value = volume
path = '/v3/%(project_id)s/volumes/%(volume_id)s' % {
'project_id': non_owner_context.project_id, 'volume_id': volume.id
}
response = self._get_request_response(non_owner_context, path,
'DELETE')
self.assertEqual(HTTPStatus.FORBIDDEN, response.status_int)
@mock.patch.object(volume_api.API, 'get_volume')
def test_admin_can_show_tenant_id_in_volume(self, mock_volume):
# Make sure administrators are authorized to show tenant_id
admin_context = self.admin_context
volume = self._create_fake_volume(admin_context)
mock_volume.return_value = volume
path = '/v3/%(project_id)s/volumes/%(volume_id)s' % {
'project_id': admin_context.project_id, 'volume_id': volume.id
}
response = self._get_request_response(admin_context, path, 'GET')
self.assertEqual(HTTPStatus.OK, response.status_int)
res_vol = response.json_body['volume']
self.assertEqual(admin_context.project_id,
res_vol['os-vol-tenant-attr:tenant_id'])
@mock.patch.object(volume_api.API, 'get_volume')
def test_owner_can_show_tenant_id_in_volume(self, mock_volume):
# Make sure owners are authorized to show tenant_id in volume
user_context = self.user_context
volume = self._create_fake_volume(user_context)
mock_volume.return_value = volume
path = '/v3/%(project_id)s/volumes/%(volume_id)s' % {
'project_id': user_context.project_id, 'volume_id': volume.id
}
response = self._get_request_response(user_context, path, 'GET')
self.assertEqual(HTTPStatus.OK, response.status_int)
res_vol = response.json_body['volume']
self.assertEqual(user_context.project_id,
res_vol['os-vol-tenant-attr:tenant_id'])
def test_admin_can_show_tenant_id_in_volume_detail(self):
# Make sure admins are authorized to show tenant_id in volume detail
admin_context = self.admin_context
self._create_fake_volume(admin_context)
path = '/v3/%(project_id)s/volumes/detail' % {
'project_id': admin_context.project_id
}
response = self._get_request_response(admin_context, path, 'GET')
self.assertEqual(HTTPStatus.OK, response.status_int)
res_vol = response.json_body['volumes'][0]
# Make sure owners are authorized to show tenant_id
self.assertEqual(admin_context.project_id,
res_vol['os-vol-tenant-attr:tenant_id'])
def test_owner_can_show_tenant_id_in_volume_detail(self):
# Make sure owners are authorized to show tenant_id in volume detail
user_context = self.user_context
self._create_fake_volume(user_context)
path = '/v3/%(project_id)s/volumes/detail' % {
'project_id': user_context.project_id
}
response = self._get_request_response(user_context, path, 'GET')
self.assertEqual(HTTPStatus.OK, response.status_int)
res_vol = response.json_body['volumes'][0]
# Make sure owners are authorized to show tenant_id
self.assertEqual(user_context.project_id,
res_vol['os-vol-tenant-attr:tenant_id'])
def test_admin_can_create_metadata(self):
admin_context = self.admin_context
volume = self._create_fake_volume(admin_context, metadata={"k": "v"})
path = '/v3/%(project_id)s/volumes/%(volume_id)s/metadata' % {
'project_id': admin_context.project_id, 'volume_id': volume.id
}
body = {"metadata": {"k1": "v1"}}
response = self._get_request_response(admin_context, path, 'POST',
body=body)
self.assertEqual(HTTPStatus.OK, response.status_int)
def test_admin_can_get_metadata(self):
admin_context = self.admin_context
volume = self._create_fake_volume(admin_context, metadata={"k": "v"})
path = '/v3/%(project_id)s/volumes/%(volume_id)s/metadata' % {
'project_id': admin_context.project_id, 'volume_id': volume.id
}
response = self._get_request_response(admin_context, path, 'GET')
self.assertEqual(HTTPStatus.OK, response.status_int)
res_meta = response.json_body['metadata']
self.assertIn('k', res_meta)
self.assertEqual('v', res_meta['k'])
def test_admin_can_update_metadata(self):
admin_context = self.admin_context
volume = self._create_fake_volume(admin_context, metadata={"k": "v"})
path = '/v3/%(project_id)s/volumes/%(volume_id)s/metadata' % {
'project_id': admin_context.project_id, 'volume_id': volume.id
}
body = {"metadata": {"k": "v2"}}
response = self._get_request_response(admin_context, path, 'PUT',
body=body)
self.assertEqual(HTTPStatus.OK, response.status_int)
res_meta = response.json_body['metadata']
self.assertIn('k', res_meta)
self.assertEqual('v2', res_meta['k'])
def test_admin_can_delete_metadata(self):
admin_context = self.admin_context
volume = self._create_fake_volume(admin_context, metadata={"k": "v"})
path = '/v3/%(project_id)s/volumes/%(volume_id)s/metadata/%(key)s' % {
'project_id': admin_context.project_id, 'volume_id': volume.id,
'key': 'k'
}
response = self._get_request_response(admin_context, path, 'DELETE')
self.assertEqual(HTTPStatus.OK, response.status_int)
def test_owner_can_create_metadata(self):
user_context = self.user_context
volume = self._create_fake_volume(user_context, metadata={"k": "v"})
path = '/v3/%(project_id)s/volumes/%(volume_id)s/metadata' % {
'project_id': user_context.project_id, 'volume_id': volume.id
}
body = {"metadata": {"k1": "v1"}}
response = self._get_request_response(user_context, path, 'POST',
body=body)
self.assertEqual(HTTPStatus.OK, response.status_int)
def test_owner_can_get_metadata(self):
user_context = self.user_context
volume = self._create_fake_volume(user_context, metadata={"k": "v"})
path = '/v3/%(project_id)s/volumes/%(volume_id)s/metadata' % {
'project_id': user_context.project_id, 'volume_id': volume.id
}
response = self._get_request_response(user_context, path, 'GET')
self.assertEqual(HTTPStatus.OK, response.status_int)
res_meta = response.json_body['metadata']
self.assertIn('k', res_meta)
self.assertEqual('v', res_meta['k'])
def test_owner_can_update_metadata(self):
user_context = self.user_context
volume = self._create_fake_volume(user_context, metadata={"k": "v"})
path = '/v3/%(project_id)s/volumes/%(volume_id)s/metadata' % {
'project_id': user_context.project_id, 'volume_id': volume.id
}
body = {"metadata": {"k": "v2"}}
response = self._get_request_response(user_context, path, 'PUT',
body=body)
self.assertEqual(HTTPStatus.OK, response.status_int)
res_meta = response.json_body['metadata']
self.assertIn('k', res_meta)
self.assertEqual('v2', res_meta['k'])
def test_owner_can_delete_metadata(self):
user_context = self.user_context
volume = self._create_fake_volume(user_context, metadata={"k": "v"})
path = '/v3/%(project_id)s/volumes/%(volume_id)s/metadata/%(key)s' % {
'project_id': user_context.project_id, 'volume_id': volume.id,
'key': 'k'
}
response = self._get_request_response(user_context, path, 'DELETE')
self.assertEqual(HTTPStatus.OK, response.status_int)
@mock.patch.object(volume_api.API, 'get')
def test_owner_cannot_create_metadata_for_others(self, mock_volume):
owner_context = self.user_context
non_owner_context = self.other_user_context
volume = self._create_fake_volume(owner_context, metadata={"k": "v"})
mock_volume.return_value = volume
path = '/v3/%(project_id)s/volumes/%(volume_id)s/metadata' % {
'project_id': non_owner_context.project_id, 'volume_id': volume.id
}
body = {"metadata": {"k1": "v1"}}
response = self._get_request_response(non_owner_context, path, 'POST',
body=body)
self.assertEqual(HTTPStatus.FORBIDDEN, response.status_int)
@mock.patch.object(volume_api.API, 'get')
def test_owner_cannot_get_metadata_for_others(self, mock_volume):
owner_context = self.user_context
non_owner_context = self.other_user_context
volume = self._create_fake_volume(owner_context, metadata={"k": "v"})
mock_volume.return_value = volume
path = '/v3/%(project_id)s/volumes/%(volume_id)s/metadata' % {
'project_id': non_owner_context.project_id, 'volume_id': volume.id
}
response = self._get_request_response(non_owner_context, path, 'GET')
self.assertEqual(HTTPStatus.FORBIDDEN, response.status_int)
@mock.patch.object(volume_api.API, 'get')
def test_owner_cannot_update_metadata_for_others(self, mock_volume):
owner_context = self.user_context
non_owner_context = self.other_user_context
volume = self._create_fake_volume(owner_context, metadata={"k": "v"})
mock_volume.return_value = volume
path = '/v3/%(project_id)s/volumes/%(volume_id)s/metadata' % {
'project_id': non_owner_context.project_id, 'volume_id': volume.id
}
body = {"metadata": {"k": "v2"}}
response = self._get_request_response(non_owner_context, path, 'PUT',
body=body)
self.assertEqual(HTTPStatus.FORBIDDEN, response.status_int)
@mock.patch.object(volume_api.API, 'get')
def test_owner_cannot_delete_metadata_for_others(self, mock_volume):
owner_context = self.user_context
non_owner_context = self.other_user_context
volume = self._create_fake_volume(owner_context, metadata={"k": "v"})
mock_volume.return_value = volume
path = '/v3/%(project_id)s/volumes/%(volume_id)s/metadata/%(key)s' % {
'project_id': non_owner_context.project_id,
'volume_id': volume.id,
'key': 'k'
}
response = self._get_request_response(non_owner_context, path,
'DELETE')
self.assertEqual(HTTPStatus.FORBIDDEN, response.status_int)
@ddt.ddt
class VolumesPolicyTest(base.BasePolicyTest):
authorized_readers = [
'legacy_admin',
'legacy_owner',
'system_admin',
'project_admin',
'project_member',
'project_reader',
'project_foo',
]
unauthorized_readers = [
'system_member',
'system_reader',
'system_foo',
'other_project_member',
'other_project_reader',
]
authorized_members = [
'legacy_admin',
'legacy_owner',
'system_admin',
'project_admin',
'project_member',
'project_reader',
'project_foo',
]
unauthorized_members = [
'system_member',
'system_reader',
'system_foo',
'other_project_member',
'other_project_reader',
]
create_authorized_users = [
'legacy_admin',
'legacy_owner',
'system_admin',
'project_admin',
'project_member',
'project_reader',
'project_foo',
# The other_* users are allowed because we don't have any check
# mechanism in the code to validate this, these are validated on
# the WSGI layer
'other_project_member',
'other_project_reader',
]
create_unauthorized_users = [
'system_member',
'system_reader',
'system_foo',
]
# Basic policy test is without enforcing scope (which cinder doesn't
# yet support) and deprecated rules enabled.
def setUp(self, enforce_scope=False, enforce_new_defaults=False,
*args, **kwargs):
super().setUp(enforce_scope, enforce_new_defaults, *args, **kwargs)
self.controller = volumes.VolumeController(mock.MagicMock())
self.api_path = '/v3/%s/volumes' % (self.project_id)
def _create_volume(self):
vol_type = test_utils.create_volume_type(self.project_admin_context,
name='fake_vol_type',
testcase_instance=self)
volume = test_utils.create_volume(self.project_member_context,
volume_type_id=vol_type.id,
testcase_instance=self)
return volume
@ddt.data(*base.all_users)
def test_create_volume_policy(self, user_id):
rule_name = volume_policies.CREATE_POLICY
url = self.api_path
req = fake_api.HTTPRequest.blank(url)
req.method = 'POST'
body = {"volume": {"size": 1}}
unauthorized_exceptions = []
self.common_policy_check(user_id, self.create_authorized_users,
self.create_unauthorized_users,
unauthorized_exceptions,
rule_name, self.controller.create, req,
body=body)
@ddt.data(*base.all_users)
@mock.patch('cinder.api.v3.volumes.VolumeController._image_uuid_from_ref',
return_value=fake_constants.IMAGE_ID)
@mock.patch('cinder.api.v3.volumes.VolumeController._get_image_snapshot',
return_value=None)
@mock.patch('cinder.volume.flows.api.create_volume.'
'ExtractVolumeRequestTask._get_image_metadata',
return_value=None)
def test_create_volume_from_image_policy(
self, user_id, mock_image_from_ref, mock_image_snap,
mock_img_meta):
rule_name = volume_policies.CREATE_FROM_IMAGE_POLICY
url = self.api_path
req = fake_api.HTTPRequest.blank(url)
req.method = 'POST'
body = {"volume": {"size": 1, "image_id": fake_constants.IMAGE_ID}}
unauthorized_exceptions = []
self.common_policy_check(user_id, self.create_authorized_users,
self.create_unauthorized_users,
unauthorized_exceptions,
rule_name, self.controller.create, req,
body=body)
@ddt.data(*base.all_users)
def test_create_multiattach_volume_policy(self, user_id):
vol_type = test_utils.create_volume_type(
self.project_admin_context, name='multiattach_type',
extra_specs={'multiattach': '<is> True'})
rule_name = volume_policies.MULTIATTACH_POLICY
url = self.api_path
req = fake_api.HTTPRequest.blank(url)
req.method = 'POST'
body = {"volume": {"size": 1, "volume_type": vol_type.id}}
# Relax the CREATE_POLICY in order to get past that check.
self.policy.set_rules({volume_policies.CREATE_POLICY: ""},
overwrite=False)
unauthorized_exceptions = []
self.common_policy_check(user_id, self.create_authorized_users,
self.create_unauthorized_users,
unauthorized_exceptions,
rule_name, self.controller.create, req,
body=body)
@ddt.data(*base.all_users)
def test_get_volume_policy(self, user_id):
volume = self._create_volume()
rule_name = volume_policies.GET_POLICY
url = '%s/%s' % (self.api_path, volume.id)
req = fake_api.HTTPRequest.blank(url)
unauthorized_exceptions = [
exception.VolumeNotFound,
]
self.common_policy_check(user_id,
self.authorized_readers,
self.unauthorized_readers,
unauthorized_exceptions,
rule_name, self.controller.show, req,
id=volume.id)
@ddt.data(*base.all_users)
def test_get_all_volumes_policy(self, user_id):
self._create_volume()
rule_name = volume_policies.GET_ALL_POLICY
url = self.api_path
req = fake_api.HTTPRequest.blank(url)
# Generally, any logged in user can list all volumes.
authorized_users = [user_id]
unauthorized_users = []
# The exception is when deprecated rules are disabled, in which case
# roles are enforced. Users without the 'reader' role should be
# blocked.
if self.enforce_new_defaults:
context = self.create_context(user_id)
if 'reader' not in context.roles:
authorized_users = []
unauthorized_users = [user_id]
response = self.common_policy_check(user_id, authorized_users,
unauthorized_users, [],
rule_name,
self.controller.index, req)
# For some users, even if they're authorized, the list of volumes
# will be empty if they are not in the volume's project.
empty_response_users = [
*self.unauthorized_readers,
# legacy_admin and system_admin do not have a project_id, and
# so the list of volumes returned will be empty.
'legacy_admin',
'system_admin',
]
volumes = response['volumes'] if response else []
volume_count = 0 if user_id in empty_response_users else 1
self.assertEqual(volume_count, len(volumes))
@ddt.data(*base.all_users)
@mock.patch('cinder.db.volume_encryption_metadata_get')
def test_get_volume_encryption_meta_policy(self, user_id,
mock_encrypt_meta):
encryption_key_id = fake_constants.ENCRYPTION_KEY_ID
mock_encrypt_meta.return_value = (
{'encryption_key_id': encryption_key_id})
controller = (
volume_encryption_metadata.VolumeEncryptionMetadataController())
volume = self._create_volume()
rule_name = volume_policies.ENCRYPTION_METADATA_POLICY
url = '%s/%s/encryption' % (self.api_path, volume.id)
req = fake_api.HTTPRequest.blank(url)
unauthorized_exceptions = [
exception.VolumeNotFound,
]
resp = self.common_policy_check(
user_id, self.authorized_readers,
self.unauthorized_readers,
unauthorized_exceptions,
rule_name, controller.index, req,
volume.id)
if user_id in self.authorized_readers:
self.assertEqual(encryption_key_id, resp['encryption_key_id'])
@ddt.data(*base.all_users)
def test_get_volume_tenant_attr_policy(self, user_id):
controller = volume_tenant_attribute.VolumeTenantAttributeController()
volume = self._create_volume()
volume = volume.obj_to_primitive()['versioned_object.data']
rule_name = volume_policies.TENANT_ATTRIBUTE_POLICY
url = '%s/%s' % (self.api_path, volume['id'])
req = fake_api.HTTPRequest.blank(url)
req.get_db_volume = mock.MagicMock()
req.get_db_volume.return_value = volume
resp_obj = mock.MagicMock(obj={'volume': volume})
unauthorized_exceptions = [
exception.VolumeNotFound,
]
self.assertNotIn('os-vol-tenant-attr:tenant_id', volume.keys())
self.common_policy_check(
user_id, self.authorized_readers,
self.unauthorized_readers,
unauthorized_exceptions,
rule_name, controller.show, req,
resp_obj, volume['id'], fatal=False)
if user_id in self.authorized_readers:
self.assertIn('os-vol-tenant-attr:tenant_id', volume.keys())
@ddt.data(*base.all_users)
def test_update_volume_policy(self, user_id):
volume = self._create_volume()
rule_name = volume_policies.UPDATE_POLICY
url = '%s/%s' % (self.api_path, volume.id)
body = {"volume": {"name": "update_name"}}
req = fake_api.HTTPRequest.blank(url)
req.method = 'PUT'
unauthorized_exceptions = [
exception.VolumeNotFound,
]
self.common_policy_check(
user_id, self.authorized_members,
self.unauthorized_members,
unauthorized_exceptions,
rule_name, self.controller.update, req,
id=volume.id, body=body)
@ddt.data(*base.all_users)
def test_delete_volume_policy(self, user_id):
volume = self._create_volume()
rule_name = volume_policies.DELETE_POLICY
url = '%s/%s' % (self.api_path, volume.id)
req = fake_api.HTTPRequest.blank(url)
req.method = 'DELETE'
unauthorized_exceptions = [
exception.VolumeNotFound,
]
self.common_policy_check(
user_id, self.authorized_members,
self.unauthorized_members,
unauthorized_exceptions,
rule_name, self.controller.delete, req,
id=volume.id)
class VolumesPolicySecureRbacTest(VolumesPolicyTest):
create_authorized_users = [
'legacy_admin',
'system_admin',
'project_admin',
'project_member',
'other_project_member',
]
create_unauthorized_users = [
'legacy_owner',
'system_member',
'system_reader',
'system_foo',
'other_project_reader',
'project_foo',
'project_reader',
]
authorized_readers = [
'legacy_admin',
'system_admin',
'project_admin',
'project_member',
'project_reader',
]
unauthorized_readers = [
'legacy_owner',
'system_member',
'system_reader',
'system_foo',
'project_foo',
'other_project_member',
'other_project_reader',
]
authorized_members = [
'legacy_admin',
'system_admin',
'project_admin',
'project_member',
]
unauthorized_members = [
'legacy_owner',
'system_member',
'system_reader',
'system_foo',
'project_reader',
'project_foo',
'other_project_member',
'other_project_reader',
]
def setUp(self, *args, **kwargs):
# Test secure RBAC by disabling deprecated policy rules (scope
# is still not enabled).
super().setUp(enforce_scope=False, enforce_new_defaults=True,
*args, **kwargs)
|
gff/Scripts/gff/gff_to_genbank.py | bgruening/bcbb | 339 | 24011 | <reponame>bgruening/bcbb
#!/usr/bin/env python
"""Convert a GFF and associated FASTA file into GenBank format.
Usage:
gff_to_genbank.py <GFF annotation file> [<FASTA sequence file> <molecule type>]
FASTA sequence file: input sequences matching records in GFF. Optional if sequences
are in the GFF
molecule type: type of molecule in the GFF file. Defaults to DNA, the most common case.
"""
from __future__ import print_function
import sys
import os
from Bio import SeqIO
from BCBio import GFF
def main(gff_file, fasta_file=None, molecule_type="DNA"):
out_file = "%s.gb" % os.path.splitext(gff_file)[0]
if fasta_file:
fasta_input = SeqIO.to_dict(SeqIO.parse(fasta_file, "fasta"))
else:
fasta_input = {}
gff_iter = GFF.parse(gff_file, fasta_input)
SeqIO.write(_check_gff(_fix_ncbi_id(gff_iter), molecule_type), out_file, "genbank")
def _fix_ncbi_id(fasta_iter):
"""GenBank identifiers can only be 16 characters; try to shorten NCBI.
"""
for rec in fasta_iter:
if len(rec.name) > 16 and rec.name.find("|") > 0:
new_id = [x for x in rec.name.split("|") if x][-1]
print("Warning: shortening NCBI name %s to %s" % (rec.id, new_id))
rec.id = new_id
rec.name = new_id
yield rec
def _check_gff(gff_iterator, molecule_type):
"""Check GFF files before feeding to SeqIO to be sure they have sequences.
"""
for rec in gff_iterator:
if "molecule_type" not in rec.annotations:
rec.annotations["molecule_type"] = molecule_type
yield _flatten_features(rec)
def _flatten_features(rec):
"""Make sub_features in an input rec flat for output.
GenBank does not handle nested features, so we want to make
everything top level.
"""
out = []
for f in rec.features:
cur = [f]
while len(cur) > 0:
nextf = []
for curf in cur:
out.append(curf)
if len(curf.sub_features) > 0:
nextf.extend(curf.sub_features)
cur = nextf
rec.features = out
return rec
if __name__ == "__main__":
main(*sys.argv[1:])
|
third_party/pdfium/build/gyp_pdfium.py | satorumpen/node-pdfium-native | 303 | 24056 | <reponame>satorumpen/node-pdfium-native
# Copyright 2014 PDFium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
path = os.path.abspath(os.path.split(__file__)[0])
execfile(os.path.join(path, 'gyp_pdfium'))
|
backdoors/shell/__pupy/pupy/packages/src/VideoCapture/src/fixhtml.py | mehrdad-shokri/backdoorme | 796 | 24080 | <filename>backdoors/shell/__pupy/pupy/packages/src/VideoCapture/src/fixhtml.py
import os, string
oldWin = '''span {
font-family: Verdana;
background: #e0e0d0;
font-size: 10pt;
}
</style>
</head>
<body bgcolor="#e0e0d0">
'''
oldLinux = '''span {
font-family: Verdana;
background: #e0e0d0;
font-size: 13pt;
}
</style>
</head>
<body bgcolor="#e0e0d0">
'''
new = '''span {
font-family: Verdana;
}
</style>
</head>
<body bgcolor="#f0f0f8">
'''
def fixhtmlfile(file):
if os.path.isfile(file) and file[-5:] == '.html':
print file
fp = open(file, 'rt')
cont = fp.read()
fp.close()
cont = string.replace(cont, '\r\n', '\n')
cont = string.replace(cont, oldWin, new)
cont = string.replace(cont, oldLinux, new)
fp = open(file, 'wt')
fp.write(cont)
fp.close()
def fixhtmlfiles(dir):
files = os.listdir(dir)
for file in files:
fixhtmlfile(dir + os.sep + file)
|
plugin/CustomerSupportArchive/chipDiagnostics/tools/flowcorr.py | iontorrent/TS | 125 | 24107 | import os
import numpy as np
from . import imtools, datprops
from .datfile import DatFile
from .chiptype import ChipType
moduleDir = os.path.abspath( os.path.dirname( __file__ ) )
class FlowCorr:
def __init__( self, chiptype, xblock=None, yblock=None, rootdir='.', method='' ):
'''
Initialize a flowcorr object
chiptype: a ChipType object
xblock: The full-chip column origin; setting to None returns a full chip
yblock: The full-chip row origin; setting to None returns a full chip
rootdir: root directory to look for flowcorr files.
search will also look up a level, within the
module directory, and in the dats directory
        method: if specified, automatically loads the corresponding flowcorr
'buffer'
'file'
if advanced options need to be passed into the load functions,
                they should be called separately with method being left empty
'''
self.chiptype = ChipType(chiptype)
self.xblock = xblock
self.yblock = yblock
self.searchpath = [ rootdir,
os.path.join( rootdir, '..' ),
os.path.join( moduleDir, '../dats' ),
moduleDir,
os.path.join( moduleDir, 'dats' ) ]
if method.lower() == 'buffer':
self.frombuffer()
elif method.lower() == 'file':
self.fromfile()
elif not method:
pass
else:
raise ValueError( 'Flowcorr method "%s" is undefined' % method )
def frombuffer(self, flow_file='C2_step.dat', force=False, framerate=15):
'''
Returns the flow correction measured from a buffered flow
        flow_file: measurement file used to calculate the flowcorr
force: calculate the data from raw, even if an existing analysis is present
framerate: fps
'''
try:
if force:
raise IOError
self.filename = os.path.join( self.searchpath[0], 'flowcorr_slopes.dat' )
self.flowcorr = datprops.read_dat( self.filename, 'flowcorr', chiptype=self.chiptype )
except IOError:
# Read the dat file
found = False
for dirname in self.searchpath:
self.filename = os.path.join( dirname, flow_file )
if os.path.exists( self.filename ):
found = True
break
if not found:
raise IOError( '%s was not found' % self.filename )
data = DatFile( self.filename, chiptype=self.chiptype )
# Calculate properties
self.flowcorr = data.measure_slope( method='maxslope' )
            self.time_offset = np.min(data.measure_t0( method='maxslope' )) #TODO: This is not very robust. should just shift t0 here and record the offset instead of trying to do things later with it
self.pinned = data.measure_pinned()
# remove pins
self.flowcorr[ self.pinned ] = 1
# Save a few more variables
            self.t0 = data.measure_t0( method='maxslope' )
            self.actpix = data.measure_actpix()
self.phpoint = data.measure_plateau()
return self.flowcorr
def fromfile( self, fc_type ):
'''
Loads the flow correction from file based on the chip type and scales up from miniblocks to full chips or analysis blocks.
This method only differentiates based on thumbnail or full chip/analysis block. All other differences are rolled into ChipType.
fc_type: can be 'ecc' or 'wt'.
flowcorr file is defined by self.chiptype.flowcorr_<fc_type>
'''
# Thumbnails are enough different to have their own function
if self.chiptype.tn == 'self':
return self.tn_fromfile( fc_type )
# Spatial thumbnails are just subsampled data. We don't need special loading
# Calculate the size of the flowcorr files
xMiniBlocks = self.chiptype.chipC / self.chiptype.miniC
yMiniBlocks = self.chiptype.chipR / self.chiptype.miniR
# Set the flowcorr path starting local before using the default
for path in self.searchpath:
filename = os.path.join( path, '%s.dat' % getattr( self.chiptype, 'flowcorr_%s' % fc_type ) )
try:
flowcorr = datprops.read_dat( filename , metric='flowcorr' )
break
except IOError:
continue
        else:
            raise IOError( 'Could not find a flowcorr file' )
# Scale the flowcorr data to the entire well
sizes = [ ( 96, 168 ), # This is an unscaled P1-sized flowcorr file. This is the most likely size when reading fc_flowcorr.dat
( yMiniBlocks, xMiniBlocks ), # This is the historical per-chip file. This is ( 96, 168 ) for a P1/540 chip
( self.chiptype.chipR, self.chiptype.chipC ) ] # This is the pre-compiled value
try:
fc_xMiniBlocks = self.chiptype.fullchip.chipC / self.chiptype.fullchip.miniC
fc_yMiniBlocks = self.chiptype.fullchip.chipR / self.chiptype.fullchip.miniR
sizes.append( ( fc_yMiniBlocks, fc_xMiniBlocks ) )
sizes.append( ( self.chiptype.fullchip.chipR, self.chiptype.fullchip.chipC ) )
except AttributeError:
pass
for size in sizes:
try:
flowcorr = flowcorr.reshape( size )
break
except ValueError:
# Keep going until you itterate through all possible sizes. If you still get an error, then die
if size == sizes[-1]:
print 'Possible Sizes'
print sizes
print 'Elements'
print flowcorr.shape
raise ValueError( 'Could not determine flowcorr size' )
continue
# Resize the image to the current size
if self.chiptype.burger is None:
# This is a standard resize operation
flowcorr = imtools.imresize( flowcorr, ( self.chiptype.chipR, self.chiptype.chipC ) )
elif self.chiptype.spatn != 'self':
# This is burger mode on a full size chip
flowcorr = imtools.imresize( flowcorr, ( self.chiptype.burger.chipR, self.chiptype.burger.chipC ) )
# Clip off the top and bottom
first = ( flowcorr.shape[0] - self.chiptype.chipR ) / 2
last = first + self.chiptype.chipR
flowcorr = flowcorr[ first:last, : ]
else:
# This is burger mode on a spatial thumbnail
# This has the effect of adding more rows beyond the 800 typically used for a spatial thumbnail
rows = self.chiptype.chipR * self.chiptype.burger.chipR / self.chiptype.fullchip.chipR
flowcorr = imtools.imresize( flowcorr, ( rows, self.chiptype.chipC ) )
# Clip off the top and bottom
first = ( flowcorr.shape[0] - self.chiptype.chipR ) / 2
last = first + self.chiptype.chipR
flowcorr = flowcorr[ first:last, : ]
# Reduce to a single analysis block
if ( self.xblock is not None and self.yblock is not None and
self.xblock != -1 and self.yblock != -1 ):
flowcorr = flowcorr[ self.yblock: self.chiptype.blockR + self.yblock,
self.xblock: self.chiptype.blockC + self.xblock ]
self.flowcorr = flowcorr
return flowcorr
def tn_fromfile( self, fc_type ):
'''
Gets the per-well flowcorrection for a STANDARD (not spatial) thumbnail
'''
# Calculate the size of the flowcorr files
xMiniBlocks = self.chiptype.chipC / self.chiptype.miniC
yMiniBlocks = self.chiptype.chipR / self.chiptype.miniR
# Set the flowcorr path starting local before using the default
for path in self.searchpath:
filename = os.path.join( path, '%s.dat' % getattr( self.chiptype, 'flowcorr_%s' % fc_type ) )
try:
flowcorr = datprops.read_dat( filename , metric='flowcorr' )
break
except IOError:
continue
        else:
            raise IOError( 'Could not find a flowcorr file' )
# Scale the flowcorr data to the entire well
sizes = ( ( 96, 168 ), # This is an unscaled P1-sized flowcorr file.
( 48, 96 ) , # This is an unscaled P0-sized flowcorr file.
( yMiniBlocks, xMiniBlocks ), # This is the historical thumbnail flowcorr (swapped x & y - STP 7/13/2015)
( self.chiptype.fullchip.chipR, self.chiptype.fullchip.chipC ) ) # This is the pre-compiled value
for size in sizes:
try:
flowcorr = flowcorr.reshape( size )
break
except ValueError:
# Keep going until you itterate through all possible sizes. If you still get an error, then die
if size == sizes[-1]:
raise ValueError( 'Could not determine flowcorr size' )
continue
# Resize the image to the full chip size
if self.chiptype.burger is None:
# This is a standard resize operation based on the full chip
flowcorr = imtools.imresize( flowcorr, ( self.chiptype.fullchip.chipR, self.chiptype.fullchip.chipC ) )
else:
# This is burger mode on a regular thumbnail. Full chip is actually specified by burger and then we have to clip
flowcorr = imtools.imresize( flowcorr, ( self.chiptype.burger.chipR, self.chiptype.burger.chipC ) )
# Clip off the top and bottom
first = ( flowcorr.shape[0] - self.chiptype.fullchip.chipR ) / 2
last = first + self.chiptype.fullchip.chipR
flowcorr = flowcorr[ first:last, : ]
# Reduce to thumbnail data
tnflowcorr = np.zeros( ( self.chiptype.chipR, self.chiptype.chipC ) )
for r in range( self.chiptype.yBlocks ):
tn_rstart = r*self.chiptype.blockR
tn_rend = tn_rstart + self.chiptype.blockR
#fc_rstart = int( (r+0.5)*self.chiptype.fullchip.blockR ) - self.chiptype.blockR/2
# middle of block in case the thumbnail different yBlocks center within the block
fc_rstart = int( (r+0.5)*(self.chiptype.fullchip.chipR/self.chiptype.yBlocks) ) - self.chiptype.blockR/2
fc_rend = fc_rstart + self.chiptype.blockR
for c in range( self.chiptype.xBlocks ):
tn_cstart = c*self.chiptype.blockC
tn_cend = tn_cstart + self.chiptype.blockC
fc_cstart = int( (c+0.5)*self.chiptype.fullchip.blockC ) - self.chiptype.blockC/2
fc_cend = fc_cstart + self.chiptype.blockC
tnflowcorr[ tn_rstart:tn_rend, tn_cstart:tn_cend ] = flowcorr[ fc_rstart:fc_rend, fc_cstart:fc_cend ]
self.flowcorr = tnflowcorr
return self.flowcorr
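if __name__ == '__main__':
    # Minimal usage sketch (illustrative only, not part of the original module).
    # It assumes that 'P1.1.17' names a valid ChipType and that the buffered-flow
    # file C2_step.dat can be found on the default search path.
    fc = FlowCorr( 'P1.1.17', rootdir='.' )
    corr = fc.frombuffer()   # per-well flow correction measured from C2_step.dat
    # alternatively, a precomputed correction could be loaded with fc.fromfile( 'wt' )
    print corr.shape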
|
addons/mendeley/tests/test_serializer.py | gaybro8777/osf.io | 628 | 24138 | # -*- coding: utf-8 -*-
"""Serializer tests for the Mendeley addon."""
import pytest
from addons.base.tests.serializers import CitationAddonSerializerTestSuiteMixin
from addons.base.tests.utils import MockFolder
from addons.mendeley.tests.factories import MendeleyAccountFactory
from addons.mendeley.serializer import MendeleySerializer
from tests.base import OsfTestCase
pytestmark = pytest.mark.django_db
class TestMendeleySerializer(CitationAddonSerializerTestSuiteMixin, OsfTestCase):
addon_short_name = 'mendeley'
Serializer = MendeleySerializer
ExternalAccountFactory = MendeleyAccountFactory
folder = MockFolder()
|
src/sage/coding/information_set_decoder.py | UCD4IDS/sage | 1,742 | 24143 | # -*- coding: utf-8 -*-
r"""
Information-set decoding for linear codes
Information-set decoding is a probabilistic decoding strategy that
essentially tries to guess `k` correct positions in the received word,
where `k` is the dimension of the code. A codeword agreeing with the
received word on the guessed position can easily be computed, and their
difference is one possible error vector. A "correct" guess is assumed when
this error vector has low Hamming weight.
This simple algorithm is not very efficient in itself, but there are numerous
refinements to the strategy that make it very capable over rather large codes.
Still, the decoding algorithm is exponential in dimension of the code and the
log of the field size.
The ISD strategy requires choosing how many errors is deemed acceptable. One
choice could be `d/2`, where `d` is the minimum distance of the code, but
sometimes `d` is not known, or sometimes more errors are expected. If one
chooses anything above `d/2`, the algorithm does not guarantee to return a
nearest codeword.
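A minimal illustration, using the Lee-Brickell variant implemented below (the
choice of the extended Golay code and of two errors is arbitrary)::
    sage: C = codes.GolayCode(GF(2))
    sage: from sage.coding.information_set_decoder import LeeBrickellISDAlgorithm
    sage: A = LeeBrickellISDAlgorithm(C, (0,2))
    sage: c = C.random_element()
    sage: Chan = channels.StaticErrorRateChannel(C.ambient_space(), 2)
    sage: A.decode(Chan(c)) in C
    True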
AUTHORS:
- <NAME>, <NAME>, <NAME> (2016-02, 2017-06): initial
version
"""
#******************************************************************************
# Copyright (C) 2017 <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# http://www.gnu.org/licenses/
#******************************************************************************
from sage.all import ZZ, Integer, vector, SageObject, binomial
from .decoder import Decoder
def _format_decoding_interval(decoding_interval):
r"""
Format the decoding interval of an ISD decoder when calling ``_repr_`` or
``_latex_``.
EXAMPLES::
sage: from sage.coding.information_set_decoder import _format_decoding_interval
sage: _format_decoding_interval((0,3))
'up to 3'
sage: _format_decoding_interval((2,3))
'between 2 and 3'
sage: _format_decoding_interval((3,3))
'exactly 3'
"""
if decoding_interval[0] == 0:
return "up to {0}".format(decoding_interval[1])
if decoding_interval[0] == decoding_interval[1]:
return "exactly {0}".format(decoding_interval[0])
return "between {0} and {1}".format(decoding_interval[0], decoding_interval[1])
class InformationSetAlgorithm(SageObject):
r"""
Abstract class for algorithms for
:class:`sage.coding.information_set_decoder.LinearCodeInformationSetDecoder`.
To sub-class this class, override ``decode`` and ``calibrate``, and call the
super constructor from ``__init__``.
INPUT:
- ``code`` -- A linear code for which to decode.
- ``number_errors`` -- an integer, the maximal number of errors to accept as
correct decoding. An interval can also be specified by giving a pair of
integers, where both end values are taken to be in the interval.
- ``algorithm_name`` -- A name for the specific ISD algorithm used (used for
printing).
- ``parameters`` -- (optional) A dictionary for setting the parameters of
this ISD algorithm. Note that sanity checking this dictionary for the
individual sub-classes should be done in the sub-class constructor.
EXAMPLES::
sage: from sage.coding.information_set_decoder import LeeBrickellISDAlgorithm
sage: LeeBrickellISDAlgorithm(codes.GolayCode(GF(2)), (0,4))
ISD Algorithm (Lee-Brickell) for [24, 12, 8] Extended Golay code over GF(2) decoding up to 4 errors
A minimal working example of how to sub-class::
sage: from sage.coding.information_set_decoder import InformationSetAlgorithm
sage: from sage.coding.decoder import DecodingError
sage: class MinimalISD(InformationSetAlgorithm):
....: def __init__(self, code, decoding_interval):
....: super(MinimalISD, self).__init__(code, decoding_interval, "MinimalISD")
....: def calibrate(self):
....: self._parameters = { } # calibrate parameters here
....: self._time_estimate = 10.0 # calibrated time estimate
....: def decode(self, r):
....: # decoding algorithm here
....: raise DecodingError("I failed")
sage: MinimalISD(codes.GolayCode(GF(2)), (0,4))
ISD Algorithm (MinimalISD) for [24, 12, 8] Extended Golay code over GF(2) decoding up to 4 errors
"""
def __init__(self, code, decoding_interval, algorithm_name, parameters = None):
r"""
TESTS::
sage: from sage.coding.information_set_decoder import LeeBrickellISDAlgorithm
sage: LeeBrickellISDAlgorithm(codes.GolayCode(GF(2)), (0,4))
ISD Algorithm (Lee-Brickell) for [24, 12, 8] Extended Golay code over GF(2) decoding up to 4 errors
"""
self._code = code
self._decoding_interval = decoding_interval
self._algorithm_name = algorithm_name
if parameters:
self._parameters = parameters
self._parameters_specified = True
else:
self._parameters_specified = False
def name(self):
r"""
Return the name of this ISD algorithm.
EXAMPLES::
sage: C = codes.GolayCode(GF(2))
sage: from sage.coding.information_set_decoder import LeeBrickellISDAlgorithm
sage: A = LeeBrickellISDAlgorithm(C, (0,2))
sage: A.name()
'Lee-Brickell'
"""
return self._algorithm_name
def decode(self, r):
r"""
Decode a received word using this ISD decoding algorithm.
Must be overridden by sub-classes.
EXAMPLES::
sage: M = matrix(GF(2), [[1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0],\
[0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 1],\
[0, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0],\
[0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1],\
[0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1]])
sage: C = codes.LinearCode(M)
sage: from sage.coding.information_set_decoder import LeeBrickellISDAlgorithm
sage: A = LeeBrickellISDAlgorithm(C, (2,2))
sage: r = vector(GF(2), [1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
sage: A.decode(r)
(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
"""
raise NotImplementedError
def time_estimate(self):
"""
Estimate for how long this ISD algorithm takes to perform a single decoding.
The estimate is for a received word whose number of errors is within the
decoding interval of this ISD algorithm.
EXAMPLES::
sage: C = codes.GolayCode(GF(2))
sage: from sage.coding.information_set_decoder import LeeBrickellISDAlgorithm
sage: A = LeeBrickellISDAlgorithm(C, (0,2))
sage: A.time_estimate() #random
0.0008162108571427874
"""
if not hasattr(self, "_time_estimate"):
self.calibrate()
return self._time_estimate
def calibrate(self):
"""
Uses test computations to estimate optimal values for any parameters
this ISD algorithm may take.
Must be overridden by sub-classes.
If ``self._parameters_specified`` is ``False``, this method shall set
``self._parameters`` to the best parameters estimated. It shall always
set ``self._time_estimate`` to the time estimate of using
``self._parameters``.
EXAMPLES::
sage: from sage.coding.information_set_decoder import LeeBrickellISDAlgorithm
sage: C = codes.GolayCode(GF(2))
sage: A = LeeBrickellISDAlgorithm(C, (0,3))
sage: A.calibrate()
sage: A.parameters() #random
{'search_size': 1}
"""
raise NotImplementedError
def code(self):
r"""
Return the code associated to this ISD algorithm.
EXAMPLES::
sage: from sage.coding.information_set_decoder import LeeBrickellISDAlgorithm
sage: C = codes.GolayCode(GF(2))
sage: A = LeeBrickellISDAlgorithm(C, (0,3))
sage: A.code()
[24, 12, 8] Extended Golay code over GF(2)
"""
return self._code
def decoding_interval(self):
r"""
A pair of integers specifying the interval of number of errors this
ISD algorithm will attempt to correct.
The interval includes both end values.
EXAMPLES::
sage: C = codes.GolayCode(GF(2))
sage: from sage.coding.information_set_decoder import LeeBrickellISDAlgorithm
sage: A = LeeBrickellISDAlgorithm(C, (0,2))
sage: A.decoding_interval()
(0, 2)
"""
return self._decoding_interval
def parameters(self):
"""
Return any parameters this ISD algorithm uses.
If the parameters have not already been set, efficient values will first
be calibrated and returned.
EXAMPLES::
sage: C = codes.GolayCode(GF(2))
sage: from sage.coding.information_set_decoder import LeeBrickellISDAlgorithm
sage: A = LeeBrickellISDAlgorithm(C, (0,4), search_size=3)
sage: A.parameters()
{'search_size': 3}
If not set, calibration will determine a sensible value::
sage: A = LeeBrickellISDAlgorithm(C, (0,4))
sage: A.parameters() #random
{'search_size': 1}
"""
if not hasattr(self, "_parameters"):
self.calibrate()
return self._parameters
def __eq__(self, other):
r"""
Tests equality between ISD algorithm objects.
EXAMPLES::
sage: C = codes.GolayCode(GF(2))
sage: from sage.coding.information_set_decoder import LeeBrickellISDAlgorithm
sage: A = LeeBrickellISDAlgorithm(C, (0,4))
sage: A == LeeBrickellISDAlgorithm(C, (0,4))
True
sage: A == LeeBrickellISDAlgorithm(C, (0,5))
False
sage: other_search = 1 if A.parameters()['search_size'] != 1 else 2
sage: A == LeeBrickellISDAlgorithm(C, (0,4), search_size=other_search)
False
ISD Algorithm objects can be equal only if they have both calibrated
the parameters, or if they both had it set and to the same value::
sage: A2 = LeeBrickellISDAlgorithm(C, (0,4), search_size=A.parameters()['search_size'])
sage: A == A2
False
sage: A2 == LeeBrickellISDAlgorithm(C, (0,4), search_size=A.parameters()['search_size'])
True
"""
return isinstance(other, self.__class__)\
and self.code() == other.code()\
and self.decoding_interval() == other.decoding_interval()\
and self._parameters_specified == other._parameters_specified\
and (not self._parameters_specified or self.parameters() == other.parameters())
def __hash__(self):
r"""
Returns the hash value of ``self``.
EXAMPLES::
sage: C = codes.GolayCode(GF(2))
sage: from sage.coding.information_set_decoder import LeeBrickellISDAlgorithm
sage: A = LeeBrickellISDAlgorithm(C, (0,4))
sage: hash(A) #random
5884357732955478461
sage: C2 = codes.GolayCode(GF(3))
sage: A2 = LeeBrickellISDAlgorithm(C2, (0,4))
sage: hash(A) != hash(A2)
True
"""
return hash(str(self))
def _repr_(self):
r"""
Returns a string representation of this ISD algorithm.
EXAMPLES::
sage: C = codes.GolayCode(GF(2))
sage: from sage.coding.information_set_decoder import LeeBrickellISDAlgorithm
sage: A = LeeBrickellISDAlgorithm(C, (0,4))
sage: A
ISD Algorithm (Lee-Brickell) for [24, 12, 8] Extended Golay code over GF(2) decoding up to 4 errors
"""
return "ISD Algorithm ({}) for {} decoding {} errors ".format(self._algorithm_name, self.code(), _format_decoding_interval(self.decoding_interval()))
def _latex_(self):
r"""
Returns a latex representation of this ISD algorithm.
EXAMPLES::
sage: C = codes.GolayCode(GF(2))
sage: from sage.coding.information_set_decoder import LeeBrickellISDAlgorithm
sage: A = LeeBrickellISDAlgorithm(C, (0,4))
sage: latex(A)
\textnormal{ISD Algorithm (Lee-Brickell) for }[24, 12, 8] \textnormal{ Extended Golay Code over } \Bold{F}_{2} \textnormal{decoding up to 4 errors}
"""
return "\\textnormal{{ISD Algorithm ({}) for }}{} \\textnormal{{decoding {} errors}}".format(self._algorithm_name, self.code()._latex_(), _format_decoding_interval(self.decoding_interval()))
class LeeBrickellISDAlgorithm(InformationSetAlgorithm):
r"""
The Lee-Brickell algorithm for information-set decoding.
For a description of the information-set decoding paradigm (ISD), see
:class:`sage.coding.information_set_decoder.LinearCodeInformationSetDecoder`.
This implements the Lee-Brickell variant of ISD, see [LB1988]_ for the
original binary case, and [Pet2010]_ for the `q`-ary extension.
Let `C` be a `[n, k]`-linear code over `GF(q)`, and let `r \in GF(q)^{n}` be
a received word in a transmission. We seek the codeword whose Hamming
distance from `r` is minimal. Let `p` and `w` be integers, such that `0\leq
    p\leq w`. Let `G` be a generator matrix of `C`, and for any set of indices
`I`, we write `G_{I}` for the matrix formed by the columns of `G` indexed by
`I`. The Lee-Brickell ISD loops the following until it is successful:
1. Choose an information set `I` of `C`.
2. Compute `r' = r - r_{I}\times G_I^{-1} \times G`
3. Consider every size-`p` subset of `I`, `\{a_1, \dots, a_p\}`.
For each `m = (m_1, \dots, m_p) \in GF(q)^{p}`, compute
the error vector `e = r' - \sum_{i=1}^{p} m_i\times g_{a_i}`,
4. If `e` has a Hamming weight at most `w`, return `r-e`.
INPUT:
- ``code`` -- A linear code for which to decode.
- ``decoding_interval`` -- a pair of integers specifying an interval of
number of errors to correct. Includes both end values.
- ``search_size`` -- (optional) the size of subsets to use on step 3 of the
algorithm as described above. Usually a small number. It has to be at most
the largest allowed number of errors. A good choice will be approximated
if this option is not set; see
:meth:`sage.coding.LeeBrickellISDAlgorithm.calibrate`
for details.
EXAMPLES::
sage: C = codes.GolayCode(GF(2))
sage: from sage.coding.information_set_decoder import LeeBrickellISDAlgorithm
sage: A = LeeBrickellISDAlgorithm(C, (0,4)); A
ISD Algorithm (Lee-Brickell) for [24, 12, 8] Extended Golay code over GF(2) decoding up to 4 errors
sage: C = codes.GolayCode(GF(2))
sage: A = LeeBrickellISDAlgorithm(C, (2,3)); A
ISD Algorithm (Lee-Brickell) for [24, 12, 8] Extended Golay code over GF(2) decoding between 2 and 3 errors
"""
def __init__(self, code, decoding_interval, search_size = None):
r"""
TESTS:
If ``search_size`` is not a positive integer, or is bigger than the
decoding radius, an error will be raised::
sage: C = codes.GolayCode(GF(2))
sage: from sage.coding.information_set_decoder import LeeBrickellISDAlgorithm
sage: LeeBrickellISDAlgorithm(C, (1, 3), search_size=-1)
Traceback (most recent call last):
...
ValueError: The search size parameter has to be a positive integer
sage: LeeBrickellISDAlgorithm(C, (1, 3), search_size=4)
Traceback (most recent call last):
...
ValueError: The search size parameter has to be at most the maximal number of allowed errors
"""
if search_size is not None:
if not isinstance(search_size, (Integer, int)) or search_size < 0:
raise ValueError("The search size parameter has to be a positive integer")
if search_size > decoding_interval[1]:
raise ValueError("The search size parameter has to be at most"
" the maximal number of allowed errors")
super(LeeBrickellISDAlgorithm, self).__init__(code, decoding_interval, "Lee-Brickell",
parameters={ 'search_size': search_size })
self._parameters_specified = True
else:
self._parameters_specified = False
super(LeeBrickellISDAlgorithm, self).__init__(code, decoding_interval, "Lee-Brickell")
def decode(self, r):
r"""
The Lee-Brickell algorithm as described in the class doc.
Note that either parameters must be given at construction time or
:meth:`sage.coding.information_set_decoder.InformationSetAlgorithm.calibrate()`
should be called before calling this method.
INPUT:
- `r` -- a received word, i.e. a vector in the ambient space of
:meth:`decoder.Decoder.code`.
OUTPUT: A codeword whose distance to `r` satisfies ``self.decoding_interval()``.
EXAMPLES::
sage: M = matrix(GF(2), [[1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0],\
[0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 1],\
[0, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0],\
[0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1],\
[0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1]])
sage: C = codes.LinearCode(M)
sage: from sage.coding.information_set_decoder import LeeBrickellISDAlgorithm
sage: A = LeeBrickellISDAlgorithm(C, (2,2))
sage: c = C.random_element()
sage: Chan = channels.StaticErrorRateChannel(C.ambient_space(), 2)
sage: r = Chan(c)
sage: c_out = A.decode(r)
sage: (r - c).hamming_weight() == 2
True
"""
import itertools
from sage.misc.prandom import sample
C = self.code()
n, k = C.length(), C.dimension()
tau = self.decoding_interval()
p = self.parameters()['search_size']
F = C.base_ring()
G = C.generator_matrix()
Fstar = F.list()[1:]
while True:
# step 1.
I = sample(range(n), k)
Gi = G.matrix_from_columns(I)
try:
Gi_inv = Gi.inverse()
except ZeroDivisionError:
# I was not an information set
continue
Gt = Gi_inv * G
#step 2.
y = r - vector([r[i] for i in I]) * Gt
g = Gt.rows()
#step 3.
for pi in range(p+1):
for A in itertools.combinations(range(k), pi):
for m in itertools.product(Fstar, repeat=pi):
e = y - sum(m[i]*g[A[i]] for i in range(pi))
errs = e.hamming_weight()
if errs >= tau[0] and errs <= tau[1]:
return r - e
def calibrate(self):
r"""
Run some test computations to estimate the optimal search size.
Let `p` be the search size. We should simply choose `p` such that the
average expected time is minimal. The algorithm succeeds when it chooses
an information set with at least `k - p` correct positions, where `k` is
the dimension of the code and `p` the search size. The expected number
of trials we need before this occurs is:
.. MATH::
\binom{n}{k}/(\rho \sum_{i=0}^p \binom{n-\tau}{k-i} \binom{\tau}{i})
Here `\rho` is the fraction of `k` subsets of indices which are
information sets. If `T` is the average time for steps 1 and 2
(including selecting `I` until an information set is found), while `P(i)`
is the time for the body of the ``for``-loop in step 3 for `m` of weight
`i`, then each information set trial takes roughly time `T +
\sum_{i=0}^{p} P(i) \binom{k}{i} (q-1)^i`, where `\GF{q}` is the base
field.
The values `T` and `P` are here estimated by running a few test
computations similar to those done by the decoding algorithm.
We don't explicitly estimate `\rho`.
OUTPUT: Does not output anything but sets private fields used by
:meth:`sage.coding.information_set_decoder.InformationSetAlgorithm.parameters()`
and
:meth:`sage.coding.information_set_decoder.InformationSetAlgorithm.time_estimate()``.
EXAMPLES::
sage: from sage.coding.information_set_decoder import LeeBrickellISDAlgorithm
sage: C = codes.GolayCode(GF(2))
sage: A = LeeBrickellISDAlgorithm(C, (0,3)); A
ISD Algorithm (Lee-Brickell) for [24, 12, 8] Extended Golay code over GF(2) decoding up to 3 errors
sage: A.calibrate()
sage: A.parameters() #random
{'search_size': 1}
sage: A.time_estimate() #random
0.0008162108571427874
If we specify the parameter at construction time, calibrate does not override this choice::
sage: A = LeeBrickellISDAlgorithm(C, (0,3), search_size=2); A
ISD Algorithm (Lee-Brickell) for [24, 12, 8] Extended Golay code over GF(2) decoding up to 3 errors
sage: A.parameters()
{'search_size': 2}
sage: A.calibrate()
sage: A.parameters()
{'search_size': 2}
sage: A.time_estimate() #random
0.0008162108571427874
"""
from sage.matrix.special import random_matrix
from sage.misc.prandom import sample, randint
from sage.modules.free_module_element import random_vector
from time import process_time
C = self.code()
G = C.generator_matrix()
n, k = C.length(), C.dimension()
tau = self.decoding_interval()[1]
F = C.base_ring()
q = F.cardinality()
Fstar = F.list()[1:]
def time_information_set_steps():
before = process_time()
while True:
I = sample(range(n), k)
Gi = G.matrix_from_columns(I)
try:
Gi_inv = Gi.inverse()
except ZeroDivisionError:
continue
return process_time() - before
def time_search_loop(p):
y = random_vector(F, n)
g = random_matrix(F, p, n).rows()
scalars = [ [ Fstar[randint(0,q-2)] for i in range(p) ]
for s in range(100) ]
before = process_time()
for m in scalars:
e = y - sum(m[i]*g[i] for i in range(p))
return (process_time() - before) / 100.
T = sum([ time_information_set_steps() for s in range(5) ]) / 5.
P = [ time_search_loop(p) for p in range(tau+1) ]
def compute_estimate(p):
iters = 1.* binomial(n, k)/ \
sum( binomial(n-tau, k-i)*binomial(tau,i) for i in range(p+1) )
estimate = iters*(T + \
sum(P[pi] * (q-1)**pi * binomial(k, pi) for pi in range(p+1) ))
return estimate
if self._parameters_specified:
self._time_estimate = compute_estimate(self._parameters['search_size'])
else:
self._calibrate_select([ compute_estimate(p) for p in range(tau+1) ])
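    # --- Explanatory note added for readability; not part of the original Sage source. ---
    # compute_estimate(p) above mirrors the formula in the calibrate() docstring:
    # ``iters`` is the expected number of information-set trials (the binomial ratio,
    # with the fraction rho of information sets treated as 1), and each trial is
    # charged T plus the cost of enumerating every support of size at most p together
    # with all (q-1)^i patterns of nonzero coefficients.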
def _calibrate_select(self, estimates):
r"""
Internal method used by ``self.calibrate()``.
Given the timing estimates, select the best parameter and set the
appropriate private fields.
INPUT:
- `estimates` - list of time estimates, for the search size set to the
index of the list entry.
OUTPUT: None, but sets the private fields `self._parameters` and
`self._time_estimate`.
TESTS::
sage: from sage.coding.information_set_decoder import LeeBrickellISDAlgorithm
sage: C = codes.GolayCode(GF(2))
sage: A = LeeBrickellISDAlgorithm(C, (0,3)); A
ISD Algorithm (Lee-Brickell) for [24, 12, 8] Extended Golay code over GF(2) decoding up to 3 errors
sage: A._calibrate_select([ 1.0, 2.0, 3.0, 0.5, 0.6, 1.0 ])
sage: A._time_estimate
0.500000000000000
sage: A._parameters
{'search_size': 3}
"""
search_size = 0
for p in range(1, len(estimates)):
if estimates[p] < estimates[search_size]:
search_size = p
self._parameters = { 'search_size': search_size }
self._time_estimate = estimates[search_size]
class LinearCodeInformationSetDecoder(Decoder):
r"""
Information-set decoder for any linear code.
Information-set decoding is a probabilistic decoding strategy that
essentially tries to guess `k` correct positions in the received word,
where `k` is the dimension of the code. A codeword agreeing with the
received word on the guessed position can easily be computed, and their
difference is one possible error vector. A "correct" guess is assumed when
this error vector has low Hamming weight.
The ISD strategy requires choosing how many errors is deemed acceptable. One
choice could be `d/2`, where `d` is the minimum distance of the code, but
sometimes `d` is not known, or sometimes more errors are expected. If one
chooses anything above `d/2`, the algorithm does not guarantee to return a
nearest codeword.
This simple algorithm is not very efficient in itself, but there are numerous
refinements to the strategy. Specifying which strategy to use among those
that Sage knows is done using the ``algorithm`` keyword. If this is not set,
an efficient choice will be made for you.
The various ISD algorithms all need to select a number of parameters. If you
choose a specific algorithm to use, you can pass these parameters as named
parameters directly to this class' constructor. If you don't, efficient
choices will be calibrated for you.
.. WARNING::
If there is no codeword within the specified decoding distance, then the
decoder may never terminate, or it may raise a
:exc:`sage.coding.decoder.DecodingError` exception, depending on the ISD
algorithm used.
INPUT:
- ``code`` -- A linear code for which to decode.
- ``number_errors`` -- an integer, the maximal number of errors to accept as
correct decoding. An interval can also be specified by giving a pair of
integers, where both end values are taken to be in the interval.
- ``algorithm`` -- (optional) the string name of the ISD algorithm to
employ. If this is not set, an appropriate one will be chosen.
A constructed
:class:`sage.coding.information_set_decoder.InformationSetAlgorithm`
object may also be given. In this case ``number_errors`` must match that
of the passed algorithm.
- ``**kwargs`` -- (optional) any number of named arguments passed on to the
ISD algorithm. Such are usually not required, and they can only be set if
``algorithm`` is set to a specific algorithm. See the documentation for
each individual ISD algorithm class for information on any named arguments
they may accept. The easiest way to access this documentation is to first
construct the decoder without passing any named arguments, then accessing
the ISD algorithm using
:meth:`sage.coding.information_set_decoder.LinearCodeInformationSetDecoder.algorithm`,
and then reading the `?` help on the constructed object.
EXAMPLES:
The principal way to access this class is through the
:meth:`sage.code.linear_code.AbstractLinearCode.decoder` method::
sage: C = codes.GolayCode(GF(3))
sage: D = C.decoder("InformationSet", 2); D
Information-set decoder (Lee-Brickell) for [12, 6, 6] Extended Golay code over GF(3) decoding up to 2 errors
You can specify which algorithm you wish to use, and you should do so in
order to pass special parameters to it::
sage: C = codes.GolayCode(GF(3))
sage: D2 = C.decoder("InformationSet", 2, algorithm="Lee-Brickell", search_size=2); D2
Information-set decoder (Lee-Brickell) for [12, 6, 6] Extended Golay code over GF(3) decoding up to 2 errors
sage: D2.algorithm()
ISD Algorithm (Lee-Brickell) for [12, 6, 6] Extended Golay code over GF(3) decoding up to 2 errors
sage: D2.algorithm().parameters()
{'search_size': 2}
If you specify an algorithm which is not known, you get a friendly error message::
sage: C.decoder("InformationSet", 2, algorithm="NoSuchThing")
Traceback (most recent call last):
...
ValueError: Unknown ISD algorithm 'NoSuchThing'. The known algorithms are ['Lee-Brickell'].
You can also construct an ISD algorithm separately and pass that. This is
mostly useful if you write your own ISD algorithms::
sage: from sage.coding.information_set_decoder import LeeBrickellISDAlgorithm
sage: A = LeeBrickellISDAlgorithm(C, (0, 2))
sage: D = C.decoder("InformationSet", 2, algorithm=A); D
Information-set decoder (Lee-Brickell) for [12, 6, 6] Extended Golay code over GF(3) decoding up to 2 errors
When passing an already constructed ISD algorithm, you can't also pass
parameters to the ISD algorithm when constructing the decoder::
sage: C.decoder("InformationSet", 2, algorithm=A, search_size=2)
Traceback (most recent call last):
...
ValueError: ISD algorithm arguments are not allowed when supplying a constructed ISD algorithm
We can also information-set decode non-binary codes::
sage: C = codes.GolayCode(GF(3))
sage: D = C.decoder("InformationSet", 2); D
Information-set decoder (Lee-Brickell) for [12, 6, 6] Extended Golay code over GF(3) decoding up to 2 errors
There are two other ways to access this class::
sage: D = codes.decoders.LinearCodeInformationSetDecoder(C, 2); D
Information-set decoder (Lee-Brickell) for [12, 6, 6] Extended Golay code over GF(3) decoding up to 2 errors
sage: from sage.coding.information_set_decoder import LinearCodeInformationSetDecoder
sage: D = LinearCodeInformationSetDecoder(C, 2); D
Information-set decoder (Lee-Brickell) for [12, 6, 6] Extended Golay code over GF(3) decoding up to 2 errors
"""
def __init__(self, code, number_errors, algorithm=None, **kwargs):
r"""
TESTS:
``number_errors`` has to be either a list of Integers/ints, a tuple of Integers/ints,
or an Integer/int::
sage: C = codes.GolayCode(GF(2))
sage: D = C.decoder("InformationSet", "aa")
Traceback (most recent call last):
...
ValueError: number_errors should be an integer or a pair of integers
If ``number_errors`` is passed as a list/tuple, it has to contain only
two values, the first one being at most the second one::
sage: C = codes.GolayCode(GF(2))
sage: D = C.decoder("InformationSet", (4, 2))
Traceback (most recent call last):
...
ValueError: number_errors should be a positive integer or a valid interval within the positive integers
You cannot ask the decoder to correct more errors than the code length::
sage: D = C.decoder("InformationSet", 25)
Traceback (most recent call last):
...
ValueError: The provided number of errors should be at most the code's length
If ``algorithm`` is not set, additional parameters cannot be passed to
the ISD algorithm::
sage: D = C.decoder("InformationSet", 2, search_size=2)
Traceback (most recent call last):
...
ValueError: Additional arguments to an information-set decoder algorithm are only allowed if a specific algorithm is selected by setting the algorithm keyword
If ``algorithm`` is set to a constructed ISD algorithm, additional
parameters cannot be passed to the ISD algorithm::
sage: from sage.coding.information_set_decoder import LeeBrickellISDAlgorithm
sage: A = LeeBrickellISDAlgorithm(C, (0, 2))
sage: D = C.decoder("InformationSet", 2, A, search_size=3)
Traceback (most recent call last):
...
ValueError: ISD algorithm arguments are not allowed when supplying a constructed ISD algorithm
If ``algorithm`` is set to a constructed
:class:`sage.coding.information_set_decoder.InformationSetAlgorithm`,
then ``number_errors`` must match that of the algorithm::
sage: C = codes.GolayCode(GF(2))
sage: from sage.coding.information_set_decoder import LeeBrickellISDAlgorithm
sage: A = LeeBrickellISDAlgorithm(C, (0, 2))
sage: D = C.decoder("InformationSet", 2, A); D
Information-set decoder (Lee-Brickell) for [24, 12, 8] Extended Golay code over GF(2) decoding up to 2 errors
sage: D = C.decoder("InformationSet", (0,2), A); D
Information-set decoder (Lee-Brickell) for [24, 12, 8] Extended Golay code over GF(2) decoding up to 2 errors
sage: D = C.decoder("InformationSet", 3, A); D
Traceback (most recent call last):
...
ValueError: number_errors must match that of the passed ISD algorithm
"""
if isinstance(number_errors, (Integer, int)):
number_errors = (0, number_errors)
if isinstance(number_errors, (tuple, list)) and len(number_errors) == 2 \
and number_errors[0] in ZZ and number_errors[1] in ZZ:
if 0 > number_errors[0] or number_errors[0] > number_errors[1]:
raise ValueError(
"number_errors should be a positive integer or"
" a valid interval within the positive integers")
if number_errors[1] > code.length():
raise ValueError("The provided number of errors should be at"
" most the code's length")
else:
raise ValueError("number_errors should be an integer or a pair of integers")
self._number_errors = number_errors
super(LinearCodeInformationSetDecoder, self).__init__(
code, code.ambient_space(), code._default_encoder_name)
if algorithm is None:
if kwargs:
raise ValueError("Additional arguments to an information-set decoder"
" algorithm are only allowed if a specific"
" algorithm is selected by setting the algorithm"
" keyword")
algorithm = "Lee-Brickell"
algorithm_names = LinearCodeInformationSetDecoder.known_algorithms(dictionary=True)
if isinstance(algorithm, InformationSetAlgorithm):
if kwargs:
raise ValueError("ISD algorithm arguments are not allowed when"
" supplying a constructed ISD algorithm")
if number_errors != algorithm.decoding_interval():
raise ValueError("number_errors must match that of the passed"
" ISD algorithm")
self._algorithm = algorithm
elif algorithm in algorithm_names:
self._algorithm = algorithm_names[algorithm](code, number_errors, **kwargs)
else:
raise ValueError("Unknown ISD algorithm '{}'."
" The known algorithms are {}."\
.format(algorithm, sorted(algorithm_names)))
_known_algorithms = {
"Lee-Brickell": LeeBrickellISDAlgorithm
}
@staticmethod
def known_algorithms(dictionary=False):
r"""
Return the list of ISD algorithms that Sage knows.
Passing any of these to the constructor of
:class:`sage.coding.information_set_decoder.LinearCodeInformationSetDecoder`
will make the ISD decoder use that algorithm.
INPUT:
- ``dictionary`` - optional. If set to ``True``, return a ``dict``
mapping decoding algorithm name to its class.
OUTPUT: a list of strings or a ``dict`` from string to ISD algorithm class.
EXAMPLES::
sage: from sage.coding.information_set_decoder import LinearCodeInformationSetDecoder
sage: sorted(LinearCodeInformationSetDecoder.known_algorithms())
['Lee-Brickell']
"""
if dictionary:
return LinearCodeInformationSetDecoder._known_algorithms
else:
return LinearCodeInformationSetDecoder._known_algorithms.keys()
def algorithm(self):
r"""
Return the ISD algorithm used by this ISD decoder.
EXAMPLES::
sage: C = codes.GolayCode(GF(2))
sage: D = C.decoder("InformationSet", (2,4), "Lee-Brickell")
sage: D.algorithm()
ISD Algorithm (Lee-Brickell) for [24, 12, 8] Extended Golay code over GF(2) decoding between 2 and 4 errors
"""
return self._algorithm
def decode_to_code(self, r):
r"""
Decodes a received word with respect to the associated code of this decoder.
.. WARNING::
If there is no codeword within the decoding radius of this decoder, this
method may never terminate, or it may raise a
:exc:`sage.coding.decoder.DecodingError` exception, depending on the ISD
algorithm used.
INPUT:
- ``r`` -- a vector in the ambient space of :meth:`decoder.Decoder.code`.
OUTPUT: a codeword of :meth:`decoder.Decoder.code`.
EXAMPLES::
sage: M = matrix(GF(2), [[1,0,0,0,0,0,1,0,1,0,1,1,0,0,1],\
[0,1,0,0,0,1,1,1,1,0,0,0,0,1,1],\
[0,0,1,0,0,0,0,1,0,1,1,1,1,1,0],\
[0,0,0,1,0,0,1,0,1,0,0,0,1,1,0],\
[0,0,0,0,1,0,0,0,1,0,1,1,0,1,0]])
sage: C = LinearCode(M)
sage: c = C.random_element()
sage: Chan = channels.StaticErrorRateChannel(C.ambient_space(), 2)
sage: r = Chan(c)
sage: D = C.decoder('InformationSet', 2)
sage: c == D.decode_to_code(r)
True
Information-set decoding a non-binary code::
sage: C = codes.GolayCode(GF(3)); C
[12, 6, 6] Extended Golay code over GF(3)
sage: c = C.random_element()
sage: Chan = channels.StaticErrorRateChannel(C.ambient_space(), 2)
sage: r = Chan(c)
sage: D = C.decoder('InformationSet', 2)
sage: c == D.decode_to_code(r)
True
Let's take a bigger example, for which syndrome decoding or
nearest-neighbor decoding would be infeasible: the `[59, 30]` Quadratic
Residue code over `\GF{3}` has true minimum distance 17, so we can
correct 8 errors::
sage: C = codes.QuadraticResidueCode(59, GF(3))
sage: c = C.random_element()
sage: Chan = channels.StaticErrorRateChannel(C.ambient_space(), 2)
sage: r = Chan(c)
sage: D = C.decoder('InformationSet', 8)
sage: c == D.decode_to_code(r) # long time
True
"""
C = self.code()
if r in C:
return r
return self.algorithm().decode(r)
def decoding_radius(self):
r"""
Return the maximal number of errors this decoder can decode.
EXAMPLES::
sage: C = codes.GolayCode(GF(2))
sage: D = C.decoder("InformationSet", 2)
sage: D.decoding_radius()
2
"""
return self._number_errors[1]
def decoding_interval(self):
r"""
A pair of integers specifying the interval of number of errors this
decoder will attempt to correct.
The interval includes both end values.
EXAMPLES::
sage: C = codes.GolayCode(GF(2))
sage: D = C.decoder("InformationSet", 2)
sage: D.decoding_interval()
(0, 2)
"""
return self._number_errors
def _repr_(self):
r"""
Returns a string representation of this decoding algorithm.
EXAMPLES::
sage: C = codes.GolayCode(GF(2))
sage: D = C.decoder("InformationSet", 2)
sage: D
Information-set decoder (Lee-Brickell) for [24, 12, 8] Extended Golay code over GF(2) decoding up to 2 errors
"""
return "Information-set decoder ({}) for {} decoding {} errors ".format(self.algorithm().name(), self.code(), _format_decoding_interval(self.decoding_interval()))
def _latex_(self):
r"""
Returns a latex representation of this decoding algorithm.
EXAMPLES::
sage: C = codes.GolayCode(GF(2))
sage: from sage.coding.information_set_decoder import LeeBrickellISDAlgorithm
sage: D = C.decoder("InformationSet", 2)
sage: latex(D)
\textnormal{Information-set decoder (Lee-Brickell) for }[24, 12, 8] \textnormal{ Extended Golay Code over } \Bold{F}_{2} \textnormal{decoding up to 2 errors}
"""
return "\\textnormal{{Information-set decoder ({}) for }}{} \\textnormal{{decoding {} errors}}".format(self.algorithm().name(), self.code()._latex_(), _format_decoding_interval(self.decoding_interval()))
LinearCodeInformationSetDecoder._decoder_type = {"hard-decision",
"probabilistic", "not-always-closest", "bounded-distance", "might-fail"}
|
mayo/session/train.py | deep-fry/mayo | 110 | 24147 | import math
import tensorflow as tf
from mayo.log import log
from mayo.util import (
Percent, memoize_method, memoize_property, object_from_params)
from mayo.session.base import SessionBase
class Train(SessionBase):
mode = 'train'
def __init__(self, config):
super().__init__(config)
self._run_train_ops = True
self._setup_train_operation()
self._init()
self._checkpoint_epoch = ''
@memoize_property
def learning_rate(self):
params = self.config.train.learning_rate
lr_class, params = object_from_params(params)
if lr_class is tf.train.piecewise_constant:
# `tf.train.piecewise_constant` uses argument name 'x' instead
# just to make life more difficult
step_name = 'x'
else:
step_name = 'global_step'
params[step_name] = self.num_epochs
log.debug(
'Using learning rate {!r} with params {}.'
.format(lr_class.__name__, params))
return lr_class(**params)
@memoize_property
def optimizer(self):
params = self.config.train.optimizer
optimizer_class, params = object_from_params(params)
log.debug('Using optimizer {!r}.'.format(optimizer_class.__name__))
return optimizer_class(self.learning_rate, **params)
@staticmethod
def _average_gradients(tower_grads):
tower_grads = list(tower_grads)
if len(tower_grads) == 1:
return tower_grads[0]
average_grads = []
for grad_and_vars in zip(*tower_grads):
grads = []
for g, v in grad_and_vars:
# add 0 dimension to the gradients to represent the tower
if g is None:
raise ValueError(
'Gradient for variable {} is None, please check '
'connection.'.format(v))
g = tf.expand_dims(g, 0)
grads.append(g)
# average over the 'tower' dimension.
grad = tf.concat(axis=0, values=grads)
grad = tf.reduce_mean(grad, 0)
# simply return the first tower's pointer to the Variable
v = grad_and_vars[0][1]
grad_and_var = (grad, v)
average_grads.append(grad_and_var)
return average_grads
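    # --- Explanatory note added for readability; not part of the original mayo source. ---
    # ``tower_grads`` is an iterable over per-GPU towers, each entry a list of
    # (gradient, variable) pairs as returned by ``Optimizer.compute_gradients()``; with
    # two towers the structure is roughly
    #     [[(g0_a, v_a), (g0_b, v_b)], [(g1_a, v_a), (g1_b, v_b)]]
    # and the result pairs each variable with the mean of its per-tower gradients.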
@staticmethod
def _loss_formatter(key, name):
def formatter(estimator):
loss_mean, loss_std = estimator.get_mean_std(key)
if math.isnan(loss_mean):
raise ValueError('Model diverged with a nan-valued loss.')
loss_std = '±{}'.format(Percent(loss_std / loss_mean))
return '{}: {:10f}{:5}'.format(name, loss_mean, loss_std)
return formatter
@memoize_method
def _losses_and_gradients(self):
formatter = self._loss_formatter('regularization', 'regu')
regularization = self.get_collection(
tf.GraphKeys.REGULARIZATION_LOSSES, first_gpu=True)
if regularization:
self.estimator.register(
tf.add_n(regularization), 'regularization',
formatter=formatter)
def gradient(net, prediction, truth):
loss = [self.task.train(net, prediction, truth)] + regularization
loss = tf.add_n(loss)
return loss, self.optimizer.compute_gradients(loss)
tower_losses, tower_grads = zip(*self.task.map(gradient))
return tower_losses, self._average_gradients(tower_grads)
def _setup_train_operation(self):
ops = {}
self._losses, gradients = self._losses_and_gradients()
self._mean_loss = tf.reduce_mean(self._losses)
ops['app_grad'] = self.optimizer.apply_gradients(gradients)
# update ops
update_ops = list(self.get_collection(tf.GraphKeys.UPDATE_OPS))
ops['update'] = tf.group(*update_ops, name='update')
log.debug('Using update operations: {}'.format(update_ops))
log.debug('Using training operations: {}'.format(ops))
if self.extra_train_ops:
ops['extra'] = self.extra_train_ops
self._train_op = ops
def _init(self):
self.load_checkpoint(self.config.system.checkpoint.load)
formatter = self._loss_formatter('loss', 'loss')
self.estimator.register(self._mean_loss, 'loss', formatter=formatter)
def reset_num_epochs(self):
        log.info('Resetting number of training epochs of the model...')
self.run(self.imgs_seen.initializer)
self.change.reset('checkpoint.epoch')
self.change.reset('step')
def once(self):
train_op = self._train_op if self._run_train_ops else []
tasks = [train_op, self.num_epochs]
_, num_epochs = self.run(tasks, batch=True)
return num_epochs
def overriders_assign(self):
log.info('Assigning overridden values of parameters to parameters...')
self._overriders_call('assign')
def overriders_update(self):
log.info('Updating overrider internal variables...')
self._overriders_call('update')
def overriders_reset(self):
log.info('Resetting overriders internal variables...')
self._overriders_call('reset')
def _iteration(self, max_epochs=None):
system = self.config.system
epoch = self.once()
floor_epoch = math.floor(epoch)
cp_interval = system.checkpoint.get('save.interval', 0)
if self.change.every('checkpoint.epoch', floor_epoch, cp_interval):
log.info(
'Saving checkpoint at epoch {}...'.format(epoch), update=True)
with log.demote():
self.save_checkpoint(floor_epoch)
self._checkpoint_epoch = floor_epoch
max_epochs = max_epochs or system.max_epochs
if max_epochs and epoch >= max_epochs:
log.info(
'Maximum epoch count {} reached.'.format(max_epochs))
if self._checkpoint_epoch and floor_epoch > self._checkpoint_epoch:
log.info('Saving final checkpoint...')
self.save_checkpoint(floor_epoch)
return False
return True
def train(self, max_epochs=None):
# final debug outputs
lr = self.run(self.learning_rate)
log.info('Training start with a learning rate {}.'.format(lr))
try:
# train iterations
while self._iteration(max_epochs=max_epochs):
pass
except KeyboardInterrupt:
log.info('Stopped.')
save = self.config.system.checkpoint.get('save', {})
if save:
countdown = save.get('countdown', 0)
if log.countdown('Saving checkpoint', countdown):
self.save_checkpoint('latest')
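# --- Illustrative usage sketch added for readability; not part of the original mayo source. ---
# Assuming ``config`` is a mayo configuration object exposing the ``train.learning_rate``,
# ``train.optimizer`` and ``system.checkpoint`` sections used above, a training run would
# look roughly like:
#
#     session = Train(config)
#     session.train()   # max_epochs defaults to config.system.max_epochs
#
# train() repeatedly calls _iteration()/once(), saving a checkpoint at the configured
# interval and stopping once the maximum epoch count is reached or on KeyboardInterrupt.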
|
siem_integrations/clx_query_service/clxquery/apps.py | mdemoret-nv/clx | 143 | 24158 | <reponame>mdemoret-nv/clx<gh_stars>100-1000
from django.apps import AppConfig
class ClxQueryConfig(AppConfig):
name = "clxquery"
|
kolibri/plugins/utils/options.py | MBKayro/kolibri | 545 | 24180 | import copy
import logging
import warnings
from kolibri.plugins.registry import registered_plugins
logger = logging.getLogger(__name__)
def __validate_config_option(
section, name, base_config_spec, plugin_specs, module_path
):
# Raise an error if someone tries to overwrite a base option
# except for the default value.
if section in base_config_spec:
if name in base_config_spec[section]:
raise ValueError("Cannot overwrite a core Kolibri options spec option")
# Warn if a plugin tries to add an option that another plugin has already added
if section in plugin_specs:
if name in plugin_specs[section]:
warnings.warn(
"{plugin} set an option {option} in section {section} but {plugins} had already set it".format(
plugin=module_path,
plugins=", ".join(plugin_specs[section][name]),
option=name,
section=section,
)
)
plugin_specs[section][name].append(module_path)
else:
# If not create the list for this option name
# to track this and future modifications
plugin_specs[section][name] = [module_path]
else:
# If not create the dict for the section
# and the list for this option name
plugin_specs[section] = {name: [module_path]}
def __process_config_spec(
option_spec, base_config_spec, plugin_specs, module_path, final_spec
):
for section, opts in option_spec.items():
for name, attrs in opts.items():
__validate_config_option(
section, name, base_config_spec, plugin_specs, module_path
)
if section not in final_spec:
final_spec[section] = {}
final_spec[section][name] = attrs
def __validate_option_default(section, name, plugin_default_overrides, module_path):
# Warn if a plugin tries to add an option that another plugin has already added
if section in plugin_default_overrides:
if name in plugin_default_overrides[section]:
warnings.warn(
"{plugin} set an option default {option} in section {section} but {plugins} had already set it".format(
plugin=module_path,
plugins=", ".join(plugin_default_overrides[section][name]),
option=name,
section=section,
)
)
plugin_default_overrides[section][name].append(module_path)
else:
# If not create the list for this option name
# to track this and future modifications
plugin_default_overrides[section][name] = [module_path]
else:
# If not create the dict for the section
# and the list for this option name
plugin_default_overrides[section] = {name: [module_path]}
def __process_option_defaults(
option_defaults, base_config_spec, plugin_default_overrides, module_path, final_spec
):
for section, opts in option_defaults.items():
for name, default in opts.items():
__validate_option_default(
section, name, plugin_default_overrides, module_path
)
if section not in final_spec:
logger.error(
"Tried to set a new default in section {}, but this is not a valid section".format(
section
)
)
continue
if name in final_spec[section]:
# This is valid, so set a default
# Note that we do not validation here for now,
# so it is up to the user to ensure the default value
# is kosher.
final_spec[section][name]["default"] = default
else:
logger.error(
"Tried to set a new default in section {}, for option {} but this is not a valid option".format(
section, name
)
)
def extend_config_spec(base_config_spec):
plugin_specs = {}
final_spec = copy.deepcopy(base_config_spec)
# First process options config spec additions
for plugin_instance in registered_plugins:
plugin_options = plugin_instance.options_module
if plugin_options and hasattr(plugin_options, "option_spec"):
module_path = plugin_instance.module_path
option_spec = plugin_options.option_spec
__process_config_spec(
option_spec, base_config_spec, plugin_specs, module_path, final_spec
)
# Now process default value overrides, do this second in order to allow plugins
# to override default values for other plugins!
plugin_default_overrides = {}
for plugin_instance in registered_plugins:
plugin_options = plugin_instance.option_defaults_module
if plugin_options and hasattr(plugin_options, "option_defaults"):
module_path = plugin_instance.module_path
option_defaults = plugin_options.option_defaults
__process_option_defaults(
option_defaults,
base_config_spec,
plugin_default_overrides,
module_path,
final_spec,
)
return final_spec
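# --- Illustrative sketch added for readability; not part of the original kolibri source. ---
# A plugin's options module is expected to expose an ``option_spec`` dict shaped as
# consumed by __process_config_spec above (section -> option name -> attribute dict),
# and may optionally expose ``option_defaults`` to override defaults declared elsewhere.
# The section, option and attribute names below are hypothetical examples, not real
# Kolibri options:
#
#     option_spec = {
#         "MyPlugin": {
#             "ENABLE_FEATURE": {"type": "boolean", "default": False},
#         },
#     }
#
#     option_defaults = {
#         "MyPlugin": {"ENABLE_FEATURE": True},
#     }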
|
pydeps/__main__.py | miketheman/pydeps | 981 | 24194 | <filename>pydeps/__main__.py<gh_stars>100-1000
from .pydeps import pydeps
pydeps()
|
tests/data/custom_loader2.py | cambiegroup/aizynthfinder | 219 | 24207 | <gh_stars>100-1000
def extract_smiles():
return ["c1ccccc1", "Cc1ccccc1", "c1ccccc1", "CCO"]
|
tools/find_protoc.py | Kill-Console/xresloader | 219 | 24227 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import sys
import os
import stat
protoc_exec = None
def find_protoc():
global protoc_exec
if protoc_exec is not None:
return protoc_exec
script_dir = os.path.dirname(os.path.realpath(__file__))
if sys.platform[0:5].lower() == "linux":
protoc_exec = os.path.join(script_dir, 'linux_x86_64', 'protoc')
elif sys.platform[0:6].lower() == "darwin":
protoc_exec = os.path.join(script_dir, 'macos_x86_64', 'protoc')
else:
protoc_exec = os.path.join(script_dir, 'windows_x86_64', 'protoc.exe')
os.chmod(protoc_exec, stat.S_IRWXU + stat.S_IRWXG + stat.S_IRWXO)
return protoc_exec
""" run as a executable """
if __name__ == "__main__":
print(find_protoc())
|
data/test/test_queue.py | giuseppe/quay | 2,027 | 24231 | <filename>data/test/test_queue.py
import json
import time
import pytest
from contextlib import contextmanager
from datetime import datetime, timedelta
from functools import wraps
from data.database import QueueItem
from data.queue import (
WorkQueue,
MINIMUM_EXTENSION,
queue_items_locked,
queue_items_available,
queue_items_available_unlocked,
)
from test.fixtures import *
QUEUE_NAME = "testqueuename"
class AutoUpdatingQueue(object):
def __init__(self, queue_to_wrap):
self._queue = queue_to_wrap
def _wrapper(self, func):
@wraps(func)
def wrapper(*args, **kwargs):
to_return = func(*args, **kwargs)
self._queue.update_metrics()
return to_return
return wrapper
def __getattr__(self, attr_name):
method_or_attr = getattr(self._queue, attr_name)
if callable(method_or_attr):
return self._wrapper(method_or_attr)
else:
return method_or_attr
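# --- Explanatory note added for readability; not part of the original test file. ---
# AutoUpdatingQueue proxies every attribute of the wrapped WorkQueue; callable
# attributes are wrapped so that update_metrics() runs after each call, keeping the
# prometheus gauges asserted on below current without the tests refreshing them by hand.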
TEST_MESSAGE_1 = json.dumps({"data": 1})
TEST_MESSAGE_2 = json.dumps({"data": 2})
TEST_MESSAGES = [json.dumps({"data": str(i)}) for i in range(1, 101)]
@contextmanager
def fake_transaction(arg):
yield
@pytest.fixture()
def transaction_factory():
return fake_transaction
def gauge_value(g):
return g.collect()[0].samples[0].value
@pytest.fixture()
def queue(transaction_factory, initialized_db):
return AutoUpdatingQueue(WorkQueue(QUEUE_NAME, transaction_factory))
def test_get_single_item(queue, transaction_factory):
# Add a single item to the queue.
queue.put(["abc", "def"], TEST_MESSAGE_1, available_after=-1)
# Have two "instances" retrieve an item to claim. Since there is only one, both calls should
# return the same item.
now = datetime.utcnow()
first_item = queue._select_available_item(False, now)
second_item = queue._select_available_item(False, now)
assert first_item.id == second_item.id
assert first_item.state_id == second_item.state_id
# Have both "instances" now try to claim the item. Only one should succeed.
first_claimed = queue._attempt_to_claim_item(first_item, now, 300)
second_claimed = queue._attempt_to_claim_item(first_item, now, 300)
assert first_claimed
assert not second_claimed
# Ensure the item is no longer available.
assert queue.get() is None
# Ensure the item's state ID has changed.
assert first_item.state_id != QueueItem.get().state_id
def test_extend_processing(queue, transaction_factory):
# Add and retrieve a queue item.
queue.put(["abc", "def"], TEST_MESSAGE_1, available_after=-1)
queue_item = queue.get(processing_time=10)
assert queue_item is not None
existing_db_item = QueueItem.get(id=queue_item.id)
# Call extend processing with a timedelta less than the minimum and ensure its
# processing_expires and state_id do not change.
changed = queue.extend_processing(queue_item, 10 + MINIMUM_EXTENSION.total_seconds() - 1)
assert not changed
updated_db_item = QueueItem.get(id=queue_item.id)
assert existing_db_item.processing_expires == updated_db_item.processing_expires
assert existing_db_item.state_id == updated_db_item.state_id
# Call extend processing with a timedelta greater than the minimum and ensure its
# processing_expires and state_id are changed.
changed = queue.extend_processing(queue_item, 10 + MINIMUM_EXTENSION.total_seconds() + 1)
assert changed
updated_db_item = QueueItem.get(id=queue_item.id)
assert existing_db_item.processing_expires != updated_db_item.processing_expires
assert existing_db_item.state_id != updated_db_item.state_id
# Call extend processing with a timedelta less than the minimum but also with new data and
# ensure its processing_expires and state_id are changed.
changed = queue.extend_processing(
queue_item, 10 + MINIMUM_EXTENSION.total_seconds() - 1, updated_data="newbody"
)
assert changed
updated_db_item = QueueItem.get(id=queue_item.id)
assert existing_db_item.processing_expires != updated_db_item.processing_expires
assert existing_db_item.state_id != updated_db_item.state_id
assert updated_db_item.body == "newbody"
def test_same_canonical_names(queue, transaction_factory):
queue_items_locked.labels(queue._queue_name).set(0)
queue_items_available.labels(queue._queue_name).set(0)
queue_items_available_unlocked.labels(queue._queue_name).set(0)
id_1 = int(queue.put(["abc", "def"], TEST_MESSAGE_1, available_after=-1))
id_2 = int(queue.put(["abc", "def"], TEST_MESSAGE_2, available_after=-1))
assert id_1 + 1 == id_2
assert not queue._currently_processing
assert gauge_value(queue_items_locked) == 0
assert gauge_value(queue_items_locked) + gauge_value(queue_items_available_unlocked) == 1
one = queue.get(ordering_required=True)
assert one is not None
assert one.body == TEST_MESSAGE_1
assert queue._currently_processing
assert gauge_value(queue_items_locked) == 1
assert gauge_value(queue_items_locked) + gauge_value(queue_items_available_unlocked) == 1
two_fail = queue.get(ordering_required=True)
assert two_fail is None
assert gauge_value(queue_items_locked) == 1
assert gauge_value(queue_items_locked) + gauge_value(queue_items_available_unlocked) == 1
queue.complete(one)
assert not queue._currently_processing
assert gauge_value(queue_items_locked) == 0
assert gauge_value(queue_items_locked) + gauge_value(queue_items_available_unlocked) == 1
two = queue.get(ordering_required=True)
assert two is not None
assert queue._currently_processing
assert two.body == TEST_MESSAGE_2
assert gauge_value(queue_items_locked) == 1
assert gauge_value(queue_items_locked) + gauge_value(queue_items_available_unlocked) == 1
def test_different_canonical_names(queue, transaction_factory):
queue_items_locked.labels(queue._queue_name).set(0)
queue_items_available.labels(queue._queue_name).set(0)
queue_items_available_unlocked.labels(queue._queue_name).set(0)
queue.put(["abc", "def"], TEST_MESSAGE_1, available_after=-1)
queue.put(["abc", "ghi"], TEST_MESSAGE_2, available_after=-1)
assert gauge_value(queue_items_locked) == 0
assert gauge_value(queue_items_locked) + gauge_value(queue_items_available_unlocked) == 2
one = queue.get(ordering_required=True)
assert one is not None
assert one.body == TEST_MESSAGE_1
assert gauge_value(queue_items_locked) == 1
assert gauge_value(queue_items_locked) + gauge_value(queue_items_available_unlocked) == 2
two = queue.get(ordering_required=True)
assert two is not None
assert two.body == TEST_MESSAGE_2
assert gauge_value(queue_items_locked) == 2
assert gauge_value(queue_items_locked) + gauge_value(queue_items_available_unlocked) == 2
def test_canonical_name(queue, transaction_factory):
queue.put(["abc", "def"], TEST_MESSAGE_1, available_after=-1)
queue.put(["abc", "def", "ghi"], TEST_MESSAGE_1, available_after=-1)
one = queue.get(ordering_required=True)
assert QUEUE_NAME + "/abc/def/" != one
two = queue.get(ordering_required=True)
assert QUEUE_NAME + "/abc/def/ghi/" != two
def test_expiration(queue, transaction_factory):
queue_items_locked.labels(queue._queue_name).set(0)
queue_items_available.labels(queue._queue_name).set(0)
queue_items_available_unlocked.labels(queue._queue_name).set(0)
queue.put(["abc", "def"], TEST_MESSAGE_1, available_after=-1)
assert gauge_value(queue_items_locked) == 0
assert gauge_value(queue_items_locked) + gauge_value(queue_items_available_unlocked) == 1
one = queue.get(processing_time=0.5, ordering_required=True)
assert one is not None
assert gauge_value(queue_items_locked) == 1
assert gauge_value(queue_items_locked) + gauge_value(queue_items_available_unlocked) == 1
one_fail = queue.get(ordering_required=True)
assert one_fail is None
time.sleep(1)
queue.update_metrics()
assert gauge_value(queue_items_locked) == 0
assert gauge_value(queue_items_locked) + gauge_value(queue_items_available_unlocked) == 1
one_again = queue.get(ordering_required=True)
assert one_again is not None
assert gauge_value(queue_items_locked) == 1
assert gauge_value(queue_items_locked) + gauge_value(queue_items_available_unlocked) == 1
def test_alive(queue, transaction_factory):
# No queue item = not alive.
assert not queue.alive(["abc", "def"])
# Add a queue item.
queue.put(["abc", "def"], TEST_MESSAGE_1, available_after=-1)
assert queue.alive(["abc", "def"])
# Retrieve the queue item.
queue_item = queue.get()
assert queue_item is not None
assert queue.alive(["abc", "def"])
# Make sure it is running by trying to retrieve it again.
assert queue.get() is None
# Delete the queue item.
queue.complete(queue_item)
assert not queue.alive(["abc", "def"])
def test_specialized_queue(queue, transaction_factory):
queue.put(["abc", "def"], TEST_MESSAGE_1, available_after=-1)
queue.put(["def", "def"], TEST_MESSAGE_2, available_after=-1)
my_queue = AutoUpdatingQueue(WorkQueue(QUEUE_NAME, transaction_factory, ["def"]))
two = my_queue.get(ordering_required=True)
assert two is not None
assert two.body == TEST_MESSAGE_2
one_fail = my_queue.get(ordering_required=True)
assert one_fail is None
one = queue.get(ordering_required=True)
assert one is not None
assert one.body == TEST_MESSAGE_1
def test_random_queue_no_duplicates(queue, transaction_factory):
for msg in TEST_MESSAGES:
queue.put(["abc", "def"], msg, available_after=-1)
seen = set()
for _ in range(1, 101):
item = queue.get()
json_body = json.loads(item.body)
msg = str(json_body["data"])
assert msg not in seen
seen.add(msg)
for body in TEST_MESSAGES:
json_body = json.loads(body)
msg = str(json_body["data"])
assert msg in seen
def test_bulk_insert(queue, transaction_factory):
queue_items_locked.labels(queue._queue_name).set(0)
queue_items_available.labels(queue._queue_name).set(0)
queue_items_available_unlocked.labels(queue._queue_name).set(0)
with queue.batch_insert() as queue_put:
queue_put(["abc", "def"], TEST_MESSAGE_1, available_after=-1)
queue_put(["abc", "def"], TEST_MESSAGE_2, available_after=-1)
queue.update_metrics()
assert not queue._currently_processing
assert gauge_value(queue_items_locked) == 0
assert gauge_value(queue_items_locked) + gauge_value(queue_items_available_unlocked) == 1
with queue.batch_insert() as queue_put:
queue_put(["abd", "def"], TEST_MESSAGE_1, available_after=-1)
queue_put(["abd", "ghi"], TEST_MESSAGE_2, available_after=-1)
queue.update_metrics()
assert not queue._currently_processing
assert gauge_value(queue_items_locked) == 0
assert gauge_value(queue_items_locked) + gauge_value(queue_items_available_unlocked) == 3
def test_num_available_between(queue, transaction_factory):
now = datetime.utcnow()
queue.put(["abc", "def"], TEST_MESSAGE_1, available_after=-10)
queue.put(["abc", "ghi"], TEST_MESSAGE_2, available_after=-5)
# Partial results
count = queue.num_available_jobs_between(now - timedelta(seconds=8), now, ["abc"])
assert count == 1
# All results
count = queue.num_available_jobs_between(now - timedelta(seconds=20), now, ["/abc"])
assert count == 2
# No results
count = queue.num_available_jobs_between(now, now, "abc")
assert count == 0
def test_incomplete(queue, transaction_factory):
# Add an item.
queue.put(["somenamespace", "abc", "def"], TEST_MESSAGE_1, available_after=-10)
now = datetime.utcnow()
count = queue.num_available_jobs_between(now - timedelta(seconds=60), now, ["/somenamespace"])
assert count == 1
# Retrieve it.
item = queue.get()
assert item is not None
assert queue._currently_processing
# Mark it as incomplete.
queue.incomplete(item, retry_after=-1)
assert not queue._currently_processing
# Retrieve again to ensure it is once again available.
same_item = queue.get()
assert same_item is not None
assert queue._currently_processing
assert item.id == same_item.id
def test_complete(queue, transaction_factory):
# Add an item.
queue.put(["somenamespace", "abc", "def"], TEST_MESSAGE_1, available_after=-10)
now = datetime.utcnow()
count = queue.num_available_jobs_between(now - timedelta(seconds=60), now, ["/somenamespace"])
assert count == 1
# Retrieve it.
item = queue.get()
assert item is not None
assert queue._currently_processing
# Mark it as complete.
queue.complete(item)
assert not queue._currently_processing
def test_cancel(queue, transaction_factory):
# Add an item.
queue.put(["somenamespace", "abc", "def"], TEST_MESSAGE_1, available_after=-10)
queue.put(["somenamespace", "abc", "def"], TEST_MESSAGE_2, available_after=-5)
now = datetime.utcnow()
count = queue.num_available_jobs_between(now - timedelta(seconds=60), now, ["/somenamespace"])
assert count == 2
# Retrieve it.
item = queue.get()
assert item is not None
# Make sure we can cancel it.
assert queue.cancel(item.id)
now = datetime.utcnow()
count = queue.num_available_jobs_between(now - timedelta(seconds=60), now, ["/somenamespace"])
assert count == 1
# Make sure it is gone.
assert not queue.cancel(item.id)
def test_deleted_namespaced_items(queue, transaction_factory):
queue = AutoUpdatingQueue(WorkQueue(QUEUE_NAME, transaction_factory, has_namespace=True))
queue.put(["somenamespace", "abc", "def"], TEST_MESSAGE_1, available_after=-10)
queue.put(["somenamespace", "abc", "ghi"], TEST_MESSAGE_2, available_after=-5)
queue.put(["anothernamespace", "abc", "def"], TEST_MESSAGE_1, available_after=-10)
# Ensure we have 2 items under `somenamespace` and 1 item under `anothernamespace`.
now = datetime.utcnow()
count = queue.num_available_jobs_between(now - timedelta(seconds=60), now, ["/somenamespace"])
assert count == 2
count = queue.num_available_jobs_between(
now - timedelta(seconds=60), now, ["/anothernamespace"]
)
assert count == 1
# Delete all `somenamespace` items.
queue.delete_namespaced_items("somenamespace")
# Check the updated counts.
count = queue.num_available_jobs_between(now - timedelta(seconds=60), now, ["/somenamespace"])
assert count == 0
count = queue.num_available_jobs_between(
now - timedelta(seconds=60), now, ["/anothernamespace"]
)
assert count == 1
# Delete all `anothernamespace` items.
queue.delete_namespaced_items("anothernamespace")
# Check the updated counts.
count = queue.num_available_jobs_between(now - timedelta(seconds=60), now, ["/somenamespace"])
assert count == 0
count = queue.num_available_jobs_between(
now - timedelta(seconds=60), now, ["/anothernamespace"]
)
assert count == 0
|
tools/external_converter_v2/parser/operations/op_io.py | pangge/Anakin | 533 | 24234 | #! /usr/bin/env python
# Copyright (c) 2017, Cuichaowen. All rights reserved.
# -*- coding: utf-8 -*-
# ops helper dictionary
class Dictionary(object):
"""
Dictionary for op param which needs to be combined
"""
def __init__(self):
self.__dict__ = {}
def set_attr(self, **kwargs):
"""
set dict from kwargs
"""
for key in kwargs.keys():
if type(kwargs[key]) == type(dict()):
for key_inner in kwargs[key].keys():
self.__dict__[key_inner] = kwargs[key][key_inner]
else:
self.__dict__[key] = kwargs[key]
return self
def __call__(self):
"""
call class function to generate dictionary param
"""
ret = {key: self.__dict__[key] for key in self.__dict__.keys()}
return ret
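# --- Illustrative sketch added for readability; not part of the original file. ---
# set_attr() copies keyword arguments onto the instance, flattening one level of
# nested dicts, and calling the instance returns them as a plain dict, e.g.:
#
#     d = Dictionary().set_attr(need_nms=bool(), nms=dict(top_n=list()))
#     d()   # -> {'need_nms': False, 'top_n': []}  (the 'nms' wrapper is flattened away)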
########### Object tracking and detection helper ops (adu caffe layer types): op io definitions #############
# NMSSSDParameter
nms_param = Dictionary().set_attr(need_nms=bool(),
overlap_ratio=list(),
top_n=list(),
add_score=bool(),
max_candidate_n=list(),
use_soft_nms=list(),
nms_among_classes=bool(),
voting=list(),
vote_iou=list(),
nms_gpu_max_n_per_time=int())
# BBoxRegParameter
bbox_reg_param = Dictionary().set_attr(bbox_mean=list(),
bbox_std=list())
# GenerateAnchorParameter
gen_anchor_param = Dictionary().set_attr(base_size=float(),
ratios=list(),
scales=list(),
anchor_width=list(),
anchor_height=list(),
anchor_x1=list(),
anchor_y1=list(),
anchor_x2=list(),
anchor_y2=list(),
zero_anchor_center=bool())
# KPTSParameter
kpts_param = Dictionary().set_attr(kpts_exist_bottom_idx=int(),
kpts_reg_bottom_idx=int(),
kpts_reg_as_classify=bool(),
kpts_classify_width=int(),
kpts_classify_height=int(),
kpts_reg_norm_idx_st=int(),
kpts_st_for_each_class=list(),
kpts_ed_for_each_class=list(),
kpts_classify_pad_ratio=float())
# ATRSParameter
# enum NormType {
# NONE,
# WIDTH,
# HEIGHT,
# WIDTH_LOG,
# HEIGHT_LOG
# }
atrs_param = Dictionary().set_attr(atrs_reg_bottom_idx=int(),
atrs_reg_norm_idx_st=int(),
atrs_norm_type=str())
# FTRSParameter
ftrs_param = Dictionary().set_attr(ftrs_bottom_idx=int())
# SPMPParameter
spmp_param = Dictionary().set_attr(spmp_bottom_idx=int(),
spmp_class_aware=list(),
spmp_label_width=list(),
spmp_label_height=list(),
spmp_pad_ratio=list())
# Cam3dParameter
cam3d_param = Dictionary().set_attr(cam3d_bottom_idx=int())
# DetectionOutputSSDParameter
# enum MIN_SIZE_MODE {
# HEIGHT_AND_WIDTH,
# HEIGHT_OR_WIDTH
# }
detection_output_ssd_param = Dictionary().set_attr(nms=nms_param(),
threshold=list(),
channel_per_scale=int(),
class_name_list=str(),
num_class=int(),
refine_out_of_map_bbox=bool(),
class_indexes=list(),
heat_map_a=list(),
heat_map_b=list(),
threshold_objectness=float(),
proposal_min_sqrt_area=list(),
proposal_max_sqrt_area=list(),
bg_as_one_of_softmax=bool(),
use_target_type_rcnn=bool(),
im_width=float(),
im_height=float(),
rpn_proposal_output_score=bool(),
regress_agnostic=bool(),
gen_anchor=gen_anchor_param(),
allow_border=float(),
allow_border_ratio=float(),
bbox_size_add_one=bool(),
read_width_scale=float(),
read_height_scale=float(),
read_height_offset=int(),
min_size_h=float(),
min_size_w=float(),
min_size_mode="HEIGHT_AND_WIDTH",
kpts=kpts_param(),
atrs=atrs_param(),
ftrs=ftrs_param(),
spmp=spmp_param(),
cam3d=cam3d_param())
# DFMBPSROIPoolingParameter
dfmb_psroi_pooling_param = Dictionary().set_attr(heat_map_a=float(),
heat_map_b=float(),
pad_ratio=float(),
output_dim=int(),
trans_std=float(),
sample_per_part=int(),
group_height=int(),
group_width=int(),
pooled_height=int(),
pooled_width=int(),
part_height=int(),
part_width=int())
# ProposalImgScaleToCamCoordsParameter
#
# enum NormType {
# HEIGHT,
# HEIGHT_LOG
# }
#
# enum OrienType {
# PI,
# PI2
# }
proposal_img_scale_to_cam_coords_param = Dictionary().set_attr(num_class=int(),
sub_class_num_class=list(),
sub_class_bottom_idx=list(),
prj_h_norm_type=str(),
has_size3d_and_orien3d=bool(),
orien_type=str(),
cls_ids_zero_size3d_w=list(),
cls_ids_zero_size3d_l=list(),
cls_ids_zero_orien3d=list(),
cmp_pts_corner_3d=bool(),
cmp_pts_corner_2d=bool(),
ctr_2d_means=list(),
ctr_2d_stds=list(),
prj_h_means=list(),
prj_h_stds=list(),
real_h_means=list(),
real_h_stds=list(),
real_w_means=list(),
real_w_stds=list(),
real_l_means=list(),
real_l_stds=list(),
sin_means=list(),
sin_stds=list(),
cos_means=list(),
cos_stds=list(),
cam_info_idx_st_in_im_info=int(),
im_width_scale=float(),
im_height_scale=float(),
cords_offset_x=float(),
cords_offset_y=float(),
bbox_size_add_one=bool(),
rotate_coords_by_pitch=bool(),
#refine_coords_by_bbox=bool(),
#refine_min_dist=float(),
#refine_dist_for_height_ratio_one=float(),
#max_3d2d_height_ratio_for_min_dist=float(),
with_trunc_ratio=bool(),
regress_ph_rh_as_whole=bool(),
real_h_means_as_whole=list(),
real_h_stds_as_whole=list())
# RPNProposalSSD parameter
RPNProposalSSD_param = Dictionary().set_attr(detection_output_ssd=detection_output_ssd_param(),
bbox_reg=bbox_reg_param())
|
classes/dns.py | double-beep/SmokeDetector | 464 | 24269 | import dns
import dns.resolver
import dns.rdatatype
def dns_resolve(domain: str) -> list:
addrs = []
resolver = dns.resolver.Resolver(configure=False)
# Default to Google DNS
resolver.nameservers = ['8.8.8.8', '8.8.4.4']
try:
for answer in resolver.resolve(domain, 'A').response.answer:
for item in answer:
if item.rdtype == dns.rdatatype.A:
addrs.append(item.address)
except dns.resolver.NoAnswer:
pass
try:
for answer in resolver.resolve(domain, 'AAAA').response.answer:
for item in answer:
if item.rdtype == dns.rdatatype.AAAA:
addrs.append(item.address)
except dns.resolver.NoAnswer:
pass
return addrs
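# --- Illustrative usage sketch added for readability; not part of the original file. ---
# Returns every A and AAAA record found for the domain via Google public DNS, e.g.:
#
#     dns_resolve("example.com")
#     # -> ['93.184.216.34', '2606:2800:220:1:248:1893:25c8:1946']
#
# (addresses shown are illustrative; real results depend on the live DNS zone)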
|