max_stars_repo_path (stringlengths 3-269) | max_stars_repo_name (stringlengths 4-119) | max_stars_count (int64 0-191k) | id (stringlengths 1-7) | content (stringlengths 6-1.05M) | score (float64 0.23-5.13) | int_score (int64 0-5) |
---|---|---|---|---|---|---|
src/anyconfig/ioinfo/constants.py | ssato/python-anyconfig | 213 | 12795551 | #
# Copyright (C) 2018 - 2021 <NAME> <<EMAIL>>
# SPDX-License-Identifier: MIT
#
r"""ioinfo.constants to provide global constant variables.
"""
import os.path
GLOB_MARKER: str = '*'
PATH_SEP: str = os.path.sep
# vim:sw=4:ts=4:et:
| 1.5 | 2 |
run.py | zhfeing/graduation-project | 0 | 12795552 | import argparse
import os
import draw_his
import train
import test
from get_data import import_data
from model_zoo import googLeNet, resnet, load_model
import utils
import ensembel_model
def str2bool(v):
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
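# Illustrative behaviour of str2bool (comment only, not from the original script):
# str2bool('yes') -> True, str2bool('0') -> False; any other string raises argparse.ArgumentTypeError.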
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', action='store', type=str, default="0")
parser.add_argument('--lr', action='store', type=float, default=0.001)
parser.add_argument('--epochs', action='store', type=int, default=10)
parser.add_argument('--train_v', action='store', type=str, default="1.0")
parser.add_argument('--load_v', action='store', type=str, default="1.0")
default_load_data_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "get_data/data")
# default_load_data_dir = "/media/Data/datasets/cifar/cifar-10-python/data"
parser.add_argument('--load_data_dir', action='store', type=str, default=default_load_data_dir)
parser.add_argument('--retrain', type=lambda x: bool(str2bool(x)), default=False)
parser.add_argument('--regularize', type=lambda x: bool(str2bool(x)), default=False)
parser.add_argument('--batch_size', action='store', type=int, default=32)
parser.add_argument('--T', action='store', type=float, default=10)
parser.add_argument('--alpha', action='store', type=float, default=0.1)
args = parser.parse_args()
os.environ["CUDA_VISIBLE_DEVICES"] = "{}".format(args.gpu)
print("[info]: use gpu: {}".format(os.environ["CUDA_VISIBLE_DEVICES"]))
print("[info]: set learning rate: {}".format(args.lr))
print("[info]: epochs: {}".format(args.epochs))
print("[info]: train_version: {}".format(args.train_v))
print("[info]: load_version: {}".format(args.load_v))
print("[info]: retrain: {}".format(args.retrain))
print("[info]: regularize: {}".format(args.regularize))
print("[info]: batch_size: {}".format(args.batch_size))
print("[info]: T: {}".format(args.T))
print("[info]: alpha: {}".format(args.alpha))
# my_util = utils.GoogLeNetUtils()
my_util = utils.ResNetUtils()
# my_util = utils.DistillModelUtils(
# cumbersome_model=ensembel_model.my_ensembel_model(),
# T=args.T,
# alpha=args.alpha
# )
# new_model = googLeNet.my_googLeNet
new_model = resnet.my_resnet
model, create_new = load_model.load_model(
version=args.load_v,
new_model=new_model,
just_weights=False,
retrain=args.retrain,
to_cuda=True
)
train_set, valid_set, test_set = import_data.import_dataset(
load_dir=args.load_data_dir,
train_to_cuda=False,
test_to_cuda=True
)
train.train(
model=model,
train_set=train_set,
valid_set=valid_set,
lr=args.lr,
epoch=args.epochs,
batch_size=args.batch_size,
regularize=args.regularize,
train_version=args.train_v,
train_loss_function=my_util.loss_for_train,
get_true_pred=my_util.get_true_pred,
eval_loss_function=my_util.loss_for_eval,
detach_pred=my_util.detach_pred,
learn_rate_schedule=my_util.learn_rate_schedule
)
draw_his.draw_his(version=args.train_v, show=False)
model = model.cpu()
load_model.save_model(args.train_v, model)
test.test(
test_version=args.train_v,
test_set=test_set,
new_model=new_model,
batch_size=args.batch_size,
get_true_pred=my_util.get_true_pred,
eval_loss_function=my_util.loss_for_eval,
detach_pred=my_util.detach_pred,
just_weights=False
)
| 2.234375 | 2 |
audio_atari/gaze/human_utils.py | sahiljain11/ICML2019-TREX | 0 | 12795553 | <filename>audio_atari/gaze/human_utils.py
import numpy as np
import cv2
import csv
import os
import torch
from os import path, listdir
import gaze.gaze_heatmap as gh
import time
# TODO: add masking part for extra games
from baselines.common.trex_utils import normalize_state
import torch.nn.functional as F
cv2.ocl.setUseOpenCL(False)
# def normalize_state(obs):
# return obs / 255.0
# def normalize(obs, max_val):
# # TODO: discard frames with no gaze
# if(max_val != 0):
# norm_map = obs/float(max_val)
# else:
# norm_map = obs
# return norm_map
# need to grayscale and warp to 84x84
def GrayScaleWarpImage(image):
"""Warp frames to 84x84 as done in the Nature paper and later work."""
width = 84
height = 84
frame = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
frame = cv2.resize(frame, (width, height), interpolation=cv2.INTER_AREA)
#frame = np.expand_dims(frame, -1)
return frame
def MaxSkipAndWarpFrames(trajectory_dir, img_dirs, frames, actions):
"""take a trajectory file of frames and max over every 3rd and 4th observation"""
num_frames = len(frames)
assert(len(frames)==len(actions))
# print('total images:', num_frames)
skip = 4
sample_pic = np.random.choice(
listdir(path.join(trajectory_dir, img_dirs[0])))
image_path = path.join(trajectory_dir, img_dirs[0], sample_pic)
pic = cv2.imread(image_path)
obs_buffer = np.zeros((2,)+pic.shape, dtype=np.uint8)
max_frames = []
skipped_actions = []
for i in range(num_frames):
# TODO: check that i should max before warping.
img_name = frames[i] + ".png"
img_dir = img_dirs[i]
if i % skip == skip - 2:
obs = cv2.imread(path.join(trajectory_dir, img_dir, img_name))
obs_buffer[0] = obs
if i % skip == skip - 1:
obs = cv2.imread(path.join(trajectory_dir, img_dir, img_name))
obs_buffer[1] = obs
# Take the action of every 4th frame
skipped_actions.append(actions[i])
# warp max to 80x80 grayscale
image = obs_buffer.max(axis=0)
warped = GrayScaleWarpImage(image)
max_frames.append(warped)
assert(len(max_frames)==len(skipped_actions))
return max_frames, skipped_actions
def StackFrames(frames, actions):
    """stack every four frames to make an observation (84,84,4)"""
    import copy
stacked = []
stacked_obs = np.zeros((84, 84, 4))
stacked_actions = []
for i in range(len(frames)):
if i >= 3:
stacked_obs[:, :, 0] = frames[i-3]
stacked_obs[:, :, 1] = frames[i-2]
stacked_obs[:, :, 2] = frames[i-1]
stacked_obs[:, :, 3] = frames[i]
stacked.append(np.expand_dims(copy.deepcopy(stacked_obs), 0))
stacked_actions.append(actions[i-3]) #action for first frame in stack
return stacked, stacked_actions
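# Shape sanity check for StackFrames (illustrative comment only; assumes ten dummy 84x84 frames):
#   stacked, acts = StackFrames([np.zeros((84, 84))] * 10, list(range(10)))
#   len(stacked) == 7; stacked[0].shape == (1, 84, 84, 4); acts[0] == 0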
def MaxSkipGaze(gaze, heatmap_size):
"""take a list of gaze coordinates and max over every 3rd and 4th observation"""
num_frames = len(gaze)
skip = 4
obs_buffer = np.zeros((2,)+(heatmap_size, heatmap_size), dtype=np.float32)
max_frames = []
for i in range(num_frames):
g = gaze[i]
g = np.squeeze(g)
if i % skip == skip - 2:
obs_buffer[0] = g
if i % skip == skip - 1:
obs_buffer[1] = g
image = obs_buffer.max(axis=0)
max_frames.append(image)
if np.isnan(max_frames).any():
print('nan max gaze map created')
exit(1)
return max_frames
def CollapseGaze(gaze_frames, heatmap_size):
    """combine every four frames to make an observation (84,84)"""
    import copy
stacked = []
stacked_obs = np.zeros((heatmap_size, heatmap_size))
for i in range(len(gaze_frames)):
if i >= 3:
# Sum over the gaze frequency counts across four frames
stacked_obs = gaze_frames[i-3]
stacked_obs = stacked_obs + gaze_frames[i-2]
stacked_obs = stacked_obs + gaze_frames[i-1]
stacked_obs = stacked_obs + gaze_frames[i]
# Normalize the gaze mask
max_gaze_freq = np.amax(stacked_obs)
#TODO: normalize gaze with softmax
# stacked_obs = normalize(stacked_obs, max_gaze_freq)
stacked_obs = torch.tensor(stacked_obs)
norm_map = F.softmax(stacked_obs.view(1,-1), dim=1).view(heatmap_size,heatmap_size)
norm_map = norm_map.cpu().detach().numpy()
stacked.append(np.expand_dims(
copy.deepcopy(stacked_obs), 0)) # shape: (1,7,7)
return stacked
def MaxSkipReward(rewards):
"""take a list of rewards and max over every 3rd and 4th observation"""
num_frames = len(rewards)
skip = 4
max_frames = []
obs_buffer = np.zeros((2,))
for i in range(num_frames):
r = rewards[i]
if i % skip == skip - 2:
obs_buffer[0] = r
if i % skip == skip - 1:
obs_buffer[1] = r
rew = obs_buffer.max(axis=0)
max_frames.append(rew)
return max_frames
def StackReward(rewards):
    """combine every four frames to make an observation"""
    import copy
stacked = []
stacked_obs = np.zeros((1,))
for i in range(len(rewards)):
if i >= 3:
# Sum over the rewards across four frames
stacked_obs = rewards[i-3]
stacked_obs = stacked_obs + rewards[i-2]
stacked_obs = stacked_obs + rewards[i-1]
stacked_obs = stacked_obs + rewards[i]
stacked.append(np.expand_dims(copy.deepcopy(stacked_obs), 0))
return stacked
def get_sorted_traj_indices(env_name, dataset, use_gaze=False):
# need to pick out a subset of demonstrations based on desired performance
# first let's sort the demos by performance, we can use the trajectory number to index into the demos so just
# need to sort indices based on 'score'
game = env_name
# Note, I'm also going to try only keeping the full demonstrations that end in terminal
traj_indices = []
traj_scores = []
traj_dirs = []
traj_rewards = []
traj_gaze = []
traj_frames = []
traj_actions = []
print('traj length: ', len(dataset.trajectories[game]))
for t in dataset.trajectories[game]:
traj_indices.append(t)
traj_scores.append(dataset.trajectories[game][t][-1]['score'])
# a separate img_dir defined for every frame of the trajectory as two different trials could comprise an episode
traj_dirs.append([dataset.trajectories[game][t][i]['img_dir']
for i in range(len(dataset.trajectories[game][t]))])
traj_rewards.append([dataset.trajectories[game][t][i]['reward']
for i in range(len(dataset.trajectories[game][t]))])
if use_gaze:
traj_gaze.append([dataset.trajectories[game][t][i]['gaze_positions']
for i in range(len(dataset.trajectories[game][t]))])
traj_frames.append([dataset.trajectories[game][t][i]['frame']
for i in range(len(dataset.trajectories[game][t]))])
traj_actions.append([dataset.trajectories[game][t][i]['action']
for i in range(len(dataset.trajectories[game][t]))])
sorted_traj_indices = [x for _, x in sorted(
zip(traj_scores, traj_indices), key=lambda pair: pair[0])]
sorted_traj_scores = sorted(traj_scores)
sorted_traj_dirs = [x for _, x in sorted(
zip(traj_scores, traj_dirs), key=lambda pair: pair[0])]
sorted_traj_rewards = [x for _, x in sorted(
zip(traj_scores, traj_rewards), key=lambda pair: pair[0])]
if use_gaze:
sorted_traj_gaze = [x for _, x in sorted(
zip(traj_scores, traj_gaze), key=lambda pair: pair[0])]
else:
sorted_traj_gaze = []
sorted_traj_frames = [x for _, x in sorted(
zip(traj_scores, traj_frames), key=lambda pair: pair[0])]
sorted_traj_actions = [x for _, x in sorted(
zip(traj_scores, traj_actions), key=lambda pair: pair[0])]
print("Max human score", max(sorted_traj_scores))
print("Min human score", min(sorted_traj_scores))
# so how do we want to get demos? how many do we have if we remove duplicates?
seen_scores = set()
non_duplicates = []
if use_gaze:
for i, s, d, r, g, f, a in zip(sorted_traj_indices, sorted_traj_scores, sorted_traj_dirs, sorted_traj_rewards, sorted_traj_gaze, sorted_traj_frames, sorted_traj_actions):
if s not in seen_scores:
seen_scores.add(s)
non_duplicates.append((i, s, d, r, g, f, a))
else:
for i, s, d, r, f, a in zip(sorted_traj_indices, sorted_traj_scores, sorted_traj_dirs, sorted_traj_rewards, sorted_traj_frames, sorted_traj_actions):
if s not in seen_scores:
seen_scores.add(s)
non_duplicates.append((i, s, d, r, f, a))
print("num non duplicate scores", len(seen_scores))
if env_name == "spaceinvaders":
start = 0
skip = 3
elif env_name == "revenge":
start = 0
skip = 1
elif env_name == "qbert":
start = 0
skip = 3
elif env_name == "mspacman":
start = 0
skip = 1
else: # TODO: confirm best logic for all games
start = 0
skip = 3
# num_demos = 12
# demos = non_duplicates[start:num_demos*skip + start:skip]
demos = non_duplicates # don't skip any demos
return demos
def get_preprocessed_trajectories(env_name, dataset, data_dir, use_gaze=False):
"""returns an array of trajectories corresponding to what you would get running checkpoints from PPO
demonstrations are grayscaled, maxpooled, stacks of 4 with normalized values between 0 and 1 and
top section of screen is masked
"""
demos = get_sorted_traj_indices(env_name, dataset, use_gaze)
human_scores = []
human_demos = []
human_rewards = []
human_gaze = []
# print('len demos: ', len(demos))
for data in demos:
if use_gaze:
indx, score, img_dir, rew, gaze, frame, actions = data
else:
indx, score, img_dir, rew, frame, actions = data
human_scores.append(score)
# traj_dir = path.join(data_dir, 'screens', env_name, str(indx))
traj_dir = path.join(data_dir, env_name)
maxed_traj, actions = MaxSkipAndWarpFrames(traj_dir, img_dir, frame, actions)
stacked_traj, actions = StackFrames(maxed_traj, actions)
# demo_norm_mask = []
demo_norm = []
# normalize values to be between 0 and 1 and have top part masked
for ob in stacked_traj:
# print(env_name) #normalizing
# demo_norm_mask.append(preprocess(ob, env_name)[0]) # masking
demo_norm.append(normalize_state(ob)) # normalizing
# DONE: don't mask here! (masking effects gaze prediction)
print(len(frame), len(actions))
print(len(demo_norm), len(actions))
assert(len(demo_norm)==len(actions))
# sa = [(demo_norm_mask[i], actions[i]) for i in range(len(demo_norm_mask))]
sa = [(demo_norm[i], actions[i]) for i in range(len(actions))]
human_demos.append(sa)
# skip and stack reward
maxed_reward = MaxSkipReward(rew)
stacked_reward = StackReward(maxed_reward)
human_rewards.append(stacked_reward)
if use_gaze:
# generate gaze heatmaps as per Ruohan's algorithm
h = gh.DatasetWithHeatmap()
# if gaze_conv_layer == 1:
# conv_size = 26
# elif gaze_conv_layer == 2:
# conv_size = 11
# elif gaze_conv_layer == 3:
# conv_size = 9
# elif gaze_conv_layer == 4:
# conv_size = 7
# else:
# print('Invalid Gaze conv layer. Must be between 1-4.')
# exit(1)
conv_size = 84 # original image size
g = h.createGazeHeatmap(gaze, conv_size) # TODO: this heatmap is not normalized with softmax
maxed_gaze = MaxSkipGaze(g, conv_size)
stacked_gaze = CollapseGaze(maxed_gaze, conv_size)
human_gaze.append(stacked_gaze)
# print('stacked gaze: ', stacked_gaze[0].shape)
# if(use_gaze):
# print(len(human_demos[0]), len(human_rewards[0]), len(human_gaze[0]))
# print(len(human_demos), len(human_rewards), len(human_gaze))
return human_demos, human_scores, human_rewards, human_gaze
else:
return human_demos, human_scores, human_rewards | 2.234375 | 2 |
convert_smdb.py | AnonymousRandomPerson/TranscriptionUtils | 0 | 12795554 | <reponame>AnonymousRandomPerson/TranscriptionUtils
import os
import binascii
base_path = os.path.join(os.sep, 'Users', 'chenghanngan', 'Documents', 'Programs', 'Reverse Engineering', 'Support', 'Adventure Squad WAD', 'Pokemon Fushigi no Dungeon - Ikuzo! Arashi no Boukendan (Japan) (WiiWare)', '00000002_app_OUT', 'content')
file_names = [
'dun_boss.smd',
'dun_bossfloor.smd',
'dun_mount_1.smd',
'dun_mount_2.smd',
'dun_mount.smd',
'endroll.smd',
'ev_1.smd',
'ev_2.smd',
'ev_3.smd',
'ev_4.smd',
'ev_5.smd',
'ev_ed.smd',
'ev_fear.smd',
'ev_op.smd',
'gameclear.smd',
'gameover.smd',
'me_dunopen.smd',
'me_evolution_e.smd',
'me_evolution.smd',
'me_exclude.smd',
'me_item.smd',
'me_join.smd',
'me_lankup.smd',
'me_lvup.smd',
'me_reward.smd',
'me_system.smd',
'me_wave_m.smd',
'me_wave_s.smd',
'me_wind_m.smd',
'me_wind_s.smd',
'no_sound.smd',
'sys_bazar.smd',
'sys_clear.smd',
'sys_map.smd',
'sys_menu.smd',
'sys_monster.smd',
'sys_shop.smd',
'sys_steal.smd'
]
def flip_bytes(data, offset, count):
for i in range(count // 2):
start = offset + i
end = offset + count - i - 1
temp = data[start]
data[start] = data[end]
data[end] = temp
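# Illustrative check of flip_bytes (not part of the original converter): it reverses
# `count` bytes in place starting at `offset`, which is how the multi-byte fields in
# the loop below get their byte order swapped.
#   buf = bytearray(b'\x01\x02\x03\x04')
#   flip_bytes(buf, 0, 4)  # buf is now bytearray(b'\x04\x03\x02\x01')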
for file_name in file_names:
file_path = os.path.join(base_path, file_name)
with open(file_path, 'rb') as smd_file:
data = bytearray(smd_file.read())
data[3] = 0x6C
flip_bytes(data, 0x8, 4)
flip_bytes(data, 0xC, 2)
flip_bytes(data, 0xE, 2)
flip_bytes(data, 0x30, 2)
flip_bytes(data, 0x46, 2)
flip_bytes(data, 0x4C, 4)
flip_bytes(data, 0x50, 2)
flip_bytes(data, 0x52, 2)
flip_bytes(data, 0x62, 2)
flip_bytes(data, 0x64, 2)
flip_bytes(data, 0x66, 2)
for i in range(0x84, len(data), 4):
if data[i : i + 4] == bytearray([0, 0, 1, 0]):
flip_bytes(data, i + 2, 2)
flip_bytes(data, i + 8, 4)
new_file_path = os.path.join(base_path, 'Modified', file_name)
with open(new_file_path, 'wb+') as new_file:
new_file.write(data)
| 2.0625 | 2 |
pkgs/filetransferutils-pkg/src/genie/libs/filetransferutils/plugins/fileutils.py | wilbeacham85/genielibs | 0 | 12795555 | <reponame>wilbeacham85/genielibs<filename>pkgs/filetransferutils-pkg/src/genie/libs/filetransferutils/plugins/fileutils.py
""" File utils common base class """
# Logging
import logging
try:
from pyats.utils.fileutils import FileUtils as server
# Server FileUtils core implementation
# filemode_to_mode
from pyats.utils.fileutils.plugins.localhost.ftp.fileutils import \
filemode_to_mode
except ImportError:
try:
from pyats.utils.fileutils import FileUtils as server
# Server FileUtils core implementation
# filemode_to_mode
from pyats.utils.fileutils.plugins.localhost.ftp.fileutils import \
filemode_to_mode
except ImportError:
# For apidoc building only
from unittest.mock import Mock
server = Mock
filemode_to_mode = Mock()
# Parent inheritance
from .. import FileUtils as FileUtilsCommonDeviceBase
# Initialize the logger
logger = logging.getLogger(__name__)
class FileUtils(FileUtilsCommonDeviceBase):
def copyfile(self, source, destination, timeout_seconds, cmd, used_server,
*args, **kwargs):
""" Copy a file to/from NXOS device
Copy any file to/from a device to any location supported on the
device and on the running-configuration.
Parameters
----------
source: `str`
Full path to the copy 'from' location
destination: `str`
Full path to the copy 'to' location
timeout_seconds: `str`
The number of seconds to wait before aborting the operation
cmd: `str`
Command to be executed on the device
used_server: `str`
Server address/name
Returns
-------
`None`
Raises
------
Exception
When a device object is not present or device execution encountered
an unexpected behavior.
Examples
--------
# FileUtils
>>> from pyats.utils.fileutils import FileUtils
        # Instantiate a filetransferutils instance for NXOS device
>>> fu_device = FileUtils.from_device(device)
# copy file from device to server
>>> fu_device.copyfile(
... source='flash:/memleak.tcl',
... destination='ftp://10.1.0.213//auto/tftp-ssr/memleak.tcl',
... timeout_seconds='300', device=device)
# copy file from server to device
>>> fu_device.copyfile(
... source='ftp://10.1.0.213//auto/tftp-ssr/memleak.tcl',
... destination='flash:/new_file.tcl',
... timeout_seconds='300', device=device)
# copy file from server to device running configuration
>>> fu_device.copyfile(
... source='ftp://10.1.0.213//auto/tftp-ssr/memleak.tcl',
... destination='running-config',
... timeout_seconds='300', device=device)
"""
self.send_cli_to_device(cli=cmd, timeout_seconds=timeout_seconds,
used_server=used_server, **kwargs)
def parsed_dir(self, target, timeout_seconds, dir_output, *args, **kwargs):
""" Retrieve filenames contained in a directory.
Do not recurse into subdirectories, only list files at the top level
of the given directory.
Parameters
----------
target : `str`
The directory whose details are to be retrieved.
timeout_seconds : `int`
The number of seconds to wait before aborting the operation.
dir_output : `obj`
The OS corresponding `dir` parser object
Returns
-------
`dict` : Dict of filename URLs and the corresponding info (ex:size)
Raises
------
AttributeError
device object not passed in the function call
Exception
Parser encountered an issue
Examples
--------
# FileUtils
>>> from pyats.utils.fileutils import FileUtils
        # Instantiate a filetransferutils instance for NXOS device
>>> fu_device = FileUtils.from_device(device)
# list all files on the device directory 'flash:'
>>> directory_output = fu_device.dir(target='flash:',
... timeout_seconds=300, device=device)
>>> directory_output['dir']['flash:/']['files']
... (Pdb) directory_output['dir']['flash:/']['files']['boothelper.log']
{'index': '69699', 'permissions': '-rw-', 'size': '76',
'last_modified_date': 'Mar 20 2018 10:25:46 +00:00'}
"""
# Extract device from the keyword arguments, if not passed raise an
# AttributeError
if 'device' in kwargs:
device = kwargs['device']
else:
raise AttributeError("Device object is missing, can't proceed with"
" execution")
# Call the parser
obj = dir_output(device=device)
parsed_output = obj.parse()
return parsed_output
def stat(self, target, timeout_seconds, dir_output, *args, **kwargs):
""" Retrieve file details such as length and permissions.
Parameters
----------
target : `str`
The URL of the file whose details are to be retrieved.
timeout_seconds : `int`
The number of seconds to wait before aborting the operation.
dir_output : `obj`
The OS corresponding `dir` parser object
Returns
-------
`file_details` : File details including size, permissions, index
and last modified date.
Raises
------
AttributeError
device object not passed in the function call
Exception
Parser encountered an issue
Examples
--------
# FileUtils
>>> from pyats.utils.fileutils import FileUtils
        # Instantiate a filetransferutils instance for NXOS device
>>> fu_device = FileUtils.from_device(device)
# list the file details on the device 'flash:' directory
>>> directory_output = fu_device.stat(target='flash:memleak.tcl',
... timeout_seconds=300, device=device)
>>> directory_output['size']
... '104260'
>>> directory_output['permissions']
... '-rw-'
"""
# Extract device from the keyword arguments, if not passed raise an
# AttributeError
if 'device' not in kwargs:
raise AttributeError("Devisce object is missing, can't proceed with"
" execution")
parsed_output = self.parsed_dir(target=target,
timeout_seconds=timeout_seconds, dir_output=dir_output, **kwargs)
return parsed_output
def deletefile(self, target, timeout_seconds, *args, **kwargs):
""" Delete a file
Parameters
----------
target : `str`
The URL of the file whose details are to be retrieved.
timeout_seconds : `int`
The number of seconds to wait before aborting the operation.
Returns
-------
None
Raises
------
Exception
When a device object is not present or device execution encountered
an unexpected behavior.
Examples
--------
# FileUtils
>>> from pyats.utils.fileutils import FileUtils
        # Instantiate a filetransferutils instance for NXOS device
>>> fu_device = FileUtils.from_device(device)
# delete a specific file on device directory 'flash:'
>>> directory_output = fu_device.deletefile(
... target='flash:memleak_bckp.tcl',
... timeout_seconds=300, device=device)
"""
# delete flash:memleak.tcl
cmd = 'delete {f}'.format(f=target)
self.send_cli_to_device(cli=cmd, timeout_seconds=timeout_seconds,**kwargs)
def renamefile(self, source, destination, timeout_seconds, cmd,
*args, **kwargs):
""" Rename a file
Parameters
----------
source : `str`
The URL of the file to be renamed.
destination : `str`
The URL of the new file name.
timeout_seconds : `int`
Maximum allowed amount of time for the operation.
Returns
-------
None
Raises
------
Exception
When a device object is not present or device execution encountered
an unexpected behavior.
Examples
--------
# FileUtils
>>> from pyats.utils.fileutils import FileUtils
        # Instantiate a filetransferutils instance for NXOS device
>>> fu_device = FileUtils.from_device(device)
# rename the file on the device 'flash:' directory
>>> fu_device.renamefile(target='flash:memleak.tcl',
... destination='memleak_backup.tcl'
... timeout_seconds=300, device=device)
"""
self.send_cli_to_device(cli=cmd, timeout_seconds=timeout_seconds,**kwargs)
def chmod(self, target, mode, timeout_seconds, *args, **kwargs):
""" Change file permissions
Parameters
----------
target : `str`
The URL of the file whose permissions are to be changed.
mode : `int`
Same format as `os.chmod`.
timeout_seconds : `int`
Maximum allowed amount of time for the operation.
Returns
-------
`None` if operation succeeded.
"""
# To be used when implemented
# import stat as libstat
# stat.filemode(output.st_mode)
# libstat.filemode(mode)
raise NotImplementedError("The fileutils module {} "
"does not implement chmod.".format(self.__module__))
def validateserver(self, cmd, target, timeout_seconds=300, *args, **kwargs):
""" Make sure that the given server information is valid
Function that verifies if the server information given is valid, and if
the device can connect to it. It does this by saving `show clock`
output to a particular file using transfer protocol. Then deletes the
file.
Parameters
----------
cmd (`str`): Command to be executed on the device
target (`str`): File path including the protocol, server and
file location.
timeout_seconds: `str`
The number of seconds to wait before aborting the operation.
Default is 300
Returns
-------
`None`
Raises
------
Exception: If the command from the device to server is unreachable
or the protocol used doesn't support remote checks.
Examples
--------
# FileUtils
>>> from pyats.utils.fileutils import FileUtils
        # Instantiate a filetransferutils instance for NXOS device
>>> fu_device = FileUtils.from_device(device)
# Validate server connectivity
>>> fu_device.validateserver(
... target='ftp://10.1.7.250//auto/tftp-ssr/show_clock',
... timeout_seconds=300, device=device)
"""
logger.info('Verifying if server can be reached and if a temp file can '
'be created')
# Send the command
try:
self.send_cli_to_device(cli=cmd, timeout_seconds=timeout_seconds,
**kwargs)
except Exception as e:
raise type(e)('TFTP/FTP server is unreachable') from e
# Instanciate a server
futlinux = server(testbed=self.testbed)
# Check server created file
try:
futlinux.checkfile(target)
except Exception as e:
raise type(e)("Server created file can't be checked") from e
# Delete server created file
try:
futlinux.deletefile(target)
except Exception as e:
raise type(e)("Server created file can't be deleted") from e
# Great success!
logger.info("Server is ready to be used")
def copyconfiguration(self, source, destination, cmd, used_server,
timeout_seconds=300, *args, **kwargs):
""" Copy configuration to/from device
Copy configuration on the device or between locations supported on the
device and on the server.
Parameters
----------
source: `str`
Full path to the copy 'from' location
destination: `str`
Full path to the copy 'to' location
timeout_seconds: `str`
The number of seconds to wait before aborting the operation
vrf: `str`
Vrf to be used during copy operation
Returns
-------
`None`
Raises
------
Exception
When a device object is not present or device execution
encountered an unexpected behavior.
Examples
--------
# FileUtils
>>> from pyats.utils.fileutils import FileUtils
# Instantiate a filetransferutils instance for NXOS device
>>> from pyats.utils.fileutils import FileUtils
>>> fu_device = FileUtils.from_device(device)
# copy file from server to device running configuration
>>> fu_device.copyconfiguration(
... source='ftp://10.1.0.213//auto/tftp-ssr/config.py',
... destination='running-config',
... timeout_seconds='300', device=device)
# copy running-configuration to device memory
>>> fu_device.copyconfiguration(
... from_file_url='running-config',
... to_file_url='bootflash:filename',
... timeout_seconds='300', device=device)
# copy startup-configuration running-configuration
>>> fu_device.copyconfiguration(
... from_file_url='startup-config',
... to_file_url='running-config',
... timeout_seconds='300', device=device)
"""
self.send_cli_to_device(cli=cmd, timeout_seconds=timeout_seconds,
used_server=used_server, **kwargs)
| 1.898438 | 2 |
accounts/urls.py | huseyinyilmaz/worklogger | 1 | 12795556 | <filename>accounts/urls.py
from django.conf.urls import patterns, url, include
from django.contrib import admin
from accounts import views
admin.autodiscover()
urlpatterns = patterns(
'',
# Examples:
# url(r'^$', 'worklogger.views.home', name='home'),
# url(r'^blog/', include('blog.urls')),
url(r'login/$', 'django.contrib.auth.views.login', name='accounts-login'),
url(r'logout/$', views.logout_view, name='accounts-logout'),
url(r'', include('registration.backends.default.urls')),
)
| 1.960938 | 2 |
example/quality_to_dbm_short_example.py | joshschmelzle/monitornetsh | 1 | 12795557 | <filename>example/quality_to_dbm_short_example.py<gh_stars>1-10
"""
retrieves signal quality from netsh.exe and converts it to rssi
"""
import subprocess
COMMAND = ['netsh', 'wlan', 'show', 'interface']
OUT = subprocess.check_output(COMMAND)
for line in OUT.decode("utf-8").lower().splitlines():
    parameter = line.split(":", 1)[0].strip()  # parsed for clarity; not used below
try:
value = line.split(":", 1)[1].strip()
except IndexError:
continue
if "signal" in line:
quality = int(value.replace("%", ""))
if quality <= 0:
dbm = -100
elif quality >= 100:
dbm = -50
else:
dbm = (quality / 2) - 100
dbm = int(dbm)
print("quality,dbm")
print("{},{}".format(quality, dbm))
| 2.953125 | 3 |
com/shbak/effective_python/_99_etc/_03_locals_globals/main.py | sanghyunbak/effective_python | 0 | 12795558 | <reponame>sanghyunbak/effective_python<gh_stars>0
import timeit
from termcolor import colored
G_VAR = 1
G_VAR2 = 2
def global_test():
return 1
def benchmark_globals():
def sum_test():
return G_VAR + 3
result = timeit.timeit(
setup='G_VAR2',
stmt='global_test()',
globals=globals(),
number=3
)
# locals = locals()
# dir(locals)
print(colored(f'{result} sec', 'green'))
if __name__ == '__main__':
benchmark_globals() | 2.8125 | 3 |
tests/app/celery/test_research_mode_tasks.py | cds-snc/notifier-api | 41 | 12795559 | <gh_stars>10-100
import uuid
from datetime import datetime
from unittest.mock import ANY, call
import pytest
import requests_mock
from flask import current_app, json
from freezegun import freeze_time
from app.aws.mocks import (
ses_notification_callback,
sns_failed_callback,
sns_success_callback,
)
from app.celery.research_mode_tasks import (
create_fake_letter_response_file,
send_email_response,
send_sms_response,
)
from app.config import QueueNames
from tests.conftest import Matcher, set_config_values
dvla_response_file_matcher = Matcher(
"dvla_response_file",
lambda x: "NOTIFY-20180125140000-RSP.TXT" < x <= "NOTIFY-20180125140030-RSP.TXT",
)
@pytest.mark.parametrize(
"phone_number, sns_callback, sns_callback_args",
[
("+15149301630", sns_success_callback, {}),
("+15149301631", sns_success_callback, {}),
("+15149301632", sns_failed_callback, {"provider_response": "Phone is currently unreachable/unavailable"}),
("+15149301633", sns_failed_callback, {"provider_response": "Phone carrier is currently unreachable/unavailable"}),
],
)
@freeze_time("2018-01-25 14:00:30")
def test_make_sns_success_callback(notify_api, mocker, phone_number, sns_callback, sns_callback_args):
mock_task = mocker.patch("app.celery.research_mode_tasks.process_sns_results")
some_ref = str(uuid.uuid4())
now = datetime.now()
timestamp = now.strftime("%Y-%m-%d %H:%M:%S.%f")[:-3]
send_sms_response("sns", phone_number, some_ref)
mock_task.apply_async.assert_called_once_with(ANY, queue=QueueNames.RESEARCH_MODE)
message_celery = mock_task.apply_async.call_args[0][0][0]
sns_callback_args.update({"reference": some_ref, "destination": phone_number, "timestamp": timestamp})
assert message_celery == sns_callback(**sns_callback_args)
def test_make_ses_callback(notify_api, mocker):
mock_task = mocker.patch("app.celery.research_mode_tasks.process_ses_results")
some_ref = str(uuid.uuid4())
send_email_response(reference=some_ref, to="<EMAIL>")
mock_task.apply_async.assert_called_once_with(ANY, queue=QueueNames.RESEARCH_MODE)
assert mock_task.apply_async.call_args[0][0][0] == ses_notification_callback(some_ref)
@freeze_time("2018-01-25 14:00:30")
def test_create_fake_letter_response_file_uploads_response_file_s3(notify_api, mocker):
mocker.patch("app.celery.research_mode_tasks.file_exists", return_value=False)
mock_s3upload = mocker.patch("app.celery.research_mode_tasks.s3upload")
with requests_mock.Mocker() as request_mock:
request_mock.post(
"http://localhost:6011/notifications/letter/dvla",
content=b"{}",
status_code=200,
)
create_fake_letter_response_file("random-ref")
mock_s3upload.assert_called_once_with(
filedata="random-ref|Sent|0|Sorted",
region=current_app.config["AWS_REGION"],
bucket_name=current_app.config["DVLA_RESPONSE_BUCKET_NAME"],
file_location=dvla_response_file_matcher,
)
@freeze_time("2018-01-25 14:00:30")
def test_create_fake_letter_response_file_calls_dvla_callback_on_development(notify_api, mocker):
mocker.patch("app.celery.research_mode_tasks.file_exists", return_value=False)
mocker.patch("app.celery.research_mode_tasks.s3upload")
mock_task = mocker.patch("app.celery.research_mode_tasks.process_sns_results")
with set_config_values(notify_api, {"NOTIFY_ENVIRONMENT": "development"}):
some_ref = str(uuid.uuid4())
create_fake_letter_response_file(some_ref)
mock_task.apply_async.assert_called_once_with(ANY, queue=QueueNames.RESEARCH_MODE)
message = json.loads(mock_task.apply_async.call_args[0][0][0])
assert message["MessageId"] == some_ref
@freeze_time("2018-01-25 14:00:30")
def test_create_fake_letter_response_file_does_not_call_dvla_callback_on_preview(notify_api, mocker):
mocker.patch("app.celery.research_mode_tasks.file_exists", return_value=False)
mocker.patch("app.celery.research_mode_tasks.s3upload")
with set_config_values(notify_api, {"NOTIFY_ENVIRONMENT": "preview"}):
with requests_mock.Mocker() as request_mock:
create_fake_letter_response_file("random-ref")
assert request_mock.last_request is None
@freeze_time("2018-01-25 14:00:30")
def test_create_fake_letter_response_file_tries_to_create_files_with_other_filenames(notify_api, mocker):
mock_file_exists = mocker.patch("app.celery.research_mode_tasks.file_exists", side_effect=[True, True, False])
mock_s3upload = mocker.patch("app.celery.research_mode_tasks.s3upload")
create_fake_letter_response_file("random-ref")
assert mock_file_exists.mock_calls == [
call("test.notify.com-ftp", dvla_response_file_matcher),
call("test.notify.com-ftp", dvla_response_file_matcher),
call("test.notify.com-ftp", dvla_response_file_matcher),
]
mock_s3upload.assert_called_once_with(
filedata=ANY,
region=ANY,
bucket_name=ANY,
file_location=dvla_response_file_matcher,
)
@freeze_time("2018-01-25 14:00:30")
def test_create_fake_letter_response_file_gives_up_after_thirty_times(notify_api, mocker):
mock_file_exists = mocker.patch("app.celery.research_mode_tasks.file_exists", return_value=True)
mock_s3upload = mocker.patch("app.celery.research_mode_tasks.s3upload")
with pytest.raises(ValueError):
create_fake_letter_response_file("random-ref")
assert len(mock_file_exists.mock_calls) == 30
assert not mock_s3upload.called
| 2.03125 | 2 |
apt.py | henworth/dotbot-apt | 0 | 12795560 | from subprocess import CalledProcessError, check_call, DEVNULL
from typing import Any, List, Sequence
import dotbot
class Apt(dotbot.Plugin):
def can_handle(self, directive: str) -> bool:
return directive == "apt"
def handle(self, directive: str, packages: List[str]) -> bool:
success = self._run(["sudo", "apt", "update"], "Updating APT") \
and self._run(["sudo", "apt", "install", "-y"] + packages,
"Installing the APT packages: {}".format(", ".join(packages)))
if success:
self._log.info("APT packages installed successfully")
return success
def _run(self, command: Sequence[Any], low_info: str) -> bool:
self._log.lowinfo(low_info)
try:
check_call(command, stdout=DEVNULL, stderr=DEVNULL)
return True
except CalledProcessError as e:
self._log.error(e)
return False
| 2.234375 | 2 |
CombinationSumII40.py | Bit64L/LeetCode-Python- | 0 | 12795561 | <filename>CombinationSumII40.py<gh_stars>0
# encoding=utf8
class Solution:
def combinationSum2(self, candidates, target):
"""
:type candidates: List[int]
:type target: int
:rtype: List[List[int]]
"""
candidates.sort()
self.ans = []
def help(work, candidates, target, path):
if target == 0:
self.ans.append(list(path))
return
if target < 0 or len(candidates) == 0:
return
for i in range(work, len(candidates)):
if i > work and candidates[i] == candidates[i - 1]:
continue
path.append(candidates[i])
help(i + 1, candidates, target - candidates[i], path)
path.pop()
help(0, candidates, target, [])
return self.ans
solution = Solution()
print(solution.combinationSum2([1, 1, 2, 5, 6, 7, 10], 8))
# Don't know python data structure well
# How can an inner function use the outer function's variables? Use instance attributes or pass them as arguments
# Note the trick used to skip duplicate candidates at the same recursion level
| 3.515625 | 4 |
dvgutils/pipeline/observable.py | jagin/dvg-utils | 7 | 12795562 | <gh_stars>1-10
from ..helpers import Observable
observable = Observable()
| 1.15625 | 1 |
Python3/Exercises/UnluckyNumbers/unlucky_numbers.py | norbertosanchezdichi/TIL | 0 | 12795563 | for number in range(1, 21):
if number == 4 or number == 13:
state = 'UNLUCKY'
elif number % 2 == 0:
state = 'EVEN'
else:
state = 'ODD'
print(f'{number} is {state}!') | 3.828125 | 4 |
xcube_hub/models/cubegen_config_cube_config.py | bcdev/xcube-hub | 3 | 12795564 | <gh_stars>1-10
# coding: utf-8
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from xcube_hub.models.base_model_ import Model
from xcube_hub import util
class CubegenConfigCubeConfig(Model):
"""NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
Do not edit the class manually.
"""
def __init__(self, variable_names=None, crs=None, spatial_res=None, bbox=None, time_range=None, time_period=None): # noqa: E501
"""CubegenConfigCubeConfig - a model defined in OpenAPI
:param variable_names: The variable_names of this CubegenConfigCubeConfig. # noqa: E501
:type variable_names: List[str]
:param crs: The crs of this CubegenConfigCubeConfig. # noqa: E501
:type crs: str
:param spatial_res: The spatial_res of this CubegenConfigCubeConfig. # noqa: E501
:type spatial_res: float
:param bbox: The bbox of this CubegenConfigCubeConfig. # noqa: E501
:type bbox: List[float]
:param time_range: The time_range of this CubegenConfigCubeConfig. # noqa: E501
:type time_range: List[date]
:param time_period: The time_period of this CubegenConfigCubeConfig. # noqa: E501
:type time_period: str
"""
self.openapi_types = {
'variable_names': List[str],
'crs': str,
'spatial_res': float,
'bbox': List[float],
'time_range': List[date],
'time_period': str
}
self.attribute_map = {
'variable_names': 'variable_names',
'crs': 'crs',
'spatial_res': 'spatial_res',
'bbox': 'bbox',
'time_range': 'time_range',
'time_period': 'time_period'
}
self._variable_names = variable_names
self._crs = crs
self._spatial_res = spatial_res
self._bbox = bbox
self._time_range = time_range
self._time_period = time_period
@classmethod
def from_dict(cls, dikt) -> 'CubegenConfigCubeConfig':
"""Returns the dict as a model
:param dikt: A dict.
:type: dict
:return: The CubegenConfig_cube_config of this CubegenConfigCubeConfig. # noqa: E501
:rtype: CubegenConfigCubeConfig
"""
return util.deserialize_model(dikt, cls)
@property
def variable_names(self):
"""Gets the variable_names of this CubegenConfigCubeConfig.
:return: The variable_names of this CubegenConfigCubeConfig.
:rtype: List[str]
"""
return self._variable_names
@variable_names.setter
def variable_names(self, variable_names):
"""Sets the variable_names of this CubegenConfigCubeConfig.
:param variable_names: The variable_names of this CubegenConfigCubeConfig.
:type variable_names: List[str]
"""
if variable_names is None:
raise ValueError("Invalid value for `variable_names`, must not be `None`") # noqa: E501
self._variable_names = variable_names
@property
def crs(self):
"""Gets the crs of this CubegenConfigCubeConfig.
:return: The crs of this CubegenConfigCubeConfig.
:rtype: str
"""
return self._crs
@crs.setter
def crs(self, crs):
"""Sets the crs of this CubegenConfigCubeConfig.
:param crs: The crs of this CubegenConfigCubeConfig.
:type crs: str
"""
if crs is None:
raise ValueError("Invalid value for `crs`, must not be `None`") # noqa: E501
self._crs = crs
@property
def spatial_res(self):
"""Gets the spatial_res of this CubegenConfigCubeConfig.
:return: The spatial_res of this CubegenConfigCubeConfig.
:rtype: float
"""
return self._spatial_res
@spatial_res.setter
def spatial_res(self, spatial_res):
"""Sets the spatial_res of this CubegenConfigCubeConfig.
:param spatial_res: The spatial_res of this CubegenConfigCubeConfig.
:type spatial_res: float
"""
if spatial_res is None:
raise ValueError("Invalid value for `spatial_res`, must not be `None`") # noqa: E501
self._spatial_res = spatial_res
@property
def bbox(self):
"""Gets the bbox of this CubegenConfigCubeConfig.
:return: The bbox of this CubegenConfigCubeConfig.
:rtype: List[float]
"""
return self._bbox
@bbox.setter
def bbox(self, bbox):
"""Sets the bbox of this CubegenConfigCubeConfig.
:param bbox: The bbox of this CubegenConfigCubeConfig.
:type bbox: List[float]
"""
if bbox is None:
raise ValueError("Invalid value for `bbox`, must not be `None`") # noqa: E501
self._bbox = bbox
@property
def time_range(self):
"""Gets the time_range of this CubegenConfigCubeConfig.
:return: The time_range of this CubegenConfigCubeConfig.
:rtype: List[date]
"""
return self._time_range
@time_range.setter
def time_range(self, time_range):
"""Sets the time_range of this CubegenConfigCubeConfig.
:param time_range: The time_range of this CubegenConfigCubeConfig.
:type time_range: List[date]
"""
if time_range is None:
raise ValueError("Invalid value for `time_range`, must not be `None`") # noqa: E501
self._time_range = time_range
@property
def time_period(self):
"""Gets the time_period of this CubegenConfigCubeConfig.
:return: The time_period of this CubegenConfigCubeConfig.
:rtype: str
"""
return self._time_period
@time_period.setter
def time_period(self, time_period):
"""Sets the time_period of this CubegenConfigCubeConfig.
:param time_period: The time_period of this CubegenConfigCubeConfig.
:type time_period: str
"""
if time_period is None:
raise ValueError("Invalid value for `time_period`, must not be `None`") # noqa: E501
self._time_period = time_period
| 2.21875 | 2 |
generalCredentials.py | darren-ccab/Telegram-AI-chatbot-for-university-courses | 0 | 12795565 | <filename>generalCredentials.py
# General Chatbot Token
telegramToken = ""
# Announcement Channel ID
announcementID = ''
# Dialogflow Project ID
projectID = "newagent-XXXXXX"
| 1.171875 | 1 |
soc_test/interpolation_test.py | DeltaLabo/battery_characterizer | 0 | 12795566 | import matplotlib.pyplot as plt
import numpy as np
x = np.linspace(0, 2*np.pi, 10)
y = np.sin(x)  # original function
xvals = np.linspace(0, 2*np.pi, 50)
yinterp = np.interp(xvals, x, y)
plt.plot(x, y, 'o')
plt.plot(xvals, yinterp, '-x')
plt.show() | 3.40625 | 3 |
apps/submission/urls.py | renatooliveira/pugpe | 11 | 12795567 | from django.conf.urls import patterns, include, url
from django.views.generic.simple import direct_to_template
from .views import SubmissionView, SubmissionListView, SubmissionSuccess
urlpatterns = patterns('',
url(r'^$',
SubmissionView.as_view(),
name='submission',
),
url(r'^success/$',
SubmissionSuccess.as_view(),
name='success_submission',
),
url(r'^end/$',
direct_to_template, {'template': 'submission/end.html'},
name='end',
),
url(r'^votar/$',
SubmissionListView.as_view(),
name='vote',
),
url(r'^votar/erro/$',
direct_to_template, {'template': 'submission/error.html'},
name='error',
),
url(r'^votar/success/$',
direct_to_template, {'template': 'submission/success.html'},
name='success',
),
)
| 1.90625 | 2 |
tests/test_area.py | irahorecka/python-craigslist-meta | 1 | 12795568 | import pytest
from fixtures import get_title, get_url
from craigslist_meta import Site
selector = "area"
# use a site key with areas
site_key = "sfbay"
@pytest.fixture
def area():
"""Get an instance of Area."""
area = next(iter(Site(site_key)))
global area_key
area_key = area.key
yield area
def test_key(area):
"""Test `key` attribute of area instance."""
expected_key = area._key
assert area_key == expected_key
def test_title(area, get_title):
"""Test `title` attribute of area instance."""
area_title = area.title
expected_title = get_title(selector, area_key)
assert area_title == expected_title
def test_url(area, get_url):
"""Test `url` attribute of area instance."""
area_url = area.url
expected_url = get_url(selector, area_key)
assert area_url == expected_url
def test_all_raises(area):
"""`all` class method should raise an exception for Area."""
with pytest.raises(AttributeError, match="'Area' object has no attribute 'all'"):
area.all()
def test_keys_raises(area):
"""`keys` class method should raise an exception for Area."""
with pytest.raises(AttributeError, match="'Area' object has no attribute 'keys'"):
area.keys
def test_children_raises(area):
"""`children` attribute should raise an exception for Area."""
with pytest.raises(AttributeError, match="'Area' object has no attribute 'children'"):
area.children
| 2.484375 | 2 |
scripts_python/suma.py | joanayala/Frameworks_8A | 0 | 12795569 | <gh_stars>0
a = 5
b = 10
suma = a + b
print("The addition is: ", suma)
| 3.03125 | 3 |
deploy/slice.py | lebedevdes/insightface | 0 | 12795570 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import os
import argparse
import numpy as np
import mxnet as mx
ctx = mx.cpu(0)
image_size = (112, 112)
prefix = "../models/resnet-50"
epoch = 0
sym, arg_params, aux_params = mx.model.load_checkpoint(prefix,
epoch)
all_layers = sym.get_internals()
sym = all_layers['relu1_output']
dellist = []
for k, v in arg_params.items():
if k.startswith('fc1'):
dellist.append(k)
for d in dellist:
del arg_params[d]
mx.model.save_checkpoint(prefix+"s", 0, sym, arg_params, aux_params)
digraph = mx.viz.plot_network(sym, shape={'data':(1,3,256,256)},
node_attrs={"fixedsize":"false"})
digraph.view()
| 2.234375 | 2 |
tests/torch/nn/parallel/expert_parallel/wrapper_test/init_test/gpt2.py | lipovsek/oslo | 0 | 12795571 | <reponame>lipovsek/oslo<filename>tests/torch/nn/parallel/expert_parallel/wrapper_test/init_test/gpt2.py
import random
from functools import partial
import os
import numpy as np
import torch
import torch.multiprocessing as mp
from transformers import AutoTokenizer, GPT2Config, GPT2LMHeadModel
from oslo.torch.nn.parallel.expert_parallel.expert_parallel import ExpertParallel
from oslo.torch.distributed import ParallelContext, ParallelMode
torch.set_printoptions(threshold=10_000)
num_experts = 4
top_k = 1
use_residual = False
def run_test(rank, port):
# 1. Configure for Parallelization
os.environ["RANK"] = str(rank)
os.environ["LOCAL_RANK"] = str(rank)
os.environ["WORLD_SIZE"] = "2"
os.environ["MASTER_ADDR"] = "localhost"
os.environ["MASTER_PORT"] = str(port)
# 2. Set Parallel Context
parallel_context = ParallelContext.from_torch(
data_parallel_size=1,
pipeline_parallel_size=1,
tensor_parallel_size=1,
expert_parallel_size=2,
)
# 3. Create Tokenizer
tokenizer = AutoTokenizer.from_pretrained("gpt2")
tokenizer.pad_token = tokenizer.eos_token
# 4. Create Model to expert-parallelize
model_ep = GPT2LMHeadModel(GPT2Config.from_pretrained("gpt2"))
# 5. Wrap Model
wrapper_ep = ExpertParallel(
model_ep,
parallel_context,
num_experts=num_experts,
top_k=1,
use_kernel_optim=False,
use_residual=use_residual,
)
# 6. Print the result of wrapping
print(f"Worker #{rank} : {wrapper_ep.device}")
print(wrapper_ep)
print("=" * 89)
for param_name, module in wrapper_ep.named_parameters():
if wrapper_ep.expert_parallel_mapping.is_front_parallel(
wrapper_ep.model, param_name
) or wrapper_ep.expert_parallel_mapping.is_behind_parallel(
wrapper_ep.model, param_name
):
print(
f"Worker #{rank} - param_name : {param_name}, param_size : {module.size()}"
)
print(f"Worker #{rank} - param : {module}")
return
def test_expert_parallel_block():
world_size = 2
run_func = partial(run_test, port=29500)
mp.spawn(run_func, nprocs=world_size)
if __name__ == "__main__":
# Set Random Seed for Reproducibility
random.seed(0)
np.random.seed(0)
torch.manual_seed(0)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
test_expert_parallel_block()
| 2.34375 | 2 |
tests/test_factory_container.py | ClanPlay/Python_IoC | 4 | 12795572 | from flying_ioc import IocManager, IocFactory
class TSingleton1:
def __init__(self):
pass
class TSingleton2:
def __init__(self):
pass
class TSingleton3(TSingleton1):
def __init__(self, ts: TSingleton2):
super().__init__()
self.ts = ts
class TSingleton3dot2(TSingleton3):
def __init__(self, **kwargs):
super().__init__(**kwargs)
class TSingleton3dot1(TSingleton3):
def __init__(self, **kwargs):
super().__init__(**kwargs)
class TSingleton3dot3:
def __init__(self, ts: TSingleton2):
self.ts = ts
class MyIocFactory(IocFactory):
@staticmethod
def get_instance(ioc_manager, name, frame_info):
if frame_info.function == 'test_factory_container':
return ioc_manager.TSingleton3dot1
if name == 'TSingleton3':
return ioc_manager.TSingleton3dot2
return ioc_manager.TSingleton3dot3
def test_factory_container():
ioc = IocManager(stats=True)
ioc.set_class(name='TSingleton1', cls=TSingleton1, singleton=True)
ioc.set_class(name='TSingleton2', cls=TSingleton2, singleton=False)
ioc.set_factory(name='TSingleton3', cls=MyIocFactory)
ioc.set_class(name='TSingleton3dot1', cls=TSingleton3dot1, singleton=False)
ioc.set_class(name='TSingleton3dot2', cls=TSingleton3dot2, singleton=False)
ioc.set_class(name='TSingleton3dot3', cls=TSingleton3dot3, singleton=False)
assert ioc.TSingleton1 is ioc.TSingleton1
ts3 = ioc.TSingleton3
assert isinstance(ts3, TSingleton3dot1)
ioc.print_stats()
| 2.421875 | 2 |
AlgoExpert/binary_search_trees/sameBsts.py | Muzque/Leetcode | 1 | 12795573 | """
Same BSTs
Write a function that takes in two arrays of integers and determines whether these arrays represent the same BST.
Note that you're not allowed to construct any BSTs in your code.
Sample:
          10
         /  \
        8    15
       /    /  \
      5    12   94
     /    /    /
    2    11   81
"""
testcases = [
{
'input': {
'arrayOne': [10, 15, 8, 12, 94, 81, 5, 2, 11],
'arrayTwo': [10, 8, 5, 15, 2, 12, 11, 94, 81]
},
'output': True
}
]
def find_smaller_edges(array):
tmp = []
for val in array[1:]:
if val < array[0]:
tmp.append(val)
return tmp
def find_bigger_edges(array):
tmp = []
for val in array[1:]:
if val >= array[0]:
tmp.append(val)
return tmp
def sameBsts(arrayOne, arrayTwo):
if len(arrayOne) != len(arrayTwo):
return False
if len(arrayOne) == len(arrayTwo) == 0:
return True
if arrayOne[0] != arrayTwo[0]:
return False
left1 = find_smaller_edges(arrayOne)
left2 = find_smaller_edges(arrayTwo)
right1 = find_bigger_edges(arrayOne)
right2 = find_bigger_edges(arrayTwo)
return sameBsts(left1, left2) and sameBsts(right1, right2)
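# Quick self-check against the sample testcase above (illustrative addition, not
# part of the original exercise file):
if __name__ == "__main__":
    for case in testcases:
        result = sameBsts(**case['input'])
        assert result == case['output'], (result, case['output'])
        print(result)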
| 3.859375 | 4 |
movies/templatetags/custom_filters.py | gorkemarslan/Django-Movie-Database | 2 | 12795574 | <filename>movies/templatetags/custom_filters.py
from django import template
from django.core.exceptions import ObjectDoesNotExist
register = template.Library()
@register.filter(name='user_star')
def user_star(user_rating, movie):
"""
Template tag which allows queryset filtering to get user ratings.
It gets user.user_rating2 and movie objects.
Usage:
.. code-block:: python
{{ user.user_rating2.all|user_star:movie }}
"""
try:
rating = user_rating.get(movie=movie).user_rating
return rating
except ObjectDoesNotExist:
return 0
| 2.515625 | 3 |
tests/__init__.py | zzw0929/my-flask | 0 | 12795575 | __author__ = 'zhuzw'
| 0.996094 | 1 |
blockchain.py | jcorbino/blockchain | 1 | 12795576 | from time import time
from hashlib import md5
from datetime import datetime
## Hashing functions:
# Slower, 64 bytes
#sha256 = sha256('content').hexdigest()
# Faster, 32 bytes
#md5 = md5('content').hexdigest()
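# Digest-size illustration for the note above (comment only; values assume the
# standard hashlib implementations):
#   len(sha256(b'content').hexdigest()) == 64
#   len(md5(b'content').hexdigest()) == 32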
class Block:
timestamp = ''
prev_hash = ''
content = ''
nonce = 0
hash = ''
def __init__(self, timestamp, prev_hash, content, nonce, hash):
self.timestamp = timestamp
self.prev_hash = prev_hash
self.content = content
self.nonce = nonce
self.hash = hash
def serialize(self):
return self.prev_hash+self.content+str(self.nonce)
class Blockchain:
MAX_NONCE = 999999 # To prevent infinite mining
prefix = '00000' # Mining difficulty
blocks = []
# Genesis block:
def __init__(self):
nonce = 622722 # For 00000
self.blocks.append(Block(datetime.now(), ''.zfill(32), 'Genesis', nonce,
md5((''.zfill(32)+'Genesis'+str(nonce)).encode('utf-8')).hexdigest()))
def add_block(self, content = ''):
nonce = 0
prev_hash = self.blocks[-1].hash
hash = md5((prev_hash+content+str(nonce)).encode('utf-8')).hexdigest()
# Mining:
while hash[0:len(self.prefix)] != self.prefix and nonce < self.MAX_NONCE:
nonce += 1
hash = md5((prev_hash+content+str(nonce)).encode('utf-8')).hexdigest()
if nonce < self.MAX_NONCE:
self.blocks.append(Block(datetime.now(), prev_hash, content, nonce,
hash))
else:
print('Unable to mine block #'+str(len(self.blocks)+1))
def print_chain(self):
i = 1
for block in self.blocks:
print('BLOCK #%d =======================' % i); i += 1
print(block.prev_hash)
print(block.timestamp)
print(block.content)
print(block.hash)
print('================================\n\t\t|\n\t\tV')
def check_block(self, block_num):
if block_num > 0:
block = self.blocks[block_num-1]
if md5((block.serialize()).encode('utf-8')).hexdigest() == block.hash:
print('Block #%d is valid' % block_num)
else:
print('Block #%d is invalid' % block_num)
else:
print('Invalid block number')
def check_chain(self):
for i in range(1, len(self.blocks)+1):
self.check_block(i)
b = Blockchain()
t1 = time()
b.add_block('Johnny')
b.add_block('Noelle')
t2 = time()
b.print_chain()
print('Elapsed time: %.2fs' % (t2-t1))
b.check_chain()
| 2.828125 | 3 |
mid_exam_preparation/counter_strike.py | PetkoAndreev/Python-fundamentals | 0 | 12795577 | energy = int(input())
distance = input()
won_battles = 0
while distance != 'End of battle':
distance = int(distance)
if energy >= distance:
energy -= distance
won_battles += 1
if won_battles % 3 == 0:
energy += won_battles
else:
print(f'Not enough energy! Game ends with {won_battles} won battles and {energy} energy')
break
distance = input()
if distance == 'End of battle':
print(f'Won battles: {won_battles}. Energy left: {energy}') | 4.03125 | 4 |
src/onapsdk/msb/k8s/__init__.py | krasm/python-onapsdk | 4 | 12795578 | """K8s package."""
from .definition import Definition, Profile, ConfigurationTemplate
from .connectivity_info import ConnectivityInfo
from .instance import InstantiationParameter, InstantiationRequest, Instance
| 1.0625 | 1 |
tests/test_factory.py | knowark/injectark | 0 | 12795579 | from injectark import Factory
def test_factory_extract() -> None:
class MockFactory(Factory):
def __init__(self, config):
super().__init__(config)
pass
def _my_method(self):
pass
factory = MockFactory({'key': 'value'})
method = factory.extract('_my_method')
assert method == factory._my_method
assert factory.config == {'key': 'value'}
| 2.515625 | 3 |
src/distance.py | haminhle192/face_recognition | 0 | 12795580 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
class Distance:
def __init__(self):
self.graph = tf.Graph()
self.sess = tf.Session(graph=self.graph)
with self.graph.as_default():
self.b_tf = tf.placeholder(shape=[None, 512], dtype=tf.float32)
self.A_tf = tf.placeholder(shape=[None, 512], dtype=tf.float32)
self.distance_tf = tf.sqrt(tf.reduce_sum(tf.square(tf.subtract(self.A_tf, tf.expand_dims(self.b_tf, 1))), axis=2))
def __del__(self):
self.sess.close()
def fit(self, A, b):
return self.sess.run(self.distance_tf, feed_dict={self.A_tf: A, self.b_tf: b})
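# Minimal usage sketch (illustrative only; assumes the TensorFlow 1.x API used above).
# fit(A, b) returns an (M, N) matrix of Euclidean distances between the M rows of b
# and the N rows of A, each row being a 512-dim embedding.
if __name__ == "__main__":
    import numpy as np
    A = np.random.rand(5, 512).astype('float32')
    b = np.random.rand(2, 512).astype('float32')
    print(Distance().fit(A, b).shape)  # expected: (2, 5)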
| 2.65625 | 3 |
tests/test_api.py | usc-isi-i2/WEDC | 0 | 12795581 | import sys
import time
import os
import unittest
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
TEST_DATA_DIR = os.path.join(os.path.dirname(__file__), 'data')
# text_ = os.path.expanduser(os.path.join(TEST_DATA_DIR, 'text'))
from wedc.application.api import WEDC
class TestPostVectorMethods(unittest.TestCase):
def setUp(self):
self.api = WEDC()
def test(self):
        print(self.api.config('WEDC_CATEGORIES'))
def tearDown(self):
pass
if __name__ == '__main__':
# unittest.main()
def run_main_test():
suite = unittest.TestSuite()
suite.addTest(TestPostVectorMethods("test"))
runner = unittest.TextTestRunner()
runner.run(suite)
run_main_test()
| 2.234375 | 2 |
rldb/db/paper__dqn2013/algo__contingency/__init__.py | seungjaeryanlee/sotarl | 45 | 12795582 | <reponame>seungjaeryanlee/sotarl
"""
Contingency scores from DQN2013 paper.
7 entries
------------------------------------------------------------------------
7 unique entries
"""
from .entries import entries
# Specify ALGORITHM
algo = {
# ALGORITHM
"algo-title": "Contingency",
"algo-nickname": "Contingency",
"algo-source-title": "Investigating Contingency Awareness Using Atari 2600 Games",
}
# Populate entries
entries = [{**entry, **algo} for entry in entries]
assert len(entries) == 7
| 2.5 | 2 |
unittests/linux/test_networkd.py | stepanandr/taf | 10 | 12795583 | #!/usr/bin/env python
# Copyright (c) 2015 - 2017, Intel Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""``test_networkd.py``
`NetworkD Unittests`
"""
from unittest.mock import MagicMock
from testlib.linux.networkd import NetworkD
class TestNetworkD(object):
def test_single_mgmt_port(self):
run_command = MagicMock()
n = NetworkD(run_command, ["test"])
n.clear_settings()
assert run_command.call_args_list[0][0][
0] == "find /etc/systemd/network/ -mindepth 1 -not \\( -name 'test.network' -or " \
"-name 'test.netdev' -or -name 'test.link' -or -name 'test.swport' \\) -delete"
def test_multiple_mgmt_port(self):
run_command = MagicMock()
n = NetworkD(run_command, ["test1", "test2"])
n.clear_settings()
assert run_command.call_args_list[0][0][
0] == "find /etc/systemd/network/ -mindepth 1 -not \\( -name 'test1.network' -or " \
"-name 'test1.netdev' -or -name 'test1.link' -or -name 'test1.swport' -or " \
"-name 'test2.network' -or -name 'test2.netdev' -or -name 'test2.link' -or " \
"-name 'test2.swport' \\) -delete"
def test_empty_list(self):
run_command = MagicMock()
n = NetworkD(run_command, [])
n.clear_settings()
assert run_command.call_args_list[0][0][
0] == "find /etc/systemd/network/ -mindepth 1 -not \\( \\) -delete"
def test_extra_excludes_are_appended(self):
run_command = MagicMock()
n = NetworkD(run_command, ["test1", "test2"])
n.clear_settings(exclude_ports=["extra1", "extra2"])
assert run_command.call_args_list[0][0][
0] == "find /etc/systemd/network/ -mindepth 1 -not \\( -name 'test1.network' -or " \
"-name 'test1.netdev' -or -name 'test1.link' -or -name 'test1.swport' -or " \
"-name 'test2.network' -or -name 'test2.netdev' -or -name 'test2.link' -or " \
"-name 'test2.swport' -or -name 'extra1.network' -or -name 'extra1.netdev' -or " \
"-name 'extra1.link' -or -name 'extra1.swport' -or -name 'extra2.network' -or " \
"-name 'extra2.netdev' -or -name 'extra2.link' -or -name 'extra2.swport' \\) -delete"
def test_just_extra_excludes(self):
run_command = MagicMock()
n = NetworkD(run_command, [])
n.clear_settings(exclude_ports=["extra1", "extra2"])
assert run_command.call_args_list[0][0][
0] == "find /etc/systemd/network/ -mindepth 1 -not \\( -name 'extra1.network' -or " \
"-name 'extra1.netdev' -or -name 'extra1.link' -or -name 'extra1.swport' -or " \
"-name 'extra2.network' -or -name 'extra2.netdev' -or -name 'extra2.link' -or " \
"-name 'extra2.swport' \\) -delete"
| 2.21875 | 2 |
consoleme/lib/v2/notifications.py | shyovn/consoleme | 2,835 | 12795584 | <gh_stars>1000+
import json as original_json
import sys
import time
from collections import defaultdict
from typing import Dict
import sentry_sdk
import ujson as json
from asgiref.sync import sync_to_async
from consoleme.config import config
from consoleme.lib.cache import (
retrieve_json_data_from_redis_or_s3,
store_json_results_in_redis_and_s3,
)
from consoleme.lib.dynamo import UserDynamoHandler
from consoleme.lib.json_encoder import SetEncoder
from consoleme.lib.notifications.models import (
ConsoleMeUserNotification,
GetNotificationsForUserResponse,
)
from consoleme.lib.singleton import Singleton
log = config.get_logger()
class RetrieveNotifications(metaclass=Singleton):
def __init__(self):
self.last_update = 0
self.all_notifications = []
async def retrieve_all_notifications(self, force_refresh=False):
if force_refresh or (
int(time.time()) - self.last_update
> config.get(
"get_notifications_for_user.notification_retrieval_interval", 20
)
):
self.all_notifications = await retrieve_json_data_from_redis_or_s3(
redis_key=config.get("notifications.redis_key", "ALL_NOTIFICATIONS"),
redis_data_type="hash",
s3_bucket=config.get("notifications.s3.bucket"),
s3_key=config.get(
"notifications.s3.key", "notifications/all_notifications_v1.json.gz"
),
default={},
)
self.last_update = int(time.time())
return self.all_notifications
async def get_notifications_for_user(
user,
groups,
max_notifications=config.get("get_notifications_for_user.max_notifications", 5),
force_refresh=False,
) -> GetNotificationsForUserResponse:
function = f"{__name__}.{sys._getframe().f_code.co_name}"
log_data = {
"function": function,
"user": user,
"max_notifications": max_notifications,
"force_refresh": force_refresh,
}
current_time = int(time.time())
all_notifications = await RetrieveNotifications().retrieve_all_notifications(
force_refresh
)
unread_count = 0
notifications_for_user = []
for user_or_group in [user, *groups]:
# Filter out identical notifications that were already captured via user-specific attribution. IE: "UserA"
# performed an access deny operation locally under "RoleA" with session name = "UserA", so the generated
# notification is tied to the user. However, "UserA" is a member of "GroupA", which owns RoleA. We want
# to show the notification to members of "GroupA", as well as "UserA" but we don't want "UserA" to see 2
# notifications.
notifications = all_notifications.get(user_or_group)
if not notifications:
continue
notifications = json.loads(notifications)
for notification_raw in notifications:
try:
# We parse ConsoleMeUserNotification individually instead of as an array
# to account for future changes to the model that may invalidate older
# notifications
notification = ConsoleMeUserNotification.parse_obj(notification_raw)
except Exception as e:
log.error({**log_data, "error": str(e)})
sentry_sdk.capture_exception()
continue
if notification.version != 1:
# Skip unsupported versions of the notification model
continue
if user in notification.hidden_for_users:
                # Skip this notification if it is hidden for this user
continue
seen = False
for existing_user_notification_raw in notifications_for_user:
existing_user_notification = ConsoleMeUserNotification.parse_obj(
existing_user_notification_raw
)
if (
notification.predictable_id
== existing_user_notification.predictable_id
):
seen = True
if not seen:
notifications_for_user.append(notification)
# Filter out "expired" notifications
notifications_for_user = [
v for v in notifications_for_user if v.expiration > current_time
]
# Show newest notifications first
notifications_for_user = sorted(
notifications_for_user, key=lambda i: i.event_time, reverse=True
)
# Increment Unread Count
notifications_to_return = notifications_for_user[0:max_notifications]
for notification in notifications_to_return:
if user in notification.read_by_users or notification.read_by_all:
notification.read_for_current_user = True
continue
unread_count += 1
return GetNotificationsForUserResponse(
notifications=notifications_to_return, unread_count=unread_count
)
async def fetch_notification(notification_id: str):
ddb = UserDynamoHandler()
notification = await sync_to_async(ddb.notifications_table.get_item)(
Key={"predictable_id": notification_id}
)
if notification.get("Item"):
return ConsoleMeUserNotification.parse_obj(notification["Item"])
async def cache_notifications_to_redis_s3() -> Dict[str, int]:
function = f"{__name__}.{sys._getframe().f_code.co_name}"
current_time = int(time.time())
log_data = {"function": function}
ddb = UserDynamoHandler()
notifications_by_user_group = defaultdict(list)
all_notifications_l = await ddb.parallel_scan_table_async(ddb.notifications_table)
changed_notifications = []
for existing_notification in all_notifications_l:
notification = ConsoleMeUserNotification.parse_obj(existing_notification)
if current_time > notification.expiration:
notification.expired = True
changed_notifications.append(notification.dict())
for user_or_group in notification.users_or_groups:
notifications_by_user_group[user_or_group].append(notification.dict())
if changed_notifications:
ddb.parallel_write_table(ddb.notifications_table, changed_notifications)
if notifications_by_user_group:
for k, v in notifications_by_user_group.items():
notifications_by_user_group[k] = original_json.dumps(v, cls=SetEncoder)
await store_json_results_in_redis_and_s3(
notifications_by_user_group,
redis_key=config.get("notifications.redis_key", "ALL_NOTIFICATIONS"),
redis_data_type="hash",
s3_bucket=config.get("notifications.s3.bucket"),
s3_key=config.get(
"notifications.s3.key", "notifications/all_notifications_v1.json.gz"
),
)
log_data["num_user_groups_for_notifications"] = len(
notifications_by_user_group.keys()
)
log_data["num_notifications"] = len(all_notifications_l)
log.debug(log_data)
return {
"num_user_groups_to_notify": len(notifications_by_user_group.keys()),
"num_notifications": len(all_notifications_l),
}
async def write_notification(notification: ConsoleMeUserNotification):
ddb = UserDynamoHandler()
await sync_to_async(ddb.notifications_table.put_item)(
Item=ddb._data_to_dynamo_replace(notification.dict())
)
await cache_notifications_to_redis_s3()
return True
| 1.90625 | 2 |
vintools/_tools/_deepTools/_deepTools_Module.py | mvinyard/vintools | 2 | 12795585 | <filename>vintools/_tools/_deepTools/_deepTools_Module.py
# package imports #
# --------------- #
import os
# local imports #
# ------------- #
from ..._utilities._system_utils._use_n_cores import _use_n_cores
from ..._utilities._system_utils._flexible_mkdir import _flexible_mkdir
from ._deepTools_supporting_functions import _find_sample_files, _plotCorrelation
class _deepTools:
def __init__(self, silent=False, verbose=False):
"""Notebook interface of deepTools"""
self.silent = silent
self.verbose = verbose
self.DataDict = {}
self.DataDict["bams"] = {}
self.DataDict["bigwigs"] = {}
def load_bams(self, path, sample_dirname=False, silent=False):
if silent:
self.silent = silent
self.DataDict["bams"].update(
_find_sample_files(
path,
file_extension="bam",
sample_dir_level=sample_dirname,
silent=silent,
)
)
def load_bw(self, path, sample_dirname=False, silent=False):
if silent:
self.silent = silent
self.DataDict["bigwigs"].update(
_find_sample_files(
path,
file_extension="bw",
sample_dir_level=sample_dirname,
silent=silent,
)
)
def bamCoverage(
self, BamDict=False, output_dir="./", dry_run=False, verbose=False, n_cores=2
):
"""If supplied, a dictionary of format: {'sample': '/path/to/sample/sample.bam'} is taken as input"""
if verbose:
self.verbose = verbose
self.n_cores = _use_n_cores(n_cores)
print(
"\tnote... this is {} cores per sample. Samples are run as background processes for parallelization.\n".format(
self.n_cores
)
)
if BamDict:
self.BamDict = BamDict
if not os.path.exists(output_dir):
_flexible_mkdir(output_dir)
if dry_run:
print("deepTools bamCoverage command issued:\n")
for sample, bamfile in self.BamDict.items():
outprefix = os.path.join(output_dir, sample)
self.bamCoverage_executable = "bamCoverage -b {} -o {}.bw -p {} -of bigwig &".format(
                bamfile, outprefix, self.n_cores
)
if dry_run:
print(self.bamCoverage_executable, "\n")
else:
if self.verbose:
print("deepTools bamCoverage command issued:", "\n")
print(self.bamCoverage_executable, "\n")
os.system(self.bamCoverage_executable)
def multiBigwigsummary(
self, outfile="multiBigwigsummary.results.npz", n_cores=False
):
"""runs multiBigwigsummary from deepTools."""
self.outfile = outfile
self.outdir = os.path.dirname(self.outfile)
if not os.path.exists(self.outdir):
_flexible_mkdir(self.outdir)
bigwigs = " ".join(list(self.DataDict["bigwigs"].values()))
self.n_cores = _use_n_cores(n_cores)
self.multiBigwigsummary_executable = "multiBigwigSummary bins -b {} -o {} -p {}".format(
bigwigs, outfile, self.n_cores
)
os.system(self.multiBigwigsummary_executable)
def plotCorrelation(self, title=False, summary_results_file=False, silent=False):
if summary_results_file:
            self.outfile = summary_results_file
self.outdir = os.path.dirname(self.outfile)
if not title:
title = self.outdir
else:
title = os.path.join(self.outdir, title)
_plotCorrelation(title, self.outfile, silent)
| 2.046875 | 2 |
appmock/appmock_client.py | kzemek/helpers | 0 | 12795586 | <reponame>kzemek/helpers<filename>appmock/appmock_client.py
# coding=utf-8
"""
Authors: <NAME>
Copyright (C) 2015 ACK CYFRONET AGH
This software is released under the MIT license cited in 'LICENSE.txt'
Client library to contact appmock instances.
"""
import base64
import json
import time
import requests
# Appmock remote control port
appmock_rc_port = 9999
# These defines determine how often the appmock server will be requested
# to check for condition when waiting for something.
# Increment rate causes each next interval to be longer.
WAIT_STARTING_CHECK_INTERVAL = 250
WAIT_INTERVAL_INCREMENT_RATE = 1.3
DEFAULT_TIMEOUT = 60
requests.packages.urllib3.disable_warnings()
class AppmockClient(object):
def __init__(self, ip):
self.ip = ip
def tcp_endpoint(self, port):
return AppmockTCPEndpoint(self, port)
def reset_rest_history(self):
return reset_rest_history(self.ip)
def reset_tcp_history(self):
return reset_tcp_server_history(self.ip)
class AppmockTCPEndpoint(object):
def __init__(self, client, port):
self.client = client
self.ip = client.ip
self.port = port
def specific_message_count(self, message_binary):
return tcp_server_specific_message_count(self.ip, self.port,
message_binary)
def all_messages_count(self):
return tcp_server_all_messages_count(self.ip, self.port)
def connection_count(self):
return tcp_server_connection_count(self.ip, self.port)
def history(self):
return tcp_server_history(self.ip, self.port)
def send(self, message_binary, msg_count=1):
return tcp_server_send(self.ip, self.port, message_binary, msg_count)
def wait_for_any_messages(self, msg_count=1, accept_more=False,
return_history=False, timeout_sec=DEFAULT_TIMEOUT):
return tcp_server_wait_for_any_messages(self.ip, self.port, msg_count,
accept_more, return_history,
timeout_sec)
def wait_for_connections(self, number_of_connections=1, accept_more=False,
timeout_sec=DEFAULT_TIMEOUT):
return tcp_server_wait_for_connections(self.ip, self.port,
number_of_connections,
accept_more, timeout_sec)
def wait_for_specific_messages(self, message_binary, msg_count=1,
accept_more=False, return_history=False,
timeout_sec=DEFAULT_TIMEOUT):
return tcp_server_wait_for_specific_messages(self.ip, self.port,
message_binary, msg_count,
accept_more,
return_history,
timeout_sec)
def _http_post(ip, port, path, use_ssl, data):
"""
    Helper function that performs an HTTP POST request.
Returns a tuple (Code, Headers, Body)
"""
protocol = 'https' if use_ssl else 'http'
response = requests.post(
'{0}://{1}:{2}{3}'.format(protocol, ip, port, path),
data, verify=False, timeout=DEFAULT_TIMEOUT)
return response.status_code, response.headers, response.text
def rest_endpoint_request_count(appmock_ip, endpoint_port, endpoint_path):
"""
Returns how many times has given endpoint been requested.
IMPORTANT: the endpoint_path must be literally the same as in app_desc
module, for example: '/test1/[:binding]'
"""
json_data = {
'port': endpoint_port,
'path': endpoint_path
}
_, _, body = _http_post(appmock_ip, appmock_rc_port,
'/rest_endpoint_request_count', True,
json.dumps(json_data))
body = json.loads(body)
if body['result'] == 'error':
raise Exception(
'rest_endpoint_request_count returned error: ' + body['reason'])
return body['result']
def verify_rest_history(appmock_ip, expected_history):
"""
Verifies if rest endpoints were requested in given order.
Returns True or False.
The expected_history is a list of tuples (port, path), for example:
[(8080, '/test1/[:binding]'), (8080, '/test2')]
"""
def create_endpoint_entry(_port, _path):
entry = {
'endpoint': {
'path': _path,
'port': _port
}
}
return entry
json_data = [create_endpoint_entry(port, path) for (port, path) in
expected_history]
_, _, body = _http_post(appmock_ip, appmock_rc_port,
'/verify_rest_history', True,
json.dumps(json_data))
body = json.loads(body)
if body['result'] == 'error':
raise Exception(
'expected history does not match: ' + str(body['history']))
return body['result']
def reset_rest_history(appmock_ip):
"""
Performs a request to an appmock instance to reset
all the history connected with ALL mocked rest endpoints.
The reset will cause this instance
to act the same as if it was restarted clean.
"""
_, _, body = _http_post(appmock_ip, appmock_rc_port, '/reset_rest_history',
True, '')
body = json.loads(body)
if body['result'] == 'error':
raise Exception(
'reset_rest_history returned error: ' + body['reason'])
return body['result']
def tcp_server_specific_message_count(appmock_ip, tcp_port, message_binary):
"""
Returns number of messages exactly matching given message,
that has been received by the TCP server mock.
"""
encoded_message = base64.b64encode(message_binary)
path = '/tcp_server_specific_message_count/{0}'.format(tcp_port)
_, _, body = _http_post(appmock_ip, appmock_rc_port, path,
True, encoded_message)
body = json.loads(body)
if body['result'] == 'error':
raise Exception(
'tcp_server_specific_message_count returned error: ' +
body['reason'])
return body['result']
def tcp_server_wait_for_specific_messages(appmock_ip, tcp_port, message_binary,
msg_count=1, accept_more=False,
return_history=False, timeout_sec=DEFAULT_TIMEOUT):
"""
Returns when given number of specific messages
has been received on given port, or after it timeouts.
    The accept_more flag makes the function succeed when
    there are at least as many messages as expected.
The return_history flag causes the function
to return full msg history upon success.
"""
start_time = time.time()
wait_for = WAIT_STARTING_CHECK_INTERVAL
while True:
result = tcp_server_specific_message_count(appmock_ip, tcp_port,
message_binary)
if accept_more and result >= msg_count:
break
elif result == msg_count:
break
elif time.time() - start_time > timeout_sec:
raise Exception(
'tcp_server_wait_for_specific_messages returned error: timeout')
else:
time.sleep(wait_for / 1000.0)
wait_for *= WAIT_INTERVAL_INCREMENT_RATE
if return_history:
return tcp_server_history(appmock_ip, tcp_port)
def tcp_server_all_messages_count(appmock_ip, tcp_port):
"""
Returns number of all messages
that has been received by the TCP server mock.
"""
path = '/tcp_server_all_messages_count/{0}'.format(tcp_port)
_, _, body = _http_post(appmock_ip, appmock_rc_port, path, True, '')
body = json.loads(body)
if body['result'] == 'error':
raise Exception(
'tcp_server_all_messages_count returned error: ' + body['reason'])
return body['result']
def tcp_server_wait_for_any_messages(appmock_ip, tcp_port, msg_count=1,
accept_more=False, return_history=False,
timeout_sec=DEFAULT_TIMEOUT):
"""
Returns when given number of any messages has been received on given port,
or after it timeouts.
    The accept_more flag makes the function succeed when
    there are at least as many messages as expected.
The return_history flag causes the function to return
full msg history upon success.
"""
start_time = time.time()
wait_for = WAIT_STARTING_CHECK_INTERVAL
while True:
result = tcp_server_all_messages_count(appmock_ip, tcp_port)
if accept_more and result >= msg_count:
break
elif result == msg_count:
break
elif time.time() - start_time > timeout_sec:
raise Exception(
'tcp_server_wait_for_any_messages returned error: timeout')
else:
time.sleep(wait_for / 1000.0)
# No incrementing wait time here because
            # this function might be used for benchmarking.
if return_history:
return tcp_server_history(appmock_ip, tcp_port)
def tcp_server_send(appmock_ip, tcp_port, message_binary, msg_count=1):
"""
Orders appmock to send given message to all
connected clients, given amount of times.
"""
encoded_message = base64.b64encode(message_binary)
path = '/tcp_server_send/{0}/{1}'.format(tcp_port, msg_count)
_, _, body = _http_post(appmock_ip, appmock_rc_port, path, True,
encoded_message)
body = json.loads(body)
if body['result'] == 'error':
raise Exception('tcp_server_send returned error: ' + body['reason'])
return body['result']
def tcp_server_history(appmock_ip, tcp_port):
"""
Performs a request to an appmock instance to
obtain full history of messages received on given endpoint.
"""
_, _, body = _http_post(appmock_ip, appmock_rc_port,
'/tcp_server_history/{0}'.format(tcp_port),
True, '')
body = json.loads(body)
if body['result'] == 'error':
raise Exception('tcp_server_send returned error: ' + body['reason'])
for i in range(len(body['result'])):
body['result'][i] = base64.b64decode(body['result'][i])
return body['result']
def reset_tcp_server_history(appmock_ip):
"""
Performs a request to an appmock instance to reset
all the history connected with ALL mocked TCP endpoints.
The reset will cause this instance to act
the same as if it was restarted clean - e. g. counters will be reset.
Existing connections WILL NOT BE DISTURBED.
"""
_, _, body = _http_post(appmock_ip, appmock_rc_port,
'/reset_tcp_server_history', True, '')
body = json.loads(body)
if body['result'] == 'error':
raise Exception(
'reset_tcp_history returned error: ' + body['reason'])
return body['result']
def tcp_server_connection_count(appmock_ip, tcp_port):
"""
Performs a request to an appmock instance to check
how many clients are connected to given endpoint (by port).
"""
path = '/tcp_server_connection_count/{0}'.format(tcp_port)
_, _, body = _http_post(appmock_ip, appmock_rc_port, path, True, '')
body = json.loads(body)
if body['result'] == 'error':
raise Exception(
'tcp_server_connection_count returned error: ' + body['reason'])
return body['result']
def tcp_server_wait_for_connections(appmock_ip, tcp_port,
number_of_connections=1, accept_more=False,
timeout_sec=DEFAULT_TIMEOUT):
"""
Returns when given number of connections
are established on given port, or after it timeouts.
    The accept_more flag makes the function succeed when
    there are at least as many connections as expected.
"""
start_time = time.time()
wait_for = WAIT_STARTING_CHECK_INTERVAL
while True:
result = tcp_server_connection_count(appmock_ip, tcp_port)
if accept_more and result >= number_of_connections:
return
elif result == number_of_connections:
return
elif time.time() - start_time > timeout_sec:
raise Exception(
'tcp_server_connection_count returned error: timeout')
else:
time.sleep(wait_for / 1000.0)
wait_for *= WAIT_INTERVAL_INCREMENT_RATE
| 2.203125 | 2 |
test/DATA_FLOW/Descriptor/utils_test.py | globalSolutionsContinex/data_flow_driver | 0 | 12795587 | <filename>test/DATA_FLOW/Descriptor/utils_test.py
import DataFlow.Descriptor.utils as utils
record = {
"nombre": "<NAME>",
"identidad": "30664743",
"dv": "4"
}
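# Helper (behaviour assumed from the imported utils module): return the raw name for companies,
# otherwise only the first token of a person's name.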
s = lambda record: utils.return_format(
    record['nombre'] if utils.is_company(record['identidad'], record['dv'])
    else utils.get_name(record['nombre'], 0)
)
print(s(record))
print(utils.get_name(record['nombre'], 0))
print(utils.get_name(record['nombre'], 1))
print(utils.get_name(record['nombre'], 2))
print(utils.get_name(record['nombre'], 3))
| 2.765625 | 3 |
public/neumeeditor/views/__init__.py | jacobsanz97/cantus | 12 | 12795588 | <filename>public/neumeeditor/views/__init__.py
__author__ = 'afogarty'
| 1.109375 | 1 |
exercises/parsing.py | PetrWolf/pydata_nyc_2019 | 2 | 12795589 | <gh_stars>1-10
# Utilities for data cleaning
import os
def parse_location(location):
"""Extracts latitude and longitude from a location string.
Args:
location: Decimal Degrees (D.D) representation of a geographical location,
e.g. "34.56 N 123.45 W"
Returns:
latitude, longitude
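
    Example (illustrative):
        parse_location("34.56 N 123.45 W") -> (34.56, -123.45)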
"""
latitude_str, north_south, longitude_str, east_west = location.split()
    latitude = float(latitude_str) * (-1 if north_south == "S" else 1)
    longitude = float(longitude_str) * (-1 if east_west == "W" else 1)
    return latitude, longitude
| 3.5 | 4 |
tool_sdk/api/basic/batch_get_tool_detail_pb2.py | easyopsapis/easyops-api-python | 5 | 12795590 | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: batch_get_tool_detail.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from tool_sdk.model.tool import tool_pb2 as tool__sdk_dot_model_dot_tool_dot_tool__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='batch_get_tool_detail.proto',
package='basic',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n\x1b\x62\x61tch_get_tool_detail.proto\x12\x05\x62\x61sic\x1a\x1etool_sdk/model/tool/tool.proto\",\n\x19\x42\x61tchGetToolDetailRequest\x12\x0f\n\x07toolIds\x18\x01 \x01(\t\"d\n\x1a\x42\x61tchGetToolDetailResponse\x12\x0c\n\x04\x63ode\x18\x01 \x01(\x05\x12\r\n\x05\x65rror\x18\x02 \x01(\t\x12\x0f\n\x07message\x18\x03 \x01(\t\x12\x18\n\x04\x64\x61ta\x18\x04 \x03(\x0b\x32\n.tool.Tool\"\x86\x01\n!BatchGetToolDetailResponseWrapper\x12\x0c\n\x04\x63ode\x18\x01 \x01(\x05\x12\x13\n\x0b\x63odeExplain\x18\x02 \x01(\t\x12\r\n\x05\x65rror\x18\x03 \x01(\t\x12/\n\x04\x64\x61ta\x18\x04 \x01(\x0b\x32!.basic.BatchGetToolDetailResponseb\x06proto3')
,
dependencies=[tool__sdk_dot_model_dot_tool_dot_tool__pb2.DESCRIPTOR,])
_BATCHGETTOOLDETAILREQUEST = _descriptor.Descriptor(
name='BatchGetToolDetailRequest',
full_name='basic.BatchGetToolDetailRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='toolIds', full_name='basic.BatchGetToolDetailRequest.toolIds', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=70,
serialized_end=114,
)
_BATCHGETTOOLDETAILRESPONSE = _descriptor.Descriptor(
name='BatchGetToolDetailResponse',
full_name='basic.BatchGetToolDetailResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='code', full_name='basic.BatchGetToolDetailResponse.code', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='error', full_name='basic.BatchGetToolDetailResponse.error', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='message', full_name='basic.BatchGetToolDetailResponse.message', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='data', full_name='basic.BatchGetToolDetailResponse.data', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=116,
serialized_end=216,
)
_BATCHGETTOOLDETAILRESPONSEWRAPPER = _descriptor.Descriptor(
name='BatchGetToolDetailResponseWrapper',
full_name='basic.BatchGetToolDetailResponseWrapper',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='code', full_name='basic.BatchGetToolDetailResponseWrapper.code', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='codeExplain', full_name='basic.BatchGetToolDetailResponseWrapper.codeExplain', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='error', full_name='basic.BatchGetToolDetailResponseWrapper.error', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='data', full_name='basic.BatchGetToolDetailResponseWrapper.data', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=219,
serialized_end=353,
)
_BATCHGETTOOLDETAILRESPONSE.fields_by_name['data'].message_type = tool__sdk_dot_model_dot_tool_dot_tool__pb2._TOOL
_BATCHGETTOOLDETAILRESPONSEWRAPPER.fields_by_name['data'].message_type = _BATCHGETTOOLDETAILRESPONSE
DESCRIPTOR.message_types_by_name['BatchGetToolDetailRequest'] = _BATCHGETTOOLDETAILREQUEST
DESCRIPTOR.message_types_by_name['BatchGetToolDetailResponse'] = _BATCHGETTOOLDETAILRESPONSE
DESCRIPTOR.message_types_by_name['BatchGetToolDetailResponseWrapper'] = _BATCHGETTOOLDETAILRESPONSEWRAPPER
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
BatchGetToolDetailRequest = _reflection.GeneratedProtocolMessageType('BatchGetToolDetailRequest', (_message.Message,), {
'DESCRIPTOR' : _BATCHGETTOOLDETAILREQUEST,
'__module__' : 'batch_get_tool_detail_pb2'
# @@protoc_insertion_point(class_scope:basic.BatchGetToolDetailRequest)
})
_sym_db.RegisterMessage(BatchGetToolDetailRequest)
BatchGetToolDetailResponse = _reflection.GeneratedProtocolMessageType('BatchGetToolDetailResponse', (_message.Message,), {
'DESCRIPTOR' : _BATCHGETTOOLDETAILRESPONSE,
'__module__' : 'batch_get_tool_detail_pb2'
# @@protoc_insertion_point(class_scope:basic.BatchGetToolDetailResponse)
})
_sym_db.RegisterMessage(BatchGetToolDetailResponse)
BatchGetToolDetailResponseWrapper = _reflection.GeneratedProtocolMessageType('BatchGetToolDetailResponseWrapper', (_message.Message,), {
'DESCRIPTOR' : _BATCHGETTOOLDETAILRESPONSEWRAPPER,
'__module__' : 'batch_get_tool_detail_pb2'
# @@protoc_insertion_point(class_scope:basic.BatchGetToolDetailResponseWrapper)
})
_sym_db.RegisterMessage(BatchGetToolDetailResponseWrapper)
# @@protoc_insertion_point(module_scope)
| 1.265625 | 1 |
Python/boundary-of-binary-tree.py | RideGreg/LeetCode | 1 | 12795591 | # Time: O(n)
# Space: O(h)
# 545
# Given a binary tree, return the values of its boundary in anti-clockwise direction
# starting from root. Boundary includes left boundary, leaves, and right boundary
# in order without duplicate nodes.
#
# Left boundary is defined as the path from root to the left-most node. Right boundary
# is defined as the path from root to the right-most node. If the root doesn't have left
# subtree or right subtree, then the root itself is left boundary or right boundary.
# Note this definition only applies to the input binary tree, and not applies to any subtrees.
#
# The left-most node is defined as a leaf node you could reach when you always firstly
# travel to the left subtree if exists. If not, travel to the right subtree. Repeat until
# you reach a leaf node.
#
# The right-most node is also defined by the same way with left and right exchanged.
# Input:
# 1
# \
# 2
# / \
# 3 4
#
# Output:
# [1, 3, 4, 2]
#
# Explanation:
# The root doesn't have left subtree, so the root itself is left boundary.
# The leaves are node 3 and 4.
# The right boundary are node 1,2,4. Note the anti-clockwise direction means you should output reversed right boundary.
# So order them in anti-clockwise without duplicates and we have [1,3,4,2].
#
# Input:
# ____1_____
# / \
# 2 3
# / \ /
# 4 5 6
# / \ / \
# 7 8 9 10
#
# Output:
# [1,2,4,7,8,9,10,6,3]
#
# Explanation:
# The left boundary are node 1,2,4. (4 is the left-most node according to definition)
# The leaves are node 4,7,8,9,10.
# The right boundary are node 1,3,6,10. (10 is the right-most node).
# So order them in anti-clockwise without duplicate nodes we have [1,2,4,7,8,9,10,6,3].
# Definition for a binary tree node.
class TreeNode(object):
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution(object):
def boundaryOfBinaryTree(self, root):
"""
:type root: TreeNode
:rtype: List[int]
"""
def leftBoundary(root, nodes):
# don't process leaf node, leaving to leaves() method
if root and (root.left or root.right):
nodes.append(root.val)
if not root.left: # only do 1 branch
leftBoundary(root.right, nodes)
else:
leftBoundary(root.left, nodes)
def rightBoundary(root, nodes):
if root and (root.left or root.right):
if not root.right:
rightBoundary(root.left, nodes)
else:
rightBoundary(root.right, nodes)
nodes.append(root.val)
def leaves(root, nodes): # preorder
if root:
if not root.left and not root.right:
nodes.append(root.val)
return
leaves(root.left, nodes)
leaves(root.right, nodes)
if not root:
return []
nodes = [root.val]
leftBoundary(root.left, nodes)
leaves(root.left, nodes)
leaves(root.right, nodes)
rightBoundary(root.right, nodes)
return nodes
r = TreeNode(1)
r.left, r.right = TreeNode(2), TreeNode(3)
r.left.left, r.left.right = TreeNode(4), TreeNode(5)
r.left.right.left, r.left.right.right = TreeNode(7), TreeNode(8)
r.right.left = TreeNode(6)
r.right.left.left, r.right.left.right = TreeNode(9), TreeNode(10)
print(Solution().boundaryOfBinaryTree(r)) | 4.21875 | 4 |
codechallenges/serializers.py | alimustafashah/core | 0 | 12795592 | from rest_framework import serializers
from codechallenges.models import Question, Submission
class QuestionSerializer(serializers.ModelSerializer):
class Meta:
model = Question
fields = (
"title",
"body",
"format",
"answer",
"release_date",
"expiration_date",
)
class QuestionHiddenSerializer(serializers.ModelSerializer):
class Meta:
model = Question
fields = ("title", "body", "format")
class SubmissionSerializer(serializers.ModelSerializer):
class Meta:
model = Submission
fields = ("email", "correct", "answer", "question")
| 2.515625 | 3 |
tests/utils/requires.py | RealA10N/cptk | 5 | 12795593 | <gh_stars>1-10
from __future__ import annotations
from shutil import which
import pytest
def requires(name: str):
""" A decorator for pytest tests that skips the test if a program with the
given name is not found of the machine. """
def decorator(f):
dec = pytest.mark.skipif(
which(name) is None,
reason=f"Requires program {name!r}",
)
return dec(f)
return decorator
| 2.65625 | 3 |
python/opscore/RO/Astro/Sph/AngSideAng.py | sdss/opscore | 0 | 12795594 | #!/usr/bin/env python
__all__ = ["angSideAng"]
import opscore.RO.MathUtil
import opscore.RO.SysConst
def angSideAng(side_aa, ang_B, side_cc):
"""
Solves a spherical triangle for two angles and the side connecting them,
given the remaining quantities.
Inputs:
- side_aa side aa; range of sides: [0, 180]
- ang_B angle b; range of angles: [0, 360)
- side_cc side cc
Returns a tuple containing:
- ang_A angle a
- side_bb side bb
- ang_C angle c
- unknownAng if true, angle A and angle C could not be computed
(and are both set to 90); bb will be 0 or 180
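
    Example (from the tests below): angSideAng(90.0, 90.0, 90.0) returns (90.0, 90.0, 90.0, False),
    the right spherical triangle whose sides and angles are all 90 degrees.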
Error Conditions:
- If the inputs are too small to allow computation, raises ValueError
- If side bb is near 0 or 180 (see Special Cases below for when this occurs)
then angles a and c cannot be computed. In this case "unknownAng" = true,
ang_A = ang_C = 90.0. Also side_bb = 0.0, which is essentially correct.
Note that the sum ang_A + ang_C is 180, which is also essentially correct.
Special Cases (in the order they are handled):
side_aa ang_B side_cc ang_A side_bb ang_C
----------------------------------------------------------------
~0 any ~0 unknown(90) 0 unknown(90)
~0 any ~180 unknown(90) 180 unknown(90)
~0 any !pole 0 side_cc 180-ang_B
~180 any ~0 unknown(90) 180 unknown(90)
~180 any ~180 unknown(90) 0 unknown(90)
~180 any !pole 180 180-side_cc ang_B
!pole any ~0 180-ang_B side_aa 0
!pole any ~180 ang_B 180-side_aa 180
any ~0 ~=side_aa unknown(90) 0 unknown(90)
any ~0 <side_aa 180 side_aa-cc 0
any ~0 >side_aa 0 side_cc-aa 180
where:
- !pole means not nearly 0 and not nearly 180 (modulo 360)
- unknown(90) means unknownAng is set True and the angle is unknown and is
abitrarily set to 90 degrees. The sum of ang_A and ang_C is correct
and the value of side_bb is correct to within epsilon.
- all relations are modulo 360. For example ~0 means approximately zero, 360, etc.
Warnings:
Allowing angles in the 3rd and 4th quadrants is unusual.
References:
Selby, Standard Math Tables, crc, 15th ed, 1967, p161 (Spherical Trig.)
History:
2002-07-22 ROwen Converted from TCC's sph_AngSideAng 1-6.
2010-07-30 ROwen Changed output zero_bb to unknownAng; side_bb may be 180 instead of 0.
Bug fix: in some cases side_bb may be 180 and ang_A and ang_C unknown.
Improved accuracy in some corner cases; all unit tests now pass.
Greatly expanded the unit tests.
2010-08-04 ROwen Bug fix: mis-handled two cases:
- side_aa tiny + side_cc normal: special case table, code and unit test were incorrect
- side_aa normal + side_cc tiny: table was right but code and unit test had errors
2011-01-28 ROwen Bug fix: unknownAng should always be true if side_aa and side_cc are nearly 0 or 180
but that was not happening if ang_B was nearly 0. Fixed by evaluating ang_B
special cases after side_aa and side_cc special cases.
Tweaked the documentation to clarify the special cases.
"""
sin_h_aa = opscore.RO.MathUtil.sind(side_aa)
sin_h_cc = opscore.RO.MathUtil.sind(side_cc)
sin_h_B = opscore.RO.MathUtil.sind(ang_B * 0.5)
cos_h_B = opscore.RO.MathUtil.cosd(ang_B * 0.5)
sin_h_aa = opscore.RO.MathUtil.sind(side_aa * 0.5)
cos_h_aa = opscore.RO.MathUtil.cosd(side_aa * 0.5)
sin_h_cc = opscore.RO.MathUtil.sind(side_cc * 0.5)
cos_h_cc = opscore.RO.MathUtil.cosd(side_cc * 0.5)
if abs(sin_h_aa) < opscore.RO.SysConst.FAccuracy:
# side_aa is nearly zero (modulo 360)
if abs(sin_h_cc) < opscore.RO.SysConst.FAccuracy:
# side_cc is nearly 0 (modulo 360)
return (90.0, 0.0, 90.0, True)
elif abs(cos_h_cc) < opscore.RO.SysConst.FAccuracy:
# side_cc is nearly 180 (modulo 360)
return (90.0, 180.0, 90.0, True)
else:
# side_cc is not nearly 0 or 180
ang_A = 0.0
side_bb = side_cc
ang_C = 180.0 - ang_B
elif abs(cos_h_aa) < opscore.RO.SysConst.FAccuracy:
# side_aa is nearly 180 (modulo 360)
if abs(cos_h_cc) < opscore.RO.SysConst.FAccuracy:
# side_cc is nearly 180 (modulo 360)
return (90.0, 0.0, 90.0, True)
elif abs(sin_h_cc) < opscore.RO.SysConst.FAccuracy:
# side_cc is nearly 0 (modulo 360)
return (90.0, 180.0, 90.0, True)
else:
# side_cc is not nearly 0 or 180 (modulo 360)
ang_A = 180.0
side_bb = 180.0 - side_cc
ang_C = ang_B
elif abs(sin_h_cc) < opscore.RO.SysConst.FAccuracy:
# side_cc is nearly zero (modulo 360) and side_aa is not
ang_A = 180.0 - ang_B
side_bb = side_aa
ang_C = 0.0
elif abs(cos_h_cc) < opscore.RO.SysConst.FAccuracy:
# side_cc is nearly 180 (modulo 360) and side_aa is not
ang_A = ang_B
side_bb = 180.0 - side_aa
ang_C = 180.0
elif abs(sin_h_B) < opscore.RO.SysConst.FAccuracy:
# B is nearly 0 (modulo 360)
if abs(side_aa - side_cc) < opscore.RO.SysConst.FAccuracy:
# ang_B ~= 0 (modulo 360) and side_aa ~= side_cc (modulo 360); cannot compute ang_A or ang_C:
return (90.0, 0.0, 90.0, True)
elif side_cc < side_aa:
ang_A = 180.0
side_bb = side_aa - side_cc
ang_C = 0.0
else:
ang_A = 0.0
side_bb = side_cc - side_aa
ang_C = 180.0
else:
# +
# compute angles a and c using Napier's analogies
# -
# compute sin((aa +/- cc) / 2) and cos((aa +/- cc) / 2)
sin_h_sum_aacc = sin_h_aa * cos_h_cc + cos_h_aa * sin_h_cc
sin_h_diff_aacc = sin_h_aa * cos_h_cc - cos_h_aa * sin_h_cc
cos_h_sum_aacc = cos_h_aa * cos_h_cc - sin_h_aa * sin_h_cc
cos_h_diff_aacc = cos_h_aa * cos_h_cc + sin_h_aa * sin_h_cc
# compute numerator and denominator, where tan((a +/- c) / 2) = num/den
num1 = cos_h_B * cos_h_diff_aacc
den1 = sin_h_B * cos_h_sum_aacc
num2 = cos_h_B * sin_h_diff_aacc
den2 = sin_h_B * sin_h_sum_aacc
# if numerator and denominator are too small
# to accurately determine angle = atan2 (num, den), give up
if (((abs (num1) <= opscore.RO.SysConst.FAccuracy) and (abs (den1) <= opscore.RO.SysConst.FAccuracy))
or ((abs (num2) <= opscore.RO.SysConst.FAccuracy) and (abs (den2) <= opscore.RO.SysConst.FAccuracy))):
raise RuntimeError("Bug: can't compute ang_A and C with side_aa=%s, ang_B=%s, side_cc=%s" % (side_aa, ang_B, side_cc))
# compute (a +/- c) / 2, and use to compute angles a and c
h_sum_AC = opscore.RO.MathUtil.atan2d (num1, den1)
h_diff_AC = opscore.RO.MathUtil.atan2d (num2, den2)
# print "sin_h_B, cos_h_B =", sin_h_B, cos_h_B
# print "sin_h_aa, cos_h_aa =", sin_h_aa, cos_h_aa
# print "sin_h_cc, cos_h_cc =",sin_h_cc, cos_h_cc
# print "sin_h_diff_aacc, sin_h_sum_aacc =", sin_h_diff_aacc, sin_h_sum_aacc
# print "num1, den1, num2, den2 =", num1, den1, num2, den2
# print "h_sum_AC, h_diff_AC =", h_sum_AC, h_diff_AC
ang_A = h_sum_AC + h_diff_AC
ang_C = h_sum_AC - h_diff_AC
# +
# compute side bb using one of two Napier's analogies
# (one is for bb - aa, one for bb + aa)
# -
# preliminaries
sin_h_A = opscore.RO.MathUtil.sind(ang_A * 0.5)
cos_h_A = opscore.RO.MathUtil.cosd(ang_A * 0.5)
sin_h_sum_BA = sin_h_B * cos_h_A + cos_h_B * sin_h_A
sin_h_diff_BA = sin_h_B * cos_h_A - cos_h_B * sin_h_A
cos_h_sum_BA = cos_h_B * cos_h_A - sin_h_B * sin_h_A
cos_h_diff_BA = cos_h_B * cos_h_A + sin_h_B * sin_h_A
# numerator and denominator for analogy for bb - aa
num3 = sin_h_cc * sin_h_diff_BA
den3 = cos_h_cc * sin_h_sum_BA
# numerator and denominator for analogy for bb + aa
num4 = sin_h_cc * cos_h_diff_BA
den4 = cos_h_cc * cos_h_sum_BA
# compute side bb
if abs (num3) + abs (den3) > abs (num4) + abs (den4):
# use Napier's analogy for bb - aa
side_bb = 2.0 * opscore.RO.MathUtil.atan2d (num3, den3) + side_aa
else:
side_bb = 2.0 * opscore.RO.MathUtil.atan2d (num4, den4) - side_aa
side_bb = opscore.RO.MathUtil.wrapPos (side_bb)
return (opscore.RO.MathUtil.wrapPos(ang_A), side_bb, opscore.RO.MathUtil.wrapPos(ang_C), False)
if __name__ == "__main__":
import opscore.RO.SeqUtil
print("testing angSideAng")
Eps = 1.0e-15
EpsTest = Eps * 1.001
testData = []
# test data is formatted as follows:
# a list of entries, each consisting of:
# - the input argument
# - the expected result: ang_C, side_bb, ang_A, [unknownAng] (unknownAng defaults to False)
# a ~ 0, B = various, c various:
# if c nearly 0 (modulo 360): expect C = 90, b = 0, A = 90, unknownAng
# if c nearly 180 (modulo 360): expect C = 90, b = 180, A = 90, unknownAng
# else: expect A = 0, b = a - c, C = 180 - B
for side_aa in (-Eps, 0.0, Eps):
for ang_B in (0.0, Eps, 32.0, 97.0, 179.0, 180.0 - Eps, 180.0, 180.0 + Eps, 210.0, 360.0 - Eps, 360.0):
for side_cc in (180.0, 180.0 - Eps, 179.0, 47.0, Eps, 0.0):
if abs(side_cc % 360.0) < EpsTest:
expRes = (90.0, 0.0, 90.0, True)
elif abs((side_cc - 180) % 360.0) < EpsTest:
expRes = (90.0, 180.0, 90.0, True)
else:
expRes = (0.0, side_cc - side_aa, 180.0 - ang_B)
testData.append(((side_aa, ang_B, side_cc), expRes))
# a ~ 180, B = various, c various:
# if c nearly 180 (modulo 360): expect C = 90, b = 0, A = 90, unknownAng
# if c nearly 0 (modulo 360): expect C = 90, b = 180, A = 90, unknownAng
# else: expect A = 180, b = 180 - c, C = B
for side_aa in (180.0 - Eps, 180.0, 180.0 + Eps):
for ang_B in (0.0, Eps, 32.0, 97.0, 179.0, 180.0 - Eps, 180.0, 180.0 + Eps, 210.0, 360.0 - Eps, 360.0):
for side_cc in (180.0, 180.0 - Eps, 179.0, 47.0, Eps, 0.0):
if abs((180.0 - side_cc) % 360.0) < EpsTest:
expRes = (90.0, 0.0, 90.0, True)
elif abs(side_cc % 360.0) < EpsTest:
expRes = (90.0, 180.0, 90.0, True)
else:
expRes = (180.0, 180.0 - side_cc, ang_B)
testData.append(((side_aa, ang_B, side_cc), expRes))
# c ~ 0, B = various, a various:
# if a nearly 0: expect C = 90, b = 0, A = 90, unknownAng
# if a nearly 180: expect C = 90, b = 180, A = 90, unknownAng
# else: expect A = 180 - B, b = a, C = 0
for side_cc in (0.0, Eps):
for ang_B in (0.0, Eps, 32.0, 97.0, 179.0, 180.0 - Eps, 180.0, 180.0 + Eps, 210.0, 360.0 - Eps, 360.0):
for side_aa in (180.0, 180.0 - Eps, 179.0, 47.0, Eps, 0.0):
if abs(side_aa % 360.0) < EpsTest:
expRes = (90.0, 0.0, 90.0, True)
elif abs((180.0 - side_aa) % 360.0) < EpsTest:
expRes = (90.0, 180.0, 90.0, True)
else:
expRes = (180.0 - ang_B, side_aa, 0.0)
testData.append(((side_aa, ang_B, side_cc), expRes))
# c ~ 180, B = various, a various:
# if a nearly 0 (modulo 360): expect C = 90, b = 180, A = 90, unknownAng
# if a nearly 180 (modulo 360): expect C = 90, b = 0, A = 90, unknownAng
# else: expect A = 180, b = 180 - c, C = B
for side_cc in (180.0 - Eps, 180.0):
for ang_B in (0.0, Eps, 32.0, 97.0, 179.0, 180.0 - Eps, 180.0, 180.0 + Eps, 210.0, 360.0 - Eps, 360.0):
for side_aa in (180.0, 180.0 - Eps, 179.0, 47.0, Eps, 0.0):
if side_aa < EpsTest:
expRes = (90.0, 180.0, 90.0, True)
elif 180.0 - side_aa < EpsTest:
expRes = (90.0, 0.0, 90.0, True)
else:
expRes = (ang_B, 180.0 - side_aa, 180.0)
testData.append(((side_aa, ang_B, side_cc), expRes))
# a = 90, B varies but not nearly 0 or 360, c fairly small but >> Eps
# expect: A = 180 - B, b = a + c cos(B), C ~= 0
side_aa = 90.0
for side_cc in (1.0e-12, 1.0e-10):
for ang_B in (23, 90, 180 - Eps, 180, 180 + Eps, 256, 359):
expRes = (180.0 - ang_B, side_aa + (side_cc * opscore.RO.MathUtil.cosd(ang_B)), 0.0)
testData.append(((side_aa, ang_B, side_cc), expRes))
# a fairly small but >> Eps, B varies, c = 90
# expect: C = 180 - B, b = c + a cos(B), A ~= 0
side_cc = 90.0
for side_aa in (1.0e-12, 1.0e-10):
for ang_B in (23, 90, 180 - Eps, 180, 180 + Eps, 256, 359):
expRes = (0.0, side_cc + (side_aa * opscore.RO.MathUtil.cosd(ang_B)), 180.0 - ang_B)
testData.append(((side_aa, ang_B, side_cc), expRes))
# B small, a = any not small, c = any not small:
# if c != a: expect A = 90, b = 0, C = 90, unknown
# if c << a: expect A = 180, b = c - a, C = 0
# if c >> a: expect A = 0, b = a - c, C = 180
for side_aa in (179.9, -27.0, 27.0, 0.1):
for side_cc in (side_aa - 45.0, side_aa - Eps, side_aa, side_aa + Eps, side_aa + 45.0):
if abs(side_cc - side_aa) < EpsTest:
expRes = (90.0, 0.0, 90.0, True)
elif side_cc < side_aa:
expRes = (180.0, side_aa - side_cc, 0.0)
else:
expRes = (0.0, side_cc - side_aa, 180.0)
for ang_B in (-Eps, 0.0, Eps):
testData.append(((side_aa, ang_B, side_cc), expRes))
# right triangle: B = 90, a and c vary but avoid poles
# tan C = tan c / sin a
# tan c = (tan a / sinA * sinb)
# with some tweaks to handle the other quadrants
ang_B = 90.0
for side_aa in (1.0, 20.0, 45.0, 90, 110.0, 179.0):
for side_cc in (1.0, 20.0, 45.0, 90.0, 110.0, 179.0):
ang_A = opscore.RO.MathUtil.atan2d(opscore.RO.MathUtil.tand(side_aa), opscore.RO.MathUtil.sind(side_cc))
ang_C = opscore.RO.MathUtil.atan2d(opscore.RO.MathUtil.tand(side_cc), opscore.RO.MathUtil.sind(side_aa))
side_bb = opscore.RO.MathUtil.atan2d(opscore.RO.MathUtil.tand(side_aa), opscore.RO.MathUtil.sind(ang_A) * opscore.RO.MathUtil.cosd(side_cc))
# these tweaks handle other quadrants; they're based on what works, so are somewhat suspect
if side_bb < 0:
side_bb = - side_bb
if ang_A < 0:
ang_A = 180.0 + ang_A
if ang_C < 0:
ang_C = 180.0 + ang_C
testData.append(((side_aa, ang_B, side_cc), (ang_A, side_bb, ang_C)))
testData += [
# 90/90/90 triangle
((90, 90, 90), (90, 90, 90)),
# inputs that might cause side_bb < 0, (but should not)
((45, 1, 45), (89.6464421219342, 0.707102293688337, 89.6464421219342)),
((45, -1, 45), (270.353557878066, 0.707102293688337, 270.353557878066)),
((135, 1, 135), (90.3535578780658, 0.707102293688337, 90.3535578780658)),
((135, -1, 135), (269.646442121934, 0.707102293688308, 269.646442121934)),
]
def processOutput(outputVec):
return (
opscore.RO.MathUtil.sind(outputVec[0]), opscore.RO.MathUtil.cosd(outputVec[0]),
outputVec[1],
opscore.RO.MathUtil.sind(outputVec[2]), opscore.RO.MathUtil.cosd(outputVec[2]),
outputVec[3],
)
for testInput, expectedOutput in testData:
if len(expectedOutput) < 4:
expectedOutput = expectedOutput + (False,)
actualOutput = angSideAng(*testInput)
# to handle angles comparing things like 359.999... to 0, compare sin and cos of ang_A and ang_C:
procExpected = processOutput(expectedOutput)
procActual = processOutput(actualOutput)
if opscore.RO.SeqUtil.matchSequences(procExpected, procActual, rtol=1.0e-10, atol=1.0e-10):
print("failed on input:", testInput)
print("expected output:", expectedOutput)
print("actual output:", actualOutput)
print()
if actualOutput[0] < 0.0 or actualOutput[0] >= 360.0 \
or actualOutput[1] < 0.0 or actualOutput[1] >= 360.0 \
or actualOutput[2] < 0.0 or actualOutput[2] >= 360.0:
print("failed on input:", testInput)
print("one or more angles out of range:", actualOutput)
print()
| 3.71875 | 4 |
src/subtitle_translator/main.py | BercziSandor/subtitle_translator | 0 | 12795595 | """
Translate SRT subtitle files into another language using Google Translate via deep-translator.
"""
import io
import logging
import re
import string
import sys
import textwrap
import time
from datetime import timedelta
from pathlib import Path
from typing import List
import sublib
from deep_translator import GoogleTranslator
# https://pypi.org/project/sublib/
# https://pypi.org/project/deep-translator
sample_file = Path(__file__).parent.parent.parent.absolute() / Path('input/1_short.srt')
sample_str = io.StringIO(textwrap.dedent('''\
1
00:00:00,123 --> 00:00:03,456
Hi there
2
00:01:04,843 --> 00:01:05,428
This is an example of a
subtitle file in SRT format
'''))
def translate_array(texts: List[str], source_language='auto', target_language='hu'):
"""
It takes a list of texts and translates them from source language to target language
:param texts: The list of texts to be translated
:type texts: List[str]
:param source_language: The language you want to translate from, defaults to auto (optional)
:param target_language: The language to translate the text into, defaults to hu (optional)
:return: A list of translated texts.
"""
for i, text in enumerate(texts):
if not text or not isinstance(text, str) or not text.strip():
texts[i] = " zzz "
if text.isdigit() or all(i in string.punctuation for i in text):
texts[i] += " zzz "
result = GoogleTranslator(source=source_language, target=target_language).translate_batch(texts)
return result
def split_up(text: str, pieces_count: int = 2) -> List[str]:
"""
Given a text and a number of pieces, split the text into pieces
:param text: The text to split up
:type text: str
:param pieces_count: The number of pieces to split the text into, defaults to 2
:type pieces_count: int (optional)
:return: A list of strings.
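
    Example (illustrative): split_up("one two three four", 2) -> ["one two", "three four"]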
"""
pieces = []
if pieces_count < 1:
logging.error("pieces error.")
sys.exit(1)
elif pieces_count == 1:
return [text]
def get_optimal_split(where: float, p_split_points: List[int]):
"""
Get the optimal split point from a list of split points
:param where: the point where you want to split the data
:type where: float
:param p_split_points: The list of split points
:type p_split_points: List[int]
:return: The optimal split point and the list of split points with the optimal split point removed.
"""
distance_min = 9999.0
min_point = None
for a_split_point in p_split_points:
distance = abs(where - a_split_point)
if distance < distance_min:
distance_min = distance
min_point = a_split_point
if min_point:
p_split_points.remove(min_point)
return min_point, p_split_points
len_of_a_piece = len(text) / pieces_count
optimal_split_positions = [len_of_a_piece * x for x in range(1, pieces_count)]
indices_object = re.finditer(pattern=r'\w+', string=text)
possible_split_points = [index.start() for index in indices_object]
if 0 in possible_split_points:
possible_split_points.remove(0)
if len(possible_split_points) + 1 < pieces_count:
logging.info("[{}]".format(" | ".join(re.split(r'\W+', text).remove(''))))
logging.error(
f"There are {len(possible_split_points)} split points and we want "
f"to split the text '{text}' in {pieces_count} pieces... Giving up.")
sys.exit(42)
def get_split_points(optimal_split_positions: List[float],
p_possible_split_points: List[int] = possible_split_points):
"""
Given a list of optimal split positions, return a list of the corresponding split points
:param optimal_split_positions: The list of optimal split positions
:type optimal_split_positions: List[float]
:param p_possible_split_points: List[int] = possible_split_points
:type p_possible_split_points: List[int]
:return: The list of optimal split points.
"""
split_points = []
for an_optimal_position in optimal_split_positions:
a_split_point, p_possible_split_points = get_optimal_split(where=an_optimal_position,
p_split_points=p_possible_split_points)
split_points.append(a_split_point)
return split_points
start_ind = 0
for split_point in get_split_points(optimal_split_positions=optimal_split_positions,
p_possible_split_points=possible_split_points):
pieces.append(text[start_ind:split_point].strip())
start_ind = split_point
pieces.append(text[start_ind:].strip())
logging.debug(f"Splitting up '{text}' in {pieces_count} pieces: {pieces}")
return pieces
def translate_subtitle_file(input_file=sample_file, target_language='hu'):
"""
It takes a subtitle file, splits it up into sentences, translates them, and then puts them back together
:param input_file: The subtitle file to be translated
:param target_language: The language you want the text to be translated to, defaults to hu (optional)
:return: The lines of the translated file.
"""
translation_char_limit = 4000 # 4000
subtitle = sublib.SubRip(input_file, "utf-8")
# s2 = copy.deepcopy(subtitle)
general = subtitle.get_general_format()
def is_end_of_sentence(text: str):
return text.endswith('.') or text.endswith('?') or text.endswith('!')
def starts_with_lowercase(text: str):
first_char = text[0]
return first_char.isalpha() and first_char.islower()
translated_all = []
entries_to_be_translated = []
entry = {'index_start': 0, 'index_end': 0, 'text': ''}
logging.info("# Phase 1: Prepare translation: Join entries to sentences.")
for i, a_general in enumerate(general):
start, end, text = a_general
text = text.replace('|', ' ').replace(' ', '')
if len(entry['text']) > 0:
entry['text'] += ' '
entry['text'] += text
if len(general) > i + 1:
start_next = general[i + 1][0]
else:
start_next = end + timedelta(100)
silence_to_next = start_next - end
if is_end_of_sentence(text) or silence_to_next.seconds > 1:
entry['index_end'] = i
entries_to_be_translated.append(entry)
entry = {'index_start': i + 1, 'index_end': i + 1, 'text': ''}
logging.info("# Phase 2: Translate (5000 char limitation)")
start = 0
last_i = len(entries_to_be_translated)
translated_all = []
for i in range(last_i):
an_entry = entries_to_be_translated[start:i + 1]
chars_sum = sum([len(t['text']) for t in an_entry])
if chars_sum > translation_char_limit - 10 or i == last_i - 1:
texts = [t['text'] for t in entries_to_be_translated[start:i + 1]]
time_start = general[entries_to_be_translated[start]['index_end']][1]
time_end = general[entries_to_be_translated[i]['index_end']][1]
# strfdelta(time_start, "{hours}:{minutes}:{seconds}")
logging.info("Translating {} - {}".format(str(time_start)[:-3], str(time_end)[:-3]))
start = time.time()
translated = translate_array(texts=texts, target_language=target_language)
end = time.time()
logging.info(
"{} requests in {:.2f} seconds,{:.0f} ch/s, "
"{:.2f} req/s".format(len(texts),
end - start,
float(chars_sum) / (
end - start),
float(len(texts)) / (
end - start)))
for res in zip(texts, translated):
logging.debug(f" [{res[0]}] -> [{res[1]}]")
translated_all.extend(translated)
# print(translated)
start = i + 1
logging.info("# Phase 3: Split up sentences (undo #1)")
for i, entry in enumerate(entries_to_be_translated):
text_long = translated_all[i]
split_pieces = entry['index_end'] - entry['index_start'] + 1
texts = split_up(text=text_long, pieces_count=split_pieces)
if len(texts) != split_pieces:
logging.error("bahh")
insert_start = entry['index_start']
insert_end = entry['index_end']
for i2 in range(insert_end - insert_start + 1):
iii = insert_start + i2 - 1
if iii < len(general) - 1:
general[iii][2] = texts[i2]
else:
logging.error("Index overrun.")
sys.exit(1)
logging.info("# Phase 4: Split up lines")
for i, entry in enumerate(general):
pieces = int(len(entry[2]) / 40) + 1
if pieces > 1:
new_text = "\n".join(split_up(entry[2], pieces_count=pieces))
entry[2] = new_text
logging.info("# Phase 5: Saving file")
empty_subtitle = sublib.SubRip()
empty_subtitle.set_from_general_format(general)
lines = empty_subtitle.content
output_name = str(input_file).replace('.srt', '.out.srt')
logging.info(f" Writing output to {output_name}")
with open(output_name, 'w', encoding='utf-8') as out:
out.writelines(lines)
return lines
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
# ss = split_up("Ööö, mit csináltál?", 3)
# ss = split_up("Ööö, mit sdfg sfhg wert sxghsfhgdfhg dfhg g ghdfhg csináltál?", 15)
# result = translate_array(texts=["hallo welt", "guten morgen",
# 'Weltfrieden für Manuela'], target_language='hu')
translate_subtitle_file(input_file=sample_file)
| 3.125 | 3 |
main.py | jnsougata/Ezcord | 0 | 12795596 | import os
from src.ezcord import *
APP = 874663148374880287
TEST = 877399405056102431
bot = Bot(prefix='-', app_id=APP, guild_id=TEST, intents=Intents.members)
@bot.command(name='ping')
async def ping(ctx: Context):
emd = Embed(description=f'**Pong: {bot.latency}ms**')
await ctx.reply(embed=emd)
@bot.command(name='foo')
async def _foo(ctx: Context):
await ctx.send(f'{bot.user()}')
@bot.event
async def on_ready():
print(f'Logged in as {bot.user} (ID: {bot.user.id})')
print(f'------')
bot.run(os.getenv('DISCORD_TOKEN'))
| 2.09375 | 2 |
tests/test_create_json_schema.py | kaiba-tech/kaiba | 5 | 12795597 | from kaiba.models.kaiba_object import KaibaObject
def test_create_jsonschema_from_model():
"""Test that we can create jsonschema."""
assert KaibaObject.schema_json(indent=2)
| 2.25 | 2 |
src/tests/pyetheroll/test_etherscan_utils.py | homdx/EtherollApp | 0 | 12795598 | <reponame>homdx/EtherollApp<gh_stars>0
import unittest
from unittest import mock
from pyetheroll.etherscan_utils import get_etherscan_api_key
class TestEtherscanlUtils(unittest.TestCase):
def test_get_etherscan_api_key(self):
"""
Verifies the key can be retrieved from either:
1) environment
2) file
        3) or falls back on the default key
"""
expected_key = '0102030405060708091011121314151617'
# 1) environment
with mock.patch.dict(
'os.environ', {'ETHERSCAN_API_KEY': expected_key}):
actual_key = get_etherscan_api_key()
self.assertEqual(actual_key, expected_key)
# 2) file
read_data = '{ "key" : "%s" }' % (expected_key)
with mock.patch('builtins.open', mock.mock_open(read_data=read_data)) \
as m_open:
actual_key = get_etherscan_api_key()
self.assertEqual(expected_key, actual_key)
# verifies the file was read
self.assertTrue(
m_open.call_args_list[0][0][0].endswith(
'/pyetheroll/api_key.json'))
self.assertEqual(m_open.call_args_list[0][1], {'mode': 'r'})
        # 3) or falls back on the default key
with mock.patch('builtins.open') as m_open, \
mock.patch('pyetheroll.etherscan_utils.logger') as m_logger:
m_open.side_effect = FileNotFoundError
actual_key = get_etherscan_api_key()
self.assertEqual('YourApiKeyToken', actual_key)
# verifies the fallback warning was logged
self.assertTrue(
'Cannot get Etherscan API key.'
in m_logger.warning.call_args_list[0][0][0])
| 2.515625 | 3 |
src/a06pliki/pknorlen.py | tborzyszkowski/PSPI_Wstep_do_programowania | 0 | 12795599 | import csv
import statistics
from matplotlib import pyplot as plt
from scipy.interpolate import interp1d
import numpy as np
with open('pknorlen_akcje.csv') as csvfile:
readCSV = csv.reader(csvfile, delimiter=',')
next(readCSV)
counter = 0
kursy_lista = []
for row in readCSV:
kursy_lista.append({"data": row[0], "kurs_max": float(row[5]), "kurs_min": float(row[6])})
counter += 1
if counter > 5000:
break
print(counter)
print("Srednia: ", statistics.mean([k["kurs_max"] for k in kursy_lista]))
print("Odch.Std:", statistics.stdev([k["kurs_max"] for k in kursy_lista]))
print("Max: ", max([k["kurs_max"] for k in kursy_lista]))
print("Min:", min([k["kurs_max"] for k in kursy_lista]))
y_es = [k["kurs_max"] for k in kursy_lista]
x_es = range(0,len(y_es))
f_linear = interp1d(x_es, y_es, kind='linear')
# xnew = np.arange(1, len(y_es), 0.1)
# plt.plot(x_es, y_es, 'o', x_es, f_linear(x_es), '-')
plt.plot(x_es, f_linear(x_es), '-')
plt.show() | 2.921875 | 3 |
projects/iosDataGeneration/udemy/test.py | eu-snehagupta/learningpython | 0 | 12795600 | import os
import csv
from csv import reader
def read_data():
data = []
with open ("data.csv", "r") as f:
csv_reader = reader(f)
header = next(csv_reader)
if header != None:
for rows in csv_reader:
data.append(rows[0].replace(";",","))
print(data)
with open ("data1.csv", "w") as f:
for eachitem in data:
f.write(eachitem + "\n")
read_data() | 3.421875 | 3 |
Products_infos_Functions.py | AndreVinni89/MercadoLivre_WebScraping | 0 | 12795601 | from bs4 import BeautifulSoup as bs
import re
import mysql.connector
class Products_Infos():
def __init__(self, products):
self.products = products
self.productInfos = []
def insertSpaces(self):
for i in self.products:
self.productInfos.append([])
def get_product_link(self):
cont = 0
for link in self.products:
product_link = link.find('a', class_='item-link item__js-link').get('href')
self.productInfos[cont].append(product_link)
cont += 1
def get_product_name(self):
cont = 0
for name in self.products:
product_name = name.find('span', class_='main-title').string
self.productInfos[cont].append(product_name)
cont += 1
def get_product_price(self):
cont = 0
for price in self.products:
try:
product_price = price.find('span', class_='price__fraction').string
except:
try:
product_price_label = price.find('div', class_=re.compile('pdp_options__text'))
product_price = product_price_label.find('span').string
except:
print('HOUVE UM ERRO AO LER O PREÇO DO PRODUTO')
cont += 1
else:
self.productInfos[cont].append(product_price)
cont += 1
print(product_price)
else:
self.productInfos[cont].append(product_price)
cont += 1
def get_shipping_info(self):
cont = 0
for ship in self.products:
try:
product_shipping_info = ship.find('span', class_='text-shipping').string
except:
self.productInfos[cont].append(0)
cont += 1
else:
self.productInfos[cont].append(1)
cont += 1
def get_product_image(self):
cont = 0
for image in self.products:
try:
product_image = image.find('img', src=re.compile('https://http2.mlstatic.com')).get('src')
except:
print('ERRO AO LER A IMAGEM')
self.productInfos[cont].append("")
cont += 1
else:
self.productInfos[cont].append(product_image)
cont += 1
| 2.75 | 3 |
certbot_dns_directadmin/__init__.py | cybercinch/certbot-dns-directadmin | 8 | 12795602 | <reponame>cybercinch/certbot-dns-directadmin
"""
The `~certbot-dns-directadmin:directadmin` plugin automates the process of
completing a ``dns-01`` challenge (`~acme.challenges.DNS01`) by creating, and
subsequently removing, TXT records using the DirectAdmin API.
Named Arguments
---------------
=========================================================================================================
``--directadmin-credentials`` DirectAdmin Credentials file. (Required)
``--directadmin-propagation-seconds`` The number of seconds to wait for DNS to
propagate before asking the ACME server
to verify the DNS record. (Default: 60)
=========================================================================================================
Credentials
-----------
Use of this plugin requires an account on a DirectAdmin Server.
Both Username/Password authentication and Login Key authentication are supported.
To use Login Key authentication (Recommended) you will need to create a key with
the following permissions:
* ``CMD_API_LOGIN_TEST``
* ``CMD_API_DNS_CONTROL``
* ``CMD_API_SHOW_DOMAINS``
DirectAdmin provides instructions for creating a login key - `here <https://help.directadmin.com/item.php?id=523>`_
.. code-block:: ini
:name: directadmin.ini
:caption: Example credentials file:
# The DirectAdmin Server url
# include the scheme and the port number (Normally 2222)
directadmin_url = https://my.directadminserver.com:2222
# The DirectAdmin username
directadmin_username = username
# The DirectAdmin password
directadmin_password = <PASSWORD>
The path to this file can be provided interactively or using the
``--directadmin-credentials`` command-line argument. Certbot records the path
to this file for use during renewal, but does not store the file's contents.
.. caution::
You should protect these API credentials as you would a password. Users who
can read this file can use these credentials to issue some types of API calls
on your behalf, limited by the permissions assigned to the account. Users who
can cause Certbot to run using these credentials can complete a ``dns-01``
challenge to acquire new certificates or revoke existing certificates for
domains these credentials are authorized to manage.
Certbot will emit a warning if it detects that the credentials file can be
accessed by other users on your system. The warning reads "Unsafe permissions
on credentials configuration file", followed by the path to the credentials
file. This warning will be emitted each time Certbot uses the credentials file,
including for renewal, and cannot be silenced except by addressing the issue
(e.g., by using a command like ``chmod 600`` to restrict access to the file).
Examples
--------
.. code-block:: bash
:caption: To acquire a certificate for ``example.com``
certbot certonly \\
--authenticator directadmin \\
--directadmin-credentials ~/.secrets/certbot/directadmin.ini \\
-d example.com
.. code-block:: bash
:caption: To acquire a single certificate for both ``example.com`` and
``www.example.com``
certbot certonly \\
--authenticator directadmin \\
--directadmin-credentials ~/.secrets/certbot/directadmin.ini \\
-d example.com \\
-d www.example.com
.. code-block:: bash
:caption: To acquire a certificate for ``example.com``, waiting 120 seconds
for DNS propagation
certbot certonly \\
--authenticator directadmin \\
--directadmin-credentials ~/.secrets/certbot/directadmin.ini \\
--directadmin-propagation-seconds 120 \\
-d example.com
"""
| 2.015625 | 2 |
fate/test/test_insertoperations.py | Mattias1/fate | 0 | 12795603 | """
This module provides testcases for insertoperations.
Auto indentation is covered.
"""
from ..commands import selectnextline, selectpreviousword
from ..insertoperations import ChangeAfter, ChangeBefore, ChangeInPlace, ChangeAround
from ..undotree import undo
from .basetestcase import BaseTestCase
from .. import document
from .. import run
def deactivate(doc):
document.activedocument = None
class OperatorTest(BaseTestCase):
def setUp(self):
BaseTestCase.setUp(self)
selectnextline(self.document)
def test_change_before(self):
self.document.ui.feedinput(ChangeBefore)
for char in '\nasdf\b\b \n \n \n\n\b\b\n':
self.document.ui.feedinput(char)
self.document.ui.feedinput('Cancel')
self.document.ui.feedinput(deactivate)
run()
expected = '\nas \n \n \n \n\nimport sys'
self.assertEqual(expected, self.document.text[:len(expected)])
undo(self.document)
self.assertEqual('import sys\n\n', self.document.text[:12])
def test_change_after(self):
self.document.ui.feedinput(ChangeAfter)
for char in '\nasdf\b\b \n \n \n\n\b\b\n':
self.document.ui.feedinput(char)
self.document.ui.feedinput('Cancel')
self.document.ui.feedinput(deactivate)
run()
expected = 'import sys\nas \n \n \n \n\n'
self.assertEqual(expected, self.document.text[:len(expected)])
undo(self.document)
self.assertEqual('import sys\n\n', self.document.text[:12])
def test_change_in_place(self):
self.document.ui.feedinput(ChangeInPlace)
for char in '\nasdf\b\b \n \n \n\n\b\b\n':
self.document.ui.feedinput(char)
self.document.ui.feedinput('Cancel')
self.document.ui.feedinput(deactivate)
run()
expected = '\nas \n \n \n \n\n'
self.assertEqual(expected, self.document.text[:len(expected)])
undo(self.document)
self.assertEqual('import sys\n\n', self.document.text[:12])
def test_change_around(self):
self.document.ui.feedinput(ChangeAfter)
for char in '\n\n (hi)':
self.document.ui.feedinput(char)
self.document.ui.feedinput('Cancel')
self.document.ui.feedinput(selectpreviousword)
self.document.ui.feedinput(ChangeAround)
self.document.ui.feedinput('\n')
self.document.ui.feedinput('Cancel')
self.document.ui.feedinput(deactivate)
run()
expected = 'import sys\n\n (\n hi\n )\n'
self.assertEqual(expected, self.document.text[:len(expected)])
undo(self.document)
undo(self.document)
self.assertEqual('import sys\n\n', self.document.text[:12])
| 2.59375 | 3 |
backfill/urls.py | appasahebs/bzTakeHome | 0 | 12795604 | <reponame>appasahebs/bzTakeHome
from django.urls import path
from .views import BackfillView
urlpatterns = [
path('', BackfillView.as_view(), name='list')
]
| 1.648438 | 2 |
module3-nosql-and-document-oriented-databases/Cluster.py | John-G-Thomas/DS-Unit-3-Sprint-2-SQL-and-Databases | 0 | 12795605 | # If colab not locally Find out the IP address of this Colab Instance
# !curl ipecho.net/plain
"""
"How was working with MongoDB different from working with PostgreSQL? What was easier, and what was harder?":
-The biggest difference between MongoDB and PostgreSQL is how the data/information is stored.
-In PostgreSQL, schemas are used to connect to individual data tables, while MongoDB formats and stores
all of the data as documents. The two have unique differences, but in my opinion MongoDB seems like the
simpler/easier way to store data, and PostgreSQL has extra steps, so it is harder.
"""
"""first make shell and install pymongo and dnspython"""
import pymongo
password = '<PASSWORD>'  # Don't commit/share this! Reset it if it leaks
User = 'John-Thomas'
dbname = 'test'
connection = (
"mongodb+srv://John-Thomas:" + password + "@cluster.y2ftp.mongodb.net/" + dbname + "?retryWrites=true&w"
"=majority")
client = pymongo.MongoClient(connection)
db = client.test
dir(db.test)
# Let's figure out inserting some data
db.test.count_documents({'x': 1})
# 0
db.test.insert_one({'x': 1})
# <pymongo.results.InsertOneResult at 0x7f52ad9fd208>
db.test.count_documents({'x': 1})
# 1
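# Illustrative comparison (not from the original notebook): the same record written
# as a SQL row vs. a Mongo document. Table/collection names here are made up.
#   PostgreSQL:  INSERT INTO test (x) VALUES (1);
#   MongoDB:     db.test.insert_one({'x': 1})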
# Let's start the afternoon project
rpg_character = (1, "<NAME>", 10, 3, 0, 0, 0)
# We need key-value pairs, i.e. a dictionary!
# Lazy way (probably not ideal)
db.test.insert_one({'rpg_character': rpg_character})
db.test.find_one({'rpg_character': rpg_character})
# We can do better
# Mongo doesn't force us to have a schema, but
# we *should* try to choose useful/informative key names
rpg_doc = {
'sql_key': rpg_character[0],
'name': rpg_character[1],
'hp': rpg_character[2],
'level': rpg_character[3]
}
db.test.insert_one(rpg_doc)
list(db.test.find({'level': 3}))
# Make our doc better - annotate type so we can query on it
rpg_doc = {
'doc_type': 'rpg_character',
'sql_key': rpg_character[0],
'name': rpg_character[1],
'hp': rpg_character[2],
'level': rpg_character[3]
}
db.test.insert_one(rpg_doc)
list(db.test.find({'doc_type': 'rpg_character'}))
# Our goal - copy the charactercreator_character table
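# (sl_curs is assumed to be a sqlite3 cursor on the source RPG database, opened earlier in the assignment)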
get_characters = 'SELECT * FROM charactercreator_character;'
characters = sl_curs.execute(get_characters).fetchall()
characters[:10]
# worked on the first two assignments to review and study guide
| 2.90625 | 3 |
ex09/age_de_mon_chien.py | Nath39/Checkpoint00 | 0 | 12795606 | age = int(input ("Combien d'année à votre chien ?\n"))
ageVrai = age*7
print (f"Votre chien a {ageVrai} ans.")
| 3.78125 | 4 |
general/views.py | MarcinSzyc/KRA_PYT_W_02_WARSZTATY_3 | 0 | 12795607 | from django.shortcuts import render, redirect
from django.views import View
from .forms import UserLogin, UserRegistration
from django.contrib import messages
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.views import PasswordResetView
from django.core.exceptions import ImproperlyConfigured
from django.urls import reverse_lazy
# Main page view
class Home(View):
template = 'general/main_page.html'
def get(self, request):
return render(request, self.template)
# Login handling
class Login(View):
template = 'general/login_page.html'
def get(self, request):
empty_form = UserLogin
return render(request, self.template, locals())
def post(self, request):
filled_form = UserLogin(request.POST)
if filled_form.is_valid():
username = filled_form.cleaned_data.get('username_field')
            password = filled_form.cleaned_data.get('password')
user = authenticate(username=username, password=password)
if user:
messages.success(request, f'Hello {user} !!')
login(request, user)
return redirect(self.request.META.get('HTTP_REFERER'))
else:
messages.error(request, 'There is no such username in the database!')
return redirect(self.request.META.get('HTTP_REFERER'))
else:
error_list = [item for item in filled_form.errors.values()]
messages.error(request, f'Upps something went wrong!! \n {error_list[0]}')
return redirect(self.request.META.get('HTTP_REFERER'))
# Logout handling
class Logout(View):
def get(self, request):
logout(request)
return redirect('Home')
# Register handling
class Register(View):
template = 'general/login_page.html'
def get(self, request):
empty_form = UserRegistration
return render(request, self.template, locals())
def post(self, request):
filled_form = UserRegistration(request.POST)
if filled_form.is_valid():
filled_form.save()
messages.success(request, 'User created!')
return redirect(self.request.META.get('HTTP_REFERER'))
else:
error_list = [item for item in filled_form.errors.values()]
messages.error(request, f'Upps something went wrong!! \n {error_list}')
return redirect(self.request.META.get('HTTP_REFERER'))
class ModifiedPasswordResetView(PasswordResetView):
success_url = reverse_lazy('password_reset_done')
def get_success_url(self):
"""Return the URL to redirect to after processing a valid form."""
if not self.success_url:
raise ImproperlyConfigured("No URL to redirect to. Provide a success_url.")
else:
messages.info(self.request,
'''We've emailed you instructions for setting your password, if an account exists with the email you entered. You should receive them shortly. \n
If you don't receive an email, please make sure you've entered the address you registered with,and check your spam folder.''')
return str(self.request.META.get('HTTP_REFERER')) # success_url may be lazy
| 2.109375 | 2 |
crossmodal/door_models/dynamics.py | brentyi/multimodalfilter | 21 | 12795608 | <gh_stars>10-100
import torch
import torch.nn as nn
import torchfilter
import torchfilter.types as types
from fannypack.nn import resblocks
from . import layers
# TODO: Merge this with DoorDynamicsModelBrent
class DoorDynamicsModel(torchfilter.base.DynamicsModel):
def __init__(self, units=64):
"""Initializes a dynamics model for our door task."""
super().__init__(state_dim=3)
control_dim = 7
# Fixed dynamics covariance
self.Q_scale_tril = nn.Parameter(
torch.cholesky(torch.diag(torch.FloatTensor([0.05, 0.01, 0.01]))),
requires_grad=False,
)
# Build neural network
self.state_layers = layers.state_layers(units=units)
self.control_layers = layers.control_layers(units=units)
self.shared_layers = nn.Sequential(
nn.Linear(units * 2, units),
resblocks.Linear(units),
resblocks.Linear(units),
resblocks.Linear(units),
nn.Linear(units, self.state_dim + 1),
)
self.units = units
def forward(
self,
*,
initial_states: types.StatesTorch,
controls: types.ControlsTorch,
) -> types.StatesTorch:
N, state_dim = initial_states.shape[:2]
assert state_dim == self.state_dim
# (N, control_dim) => (N, units // 2)
control_features = self.control_layers(controls)
# (N, state_dim) => (N, units // 2)
state_features = self.state_layers(initial_states)
# (N, units)
merged_features = torch.cat((control_features, state_features), dim=-1)
# (N, units * 2) => (N, state_dim + 1)
output_features = self.shared_layers(merged_features)
# We separately compute a direction for our network and a scalar "gate"
# These are multiplied to produce our final state output
state_update_direction = output_features[..., :state_dim]
state_update_gate = torch.sigmoid(output_features[..., -1:])
state_update = state_update_direction * state_update_gate
# Return residual-style state update, constant uncertainties
states_new = initial_states + state_update
scale_trils = self.Q_scale_tril[None, :, :].expand(N, state_dim, state_dim)
return states_new, scale_trils
# This is the same as above, but with the noise tweaked/parameterized to make learning
# the noise model easier.
#
# Separate because the checkpoint files will no longer be compatible, and we don't want
# to just casually nuke Michelle's models...
#
class DoorDynamicsModelBrent(torchfilter.base.DynamicsModel):
def __init__(self, units=64):
"""Initializes a dynamics model for our door task."""
super().__init__(state_dim=3)
control_dim = 7
# Fixed dynamics covariance
self.Q_scale_tril_diag = nn.Parameter(
torch.sqrt(torch.FloatTensor([0.05, 0.01, 0.01])) / 8.0,
requires_grad=False,
)
# Build neural network
self.state_layers = layers.state_layers(units=units)
self.control_layers = layers.control_layers(units=units)
self.shared_layers = nn.Sequential(
nn.Linear(units * 2, units),
resblocks.Linear(units),
resblocks.Linear(units),
resblocks.Linear(units),
nn.Linear(units, self.state_dim + 1),
)
self.units = units
def forward(
self,
*,
initial_states: types.StatesTorch,
controls: types.ControlsTorch,
) -> types.StatesTorch:
N, state_dim = initial_states.shape[:2]
assert state_dim == self.state_dim
# (N, control_dim) => (N, units // 2)
control_features = self.control_layers(controls)
# (N, state_dim) => (N, units // 2)
state_features = self.state_layers(initial_states)
# (N, units)
merged_features = torch.cat((control_features, state_features), dim=-1)
# (N, units * 2) => (N, state_dim + 1)
output_features = self.shared_layers(merged_features)
# We separately compute a direction for our network and a scalar "gate"
# These are multiplied to produce our final state output
state_update_direction = output_features[..., :state_dim]
state_update_gate = torch.sigmoid(output_features[..., -1:])
state_update = state_update_direction * state_update_gate
# Return residual-style state update, constant uncertainties
states_new = initial_states + state_update
scale_trils = torch.diag(self.Q_scale_tril_diag)[None, :, :].expand(
N, state_dim, state_dim
)
return states_new, scale_trils
| 2.328125 | 2 |
util/__init__.py | atlas-calo-ml/GraphNets4Pions_LLNL | 1 | 12795609 | <gh_stars>1-10
# i am an import | 1.078125 | 1 |
goldman/queryparams/include.py | sassoo/goldman | 2 | 12795610 | <gh_stars>1-10
"""
queryparams.include
~~~~~~~~~~~~~~~~~~~
Determine relationship resources to include in the response
according to one or more criteria. Documented here:
jsonapi.org/format/#fetching-includes
"""
from goldman.exceptions import InvalidQueryParams
LINK = 'jsonapi.org/format/#fetching-includes'
PARAM = 'include'
def _validate_no_nesting(param):
""" Ensure the include field is not a nested relationship """
if '.' in param:
raise InvalidQueryParams(**{
'detail': 'The include query param of the "%s" field '
'is not supported. Nested relationship '
'inclusions are not currently supported' % param,
'links': LINK,
'parameter': PARAM,
})
def _validate_rels(param, rels):
""" Ensure the include field is a relationship """
if param not in rels:
raise InvalidQueryParams(**{
'detail': 'The include query param of the "%s" field '
'is not possible. It does not represent a '
'relationship field & on the primary resource '
'& is not eligible for inclusion as a compound '
'document.' % param,
'links': LINK,
'parameter': PARAM,
})
def init(req, model):
""" Return an array of fields to include. """
rels = model.relationships
params = req.get_param_as_list('include') or []
params = [param.lower() for param in params]
for param in params:
_validate_no_nesting(param)
_validate_rels(param, rels)
return params
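# Illustrative usage sketch (not part of the original module): for a request such as
#     GET /articles/1?include=author,comments
# against a model whose relationships contain 'author' and 'comments', init()
# returns ['author', 'comments']; an unknown field, or a nested path like
# 'comments.author', raises InvalidQueryParams instead.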
| 2.453125 | 2 |
python/paddle/fluid/tests/unittests/test_fused_gemm_epilogue_op.py | L-Net-1992/Paddle | 11 | 12795611 | <filename>python/paddle/fluid/tests/unittests/test_fused_gemm_epilogue_op.py
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
# Copyright (c) 2022 NVIDIA Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
import paddle
import paddle.fluid.core as core
from op_test import OpTest, skip_check_grad_ci, skip_check_inplace_ci
def gelu(x):
y_ref = 0.5 * x * (
1.0 + np.tanh(np.sqrt(2 / np.pi) * (x + 0.044715 * np.power(x, 3))))
return y_ref.astype(x.dtype)
def relu(x):
mask = x > 0
return x * mask
def get_output(X, Y, bias, act):
out = np.dot(X, Y) + bias
if act == 'relu':
return relu(out)
elif act == 'gelu':
return gelu(out)
else:
return out
@skip_check_inplace_ci(reason="no inplace op")
class TestFuseGemmBase(OpTest):
pass
@skip_check_grad_ci(reason="no grap op")
@unittest.skipIf(not core.is_compiled_with_cuda(),
"core is not compiled with CUDA")
class TestFuseGemmEpilogueOpReluMMFP16(TestFuseGemmBase):
def setUp(self):
self.op_type = "fused_gemm_epilogue"
self.place = core.CUDAPlace(0)
self.init_dtype_type()
self.inputs = {
'X': np.random.random((8, 4)).astype(self.dtype) - 0.5,
'Y': np.random.random((4, 128)).astype(self.dtype) - 0.5,
'Bias': np.random.random((128, )).astype(self.dtype) - 0.5
}
self.outputs = {
'Out':
get_output(self.inputs['X'], self.inputs['Y'], self.inputs['Bias'],
'relu')
}
self.attrs = {"activation": 'relu'}
def init_dtype_type(self):
self.dtype = np.float16
self.atol = 1e-3
def test_check_output(self):
if self.dtype == np.float16 and not core.is_float16_supported(
self.place):
return
self.check_output_with_place(self.place, atol=self.atol)
@skip_check_grad_ci(reason="no grap op")
@unittest.skipIf(not core.is_compiled_with_cuda(),
"core is not compiled with CUDA")
class TestFuseGemmEpilogueOpReluMMFP32(TestFuseGemmEpilogueOpReluMMFP16):
def init_dtype_type(self):
self.dtype = np.single
self.atol = 1e-6
@skip_check_grad_ci(reason="no grap op")
@unittest.skipIf(not core.is_compiled_with_cuda(),
"core is not compiled with CUDA")
class TestFuseGemmEpilogueOpReluMMFP64(TestFuseGemmEpilogueOpReluMMFP16):
def init_dtype_type(self):
self.dtype = np.double
self.atol = 1e-6
@skip_check_grad_ci(reason="no grap op")
@unittest.skipIf(not core.is_compiled_with_cuda(),
"core is not compiled with CUDA")
class TestFuseGemmEpilogueOpReluMTMFP16(TestFuseGemmBase):
def setUp(self):
self.op_type = "fused_gemm_epilogue"
self.place = core.CUDAPlace(0)
self.init_dtype_type()
self.inputs = {
'X': np.random.random((4, 8)).astype(self.dtype) - 0.5,
'Y': np.random.random((4, 128)).astype(self.dtype) - 0.5,
'Bias': np.random.random((128, )).astype(self.dtype) - 0.5
}
self.outputs = {
'Out':
get_output(self.inputs['X'].T, self.inputs['Y'],
self.inputs['Bias'], 'relu')
}
self.attrs = {'trans_x': True, "activation": 'relu'}
def init_dtype_type(self):
self.dtype = np.float16
self.atol = 1e-3
def test_check_output(self):
if self.dtype == np.float16 and not core.is_float16_supported(
self.place):
return
self.check_output_with_place(self.place, atol=self.atol)
@skip_check_grad_ci(reason="no grap op")
@unittest.skipIf(not core.is_compiled_with_cuda(),
"core is not compiled with CUDA")
class TestFuseGemmEpilogueOpReluMTMFP32(TestFuseGemmEpilogueOpReluMTMFP16):
def init_dtype_type(self):
self.dtype = np.single
self.atol = 1e-6
@skip_check_grad_ci(reason="no grap op")
@unittest.skipIf(not core.is_compiled_with_cuda(),
"core is not compiled with CUDA")
class TestFuseGemmEpilogueOpReluMTMFP64(TestFuseGemmEpilogueOpReluMTMFP16):
def init_dtype_type(self):
self.dtype = np.double
self.atol = 1e-6
@skip_check_grad_ci(reason="no grap op")
@unittest.skipIf(not core.is_compiled_with_cuda(),
"core is not compiled with CUDA")
class TestFuseGemmEpilogueOpReluMMTFP16(TestFuseGemmBase):
def setUp(self):
self.op_type = "fused_gemm_epilogue"
self.place = core.CUDAPlace(0)
self.init_dtype_type()
self.inputs = {
'X': np.random.random((8, 4)).astype(self.dtype) - 0.5,
'Y': np.random.random((128, 4)).astype(self.dtype) - 0.5,
'Bias': np.random.random((128, )).astype(self.dtype) - 0.5
}
self.outputs = {
'Out':
get_output(self.inputs['X'], self.inputs['Y'].T,
self.inputs['Bias'], 'relu')
}
self.attrs = {'trans_y': True, "activation": 'relu'}
def init_dtype_type(self):
self.dtype = np.float16
self.atol = 1e-3
def test_check_output(self):
if self.dtype == np.float16 and not core.is_float16_supported(
self.place):
return
self.check_output_with_place(self.place, atol=self.atol)
@skip_check_grad_ci(reason="no grap op")
@unittest.skipIf(not core.is_compiled_with_cuda(),
"core is not compiled with CUDA")
class TestFuseGemmEpilogueOpReluMMTFP32(TestFuseGemmEpilogueOpReluMMTFP16):
def init_dtype_type(self):
self.dtype = np.single
self.atol = 1e-6
@skip_check_grad_ci(reason="no grap op")
@unittest.skipIf(not core.is_compiled_with_cuda(),
"core is not compiled with CUDA")
class TestFuseGemmEpilogueOpReluMMTFP64(TestFuseGemmEpilogueOpReluMMTFP16):
def init_dtype_type(self):
self.dtype = np.double
self.atol = 1e-6
@skip_check_grad_ci(reason="no grap op")
@unittest.skipIf(not core.is_compiled_with_cuda(),
"core is not compiled with CUDA")
class TestFuseGemmEpilogueOpReluMTMTFP16(TestFuseGemmBase):
def setUp(self):
self.op_type = "fused_gemm_epilogue"
self.place = core.CUDAPlace(0)
self.init_dtype_type()
self.inputs = {
'X': np.random.random((4, 8)).astype(self.dtype) - 0.5,
'Y': np.random.random((128, 4)).astype(self.dtype) - 0.5,
'Bias': np.random.random((128, )).astype(self.dtype) - 0.5
}
self.outputs = {
'Out':
get_output(self.inputs['X'].T, self.inputs['Y'].T,
self.inputs['Bias'], 'relu')
}
self.attrs = {'trans_x': True, 'trans_y': True, "activation": 'relu'}
def init_dtype_type(self):
self.dtype = np.float16
self.atol = 1e-3
def test_check_output(self):
if self.dtype == np.float16 and not core.is_float16_supported(
self.place):
return
self.check_output_with_place(self.place, atol=self.atol)
@skip_check_grad_ci(reason="no grap op")
@unittest.skipIf(not core.is_compiled_with_cuda(),
"core is not compiled with CUDA")
class TestFuseGemmEpilogueOpReluMTMTFP32(TestFuseGemmEpilogueOpReluMTMTFP16):
def init_dtype_type(self):
self.dtype = np.single
self.atol = 1e-6
@skip_check_grad_ci(reason="no grap op")
@unittest.skipIf(not core.is_compiled_with_cuda(),
"core is not compiled with CUDA")
class TestFuseGemmEpilogueOpReluMTMTFP64(TestFuseGemmEpilogueOpReluMTMTFP16):
def init_dtype_type(self):
self.dtype = np.double
self.atol = 1e-6
@skip_check_grad_ci(reason="no grap op")
@unittest.skipIf(not core.is_compiled_with_cuda(),
"core is not compiled with CUDA")
class TestFuseGemmEpilogueOpReluMMFP16MultiDimX(TestFuseGemmBase):
def setUp(self):
self.op_type = "fused_gemm_epilogue"
self.place = core.CUDAPlace(0)
self.init_dtype_type()
self.inputs = {
'X': np.random.random((2, 2, 8, 4)).astype(self.dtype) - 0.5,
'Y': np.random.random((4, 128)).astype(self.dtype) - 0.5,
'Bias': np.random.random((128, )).astype(self.dtype) - 0.5
}
self.outputs = {
'Out':
get_output(self.inputs['X'].reshape((-1, 4)), self.inputs['Y'],
self.inputs['Bias'], 'relu').reshape((2, 2, 8, 128))
}
self.attrs = {"activation": 'relu'}
def init_dtype_type(self):
self.dtype = np.float16
self.atol = 1e-3
def test_check_output(self):
if self.dtype == np.float16 and not core.is_float16_supported(
self.place):
return
self.check_output_with_place(self.place, atol=self.atol)
@skip_check_grad_ci(reason="no grap op")
@unittest.skipIf(not core.is_compiled_with_cuda(),
"core is not compiled with CUDA")
class TestFuseGemmEpilogueOpReluMMFP32MultiDimX(
TestFuseGemmEpilogueOpReluMMFP16MultiDimX):
def init_dtype_type(self):
self.dtype = np.single
self.atol = 1e-6
@skip_check_grad_ci(reason="no grap op")
@unittest.skipIf(not core.is_compiled_with_cuda(),
"core is not compiled with CUDA")
class TestFuseGemmEpilogueOpReluMMFP64MultiDimX(
TestFuseGemmEpilogueOpReluMMFP16MultiDimX):
def init_dtype_type(self):
self.dtype = np.double
self.atol = 1e-6
@skip_check_grad_ci(reason="no grap op")
@unittest.skipIf(not core.is_compiled_with_cuda(),
"core is not compiled with CUDA")
class TestFuseGemmEpilogueOpReluMTMFP16MultiDimX(TestFuseGemmBase):
def setUp(self):
self.op_type = "fused_gemm_epilogue"
self.place = core.CUDAPlace(0)
self.init_dtype_type()
self.inputs = {
'X': np.random.random((4, 2, 2, 8)).astype(self.dtype) - 0.5,
'Y': np.random.random((4, 128)).astype(self.dtype) - 0.5,
'Bias': np.random.random((128, )).astype(self.dtype) - 0.5
}
self.outputs = {
'Out':
get_output(self.inputs['X'].reshape((4, -1)).T, self.inputs['Y'],
self.inputs['Bias'], 'relu').reshape((2, 2, 8, 128))
}
self.attrs = {'trans_x': True, "activation": 'relu'}
def init_dtype_type(self):
self.dtype = np.float16
self.atol = 1e-3
def test_check_output(self):
if self.dtype == np.float16 and not core.is_float16_supported(
self.place):
return
self.check_output_with_place(self.place, atol=self.atol)
@skip_check_grad_ci(reason="no grap op")
@unittest.skipIf(not core.is_compiled_with_cuda(),
"core is not compiled with CUDA")
class TestFuseGemmEpilogueOpReluMTMFP32MultiDimX(
TestFuseGemmEpilogueOpReluMTMFP16MultiDimX):
def init_dtype_type(self):
self.dtype = np.single
self.atol = 1e-6
@skip_check_grad_ci(reason="no grap op")
@unittest.skipIf(not core.is_compiled_with_cuda(),
"core is not compiled with CUDA")
class TestFuseGemmEpilogueOpReluMTMFP64MultiDimX(
TestFuseGemmEpilogueOpReluMTMFP16MultiDimX):
def init_dtype_type(self):
self.dtype = np.double
self.atol = 1e-6
@skip_check_grad_ci(reason="no grap op")
@unittest.skipIf(not core.is_compiled_with_cuda(),
"core is not compiled with CUDA")
class TestFuseGemmEpilogueOpGeluMMFP16(TestFuseGemmBase):
def setUp(self):
self.op_type = "fused_gemm_epilogue"
self.place = core.CUDAPlace(0)
self.init_dtype_type()
self.inputs = {
'X': np.random.random((8, 4)).astype(self.dtype) - 0.5,
'Y': np.random.random((4, 128)).astype(self.dtype) - 0.5,
'Bias': np.random.random((128, )).astype(self.dtype) - 0.5
}
self.attrs = {"activation": 'gelu'}
self.outputs = {
'Out':
get_output(self.inputs['X'], self.inputs['Y'], self.inputs['Bias'],
'gelu')
}
def init_dtype_type(self):
self.dtype = np.float16
self.atol = 1e-3
def test_check_output(self):
if self.dtype == np.float16 and not core.is_float16_supported(
self.place):
return
self.check_output_with_place(self.place, atol=self.atol)
@skip_check_grad_ci(reason="no grap op")
@unittest.skipIf(not core.is_compiled_with_cuda(),
"core is not compiled with CUDA")
class TestFuseGemmEpilogueOpGeluMMFP32(TestFuseGemmEpilogueOpGeluMMFP16):
def init_dtype_type(self):
self.dtype = np.single
self.atol = 1e-6
@skip_check_grad_ci(reason="no grap op")
@unittest.skipIf(not core.is_compiled_with_cuda(),
"core is not compiled with CUDA")
class TestFuseGemmEpilogueOpGeluMMFP64(TestFuseGemmEpilogueOpGeluMMFP16):
def init_dtype_type(self):
self.dtype = np.double
self.atol = 1e-6
@skip_check_grad_ci(reason="no grap op")
@unittest.skipIf(not core.is_compiled_with_cuda(),
"core is not compiled with CUDA")
class TestFuseGemmEpilogueOpNoneMMFP16(TestFuseGemmBase):
def setUp(self):
self.op_type = "fused_gemm_epilogue"
self.place = core.CUDAPlace(0)
self.init_dtype_type()
self.inputs = {
'X': np.random.random((8, 4)).astype(self.dtype) - 0.5,
'Y': np.random.random((4, 128)).astype(self.dtype) - 0.5,
'Bias': np.random.random((128, )).astype(self.dtype) - 0.5
}
self.attrs = {"activation": 'none'}
self.outputs = {
'Out':
get_output(self.inputs['X'], self.inputs['Y'], self.inputs['Bias'],
'none')
}
def init_dtype_type(self):
self.dtype = np.float16
self.atol = 1e-3
def test_check_output(self):
if self.dtype == np.float16 and not core.is_float16_supported(
self.place):
return
self.check_output_with_place(self.place, atol=self.atol)
@skip_check_grad_ci(reason="no grap op")
@unittest.skipIf(not core.is_compiled_with_cuda(),
"core is not compiled with CUDA")
class TestFuseGemmEpilogueOpNoneMMFP32(TestFuseGemmEpilogueOpNoneMMFP16):
def init_dtype_type(self):
self.dtype = np.single
self.atol = 1e-6
@skip_check_grad_ci(reason="no grap op")
@unittest.skipIf(not core.is_compiled_with_cuda(),
"core is not compiled with CUDA")
class TestFuseGemmEpilogueOpNoneMMFP64(TestFuseGemmEpilogueOpNoneMMFP16):
def init_dtype_type(self):
self.dtype = np.double
self.atol = 1e-6
if __name__ == "__main__":
paddle.enable_static()
np.random.seed(0)
unittest.main()
| 2.046875 | 2 |
School146/forms/article_form.py | mihdenis85/Synergy | 0 | 12795612 | import re
from flask_wtf import FlaskForm
from flask_wtf.file import FileField, FileAllowed, FileRequired
from wtforms import StringField, TextAreaField, SubmitField
from wtforms.validators import DataRequired, regexp
class ArticleForm(FlaskForm):
title = StringField('Название статьи', validators=[DataRequired()])
text = TextAreaField('Текст статьи', validators=[DataRequired()])
picture = FileField('Картинка', validators=[FileRequired(),
FileAllowed(['jpg', 'png'],
'Допустимы только изображения форматов jpg и png')])
submit = SubmitField('Подтвердить') | 2.53125 | 3 |
cc/stream.py | markokr/cc | 3 | 12795613 | """Wrapper around ZMQStream
"""
import sys
import time
import zmq
from zmq.eventloop import IOLoop
from zmq.eventloop.zmqstream import ZMQStream
import skytools
from cc.message import CCMessage, zmsg_size
from cc.util import stat_inc
__all__ = ['CCStream', 'CCReqStream']
#
# simple wrapper around ZMQStream
#
class CCStream (ZMQStream):
"""
Adds CCMessage methods to ZMQStream as well as protection (on by default)
against unlimited memory (send queue) growth.
"""
def __init__ (self, *args, **kwargs):
self.qmaxsize = kwargs.pop ('qmaxsize', None)
if self.qmaxsize is None:
self.qmaxsize = 1000
elif self.qmaxsize <= 0:
self.qmaxsize = sys.maxsize
super(CCStream, self).__init__(*args, **kwargs)
def send_multipart (self, msg, *args, **kwargs):
if self._send_queue.qsize() < self.qmaxsize:
super(CCStream, self).send_multipart (msg, *args, **kwargs)
else:
stat_inc ('count.dropped', 1)
stat_inc ('bytes.dropped', zmsg_size (msg))
def send_cmsg(self, cmsg):
"""Send CCMessage to socket"""
self.send_multipart(cmsg.zmsg)
def on_recv_cmsg(self, cbfunc):
"""Set callback that receives CCMessage."""
def convert_cmsg(zmsg):
cmsg = CCMessage(zmsg)
cbfunc(cmsg)
self.on_recv(convert_cmsg)
#
# request multiplexer on single stream
#
class QueryInfo:
"""Store callback details for query."""
log = skytools.getLogger('QueryInfo')
def __init__(self, qid, cmsg, cbfunc, rqs):
self.qid = qid
self.orig_cmsg = cmsg
self.cbfunc = cbfunc
self.timeout_ref = None
self.ioloop = rqs.ioloop
self.remove_query = rqs.remove_query
def on_timeout(self):
"""Called by ioloop on timeout, needs to handle exceptions"""
try:
self.timeout_ref = None
self.launch_cb(None)
except:
self.log.exception('timeout callback crashed')
def launch_cb(self, arg):
"""Run callback, re-wire timeout and query if needed."""
keep, timeout = self.cbfunc(arg)
self.log.trace('keep=%r', keep)
if keep:
self.set_timeout(timeout)
else:
self.remove_query(self.qid)
def set_timeout(self, timeout):
"""Set new timeout for task, None means drop it"""
if self.timeout_ref:
self.ioloop.remove_timeout(self.timeout_ref)
self.timeout_ref = None
if timeout:
deadline = time.time() + timeout
self.timeout_ref = self.ioloop.add_timeout(deadline, self.on_timeout)
def send_to(self, cc):
self.orig_cmsg.send_to(cc)
class CCReqStream:
"""Request-based API for CC socket.
Add request-id into route, later map replies to original request
based on that.
"""
log = skytools.getLogger('CCReqStream')
zmq_hwm = 100
zmq_linger = 500
def __init__(self, cc_url, xtx, ioloop=None, zctx=None):
"""Initialize stream."""
zctx = zctx or zmq.Context.instance()
ioloop = ioloop or IOLoop.instance()
s = zctx.socket (zmq.XREQ)
s.setsockopt (zmq.HWM, self.zmq_hwm)
s.setsockopt (zmq.LINGER, self.zmq_linger)
s.connect (cc_url)
self.ccs = CCStream(s, ioloop, qmaxsize = self.zmq_hwm)
self.ioloop = ioloop
self.xtx = xtx
self.query_id_seq = 1
self.query_cache = {}
self.ccs.on_recv(self.handle_recv)
def remove_query(self, qid):
"""Drop query state. Further replies are ignored."""
qi = self.query_cache.get(qid)
if qi:
del self.query_cache[qid]
qi.set_timeout(None)
def ccquery_sync(self, msg, timeout=0):
"""Synchronous query.
Returns first reply.
"""
res = [None]
def sync_cb(_rep):
res[0] = _rep
self.ioloop.stop()
return (False, 0)
self.ccquery_async(msg, sync_cb, timeout)
self.ioloop.start()
return res[0]
def ccquery_async(self, msg, cbfunc, timeout=0):
"""Asynchronous query.
Maps replies to callback function based on request id.
"""
# create query id prefix
qid = "Q%06d" % self.query_id_seq
self.query_id_seq += 1
# create message, add query id
cmsg = self.xtx.create_cmsg(msg)
cmsg.set_route([qid])
qi = QueryInfo(qid, cmsg, cbfunc, self)
self.query_cache[qid] = qi
qi.set_timeout(timeout)
qi.send_to(self.ccs)
return qid
def ccpublish(self, msg):
"""Broadcast API."""
cmsg = self.xtx.create_cmsg(msg)
cmsg.send_to(self.ccs)
def handle_recv(self, zmsg):
"""Internal callback on ZMQStream.
It must not throw exceptions.
"""
try:
self.handle_recv_real(zmsg)
except Exception:
self.log.exception('handle_recv_real crashed, dropping msg: %r', zmsg)
def handle_recv_real(self, zmsg):
"""Actual callback that can throw exceptions."""
cmsg = CCMessage(zmsg)
route = cmsg.get_route()
if len(route) != 1:
self.log.error('Invalid reply route: %r', route)
return
qid = route[0]
if qid not in self.query_cache:
self.log.error('reply for unknown query: %r', qid)
return
msg = cmsg.get_payload(self.xtx)
qi = self.query_cache[qid]
qi.launch_cb(msg)
def resend(self, qid, timeout=0):
if qid in self.query_cache:
qi = self.query_cache[qid]
qi.send_to(self.ccs)
qi.set_timeout(timeout)
else:
pass # ?
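# Illustrative usage sketch (not part of the original module): the callback given to
# CCReqStream.ccquery_async receives the decoded reply message, or None when the query
# times out, and must return a (keep, timeout) tuple -- keep=True re-arms the timeout
# and keeps waiting for further replies, keep=False drops the query state. The URL and
# message objects below are hypothetical placeholders.
#
#   def on_reply(rep):
#       if rep is None:
#           return (True, 5)    # timed out: wait up to 5 more seconds
#       print('reply:', rep)
#       return (False, 0)       # done: forget this query
#
#   rqs = CCReqStream('tcp://localhost:10000', xtx)
#   rqs.ccquery_async(msg, on_reply, timeout=5)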
| 2.25 | 2 |
gen-input.py | SirGFM/FallingBlocks | 1 | 12795614 | import pathlib
import os.path
cwd = pathlib.Path(__file__).parent.absolute()
cwd = os.path.abspath(cwd)
fp = os.path.join(cwd, 'ProjectSettings', 'InputManager.asset')
def output_unity_axis(f, joy_idx, name, button='', axis_type=0, joy_axis=0):
f.write(' - serializedVersion: 3\n')
f.write(' m_Name: joystick {} {}\n'.format(joy_idx, name))
f.write(' descriptiveName: Reconfigurable gamepad input\n')
f.write(' descriptiveNegativeName: \n')
f.write(' negativeButton: \n')
f.write(' positiveButton: {}\n'.format(button))
f.write(' altNegativeButton: \n')
f.write(' altPositiveButton: \n')
f.write(' gravity: 3\n')
f.write(' dead: 0.01\n')
f.write(' sensitivity: 3\n')
f.write(' snap: 1\n')
f.write(' invert: 0\n')
f.write(' type: {}\n'.format(axis_type))
f.write(' axis: {}\n'.format(joy_axis))
f.write(' joyNum: {}\n'.format(joy_idx))
def output_axis(f, joy_idx, joy_axis):
name = 'axis {}'.format(joy_axis)
output_unity_axis(f, joy_idx, name, axis_type=2, joy_axis=joy_axis)
def output_button(f, joy_idx, button):
name = 'button {}'.format(button)
output_unity_axis(f, joy_idx, name, button='joystick button {}'.format(button))
joystick_count = 9
joystick_axis_count = 10
joystick_button_count = 20
with open(fp, 'wt') as f:
f.write('%YAML 1.1\n')
f.write('%TAG !u! tag:unity3d.com,2011:\n')
f.write('--- !u!13 &1\n')
f.write('InputManager:\n')
f.write(' m_ObjectHideFlags: 0\n')
f.write(' serializedVersion: 2\n')
f.write(' m_Axes:\n')
# Default values, required by the UI
f.write(' - serializedVersion: 3\n')
f.write(' m_Name: Horizontal\n')
f.write(' descriptiveName: UI Horizontal\n')
f.write(' descriptiveNegativeName: \n')
f.write(' negativeButton: left\n')
f.write(' positiveButton: right\n')
f.write(' altNegativeButton: a\n')
f.write(' altPositiveButton: d\n')
f.write(' gravity: 3\n')
f.write(' dead: 0.001\n')
f.write(' sensitivity: 3\n')
f.write(' snap: 1\n')
f.write(' invert: 0\n')
f.write(' type: 0\n')
f.write(' axis: 0\n')
f.write(' joyNum: 0\n')
f.write(' - serializedVersion: 3\n')
f.write(' m_Name: Vertical\n')
f.write(' descriptiveName: UI Vertical\n')
f.write(' descriptiveNegativeName: \n')
f.write(' negativeButton: down\n')
f.write(' positiveButton: up\n')
f.write(' altNegativeButton: s\n')
f.write(' altPositiveButton: w\n')
f.write(' gravity: 3\n')
f.write(' dead: 0.001\n')
f.write(' sensitivity: 3\n')
f.write(' snap: 1\n')
f.write(' invert: 0\n')
f.write(' type: 0\n')
f.write(' axis: 0\n')
f.write(' joyNum: 0\n')
f.write(' - serializedVersion: 3\n')
f.write(' m_Name: Horizontal\n')
f.write(' descriptiveName: UI Horizontal (gamepad)\n')
f.write(' descriptiveNegativeName: \n')
f.write(' negativeButton: \n')
f.write(' positiveButton: \n')
f.write(' altNegativeButton: \n')
f.write(' altPositiveButton: \n')
f.write(' gravity: 3\n')
f.write(' dead: 0.01\n')
f.write(' sensitivity: 3\n')
f.write(' snap: 1\n')
f.write(' invert: 0\n')
f.write(' type: 2\n')
f.write(' axis: 0\n')
f.write(' joyNum: 0\n')
f.write(' - serializedVersion: 3\n')
f.write(' m_Name: Vertical\n')
f.write(' descriptiveName: UI Vertical (gamepad)\n')
f.write(' descriptiveNegativeName: \n')
f.write(' negativeButton: \n')
f.write(' positiveButton: \n')
f.write(' altNegativeButton: \n')
f.write(' altPositiveButton: \n')
f.write(' gravity: 3\n')
f.write(' dead: 0.01\n')
f.write(' sensitivity: 3\n')
f.write(' snap: 1\n')
f.write(' invert: 1\n')
f.write(' type: 2\n')
f.write(' axis: 1\n')
f.write(' joyNum: 0\n')
f.write(' - serializedVersion: 3\n')
f.write(' m_Name: Submit\n')
f.write(' descriptiveName: Unity UI...\n')
f.write(' descriptiveNegativeName: \n')
f.write(' negativeButton: \n')
f.write(' positiveButton: enter\n')
f.write(' altNegativeButton: \n')
f.write(' altPositiveButton: joystick button 0\n')
f.write(' gravity: 3\n')
f.write(' dead: 0.01\n')
f.write(' sensitivity: 3\n')
f.write(' snap: 1\n')
f.write(' invert: 0\n')
f.write(' type: 0\n')
f.write(' axis: 1\n')
f.write(' joyNum: 0\n')
f.write(' - serializedVersion: 3\n')
f.write(' m_Name: Cancel\n')
f.write(' descriptiveName: Unity UI...\n')
f.write(' descriptiveNegativeName: \n')
f.write(' negativeButton: \n')
f.write(' positiveButton: escape\n')
f.write(' altNegativeButton: \n')
f.write(' altPositiveButton: joystick button 1\n')
f.write(' gravity: 3\n')
f.write(' dead: 0.01\n')
f.write(' sensitivity: 3\n')
f.write(' snap: 1\n')
f.write(' invert: 0\n')
f.write(' type: 0\n')
f.write(' axis: 1\n')
f.write(' joyNum: 0\n')
for joy_idx in range(joystick_count):
for joy_axis in range(joystick_axis_count):
output_axis(f, joy_idx, joy_axis)
for joy_bt in range(joystick_button_count):
output_button(f, joy_idx, joy_bt)
| 2.328125 | 2 |
lego/apps/tags/validators.py | HoboKristian/lego | 0 | 12795615 | <filename>lego/apps/tags/validators.py
from django.core.validators import RegexValidator, _lazy_re_compile
slug_re = _lazy_re_compile(r"^[-a-z0-9æøå_.#%$&/]+\Z")
validate_tag = RegexValidator(
slug_re,
"Enter a valid 'tag' consisting of letters, numbers, underscores or hyphens.",
"invalid",
)
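# Illustrative usage (not part of the original module): validate_tag('økern-2') passes,
# while validate_tag('Bad Tag!') raises django.core.exceptions.ValidationError, since
# the regex only admits lowercase letters, digits and the listed punctuation.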
| 2.65625 | 3 |
saber/xbrain/split_cells.py | elenimath/saber | 12 | 12795616 | # Copyright 2019 The Johns Hopkins University Applied Physics Laboratory
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/usr/bin/env python
import argparse
import sys
import itertools
import numpy as np
sphere_radius = 5
# Take the output of the cell-detect step and split it into two streams: one list of cells, the other the map of cells
def split_cells(args):
cells = np.load(args.input)
cell_map = cells[1]
cell_list = cells[0]
with open(args.map_output, 'wb') as f:
np.save(f, cell_map)
# Make volume out of cell_list
cell_centroid_volume = np.zeros(cell_map.shape)
for cell in cell_list:
axes_range = [[],[],[]]
for i,axes in enumerate(cell[:3]):
min_range = max(int(axes-args.sphere_size), 0)
max_range = min(int(axes+args.sphere_size), cell_map.shape[i]-1)
axes_range[i]=range(min_range, max_range)
coords = list(itertools.product(*axes_range))
for pixel in coords:
if np.linalg.norm(np.array(cell[:3])-np.array(pixel)) <= args.sphere_size:
cell_centroid_volume[pixel] = 1
with open(args.list_output, 'wb') as f:
np.save(f, cell_list)
with open(args.centroid_volume_output, 'wb') as f:
np.save(f, cell_centroid_volume)
def main():
parser = argparse.ArgumentParser(description='cell results splitting script')
parser.set_defaults(func=lambda _: parser.print_help())
parser.add_argument('-i', '--input', required=True, help='Input file')
parser.add_argument('--map_output', required=True, help='Map Output file')
parser.add_argument('--list_output', required=True, help='List Output file')
parser.add_argument('--centroid_volume_output', required=True, help='Output volume with spheres')
parser.add_argument('--sphere_size', required=False, help='Size of the spheres in the centroids volume', default=5, type=int)
args = parser.parse_args()
split_cells(args)
if __name__ == '__main__':
main()
| 2.40625 | 2 |
train.py | nsouff/digits_guesser | 0 | 12795617 | <gh_stars>0
import tensorflow as tf
import os
import numpy as np
import matplotlib.pyplot as plt
class Model():
def __init__(self, filepath='model.h5'):
self.filepath = filepath
mnist = tf.keras.datasets.mnist
(x_train, y_train),(x_test, y_test) = mnist.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0
self.x_test = x_test
self.y_test = y_test
if os.path.exists(filepath):
self.model = tf.keras.models.load_model(filepath)
else:
self.model = tf.keras.models.Sequential([
tf.keras.layers.Flatten(input_shape=(28, 28)),
tf.keras.layers.Dense(128, activation='relu'),
tf.keras.layers.Dense(10, activation='softmax')
])
self.model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
self.model.fit(x_train, y_train, epochs=10)
self.model.evaluate(x_test, y_test)
self.save()
def save(self):
self.model.save(self.filepath)
def predict(self, pixels):
prediction = self.model.predict([pixels])
return np.argmax(prediction[0])
def examples(self, n=5):
predictions = self.model.predict(self.x_test)
for i in range(n):
plt.imshow(self.x_test[i], cmap='binary')
print("Prediction", np.argmax(predictions[i]), ", actual:", self.y_test[i])
plt.show()
def evaluate(self):
self.model.evaluate(self.x_test, self.y_test)
if __name__ == '__main__':
if os.path.exists('model.h5'):
os.remove('model.h5')
Model()
| 2.578125 | 3 |
cours/python/franceioi/concours-de-tir-a-la-corde.py | jusdepatate/pieces-of-code | 1 | 12795618 | <gh_stars>1-10
nbMembres = int(input())
poidsE1 = 0
poidsE2 = 0
for i in range(1, (nbMembres * 2) + 1):
if i % 2:
poidsE1 = poidsE1 + int(input())
else:
poidsE2 = poidsE2 + int(input())
if poidsE1 > poidsE2:
print("L'équipe 1 a un avantage")
else:
print("L'équipe 2 a un avantage")
print("Poids total pour l'équipe 1 : " + str(poidsE1))
print("Poids total pour l'équipe 2 : " + str(poidsE2))
| 3.40625 | 3 |
Problemset/binary-tree-paths/binary-tree-paths.py | worldwonderer/algorithm | 1 | 12795619 | <filename>Problemset/binary-tree-paths/binary-tree-paths.py<gh_stars>1-10
# @Title: 二叉树的所有路径 (Binary Tree Paths)
# @Author: 18015528893
# @Date: 2021-02-14 17:30:19
# @Runtime: 32 ms
# @Memory: 15 MB
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
def binaryTreePaths(self, root: TreeNode) -> List[str]:
result = []
def dfs(root, path):
if root is None:
return
path.append(root.val)
if root.left is None and root.right is None:
result.append('->'.join([str(i) for i in path]))
dfs(root.left, path)
dfs(root.right, path)
path.pop()
dfs(root, [])
return result
| 3.359375 | 3 |
unit_tests.py | leguiart/Evolutionary_Computing | 1 | 12795620 | <reponame>leguiart/Evolutionary_Computing
from test_genetic_algorithm import TestGeneticAlgorithm
from hklearn_genetic.problem import Rastrigin, Beale, Himmelblau, Eggholder
import numpy as np
### Proportional selection, no elitism
ga = TestGeneticAlgorithm([[0.25410149, 0.71410111, 0.31915886, 0.45725239]], pc = 0.9, pm = 0.5, max_iter=10)
rast = Rastrigin(n_dim=2, n_prec=0)
beale = Beale(n_prec=0)
himme = Himmelblau(n_prec=0)
egg = Eggholder(n_prec=0)
rast_init_pop = np.array([[1, 0, 0, 0, 1, 1, 1, 1],
[1, 1, 1, 0, 0, 0, 1, 0],
[1, 1, 0, 1, 0, 1, 0, 1],
[1, 0, 1, 0, 0, 0, 1, 1]])
beale_init_pop = np.array([[1, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 1, 1, 0, 1, 1, 0],
[0, 1, 0, 0, 0, 0, 1, 1],
[1, 0, 0, 0, 0, 1, 1, 0]])
himme_init_pop = np.array([[1, 1, 0, 1, 1, 0, 1, 0],
[0, 1, 1, 0, 1, 1, 1, 1],
[1, 0, 1, 1, 1, 0, 1, 1],
[1, 0, 0, 0, 1, 0, 0, 1]])
egg_init_pop = np.array([[1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 1],
[1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0],
[0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1],
[0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0]])
# print(rast_init_pop)
# rast_selected = ga.select(rast, rast_init_pop)
# print(rast_selected)
# rast_cross = ga.crossover(rast_selected[0])
# print(rast_cross)
# print(ga.mutate(rast_cross))
# print(beale_init_pop)
# beale_selected = ga.select(beale, beale_init_pop)
# print(beale_selected)
# beale_crossover = ga.crossover(beale_selected[0])
# print(beale_crossover)
# print(ga.mutate(beale_crossover))
# print(himme_init_pop)
# himme_selected = ga.select(himme, himme_init_pop)
# print(himme_selected)
# himme_crossover = ga.crossover(himme_selected[0])
# print(himme_crossover)
# print(ga.mutate(himme_crossover))
# print(egg_init_pop)
# egg_selected = ga.select(egg, egg_init_pop)
# print(egg_selected)
# egg_crossover = ga.crossover(egg_selected[0])
# print(egg_crossover)
# print(ga.mutate(egg_crossover))
### Proportional selection, elitism
# ga = TestGeneticAlgorithm(pc = 0.9, pm = 0.5, max_iter=10, elitism = 0.4)
# print(rast_init_pop)
# rast_selected = ga.select(rast, rast_init_pop)
# print(rast_selected)
# rast_cross = ga.crossover(rast_selected[0])
# print(rast_cross)
# print(ga.mutate(rast_cross))
# print(beale_init_pop)
# beale_selected = ga.select(beale, beale_init_pop)
# print(beale_selected)
# beale_crossover = ga.crossover(beale_selected[0])
# print(beale_crossover)
# print(ga.mutate(beale_crossover))
# print(himme_init_pop)
# himme_selected = ga.select(himme, himme_init_pop)
# print(himme_selected)
# himme_crossover = ga.crossover(himme_selected[0])
# print(himme_crossover)
# print(ga.mutate(himme_crossover))
# print(egg_init_pop)
# egg_selected = ga.select(egg, egg_init_pop)
# print(egg_selected)
# egg_crossover = ga.crossover(egg_selected[0])
# print(egg_crossover)
# print(ga.mutate(egg_crossover))
### Tournament selection
ga = TestGeneticAlgorithm(pc = 0.9, pm = 0.5, max_iter=10, selection = "tournament")
print(rast_init_pop)
rast_selected = ga.select(rast, rast_init_pop)
print(rast_selected)
rast_cross = ga.crossover(rast_selected[0])
print(rast_cross)
print(ga.mutate(rast_cross))
print(beale_init_pop)
beale_selected = ga.select(beale, beale_init_pop)
print(beale_selected)
beale_crossover = ga.crossover(beale_selected[0])
print(beale_crossover)
print(ga.mutate(beale_crossover))
print(himme_init_pop)
himme_selected = ga.select(himme, himme_init_pop)
print(himme_selected)
himme_crossover = ga.crossover(himme_selected[0])
print(himme_crossover)
print(ga.mutate(himme_crossover))
print(egg_init_pop)
egg_selected = ga.select(egg, egg_init_pop)
print(egg_selected)
egg_crossover = ga.crossover(egg_selected[0])
print(egg_crossover)
print(ga.mutate(egg_crossover))
### Tournament selection w elitism
ga = TestGeneticAlgorithm(pc = 0.9, pm = 0.5, max_iter=10, elitism = 0.4, selection = "tournament")
print(rast_init_pop)
rast_selected = ga.select(rast, rast_init_pop)
print(rast_selected)
rast_cross = ga.crossover(rast_selected[0])
print(rast_cross)
print(ga.mutate(rast_cross))
print(beale_init_pop)
beale_selected = ga.select(beale, beale_init_pop)
print(beale_selected)
beale_crossover = ga.crossover(beale_selected[0])
print(beale_crossover)
print(ga.mutate(beale_crossover))
print(himme_init_pop)
himme_selected = ga.select(himme, himme_init_pop)
print(himme_selected)
himme_crossover = ga.crossover(himme_selected[0])
print(himme_crossover)
print(ga.mutate(himme_crossover))
print(egg_init_pop)
egg_selected = ga.select(egg, egg_init_pop)
print(egg_selected)
egg_crossover = ga.crossover(egg_selected[0])
print(egg_crossover)
print(ga.mutate(egg_crossover))
### SUS selection
ga = TestGeneticAlgorithm(pc = 0.9, pm = 0.5, max_iter=10, selection = "sus")
print(rast_init_pop)
rast_selected = ga.select(rast, rast_init_pop)
print(rast_selected)
rast_cross = ga.crossover(rast_selected[0])
print(rast_cross)
print(ga.mutate(rast_cross))
print(beale_init_pop)
beale_selected = ga.select(beale, beale_init_pop)
print(beale_selected)
beale_crossover = ga.crossover(beale_selected[0])
print(beale_crossover)
print(ga.mutate(beale_crossover))
print(himme_init_pop)
himme_selected = ga.select(himme, himme_init_pop)
print(himme_selected)
himme_crossover = ga.crossover(himme_selected[0])
print(himme_crossover)
print(ga.mutate(himme_crossover))
print(egg_init_pop)
egg_selected = ga.select(egg, egg_init_pop)
print(egg_selected)
egg_crossover = ga.crossover(egg_selected[0])
print(egg_crossover)
print(ga.mutate(egg_crossover))
### SUS selection w elitism
ga = TestGeneticAlgorithm(pc = 0.9, pm = 0.5, max_iter=10, elitism = 0.4, selection = "sus")
print(rast_init_pop)
rast_selected = ga.select(rast, rast_init_pop)
print(rast_selected)
rast_cross = ga.crossover(rast_selected[0])
print(rast_cross)
print(ga.mutate(rast_cross))
print(beale_init_pop)
beale_selected = ga.select(beale, beale_init_pop)
print(beale_selected)
beale_crossover = ga.crossover(beale_selected[0])
print(beale_crossover)
print(ga.mutate(beale_crossover))
print(himme_init_pop)
himme_selected = ga.select(himme, himme_init_pop)
print(himme_selected)
himme_crossover = ga.crossover(himme_selected[0])
print(himme_crossover)
print(ga.mutate(himme_crossover))
print(egg_init_pop)
egg_selected = ga.select(egg, egg_init_pop)
print(egg_selected)
egg_crossover = ga.crossover(egg_selected[0])
print(egg_crossover)
print(ga.mutate(egg_crossover)) | 2.71875 | 3 |
src/util/flu_data_source.py | dfarrow0/nowcast | 3 | 12795621 | <filename>src/util/flu_data_source.py
"""
===============
=== Purpose ===
===============
A wrapper for the Epidata API as used for nowcasting. Caching is used
extensively to reduce the number of requests made to the API.
"""
# standard library
import functools
# first party
from delphi.epidata.client.delphi_epidata import Epidata
from delphi.nowcast.fusion.nowcast import DataSource
from delphi.operations import secrets
from delphi.utils.epidate import EpiDate
from delphi.utils.epiweek import add_epiweeks, range_epiweeks
from delphi.utils.geo.locations import Locations
class FluDataSource(DataSource):
"""The interface by which all input data is provided."""
# the first epiweek for which we have ground truth ILI in all locations
FIRST_DATA_EPIWEEK = 201040
# all known sensors, past and present
SENSORS = ['gft', 'ght', 'twtr', 'wiki', 'cdc', 'epic', 'sar3', 'arch']
@staticmethod
def new_instance():
return FluDataSource(
Epidata, FluDataSource.SENSORS, Locations.region_list)
def __init__(self, epidata, sensors, locations):
self.epidata = epidata
self.sensors = sensors
self.sensor_locations = locations
# cache for prefetching bulk flu data
self.cache = {}
@functools.lru_cache(maxsize=1)
def get_truth_locations(self):
"""Return a list of locations in which ground truth is available."""
return Locations.region_list
@functools.lru_cache(maxsize=1)
def get_sensor_locations(self):
"""Return a list of locations in which sensors are available."""
return self.sensor_locations
@functools.lru_cache(maxsize=None)
def get_missing_locations(self, epiweek):
"""Return a tuple of locations which did not report on the given week."""
# only return missing atoms, i.e. locations that can't be further split
atomic_locations = set(Locations.atom_list)
available_locations = []
for loc in atomic_locations:
if self.get_truth_value(epiweek, loc) is None:
# this atomic location didn't report (or it's a future week)
continue
available_locations.append(loc)
if available_locations:
return tuple(atomic_locations - set(available_locations))
else:
# no data is available, assume that all locations will be reporting
return ()
@functools.lru_cache(maxsize=1)
def get_sensors(self):
"""Return a list of sensor names."""
return self.sensors
@functools.lru_cache(maxsize=1)
def get_weeks(self):
"""Return a list of weeks on which truth and sensors are both available."""
latest_week = self.get_most_recent_issue()
week_range = range_epiweeks(
FluDataSource.FIRST_DATA_EPIWEEK, latest_week, inclusive=True)
return list(week_range)
def get_truth_value(self, epiweek, location):
"""Return ground truth (w)ILI."""
try:
return self.cache['ilinet'][location][epiweek]
except KeyError:
print('cache miss: get_truth_value', epiweek, location)
auth = secrets.api.fluview
response = self.epidata.fluview(location, epiweek, auth=auth)
if response['result'] != 1:
return self.add_to_cache('ilinet', location, epiweek, None)
data = response['epidata'][0]
if data['num_providers'] == 0:
return self.add_to_cache('ilinet', location, epiweek, None)
return self.add_to_cache('ilinet', location, epiweek, data['wili'])
@functools.lru_cache(maxsize=None)
def get_sensor_value(self, epiweek, location, name):
"""Return a sensor reading."""
try:
return self.cache[name][location][epiweek]
except KeyError:
print('cache miss: get_sensor_value', epiweek, location, name)
response = self.epidata.sensors(
secrets.api.sensors, name, location, epiweek)
if response['result'] != 1:
return self.add_to_cache(name, location, epiweek, None)
value = response['epidata'][0]['value']
return self.add_to_cache(name, location, epiweek, value)
@functools.lru_cache(maxsize=1)
def get_most_recent_issue(self):
"""Return the most recent epiweek for which FluView data is available."""
ew2 = EpiDate.today().get_ew()
ew1 = add_epiweeks(ew2, -9)
response = self.epidata.fluview('nat', self.epidata.range(ew1, ew2))
issues = [row['issue'] for row in self.epidata.check(response)]
return max(issues)
def add_to_cache(self, name, location, epiweek, value):
"""Add the given value to the cache."""
if name not in self.cache:
self.cache[name] = {}
if location not in self.cache[name]:
self.cache[name][location] = {}
self.cache[name][location][epiweek] = value
return value
def prefetch(self, epiweek):
"""
Fetch all data in all locations up to the given epiweek.
Requests are batched. This is significantly more efficient (and faster)
than querying each sensor/location/epiweek data point individually.
"""
def extract(response):
if response['result'] == -2:
return []
return self.epidata.check(response)
weeks = Epidata.range(FluDataSource.FIRST_DATA_EPIWEEK, epiweek)
sensor_locations = set(self.get_sensor_locations())
# loop over locations to avoid hitting the limit of ~3.5k rows
for loc in self.get_truth_locations():
print('fetching %s...' % loc)
# default to None to prevent cache misses on missing values
for week in range_epiweeks(
FluDataSource.FIRST_DATA_EPIWEEK, epiweek, inclusive=True):
for name in ['ilinet'] + self.get_sensors():
self.add_to_cache(name, loc, week, None)
# ground truth
response = self.epidata.fluview(loc, weeks, auth=secrets.api.fluview)
for row in extract(response):
# skip locations with no reporters
if row['num_providers'] > 0:
self.add_to_cache('ilinet', loc, row['epiweek'], row['wili'])
# sensor readings
if loc not in sensor_locations:
# skip withheld locations (i.e. a retrospective experiment)
continue
for sen in self.get_sensors():
response = self.epidata.sensors(secrets.api.sensors, sen, loc, weeks)
for row in extract(response):
self.add_to_cache(sen, loc, row['epiweek'], row['value'])
| 2.1875 | 2 |
tests/unit/providers/traversal/test_selector_py3.py | YelloFam/python-dependency-injector | 0 | 12795622 | <gh_stars>0
"""Selector provider traversal tests."""
from dependency_injector import providers
def test_traverse():
switch = lambda: "provider1"
provider1 = providers.Callable(list)
provider2 = providers.Callable(dict)
provider = providers.Selector(
switch,
provider1=provider1,
provider2=provider2,
)
all_providers = list(provider.traverse())
assert len(all_providers) == 2
assert provider1 in all_providers
assert provider2 in all_providers
def test_traverse_switch():
switch = providers.Callable(lambda: "provider1")
provider1 = providers.Callable(list)
provider2 = providers.Callable(dict)
provider = providers.Selector(
switch,
provider1=provider1,
provider2=provider2,
)
all_providers = list(provider.traverse())
assert len(all_providers) == 3
assert switch in all_providers
assert provider1 in all_providers
assert provider2 in all_providers
def test_traverse_overridden():
provider1 = providers.Callable(list)
provider2 = providers.Callable(dict)
selector1 = providers.Selector(lambda: "provider1", provider1=provider1)
provider = providers.Selector(
lambda: "provider2",
provider2=provider2,
)
provider.override(selector1)
all_providers = list(provider.traverse())
assert len(all_providers) == 3
assert provider1 in all_providers
assert provider2 in all_providers
assert selector1 in all_providers
| 3.03125 | 3 |
2017/iker/day12.py | bbglab/adventofcode | 0 | 12795623 | <gh_stars>0
"""
--- Day 12: Digital Plumber ---
Walking along the memory banks of the stream, you find a small village that is experiencing a little confusion: some programs can't communicate with each other.
Programs in this village communicate using a fixed system of pipes. Messages are passed between programs using these pipes, but most programs aren't connected to each other directly. Instead, programs pass messages between each other until the message reaches the intended recipient.
For some reason, though, some of these messages aren't ever reaching their intended recipient, and the programs suspect that some pipes are missing. They would like you to investigate.
You walk through the village and record the ID of each program and the IDs with which it can communicate directly (your puzzle input). Each program has one or more programs with which it can communicate, and these pipes are bidirectional; if 8 says it can communicate with 11, then 11 will say it can communicate with 8.
You need to figure out how many programs are in the group that contains program ID 0.
For example, suppose you go door-to-door like a travelling salesman and record the following list:
0 <-> 2
1 <-> 1
2 <-> 0, 3, 4
3 <-> 2, 4
4 <-> 2, 3, 6
5 <-> 6
6 <-> 4, 5
In this example, the following programs are in the group that contains program ID 0:
Program 0 by definition.
Program 2, directly connected to program 0.
Program 3 via program 2.
Program 4 via program 2.
Program 5 via programs 6, then 4, then 2.
Program 6 via programs 4, then 2.
Therefore, a total of 6 programs are in this group; all but program 1, which has a pipe that connects it to itself.
How many programs are in the group that contains program ID 0?
--- Part Two ---
There are more programs than just the ones in the group containing program ID 0. The rest of them have no way of reaching that group, and still might have no way of reaching each other.
A group is a collection of programs that can all communicate via pipes either directly or indirectly. The programs you identified just a moment ago are all part of the same group. Now, they would like you to determine the total number of groups.
In the example above, there were 2 groups: one consisting of programs 0,2,3,4,5,6, and the other consisting solely of program 1.
How many groups are there in total?
"""
def parse(lines):
d = {}
for line in lines:
l = line.strip()
k, v = l.split(' <-> ')
d[k] = v.split(', ')
return d
def read():
with open('inputs/day12.txt') as fd:
return fd.readlines()
test_list = """0 <-> 2
1 <-> 1
2 <-> 0, 3, 4
3 <-> 2, 4
4 <-> 2, 3, 6
5 <-> 6
6 <-> 4, 5""".split('\n')
def find_group(direct_conections, id):
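    # Flood fill over the pipe graph: collect every program reachable from `id`.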
connected = {prog for prog in direct_conections[id]}
toexplore = connected.copy()
explored = set()
while toexplore:
prog = toexplore.pop()
if prog in explored:
continue
else:
for p in direct_conections[prog]:
toexplore.add(p)
connected.add(p)
explored.add(prog)
return connected
def test1():
assert 6 == len(find_group(parse(test_list), '0'))
def part1():
print(len(find_group(parse(read()), '0')))
def count_groups(direct_conections):
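    # Repeatedly flood-fill from a not-yet-grouped program until every program
    # belongs to some group; the number of fills is the number of groups.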
ungrouped_progs = list(direct_conections.keys())
grouped = set()
groups = 0
while ungrouped_progs:
prog = ungrouped_progs[0]
grouped.update(find_group(direct_conections, prog))
ungrouped_progs = [prog for prog in direct_conections.keys() if prog not in grouped]
groups += 1
return groups
def test2():
assert 2 == count_groups(parse(test_list))
def part2():
print(count_groups(parse(read())))
if __name__ == '__main__':
# test1()
# part1()
# test2()
part2()
| 3.640625 | 4 |
messaging/serializers.py | jpaul121/Banter | 0 | 12795624 | <reponame>jpaul121/Banter
from django.contrib.auth.models import User
from rest_framework import serializers
from .models import Message
class MessageSerializer(serializers.ModelSerializer):
message_id = serializers.SlugField(source='id', read_only=True, required=False)
title = serializers.CharField(required=False)
content = serializers.CharField()
sender = serializers.SlugField()
recipient = serializers.SlugField()
timestamp = serializers.DateTimeField(read_only=True)
def create(self, validated_data):
        title = validated_data.get('title', '')  # title is optional (required=False above)
content = validated_data['content']
sender = User.objects.get(id=int(validated_data['sender']))
recipient = User.objects.get(username=validated_data['recipient'])
response_data = {
'title': title,
'content': content,
'recipient': recipient,
'sender': sender,
}
return Message.objects.create(**response_data)
class Meta:
model = Message
fields = [ 'message_id', 'title', 'content', 'sender', 'recipient', 'timestamp' ]
| 2.125 | 2 |
wishing_well/util.py | Ennea/wishing-well | 16 | 12795625 | <reponame>Ennea/wishing-well<filename>wishing_well/util.py
import logging
import os
import socket
import sys
import tkinter
import webbrowser
from pathlib import Path
from tkinter import ttk
from urllib.request import urlopen
from urllib.error import URLError, HTTPError
from .exceptions import LogNotFoundError
def get_data_path():
if sys.platform == 'win32':
path = Path(os.environ['APPDATA']) / 'wishing-well'
elif sys.platform == 'linux':
if 'XDG_DATA_HOME' in os.environ:
path = Path(os.environ['XDG_DATA_HOME']) / 'wishing-well'
else:
path = Path('~/.local/share/wishing-well').expanduser()
elif sys.platform == 'darwin':
if 'XDG_DATA_HOME' in os.environ:
path = Path(os.environ['XDG_DATA_HOME']) / 'wishing-well'
else:
path = Path('~/Library/Application Support/wishing-well').expanduser()
else:
        show_error('Wishing Well is only designed to run on Windows, Linux, or macOS systems.')
# create dir if it does not yet exist
if not path.exists():
path.mkdir(parents=True)
# path exists, but is a file
if not path.is_dir():
show_error(f'{path} already exists, but is a file.')
return path
def get_log_path():
if sys.platform != 'win32':
raise LogNotFoundError('Cannot find the log file on non-Windows systems.')
path = Path(os.environ['USERPROFILE']) / 'AppData/LocalLow/miHoYo/Genshin Impact/output_log.txt'
if not path.exists():
return None
return path
def set_up_logging():
log_level = logging.DEBUG if len(sys.argv) > 1 and sys.argv[1] == '--debug' else logging.INFO
log_format = '%(asctime)s %(levelname)s: %(message)s'
logging.basicConfig(filename=(get_data_path() / 'wishing-well.log'), format=log_format, level=log_level)
# add a stream handler for log output to stdout
root_logger = logging.getLogger()
stdout_handler = logging.StreamHandler(sys.stdout)
stdout_handler.setLevel(log_level)
formatter = logging.Formatter(log_format)
stdout_handler.setFormatter(formatter)
root_logger.addHandler(stdout_handler)
logging.info('Starting Wishing Well')
def is_port_in_use(port):
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
try:
s.bind(( 'localhost', port ))
return False
except OSError:
return True
def get_usable_port():
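    # Probe ports 39000-39009; if another Wishing Well instance already answers
    # on one of them, open that instance in the browser and exit instead.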
port = 39000
while is_port_in_use(port):
# check if wishing well is already running on this port
try:
with urlopen(f'http://localhost:{port}/wishing-well', timeout=0.1) as _:
pass
webbrowser.open(f'http://localhost:{port}')
logging.info('Wishing Well is already running on port %d. Quitting', port)
sys.exit(1)
except (URLError, HTTPError):
port += 1
if port == 39010:
show_error('No suitable port found.')
return port
def show_error(message):
logging.error(message)
root = tkinter.Tk()
root.title('Wishing Well')
root.minsize(300, 0)
root.resizable(False, False)
root.iconphoto(False, tkinter.PhotoImage(file=Path(sys.path[0]) / 'icon.png'))
frame = ttk.Frame(root, padding=10)
frame.pack()
ttk.Label(frame, text=message).pack()
ttk.Frame(frame, height=5).pack()
ttk.Button(frame, text='Okay', command=root.destroy).pack()
# center the window
window_width = root.winfo_reqwidth()
window_height = root.winfo_reqheight()
screen_width = root.winfo_screenwidth()
screen_height = root.winfo_screenheight()
root.geometry('+{}+{}'.format(int(screen_width / 2 - window_width / 2), int(screen_height / 2 - window_height / 2)))
root.mainloop()
logging.info('Quitting')
sys.exit(1)
| 2.625 | 3 |
Tools/Scripts/webkitpy/tool/steps/promptforbugortitle.py | jacadcaps/webkitty | 6 | 12795626 | # Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import re
import sys
from webkitpy.tool.steps.abstractstep import AbstractStep
from webkitpy.tool.steps.options import Options
if sys.version_info > (3, 0):
from urllib.parse import urlparse
else:
from urlparse import urlparse
class PromptForBugOrTitle(AbstractStep):
@classmethod
def options(cls):
return AbstractStep.options() + [
Options.non_interactive,
]
def run(self, state):
# No need to prompt if we alrady have the bug_id.
if state.get("bug_id"):
return
user_response = self._tool.user.prompt("Please enter a bug number/bugzilla URL or a title for a new bug:\n")
# If the user responds with a number or a valid bugzilla URL, we assume it's bug number.
# Otherwise we assume it's a bug subject.
try:
state["bug_id"] = int(user_response)
        except (ValueError, TypeError):
parsed_url = None
try:
parsed_url = urlparse(user_response)
except ValueError:
# urlparse can throw a value error for some strings.
pass
if parsed_url and re.match("bugs.webkit.org", parsed_url.netloc):
match = re.match("id=(?P<bug_id>\d+)", parsed_url.query)
if match:
state["bug_id"] = int(match.group("bug_id"))
return
if not self._options.non_interactive and not self._tool.user.confirm("Are you sure you want to create a new bug?", default="n"):
self._exit(1)
state["bug_title"] = user_response
# FIXME: This is kind of a lame description.
state["bug_description"] = user_response
| 1.515625 | 2 |
MRC/utils_qa.py | alinghi/AI-Portfolio-Hub | 0 | 12795627 | <filename>MRC/utils_qa.py
# coding=utf-8
# Copyright 2020 The HuggingFace Team All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Pre-processing
Post-processing utilities for question answering.
"""
import collections
import json
import logging
import os
from typing import Optional, Tuple
import numpy as np
from tqdm.auto import tqdm
from konlpy.tag import Mecab
import torch
import random
from transformers import is_torch_available, PreTrainedTokenizerFast
from transformers.trainer_utils import get_last_checkpoint
logger = logging.getLogger(__name__)
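# Korean adverbs/qualifiers that should never appear inside a predicted answer
# span; candidate spans containing any of them are discarded in
# postprocess_qa_predictions below.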
ban_words=("이따금","아마","절대로","무조건","한때","대략","오직",
"오로지","감히","최소","아예","반드시","꼭","때때로","이미", "심지어"
,"종종","졸곧","약간","기꺼이", "비록","꾸준히","일부러","어쩔", "문득", "어쨌든", "순전히", "필수","자칫", "다소", "간혹", "적어도", "왜냐하면", "아무래도")
mecab = Mecab()
def tokenize(text):
# return text.split(" ")
return mecab.morphs(text)
def sigmoid(x):
return 1/(1+np.exp(-x))
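# Note: postprocess_qa_predictions ranks candidate spans by the product
# sigmoid(start_logit) * sigmoid(end_logit) rather than the usual logit sum;
# the alternative scoring lines are left commented out at the scoring site.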
def set_seed(seed: int):
"""
Helper function for reproducible behavior to set the seed in ``random``, ``numpy``, ``torch`` and/or ``tf`` (if
installed).
Args:
seed (:obj:`int`): The seed to set.
"""
random.seed(seed)
np.random.seed(seed)
if is_torch_available():
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed) # if use multi-GPU
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
def postprocess_qa_predictions(
examples,
features,
predictions: Tuple[np.ndarray, np.ndarray],
version_2_with_negative: bool = False,
n_best_size: int = 20,
max_answer_length: int = 30,
null_score_diff_threshold: float = 0.0,
output_dir: Optional[str] = None,
prefix: Optional[str] = None,
is_world_process_zero: bool = True,
consider=20
):
"""
Post-processes the predictions of a question-answering model to convert them to answers that are substrings of the
    original contexts. This is the base postprocessing function for models that only return start and end logits.
Args:
examples: The non-preprocessed dataset (see the main script for more information).
features: The processed dataset (see the main script for more information).
predictions (:obj:`Tuple[np.ndarray, np.ndarray]`):
The predictions of the model: two arrays containing the start logits and the end logits respectively. Its
first dimension must match the number of elements of :obj:`features`.
version_2_with_negative (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not the underlying dataset contains examples with no answers.
n_best_size (:obj:`int`, `optional`, defaults to 20):
The total number of n-best predictions to generate when looking for an answer.
max_answer_length (:obj:`int`, `optional`, defaults to 30):
The maximum length of an answer that can be generated. This is needed because the start and end predictions
are not conditioned on one another.
null_score_diff_threshold (:obj:`float`, `optional`, defaults to 0):
The threshold used to select the null answer: if the best answer has a score that is less than the score of
the null answer minus this threshold, the null answer is selected for this example (note that the score of
the null answer for an example giving several features is the minimum of the scores for the null answer on
each feature: all features must be aligned on the fact they `want` to predict a null answer).
Only useful when :obj:`version_2_with_negative` is :obj:`True`.
output_dir (:obj:`str`, `optional`):
If provided, the dictionaries of predictions, n_best predictions (with their scores and logits) and, if
:obj:`version_2_with_negative=True`, the dictionary of the scores differences between best and null
answers, are saved in `output_dir`.
prefix (:obj:`str`, `optional`):
If provided, the dictionaries mentioned above are saved with `prefix` added to their names.
is_world_process_zero (:obj:`bool`, `optional`, defaults to :obj:`True`):
Whether this process is the main process or not (used to determine if logging/saves should be done).
"""
assert (
len(predictions) == 2
), "`predictions` should be a tuple with two elements (start_logits, end_logits)."
all_start_logits, all_end_logits = predictions
assert len(predictions[0]) == len(
features
), f"Got {len(predictions[0])} predictions and {len(features)} features."
# Build a map example to its corresponding features.
example_id_to_index = {k: i for i, k in enumerate(examples["id"])}
features_per_example = collections.defaultdict(list)
for i, feature in enumerate(features):
features_per_example[example_id_to_index[feature["example_id"]]].append(i)
# The dictionaries we have to fill.
all_predictions = collections.OrderedDict()
all_nbest_json = collections.OrderedDict()
if version_2_with_negative:
scores_diff_json = collections.OrderedDict()
# Logging.
logger.setLevel(logging.INFO if is_world_process_zero else logging.WARN)
logger.info(
f"Post-processing {len(examples)} example predictions split into {len(features)} features."
)
# Let's loop over all the examples!
for example_index, example in enumerate(tqdm(examples)):
# Those are the indices of the features associated to the current example.
feature_indices = features_per_example[example_index]
min_null_prediction = None
prelim_predictions = []
# Looping through all the features associated to the current example.
for feature_index in feature_indices:
# We grab the predictions of the model for this feature.
start_logits = all_start_logits[feature_index]
end_logits = all_end_logits[feature_index]
# This is what will allow us to map some the positions in our logits to span of texts in the original
# context.
offset_mapping = features[feature_index]["offset_mapping"]
# Optional `token_is_max_context`, if provided we will remove answers that do not have the maximum context
# available in the current feature.
token_is_max_context = features[feature_index].get(
"token_is_max_context", None
)
# Update minimum null prediction.
feature_null_score = start_logits[0] + end_logits[0]
if (
min_null_prediction is None
or min_null_prediction["score"] > feature_null_score
):
min_null_prediction = {
"offsets": (0, 0),
"score": feature_null_score,
"start_logit": start_logits[0],
"end_logit": end_logits[0],
}
# Go through all possibilities for the `n_best_size` greater start and end logits.
start_indexes = np.argsort(start_logits)[
-1 : -consider - 1 : -1
].tolist()
end_indexes = np.argsort(end_logits)[-1 : -consider - 1 : -1].tolist()
for start_index in start_indexes:
for end_index in end_indexes:
# Don't consider out-of-scope answers, either because the indices are out of bounds or correspond
# to part of the input_ids that are not in the context.
if (
start_index >= len(offset_mapping)
or end_index >= len(offset_mapping)
or offset_mapping[start_index] is None
or offset_mapping[end_index] is None
):
continue
# Don't consider answers with a length that is either < 0 or > max_answer_length.
if (
end_index < start_index
or end_index - start_index + 1 > max_answer_length
):
continue
# Don't consider answer that don't have the maximum context available (if such information is
# provided).
if (
token_is_max_context is not None
and not token_is_max_context.get(str(start_index), False)
):
continue
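                    # Skip candidate spans that contain any banned word.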
flag=False
candidate_word=example["context"][offset_mapping[start_index][0]:offset_mapping[end_index][1]]
for ban_word in ban_words:
if ban_word in candidate_word:
flag=True
break
if flag:
print("BAN",candidate_word)
continue
else:
print("ACCEPT",candidate_word)
prelim_predictions.append(
{
"offsets": (
offset_mapping[start_index][0],
offset_mapping[end_index][1],
),
#"score": start_logits[start_index]+end_logits[end_index],
"score": sigmoid(start_logits[start_index])*sigmoid(end_logits[end_index]),
#"score": max(start_logits[start_index]+5,0)*max(end_logits[end_index]+5,0),
"start_logit": start_logits[start_index],
"end_logit": end_logits[end_index],
}
)
if version_2_with_negative:
# Add the minimum null prediction
prelim_predictions.append(min_null_prediction)
null_score = min_null_prediction["score"]
# Only keep the best `n_best_size` predictions.
predictions = sorted(
prelim_predictions, key=lambda x: x["score"], reverse=True
)[:n_best_size]
# Add back the minimum null prediction if it was removed because of its low score.
if version_2_with_negative and not any(
p["offsets"] == (0, 0) for p in predictions
):
predictions.append(min_null_prediction)
# Use the offsets to gather the answer text in the original context.
context = example["context"]
for pred in predictions:
offsets = pred.pop("offsets")
pred["text"] = context[offsets[0] : offsets[1]]
#print(pred["text"],pred["score"],pred["start_logit"],pred["end_logit"])
# In the very rare edge case we have not a single non-null prediction, we create a fake prediction to avoid
# failure.
if len(predictions) == 0 or (
len(predictions) == 1 and predictions[0]["text"] == ""
):
predictions.insert(
0, {"text": "empty", "start_logit": 0.0, "end_logit": 0.0, "score": 0.0}
)
# Compute the softmax of all scores (we do it with numpy to stay independent from torch/tf in this file, using
# the LogSumExp trick).
scores = np.array([pred.pop("score") for pred in predictions])
exp_scores = np.exp(scores - np.max(scores))
probs = exp_scores / exp_scores.sum()
# Include the probabilities in our predictions.
for prob, pred in zip(probs, predictions):
pred["probability"] = prob
# Pick the best prediction. If the null answer is not possible, this is easy.
if not version_2_with_negative:
all_predictions[example["id"]] = predictions[0]["text"]
else:
# Otherwise we first need to find the best non-empty prediction.
i = 0
while predictions[i]["text"] == "":
i += 1
best_non_null_pred = predictions[i]
# Then we compare to the null prediction using the threshold.
score_diff = (
null_score
- best_non_null_pred["start_logit"]
- best_non_null_pred["end_logit"]
)
scores_diff_json[example["id"]] = float(
score_diff
) # To be JSON-serializable.
if score_diff > null_score_diff_threshold:
all_predictions[example["id"]] = ""
else:
#all_predictions[example["id"]] = best_non_null_pred["text"]
all_predictions[example["id"]] = best_non_null_pred
# Make `predictions` JSON-serializable by casting np.float back to float.
all_nbest_json[example["id"]] = [
{
k: (
float(v)
if isinstance(v, (np.float16, np.float32, np.float64))
else v
)
for k, v in pred.items()
}
for pred in predictions
]
# If we have an output_dir, let's save all those dicts.
if output_dir is not None:
assert os.path.isdir(output_dir), f"{output_dir} is not a directory."
prediction_file = os.path.join(
output_dir,
"predictions.json" if prefix is None else f"predictions_{prefix}".json,
)
nbest_file = os.path.join(
output_dir,
"nbest_predictions.json"
if prefix is None
else f"nbest_predictions_{prefix}".json,
)
if version_2_with_negative:
null_odds_file = os.path.join(
output_dir,
"null_odds.json" if prefix is None else f"null_odds_{prefix}".json,
)
logger.info(f"Saving predictions to {prediction_file}.")
with open(prediction_file, "w") as writer:
writer.write(json.dumps(all_predictions, indent=4, ensure_ascii=False) + "\n")
logger.info(f"Saving nbest_preds to {nbest_file}.")
with open(nbest_file, "w") as writer:
writer.write(json.dumps(all_nbest_json, indent=4, ensure_ascii=False) + "\n")
if version_2_with_negative:
logger.info(f"Saving null_odds to {null_odds_file}.")
with open(null_odds_file, "w") as writer:
writer.write(json.dumps(scores_diff_json, indent=4, ensure_ascii=False) + "\n")
return all_predictions
def check_no_error(training_args, data_args, tokenizer, datasets):
# Detecting last checkpoint.
last_checkpoint = None
if (
os.path.isdir(training_args.output_dir)
and training_args.do_train
and not training_args.overwrite_output_dir
):
last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
"Use --overwrite_output_dir to overcome."
)
elif last_checkpoint is not None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
)
# Tokenizer check: this script requires a fast tokenizer.
if not isinstance(tokenizer, PreTrainedTokenizerFast):
raise ValueError(
"This example script only works for models that have a fast tokenizer. Checkout the big table of models "
"at https://huggingface.co/transformers/index.html#bigtable to find the model types that meet this "
"requirement"
)
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warn(
f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
)
max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)
if "validation" not in datasets:
raise ValueError("--do_eval requires a validation dataset")
return last_checkpoint, max_seq_length | 1.671875 | 2 |
Reservoirs/Task1bis_Non-Periodic-Delay/task1bis.py | grezesf/Research | 1 | 12795628 | import mdp
import Oger
import numpy
import pylab
import random
import math
### README
# the goal is to teach the reservoir to recreate the input signal with a certain delay
def main():
### Create/Load dataset
set_size=200
data_length=100
delay=25
[x, y] = gen_random_data(set_size, data_length, delay)
# print x,y
print numpy.shape(x)
# should be (set_size, data_dim, point_dim) (in this case point_dim=1)
# print x[0][0:10]
print numpy.shape(x[0]),numpy.shape(x[1]),numpy.shape(x[2]),numpy.shape(x[3]),numpy.shape(x[4])
# print numpy.shape(y)
# create MDP format input
data = [None, zip(x, y)]
# print data
### Create reservoir
# construct individual nodes,
reservoir_size = 500
reservoir = Oger.nodes.ReservoirNode(output_dim=reservoir_size, input_scaling=0.05)
readout = Oger.nodes.RidgeRegressionNode()
# build network with MDP framework
flow = mdp.Flow([reservoir, readout], verbose=1)
Oger.utils.make_inspectable(Oger.nodes.ReservoirNode)
# train the flow
flow.train(data)
#apply the trained flow to the training data and test data
trainout = flow(x[0])
# gen test data
set_size=2
[x_test, y_test] = gen_random_data(set_size, data_length, delay)
print numpy.shape(x_test)
# should be (set_size, data_dim, point_dim) (in this case point_dim=1)
# print x[0][0:10]
print numpy.shape(x_test[0]),numpy.shape(x_test[1])
testout1 = flow(x_test[0])
testout2 = flow(x_test[1])
print "NRMSE: " + str(Oger.utils.nrmse(y_test[0], testout1))
# plot results
nx = 2
ny = 1
# plot a few inputs
pylab.subplot(nx, ny, 1)
pylab.plot(x[0],'r', label='input')
pylab.legend()
# pylab.subplot(nx, ny, 1)
# pylab.plot(x[1],'b')
# pylab.subplot(nx, ny, 1)
# pylab.plot(x[2],'g')
#plot the input and target
# pylab.subplot(nx, ny, 2)
# pylab.plot(x[0],'r')
# pylab.subplot(nx, ny, 2)
# pylab.plot(y[0], 'b')
#plot the training output and target
pylab.subplot(nx, ny, 2)
pylab.plot(y[0],'b', label='target')
pylab.subplot(nx, ny, 2)
pylab.plot(trainout, 'g', label='output')
pylab.legend()
#plot the testing output and target
# pylab.subplot(nx, ny, 4)
# pylab.plot(y_test[0],'b')
# pylab.subplot(nx, ny, 4)
# pylab.plot(testout1, 'g')
# #plot the testing output and target
# pylab.subplot(nx, ny, 5)
# pylab.plot(y_test[1],'b')
# pylab.subplot(nx, ny, 5)
# pylab.plot(testout2, 'g')
pylab.show()
# end of main
return None
# data generating function
def gen_random_data(set_size=100, data_length=500, delay=20):
# set_size is the number of waves
# data_length is the number of points per wave
# the target is delayed by delay
# sets starts empty
input_set = []
target_set = []
# generate set_size signals
for nb in range(set_size):
# waves start empty
input_wave = [numpy.array([20.0*random.random()-10.0]) for x in range(data_length)]
target_wave = [numpy.array([0]) for x in range(delay)]
target_wave.extend(input_wave[:-delay])
input_set.append(numpy.array(input_wave))
target_set.append(numpy.array(target_wave))
return numpy.array([input_set, target_set])
# Call to main
if __name__=='__main__':
main() | 2.828125 | 3 |
configs/paths_config.py | Svoka/pixel2style2pixel | 0 | 12795629 | dataset_paths = {
'train_source': 'datasets/f2c/train_source',
'train_target': 'datasets/f2c/train_target',
'test_source': 'datasets/f2c/test_source',
'test_target': 'datasets/f2c/test_target',
}
model_paths = {
'stylegan_ffhq': 'pretrained_models/stylegan2-ffhq-config-f.pt',
'ir_se50': 'pretrained_models/model_ir_se50.pth',
'circular_face': 'pretrained_models/CurricularFace_Backbone.pth',
'mtcnn_pnet': 'pretrained_models/mtcnn/pnet.npy',
'mtcnn_rnet': 'pretrained_models/mtcnn/rnet.npy',
'mtcnn_onet': 'pretrained_models/mtcnn/onet.npy',
'shape_predictor': 'shape_predictor_68_face_landmarks.dat',
'moco': 'pretrained_models/moco_v2_800ep_pretrain.pth.tar'
}
| 1.054688 | 1 |
31_histogram.py | Larilok/image_processing | 0 | 12795630 | from PIL import Image
from pylab import *
img = Image.open('images/profile.jpg').convert('L')
print(array(img)[500])
imgArray = array(img)
figure()
hist(imgArray.flatten(), 300)
show()
# img.show()
| 2.5 | 2 |
quantity/digger/engine/context/data_context.py | wyjcpu/quantity | 0 | 12795631 | # -*- coding: utf-8 -*-
##
# @file data_context.py
# @brief
# @author wondereamer
# @version 0.1
# @date 2016-11-27
import datetime
from quantity.digger.engine.series import SeriesBase, NumberSeries, DateTimeSeries
from quantity.digger.technicals.base import TechnicalBase
from quantity.digger.util import elogger as logger
from quantity.digger.datastruct import (
Bar
)
class DataContext(object):
""" A DataContext expose data should be visited by multiple strategie.
which including bars of specific PContract, technicals and series of
strategie.
"""
def __init__(self, Helper):
data = Helper.data
self.open = NumberSeries(data.open.values, 'open')
self.close = NumberSeries(data.close.values, 'close')
self.high = NumberSeries(data.high.values, 'high')
self.low = NumberSeries(data.low.values, 'low')
self.volume = NumberSeries(data.volume.values, 'volume')
self.datetime = DateTimeSeries(data.index, 'datetime')
        self.ith_comb = -1  # index of the i-th combination
        self.ith_strategy = -1  # index of the j-th strategy
self.bar = Bar(None, None, None, None, None, None)
self.new_row = False
self.next_datetime = datetime.datetime(2100, 1, 1)
self.technicals = [[{}]]
self._curbar = -1
self._Helper = Helper
self._series = [[{}]]
self._variables = [[{}]]
self._all_variables = [[{}]]
self._size = len(data.close)
@property
def raw_data(self):
return self._Helper.data
@property
def curbar(self):
return self._curbar + 1
@property
def pcontract(self):
return self._Helper.pcontract
@property
def contract(self):
return self._Helper.pcontract.contract
def __getattr__(self, name):
return self.get_item(name)
def update_system_vars(self):
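        # Advance the built-in series (open/close/high/low/volume/datetime) to
        # the current bar and rebuild self.bar from their latest values.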
# self.data = np.append(data, tracker.container_day)
self._curbar = self.last_curbar
self.open.update_curbar(self._curbar)
self.close.update_curbar(self._curbar)
self.high.update_curbar(self._curbar)
self.low.update_curbar(self._curbar)
self.volume.update_curbar(self._curbar)
self.datetime.update_curbar(self._curbar)
self.bar = Bar(self.datetime[0], self.open[0], self.close[0],
self.high[0], self.low[0], self.volume[0])
self.new_row = False
def update_user_vars(self):
# Update series defined by user if exist.
try:
series = self._series[self.ith_comb][self.ith_strategy].values()
except IndexError:
pass
else:
for s in series:
s.update_curbar(self._curbar)
s.duplicate_last_element()
# Update technicals if exist.
try:
technicals = self.technicals[self.ith_comb][self.ith_strategy].values()
except IndexError:
pass
else:
for tec in technicals:
if tec.is_multiple:
for s in tec.series.values():
s.update_curbar(self._curbar)
else:
for s in tec.series:
s.update_curbar(self._curbar)
def rolling_forward(self):
""" 滚动读取下一步的数据。 """
self.new_row, self.last_curbar = self._Helper.rolling_forward()
if not self.new_row:
self.last_curbar -= 1
return False, None
self.next_datetime = self._Helper.data.index[self.last_curbar]
if self.datetime[0] >= self.next_datetime and self.curbar != 0:
            logger.error('Contract [%s]: bar timestamps are out of order or duplicated' % self.pcontract)
raise
return True, self.new_row
def __len__(self):
return len(self._Helper)
def get_item(self, name):
""" 获取用户在策略on_init函数中初始化的变量 """
return self._all_variables[self.ith_comb][self.ith_strategy][name]
def add_item(self, name, value):
""" 添加用户初始化的变量。 """
# @TODO ...
if self.ith_comb < len(self._all_variables):
if self.ith_strategy < len(self._all_variables[self.ith_comb]):
self._all_variables[self.ith_comb][self.ith_strategy][name] = value
else:
self._all_variables[self.ith_comb].append({name: value})
else:
self._all_variables.append([{name: value}])
if isinstance(value, SeriesBase):
self.add_series(name, value)
elif isinstance(value, TechnicalBase):
self.add_indicator(name, value)
else:
self.add_variable(name, value)
def add_series(self, attr, s):
""" 添加on_init中初始化的序列变量
Args:
attr (str): 属性名
s (Series): 序列变量
"""
s.reset_data([], self._size)
if self.ith_comb < len(self._series):
if self.ith_strategy < len(self._series[self.ith_comb]):
self._series[self.ith_comb][self.ith_strategy][attr] = s
else:
self._series[self.ith_comb].append({attr: s})
else:
self._series.append([{attr: s}])
def add_indicator(self, attr, indic):
if self.ith_comb < len(self.technicals):
if self.ith_strategy < len(self.technicals[self.ith_comb]):
self.technicals[self.ith_comb][self.ith_strategy][attr] = indic
else:
self.technicals[self.ith_comb].append({attr: indic})
else:
self.technicals.append([{attr: indic}])
def add_variable(self, attr, var):
if self.ith_comb < len(self._variables):
if self.ith_strategy < len(self._variables[self.ith_comb]):
self._variables[self.ith_comb][self.ith_strategy][attr] = var
else:
self._variables[self.ith_comb].append({attr: var})
else:
self._variables.append([{attr: var}])
class DataContextAttributeHelper(object):
""""""
def __init__(self, data):
self.data = data
def __setattr__(self, name, value):
if name == 'data':
super(DataContextAttributeHelper, self).__setattr__(name, value)
return
data = self.data
if name in data._all_variables[data.ith_comb][data.ith_strategy]:
data.add_item(name, value)
def __getattr__(self, name):
return getattr(self.data, name)
| 2.3125 | 2 |
examples/mnist_cnn.py | llv22/keras | 0 | 12795632 | <reponame>llv22/keras
'''Trains a simple convnet on the MNIST dataset.
Gets to 99.25% test accuracy after 12 epochs
(there is still a lot of margin for parameter tuning).
16 seconds per epoch on a GRID K520 GPU.
'''
from __future__ import print_function
import os
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" # see issue #152
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
import tensorflow as tf
## 1. simple setup of default tensorflow session
# tf_config = tf.ConfigProto(log_device_placement=True)
# tf_config.gpu_options.allow_growth = True
# tf_config.gpu_options.per_process_gpu_memory_fraction = 0.1
# sess = tf.Session(config=tf_config)
## 2. more strategy setup of default tensorflow session
num_cores = 1
GPU = len([v for v in os.environ["CUDA_VISIBLE_DEVICES"].split(',') if len(v)>0])
if GPU > 0:
num_GPU = 1
num_CPU = 1
else:
num_GPU = 0
num_CPU = 1
tf_config = tf.ConfigProto(
intra_op_parallelism_threads=num_cores, \
inter_op_parallelism_threads=num_cores, \
allow_soft_placement=True, \
# log_device_placement=True, \
device_count = {'CPU' : num_CPU, 'GPU' : num_GPU}
)
tf_config.gpu_options.allow_growth = True
tf_config.gpu_options.per_process_gpu_memory_fraction = 0.55
sess = tf.Session(config=tf_config)
from keras import backend as K
K.set_session(sess)
import keras
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras import backend as K
batch_size = 128
num_classes = 10
epochs = 12
# input image dimensions
img_rows, img_cols = 28, 28
# the data, split between train and test sets
(x_train, y_train), (x_test, y_test) = mnist.load_data()
if K.image_data_format() == 'channels_first':
x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)
x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)
input_shape = (1, img_rows, img_cols)
else:
x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
input_shape = (img_rows, img_cols, 1)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
# convert class vectors to binary class matrices
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 3),
activation='relu',
input_shape=input_shape))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(num_classes, activation='softmax'))
model.compile(loss=keras.losses.categorical_crossentropy,
optimizer=keras.optimizers.Adadelta(),
metrics=['accuracy'])
model.fit(x_train, y_train,
batch_size=batch_size,
epochs=epochs,
verbose=1,
validation_data=(x_test, y_test))
score = model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
| 2.390625 | 2 |
fix_network.py | gauthamv529/Neurohex | 0 | 12795633 | <reponame>gauthamv529/Neurohex
import cPickle
import argparse
from inputFormat import *
from network import network
from theano import tensor as T
parser = argparse.ArgumentParser()
parser.add_argument("source", type=str, help="Pickled network to steal params from.")
parser.add_argument("dest", type=str, help="File to place new network in.")
parser.add_argument("--cpu", "-c", dest="cpu", action='store_const',
const=True, default=False,
help="Convert network to run on a CPU.")
args = parser.parse_args()
print "loading model..."
f = file(args.source, 'rb')
old_network = cPickle.load(f)
f.close()
params = old_network.params
if args.cpu:
print "converting gpu parameters..."
new_params=[]
for param in params:
param = T._shared(param.get_value())
new_params.append(param)
params = new_params
new_network = network(batch_size=None, params = params)
print "saving model..."
f = file(args.dest, 'wb')
cPickle.dump(new_network, f, protocol=cPickle.HIGHEST_PROTOCOL)
f.close() | 2.421875 | 2 |
day05.py | jamesconstable/aoc2021 | 0 | 12795634 | #! /usr/bin/env python
'''
Solvers and example test cases for Day 5 of the Advent of Code 2021.
Problem description: <https://adventofcode.com/2021/day/5>
'''
from collections import Counter
from dataclasses import dataclass
from typing import Iterable, List, Tuple
import unittest
def part1(lines: Iterable[str]) -> int:
'''
Solver for Day 5, part 1
'''
vents = parse_input(lines)
vent_points: Counter = Counter()
for (start, end) in vents:
if start.x == end.x or start.y == end.y:
vent_points.update(points_between(start, end))
return sum(1 for (_, count) in vent_points.most_common() if count >= 2)
def part2(lines: Iterable[str]) -> int:
'''
Solver for Day 5, part 2
'''
vents = parse_input(lines)
vent_points: Counter = Counter()
for (start, end) in vents:
vent_points.update(points_between(start, end))
return sum(1 for (_, count) in vent_points.most_common() if count >= 2)
@dataclass(frozen=True)
class Point:
'''
Represents a single (x, y) coordinate.
'''
x: int
y: int
def points_between(start: Point, end: Point) -> Iterable[Point]:
'''
Iterates over the integral points between start and end (inclusive). Line
must be either vertical, horizontal, or 45 degrees.
'''
x_step = sign(end.x - start.x)
y_step = sign(end.y - start.y)
x = start.x
y = start.y
while x != end.x or y != end.y:
yield Point(x, y)
x += x_step
y += y_step
yield Point(x, y)
def sign(value: int) -> int:
'''
Returns the sign of value, i.e. 1 if value is positive, -1 if value is
negative, or 0 if value is zero.
'''
if value < 0:
return -1
if value == 0:
return 0
return 1
def parse_input(lines: Iterable[str]) -> List[Tuple[Point, Point]]:
'''
Parses the problem input and returns a list of (Point, Point) tuples
describing the vents.
'''
vents = []
for line in lines:
start, _, end = line.split()
p1_x, p1_y = start.split(',')
p2_x, p2_y = end.split(',')
vents.append((Point(int(p1_x), int(p1_y)),
Point(int(p2_x), int(p2_y))))
return vents
class TestDay05(unittest.TestCase):
'''
Example test cases for Day 5, as specified in the problem description
'''
# pylint: disable=missing-function-docstring
def setUp(self):
self.data = [
'0,9 -> 5,9',
'8,0 -> 0,8',
'9,4 -> 3,4',
'2,2 -> 2,1',
'7,0 -> 7,4',
'6,4 -> 2,0',
'0,9 -> 2,9',
'3,4 -> 1,4',
'0,0 -> 8,8',
'5,5 -> 8,2']
def test_part1_example(self):
self.assertEqual(part1(self.data), 5)
def test_part2_example(self):
self.assertEqual(part2(self.data), 12)
| 3.921875 | 4 |
src/Utils/Python/Tests/test_IterativeDiagonalizer.py | qcscine/utilities | 0 | 12795635 | __copyright__ = """This code is licensed under the 3-clause BSD license.
Copyright ETH Zurich, Laboratory of Physical Chemistry, Reiher Group.
See LICENSE.txt for details.
"""
import pytest
import scine_utilities as scine
import numpy as np
import os
class SigmaVectorEvaluatorPython(scine.SigmaVectorEvaluator):
def __init__(self, matrix):
scine.SigmaVectorEvaluator.__init__(self)
self.matrix = matrix
def evaluate(self, guess_vectors):
return np.dot(self.matrix, guess_vectors)
def collapsed(self, newSubspaceDimension):
return
def swap(self, i, j):
return
def create_matrix():
# create a selfadjoint matrix
matrix = np.random.rand(100,100)
matrix = 0.5*(matrix + np.transpose(matrix))
matrix[np.diag_indices_from(matrix)] += 1
return matrix
def initialize_diagonalizer(matrix):
# Create sigma vector evaluator and preconditioner
sve = scine.IndirectSigmaVectorEvaluator(matrix)
prec = scine.IndirectPreconditionerEvaluator(matrix[np.diag_indices_from(matrix)])
# Create and fill Non Orthogonal Davidson
diag = scine.NonOrthogonalDavidson(5,100)
diag.sigma_vector_evaluator = sve
diag.set_preconditioner(prec)
return diag
def test_SigmaVectorEvaluator():
ref = create_matrix()
sve = scine.IndirectSigmaVectorEvaluator(ref)
result = sve.evaluate(2.0 * np.identity(100))
assert np.all(2.0 * ref[:,:] == result[:,:])
def test_Preconditioner():
'''
Test that if you try to precondition a vector of ones, you just get
-1.0 / (difference btw the diagonal and the current eigenvalue)
'''
ref = create_matrix()
diag = ref[np.diag_indices_from(ref)]
ones_vector = np.ones(100)
arbitrary_eigenvalue = 3.5
prec = scine.IndirectPreconditionerEvaluator(diag)
result = prec.evaluate(ones_vector, arbitrary_eigenvalue)
assert np.all(result[:] == -1.0 / (diag - arbitrary_eigenvalue))
def test_InitializeDiagonalizer():
diag = initialize_diagonalizer(create_matrix())
def test_DiagonalizeWithNonOrthogonalDavidson():
ref = create_matrix()
diag = initialize_diagonalizer(ref)
result = diag.solve(scine.core.Log.silent())
# Get reference numbers
w, v = np.linalg.eig(ref)
assert np.all(result.eigenvalues[:] - sorted(w)[:5] <= 1.0e-5)
def test_DiagonalizeWithOrthogonalDavidson():
ref = create_matrix()
# Create sigma vector evaluator and preconditioner
sve = scine.IndirectSigmaVectorEvaluator(ref)
prec = scine.IndirectPreconditionerEvaluator(ref[np.diag_indices_from(ref)])
# Create and fill Non Orthogonal Davidson
diag = scine.OrthogonalDavidson(5,100)
diag.sigma_vector_evaluator = sve
diag.set_preconditioner(prec)
result = diag.solve(scine.core.Log.silent())
# Get reference numbers
w, v = np.linalg.eig(ref)
assert np.all(result.eigenvalues[:] - sorted(w)[:5] <= 1.0e-5)
def test_DiagonalizeWithPythonSigmaVectorEvaluator():
ref = create_matrix()
diag = initialize_diagonalizer(ref)
# Set python specific sigma vector evaluator
# Note: first initialize, then assign to prevent auto casting.
# If I write diag.sigma_vector_evaluator = SigmaVectorEvaluatorPython(ref)
    # then it would try to look for the method SigmaVectorEvaluator::evaluate()
# instead of SigmaVectorEvaluatorPython::evaluate()
sve = SigmaVectorEvaluatorPython(ref)
diag.sigma_vector_evaluator = sve
result = diag.solve(scine.core.Log.silent())
# Get reference numbers
w, v = np.linalg.eig(ref)
assert np.all(result.eigenvalues[:] - sorted(w)[:5] <= 1.0e-5)
| 2.21875 | 2 |
dl_keras/resnet.py | jarvisqi/deep_learning | 32 | 12795636 | import os
from keras import layers
from keras.layers import Input, merge
from keras.layers.convolutional import (AveragePooling2D, Conv2D, MaxPooling2D,
ZeroPadding2D)
from keras.layers.core import Activation, Dense, Flatten
from keras.layers.normalization import BatchNormalization
from keras.models import Model,Sequential
from keras.utils import plot_model
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
def identity_block(x, nb_filter, kernel_size=3):
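    # Bottleneck residual block with an identity shortcut:
    # 1x1 -> 3x3 -> 1x1 convolutions, added back onto the unchanged input.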
k1, k2, k3 = nb_filter
shortcut = x
out = Conv2D(k1, kernel_size=(1,1), strides=(1,1),padding="valid",activation="relu")(x)
out = BatchNormalization(axis=3)(out)
out = Conv2D(k2, kernel_size=(3,3), strides=(1,1), padding='same',activation="relu")(out)
out = BatchNormalization(axis=3)(out)
out = Conv2D(k3, kernel_size=(1,1), strides=(1,1),padding="valid")(out)
out = BatchNormalization(axis=3)(out)
# out = merge([out, shortcut], mode='sum')
out= layers.add([out,shortcut])
out = Activation('relu')(out)
return out
def conv_block(x, nb_filter, kernel_size=3):
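    # Bottleneck residual block with a projection shortcut: the first 1x1
    # convolution uses stride 2 to downsample, and the shortcut is projected
    # with a strided 1x1 convolution so the two branches can be added.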
k1, k2, k3 = nb_filter
shortcut = x
out = Conv2D(k1, kernel_size=(1,1), strides=(2,2), padding="valid",activation="relu")(x)
out = BatchNormalization(axis=3)(out)
    out = Conv2D(k2, kernel_size=(kernel_size,kernel_size), strides=(1,1), padding="same",activation="relu")(out)
out = BatchNormalization()(out)
out = Conv2D(k3, kernel_size=(1,1), strides=(1,1), padding="valid")(out)
out = BatchNormalization(axis=3)(out)
shortcut = Conv2D(k3, kernel_size=(1,1), strides=(2,2), padding="valid")(shortcut)
shortcut = BatchNormalization(axis=3)(shortcut)
# out = merge([out, shortcut], mode='sum')
out = layers.add([out, shortcut])
out = Activation('relu')(out)
return out
def buildNet():
inp = Input(shape=(224, 224, 3))
out = ZeroPadding2D((3, 3))(inp)
out = Conv2D(64, kernel_size=(7, 7), strides=(2, 2),activation="relu")(out)
out = BatchNormalization()(out)
out = MaxPooling2D(pool_size=(3, 3), strides=(2, 2), padding="same")(out)
out = conv_block(out, [64, 64, 256])
out = identity_block(out, [64, 64, 256])
out = identity_block(out, [64, 64, 256])
out = conv_block(out, [128, 128, 512])
out = identity_block(out, [128, 128, 512])
out = identity_block(out, [128, 128, 512])
out = identity_block(out, [128, 128, 512])
out = conv_block(out, [256, 256, 1024])
out = identity_block(out, [256, 256, 1024])
out = identity_block(out, [256, 256, 1024])
out = identity_block(out, [256, 256, 1024])
out = identity_block(out, [256, 256, 1024])
out = identity_block(out, [256, 256, 1024])
out = conv_block(out, [512, 512, 2048])
out = identity_block(out, [512, 512, 2048])
out = identity_block(out, [512, 512, 2048])
out = AveragePooling2D((4, 4))(out)
    out = Flatten()(out)  # flatten
out = Dense(1000, activation='softmax')(out)
model = Model(inputs=inp, outputs=out)
return model
if __name__ == '__main__':
# resNet18 = ResNet(block_num=[2,2,2,2])
# resNet34 = ResNet(block_num=[3,4,6,3])
# resNet50 = ResNet(block_num=[3,4,6,3])
# resNet101 = ResNet(block_num=[3,4,23,3])
# resNet152 = ResNet(block_num=[3,8,36,3])
net = buildNet()
net.compile(optimizer='adam',loss='categorical_crossentropy', metrics=['accuracy'])
plot_model(net, to_file='./models/resnet.png')
net.summary()
| 2.859375 | 3 |
tests/test_percentage_indicator.py | tahmidbintaslim/pyprind | 411 | 12795637 | """
<NAME> 2014-2016
Python Progress Indicator Utility
Author: <NAME> <<EMAIL>>
License: BSD 3 clause
Contributors: https://github.com/rasbt/pyprind/graphs/contributors
Code Repository: https://github.com/rasbt/pyprind
PyPI: https://pypi.python.org/pypi/PyPrind
"""
import sys
import time
import pyprind
n = 100
sleeptime = 0.02
def test_basic_percent():
perc = pyprind.ProgPercent(n)
for i in range(n):
time.sleep(sleeptime)
perc.update()
def test_stdout():
perc = pyprind.ProgPercent(n, stream=sys.stdout)
for i in range(n):
time.sleep(sleeptime)
perc.update()
def test_generator():
for i in pyprind.prog_percent(range(n), stream=sys.stdout):
time.sleep(sleeptime)
def test_monitoring():
perc = pyprind.ProgPercent(n, monitor=True)
for i in range(n):
time.sleep(sleeptime)
perc.update()
print(perc)
def test_item_tracking():
items = ['file_%s.csv' % i for i in range(0, n)]
perc = pyprind.ProgPercent(len(items))
for i in items:
time.sleep(sleeptime)
perc.update(item_id=i)
def test_force_flush():
perc = pyprind.ProgPercent(n)
for i in range(n):
time.sleep(sleeptime)
perc.update(force_flush=True)
def test_update_interval():
perc = pyprind.ProgPercent(n, update_interval=4)
for i in range(n):
time.sleep(sleeptime)
perc.update()
if __name__ == "__main__":
print('\n%s' % (80 * '='))
print('%s\n' % (80 * '='))
print('Testing Basic Percentage Indicator\n')
test_basic_percent()
print('\n%s' % (80 * '='))
print('%s\n' % (80 * '='))
print('Testing stdout Stream\n')
test_stdout()
print('\n%s' % (80 * '='))
print('%s\n' % (80 * '='))
print('Testing Percentage Indicator Generator\n')
test_generator()
print('\n%s' % (80 * '='))
print('%s\n' % (80 * '='))
print('Testing monitor function\n')
test_monitoring()
print('\n%s' % (80 * '='))
print('%s\n' % (80 * '='))
print('Testing Item Tracking\n')
test_item_tracking()
print('\n%s' % (80 * '='))
print('%s\n' % (80 * '='))
print('Testing Force Flush\n')
test_force_flush()
print('\n%s' % (80 * '='))
print('%s\n' % (80 * '='))
print('Testing Update Interval\n')
test_update_interval()
| 2.609375 | 3 |
sock.py | danielgweb/Developer-Test | 0 | 12795638 | <gh_stars>0
#!/usr/bin/python
# coding: utf-8
# imports
import socket
import sys
import hashlib
import binascii
import collections
# classes
class SockClient(object):
"""SockClient for handling the connection to the server"""
def __init__(self):
# Creates a TCP/IP socket
try:
self.client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
except socket.error, e:
print >> sys.stderr, e
sys.exit()
def __receiveBytes(self, amount):
try:
received = self.client.recv(amount)
except socket.error, e:
print >> sys.stderr, e
self.client.close()
sys.exit()
# Debug
print "\nReceived: %d" % len(received)
return received
def __getPacketLength(self):
packetlength = self.__receiveBytes(2)
# Debug
print "\n\nPacket Length: %d\n - Bytes: %s\n - Hex: %s" % \
(int(''.join([ x.encode('hex') for x in packetlength ]),16),
[ ord(x) for x in packetlength ],
[ x.encode('hex') for x in packetlength])
return packetlength
def __getMD5Sum(self):
md5sum = self.__receiveBytes(16)
# Debug
print "\n\nMD5 Sum: %s\n - Bytes: %s\n - Hex: %s" % \
(md5sum.encode('hex'),
[ ord(x) for x in md5sum ],
[ x.encode('hex') for x in md5sum])
return md5sum
def __getData(self, amount):
data = self.__receiveBytes(amount)
# Debug
print "\n\nData: %s\n - Bytes: %s\n - Hex: %s" % \
(data.encode('hex'),
[ ord(x) for x in data ],
[ x.encode('hex') for x in data])
return data
def __getParityByte(self):
parity = self.__receiveBytes(1)
# Debug
print "\n\nParity: %s\n - Bytes: %s\n - Hex: %s" % \
(parity.encode('hex'),
[ ord(x) for x in parity ],
[ x.encode('hex') for x in parity])
return parity
def __checkMessageParity(self, bits):
num_1bits = bits.count('1')
# Check if parity byte exists
if(int(bits[len(bits)-8:]) > 1):
print "Parity byte does not exists!"
else:
if(bits[:len(bits)-8].count('1') % 2 == 0):
print "Message number of 1 bits is Even (%d), checking parity byte..." % bits[:len(bits)-8].count('1')
print "Parity byte is %s" % bits[len(bits)-8:]
else:
print "Message number of 1 bits is ODD (%d), checking parity byte..." % bits[:len(bits)-8].count('1')
print "Parity byte is %s" % bits[len(bits)-8:]
if(num_1bits % 2 == 0):
print "Even number of 1 bits (%d), message parity is ok" % num_1bits
return 0
else:
print "Odd number of 1 bits (%d), message parity is not ok" % num_1bits
return 1
def __checkDataMD5Sum(self, data, message_md5):
newmd5 = hashlib.md5()
newmd5.update(data)
md5sum = newmd5.hexdigest()
if(md5sum == message_md5):
print "Data MD5 sum is OK %s == %s" % (message_md5, md5sum)
else:
print "Data MD5 sum is NOT ok %s != %s" % (message_md5, md5sum)
def __getMostCommonByte(self, data):
counts = collections.Counter([ x.encode('hex') for x in data]).most_common()
self.mostcommonbyte = counts[0][0]
print "Most commom byte in data is hex: %s" % self.mostcommonbyte
def __getCipherKey(self):
self.cipherkey = int(self.mostcommonbyte,16) ^ 0x20
print "Cipherkey: Int: %s - Hex: %s" % (self.cipherkey, hex(self.cipherkey)[2:])
def __decodeData(self, data):
mdata = [ x.encode('hex') for x in data ]
decodedmessage = [ chr(int(x,16) ^ self.cipherkey) for x in mdata ]
print decodedmessage
print "Decoded data hex: %s" % [ x.encode('hex') for x in decodedmessage]
decodedmessage = ''.join(decodedmessage)
print "\nDecoded data str: %s" % decodedmessage
return decodedmessage
def __createDecodedMessagePacket(self, decodedmessage):
nm_length = 2 + 16 + len(decodedmessage) + 1
hexnmlength = hex(nm_length)[2:]
if (len(hexnmlength) == 3):
hexnmlength = '0'+hexnmlength
print "\nNM length: %d - Hex: %s" % (nm_length, hexnmlength)
message_length = [hexnmlength[i:i+2] for i in range(0, len(hexnmlength), 2)]
        # Workaround ("Miau"): zero-pad the length to 2 hex bytes by hand, for lack of a better way in Python
if(nm_length <= 0xff):
print 'True'
zb = ['00']
zb.extend(message_length)
nm_length = zb
print nm_length
else:
nm_length = message_length
        # End of the workaround
nm_newmd5 = hashlib.md5()
nm_newmd5.update(decodedmessage)
md5sum = nm_newmd5.hexdigest()
print "\nNM decoded data MD5 sum: %s" % md5sum
nm_md5sum = [md5sum[i:i+2] for i in range(0, len(md5sum), 2)]
print nm_md5sum
nm_decodedmessage = [ x.encode('hex') for x in decodedmessage]
nm_parity = 0x0
nm_message = []
nm_message.extend(nm_length)
nm_message.extend(nm_md5sum)
nm_message.extend(nm_decodedmessage)
print "NM message: "
print nm_message
nm_binary = (bin(int(''.join(nm_message), 16))[2:]).zfill(len(''.join(nm_message)) * 4)
print "\nNM binary: %s" % nm_binary
nm_parity = self.__checkMessageParity(nm_binary)
nm_parity = [nm_parity]
nm_parity = [''.join('{:02x}'.format(x) for x in nm_parity)]
nm_message.extend(nm_parity)
# Recheck message parity
nm_binary = (bin(int(''.join(nm_message), 16))[2:]).zfill(len(''.join(nm_message)) * 4)
nm_parity = self.__checkMessageParity(nm_binary)
print "\nNM binary: %s" % nm_binary
print "NM message: "
print nm_message
createdmessage = ''.join(nm_message)
print "NM message str: %s" % createdmessage
return createdmessage
def getEncryptedMessage(self):
print "Client: Receiving new message..."
packetlength = self.__getPacketLength()
md5sum = self.__getMD5Sum()
data = self.__getData(int(''.join([ x.encode('hex') for x in packetlength ]),16) - 16 - 2 - 1)
parity = self.__getParityByte()
message = packetlength + md5sum + data + parity
binarymessage = (bin(int(message.encode('hex'), 16))[2:]).zfill(len(message.encode('hex')) * 4)
print "\n\nMessage: %s\n - Hex: %s\n - Bin: %s" % \
([ ord(x) for x in message ],
message.encode('hex'),
binarymessage)
self.__checkMessageParity(binarymessage)
self.__checkDataMD5Sum(data, md5sum.encode('hex'))
self.__getMostCommonByte(data)
self.__getCipherKey()
return data
def getDecodedMessage(self, encryptedMessagedata):
decodedmessage = self.__decodeData(encryptedMessagedata)
return decodedmessage
def sendDecodedMessage(self, decodedmessage):
print "Client: Creating decoded message..."
createdmessage = self.__createDecodedMessagePacket(decodedmessage)
print "Client: Sending decoded message..."
try:
self.client.send(createdmessage.decode('hex'))
except socket.error, e:
print "Error sending decoded data: %s" % e
sys.exit(1)
print "Client: Decoded message has been successfully sent!"
def getServerResponse(self):
print "Client: Getting server response..."
packetlength = self.__getPacketLength()
md5sum = self.__getMD5Sum()
data = self.__getData(int(''.join([ x.encode('hex') for x in packetlength ]),16) - 16 - 2 - 1)
parity = self.__getParityByte()
message = packetlength + md5sum + data + parity
binarymessage = (bin(int(message.encode('hex'), 16))[2:]).zfill(len(message.encode('hex')) * 4)
print "\n\nMessage: %s\n - Hex: %s\n - Bin: %s" % \
([ ord(x) for x in message ],
message.encode('hex'),
binarymessage)
self.__checkMessageParity(binarymessage)
self.__checkDataMD5Sum(data, md5sum.encode('hex'))
return data
def connect(self, address, port):
try:
self.client.connect((address, port))
except socket.error, e:
print >> sys.stderr, e
self.client.close()
sys.exit()
def disconnect(self):
self.client.close()
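
# Minimal usage sketch (added for illustration, not part of the original module).
# The host and port below are placeholders; the real challenge server endpoint
# must be substituted before running.
if __name__ == '__main__':
    client = SockClient()
    client.connect('127.0.0.1', 9999)  # placeholder address/port
    encrypted_data = client.getEncryptedMessage()
    decoded_message = client.getDecodedMessage(encrypted_data)
    client.sendDecodedMessage(decoded_message)
    response = client.getServerResponse()
    print "Server response (raw): %r" % response
    client.disconnect()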
| 3.140625 | 3 |
subscriptions/models.py | Sukriva/open-city-profile | 5 | 12795639 | <reponame>Sukriva/open-city-profile<filename>subscriptions/models.py
from adminsortable.models import SortableMixin
from django.db import models
from django.db.models import Max
from parler.models import TranslatableModel, TranslatedFields
from utils.models import SerializableMixin
def get_next_subscription_type_category_order():
order_max = SubscriptionTypeCategory.objects.aggregate(Max("order"))["order__max"]
return order_max + 1 if order_max else 1
def get_next_subscription_type_order():
order_max = SubscriptionType.objects.aggregate(Max("order"))["order__max"]
return order_max + 1 if order_max else 1
class SubscriptionTypeCategory(TranslatableModel, SortableMixin):
code = models.CharField(max_length=32, unique=True)
translations = TranslatedFields(label=models.CharField(max_length=255))
created_at = models.DateTimeField(auto_now_add=True)
order = models.PositiveIntegerField(
default=get_next_subscription_type_category_order, editable=False, db_index=True
)
class Meta:
ordering = ["order"]
def __str__(self):
return self.code
class SubscriptionType(TranslatableModel, SortableMixin):
subscription_type_category = models.ForeignKey(
SubscriptionTypeCategory,
on_delete=models.CASCADE,
related_name="subscription_types",
)
code = models.CharField(max_length=32, unique=True)
translations = TranslatedFields(label=models.CharField(max_length=255))
created_at = models.DateTimeField(auto_now_add=True)
order = models.PositiveIntegerField(
default=get_next_subscription_type_order, editable=False, db_index=True
)
class Meta:
ordering = ["order"]
def __str__(self):
return self.code
class Subscription(SerializableMixin):
profile = models.ForeignKey(
"profiles.Profile", on_delete=models.CASCADE, related_name="subscriptions"
)
subscription_type = models.ForeignKey(
SubscriptionType, on_delete=models.CASCADE, related_name="subscriptions"
)
created_at = models.DateTimeField(auto_now_add=True)
enabled = models.BooleanField(default=True)
def __str__(self):
return f"{self.subscription_type.code}: {self.enabled}"
serialize_fields = (
{"name": "subscription_type", "accessor": lambda x: getattr(x, "code")},
{"name": "created_at", "accessor": lambda x: x.strftime("%Y-%m-%d")},
{"name": "enabled"},
)
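
# Illustrative usage (a sketch added for clarity, not part of the original module);
# it assumes an existing profiles.Profile instance named `profile`:
#
#     category = SubscriptionTypeCategory.objects.create(code="notifications")
#     sub_type = SubscriptionType.objects.create(
#         subscription_type_category=category, code="email-news")
#     Subscription.objects.create(profile=profile, subscription_type=sub_type)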
| 1.984375 | 2 |
04_CNN_advances/use_vgg_finetune.py | jastarex/DL_Notes | 203 | 12795640 |
# coding: utf-8
# # Fine-tune a CNN using a pre-trained VGG model
# In[1]:
# Import packs
import numpy as np
import os
import scipy.io
from scipy.misc import imread, imresize
import matplotlib.pyplot as plt
import skimage.io
import skimage.transform
import tensorflow as tf
get_ipython().magic(u'matplotlib inline')
cwd = os.getcwd()
print ("Package loaded")
print ("Current folder is %s" % (cwd) )
# In[2]:
# Download the pre-trained VGG-19 model in Matlab .mat format; it is read with scipy below.
# (Note: this model version differs from the latest one at http://www.vlfeat.org/matconvnet/pretrained/)
import os.path
if not os.path.isfile('./data/imagenet-vgg-verydeep-19.mat'):
get_ipython().system(u'wget -O data/imagenet-vgg-verydeep-19.mat http://www.vlfeat.org/matconvnet/models/beta16/imagenet-vgg-verydeep-19.mat')
# # Load the images, resize them, and build the dataset
# In[3]:
# Configure the locations of the images and reshaping sizes
# ------------------------------------------------------------------- #
paths = {"images/cats", "images/dogs"}
imgsize = [64, 64] # The reshape size
use_gray = 0 # Grayscale
data_name = "data4vgg" # Save name
valid_exts = [".jpg",".gif",".png",".tga", ".jpeg"]
# ------------------------------------------------------------------- #
imgcnt = 0
nclass = len(paths)
for relpath in paths:
    fullpath = cwd + "/" + relpath
    flist = os.listdir(fullpath)
    for f in flist:
        if os.path.splitext(f)[1].lower() not in valid_exts:
            continue
        # only count valid image files here; full paths are built in the second loop below
        imgcnt = imgcnt + 1
# Grayscale
def rgb2gray(rgb):
    if len(rgb.shape) == 3:
return np.dot(rgb[...,:3], [0.299, 0.587, 0.114])
else:
print ("Current Image is GRAY!")
return rgb
if use_gray:
totalimg = np.ndarray((imgcnt, imgsize[0]*imgsize[1]))
else:
totalimg = np.ndarray((imgcnt, imgsize[0]*imgsize[1]*3))
totallabel = np.ndarray((imgcnt, nclass))
imgcnt = 0
for i, relpath in zip(range(nclass), paths):
path = cwd + "/" + relpath
flist = os.listdir(path)
for f in flist:
if os.path.splitext(f)[1].lower() not in valid_exts:
continue
fullpath = os.path.join(path, f)
currimg = imread(fullpath)
# Convert to grayscale
if use_gray:
grayimg = rgb2gray(currimg)
else:
grayimg = currimg
# Reshape
graysmall = imresize(grayimg, [imgsize[0], imgsize[1]])/255.
grayvec = np.reshape(graysmall, (1, -1))
# Save
totalimg[imgcnt, :] = grayvec
totallabel[imgcnt, :] = np.eye(nclass, nclass)[i]
imgcnt = imgcnt + 1
# Divide total data into training and test set
randidx = np.random.permutation(imgcnt)  # shuffle all indices without replacement so train/test do not overlap
trainidx = randidx[0:int(4*imgcnt/5)]
testidx = randidx[int(4*imgcnt/5):imgcnt]
trainimg = totalimg[trainidx, :]
trainlabel = totallabel[trainidx, :]
testimg = totalimg[testidx, :]
testlabel = totallabel[testidx, :]
ntrain = trainimg.shape[0]
nclass = trainlabel.shape[1]
dim = trainimg.shape[1]
ntest = testimg.shape[0]
print ("Number of total images is %d (train: %d, test: %d)"
% (imgcnt, ntrain, ntest))
print ("Shape of an image is (%d, %d, %d)" % (imgsize[0], imgsize[1], 3))
# # Define the VGG network structure
# In[4]:
def net(data_path, input_image):
layers = (
'conv1_1', 'relu1_1', 'conv1_2', 'relu1_2', 'pool1',
'conv2_1', 'relu2_1', 'conv2_2', 'relu2_2', 'pool2',
'conv3_1', 'relu3_1', 'conv3_2', 'relu3_2', 'conv3_3',
'relu3_3', 'conv3_4', 'relu3_4', 'pool3',
'conv4_1', 'relu4_1', 'conv4_2', 'relu4_2', 'conv4_3',
'relu4_3', 'conv4_4', 'relu4_4', 'pool4',
'conv5_1', 'relu5_1', 'conv5_2', 'relu5_2', 'conv5_3',
'relu5_3', 'conv5_4', 'relu5_4'
)
data = scipy.io.loadmat(data_path)
mean = data['normalization'][0][0][0]
mean_pixel = np.mean(mean, axis=(0, 1))
weights = data['layers'][0]
net = {}
current = input_image
for i, name in enumerate(layers):
kind = name[:4]
if kind == 'conv':
kernels, bias = weights[i][0][0][0][0]
# matconvnet: weights are [width, height, in_channels, out_channels]
# tensorflow: weights are [height, width, in_channels, out_channels]
kernels = np.transpose(kernels, (1, 0, 2, 3))
bias = bias.reshape(-1)
current = _conv_layer(current, kernels, bias)
elif kind == 'relu':
current = tf.nn.relu(current)
elif kind == 'pool':
current = _pool_layer(current)
net[name] = current
assert len(net) == len(layers)
return net, mean_pixel
def _conv_layer(input, weights, bias):
conv = tf.nn.conv2d(input, tf.constant(weights), strides=(1, 1, 1, 1),
padding='SAME')
return tf.nn.bias_add(conv, bias)
def _pool_layer(input):
return tf.nn.max_pool(input, ksize=(1, 2, 2, 1), strides=(1, 2, 2, 1),
padding='SAME')
def preprocess(image, mean_pixel):
return image - mean_pixel
def unprocess(image, mean_pixel):
return image + mean_pixel
print ("VGG net ready")
# # Compute convolutional feature maps with VGG
# In[5]:
# Preprocess
trainimg_tensor = np.ndarray((ntrain, imgsize[0], imgsize[1], 3))
testimg_tensor = np.ndarray((ntest, imgsize[0], imgsize[1], 3))
for i in range(ntrain):
currimg = trainimg[i, :]
currimg = np.reshape(currimg, [imgsize[0], imgsize[1], 3])
trainimg_tensor[i, :, :, :] = currimg
print ("Shape of trainimg_tensor is %s" % (trainimg_tensor.shape,))
for i in range(ntest):
currimg = testimg[i, :]
currimg = np.reshape(currimg, [imgsize[0], imgsize[1], 3])
testimg_tensor[i, :, :, :] = currimg
print ("Shape of trainimg_tensor is %s" % (testimg_tensor.shape,))
# Get conv features
VGG_PATH = cwd + "/data/imagenet-vgg-verydeep-19.mat"
with tf.Graph().as_default(), tf.Session() as sess:
with tf.device("/cpu:0"):
img_placeholder = tf.placeholder(tf.float32
, shape=(None, imgsize[0], imgsize[1], 3))
nets, mean_pixel = net(VGG_PATH, img_placeholder)
train_features = nets['relu5_4'].eval(feed_dict={img_placeholder: trainimg_tensor})
test_features = nets['relu5_4'].eval(feed_dict={img_placeholder: testimg_tensor})
print("Convolutional map extraction done")
# # Shape of the convolutional feature maps
# In[6]:
print ("Shape of 'train_features' is %s" % (train_features.shape,))
print ("Shape of 'test_features' is %s" % (test_features.shape,))
# # Vectorize the feature maps
# In[7]:
# Vectorize
train_vectorized = np.ndarray((ntrain, 4*4*512))
test_vectorized = np.ndarray((ntest, 4*4*512))
for i in range(ntrain):
curr_feat = train_features[i, :, :, :]
curr_feat_vec = np.reshape(curr_feat, (1, -1))
train_vectorized[i, :] = curr_feat_vec
for i in range(ntest):
curr_feat = test_features[i, :, :, :]
curr_feat_vec = np.reshape(curr_feat, (1, -1))
test_vectorized[i, :] = curr_feat_vec
print ("Shape of 'train_vectorized' is %s" % (train_features.shape,))
print ("Shape of 'test_vectorized' is %s" % (test_features.shape,))
# # Define the fine-tuning network
# In[8]:
# Parameters
learning_rate = 0.0001
training_epochs = 100
batch_size = 100
display_step = 10
# tf Graph input
x = tf.placeholder(tf.float32, [None, 4*4*512])
y = tf.placeholder(tf.float32, [None, nclass])
keepratio = tf.placeholder(tf.float32)
# Network
with tf.device("/cpu:0"):
n_input = dim
n_output = nclass
weights = {
'wd1': tf.Variable(tf.random_normal([4*4*512, 1024], stddev=0.1)),
'wd2': tf.Variable(tf.random_normal([1024, n_output], stddev=0.1))
}
biases = {
'bd1': tf.Variable(tf.random_normal([1024], stddev=0.1)),
'bd2': tf.Variable(tf.random_normal([n_output], stddev=0.1))
}
def conv_basic(_input, _w, _b, _keepratio):
# Input
_input_r = _input
# Vectorize
_dense1 = tf.reshape(_input_r, [-1, _w['wd1'].get_shape().as_list()[0]])
# Fc1
_fc1 = tf.nn.relu(tf.add(tf.matmul(_dense1, _w['wd1']), _b['bd1']))
_fc_dr1 = tf.nn.dropout(_fc1, _keepratio)
# Fc2
_out = tf.add(tf.matmul(_fc_dr1, _w['wd2']), _b['bd2'])
# Return everything
out = {'input_r': _input_r, 'dense1': _dense1,
'fc1': _fc1, 'fc_dr1': _fc_dr1, 'out': _out }
return out
# Functions!
_pred = conv_basic(x, weights, biases, keepratio)['out']
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=_pred, labels=y))
optm = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
_corr = tf.equal(tf.argmax(_pred,1), tf.argmax(y,1))
accr = tf.reduce_mean(tf.cast(_corr, tf.float32))
init = tf.initialize_all_variables()
print ("Network Ready to Go!")
# # Optimization (training)
# In[9]:
# Launch the graph
sess = tf.Session()
sess.run(init)
# Training cycle
for epoch in range(training_epochs):
avg_cost = 0.
num_batch = int(ntrain/batch_size)+1
# Loop over all batches
for i in range(num_batch):
randidx = np.random.randint(ntrain, size=batch_size)
batch_xs = train_vectorized[randidx, :]
batch_ys = trainlabel[randidx, :]
# Fit training using batch data
sess.run(optm, feed_dict={x: batch_xs, y: batch_ys, keepratio:0.7})
# Compute average loss
avg_cost += sess.run(cost, feed_dict={x: batch_xs, y: batch_ys, keepratio:1.})/num_batch
# Display logs per epoch step
if epoch % display_step == 0:
print ("Epoch: %03d/%03d cost: %.9f" % (epoch, training_epochs, avg_cost))
train_acc = sess.run(accr, feed_dict={x: batch_xs, y: batch_ys, keepratio:1.})
print (" Training accuracy: %.3f" % (train_acc))
test_acc = sess.run(accr, feed_dict={x: test_vectorized, y: testlabel, keepratio:1.})
print (" Test accuracy: %.3f" % (test_acc))
print ("Optimization Finished!")
| 2.53125 | 3 |
setup.py | cameronmaske/s3env | 20 | 12795641 | from setuptools import setup
requires = [
'click==6.7',
'bucketstore==0.1.1'
]
setup(
name="s3env",
version="0.0.4",
author="<NAME>",
description="Manipulate a key/value JSON object file in an S3 bucket through the CLI",
author_email="<EMAIL>",
url='https://github.com/cameronmaske/s3env',
py_modules=['s3env'],
license='MIT',
install_requires=requires,
entry_points='''
[console_scripts]
s3env=s3env:cli
''',
)
| 1.554688 | 2 |
Lista04/ex007.py | Guilherme-Schwann/Listas-de-Exercicios-UFV-CCF-110 | 2 | 12795642 | <gh_stars>1-10
#Ex.11
hora = str(input('Enter the time (HH:MM): ')).split(':')
min = int(hora[1])
min += int(hora[0]) * 60
print(f'{min} minutes have passed since 00:00.')
| 3.25 | 3 |
tests/unit/testplan/testing/multitest/test_result.py | armarti/testplan | 0 | 12795643 | """Unit tests for the testplan.testing.multitest.result module."""
import collections
import mock
import pytest
from testplan.testing.multitest import result as result_mod
from testplan.testing.multitest.suite import testcase, testsuite
from testplan.testing.multitest import MultiTest
from testplan.common.utils import comparison, testing
@testsuite
class AssertionOrder(object):
@testcase
def case(self, env, result):
summary = result.subresult()
first = result.subresult()
second = result.subresult()
second.true(True, 'AssertionSecond')
result.true(True, 'AssertionMain1')
result.true(True, 'AssertionMain2')
first.true(True, 'AssertionFirst1')
first.true(True, 'AssertionFirst2')
summary.append(first)
result.true(first.passed, 'Report passed so far.')
if first.passed:
summary.append(second)
result.prepend(summary)
def test_assertion_orders():
mtest = MultiTest(name='AssertionsOrder', suites=[AssertionOrder()])
mtest.run()
expected = ['AssertionFirst1', 'AssertionFirst2', 'AssertionSecond',
'AssertionMain1', 'AssertionMain2', 'Report passed so far.']
# pylint: disable=invalid-sequence-index
assertions = (
entry for entry in mtest.report.flatten()
if isinstance(entry, dict) and entry['meta_type'] == 'assertion')
for idx, entry in enumerate(assertions):
assert entry['description'] == expected[idx]
@pytest.fixture
def dict_ns():
"""Dict namespace with a mocked out result object."""
mock_result = mock.MagicMock()
mock_result.entries = collections.deque()
return result_mod.DictNamespace(mock_result)
@pytest.fixture
def fix_ns():
"""FIX namespace with a mocked out result object."""
mock_result = mock.MagicMock()
mock_result.entries = collections.deque()
return result_mod.FixNamespace(mock_result)
class TestDictNamespace(object):
"""Unit testcases for the result.DictNamespace class."""
def test_basic_match(self, dict_ns):
"""
Test the match method against identical expected and actual dicts.
"""
expected = {'key': 123}
actual = expected.copy()
assert dict_ns.match(
actual,
expected,
description='Basic dictmatch of identical dicts passes')
assert dict_ns.match(
actual,
expected,
description='Force type-check of values',
value_cmp_func=comparison.COMPARE_FUNCTIONS['check_types'])
assert dict_ns.match(
actual,
expected,
description='Convert values to strings before comparing',
value_cmp_func=comparison.COMPARE_FUNCTIONS['stringify'])
def test_duck_match(self, dict_ns):
"""
Test the match method by seting different types that can be compared.
Due to duck-typing, ints and floats can be equal if they refer to the
same numeric value - in this case, 123 == 123.0. However if
type-checking is forced by use of the check_types comparison method
the assertion will fail.
"""
expected = {'key': 123}
actual = {'key': 123.0}
assert dict_ns.match(
actual,
expected,
description='Dictmatch passes since the numeric values are equal.')
assert not dict_ns.match(
actual,
expected,
description='Dictmatch fails when type comparison is forced.',
value_cmp_func=comparison.COMPARE_FUNCTIONS['check_types'])
assert not dict_ns.match(
actual,
expected,
description='Dictmatch with string conversion fails due to '
'different string representations of int/float.',
value_cmp_func=comparison.COMPARE_FUNCTIONS['stringify'])
def test_fail_match(self, dict_ns):
"""
Test the match method for types that do not compare equal - in this
case, 123 should not match "123".
"""
expected = {'key': 123}
actual = {'key': '123'}
assert not dict_ns.match(
actual,
expected,
            description='Dictmatch fails because 123 != "123"')
assert not dict_ns.match(
actual,
expected,
description='Dictmatch fails due to type mismatch',
value_cmp_func=comparison.COMPARE_FUNCTIONS['check_types'])
assert dict_ns.match(
actual,
expected,
description='Dictmatch passes when values are converted to strings',
value_cmp_func=comparison.COMPARE_FUNCTIONS['stringify'])
def test_custom_match(self, dict_ns):
"""Test a dict match using a user-defined comparison function."""
expected = {'key': 174.24}
actual = {'key': 174.87}
tolerance = 1.0
def cmp_with_tolerance(lhs, rhs):
"""Check that both values are within a given tolerance range."""
return abs(lhs - rhs) < tolerance
assert not dict_ns.match(
actual,
expected,
description='Values are not exactly equal')
assert dict_ns.match(
actual,
expected,
description='Values are equal within tolerance',
value_cmp_func=cmp_with_tolerance)
def test_report_modes(self, dict_ns):
"""Test controlling report modes for a dict match."""
expected = {'key{}'.format(i): i for i in range(10)}
actual = expected.copy()
expected['wrong'] = 'expected'
actual['wrong'] = 'actual'
assert not dict_ns.match(
actual,
expected,
description='Keep all comparisons by default')
assert len(dict_ns.result.entries) == 1
dict_assert = dict_ns.result.entries.popleft()
assert len(dict_assert.comparison) == 11
assert dict_ns.match(
actual,
expected,
description='Keep ignored comparisons',
include_keys=['key{}'.format(i) for i in range(3)])
assert len(dict_ns.result.entries) == 1
dict_assert = dict_ns.result.entries.popleft()
assert len(dict_assert.comparison) == 11
assert dict_ns.match(
actual,
expected,
description='Discard ignored comparisons',
include_keys=['key{}'.format(i) for i in range(3)],
report_mode=comparison.ReportOptions.NO_IGNORED)
assert len(dict_ns.result.entries) == 1
dict_assert = dict_ns.result.entries.popleft()
assert len(dict_assert.comparison) == 3
assert not dict_ns.match(
actual,
expected,
report_mode=comparison.ReportOptions.FAILS_ONLY,
description='Discard passing comparisons')
assert len(dict_ns.result.entries) == 1
dict_assert = dict_ns.result.entries.popleft()
assert len(dict_assert.comparison) == 1
class TestFIXNamespace(object):
"""Unit testcases for the result.FixNamespace class."""
def test_untyped_fixmatch(self, fix_ns):
"""Test FIX matches between untyped FIX messages."""
expected = testing.FixMessage(
((35, 'D'), (38, '1000000'), (44, '125.83')))
actual = expected.copy()
assert fix_ns.match(actual, expected, description='Basic FIX match')
def test_typed_fixmatch(self, fix_ns):
"""Test FIX matches between typed FIX messages."""
expected = testing.FixMessage(
((35, 'D'), (38, 1000000), (44, 125.83)),
typed_values=True)
actual = expected.copy()
assert fix_ns.match(actual, expected, description='Basic FIX match')
# Now change the type of the actual 38 key's value to str. The assert
# should fail since we are performing a typed match.
actual[38] = '1000000'
assert not fix_ns.match(
actual, expected, description='Failing str/int comparison')
# Change the type to a float. The match should still fail because of
# the type difference, despite the numeric values being equal.
actual[38] = 1000000.0
assert not fix_ns.match(
actual, expected, description='Failing float/int comparison')
def test_mixed_fixmatch(self, fix_ns):
"""Test FIX matches between typed and untyped FIX messages."""
expected = testing.FixMessage(
((35, 'D'), (38, '1000000'), (44, '125.83')),
typed_values=False)
actual = testing.FixMessage(
((35, 'D'), (38, '1000000'), (44, 125.83)),
typed_values=True)
assert fix_ns.match(actual, expected, description='Mixed FIX match')
def test_report_modes(self, fix_ns):
"""Test controlling report modes for FIX match."""
expected = testing.FixMessage((i, (25 * i) - 4) for i in range(10))
actual = expected.copy()
expected['wrong'] = 'expected'
actual['wrong'] = 'actual'
assert not fix_ns.match(
actual,
expected,
description='Keep all comparisons by default')
assert len(fix_ns.result.entries) == 1
dict_assert = fix_ns.result.entries.popleft()
assert len(dict_assert.comparison) == 11
assert fix_ns.match(
actual,
expected,
description='Keep ignored comparisons',
include_tags=[0, 1, 2])
assert len(fix_ns.result.entries) == 1
dict_assert = fix_ns.result.entries.popleft()
assert len(dict_assert.comparison) == 11
assert fix_ns.match(
actual,
expected,
description='Discard ignored comparisons',
include_tags=[0, 1, 2],
report_mode=comparison.ReportOptions.NO_IGNORED)
assert len(fix_ns.result.entries) == 1
dict_assert = fix_ns.result.entries.popleft()
assert len(dict_assert.comparison) == 3
assert not fix_ns.match(
actual,
expected,
report_mode=comparison.ReportOptions.FAILS_ONLY,
description='Discard passing comparisons')
assert len(fix_ns.result.entries) == 1
dict_assert = fix_ns.result.entries.popleft()
assert len(dict_assert.comparison) == 1
| 2.53125 | 3 |
src/cli.py | martinlecs/ap_challenge | 0 | 12795644 | <gh_stars>0
import click
from datetime import datetime
from typing import List
import string
from src.Runner import RinexRunner
from src.Downloader import RinexDownloader
from src.Merger import RinexMerger
@click.command()
@click.argument('station', type=str)
@click.argument('start_date', type=click.DateTime(formats=['%Y-%m-%dT%H:%M:%SZ']))
@click.argument('end_date', type=click.DateTime(formats=['%Y-%m-%dT%H:%M:%SZ']))
def cli(station: str, start_date: datetime, end_date: datetime):
""" Downloads RINEX files from FTP server and merges them into one file
Args:
station: 4-character site (base) identifier
start_date: datetime object
end_date: datetime object
"""
try:
if start_date > end_date:
raise ValueError('Start date is past end date')
if start_date > datetime.now() or end_date > datetime.now():
raise ValueError(
'FTP does not have log files that extend all the way to your end date yet.')
if start_date.year < 1994 or end_date.year < 1994:
raise ValueError('Date is too early')
runner = RinexRunner(station, start_date, end_date,
RinexDownloader, RinexMerger)
runner.run()
except Exception as e:
print("Error:", e)
| 2.828125 | 3 |
ismir2020_featuresubsets.py | pvankranenburg/ismir2020 | 2 | 12795645 | <gh_stars>1-10
ismir2020featsets = {}
##########################################################################
ismir2020featsets['ismir2020_all_lyr_gt'] = {
'scaledegreefirst',
'scaledegreesecond',
'scaledegreethird',
'scaledegreefourth',
'scaledegreefifth',
'diatonicpitchfirst',
'diatonicpitchsecond',
'diatonicpitchthird',
'diatonicpitchfourth',
'diatonicpitchfifth',
'midipitchfirst',
'midipitchsecond',
'midipitchthird',
'midipitchfourth',
'midipitchfifth',
'intervalfirst',
'intervalsecond',
'intervalthird',
'intervalfourth',
'intervalfifth',
'VosCenterGravityfirst',
'VosCenterGravitysecond',
'VosCenterGravitythird',
'VosCenterGravityfourth',
'VosCenterGravityfifth',
'VosHarmonyfirst',
'VosHarmonysecond',
'VosHarmonythird',
'VosHarmonyfourth',
'VosHarmonyfifth',
'informationcontentfirst',
'informationcontentsecond',
'informationcontentthird',
'informationcontentfourth',
'informationcontentfifth',
'contourfirst',
'contoursecond',
'contourthird',
'contourfourth',
'contourfifth',
'registraldirectionchange',
'largetosmall',
'contourreversal',
'isascending',
'isdescending',
'ambitus',
'containsleap',
'meternumerator',
'meterdenominator',
'nextisrestfirst',
'nextisrestsecond',
'nextisrestthird',
'nextisrestfourth',
'nextisrestfifth',
'beatstrengthfirst',
'beatstrengthsecond',
'beatstrengththird',
'beatstrengthfourth',
'beatstrengthfifth',
'IOIbeatfractionfirst',
'IOIbeatfractionsecond',
'IOIbeatfractionthird',
'IOIbeatfractionfourth',
'IOIbeatfractionfifth',
'durationcummulation',
'onthebeatfirst',
'onthebeatsecond',
'onthebeatthird',
'onthebeatfourth',
'onthebeatfifth',
'completesmeasurephrase',
'completesmeasuresong',
'completesbeatphrase',
'completesbeatsong',
'grouperfirst',
'groupersecond',
'grouperthird',
'grouperfourth',
'grouperfifth',
'noteoffset',
'beatoffset',
'beatduration',
'beatcount',
'gprsumfirst',
'gprsumsecond',
'gprsumthird',
'gprsumfourth',
'gprsumfifth',
'pitchproximityfirst',
'pitchproximitysecond',
'pitchproximitythird',
'pitchproximityfourth',
'pitchproximityfifth',
'pitchreversalfirst',
'pitchreversalsecond',
'pitchreversalthird',
'pitchreversalfourth',
'pitchreversalfifth',
'lbdmfirst',
'lbdmsecond',
'lbdmthird',
'lbdmfourth',
'lbdmfifth',
'wordstressfirst',
'wordstresssecond',
'wordstressthird',
'wordstressfourth',
'wordstressfifth',
'rhymesfirst',
'rhymessecond',
'rhymesthird',
'rhymesfourth',
'rhymesfifth',
'rhyme_noteoffset',
'rhyme_beatoffset',
'noncontentwordfirst',
'noncontentwordsecond',
'noncontentwordthird',
'noncontentwordfourth',
'noncontentwordfifth',
'wordendfirst',
'wordendsecond',
'wordendthird',
'wordendfourth',
'wordendfifth',
'melismastatefirst',
'melismastatesecond',
'melismastatethird',
'melismastatefourth',
'melismastatefifth',
'intervalsizefirstsecond',
'intervalsizesecondthird',
'intervalsizethirdfourth',
'intervalsizefourthfifth',
'intervaldirfirstsecond',
'intervaldirsecondthird',
'intervaldirthirdfourth',
'intervaldirfourthfifth',
'diatonicpitchfirstsecond',
'diatonicpitchsecondthird',
'diatonicpitchthirdfourth',
'diatonicpitchfourthfifth',
'VosHarmonyfirstsecond',
'VosHarmonysecondthird',
'VosHarmonythirdfourth',
'VosHarmonyfourthfifth',
'beatstrengthfirstsecond',
'beatstrengthsecondthird',
'beatstrengththirdfourth',
'beatstrengthfourthfifth',
'IOIbeatfractionfirstsecond',
'IOIbeatfractionsecondthird',
'IOIbeatfractionthirdfourth',
'IOIbeatfractionfourthfifth',
'wordstressfirstsecond',
'wordstresssecondthird',
'wordstressthirdfourth',
'wordstressfourthfifth',
'informationcontentfirstsecond',
'informationcontentsecondthird',
'informationcontentthirdfourth',
'informationcontentfourthfifth',
}
##########################################################################
ismir2020featsets['ismir2020_all_gt'] = {
'scaledegreefirst',
'scaledegreesecond',
'scaledegreethird',
'scaledegreefourth',
'scaledegreefifth',
'diatonicpitchfirst',
'diatonicpitchsecond',
'diatonicpitchthird',
'diatonicpitchfourth',
'diatonicpitchfifth',
'midipitchfirst',
'midipitchsecond',
'midipitchthird',
'midipitchfourth',
'midipitchfifth',
'intervalfirst',
'intervalsecond',
'intervalthird',
'intervalfourth',
'intervalfifth',
'VosCenterGravityfirst',
'VosCenterGravitysecond',
'VosCenterGravitythird',
'VosCenterGravityfourth',
'VosCenterGravityfifth',
'VosHarmonyfirst',
'VosHarmonysecond',
'VosHarmonythird',
'VosHarmonyfourth',
'VosHarmonyfifth',
'informationcontentfirst',
'informationcontentsecond',
'informationcontentthird',
'informationcontentfourth',
'informationcontentfifth',
'contourfirst',
'contoursecond',
'contourthird',
'contourfourth',
'contourfifth',
'registraldirectionchange',
'largetosmall',
'contourreversal',
'isascending',
'isdescending',
'ambitus',
'containsleap',
'meternumerator',
'meterdenominator',
'nextisrestfirst',
'nextisrestsecond',
'nextisrestthird',
'nextisrestfourth',
'nextisrestfifth',
'beatstrengthfirst',
'beatstrengthsecond',
'beatstrengththird',
'beatstrengthfourth',
'beatstrengthfifth',
'IOIbeatfractionfirst',
'IOIbeatfractionsecond',
'IOIbeatfractionthird',
'IOIbeatfractionfourth',
'IOIbeatfractionfifth',
'durationcummulation',
'onthebeatfirst',
'onthebeatsecond',
'onthebeatthird',
'onthebeatfourth',
'onthebeatfifth',
'completesmeasurephrase',
'completesmeasuresong',
'completesbeatphrase',
'completesbeatsong',
'grouperfirst',
'groupersecond',
'grouperthird',
'grouperfourth',
'grouperfifth',
'noteoffset',
'beatoffset',
'beatduration',
'beatcount',
'gprsumfirst',
'gprsumsecond',
'gprsumthird',
'gprsumfourth',
'gprsumfifth',
'pitchproximityfirst',
'pitchproximitysecond',
'pitchproximitythird',
'pitchproximityfourth',
'pitchproximityfifth',
'pitchreversalfirst',
'pitchreversalsecond',
'pitchreversalthird',
'pitchreversalfourth',
'pitchreversalfifth',
'lbdmfirst',
'lbdmsecond',
'lbdmthird',
'lbdmfourth',
'lbdmfifth',
'intervalsizefirstsecond',
'intervalsizesecondthird',
'intervalsizethirdfourth',
'intervalsizefourthfifth',
'intervaldirfirstsecond',
'intervaldirsecondthird',
'intervaldirthirdfourth',
'intervaldirfourthfifth',
'diatonicpitchfirstsecond',
'diatonicpitchsecondthird',
'diatonicpitchthirdfourth',
'diatonicpitchfourthfifth',
'VosHarmonyfirstsecond',
'VosHarmonysecondthird',
'VosHarmonythirdfourth',
'VosHarmonyfourthfifth',
'beatstrengthfirstsecond',
'beatstrengthsecondthird',
'beatstrengththirdfourth',
'beatstrengthfourthfifth',
'IOIbeatfractionfirstsecond',
'IOIbeatfractionsecondthird',
'IOIbeatfractionthirdfourth',
'IOIbeatfractionfourthfifth',
'informationcontentfirstsecond',
'informationcontentsecondthird',
'informationcontentthirdfourth',
'informationcontentfourthfifth',
}
##########################################################################
ismir2020featsets['ismir2020_all_lyr'] = {
'scaledegreefirst',
'scaledegreesecond',
'scaledegreethird',
'scaledegreefourth',
'scaledegreefifth',
'diatonicpitchfirst',
'diatonicpitchsecond',
'diatonicpitchthird',
'diatonicpitchfourth',
'diatonicpitchfifth',
'midipitchfirst',
'midipitchsecond',
'midipitchthird',
'midipitchfourth',
'midipitchfifth',
'intervalfirst',
'intervalsecond',
'intervalthird',
'intervalfourth',
'intervalfifth',
'VosCenterGravityfirst',
'VosCenterGravitysecond',
'VosCenterGravitythird',
'VosCenterGravityfourth',
'VosCenterGravityfifth',
'VosHarmonyfirst',
'VosHarmonysecond',
'VosHarmonythird',
'VosHarmonyfourth',
'VosHarmonyfifth',
'informationcontentfirst',
'informationcontentsecond',
'informationcontentthird',
'informationcontentfourth',
'informationcontentfifth',
'contourfirst',
'contoursecond',
'contourthird',
'contourfourth',
'contourfifth',
'registraldirectionchange',
'largetosmall',
'contourreversal',
'isascending',
'isdescending',
'ambitus',
'containsleap',
'meternumerator',
'meterdenominator',
'beatstrengthfirst',
'beatstrengthsecond',
'beatstrengththird',
'beatstrengthfourth',
'beatstrengthfifth',
'IOIbeatfractionfirst',
'IOIbeatfractionsecond',
'IOIbeatfractionthird',
'IOIbeatfractionfourth',
'IOIbeatfractionfifth',
'durationcummulation',
'onthebeatfirst',
'onthebeatsecond',
'onthebeatthird',
'onthebeatfourth',
'onthebeatfifth',
'completesmeasuresong',
'completesbeatsong',
'grouperfirst',
'groupersecond',
'grouperthird',
'grouperfourth',
'grouperfifth',
'beatduration',
'beatcount',
'gprsumfirst',
'gprsumsecond',
'gprsumthird',
'gprsumfourth',
'gprsumfifth',
'pitchproximityfirst',
'pitchproximitysecond',
'pitchproximitythird',
'pitchproximityfourth',
'pitchproximityfifth',
'pitchreversalfirst',
'pitchreversalsecond',
'pitchreversalthird',
'pitchreversalfourth',
'pitchreversalfifth',
'lbdmfirst',
'lbdmsecond',
'lbdmthird',
'lbdmfourth',
'lbdmfifth',
'wordstressfirst',
'wordstresssecond',
'wordstressthird',
'wordstressfourth',
'wordstressfifth',
'rhymesfirst',
'rhymessecond',
'rhymesthird',
'rhymesfourth',
'rhymesfifth',
'rhyme_noteoffset',
'rhyme_beatoffset',
'noncontentwordfirst',
'noncontentwordsecond',
'noncontentwordthird',
'noncontentwordfourth',
'noncontentwordfifth',
'wordendfirst',
'wordendsecond',
'wordendthird',
'wordendfourth',
'wordendfifth',
'melismastatefirst',
'melismastatesecond',
'melismastatethird',
'melismastatefourth',
'melismastatefifth',
'intervalsizefirstsecond',
'intervalsizesecondthird',
'intervalsizethirdfourth',
'intervalsizefourthfifth',
'intervaldirfirstsecond',
'intervaldirsecondthird',
'intervaldirthirdfourth',
'intervaldirfourthfifth',
'diatonicpitchfirstsecond',
'diatonicpitchsecondthird',
'diatonicpitchthirdfourth',
'diatonicpitchfourthfifth',
'VosHarmonyfirstsecond',
'VosHarmonysecondthird',
'VosHarmonythirdfourth',
'VosHarmonyfourthfifth',
'beatstrengthfirstsecond',
'beatstrengthsecondthird',
'beatstrengththirdfourth',
'beatstrengthfourthfifth',
'IOIbeatfractionfirstsecond',
'IOIbeatfractionsecondthird',
'IOIbeatfractionthirdfourth',
'IOIbeatfractionfourthfifth',
'wordstressfirstsecond',
'wordstresssecondthird',
'wordstressthirdfourth',
'wordstressfourthfifth',
'informationcontentfirstsecond',
'informationcontentsecondthird',
'informationcontentthirdfourth',
'informationcontentfourthfifth',
}
##########################################################################
ismir2020featsets['ismir2020_all'] = {
'scaledegreefirst',
'scaledegreesecond',
'scaledegreethird',
'scaledegreefourth',
'scaledegreefifth',
'diatonicpitchfirst',
'diatonicpitchsecond',
'diatonicpitchthird',
'diatonicpitchfourth',
'diatonicpitchfifth',
'midipitchfirst',
'midipitchsecond',
'midipitchthird',
'midipitchfourth',
'midipitchfifth',
'intervalfirst',
'intervalsecond',
'intervalthird',
'intervalfourth',
'intervalfifth',
'VosCenterGravityfirst',
'VosCenterGravitysecond',
'VosCenterGravitythird',
'VosCenterGravityfourth',
'VosCenterGravityfifth',
'VosHarmonyfirst',
'VosHarmonysecond',
'VosHarmonythird',
'VosHarmonyfourth',
'VosHarmonyfifth',
'informationcontentfirst',
'informationcontentsecond',
'informationcontentthird',
'informationcontentfourth',
'informationcontentfifth',
'contourfirst',
'contoursecond',
'contourthird',
'contourfourth',
'contourfifth',
'registraldirectionchange',
'largetosmall',
'contourreversal',
'isascending',
'isdescending',
'ambitus',
'containsleap',
'meternumerator',
'meterdenominator',
'beatstrengthfirst',
'beatstrengthsecond',
'beatstrengththird',
'beatstrengthfourth',
'beatstrengthfifth',
'IOIbeatfractionfirst',
'IOIbeatfractionsecond',
'IOIbeatfractionthird',
'IOIbeatfractionfourth',
'IOIbeatfractionfifth',
'durationcummulation',
'onthebeatfirst',
'onthebeatsecond',
'onthebeatthird',
'onthebeatfourth',
'onthebeatfifth',
'completesmeasuresong',
'completesbeatsong',
'grouperfirst',
'groupersecond',
'grouperthird',
'grouperfourth',
'grouperfifth',
'beatduration',
'beatcount',
'gprsumfirst',
'gprsumsecond',
'gprsumthird',
'gprsumfourth',
'gprsumfifth',
'pitchproximityfirst',
'pitchproximitysecond',
'pitchproximitythird',
'pitchproximityfourth',
'pitchproximityfifth',
'pitchreversalfirst',
'pitchreversalsecond',
'pitchreversalthird',
'pitchreversalfourth',
'pitchreversalfifth',
'lbdmfirst',
'lbdmsecond',
'lbdmthird',
'lbdmfourth',
'lbdmfifth',
'intervalsizefirstsecond',
'intervalsizesecondthird',
'intervalsizethirdfourth',
'intervalsizefourthfifth',
'intervaldirfirstsecond',
'intervaldirsecondthird',
'intervaldirthirdfourth',
'intervaldirfourthfifth',
'diatonicpitchfirstsecond',
'diatonicpitchsecondthird',
'diatonicpitchthirdfourth',
'diatonicpitchfourthfifth',
'VosHarmonyfirstsecond',
'VosHarmonysecondthird',
'VosHarmonythirdfourth',
'VosHarmonyfourthfifth',
'beatstrengthfirstsecond',
'beatstrengthsecondthird',
'beatstrengththirdfourth',
'beatstrengthfourthfifth',
'IOIbeatfractionfirstsecond',
'IOIbeatfractionsecondthird',
'IOIbeatfractionthirdfourth',
'IOIbeatfractionfourthfifth',
'informationcontentfirstsecond',
'informationcontentsecondthird',
'informationcontentthirdfourth',
'informationcontentfourthfifth',
}
##########################################################################
ismir2020featsets['ismir2020_othermodels'] = {
'informationcontentfirst',
'informationcontentsecond',
'informationcontentthird',
'informationcontentfourth',
'informationcontentfifth',
'registraldirectionchange',
'largetosmall',
'contourreversal',
'beatstrengthfirst',
'beatstrengthsecond',
'beatstrengththird',
'beatstrengthfourth',
'beatstrengthfifth',
'durationcummulation',
'grouperfirst',
'groupersecond',
'grouperthird',
'grouperfourth',
'grouperfifth',
'gprsumfirst',
'gprsumsecond',
'gprsumthird',
'gprsumfourth',
'gprsumfifth',
'pitchproximityfirst',
'pitchproximitysecond',
'pitchproximitythird',
'pitchproximityfourth',
'pitchproximityfifth',
'pitchreversalfirst',
'pitchreversalsecond',
'pitchreversalthird',
'pitchreversalfourth',
'pitchreversalfifth',
'lbdmfirst',
'lbdmsecond',
'lbdmthird',
'lbdmfourth',
'lbdmfifth',
'intervalsizefirstsecond',
'intervalsizesecondthird',
'intervalsizethirdfourth',
'intervalsizefourthfifth',
'intervaldirfirstsecond',
'intervaldirsecondthird',
'intervaldirthirdfourth',
'intervaldirfourthfifth',
'informationcontentfirstsecond',
'informationcontentsecondthird',
'informationcontentthirdfourth',
'informationcontentfourthfifth',
}
##########################################################################
ismir2020featsets['ismir2020_elementarylyrics'] = {
'wordstressfirst',
'wordstresssecond',
'wordstressthird',
'wordstressfourth',
'wordstressfifth',
'rhymesfirst',
'rhymessecond',
'rhymesthird',
'rhymesfourth',
'rhymesfifth',
'rhyme_noteoffset',
'rhyme_beatoffset',
'noncontentwordfirst',
'noncontentwordsecond',
'noncontentwordthird',
'noncontentwordfourth',
'noncontentwordfifth',
'wordendfirst',
'wordendsecond',
'wordendthird',
'wordendfourth',
'wordendfifth',
'melismastatefirst',
'melismastatesecond',
'melismastatethird',
'melismastatefourth',
'melismastatefifth',
'wordstressfirstsecond',
'wordstresssecondthird',
'wordstressthirdfourth',
'wordstressfourthfifth',
}
##########################################################################
ismir2020featsets['ismir2020_elementaryrhythm'] = {
'meternumerator',
'meterdenominator',
'beatstrengthfirst',
'beatstrengthsecond',
'beatstrengththird',
'beatstrengthfourth',
'beatstrengthfifth',
'IOIbeatfractionfirst',
'IOIbeatfractionsecond',
'IOIbeatfractionthird',
'IOIbeatfractionfourth',
'IOIbeatfractionfifth',
'durationcummulation',
'onthebeatfirst',
'onthebeatsecond',
'onthebeatthird',
'onthebeatfourth',
'onthebeatfifth',
'completesmeasuresong',
'completesbeatsong',
'beatduration',
'beatcount',
'beatstrengthfirstsecond',
'beatstrengthsecondthird',
'beatstrengththirdfourth',
'beatstrengthfourthfifth',
'IOIbeatfractionfirstsecond',
'IOIbeatfractionsecondthird',
'IOIbeatfractionthirdfourth',
'IOIbeatfractionfourthfifth',
}
##########################################################################
ismir2020featsets['ismir2020_elementarypitch'] = {
'scaledegreefirst',
'scaledegreesecond',
'scaledegreethird',
'scaledegreefourth',
'scaledegreefifth',
'diatonicpitchfirst',
'diatonicpitchsecond',
'diatonicpitchthird',
'diatonicpitchfourth',
'diatonicpitchfifth',
'midipitchfirst',
'midipitchsecond',
'midipitchthird',
'midipitchfourth',
'midipitchfifth',
'intervalfirst',
'intervalsecond',
'intervalthird',
'intervalfourth',
'intervalfifth',
'VosCenterGravityfirst',
'VosCenterGravitysecond',
'VosCenterGravitythird',
'VosCenterGravityfourth',
'VosCenterGravityfifth',
'VosHarmonyfirst',
'VosHarmonysecond',
'VosHarmonythird',
'VosHarmonyfourth',
'VosHarmonyfifth',
'contourfirst',
'contoursecond',
'contourthird',
'contourfourth',
'contourfifth',
'registraldirectionchange',
'largetosmall',
'contourreversal',
'isascending',
'isdescending',
'ambitus',
'containsleap',
'intervalsizefirstsecond',
'intervalsizesecondthird',
'intervalsizethirdfourth',
'intervalsizefourthfifth',
'intervaldirfirstsecond',
'intervaldirsecondthird',
'intervaldirthirdfourth',
'intervaldirfourthfifth',
'diatonicpitchfirstsecond',
'diatonicpitchsecondthird',
'diatonicpitchthirdfourth',
'diatonicpitchfourthfifth',
'VosHarmonyfirstsecond',
'VosHarmonysecondthird',
'VosHarmonythirdfourth',
'VosHarmonyfourthfifth',
}
##########################################################################
ismir2020featsets['ismir2020_elementarypitchrhythm'] = ismir2020featsets['ismir2020_elementarypitch'] | ismir2020featsets['ismir2020_elementaryrhythm']
ismir2020featsets['ismir2020_elementaryall'] = ismir2020featsets['ismir2020_elementarypitchrhythm'] | ismir2020featsets['ismir2020_elementarylyrics']
#for k in ismir2020featsets.keys():
# print(k)
| 0.84375 | 1 |
soa2rts.py | acasadoalonso/RealTimeScoring | 0 | 12795646 | #!/usr/bin/python3
# -*- coding: UTF-8 -*-
# This module gets the data from SoaringSpot and prepares the info for the REAL TIME SCORING
#
# Author: <NAME> - May 2021
#
#import sys
import json
import urllib.request
import urllib.error
import urllib.parse
import hmac
import hashlib
import base64
import os
import socket
import os.path
from datetime import datetime
import pycountry
from ognddbfuncs import getognreg, getognflarmid
from simplehal import HalDocument, Resolver
#-------------------------------------------------------------------------------------------------------------------#
#########################################################################
global apiurl
global auth # auth and apiurl are globals
#########################################################################
def getapidata(url, autho): # get the data from the API server
req = urllib.request.Request(url)
req.add_header('Authorization', autho) # build the authorization header
req.add_header("Accept", "application/json")
req.add_header("Content-Type", "application/hal+json")
r = urllib.request.urlopen(req) # open the url resource
j_obj = json.load(r) # convert to JSON
return j_obj # return the JSON object
###################################################################
########################################################################
def oksta(station):
if (station != "FLYMASTER"):
return(True)
return(False)
#####################
def chkfilati(latitude, flatil, flatiu):
if (flatil == 0.0):
return (False)
if (flatil > 0): # northern hemisphere
if (latitude < flatil or latitude > flatiu):
return (True)
    else: # southern hemisphere
if (latitude > flatil or latitude < flatiu):
return (True)
return(False)
########################################################################
# get the data from the soaring spot and return it as a HAL document
def gdata(url, key, prt='no'):
global auth # auth and apiurl are globals
global apiurl
j_obj = getapidata(url, auth) # call the fuction that get it
# convert to HAL
if prt == 'yes': # if print is required
print(json.dumps(j_obj, indent=4))
cd = HalDocument.get_data(HalDocument.from_python(
j_obj), apiurl+'rel/' + key) # get the data from the HAL document
return cd
def getemb(base, ctype):
global apiurl
return(base['_embedded'][apiurl+'rel/'+ctype])
def getlinks(base, ctype):
global apiurl
return (base['_links'][apiurl+'rel/'+ctype]['href'])
###################################################################
#
# ---------- main code ---------------
#
# gather the competition data from SoaringSpot
def soa2rts(RTS, client, secretkey, prt=False):
global apiurl
global auth
date = datetime.now() # get the date
hostname = socket.gethostname()
# directory where to store the IGC file
    # see if index day is requested
# --------------------------------------#
# ===== SETUP parameters =======================#
# where to find the SQLITE3 database
cwd = os.getcwd() # get the current working directory
# where to find the clientid and secretkey files
secpath = cwd+"/SoaringSpot/"
apiurl = "http://api.soaringspot.com/" # soaringspot API URL
rel = "v1" # we use API version 1
# ==============================================#
utc = datetime.utcnow() # the UTC time
date = utc.strftime("%Y-%m-%dT%H:%M:%SZ") # get the UTC time
local_time = datetime.now() # the local time
# print the time for information only
if prt:
print("Hostname:", hostname)
print("UTC Time is now:", utc)
print(date) #
# print the time for information only
print("Local Time is now:", local_time)
print("Config params. SECpath:", secpath)
    # nonce=base64.b64encode(OpenSSL.rand.bytes(36)) # get the nonce base
    nonce = base64.b64encode(os.urandom(36))  # get the nonce base
# build the message
message = nonce+date.encode(encoding='utf-8') + \
client.encode(encoding='utf-8')
# and the message digest
digest = hmac.new(secretkey, msg=message,
digestmod=hashlib.sha256).digest()
signature = str(base64.b64encode(digest).decode()
) # build the digital signature
# the AUTHORIZATION ID is built now
auth = apiurl+rel+'/hmac/v1 ClientID="'+client+'",Signature="' + \
signature+'",Nonce="' + \
nonce.decode(encoding='utf-8')+'",Created="'+date+'" '
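    # In short, the Authorization header carries ClientID, Signature, Nonce and Created,
    # where Signature = Base64(HMAC-SHA256(secretkey, nonce + created + clientid)).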
#print ("URLiauth:", auth)
# get the initial base of the tree
url1 = apiurl+rel
# get the contest data, first instance
cd = gdata(url1, 'contests', prt='no')[0]
# get the main data from the contest
category = cd['category']
eventname = cd['name']
compid = cd['id']
country = cd['country'] # country code - 2 chars code
compcountry = country # contry as defaults for pilots
# convert the 2 chars ID to the 3 chars ID
ccc = pycountry.countries.get(alpha_2=country)
country = ccc.alpha_3
endate = cd['end_date']
lc = getemb(cd, 'location') # location data
lcname = lc['name'] # location name
print("\n\n= Contest ===============================")
print("Category:", category, "Comp name:", eventname, "Comp ID:", compid)
print("Loc Name:", lcname, "Country: ",
country, country, "End date:", endate)
print("=========================================\n\n")
if prt:
print("Classes:\n========\n\n")
npil = 0 # init the number of pilots
classes = []
pilots = []
devicesid = ""
# Build the tracks and turn points, exploring the contestants and task within each class
# go thru the different classes now within the daya
pilots = []
for cl in getemb(cd, 'classes'):
# print "CLCLCL", cl
classname = cl["type"] # search for each class
if prt:
print("Class:", classname, "\n\n") # search for each class
# search for the contestants on each class
url3 = getlinks(cl, "contestants")
ctt = gdata(url3, "contestants") # get the contestants data
# print "CTTCTT",ctt
for contestants in ctt:
# print "FT", ft, "\n\n"
fname = getemb(contestants, 'pilot')[0]['first_name']
lname = getemb(contestants, 'pilot')[0]['last_name']
# convert it to utf8 in order to avoid problems
pname = fname.encode('utf-8').decode('utf-8') + \
" "+lname.encode('utf-8').decode('utf-8')
if 'club' in contestants:
club = contestants['club'].encode('utf-8').decode('utf-8')
else:
club = "club_NOTYET"
if 'aircraft_model' in contestants:
ar = contestants['aircraft_model']
else:
ar = "am_NOTYET"
if 'contestant_number' in contestants:
cn = contestants['contestant_number']
else:
cn = "cn_NOTYET"
if 'nationality' in getemb(contestants, 'pilot')[0]:
nation = getemb(contestants, 'pilot')[0]['nationality']
else:
if compcountry != '':
nation = compcountry
else:
nation = "ES" # by default is SPAIN
# convert the 2 chars ID to the 3 chars ID
ccc = pycountry.countries.get(alpha_2=nation)
country3 = ccc.alpha_3
igcid = getemb(contestants, 'pilot')[0]['igc_id']
idflarm = ""
ognpair = ""
ognid = ""
idfreg = ""
if 'live_track_id' in contestants: # check if we have the FlarmId from the SoaringSpot
livetrk = contestants['live_track_id'] # flarmID and OGN pair
if prt:
print ("Live_track:", livetrk)
if len(livetrk) == 9:
idflarm = livetrk # case that just the FlarmID, no piaring
if len(livetrk) == 19: # format: FLR123456 OGN654321
# case that just the FlarmID and OGN tracker pair
idflarm = livetrk[0:9]
ognpair = livetrk[10:] # OGN trackers paired
if len(idflarm) == 6: # in case of missing FLR/ICA/OGN (deprecated)
if idflarm[0] == 'D':
idflarm = "FLR"+idflarm # assume a Flarm type
elif idflarm[0].isdigit():
idflarm = "ICA"+idflarm # assume a ICAO type
else:
idflarm = "OGN"+idflarm # assume a OGN type
# get the registration from OGN DDB
idfreg = getognreg(idflarm[3:9])
if 'aircraft_registration' in contestants:
regi = contestants['aircraft_registration']
# get the flarm if from the OGN DDB
ognid = getognflarmid(regi)
else:
# if we do not have the registration ID on the soaringspota
regi = "reg_NOTYET"
if idflarm == '':
idflarm = ognid
if idflarm != '':
devicesid += idflarm+'/'
if prt:
print("Pilot:", pname, "Club:", club, "CompID:", cn, "Nation:", nation, "Country Code",
country3, "IGCID:", igcid, "Reg:", regi, "Model:", ar, "Flarm:", idflarm, "idf:", idfreg, "OGNpair", ognpair, ognid)
npil += 1
pil = {"PilotName": pname, "Club": club, "CompID": cn, "Nation": nation, "CountryCode": country3, "Registration": regi, "Class": classname,
"IgcID": igcid, "AcftModel": ar, "Flarmid": idflarm, "OGNpair": ognpair}
pilots.append(pil)
cll = {"Class": classname}
classes.append(cll)
if prt:
print("----------------------------------------------------------------\n\n")
# print the number of pilots as a reference and control
if len(devicesid) > 0:
devicesid = devicesid[0:-1]
if prt:
print("= Pilots ===========================", npil, "\n\n")
print(devicesid)
RTS = {"Compname": eventname, "Category": category, "Country": country,
"EndDate": endate, "Location": lcname, "Classes": classes, "Pilots": pilots, "Devices": devicesid}
return (RTS)
| 2.625 | 3 |
RevitPyCVC/Fluides/fluide_creer.py | Nahouhak/pythoncvc.net | 6 | 12795647 | <filename>RevitPyCVC/Fluides/fluide_creer.py
from Autodesk.Revit.DB import *
from Autodesk.Revit.DB.Architecture import *
from Autodesk.Revit.DB.Analysis import *
from Autodesk.Revit.DB.Plumbing import *
from Autodesk.Revit.Exceptions import *
from Autodesk.Revit.UI import TaskDialog
from Autodesk.Revit.UI import TaskDialogCommonButtons
from Autodesk.Revit.UI import TaskDialogResult
import ctypes
#Load CoolProp shared library and configure PropsSI c_types units
CP = ctypes.cdll.LoadLibrary(r"C:\Program Files (x86)\pythoncvc.net\RevitPyCVC\Fluides\dll\CoolProp.dll")
PropsSI = CP.PropsSI
PropsSI.argtypes = (ctypes.c_char_p, ctypes.c_char_p, ctypes.c_double, ctypes.c_char_p, ctypes.c_double, ctypes.c_char_p)
PropsSI.restype = ctypes.c_double
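# For reference (illustrative call): PropsSI('V', 'T', 293.15, 'P', 101325, 'Water')
# returns the dynamic viscosity in Pa*s, and 'D' returns the density in kg/m3;
# CoolProp's PropsSI works in SI units throughout.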
uidoc = __revit__.ActiveUIDocument
doc = __revit__.ActiveUIDocument.Document
#Set desired fluid, initial temperature(freezing temperature ?), desired pressure for properties call
fluid = 'Water'
t_init = 273.15
pressure = 101325
#Check if fluid_type exist and create it if not
fluid_type = FluidType.GetFluidType(doc, fluid)
if fluid_type is None:
t = Transaction(doc, "Create fluid type")
t.Start()
FluidType.Create(doc, fluid)
t.Commit()
fluid_type = FluidType.GetFluidType(doc, fluid)
#Add new temperature with associated heat capacity and viscosity
t = Transaction(doc, "Add temperature")
t.Start()
for i in xrange(1,100):
#Call CoolProp to get fluid properties and convert it to internal units if necessary
temperature = 273.15+i
viscosity = UnitUtils.ConvertToInternalUnits(PropsSI('V','T',t_init+i,'P',pressure,fluid),DisplayUnitType.DUT_PASCAL_SECONDS)
density = UnitUtils.ConvertToInternalUnits(PropsSI('D','T',t_init+i,'P',pressure,fluid),DisplayUnitType.DUT_KILOGRAMS_PER_CUBIC_METER)
#Catching exceptions and trying to overwrite temperature if asked by user in the TaskDialog
try:
fluid_type.AddTemperature(FluidTemperature(temperature,viscosity,density))
except ArgumentException:
result = TaskDialog.Show("Error", "Temperature already exist, do you want to overwrite it ?",TaskDialogCommonButtons.Yes | TaskDialogCommonButtons.No | TaskDialogCommonButtons.Cancel, TaskDialogResult.Yes)
if result == TaskDialogResult.Yes:
try:
fluid_type.RemoveTemperature(temperature)
fluid_type.AddTemperature(FluidTemperature(temperature,viscosity,density))
except ArgumentException:
TaskDialog.Show("Overwrite error", "Temperature is currently in use and cannot be overwritten")
elif result == TaskDialogResult.No:
pass
else:
break
t.Commit()
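# Sanity check outside Revit (illustrative, assumes the same CoolProp DLL can be loaded):
# water at 20 degC and 1 atm should give a viscosity of roughly 1.0e-3 Pa*s and a
# density of roughly 998 kg/m3.
# PropsSI('V', 'T', 293.15, 'P', 101325, 'Water')
# PropsSI('D', 'T', 293.15, 'P', 101325, 'Water')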
| 2.09375 | 2 |
build_release.py | JDCalvert/foundryvtt-drag-ruler | 0 | 12795648 | #!/usr/bin/env python3
import json
from pathlib import PurePath, Path
import subprocess
import tempfile
import zipfile
wasm_pack = Path("~/.cargo/bin/wasm-pack").expanduser()
root_files = ["module.json", "README.md", "CHANGELOG.md", "LICENSE"]
wasm_files = ["gridless_pathfinding_bg.wasm", "gridless_pathfinding.js"]
output_dir = Path("artifact")
copy_everything_directories = ["js", "lang", "templates"]
wasm_dir = Path("wasm")
root_dir = Path(".")
rust_dir = Path("rust")
build_dir_tmp = tempfile.TemporaryDirectory()
build_dir = Path(build_dir_tmp.name)
with open("module.json", "r") as file:
manifest = json.load(file)
zip_root = PurePath(f'{manifest["name"]}')
filename = f'{manifest["name"]}-{manifest["version"]}.zip'
result = subprocess.run([wasm_pack, "build", "--target", "web", "--out-dir", build_dir, root_dir / rust_dir])
if result.returncode != 0:
raise Exception("Wasm build failed")
output_dir.mkdir(parents=True, exist_ok=True)
def write_directory(archive, d):
for f in (root_dir / d).iterdir():
if f.is_dir():
write_directory(archive, f)
else:
assert(f.is_file())
archive.write(f, arcname=zip_root / d / f.name)
with zipfile.ZipFile(output_dir / filename, mode="w", compression=zipfile.ZIP_DEFLATED, compresslevel=9) as archive:
for f in root_files:
archive.write(root_dir / f, arcname=zip_root / f)
for d in copy_everything_directories:
write_directory(archive, d)
for f in wasm_files:
archive.write(build_dir / f, arcname=zip_root / wasm_dir / f)
print(f"Successfully built {output_dir / filename}")
| 2.078125 | 2 |
Session1_2018/pathsum2.py | vedantc6/LCode | 1 | 12795649 | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
def dfs(self, root, cur_sum, lnodes, result):
if not root.left and not root.right and cur_sum == root.val:
lnodes.append(root.val)
result.append(lnodes)
if root.left:
self.dfs(root.left, cur_sum - root.val, lnodes + [root.val], result)
if root.right:
self.dfs(root.right, cur_sum - root.val, lnodes + [root.val], result)
def pathSum(self, root, sum):
"""
:type root: TreeNode
:type sum: int
:rtype: List[List[int]]
"""
result = []
if not root:
return result
self.dfs(root, sum, [], result)
return result
# Iterative
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
def pathSum(self, root, sum):
"""
:type root: TreeNode
:type sum: int
:rtype: List[List[int]]
"""
if not root:
return []
res = []
stack = [(root, sum, [])]
while stack:
node, cur_sum, lnodes = stack.pop()
if not node.left and not node.right and node.val == cur_sum:
res.append(lnodes + [node.val])
if node.left:
stack.append((node.left, cur_sum - node.val, lnodes + [node.val]))
if node.right:
stack.append((node.right, cur_sum - node.val, lnodes + [node.val]))
return res
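# Quick illustrative check (values borrowed from the classic LeetCode example):
# root = TreeNode(5); root.left = TreeNode(4); root.right = TreeNode(8)
# root.left.left = TreeNode(11)
# root.left.left.left = TreeNode(7); root.left.left.right = TreeNode(2)
# Solution().pathSum(root, 22) # -> [[5, 4, 11, 2]]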
| 3.84375 | 4 |
files.py | VergilTheHuragok/SciFi_Text_Adventure_Python | 0 | 12795650 | <reponame>VergilTheHuragok/SciFi_Text_Adventure_Python
import base64
import jsonpickle
from cryptography.fernet import Fernet
key = b'\<KEY>'
key = base64.urlsafe_b64encode(key)
def decrypt(bytes_message):
"""Takes an encrypted bytes object and returns a decrypted one"""
cipher = Fernet(key)
return cipher.decrypt(bytes_message)
def encrypt(message):
"""Takes a decrypted string and returns an encrypted bytes object"""
cipher = Fernet(key)
return cipher.encrypt(bytes(message, "utf8"))
def encode(obj):
"""Encodes an object as a string"""
return jsonpickle.encode(obj)
def decode(obj):
"""Decodes a string into an object"""
return jsonpickle.decode(obj)
def encrypt_obj_to_file(obj, path):
"""Writes an object to a file and obfuscates"""
obj = encode(obj)
obj = encrypt(obj)
file = open(path, "wb")
file.write(obj)
file.close()
def decrypt_obj_from_file(path):
"""Reads an an obfuscated object from a file"""
file = open(path, "rb")
obj = file.read()
file.close()
obj = decrypt(obj)
obj = decode(obj)
return obj
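# Example round-trip (illustrative):
# encrypt_obj_to_file({"player": "Vergil", "hp": 10}, "save.dat")
# decrypt_obj_from_file("save.dat") # -> {"player": "Vergil", "hp": 10}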
| 3.5625 | 4 |