max_stars_repo_path (stringlengths 3-269) | max_stars_repo_name (stringlengths 4-119) | max_stars_count (int64 0-191k) | id (stringlengths 1-7) | content (stringlengths 6-1.05M) | score (float64 0.23-5.13) | int_score (int64 0-5) |
---|---|---|---|---|---|---|
utils/nlp.py | splovyt/SFPython-Project-Night | 1 | 6500 | import ssl
import nltk
from textblob import TextBlob
from nltk.corpus import stopwords
# set SSL
try:
_create_unverified_https_context = ssl._create_unverified_context
except AttributeError:
pass
else:
ssl._create_default_https_context = _create_unverified_https_context
# download noun data (if required)
nltk.download('brown')
nltk.download('punkt')
nltk.download('stopwords')
def extract_nouns(sentence):
"""Extract the nouns from a sentence using the 'textblob' library."""
blob = TextBlob(sentence)
return blob.noun_phrases
def remove_stopwords(sentence):
"""Remove stopwords from a sentence and return the list of words."""
blob = TextBlob(sentence)
return [word for word in blob.words if word not in stopwords.words('english') and len(word)>2]
| 3.390625 | 3 |
toolbox/core/management/commands/celery_beat_resource_scraper.py | akshedu/toolbox | 0 | 6501 |
from django_celery_beat.models import PeriodicTask, IntervalSchedule
from django.core.management.base import BaseCommand
from django.db import IntegrityError
class Command(BaseCommand):
def handle(self, *args, **options):
try:
schedule_channel, created = IntervalSchedule.objects.get_or_create(
every=4,
period=IntervalSchedule.HOURS,
)
except IntegrityError as e:
pass
try:
schedule_video, created = IntervalSchedule.objects.get_or_create(
every=6,
period=IntervalSchedule.HOURS,
)
except IntegrityError as e:
pass
try:
PeriodicTask.objects.create(
interval=schedule_channel,
name='Scrape Channels',
task='toolbox.scraper.tasks.scrape_youtube_channels',
)
except IntegrityError as e:
pass
try:
PeriodicTask.objects.create(
interval=schedule_video,
name='Scrape Videos',
task='toolbox.scraper.tasks.scrape_youtube_videos',
)
except IntegrityError as e:
pass
| 2.21875 | 2 |
ppcls/data/preprocess/__init__.py | zhusonghe/PaddleClas-1 | 3,763 | 6502 | <filename>ppcls/data/preprocess/__init__.py
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ppcls.data.preprocess.ops.autoaugment import ImageNetPolicy as RawImageNetPolicy
from ppcls.data.preprocess.ops.randaugment import RandAugment as RawRandAugment
from ppcls.data.preprocess.ops.timm_autoaugment import RawTimmAutoAugment
from ppcls.data.preprocess.ops.cutout import Cutout
from ppcls.data.preprocess.ops.hide_and_seek import HideAndSeek
from ppcls.data.preprocess.ops.random_erasing import RandomErasing
from ppcls.data.preprocess.ops.grid import GridMask
from ppcls.data.preprocess.ops.operators import DecodeImage
from ppcls.data.preprocess.ops.operators import ResizeImage
from ppcls.data.preprocess.ops.operators import CropImage
from ppcls.data.preprocess.ops.operators import RandCropImage
from ppcls.data.preprocess.ops.operators import RandFlipImage
from ppcls.data.preprocess.ops.operators import NormalizeImage
from ppcls.data.preprocess.ops.operators import ToCHWImage
from ppcls.data.preprocess.ops.operators import AugMix
from ppcls.data.preprocess.batch_ops.batch_operators import MixupOperator, CutmixOperator, OpSampler, FmixOperator
import numpy as np
from PIL import Image
def transform(data, ops=[]):
""" transform """
for op in ops:
data = op(data)
return data
class AutoAugment(RawImageNetPolicy):
""" ImageNetPolicy wrapper to auto fit different img types """
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def __call__(self, img):
if not isinstance(img, Image.Image):
img = np.ascontiguousarray(img)
img = Image.fromarray(img)
img = super().__call__(img)
if isinstance(img, Image.Image):
img = np.asarray(img)
return img
class RandAugment(RawRandAugment):
""" RandAugment wrapper to auto fit different img types """
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def __call__(self, img):
if not isinstance(img, Image.Image):
img = np.ascontiguousarray(img)
img = Image.fromarray(img)
img = super().__call__(img)
if isinstance(img, Image.Image):
img = np.asarray(img)
return img
class TimmAutoAugment(RawTimmAutoAugment):
""" TimmAutoAugment wrapper to auto fit different img tyeps. """
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def __call__(self, img):
if not isinstance(img, Image.Image):
img = np.ascontiguousarray(img)
img = Image.fromarray(img)
img = super().__call__(img)
if isinstance(img, Image.Image):
img = np.asarray(img)
return img
| 1.421875 | 1 |
src/scalar_net/visualisations.py | scheeloong/lindaedynamics_icml2018 | 1 | 6503 | # required modules
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from matplotlib import cm
from matplotlib.colors import Normalize
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.animation import FuncAnimation
# two-dimensional version
def plot_mse_loss_surface_2d(fig, ax, x, y, v=0.0, l2=0.0, w1_range=(-2, 2), w2_range=(2, -2)):
# create weight space
n_w = 100
w1 = np.linspace(w1_range[0], w1_range[1], num=n_w) # weight 1
w2 = np.linspace(w2_range[0], w2_range[1], num=n_w) # weight 2
ws_x, ws_y = np.meshgrid(w1, w2)
cost_ws = np.zeros((n_w, n_w)) # initialize cost matrix
# Fill the cost matrix for each combination of weights
for i in range(n_w):
for j in range(n_w):
y_pred = ws_x[i, j] * ws_y[i, j] * x
y_true = y
cost_ws[i, j] = 0.5 * (y_true - y_pred)**2 + \
0.5 * l2 * (ws_x[i, j]**2 + ws_y[i, j]**2) + 0.5 * v * (ws_x[i, j]*ws_y[i, j])**2
# compute gradients
dy, dx = np.gradient(cost_ws)
# plot vector space
skip = (slice(None, None, 5), slice(None, None, 5))
# fig, ax = plt.subplots(figsize=(8, 8))
#ax.contour(ws_x, ws_y, cost_ws, 200)
im = ax.imshow(cost_ws, extent=[ws_x.min(), ws_x.max(
), ws_y.min(), ws_y.max()], cmap=cm.coolwarm)
ax.quiver(ws_x[skip], ws_y[skip], -dx[skip], dy[skip], cost_ws[skip])
cbar = fig.colorbar(im, ax=ax)
# ax.set(aspect=1, title='Loss Surface')
cbar.ax.set_ylabel('$Loss$', fontsize=15)
ax.set_xlabel('$w_1$', fontsize=15)
ax.set_ylabel('$w_2$', fontsize=15)
# ax.grid()
# add saddle point
ax.scatter(0, 0, label='Saddle point', c='red', marker='*')
# ax.scatter(0,0, c='black', marker=r'$\rightarrow$', label='Negative gradient')
settings = (x, y, v, l2, w1_range, w2_range)
return ax, settings
# three-dimensional version
def plot_mse_loss_surface_3d(ax, x, y, v=0.0, l2=0.0, w1_range=(-2, 2), w2_range=(2, -2), angle=30):
# create weight space
n_w = 100
w1 = np.linspace(w1_range[0], w1_range[1], num=n_w) # weight 1
w2 = np.linspace(w2_range[0], w2_range[1], num=n_w) # weight 2
ws_x, ws_y = np.meshgrid(w1, w2)
cost_ws = np.zeros((n_w, n_w)) # initialize cost matrix
# Fill the cost matrix for each combination of weights
for i in range(n_w):
for j in range(n_w):
y_pred = ws_x[i, j] * ws_y[i, j] * x
y_true = y
cost_ws[i, j] = 0.5 * (y_true - y_pred)**2 + \
0.5 * l2 * (ws_x[i, j]**2 + ws_y[i, j]**2) + 0.5 * v * (ws_x[i, j]*ws_y[i, j])**2
X = ws_x
Y = ws_y
Z = cost_ws
#fig, ax = plt.subplots(figsize=(8, 8))
#ax = fig.add_subplot(1,1,1, projection='3d')
# fourth dimension - colormap
# create colormap according to x-value (can use any 50x50 array)
color_dimension = Z # change to desired fourth dimension
minn, maxx = color_dimension.min(), color_dimension.max()
norm = Normalize(minn, maxx)
m = plt.cm.ScalarMappable(norm=norm, cmap='jet')
m.set_array([])
fcolors = m.to_rgba(color_dimension)
# plot
# fig = plt.figure(figsize=(8, 8))
# ax = fig.gca(projection='3d')
ax.set_zlim(0, 50)
ax.plot([0], [0], 'ro', c='red', marker='*', label='Saddle point')
ax.plot_surface(X, Y, Z, rstride=1, cstride=1, facecolors=fcolors,
vmin=minn, vmax=maxx, shade=False, alpha=1)
ax.set_xlabel('$w_1$', fontsize=20)
ax.set_ylabel('$w_2$', fontsize=20)
ax.set_zlabel('$Loss$', fontsize=20)
settings = (x, y, v, l2, w1_range, w2_range)
ax.view_init(angle, 10)
return ax, settings
def plot_global_minimum_manifold_2d(ax, settings):
# retrieve cached settings
x, y, v, l2, w1_range, w2_range = settings
n_w = 1000
man_w1 = np.linspace(w1_range[0], w1_range[1], num=n_w)
man_w2 = np.linspace(w2_range[0], w2_range[1], num=n_w)
man_ws_x, man_ws_y = np.meshgrid(man_w1, man_w2)
loss = 0.5 * y *(1 - man_ws_x * man_ws_y * x)**2 + \
0.5 * l2 * (man_ws_x**2 + man_ws_y**2) + 0.5 * v * (man_ws_x * man_ws_y)**2
min_loss = np.min(loss)
manifold_indices = loss < min_loss + 1e-5
manifold_x = man_ws_x[manifold_indices]
manifold_y = man_ws_y[manifold_indices]
# plot manifold of global minima
ax.scatter(manifold_y, manifold_x, s=0.1, c='cyan',
label='Manifold of global minima')
def plot_global_minimum_manifold_3d(ax, settings):
# retrieve cached settings
x, y, v, l2, w1_range, w2_range = settings
n_w = 1000
man_w1 = np.linspace(w1_range[0], w1_range[1], num=n_w)
man_w2 = np.linspace(w2_range[0], w2_range[1], num=n_w)
man_ws_x, man_ws_y = np.meshgrid(man_w1, man_w2)
loss = 0.5 * y * (1 - man_ws_x * man_ws_y * x)**2 + \
0.5 * l2 * (man_ws_x**2 + man_ws_y**2) + 0.5 * v * (man_ws_x*man_ws_y)**2
min_loss = np.min(loss)
manifold_indices = loss < min_loss + 1e-5
manifold_x = man_ws_x[manifold_indices]
manifold_y = man_ws_y[manifold_indices]
pos = np.where(np.abs(np.diff(manifold_y)) >= 0.1)[0]+1
x = np.insert(manifold_x, pos, np.nan)
y = np.insert(manifold_y, pos, np.nan)
# plot manifold of global minima
#ax.scatter(manifold_y, manifold_x, 0, s=0.5, c='cyan',
# label='Manifold of global minima')
ax.plot(y, x, c='cyan',
label='Manifold of global minima')
def plot_optimiser_trajectory_2d(ax, weights, **kwargs):
w1_vals = weights['w1']
w2_vals = weights['w2']
ax.plot(w1_vals, w2_vals, **kwargs)
def plot_optimiser_trajectory_3d(ax, settings, weights, **kwargs):
x, y, v, l2, _, _ = settings
w1_vals = np.array(weights['w1'])
w2_vals = np.array(weights['w2'])
loss = 0.5 * y * (1 - w1_vals * w2_vals * x)**2 + \
0.5 * l2 * (w1_vals**2 + w2_vals**2) + 0.5 * v * (w1_vals*w2_vals)**2
ax.plot(w1_vals, w2_vals, loss, **kwargs)
def plot_optimiser_trajectory(x, y, weights, dim='2d', angle=45, manifold=False, **kwargs):
if dim == '3d':
# create the figure and axes required by the surface-plotting helpers
fig = plt.figure(figsize=(8, 8))
ax = fig.add_subplot(1, 1, 1, projection='3d')
ax, settings = plot_mse_loss_surface_3d(ax, x, y, angle=angle)
if manifold:
plot_global_minimum_manifold_3d(ax, settings)
plot_optimiser_trajectory_3d(ax, settings, weights, **kwargs)
else:
fig, ax = plt.subplots(figsize=(8, 8))
ax, settings = plot_mse_loss_surface_2d(fig, ax, x, y)
if manifold:
plot_global_minimum_manifold_2d(ax, settings)
plot_optimiser_trajectory_2d(ax, weights, **kwargs)
def plot_weight_norm(ax, weights, **kwargs):
w1_vals = np.array(weights['w1'])
w2_vals = np.array(weights['w2'])
epochs = np.arange(0, len(w1_vals), 1)
norms = np.sqrt(w1_vals**2 + w2_vals**2)
ax.set_xlabel('Epoch', fontsize=12)
ax.set_ylabel('Weight norm', fontsize=12)
ax.plot(epochs, norms, linewidth=2.0, **kwargs)
def animate_optimiser_trajectory_2d(i, ax, weights, **kwargs):
w1_vals = weights['w1']
w2_vals = weights['w2']
ax.plot(w1_vals[:i], w2_vals[:i], **kwargs)
return ax
def animate_optimiser_trajectory_3d(i, ax, settings, weights, **kwargs):
x, y, v, l2, _, _ = settings
w1_vals = np.array(weights['w1'])
w2_vals = np.array(weights['w2'])
loss = 0.5 * y * (1 - w1_vals * w2_vals * x)**2 + \
0.5 * l2 * (w1_vals**2 + w2_vals**2) + 0.5 * v * (w1_vals*w2_vals)**2
ax.plot(w1_vals[:i], w2_vals[:i], loss[:i], **kwargs)
return ax
def plot_optimiser_loss(x, y, v, l2, weights, **kwargs):
loss = []
epoch = np.arange(0, len(weights['w1']))
for w1, w2 in zip(weights['w1'], weights['w2']):
loss_val = 0.5 * y * (1 - w1 * w2 * x)**2 + 0.5 * l2 * (w1**2 + w2**2) + 0.5 * v * (w1 * w2)**2
loss.append(loss_val)
plt.plot(epoch, loss, **kwargs)
plt.xlabel('Epoch')
plt.ylabel('Loss')
def plot_interpolated_trajectory_2d(ax, w1_a, w2_a, w1_b, w2_b, start=0, end=1, **kwargs):
alpha = np.arange(start, end, 0.001)
w1_path = []
w2_path = []
for a in alpha:
ww1 = (1 - a) * w1_a + a * w1_b
ww2 = (1 - a) * w2_a + a * w2_b
w1_path.append(ww1)
w2_path.append(ww2)
ax.plot(w1_path, w2_path, **kwargs)
def plot_interpolated_trajectory_3d(ax, settings, w1_a, w2_a, w1_b, w2_b, start=0, end=1, **kwargs):
x, y, v, l2, _, _ = settings
alpha = np.arange(start, end, 0.001)
w1_path = []
w2_path = []
loss = []
for a in alpha:
ww1 = (1 - a) * w1_a + a * w1_b
ww2 = (1 - a) * w2_a + a * w2_b
loss_val = 0.5 * (y - ww1 * ww2 * x)**2 + 0.5 * l2 * (ww1**2 + ww2**2)
loss.append(loss_val)
w1_path.append(ww1)
w2_path.append(ww2)
ax.plot(w1_path, w2_path, loss, **kwargs)
def plot_interpolated_loss(x, y, w1_a, w2_a, w1_b, w2_b, start=0, end=1, l2=0.0, **kwargs):
alpha = np.arange(start, end, 0.001)
interpolated_loss = []
for a in alpha:
ww1 = (1 - a) * w1_a + a * w1_b
ww2 = (1 - a) * w2_a + a * w2_b
loss_val = 0.5 * (y - ww1 * ww2 * x)**2 + 0.5 * l2 * (ww1**2 + ww2**2)
interpolated_loss.append(loss_val)
plt.plot(alpha, interpolated_loss, **kwargs)
plt.xlabel(r'$\alpha$')
plt.ylabel('Loss')
def plot_learning_dynamics(ax, weights, **kwargs):
epoch = np.arange(0, len(weights['w1']))
scores = []
for w1, w2 in zip(weights['w1'], weights['w2']):
scores.append(w1 * w2)
ax.plot(epoch, scores, **kwargs)
def animate_learning_dynamics(i, ax, weights, y, **kwargs):
n_epoch = len(weights['w1'])
epoch = np.arange(1, n_epoch)
scores = []
for w1, w2 in zip(weights['w1'], weights['w2']):
scores.append(w1 * w2)
ax.set_xlim((1, n_epoch))
ax.set_ylim((0, y))
ax.set_xlabel('Epoch', fontsize=15)
ax.set_ylabel('$w_2 \cdot w_1$', fontsize=15)
ax.plot(epoch[:i], scores[:i], **kwargs)
return ax
def animate_learning(weights, save=False, name='anim'):
gs = gridspec.GridSpec(2, 4)
gs.update(wspace=0.5)
fig = plt.figure(figsize=(12, 8))
ax1 = fig.add_subplot(gs[0, :2], )
ax2 = fig.add_subplot(gs[0, 2:], projection='3d')
ax3 = fig.add_subplot(gs[1, 1:3])
# ax1 = fig.add_subplot(2, 2, 1)
# ax2 = fig.add_subplot(2, 2, 2, projection = '3d')
# ax3 = fig.add_subplot(2, 2, 3)
# ax4 = fig.add_subplot(2, 2, 4)
ax1, settings = plot_mse_loss_surface_2d(fig, ax1, 1, 1)
ax2, settings = plot_mse_loss_surface_3d(ax2, 1, 1, angle=60)
plot_global_minimum_manifold_2d(ax1, settings)
plot_global_minimum_manifold_3d(ax2, settings)
def update(i):
animate_optimiser_trajectory_2d(
i, ax1, weights, label='Gradient descent')
animate_optimiser_trajectory_3d(
i, ax2, settings, weights, label='Gradient descent')
animate_learning_dynamics(i, ax3, weights, 1)
# animate_weight_norm(i, ax4, scalarNet.history)
# FuncAnimation will call the 'update' function for each frame
anim = FuncAnimation(fig, update, frames=100, interval=5, save_count=50)
# HTML(anim.to_html5_video())
if save:
anim.save(name + '.gif', dpi=80, writer='imagemagick')
plt.show()
| 2.59375 | 3 |
tests/qconvolutional_test.py | kshithijiyer/qkeras | 0 | 6504 | # Copyright 2019 Google LLC
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test layers from qconvolutional.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
from numpy.testing import assert_allclose
import pytest
import tempfile
from tensorflow.keras import backend as K
from tensorflow.keras.layers import Activation
from tensorflow.keras.layers import Flatten
from tensorflow.keras.layers import Input
from tensorflow.keras.models import Model
from tensorflow.keras.backend import clear_session
from qkeras import binary
from qkeras import ternary
from qkeras import QActivation
from qkeras import QDense
from qkeras import QConv1D
from qkeras import QConv2D
from qkeras import QSeparableConv2D
from qkeras import quantized_bits
from qkeras import quantized_relu
from qkeras.utils import model_save_quantized_weights
from qkeras.utils import quantized_model_from_json
from qkeras.utils import load_qmodel
from qkeras import print_qstats
from qkeras import extract_model_operations
# TODO(hzhuang):
# qoctave_conv test
# qbatchnorm test
def test_qnetwork():
x = x_in = Input((28, 28, 1), name='input')
x = QSeparableConv2D(
32, (2, 2),
strides=(2, 2),
depthwise_quantizer=binary(alpha=1.0),
pointwise_quantizer=quantized_bits(4, 0, 1, alpha=1.0),
depthwise_activation=quantized_bits(6, 2, 1, alpha=1.0),
bias_quantizer=quantized_bits(4, 0, 1),
name='conv2d_0_m')(
x)
x = QActivation('quantized_relu(6,2,1)', name='act0_m')(x)
x = QConv2D(
64, (3, 3),
strides=(2, 2),
kernel_quantizer=ternary(alpha=1.0),
bias_quantizer=quantized_bits(4, 0, 1),
name='conv2d_1_m',
activation=quantized_relu(6, 3, 1))(
x)
x = QConv2D(
64, (2, 2),
strides=(2, 2),
kernel_quantizer=quantized_bits(6, 2, 1, alpha=1.0),
bias_quantizer=quantized_bits(4, 0, 1),
name='conv2d_2_m')(
x)
x = QActivation('quantized_relu(6,4,1)', name='act2_m')(x)
x = Flatten(name='flatten')(x)
x = QDense(
10,
kernel_quantizer=quantized_bits(6, 2, 1, alpha=1.0),
bias_quantizer=quantized_bits(4, 0, 1),
name='dense')(
x)
x = Activation('softmax', name='softmax')(x)
model = Model(inputs=[x_in], outputs=[x])
# reload the model to ensure saving/loading works
json_string = model.to_json()
clear_session()
model = quantized_model_from_json(json_string)
# generate same output for weights
np.random.seed(42)
for layer in model.layers:
all_weights = []
for i, weights in enumerate(layer.get_weights()):
input_size = np.prod(layer.input.shape.as_list()[1:])
if input_size is None:
input_size = 576 * 10 # to avoid learning sizes
shape = weights.shape
assert input_size > 0, 'input size for {} {}'.format(layer.name, i)
# he normal initialization with a scale factor of 2.0
all_weights.append(
10.0 * np.random.normal(0.0, np.sqrt(2.0 / input_size), shape))
if all_weights:
layer.set_weights(all_weights)
# apply quantizer to weights
model_save_quantized_weights(model)
all_weights = []
for layer in model.layers:
for i, weights in enumerate(layer.get_weights()):
w = np.sum(weights)
all_weights.append(w)
all_weights = np.array(all_weights)
# test_qnetwork_weight_quantization
all_weights_signature = np.array(
[2., -6.75, -0.625, -2., -0.25, -56., 1.125, -1.625, -1.125])
assert all_weights.size == all_weights_signature.size
assert np.all(all_weights == all_weights_signature)
# test_qnetwork_forward:
expected_output = np.array(
[[0.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00,
0.e+00, 1.e+00, 0.e+00, 0.e+00, 0.e+00],
[0.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00,
0.e+00, 1.e+00, 0.e+00, 0.e+00, 0.e+00],
[0.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00,
0.e+00, 0.e+00, 0.e+00, 6.e-08, 1.e+00],
[0.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00,
0.e+00, 1.e+00, 0.e+00, 0.e+00, 0.e+00],
[0.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00,
0.e+00, 1.e+00, 0.e+00, 0.e+00, 0.e+00],
[0.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00,
0.e+00, 0.e+00, 0.e+00, 5.e-07, 1.e+00],
[0.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00,
0.e+00, 1.e+00, 0.e+00, 0.e+00, 0.e+00],
[0.e+00, 1.e+00, 0.e+00, 0.e+00, 0.e+00,
0.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00],
[0.e+00, 0.e+00, 0.e+00, 0.e+00, 1.e+00,
0.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00],
[0.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00,
1.e+00, 0.e+00, 0.e+00, 0.e+00, 0.e+00]]).astype(np.float16)
inputs = 2 * np.random.rand(10, 28, 28, 1)
actual_output = model.predict(inputs).astype(np.float16)
assert_allclose(actual_output, expected_output, rtol=1e-4)
def test_qconv1d():
np.random.seed(33)
x = Input((4, 4,))
y = QConv1D(
2, 1,
kernel_quantizer=quantized_bits(6, 2, 1, alpha=1.0),
bias_quantizer=quantized_bits(4, 0, 1),
name='qconv1d')(
x)
model = Model(inputs=x, outputs=y)
# Extract model operations
model_ops = extract_model_operations(model)
# Assertion about the number of operations for this Conv1D layer
assert model_ops['qconv1d']['number_of_operations'] == 32
# Print qstats to make sure it works with Conv1D layer
print_qstats(model)
# reload the model to ensure saving/loading works
# json_string = model.to_json()
# clear_session()
# model = quantized_model_from_json(json_string)
for layer in model.layers:
all_weights = []
for i, weights in enumerate(layer.get_weights()):
input_size = np.prod(layer.input.shape.as_list()[1:])
if input_size is None:
input_size = 10 * 10
shape = weights.shape
assert input_size > 0, 'input size for {} {}'.format(layer.name, i)
all_weights.append(
10.0 * np.random.normal(0.0, np.sqrt(2.0 / input_size), shape))
if all_weights:
layer.set_weights(all_weights)
# Save the model as an h5 file using Keras's model.save()
fd, fname = tempfile.mkstemp('.h5')
model.save(fname)
del model # Delete the existing model
# Return a compiled model identical to the previous one
model = load_qmodel(fname)
# Clean the created h5 file after loading the model
os.close(fd)
os.remove(fname)
# apply quantizer to weights
model_save_quantized_weights(model)
inputs = np.random.rand(2, 4, 4)
p = model.predict(inputs).astype(np.float16)
y = np.array([[[-2.441, 3.816], [-3.807, -1.426], [-2.684, -1.317],
[-1.659, 0.9834]],
[[-4.99, 1.139], [-2.559, -1.216], [-2.285, 1.905],
[-2.652, -0.467]]]).astype(np.float16)
assert np.all(p == y)
if __name__ == '__main__':
pytest.main([__file__])
| 1.71875 | 2 |
discord/ext/ui/select.py | Lapis256/discord-ext-ui | 0 | 6505 | from typing import Optional, List, TypeVar, Generic, Callable
import discord.ui
from .item import Item
from .select_option import SelectOption
from .custom import CustomSelect
def _default_check(_: discord.Interaction) -> bool:
return True
C = TypeVar("C", bound=discord.ui.Select)
class Select(Item, Generic[C]):
def __init__(
self,
placeholder: Optional[str] = None,
min_values: int = 1,
max_values: int = 1,
options: Optional[list] = None,
cls: C = CustomSelect,
custom_id: Optional[str] = None,
) -> None:
self._placeholder: Optional[str] = placeholder
self._min_values: int = min_values
self._max_values: int = max_values
self._options: list = [] if options is None else options
self._row: Optional[int] = None
self.cls: C = cls
self._custom_id: Optional[str] = custom_id
self.func: Optional[Callable] = None
self.check_func: Callable[[discord.Interaction], bool] = _default_check
def placeholder(self, placeholder: str) -> 'Select':
self._placeholder = placeholder
return self
def min_values(self, min_values: int) -> 'Select':
self._min_values = min_values
return self
def max_values(self, max_values: int) -> 'Select':
self._max_values = max_values
return self
def options(self, options: List[SelectOption]) -> 'Select':
self._options = options
return self
def row(self, row: int) -> 'Select':
self._row = row
return self
def on_select(self, func: Callable) -> 'Select':
self.func = func
return self
def custom_id(self, custom_id: str) -> 'Select':
self._custom_id = custom_id
return self
def check(self, func: Callable[[discord.Interaction], bool]) -> 'Select':
self.check_func = func
return self
def to_discord(self) -> C:
return self.cls(
placeholder=self._placeholder,
min_values=self._min_values,
max_values=self._max_values,
options=[o.to_discord_select_option() for o in self._options],
row=self._row,
custom_id=self._custom_id,
check_func=self.check_func,
callback=self.func
)
| 2.4375 | 2 |
ucscsdk/mometa/storage/StorageScsiLunRef.py | parag-may4/ucscsdk | 9 | 6506 | """This module contains the general information for StorageScsiLunRef ManagedObject."""
from ...ucscmo import ManagedObject
from ...ucsccoremeta import UcscVersion, MoPropertyMeta, MoMeta
from ...ucscmeta import VersionMeta
class StorageScsiLunRefConsts():
pass
class StorageScsiLunRef(ManagedObject):
"""This is StorageScsiLunRef class."""
consts = StorageScsiLunRefConsts()
naming_props = set([u'id'])
mo_meta = MoMeta("StorageScsiLunRef", "storageScsiLunRef", "scsi-lun-ref-[id]", VersionMeta.Version131a, "InputOutput", 0x1f, [], ["read-only"], [u'storageLunReplica', u'storageLunSnapshot', u'storageScsiLun', u'storageVirtualDrive'], [], ["Get"])
prop_meta = {
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version131a, MoPropertyMeta.INTERNAL, None, None, None, r"""((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1}""", [], []),
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version131a, MoPropertyMeta.READ_ONLY, 0x2, 0, 256, None, [], []),
"id": MoPropertyMeta("id", "id", "uint", VersionMeta.Version131a, MoPropertyMeta.NAMING, 0x4, None, None, None, [], []),
"ls_dn": MoPropertyMeta("ls_dn", "lsDn", "string", VersionMeta.Version131a, MoPropertyMeta.READ_ONLY, None, 0, 256, None, [], []),
"lun_name": MoPropertyMeta("lun_name", "lunName", "string", VersionMeta.Version131a, MoPropertyMeta.READ_ONLY, None, None, None, r"""[\-\.:_a-zA-Z0-9]{0,16}""", [], []),
"pn_dn": MoPropertyMeta("pn_dn", "pnDn", "string", VersionMeta.Version141a, MoPropertyMeta.READ_ONLY, None, 0, 256, None, [], []),
"profile_dn": MoPropertyMeta("profile_dn", "profileDn", "string", VersionMeta.Version131a, MoPropertyMeta.READ_ONLY, None, 0, 256, None, [], []),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version131a, MoPropertyMeta.READ_ONLY, 0x8, 0, 256, None, [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version131a, MoPropertyMeta.READ_WRITE, 0x10, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []),
}
prop_map = {
"childAction": "child_action",
"dn": "dn",
"id": "id",
"lsDn": "ls_dn",
"lunName": "lun_name",
"pnDn": "pn_dn",
"profileDn": "profile_dn",
"rn": "rn",
"status": "status",
}
def __init__(self, parent_mo_or_dn, id, **kwargs):
self._dirty_mask = 0
self.id = id
self.child_action = None
self.ls_dn = None
self.lun_name = None
self.pn_dn = None
self.profile_dn = None
self.status = None
ManagedObject.__init__(self, "StorageScsiLunRef", parent_mo_or_dn, **kwargs)
| 1.984375 | 2 |
saxstools/fullsaxs.py | latrocinia/saxstools | 0 | 6507 | <filename>saxstools/fullsaxs.py
from __future__ import print_function, absolute_import, division
from sys import stdout as _stdout
from time import time as _time
import numpy as np
try:
import pyfftw
pyfftw.interfaces.cache.enable()
pyfftw.interfaces.cache.set_keepalive_time(10)
rfftn = pyfftw.interfaces.numpy_fft.rfftn
irfftn = pyfftw.interfaces.numpy_fft.irfftn
except ImportError:
from numpy.fft import rfftn, irfftn
from disvis import volume
from disvis.points import dilate_points
from disvis.libdisvis import (rotate_image3d, dilate_points_add, longest_distance)
from powerfit.solutions import Solutions
from saxstools.saxs_curve import scattering_curve, create_fifj_lookup_table
from saxstools.helpers import coarse_grain
from saxstools.libsaxstools import calc_chi2
from saxstools.kernels import Kernels as saxs_Kernels
try:
import pyopencl as cl
import pyopencl.array as cl_array
import disvis.pyclfft
from disvis.kernels import Kernels
from disvis import pyclfft
except ImportError:
pass
class FullSAXS(object):
def __init__(self):
# parameters to be defined
self._receptor = None
self._ligand = None
# parameters with standard values
self.rotations = [[[1, 0, 0], [0, 1, 0], [0, 0, 1]]]
self.weights = None
self.voxelspacing = 1.0
self.interaction_radius = 2.5
self.max_clash = 100
self.min_interaction = 300
self.coarse_grain = True
self.beads_per_residue = 2
# CPU or GPU
self._queue = None
# unchangeable
self._data = {}
self._q = None
self._Iq = None
self._sq = None
@property
def receptor(self):
return self._receptor
@receptor.setter
def receptor(self, receptor):
self._receptor = receptor.duplicate()
@property
def ligand(self):
return self._ligand
@ligand.setter
def ligand(self, ligand):
self._ligand = ligand.duplicate()
@property
def rotations(self):
return self._rotations
@rotations.setter
def rotations(self, rotations):
rotmat = np.asarray(rotations, dtype=np.float64)
if rotmat.ndim != 3:
raise ValueError("Input should be a list of rotation matrices.")
self._rotations = rotmat
@property
def weights(self):
return self._weights
@weights.setter
def weights(self, weights):
self._weights = weights
@property
def interaction_radius(self):
return self._interaction_radius
@interaction_radius.setter
def interaction_radius(self, radius):
if radius <= 0:
raise ValueError("Interaction radius should be bigger than zero")
self._interaction_radius = radius
@property
def voxelspacing(self):
return self._voxelspacing
@voxelspacing.setter
def voxelspacing(self, voxelspacing):
self._voxelspacing = voxelspacing
@property
def max_clash(self):
return self._max_clash
@max_clash.setter
def max_clash(self, max_clash):
if max_clash < 0:
raise ValueError("Maximum allowed clashing volume cannot be negative")
self._max_clash = max_clash + 0.9
@property
def min_interaction(self):
return self._min_interaction
@min_interaction.setter
def min_interaction(self, min_interaction):
if min_interaction < 1:
raise ValueError("Minimum required interaction volume cannot be smaller than 1")
self._min_interaction = min_interaction + 0.9
@property
def queue(self):
return self._queue
@queue.setter
def queue(self, queue):
self._queue = queue
@property
def data(self):
return self._data
@property
def saxsdata(self):
return self._q, self._Iq, self._sq
@saxsdata.setter
def saxsdata(self, saxsdata):
self._q, self._Iq, self._sq = saxsdata
def _initialize(self):
# check if requirements are set
if any(x is None for x in (self.receptor, self.ligand)):
raise ValueError("Not all requirements are met for a search")
if self.weights is None:
self.weights = np.ones(self.rotations.shape[0], dtype=np.float64)
if len(self.weights) != len(self.rotations):
raise ValueError("")
d = self.data
# determine size for grid
shape = grid_shape(self.receptor.coor, self.ligand.coor, self.voxelspacing)
# calculate the interaction surface and core of the receptor
vdw_radii = self.receptor.vdw_radius
radii = vdw_radii + self.interaction_radius
d['rsurf'] = rsurface(self.receptor.coor, radii,
shape, self.voxelspacing)
d['rcore'] = rsurface(self.receptor.coor, vdw_radii,
shape, self.voxelspacing)
# keep track of some data for later calculations
d['origin'] = np.asarray(d['rcore'].origin, dtype=np.float64)
d['shape'] = d['rcore'].shape
d['start'] = d['rcore'].start
d['nrot'] = self.rotations.shape[0]
# set ligand center to the origin of the receptor map
# and make a grid of the ligand
radii = self.ligand.vdw_radius
d['lsurf'] = dilate_points((self.ligand.coor - self.ligand.center \
+ self.receptor.center), radii, volume.zeros_like(d['rcore']))
d['im_center'] = np.asarray((self.receptor.center - d['rcore'].origin)/self.voxelspacing, dtype=np.float64)
d['max_clash'] = self.max_clash/self.voxelspacing**3
d['min_interaction'] = self.min_interaction/self.voxelspacing**3
# SAXS data
d['q'] = self._q
d['targetIq'] = self._Iq
d['sq'] = self._sq
if self.coarse_grain:
e1, xyz1 = coarse_grain(self.receptor, bpr=self.beads_per_residue)
e2, xyz2 = coarse_grain(self.ligand, bpr=self.beads_per_residue)
else:
e1, xyz1 = self.receptor.elements, self.receptor.coor
e2, xyz2 = self.ligand.elements, self.ligand.coor
d['base_Iq'] = scattering_curve(self._q, e1, xyz1, bpr=self.beads_per_residue)
d['base_Iq'] += scattering_curve(self._q, e2, xyz2, bpr=self.beads_per_residue)
d['fifj'], d['rind'], d['lind'] = create_fifj_lookup_table(d['q'], e1, e2, bpr=self.beads_per_residue)
d['rxyz'] = xyz1
d['lxyz'] = xyz2 - self.ligand.center
d['chi2'] = np.zeros(d['rcore'].shape, dtype=np.float64)
d['best_chi2'] = np.zeros_like(d['chi2'])
def search(self):
self._initialize()
if self.queue is None:
self._cpu_init()
self._cpu_search()
else:
self._gpu_init()
self._gpu_search()
if _stdout.isatty():
print()
d = self.data
ind = d['best_chi2'] > 0
d['best_chi2'][ind] -= d['best_chi2'][ind].min()
best_chi2 = volume.Volume(d['best_chi2'], voxelspacing=self.voxelspacing, origin=d['origin'])
return Solutions(best_chi2, self.rotations, d['rot_ind'])
def _cpu_init(self):
self.cpu_data = {}
c = self.cpu_data
d = self.data
c['rcore'] = d['rcore'].array
c['rsurf'] = d['rsurf'].array
c['im_lsurf'] = d['lsurf'].array
c['lsurf'] = np.zeros_like(c['rcore'])
c['clashvol'] = np.zeros_like(c['rcore'])
c['intervol'] = np.zeros_like(c['rcore'])
c['interspace'] = np.zeros_like(c['rcore'], dtype=np.int64)
# complex arrays
c['ft_shape'] = list(d['shape'])
c['ft_shape'][-1] = d['shape'][-1]//2 + 1
c['ft_lsurf'] = np.zeros(c['ft_shape'], dtype=np.complex128)
c['ft_rcore'] = np.zeros(c['ft_shape'], dtype=np.complex128)
c['ft_rsurf'] = np.zeros(c['ft_shape'], dtype=np.complex128)
# initial calculations
c['ft_rcore'] = rfftn(c['rcore'])
c['ft_rsurf'] = rfftn(c['rsurf'])
c['rotmat'] = np.asarray(self.rotations, dtype=np.float64)
c['weights'] = np.asarray(self.weights, dtype=np.float64)
c['nrot'] = d['nrot']
c['shape'] = d['shape']
c['max_clash'] = d['max_clash']
c['min_interaction'] = d['min_interaction']
c['vlength'] = int(np.linalg.norm(self.ligand.coor - \
self.ligand.center, axis=1).max() + \
self.interaction_radius + 1.5)/self.voxelspacing
c['origin'] = d['origin']
# SAXS arrays
c['q'] = d['q']
c['targetIq'] = d['targetIq']
c['sq'] = d['sq']
c['base_Iq'] = d['base_Iq']
c['fifj'] = d['fifj']
c['rind'] = d['rind']
c['lind'] = d['lind']
c['rxyz'] = d['rxyz']
c['lxyz'] = d['lxyz']
c['chi2'] = d['chi2']
c['best_chi2'] = d['best_chi2']
c['rot_ind'] = np.zeros(d['shape'], dtype=np.int32)
c['Iq'] = np.zeros_like(c['targetIq'])
c['tmplxyz'] = np.zeros_like(c['lxyz'])
def _cpu_search(self):
d = self.data
c = self.cpu_data
time0 = _time()
for n in xrange(c['rotmat'].shape[0]):
# rotate ligand image
rotate_image3d(c['im_lsurf'], c['vlength'],
np.linalg.inv(c['rotmat'][n]), d['im_center'], c['lsurf'])
c['ft_lsurf'] = rfftn(c['lsurf']).conj()
c['clashvol'] = irfftn(c['ft_lsurf'] * c['ft_rcore'], s=c['shape'])
c['intervol'] = irfftn(c['ft_lsurf'] * c['ft_rsurf'], s=c['shape'])
np.logical_and(c['clashvol'] < c['max_clash'],
c['intervol'] > c['min_interaction'],
c['interspace'])
print('Number of complexes to analyze: ', c['interspace'].sum())
c['chi2'].fill(0)
calc_chi2(c['interspace'], c['q'], c['base_Iq'],
c['rind'], c['rxyz'], c['lind'], (np.mat(c['rotmat'][n])*np.mat(c['lxyz']).T).T,
c['origin'], self.voxelspacing,
c['fifj'], c['targetIq'], c['sq'], c['chi2'])
ind = c['chi2'] > c['best_chi2']
c['best_chi2'][ind] = c['chi2'][ind]
c['rot_ind'][ind] = n
if _stdout.isatty():
self._print_progress(n, c['nrot'], time0)
d['best_chi2'] = c['best_chi2']
d['rot_ind'] = c['rot_ind']
def _print_progress(self, n, total, time0):
m = n + 1
pdone = m/total
t = _time() - time0
_stdout.write('\r{:d}/{:d} ({:.2%}, ETA: {:d}s) '\
.format(m, total, pdone,
int(t/pdone - t)))
_stdout.flush()
def _gpu_init(self):
self.gpu_data = {}
g = self.gpu_data
d = self.data
q = self.queue
g['rcore'] = cl_array.to_device(q, float32array(d['rcore'].array))
g['rsurf'] = cl_array.to_device(q, float32array(d['rsurf'].array))
g['im_lsurf'] = cl.image_from_array(q.context, float32array(d['lsurf'].array))
g['sampler'] = cl.Sampler(q.context, False, cl.addressing_mode.CLAMP,
cl.filter_mode.LINEAR)
g['lsurf'] = cl_array.zeros_like(g['rcore'])
g['clashvol'] = cl_array.zeros_like(g['rcore'])
g['intervol'] = cl_array.zeros_like(g['rcore'])
g['interspace'] = cl_array.zeros(q, d['shape'], dtype=np.int32)
# complex arrays
g['ft_shape'] = list(d['shape'])
g['ft_shape'][0] = d['shape'][0]//2 + 1
g['ft_rcore'] = cl_array.zeros(q, g['ft_shape'], dtype=np.complex64)
g['ft_rsurf'] = cl_array.zeros_like(g['ft_rcore'])
g['ft_lsurf'] = cl_array.zeros_like(g['ft_rcore'])
g['ft_clashvol'] = cl_array.zeros_like(g['ft_rcore'])
g['ft_intervol'] = cl_array.zeros_like(g['ft_rcore'])
# allocate SAXS arrays
g['q'] = cl_array.to_device(q, float32array(d['q']))
g['targetIq'] = cl_array.to_device(q, float32array(d['targetIq']))
g['sq'] = cl_array.to_device(q, float32array(d['sq']))
g['base_Iq'] = cl_array.to_device(q, float32array(d['base_Iq']))
g['fifj'] = cl_array.to_device(q, float32array(d['fifj']))
g['rind'] = cl_array.to_device(q, d['rind'].astype(np.int32))
g['lind'] = cl_array.to_device(q, d['lind'].astype(np.int32))
g_rxyz = np.zeros((d['rxyz'].shape[0], 4), dtype=np.float32)
g_rxyz[:, :3] = d['rxyz'][:]
g_lxyz = np.zeros((d['lxyz'].shape[0], 4), dtype=np.float32)
g_lxyz[:, :3] = d['lxyz'][:]
g['rxyz'] = cl_array.to_device(q, g_rxyz)
g['lxyz'] = cl_array.to_device(q, g_lxyz)
g['rot_lxyz'] = cl_array.zeros_like(g['lxyz'])
g['chi2'] = cl_array.to_device(q, d['chi2'].astype(np.float32))
g['best_chi2'] = cl_array.to_device(q, d['best_chi2'].astype(np.float32))
g['rot_ind'] = cl_array.zeros(q, d['shape'], dtype=np.int32)
g['origin'] = np.zeros(4, dtype=np.float32)
g['origin'][:3] = d['origin'].astype(np.float32)
g['voxelspacing'] = np.float32(self.voxelspacing)
# kernels
g['k'] = Kernels(q.context)
g['saxs_k'] = saxs_Kernels(q.context)
g['k'].rfftn = pyclfft.RFFTn(q.context, d['shape'])
g['k'].irfftn = pyclfft.iRFFTn(q.context, d['shape'])
g['k'].rfftn(q, g['rcore'], g['ft_rcore'])
g['k'].rfftn(q, g['rsurf'], g['ft_rsurf'])
g['nrot'] = d['nrot']
g['max_clash'] = d['max_clash']
g['min_interaction'] = d['min_interaction']
def _gpu_search(self):
d = self.data
g = self.gpu_data
q = self.queue
k = g['k']
time0 = _time()
for n in xrange(g['nrot']):
k.rotate_image3d(q, g['sampler'], g['im_lsurf'],
self.rotations[n], g['lsurf'], d['im_center'])
k.rfftn(q, g['lsurf'], g['ft_lsurf'])
k.c_conj_multiply(q, g['ft_lsurf'], g['ft_rcore'], g['ft_clashvol'])
k.irfftn(q, g['ft_clashvol'], g['clashvol'])
k.c_conj_multiply(q, g['ft_lsurf'], g['ft_rsurf'], g['ft_intervol'])
k.irfftn(q, g['ft_intervol'], g['intervol'])
k.touch(q, g['clashvol'], g['max_clash'],
g['intervol'], g['min_interaction'],
g['interspace'])
g['saxs_k'].rotate_points(q, g['lxyz'], self.rotations[n], g['rot_lxyz'])
k.fill(q, g['chi2'], 0)
g['saxs_k'].calc_chi2(q, g['interspace'], g['q'], g['base_Iq'],
g['rind'], g['rxyz'], g['lind'], g['rot_lxyz'], g['origin'],
g['voxelspacing'], g['fifj'], g['targetIq'], g['sq'], g['chi2'])
g['saxs_k'].take_best(q, g['chi2'], g['best_chi2'], g['rot_ind'], n)
if _stdout.isatty():
self._print_progress(n, g['nrot'], time0)
self.queue.finish()
d['best_chi2'] = g['best_chi2'].get()
d['rot_ind'] = g['rot_ind'].get()
def rsurface(points, radius, shape, voxelspacing):
dimensions = [x*voxelspacing for x in shape]
origin = volume_origin(points, dimensions)
rsurf = volume.zeros(shape, voxelspacing, origin)
rsurf = dilate_points(points, radius, rsurf)
return rsurf
def volume_origin(points, dimensions):
center = points.mean(axis=0)
origin = [(c - d/2.0) for c, d in zip(center, dimensions)]
return origin
def grid_restraints(restraints, voxelspacing, origin, lcenter):
nrestraints = len(restraints)
g_restraints = np.zeros((nrestraints, 8), dtype=np.float64)
for n in range(nrestraints):
r_sel, l_sel, mindis, maxdis = restraints[n]
r_pos = (r_sel.center - origin)/voxelspacing
l_pos = (l_sel.center - lcenter)/voxelspacing
g_restraints[n, 0:3] = r_pos
g_restraints[n, 3:6] = l_pos
g_restraints[n, 6] = mindis/voxelspacing
g_restraints[n, 7] = maxdis/voxelspacing
return g_restraints
def grid_shape(points1, points2, voxelspacing):
shape = min_grid_shape(points1, points2, voxelspacing)
shape = [volume.radix235(x) for x in shape]
return shape
def min_grid_shape(points1, points2, voxelspacing):
# the minimal grid shape is the size of the fixed protein in
# each dimension plus the longest diameter of the scanning chain
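# worked example (hypothetical numbers, for illustration only): if the fixed
# protein spans 40 x 30 x 20 along x, y, z, the scanning chain's longest
# diameter is 25 and the voxelspacing is 1, then
# (dimensions1 + dimension2)/voxelspacing + 10 gives (75, 65, 55), which is
# reversed to a (z, y, x) grid shape of (55, 65, 75)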
dimensions1 = points1.ptp(axis=0)
dimension2 = longest_distance(points2)
grid_shape = np.asarray(((dimensions1 + dimension2)/voxelspacing) + 10, dtype=np.int32)[::-1]
return grid_shape
def float32array(array_like):
return np.asarray(array_like, dtype=np.float32)
| 1.78125 | 2 |
lib/generate_random_obs.py | zehuilu/Learning-from-Sparse-Demonstrations | 8 | 6508 | #!/usr/bin/env python3
import os
import sys
import time
sys.path.append(os.getcwd()+'/lib')
import random
from dataclasses import dataclass, field
from ObsInfo import ObsInfo
def generate_random_obs(num_obs: int, size_list: list, config_data):
"""
config_file_name = "config.json"
json_file = open(config_file_name)
config_data = json.load(json_file)
size_list = [length, width, height]
"""
ObsList = []
if (num_obs > 0.5):
for i in range(0, num_obs):
# random center
center = [random.uniform(config_data["LAB_SPACE_LIMIT"]["LIMIT_X"][0], config_data["LAB_SPACE_LIMIT"]["LIMIT_X"][1]), \
random.uniform(config_data["LAB_SPACE_LIMIT"]["LIMIT_Y"][0], config_data["LAB_SPACE_LIMIT"]["LIMIT_Y"][1]), \
random.uniform(config_data["LAB_SPACE_LIMIT"]["LIMIT_Z"][0], config_data["LAB_SPACE_LIMIT"]["LIMIT_Z"][1])]
ObsList.append( ObsInfo(center, size_list) )
return ObsList | 2.546875 | 3 |
userbot/helper_funcs/misc.py | Abucuyy/Uciha | 0 | 6509 | <filename>userbot/helper_funcs/misc.py
# TG-UserBot - A modular Telegram UserBot script for Python.
# Copyright (C) 2019 Kandarp <https://github.com/kandnub>
#
# TG-UserBot is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# TG-UserBot is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with TG-UserBot. If not, see <https://www.gnu.org/licenses/>.
from typing import Tuple, Union
from telethon.tl import types
from ..utils.client import UserBotClient
from ..utils.helpers import get_chat_link
ChatBannedRights = {
'until_date': 'Banned until:',
'view_messages': 'Read messages:',
'send_messages': 'Send messages:',
'send_media': 'Send media:',
'send_stickers': 'Send stickers:',
'send_gifs': 'Send GIFs:',
'send_games': 'Send games:',
'send_inline': 'Send inline messages:',
'embed_links': 'Send embed links:',
'send_polls': 'Send polls:',
'change_info': 'Change info:',
'invite_users': 'Add users:',
'pin_messages': 'Pin messages:'
}
ChatAdminRights = {
'change_info': 'Change chat info:',
'post_messages': 'Post messages:',
'edit_messages': 'Edit messages:',
'delete_messages': 'Delete messages:',
'ban_users': 'Ban users:',
'invite_users': 'Invite users:',
'pin_messages': 'Pin messages:',
'add_admins': 'Add new admins:'
}
async def parse_admin_rights(AdminRights: types.ChatAdminRights) -> str:
text = []
for attr, string in ChatAdminRights.items():
right = getattr(AdminRights, attr, False)
if right:
text.append(f'{string} {right}')
return '\n'.join(text)
async def parse_banned_rights(BannedRights: types.ChatBannedRights) -> str:
text = []
for attr, string in ChatBannedRights.items():
right = getattr(BannedRights, attr, False)
if right:
if attr == "until_date":
text.append(f'{string} {right.ctime()} (UTC)')
else:
text.append(f'{string} {right}')
return '\n'.join(text)
async def get_entity_info(
arg: Union[types.ChatFull, types.ChannelFull]
) -> Tuple[int, int, int, int, int, int]:
creator, admins, bots, participants, kicked, banned = (None, None, None,
None, None, None)
full_chat = arg.full_chat
if isinstance(full_chat, types.ChannelFull):
if hasattr(full_chat, 'participants_count'):
participants = full_chat.participants_count
if hasattr(full_chat, 'admins_count'):
admins = full_chat.admins_count
if hasattr(full_chat, 'kicked_count'):
kicked = full_chat.kicked_count
if hasattr(full_chat, 'banned_count'):
banned = full_chat.banned_count
if hasattr(full_chat, 'bot_info'):
bots = len(full_chat.bot_info)
else:
if hasattr(full_chat, 'bot_info'):
bots = len(full_chat.bot_info)
if hasattr(full_chat, 'participants'):
admins, participants = 0, 0
for p in full_chat.participants.participants:
if isinstance(p, types.ChatParticipantCreator):
creator = p.user_id
if isinstance(p, types.ChatParticipant):
participants += 1
if isinstance(p, types.ChatParticipantAdmin):
admins += 1
return creator, admins, bots, participants, kicked, banned
async def unparse_info(client: UserBotClient, creator: int, admins: int,
bots: int, users: int, kicked: int, banned: int) -> str:
text = ''
if creator:
c = await client.get_entity(creator)
text += f"\n**Creator:** {await get_chat_link(c)}"
if users:
text += f"\n**Participants:** {users}"
if admins:
text += f"\n**Admins:** {admins}"
if bots:
text += f"\n**Bots:** {bots}"
if kicked:
text += f"\n**Kicked:** {kicked}"
if banned:
text += f"\n**Banned:** {banned}"
return text
async def unparse_rights(title: str, rights: str) -> str:
text = f"**{title}**"
for l in rights.split('\n'):
splat = l.split(':')
text += f"\n **{splat[0]}:** `{':'.join(splat[1:])}`"
return text
async def resolve_channel(client: UserBotClient,
channel: types.ChannelFull) -> str:
text = ''
default_banned_rights = None
banned_rights = None
admin_rights = None
channel_type = "Channel"
for c in channel.chats:
if c.id == channel.full_chat.id:
if c.megagroup:
channel_type = "Megagroup"
admin_rights = c.admin_rights
banned_rights = c.banned_rights
default_banned_rights = c.default_banned_rights
break
text += f"\n**{channel_type} ID:** `{channel.full_chat.id}`"
info = await get_entity_info(channel)
text += await unparse_info(client, *info)
if admin_rights:
parsed = await parse_admin_rights(admin_rights)
unparsed = await unparse_rights("Admin rights:", parsed)
text += f"\n{unparsed}"
if banned_rights:
parsed = await parse_banned_rights(banned_rights)
unparsed = await unparse_rights("Banned rights:", parsed)
text += f"\n{unparsed}"
if default_banned_rights:
parsed = await parse_banned_rights(default_banned_rights)
unparsed = await unparse_rights("Default banned rights:", parsed)
text += f"\n{unparsed}"
return text
async def resolve_chat(client: UserBotClient, chat: types.ChatFull) -> str:
text = f"\n**Chat ID:** `{chat.full_chat.id}``"
info = await get_entity_info(chat)
text += await unparse_info(client, *info)
admin_rights = None
default_banned_rights = None
for c in chat.chats:
if c.id == chat.full_chat.id:
admin_rights = c.admin_rights
default_banned_rights = c.default_banned_rights
break
if admin_rights:
parsed = await parse_admin_rights(admin_rights)
unparsed = await unparse_rights("Admin rights:", parsed)
text += f"\n{unparsed}"
if default_banned_rights:
parsed = await parse_banned_rights(default_banned_rights)
unparsed = await unparse_rights("Default banned rights:", parsed)
text += f"\n{unparsed}"
return text
| 2.140625 | 2 |
gym-multilayerthinfilm/utils.py | HarryTheBird/gym-multilayerthinfilm | 10 | 6510 | <filename>gym-multilayerthinfilm/utils.py
import numpy as np
def get_n_from_txt(filepath, points=None, lambda_min=400, lambda_max=700, complex_n=True):
ntxt = np.loadtxt(filepath)
if np.min(np.abs(ntxt[:, 0] - lambda_min)) > 25 or np.min(np.abs(ntxt[:, 0] - lambda_max)) > 25:
print('No measurement data for refractive indices is available within 25 nm in \n' + filepath)
if points is None:
points = lambda_max - lambda_min + 1
idxmin = np.argmin(np.abs(ntxt[:, 0] - lambda_min))
idxmax = np.argmin(np.abs(ntxt[:, 0] - lambda_max))
if idxmax == idxmin:
if complex_n:
indicies = np.vectorize(complex)(np.array([ntxt[idxmin, 1]]), np.array([ntxt[idxmin, 2]]))
else:
indicies = np.array([ntxt[idxmin, 1]])
else:
xp = ntxt[idxmin:idxmax, 0]
fpn = ntxt[idxmin:idxmax, 1]
n = np.interp(np.linspace(lambda_min, lambda_max, points), xp, fpn)
if complex_n:
fpk = ntxt[idxmin:idxmax, 2].squeeze()
k = np.interp(np.linspace(lambda_min, lambda_max, points), xp, fpk)
indicies = np.vectorize(complex)(n, k)
else:
indicies = n
return indicies
def get_N(path_list, lambda_min, lambda_max, points=None, complex_n=False):
n = []
for path in path_list:
n.append(get_n_from_txt(path, points, lambda_min=lambda_min, lambda_max=lambda_max, complex_n=complex_n))
return np.vstack((n))
| 2.421875 | 2 |
pyrocco/__init__.py | joaopalmeiro/pyrocco | 0 | 6511 | __package_name__ = "pyrocco"
__version__ = "0.1.0"
__author__ = "<NAME>"
__author_email__ = "<EMAIL>"
__description__ = "A Python CLI to add the Party Parrot to a custom background image."
__url__ = "https://github.com/joaopalmeiro/pyrocco"
| 1.023438 | 1 |
2020/day08/machine.py | ingjrs01/adventofcode | 0 | 6512 | <reponame>ingjrs01/adventofcode<gh_stars>0
class Machine():
def __init__(self):
self.pointer = 0
self.accum = 0
self.visited = []
def run(self,program):
salir = False
while (salir == False):
if (self.pointer in self.visited):
return False
if (self.pointer >= len(program)):
return True
self.visited.append(self.pointer)
incremento = 1
if (program[self.pointer][0] == "acc"):
self.accum += program[self.pointer][1]
if (program[self.pointer][0] == "jmp"):
incremento = program[self.pointer][1]
self.pointer += incremento
return True
def getVisited(self):
return self.visited
def getAccum(self):
return self.accum
| 3.296875 | 3 |
EduData/Task/__init__.py | BAOOOOOM/EduData | 98 | 6513 | # coding: utf-8
# 2019/8/23 @ tongshiwei
| 0.824219 | 1 |
010-round.py | richardvecsey/python-basics | 3 | 6514 | <filename>010-round.py
"""
Round a number
--------------
Input (float) A floating point number
(int) Number of decimals
Default value is: 0
Output (float) Rounded number
(int) Whether using the default decimals value, the return number
will be the nearest integer
"""
number = 103.14159
# Rounding with 2 decimals
number_rounded = round(number, 2)
print('Rounding with 2 decimals')
print('original number: {}, rounded: {}, type of rounded: {}'
.format(number, number_rounded, type(number_rounded)))
# Rounding with -2 decimals
number_rounded = round(number, -2)
print('\nRounding with -2 decimals')
print('original number: {}, rounded: {}, type of rounded: {}'
.format(number, number_rounded, type(number_rounded)))
# Rounding with 0 decimals
number_rounded = round(number, 0)
print('\nRounding with 0 decimals')
print('original number: {}, rounded: {}, type of rounded: {}'
.format(number, number_rounded, type(number_rounded)))
# Rounding with default
# Result will be integer (!)
number_rounded = round(number)
print('\nRounding with default')
print('original number: {}, rounded: {}, type of rounded: {}'
.format(number, number_rounded, type(number_rounded)))
| 4.4375 | 4 |
service.py | Kleist/MusicPlayer | 1 | 6515 | <filename>service.py
#!/usr/bin/env python3
import RPi.GPIO as GPIO
from mfrc522 import SimpleMFRC522
import play
import time
class TagPlayer(object):
def __init__(self):
self._current = None
self.reader = SimpleMFRC522()
self._failed = 0
def step(self):
id, text = self.reader.read_no_block()
print(id,text)
if id:
self._failed = 0
if text != self._current:
stripped_text = text.strip()
print("Read text: \"{}\"".format(stripped_text))
play.play(stripped_text)
self._current = text
elif self._current:
self._failed += 1
if self._failed > 2:
self._current = None
print("Stopping")
play.stop()
time.sleep(1)
def main():
try:
player = TagPlayer()
while 1:
player.step()
finally:
GPIO.cleanup()
if __name__ == "__main__":
main()
| 2.890625 | 3 |
mypy/defaults.py | ckanesan/mypy | 0 | 6516 | <gh_stars>0
import os
MYPY = False
if MYPY:
from typing_extensions import Final
PYTHON2_VERSION = (2, 7) # type: Final
PYTHON3_VERSION = (3, 6) # type: Final
PYTHON3_VERSION_MIN = (3, 4) # type: Final
CACHE_DIR = '.mypy_cache' # type: Final
CONFIG_FILE = 'mypy.ini' # type: Final
SHARED_CONFIG_FILES = ['setup.cfg', ] # type: Final
USER_CONFIG_FILES = ['~/.config/mypy/config', '~/.mypy.ini', ] # type: Final
if os.environ.get('XDG_CONFIG_HOME'):
USER_CONFIG_FILES.insert(0, os.path.join(os.environ['XDG_CONFIG_HOME'], 'mypy/config'))
CONFIG_FILES = [CONFIG_FILE, ] + SHARED_CONFIG_FILES + USER_CONFIG_FILES # type: Final
# This must include all reporters defined in mypy.report. This is defined here
# to make reporter names available without importing mypy.report -- this speeds
# up startup.
REPORTER_NAMES = ['linecount',
'any-exprs',
'linecoverage',
'memory-xml',
'cobertura-xml',
'xml',
'xslt-html',
'xslt-txt',
'html',
'txt'] # type: Final
| 1.8125 | 2 |
skcriteria/preprocessing/push_negatives.py | elcolie/scikit-criteria | 0 | 6517 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# License: BSD-3 (https://tldrlegal.com/license/bsd-3-clause-license-(revised))
# Copyright (c) 2016-2021, <NAME>; Luczywo, Nadia
# All rights reserved.
# =============================================================================
# DOCS
# =============================================================================
"""Functionalities for remove negatives from criteria.
In addition to the main functionality, an MCDA agnostic function is offered
to push negatives values on an array along an arbitrary axis.
"""
# =============================================================================
# IMPORTS
# =============================================================================
import numpy as np
from ..core import SKCMatrixAndWeightTransformerABC
from ..utils import doc_inherit
# =============================================================================
# FUNCTIONS
# =============================================================================
def push_negatives(arr, axis):
r"""Increment the array until all the valuer are sean >= 0.
If an array has negative values this function increment the values
proportionally to made all the array positive along an axis.
.. math::
\overline{X}_{ij} =
\begin{cases}
X_{ij} + min_{X_{ij}} & \text{if } X_{ij} < 0\\
X_{ij} & \text{otherwise}
\end{cases}
Parameters
----------
arr: :py:class:`numpy.ndarray` like.
An array with values
axis : :py:class:`int` optional
Axis along which to operate. By default, flattened input is used.
Returns
-------
:py:class:`numpy.ndarray`
array with all values >= 0.
Examples
--------
.. code-block:: pycon
>>> from skcriteria.preprocess import push_negatives
>>> mtx = [[1, 2], [3, 4]]
>>> mtx_lt0 = [[-1, 2], [3, 4]] # has a negative value
>>> push_negatives(mtx) # array without negatives don't be affected
array([[1, 2],
[3, 4]])
# the whole array is incremented by 1 to eliminate the negative
>>> push_negatives(mtx_lt0)
array([[0, 3],
[4, 5]])
# by column only the first one (with the negative value) is affected
>>> push_negatives(mtx_lt0, axis=0)
array([[0, 2],
[4, 4]])
# by row only the first row (with the negative value) is affected
>>> push_negatives(mtx_lt0, axis=1)
array([[0, 3],
[3, 4]])
"""
arr = np.asarray(arr)
mins = np.min(arr, axis=axis, keepdims=True)
delta = (mins < 0) * mins
return arr - delta
class PushNegatives(SKCMatrixAndWeightTransformerABC):
r"""Increment the matrix/weights until all the valuer are sean >= 0.
If the matrix/weights has negative values this function increment the
values proportionally to made all the matrix/weights positive along an
axis.
.. math::
\overline{X}_{ij} =
\begin{cases}
X_{ij} + min_{X_{ij}} & \text{if } X_{ij} < 0\\
X_{ij} & \text{otherwise}
\end{cases}
"""
@doc_inherit(SKCMatrixAndWeightTransformerABC._transform_weights)
def _transform_weights(self, weights):
return push_negatives(weights, axis=None)
@doc_inherit(SKCMatrixAndWeightTransformerABC._transform_matrix)
def _transform_matrix(self, matrix):
return push_negatives(matrix, axis=0)
| 2.03125 | 2 |
ingenico/direct/sdk/domain/customer_token.py | Ingenico/direct-sdk-python3 | 0 | 6518 | # -*- coding: utf-8 -*-
#
# This class was auto-generated from the API references found at
# https://support.direct.ingenico.com/documentation/api/reference/
#
from ingenico.direct.sdk.data_object import DataObject
from ingenico.direct.sdk.domain.address import Address
from ingenico.direct.sdk.domain.company_information import CompanyInformation
from ingenico.direct.sdk.domain.personal_information_token import PersonalInformationToken
class CustomerToken(DataObject):
__billing_address = None
__company_information = None
__personal_information = None
@property
def billing_address(self) -> Address:
"""
| Object containing billing address details
Type: :class:`ingenico.direct.sdk.domain.address.Address`
"""
return self.__billing_address
@billing_address.setter
def billing_address(self, value: Address):
self.__billing_address = value
@property
def company_information(self) -> CompanyInformation:
"""
| Object containing company information
Type: :class:`ingenico.direct.sdk.domain.company_information.CompanyInformation`
"""
return self.__company_information
@company_information.setter
def company_information(self, value: CompanyInformation):
self.__company_information = value
@property
def personal_information(self) -> PersonalInformationToken:
"""
Type: :class:`ingenico.direct.sdk.domain.personal_information_token.PersonalInformationToken`
"""
return self.__personal_information
@personal_information.setter
def personal_information(self, value: PersonalInformationToken):
self.__personal_information = value
def to_dictionary(self):
dictionary = super(CustomerToken, self).to_dictionary()
if self.billing_address is not None:
dictionary['billingAddress'] = self.billing_address.to_dictionary()
if self.company_information is not None:
dictionary['companyInformation'] = self.company_information.to_dictionary()
if self.personal_information is not None:
dictionary['personalInformation'] = self.personal_information.to_dictionary()
return dictionary
def from_dictionary(self, dictionary):
super(CustomerToken, self).from_dictionary(dictionary)
if 'billingAddress' in dictionary:
if not isinstance(dictionary['billingAddress'], dict):
raise TypeError('value \'{}\' is not a dictionary'.format(dictionary['billingAddress']))
value = Address()
self.billing_address = value.from_dictionary(dictionary['billingAddress'])
if 'companyInformation' in dictionary:
if not isinstance(dictionary['companyInformation'], dict):
raise TypeError('value \'{}\' is not a dictionary'.format(dictionary['companyInformation']))
value = CompanyInformation()
self.company_information = value.from_dictionary(dictionary['companyInformation'])
if 'personalInformation' in dictionary:
if not isinstance(dictionary['personalInformation'], dict):
raise TypeError('value \'{}\' is not a dictionary'.format(dictionary['personalInformation']))
value = PersonalInformationToken()
self.personal_information = value.from_dictionary(dictionary['personalInformation'])
return self
| 2.265625 | 2 |
inserter.py | pirate/macOS-global-autocomplete | 23 | 6519 | import time
import pykeyboard
# TODO: Replace the following two lines with code that activates the target application.
print('Activate the application within 3 seconds.')
time.sleep(3)
k = pykeyboard.PyKeyboard()
k.press_key(k.left_key)
time.sleep(1) # Hold down left key for 1 second.
k.release_key(k.left_key)
| 3.015625 | 3 |
tools/corpora.py | EleutherAI/megatron-3d | 3 | 6520 | import os
import tarfile
from abc import ABC, abstractmethod
from glob import glob
import shutil
import random
import zstandard
"""
This registry is for automatically downloading and extracting datasets.
To register a class you need to inherit the DataDownloader class, provide name, filetype and url attributes, and
(optionally) provide download / extract / exists / tokenize functions to check if the data exists, and, if it doesn't, download,
extract and tokenize the data into the correct directory.
When done, add it to the DATA_DOWNLOADERS dict. The function process_data runs the pre-processing for the selected
dataset.
"""
DATA_DIR = os.environ.get('DATA_DIR', './data')
GPT2_VOCAB_FP = f"{DATA_DIR}/gpt2-vocab.json"
GPT2_VOCAB_URL = "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-vocab.json"
GPT2_MERGE_FP = f"{DATA_DIR}/gpt2-merges.txt"
GPT2_MERGE_URL = "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-merges.txt"
class DataDownloader(ABC):
"""Dataset registry class to automatically download / extract datasets"""
@property
def base_dir(self):
"""base data directory"""
return DATA_DIR
@property
@abstractmethod
def name(self):
"""name of dataset"""
pass
@property
@abstractmethod
def filetype(self):
"""filetype of dataset"""
pass
@property
@abstractmethod
def url(self):
"""URL from which to download dataset"""
pass
def _extract_tar(self):
self.path = os.path.join(self.base_dir, self.name)
os.makedirs(self.path, exist_ok=True)
tarfile_path = os.path.join(self.base_dir, os.path.basename(self.url))
with tarfile.open(tarfile_path, "r:gz") as dataset_tar:
print(f'Extracting files from {tarfile_path}...')
dataset_tar.extractall(self.path)
def _extract_zstd(self, remove_zstd=True):
self.path = os.path.join(self.base_dir, self.name)
os.makedirs(self.path, exist_ok=True)
zstd_file_path = os.path.join(self.base_dir, os.path.basename(self.url))
with open(zstd_file_path, 'rb') as compressed:
decomp = zstandard.ZstdDecompressor()
output_path = zstd_file_path.replace(".zst", "")
with open(output_path, 'wb') as destination:
decomp.copy_stream(compressed, destination)
if remove_zstd:
os.remove(zstd_file_path)
return output_path
def extract(self):
"""extracts dataset and moves to the correct data dir if necessary"""
self._extract_tar()
def exists(self):
"""Checks if the dataset is present"""
return os.path.isdir(f"{self.base_dir}/{self.name}")
def download(self):
"""downloads dataset"""
os.makedirs(self.base_dir, exist_ok=True)
os.system(f"wget {self.url} -O {os.path.join(self.base_dir, os.path.basename(self.url))}")
def tokenize(self):
parent_folder = os.path.join(self.base_dir, self.name)
jsonl_filepath = os.path.join(parent_folder, os.path.basename(self.url)).replace(".zst", "")
assert jsonl_filepath.endswith(".jsonl")
os.system(f"python tools/preprocess_data.py \
--input {jsonl_filepath} \
--output-prefix {parent_folder}/{self.name} \
--vocab {GPT2_VOCAB_FP} \
--dataset-impl mmap \
--tokenizer-type GPT2BPETokenizer \
--merge-file {GPT2_MERGE_FP} \
--append-eod")
def prepare(self):
if not self.exists():
self.download()
self.extract()
self.tokenize()
class Enron(DataDownloader):
name = "enron"
filetype = "jsonl.zst"
url = "http://eaidata.bmk.sh/data/enron_emails.jsonl.zst"
seed = 1
def exists(self):
self.path = os.path.join(self.base_dir, self.name)
return os.path.isfile(os.path.join(self.path, os.path.basename(self.url).replace(".zst", "")))
def extract(self, remove_zstd=True):
self._extract_zstd(remove_zstd=remove_zstd)
shutil.move(os.path.join(self.base_dir, os.path.basename(self.url).replace(".zst", "")), os.path.join(self.base_dir, self.name))
def maybe_download_gpt2_tokenizer_data():
if not os.path.isfile(GPT2_VOCAB_FP):
os.system(f'wget {GPT2_VOCAB_URL} -O {GPT2_VOCAB_FP}')
if not os.path.isfile(GPT2_MERGE_FP):
os.system(f'wget {GPT2_MERGE_URL} -O {GPT2_MERGE_FP}')
DATA_DOWNLOADERS = {
"enron": Enron
}
def prepare_dataset(dataset_name):
os.makedirs(DATA_DIR, exist_ok=True)
maybe_download_gpt2_tokenizer_data()
DownloaderClass = DATA_DOWNLOADERS.get(dataset_name, None)
if DownloaderClass is None:
raise NotImplementedError
else:
d = DownloaderClass()
d.prepare()
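# Illustrative usage sketch (not part of the original module): running this file
# directly could prepare the registered "enron" corpus end to end. A new corpus
# would be registered by subclassing DataDownloader (providing name / filetype /
# url attributes, overriding extract/exists as Enron does for .zst files) and
# adding it to DATA_DOWNLOADERS; any such extra names or URLs are hypothetical.
if __name__ == "__main__":
    prepare_dataset("enron")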
| 3.359375 | 3 |
othello_rl/qlearning/qlearning.py | aka256/othello-rl | 0 | 6521 | <filename>othello_rl/qlearning/qlearning.py
from logging import getLogger
logger = getLogger(__name__)
class QLearning:
"""
  Class for Q-learning.
  Attributes
  ----------
  alpha : float
    Learning rate α
  gamma : float
    Discount factor γ
  data : dict
    Dictionary used to store the Q-values learned by Q-learning
  init_value : float
    Initial value for entries of data
"""
def __init__(self, alpha: float, gamma: float, data: dict = {}, init_value: float = 0) -> None:
self.alpha = alpha
self.gamma = gamma
self.data = data
self.init_value = init_value
def get(self, s: int, a: int) -> float:
"""
    Get a value from data.
    Parameters
    ----------
    s : int
      State
    a : int
      Action
    Returns
    -------
    value : float
      Q-value, Q(s, a)
"""
return self.data.get((s, a), self.init_value)
def __set(self, s: int, a: int, value: float) -> None:
"""
    Assign a value into data.
    Parameters
    ----------
    s : int
      State
    a : int
      Action
    value : float
      Q-value to assign, Q(s, a)
"""
self.data[(s, a)] = value
def update(self, s: int, a: int, r: float, q: float, *q_old: float) -> float:
"""
    Update the Q-value.
    Parameters
    ----------
    s : int
      State
    a : int
      Action
    r : float
      Reward
    q : float
      Q(s_t+1, a)
    q_old : float
      Q(s, a)
    Returns
    -------
    q_new : float
      The updated Q-value
"""
if len(q_old) == 0:
q_old = self.get(s, a)
else:
q_old = q_old[0]
#print('alpha:{}, q_old:{}, r:{}, gamma:{}, q:{}'.format(self.alpha, q_old, r, self.gamma, q))
q_new = (1-self.alpha)*q_old+self.alpha*(r + self.gamma*q)
self.__set(s, a, q_new)
return q_new
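# Minimal usage sketch (illustrative only, not part of the original module):
# states and actions are plain ints, so a single tabular update looks like this.
if __name__ == '__main__':
  ql = QLearning(alpha=0.1, gamma=0.9)
  # reward 1.0 observed for (s=0, a=2); best next-state Q-value assumed to be 0.5
  new_q = ql.update(0, 2, 1.0, 0.5)
  print(new_q)         # (1 - 0.1) * 0 + 0.1 * (1.0 + 0.9 * 0.5) = 0.145
  print(ql.get(0, 2))  # the same value read back from the table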
| 3.109375 | 3 |
SearchService/test/unit/test_solr_interface.py | loftwah/appscale | 790 | 6522 | <gh_stars>100-1000
#!/usr/bin/env python
import os
import json
import sys
import unittest
import urllib2
from flexmock import flexmock
sys.path.append(os.path.join(os.path.dirname(__file__), "../../"))
import solr_interface
import search_exceptions
class FakeSolrDoc():
def __init__(self):
self.fields = []
class FakeDocument():
INDEX_NAME = "indexname"
INDEX_LOCALE = "indexlocale"
def __init__(self):
self.fields = []
self.id = "id"
self.language = "lang"
class FakeSchema():
def __init__(self):
self.fields = []
class FakeIndex():
def __init__(self):
self.name = "name"
self.schema = FakeSchema()
class FakeIndexSpec():
def __init__(self):
pass
def namespace(self):
return 'ns'
def name(self):
return self.name
class FakeUpdate():
def __init__(self, name, field_type):
self.name = name
self.field_type = field_type
class FakeConnection():
def __init__(self, is_good_code):
self.code = 200
if not is_good_code:
self.code = 500
def getcode(self):
return self.code
class TestSolrInterface(unittest.TestCase):
"""
A set of test cases for the solr interface module.
"""
def test_get_index_adapter(self):
appscale_info = flexmock()
appscale_info.should_receive("get_search_location").\
and_return("somelocation")
solr = solr_interface.Solr()
solr = flexmock(solr)
flexmock(solr_interface)
solr_interface.should_receive("get_index_name").and_return("index_ns_name")
flexmock(urllib2)
urllib2.should_receive("urlopen").and_return(FakeConnection(False))
self.assertRaises(search_exceptions.InternalError,
solr._get_index_adapter, "app_id", "ns", "name")
# Test the case of ValueError on a json.load.
urllib2.should_receive("urlopen").and_return(FakeConnection(True))
flexmock(json)
json.should_receive("load").and_raise(ValueError)
self.assertRaises(search_exceptions.InternalError,
solr._get_index_adapter, "app_id", "ns", "name")
# Test a bad status from SOLR.
dictionary = {'responseHeader':{'status': 1}}
json.should_receive("load").and_return(dictionary)
self.assertRaises(search_exceptions.InternalError,
solr._get_index_adapter, "app_id", "ns", "name")
fields = [{'name':"index_ns_name_"}]
dictionary = {'responseHeader':{'status': 0}, "fields": fields}
json.should_receive("load").and_return(dictionary)
index = solr._get_index_adapter("app_id", "ns", "name")
self.assertEquals(index.schema[0]['name'], "index_ns_name_")
def test_update_schema(self):
appscale_info = flexmock()
appscale_info.should_receive("get_search_location").\
and_return("somelocation")
solr = solr_interface.Solr()
flexmock(urllib2)
urllib2.should_receive("urlopen").and_return(FakeConnection(False))
updates = []
self.assertRaises(search_exceptions.InternalError,
solr.update_schema, updates)
updates = [{'name': 'name1', 'type':'type1'}]
flexmock(json)
json.should_receive("load").and_raise(ValueError)
urllib2.should_receive("urlopen").and_return(FakeConnection(True))
self.assertRaises(search_exceptions.InternalError,
solr.update_schema, updates)
dictionary = {"responseHeader":{"status":1}}
json.should_receive("load").and_return(dictionary)
self.assertRaises(search_exceptions.InternalError,
solr.update_schema, updates)
dictionary = {"responseHeader":{"status":0}}
json.should_receive("load").and_return(dictionary)
solr.update_schema(updates)
def test_to_solr_hash_map(self):
appscale_info = flexmock()
appscale_info.should_receive("get_search_location").\
and_return("somelocation")
solr = solr_interface.Solr()
self.assertNotEqual(solr.to_solr_hash_map(FakeIndex(), FakeDocument()), {})
def test_commit_update(self):
appscale_info = flexmock()
appscale_info.should_receive("get_search_location").\
and_return("somelocation")
solr = solr_interface.Solr()
flexmock(json)
json.should_receive("loads").and_return({})
flexmock(urllib2)
urllib2.should_receive("urlopen").and_return(FakeConnection(False))
self.assertRaises(search_exceptions.InternalError, solr.commit_update, {})
json.should_receive("load").and_raise(ValueError)
urllib2.should_receive("urlopen").and_return(FakeConnection(True))
self.assertRaises(search_exceptions.InternalError, solr.commit_update, {})
dictionary = {'responseHeader':{'status': 1}}
json.should_receive("load").and_return(dictionary).once()
self.assertRaises(search_exceptions.InternalError, solr.commit_update, {})
dictionary = {'responseHeader':{'status': 0}}
json.should_receive("load").and_return(dictionary).once()
solr.commit_update({})
def test_update_document(self):
appscale_info = flexmock()
appscale_info.should_receive("get_search_location").\
and_return("somelocation")
solr = solr_interface.Solr()
solr = flexmock(solr)
solr.should_receive("to_solr_doc").and_return(FakeSolrDoc())
solr.should_receive("_get_index_adapter").and_return(FakeIndex())
solr.should_receive("compute_updates").and_return([])
solr.should_receive("to_solr_hash_map").and_return(None)
solr.should_receive("commit_update").and_return(None)
solr.update_document("app_id", None, FakeIndexSpec())
solr.should_receive("compute_updates").and_return([1,2])
solr.should_receive("update_schema").twice()
solr.update_document("app_id", None, FakeIndexSpec())
solr.should_receive("to_solr_hash_map").and_return(None).once()
solr.update_document("app_id", None, FakeIndexSpec())
def test_json_loads_byteified(self):
json_with_unicode = (
'{"key2": [{"\\u2611": 28, "\\u2616": ["\\u263a"]}, "second", "third"], '
'"key1": "value", '
'"\\u2604": {"\\u2708": "\\u2708"}}'
)
parsed_obj = solr_interface.json_loads_byteified(json_with_unicode)
def walk_and_check_type(obj):
if isinstance(obj, dict):
for key, value in obj.iteritems():
self.assertIsInstance(key, str)
walk_and_check_type(value)
elif isinstance(obj, list):
for value in obj:
walk_and_check_type(value)
else:
self.assertIsInstance(obj, (str, int))
walk_and_check_type(parsed_obj)
self.assertEqual(parsed_obj, {
'key1': 'value',
'key2': [
{'\<KEY>': 28, '\xe2\x98\x96': ['\xe2\x98\xba']},
'second',
'third'
],
'\xe2\x98\x84': {'\xe2\x9c\x88': '\xe2\x9c\x88'}
})
| 2.578125 | 3 |
payabbhi/error.py | ppm-avinder/payabbhi-python | 1 | 6523 | class PayabbhiError(Exception):
def __init__(self, description=None, http_status=None,
field=None):
self.description = description
self.http_status = http_status
self.field = field
self._message = self.error_message()
super(PayabbhiError, self).__init__(self._message)
def error_message(self):
msg = "message: " + self.description
msg = (msg + ", http_code: " + str(self.http_status)) if self.http_status else msg
msg = (msg + ", field: " + self.field) if self.field else msg
return msg + "\n"
class APIError(PayabbhiError):
pass
class APIConnectionError(PayabbhiError):
pass
class AuthenticationError(PayabbhiError):
pass
class InvalidRequestError(PayabbhiError):
pass
class GatewayError(PayabbhiError):
pass
class SignatureVerificationError(PayabbhiError):
pass
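# Illustrative note (not part of the original module): each subclass inherits
# PayabbhiError's message formatting, e.g.
#   err = InvalidRequestError(description="Invalid amount", http_status=400, field="amount")
#   str(err) == "message: Invalid amount, http_code: 400, field: amount\n"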
| 2.75 | 3 |
src/mpu/__init__.py | TsinghuaAI/CPM-2-Pretrain | 54 | 6524 | # coding=utf-8
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Model parallel utility interface."""
from .cross_entropy import vocab_parallel_cross_entropy
from .data import broadcast_data
from .grads import clip_grad_norm
from .initialize import destroy_model_parallel
from .initialize import get_data_parallel_group
from .initialize import get_data_parallel_rank
from .initialize import get_data_parallel_world_size
from .initialize import get_model_parallel_group
from .initialize import get_model_parallel_rank
from .initialize import get_model_parallel_src_rank
from .initialize import get_model_parallel_world_size
from .initialize import initialize_model_parallel
from .initialize import model_parallel_is_initialized
from .layers import ColumnParallelLinear
from .layers import ParallelEmbedding
from .layers import RowParallelLinear
from .layers import VocabParallelEmbedding
from .mappings import copy_to_model_parallel_region
from .mappings import gather_from_model_parallel_region
from .mappings import reduce_from_model_parallel_region
from .mappings import scatter_to_model_parallel_region
from .random import checkpoint
from .random import partition_activations_in_checkpoint
from .random import get_cuda_rng_tracker
from .random import model_parallel_cuda_manual_seed
from .transformer_enc_dec import ParallelTransformer, LayerNorm
| 1.1875 | 1 |
djangosige/apps/cadastro/models/empresa.py | MateusMolina/lunoERP | 0 | 6525 | # -*- coding: utf-8 -*-
import os
from django.db import models
from django.db.models.signals import post_delete
from django.dispatch import receiver
from .base import Pessoa
from djangosige.apps.login.models import Usuario
from djangosige.configs.settings import MEDIA_ROOT
def logo_directory_path(instance, filename):
extension = os.path.splitext(filename)[1]
return 'imagens/empresas/logo_{0}_{1}{2}'.format(instance.nome_razao_social, instance.id, extension)
class Empresa(Pessoa):
logo_file = models.ImageField(
upload_to=logo_directory_path, default='imagens/logo.png', blank=True, null=True)
cnae = models.CharField(max_length=10, blank=True, null=True)
iest = models.CharField(max_length=32, null=True, blank=True)
class Meta:
verbose_name = "Empresa"
@property
def caminho_completo_logo(self):
if self.logo_file.name != 'imagens/logo.png':
return os.path.join(MEDIA_ROOT, self.logo_file.name)
else:
return ''
def save(self, *args, **kwargs):
        # Delete the existing logo if one already exists
try:
obj = Empresa.objects.get(id=self.id)
if obj.logo_file != self.logo_file and obj.logo_file != 'imagens/logo.png':
obj.logo_file.delete(save=False)
except:
pass
super(Empresa, self).save(*args, **kwargs)
def __unicode__(self):
return u'%s' % self.nome_razao_social
def __str__(self):
return u'%s' % self.nome_razao_social
# Delete the logo when the company is deleted
@receiver(post_delete, sender=Empresa)
def logo_post_delete_handler(sender, instance, **kwargs):
    # Do not delete the default image 'logo.png'
if instance.logo_file != 'imagens/logo.png':
instance.logo_file.delete(False)
class MinhaEmpresa(models.Model):
m_empresa = models.ForeignKey(
Empresa, on_delete=models.CASCADE, related_name='minha_empresa', blank=True, null=True)
m_usuario = models.ForeignKey(
Usuario, on_delete=models.CASCADE, related_name='empresa_usuario')
| 1.882813 | 2 |
WDJN/eval/eval.py | silverriver/Stylized_Dialog | 21 | 6526 | import os
from nltk.translate.bleu_score import corpus_bleu
from nltk.translate.bleu_score import SmoothingFunction
import json
from tqdm import tqdm, trange
from random import sample
import numpy as np
import pickle
import argparse
import bert_eval_acc
import svm_eval_acc
smooth = SmoothingFunction()
def eval_bleu(ref, pred):
"""
:param ref: list(list(list(any))), a list of reference sentences, each element of the list is a list of references
:param pred: list(list(any)), a list of predictions
:return: corpus bleu score
"""
return corpus_bleu(ref, pred, smoothing_function=smooth.method1)
def eval_bleu_detail(ref, pred):
"""
:param ref: list(list(list(any))), a list of reference sentences, each element of the list is a list of references
:param pred: list(list(any)), a list of predictions
    :return: corpus BLEU-1, BLEU-2, BLEU-3 and BLEU-4 scores
"""
return corpus_bleu(ref, pred, weights=[1, 0, 0, 0], smoothing_function=smooth.method1),\
corpus_bleu(ref, pred, weights=[0, 1, 0, 0], smoothing_function=smooth.method1), \
corpus_bleu(ref, pred, weights=[0, 0, 1, 0], smoothing_function=smooth.method1), \
corpus_bleu(ref, pred, weights=[0, 0, 0, 1], smoothing_function=smooth.method1)
def count_ngram(hyps_resp, n):
"""
Count the number of unique n-grams
:param hyps_resp: list, a list of responses
:param n: int, n-gram
:return: the number of unique n-grams in hyps_resp
"""
if len(hyps_resp) == 0:
print("ERROR, eval_distinct get empty input")
return
if type(hyps_resp[0]) != list:
print("ERROR, eval_distinct takes in a list of <class 'list'>, get a list of {} instead".format(
type(hyps_resp[0])))
return
ngram = set()
for resp in hyps_resp:
if len(resp) < n:
continue
for i in range(len(resp) - n + 1):
ngram.add(' '.join(resp[i: i + n]))
return len(ngram)
def eval_distinct_detail(hyps_resp):
"""
compute distinct score for the hyps_resp
:param hyps_resp: list, a list of hyps responses
    :return: distinct-1 and distinct-2 scores
"""
if len(hyps_resp) == 0:
print("ERROR, eval_distinct get empty input")
return
if type(hyps_resp[0]) != list:
print("ERROR, eval_distinct takes in a list of <class 'list'>, get a list of {} instead".format(
type(hyps_resp[0])))
return
hyps_resp = [[str(x) for x in l] for l in hyps_resp]
hyps_resp = [(' '.join(i)).split() for i in hyps_resp]
num_tokens = sum([len(i) for i in hyps_resp])
dist1 = count_ngram(hyps_resp, 1) / float(num_tokens)
dist2 = count_ngram(hyps_resp, 2) / float(num_tokens)
return dist1, dist2
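# Worked toy example (illustrative, not part of the original script):
#   eval_distinct_detail([['a', 'b'], ['a', 'b']])
# has 4 tokens, 2 unique unigrams and 1 unique bigram, so it returns (0.5, 0.25).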
def eval_f1(ref, pred):
"""
:param ref: list(list(list(any))), a list of reference sentences, each element of the list is a list of references
:param pred: list(list(any)), a list of predictions
:return: f1 score
"""
assert len(ref) == len(pred) > 0
precisions = []
recalls = []
for i, s in enumerate(pred):
ref_set = set()
for rs in ref[i]:
for w in rs:
ref_set.add(w)
pred_set = set()
for w in s:
pred_set.add(w)
p = 0
for w in s:
if w in ref_set:
p += 1
if len(s) > 0:
p /= len(s)
r = 0
for rs in ref[i]:
for w in rs:
if w in pred_set:
r += 1
tot_l = sum([len(rs) for rs in ref[i]])
if tot_l > 0:
r /= tot_l
precisions.append(p)
recalls.append(r)
precision = sum(precisions) / len(precisions)
recall = sum(recalls) / len(recalls)
return 0.0 if precision == recall == 0 else 2 * precision * recall / (precision + recall)
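# Worked toy example (illustrative, not part of the original script):
#   eval_f1([[['a', 'b', 'c']]], [['a', 'x']])
# gives precision 1/2 (only 'a' overlaps) and recall 1/3, hence F1 = 0.4.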
def calc_metrics_value(task, fn, n_sample=None):
with open(fn) as f:
res = [json.loads(i) for i in f.readlines()]
s0_pred, s0_ref = [], []
s1_pred, s1_ref = [], []
for d in res:
if d['style'] == 0:
s0_ref.append([list(d['resp'])])
s0_pred.append(list(d['pred_style0'][0]))
else:
s1_ref.append([list(d['resp'])])
s1_pred.append(list(d['pred_style1'][0]))
if n_sample:
assert len(s0_ref) >= n_sample
assert len(s1_ref) >= n_sample
sampled_idxs = sample(range(len(s0_ref)), n_sample)
s0_ref = [x for i, x in enumerate(s0_ref) if i in sampled_idxs]
s0_pred = [x for i, x in enumerate(s0_pred) if i in sampled_idxs]
sampled_idxs = sample(range(len(s1_ref)), n_sample)
s1_ref = [x for i, x in enumerate(s1_ref) if i in sampled_idxs]
s1_pred = [x for i, x in enumerate(s1_pred) if i in sampled_idxs]
bleu_s0 = eval_bleu_detail(s0_ref, s0_pred)
bleu_s1 = eval_bleu_detail(s1_ref, s1_pred)
dist_s0 = eval_distinct_detail(s0_pred)
dist_s1 = eval_distinct_detail(s1_pred)
f1_s0 = eval_f1(s0_ref, s0_pred)
f1_s1 = eval_f1(s1_ref, s1_pred)
for k in range(1, 4):
print('%d-gram BLEU:' % k,
's0', bleu_s0[k - 1] * 100,
's1', bleu_s1[k - 1] * 100,
'mean', (bleu_s0[k - 1] + bleu_s1[k - 1]) / 2 * 100)
print('F1:',
's0', f1_s0 * 100, 's1', f1_s1 * 100,
'mean', (f1_s0 + f1_s1) / 2 * 100)
print('Dist:',
's0', dist_s0[1] * 100, 's1', dist_s1[1] * 100,
'mean', (dist_s0[1] + dist_s1[1]) / 2 * 100)
parser = argparse.ArgumentParser()
parser.add_argument('--eval_file_path', help='path of the eval file', required=True)
args = parser.parse_args()
file_path = args.eval_file_path
calc_metrics_value(None, file_path)
print("Evaluating acc results:")
bert_eval_acc.main(file_path)
svm_eval_acc.main(file_path)
| 2.765625 | 3 |
homeassistant/components/unifi/const.py | olbjan/home-assistant-1 | 7 | 6527 | """Constants for the UniFi component."""
import logging
LOGGER = logging.getLogger(__package__)
DOMAIN = "unifi"
CONTROLLER_ID = "{host}-{site}"
CONF_CONTROLLER = "controller"
CONF_SITE_ID = "site"
UNIFI_WIRELESS_CLIENTS = "unifi_wireless_clients"
CONF_ALLOW_BANDWIDTH_SENSORS = "allow_bandwidth_sensors"
CONF_BLOCK_CLIENT = "block_client"
CONF_DETECTION_TIME = "detection_time"
CONF_POE_CLIENTS = "poe_clients"
CONF_TRACK_CLIENTS = "track_clients"
CONF_TRACK_DEVICES = "track_devices"
CONF_TRACK_WIRED_CLIENTS = "track_wired_clients"
CONF_SSID_FILTER = "ssid_filter"
DEFAULT_ALLOW_BANDWIDTH_SENSORS = False
DEFAULT_POE_CLIENTS = True
DEFAULT_TRACK_CLIENTS = True
DEFAULT_TRACK_DEVICES = True
DEFAULT_TRACK_WIRED_CLIENTS = True
DEFAULT_DETECTION_TIME = 300
ATTR_MANUFACTURER = "Ubiquiti Networks"
| 1.78125 | 2 |
coding_intereview/1656. Design an Ordered Stream.py | Jahidul007/Python-Bootcamp | 2 | 6528 | <reponame>Jahidul007/Python-Bootcamp<filename>coding_intereview/1656. Design an Ordered Stream.py
class OrderedStream:
def __init__(self, n: int):
self.data = [None]*n
self.ptr = 0
def insert(self, id: int, value: str) -> List[str]:
id -= 1
self.data[id] = value
if id > self.ptr: return []
while self.ptr < len(self.data) and self.data[self.ptr]: self.ptr += 1
return self.data[id:self.ptr]
# Your OrderedStream object will be instantiated and called as such:
# obj = OrderedStream(n)
# param_1 = obj.insert(id,value)
| 3.90625 | 4 |
python/test/test_tree_dp.py | EQt/treelas | 3 | 6529 | import numpy as np
from treelas import post_order, TreeInstance
def test_demo_3x7_postord():
parent = np.array([0, 4, 5, 0, 3, 4, 7, 8, 5, 6, 7, 8,
9, 14, 17, 12, 15, 16, 19, 16, 17])
po = post_order(parent, include_root=True)
expect = np.array([12, 11, 19, 20, 21, 14, 15, 18, 17, 16, 13,
10, 7, 8, 9, 3, 6, 2, 5, 4, 1], dtype='i4') - 1
assert (po == expect).all()
def test_demo_3x7():
y = np.fromstring("0.62 0.73 0.71 1.5 1.17 0.43 1.08 0.62 " +
"1.73 0.95 1.46 1.6 1.16 0.38 0.9 0.32 " +
"-0.48 0.95 1.08 0.02 0.4", sep=" ")
parent = np.array([0, 4, 5, 0, 3, 4, 7, 8, 5, 6, 7, 8,
9, 14, 17, 12, 15, 16, 19, 16, 17])
lam = 1.0
prob = TreeInstance(y, parent, lam=lam)
assert prob.root == 0
assert prob.parent.dtype == np.int32
prob.solve()
assert abs(prob.x.mean() - prob.y.mean()) < 1e-15
assert len(np.unique(prob.x)) == 2
assert max(np.abs(prob.dual[2:]) - lam) < 1e-12
assert max(np.abs(prob.gamma)) < 1e-15
| 2.296875 | 2 |
lista01/rpc/ex01_cl.py | SD-CC-UFG/leonardo.fleury | 0 | 6530 | <reponame>SD-CC-UFG/leonardo.fleury<gh_stars>0
import xmlrpc.client
def main():
s = xmlrpc.client.ServerProxy('http://localhost:9991')
nome = input("Nome: ")
cargo = input("Cargo (programador, operador): ")
salario = float(input("Salário: "))
print("\n\n{}".format(s.atualiza_salario(nome, cargo, salario)))
if __name__ == '__main__':
main()
| 2.796875 | 3 |
autocomplete/migrations/0001_initial.py | openshift-eng/art-dashboard-server | 1 | 6531 | <gh_stars>1-10
# Generated by Django 3.0.7 on 2020-07-27 19:23
import build.models
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='AutoCompleteRecord',
fields=[
('updated_at', build.models.UnixTimestampField(auto_created=True, null=True)),
('created_at', build.models.UnixTimestampField(auto_created=True, null=True)),
('log_autocomplete_record_id', models.AutoField(primary_key=True, serialize=False)),
('type', models.CharField(max_length=50)),
('value', models.CharField(max_length=300)),
],
options={
'db_table': 'log_autocomplete_record',
},
),
]
| 1.96875 | 2 |
unet3d/config.py | fcollman/pytorch-3dunet | 0 | 6532 | import argparse
import os
import torch
import yaml
DEFAULT_DEVICE = 'cuda:0'
def load_config():
parser = argparse.ArgumentParser(description='UNet3D training')
parser.add_argument('--config', type=str, help='Path to the YAML config file', required=True)
args = parser.parse_args()
config = _load_config_yaml(args.config)
# Get a device to train on
device = config.get('device', DEFAULT_DEVICE)
config['device'] = torch.device(device if torch.cuda.is_available() else "cpu")
return config
def _load_config_yaml(config_file):
return yaml.load(open(config_file, 'r'), Loader=yaml.FullLoader)
| 2.578125 | 3 |
src/graph_transpiler/webdnn/backend/webgl/optimize_rules/simplify_channel_mode_conversion/simplify_channel_mode_conversion.py | gunpowder78/webdnn | 1 | 6533 | <filename>src/graph_transpiler/webdnn/backend/webgl/optimize_rules/simplify_channel_mode_conversion/simplify_channel_mode_conversion.py<gh_stars>1-10
from webdnn.backend.webgl.optimize_rules.simplify_channel_mode_conversion.simplify_nonsense_channel_mode_conversion import \
SimplifyNonsenseChannelModeConversion
from webdnn.backend.webgl.optimize_rules.simplify_channel_mode_conversion.simplify_redundant_channel_mode_conversion import \
SimplifyRedundantChannelModeConversion
from webdnn.graph.optimize_rule import OptimizeRuleGroup
class SimplifyChannelModeConversion(OptimizeRuleGroup):
def __init__(self):
super(SimplifyChannelModeConversion, self).__init__([
SimplifyRedundantChannelModeConversion(),
SimplifyNonsenseChannelModeConversion()
])
| 1.546875 | 2 |
script.video.F4mProxy/lib/flvlib/constants.py | akuala/REPO.KUALA | 105 | 6534 | <filename>script.video.F4mProxy/lib/flvlib/constants.py
"""
The constants used in FLV files and their meanings.
"""
# Tag type
(TAG_TYPE_AUDIO, TAG_TYPE_VIDEO, TAG_TYPE_SCRIPT) = (8, 9, 18)
# Sound format
(SOUND_FORMAT_PCM_PLATFORM_ENDIAN,
SOUND_FORMAT_ADPCM,
SOUND_FORMAT_MP3,
SOUND_FORMAT_PCM_LITTLE_ENDIAN,
SOUND_FORMAT_NELLYMOSER_16KHZ,
SOUND_FORMAT_NELLYMOSER_8KHZ,
SOUND_FORMAT_NELLYMOSER,
SOUND_FORMAT_G711_A_LAW,
SOUND_FORMAT_G711_MU_LAW) = range(9)
(SOUND_FORMAT_AAC,
SOUND_FORMAT_SPEEX) = range(10, 12)
(SOUND_FORMAT_MP3_8KHZ,
SOUND_FORMAT_DEVICE_SPECIFIC) = range(14, 16)
sound_format_to_string = {
SOUND_FORMAT_PCM_PLATFORM_ENDIAN: "Linear PCM, platform endian",
SOUND_FORMAT_ADPCM: "ADPCM",
SOUND_FORMAT_MP3: "MP3",
SOUND_FORMAT_PCM_LITTLE_ENDIAN: "Linear PCM, little endian",
SOUND_FORMAT_NELLYMOSER_16KHZ: "Nellymoser 16-kHz mono",
SOUND_FORMAT_NELLYMOSER_8KHZ: "Nellymoser 8-kHz mono",
SOUND_FORMAT_NELLYMOSER: "Nellymoser",
SOUND_FORMAT_G711_A_LAW: "G.711 A-law logarithmic PCM",
SOUND_FORMAT_G711_MU_LAW: "G.711 mu-law logarithmic PCM",
SOUND_FORMAT_AAC: "AAC",
SOUND_FORMAT_SPEEX: "Speex",
SOUND_FORMAT_MP3_8KHZ: "MP3 8-kHz",
SOUND_FORMAT_DEVICE_SPECIFIC: "Device-specific sound"
}
# Sound rate
(SOUND_RATE_5_5_KHZ,
SOUND_RATE_11_KHZ,
SOUND_RATE_22_KHZ,
SOUND_RATE_44_KHZ) = range(4)
sound_rate_to_string = {
SOUND_RATE_5_5_KHZ: "5.5-kHz",
SOUND_RATE_11_KHZ: "11-kHz",
SOUND_RATE_22_KHZ: "22-kHz",
SOUND_RATE_44_KHZ: "44-kHz"
}
# Sound size
(SOUND_SIZE_8_BIT, SOUND_SIZE_16_BIT) = range(2)
sound_size_to_string = {
SOUND_SIZE_8_BIT: "snd8Bit",
SOUND_SIZE_16_BIT: "snd16Bit"
}
# Sound type
(SOUND_TYPE_MONO, SOUND_TYPE_STEREO) = range(2)
sound_type_to_string = {
SOUND_TYPE_MONO: "sndMono",
SOUND_TYPE_STEREO: "sndStereo"
}
# AAC packet type
(AAC_PACKET_TYPE_SEQUENCE_HEADER,
AAC_PACKET_TYPE_RAW) = range(2)
aac_packet_type_to_string = {
AAC_PACKET_TYPE_SEQUENCE_HEADER: "sequence header",
AAC_PACKET_TYPE_RAW: "raw"
}
# Codec ID
(CODEC_ID_JPEG,
CODEC_ID_H263,
CODEC_ID_SCREEN_VIDEO,
CODEC_ID_VP6,
CODEC_ID_VP6_WITH_ALPHA,
CODEC_ID_SCREEN_VIDEO_V2,
CODEC_ID_H264) = range(1, 8)
codec_id_to_string = {
CODEC_ID_JPEG: "JPEG",
CODEC_ID_H263: "Sorenson H.263",
CODEC_ID_SCREEN_VIDEO: "Screen video",
CODEC_ID_VP6: "On2 VP6",
CODEC_ID_VP6_WITH_ALPHA: "On2 VP6 with alpha channel",
CODEC_ID_SCREEN_VIDEO_V2: "Screen video version 2",
CODEC_ID_H264: "H.264"
}
# Frame type
(FRAME_TYPE_KEYFRAME,
FRAME_TYPE_INTERFRAME,
FRAME_TYPE_DISPOSABLE_INTERFRAME,
FRAME_TYPE_GENERATED_KEYFRAME,
FRAME_TYPE_INFO_FRAME) = range(1, 6)
frame_type_to_string = {
FRAME_TYPE_KEYFRAME: "keyframe",
FRAME_TYPE_INTERFRAME: "interframe",
FRAME_TYPE_DISPOSABLE_INTERFRAME: "disposable interframe",
FRAME_TYPE_GENERATED_KEYFRAME: "generated keyframe",
FRAME_TYPE_INFO_FRAME: "video info/command frame"
}
# H.264 packet type
(H264_PACKET_TYPE_SEQUENCE_HEADER,
H264_PACKET_TYPE_NALU,
H264_PACKET_TYPE_END_OF_SEQUENCE) = range(3)
h264_packet_type_to_string = {
H264_PACKET_TYPE_SEQUENCE_HEADER: "sequence header",
H264_PACKET_TYPE_NALU: "NAL unit",
H264_PACKET_TYPE_END_OF_SEQUENCE: "sequence end"
}
# Value type
(VALUE_TYPE_NUMBER,
VALUE_TYPE_BOOLEAN,
VALUE_TYPE_STRING,
VALUE_TYPE_OBJECT,
VALUE_TYPE_MOVIECLIP,
VALUE_TYPE_NULL,
VALUE_TYPE_UNDEFINED,
VALUE_TYPE_REFERENCE,
VALUE_TYPE_ECMA_ARRAY) = range(9)
(VALUE_TYPE_STRICT_ARRAY,
VALUE_TYPE_DATE,
VALUE_TYPE_LONGSTRING) = range(10, 13)
value_type_to_string = {
VALUE_TYPE_NUMBER: 'Number',
VALUE_TYPE_BOOLEAN: 'Boolean',
VALUE_TYPE_STRING: 'String',
VALUE_TYPE_OBJECT: 'Object',
VALUE_TYPE_MOVIECLIP: 'MovieClip',
VALUE_TYPE_NULL: 'Null',
VALUE_TYPE_UNDEFINED: 'Undefined',
VALUE_TYPE_REFERENCE: 'Reference',
VALUE_TYPE_ECMA_ARRAY: 'ECMA Array',
VALUE_TYPE_STRICT_ARRAY: 'Strict Array',
VALUE_TYPE_DATE: 'Date',
VALUE_TYPE_LONGSTRING: 'Longstring'
}
| 2.09375 | 2 |
A2/semcor_chunk.py | Rogerwlk/Natural-Language-Processing | 0 | 6535 | <filename>A2/semcor_chunk.py
from nltk.corpus import semcor
from nltk.corpus import wordnet as wn
class semcor_chunk:
def __init__(self, chunk):
self.chunk = chunk
#returns the synset if applicable, otherwise returns None
def get_syn_set(self):
try:
synset = self.chunk.label().synset()
return synset
except AttributeError:
try:
synset = wn.synset(self.chunk.label())
return synset
except:
return None
#returns a list of the words in the chunk
def get_words(self):
try:
return self.chunk.leaves()
except AttributeError:
return self.chunk
# if __name__ == "__main__":
# s = semcor.tagged_sents(tag='sem')[0]
# for chunk in s:
# a = semcor_chunk(chunk)
# print a.get_syn_set()
# for chunk in s:
# a = semcor_chunk(chunk)
# print a.get_words() | 3.015625 | 3 |
gnn_model.py | thoang3/graph_neural_network_benchmark | 0 | 6536 | import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from load_cora import load_cora
from baseline_model import create_ffn
from utils import run_experiment
from utils import display_learning_curves
# Graph convolution layer
class GraphConvLayer(layers.Layer):
def __init__(
self,
hidden_units,
dropout_rate=0.2,
aggregation_type="mean",
combination_type="concat",
normalize=False,
*args,
**kwargs
):
super(GraphConvLayer, self).__init__(*args, **kwargs)
self._aggregation_type = aggregation_type
self._combination_type = combination_type
self._normalize = normalize
self._ffn_prepare = create_ffn(hidden_units, dropout_rate)
if self._combination_type == "gated":
self._update_fn = layers.GRU(
units=hidden_units,
activation="tanh",
recurrent_activation="sigmoid",
dropout=dropout_rate,
return_state=True,
recurrent_dropout=dropout_rate
)
else:
self._update_fn = create_ffn(hidden_units, dropout_rate)
def _prepare(self, node_representations, weights=None):
# node_representations shape is [num_edges, embedding_dim]
messages = self._ffn_prepare(node_representations)
if weights is not None:
messages = messages * tf.expand_dims(weights, -1)
return messages
def _aggregate(self, node_indices, neighbour_messages):
# node_indices shape is [num_edges]
# neighbour_messages shape: [num_edges, representation_dim]
num_nodes = tf.math.reduce_max(node_indices) + 1
if self._aggregation_type == "sum":
aggregated_message = tf.math.unsorted_segment_sum(
neighbour_messages,
node_indices,
num_segments=num_nodes
)
elif self._aggregation_type == "mean":
aggregated_message = tf.math.unsorted_segment_mean(
neighbour_messages,
node_indices,
num_segments=num_nodes
)
elif self._aggregation_type == "max":
aggregated_message = tf.math.unsorted_segment_max(
neighbour_messages,
node_indices,
num_segments=num_nodes
)
else:
raise ValueError(f"Invalid aggregation type: {self._aggregation_type}.")
return aggregated_message
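    # Illustrative note (not from the original code): with
    # neighbour_messages = [[1.], [2.], [3.], [4.]] and node_indices = [0, 0, 1, 1],
    # "sum" aggregation returns [[3.], [7.]] -- one aggregated row per target node.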
def _update(self, node_representations, aggregated_messages):
# node_representations shape is [num_nodes, representation_dim]
# aggregated_messages shape is [num_nodes, representation_dim]
if self._combination_type == "gru":
# Create a sequence of two elements for the GRU layer
      h = tf.stack([node_representations, aggregated_messages], axis=1)
elif self._combination_type == "concat":
# Concatenate the node_representations and aggregated_messages
h = tf.concat([node_representations, aggregated_messages], axis=1)
elif self._combination_type == "add":
# Add node_representations and aggregated_messages
h = node_representations + aggregated_messages
else:
raise ValueError(f"Invalid combination type: {self._combinatino_type}.")
# Apply the processing function
node_embeddings = self._update_fn(h)
if self._combination_type == "gru":
node_embeddings = tf.unstack(node_embeddings, axis=1)[-1]
if self._normalize:
node_embeddings = tf.nn.l2_normalize(node_embeddings, axis=-1)
return node_embeddings
def call(self, inputs):
"""Process the inputs to produce the node_embeddings.
Args:
Inputs:
A tuple of three elements: node_representations, edges, edge_weights.
Returns:
node_embeddings of shape [num_nodes, representation_dim].
"""
node_representations, edges, edge_weights = inputs
# Get node_indices (source) and neighbour_indices (target) from edges
node_indices, neighbour_indices = edges[0], edges[1]
# neighbour_representations shape is [num_edges, representation_dim]
neighbour_representations = tf.gather(node_representations, neighbour_indices)
# Prepare the messages of the neighbours
neighbour_messages = self._prepare(neighbour_representations, edge_weights)
# Aggregate the neighbour messages
aggregated_messages = self._aggregate(node_indices, neighbour_messages)
# Update the node embedding with the neighbour messages
return self._update(node_representations, aggregated_messages)
class GNNNodeClassifier(tf.keras.Model):
def __init__(
self,
graph_info,
num_classes,
hidden_units,
aggregation_type="sum",
combination_type="concat",
dropout_rate=0.2,
normalize=True,
*args,
**kwargs
):
super(GNNNodeClassifier, self).__init__(*args, **kwargs)
# Unpack graph_info
node_features, edges, edge_weights = graph_info
self._node_features = node_features
self._edges = edges
self._edge_weights = edge_weights
# Set edge_weights to ones if not provided
if self._edge_weights is None:
self._edge_weights = tf.ones(shape=edges.shape[1])
# Scale edge_weights to sum to 1
self._edge_weights = self._edge_weights / tf.math.reduce_sum(self._edge_weights)
# Create a process layer
self._preprocess = create_ffn(hidden_units, dropout_rate, name="preprocess")
# Create the 1st GraphConv layer
self._conv1 = GraphConvLayer(
hidden_units,
dropout_rate,
aggregation_type,
combination_type,
normalize,
name="graph_conv1"
)
# Create the 2nd GraphConv layer
self._conv2 = GraphConvLayer(
hidden_units,
dropout_rate,
aggregation_type,
combination_type,
normalize,
name="graph_conv2"
)
# Create a postprocess layer
self._postprocess = create_ffn(hidden_units, dropout_rate, name="postprocess")
# Create a compute logits layer
self._compute_logits = layers.Dense(units=num_classes, name="logits")
def call(self, input_node_indices):
# Preprocess the node_features to produce node representations
x = self._preprocess(self._node_features)
# Apply the 1st graph conv layer
x1 = self._conv1((x, self._edges, self._edge_weights))
# Skip connection
x = x1 + x
# Apply the 2nd graph conv layer
x2 = self._conv2((x, self._edges, self._edge_weights))
# Skip connection
x = x2 + x
# Postprocess node embedding
x = self._postprocess(x)
# Fetch node embeddings for the input node_indices
node_embeddings = tf.gather(x, input_node_indices)
# Compute logits
return self._compute_logits(node_embeddings)
if __name__ == '__main__':
papers, train_data, test_data, paper_idx, class_idx, citations, feature_names = load_cora(verbose=1)
num_features = len(feature_names)
num_classes = len(class_idx)
hidden_units = [32, 32]
learning_rate = 0.01
dropout_rate = 0.5
epochs = 300
batch_size = 256
# Create an edges array (sparse adjacency matrix) of shape [2, num_edges]
edges = citations[["source", "target"]].to_numpy().T
#print(edges)
# Create an edge weights array of ones (default weights)
edge_weights = tf.ones(shape=edges.shape[1])
# Create a node features array of shape [num_nodes, num_features]
node_features = tf.cast(
papers.sort_values("paper_id")[feature_names].to_numpy(), dtype=tf.float32)
# Create graph info tuple with node_features, edges, and edge_weights
graph_info = (node_features, edges, edge_weights)
print("Edges shape: ", edges.shape)
print("Nodes shape: ", node_features.shape)
gnn_model = GNNNodeClassifier(
graph_info=graph_info,
num_classes=num_classes,
hidden_units=hidden_units,
dropout_rate=dropout_rate,
name="gnn_model"
)
print("GNN output shape: ", gnn_model([1, 10, 100]))
gnn_model.summary()
# Train the GNN model
X_train = train_data.paper_id.to_numpy()
y_train = train_data.subject
history = run_experiment(gnn_model, X_train, y_train, batch_size, epochs, learning_rate)
# Plot the learning curves
display_learning_curves(history, figure_name="gnn.png")
# Evaluate on test data
X_test = test_data.paper_id.to_numpy()
y_test = test_data.subject
_, test_accuracy = gnn_model.evaluate(x=X_test, y=y_test, verbose=1)
print(f"Test accuracy: {round(test_accuracy * 100, 2)}%")
| 2.546875 | 3 |
deps/lib/python3.5/site-packages/netdisco/discoverables/samsung_tv.py | jfarmer08/hassio | 78 | 6537 | """Discover Samsung Smart TV services."""
from . import SSDPDiscoverable
from ..const import ATTR_NAME
# For some models, Samsung forces a [TV] prefix to the user-specified name.
FORCED_NAME_PREFIX = '[TV]'
class Discoverable(SSDPDiscoverable):
"""Add support for discovering Samsung Smart TV services."""
def get_entries(self):
"""Get all the Samsung RemoteControlReceiver entries."""
return self.find_by_st(
"urn:samsung.com:device:RemoteControlReceiver:1")
def info_from_entry(self, entry):
"""Get most important info, by default the description location."""
info = super().info_from_entry(entry)
# Strip the forced prefix, if present
if info[ATTR_NAME].startswith(FORCED_NAME_PREFIX):
info[ATTR_NAME] = info[ATTR_NAME][len(FORCED_NAME_PREFIX):].strip()
return info
| 3.21875 | 3 |
pyecsca/sca/re/__init__.py | scrambler-crypto/pyecsca | 24 | 6538 | <filename>pyecsca/sca/re/__init__.py<gh_stars>10-100
"""Package for reverse-engineering."""
from .rpa import *
| 1.210938 | 1 |
sapmi/employees/migrations/0002_remove_employee_phone_alt.py | Juhanostby/django-apotek-sapmi | 1 | 6539 | <reponame>Juhanostby/django-apotek-sapmi
# Generated by Django 3.2.5 on 2021-12-21 19:42
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('employees', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='employee',
name='phone_alt',
),
]
| 1.429688 | 1 |
src/model/exception/emote_fetch_error.py | konrad2508/kokomi-discord-bot | 2 | 6540 | class EmoteFetchError(Exception):
'''Exception stating that there was a problem while fetching emotes from a source.'''
| 2.03125 | 2 |
src/sim/basicExample/main.py | andremtsilva/dissertacao | 0 | 6541 | <gh_stars>0
"""
This is the most simple scenario with a basic topology, some users and a set of apps with only one service.
@author: <NAME>
"""
import os
import time
import json
import random
import logging.config
import networkx as nx
import numpy as np
from pathlib import Path
from yafs.core import Sim
from yafs.application import create_applications_from_json
from yafs.topology import Topology
from yafs.placement import JSONPlacement
from yafs.path_routing import DeviceSpeedAwareRouting
from yafs.distribution import deterministic_distribution
from yafs.stats import Stats
RANDOM_SEED = 1
def main(stop_time, it):
folder_results = Path("results/")
folder_results.mkdir(parents=True, exist_ok=True)
folder_results = str(folder_results)+"/"
"""
TOPOLOGY
"""
# Fix position of nodes for drawing
random.seed(RANDOM_SEED)
np.random.seed(RANDOM_SEED)
t = Topology()
# You also can create a topology using JSONs files. Check out examples folder
size = 3
t.G = nx.generators.binomial_tree(size) # In NX-lib there are a lot of Graphs generators
# Definition of mandatory attributes of a Topology
## Attr. on edges
# PR (link propagation) and BW (bandwith) are 1 unit
attPR_BW = {x: 1 for x in t.G.edges()}
nx.set_edge_attributes(t.G, name="PR", values=attPR_BW)
nx.set_edge_attributes(t.G, name="BW", values=attPR_BW)
## Attr. on nodes
# IPT
attIPT = {x: random.randrange(100, 900, 100) for x in t.G.nodes()}
nx.set_node_attributes(t.G, name="IPT", values=attIPT)
# nx.write_gexf(t.G,folder_results+"graph_binomial_tree_%i"%size) # you can export the Graph in multiples format to view in tools like Gephi, and so on.
nx.write_graphml(t.G,folder_results+"graph_binomial_tree_%i.graphml"%size)
# Graph visualization
pos = nx.spring_layout(t.G)
nx.draw(t.G, pos, with_labels=True, edge_color='black', width=1,
alpha=0.7)
print(t.G.nodes()) # nodes id can be str or int
print()
print(nx.get_node_attributes(t.G, "IPT"))
print()
"""
APPLICATION or SERVICES
"""
dataApp = json.load(open('data/appDefinition.json'))
apps = create_applications_from_json(dataApp)
# print(apps)
"""
SERVICE PLACEMENT
"""
placementJson = json.load(open('data/allocDefinition.json'))
placement = JSONPlacement(name="Placement", json=placementJson)
"""
Defining ROUTING algorithm to define how path messages in the topology among modules
"""
selectorPath = DeviceSpeedAwareRouting()
"""
SIMULATION ENGINE
"""
s = Sim(t, default_results_path=folder_results+"sim_trace")
"""
Deploy services == APP's modules
"""
for aName in apps.keys():
s.deploy_app(apps[aName], placement, selectorPath) # Note: each app can have a different routing algorithm
"""
Deploy users
"""
userJSON = json.load(open('data/usersDefinition.json'))
for user in userJSON["sources"]:
app_name = user["app"]
app = s.apps[app_name]
msg = app.get_message(user["message"])
node = user["id_resource"]
dist = deterministic_distribution(100, name="Deterministic")
idDES = s.deploy_source(app_name, id_node=node, msg=msg, distribution=dist)
"""
RUNNING - last step
"""
logging.info(" Performing simulation: %i " % it)
s.run(stop_time) # To test deployments put test_initial_deploy a TRUE
s.print_debug_assignaments()
if __name__ == '__main__':
logging.config.fileConfig(os.getcwd() + '/logging.ini')
nIterations = 1 # iteration for each experiment
simulationDuration = 1000
# Iteration for each experiment changing the seed of randoms
for iteration in range(nIterations):
random.seed(iteration)
logging.info("Running experiment it: - %i" % iteration)
start_time = time.time()
main(stop_time=simulationDuration,
it=iteration)
print("\n--- %s seconds ---" % (time.time() - start_time))
print("Simulation Done!")
m = Stats(defaultPath="results/sim_trace")
# print ("\tNetwork bytes transmitted:")
# print (f"\t\t{m.bytes_transmitted():.1f}")
# m.df_link.head(15) # from Stats class
time_loops = [["M.USER.APP.0", "M.USER.APP.1", "M.USER.APP.2",
"M.USER.APP.3"]]
m.showResults2(10000, time_loops=time_loops)
m.compute_times_df()
print ("\t- Network saturation -")
print()
print ("\t\tAverage waiting messages : "
f"{m.average_messages_not_transmitted()}")
print()
print ("\t\tPeak of waiting messages :"
f"{m.peak_messages_not_transmitted()}")
print()
print(f"\t\tShow Loops: {m.showLoops(time_loops)}")
print()
print (f"\t\tTOTAL messages not transmitted:"
f" {m.messages_not_transmitted()}")
print()
#print(m.df.head())
#print(m.df['time_latency'])
#print(m.df_link.head())
print(m.get_df_modules()) | 2.625 | 3 |
Backend/models/risklayerPrognosis.py | dbvis-ukon/coronavis | 15 | 6542 | from db import db
class RisklayerPrognosis(db.Model):
__tablename__ = 'risklayer_prognosis'
datenbestand = db.Column(db.TIMESTAMP, primary_key=True, nullable=False)
prognosis = db.Column(db.Float, nullable=False)
# class RisklayerPrognosisSchema(SQLAlchemyAutoSchema):
# class Meta:
# strict = True
# model = RisklayerPrognosis
#
# timestamp = fields.Timestamp(data_key="datenbestand")
# prognosis = fields.Number(data_key="prognosis")
| 2.375 | 2 |
tests.py | smartfile/django-secureform | 12 | 6543 | <reponame>smartfile/django-secureform
import os
import unittest
os.environ['DJANGO_SETTINGS_MODULE'] = 'settings'
import django
if django.VERSION >= (1, 7):
django.setup()
from django import forms
from django.db import models
from django.forms.forms import NON_FIELD_ERRORS
from django_secureform.forms import SecureForm
def get_form_sname(form, name):
for sname, v in form._secure_field_map.items():
if v and v == name:
return sname
raise KeyError(name)
def get_form_honeypot(form):
for sname, v in form._secure_field_map.items():
if v is None:
return sname
raise Exception('No honeypots found.')
def get_form_secure_data(form):
# We must copy over the security data.
return form._meta.secure_field_name, form[form._meta.secure_field_name].value()
class BasicForm(SecureForm):
name = forms.CharField(required=True, max_length=16)
class FormTestCase(unittest.TestCase):
klass = BasicForm
def setUp(self):
self.form = self.klass()
self.form.secure_data()
def assertIn(self, value, iterable):
self.assertTrue(value in iterable, '%s did not occur in %s' % (value,
iterable))
def getForm(self, **kwargs):
data = dict((get_form_secure_data(self.form), ))
for n, v in kwargs.items():
data[get_form_sname(self.form, n)] = v
return self.klass(data=data)
class BasicTestCase(FormTestCase):
def test_valid(self):
post = self.getForm(name='foobar')
self.assertTrue(post.is_valid())
def test_missing(self):
post = self.getForm()
self.assertFalse(post.is_valid())
self.assertIn('name', post._errors)
def test_replay(self):
post = self.getForm(name='foobar')
post.is_valid()
post = self.getForm(name='foobar')
self.assertFalse(post.is_valid())
self.assertIn(NON_FIELD_ERRORS, post._errors)
self.assertIn('This form has already been submitted.', post._errors[NON_FIELD_ERRORS])
def test_honeypot(self):
honeypot = get_form_honeypot(self.form)
data = dict((get_form_secure_data(self.form), ))
data[honeypot] = 'mmm, hunny!'
data[get_form_sname(self.form, 'name')] = 'foobar'
post = self.klass(data=data)
self.assertFalse(post.is_valid())
self.assertIn(NON_FIELD_ERRORS, post._errors)
self.assertIn('Unexpected value in form field.', post._errors[NON_FIELD_ERRORS])
if __name__ == '__main__':
unittest.main()
| 2.578125 | 3 |
opencv/resizing.py | hackerman-101/Hacktoberfest-2022 | 1 | 6544 | <gh_stars>1-10
import cv2 as cv
import numpy as np
cap = cv.VideoCapture(1)
print(cap.get(cv.CAP_PROP_FRAME_WIDTH))
print(cap.get(cv.CAP_PROP_FRAME_HEIGHT))
cap.set(3,3000)
cap.set(4,3000)
print(cap.get(cv.CAP_PROP_FRAME_WIDTH))
print(cap.get(cv.CAP_PROP_FRAME_HEIGHT))
while (cap.isOpened()):
ret , frame = cap.read()
if (ret == True):
cv.imshow("camVid", frame)
if cv.waitKey(25) & 0xFF == ord('q'):
break
else:
break
cap.release()
cv.destroyAllWindows()
| 2.546875 | 3 |
minibenchmarks/go.py | kevinxucs/pyston | 1 | 6545 | <reponame>kevinxucs/pyston
# from pypy-benchmarks/own/chaos.py, with some minor modifications
# (more output, took out the benchmark harness)
#
import random, math, sys, time
SIZE = 9
GAMES = 200
KOMI = 7.5
EMPTY, WHITE, BLACK = 0, 1, 2
SHOW = {EMPTY: '.', WHITE: 'o', BLACK: 'x'}
PASS = -1
MAXMOVES = SIZE*SIZE*3
TIMESTAMP = 0
MOVES = 0
def to_pos(x,y):
return y * SIZE + x
def to_xy(pos):
y, x = divmod(pos, SIZE)
return x, y
class Square:
def __init__(self, board, pos):
self.board = board
self.pos = pos
self.timestamp = TIMESTAMP
self.removestamp = TIMESTAMP
self.zobrist_strings = [random.randrange(sys.maxint) for i in range(3)]
def set_neighbours(self):
x, y = self.pos % SIZE, self.pos / SIZE;
self.neighbours = []
for dx, dy in [(-1, 0), (1, 0), (0, -1), (0, 1)]:
newx, newy = x + dx, y + dy
if 0 <= newx < SIZE and 0 <= newy < SIZE:
self.neighbours.append(self.board.squares[to_pos(newx, newy)])
def move(self, color):
global TIMESTAMP, MOVES
TIMESTAMP += 1
MOVES += 1
self.board.zobrist.update(self, color)
self.color = color
self.reference = self
self.ledges = 0
self.used = True
for neighbour in self.neighbours:
neighcolor = neighbour.color
if neighcolor == EMPTY:
self.ledges += 1
else:
neighbour_ref = neighbour.find(update=True)
if neighcolor == color:
if neighbour_ref.reference.pos != self.pos:
self.ledges += neighbour_ref.ledges
neighbour_ref.reference = self
self.ledges -= 1
else:
neighbour_ref.ledges -= 1
if neighbour_ref.ledges == 0:
neighbour.remove(neighbour_ref)
self.board.zobrist.add()
def remove(self, reference, update=True):
self.board.zobrist.update(self, EMPTY)
self.removestamp = TIMESTAMP
if update:
self.color = EMPTY
self.board.emptyset.add(self.pos)
# if color == BLACK:
# self.board.black_dead += 1
# else:
# self.board.white_dead += 1
for neighbour in self.neighbours:
if neighbour.color != EMPTY and neighbour.removestamp != TIMESTAMP:
neighbour_ref = neighbour.find(update)
if neighbour_ref.pos == reference.pos:
neighbour.remove(reference, update)
else:
if update:
neighbour_ref.ledges += 1
def find(self, update=False):
reference = self.reference
if reference.pos != self.pos:
reference = reference.find(update)
if update:
self.reference = reference
return reference
def __repr__(self):
return repr(to_xy(self.pos))
class EmptySet:
def __init__(self, board):
self.board = board
self.empties = range(SIZE*SIZE)
self.empty_pos = range(SIZE*SIZE)
def random_choice(self):
choices = len(self.empties)
while choices:
i = int(random.random()*choices)
pos = self.empties[i]
if self.board.useful(pos):
return pos
choices -= 1
self.set(i, self.empties[choices])
self.set(choices, pos)
return PASS
def add(self, pos):
self.empty_pos[pos] = len(self.empties)
self.empties.append(pos)
def remove(self, pos):
self.set(self.empty_pos[pos], self.empties[len(self.empties)-1])
self.empties.pop()
def set(self, i, pos):
self.empties[i] = pos
self.empty_pos[pos] = i
class ZobristHash:
def __init__(self, board):
self.board = board
self.hash_set = set()
self.hash = 0
for square in self.board.squares:
self.hash ^= square.zobrist_strings[EMPTY]
self.hash_set.clear()
self.hash_set.add(self.hash)
def update(self, square, color):
self.hash ^= square.zobrist_strings[square.color]
self.hash ^= square.zobrist_strings[color]
def add(self):
self.hash_set.add(self.hash)
def dupe(self):
return self.hash in self.hash_set
class Board:
def __init__(self):
self.squares = [Square(self, pos) for pos in range(SIZE*SIZE)]
for square in self.squares:
square.set_neighbours()
self.reset()
def reset(self):
for square in self.squares:
square.color = EMPTY
square.used = False
self.emptyset = EmptySet(self)
self.zobrist = ZobristHash(self)
self.color = BLACK
self.finished = False
self.lastmove = -2
self.history = []
self.white_dead = 0
self.black_dead = 0
def move(self, pos):
square = self.squares[pos]
if pos != PASS:
square.move(self.color)
self.emptyset.remove(square.pos)
elif self.lastmove == PASS:
self.finished = True
if self.color == BLACK: self.color = WHITE
else: self.color = BLACK
self.lastmove = pos
self.history.append(pos)
def random_move(self):
return self.emptyset.random_choice()
def useful_fast(self, square):
if not square.used:
for neighbour in square.neighbours:
if neighbour.color == EMPTY:
return True
return False
def useful(self, pos):
global TIMESTAMP
TIMESTAMP += 1
square = self.squares[pos]
if self.useful_fast(square):
return True
old_hash = self.zobrist.hash
self.zobrist.update(square, self.color)
empties = opps = weak_opps = neighs = weak_neighs = 0
for neighbour in square.neighbours:
neighcolor = neighbour.color
if neighcolor == EMPTY:
empties += 1
continue
neighbour_ref = neighbour.find()
if neighbour_ref.timestamp != TIMESTAMP:
if neighcolor == self.color:
neighs += 1
else:
opps += 1
neighbour_ref.timestamp = TIMESTAMP
neighbour_ref.temp_ledges = neighbour_ref.ledges
neighbour_ref.temp_ledges -= 1
if neighbour_ref.temp_ledges == 0:
if neighcolor == self.color:
weak_neighs += 1
else:
weak_opps += 1
neighbour_ref.remove(neighbour_ref, update=False)
dupe = self.zobrist.dupe()
self.zobrist.hash = old_hash
strong_neighs = neighs-weak_neighs
strong_opps = opps-weak_opps
return not dupe and \
(empties or weak_opps or (strong_neighs and (strong_opps or weak_neighs)))
def useful_moves(self):
return [pos for pos in self.emptyset.empties if self.useful(pos)]
def replay(self, history):
for pos in history:
self.move(pos)
def score(self, color):
if color == WHITE:
count = KOMI + self.black_dead
else:
count = self.white_dead
for square in self.squares:
squarecolor = square.color
if squarecolor == color:
count += 1
elif squarecolor == EMPTY:
surround = 0
for neighbour in square.neighbours:
if neighbour.color == color:
surround += 1
if surround == len(square.neighbours):
count += 1
return count
def check(self):
for square in self.squares:
if square.color == EMPTY:
continue
members1 = set([square])
changed = True
while changed:
changed = False
for member in members1.copy():
for neighbour in member.neighbours:
if neighbour.color == square.color and neighbour not in members1:
changed = True
members1.add(neighbour)
ledges1 = 0
for member in members1:
for neighbour in member.neighbours:
if neighbour.color == EMPTY:
ledges1 += 1
root = square.find()
#print 'members1', square, root, members1
#print 'ledges1', square, ledges1
members2 = set()
for square2 in self.squares:
if square2.color != EMPTY and square2.find() == root:
members2.add(square2)
ledges2 = root.ledges
#print 'members2', square, root, members1
#print 'ledges2', square, ledges2
assert members1 == members2
assert ledges1 == ledges2, ('ledges differ at %r: %d %d' % (square, ledges1, ledges2))
        empties1 = set(self.emptyset.empties)
        empties2 = set()
        for square in self.squares:
            if square.color == EMPTY:
                empties2.add(square.pos)
        assert empties1 == empties2
def __repr__(self):
result = []
for y in range(SIZE):
start = to_pos(0, y)
result.append(''.join([SHOW[square.color]+' ' for square in self.squares[start:start+SIZE]]))
return '\n'.join(result)
class UCTNode:
def __init__(self):
self.bestchild = None
self.pos = -1
self.wins = 0
self.losses = 0
self.pos_child = [None for x in range(SIZE*SIZE)]
self.parent = None
def play(self, board):
""" uct tree search """
color = board.color
node = self
path = [node]
while True:
pos = node.select(board)
if pos == PASS:
break
board.move(pos)
child = node.pos_child[pos]
if not child:
child = node.pos_child[pos] = UCTNode()
child.unexplored = board.useful_moves()
child.pos = pos
child.parent = node
path.append(child)
break
path.append(child)
node = child
self.random_playout(board)
self.update_path(board, color, path)
def select(self, board):
""" select move; unexplored children first, then according to uct value """
if self.unexplored:
i = random.randrange(len(self.unexplored))
pos = self.unexplored[i]
self.unexplored[i] = self.unexplored[len(self.unexplored)-1]
self.unexplored.pop()
return pos
elif self.bestchild:
return self.bestchild.pos
else:
return PASS
def random_playout(self, board):
""" random play until both players pass """
for x in range(MAXMOVES): # XXX while not self.finished?
if board.finished:
break
board.move(board.random_move())
def update_path(self, board, color, path):
""" update win/loss count along path """
wins = board.score(BLACK) >= board.score(WHITE)
for node in path:
if color == BLACK: color = WHITE
else: color = BLACK
if wins == (color == BLACK):
node.wins += 1
else:
node.losses += 1
if node.parent:
node.parent.bestchild = node.parent.best_child()
def score(self):
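        # UCT value: exploitation term (win rate) plus an exploration bonus that
        # grows with the parent's visit count and shrinks with this node's own
        # visits (a UCB1-style formula with a smaller exploration constant).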
winrate = self.wins/float(self.wins+self.losses)
parentvisits = self.parent.wins+self.parent.losses
if not parentvisits:
return winrate
nodevisits = self.wins+self.losses
return winrate + math.sqrt((math.log(parentvisits))/(5*nodevisits))
def best_child(self):
maxscore = -1
maxchild = None
for child in self.pos_child:
if child and child.score() > maxscore:
maxchild = child
maxscore = child.score()
return maxchild
def best_visited(self):
maxvisits = -1
maxchild = None
for child in self.pos_child:
# if child:
# print to_xy(child.pos), child.wins, child.losses, child.score()
if child and (child.wins+child.losses) > maxvisits:
maxvisits, maxchild = (child.wins+child.losses), child
return maxchild
def user_move(board):
while True:
text = raw_input('?').strip()
if text == 'p':
return PASS
if text == 'q':
raise EOFError
try:
x, y = [int(i) for i in text.split()]
except ValueError:
continue
if not (0 <= x < SIZE and 0 <= y < SIZE):
continue
pos = to_pos(x, y)
if board.useful(pos):
return pos
def computer_move(board):
global MOVES
pos = board.random_move()
if pos == PASS:
return PASS
tree = UCTNode()
tree.unexplored = board.useful_moves()
nboard = Board()
for game in range(GAMES):
node = tree
nboard.reset()
nboard.replay(board.history)
node.play(nboard)
# print 'moves', MOVES
return tree.best_visited().pos
def versus_cpu():
print "versus_cpu"
random.seed(1)
board = Board()
pos = computer_move(board)
def main(n):
times = []
for i in range(5):
versus_cpu() # warmup
for i in range(n):
t1 = time.time()
versus_cpu()
t2 = time.time()
times.append(t2 - t1)
return times
if __name__ == "__main__":
main(100)
| 2.71875 | 3 |
tools/gen_usb_descriptor.py | BrianPugh/circuitpython | 1 | 6546 | # SPDX-FileCopyrightText: 2014 MicroPython & CircuitPython contributors (https://github.com/adafruit/circuitpython/graphs/contributors)
#
# SPDX-License-Identifier: MIT
import argparse
import os
import sys
sys.path.append("../../tools/usb_descriptor")
from adafruit_usb_descriptor import audio, audio10, cdc, hid, midi, msc, standard, util
import hid_report_descriptors
DEFAULT_INTERFACE_NAME = 'CircuitPython'
ALL_DEVICES='CDC,MSC,AUDIO,HID'
ALL_DEVICES_SET=frozenset(ALL_DEVICES.split(','))
DEFAULT_DEVICES='CDC,MSC,AUDIO,HID'
ALL_HID_DEVICES='KEYBOARD,MOUSE,CONSUMER,SYS_CONTROL,GAMEPAD,DIGITIZER,XAC_COMPATIBLE_GAMEPAD,RAW'
ALL_HID_DEVICES_SET=frozenset(ALL_HID_DEVICES.split(','))
# Digitizer works on Linux but conflicts with mouse, so omit it.
DEFAULT_HID_DEVICES='KEYBOARD,MOUSE,CONSUMER,GAMEPAD'
parser = argparse.ArgumentParser(description='Generate USB descriptors.')
parser.add_argument('--highspeed', default=False, action='store_true',
help='descriptor for highspeed device')
parser.add_argument('--manufacturer', type=str,
help='manufacturer of the device')
parser.add_argument('--product', type=str,
help='product name of the device')
parser.add_argument('--vid', type=lambda x: int(x, 16),
help='vendor id')
parser.add_argument('--pid', type=lambda x: int(x, 16),
help='product id')
parser.add_argument('--serial_number_length', type=int, default=32,
help='length needed for the serial number in digits')
parser.add_argument('--devices', type=lambda l: tuple(l.split(',')), default=DEFAULT_DEVICES,
help='devices to include in descriptor (AUDIO includes MIDI support)')
parser.add_argument('--hid_devices', type=lambda l: tuple(l.split(',')), default=DEFAULT_HID_DEVICES,
help='HID devices to include in HID report descriptor')
parser.add_argument('--interface_name', type=str,
help='The name/prefix to use in the interface descriptions',
default=DEFAULT_INTERFACE_NAME)
parser.add_argument('--no-renumber_endpoints', dest='renumber_endpoints', action='store_false',
help='use to not renumber endpoint')
parser.add_argument('--cdc_ep_num_notification', type=int, default=0,
help='endpoint number of CDC NOTIFICATION')
parser.add_argument('--cdc_ep_num_data_out', type=int, default=0,
help='endpoint number of CDC DATA OUT')
parser.add_argument('--cdc_ep_num_data_in', type=int, default=0,
help='endpoint number of CDC DATA IN')
parser.add_argument('--msc_ep_num_out', type=int, default=0,
help='endpoint number of MSC OUT')
parser.add_argument('--msc_ep_num_in', type=int, default=0,
help='endpoint number of MSC IN')
parser.add_argument('--hid_ep_num_out', type=int, default=0,
help='endpoint number of HID OUT')
parser.add_argument('--hid_ep_num_in', type=int, default=0,
help='endpoint number of HID IN')
parser.add_argument('--midi_ep_num_out', type=int, default=0,
help='endpoint number of MIDI OUT')
parser.add_argument('--midi_ep_num_in', type=int, default=0,
help='endpoint number of MIDI IN')
parser.add_argument('--output_c_file', type=argparse.FileType('w', encoding='UTF-8'), required=True)
parser.add_argument('--output_h_file', type=argparse.FileType('w', encoding='UTF-8'), required=True)
args = parser.parse_args()
unknown_devices = list(frozenset(args.devices) - ALL_DEVICES_SET)
if unknown_devices:
raise ValueError("Unknown device(s)", unknown_devices)
unknown_hid_devices = list(frozenset(args.hid_devices) - ALL_HID_DEVICES_SET)
if unknown_hid_devices:
raise ValueError("Unknown HID devices(s)", unknown_hid_devices)
if not args.renumber_endpoints:
if 'CDC' in args.devices:
if args.cdc_ep_num_notification == 0:
raise ValueError("CDC notification endpoint number must not be 0")
elif args.cdc_ep_num_data_out == 0:
raise ValueError("CDC data OUT endpoint number must not be 0")
elif args.cdc_ep_num_data_in == 0:
raise ValueError("CDC data IN endpoint number must not be 0")
if 'MSC' in args.devices:
if args.msc_ep_num_out == 0:
raise ValueError("MSC endpoint OUT number must not be 0")
elif args.msc_ep_num_in == 0:
raise ValueError("MSC endpoint IN number must not be 0")
if 'HID' in args.devices:
        if args.hid_ep_num_out == 0:
raise ValueError("HID endpoint OUT number must not be 0")
elif args.hid_ep_num_in == 0:
raise ValueError("HID endpoint IN number must not be 0")
if 'AUDIO' in args.devices:
        if args.midi_ep_num_out == 0:
raise ValueError("MIDI endpoint OUT number must not be 0")
elif args.midi_ep_num_in == 0:
raise ValueError("MIDI endpoint IN number must not be 0")
class StringIndex:
"""Assign a monotonically increasing index to each unique string. Start with 0."""
string_to_index = {}
index_to_variable = {}
strings = []
@classmethod
def index(cls, string, *, variable_name = None):
if string in cls.string_to_index:
idx = cls.string_to_index[string]
if not cls.index_to_variable[idx]:
cls.index_to_variable[idx] = variable_name
return idx
else:
idx = len(cls.strings)
cls.string_to_index[string] = idx
cls.strings.append(string)
cls.index_to_variable[idx] = variable_name
return idx
@classmethod
def strings_in_order(cls):
return cls.strings
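# Example (a sketch of the behaviour above): repeated calls with the same string
# return the same index, new strings receive the next free index, and
# strings_in_order() yields them in first-registration order for the string
# descriptor table built further below.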
# langid must be the 0th string descriptor
LANGID_INDEX = StringIndex.index("\u0409", variable_name="language_id")
assert LANGID_INDEX == 0
SERIAL_NUMBER_INDEX = StringIndex.index("S" * args.serial_number_length, variable_name="usb_serial_number")
device = standard.DeviceDescriptor(
description="top",
idVendor=args.vid,
idProduct=args.pid,
iManufacturer=StringIndex.index(args.manufacturer),
iProduct=StringIndex.index(args.product),
iSerialNumber=SERIAL_NUMBER_INDEX)
# Interface numbers are interface-set local and endpoints are interface local
# until util.join_interfaces renumbers them.
cdc_union = cdc.Union(
description="CDC comm",
bMasterInterface=0x00, # Adjust this after interfaces are renumbered.
bSlaveInterface_list=[0x01]) # Adjust this after interfaces are renumbered.
cdc_call_management = cdc.CallManagement(
description="CDC comm",
bmCapabilities=0x01,
bDataInterface=0x01) # Adjust this after interfaces are renumbered.
cdc_comm_interface = standard.InterfaceDescriptor(
description="CDC comm",
bInterfaceClass=cdc.CDC_CLASS_COMM, # Communications Device Class
bInterfaceSubClass=cdc.CDC_SUBCLASS_ACM, # Abstract control model
bInterfaceProtocol=cdc.CDC_PROTOCOL_NONE,
iInterface=StringIndex.index("{} CDC control".format(args.interface_name)),
subdescriptors=[
cdc.Header(
description="CDC comm",
bcdCDC=0x0110),
cdc_call_management,
cdc.AbstractControlManagement(
description="CDC comm",
bmCapabilities=0x02),
cdc_union,
standard.EndpointDescriptor(
description="CDC comm in",
bEndpointAddress=args.cdc_ep_num_notification | standard.EndpointDescriptor.DIRECTION_IN,
bmAttributes=standard.EndpointDescriptor.TYPE_INTERRUPT,
wMaxPacketSize=0x0040,
bInterval=0x10)
])
cdc_data_interface = standard.InterfaceDescriptor(
description="CDC data",
bInterfaceClass=cdc.CDC_CLASS_DATA,
iInterface=StringIndex.index("{} CDC data".format(args.interface_name)),
subdescriptors=[
standard.EndpointDescriptor(
description="CDC data out",
bEndpointAddress=args.cdc_ep_num_data_out | standard.EndpointDescriptor.DIRECTION_OUT,
bmAttributes=standard.EndpointDescriptor.TYPE_BULK,
bInterval=0,
wMaxPacketSize=512 if args.highspeed else 64),
standard.EndpointDescriptor(
description="CDC data in",
bEndpointAddress=args.cdc_ep_num_data_in | standard.EndpointDescriptor.DIRECTION_IN,
bmAttributes=standard.EndpointDescriptor.TYPE_BULK,
bInterval=0,
wMaxPacketSize=512 if args.highspeed else 64),
])
cdc_interfaces = [cdc_comm_interface, cdc_data_interface]
msc_interfaces = [
standard.InterfaceDescriptor(
description="MSC",
bInterfaceClass=msc.MSC_CLASS,
bInterfaceSubClass=msc.MSC_SUBCLASS_TRANSPARENT,
bInterfaceProtocol=msc.MSC_PROTOCOL_BULK,
iInterface=StringIndex.index("{} Mass Storage".format(args.interface_name)),
subdescriptors=[
standard.EndpointDescriptor(
description="MSC in",
bEndpointAddress=args.msc_ep_num_in | standard.EndpointDescriptor.DIRECTION_IN,
bmAttributes=standard.EndpointDescriptor.TYPE_BULK,
bInterval=0,
wMaxPacketSize=512 if args.highspeed else 64),
standard.EndpointDescriptor(
description="MSC out",
bEndpointAddress=(args.msc_ep_num_out | standard.EndpointDescriptor.DIRECTION_OUT),
bmAttributes=standard.EndpointDescriptor.TYPE_BULK,
bInterval=0,
wMaxPacketSize=512 if args.highspeed else 64),
]
)
]
# When there's only one hid_device, it shouldn't have a report id.
# Otherwise, report ids are assigned sequentially:
# args.hid_devices[0] has report_id 1
# args.hid_devices[1] has report_id 2
# etc.
report_ids = {}
if len(args.hid_devices) == 1:
name = args.hid_devices[0]
combined_hid_report_descriptor = hid.ReportDescriptor(
description=name,
report_descriptor=bytes(hid_report_descriptors.REPORT_DESCRIPTOR_FUNCTIONS[name](0)))
report_ids[name] = 0
else:
report_id = 1
concatenated_descriptors = bytearray()
for name in args.hid_devices:
concatenated_descriptors.extend(
bytes(hid_report_descriptors.REPORT_DESCRIPTOR_FUNCTIONS[name](report_id)))
report_ids[name] = report_id
report_id += 1
combined_hid_report_descriptor = hid.ReportDescriptor(
description="MULTIDEVICE",
report_descriptor=bytes(concatenated_descriptors))
# ASF4 expects keyboard and generic devices to have both in and out endpoints,
# and will fail (possibly silently) if both are not supplied.
hid_endpoint_in_descriptor = standard.EndpointDescriptor(
description="HID in",
bEndpointAddress=args.hid_ep_num_in | standard.EndpointDescriptor.DIRECTION_IN,
bmAttributes=standard.EndpointDescriptor.TYPE_INTERRUPT,
bInterval=8)
hid_endpoint_out_descriptor = standard.EndpointDescriptor(
description="HID out",
bEndpointAddress=args.hid_ep_num_out | standard.EndpointDescriptor.DIRECTION_OUT,
bmAttributes=standard.EndpointDescriptor.TYPE_INTERRUPT,
bInterval=8)
hid_interfaces = [
standard.InterfaceDescriptor(
description="HID Multiple Devices",
bInterfaceClass=hid.HID_CLASS,
bInterfaceSubClass=hid.HID_SUBCLASS_NOBOOT,
bInterfaceProtocol=hid.HID_PROTOCOL_NONE,
iInterface=StringIndex.index("{} HID".format(args.interface_name)),
subdescriptors=[
hid.HIDDescriptor(
description="HID",
wDescriptorLength=len(bytes(combined_hid_report_descriptor))),
hid_endpoint_in_descriptor,
hid_endpoint_out_descriptor,
]
),
]
# Audio!
# In and out here are relative to CircuitPython
# USB OUT -> midi_in_jack_emb -> midi_out_jack_ext -> CircuitPython
midi_in_jack_emb = midi.InJackDescriptor(
description="MIDI PC -> {}".format(args.interface_name),
bJackType=midi.JACK_TYPE_EMBEDDED,
iJack=StringIndex.index("{} usb_midi.ports[0]".format(args.interface_name)))
midi_out_jack_ext = midi.OutJackDescriptor(
description="MIDI data out to user code.",
bJackType=midi.JACK_TYPE_EXTERNAL,
input_pins=[(midi_in_jack_emb, 1)],
iJack=0)
# USB IN <- midi_out_jack_emb <- midi_in_jack_ext <- CircuitPython
midi_in_jack_ext = midi.InJackDescriptor(
description="MIDI data in from user code.",
bJackType=midi.JACK_TYPE_EXTERNAL,
iJack=0)
midi_out_jack_emb = midi.OutJackDescriptor(
description="MIDI PC <- {}".format(args.interface_name),
bJackType=midi.JACK_TYPE_EMBEDDED,
input_pins=[(midi_in_jack_ext, 1)],
iJack=StringIndex.index("{} usb_midi.ports[1]".format(args.interface_name)))
audio_midi_interface = standard.InterfaceDescriptor(
description="Midi goodness",
bInterfaceClass=audio.AUDIO_CLASS_DEVICE,
bInterfaceSubClass=audio.AUDIO_SUBCLASS_MIDI_STREAMING,
bInterfaceProtocol=audio.AUDIO_PROTOCOL_V1,
iInterface=StringIndex.index("{} MIDI".format(args.interface_name)),
subdescriptors=[
midi.Header(
jacks_and_elements=[
midi_in_jack_emb,
midi_in_jack_ext,
midi_out_jack_emb,
midi_out_jack_ext
],
),
standard.EndpointDescriptor(
description="MIDI data out to {}".format(args.interface_name),
bEndpointAddress=args.midi_ep_num_out | standard.EndpointDescriptor.DIRECTION_OUT,
bmAttributes=standard.EndpointDescriptor.TYPE_BULK,
bInterval=0,
wMaxPacketSize=512 if args.highspeed else 64),
midi.DataEndpointDescriptor(baAssocJack=[midi_in_jack_emb]),
standard.EndpointDescriptor(
description="MIDI data in from {}".format(args.interface_name),
bEndpointAddress=args.midi_ep_num_in | standard.EndpointDescriptor.DIRECTION_IN,
bmAttributes=standard.EndpointDescriptor.TYPE_BULK,
bInterval = 0x0,
wMaxPacketSize=512 if args.highspeed else 64),
midi.DataEndpointDescriptor(baAssocJack=[midi_out_jack_emb]),
])
cs_ac_interface = audio10.AudioControlInterface(
description="Empty audio control",
audio_streaming_interfaces = [],
midi_streaming_interfaces = [
audio_midi_interface
]
)
audio_control_interface = standard.InterfaceDescriptor(
description="All the audio",
bInterfaceClass=audio.AUDIO_CLASS_DEVICE,
bInterfaceSubClass=audio.AUDIO_SUBCLASS_CONTROL,
bInterfaceProtocol=audio.AUDIO_PROTOCOL_V1,
iInterface=StringIndex.index("{} Audio".format(args.interface_name)),
subdescriptors=[
cs_ac_interface,
])
# Audio streaming interfaces must occur before MIDI ones.
audio_interfaces = [audio_control_interface] + cs_ac_interface.audio_streaming_interfaces + cs_ac_interface.midi_streaming_interfaces
interfaces_to_join = []
if 'CDC' in args.devices:
interfaces_to_join.append(cdc_interfaces)
if 'MSC' in args.devices:
interfaces_to_join.append(msc_interfaces)
if 'HID' in args.devices:
interfaces_to_join.append(hid_interfaces)
if 'AUDIO' in args.devices:
interfaces_to_join.append(audio_interfaces)
# util.join_interfaces() will renumber the endpoints to make them unique across descriptors,
# and renumber the interfaces in order. But we still need to fix up certain
# interface cross-references.
interfaces = util.join_interfaces(interfaces_to_join, renumber_endpoints=args.renumber_endpoints)
# Now adjust the CDC interface cross-references.
cdc_union.bMasterInterface = cdc_comm_interface.bInterfaceNumber
cdc_union.bSlaveInterface_list = [cdc_data_interface.bInterfaceNumber]
cdc_call_management.bDataInterface = cdc_data_interface.bInterfaceNumber
cdc_iad = standard.InterfaceAssociationDescriptor(
description="CDC IAD",
bFirstInterface=cdc_comm_interface.bInterfaceNumber,
bInterfaceCount=len(cdc_interfaces),
bFunctionClass=cdc.CDC_CLASS_COMM, # Communications Device Class
bFunctionSubClass=cdc.CDC_SUBCLASS_ACM, # Abstract control model
bFunctionProtocol=cdc.CDC_PROTOCOL_NONE)
descriptor_list = []
if 'CDC' in args.devices:
# Put the CDC IAD just before the CDC interfaces.
# There appears to be a bug in the Windows composite USB driver that requests the
# HID report descriptor with the wrong interface number if the HID interface is not given
# first. However, it still fetches the descriptor anyway. We could reorder the interfaces but
# the Windows 7 Adafruit_usbser.inf file thinks CDC is at Interface 0, so we'll leave it
# there for backwards compatibility.
descriptor_list.append(cdc_iad)
descriptor_list.extend(cdc_interfaces)
if 'MSC' in args.devices:
descriptor_list.extend(msc_interfaces)
if 'HID' in args.devices:
descriptor_list.extend(hid_interfaces)
if 'AUDIO' in args.devices:
# Only add the control interface because other audio interfaces are managed by it to ensure the
# correct ordering.
descriptor_list.append(audio_control_interface)
# Finally, build the composite descriptor.
configuration = standard.ConfigurationDescriptor(
description="Composite configuration",
wTotalLength=(standard.ConfigurationDescriptor.bLength +
sum([len(bytes(x)) for x in descriptor_list])),
bNumInterfaces=len(interfaces))
descriptor_list.insert(0, configuration)
string_descriptors = [standard.StringDescriptor(string) for string in StringIndex.strings_in_order()]
serial_number_descriptor = string_descriptors[SERIAL_NUMBER_INDEX]
c_file = args.output_c_file
h_file = args.output_h_file
c_file.write("""\
#include <stdint.h>
#include "py/objtuple.h"
#include "shared-bindings/usb_hid/Device.h"
#include "{H_FILE_NAME}"
""".format(H_FILE_NAME=h_file.name))
c_file.write("""\
// {DESCRIPTION} : {CLASS}
""".format(DESCRIPTION=device.description,
CLASS=device.__class__))
c_file.write("""\
const uint8_t usb_desc_dev[] = {
""")
for b in bytes(device):
c_file.write("0x{:02x}, ".format(b))
c_file.write("""\
};
""")
c_file.write("""\
const uint8_t usb_desc_cfg[] = {
""")
# Write out all the regular descriptors as one long array (that's how ASF4 does it).
descriptor_length = 0
for descriptor in descriptor_list:
c_file.write("""\
// {DESCRIPTION} : {CLASS}
""".format(DESCRIPTION=descriptor.description,
CLASS=descriptor.__class__))
b = bytes(descriptor)
notes = descriptor.notes()
i = 0
# This prints each subdescriptor on a separate line.
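    # Each USB descriptor is length-prefixed: b[i] holds the byte count of the
    # subdescriptor starting at offset i, so the loop below advances by that
    # length and annotates every emitted line with the matching note.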
n = 0
while i < len(b):
length = b[i]
for j in range(length):
c_file.write("0x{:02x}, ".format(b[i + j]))
c_file.write("// " + notes[n])
n += 1
c_file.write("\n")
i += length
descriptor_length += len(b)
c_file.write("""\
};
""")
pointers_to_strings = []
for idx, descriptor in enumerate(string_descriptors):
c_file.write("""\
// {DESCRIPTION} : {CLASS}
""".format(DESCRIPTION=descriptor.description,
CLASS=descriptor.__class__))
b = bytes(descriptor)
notes = descriptor.notes()
i = 0
# This prints each subdescriptor on a separate line.
variable_name = StringIndex.index_to_variable[idx]
if not variable_name:
variable_name = "string_descriptor{}".format(idx)
const = "const "
if variable_name == "usb_serial_number":
const = ""
c_file.write("""\
{const}uint16_t {NAME}[] = {{
""".format(const=const, NAME=variable_name))
pointers_to_strings.append("{name}".format(name=variable_name))
n = 0
while i < len(b):
length = b[i]
for j in range(length // 2):
c_file.write("0x{:04x}, ".format(b[i + 2*j + 1] << 8 | b[i + 2*j]))
n += 1
c_file.write("\n")
i += length
c_file.write("""\
};
""")
c_file.write("""\
// array of pointer to string descriptors
uint16_t const * const string_desc_arr [] =
{
""")
c_file.write(""",\
""".join(pointers_to_strings))
c_file.write("""
};
""")
c_file.write("\n")
hid_descriptor_length = len(bytes(combined_hid_report_descriptor))
# Now write the values we need for the .h file.
h_file.write("""\
#ifndef MICROPY_INCLUDED_AUTOGEN_USB_DESCRIPTOR_H
#define MICROPY_INCLUDED_AUTOGEN_USB_DESCRIPTOR_H
#include <stdint.h>
extern const uint8_t usb_desc_dev[{device_length}];
extern const uint8_t usb_desc_cfg[{configuration_length}];
extern uint16_t usb_serial_number[{serial_number_length}];
extern uint16_t const * const string_desc_arr [{string_descriptor_length}];
extern const uint8_t hid_report_descriptor[{hid_report_descriptor_length}];
#define CFG_TUSB_RHPORT0_MODE ({rhport0_mode})
#define USB_HID_NUM_DEVICES {hid_num_devices}
// Vendor name included in Inquiry response, max 8 bytes
#define CFG_TUD_MSC_VENDOR "{msc_vendor}"
// Product name included in Inquiry response, max 16 bytes
#define CFG_TUD_MSC_PRODUCT "{msc_product}"
"""
.format(serial_number_length=len(bytes(serial_number_descriptor)) // 2,
device_length=len(bytes(device)),
configuration_length=descriptor_length,
max_configuration_length=max(hid_descriptor_length, descriptor_length),
string_descriptor_length=len(pointers_to_strings),
hid_report_descriptor_length=len(bytes(combined_hid_report_descriptor)),
rhport0_mode='OPT_MODE_DEVICE | OPT_MODE_HIGH_SPEED' if args.highspeed else 'OPT_MODE_DEVICE',
hid_num_devices=len(args.hid_devices),
msc_vendor=args.manufacturer[:8],
msc_product=args.product[:16]))
# Write out the report descriptor and info
c_file.write("""\
const uint8_t hid_report_descriptor[{HID_DESCRIPTOR_LENGTH}] = {{
""".format(HID_DESCRIPTOR_LENGTH=hid_descriptor_length))
for b in bytes(combined_hid_report_descriptor):
c_file.write("0x{:02x}, ".format(b))
c_file.write("""\
};
""")
# Write out USB HID report buffer definitions.
for name in args.hid_devices:
c_file.write("""\
static uint8_t {name}_report_buffer[{report_length}];
""".format(name=name.lower(), report_length=hid_report_descriptors.HID_DEVICE_DATA[name].report_length))
if hid_report_descriptors.HID_DEVICE_DATA[name].out_report_length > 0:
c_file.write("""\
static uint8_t {name}_out_report_buffer[{report_length}];
""".format(name=name.lower(), report_length=hid_report_descriptors.HID_DEVICE_DATA[name].out_report_length))
# Write out table of device objects.
c_file.write("""
usb_hid_device_obj_t usb_hid_devices[] = {
""")
for name in args.hid_devices:
device_data = hid_report_descriptors.HID_DEVICE_DATA[name]
out_report_buffer = '{}_out_report_buffer'.format(name.lower()) if device_data.out_report_length > 0 else 'NULL'
c_file.write("""\
{{
.base = {{ .type = &usb_hid_device_type }},
.report_buffer = {name}_report_buffer,
.report_id = {report_id},
.report_length = {report_length},
.usage_page = {usage_page:#04x},
.usage = {usage:#04x},
.out_report_buffer = {out_report_buffer},
.out_report_length = {out_report_length},
}},
""".format(name=name.lower(), report_id=report_ids[name],
report_length=device_data.report_length,
usage_page=device_data.usage_page,
usage=device_data.usage,
out_report_buffer=out_report_buffer,
out_report_length=device_data.out_report_length))
c_file.write("""\
};
""")
# Write out tuple of device objects.
c_file.write("""
mp_obj_tuple_t common_hal_usb_hid_devices = {{
.base = {{
.type = &mp_type_tuple,
}},
.len = {num_devices},
.items = {{
""".format(num_devices=len(args.hid_devices)))
for idx in range(len(args.hid_devices)):
c_file.write("""\
(mp_obj_t) &usb_hid_devices[{idx}],
""".format(idx=idx))
c_file.write("""\
},
};
""")
h_file.write("""\
#endif // MICROPY_INCLUDED_AUTOGEN_USB_DESCRIPTOR_H
""")
| 2.4375 | 2 |
bclstm/train_meld.py | Columbine21/THUIAR-ERC | 1 | 6547 | from tqdm import tqdm
import pandas as pd
import numpy as np, argparse, time, pickle, random, os, datetime
import torch
import torch.optim as optim
from model import MaskedNLLLoss, BC_LSTM
from dataloader import MELDDataLoader
from sklearn.metrics import f1_score, confusion_matrix, accuracy_score, classification_report
def setup_seed(seed):
""" Manually Fix the random seed to get deterministic results.
"""
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
np.random.seed(seed)
random.seed(seed)
    torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
def train_or_eval_model(model, loss_function, dataloader, epoch, optimizer=None, mode='train'):
losses, preds, labels, masks, losses_sense = [], [], [], [], []
max_sequence_len = []
assert mode != 'train' or optimizer != None
if mode == 'train':
model.train()
else:
model.eval()
with tqdm(dataloader) as td:
for data in td:
if mode == 'train':
optimizer.zero_grad()
textf, acouf, mask, label = [d.cuda() for d in data[:-1]] if args.cuda else data[:-1]
log_prob, _ = model(textf, None, acouf, None, mask)
lp_ = log_prob.transpose(0,1).contiguous().view(-1, log_prob.size()[2]) # batch*seq_len, n_classes
labels_ = label.view(-1) # batch*seq_len
loss = loss_function(lp_, labels_, mask)
pred_ = torch.argmax(lp_,1) # batch*seq_len
preds.append(pred_.data.cpu().numpy())
labels.append(labels_.data.cpu().numpy())
masks.append(mask.view(-1).cpu().numpy())
losses.append(loss.item()*masks[-1].sum())
if mode == 'train':
total_loss = loss
total_loss.backward()
optimizer.step()
if preds!=[]:
preds = np.concatenate(preds)
labels = np.concatenate(labels)
masks = np.concatenate(masks)
else:
        return float('nan'), float('nan'), [], [], [], [float('nan')]
avg_loss = round(np.sum(losses)/np.sum(masks), 4)
avg_sense_loss = round(np.sum(losses_sense)/np.sum(masks), 4)
avg_accuracy = round(accuracy_score(labels,preds, sample_weight=masks)*100, 2)
avg_fscore = round(f1_score(labels,preds, sample_weight=masks, average='weighted')*100, 2)
if mode == 'test':
class_report = classification_report(labels, preds, sample_weight=masks, target_names=['neutral', 'surprise', 'fear', 'sadness', 'joy', 'disgust', 'anger'], digits=6)
print(class_report)
return avg_loss, avg_accuracy, labels, preds, masks, [avg_fscore]
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--num_workers', type=int, default=0,
help='num workers of loading data')
# dataloader settings
parser.add_argument('--batch-size', type=int, default=32, metavar='BS', help='batch size')
parser.add_argument('--data_path', type=str, default='../TextCnn/dataset/MELD_features_raw.pkl')
# model settings.
parser.add_argument('--attention_type', type=str, default='general2')
parser.add_argument('--utterance_dim', type=int, default=600,
help='embedding dims to use')
parser.add_argument('--emotion_state_dim', type=int, default=100)
parser.add_argument('--hidden_layer_dim', type=int, default=100)
parser.add_argument('--dropout', type=float, default=0.25)
parser.add_argument('--n_classes', type=int, default=7)
# late fusion module.
parser.add_argument('--lateFusionModule', type=str, default='concat')
parser.add_argument('--input_features', type=tuple, default=(100, 300))
parser.add_argument('--pre_fusion_hidden_dims', type=tuple, default=(24, 7))
parser.add_argument('--pre_fusion_dropout', type=float, default=0.4)
parser.add_argument('--post_fusion_dropout', type=float, default=0.3)
# train settings.
parser.add_argument('--lr', type=float, default=1e-4, metavar='LR', help='learning rate')
parser.add_argument('--l2', type=float, default=1e-5, metavar='L2', help='L2 regularization weight')
parser.add_argument('--epochs', type=int, default=100, metavar='E', help='number of epochs')
return parser.parse_args()
if __name__ == '__main__':
args = parse_args()
args.cuda = torch.cuda.is_available()
if args.cuda:
print('Running on GPU')
else:
print('Running on CPU')
for seed in [1, 11, 111, 1111, 11111]:
setup_seed(seed)
args.seed = seed
print(args)
model = BC_LSTM(args)
print('MELD BC_LSTM MODULE ...')
if args.cuda:
model.cuda()
loss_weights = torch.FloatTensor([1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0])
loss_function = MaskedNLLLoss(loss_weights.cuda() if args.cuda else loss_weights)
optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.l2)
lf = open('logs/cnn_meld_logs.txt', 'a')
dataloader = MELDDataLoader(args)
valid_losses, valid_fscores = [], []
test_fscores, test_accuracys, test_losses = [], [], []
best_loss, best_label, best_pred, best_mask = None, None, None, None
for e in range(args.epochs):
start_time = time.time()
train_loss, train_acc, _, _, _, train_fscore = train_or_eval_model(model, loss_function, dataloader['train'], e, optimizer, mode='train')
valid_loss, valid_acc, _, _, _, valid_fscore = train_or_eval_model(model, loss_function, dataloader['valid'], e, mode='valid')
test_loss, test_acc, test_label, test_pred, test_mask, test_fscore = train_or_eval_model(model, loss_function, dataloader['test'], e, mode='test')
valid_losses.append(valid_loss)
valid_fscores.append(valid_fscore)
test_losses.append(test_loss)
test_accuracys.append(test_acc)
test_fscores.append(test_fscore)
x = 'epoch: {}, train_loss: {}, acc: {}, fscore: {}, valid_loss: {}, acc: {}, fscore: {}, test_loss: {}, acc: {}, fscore: {}, time: {} sec'.format(e+1, train_loss, train_acc, train_fscore, valid_loss, valid_acc, valid_fscore, test_loss, test_acc, test_fscore, round(time.time()-start_time, 2))
print (x)
lf.write(x + '\n')
valid_fscores = np.array(valid_fscores).transpose()
        test_fscores = np.array(test_fscores).transpose() # [1, epochs]
        test_accuracys = np.array(test_accuracys).transpose() # [epochs]
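        # Model selection: report the test scores at the epoch with the lowest
        # validation loss (acc_score1 / f1_score1) and at the epoch with the
        # best validation F1 (acc_score2 / f1_score2).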
f1_score1 = test_fscores[0][np.argmin(valid_losses)]
acc_score1 = test_accuracys[np.argmin(valid_losses)]
f1_score2 = test_fscores[0][np.argmax(valid_fscores[0])]
acc_score2 = test_accuracys[np.argmax(valid_fscores[0])]
scores = [acc_score1, f1_score1, acc_score2, f1_score2]
scores = [str(item) for item in scores]
print ('Test Scores: Weighted F1')
print('@Best Valid Loss: Test Acc: {}, Test F1: {}'.format(acc_score1, f1_score1))
print('@Best Valid F1: Test Acc: {}, Test F1: {}'.format(acc_score2, f1_score2))
rf = open('results/cnn_meld_results.txt', 'a')
rf.write('\t'.join(scores) + '\t' + str(args) + '\n')
rf.close()
| 2.171875 | 2 |
bin/p3starcoordcheck.py | emkailu/PAT3DEM | 0 | 6548 | <reponame>emkailu/PAT3DEM
#!/usr/bin/env python
import os
import sys
import argparse
import pat3dem.star as p3s
import math
def main():
progname = os.path.basename(sys.argv[0])
usage = progname + """ [options] <coord star files>
Output the coord star files after deleting duplicate particles
"""
args_def = {'mindis':150}
parser = argparse.ArgumentParser()
parser.add_argument("star", nargs='*', help="specify coord star files to be processed")
parser.add_argument("-m", "--mindis", type=float, help="specify the minimum distance between particles in pixels, by default {}".format(args_def['mindis']))
args = parser.parse_args()
if len(sys.argv) == 1:
print "usage: " + usage
print "Please run '" + progname + " -h' for detailed options."
sys.exit(1)
# get default values
for i in args_def:
if args.__dict__[i] == None:
args.__dict__[i] = args_def[i]
# loop over all input files
for star in args.star:
star_dict = p3s.star_parse(star, 'data_')
header = star_dict['data_']+star_dict['loop_']
header_len = len(header)
basename = os.path.basename(os.path.splitext(star)[0])
with open(star) as s_read:
lines = s_read.readlines()[header_len:-1]
#
with open(basename+'_checked.star', 'w') as s_w:
s_w.write(''.join(header))
# use list of list to store x and y
xy = []
for line in lines:
good = 1
line = line.split()
# get coord
x, y = float(line[star_dict['_rlnCoordinateX']]), float(line[star_dict['_rlnCoordinateY']])
for i in xy:
dis = math.sqrt((x - i[0])**2 + (y - i[1])**2)
if dis < args.mindis:
print 'Distance between ({},{}) and {} is {}. Discard.'.format(x,y,i,dis)
good = 0
break
if good == 1:
s_w.write('{:>12} '.format(x) + '{:>12} \n'.format(y))
xy.append((x,y))
s_w.write('\n')
if __name__ == '__main__':
main()
| 3.109375 | 3 |
src/review_scraper.py | ryankirkland/voice-of-the-customer | 0 | 6549 | <filename>src/review_scraper.py
from bs4 import BeautifulSoup
import pandas as pd
import requests
import time
import sys
def reviews_scraper(asin_list, filename):
'''
Takes a list of asins, retrieves html for reviews page, and parses out key data points
Parameters
----------
List of ASINs (list of strings)
Returns:
-------
review information (list), reviews_df (Pandas DataFrame)
'''
asin_list = [asin_list]
print(asin_list)
reviews = []
headers = {"User-Agent":"Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:66.0) Gecko/20100101 Firefox/66.0", "Accept-Encoding":"gzip, deflate", "Accept":"text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8", "DNT":"1","Connection":"close", "Upgrade-Insecure-Requests":"1"}
for asin in asin_list:
print(f'Collecting reviews for {asin}')
passed_last_page = None
counter = 1
while (passed_last_page == None) and (counter <= 10):
print(len(reviews))
reviews_url = f'https://www.amazon.com/product-reviews/{asin}/ref=cm_cr_arp_d_viewopt_srt?ie=UTF8&reviewerType=all_reviews&sortBy=recent&pageNumber={counter}'
print(reviews_url)
rev = requests.get(reviews_url, headers=headers)
print(rev.status_code)
reviews_page_content = rev.content
review_soup = BeautifulSoup(reviews_page_content, features='lxml')
print(review_soup)
passed_last_page = review_soup.find('div', attrs={'class': 'a-section a-spacing-top-large a-text-center no-reviews-section'})
if passed_last_page == None:
for d in review_soup.findAll('div', attrs={'data-hook':'review'}):
# print(d)
try:
date = d.find('span', attrs={'data-hook':'review-date'})
date = date.text.split(' ')[-3:]
date = ' '.join(date)
except:
date = 'null'
try:
title = d.find('a', attrs={'data-hook': 'review-title'})
except:
title = 'null'
                    try:
                        product_tag = d.find('a', attrs={'data-hook': 'format-strip'})
                        product = product_tag.text
                    except:
                        product_tag = None
                        product = 'null'
                    try:
                        review_asin = product_tag['href'].split('/')[3]
                    except:
                        review_asin = asin
try:
verified = d.find('span', attrs={'data-hook':'avp-badge'})
if verified == None:
verified = 'Not Verified'
else:
verified = verified.text
except:
verified = 'null'
try:
description = d.find('span', attrs={'data-hook': 'review-body'})
except:
description = 'null'
try:
reviewer_name = d.find('span', attrs={'class': 'a-profile-name'})
except:
reviewer_name = 'null'
try:
stars = d.find('span', attrs={'class': 'a-icon-alt'})
except:
stars = 'null'
reviews.append([review_asin, product, date, verified, title.text, description.text, reviewer_name.text, float(stars.text[0:3])])
else:
pass
counter += 1
time.sleep(15)
reviews_df = pd.DataFrame(reviews, columns=['asin','product','date', 'verified', 'title', 'desc', 'reviewer_name', 'rating'])
reviews_df.to_csv(f'data/reviews/{filename}')
print(f'{len(reviews)} reviews for {len(asin_list)} asins stored successfully in {filename}')
return reviews, reviews_df
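# Example command-line invocation (a sketch; 'B01EXAMPLE' is a hypothetical ASIN
# and the CSV name is arbitrary; output is written to data/reviews/):
#   python review_scraper.py B01EXAMPLE example_reviews.csv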
if __name__ == '__main__':
reviews_scraper(*sys.argv[1:]) | 3.203125 | 3 |
lumberdata/metadata.py | cglumberjack/lumber_metadata | 0 | 6550 | # noinspection PyUnresolvedReferences
import os
import re
# cgl_execute (used below) comes from the cglumberjack toolkit; this import
# path is an assumption.
from cgl.core.utils.general import cgl_execute
# TODO I'm going to need to make a dictionary for my big list of stuff i care about and what's needed for
# every file type....
RAF = ['EXIF:LensModel', 'MakerNotes:RawImageHeight', 'MakerNotes:RawImageWidth', 'EXIF:CreateDate', 'EXIF:ModifyDate',
'EXIF:SerialNumber', 'Composite:Aperture', 'EXIF:FocalLength', 'EXIF:Make', 'EXIF:Model', 'EXIF:LensMake']
MOV = ['EXIF:LensModel', 'MakerNotes:RawImageHeight', 'MakerNotes:RawImageWidth', 'EXIF:CreateDate', 'EXIF:ModifyDate',
'EXIF:SerialNumber', 'Composite:Aperture', 'EXIF:FocalLength', 'EXIF:Make', 'EXIF:Model', 'EXIF:LensMake',
'QuickTime:VideoFrameRate', 'QuickTime:Duration']
R3D = ['ClipName', 'EdgeTC', 'EndEdgeTC', 'TotalFrames', 'FrameHeight', 'FrameWidth', 'Aperture', 'ISO', 'Date',
'AudioSlate', 'VideoSlate', 'Camera', 'CameraModel', 'CameraPIN', 'MediaSerialNumber', 'LensSerialNumber',
'FPS', 'AspectRatio', 'Kelvin', 'LensName', 'LensBrand', 'FocalLength', 'Shutter(deg)', 'SensorID', 'SensorName',
'Take']
def check_exiftool():
"""
checks if exiftool is installed.
:return:
"""
pass
def check_redline():
"""
checks if redline is installed
:return:
"""
pass
def check_ffprobe():
"""
checks if ffprobe is installed
:return:
"""
pass
def get(filein, tool='exiftool', print_output=False):
"""
Due to issues with the exiftool module this is provided as a way to parse output directly
from exiftool through the system commands and cglexecute. For the moment it's only designed
to get the lumberdata for a single file.
:param filein:
:return: dictionary containing lumberdata from exiftool
"""
ext = os.path.splitext(filein)[-1]
d = {}
if tool == 'exiftool':
command = r'exiftool %s' % filein
output = cgl_execute(command=command, verbose=False, print_output=print_output)
for each in output['printout']:
key, value = re.split("\s+:\s+", each)
d[key] = value
return d
elif tool == 'ffprobe':
command = r'%s %s' % ('ffprobe', filein)
output = cgl_execute(command=command)
for each in output['printout']:
try:
values = re.split(":\s+", each)
key = values[0]
values.pop(0)
if 'Stream' in key:
split_v = values[1].split(',')
d['Image Size'] = split_v[2].split()[0]
d['Source Image Width'], d['Source Image Height'] = d['Image Size'].split('x')
d['Video Frame Rate'] = split_v[4].split(' fps')[0].replace(' ', '')
if 'Duration' in key:
d['Track Duration'] = '%s s' % values[0].split(',')[0]
value = ' '.join(values)
d[key] = value
except ValueError:
print('skipping %s' % each)
return d
def get_red_data(filein):
"""
method for pulling lumberdata from r3d files. REDLINE is a command line interface from RED that is required
for this
https://www.red.com/downloads/options?itemInternalId=16144
:param filein:
:return:
"""
file_, ext_ = os.path.splitext(filein)
if ext_.upper() == '.R3D':
command = r'REDLINE --i %s --printMeta 1' % filein
d = {}
for line in os.popen(command).readlines():
line = line.strip('\n')
line = line.replace('\t', '')
line = line.replace(' ', '')
try:
key_, value = line.split(':', 1)
if key_ != 'None':
d[key_] = value
except ValueError:
pass
return d
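# Example usage (a sketch; assumes exiftool/ffprobe/REDLINE are installed and on
# PATH, and the file paths below are hypothetical):
#   exif_meta = get('IMG_0001.MOV', tool='exiftool')
#   probe_meta = get('IMG_0001.MOV', tool='ffprobe')
#   red_meta = get_red_data('A001_C001.R3D')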
| 2.3125 | 2 |
rlbench/task_environment.py | robfiras/RLBench | 0 | 6551 | import logging
from typing import List, Callable
import numpy as np
from pyquaternion import Quaternion
from pyrep import PyRep
from pyrep.errors import IKError
from pyrep.objects import Dummy, Object
from rlbench import utils
from rlbench.action_modes import ArmActionMode, ActionMode
from rlbench.backend.exceptions import BoundaryError, WaypointError
from rlbench.backend.observation import Observation
from rlbench.backend.robot import Robot
from rlbench.backend.scene import Scene
from rlbench.backend.task import Task
from rlbench.demo import Demo
from rlbench.observation_config import ObservationConfig
_TORQUE_MAX_VEL = 9999
_DT = 0.05
_MAX_RESET_ATTEMPTS = 40
_MAX_DEMO_ATTEMPTS = 10
class InvalidActionError(Exception):
pass
class TaskEnvironmentError(Exception):
pass
class TaskEnvironment(object):
def __init__(self, pyrep: PyRep, robot: Robot, scene: Scene, task: Task,
action_mode: ActionMode, dataset_root: str,
obs_config: ObservationConfig,
static_positions: bool = False,
attach_grasped_objects: bool = True):
self._pyrep = pyrep
self._robot = robot
self._scene = scene
self._task = task
self._variation_number = 0
self._action_mode = action_mode
self._dataset_root = dataset_root
self._obs_config = obs_config
self._static_positions = static_positions
self._attach_grasped_objects = attach_grasped_objects
self._reset_called = False
self._prev_ee_velocity = None
self._enable_path_observations = False
self._scene.load(self._task)
self._pyrep.start()
self._target_workspace_check = Dummy.create()
self._last_e = None
def get_name(self) -> str:
return self._task.get_name()
def sample_variation(self) -> int:
self._variation_number = np.random.randint(
0, self._task.variation_count())
return self._variation_number
def set_variation(self, v: int) -> None:
if v >= self.variation_count():
raise TaskEnvironmentError(
'Requested variation %d, but there are only %d variations.' % (
v, self.variation_count()))
self._variation_number = v
def variation_count(self) -> int:
return self._task.variation_count()
def reset(self) -> (List[str], Observation):
self._scene.reset()
try:
desc = self._scene.init_episode(
self._variation_number, max_attempts=_MAX_RESET_ATTEMPTS,
randomly_place=not self._static_positions)
except (BoundaryError, WaypointError) as e:
raise TaskEnvironmentError(
'Could not place the task %s in the scene. This should not '
'happen, please raise an issues on this task.'
% self._task.get_name()) from e
self._reset_called = True
# redundancy resolution
self._last_e = None
# Returns a list of descriptions and the first observation
return desc, self._scene.get_observation()
def get_observation(self) -> Observation:
return self._scene.get_observation()
def get_joint_upper_velocity_limits(self):
return self._robot.arm.get_joint_upper_velocity_limits()
def get_all_graspable_objects(self):
return self._task.get_graspable_objects()
def get_robot_visuals(self):
return self._robot.arm.get_visuals()
def get_all_graspable_object_positions(self, relative_to_cameras=False):
""" returns the positions of all graspable object relative to all enabled cameras """
objects = self._task.get_graspable_objects()
positions = []
for ob in objects:
            if relative_to_cameras:
positions.append(self._scene.get_object_position_relative_to_cameras(ob))
else:
positions.append({"left_shoulder_camera": ob.get_position(),
"right_shoulder_camera": ob.get_position(),
"front_camera": ob.get_position(),
"wrist_camera": ob.get_position()})
return positions
def get_all_graspable_object_poses(self, relative_to_cameras=False):
""" returns the pose of all graspable object relative to all enabled cameras """
objects = self._task.get_graspable_objects()
poses = []
for ob in objects:
if relative_to_cameras:
poses.append(self._scene.get_object_pose_relative_to_cameras(ob))
else:
poses.append({"left_shoulder_camera": ob.get_pose(),
"right_shoulder_camera": ob.get_pose(),
"front_camera": ob.get_pose(),
"wrist_camera": ob.get_pose()})
return poses
def _assert_action_space(self, action, expected_shape):
if np.shape(action) != expected_shape:
raise RuntimeError(
'Expected the action shape to be: %s, but was shape: %s' % (
str(expected_shape), str(np.shape(action))))
def _assert_unit_quaternion(self, quat):
if not np.isclose(np.linalg.norm(quat), 1.0):
raise RuntimeError('Action contained non unit quaternion!')
def _torque_action(self, action):
self._robot.arm.set_joint_target_velocities(
[(_TORQUE_MAX_VEL if t < 0 else -_TORQUE_MAX_VEL)
for t in action])
self._robot.arm.set_joint_forces(np.abs(action))
def _ee_action(self, action, relative_to=None):
self._assert_unit_quaternion(action[3:])
try:
joint_positions = self._robot.arm.solve_ik(
action[:3], quaternion=action[3:], relative_to=relative_to)
self._robot.arm.set_joint_target_positions(joint_positions)
except IKError as e:
raise InvalidActionError('Could not find a path.') from e
done = False
prev_values = None
# Move until reached target joint positions or until we stop moving
# (e.g. when we collide wth something)
while not done:
self._scene.step()
cur_positions = self._robot.arm.get_joint_positions()
reached = np.allclose(cur_positions, joint_positions, atol=0.01)
not_moving = False
if prev_values is not None:
not_moving = np.allclose(
cur_positions, prev_values, atol=0.001)
prev_values = cur_positions
done = reached or not_moving
def _path_action(self, action, relative_to=None):
self._assert_unit_quaternion(action[3:])
try:
# Check if the target is in the workspace; if not, then quick reject
# Only checks position, not rotation
pos_to_check = action[:3]
if relative_to is not None:
self._target_workspace_check.set_position(
pos_to_check, relative_to)
pos_to_check = self._target_workspace_check.get_position()
valid = self._scene.check_target_in_workspace(pos_to_check)
if not valid:
raise InvalidActionError('Target is outside of workspace.')
path = self._robot.arm.get_path(
action[:3], quaternion=action[3:], ignore_collisions=True,
relative_to=relative_to)
done = False
observations = []
while not done:
done = path.step()
self._scene.step()
if self._enable_path_observations:
observations.append(self._scene.get_observation())
success, terminate = self._task.success()
# If the task succeeds while traversing path, then break early
if success:
break
observations.append(self._scene.get_observation())
return observations
except IKError as e:
raise InvalidActionError('Could not find a path.') from e
def step(self, action, camcorder=None) -> (Observation, int, bool):
        # returns (observation, reward, terminate)
if not self._reset_called:
raise RuntimeError(
"Call 'reset' before calling 'step' on a task.")
# action should contain 1 extra value for gripper open close state
arm_action = np.array(action[:-1])
ee_action = action[-1]
        if ee_action < 0.0 or ee_action > 1.0:
raise ValueError('Gripper action expected to be within 0 and 1.')
# Discretize the gripper action
current_ee = (1.0 if self._robot.gripper.get_open_amount()[0] > 0.9 else 0.0)
if ee_action > 0.5:
ee_action = 1.0
elif ee_action < 0.5:
ee_action = 0.0
if current_ee != ee_action:
arm_action = np.array([0.0]*7)
if self._action_mode.arm == ArmActionMode.ABS_JOINT_VELOCITY:
self._assert_action_space(arm_action,
(len(self._robot.arm.joints),))
self._robot.arm.set_joint_target_velocities(arm_action)
self._scene.step()
# if needed save some images
if camcorder:
obs = self._scene.get_observation()
camcorder.save(obs, self.get_robot_visuals(), self.get_all_graspable_objects())
elif self._action_mode.arm == ArmActionMode.DELTA_JOINT_VELOCITY:
self._assert_action_space(arm_action,
(len(self._robot.arm.joints),))
cur = np.array(self._robot.arm.get_joint_velocities())
self._robot.arm.set_joint_target_velocities(cur + arm_action)
self._scene.step()
elif self._action_mode.arm == ArmActionMode.ABS_JOINT_POSITION:
self._assert_action_space(arm_action,
(len(self._robot.arm.joints),))
self._robot.arm.set_joint_target_positions(arm_action)
self._scene.step()
elif self._action_mode.arm == ArmActionMode.DELTA_JOINT_POSITION:
self._assert_action_space(arm_action,
(len(self._robot.arm.joints),))
cur = np.array(self._robot.arm.get_joint_positions())
self._robot.arm.set_joint_target_positions(cur + arm_action)
self._scene.step()
elif self._action_mode.arm == ArmActionMode.ABS_JOINT_TORQUE:
self._assert_action_space(
arm_action, (len(self._robot.arm.joints),))
self._torque_action(arm_action)
self._scene.step()
elif self._action_mode.arm == ArmActionMode.DELTA_JOINT_TORQUE:
cur = np.array(self._robot.arm.get_joint_forces())
new_action = cur + arm_action
self._torque_action(new_action)
self._scene.step()
elif self._action_mode.arm == ArmActionMode.ABS_EE_POSE_WORLD_FRAME:
self._assert_action_space(arm_action, (7,))
self._ee_action(list(arm_action))
elif self._action_mode.arm == ArmActionMode.ABS_EE_POSE_PLAN_WORLD_FRAME:
self._assert_action_space(arm_action, (7,))
self._path_observations = []
self._path_observations = self._path_action(list(arm_action))
elif self._action_mode.arm == ArmActionMode.DELTA_EE_POSE_PLAN_WORLD_FRAME:
self._assert_action_space(arm_action, (7,))
a_x, a_y, a_z, a_qx, a_qy, a_qz, a_qw = arm_action
x, y, z, qx, qy, qz, qw = self._robot.arm.get_tip().get_pose()
new_rot = Quaternion(a_qw, a_qx, a_qy, a_qz) * Quaternion(qw, qx,
qy, qz)
qw, qx, qy, qz = list(new_rot)
new_pose = [a_x + x, a_y + y, a_z + z] + [qx, qy, qz, qw]
self._path_observations = []
self._path_observations = self._path_action(list(new_pose))
elif self._action_mode.arm == ArmActionMode.DELTA_EE_POSE_WORLD_FRAME:
self._assert_action_space(arm_action, (7,))
a_x, a_y, a_z, a_qx, a_qy, a_qz, a_qw = arm_action
x, y, z, qx, qy, qz, qw = self._robot.arm.get_tip().get_pose()
new_rot = Quaternion(a_qw, a_qx, a_qy, a_qz) * Quaternion(
qw, qx, qy, qz)
qw, qx, qy, qz = list(new_rot)
new_pose = [a_x + x, a_y + y, a_z + z] + [qx, qy, qz, qw]
self._ee_action(list(new_pose))
elif self._action_mode.arm == ArmActionMode.EE_POSE_EE_FRAME:
self._assert_action_space(arm_action, (7,))
self._ee_action(
list(arm_action), relative_to=self._robot.arm.get_tip())
elif self._action_mode.arm == ArmActionMode.EE_POSE_PLAN_EE_FRAME:
self._assert_action_space(arm_action, (7,))
self._path_observations = []
self._path_observations = self._path_action(
list(arm_action), relative_to=self._robot.arm.get_tip())
else:
raise RuntimeError('Unrecognised action mode.')
if current_ee != ee_action:
done = False
while not done:
done = self._robot.gripper.actuate(ee_action, velocity=0.2)
self._pyrep.step()
self._task.step()
# if needed save some images
if camcorder:
obs = self._scene.get_observation()
camcorder.save(obs, self.get_robot_visuals(), self.get_all_graspable_objects())
if ee_action == 0.0 and self._attach_grasped_objects:
# If gripper close action, the check for grasp.
for g_obj in self._task.get_graspable_objects():
self._robot.gripper.grasp(g_obj)
else:
# If gripper open action, the check for ungrasp.
self._robot.gripper.release()
success, terminate = self._task.success()
task_reward = self._task.reward()
reward = float(success) if task_reward is None else task_reward
return self._scene.get_observation(), reward, terminate
def resolve_redundancy_joint_velocities(self, actions, setup):
"""
Resolves redundant self-motion into the nullspace without changing the gripper tip position
:param actions:
Current actions without redundancy resolution.
:param setup:
Setup for redundancy resolution defining the mode, weighting etc.
:return: Array of joint velocities, which move the robot's tip according to the provided actions yet push
the joint position towards a reference position.
"""
# get the Jacobian
J = self._robot.arm.get_jacobian()
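        # The Jacobian returned by PyRep is transposed and reversed relative to
        # the layout used here; after reordering, only the last three rows are
        # kept, which appear to correspond to the positional part, so the
        # nullspace motion is constrained w.r.t. the tip position only.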
J = np.transpose(J)
J = np.flip(J)
J = J[-3:]
# compute the pseudo inverse
J_plus = np.linalg.pinv(J)
# weighting
if type(setup["W"]) is list:
W = np.array(setup["W"])
elif setup["W"] is None:
# use default weighting later
W = None
else:
raise TypeError("Unsupported type %s for weighting vector." % type(setup["W"]))
# compute the error
if setup["mode"] == "reference_position":
dL, L = self.get_loss_reference_position(setup["ref_position"], W)
elif setup["mode"] == "collision_avoidance":
dL, L = self.get_loss_collision_avoidance(W, setup)
# compute the joint velocities
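        # (I - J^+ J) projects the gradient dL onto the nullspace of the
        # positional Jacobian, so the added joint velocities should (ideally)
        # leave the end-effector position unchanged while reducing the loss.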
q_dot_redundancy = setup["alpha"] * np.matmul((np.identity(len(self._robot.arm.joints)) - np.matmul(J_plus, J)), dL)
        # the provided Jacobian seems to be inaccurate, resulting in slight movement of the ee. This is why
        # the velocities are set to 0 once the error stops changing much.
e = dL
if setup["cut-off_error"] is not None:
if self._last_e is not None:
e_dot = np.sum(np.abs(e - self._last_e))
if self._last_e is not None and e_dot < setup["cut-off_error"]:
q_dot_redundancy = np.array([0.0] * 7)
self._last_e = e
else:
self._last_e = e
return actions - q_dot_redundancy, L
def get_loss_reference_position(self, ref_pos, W):
"""
        Calculates the summed squared error between the current and the reference configuration as well as
        its partial derivatives with respect to all q's for redundancy resolution.
        -> L(q) = 1/2 sum_{i=1}^N w_i (q_i - \tilde{q}_i)^2
        :param ref_pos:
            Reference position.
        :param W:
            Weighting vector.
        :return:
            1: The partial derivatives of the summed squared error between the current and the
                reference configuration -> \nabla_q L(q)
            2: Summed squared error between the current and the reference configuration. -> L(q)
"""
if W is None:
# default weighting
W = np.array([1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0])
e = (self._robot.arm.get_joint_positions() - ref_pos)
return e * W, 0.5*np.dot(e,e*W)
def get_loss_collision_avoidance(self, W, setup):
"""
        Calculates the loss as well as the respective partial derivatives for redundancy resolution with
        collision avoidance. This only works with tasks that include exactly one obstacle!
        L(q) = \sum_{i=1}^N d_i(q)^{-1}
        :param W:
            Weighting vector.
        :return:
            1: The partial derivatives of the loss above. -> \nabla_q L(q)
            2: The loss shown above. -> L(q)
"""
# get the position of the object
p_obs = self._task.obstacle.get_position() + np.array([0, 0, 0.33]) - self._robot.arm.joints[0].get_position()
#p_obs = self._task.obstacle.get_position()
p_obs = np.append(p_obs, [1])
# get the transformation matrices, their derivatives, and the positions of the links
A_1, A_2, A_3, A_4, A_5, A_6, A_7 = self._robot.get_transformation_matrices()
dA_1, dA_2, dA_3, dA_4, dA_5, dA_6, dA_7 = self._robot.get_transformation_matrices_derivatives()
p_1, p_2, p_3, p_4, p_5, p_6, p_7 = self._robot.get_link_positions_in_ref_frames()
# we use reciprocal of the distance between each link and an obstacle as our Loss
# the chain rule delivers: d/dq L = (p_i^0 (q_1,..., q_i) - p_obs)^T * d/dq (p_i^0 (q_1,..., q_i) - p_obs)
# where p_i^0 = (\prod_{j=1}^i A_j^{j-1}(q_j)) * p_i
# as the left side of d/dq L is used often, let's calculate it in advance
d_1_T = np.transpose(A_1.dot(p_1) - p_obs)
d_2_T = np.transpose(A_1.dot(A_2).dot(p_2) - p_obs)
d_3_T = np.transpose(A_1.dot(A_2).dot(A_3).dot(p_3) - p_obs)
d_4_T = np.transpose(A_1.dot(A_2).dot(A_3).dot(A_4).dot(p_4) - p_obs)
d_5_T = np.transpose(A_1.dot(A_2).dot(A_3).dot(A_4).dot(A_5).dot(p_5) - p_obs)
d_6_T = np.transpose(A_1.dot(A_2).dot(A_3).dot(A_4).dot(A_5).dot(A_6).dot(p_6) - p_obs)
d_7_T = np.transpose(A_1.dot(A_2).dot(A_3).dot(A_4).dot(A_5).dot(A_6).dot(A_7).dot(p_7) - p_obs)
# now we can calculate the derivatives in each dimension
dq_1 = -np.matmul(d_1_T, dA_1.dot(p_1)) + \
-np.matmul(d_2_T, dA_1.dot(A_2).dot(p_2)) + \
-np.matmul(d_3_T, dA_1.dot(A_2).dot(A_3).dot(p_3)) + \
-np.matmul(d_4_T, dA_1.dot(A_2).dot(A_3).dot(A_4).dot(p_4)) + \
-np.matmul(d_5_T, dA_1.dot(A_2).dot(A_3).dot(A_4).dot(A_5).dot(p_5)) + \
-np.matmul(d_6_T, dA_1.dot(A_2).dot(A_3).dot(A_4).dot(A_5).dot(A_6).dot(p_6)) + \
-np.matmul(d_7_T, dA_1.dot(A_2).dot(A_3).dot(A_4).dot(A_5).dot(A_6).dot(A_7).dot(p_7))
dq_2 = -np.matmul(d_2_T, A_1.dot(dA_2).dot(p_2)) + \
-np.matmul(d_3_T, A_1.dot(dA_2).dot(A_3).dot(p_3)) + \
-np.matmul(d_4_T, A_1.dot(dA_2).dot(A_3).dot(A_4).dot(p_4)) + \
-np.matmul(d_5_T, A_1.dot(dA_2).dot(A_3).dot(A_4).dot(A_5).dot(p_5)) + \
-np.matmul(d_6_T, A_1.dot(dA_2).dot(A_3).dot(A_4).dot(A_5).dot(A_6).dot(p_6)) + \
-np.matmul(d_7_T, A_1.dot(dA_2).dot(A_3).dot(A_4).dot(A_5).dot(A_6).dot(A_7).dot(p_7))
dq_3 = -np.matmul(d_3_T, A_1.dot(A_2).dot(dA_3).dot(p_3)) + \
-np.matmul(d_4_T, A_1.dot(A_2).dot(dA_3).dot(A_4).dot(p_4)) + \
-np.matmul(d_5_T, A_1.dot(A_2).dot(dA_3).dot(A_4).dot(A_5).dot(p_5)) + \
-np.matmul(d_6_T, A_1.dot(A_2).dot(dA_3).dot(A_4).dot(A_5).dot(A_6).dot(p_6)) + \
-np.matmul(d_7_T, A_1.dot(A_2).dot(dA_3).dot(A_4).dot(A_5).dot(A_6).dot(A_7).dot(p_7))
dq_4 = -np.matmul(d_4_T, A_1.dot(A_2).dot(A_3).dot(dA_4).dot(p_4)) + \
-np.matmul(d_5_T, A_1.dot(A_2).dot(A_3).dot(dA_4).dot(A_5).dot(p_5)) + \
-np.matmul(d_6_T, A_1.dot(A_2).dot(A_3).dot(dA_4).dot(A_5).dot(A_6).dot(p_6)) + \
-np.matmul(d_7_T, A_1.dot(A_2).dot(A_3).dot(dA_4).dot(A_5).dot(A_6).dot(A_7).dot(p_7))
dq_5 = -np.matmul(d_5_T, A_1.dot(A_2).dot(A_3).dot(A_4).dot(dA_5).dot(p_5)) + \
-np.matmul(d_6_T, A_1.dot(A_2).dot(A_3).dot(A_4).dot(dA_5).dot(A_6).dot(p_6)) + \
-np.matmul(d_7_T, A_1.dot(A_2).dot(A_3).dot(A_4).dot(dA_5).dot(A_6).dot(A_7).dot(p_7))
dq_6 = -np.matmul(d_6_T, A_1.dot(A_2).dot(A_3).dot(A_4).dot(A_5).dot(dA_6).dot(p_6)) + \
-np.matmul(d_7_T, A_1.dot(A_2).dot(A_3).dot(A_4).dot(A_5).dot(dA_6).dot(A_7).dot(p_7))
dq_7 = -np.matmul(d_7_T, A_1.dot(A_2).dot(A_3).dot(A_4).dot(A_5).dot(A_6).dot(dA_7).dot(p_7))
if W is None:
            # default weighting vector -> based on the reciprocal of the squared distance. The greater the
            # distance, the smaller the weight. That is, the loss is concentrated on close objects.
            W = np.array([1 / np.sum(np.square(d_1_T)),
                          1 / np.sum(np.square(d_2_T)),
                          1 / np.sum(np.square(d_3_T)),
                          1 / np.sum(np.square(d_4_T)),
                          1 / np.sum(np.square(d_5_T)),
                          1 / np.sum(np.square(d_6_T)),
                          1 / np.sum(np.square(d_7_T))]) * 0.1
# --- scaling to keep distance to joint limits ---
# get the minimum distance of each joint to its limit
joint_positions = np.array([j.get_joint_position() for j in self._robot.arm.joints])
lower_joint_limits = np.array(setup["lower_joint_pos_limit"])
upper_joint_limits = np.array(setup["upper_joint_pos_limit"])
min_j_distances = [np.minimum(u-j, j-l) for l,u,j in zip(lower_joint_limits, upper_joint_limits,
joint_positions)]
# start scaling down error when joint limit is 15° away.
# Scaling is done linearly from 0 to 1 for 0° <= d <= 15°
rad_thres = 15*(np.pi/180)
W *= np.array([ np.minimum((1/rad_thres)*d, 1.0) for d in min_j_distances])
        # concatenate the derivatives into a vector and apply the weighting
dL = np.array([dq_1, dq_2, dq_3, dq_4, dq_5, dq_6, dq_7])*W
# calculate the loss
L = np.sqrt(np.dot(d_1_T, d_1_T))*W[0] \
+ np.sqrt(np.dot(d_2_T, d_2_T))*W[1] \
+ np.sqrt(np.dot(d_3_T, d_3_T))*W[2] \
+ np.sqrt(np.dot(d_4_T, d_4_T))*W[3] \
+ np.sqrt(np.dot(d_5_T, d_5_T))*W[4] \
+ np.sqrt(np.dot(d_6_T, d_6_T))*W[5] \
+ np.sqrt(np.dot(d_7_T, d_7_T))*W[6]
return dL, L
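    # Illustrative note (not part of the original API): each dq_i above is the negative of the
    # gradient of the summed squared link-obstacle distances w.r.t. joint i, so a caller wanting
    # to increase clearance could, for example, take a small joint-space step:
    #
    #     dL, L = ...  # result of the method above
    #     q = np.array([j.get_joint_position() for j in self._robot.arm.joints])
    #     q_new = q - 0.05 * dL   # step size 0.05 is an arbitrary placeholder
    #
    # This sketch assumes direct joint-position control and is for illustration only.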
def enable_path_observations(self, value: bool) -> None:
if (self._action_mode.arm != ArmActionMode.DELTA_EE_POSE_PLAN_WORLD_FRAME and
self._action_mode.arm != ArmActionMode.ABS_EE_POSE_PLAN_WORLD_FRAME and
self._action_mode.arm != ArmActionMode.EE_POSE_PLAN_EE_FRAME):
raise RuntimeError('Only available in DELTA_EE_POSE_PLAN or '
'ABS_EE_POSE_PLAN action mode.')
self._enable_path_observations = value
def get_path_observations(self):
if (self._action_mode.arm != ArmActionMode.DELTA_EE_POSE_PLAN_WORLD_FRAME and
self._action_mode.arm != ArmActionMode.ABS_EE_POSE_PLAN_WORLD_FRAME and
self._action_mode.arm != ArmActionMode.EE_POSE_PLAN_EE_FRAME):
raise RuntimeError('Only available in DELTA_EE_POSE_PLAN or '
'ABS_EE_POSE_PLAN action mode.')
return self._path_observations
def get_demos(self, amount: int, live_demos: bool = False,
image_paths: bool = False,
callable_each_step: Callable[[Observation], None] = None,
max_attempts: int = _MAX_DEMO_ATTEMPTS,
) -> List[Demo]:
"""Negative means all demos"""
if not live_demos and (self._dataset_root is None
or len(self._dataset_root) == 0):
raise RuntimeError(
"Can't ask for a stored demo when no dataset root provided.")
if not live_demos:
if self._dataset_root is None or len(self._dataset_root) == 0:
raise RuntimeError(
"Can't ask for stored demo when no dataset root provided.")
demos = utils.get_stored_demos(
amount, image_paths, self._dataset_root, self._variation_number,
self._task.get_name(), self._obs_config)
else:
ctr_loop = self._robot.arm.joints[0].is_control_loop_enabled()
self._robot.arm.set_control_loop_enabled(True)
demos = self._get_live_demos(
amount, callable_each_step, max_attempts)
self._robot.arm.set_control_loop_enabled(ctr_loop)
return demos
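    # Note (descriptive comment only): the joint-space control loop is enabled for the duration
    # of live demo collection -- presumably so the arm tracks the scripted waypoints accurately --
    # and then restored to its previous state, so callers do not need to manage it themselves.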
def _get_live_demos(self, amount: int,
callable_each_step: Callable[
[Observation], None] = None,
max_attempts: int = _MAX_DEMO_ATTEMPTS) -> List[Demo]:
demos = []
for i in range(amount):
attempts = max_attempts
while attempts > 0:
random_seed = np.random.get_state()
self.reset()
logging.info('Collecting demo %d' % i)
try:
demo = self._scene.get_demo(
callable_each_step=callable_each_step)
demo.random_seed = random_seed
demos.append(demo)
break
except Exception as e:
attempts -= 1
logging.info('Bad demo. ' + str(e))
if attempts <= 0:
raise RuntimeError(
'Could not collect demos. Maybe a problem with the task?')
return demos
def reset_to_demo(self, demo: Demo) -> (List[str], Observation):
demo.restore_state()
return self.reset()
| 2.046875 | 2 |
tests/generic_relations/test_forms.py | Yoann-Vie/esgi-hearthstone | 0 | 6552 | from django import forms
from django.contrib.contenttypes.forms import generic_inlineformset_factory
from django.contrib.contenttypes.models import ContentType
from django.db import models
from django.test import TestCase
from django.test.utils import isolate_apps
from .models import (
Animal, ForProxyModelModel, Gecko, Mineral, ProxyRelatedModel, TaggedItem,
)
class CustomWidget(forms.TextInput):
pass
class TaggedItemForm(forms.ModelForm):
class Meta:
model = TaggedItem
fields = '__all__'
widgets = {'tag': CustomWidget}
class GenericInlineFormsetTests(TestCase):
def test_output(self):
GenericFormSet = generic_inlineformset_factory(TaggedItem, extra=1)
formset = GenericFormSet()
self.assertHTMLEqual(
''.join(form.as_p() for form in formset.forms),
"""<p><label for="id_generic_relations-taggeditem-content_type-object_id-0-tag">
Tag:</label> <input id="id_generic_relations-taggeditem-content_type-object_id-0-tag" type="text"
name="generic_relations-taggeditem-content_type-object_id-0-tag" maxlength="50"></p>
<p><label for="id_generic_relations-taggeditem-content_type-object_id-0-DELETE">Delete:</label>
<input type="checkbox" name="generic_relations-taggeditem-content_type-object_id-0-DELETE"
id="id_generic_relations-taggeditem-content_type-object_id-0-DELETE">
<input type="hidden" name="generic_relations-taggeditem-content_type-object_id-0-id"
id="id_generic_relations-taggeditem-content_type-object_id-0-id"></p>"""
)
formset = GenericFormSet(instance=Animal())
self.assertHTMLEqual(
''.join(form.as_p() for form in formset.forms),
"""<p><label for="id_generic_relations-taggeditem-content_type-object_id-0-tag">
Tag:</label> <input id="id_generic_relations-taggeditem-content_type-object_id-0-tag"
type="text" name="generic_relations-taggeditem-content_type-object_id-0-tag" maxlength="50"></p>
<p><label for="id_generic_relations-taggeditem-content_type-object_id-0-DELETE">Delete:</label>
<input type="checkbox" name="generic_relations-taggeditem-content_type-object_id-0-DELETE"
id="id_generic_relations-taggeditem-content_type-object_id-0-DELETE"><input type="hidden"
name="generic_relations-taggeditem-content_type-object_id-0-id"
id="id_generic_relations-taggeditem-content_type-object_id-0-id"></p>"""
)
platypus = Animal.objects.create(
common_name='Platypus', latin_name='Ornithorhynchus anatinus',
)
platypus.tags.create(tag='shiny')
GenericFormSet = generic_inlineformset_factory(TaggedItem, extra=1)
formset = GenericFormSet(instance=platypus)
tagged_item_id = TaggedItem.objects.get(tag='shiny', object_id=platypus.id).id
self.assertHTMLEqual(
''.join(form.as_p() for form in formset.forms),
"""<p><label for="id_generic_relations-taggeditem-content_type-object_id-0-tag">Tag:</label>
<input id="id_generic_relations-taggeditem-content_type-object_id-0-tag" type="text"
name="generic_relations-taggeditem-content_type-object_id-0-tag" value="shiny" maxlength="50"></p>
<p><label for="id_generic_relations-taggeditem-content_type-object_id-0-DELETE">Delete:</label>
<input type="checkbox" name="generic_relations-taggeditem-content_type-object_id-0-DELETE"
id="id_generic_relations-taggeditem-content_type-object_id-0-DELETE">
<input type="hidden" name="generic_relations-taggeditem-content_type-object_id-0-id"
value="%s" id="id_generic_relations-taggeditem-content_type-object_id-0-id"></p>
<p><label for="id_generic_relations-taggeditem-content_type-object_id-1-tag">Tag:</label>
<input id="id_generic_relations-taggeditem-content_type-object_id-1-tag" type="text"
name="generic_relations-taggeditem-content_type-object_id-1-tag" maxlength="50"></p>
<p><label for="id_generic_relations-taggeditem-content_type-object_id-1-DELETE">Delete:</label>
<input type="checkbox" name="generic_relations-taggeditem-content_type-object_id-1-DELETE"
id="id_generic_relations-taggeditem-content_type-object_id-1-DELETE">
<input type="hidden" name="generic_relations-taggeditem-content_type-object_id-1-id"
id="id_generic_relations-taggeditem-content_type-object_id-1-id"></p>""" % tagged_item_id
)
lion = Animal.objects.create(common_name='Lion', latin_name='Panthera leo')
formset = GenericFormSet(instance=lion, prefix='x')
self.assertHTMLEqual(
''.join(form.as_p() for form in formset.forms),
"""<p><label for="id_x-0-tag">Tag:</label>
<input id="id_x-0-tag" type="text" name="x-0-tag" maxlength="50"></p>
<p><label for="id_x-0-DELETE">Delete:</label> <input type="checkbox" name="x-0-DELETE" id="id_x-0-DELETE">
<input type="hidden" name="x-0-id" id="id_x-0-id"></p>"""
)
def test_options(self):
TaggedItemFormSet = generic_inlineformset_factory(
TaggedItem,
can_delete=False,
exclude=['tag'],
extra=3,
)
platypus = Animal.objects.create(common_name='Platypus', latin_name='Ornithorhynchus anatinus')
harmless = platypus.tags.create(tag='harmless')
mammal = platypus.tags.create(tag='mammal')
# Works without a queryset.
formset = TaggedItemFormSet(instance=platypus)
self.assertEqual(len(formset.forms), 5)
self.assertHTMLEqual(
formset.forms[0].as_p(),
'<input type="hidden" name="generic_relations-taggeditem-content_type-object_id-0-id" value="%s" '
'id="id_generic_relations-taggeditem-content_type-object_id-0-id">' % harmless.pk
)
self.assertEqual(formset.forms[0].instance, harmless)
self.assertEqual(formset.forms[1].instance, mammal)
self.assertIsNone(formset.forms[2].instance.pk)
# A queryset can be used to alter display ordering.
formset = TaggedItemFormSet(instance=platypus, queryset=TaggedItem.objects.order_by('-tag'))
self.assertEqual(len(formset.forms), 5)
self.assertEqual(formset.forms[0].instance, mammal)
self.assertEqual(formset.forms[1].instance, harmless)
self.assertIsNone(formset.forms[2].instance.pk)
# A queryset that omits items.
formset = TaggedItemFormSet(instance=platypus, queryset=TaggedItem.objects.filter(tag__startswith='harm'))
self.assertEqual(len(formset.forms), 4)
self.assertEqual(formset.forms[0].instance, harmless)
self.assertIsNone(formset.forms[1].instance.pk)
def test_get_queryset_ordering(self):
"""
BaseGenericInlineFormSet.get_queryset() adds default ordering, if
needed.
"""
inline_formset = generic_inlineformset_factory(TaggedItem, exclude=('tag',))
formset = inline_formset(instance=Gecko.objects.create())
self.assertIs(formset.get_queryset().ordered, True)
def test_initial(self):
quartz = Mineral.objects.create(name='Quartz', hardness=7)
GenericFormSet = generic_inlineformset_factory(TaggedItem, extra=1)
ctype = ContentType.objects.get_for_model(quartz)
initial_data = [{
'tag': 'lizard',
'content_type': ctype.pk,
'object_id': quartz.pk,
}]
formset = GenericFormSet(initial=initial_data)
self.assertEqual(formset.forms[0].initial, initial_data[0])
def test_meta_widgets(self):
"""TaggedItemForm has a widget defined in Meta."""
Formset = generic_inlineformset_factory(TaggedItem, TaggedItemForm)
form = Formset().forms[0]
self.assertIsInstance(form['tag'].field.widget, CustomWidget)
@isolate_apps('generic_relations')
def test_incorrect_content_type(self):
class BadModel(models.Model):
content_type = models.PositiveIntegerField()
msg = "fk_name 'generic_relations.BadModel.content_type' is not a ForeignKey to ContentType"
with self.assertRaisesMessage(Exception, msg):
generic_inlineformset_factory(BadModel, TaggedItemForm)
def test_save_new_uses_form_save(self):
class SaveTestForm(forms.ModelForm):
def save(self, *args, **kwargs):
self.instance.saved_by = 'custom method'
return super().save(*args, **kwargs)
Formset = generic_inlineformset_factory(ForProxyModelModel, fields='__all__', form=SaveTestForm)
instance = ProxyRelatedModel.objects.create()
data = {
'form-TOTAL_FORMS': '1',
'form-INITIAL_FORMS': '0',
'form-MAX_NUM_FORMS': '',
'form-0-title': 'foo',
}
formset = Formset(data, instance=instance, prefix='form')
self.assertTrue(formset.is_valid())
new_obj = formset.save()[0]
self.assertEqual(new_obj.saved_by, 'custom method')
def test_save_new_for_proxy(self):
Formset = generic_inlineformset_factory(ForProxyModelModel, fields='__all__', for_concrete_model=False)
instance = ProxyRelatedModel.objects.create()
data = {
'form-TOTAL_FORMS': '1',
'form-INITIAL_FORMS': '0',
'form-MAX_NUM_FORMS': '',
'form-0-title': 'foo',
}
formset = Formset(data, instance=instance, prefix='form')
self.assertTrue(formset.is_valid())
new_obj, = formset.save()
self.assertEqual(new_obj.obj, instance)
def test_save_new_for_concrete(self):
Formset = generic_inlineformset_factory(ForProxyModelModel, fields='__all__', for_concrete_model=True)
instance = ProxyRelatedModel.objects.create()
data = {
'form-TOTAL_FORMS': '1',
'form-INITIAL_FORMS': '0',
'form-MAX_NUM_FORMS': '',
'form-0-title': 'foo',
}
formset = Formset(data, instance=instance, prefix='form')
self.assertTrue(formset.is_valid())
new_obj, = formset.save()
self.assertNotIsInstance(new_obj.obj, ProxyRelatedModel)
def test_initial_count(self):
GenericFormSet = generic_inlineformset_factory(TaggedItem)
data = {
'form-TOTAL_FORMS': '3',
'form-INITIAL_FORMS': '3',
'form-MAX_NUM_FORMS': '',
}
formset = GenericFormSet(data=data, prefix='form')
self.assertEqual(formset.initial_form_count(), 3)
formset = GenericFormSet(data=data, prefix='form', save_as_new=True)
self.assertEqual(formset.initial_form_count(), 0)
def test_save_as_new(self):
"""
The save_as_new parameter creates new items that are associated with
the object.
"""
lion = Animal.objects.create(common_name='Lion', latin_name='Panthera leo')
yellow = lion.tags.create(tag='yellow')
hairy = lion.tags.create(tag='hairy')
GenericFormSet = generic_inlineformset_factory(TaggedItem)
data = {
'form-TOTAL_FORMS': '3',
'form-INITIAL_FORMS': '2',
'form-MAX_NUM_FORMS': '',
'form-0-id': str(yellow.pk),
'form-0-tag': 'hunts',
'form-1-id': str(hairy.pk),
'form-1-tag': 'roars',
}
formset = GenericFormSet(data, instance=lion, prefix='form', save_as_new=True)
self.assertTrue(formset.is_valid())
tags = formset.save()
self.assertEqual([tag.tag for tag in tags], ['hunts', 'roars'])
hunts, roars = tags
self.assertSequenceEqual(lion.tags.order_by('tag'), [hairy, hunts, roars, yellow])
| 2.03125 | 2 |
src/sage/rings/polynomial/pbori/fglm.py | tamnguyen135/sage | 1 | 6553 | from .PyPolyBoRi import (BooleSet, Polynomial, BoolePolynomialVector,
FGLMStrategy)
def _fglm(I, from_ring, to_ring):
r"""
Unchecked variant of fglm
"""
vec = BoolePolynomialVector(I)
return FGLMStrategy(from_ring, to_ring, vec).main()
def fglm(I, from_ring, to_ring):
r"""
Convert *reduced* Groebner Basis in from_ring to a GroebnerBasis in to_ring.
It acts independent of the global ring, which is restored at the end of the
computation.
TESTS::
sage: from sage.rings.polynomial.pbori import *
sage: from sage.rings.polynomial.pbori.PyPolyBoRi import OrderCode
sage: dp_asc = OrderCode.dp_asc
sage: r=declare_ring(['x','y','z'],dict())
sage: old_ring = r
sage: new_ring = old_ring.clone(ordering=dp_asc)
sage: (x,y,z) = [old_ring.variable(i) for i in range(3)]
sage: ideal=[x+z, y+z]# lp Groebner basis
sage: from sage.rings.polynomial.pbori.fglm import fglm
sage: list(fglm(ideal, old_ring, new_ring))
[y + x, z + x]
"""
for poly in I:
if poly.ring().id() != from_ring.id():
raise ValueError("Ideal I must be from the first ring argument")
return _fglm(I, from_ring, to_ring)
def vars_real_divisors(monomial, monomial_set):
r"""
    Return all elements of monomial_set which, multiplied by a single variable, yield monomial.
TESTS::
sage: from sage.rings.polynomial.pbori.pbori import *
sage: from sage.rings.polynomial.pbori.PyPolyBoRi import OrderCode
sage: dp_asc = OrderCode.dp_asc
sage: from sage.rings.polynomial.pbori.PyPolyBoRi import Ring
sage: r=Ring(1000)
sage: x = r.variable
sage: b=BooleSet([x(1)*x(2),x(2)])
sage: from sage.rings.polynomial.pbori.fglm import vars_real_divisors
sage: vars_real_divisors(x(1)*x(2)*x(3),b)
{{x(1),x(2)}}
"""
return BooleSet(Polynomial(monomial_set.divisors_of(monomial)). \
graded_part(monomial.deg() - 1))
def m_k_plus_one(completed_elements, variables):
r"""
    Calculate $m_{k+1}$ from the FGLM algorithm as described in Wichmann's diploma thesis.
It would be nice to be able to efficiently extract the smallest term of a polynomial.
TESTS::
sage: from sage.rings.polynomial.pbori.pbori import *
sage: from sage.rings.polynomial.pbori.PyPolyBoRi import OrderCode
sage: dp_asc = OrderCode.dp_asc
sage: from sage.rings.polynomial.pbori.PyPolyBoRi import Ring
sage: r=Ring(1000)
sage: x = r.variable
sage: from sage.rings.polynomial.pbori.PyPolyBoRi import Monomial
sage: s=BooleSet([x(1)*x(2),x(1),x(2),Monomial(r),x(3)])
sage: from sage.rings.polynomial.pbori.fglm import m_k_plus_one
sage: variables=BooleSet([x(1),x(2),x(3)])
sage: m_k_plus_one(s,variables)
x(2)*x(3)
sage: r2 = r.clone(ordering=dp_asc)
sage: m_k_plus_one(r2(s).set(),r2(variables).set())
x(1)*x(3)
"""
return sorted(completed_elements.cartesian_product(variables).diff(
completed_elements))[0]
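# Illustrative note (assumption, not part of this module): in an FGLM-style enumeration the two
# helpers above are typically used together -- m_k_plus_one picks the next candidate monomial from
# the border of an already-completed set, and vars_real_divisors finds which known monomials reach
# it by multiplication with a single variable, e.g.
#
#     m = m_k_plus_one(completed, variables)
#     predecessors = vars_real_divisors(m, completed)
#
# where `completed` and `variables` are BooleSets built as in the doctests above.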
| 2.09375 | 2 |
ferry/embed/umap_reduce.py | coursetable/ferry | 4 | 6554 | <gh_stars>1-10
"""
Uses UMAP (https://umap-learn.readthedocs.io/en/latest/index.html) to reduce course
embeddings to two dimensions for visualization.
"""
import pandas as pd
import umap
from sklearn.preprocessing import StandardScaler
from ferry import config
courses = pd.read_csv(
config.DATA_DIR / "course_embeddings/courses_deduplicated.csv",
index_col=0,
)
# mypy: ignore-errors
embeddings = pd.read_hdf(
config.DATA_DIR / "course_embeddings/fasttext_embeddings.h5",
key="embeddings",
)
embeddings = StandardScaler().fit_transform(embeddings)
reducer = umap.UMAP()
umap_embeddings = reducer.fit_transform(embeddings)
courses["umap1"] = umap_embeddings[:, 0]
courses["umap2"] = umap_embeddings[:, 1]
courses.to_csv(config.DATA_DIR / "course_embeddings/courses_deduplicated_umap.csv")
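# Illustrative follow-up (assumption, not part of this script): the two UMAP columns written above
# can be visualised directly, e.g. with matplotlib:
#
#     import matplotlib.pyplot as plt
#     plt.scatter(courses["umap1"], courses["umap2"], s=2, alpha=0.5)
#     plt.xlabel("umap1")
#     plt.ylabel("umap2")
#     plt.savefig("courses_umap.png", dpi=200)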
| 2.609375 | 3 |
flora_fauna.py | zhumakova/ClassProject | 0 | 6555 | <reponame>zhumakova/ClassProject<filename>flora_fauna.py
import inheritance
class Flora:
def __init__(self, name, lifespan, habitat, plant_type):
self.name = name
self.lifespan = lifespan
self.habitat = habitat
self.plant_type = plant_type
self.plant_size = 0
class Fauna:
def __init__(self, name):
self.name = name
class Predator(Fauna):
def __init__(self, name:str, predator_type:str, what_eats:str, lifespan:int):
super().__init__(name)
self.predator_type = predator_type
self.what_eats = what_eats
self.lifespan = lifespan
# def check_planet(self,planet:tsk4.Planet):
# if planet.fauna and not planet.humanity:
# print('YES')
# else:
# print('NO')
class Mammal(Fauna):
def __init__(self, name, mammal_type, lifespan):
super().__init__(name)
self.mammal_type = mammal_type
self.lifespan = lifespan
def check_planet(self,planet:inheritance.Planet):
if planet.flora and planet.fauna and not planet.humanity:
planet.add_fauna(self)
shark = Predator('baby shark','sea','all',20)
giraffe = Mammal('malwan','earth',20)
giraffe.check_planet(inheritance.friendly)
marti = Mammal('marti','earth',20)
marti.check_planet(inheritance.friendly)
print(inheritance.friendly.__dict__)
print(inheritance.Planet.__dict__)
| 3.84375 | 4 |
jug/subcommands/demo.py | rdenham/jug | 309 | 6556 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2017, <NAME> <<EMAIL>>
# vim: set ts=4 sts=4 sw=4 expandtab smartindent:
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from . import SubCommand
__all__ = ['DemoCommand']
class DemoCommand(SubCommand):
'''Create demo directory.
'''
name = "demo"
def run(self, *args, **kwargs):
import os
from os import path
print('''
Jug will create a directory called 'jug-demo/' with a file called 'primes.py'
inside.
You can test jug by switching to that directory and running the commands:
jug status primes.py
followed by
jug execute primes.py
Upon termination of the process, results will be in a file called 'output.txt'.
PARALLEL USAGE
You can speed up the process by running several 'jug execute' in parallel:
jug execute primes.py &
jug execute primes.py &
jug execute primes.py &
jug execute primes.py &
TROUBLESHOOTING:
Should you run into issues, you can run the internal tests for jug with
jug test-jug
FURTHER READING
The online documentation contains further reading. You can read the next
tutorial here:
http://jug.readthedocs.io/en/latest/decrypt-example.html
''')
if path.exists('jug-demo'):
print("Jug-demo previously created")
return
os.mkdir('jug-demo')
output = open('jug-demo/primes.py', 'wt')
output.write(r'''
from time import sleep
from jug import TaskGenerator
@TaskGenerator
def is_prime(n):
sleep(1.)
for j in range(2, n - 1):
if (n % j) == 0:
return False
return True
@TaskGenerator
def count_primes(ps):
return sum(ps)
@TaskGenerator
def write_output(n):
output = open('output.txt', 'wt')
output.write("Found {0} primes <= 100.\n".format(n))
output.close()
primes100 = []
for n in range(2, 101):
primes100.append(is_prime(n))
n_primes = count_primes(primes100)
write_output(n_primes)
''')
output.close()
demo = DemoCommand()
| 2.171875 | 2 |
search/controllers/simple/tests.py | ID2797370/arxiv-search | 35 | 6557 | <reponame>ID2797370/arxiv-search<filename>search/controllers/simple/tests.py
"""Tests for simple search controller, :mod:`search.controllers.simple`."""
from http import HTTPStatus
from unittest import TestCase, mock
from werkzeug.datastructures import MultiDict
from werkzeug.exceptions import InternalServerError, NotFound, BadRequest
from search.domain import SimpleQuery
from search.controllers import simple
from search.controllers.simple.forms import SimpleSearchForm
from search.services.index import (
IndexConnectionError,
QueryError,
DocumentNotFound,
)
class TestRetrieveDocument(TestCase):
"""Tests for :func:`.simple.retrieve_document`."""
@mock.patch("search.controllers.simple.SearchSession")
def test_encounters_queryerror(self, mock_index):
"""There is a bug in the index or query."""
def _raiseQueryError(*args, **kwargs):
raise QueryError("What now")
mock_index.get_document.side_effect = _raiseQueryError
with self.assertRaises(InternalServerError):
try:
response_data, code, headers = simple.retrieve_document(1)
except QueryError as ex:
self.fail("QueryError should be handled (caught %s)" % ex)
self.assertEqual(
mock_index.get_document.call_count,
1,
"A search should be attempted",
)
@mock.patch("search.controllers.simple.SearchSession")
def test_index_raises_connection_exception(self, mock_index):
"""Index service raises a IndexConnectionError."""
mock_index.get_document.side_effect = IndexConnectionError
with self.assertRaises(InternalServerError):
response_data, code, headers = simple.retrieve_document("124.5678")
self.assertEqual(
mock_index.get_document.call_count,
1,
"A search should be attempted",
)
call_args, call_kwargs = mock_index.get_document.call_args
self.assertIsInstance(call_args[0], str, "arXiv ID is passed")
# self.assertEqual(code, status.HTTP_500_INTERNAL_SERVER_ERROR)
@mock.patch("search.controllers.simple.SearchSession")
def test_document_not_found(self, mock_index):
"""The document is not found."""
def _raiseDocumentNotFound(*args, **kwargs):
raise DocumentNotFound("What now")
mock_index.get_document.side_effect = _raiseDocumentNotFound
with self.assertRaises(NotFound):
try:
response_data, code, headers = simple.retrieve_document(1)
except DocumentNotFound as ex:
self.fail(
"DocumentNotFound should be handled (caught %s)" % ex
)
self.assertEqual(
mock_index.get_document.call_count,
1,
"A search should be attempted",
)
class TestSearchController(TestCase):
"""Tests for :func:`.simple.search`."""
@mock.patch(
"search.controllers.simple.url_for",
lambda *a, **k: f'https://arxiv.org/{k["paper_id"]}',
)
@mock.patch("search.controllers.simple.SearchSession")
def test_arxiv_id(self, mock_index):
"""Query parameter contains an arXiv ID."""
request_data = MultiDict({"query": "1702.00123"})
response_data, code, headers = simple.search(request_data)
self.assertEqual(
code,
HTTPStatus.MOVED_PERMANENTLY,
"Response should be a 301 redirect.",
)
self.assertIn("Location", headers, "Location header should be set")
self.assertEqual(
mock_index.search.call_count, 0, "No search should be attempted"
)
@mock.patch("search.controllers.simple.SearchSession")
def test_no_form_data(self, mock_index):
"""No form data has been submitted."""
request_data = MultiDict()
response_data, code, headers = simple.search(request_data)
self.assertEqual(code, HTTPStatus.OK, "Response should be OK.")
self.assertIn("form", response_data, "Response should include form.")
self.assertEqual(
mock_index.search.call_count, 0, "No search should be attempted"
)
@mock.patch("search.controllers.simple.SearchSession")
def test_single_field_term(self, mock_index):
"""Form data are present."""
mock_index.search.return_value = {"metadata": {}, "results": []}
request_data = MultiDict({"searchtype": "title", "query": "foo title"})
response_data, code, headers = simple.search(request_data)
self.assertEqual(
mock_index.search.call_count, 1, "A search should be attempted"
)
call_args, call_kwargs = mock_index.search.call_args
self.assertIsInstance(
call_args[0],
SimpleQuery,
"An SimpleQuery is passed to the search index",
)
self.assertEqual(code, HTTPStatus.OK, "Response should be OK.")
@mock.patch("search.controllers.simple.SearchSession")
def test_invalid_data(self, mock_index):
"""Form data are invalid."""
request_data = MultiDict({"searchtype": "title"})
response_data, code, headers = simple.search(request_data)
self.assertEqual(code, HTTPStatus.OK, "Response should be OK.")
self.assertIn("form", response_data, "Response should include form.")
self.assertEqual(
mock_index.search.call_count, 0, "No search should be attempted"
)
@mock.patch("search.controllers.simple.SearchSession")
def test_index_raises_connection_exception(self, mock_index):
"""Index service raises a IndexConnectionError."""
def _raiseIndexConnectionError(*args, **kwargs):
raise IndexConnectionError("What now")
mock_index.search.side_effect = _raiseIndexConnectionError
request_data = MultiDict({"searchtype": "title", "query": "foo title"})
with self.assertRaises(InternalServerError):
_, _, _ = simple.search(request_data)
self.assertEqual(
mock_index.search.call_count, 1, "A search should be attempted"
)
call_args, call_kwargs = mock_index.search.call_args
self.assertIsInstance(
call_args[0],
SimpleQuery,
"An SimpleQuery is passed to the search index",
)
@mock.patch("search.controllers.simple.SearchSession")
def test_index_raises_query_error(self, mock_index):
"""Index service raises a QueryError."""
def _raiseQueryError(*args, **kwargs):
raise QueryError("What now")
mock_index.search.side_effect = _raiseQueryError
request_data = MultiDict({"searchtype": "title", "query": "foo title"})
with self.assertRaises(InternalServerError):
try:
response_data, code, headers = simple.search(request_data)
except QueryError as ex:
self.fail("QueryError should be handled (caught %s)" % ex)
self.assertEqual(
mock_index.search.call_count, 1, "A search should be attempted"
)
class TestSimpleSearchForm(TestCase):
"""Tests for :class:`.SimpleSearchForm`."""
def test_searchtype_only(self):
"""User has entered only a searchtype (field)."""
data = MultiDict({"searchtype": "title"})
form = SimpleSearchForm(data)
self.assertFalse(form.validate(), "Form should be invalid")
def test_query_only(self):
"""User has entered only a query (value); this should never happen."""
data = MultiDict({"query": "someone monkeyed with the request"})
form = SimpleSearchForm(data)
self.assertFalse(form.validate(), "Form should be invalid")
def test_query_and_searchtype(self):
"""User has entered a searchtype (field) and query (value)."""
data = MultiDict({"searchtype": "title", "query": "foo title"})
form = SimpleSearchForm(data)
self.assertTrue(form.validate(), "Form should be valid")
class TestQueryFromForm(TestCase):
"""Tests for :func:`.simple._query_from_form`."""
    def test_multiple_simple(self):
        """Form data has a searchtype and query."""
data = MultiDict({"searchtype": "title", "query": "foo title"})
form = SimpleSearchForm(data)
query = simple._query_from_form(form)
self.assertIsInstance(
query, SimpleQuery, "Should return an instance of SimpleQuery"
)
def test_form_data_has_order(self):
"""Form data includes sort order."""
data = MultiDict(
{
"searchtype": "title",
"query": "foo title",
"order": "submitted_date",
}
)
form = SimpleSearchForm(data)
query = simple._query_from_form(form)
self.assertIsInstance(
query, SimpleQuery, "Should return an instance of SimpleQuery"
)
self.assertEqual(query.order, "submitted_date")
def test_form_data_has_no_order(self):
"""Form data includes sort order parameter, but it is 'None'."""
data = MultiDict(
{"searchtype": "title", "query": "foo title", "order": "None"} #
)
form = SimpleSearchForm(data)
query = simple._query_from_form(form)
self.assertIsInstance(
query, SimpleQuery, "Should return an instance of SimpleQuery"
)
self.assertIsNone(query.order, "Order should be None")
def test_querystring_has_wildcard_at_start(self):
"""Querystring starts with a wildcard."""
data = MultiDict({"searchtype": "title", "query": "*foo title"})
form = SimpleSearchForm(data)
self.assertFalse(form.validate(), "Form should be invalid")
def test_input_whitespace_is_stripped(self):
"""If query has padding whitespace, it should be removed."""
data = MultiDict({"searchtype": "title", "query": " foo title "})
form = SimpleSearchForm(data)
self.assertTrue(form.validate(), "Form should be valid.")
self.assertEqual(form.query.data, "foo title")
def test_querystring_has_unbalanced_quotes(self):
"""Querystring has an odd number of quote characters."""
data = MultiDict({"searchtype": "title", "query": '"rhubarb'})
form = SimpleSearchForm(data)
self.assertFalse(form.validate(), "Form should be invalid")
data["query"] = '"rhubarb"'
form = SimpleSearchForm(data)
self.assertTrue(form.validate(), "Form should be valid")
data["query"] = '"rhubarb" "pie'
form = SimpleSearchForm(data)
self.assertFalse(form.validate(), "Form should be invalid")
data["query"] = '"rhubarb" "pie"'
form = SimpleSearchForm(data)
self.assertTrue(form.validate(), "Form should be valid")
class TestPaginationParametersAreFunky(TestCase):
"""
The user may have monkeyed with the order or sort parameters.
Since these are limited to specific values, there is no other reason for
them to be invalid. Given that they are passed around among
views (to persist users' selection), it's important to break the chain.
To do this, we return a 400 Bad Request, with a clean link back to the
search form.
"""
@mock.patch("search.controllers.simple.url_for")
def test_order_is_invalid(self, mock_url_for):
"""The order parameter on the request is invalid."""
request_data = MultiDict(
{
"searchtype": "title",
"query": "foo title",
"size": 50, # Valid.
"order": "foo", # Invalid
}
)
with self.assertRaises(BadRequest):
simple.search(request_data)
@mock.patch("search.controllers.simple.url_for")
    def test_size_is_invalid(self, mock_url_for):
        """The size parameter on the request is invalid."""
request_data = MultiDict(
{
"searchtype": "title",
"query": "foo title",
"size": 51, # Invalid
"order": "", # Valid
}
)
with self.assertRaises(BadRequest):
simple.search(request_data)
class TestClassicAuthorSyntaxIsIntercepted(TestCase):
"""
The user may have entered an author query using `surname_f` syntax.
This is an artefact of the classic search system, and not intended to be
supported. Nevertheless, users have become accustomed to this syntax. We
therefore rewrite the query using a comma, and show the user a warning
about the syntax change.
"""
@mock.patch("search.controllers.simple.SearchSession")
def test_all_fields_search_contains_classic_syntax(self, mock_index):
"""User has entered a `surname_f` query in an all-fields search."""
request_data = MultiDict(
{
"searchtype": "all",
"query": "franklin_r",
"size": 50,
"order": "",
}
)
mock_index.search.return_value = {"metadata": {}, "results": []}
data, code, headers = simple.search(request_data)
self.assertEqual(
data["query"].value,
"franklin, r",
"The query should be rewritten.",
)
self.assertTrue(
data["has_classic_format"],
"A flag denoting the syntax interception should be set"
" in the response context, so that a message may be"
" rendered in the template.",
)
@mock.patch("search.controllers.simple.SearchSession")
def test_author_search_contains_classic_syntax(self, mock_index):
"""User has entered a `surname_f` query in an author search."""
request_data = MultiDict(
{
"searchtype": "author",
"query": "franklin_r",
"size": 50,
"order": "",
}
)
mock_index.search.return_value = {"metadata": {}, "results": []}
data, code, headers = simple.search(request_data)
self.assertEqual(
data["query"].value,
"franklin, r",
"The query should be rewritten.",
)
self.assertTrue(
data["has_classic_format"],
"A flag denoting the syntax interception should be set"
" in the response context, so that a message may be"
" rendered in the template.",
)
@mock.patch("search.controllers.simple.SearchSession")
def test_all_fields_search_multiple_classic_syntax(self, mock_index):
"""User has entered a classic query with multiple authors."""
request_data = MultiDict(
{
"searchtype": "all",
"query": "j franklin_r hawking_s",
"size": 50,
"order": "",
}
)
mock_index.search.return_value = {"metadata": {}, "results": []}
data, code, headers = simple.search(request_data)
self.assertEqual(
data["query"].value,
"j franklin, r; hawking, s",
"The query should be rewritten.",
)
self.assertTrue(
data["has_classic_format"],
"A flag denoting the syntax interception should be set"
" in the response context, so that a message may be"
" rendered in the template.",
)
@mock.patch("search.controllers.simple.SearchSession")
def test_title_search_contains_classic_syntax(self, mock_index):
"""User has entered a `surname_f` query in a title search."""
request_data = MultiDict(
{
"searchtype": "title",
"query": "franklin_r",
"size": 50,
"order": "",
}
)
mock_index.search.return_value = {"metadata": {}, "results": []}
data, code, headers = simple.search(request_data)
self.assertEqual(
data["query"].value,
"franklin_r",
"The query should not be rewritten.",
)
self.assertFalse(
data["has_classic_format"],
"Flag should not be set, as no rewrite has occurred.",
)
| 2.53125 | 3 |
kuri_wandering_robot/scripts/kuri_wandering_robot_executive_node.py | hcrlab/kuri_wandering_robot | 0 | 6558 | #!/usr/bin/env python
# ROS Libraries
import actionlib
from actionlib_msgs.msg import GoalStatus
from control_msgs.msg import JointTrajectoryControllerState, FollowJointTrajectoryAction, FollowJointTrajectoryGoal
from kuri_wandering_robot.msg import Power
from wandering_behavior.msg import WanderAction, WanderGoal
import rospy
from sensor_msgs.msg import CompressedImage
from std_msgs.msg import Empty
from trajectory_msgs.msg import JointTrajectoryPoint
# Python Default Libraries
import base64
import csv
from enum import Enum
import os
import requests
import threading
import time
import traceback
# Custom Libraries
from sent_messages_database import SentMessagesDatabase
class KuriWanderingRobotState(Enum):
"""
During NORMAL, the base moves according to wandering_behavior.
During CHARGING, the robot's eyes are closed and it is charging. The robot
transitions from NORMAL to CHARGING if its battery is below a threshold and
it is on the charger. It transitions from CHARGING to NORMAL if it's battery
is above a threshold or it is off the charger.
"""
NORMAL = 1
CHARGING = 2
class KuriWanderingRobot(object):
"""
The central executive node. This node runs a control loop that manages the
robot's state: turning on and monitoring progress of the wandering module
in NORMAL, turning off wandering in CHARGING, and switching back to NORMAL
when the robot is sufficiently charged.
This node also runs anomaly detection to detect low battery; when it detects
low battery, it sends a low battery request to the Slackbot, which then
sends it to the helpers. This node can be extended with additional anomaly
detection and help requests, as needed. This node also subscribes to a dummy
`where_am_i_help` topic, which sends helpers the sample `where_am_i` help
message. Note that that is only in place to illsutrate the sample
`where_am_i` help message, and actually using that would require developing
a custom anomaly detection system to trigger the robot asking for that type
of help.
Finally, this node has a separate thread that continually queries the
Slackbot for responses to its help requests.
"""
def __init__(self):
"""
Initialize an instance of the KuriWanderingRobot class
"""
self.has_loaded = False
# Get the Slackbot URL
self.slackbot_url = rospy.get_param('~slackbot_url')
# Initialize the state.
self.state_lock = threading.Lock()
self.state_changed = True
self.state = KuriWanderingRobotState.NORMAL
# Initialize the wandering module
self.wandering_module_action = actionlib.SimpleActionClient('/wandering_behavior/navigate', WanderAction)
# Initialize the eye controller
self.eyelid_controller_action = actionlib.SimpleActionClient('/eyelids_controller/follow_joint_trajectory', FollowJointTrajectoryAction)
self.eye_closed_position = 0.41
self.eye_open_position = 0.0
# Initialize the camera
self.img_sub = rospy.Subscriber(
'/upward_looking_camera/compressed', CompressedImage, self.image_callback, queue_size=1)
self.latest_image = None
self.latest_image_lock = threading.Lock()
# Initialize low battery anomaly detector
self.battery_sub = rospy.Subscriber(
"/mobile_base/power", Power, self.power_callback, queue_size=1)
self.previous_battery_lock = threading.Lock()
self.previous_battery = None
self.previous_dock_present = None
self.battery_notification_thresholds = rospy.get_param('~battery_notification_thresholds', [40, 20, 10, 5, 4, 3, 2, 1])
# if the battery is less than this and Kuri is docked, charge
self.to_charge_threshold = rospy.get_param('~to_charge_threshold', 50)
        # if the battery is greater than this and Kuri is charging, switch back to NORMAL
self.charging_done_threshold = rospy.get_param('~charging_done_threshold', 90)
# Whether the low battery message should include Kuri's current camera image
self.low_battery_message_include_image = rospy.get_param('~low_battery_message_include_image', True)
# Initialize the dummy `where_am_i` anomaly detector
self.where_am_i_help_sub = rospy.Subscriber(
"/where_am_i_help", Empty, self.where_am_i_help_callback, queue_size=1)
# Initialize storing images and message IDs
self.sent_messages_database_filepath = rospy.get_param('~send_messages_database_filepath')
self.sent_messages_database = SentMessagesDatabase.load(
self.sent_messages_database_filepath)
self.database_save_interval = 1
self.database_updates_since_last_save = 0
# Initialize the head controller
self.head_state_sub = rospy.Subscriber(
"/head_controller/state", JointTrajectoryControllerState, self.head_state_callback, queue_size=1)
self.head_controller_action = actionlib.SimpleActionClient('/head_controller/follow_joint_trajectory', FollowJointTrajectoryAction)
self.head_tilt_speed = 0.2 # head tilt is in [-0.8, 0.3]
self.head_pan_speed = 0.2 # head pan is in [-0.75, 0.75]
# Initialize the Slackbot updates thread
self.slackbot_responses_thread = threading.Thread(
target=self.get_slackbot_updates,
)
self.slackbot_responses_thread.start()
# Initialize the state machine
self.state_machine_thread = threading.Thread(
target=self.state_machine_control_loop,
)
self.state_machine_thread.start()
self.has_centered_head = False
self.has_loaded = True
def database_updated(self, num_updates=1):
"""
        Called every time the database is updated. Saves the database every
        self.database_save_interval updates.
"""
self.database_updates_since_last_save += num_updates
if self.database_updates_since_last_save % self.database_save_interval == 0:
self.sent_messages_database.save(self.sent_messages_database_filepath)
rospy.logdebug("Saved sent_messages_database!")
def open_eyes(self, duration_secs=0.2):
"""
Open the robot's eyes
"""
rospy.logdebug("Open Eyes")
duration = rospy.Duration.from_sec(duration_secs)
goal = FollowJointTrajectoryGoal()
goal.trajectory.header.stamp = rospy.Time.now()
goal.trajectory.joint_names = ["eyelids_joint"]
point = JointTrajectoryPoint()
point.positions = [self.eye_open_position]
point.velocities = []
point.accelerations = []
point.effort = []
point.time_from_start = duration
goal.trajectory.points = [point]
# Send the goal
self.eyelid_controller_action.wait_for_server()
self.eyelid_controller_action.send_goal(goal)
self.eyelid_controller_action.wait_for_result(duration)
def close_eyes(self, duration_secs=0.2):
"""
Close the robot's eyes
"""
rospy.logdebug("Close Eyes")
duration = rospy.Duration.from_sec(duration_secs)
goal = FollowJointTrajectoryGoal()
goal.trajectory.header.stamp = rospy.Time.now()
goal.trajectory.joint_names = ["eyelids_joint"]
point = JointTrajectoryPoint()
point.positions = [self.eye_closed_position]
point.velocities = []
point.accelerations = []
point.effort = []
point.time_from_start = duration
goal.trajectory.points = [point]
# Send the goal
self.eyelid_controller_action.wait_for_server()
self.eyelid_controller_action.send_goal(goal)
self.eyelid_controller_action.wait_for_result(duration)
def head_state_callback(self, head_state_msg):
"""
Get the head's current position
"""
if not self.has_loaded:
return
if not self.has_centered_head:
self.center_head(head_state_msg.actual.positions[0], head_state_msg.actual.positions[1])
def center_head(self, current_pan, current_tilt):
"""
Center Kuri's head. This involves moving from the current pan and tilt
to the centered values of (0.0, -0.3)
"""
pan_endpoint = 0.0
tilt_endpoint = -0.3
n_waypoints = 10
# Compute the actual endpoint and duration_secs
duration_secs = max(
abs(pan_endpoint-current_pan)/self.head_pan_speed,
abs(tilt_endpoint-current_tilt)/self.head_tilt_speed)
duration = rospy.Duration.from_sec(duration_secs)
# Create the goal
goal = FollowJointTrajectoryGoal()
goal.trajectory.header.stamp = rospy.Time.now()
goal.trajectory.joint_names = ["head_1_joint", "head_2_joint"]
goal.trajectory.points = []
pan_interval = (pan_endpoint-current_pan)/(n_waypoints-1)
tilt_interval = (tilt_endpoint-current_tilt)/(n_waypoints-1)
time_interval = duration/n_waypoints
for i in range(n_waypoints):
point = JointTrajectoryPoint()
point.positions = [current_pan + i*pan_interval, current_tilt + i*tilt_interval]
point.velocities = []
point.accelerations = []
point.effort = []
point.time_from_start = (i+1)*time_interval
goal.trajectory.points.append(point)
# Send the goal
self.head_controller_action.wait_for_server()
self.head_controller_action.send_goal(goal)
self.head_controller_action.wait_for_result(duration)
self.has_centered_head = True
def state_machine_control_loop(self, rate_hz=10):
"""
The control loop for the state machine. All of the state machine logic
is handled in this function and the functions it calls.
During NORMAL, the base moves according to wandering_behavior.
During CHARGING, the robot's eyes are closed and it is charging. The
robot transitions from NORMAL to CHARGING if its battery is below a
threshold and it is on the charger. It transitions from CHARGING to
NORMAL if it's battery is above a threshold or it is off the charger.
"""
rate = rospy.Rate(rate_hz)
while not rospy.is_shutdown():
rate.sleep()
with self.state_lock:
state_at_start_of_loop = self.state
if (self.state == KuriWanderingRobotState.NORMAL):
goal_state = self.wandering_module_action.get_state()
if (self.state_changed or goal_state == GoalStatus.ABORTED or goal_state == GoalStatus.SUCCEEDED):
rospy.logdebug("Waiting for wandering_module_action server")
self.wandering_module_action.wait_for_server()
rospy.logdebug("Sending goal to wandering_module_action")
# Effort -1 means "don't stop unless preempted"
self.wandering_module_action.send_goal(WanderGoal(effort=-1))
self.open_eyes()
with self.previous_battery_lock:
if (self.previous_battery is not None and self.previous_battery < self.to_charge_threshold and self.previous_dock_present):
self.close_eyes()
self.state = KuriWanderingRobotState.CHARGING
self.wandering_module_action.cancel_all_goals()
rospy.loginfo("State: NORMAL ==> CHARGING")
elif self.state == KuriWanderingRobotState.CHARGING:
with self.previous_battery_lock:
if (self.previous_battery is None or not self.previous_dock_present or self.previous_battery >= self.charging_done_threshold):
self.state = KuriWanderingRobotState.NORMAL
rospy.loginfo("State: CHARGING ==> NORMAL")
state_at_end_of_loop = self.state
self.state_changed = (state_at_start_of_loop != state_at_end_of_loop)
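    # Summary of the transitions implemented above (descriptive comment only):
    # NORMAL -> CHARGING when previous_battery < to_charge_threshold and the dock is present;
    # CHARGING -> NORMAL when the dock is absent, the battery level is unknown, or
    # previous_battery >= charging_done_threshold. A new wander goal is (re)sent whenever the
    # state has just changed or the previous wander goal finished or aborted.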
def image_callback(self, img_msg):
"""
Store the latest image.
"""
if not self.has_loaded: return
with self.latest_image_lock:
self.latest_image = img_msg
def power_callback(self, msg):
"""
        Callback function for Kuri's power update. If Kuri's battery has crossed
a battery_notification_threshold, notify the Slackbot.
"""
if not self.has_loaded: return
with self.state_lock:
with self.previous_battery_lock:
self.previous_dock_present = msg.dock_present
if self.state == KuriWanderingRobotState.CHARGING:
self.previous_battery = msg.battery.pct
else:
update_previous_battery = True
if msg.battery.pct <= self.battery_notification_thresholds[0]:
# Send the low-battery helper notifications when the battery
# crosses the thresholds defined in self.battery_notification_thresholds
for i in range(len(self.battery_notification_thresholds)):
if (self.previous_battery is None or (self.previous_battery > self.battery_notification_thresholds[i]) and msg.battery.pct <= self.battery_notification_thresholds[i]):
try:
# Send a low_battery_alert
dict_to_send = {'battery_pct':msg.battery.pct}
if self.low_battery_message_include_image:
with self.latest_image_lock:
if self.latest_image is not None:
image_contents = base64.b64encode(bytearray(self.latest_image.data)).decode('ascii')
dict_to_send['image'] = image_contents
rospy.loginfo("Sending battery request for pct %s" % msg.battery.pct)
res = requests.post(
os.path.join(self.slackbot_url, 'low_battery'),
json=dict_to_send,
)
res_json = res.json()
if not res_json['success']:
update_previous_battery = False
except Exception as e:
rospy.logwarn("Error communicating with Slackbot /low_battery at URL %s." % self.slackbot_url)
if "res" in locals():
rospy.logwarn("Response text %s." % res.text)
rospy.logwarn(traceback.format_exc())
rospy.logwarn("Error %s." % e)
update_previous_battery = False
break
if (update_previous_battery and (self.previous_battery is None or msg.battery.pct < self.previous_battery)):
self.previous_battery = msg.battery.pct
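    # Note on the logic above (descriptive comment only): a helper notification is sent each time
    # the battery percentage crosses one of battery_notification_thresholds from above. If the
    # Slackbot call fails (or reports success=False), previous_battery is intentionally left
    # unchanged so that the same threshold crossing is retried on the next power message.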
def where_am_i_help_callback(self, msg):
"""
A dummy callback that triggers sending a where_am_i help message to the
Slackbot. This is merely intended to showcase some of the Slackbot's
capabilities. Users who want a robot that autonomously asks the human to
tell it where it is should implement their own anomaly detection system
for triggering this help request.
"""
with self.latest_image_lock:
if self.latest_image is None:
rospy.loginfo("Attempted to send where_am_i help request but have no image.")
return
try:
            # Send a where_am_i help request
rospy.loginfo("Sending where_am_i help request")
with self.latest_image_lock:
image_contents = base64.b64encode(bytearray(self.latest_image.data)).decode('ascii')
res = requests.post(
os.path.join(self.slackbot_url, 'where_am_i'),
json={'image':image_contents, 'options':['Lounge', "Office#252", "200 Corridoor", "Atrium"]},
)
res_json = res.json()
message_id = res_json['message_id']
self.sent_messages_database.add_respondable_message(message_id)
self.database_updated()
except Exception as e:
rospy.logwarn("Error communicating with Slackbot /where_am_i at URL %s." % self.slackbot_url)
if "res" in locals():
rospy.logwarn("Response text %s." % res.text)
rospy.logwarn(traceback.format_exc())
rospy.logwarn("Error %s." % e)
def get_slackbot_updates(self, refresh_secs=5.0):
"""
Once every refresh_secs seconds, request updates (e.g., human responses)
from the Slackbot. Note that you can optionally request updates for
        particular message_ids (e.g., those that have not received responses yet)
"""
r = rospy.Rate(1.0/refresh_secs)
while not rospy.is_shutdown():
if not self.has_loaded: r.sleep()
try:
message_ids_and_action_ts = self.sent_messages_database.get_message_ids_and_latest_action_ts()
# Request responses for those message_ids
res = requests.post(
os.path.join(self.slackbot_url, 'get_updates'),
json={'message_ids_and_action_ts':message_ids_and_action_ts},
)
res_json = res.json()
rospy.logdebug("Got updates from Slackbot %s" % res_json)
message_id_to_responses = res_json["message_id_to_responses"]
if len(message_id_to_responses) > 0:
num_updates = 0
# Insert reactions into the database
for message_id in message_id_to_responses:
for action_ts, response in message_id_to_responses[message_id]:
rospy.loginfo("Got reaction %s from at ts %s" % (response, action_ts))
self.sent_messages_database.add_user_response(message_id, action_ts, response)
num_updates += 1
self.database_updated(num_updates)
except Exception as e:
rospy.logwarn("Error communicating with Slackbot /get_updates at URL %s." % self.slackbot_url)
if "res" in locals():
rospy.logwarn("Response text %s." % res.text)
rospy.logwarn(traceback.format_exc())
rospy.logwarn("Error %s." % e)
r.sleep()
if __name__ == "__main__":
rospy.init_node("kuri_wandering_robot")
kuri_wandering_robot = KuriWanderingRobot()
rospy.spin()
| 2.40625 | 2 |
src/python/nimbusml/internal/entrypoints/trainers_lightgbmbinaryclassifier.py | montehoover/NimbusML | 134 | 6559 | # - Generated by tools/entrypoint_compiler.py: do not edit by hand
"""
Trainers.LightGbmBinaryClassifier
"""
import numbers
from ..utils.entrypoints import EntryPoint
from ..utils.utils import try_set, unlist
def trainers_lightgbmbinaryclassifier(
training_data,
predictor_model=None,
number_of_iterations=100,
learning_rate=None,
number_of_leaves=None,
minimum_example_count_per_leaf=None,
feature_column_name='Features',
booster=None,
label_column_name='Label',
example_weight_column_name=None,
row_group_column_name=None,
normalize_features='Auto',
caching='Auto',
unbalanced_sets=False,
weight_of_positive_examples=1.0,
sigmoid=0.5,
evaluation_metric='Logloss',
maximum_bin_count_per_feature=255,
verbose=False,
silent=True,
number_of_threads=None,
early_stopping_round=0,
batch_size=1048576,
use_categorical_split=None,
handle_missing_value=True,
use_zero_as_missing_value=False,
minimum_example_count_per_group=100,
maximum_categorical_split_point_count=32,
categorical_smoothing=10.0,
l2_categorical_regularization=10.0,
seed=None,
parallel_trainer=None,
**params):
"""
**Description**
Train a LightGBM binary classification model.
:param number_of_iterations: Number of iterations. (inputs).
:param training_data: The data to be used for training (inputs).
:param learning_rate: Shrinkage rate for trees, used to prevent
over-fitting. Range: (0,1]. (inputs).
:param number_of_leaves: Maximum leaves for trees. (inputs).
:param minimum_example_count_per_leaf: Minimum number of
instances needed in a child. (inputs).
:param feature_column_name: Column to use for features (inputs).
:param booster: Which booster to use, can be gbtree, gblinear or
dart. gbtree and dart use tree based model while gblinear
uses linear function. (inputs).
:param label_column_name: Column to use for labels (inputs).
:param example_weight_column_name: Column to use for example
weight (inputs).
:param row_group_column_name: Column to use for example groupId
(inputs).
:param normalize_features: Normalize option for the feature
column (inputs).
:param caching: Whether trainer should cache input training data
(inputs).
:param unbalanced_sets: Use for binary classification when
training data is not balanced. (inputs).
:param weight_of_positive_examples: Control the balance of
positive and negative weights, useful for unbalanced classes.
A typical value to consider: sum(negative cases) /
sum(positive cases). (inputs).
:param sigmoid: Parameter for the sigmoid function. (inputs).
:param evaluation_metric: Evaluation metrics. (inputs).
:param maximum_bin_count_per_feature: Maximum number of bucket
bin for features. (inputs).
:param verbose: Verbose (inputs).
:param silent: Printing running messages. (inputs).
:param number_of_threads: Number of parallel threads used to run
LightGBM. (inputs).
:param early_stopping_round: Rounds of early stopping, 0 will
disable it. (inputs).
:param batch_size: Number of entries in a batch when loading
data. (inputs).
:param use_categorical_split: Enable categorical split or not.
(inputs).
:param handle_missing_value: Enable special handling of missing
value or not. (inputs).
:param use_zero_as_missing_value: Enable usage of zero (0) as
missing value. (inputs).
:param minimum_example_count_per_group: Minimum number of
instances per categorical group. (inputs).
:param maximum_categorical_split_point_count: Max number of
categorical thresholds. (inputs).
    :param categorical_smoothing: Laplace smoothing term in categorical
        feature split. Avoids the bias of small categories. (inputs).
:param l2_categorical_regularization: L2 Regularization for
categorical split. (inputs).
:param seed: Sets the random seed for LightGBM to use. (inputs).
:param parallel_trainer: Parallel LightGBM Learning Algorithm
(inputs).
:param predictor_model: The trained model (outputs).
"""
entrypoint_name = 'Trainers.LightGbmBinaryClassifier'
inputs = {}
outputs = {}
if number_of_iterations is not None:
inputs['NumberOfIterations'] = try_set(
obj=number_of_iterations,
none_acceptable=True,
is_of_type=numbers.Real)
if training_data is not None:
inputs['TrainingData'] = try_set(
obj=training_data,
none_acceptable=False,
is_of_type=str)
if learning_rate is not None:
inputs['LearningRate'] = try_set(
obj=learning_rate,
none_acceptable=True,
is_of_type=numbers.Real)
if number_of_leaves is not None:
inputs['NumberOfLeaves'] = try_set(
obj=number_of_leaves,
none_acceptable=True,
is_of_type=numbers.Real)
if minimum_example_count_per_leaf is not None:
inputs['MinimumExampleCountPerLeaf'] = try_set(
obj=minimum_example_count_per_leaf,
none_acceptable=True,
is_of_type=numbers.Real)
if feature_column_name is not None:
inputs['FeatureColumnName'] = try_set(
obj=feature_column_name,
none_acceptable=True,
is_of_type=str,
is_column=True)
if booster is not None:
inputs['Booster'] = try_set(
obj=booster,
none_acceptable=True,
is_of_type=dict)
if label_column_name is not None:
inputs['LabelColumnName'] = try_set(
obj=label_column_name,
none_acceptable=True,
is_of_type=str,
is_column=True)
if example_weight_column_name is not None:
inputs['ExampleWeightColumnName'] = try_set(
obj=example_weight_column_name,
none_acceptable=True,
is_of_type=str,
is_column=True)
if row_group_column_name is not None:
inputs['RowGroupColumnName'] = try_set(
obj=row_group_column_name,
none_acceptable=True,
is_of_type=str,
is_column=True)
if normalize_features is not None:
inputs['NormalizeFeatures'] = try_set(
obj=normalize_features,
none_acceptable=True,
is_of_type=str,
values=[
'No',
'Warn',
'Auto',
'Yes'])
if caching is not None:
inputs['Caching'] = try_set(
obj=caching,
none_acceptable=True,
is_of_type=str,
values=[
'Auto',
'Memory',
'None'])
if unbalanced_sets is not None:
inputs['UnbalancedSets'] = try_set(
obj=unbalanced_sets,
none_acceptable=True,
is_of_type=bool)
if weight_of_positive_examples is not None:
inputs['WeightOfPositiveExamples'] = try_set(
obj=weight_of_positive_examples,
none_acceptable=True,
is_of_type=numbers.Real)
if sigmoid is not None:
inputs['Sigmoid'] = try_set(
obj=sigmoid,
none_acceptable=True,
is_of_type=numbers.Real)
if evaluation_metric is not None:
inputs['EvaluationMetric'] = try_set(
obj=evaluation_metric,
none_acceptable=True,
is_of_type=str,
values=[
'None',
'Default',
'Logloss',
'Error',
'AreaUnderCurve'])
if maximum_bin_count_per_feature is not None:
inputs['MaximumBinCountPerFeature'] = try_set(
obj=maximum_bin_count_per_feature,
none_acceptable=True,
is_of_type=numbers.Real)
if verbose is not None:
inputs['Verbose'] = try_set(
obj=verbose,
none_acceptable=True,
is_of_type=bool)
if silent is not None:
inputs['Silent'] = try_set(
obj=silent,
none_acceptable=True,
is_of_type=bool)
if number_of_threads is not None:
inputs['NumberOfThreads'] = try_set(
obj=number_of_threads,
none_acceptable=True,
is_of_type=numbers.Real)
if early_stopping_round is not None:
inputs['EarlyStoppingRound'] = try_set(
obj=early_stopping_round,
none_acceptable=True,
is_of_type=numbers.Real)
if batch_size is not None:
inputs['BatchSize'] = try_set(
obj=batch_size,
none_acceptable=True,
is_of_type=numbers.Real)
if use_categorical_split is not None:
inputs['UseCategoricalSplit'] = try_set(
obj=use_categorical_split, none_acceptable=True, is_of_type=bool)
if handle_missing_value is not None:
inputs['HandleMissingValue'] = try_set(
obj=handle_missing_value,
none_acceptable=True,
is_of_type=bool)
if use_zero_as_missing_value is not None:
inputs['UseZeroAsMissingValue'] = try_set(
obj=use_zero_as_missing_value,
none_acceptable=True,
is_of_type=bool)
if minimum_example_count_per_group is not None:
inputs['MinimumExampleCountPerGroup'] = try_set(
obj=minimum_example_count_per_group,
none_acceptable=True,
is_of_type=numbers.Real,
valid_range={
'Inf': 0,
'Max': 2147483647})
if maximum_categorical_split_point_count is not None:
inputs['MaximumCategoricalSplitPointCount'] = try_set(
obj=maximum_categorical_split_point_count,
none_acceptable=True,
is_of_type=numbers.Real,
valid_range={
'Inf': 0,
'Max': 2147483647})
if categorical_smoothing is not None:
inputs['CategoricalSmoothing'] = try_set(
obj=categorical_smoothing,
none_acceptable=True,
is_of_type=numbers.Real, valid_range={'Min': 0.0})
if l2_categorical_regularization is not None:
inputs['L2CategoricalRegularization'] = try_set(
obj=l2_categorical_regularization,
none_acceptable=True,
is_of_type=numbers.Real, valid_range={'Min': 0.0})
if seed is not None:
inputs['Seed'] = try_set(
obj=seed,
none_acceptable=True,
is_of_type=numbers.Real)
if parallel_trainer is not None:
inputs['ParallelTrainer'] = try_set(
obj=parallel_trainer,
none_acceptable=True,
is_of_type=dict)
if predictor_model is not None:
outputs['PredictorModel'] = try_set(
obj=predictor_model, none_acceptable=False, is_of_type=str)
input_variables = {
x for x in unlist(inputs.values())
if isinstance(x, str) and x.startswith("$")}
output_variables = {
x for x in unlist(outputs.values())
if isinstance(x, str) and x.startswith("$")}
entrypoint = EntryPoint(
name=entrypoint_name, inputs=inputs, outputs=outputs,
input_variables=input_variables,
output_variables=output_variables)
return entrypoint
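# Hedged usage sketch (not part of the generated module): the builder above only
# assembles an EntryPoint node; string values prefixed with "$" are collected
# into the node's input/output variable sets. The Python-level function name
# below is an assumption for illustration only.
#
#   node = trainers_lightgbmbinaryclassifier(
#       training_data='$training_data',
#       predictor_model='$predictor_model',
#       number_of_iterations=100,
#       learning_rate=0.2,
#       number_of_leaves=31)
#   # node.input_variables == {'$training_data'}
#   # node.output_variables == {'$predictor_model'}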
| 2.078125 | 2 |
Job Portal with Automated Resume Screening/gensim-4.1.2/gensim/test/test_rpmodel.py | Candida18/Job-Portal-with-Automated-Resume-Screening | 3 | 6560 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010 <NAME> <<EMAIL>>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Automated tests for checking transformation algorithms (the models package).
"""
import logging
import unittest
import numpy as np
from gensim.corpora.mmcorpus import MmCorpus
from gensim.models import rpmodel
from gensim import matutils
from gensim.test.utils import datapath, get_tmpfile
class TestRpModel(unittest.TestCase):
def setUp(self):
self.corpus = MmCorpus(datapath('testcorpus.mm'))
def test_transform(self):
# create the transformation model
# HACK; set fixed seed so that we always get the same random matrix (and can compare against expected results)
np.random.seed(13)
model = rpmodel.RpModel(self.corpus, num_topics=2)
# transform one document
doc = list(self.corpus)[0]
transformed = model[doc]
vec = matutils.sparse2full(transformed, 2) # convert to dense vector, for easier equality tests
expected = np.array([-0.70710677, 0.70710677])
self.assertTrue(np.allclose(vec, expected)) # transformed entries must be equal up to sign
def test_persistence(self):
fname = get_tmpfile('gensim_models.tst')
model = rpmodel.RpModel(self.corpus, num_topics=2)
model.save(fname)
model2 = rpmodel.RpModel.load(fname)
self.assertEqual(model.num_topics, model2.num_topics)
self.assertTrue(np.allclose(model.projection, model2.projection))
tstvec = []
self.assertTrue(np.allclose(model[tstvec], model2[tstvec])) # try projecting an empty vector
def test_persistence_compressed(self):
fname = get_tmpfile('gensim_models.tst.gz')
model = rpmodel.RpModel(self.corpus, num_topics=2)
model.save(fname)
model2 = rpmodel.RpModel.load(fname, mmap=None)
self.assertEqual(model.num_topics, model2.num_topics)
self.assertTrue(np.allclose(model.projection, model2.projection))
tstvec = []
self.assertTrue(np.allclose(model[tstvec], model2[tstvec])) # try projecting an empty vector
if __name__ == '__main__':
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.DEBUG)
unittest.main()
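# Hedged usage note: besides the __main__ hook above, individual cases can be
# run with the standard unittest runner, e.g.
#   python -m unittest gensim.test.test_rpmodel.TestRpModel.test_transform -v
# (the module path is assumed to match this file's in-tree location).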
| 2.4375 | 2 |
playground/tianhaoz95/gan_getting_started/cgan_model.py | tianhaoz95/mangekyo | 0 | 6561 | import tensorflow as tf
from tensorflow import keras
class CondGeneratorModel(keras.Model):
def __init__(self):
super(CondGeneratorModel, self).__init__()
        # Expand the latent vector into 7*7*256 features, later reshaped to (7, 7, 256)
self.dense_1 = keras.layers.Dense(7*7*256)
self.reshape_1 = keras.layers.Reshape((7, 7, 256))
        # Embed the class label (0-9) and expand it to 7*7*256 features
self.embedder = keras.layers.Embedding(10, 100)
self.dense_2 = keras.layers.Dense(7*7*256)
# From (7,7,256) to (7,7,128)
self.convt_1 = keras.layers.Conv2DTranspose(
128, (5, 5), strides=1, padding='same', use_bias=False)
self.convt_bn_1 = keras.layers.BatchNormalization()
self.convt_relu_1 = keras.layers.LeakyReLU()
# From (7,7,128) to (14,14,64)
self.convt_2 = keras.layers.Conv2DTranspose(
64, (5, 5), strides=2, padding='same', use_bias=False)
self.convt_bn_2 = keras.layers.BatchNormalization()
self.convt_relu_2 = keras.layers.LeakyReLU()
# From (14,14,64) to (28,28,1)
self.convt_out = keras.layers.Conv2DTranspose(
1, (5, 5), strides=2, padding='same', use_bias=False)
def call(self, inputs):
feat_x = inputs[0]
label = inputs[2]
# Expand label input to be the same as latent feature
label_x = self.embedder(label)
label_x = self.dense_2(label_x)
label_x = tf.squeeze(label_x, 1)
# Expand features to image channels
feat_x = self.dense_1(feat_x)
# Combine latent feature and label input
x = tf.math.multiply(feat_x, label_x)
x = self.reshape_1(x)
# From (7,7,256) to (7,7,128)
x = self.convt_1(x)
x = self.convt_bn_1(x)
x = self.convt_relu_1(x)
# From (7,7,128) to (14,14,64)
x = self.convt_2(x)
x = self.convt_bn_2(x)
x = self.convt_relu_2(x)
# From (14,14,64) to (28,28,1)
x = self.convt_out(x)
return [x, None, label]
class CondDiscriminatorModel(keras.Model):
def __init__(self):
super(CondDiscriminatorModel, self).__init__()
self.embedder = keras.layers.Embedding(10, 100)
self.expand_layer = keras.layers.Dense(28*28*1)
self.reshape_layer = keras.layers.Reshape((28, 28, 1))
self.conv_1 = keras.layers.Conv2D(
64, (5, 5), strides=2, padding='same', input_shape=(28, 28, 1))
self.relu_1 = keras.layers.LeakyReLU()
self.drop_1 = keras.layers.Dropout(0.3)
self.conv_2 = keras.layers.Conv2D(
128, (5, 5), strides=2, padding='same')
self.relu_2 = keras.layers.LeakyReLU()
self.drop_2 = keras.layers.Dropout(0.3)
self.flatten = keras.layers.Flatten()
self.out = keras.layers.Dense(1)
def call(self, inputs):
images_x = inputs[0]
labels = inputs[2]
labels_x = self.embedder(labels)
labels_x = self.expand_layer(labels_x)
labels_x = self.reshape_layer(labels_x)
x = tf.math.multiply(images_x, labels_x)
x = self.conv_1(x)
x = self.relu_1(x)
x = self.drop_1(x)
x = self.conv_2(x)
x = self.relu_2(x)
x = self.drop_2(x)
x = self.flatten(x)
x = self.out(x)
return x
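# Hedged usage sketch (not part of the original module): one forward pass wiring
# the two models together. Batch size 16, the 100-dim latent vector and the
# (batch, 1)-shaped integer labels are assumptions; the middle slot of the
# input/output triples is unused by both models, so None is passed through.
if __name__ == '__main__':
    generator = CondGeneratorModel()
    discriminator = CondDiscriminatorModel()
    noise = tf.random.normal([16, 100])
    labels = tf.random.uniform([16, 1], minval=0, maxval=10, dtype=tf.int32)
    fake_images, _, fake_labels = generator([noise, None, labels])
    logits = discriminator([fake_images, None, fake_labels])
    print(fake_images.shape, logits.shape)  # expected: (16, 28, 28, 1) (16, 1)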
| 2.734375 | 3 |
ahd2fhir/utils/resource_handler.py | miracum/ahd2fhir | 3 | 6562 | <reponame>miracum/ahd2fhir<filename>ahd2fhir/utils/resource_handler.py
import base64
import datetime
import logging
import os
import time
from typing import List, Tuple
import structlog
import tenacity
from averbis import Pipeline
from fhir.resources.bundle import Bundle
from fhir.resources.codeableconcept import CodeableConcept
from fhir.resources.composition import Composition, CompositionSection
from fhir.resources.documentreference import DocumentReference
from fhir.resources.fhirtypes import DateTime
from fhir.resources.identifier import Identifier
from fhir.resources.reference import Reference
from fhir.resources.resource import Resource
from prometheus_client import Counter, Histogram, Summary
from tenacity.after import after_log
from ahd2fhir.mappers import ahd_to_condition, ahd_to_medication_statement
from ahd2fhir.utils.bundle_builder import BundleBuilder
from ahd2fhir.utils.custom_mappers import custom_mappers, mapper_functions
from ahd2fhir.utils.device_builder import build_device
from ahd2fhir.utils.fhir_utils import sha256_of_identifier
MAPPING_FAILURES_COUNTER = Counter("mapping_failures", "Exceptions during mapping")
MAPPING_DURATION_SUMMARY = Histogram(
"map_duration_seconds",
"Time spent mapping",
buckets=(
0.05,
0.1,
0.5,
1.0,
2.0,
3.0,
5.0,
8.0,
13.0,
21.0,
34.0,
55.0,
"inf",
),
)
EXTRACTED_RESOURCES_COUNT_SUMMARY = Summary(
"extracted_resources", "Number of extracted resources for each processed document"
)
DOCUMENT_LENGTH_SUMMARY = Summary(
    "document_length",
    "Length of each processed document's text in characters",
)
DISCHARGE_SUMMARY_CONCEPT_TEXT = (
"Clinical document Kind of document from LOINC Document Ontology"
)
DISCHARGE_SUMMARY_CONCEPT = CodeableConcept(
**{
"coding": [
{
"system": "http://loinc.org",
"code": "74477-1",
"display": DISCHARGE_SUMMARY_CONCEPT_TEXT,
},
],
"text": DISCHARGE_SUMMARY_CONCEPT_TEXT,
}
)
AHD_TYPE_DOCUMENT_ANNOTATION = "de.averbis.types.health.DocumentAnnotation"
AHD_TYPE_MEDICATION = "de.averbis.types.health.Medication"
AHD_TYPE_DIAGNOSIS = "de.averbis.types.health.Diagnosis"
log = structlog.get_logger()
class TransientError(Exception):
pass
class ResourceHandler:
def __init__(self, averbis_pipeline: Pipeline):
self.pipeline = averbis_pipeline
self.bundle_builder = BundleBuilder()
@MAPPING_FAILURES_COUNTER.count_exceptions()
@MAPPING_DURATION_SUMMARY.time()
def handle_documents(self, document_references: List[DocumentReference]) -> Bundle:
"""
Process a list of DocumentReferences
"""
all_resources = []
bundle_id = None
for document_reference in document_references:
resources_from_document = self._process_documentreference(
document_reference
)
composition = self._build_composition(
document_reference, resources_from_document
)
bundle_id = composition.id
all_resources.extend(resources_from_document)
all_resources.append(composition)
EXTRACTED_RESOURCES_COUNT_SUMMARY.observe(len(all_resources))
result_bundle = self.bundle_builder.build_from_resources(
all_resources, bundle_id
)
return result_bundle
def handle_bundle(self, bundle: Bundle):
"""
Process all FHIR DocumentReference resources from a given bundle
"""
document_references = []
for entry in bundle.entry:
if entry.resource.resource_type == "DocumentReference":
document_references.append(entry.resource)
return self.handle_documents(document_references)
def _build_composition(
self, document_reference: DocumentReference, all_resources: List[Resource]
):
composition_type = (
document_reference.type
if document_reference.type is not None
else DISCHARGE_SUMMARY_CONCEPT
)
composition_subject = document_reference.subject
composition_category = document_reference.category
composition_encounter = None
if document_reference.context is not None:
if len(document_reference.context.encounter) > 1:
log.warning(
"DocumentReference contains more than one encounter. "
+ "Using the first."
)
composition_encounter = document_reference.context.encounter[0]
composition_author = None
composition_sections = []
for resource in all_resources:
resource_type = resource.resource_type
if resource_type == "Device":
author = Reference.construct()
author.reference = f"Device/{resource.id}"
author.type = "Device"
composition_author = author
continue
            # Check whether a resource-specific section already exists and add
            # it if not, otherwise select the matching section
if not any(
section.title == resource_type for section in composition_sections
):
resource_section = CompositionSection.construct()
resource_section.title = resource_type
resource_section.entry = []
composition_sections.append(resource_section)
ind = len(composition_sections) - 1
else:
ind = [
ind
for ind, section in enumerate(composition_sections)
if section.title == resource_type
][0]
entry_reference = Reference.construct()
entry_reference.reference = resource_type + "/" + resource.id
composition_sections[ind].entry.append(entry_reference)
if composition_author is None:
composition_author = Reference(**{"display": "Averbis Health Discovery"})
composition_identifier = (
self._build_composition_identifier_from_documentreference(
document_reference
)
)
composition = Composition(
**{
"title": "NLP FHIR Results " + time.strftime("%Y-%m-%dT%H:%M"),
"status": "final",
"date": DateTime.validate(datetime.datetime.now(datetime.timezone.utc)),
"type": composition_type,
"identifier": composition_identifier,
"id": sha256_of_identifier(composition_identifier),
"subject": composition_subject,
"category": composition_category,
"encounter": composition_encounter,
"author": [composition_author],
"section": composition_sections,
}
)
return composition
def _process_documentreference(self, document_reference: DocumentReference):
log = structlog.get_logger().bind(
document_id=f"{document_reference.get_resource_type()}/"
+ f"{document_reference.id}"
)
# Text extraction and text analysis
(text, content_type, lang) = self._extract_text_from_resource(
document_reference
)
DOCUMENT_LENGTH_SUMMARY.observe(len(text))
averbis_result = None
try:
averbis_result = self._perform_text_analysis(
text=text, mime_type=content_type, lang=lang
)
except Exception as exc:
log.exception(exc)
log.error("Failed to perform text analysis", error=exc)
raise TransientError(exc)
total_results = []
# Building FHIR resources as results
medication_statement_lists = []
for val in averbis_result:
if val["type"] == AHD_TYPE_DIAGNOSIS:
mapped_condition = ahd_to_condition.get_fhir_condition(
val, document_reference
)
if mapped_condition is not None:
total_results.append(mapped_condition)
if val["type"] == AHD_TYPE_DOCUMENT_ANNOTATION:
device = build_device(val)
if device is not None:
total_results.append(device)
if val["type"] == AHD_TYPE_MEDICATION:
statement = ahd_to_medication_statement.get_fhir_medication_statement(
val, document_reference
)
if statement is not None:
medication_statement_lists.append(statement)
# if custom_mappers_enabled
if os.getenv("CUSTOM_MAPPERS_ENABLED", "False").lower() in ["true", "1"]:
total_results.extend(custom_mappers(val, document_reference))
medication_results = []
medication_statement_results = []
for medication_statement_list in medication_statement_lists:
for medication_statement_dict in medication_statement_list:
medication_results.append(medication_statement_dict["medication"])
medication_statement_results.append(
medication_statement_dict["statement"]
)
# de-duplicate any Medication and MedicationStatement resources
medication_resources_unique = {m.id: m for m in medication_results}.values()
medication_statements_unique = {
m.id: m for m in medication_statement_results
}.values()
total_results.extend(medication_resources_unique)
total_results.extend(medication_statements_unique)
return total_results
def _extract_text_from_resource(
self,
document_reference: DocumentReference,
    ) -> Tuple[str, str, str]:
valid_content = [
content
for content in document_reference.content
if content.attachment.data is not None
]
if len(valid_content) == 0:
raise ValueError(
f"Document {document_reference.id} contains no valid content"
)
if len(valid_content) > 1:
raise ValueError(
f"Document {document_reference.id} contains more than one attachment"
)
content = valid_content[0]
language = None
if content.attachment.language:
language = content.attachment.language.lower().split("-")[0]
return (
base64.b64decode(content.attachment.data).decode("utf8"),
content.attachment.contentType,
language,
)
@tenacity.retry(
stop=tenacity.stop.stop_after_attempt(10),
wait=tenacity.wait.wait_fixed(5)
+ tenacity.wait.wait_random_exponential(multiplier=1, max=30),
after=after_log(logging.getLogger(), logging.WARNING),
reraise=True,
)
def _perform_text_analysis(
self, text: str, mime_type: str = "text/plain", lang: str = None
):
types = ",".join(
[
AHD_TYPE_DIAGNOSIS,
AHD_TYPE_MEDICATION,
AHD_TYPE_DOCUMENT_ANNOTATION,
*mapper_functions.keys(),
]
)
analyse_args = {"language": lang, "annotation_types": types}
try:
if mime_type == "text/html":
return self.pipeline.analyse_html(text, **analyse_args)
else:
return self.pipeline.analyse_text(text, **analyse_args)
except Exception as exc:
log.exception(exc)
log.error("Text analysis failed")
raise exc
def _build_composition_identifier_from_documentreference(
self,
doc_ref: DocumentReference,
):
"""
        Construct a hopefully unique identifier for the result Composition
        from the document's identifier, falling back to the
        DocumentReference.id when no identifier is present.
"""
doc_ref_identifier = None
if doc_ref.identifier is None or len(doc_ref.identifier) == 0:
log.warning(
"No identifier specified on the document. "
+ "Trying to fall-back to the DocumentReference.id"
)
doc_ref_identifier = doc_ref.id
else:
if len(doc_ref.identifier) > 1:
log.warning(
"More than one identifier specified on the document. "
+ "Using the first occurrence."
)
doc_ref_identifier = doc_ref.identifier[0].value
composition_identifier_system = (
"https://fhir.miracum.org/nlp/identifiers/ahd-analysis-result-composition"
)
composition_identifier_value = f"{doc_ref_identifier}_ahd-analysis-result"
return Identifier(
**{
"system": composition_identifier_system,
"value": composition_identifier_value,
}
)
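# Hedged usage sketch (not part of the original module): given an already
# configured averbis Pipeline and a FHIR Bundle whose entries contain
# DocumentReference resources with base64 attachment data, the handler returns
# a result Bundle of mapped resources plus a Composition. Variable names are
# assumptions.
#
#   handler = ResourceHandler(averbis_pipeline=my_pipeline)
#   result_bundle = handler.handle_bundle(incoming_bundle)
#   print(result_bundle.json(indent=2))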
| 1.929688 | 2 |
maestros/lookups.py | Infinityloopsistemas/SIVA | 0 | 6563 | # -*- coding: utf-8 -*-
from selectable.decorators import login_required
from maestros.models import TiposMedidasActuacion, TiposLimitesCriticos, TiposMedidasVigilancia, TiposTemperaturas, TiposFrecuencias, Zonas, Terceros, CatalogoEquipos, Personal, Consumibles, ParametrosAnalisis, Actividades, Etapas, Peligros, TiposCursos, TiposLegislacion, Unidades, Firmas, HorarioTurnos
from selectable.base import ModelLookup
from selectable.registry import registry
from maestros_generales.models import Empresas
from siva import settings
__author__ = 'julian'
@login_required
class TPActuacionPrevLookup(ModelLookup):
model = TiposMedidasActuacion
search_fields = ('denominacion__icontains',)
def get_query(self, request, term):
results = super(TPActuacionPrevLookup, self).get_query(request, term)
results = results.filter(tipo="P",empresa__in=Empresas.objects.filter(usuario__username=request.user))
return results
def get_item_value(self, item):
return item.denominacion
def get_item_label(self, item):
return "%s" % (item.denominacion)
registry.register(TPActuacionPrevLookup)
@login_required
class TPActuacionCorrLookup(ModelLookup):
model = TiposMedidasActuacion
search_fields = ('denominacion__icontains',)
def get_query(self, request, term):
results = super(TPActuacionCorrLookup, self).get_query(request, term)
results = results.filter(tipo="C",empresa__in=Empresas.objects.filter(usuario__username=request.user))
return results
def get_item_value(self, item):
return item.denominacion
def get_item_label(self, item):
return "%s" % (item.denominacion)
registry.register(TPActuacionCorrLookup)
@login_required
class TPLimitesCritLookup(ModelLookup):
model = TiposLimitesCriticos
search_fields = ('denominacion__icontains',)
def get_query(self, request, term):
results = super(TPLimitesCritLookup, self).get_query(request, term)
results = results.filter(empresa__in=Empresas.objects.filter(usuario__username=request.user))
return results
def get_item_value(self, item):
return item.denominacion
def get_item_label(self, item):
return "%s" % (item.denominacion)
registry.register(TPLimitesCritLookup)
@login_required
class ActividadesLookup(ModelLookup):
model = Actividades
search_fields = ('denominacion__icontains',)
def get_query(self, request, term):
results = super(ActividadesLookup, self).get_query(request, term)
results = results.filter(empresa__in=Empresas.objects.filter(usuario__username=request.user))
return results
def get_item_value(self, item):
return item.denominacion
def get_item_label(self, item):
return "%s" % (item.denominacion)
registry.register(ActividadesLookup)
@login_required
class TipoMedidasVigilanciaLookup(ModelLookup):
model = TiposMedidasVigilancia
search_fields = ('denominacion__icontains',)
def get_query(self, request, term):
results = super(TipoMedidasVigilanciaLookup, self).get_query(request, term)
results = results.filter(empresa__in=Empresas.objects.filter(usuario__username=request.user))
return results
def get_item_value(self, item):
return item.denominacion
def get_item_label(self, item):
return "%s" % (item.denominacion)
registry.register(TipoMedidasVigilanciaLookup)
@login_required
class TiposTemperaturasLookup(ModelLookup):
model = TiposTemperaturas
search_fields = ('denominacion__icontains',)
def get_query(self, request, term):
results = super(TiposTemperaturasLookup, self).get_query(request, term)
results = results.filter(empresa__in=Empresas.objects.filter(usuario__username=request.user))
return results
def get_item_value(self, item):
return item.denominacion
def get_item_label(self, item):
return "%s" % (item.denominacion)
registry.register(TiposTemperaturasLookup)
@login_required
class TiposFrecuenciasLookup(ModelLookup):
model = TiposFrecuencias
search_fields = ('denominacion__icontains',)
def get_query(self, request, term):
results = super(TiposFrecuenciasLookup, self).get_query(request, term)
results = results.filter(empresa__in=Empresas.objects.filter(usuario__username=request.user))
return results
def get_item_value(self, item):
return item.denominacion
def get_item_label(self, item):
return "%s" % (item.denominacion)
registry.register(TiposFrecuenciasLookup)
@login_required
class ZonasLookup(ModelLookup):
model = Zonas
search_fields = ('denominacion__icontains',)
def get_query(self, request, term):
results = super(ZonasLookup, self).get_query(request, term)
results = results.filter(empresa__in=Empresas.objects.filter(usuario__username=request.user))
return results
def get_item_value(self, item):
return item.denominacion
def get_item_label(self, item):
return "%s" % (item.denominacion)
registry.register(ZonasLookup)
@login_required
class TercerosLookup(ModelLookup):
model = Terceros
search_fields = ('denominacion__icontains',)
def get_query(self, request, term):
results = super(TercerosLookup, self).get_query(request, term)
results = results.filter(empresa__in=Empresas.objects.filter(usuario__username=request.user))
return results
def get_item_value(self, item):
return item.denominacion
def get_item_label(self, item):
return "%s" % (item.denominacion)
registry.register(TercerosLookup)
@login_required
class TercerosTiposLookup(ModelLookup):
model = Terceros
search_fields = ('denominacion__icontains',)
def get_query(self, request, term):
results = super(TercerosTiposLookup, self).get_query(request, term)
results = results.filter(tipotercero__descripcion=settings.ASESORSANITARIO, empresa__in=Empresas.objects.filter(usuario__username=request.user))
return results
def get_item_value(self, item):
return item.denominacion
def get_item_label(self, item):
return "%s" % (item.denominacion)
registry.register(TercerosTiposLookup)
@login_required
class CatalogoEquiposLookup(ModelLookup):
model = CatalogoEquipos
search_fields = ('denominacion__icontains',)
def get_query(self, request, term):
results = super(CatalogoEquiposLookup, self).get_query(request, term)
results = results.filter(empresa__in=Empresas.objects.filter(usuario__username=request.user))
return results
def get_item_value(self, item):
return item.denominacion
def get_item_label(self, item):
return "%s" % (item.denominacion)
registry.register(CatalogoEquiposLookup)
@login_required
class PersonalLookup(ModelLookup):
model = Personal
search_fields = ('apellidos__icontains',)
def get_query(self, request, term):
results = super(PersonalLookup, self).get_query(request, term)
results = results.filter(empresa__in=Empresas.objects.filter(usuario__username=request.user))
return results
def get_item_value(self, item):
return item.apellidos
def get_item_label(self, item):
return "%s %s" % (item.apellidos, item.nombres)
registry.register(PersonalLookup)
@login_required
class TiposCursosLookup(ModelLookup):
model = TiposCursos
search_fields = ('denominacion__icontains',)
def get_query(self, request, term):
results = super(TiposCursosLookup, self).get_query(request, term)
results = results.filter(empresa__in=Empresas.objects.filter(usuario__username=request.user))
return results
def get_item_value(self, item):
return item.denominacion
def get_item_label(self, item):
return "%s" % (item.denominacion)
registry.register(TiposCursosLookup)
@login_required
class TiposLegislacionLookup(ModelLookup):
model = TiposLegislacion
search_fields = ('denominacion__icontains',)
def get_query(self, request, term):
results = super(TiposLegislacionLookup, self).get_query(request, term)
results = results.filter(empresa__in=Empresas.objects.filter(usuario__username=request.user))
return results
def get_item_value(self, item):
return item.denominacion
def get_item_label(self, item):
return "%s" % (item.denominacion)
registry.register(TiposLegislacionLookup)
@login_required
class ConsumiblesLookup(ModelLookup):
model = Consumibles
search_fields = ('denominacion__icontains',)
def get_query(self, request, term):
results = super(ConsumiblesLookup, self).get_query(request, term)
results = results.filter(empresa__in=Empresas.objects.filter(usuario__username=request.user))
return results
def get_item_value(self, item):
return item.denominacion
def get_item_label(self, item):
return "%s" % (item.denominacion)
registry.register(ConsumiblesLookup)
@login_required
class ParametrosAnalisisLookup(ModelLookup):
model = ParametrosAnalisis
search_fields = ('denominacion__icontains',)
def get_query(self, request, term):
results = super(ParametrosAnalisisLookup, self).get_query(request, term)
results = results.filter(empresa__in=Empresas.objects.filter(usuario__username=request.user))
return results
def get_item_value(self, item):
return item.denominacion
def get_item_label(self, item):
return "%s" % (item.denominacion)
registry.register(ParametrosAnalisisLookup)
@login_required
class EtapasLookup(ModelLookup):
model = Etapas
search_fields = ('denominacion__icontains',)
def get_query(self, request, term):
results = super(EtapasLookup, self).get_query(request, term)
results = results.filter(empresa__in=Empresas.objects.filter(usuario__username=request.user))
return results
def get_item_value(self, item):
return item.denominacion
def get_item_label(self, item):
return "%s" % (item.denominacion)
registry.register(EtapasLookup)
@login_required
class PeligrosLookup(ModelLookup):
model = Peligros
search_fields = ('denominacion__icontains',)
def get_query(self, request, term):
results = super(PeligrosLookup, self).get_query(request, term)
results = results.filter(empresa__in=Empresas.objects.filter(usuario__username=request.user))
return results
def get_item_value(self, item):
return item.denominacion
def get_item_label(self, item):
return "%s" % (item.denominacion)
registry.register(PeligrosLookup)
@login_required
class UnidadesLookup(ModelLookup):
model = Unidades
search_fields = ('denominacion__icontains',)
def get_query(self, request, term):
results = super(UnidadesLookup, self).get_query(request, term)
results = results.filter(empresa__in=Empresas.objects.filter(usuario__username=request.user))
return results
def get_item_value(self, item):
return item.denominacion
def get_item_label(self, item):
return "%s" % (item.denominacion)
registry.register(UnidadesLookup)
@login_required
class FirmasLookup(ModelLookup):
model = Firmas
search_fields = ('personal__apellidos__icontains',)
def get_query(self, request, term):
results = super(FirmasLookup, self).get_query(request, term)
results = results.filter(empresa__in=Empresas.objects.filter(usuario__username=request.user))
return results
def get_item_value(self, item):
return item.personal.apellidos
    def get_item_label(self, item):
        return "%s %s" % (item.personal.apellidos, item.personal.nombres)
registry.register(FirmasLookup)
@login_required
class HorarioTurnoLookup(ModelLookup):
model = HorarioTurnos
search_fields = ('ihora__icontains','fhora__icontains')
def get_query(self, request, term):
results = super(HorarioTurnoLookup, self).get_query(request, term)
idtpturno = request.GET.get('idtpturno', '')
if idtpturno:
results = results.filter(tpturnos_id=idtpturno)
return results
def get_item_value(self, item):
return "%s - %s" % (item.ihora, item.fhora)
def get_item_label(self, item):
return "%s - %s" % (item.ihora, item.fhora)
registry.register(HorarioTurnoLookup)
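# Hedged usage sketch (not part of the original module): a registered lookup is
# normally consumed through django-selectable form fields, e.g.
#
#   from django import forms
#   from selectable.forms import AutoCompleteSelectField
#
#   class EjemploForm(forms.Form):
#       personal = AutoCompleteSelectField(lookup_class=PersonalLookup, required=False)
#
# Field and form names are illustrative only; the lookup URL is resolved from
# the registry registration above.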
| 1.929688 | 2 |
julynter/oldcmd.py | dew-uff/julynter | 9 | 6564 | """Define commands for Python 2.7"""
import argparse
import traceback
from . import util
from .cmd import run
from .cmd import extractpipenv
def main():
    """Main function"""
    print("This version is not supported! It has limited analysis features")
parser = argparse.ArgumentParser(description='Analyze Jupyter Notebooks')
subparsers = parser.add_subparsers()
run.create_subparsers(subparsers)
extractpipenv.create_subparsers(subparsers)
args, rest = parser.parse_known_args()
try:
if not getattr(args, 'func', None):
parser.print_help()
else:
args.func(args, rest)
if not util.EXITED:
util.do_exit(0)
except: # pylint: disable=bare-except
if not util.EXITED:
traceback.print_exc()
util.do_exit(1)
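# Hedged usage note: main() is expected to be wired up as the package's console
# entry point, so that e.g. `julynter run <args>` or `julynter extractpipenv ...`
# dispatches to the matching subcommand's `func`. The exact console-script name
# is an assumption based on the package name.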
| 2.515625 | 3 |
gpMgmt/bin/gppylib/test/unit/test_unit_gpcrondump.py | nurikk/gpdb | 0 | 6565 | <filename>gpMgmt/bin/gppylib/test/unit/test_unit_gpcrondump.py
#!/usr/bin/env python
import os
import imp
gpcrondump_path = os.path.abspath('gpcrondump')
gpcrondump = imp.load_source('gpcrondump', gpcrondump_path)
import unittest2 as unittest
from datetime import datetime
from gppylib import gplog
from gpcrondump import GpCronDump
from gppylib.operations.utils import DEFAULT_NUM_WORKERS
from mock import patch, Mock
from gppylib.operations.dump import MailDumpEvent
from gppylib.operations.backup_utils import get_backup_directory, write_lines_to_file
import mock
logger = gplog.get_unittest_logger()
class GpCronDumpTestCase(unittest.TestCase):
class Options:
def __init__(self):
self.masterDataDirectory = ""
self.interactive = False
self.clear_dumps_only = False
self.post_script = None
self.dump_config = False
self.history = False
self.pre_vacuum = False
self.post_vacuum = False
self.rollback = False
self.compress = True
self.free_space_percent = None
self.clear_dumps = False
self.cleanup_date = None
self.cleanup_total = None
self.dump_schema = False
self.dump_databases = ['testdb']
self.bypass_disk_check = True
self.backup_set = None
self.dump_global = False
self.clear_catalog_dumps = False
self.batch_default = DEFAULT_NUM_WORKERS
self.include_dump_tables = None
self.exclude_dump_tables = None
self.include_dump_tables_file = None
self.exclude_dump_tables_file = None
self.backup_dir = None
self.encoding = None
self.output_options = None
self.report_dir = None
self.timestamp_key = None
self.list_backup_files = None
self.quiet = False
self.verbose = False
self.local_dump_prefix = ''
self.list_filter_tables = None
self.include_email_file = None
self.email_details = None
self.include_schema_file = None
self.exclude_schema_file = None
self.exclude_dump_schema = None
self.dump_stats = None
## Enterprise init
self.incremental = False
self.ddboost = False
self.ddboost_hosts = None
self.ddboost_user = None
self.ddboost_config_remove = False
self.ddboost_verify = False
self.ddboost_remote = None
self.ddboost_ping = None
self.ddboost_backupdir = None
self.replicate = None
self.max_streams = None
self.netbackup_service_host = None
self.netbackup_policy = None
self.netbackup_schedule = None
self.netbackup_block_size = None
self.netbackup_keyword = None
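    # Each test below copies this Options stub, overrides a few fields and
    # asserts that the GpCronDump constructor (with mocked master port and
    # timestamp validation) either accepts the combination or raises the
    # expected validation error.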
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.GpCronDump.validate_dump_schema')
@patch('gpcrondump.validate_current_timestamp')
def test_option_schema_filter_1(self, mock, mock2, mock3):
options = GpCronDumpTestCase.Options()
options.include_schema_file = '/tmp/foo'
options.incremental = True
with self.assertRaisesRegexp(Exception, '--schema-file option can not be selected with incremental backup'):
cron = GpCronDump(options, None)
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.GpCronDump.validate_dump_schema')
@patch('gpcrondump.validate_current_timestamp')
def test_option_schema_filter_2(self, mock, mock2, mock3):
options = GpCronDumpTestCase.Options()
options.exclude_schema_file = '/tmp/foo'
options.incremental = True
with self.assertRaisesRegexp(Exception, '--exclude-schema-file option can not be selected with incremental backup'):
cron = GpCronDump(options, None)
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
def test_options_schema_filter_3(self, mock, mock2):
options = GpCronDumpTestCase.Options()
options.exclude_dump_schema = 'foo'
options.incremental = True
with self.assertRaisesRegexp(Exception, '-S option can not be selected with incremental backup'):
cron = GpCronDump(options, None)
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
def test_options_schema_filter_4(self, mock, mock2):
options = GpCronDumpTestCase.Options()
options.dump_schema = 'foo'
options.incremental = True
with self.assertRaisesRegexp(Exception, '-s option can not be selected with incremental backup'):
cron = GpCronDump(options, None)
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
def test_options_schema_filter_5(self, mock, mock2):
options = GpCronDumpTestCase.Options()
options.dump_schema = 'foo'
options.exclude_schema_file = '/tmp/foo'
with self.assertRaisesRegexp(Exception, '-s can not be selected with --exclude-schema-file option'):
cron = GpCronDump(options, None)
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
def test_options_schema_filter_6(self, mock, mock2):
options = GpCronDumpTestCase.Options()
options.dump_schema = 'foo'
options.include_schema_file = '/tmp/foo'
with self.assertRaisesRegexp(Exception, '-s can not be selected with --schema-file option'):
cron = GpCronDump(options, None)
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
def test_options_schema_filter_7(self, mock, mock2):
options = GpCronDumpTestCase.Options()
options.dump_schema = 'foo'
options.exclude_dump_schema = 'foo'
with self.assertRaisesRegexp(Exception, '-s can not be selected with -S option'):
cron = GpCronDump(options, None)
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
def test_options_schema_filter_8(self, mock, mock2):
options = GpCronDumpTestCase.Options()
options.exclude_dump_schema = 'foo'
options.exclude_schema_file = '/tmp/foo'
with self.assertRaisesRegexp(Exception, '-S can not be selected with --exclude-schema-file option'):
cron = GpCronDump(options, None)
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
def test_options_schema_filter_9(self, mock, mock2):
options = GpCronDumpTestCase.Options()
options.exclude_dump_schema = 'foo'
options.include_schema_file = '/tmp/foo'
with self.assertRaisesRegexp(Exception, '-S can not be selected with --schema-file option'):
cron = GpCronDump(options, None)
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
def test_options_schema_filter_10(self, mock, mock2):
options = GpCronDumpTestCase.Options()
options.exclude_schema_file = 'foo'
options.include_schema_file = '/tmp/foo'
with self.assertRaisesRegexp(Exception, '--exclude-schema-file can not be selected with --schema-file option'):
cron = GpCronDump(options, None)
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
def test_options_schema_filter_11(self, mock, mock2):
options = GpCronDumpTestCase.Options()
options.exclude_schema_file = 'foo'
options.include_dump_tables_file = '/tmp/foo'
with self.assertRaisesRegexp(Exception, '--table-file and --exclude-table-file can not be selected with --exclude-schema-file option'):
cron = GpCronDump(options, None)
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
def test_options_schema_filter_12(self, mock, mock2):
options = GpCronDumpTestCase.Options()
options.exclude_schema_file = 'foo'
options.exclude_dump_tables_file = '/tmp/foo'
with self.assertRaisesRegexp(Exception, '--table-file and --exclude-table-file can not be selected with --exclude-schema-file option'):
cron = GpCronDump(options, None)
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
def test_options_schema_filter_13(self, mock, mock2):
options = GpCronDumpTestCase.Options()
options.include_schema_file = 'foo'
options.exclude_dump_tables_file = '/tmp/foo'
with self.assertRaisesRegexp(Exception, '--table-file and --exclude-table-file can not be selected with --schema-file option'):
cron = GpCronDump(options, None)
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
def test_options_schema_filter_14(self, mock, mock2):
options = GpCronDumpTestCase.Options()
options.include_schema_file = 'foo'
options.include_dump_tables_file = '/tmp/foo'
with self.assertRaisesRegexp(Exception, '--table-file and --exclude-table-file can not be selected with --schema-file option'):
cron = GpCronDump(options, None)
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
def test_options_schema_filter_15(self, mock, mock2):
options = GpCronDumpTestCase.Options()
options.dump_schema = 'foo'
options.include_dump_tables_file = '/tmp/foo'
with self.assertRaisesRegexp(Exception, '--table-file and --exclude-table-file can not be selected with -s option'):
cron = GpCronDump(options, None)
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
def test_options_schema_filter_16(self, mock, mock2):
options = GpCronDumpTestCase.Options()
options.dump_schema = 'foo'
options.exclude_dump_tables_file = '/tmp/foo'
with self.assertRaisesRegexp(Exception, '--table-file and --exclude-table-file can not be selected with -s option'):
cron = GpCronDump(options, None)
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
def test_options_schema_filter_17(self, mock, mock2):
options = GpCronDumpTestCase.Options()
options.exclude_dump_schema = 'foo'
options.include_dump_tables_file = '/tmp/foo'
with self.assertRaisesRegexp(Exception, '--table-file and --exclude-table-file can not be selected with -S option'):
cron = GpCronDump(options, None)
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
def test_options_schema_filter_18(self, mock, mock2):
options = GpCronDumpTestCase.Options()
options.exclude_dump_schema = 'foo'
options.exclude_dump_tables_file = '/tmp/foo'
with self.assertRaisesRegexp(Exception, '--table-file and --exclude-table-file can not be selected with -S option'):
cron = GpCronDump(options, None)
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
def test_options_schema_filter_19(self, mock, mock2):
options = GpCronDumpTestCase.Options()
options.exclude_schema_file = 'foo'
options.exclude_dump_tables = '/tmp/foo'
with self.assertRaisesRegexp(Exception, '-t and -T can not be selected with --exclude-schema-file option'):
cron = GpCronDump(options, None)
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
def test_options_schema_filter_20(self, mock, mock2):
options = GpCronDumpTestCase.Options()
options.exclude_schema_file = 'foo'
options.include_dump_tables = '/tmp/foo'
with self.assertRaisesRegexp(Exception, '-t and -T can not be selected with --exclude-schema-file option'):
cron = GpCronDump(options, None)
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
def test_options_schema_filter_21(self, mock, mock2):
options = GpCronDumpTestCase.Options()
options.include_schema_file = 'foo'
options.exclude_dump_tables = '/tmp/foo'
with self.assertRaisesRegexp(Exception, '-t and -T can not be selected with --schema-file option'):
cron = GpCronDump(options, None)
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
def test_options_schema_filter_22(self, mock, mock2):
options = GpCronDumpTestCase.Options()
options.include_schema_file = 'foo'
options.include_dump_tables = '/tmp/foo'
with self.assertRaisesRegexp(Exception, '-t and -T can not be selected with --schema-file option'):
cron = GpCronDump(options, None)
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
def test_options_schema_filter_23(self, mock, mock2):
options = GpCronDumpTestCase.Options()
options.dump_schema = 'foo'
options.exclude_dump_tables = '/tmp/foo'
with self.assertRaisesRegexp(Exception, '-t and -T can not be selected with -s option'):
cron = GpCronDump(options, None)
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
def test_options_schema_filter_24(self, mock, mock2):
options = GpCronDumpTestCase.Options()
options.dump_schema = 'foo'
options.include_dump_tables = '/tmp/foo'
with self.assertRaisesRegexp(Exception, '-t and -T can not be selected with -s option'):
cron = GpCronDump(options, None)
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
def test_options_schema_filter_25(self, mock, mock2):
options = GpCronDumpTestCase.Options()
options.exclude_dump_schema = 'foo'
options.exclude_dump_tables = '/tmp/foo'
with self.assertRaisesRegexp(Exception, '-t and -T can not be selected with -S option'):
cron = GpCronDump(options, None)
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
def test_options_schema_filter_26(self, mock, mock2):
options = GpCronDumpTestCase.Options()
options.exclude_dump_schema = 'foo'
options.include_dump_tables = '/tmp/foo'
with self.assertRaisesRegexp(Exception, '-t and -T can not be selected with -S option'):
cron = GpCronDump(options, None)
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
def test_options_schema_filter_27(self, mock, mock2):
options = GpCronDumpTestCase.Options()
options.dump_schema = ['information_schema']
with self.assertRaisesRegexp(Exception, "can not specify catalog schema 'information_schema' using -s option"):
GpCronDump(options, None)
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
def test_options_schema_filter_28(self, mock, mock2):
options = GpCronDumpTestCase.Options()
options.exclude_dump_schema = ['information_schema']
with self.assertRaisesRegexp(Exception, "can not specify catalog schema 'information_schema' using -S option"):
GpCronDump(options, None)
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
@patch('gpcrondump.get_lines_from_file', return_value=['public', 'information_schema'])
def test_options_schema_filter_29(self, mock, mock2, mock3):
options = GpCronDumpTestCase.Options()
options.exclude_schema_file = '/tmp/foo'
with self.assertRaisesRegexp(Exception, "can not exclude catalog schema 'information_schema' in schema file '/tmp/foo'"):
GpCronDump(options, None)
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
@patch('gpcrondump.get_lines_from_file', return_value=['public', 'information_schema'])
def test_options_schema_filter_30(self, mock, mock2, mock3):
options = GpCronDumpTestCase.Options()
options.include_schema_file = '/tmp/foo'
with self.assertRaisesRegexp(Exception, "can not include catalog schema 'information_schema' in schema file '/tmp/foo'"):
GpCronDump(options, None)
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
def test_options_schema_filter_31(self, mock, mock2):
options = GpCronDumpTestCase.Options()
options.masterDataDirectory = '/tmp/foobar'
gpcd = GpCronDump(options, None)
dbname = 'foo'
timestamp = '20141016010101'
file = gpcd.get_schema_list_file(dbname)
self.assertEquals(file, None)
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
def test_options_schema_filter_32(self, mock1, mock2):
options = GpCronDumpTestCase.Options()
options.dump_schema = ['public']
gpcd = GpCronDump(options, None)
dbname = 'foo'
timestamp = '20141016010101'
file = gpcd.get_schema_list_file(dbname)
self.assertTrue(file.startswith('/tmp/schema_list'))
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
def test_options_schema_filter_33(self, mock1, mock2):
options = GpCronDumpTestCase.Options()
options.include_schema_file = '/tmp/foo'
write_lines_to_file('/tmp/foo', ['public'])
gpcd = GpCronDump(options, None)
dbname = 'foo'
timestamp = '20141016010101'
file = gpcd.get_schema_list_file(dbname)
self.assertTrue(file.startswith('/tmp/schema_list'))
if os.path.exists('/tmp/foo'):
os.remove('/tmp/foo')
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
@patch('gpcrondump.get_include_schema_list_from_exclude_schema', return_value=['public'])
def test_options_schema_filter_34(self, mock1, mock2, mock3):
options = GpCronDumpTestCase.Options()
options.exclude_schema_file = '/tmp/foo'
write_lines_to_file('/tmp/foo', ['public'])
gpcd = GpCronDump(options, None)
dbname = 'foo'
timestamp = '20141016010101'
file = gpcd.get_schema_list_file(dbname)
self.assertTrue(file.startswith('/tmp/schema_list'))
if os.path.exists('/tmp/foo'):
os.remove('/tmp/foo')
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
@patch('gpcrondump.get_include_schema_list_from_exclude_schema', return_value=['public'])
def test_options_schema_filter_35(self, mock1, mock2, mock3):
options = GpCronDumpTestCase.Options()
options.exclude_dump_schema = 'public'
gpcd = GpCronDump(options, None)
dbname = 'foo'
timestamp = '20141016010101'
file = gpcd.get_schema_list_file(dbname)
self.assertTrue(file.startswith('/tmp/schema_list'))
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
@patch('gpcrondump.get_lines_from_file', return_value=['public'])
@patch('gpcrondump.get_user_table_list_for_schema', return_value=['public', 'table1', 'public', 'table2'])
def test_options_schema_filter_36(self, mock1, mock2, mock3, mock4):
options = GpCronDumpTestCase.Options()
gpcd = GpCronDump(options, None)
dbname = 'foo'
schema_file = '/tmp/foo'
inc = gpcd.generate_include_table_list_from_schema_file(dbname, schema_file)
self.assertTrue(inc.startswith('/tmp/include_dump_tables_file'))
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
def test_options1(self, mock, mock2):
options = GpCronDumpTestCase.Options()
options.include_dump_tables = 'foo'
options.incremental = True
with self.assertRaisesRegexp(Exception, 'include table list can not be selected with incremental backup'):
cron = GpCronDump(options, None)
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
def test_options2(self, mock, mock2):
options = GpCronDumpTestCase.Options()
options.exclude_dump_tables = 'foo'
options.incremental = True
with self.assertRaisesRegexp(Exception, 'exclude table list can not be selected with incremental backup'):
cron = GpCronDump(options, None)
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
def test_options3(self, mock, mock2):
options = GpCronDumpTestCase.Options()
options.include_dump_tables_file = 'foo'
options.incremental = True
with self.assertRaisesRegexp(Exception, 'include table file can not be selected with incremental backup'):
cron = GpCronDump(options, None)
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
def test_options4(self, mock, mock2):
options = GpCronDumpTestCase.Options()
options.exclude_dump_tables_file = 'foo'
options.incremental = True
with self.assertRaisesRegexp(Exception, 'exclude table file can not be selected with incremental backup'):
cron = GpCronDump(options, None)
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
def test_options10(self, mock, mock2):
options = GpCronDumpTestCase.Options()
options.local_dump_prefix = 'foo'
options.incremental = False
options.list_filter_tables = True
try:
with self.assertRaisesRegexp(Exception, 'list filter tables option requires --prefix and --incremental'):
cron = GpCronDump(options, None)
finally:
options.list_filter_tables = False
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
@patch('gpcrondump.get_latest_full_dump_timestamp', return_value='20121225090000')
def test_options11(self, mock, mock2, mock3):
options = GpCronDumpTestCase.Options()
options.incremental = True
cron = GpCronDump(options, None)
self.assertEquals(cron.full_dump_timestamp, '20121225090000')
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
def test_options12(self, mock, mock2):
options = GpCronDumpTestCase.Options()
options.incremental = True
options.dump_databases = 'bkdb,fulldb'
with self.assertRaisesRegexp(Exception, 'multi-database backup is not supported with incremental backup'):
cron = GpCronDump(options, None)
@patch('gpcrondump.get_latest_full_dump_timestamp', return_value='20120330090000')
@patch('gpcrondump.validate_current_timestamp')
@patch('gpcrondump.GpCronDump._get_master_port')
def test_options13(self, mock, mock2, mock3):
options = GpCronDumpTestCase.Options()
options.incremental = True
options.dump_databases = ['bkdb']
#If this is successful then it should not raise an exception
GpCronDump(options, None)
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
def test_options14(self, mock, mock2):
options = GpCronDumpTestCase.Options()
options.dump_databases = 'bkdb'
options.incremental = False
#If this is successful then it should not raise an exception
GpCronDump(options, None)
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
def test_options15(self, mock, mock2):
options = GpCronDumpTestCase.Options()
options.dump_databases = 'bkdb,fulldb'
options.incremental = False
#If this is successful then it should not raise an exception
GpCronDump(options, None)
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
def test_options16(self, mock, mock2):
options = GpCronDumpTestCase.Options()
options.masterDataDirectory = '/tmp/foobar'
options.backup_dir = '/foo1'
gpcd = GpCronDump(options, None)
self.assertEquals(gpcd.getBackupDirectoryRoot(), '/foo1')
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
def test_options17(self, mock, mock2):
options = GpCronDumpTestCase.Options()
options.masterDataDirectory = '/tmp/foobar'
options.backup_dir = None
gpcd = GpCronDump(options, None)
self.assertEquals(gpcd.getBackupDirectoryRoot(), '/tmp/foobar')
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
def test_options18(self, mock, mock2):
options = GpCronDumpTestCase.Options()
options.dump_schema = 'foo'
options.incremental = True
with self.assertRaisesRegexp(Exception, '-s option can not be selected with incremental backup'):
cron = GpCronDump(options, None)
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
def test_options19(self, mock, mock2):
options = GpCronDumpTestCase.Options()
options.clear_dumps = True
options.incremental = True
with self.assertRaisesRegexp(Exception, '-c option can not be selected with incremental backup'):
cron = GpCronDump(options, None)
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
def test_options20(self, mock, mock2):
options = GpCronDumpTestCase.Options()
options.dump_databases = []
options.incremental = True
with self.assertRaisesRegexp(Exception, 'Must supply -x <database name> with incremental option'):
cron = GpCronDump(options, None)
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
def test_options21(self, mock, mock2):
options = GpCronDumpTestCase.Options()
options.ddboost = True
options.replicate = False
options.max_streams = 20
with self.assertRaisesRegexp(Exception, '--max-streams must be specified along with --replicate'):
cron = GpCronDump(options, None)
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
def test_options22(self, mock, mock2):
options = GpCronDumpTestCase.Options()
options.ddboost = True
options.replicate = True
options.max_streams = None
with self.assertRaisesRegexp(Exception, '--max-streams must be specified along with --replicate'):
cron = GpCronDump(options, None)
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
def test_options23(self, mock, mock2):
options = GpCronDumpTestCase.Options()
options.ddboost = True
options.replicate = True
options.max_streams = 0
with self.assertRaisesRegexp(Exception, '--max-streams must be a number greater than zero'):
cron = GpCronDump(options, None)
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
def test_options24(self, mock, mock2):
options = GpCronDumpTestCase.Options()
options.ddboost = True
options.replicate = True
options.max_streams = "abc"
with self.assertRaisesRegexp(Exception, '--max-streams must be a number greater than zero'):
cron = GpCronDump(options, None)
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
def test_options25(self, mock, mock2):
options = GpCronDumpTestCase.Options()
options.ddboost = False
options.replicate = False
options.max_streams = 20
with self.assertRaisesRegexp(Exception, '--replicate and --max-streams cannot be used without --ddboost'):
cron = GpCronDump(options, None)
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
def test_options26(self, mock1, mock2):
options = GpCronDumpTestCase.Options()
options.list_backup_files = True
options.timestamp_key = None
with self.assertRaisesRegexp(Exception, 'Must supply -K option when listing backup files'):
GpCronDump(options, None)
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
def test_options27(self, mock, mock2):
options = GpCronDumpTestCase.Options()
options.dump_databases = 'bkdb,fulldb'
options.timestamp_key = True
with self.assertRaisesRegexp(Exception, 'multi-database backup is not supported with -K option'):
GpCronDump(options, None)
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
def test_options28(self, mock, mock2):
options = GpCronDumpTestCase.Options()
options.dump_databases = ['bkdb']
options.timestamp_key = True
options.ddboost = True
options.list_backup_files = True
with self.assertRaisesRegexp(Exception, 'list backup files not supported with ddboost option'):
GpCronDump(options, None)
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
def test_options29(self, mock, mock2):
options = GpCronDumpTestCase.Options()
options.dump_databases = ['bkdb']
options.timestamp_key = True
options.ddboost = True
options.netbackup_service_host = "mdw"
options.netbackup_policy = "test_policy"
options.netbackup_schedule = "test_schedule"
with self.assertRaisesRegexp(Exception, '--ddboost is not supported with NetBackup'):
GpCronDump(options, None)
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
def test_get_include_exclude_for_dump_database00(self, mock1, mock2):
options = GpCronDumpTestCase.Options()
options.masterDataDirectory = '/tmp/foobar'
gpcd = GpCronDump(options, None)
dirtyfile = '/tmp/dirty'
dbname = 'foo'
(inc, exc) = gpcd.get_include_exclude_for_dump_database(dirtyfile, dbname)
self.assertEquals(inc, None)
self.assertEquals(exc, None)
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
@patch('gpcrondump.expand_partitions_and_populate_filter_file', return_value='/tmp/include_dump_tables_file')
@patch('gpcrondump.get_lines_from_file', return_value=['public.t1', 'public.t2'])
def test_get_include_exclude_for_dump_database01(self, mock1, mock2, mock3, mock4):
options = GpCronDumpTestCase.Options()
options.masterDataDirectory = '/tmp/foobar'
options.include_dump_tables_file = '/mydir/incfile'
gpcd = GpCronDump(options, None)
dirtyfile = '/tmp/dirty'
dbname = 'foo'
(inc, exc) = gpcd.get_include_exclude_for_dump_database(dirtyfile, dbname)
self.assertTrue(inc.startswith('/tmp/include_dump_tables_file'))
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
@patch('gpcrondump.expand_partitions_and_populate_filter_file', return_value='/tmp/include_dump_tables_file')
@patch('gpcrondump.get_lines_from_file')
def test_get_include_exclude_for_dump_database02(self, mock1, mock2, mock3, mock4):
options = GpCronDumpTestCase.Options()
options.masterDataDirectory = '/tmp/foobar'
options.include_dump_tables = ['public.t1', 'public.t2', 'public.t3']
gpcd = GpCronDump(options, None)
dirtyfile = '/tmp/dirty'
dbname = 'foo'
(inc, exc) = gpcd.get_include_exclude_for_dump_database(dirtyfile, dbname)
self.assertTrue(inc.startswith('/tmp/include_dump_tables_file'))
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
@patch('gpcrondump.get_latest_full_dump_timestamp', return_value='20121225090000')
def test_get_include_exclude_for_dump_database03(self, mock1, mock2, mock3):
options = GpCronDumpTestCase.Options()
options.masterDataDirectory = '/tmp/foobar'
options.incremental = True
gpcd = GpCronDump(options, None)
dirtyfile = '/tmp/dirty'
dbname = 'foo'
(inc, exc) = gpcd.get_include_exclude_for_dump_database(dirtyfile, dbname)
self.assertEquals(inc, '/tmp/dirty')
self.assertEquals(exc, None)
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
@patch('gpcrondump.expand_partitions_and_populate_filter_file', return_value='/tmp/exclude_dump_tables_file')
@patch('gpcrondump.get_lines_from_file', return_value=['public.t1', 'public.t2'])
def test_get_include_exclude_for_dump_database04(self, mock1, mock2, mock3, mock4):
options = GpCronDumpTestCase.Options()
options.masterDataDirectory = '/tmp/foobar'
options.exclude_dump_tables_file = '/odir/exfile'
gpcd = GpCronDump(options, None)
dirtyfile = '/tmp/dirty'
dbname = 'foo'
(inc, exc) = gpcd.get_include_exclude_for_dump_database(dirtyfile, dbname)
self.assertTrue(exc.startswith('/tmp/exclude_dump_tables_file'))
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
@patch('gpcrondump.expand_partitions_and_populate_filter_file', return_value='/tmp/exclude_dump_tables_file')
@patch('gpcrondump.get_lines_from_file')
def test_get_include_exclude_for_dump_database06(self, mock1, mock2, mock3, mock4):
options = GpCronDumpTestCase.Options()
options.masterDataDirectory = '/tmp/foobar'
options.exclude_dump_tables = ['public.t4', 'public.t5', 'public.t6']
gpcd = GpCronDump(options, None)
dirtyfile = '/tmp/dirty'
dbname = 'foo'
(inc, exc) = gpcd.get_include_exclude_for_dump_database(dirtyfile, dbname)
self.assertTrue(exc.startswith('/tmp/exclude_dump_tables_file'))
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
@patch('gpcrondump.GpCronDump._get_table_names_from_partition_list', side_effect = [['public.aot1', 'public.aot2'], ['public.cot1', 'public.cot2']])
def test_verify_tablenames_00(self, mock1, mock2, mock3):
options = GpCronDumpTestCase.Options()
cron = GpCronDump(options, None)
ao_partition_list = ['public, aot1, 2190', 'public, aot2, 3190']
co_partition_list = ['public, cot1, 2190', 'public, cot2, 3190']
heap_partition_list = ['public.heapt1', 'public.heapt2']
cron._verify_tablenames(ao_partition_list, co_partition_list, heap_partition_list) #Should not raise an exception
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
@patch('gpcrondump.GpCronDump._get_table_names_from_partition_list', side_effect = [['public.aot1:asd', 'public.aot2'], ['public.cot1', 'public.cot2:asd']])
def test_verify_tablenames_00_bad(self, mock1, mock2, mock3):
options = GpCronDumpTestCase.Options()
cron = GpCronDump(options, None)
ao_partition_list = ['public, aot1!asd, 2190', 'public, aot2, 3190']
co_partition_list = ['public, cot1, 2190', 'public, cot2\nasd, 3190']
heap_partition_list = ['public, heapt1, 2190', 'public, heapt2!asdasd , 3190']
with self.assertRaisesRegexp(Exception, ''):
cron._verify_tablenames(ao_partition_list, co_partition_list, heap_partition_list)
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
def test_options_inserts_with_incremental(self, mock, mock2):
options = GpCronDumpTestCase.Options()
options.output_options = ['--inserts']
options.incremental = True
with self.assertRaisesRegexp(Exception, '--inserts, --column-inserts, --oids cannot be selected with incremental backup'):
cron = GpCronDump(options, None)
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
def test_options_oids_with_incremental(self, mock, mock2):
options = GpCronDumpTestCase.Options()
options.output_options = ['--oids']
options.incremental = True
with self.assertRaisesRegexp(Exception, '--inserts, --column-inserts, --oids cannot be selected with incremental backup'):
cron = GpCronDump(options, None)
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
def test_options_column_inserts_with_incremental(self, mock, mock2):
options = GpCronDumpTestCase.Options()
options.output_options = ['--column-inserts']
options.incremental = True
with self.assertRaisesRegexp(Exception, '--inserts, --column-inserts, --oids cannot be selected with incremental backup'):
cron = GpCronDump(options, None)
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
def test_get_table_names_from_partition_list_00(self, mock1, mock2):
options = GpCronDumpTestCase.Options()
cron = GpCronDump(options, None)
partition_list = ['public, aot1, 2190', 'public, aot2:aot, 3190']
expected_output = ['public.aot1', 'public.aot2:aot']
result = cron._get_table_names_from_partition_list(partition_list)
self.assertEqual(result, expected_output)
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
def test_get_table_names_from_partition_list_01(self, mock1, mock2):
options = GpCronDumpTestCase.Options()
cron = GpCronDump(options, None)
partition_list = ['public, aot1, 2190', 'public, aot2,aot, 3190']
with self.assertRaisesRegexp(Exception, 'Invalid partition entry "public, aot2,aot, 3190"'):
cron._get_table_names_from_partition_list(partition_list)
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
def test_options_table_filter1(self, mock, mock2):
options = GpCronDumpTestCase.Options()
options.include_dump_tables = 'foo'
options.include_dump_tables_file = 'foo'
with self.assertRaisesRegexp(Exception, '-t can not be selected with --table-file option'):
cron = GpCronDump(options, None)
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
def test_options_table_filter2(self, mock, mock2):
options = GpCronDumpTestCase.Options()
options.include_dump_tables = 'foo'
options.exclude_dump_tables_file = 'foo'
with self.assertRaisesRegexp(Exception, '-t can not be selected with --exclude-table-file option'):
cron = GpCronDump(options, None)
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
def test_options_table_filter3(self, mock, mock2):
options = GpCronDumpTestCase.Options()
options.exclude_dump_tables = 'foo'
options.exclude_dump_tables_file = 'foo'
with self.assertRaisesRegexp(Exception, '-T can not be selected with --exclude-table-file option'):
cron = GpCronDump(options, None)
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
def test_options_table_filter4(self, mock, mock2):
options = GpCronDumpTestCase.Options()
options.exclude_dump_tables = 'foo'
options.include_dump_tables_file = 'foo'
with self.assertRaisesRegexp(Exception, '-T can not be selected with --table-file option'):
cron = GpCronDump(options, None)
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
def test_options_table_filter5(self, mock, mock2):
options = GpCronDumpTestCase.Options()
options.include_dump_tables = 'foo'
options.exclude_dump_tables = 'foo'
with self.assertRaisesRegexp(Exception, '-t can not be selected with -T option'):
cron = GpCronDump(options, None)
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
def test_options_table_filter6(self, mock, mock2):
options = GpCronDumpTestCase.Options()
options.include_dump_tables_file = 'foo'
options.exclude_dump_tables_file = 'foo'
with self.assertRaisesRegexp(Exception, '--table-file can not be selected with --exclude-table-file option'):
cron = GpCronDump(options, None)
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
def test_get_timestamp_object1(self, mock1, mock2):
options = GpCronDumpTestCase.Options()
options.timestamp_key = '20130101010101'
gpcd = GpCronDump(options, None)
timestamp = gpcd._get_timestamp_object(options.timestamp_key)
self.assertEquals(timestamp, datetime(2013, 1, 1, 1, 1, 1))
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
def test_get_timestamp_object2(self, mock1, mock2):
options = GpCronDumpTestCase.Options()
options.timestamp_key = '20130101010'
gpcd = GpCronDump(options, None)
with self.assertRaisesRegexp(Exception, 'Invalid timestamp key'):
gpcd._get_timestamp_object(options.timestamp_key)
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
def test_get_timestamp_object3(self, mock1, mock2):
options = GpCronDumpTestCase.Options()
options.timestamp_key = None
gpcd = GpCronDump(options, None)
timestamp = gpcd._get_timestamp_object(options.timestamp_key)
self.assertTrue(isinstance(timestamp, datetime))
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
def test_get_files_file_list1(self, mock1, mock2):
options = GpCronDumpTestCase.Options()
options.timestamp_key = None
options.masterDataDirectory = '/foo'
gpcd = GpCronDump(options, None)
master = Mock()
master.getSegmentHostName.return_value = 'foo1'
timestamp = '20130101010101'
dump_dir = get_backup_directory(options.masterDataDirectory, options.backup_dir, gpcd.dump_dir, timestamp)
files_file_list = gpcd._get_files_file_list(master, dump_dir, timestamp)
expected_files_list = ['foo1:%s/db_dumps/20130101/gp_cdatabase_1_1_20130101010101' % options.masterDataDirectory,
'foo1:%s/db_dumps/20130101/gp_dump_20130101010101_ao_state_file' % options.masterDataDirectory,
'foo1:%s/db_dumps/20130101/gp_dump_20130101010101_co_state_file' % options.masterDataDirectory,
'foo1:%s/db_dumps/20130101/gp_dump_20130101010101_last_operation' % options.masterDataDirectory,
'foo1:%s/db_dumps/20130101/gp_dump_20130101010101.rpt' % options.masterDataDirectory,
'foo1:%s/db_dumps/20130101/gp_dump_status_1_1_20130101010101' % options.masterDataDirectory]
self.assertEqual(files_file_list, expected_files_list)
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
def test_get_files_file_list2(self, mock1, mock2):
options = GpCronDumpTestCase.Options()
options.timestamp_key = None
options.masterDataDirectory = '/foo'
gpcd = GpCronDump(options, None)
master = Mock()
master.getSegmentHostName.return_value = 'foo2'
timestamp = '20130101010101'
dump_dir = get_backup_directory(options.masterDataDirectory, options.backup_dir, gpcd.dump_dir, timestamp)
files_file_list = gpcd._get_files_file_list(master, dump_dir, timestamp)
expected_files_list = ['foo2:%s/db_dumps/20130101/gp_cdatabase_1_1_20130101010101' % options.masterDataDirectory,
'foo2:%s/db_dumps/20130101/gp_dump_20130101010101_ao_state_file' % options.masterDataDirectory,
'foo2:%s/db_dumps/20130101/gp_dump_20130101010101_co_state_file' % options.masterDataDirectory,
'foo2:%s/db_dumps/20130101/gp_dump_20130101010101_last_operation' % options.masterDataDirectory,
'foo2:%s/db_dumps/20130101/gp_dump_20130101010101.rpt' % options.masterDataDirectory,
'foo2:%s/db_dumps/20130101/gp_dump_status_1_1_20130101010101' % options.masterDataDirectory]
self.assertEqual(files_file_list, expected_files_list)
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
@patch('gpcrondump.get_latest_full_dump_timestamp', return_value='20130101000000')
def test_get_files_file_list3(self, mock1, mock2, mock3):
options = GpCronDumpTestCase.Options()
options.timestamp_key = '20130101010101'
options.incremental = True
options.masterDataDirectory = '/data/foo'
gpcd = GpCronDump(options, None)
master = Mock()
master.getSegmentHostName.return_value = 'foo1'
timestamp = '20130101010101'
dump_dir = get_backup_directory(options.masterDataDirectory, None, gpcd.dump_dir, timestamp)
files_file_list = gpcd._get_files_file_list(master, dump_dir, timestamp)
expected_files_list = ['foo1:%s/db_dumps/20130101/gp_cdatabase_1_1_20130101010101' % options.masterDataDirectory,
'foo1:%s/db_dumps/20130101/gp_dump_20130101010101_ao_state_file' % options.masterDataDirectory,
'foo1:%s/db_dumps/20130101/gp_dump_20130101010101_co_state_file' % options.masterDataDirectory,
'foo1:%s/db_dumps/20130101/gp_dump_20130101010101_last_operation' % options.masterDataDirectory,
'foo1:%s/db_dumps/20130101/gp_dump_20130101010101.rpt' % options.masterDataDirectory,
'foo1:%s/db_dumps/20130101/gp_dump_status_1_1_20130101010101' % options.masterDataDirectory,
'foo1:%s/db_dumps/20130101/gp_dump_20130101000000_increments' % options.masterDataDirectory]
self.assertEqual(sorted(files_file_list), sorted(expected_files_list))
@patch('gpcrondump.validate_current_timestamp')
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gppylib.operations.backup_utils.get_latest_full_dump_timestamp', return_value='20130101000000')
def test_get_files_file_list_with_filter(self, mock1, mock2, mock3):
options = GpCronDumpTestCase.Options()
options.timestamp_key = '20130101010101'
options.local_dump_prefix = 'metro'
options.include_dump_tables_file = 'bar'
options.masterDataDirectory = '/data/foo'
gpcd = GpCronDump(options, None)
master = Mock()
master.getSegmentHostName.return_value = 'foo1'
timestamp = '20130101010101'
dump_dir = get_backup_directory(options.masterDataDirectory, options.backup_dir, gpcd.dump_dir, timestamp)
files_file_list = gpcd._get_files_file_list(master, dump_dir, timestamp)
expected_files_list = ['foo1:%s/db_dumps/20130101/metro_gp_cdatabase_1_1_20130101010101' % options.masterDataDirectory,
'foo1:%s/db_dumps/20130101/metro_gp_dump_20130101010101_ao_state_file' % options.masterDataDirectory,
'foo1:%s/db_dumps/20130101/metro_gp_dump_20130101010101_co_state_file' % options.masterDataDirectory,
'foo1:%s/db_dumps/20130101/metro_gp_dump_20130101010101_last_operation' % options.masterDataDirectory,
'foo1:%s/db_dumps/20130101/metro_gp_dump_20130101010101.rpt' % options.masterDataDirectory,
'foo1:%s/db_dumps/20130101/metro_gp_dump_status_1_1_20130101010101' % options.masterDataDirectory,
'foo1:%s/db_dumps/20130101/metro_gp_dump_20130101010101_filter' % options.masterDataDirectory]
self.assertEqual(sorted(files_file_list), sorted(expected_files_list))
@patch('gpcrondump.validate_current_timestamp')
@patch('gpcrondump.get_latest_full_dump_timestamp', return_value='20130101000000')
@patch('gpcrondump.GpCronDump._get_master_port')
def test_get_files_file_list_with_prefix(self, mock1, mock2, mock3):
options = GpCronDumpTestCase.Options()
options.timestamp_key = '20130101010101'
options.incremental = True
options.local_dump_prefix = 'metro'
options.masterDataDirectory = '/data/foo'
gpcd = GpCronDump(options, None)
master = Mock()
master.getSegmentHostName.return_value = 'foo1'
timestamp = '20130101010101'
dump_dir = get_backup_directory(options.masterDataDirectory, None, gpcd.dump_dir, timestamp)
files_file_list = gpcd._get_files_file_list(master, dump_dir, timestamp)
expected_files_list = ['foo1:%s/db_dumps/20130101/metro_gp_cdatabase_1_1_20130101010101' % options.masterDataDirectory,
'foo1:%s/db_dumps/20130101/metro_gp_dump_20130101010101_ao_state_file' % options.masterDataDirectory,
'foo1:%s/db_dumps/20130101/metro_gp_dump_20130101010101_co_state_file' % options.masterDataDirectory,
'foo1:%s/db_dumps/20130101/metro_gp_dump_20130101010101_last_operation' % options.masterDataDirectory,
'foo1:%s/db_dumps/20130101/metro_gp_dump_20130101010101.rpt' % options.masterDataDirectory,
'foo1:%s/db_dumps/20130101/metro_gp_dump_status_1_1_20130101010101' % options.masterDataDirectory,
'foo1:%s/db_dumps/20130101/metro_gp_dump_20130101000000_increments' % options.masterDataDirectory]
self.assertEqual(sorted(files_file_list), sorted(expected_files_list))
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
def test_get_pipes_file_list1(self, mock1, mock2):
options = GpCronDumpTestCase.Options()
options.timestamp_key = None
options.masterDataDirectory = '/foo'
gpcd = GpCronDump(options, None)
master = Mock()
master.getSegmentHostName.return_value = 'foo2'
mock_segs = []
timestamp = '20130101010101'
dump_dir = get_backup_directory(options.masterDataDirectory, options.backup_dir, gpcd.dump_dir, timestamp)
pipes_file_list = gpcd._get_pipes_file_list(master, mock_segs, dump_dir, timestamp)
expected_files_list = ['foo2:%s/db_dumps/20130101/gp_dump_1_1_20130101010101.gz' % options.masterDataDirectory,
'foo2:%s/db_dumps/20130101/gp_dump_1_1_20130101010101_post_data.gz' % options.masterDataDirectory]
self.assertEqual(pipes_file_list, expected_files_list)
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
def test_get_pipes_file_list2(self, mock1, mock2):
options = GpCronDumpTestCase.Options()
options.timestamp_key = None
options.masterDataDirectory = '/foo'
gpcd = GpCronDump(options, None)
master = Mock()
master.getSegmentHostName.return_value = 'foo1'
mock_segs = [Mock(), Mock()]
for id, seg in enumerate(mock_segs):
seg.getSegmentDataDirectory.return_value = '/bar'
seg.getSegmentHostName.return_value = 'foo1'
seg.getSegmentDbId.return_value = id + 1
timestamp = '20130101010101'
dump_dir = get_backup_directory(options.masterDataDirectory, options.backup_dir, gpcd.dump_dir, timestamp)
pipes_file_list = gpcd._get_pipes_file_list(master, mock_segs, dump_dir, timestamp)
expected_files_list = ['foo1:%s/db_dumps/20130101/gp_dump_1_1_20130101010101.gz' % options.masterDataDirectory,
'foo1:%s/db_dumps/20130101/gp_dump_1_1_20130101010101_post_data.gz' % options.masterDataDirectory,
'foo1:/bar/db_dumps/20130101/gp_dump_0_1_20130101010101.gz',
'foo1:/bar/db_dumps/20130101/gp_dump_0_2_20130101010101.gz']
self.assertEqual(sorted(pipes_file_list), sorted(expected_files_list))
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
def test_get_pipes_file_list3(self, mock1, mock2):
options = GpCronDumpTestCase.Options()
options.timestamp_key = None
options.dump_global = True
options.masterDataDirectory = '/foo'
gpcd = GpCronDump(options, None)
master = Mock()
master.getSegmentHostName.return_value = 'foo1'
mock_segs = [Mock(), Mock()]
for id, seg in enumerate(mock_segs):
seg.getSegmentDataDirectory.return_value = '/bar'
seg.getSegmentHostName.return_value = 'foo1'
seg.getSegmentDbId.return_value = id + 1
timestamp = '20130101010101'
dump_dir = get_backup_directory(options.masterDataDirectory, options.backup_dir, gpcd.dump_dir, timestamp)
pipes_file_list = gpcd._get_pipes_file_list(master, mock_segs, dump_dir, timestamp)
expected_files_list = ['foo1:%s/db_dumps/20130101/gp_dump_1_1_20130101010101.gz' % options.masterDataDirectory,
'foo1:%s/db_dumps/20130101/gp_dump_1_1_20130101010101_post_data.gz' % options.masterDataDirectory,
'foo1:%s/db_dumps/20130101/gp_global_1_1_20130101010101' % options.masterDataDirectory,
'foo1:/bar/db_dumps/20130101/gp_dump_0_1_20130101010101.gz',
'foo1:/bar/db_dumps/20130101/gp_dump_0_2_20130101010101.gz']
self.assertEqual(sorted(pipes_file_list), sorted(expected_files_list))
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
def test_get_pipes_file_list4(self, mock1, mock2):
options = GpCronDumpTestCase.Options()
options.timestamp_key = None
options.masterDataDirectory = '/foo'
options.dump_config = True
gpcd = GpCronDump(options, None)
master = Mock()
master.getSegmentHostName.return_value = 'foo1'
mock_segs = [Mock(), Mock()]
for id, seg in enumerate(mock_segs):
seg.getSegmentDataDirectory.return_value = '/bar'
seg.getSegmentHostName.return_value = 'foo1'
seg.getSegmentDbId.return_value = id + 1
timestamp = '20130101010101'
dump_dir = get_backup_directory(options.masterDataDirectory, options.backup_dir, gpcd.dump_dir, timestamp)
pipes_file_list = gpcd._get_pipes_file_list(master, mock_segs, dump_dir, timestamp)
expected_files_list = ['foo1:%s/db_dumps/20130101/gp_dump_1_1_20130101010101.gz' % options.masterDataDirectory,
'foo1:%s/db_dumps/20130101/gp_dump_1_1_20130101010101_post_data.gz' % options.masterDataDirectory,
'foo1:%s/db_dumps/20130101/gp_master_config_files_20130101010101.tar' % options.masterDataDirectory,
'foo1:/bar/db_dumps/20130101/gp_segment_config_files_0_1_20130101010101.tar',
'foo1:/bar/db_dumps/20130101/gp_segment_config_files_0_2_20130101010101.tar',
'foo1:/bar/db_dumps/20130101/gp_dump_0_1_20130101010101.gz',
'foo1:/bar/db_dumps/20130101/gp_dump_0_2_20130101010101.gz']
self.assertEqual(sorted(pipes_file_list), sorted(expected_files_list))
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.validate_current_timestamp')
def test_gpcrondump_init0(self, mock1, mock2):
options = GpCronDumpTestCase.Options()
options.timestamp_key = None
options.local_dump_prefix = 'foo'
options.ddboost = False
options.ddboost_verify = False
options.ddboost_config_remove = False
options.ddboost_user = False
options.ddboost_host = False
options.max_streams = None
options.list_backup_files = False
gpcd = GpCronDump(options, None)
self.assertEqual(gpcd.dump_prefix, 'foo_')
@patch('gpcrondump.os.path.isfile', return_value=True)
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.os.path.getsize', return_value=111)
@patch('gpcrondump.yaml.load', return_value={'EMAIL_DETAILS': [{'FROM': 'RRP_MPE2_DCA_1', 'DBNAME': 'testdb100', 'SUBJECT': "backup completed for Database 'testdb100'"}]})
def test_validate_parse_email_File00(self, mock1, mock2, mock3, mock4):
options = GpCronDumpTestCase.Options()
options.include_email_file = "/tmp/abc.yaml"
m = mock.MagicMock()
with patch('__builtin__.open', m, create=True):
cron = GpCronDump(options, None)
@patch('gpcrondump.os.path.isfile', return_value=False)
@patch('gpcrondump.GpCronDump._get_master_port')
def test_validate_parse_email_File01(self, mock1, mock2):
options = GpCronDumpTestCase.Options()
options.include_email_file = "/tmp/abc.yaml"
with self.assertRaisesRegexp(Exception, "\'%s\' file does not exist." % options.include_email_file):
cron = GpCronDump(options, None)
@patch('gpcrondump.os.path.isfile', return_value=True)
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.os.path.getsize', return_value=111)
def test_validate_parse_email_File02(self, mock1, mock2, mock3):
options = GpCronDumpTestCase.Options()
options.include_email_file = "/tmp/abc"
with self.assertRaisesRegexp(Exception, "'%s' is not '.yaml' file. File containing email details should be '.yaml' file." % options.include_email_file):
cron = GpCronDump(options, None)
@patch('gpcrondump.os.path.isfile', return_value=True)
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.os.path.getsize', return_value=0)
def test_validate_parse_email_File03(self, mock1, mock2, mock3):
options = GpCronDumpTestCase.Options()
options.include_email_file = "/tmp/abc.yaml"
with self.assertRaisesRegexp(Exception, "'%s' file is empty." % options.include_email_file):
cron = GpCronDump(options, None)
@patch('gpcrondump.os.path.isfile', return_value=True)
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.os.path.getsize', return_value=111)
@patch('gpcrondump.yaml.load', return_value={'EMAIL_DETAILS': [{'FROM': 'RRP_MPE2_DCA_1', 'NAME': 'testdb100', 'SUBJECT': "backup completed for Database 'testdb100'"}]})
def test_validate_parse_email_File04(self, mock1, mock2, mock3, mock4):
options = GpCronDumpTestCase.Options()
options.include_email_file = "/tmp/abc.yaml"
m = mock.MagicMock()
with self.assertRaisesRegexp(Exception, "\'%s\' file is not formatted properly." % options.include_email_file):
with patch('__builtin__.open', m, create=True):
cron = GpCronDump(options, None)
@patch('gpcrondump.os.path.isfile', return_value=True)
@patch('gpcrondump.GpCronDump._get_master_port')
@patch('gpcrondump.os.path.getsize', return_value=111)
@patch('gpcrondump.yaml.load', return_value={'EMAIL_DETAILS': [{'FROM': 'RRP_MPE2_DCA_1', 'DBNAME': None, 'SUBJECT': "backup completed for Database 'testdb100'"}]})
def test_validate_parse_email_File05(self, mock1, mock2, mock3, mock4):
options = GpCronDumpTestCase.Options()
options.include_email_file = "/tmp/abc.yaml"
m = mock.MagicMock()
with self.assertRaisesRegexp(Exception, "\'%s\' file is not formatted properly." % options.include_email_file):
with patch('__builtin__.open', m, create=True):
cron = GpCronDump(options, None)
@patch('gpcrondump.MailDumpEvent')
@patch('gpcrondump.GpCronDump._get_master_port')
def test_send_email00(self, mock1, MailDumpEvent):
options = GpCronDumpTestCase.Options()
dump_database = 'testdb1'
current_exit_status = 0
time_start = '12:07:09'
time_end = '12:08:18'
cron = GpCronDump(options, None)
cron._send_email(dump_database, current_exit_status, time_start, time_end)
#------------------------------- Mainline --------------------------------
if __name__ == '__main__':
unittest.main()
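# Added note (illustrative sketch, not part of the original suite): the stacked
# @patch decorators above are applied bottom-up, so the decorator written closest
# to a test method supplies the first mock argument. For example:
#
#     @patch('gpcrondump.GpCronDump._get_master_port')    # -> second mock argument
#     @patch('gpcrondump.validate_current_timestamp')     # -> first mock argument
#     def test_example(self, mock_validate, mock_port):
#         ...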
| 2.125 | 2 |
ambari-server/src/test/python/stacks/2.0.6/HIVE/test_hive_service_check.py | vsosrc/ambari | 0 | 6566 | #!/usr/bin/env python
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import os
from mock.mock import MagicMock, call, patch
from stacks.utils.RMFTestCase import *
import datetime, sys, socket
import resource_management.libraries.functions
@patch.object(resource_management.libraries.functions, "get_unique_id_and_date", new = MagicMock(return_value=''))
@patch("socket.socket", new = MagicMock())
class TestServiceCheck(RMFTestCase):
@patch("sys.exit")
def test_service_check_default(self, sys_exit_mock):
self.executeScript("2.0.6/services/HIVE/package/scripts/service_check.py",
classname="HiveServiceCheck",
command="service_check",
config_file="default.json"
)
self.assertResourceCalled('File', '/tmp/hcatSmoke.sh',
content = StaticFile('hcatSmoke.sh'),
mode = 0755,
)
self.assertResourceCalled('Execute', 'env JAVA_HOME=/usr/jdk64/jdk1.7.0_45 /tmp/hcatSmoke.sh hcatsmoke prepare',
logoutput = True,
path = ['/usr/sbin', '/usr/local/nin', '/bin', '/usr/bin'],
tries = 3,
user = 'ambari-qa',
environment = {'PATH' : os.environ['PATH'] + os.pathsep + "/usr/lib/hive/bin"},
try_sleep = 5,
)
self.assertResourceCalled('ExecuteHadoop', 'fs -test -e /apps/hive/warehouse/hcatsmoke',
logoutput = True,
user = 'hdfs',
conf_dir = '/etc/hadoop/conf',
keytab=UnknownConfigurationMock(),
kinit_path_local='/usr/bin/kinit',
bin_dir = '/usr/lib/hive/bin',
security_enabled=False
)
self.assertResourceCalled('Execute', ' /tmp/hcatSmoke.sh hcatsmoke cleanup',
logoutput = True,
path = ['/usr/sbin', '/usr/local/nin', '/bin', '/usr/bin'],
tries = 3,
user = 'ambari-qa',
environment = {'PATH' : os.environ['PATH'] + os.pathsep + "/usr/lib/hive/bin"},
try_sleep = 5,
)
self.assertNoMoreResources()
@patch("sys.exit")
def test_service_check_secured(self, sys_exit_mock):
self.executeScript("2.0.6/services/HIVE/package/scripts/service_check.py",
classname="HiveServiceCheck",
command="service_check",
config_file="secured.json"
)
self.assertResourceCalled('File', '/tmp/hcatSmoke.sh',
content = StaticFile('hcatSmoke.sh'),
mode = 0755,
)
self.assertResourceCalled('Execute', '/usr/bin/kinit -kt /etc/security/keytabs/smokeuser.headless.keytab ambari-qa; env JAVA_HOME=/usr/jdk64/jdk1.7.0_45 /tmp/hcatSmoke.sh hcatsmoke prepare',
logoutput = True,
path = ['/usr/sbin', '/usr/local/nin', '/bin', '/usr/bin'],
tries = 3,
user = 'ambari-qa',
environment = {'PATH' : os.environ['PATH'] + os.pathsep + "/usr/lib/hive/bin"},
try_sleep = 5,
)
self.assertResourceCalled('ExecuteHadoop', 'fs -test -e /apps/hive/warehouse/hcatsmoke',
logoutput = True,
user = 'hdfs',
conf_dir = '/etc/hadoop/conf',
keytab='/etc/security/keytabs/hdfs.headless.keytab',
kinit_path_local='/usr/bin/kinit',
security_enabled=True,
bin_dir = '/usr/lib/hive/bin',
principal='hdfs'
)
self.assertResourceCalled('Execute', '/usr/bin/kinit -kt /etc/security/keytabs/smokeuser.headless.keytab ambari-qa; /tmp/hcatSmoke.sh hcatsmoke cleanup',
logoutput = True,
path = ['/usr/sbin', '/usr/local/nin', '/bin', '/usr/bin'],
tries = 3,
user = 'ambari-qa',
environment = {'PATH' : os.environ['PATH'] + os.pathsep + "/usr/lib/hive/bin"},
try_sleep = 5,
)
self.assertNoMoreResources()
| 1.617188 | 2 |
test/linux/gyptest-ldflags-from-environment.py | chlorm-forks/gyp | 77 | 6567 | <reponame>chlorm-forks/gyp
#!/usr/bin/env python
# Copyright (c) 2017 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies the use of linker flags in environment variables.
In this test, gyp and build both run in the same local environment.
"""
import TestGyp
import re
import subprocess
import sys
FORMATS = ('make', 'ninja')
if sys.platform.startswith('linux'):
test = TestGyp.TestGyp(formats=FORMATS)
CHDIR = 'ldflags-from-environment'
with TestGyp.LocalEnv({'LDFLAGS': '-Wl,--dynamic-linker=/target',
'LDFLAGS_host': '-Wl,--dynamic-linker=/host',
'GYP_CROSSCOMPILE': '1'}):
test.run_gyp('test.gyp', chdir=CHDIR)
test.build('test.gyp', chdir=CHDIR)
def GetDynamicLinker(p):
p = test.built_file_path(p, chdir=CHDIR)
r = re.compile(r'\[Requesting program interpreter: ([^\]]+)\]')
proc = subprocess.Popen(['readelf', '-l', p], stdout=subprocess.PIPE)
o = proc.communicate()[0].decode('utf-8')
assert not proc.returncode
return r.search(o).group(1)
if GetDynamicLinker('ldflags') != '/target':
test.fail_test()
if GetDynamicLinker('ldflags_host') != '/host':
test.fail_test()
test.pass_test()
| 1.960938 | 2 |
tests/test_advanced.py | dhaitz/python-package-template | 0 | 6568 | <gh_stars>0
# -*- coding: utf-8 -*-
from .context import sample
def test_thoughts():
assert(sample.hmm() is None)
| 1.640625 | 2 |
binary_tree/m_post_order_traversal.py | dhrubach/python-code-recipes | 0 | 6569 | ######################################################################
# LeetCode Problem Number : 145
# Difficulty Level : Medium
# URL : https://leetcode.com/problems/binary-tree-postorder-traversal/
######################################################################
from binary_search_tree.tree_node import TreeNode
class BinaryTree:
# runtime --> 77.59%, memory --> 50.59%
def postOrderRecursive(self, root: TreeNode) -> [int]:
if not root:
return []
res = []
""" post - order traversal
visit left sub - tree
visit right sub - tree
visit node
"""
res += self.postOrderRecursive(root.left)
res += self.postOrderRecursive(root.right)
res.append(root.val)
""" return visited node + child nodes """
return res
def postOrderIterative(self, root: TreeNode) -> [int]:
if not root:
return []
ret = []
""" on visiting a node, push 2 copies to the stack.
use 1st copy to process the child nodes
use 2nd copy to insert into result
"""
st = [root] * 2
while st:
cur = st.pop()
""" if current node is the last node in the stack,
then visit it's child nodes
if current node is not the last node in the stack,
then current node is the 2nd copy. Insert node into
result list
"""
if st and st[-1] is cur:
"""insert right child node followed by left.
this ensures processing is done from left to right.
"""
if cur.right:
st += [cur.right] * 2
if cur.left:
st += [cur.left] * 2
else:
ret.append(cur.val)
return ret
# runtime --> 54.35%, memory --> 5.09%
def postOrderIterativeReverse(self, root: TreeNode) -> [int]:
if not root:
return []
res, stack = [], [root]
while stack:
cur = stack.pop()
if cur:
""" visit the nodes in reverse order
i.e. node -> right child node -> left child node
similar to right-first pre-order traversal
"""
res.append(cur.val)
stack.append(cur.left)
stack.append(cur.right)
""" reversed result will give post-order traversal """
return res[::-1]
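# Minimal usage sketch (added for illustration; assumes TreeNode offers a
# TreeNode(val) constructor with `left`/`right` attributes, as in the
# LeetCode-style node imported above).
if __name__ == "__main__":
    root = TreeNode(1)
    root.left = TreeNode(2)
    root.right = TreeNode(3)
    root.left.left = TreeNode(4)
    tree = BinaryTree()
    # every variant visits the left subtree, then the right subtree, then the node
    print(tree.postOrderRecursive(root))         # [4, 2, 3, 1]
    print(tree.postOrderIterative(root))         # [4, 2, 3, 1]
    print(tree.postOrderIterativeReverse(root))  # [4, 2, 3, 1]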
| 3.890625 | 4 |
dokuwiki.py | luminisward/python-dokuwiki | 0 | 6570 | # -*- coding: utf-8 -*-
"""This python module aims to manage
`DokuWiki <https://www.dokuwiki.org/dokuwiki>`_ wikis by using the
provided `XML-RPC API <https://www.dokuwiki.org/devel:xmlrpc>`_. It is
compatible with python2.7 and python3+.
Installation
------------
It is on `PyPi <https://pypi.python.org/pypi/dokuwiki>`_ so you can use
the ``pip`` command to install it::
pip install dokuwiki
Otherwise sources are in `github <https://github.com/fmenabe/python-dokuwiki>`_
"""
import re
import sys
import base64
import weakref
from xml.parsers.expat import ExpatError
if sys.version_info[0] == 3:
from xmlrpc.client import ServerProxy, Binary, Fault, Transport
from urllib.parse import urlencode
else:
from xmlrpclib import ServerProxy, Binary, Fault, Transport
from urllib import urlencode
from datetime import datetime, timedelta
ERR = 'XML or text declaration not at start of entity: line 2, column 0'
_URL_RE = re.compile(r'(?P<proto>https?)://(?P<host>[^/]*)(?P<uri>/.*)?')
def date(date):
"""DokuWiki returns dates of `xmlrpclib`/`xmlrpc.client` ``DateTime``
type and the format changes between DokuWiki versions ... This function
    converts *date* to a `datetime` object.
"""
date = date.value
return (datetime.strptime(date[:-5], '%Y-%m-%dT%H:%M:%S')
if len(date) == 24
else datetime.strptime(date, '%Y%m%dT%H:%M:%S'))
def utc2local(date):
"""DokuWiki returns date with a +0000 timezone. This function convert *date*
to the local time.
"""
date_offset = (datetime.now() - datetime.utcnow())
    # Python < 2.7 doesn't have the 'total_seconds' method, so calculate it by hand!
date_offset = (date_offset.microseconds +
(date_offset.seconds + date_offset.days * 24 * 3600) * 1e6) / 1e6
date_offset = int(round(date_offset / 60 / 60))
return date + timedelta(hours=date_offset)
class DokuWikiError(Exception):
"""Exception raised by this module when there is an error."""
pass
class CookiesTransport(Transport):
"""A Python3 xmlrpc.client.Transport subclass that retains cookies."""
def __init__(self):
Transport.__init__(self)
self._cookies = dict()
def send_headers(self, connection, headers):
if self._cookies:
cookies = map(lambda x: x[0] + '=' + x[1], self._cookies.items())
connection.putheader("Cookie", "; ".join(cookies))
Transport.send_headers(self, connection, headers)
def parse_response(self, response):
"""parse and store cookie"""
try:
for header in response.msg.get_all("Set-Cookie"):
cookie = header.split(";", 1)[0]
cookieKey, cookieValue = cookie.split("=", 1)
self._cookies[cookieKey] = cookieValue
finally:
return Transport.parse_response(self, response)
class CookiesTransport2(Transport):
"""A Python2 xmlrpclib.Transport subclass that retains cookies."""
def __init__(self):
Transport.__init__(self)
self._cookies = dict()
def send_request(self, connection, handler, request_body):
Transport.send_request(self, connection, handler, request_body)
# set cookie below handler
if self._cookies:
cookies = map(lambda x: x[0] + '=' + x[1], self._cookies.items())
connection.putheader("Cookie", "; ".join(cookies))
def parse_response(self, response):
"""parse and store cookie"""
try:
for header in response.getheader("set-cookie").split(", "):
# filter 'expire' information
if not header.startswith("D"):
continue
cookie = header.split(";", 1)[0]
cookieKey, cookieValue = cookie.split("=", 1)
self._cookies[cookieKey] = cookieValue
finally:
return Transport.parse_response(self, response)
class DokuWiki(object):
"""Initialize a connection to a DokuWiki wiki. *url*, *user* and
*password* are respectively the URL, the login and the password for
connecting to the wiki. *kwargs* are `xmlrpclib`/`xmlrpc.client`
**ServerProxy** parameters.
    The exception `DokuWikiError` is raised if the authentication
    fails, but other exceptions (like ``gaierror`` for an invalid domain,
    ``ProtocolError`` for an invalid wiki, ...) are not caught.
.. code::
try:
wiki = dokuwiki.DokuWiki('URL', 'USER', 'PASSWORD', cookieAuth=False)
except (DokuWikiError, Exception) as err:
print('unable to connect: %s' % err)
"""
def __init__(self, url, user, password, cookieAuth=False, **kwargs):
"""Initialize the object by connecting to the XMLRPC server."""
# Initialize XMLRPC client.
try:
params = _URL_RE.search(url).groupdict()
if cookieAuth == False:
url = '%s://%s:%s@%s%s/lib/exe/xmlrpc.php' % (
params['proto'], user, password, params['host'], params['uri'] or '')
else:
url = '%s://%s%s/lib/exe/xmlrpc.php' % (
params['proto'], params['host'], params['uri'] or '')
except AttributeError:
raise DokuWikiError("invalid url '%s'" % url)
if cookieAuth == False:
self.proxy = ServerProxy(url, **kwargs)
else:
if sys.version_info[0] == 3:
self.proxy = ServerProxy(url, CookiesTransport(), **kwargs)
else:
self.proxy = ServerProxy(url, CookiesTransport2(), **kwargs)
# Force login to check the connection.
if not self.login(user, password):
raise DokuWikiError('invalid login or password!')
# Set "namespaces" for pages and medias functions.
self.pages = _Pages(weakref.ref(self)())
self.medias = _Medias(weakref.ref(self)())
def send(self, command, *args, **kwargs):
"""Generic method for executing an XML-RPC *command*. *args* and
*kwargs* are the arguments and parameters needed by the command.
"""
args = list(args)
if kwargs:
args.append(kwargs)
method = self.proxy
for elt in command.split('.'):
method = getattr(method, elt)
try:
return method(*args)
except Fault as err:
if err.faultCode == 121:
return {}
elif err.faultCode == 321:
return []
raise DokuWikiError(err)
except ExpatError as err:
if str(err) != ERR:
raise DokuWikiError(err)
@property
def version(self):
"""Property that returns the DokuWiki version of the remote Wiki."""
return self.send('dokuwiki.getVersion')
@property
def time(self):
"""Property that returns the current time at the remote wiki server as
Unix timestamp.
"""
return self.send('dokuwiki.getTime')
@property
def xmlrpc_version(self):
"""Property that returns the XML RPC interface version of the remote
Wiki. This is DokuWiki implementation specific and independent of the
supported standard API version returned by ``wiki.getRPCVersionSupported``.
"""
return self.send('dokuwiki.getXMLRPCAPIVersion')
@property
def xmlrpc_supported_version(self):
"""Property that returns *2* with the supported RPC API version."""
return self.send('wiki.getRPCVersionSupported')
@property
def title(self):
"""Property that returns the title of the wiki."""
return self.send('dokuwiki.getTitle')
def login(self, user, password):
"""Log to the wiki using *user* and *password* credentials. It returns
        a boolean that indicates whether the user successfully authenticated."""
return self.send('dokuwiki.login', user, password)
def add_acl(self, scope, user, permission):
"""Add an `ACL <https://www.dokuwiki.org/acl>`_ rule that restricts
the page/namespace *scope* to *user* (use *@group* syntax for groups)
        with *permission* level. It returns a boolean that indicates whether the rule
was correctly added.
"""
return self.send('plugin.acl.addAcl', scope, user, permission)
def del_acl(self, scope, user):
"""Delete any ACL matching the given *scope* and *user* (or group if
        *@group* syntax is used). It returns a boolean that indicates whether the rule
was correctly removed.
"""
return self.send('plugin.acl.delAcl', scope, user)
class _Pages(object):
"""This object regroup methods for managing pages of a DokuWiki. This object
is accessible from the ``pages`` property of an `DokuWiki` instance::
wiki = dokuwiki.DokuWiki('URL', 'User', 'Password')
wiki.pages.list()
"""
def __init__(self, dokuwiki):
self._dokuwiki = dokuwiki
def list(self, namespace='/', **options):
"""List all pages of the given *namespace*.
Valid *options* are:
* *depth*: (int) recursion level, 0 for all
* *hash*: (bool) do an md5 sum of content
* *skipacl*: (bool) list everything regardless of ACL
"""
return self._dokuwiki.send('dokuwiki.getPagelist', namespace, options)
def changes(self, timestamp):
"""Returns a list of changes since given *timestamp*.
For example, for returning all changes since *2016-01-01*::
from datetime import datetime
wiki.pages.changes(datetime(2016, 1, 1).timestamp())
"""
return self._dokuwiki.send('wiki.getRecentChanges', timestamp)
def search(self, string):
"""Performs a fulltext search on *string* and returns the first 15
results.
"""
return self._dokuwiki.send('dokuwiki.search', string)
def versions(self, page, offset=0):
"""Returns the available versions of *page*. *offset* can be used to
list earlier versions in the history.
"""
return self._dokuwiki.send('wiki.getPageVersions', page, offset)
def info(self, page, version=None):
"""Returns informations of *page*. Informations of the last version
is returned if *version* is not set.
"""
return (self._dokuwiki.send('wiki.getPageInfoVersion', page, version)
if version is not None
else self._dokuwiki.send('wiki.getPageInfo', page))
def get(self, page, version=None):
"""Returns the content of *page*. The content of the last version is
returned if *version* is not set.
"""
return (self._dokuwiki.send('wiki.getPageVersion', page, version)
if version is not None
else self._dokuwiki.send('wiki.getPage', page))
def append(self, page, content, **options):
"""Appends *content* text to *page*.
Valid *options* are:
* *sum*: (str) change summary
* *minor*: (bool) whether this is a minor change
"""
return self._dokuwiki.send('dokuwiki.appendPage', page, content, options)
def html(self, page, version=None):
"""Returns HTML content of *page*. The HTML content of the last version
of the page is returned if *version* is not set.
"""
return (self._dokuwiki.send('wiki.getPageHTMLVersion', page, version)
if version is not None
else self._dokuwiki.send('wiki.getPageHTML', page))
def set(self, page, content, **options):
"""Set/replace the *content* of *page*.
Valid *options* are:
* *sum*: (str) change summary
* *minor*: (bool) whether this is a minor change
"""
try:
return self._dokuwiki.send('wiki.putPage', page, content, options)
except ExpatError as err:
# Sometime the first line of the XML response is blank which raise
# the 'ExpatError' exception although the change has been done. This
# allow to ignore the error.
if str(err) != ERR:
raise DokuWikiError(err)
def delete(self, page):
"""Delete *page* by setting an empty content."""
return self.set(page, '')
def lock(self, page):
"""Locks *page*."""
result = self._dokuwiki.send('dokuwiki.setLocks',
lock=[page], unlock=[])
if result['lockfail']:
raise DokuWikiError('unable to lock page')
def unlock(self, page):
"""Unlocks *page*."""
result = self._dokuwiki.send('dokuwiki.setLocks',
lock=[], unlock=[page])
if result['unlockfail']:
raise DokuWikiError('unable to unlock page')
def permission(self, page):
"""Returns the permission level of *page*."""
return self._dokuwiki.send('wiki.aclCheck', page)
def links(self, page):
"""Returns a list of all links contained in *page*."""
return self._dokuwiki.send('wiki.listLinks', page)
def backlinks(self, page):
"""Returns a list of all links referencing *page*."""
return self._dokuwiki.send('wiki.getBackLinks', page)
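# Illustrative page workflow (added sketch; assumes `wiki` is a DokuWiki
# instance created as shown in the class docstring above):
#
#     content = wiki.pages.get('namespace:page')
#     wiki.pages.set('namespace:page', content + '\nnew line', sum='small edit', minor=True)
#     print(wiki.pages.info('namespace:page'))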
class _Medias(object):
"""This object regroup methods for managing medias of a DokuWiki. This
object is accessible from the ``medias`` property of an `DokuWiki`
instance::
wiki = dokuwiki.DokuWiki('URL', 'User', 'Password')
wiki.medias.list()
"""
def __init__(self, dokuwiki):
self._dokuwiki = dokuwiki
def list(self, namespace='/', **options):
"""Returns all medias of the given *namespace*.
Valid *options* are:
* *depth*: (int) recursion level, 0 for all
* *skipacl*: (bool) skip acl checking
* *pattern*: (str) check given pattern
* *hash*: (bool) add hashes to result list
"""
return self._dokuwiki.send('wiki.getAttachments', namespace, options)
def changes(self, timestamp):
"""Returns the list of medias changed since given *timestamp*.
For example, for returning all changes since *2016-01-01*::
from datetime import datetime
wiki.medias.changes(datetime(2016, 1, 1).timestamp())
"""
return self._dokuwiki.send('wiki.getRecentMediaChanges', timestamp)
def get(self, media, dirpath=None, filename=None, overwrite=False, b64decode=False):
"""Returns the binary data of *media* or save it to a file. If *dirpath*
is not set the binary data is returned, otherwise the data is saved
to a file. By default, the filename is the name of the media but it can
be changed with *filename* parameter. *overwrite* parameter allow to
overwrite the file if it already exists locally.
"""
import os
data = self._dokuwiki.send('wiki.getAttachment', media)
data = base64.b64decode(data) if b64decode else data.data
if dirpath is None:
return data
if filename is None:
filename = media.replace('/', ':').split(':')[-1]
if not os.path.exists(dirpath):
os.makedirs(dirpath)
filepath = os.path.join(dirpath, filename)
if os.path.exists(filepath) and not overwrite:
raise FileExistsError("[Errno 17] File exists: '%s'" % filepath)
with open(filepath, 'wb') as fhandler:
fhandler.write(data)
def info(self, media):
"""Returns informations of *media*."""
return self._dokuwiki.send('wiki.getAttachmentInfo', media)
def add(self, media, filepath, overwrite=True):
"""Set *media* from local file *filepath*. *overwrite* parameter specify
if the media must be overwrite if it exists remotely.
"""
with open(filepath, 'rb') as fhandler:
self._dokuwiki.send('wiki.putAttachment', media,
Binary(fhandler.read()), ow=overwrite)
def set(self, media, _bytes, overwrite=True, b64encode=False):
"""Set *media* from *_bytes*. *overwrite* parameter specify if the media
must be overwrite if it exists remotely.
"""
data = base64.b64encode(_bytes) if b64encode else Binary(_bytes)
self._dokuwiki.send('wiki.putAttachment', media, data, ow=overwrite)
def delete(self, media):
"""Delete *media*."""
return self._dokuwiki.send('wiki.deleteAttachment', media)
class Dataentry(object):
"""Object that manage `data entries <https://www.dokuwiki.org/plugin:data>`_."""
@staticmethod
def get(content, keep_order=False):
"""Get dataentry from *content*. *keep_order* indicates whether to
        return an ordered dictionary."""
if keep_order:
from collections import OrderedDict
dataentry = OrderedDict()
else:
dataentry = {}
found = False
for line in content.split('\n'):
if line.strip().startswith('---- dataentry'):
found = True
continue
elif line == '----':
break
elif not found:
continue
line_split = line.split(':')
key = line_split[0].strip()
value = re.sub('#.*$', '', ':'.join(line_split[1:])).strip()
dataentry.setdefault(key, value)
if not found:
raise DokuWikiError('no dataentry found')
return dataentry
@staticmethod
def gen(name, data):
"""Generate dataentry *name* from *data*."""
return '---- dataentry %s ----\n%s\n----' % (name, '\n'.join(
'%s:%s' % (attr, value) for attr, value in data.items()))
@staticmethod
def ignore(content):
"""Remove dataentry from *content*."""
page_content = []
start = False
for line in content.split('\n'):
if line == '----' and not start:
start = True
continue
if start:
page_content.append(line)
return '\n'.join(page_content) if page_content else content
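# Illustrative round trip for the Dataentry helpers (added sketch; the entry
# name and fields below are invented for demonstration only).
if __name__ == '__main__':
    _entry = Dataentry.gen('book', {'title': 'Dune', 'author': 'Herbert'})
    print(_entry)
    # ---- dataentry book ----
    # title:Dune
    # author:Herbert
    # ----
    # (field order may vary on Python < 3.7)
    print(Dataentry.get(_entry))  # {'title': 'Dune', 'author': 'Herbert'}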
| 2.53125 | 3 |
setup.py | lvgig/test-aide | 2 | 6571 | import setuptools
import re
with open("README.md", "r") as fh:
long_description = fh.read()
# get version from _version.py file, from below
# https://stackoverflow.com/questions/458550/standard-way-to-embed-version-into-python-package
VERSION_FILE = "test_aide/_version.py"
version_file_str = open(VERSION_FILE, "rt").read()
VERSION_STR_RE = r"^__version__ = ['\"]([^'\"]*)['\"]"
mo = re.search(VERSION_STR_RE, version_file_str, re.M)
if mo:
version = mo.group(1)
else:
raise RuntimeError("Unable to find version string in %s." % (VERSION_FILE,))
def list_reqs(fname="requirements.txt"):
with open(fname) as fd:
return fd.read().splitlines()
setuptools.setup(
name="test-aide",
version=version,
author="LV GI Data Science Team",
author_email="<EMAIL>",
description="Package of helper functions to be used for unit testing",
long_description=long_description,
long_description_content_type="text/markdown",
packages=setuptools.find_packages(),
install_requires=list_reqs(),
python_requires=">=3.6",
classifiers=[
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Operating System :: OS Independent",
"License :: OSI Approved :: BSD License",
],
)
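# For reference (hypothetical file content, added for illustration): the version
# lookup above expects test_aide/_version.py to contain a line such as
#
#     __version__ = "1.2.3"
#
# which VERSION_STR_RE captures as "1.2.3".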
| 2.015625 | 2 |
examples/pylab_examples/matshow.py | jbbrokaw/matplotlib | 16 | 6572 | """Simple matshow() example."""
from matplotlib.pylab import *
def samplemat(dims):
"""Make a matrix with all zeros and increasing elements on the diagonal"""
aa = zeros(dims)
for i in range(min(dims)):
aa[i, i] = i
return aa
# Display 2 matrices of different sizes
dimlist = [(12, 12), (15, 35)]
for d in dimlist:
matshow(samplemat(d))
# Display a random matrix with a specified figure number and a grayscale
# colormap
matshow(rand(64, 64), fignum=100, cmap=cm.gray)
show()
| 3.765625 | 4 |
setup.py | HeyLifeHD/rp-bp | 6 | 6573 | #! /usr/bin/env python3
import importlib
import logging
import os
import subprocess
from setuptools import setup
from setuptools.command.install import install as install
from setuptools.command.develop import develop as develop
logger = logging.getLogger(__name__)
stan_model_files = [
os.path.join("nonperiodic", "no-periodicity.stan"),
os.path.join("nonperiodic", "start-high-high-low.stan"),
os.path.join("nonperiodic", "start-high-low-high.stan"),
os.path.join("periodic", "start-high-low-low.stan"),
os.path.join("untranslated", "gaussian-naive-bayes.stan"),
os.path.join("translated", "periodic-gaussian-mixture.stan")
]
stan_pickle_files = [
os.path.join("nonperiodic", "no-periodicity.pkl"),
os.path.join("nonperiodic", "start-high-high-low.pkl"),
os.path.join("nonperiodic", "start-high-low-high.pkl"),
os.path.join("periodic", "start-high-low-low.pkl"),
os.path.join("untranslated", "gaussian-naive-bayes.pkl"),
os.path.join("translated", "periodic-gaussian-mixture.pkl")
]
def _pickle_it(stan, pickle):
import shlex
dirname = os.path.dirname(pickle)
if not os.path.exists(dirname):
os.makedirs(dirname)
cmd = "pickle-stan {} {}".format(shlex.quote(stan), shlex.quote(pickle))
logging.info(cmd)
subprocess.call(cmd, shell=True)
def _post_install(force_recompile):
import site
importlib.reload(site)
import pbio.ribo.ribo_filenames as filenames
import pbio.misc.shell_utils as shell_utils
smf = [os.path.join("rpbp_models", s) for s in stan_model_files]
models_base = filenames.get_default_models_base()
spf = [os.path.join(models_base, s) for s in stan_pickle_files]
# Compile and pickle the Stan models
if force_recompile:
for stan, pickle in zip(smf, spf):
_pickle_it(stan, pickle)
else: # default
for stan, pickle in zip(smf, spf):
if os.path.exists(pickle):
msg = "A model already exists at: {}. Skipping.".format(pickle)
logging.warning(msg)
continue
_pickle_it(stan, pickle)
# Check for the prerequisite programs
programs = ['flexbar']
shell_utils.check_programs_exist(programs, raise_on_error=False,
package_name='flexbar', logger=logger)
programs = ['STAR']
shell_utils.check_programs_exist(programs, raise_on_error=False,
package_name='STAR', logger=logger)
programs = ['bowtie2', 'bowtie2-build-s']
shell_utils.check_programs_exist(programs, raise_on_error=False,
package_name='bowtie2', logger=logger)
programs = ['samtools']
shell_utils.check_programs_exist(programs, raise_on_error=False,
package_name='SAMtools', logger=logger)
class SetupInstall(install):
user_options = install.user_options + [
('force-recompile', None, 'Set this flag to recompile the Stan models'),
]
def initialize_options(self):
install.initialize_options(self)
self.force_recompile = None
def finalize_options(self):
install.finalize_options(self)
def run(self):
force_recompile = self.force_recompile # 0 or 1
level = logging.getLevelName("INFO")
logging.basicConfig(level=level,
format='%(levelname)-8s : %(message)s')
install.run(self)
# skip if RTD
if not os.environ.get('READTHEDOCS') == 'True':
_post_install(force_recompile)
class SetupDevelop(develop):
user_options = develop.user_options + [
('force-recompile', None, 'Set this flag to recompile the Stan models'),
]
def initialize_options(self):
develop.initialize_options(self)
self.force_recompile = None
def finalize_options(self):
develop.finalize_options(self)
def run(self):
force_recompile = self.force_recompile # 0 or 1
level = logging.getLevelName("INFO")
logging.basicConfig(level=level,
format='%(levelname)-8s : %(message)s')
develop.run(self)
# skip if RTD
if not os.environ.get('READTHEDOCS') == 'True':
_post_install(force_recompile)
setup(
cmdclass={
'install': SetupInstall,
'develop': SetupDevelop
}
)
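# Usage note (added; relies only on the custom commands defined above): the Stan
# models are recompiled when the declared user option is passed, e.g.
#
#     python setup.py install --force-recompile
#     python setup.py develop --force-recompile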
| 1.90625 | 2 |
utils/data_utils.py | BorisMansencal/quickNAT_pytorch | 0 | 6574 | import os
import h5py
import nibabel as nb
import numpy as np
import torch
import torch.utils.data as data
from torchvision import transforms
import utils.preprocessor as preprocessor
# transform_train = transforms.Compose([
# transforms.RandomCrop(200, padding=56),
# transforms.ToTensor(),
# ])
class ImdbData(data.Dataset):
def __init__(self, X, y, w, transforms=None):
self.X = X if len(X.shape) == 4 else X[:, np.newaxis, :, :]
self.y = y
self.w = w
self.transforms = transforms
def __getitem__(self, index):
img = torch.from_numpy(self.X[index])
label = torch.from_numpy(self.y[index])
weight = torch.from_numpy(self.w[index])
return img, label, weight
def __len__(self):
return len(self.y)
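# Minimal usage sketch (added for illustration; shapes and dtypes below are
# assumptions, not taken from the project configuration):
#
#     X = np.random.rand(10, 1, 256, 256).astype(np.float32)  # input slices
#     y = np.zeros((10, 256, 256), dtype=np.int64)             # label maps
#     w = np.ones((10, 256, 256), dtype=np.float32)            # class weights
#     loader = data.DataLoader(ImdbData(X, y, w), batch_size=4, shuffle=True)
#     img, label, weight = next(iter(loader))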
def get_imdb_dataset(data_params):
data_train = h5py.File(os.path.join(data_params['data_dir'], data_params['train_data_file']), 'r')
label_train = h5py.File(os.path.join(data_params['data_dir'], data_params['train_label_file']), 'r')
class_weight_train = h5py.File(os.path.join(data_params['data_dir'], data_params['train_class_weights_file']), 'r')
weight_train = h5py.File(os.path.join(data_params['data_dir'], data_params['train_weights_file']), 'r')
data_test = h5py.File(os.path.join(data_params['data_dir'], data_params['test_data_file']), 'r')
label_test = h5py.File(os.path.join(data_params['data_dir'], data_params['test_label_file']), 'r')
class_weight_test = h5py.File(os.path.join(data_params['data_dir'], data_params['test_class_weights_file']), 'r')
weight_test = h5py.File(os.path.join(data_params['data_dir'], data_params['test_weights_file']), 'r')
return (ImdbData(data_train['data'][()], label_train['label'][()], class_weight_train['class_weights'][()]),
ImdbData(data_test['data'][()], label_test['label'][()], class_weight_test['class_weights'][()]))
def load_dataset(file_paths,
orientation,
remap_config,
return_weights=False,
reduce_slices=False,
remove_black=False):
print("Loading and preprocessing data...")
volume_list, labelmap_list, headers, class_weights_list, weights_list = [], [], [], [], []
for file_path in file_paths:
volume, labelmap, class_weights, weights, header = load_and_preprocess(file_path, orientation,
remap_config=remap_config,
reduce_slices=reduce_slices,
remove_black=remove_black,
return_weights=return_weights)
volume_list.append(volume)
labelmap_list.append(labelmap)
if return_weights:
class_weights_list.append(class_weights)
weights_list.append(weights)
headers.append(header)
print("#", end='', flush=True)
print("100%", flush=True)
if return_weights:
return volume_list, labelmap_list, class_weights_list, weights_list, headers
else:
return volume_list, labelmap_list, headers
def load_and_preprocess(file_path, orientation, remap_config, reduce_slices=False,
remove_black=False,
return_weights=False):
volume, labelmap, header = load_data(file_path, orientation)
volume, labelmap, class_weights, weights = preprocess(volume, labelmap, remap_config=remap_config,
reduce_slices=reduce_slices,
remove_black=remove_black,
return_weights=return_weights)
return volume, labelmap, class_weights, weights, header
def load_and_preprocess_eval(file_path, orientation, notlabel=True):
volume_nifty = nb.load(file_path[0])
header = volume_nifty.header
volume = volume_nifty.get_fdata()
if notlabel:
volume = (volume - np.min(volume)) / (np.max(volume) - np.min(volume))
else:
volume = np.round(volume)
if orientation == "COR":
volume = volume.transpose((2, 0, 1))
elif orientation == "AXI":
volume = volume.transpose((1, 2, 0))
return volume, header
def load_data(file_path, orientation):
volume_nifty, labelmap_nifty = nb.load(file_path[0]), nb.load(file_path[1])
volume, labelmap = volume_nifty.get_fdata(), labelmap_nifty.get_fdata()
volume = (volume - np.min(volume)) / (np.max(volume) - np.min(volume))
volume, labelmap = preprocessor.rotate_orientation(volume, labelmap, orientation)
return volume, labelmap, volume_nifty.header
def preprocess(volume, labelmap, remap_config, reduce_slices=False, remove_black=False, return_weights=False):
if reduce_slices:
volume, labelmap = preprocessor.reduce_slices(volume, labelmap)
if remap_config:
labelmap = preprocessor.remap_labels(labelmap, remap_config)
if remove_black:
volume, labelmap = preprocessor.remove_black(volume, labelmap)
if return_weights:
class_weights, weights = preprocessor.estimate_weights_mfb(labelmap)
return volume, labelmap, class_weights, weights
else:
return volume, labelmap, None, None
# def load_file_paths(data_dir, label_dir, volumes_txt_file=None):
# """
# This function returns the file paths combined as a list where each element is a 2 element tuple, 0th being data and 1st being label.
# It should be modified to suit the need of the project
# :param data_dir: Directory which contains the data files
# :param label_dir: Directory which contains the label files
#     :param volumes_txt_file: (Optional) Path to a csv file, when provided only these data points will be read
# :return: list of file paths as string
# """
#
# volume_exclude_list = ['IXI290', 'IXI423']
# if volumes_txt_file:
# with open(volumes_txt_file) as file_handle:
# volumes_to_use = file_handle.read().splitlines()
# else:
# volumes_to_use = [name for name in os.listdir(data_dir) if
# name.startswith('IXI') and name not in volume_exclude_list]
#
# file_paths = [
# [os.path.join(data_dir, vol, 'mri/orig.mgz'), os.path.join(label_dir, vol, 'mri/aseg.auto_noCCseg.mgz')]
# for
# vol in volumes_to_use]
# return file_paths
def load_file_paths(data_dir, label_dir, data_id, volumes_txt_file=None):
"""
This function returns the file paths combined as a list where each element is a 2 element tuple, 0th being data and 1st being label.
It should be modified to suit the need of the project
:param data_dir: Directory which contains the data files
:param label_dir: Directory which contains the label files
:param data_id: A flag indicates the name of Dataset for proper file reading
    :param volumes_txt_file: (Optional) Path to a csv file; when provided, only these data points will be read
:return: list of file paths as string
"""
if volumes_txt_file:
with open(volumes_txt_file) as file_handle:
volumes_to_use = file_handle.read().splitlines()
else:
volumes_to_use = [name for name in os.listdir(data_dir)]
if data_id == "MALC":
file_paths = [
[os.path.join(data_dir, vol, 'mri/orig.mgz'), os.path.join(label_dir, vol + '_glm.mgz')]
for
vol in volumes_to_use]
elif data_id == "ADNI":
file_paths = [
[os.path.join(data_dir, vol, 'orig.mgz'), os.path.join(label_dir, vol, 'Lab_con.mgz')]
for
vol in volumes_to_use]
elif data_id == "CANDI":
file_paths = [
[os.path.join(data_dir, vol + '/' + vol + '_1.mgz'),
os.path.join(label_dir, vol + '/' + vol + '_1_seg.mgz')]
for
vol in volumes_to_use]
elif data_id == "IBSR":
file_paths = [
[os.path.join(data_dir, vol, 'mri/orig.mgz'), os.path.join(label_dir, vol + '_map.nii.gz')]
for
vol in volumes_to_use]
elif data_id == "BORIS": #BORIS
file_paths = [
[os.path.join(data_dir, vol), os.path.join(label_dir, vol.replace('.nii', '_seg.nii'))]
for
vol in volumes_to_use]
else:
        raise ValueError("Invalid entry, valid options are MALC, ADNI, CANDI, IBSR and BORIS")
return file_paths
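# Illustrative example of the structure returned above (the paths are invented for
# illustration only): load_file_paths('/data', '/labels', 'MALC', 'vols.txt') would
# yield entries such as
#     ['/data/vol1/mri/orig.mgz', '/labels/vol1_glm.mgz']
# i.e. one [data_path, label_path] pair per volume listed in the txt file.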
def load_file_paths_eval(data_dir, volumes_txt_file, dir_struct):
"""
    This function returns the file paths combined as a list where each element is a single-element list holding the data file path (no label paths are returned here).
    It should be modified to suit the need of the project
    :param data_dir: Directory which contains the data files
    :param volumes_txt_file: Path to a csv file; when provided, only these data points will be read
:param dir_struct: If the id_list is in FreeSurfer style or normal
:return: list of file paths as string
"""
with open(volumes_txt_file) as file_handle:
volumes_to_use = file_handle.read().splitlines()
if dir_struct == "FS":
file_paths = [
[os.path.join(data_dir, vol, 'mri/orig.mgz')]
for
vol in volumes_to_use]
elif dir_struct == "Linear":
file_paths = [
[os.path.join(data_dir, vol)]
for
vol in volumes_to_use]
elif dir_struct == "part_FS":
file_paths = [
[os.path.join(data_dir, vol, 'orig.mgz')]
for
vol in volumes_to_use]
else:
        raise ValueError("Invalid entry, valid options are FS, part_FS and Linear")
return file_paths
| 2.375 | 2 |
lib/common/app.py | auho/python-ETL | 0 | 6575 | <filename>lib/common/app.py
import argparse
import yaml
import sys
from .conf import MysqlConf
from lib.db import mysql
parser = argparse.ArgumentParser()
parser.add_argument("--config", help="config file name", type=str, required=False, default='office')
input_args = parser.parse_args()
class PartConfig:
def __init__(self):
self._mysqlDbConf = MysqlConf()
self._yamlConfig = None
def parse(self, conf_name, module_path):
self._parse_yaml(conf_name, module_path)
self._mysqlDbConf.load(self.get('mysql'))
def get(self, name):
return self._yamlConfig[name]
def _parse_yaml(self, conf_name, module_path):
yaml_file = module_path + f"/conf/db_{conf_name}.yml"
        with open(yaml_file, 'r', encoding='utf-8') as f:
            yaml_content = f.read()
        self._yamlConfig = yaml.safe_load(yaml_content)
@property
def mysql_db_conf(self):
return self._mysqlDbConf
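# A minimal sketch of the expected conf/db_<config>.yml layout, inferred from the
# keys read above ('mysql', 'debug', 'env'); the mysql sub-keys are assumptions,
# since they depend on what MysqlConf.load expects:
#
# mysql:
#   host: 127.0.0.1
#   port: 3306
#   user: root
#   password: secret
#   db: example
# debug: 1
# env: dev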
class App:
configName = None
moduleImport = None
moduleName = None
modulePath = None
mysqlDb = None
mysqlDbConf = None
ENV = 'dev'
DEBUG = True
ENV_DEBUG = False
def __init__(self, module_path, root_path):
self.configName = input_args.config
self.modulePath = module_path
self.moduleName = self.modulePath.replace(root_path + '/', '')
self.moduleImport = self.moduleName.replace('/', '.')
part_conf = PartConfig() # type:PartConfig
part_conf.parse(conf_name=input_args.config, module_path=module_path)
self.mysqlDbConf = part_conf.mysql_db_conf # type:MysqlConf
self.mysqlDb = mysql.Mysql(self.mysqlDbConf) # type: mysql.Mysql
self.mysqlDb.connect()
self.DEBUG = bool(part_conf.get('debug'))
self.ENV = part_conf.get('env')
if self.ENV == 'dev':
self.ENV_DEBUG = True
def get_db(self):
return self.mysqlDb
def get_sub_import(self, sub):
return self.moduleImport + '.' + sub
def get_sub_path(self, sub):
return self.modulePath + '/' + sub
def get_conf_path(self):
return self.get_sub_path(sub='conf')
def get_data_path(self):
return self.get_sub_path(sub='data')
def get_data_file_path(self, file):
return self.get_data_path() + '/' + file
def log(self):
self._init_info()
def _init_info(self):
print("=" * 50)
print("=" * 2 + f" MODULE PATH:: {self.modulePath}")
print("=" * 2 + f" FILE PATH:: {' '.join(sys.argv)}")
print(f" config file: {self.configName}")
print(f" db:: {self.mysqlDbConf.db}")
print(f" debug:: {str(int(self.DEBUG))}")
print(f" env_debug:: {str(int(self.ENV_DEBUG))}")
print("=" * 50)
print("\n")
| 2.5 | 2 |
design.py | StrangeArcturus/QtAndRequestParser-Project | 0 | 6576 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'design.ui'
#
# Created by: PyQt5 UI code generator 5.15.4
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(650, 550)
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.label = QtWidgets.QLabel(self.centralwidget)
self.label.setGeometry(QtCore.QRect(20, 10, 140, 13))
self.label.setObjectName("label")
self.song_title = QtWidgets.QLineEdit(self.centralwidget)
self.song_title.setGeometry(QtCore.QRect(90, 30, 113, 20))
self.song_title.setObjectName("song_title")
self.label_2 = QtWidgets.QLabel(self.centralwidget)
self.label_2.setGeometry(QtCore.QRect(20, 30, 60, 13))
self.label_2.setObjectName("label_2")
self.label_3 = QtWidgets.QLabel(self.centralwidget)
self.label_3.setGeometry(QtCore.QRect(220, 30, 80, 13))
self.label_3.setObjectName("label_3")
self.song_autor = QtWidgets.QLineEdit(self.centralwidget)
self.song_autor.setGeometry(QtCore.QRect(310, 30, 113, 20))
self.song_autor.setObjectName("song_autor")
self.label_4 = QtWidgets.QLabel(self.centralwidget)
self.label_4.setGeometry(QtCore.QRect(20, 90, 140, 13))
self.label_4.setObjectName("label_4")
self.orig_text = QtWidgets.QPlainTextEdit(self.centralwidget)
self.orig_text.setGeometry(QtCore.QRect(20, 150, 270, 340))
self.orig_text.setObjectName("orig_text")
self.label_5 = QtWidgets.QLabel(self.centralwidget)
self.label_5.setGeometry(QtCore.QRect(20, 120, 60, 13))
self.label_5.setObjectName("label_5")
self.trans_text = QtWidgets.QPlainTextEdit(self.centralwidget)
self.trans_text.setGeometry(QtCore.QRect(320, 150, 270, 340))
self.trans_text.setObjectName("trans_text")
self.label_6 = QtWidgets.QLabel(self.centralwidget)
self.label_6.setGeometry(QtCore.QRect(320, 120, 120, 13))
self.label_6.setObjectName("label_6")
self.get_text = QtWidgets.QPushButton(self.centralwidget)
self.get_text.setGeometry(QtCore.QRect(310, 70, 100, 23))
self.get_text.setObjectName("get_text")
self.pretty_flag = QtWidgets.QCheckBox(self.centralwidget)
self.pretty_flag.setGeometry(QtCore.QRect(20, 60, 250, 20))
self.pretty_flag.setObjectName("pretty_flag")
self.info = QtWidgets.QLabel(self.centralwidget)
self.info.setGeometry(QtCore.QRect(30, 500, 560, 13))
self.info.setText("")
self.info.setObjectName("info")
self.error_text = QtWidgets.QLabel(self.centralwidget)
self.error_text.setGeometry(QtCore.QRect(30, 520, 560, 20))
self.error_text.setText("")
self.error_text.setObjectName("error_text")
MainWindow.setCentralWidget(self.centralwidget)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "Проект 1"))
self.label.setText(_translate("MainWindow", "Введите данные о песне:"))
self.label_2.setText(_translate("MainWindow", "Название:"))
self.label_3.setText(_translate("MainWindow", "Исполнитель:"))
self.label_4.setText(_translate("MainWindow", "Полученный текст песни:"))
self.label_5.setText(_translate("MainWindow", "Оригинал:"))
self.label_6.setText(_translate("MainWindow", "Перевод на русский:"))
self.get_text.setText(_translate("MainWindow", "Запрос текста"))
self.pretty_flag.setText(_translate("MainWindow", "Красивый текст (без указания на припев)"))
| 1.984375 | 2 |
EP_2019/py_impl/main.py | Alisa-lisa/conferences | 5 | 6577 | from simulation.car import spawn_drivers
from simulation.passenger import spawn_passengers
from simulation.core import World, Clock
conf = {
"x": 100,
"y": 100,
"drivers": 200,
"users": 1000,
"start": "2019-07-08T00:00:00",
"end": "2019-07-08T00:01:00"
}
clock = Clock(conf["start"], conf["end"])
if __name__ == '__main__':
world = World([conf['x'], conf['y']], clock=clock)
world.register_drivers(spawn_drivers(conf["drivers"], conf['x'], conf['y']))
world.register_passengers(spawn_passengers(conf["users"], conf['x'], conf['y']))
world.run(log=False)
| 2.484375 | 2 |
Python/reverse_with_swap.py | avulaankith/Python | 0 | 6578 | <gh_stars>0
#!/bin/python3
import math
import os
import random
import re
import sys
#
# Complete the 'reverse_words_order_and_swap_cases' function below.
#
# The function is expected to return a STRING.
# The function accepts STRING sentence as parameter.
#
def reverse_words_order_and_swap_cases(sentence):
# Write your code here
l = []
st = ""
for i in sentence:
if i == " ":
l.append(st)
st = ""
else:
st += i.swapcase()
# continue
l.append(st)
st = ""
l.reverse()
news = ""
for i in range(len(l)):
if i != (len(l) - 1):
news += l[i] + " "
else:
news += l[i]
return news
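# Quick sanity check of the expected behaviour (assumed example, not part of the
# original exercise): word order is reversed and the case of every letter is swapped.
#   reverse_words_order_and_swap_cases("Hello World")  # -> 'wORLD hELLO'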
sentence = input()
news = reverse_words_order_and_swap_cases(sentence)
print(news)
| 4.0625 | 4 |
playground/check_equal.py | INK-USC/hypter | 11 | 6579 | import json
d1 = {}
with open("/home/qinyuan/zs/out/bart-large-with-description-grouped-1e-5-outerbsz4-innerbsz32-adapterdim4-unfreeze-dec29/test_predictions.jsonl") as fin:
for line in fin:
d = json.loads(line)
d1[d["id"]] = d["output"][0]["answer"]
d2 = {}
dq = {}
with open("/home/qinyuan/zs/out/bart-large-zsre-with-description-LR2e-5-FREQ32-dec27/test_predictions_submitted.jsonl") as fin:
for line in fin:
d = json.loads(line)
d2[d["id"]] = d["output"][0]["answer"]
dq[d["id"]] = d["input"]
d3 = {}
with open("/home/qinyuan/zs/data/structured_zeroshot-test.jsonl") as fin:
for line in fin:
d = json.loads(line)
d3[d["id"]] = [item["answer"] for item in d["output"]]
count = 0
win1 = 0
win2 = 0
for key in d1.keys():
    if d1[key] != d2[key]:
print("{}. {}. {}. {}. {}".format(key, dq[key], d1[key], d2[key], d3[key]))
count += 1
if d1[key] in d3[key] and d2[key] not in d3[key]:
win1 += 1
print(d1[key])
print(d2[key])
if d2[key] in d3[key] and d1[key] not in d3[key]:
win2 += 1
print(d1[key])
print(d2[key])
print(count)
print(win1)
print(win2)
| 2.59375 | 3 |
creeds/static/api1.py | MaayanLab/creeds | 2 | 6580 | <gh_stars>1-10
import json, requests
from pprint import pprint
CREEDS_URL = 'http://amp.pharm.mssm.edu/CREEDS/'
response = requests.get(CREEDS_URL + 'search', params={'q':'STAT3'})
if response.status_code == 200:
    pprint(response.json())
    with open('api1_result.json', 'w') as out_file:
        json.dump(response.json(), out_file, indent=4)
| 2.828125 | 3 |
admin/migrations/0041_course_color.py | rodlukas/UP-admin | 4 | 6581 | <reponame>rodlukas/UP-admin<filename>admin/migrations/0041_course_color.py
# Generated by Django 2.2.3 on 2019-07-31 13:54
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("admin", "0040_auto_20190718_0938")]
operations = [
migrations.AddField(
model_name="course", name="color", field=models.CharField(default="#000", max_length=7)
)
]
| 1.570313 | 2 |
exercicios-Python/ex083.py | pedrosimoes-programmer/exercicios-python | 0 | 6582 | # Bug-free version (stack-based check)
expressao = (str(input('Digite a expressão: ')))
pilhaParenteses = []
for v in expressao:
if v == '(':
pilhaParenteses.append('(')
elif v == ')':
if len(pilhaParenteses) > 0:
pilhaParenteses.pop()
else:
pilhaParenteses.append(')')
break
if len(pilhaParenteses) == 0:
print(f'A expressão {expressao} está válida.')
else:
print(f'A expressão {expressao} está inválida!')
# Buggy version (simple count comparison)
#expressao = (str(input('Digite a expressão: ')))
#if expressao.count('(') == expressao.count(')'):
# print('Sua expressão está válida.')
#else:
# print('Sua expressão está inválida!')
| 3.890625 | 4 |
src/inspectortodo/todo.py | code-acrobat/InspectorTodo | 8 | 6583 | # Copyright 2018 TNG Technology Consulting GmbH, Unterföhring, Germany
# Licensed under the Apache License, Version 2.0 - see LICENSE.md in project root directory
import logging
from xml.sax.saxutils import escape
log = logging.getLogger()
class Todo:
def __init__(self, file_path, line_number, content):
self.file_path = file_path
self.line_number = line_number
self.content = content
self.is_valid = True
self.error_reason = None
def __str__(self):
return 'Todo in file ' + self.file_path + ':' + str(self.line_number) + ' | ' + self.content
def mark_as_valid(self):
self.is_valid = True
self.error_reason = None
def mark_as_invalid(self, error_reason):
self.is_valid = False
self.error_reason = error_reason
def print(self, show_valid=False):
if not show_valid and self.is_valid:
return
log.error('[REASON] %s' % self.error_reason)
log.error('[FILE] %s' % self.file_path)
log.error('[LINE] %s' % self.line_number)
log.error('[CONTENT] %s' % self.content)
def print_xml(self, xml_file):
if self.is_valid:
xml_file.write('\t<testcase classname="{}" name="line {}" />\n'.format(self.file_path, self.line_number))
else:
xml_file.write('\t<testcase classname="{}" name="line {}" >\n'.format(self.file_path, self.line_number))
xml_file.write('\t\t<failure message="{}">{}</failure>\n'.format(self.error_reason, escape(self.content)))
xml_file.write('\t</testcase>\n')
| 2.53125 | 3 |
generators.py | FabLabUTFSM/fusm_usage_report | 0 | 6584 | import dash_core_components as dcc
import dash_html_components as html
import plotly.graph_objs as go
import plotly.express as px
from plotly.subplots import make_subplots
import pandas as pd
import math
from datetime import datetime, time
from utils import MONTH_NAMES, month_range
def section(title, content, gray=False):
return html.Section(className=f'hero is-fullheight is-medium {"has-background-grey-lighter" if gray else ""}', children=[
html.Div(className='hero-body', children=[
html.Div(className='container', children=[
html.Div(className='columns is-centered', children=[
html.Div(className='column is-four-fifths is-full-mobile', children=[
html.Div(className='level', children=[
html.H2(title, className='title')
]),
] + content)
])
])
])
])
def quality_index(df):
indexes = df.sort_values('Valor', ascending=False).fillna('?').values
return html.Div(className='columns is-multiline is-4 is-variable', children=[
html.Div(className=f'column is-one-quarter index-container {"unknown-data" if i[1] == "?" else ""}', children=[
html.H1(i[1], className='title'),
html.H2(i[0], className='subtitle')
]) for i in indexes
])
def month_selector(df, first_month=None):
current_month = datetime.now().month
return html.Div(dcc.RangeSlider(
id='month-range-slider',
marks={i+1: MONTH_NAMES[i] for i in range(first_month-1, current_month)},
min=first_month, max=current_month,
value=[current_month-2,current_month],
pushable=1
), className='slider-frame')
def point_list(items):
return html.Ul([html.Li(item) for item in items])
def first():
return html.Section(className='hero is-fullheight', children=[
html.Div(className='hero-body', children=[
html.Div(className='container', children=[
html.Div(className='columns is-vcentered is-centered', children=[
html.Div(className='column is-5', children=[
html.Figure(className='image is-4by4', children=[
html.Img(src='/indicadores/assets/logo.png', alt='FabLab UTFSM'),
]),
]),
html.Div(className='column is-5 main-title', children=[
html.H1('Informe de Gestión de Operaciones', className='title')
])
])
]),
])
])
def last():
return html.Footer(className='footer has-background-white', children=[
html.Div(className='content has-text-centered', children=[
html.Img(src='/indicadores/assets/footer.png', alt='FabLab UTFSM'),
html.P(className='is-size-7', children=[
'FabLab UTFSM 2019', html.Br(),
'UTFSM Campus San Joaquín, Edificio C', html.Br(),
'Av. <NAME> 3939, Santiago de Chile', html.Br(),
'Desarrollado bajo licencia MIT'
])
])
])
def fig_records(df, months=None, stacked=False):
machine_list = df['Tipo Máquina'].unique()
months = month_range(months)
def create_frame(df, serie_name):
count = df['Tipo Máquina'].value_counts()
frame = pd.DataFrame({'Tipo de Máquina': machine_list})
frame[serie_name] = [count.get(machine, 0) for machine in machine_list]
return frame
extras = {'barmode': 'relative' if stacked else 'group'}
figure = go.Figure()
for m in months:
name = MONTH_NAMES[m-1]
frame = create_frame(df[df.index.month == m], name)
figure.add_trace(go.Bar(x=frame['Tipo de Máquina'], y=frame[name], name=name, hoverinfo='name+y'))
if stacked and months:
frame = create_frame(df[df.index.month.isin(months)], 'Total')
figure.add_trace(go.Scatter(
x=frame['Tipo de Máquina'],
y=frame['Total'],
text=frame['Total'],
textposition='top center',
mode='text',
showlegend=False,
hoverinfo='skip'
))
figure.update_layout(yaxis={ 'title': 'Número de registros'}, **extras)
return figure
def fig_hours(df, months=None, stacked=False):
machine_list = df['Tipo Máquina'].unique()
months=month_range(months)
def create_frame(df, serie_name):
count = df.groupby('Tipo Máquina').sum()['Tiempo de uso en minutos'].divide(60).round(0)
frame = pd.DataFrame({'Tipo de Máquina': machine_list})
frame[serie_name] = [count.get(machine, 0) for machine in machine_list]
return frame
if months and type(months) == list:
df = df[df.index.month.isin(months)]
frame = create_frame(df, 'Total')
figure = go.Figure()
extras = {'barmode': 'relative' if stacked else 'group'}
for m in months:
name = MONTH_NAMES[m-1]
frame = create_frame(df[df.index.month == m], name)
figure.add_trace(go.Bar(y=frame['Tipo de Máquina'], x=frame[name], name=name, hoverinfo='name+x', orientation='h'))
if stacked and months:
frame = create_frame(df[df.index.month.isin(months)], 'Total')
figure.add_trace(go.Scatter(
y=frame['Tipo de Máquina'],
x=frame['Total'],
text=frame['Total'],
textposition='middle right',
mode='text',
showlegend=False,
hoverinfo='skip'
))
figure.update_layout(xaxis={ 'title': f'Horas de uso {"total" if stacked else ""}'}, **extras)
return figure
def cap_per_machine_per_month(month_caps, machine, month):
this_month = month_caps[month_caps['Mes'] == month]
machine_count = {'Impresora 3D': 5, 'Cortadora Láser': 2, 'Router CNC': 3, 'Torno': 1, 'Cirqoid': 1}
return (this_month['Dias'] * this_month['Horas']).values[0] * 60 * machine_count[machine]
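# Worked example of the capacity formula above (the figures are assumptions, not
# real data): a month with 20 working days of 8 hours gives 20 * 8 * 60 = 9600
# minutes per machine; with the 5 'Impresora 3D' machines counted above, the
# total capacity is 5 * 9600 = 48000 minutes.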
def fig_total_capacity_2(df, month_caps, months):
machine_list = df['Tipo Máquina'].unique()
months = month_range(months)
month_names = [MONTH_NAMES[m-1] for m in months]
figure = go.Figure()
for machine in machine_list:
texts = []
caps = []
for month in months:
total_cap = cap_per_machine_per_month(month_caps, machine, month)
hours = total_cap // 60
used_cap = df[df.index.month==month].groupby('Tipo Máquina')['Tiempo de uso en minutos'].sum().divide(total_cap).multiply(100).round(2).get(machine, 0)
caps.append(used_cap)
texts.append(f'{used_cap}% utilizado de una capacidad total de {hours} horas.')
figure.add_trace(go.Bar(x=month_names, y=caps, name=machine, hovertext=texts))
figure.update_layout(barmode='group', yaxis=dict(type='linear', ticksuffix='%', title='Capacidad Utilizada'))
return figure
"""
TODO: Finish the heatmap somehow...
def fig_uses(df, months):
dias = ['Lunes', 'Martes', 'Miércoles', 'Jueves', 'Viernes']
days = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday']
data = df[df.index.month.isin(month_range(months))]
figure = go.Figure()
times = data.groupby([data.index.weekday_name, pd.Grouper(freq='60min', key='Hora Inicio')]).fillna(0).sum().reset_index()
day_times = times[times['Marca temporal'] == 'Monday']['Hora Inicio'].dt.time
z_dict = dict()
for i, d in enumerate(days):
z_dict.update({dias[i]: times[times['Marca temporal'] == d]['Tiempo de uso en minutos'].fillna(0).values})
z_values = pd.DataFrame(z_dict).values
figure.add_trace(go.Heatmap(
x=dias,
y=day_times,
z=z_values))
return figure
"""
def trace_context_use(df, level=None, **kwargs):
grouped = None
if not level:
grouped = df.groupby('Contexto 1')
else:
grouped = df[df['Contexto 1'] == level].groupby('Contexto 2')
context_data = grouped.sum()['Tiempo de uso en minutos']
return go.Pie(labels=context_data.index, values=context_data.values, **kwargs)
def fig_contexts_use(df, months, level, **kwargs):
col_count = 3
row_count = math.ceil(len(month_range(months))/col_count)
figure = make_subplots(row_count, col_count, specs=[[{'type':'domain'} for c in range(col_count)] for r in range(row_count)],
subplot_titles=[MONTH_NAMES[m-1] for m in month_range(months)])
def take_month(months):
for m in month_range(months):
yield trace_context_use(df[df.index.month == m], level, name=MONTH_NAMES[m-1])
pie_factory = take_month(months)
try:
for r in range(row_count):
for c in range(col_count):
figure.add_trace(next(pie_factory), r+1, c+1)
except StopIteration as stop:
pass
return figure
def records_per_machine(df, months=None, stacked=False):
return dcc.Graph(figure=fig_records(df, months=months, stacked=stacked), style={'height': '80vh'})
def time_per_machine(df, months=None, stacked=False):
return dcc.Graph(figure=fig_hours(df, months=months, stacked=stacked), style={'height': '80vh'})
def machine_capacity(df, caps, months=None):
return dcc.Graph(figure=fig_total_capacity_2(df, caps, months), style={'height': '80vh'})
#def uses(df, months):
# return dcc.Graph(figure=fig_uses(df, months), style={'height': '80vh'})
def contexts(df, months, level=None):
return dcc.Graph(figure=fig_contexts_use(df, months, level), style={'height': '80vh'}) | 2.671875 | 3 |
gengine/app/tests_old/test_groups.py | greck2908/gamification-engine | 347 | 6585 | # -*- coding: utf-8 -*-
from gengine.app.tests.base import BaseDBTest
from gengine.app.tests.helpers import create_user, update_user, delete_user, get_or_create_language
from gengine.metadata import DBSession
from gengine.app.model import AuthUser
class TestUserCreation(BaseDBTest):
def test_user_creation(self):
lang = get_or_create_language("en")
user = create_user(
lat = 12.1,
lng = 12.2,
#country = "RO",
#region = "Transylvania",
#city = "Cluj-Napoca",
timezone = "Europe/Bukarest",
language = "en",
additional_public_data = {
"first_name" : "Rudolf",
"last_name" : "<NAME>"
}
)
self.assertTrue(user.lat == 12.1)
self.assertTrue(user.lng == 12.2)
#self.assertTrue(user.country == "RO")
#self.assertTrue(user.region == "Transylvania")
#self.assertTrue(user.city == "Cluj-Napoca")
self.assertTrue(user.timezone == "Europe/Bukarest")
self.assertTrue(user.language_id == lang.id)
self.assertTrue(user.additional_public_data["first_name"] == "Rudolf")
self.assertTrue(user.additional_public_data["last_name"] == "<NAME>")
def test_user_updation(self):
lang = get_or_create_language("en")
user = create_user()
user = update_user(
user_id = user.id,
lat = 14.2,
lng = 16.3,
#country = "EN",
#region = "Transylvania",
#city = "Cluj-Napoca",
timezone = "Europe/Bukarest",
language = "en",
additional_public_data = {
"first_name" : "Rudolf",
"last_name" : "<NAME>"
}
)
# Correct cases
self.assertTrue(user.lat == 14.2)
self.assertTrue(user.lng == 16.3)
#self.assertTrue(user.country == "EN")
#self.assertTrue(user.region == "Transylvania")
#self.assertTrue(user.city == "Cluj-Napoca")
self.assertTrue(user.timezone == "Europe/Bukarest")
self.assertTrue(user.language_id == lang.id)
def test_user_deletion(self):
user1 = create_user()
# Create Second user
user2 = create_user(
lat=85.59,
lng=65.75,
#country="DE",
#region="Niedersachsen",
#city="Osnabrück",
timezone="Europe/Berlin",
language="de",
additional_public_data={
"first_name": "Michael",
"last_name": "Clarke"
},
friends=[1]
)
remaining_users = delete_user(
user_id = user1.id
)
# Correct cases
self.assertNotIn(user1.id, remaining_users)
self.assertEqual(user2.id, remaining_users[0].id)
def test_verify_password(self):
auth_user = AuthUser()
auth_user.password = "<PASSWORD>"
auth_user.active = True
auth_user.email = "<EMAIL>"
DBSession.add(auth_user)
iscorrect = auth_user.verify_password("<PASSWORD>")
self.assertEqual(iscorrect, True)
def test_create_token(self):
user = create_user()
auth_user = AuthUser()
auth_user.user_id = user.id
auth_user.password = "<PASSWORD>"
auth_user.active = True
auth_user.email = "<EMAIL>"
DBSession.add(auth_user)
if auth_user.verify_password("<PASSWORD>"):
token = auth_user.get_or_create_token()
self.assertNotEqual(token, None)
| 2.3125 | 2 |
Lib/fontTools/designspaceLib/__init__.py | guorenxi/fonttools | 0 | 6586 | from __future__ import annotations
import collections
import copy
import itertools
import math
import os
import posixpath
from io import BytesIO, StringIO
from textwrap import indent
from typing import Any, Dict, List, MutableMapping, Optional, Tuple, Union
from fontTools.misc import etree as ET
from fontTools.misc import plistlib
from fontTools.misc.loggingTools import LogMixin
from fontTools.misc.textTools import tobytes, tostr
"""
designSpaceDocument
- read and write designspace files
"""
__all__ = [
'DesignSpaceDocumentError', 'DesignSpaceDocument', 'SourceDescriptor',
'InstanceDescriptor', 'AxisDescriptor', 'RuleDescriptor', 'BaseDocReader',
'BaseDocWriter'
]
# ElementTree allows to find namespace-prefixed elements, but not attributes
# so we have to do it ourselves for 'xml:lang'
XML_NS = "{http://www.w3.org/XML/1998/namespace}"
XML_LANG = XML_NS + "lang"
def posix(path):
"""Normalize paths using forward slash to work also on Windows."""
new_path = posixpath.join(*path.split(os.path.sep))
if path.startswith('/'):
# The above transformation loses absolute paths
new_path = '/' + new_path
elif path.startswith(r'\\'):
# The above transformation loses leading slashes of UNC path mounts
new_path = '//' + new_path
return new_path
def posixpath_property(private_name):
"""Generate a propery that holds a path always using forward slashes."""
def getter(self):
# Normal getter
return getattr(self, private_name)
def setter(self, value):
# The setter rewrites paths using forward slashes
if value is not None:
value = posix(value)
setattr(self, private_name, value)
return property(getter, setter)
class DesignSpaceDocumentError(Exception):
def __init__(self, msg, obj=None):
self.msg = msg
self.obj = obj
def __str__(self):
return str(self.msg) + (
": %r" % self.obj if self.obj is not None else "")
class AsDictMixin(object):
def asdict(self):
d = {}
for attr, value in self.__dict__.items():
if attr.startswith("_"):
continue
if hasattr(value, "asdict"):
value = value.asdict()
elif isinstance(value, list):
value = [
v.asdict() if hasattr(v, "asdict") else v for v in value
]
d[attr] = value
return d
class SimpleDescriptor(AsDictMixin):
""" Containers for a bunch of attributes"""
# XXX this is ugly. The 'print' is inappropriate here, and instead of
# assert, it should simply return True/False
def compare(self, other):
# test if this object contains the same data as the other
for attr in self._attrs:
try:
assert(getattr(self, attr) == getattr(other, attr))
except AssertionError:
print("failed attribute", attr, getattr(self, attr), "!=", getattr(other, attr))
def __repr__(self):
attrs = [f"{a}={repr(getattr(self, a))}," for a in self._attrs]
attrs = indent('\n'.join(attrs), ' ')
return f"{self.__class__.__name__}(\n{attrs}\n)"
class SourceDescriptor(SimpleDescriptor):
"""Simple container for data related to the source
.. code:: python
doc = DesignSpaceDocument()
s1 = SourceDescriptor()
s1.path = masterPath1
s1.name = "master.ufo1"
s1.font = defcon.Font("master.ufo1")
s1.location = dict(weight=0)
s1.familyName = "MasterFamilyName"
s1.styleName = "MasterStyleNameOne"
s1.localisedFamilyName = dict(fr="Caractère")
s1.mutedGlyphNames.append("A")
s1.mutedGlyphNames.append("Z")
doc.addSource(s1)
"""
flavor = "source"
_attrs = ['filename', 'path', 'name', 'layerName',
'location', 'copyLib',
'copyGroups', 'copyFeatures',
'muteKerning', 'muteInfo',
'mutedGlyphNames',
'familyName', 'styleName', 'localisedFamilyName']
filename = posixpath_property("_filename")
path = posixpath_property("_path")
def __init__(
self,
*,
filename=None,
path=None,
font=None,
name=None,
location=None,
designLocation=None,
layerName=None,
familyName=None,
styleName=None,
localisedFamilyName=None,
copyLib=False,
copyInfo=False,
copyGroups=False,
copyFeatures=False,
muteKerning=False,
muteInfo=False,
mutedGlyphNames=None,
):
self.filename = filename
"""string. A relative path to the source file, **as it is in the document**.
MutatorMath + VarLib.
"""
self.path = path
"""The absolute path, calculated from filename."""
self.font = font
"""Any Python object. Optional. Points to a representation of this
source font that is loaded in memory, as a Python object (e.g. a
``defcon.Font`` or a ``fontTools.ttFont.TTFont``).
The default document reader will not fill-in this attribute, and the
default writer will not use this attribute. It is up to the user of
``designspaceLib`` to either load the resource identified by
``filename`` and store it in this field, or write the contents of
this field to the disk and make ```filename`` point to that.
"""
self.name = name
"""string. Optional. Unique identifier name for this source.
MutatorMath + Varlib.
"""
self.designLocation = designLocation if designLocation is not None else location or {}
"""dict. Axis values for this source, in design space coordinates.
MutatorMath + Varlib.
This may be only part of the full design location.
See :meth:`getFullDesignLocation()`
.. versionadded:: 5.0
"""
self.layerName = layerName
"""string. The name of the layer in the source to look for
outline data. Default ``None`` which means ``foreground``.
"""
self.familyName = familyName
"""string. Family name of this source. Though this data
can be extracted from the font, it can be efficient to have it right
here.
Varlib.
"""
self.styleName = styleName
"""string. Style name of this source. Though this data
can be extracted from the font, it can be efficient to have it right
here.
Varlib.
"""
self.localisedFamilyName = localisedFamilyName or {}
"""dict. A dictionary of localised family name strings, keyed by
language code.
If present, will be used to build localized names for all instances.
.. versionadded:: 5.0
"""
self.copyLib = copyLib
"""bool. Indicates if the contents of the font.lib need to
be copied to the instances.
MutatorMath.
.. deprecated:: 5.0
"""
self.copyInfo = copyInfo
"""bool. Indicates if the non-interpolating font.info needs
to be copied to the instances.
MutatorMath.
.. deprecated:: 5.0
"""
self.copyGroups = copyGroups
"""bool. Indicates if the groups need to be copied to the
instances.
MutatorMath.
.. deprecated:: 5.0
"""
self.copyFeatures = copyFeatures
"""bool. Indicates if the feature text needs to be
copied to the instances.
MutatorMath.
.. deprecated:: 5.0
"""
self.muteKerning = muteKerning
"""bool. Indicates if the kerning data from this source
needs to be muted (i.e. not be part of the calculations).
MutatorMath only.
"""
self.muteInfo = muteInfo
"""bool. Indicated if the interpolating font.info data for
this source needs to be muted.
MutatorMath only.
"""
self.mutedGlyphNames = mutedGlyphNames or []
"""list. Glyphnames that need to be muted in the
instances.
MutatorMath only.
"""
@property
def location(self):
"""dict. Axis values for this source, in design space coordinates.
MutatorMath + Varlib.
.. deprecated:: 5.0
Use the more explicit alias for this property :attr:`designLocation`.
"""
return self.designLocation
@location.setter
def location(self, location: Optional[AnisotropicLocationDict]):
self.designLocation = location or {}
def setFamilyName(self, familyName, languageCode="en"):
"""Setter for :attr:`localisedFamilyName`
.. versionadded:: 5.0
"""
self.localisedFamilyName[languageCode] = tostr(familyName)
def getFamilyName(self, languageCode="en"):
"""Getter for :attr:`localisedFamilyName`
.. versionadded:: 5.0
"""
return self.localisedFamilyName.get(languageCode)
def getFullDesignLocation(self, doc: 'DesignSpaceDocument') -> AnisotropicLocationDict:
"""Get the complete design location of this source, from its
:attr:`designLocation` and the document's axis defaults.
.. versionadded:: 5.0
"""
result: AnisotropicLocationDict = {}
for axis in doc.axes:
if axis.name in self.designLocation:
result[axis.name] = self.designLocation[axis.name]
else:
result[axis.name] = axis.map_forward(axis.default)
return result
class RuleDescriptor(SimpleDescriptor):
"""Represents the rule descriptor element: a set of glyph substitutions to
trigger conditionally in some parts of the designspace.
.. code:: python
r1 = RuleDescriptor()
r1.name = "unique.rule.name"
r1.conditionSets.append([dict(name="weight", minimum=-10, maximum=10), dict(...)])
r1.conditionSets.append([dict(...), dict(...)])
r1.subs.append(("a", "a.alt"))
.. code:: xml
<!-- optional: list of substitution rules -->
<rules>
<rule name="vertical.bars">
<conditionset>
<condition minimum="250.000000" maximum="750.000000" name="weight"/>
<condition minimum="100" name="width"/>
<condition minimum="10" maximum="40" name="optical"/>
</conditionset>
<sub name="cent" with="cent.alt"/>
<sub name="dollar" with="dollar.alt"/>
</rule>
</rules>
"""
_attrs = ['name', 'conditionSets', 'subs'] # what do we need here
def __init__(self, *, name=None, conditionSets=None, subs=None):
self.name = name
"""string. Unique name for this rule. Can be used to reference this rule data."""
# list of lists of dict(name='aaaa', minimum=0, maximum=1000)
self.conditionSets = conditionSets or []
"""a list of conditionsets.
- Each conditionset is a list of conditions.
- Each condition is a dict with ``name``, ``minimum`` and ``maximum`` keys.
"""
# list of substitutions stored as tuples of glyphnames ("a", "a.alt")
self.subs = subs or []
"""list of substitutions.
- Each substitution is stored as tuples of glyphnames, e.g. ("a", "a.alt").
- Note: By default, rules are applied first, before other text
shaping/OpenType layout, as they are part of the
`Required Variation Alternates OpenType feature <https://docs.microsoft.com/en-us/typography/opentype/spec/features_pt#-tag-rvrn>`_.
See ref:`rules-element` § Attributes.
"""
def evaluateRule(rule, location):
"""Return True if any of the rule's conditionsets matches the given location."""
return any(evaluateConditions(c, location) for c in rule.conditionSets)
def evaluateConditions(conditions, location):
"""Return True if all the conditions matches the given location.
- If a condition has no minimum, check for < maximum.
- If a condition has no maximum, check for > minimum.
"""
for cd in conditions:
value = location[cd['name']]
if cd.get('minimum') is None:
if value > cd['maximum']:
return False
elif cd.get('maximum') is None:
if cd['minimum'] > value:
return False
elif not cd['minimum'] <= value <= cd['maximum']:
return False
return True
def processRules(rules, location, glyphNames):
"""Apply these rules at this location to these glyphnames.
Return a new list of glyphNames with substitutions applied.
- rule order matters
"""
newNames = []
for rule in rules:
if evaluateRule(rule, location):
for name in glyphNames:
swap = False
for a, b in rule.subs:
if name == a:
swap = True
break
if swap:
newNames.append(b)
else:
newNames.append(name)
glyphNames = newNames
newNames = []
return glyphNames
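# Illustrative usage of processRules (not part of the library source; the rule and
# glyph names below are made up):
#
#   rule = RuleDescriptor(
#       name="demo",
#       conditionSets=[[dict(name="weight", minimum=500, maximum=1000)]],
#       subs=[("dollar", "dollar.alt")],
#   )
#   processRules([rule], {"weight": 600}, ["a", "dollar"])  # -> ['a', 'dollar.alt']
#   processRules([rule], {"weight": 100}, ["a", "dollar"])  # -> ['a', 'dollar']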
AnisotropicLocationDict = Dict[str, Union[float, Tuple[float, float]]]
SimpleLocationDict = Dict[str, float]
class InstanceDescriptor(SimpleDescriptor):
"""Simple container for data related to the instance
.. code:: python
i2 = InstanceDescriptor()
i2.path = instancePath2
i2.familyName = "InstanceFamilyName"
i2.styleName = "InstanceStyleName"
i2.name = "instance.ufo2"
# anisotropic location
i2.designLocation = dict(weight=500, width=(400,300))
i2.postScriptFontName = "InstancePostscriptName"
i2.styleMapFamilyName = "InstanceStyleMapFamilyName"
i2.styleMapStyleName = "InstanceStyleMapStyleName"
i2.lib['com.coolDesignspaceApp.specimenText'] = 'Hamburgerwhatever'
doc.addInstance(i2)
"""
flavor = "instance"
_defaultLanguageCode = "en"
_attrs = ['filename',
'path',
'name',
'locationLabel',
'designLocation',
'userLocation',
'familyName',
'styleName',
'postScriptFontName',
'styleMapFamilyName',
'styleMapStyleName',
'localisedFamilyName',
'localisedStyleName',
'localisedStyleMapFamilyName',
'localisedStyleMapStyleName',
'glyphs',
'kerning',
'info',
'lib']
filename = posixpath_property("_filename")
path = posixpath_property("_path")
def __init__(
self,
*,
filename=None,
path=None,
font=None,
name=None,
location=None,
locationLabel=None,
designLocation=None,
userLocation=None,
familyName=None,
styleName=None,
postScriptFontName=None,
styleMapFamilyName=None,
styleMapStyleName=None,
localisedFamilyName=None,
localisedStyleName=None,
localisedStyleMapFamilyName=None,
localisedStyleMapStyleName=None,
glyphs=None,
kerning=True,
info=True,
lib=None,
):
self.filename = filename
"""string. Relative path to the instance file, **as it is
in the document**. The file may or may not exist.
MutatorMath + VarLib.
"""
self.path = path
"""string. Absolute path to the instance file, calculated from
the document path and the string in the filename attr. The file may
or may not exist.
MutatorMath.
"""
self.font = font
"""Same as :attr:`SourceDescriptor.font`
.. seealso:: :attr:`SourceDescriptor.font`
"""
self.name = name
"""string. Unique identifier name of the instance, used to
identify it if it needs to be referenced from elsewhere in the
document.
"""
self.locationLabel = locationLabel
"""Name of a :class:`LocationLabelDescriptor`. If
provided, the instance should have the same location as the
LocationLabel.
.. seealso::
:meth:`getFullDesignLocation`
:meth:`getFullUserLocation`
.. versionadded:: 5.0
"""
self.designLocation: AnisotropicLocationDict = designLocation if designLocation is not None else (location or {})
"""dict. Axis values for this instance, in design space coordinates.
MutatorMath + Varlib.
.. seealso:: This may be only part of the full location. See:
:meth:`getFullDesignLocation`
:meth:`getFullUserLocation`
.. versionadded:: 5.0
"""
self.userLocation: SimpleLocationDict = userLocation or {}
"""dict. Axis values for this instance, in user space coordinates.
MutatorMath + Varlib.
.. seealso:: This may be only part of the full location. See:
:meth:`getFullDesignLocation`
:meth:`getFullUserLocation`
.. versionadded:: 5.0
"""
self.familyName = familyName
"""string. Family name of this instance.
MutatorMath + Varlib.
"""
self.styleName = styleName
"""string. Style name of this instance.
MutatorMath + Varlib.
"""
self.postScriptFontName = postScriptFontName
"""string. Postscript fontname for this instance.
MutatorMath + Varlib.
"""
self.styleMapFamilyName = styleMapFamilyName
"""string. StyleMap familyname for this instance.
MutatorMath + Varlib.
"""
self.styleMapStyleName = styleMapStyleName
"""string. StyleMap stylename for this instance.
MutatorMath + Varlib.
"""
self.localisedFamilyName = localisedFamilyName or {}
"""dict. A dictionary of localised family name
strings, keyed by language code.
"""
self.localisedStyleName = localisedStyleName or {}
"""dict. A dictionary of localised stylename
strings, keyed by language code.
"""
self.localisedStyleMapFamilyName = localisedStyleMapFamilyName or {}
"""A dictionary of localised style map
familyname strings, keyed by language code.
"""
self.localisedStyleMapStyleName = localisedStyleMapStyleName or {}
"""A dictionary of localised style map
stylename strings, keyed by language code.
"""
self.glyphs = glyphs or {}
"""dict for special master definitions for glyphs. If glyphs
need special masters (to record the results of executed rules for
example).
MutatorMath.
.. deprecated:: 5.0
Use rules or sparse sources instead.
"""
self.kerning = kerning
""" bool. Indicates if this instance needs its kerning
calculated.
MutatorMath.
.. deprecated:: 5.0
"""
self.info = info
"""bool. Indicated if this instance needs the interpolating
font.info calculated.
.. deprecated:: 5.0
"""
self.lib = lib or {}
"""Custom data associated with this instance."""
@property
def location(self):
"""dict. Axis values for this instance.
MutatorMath + Varlib.
.. deprecated:: 5.0
Use the more explicit alias for this property :attr:`designLocation`.
"""
return self.designLocation
@location.setter
def location(self, location: Optional[AnisotropicLocationDict]):
self.designLocation = location or {}
def setStyleName(self, styleName, languageCode="en"):
"""These methods give easier access to the localised names."""
self.localisedStyleName[languageCode] = tostr(styleName)
def getStyleName(self, languageCode="en"):
return self.localisedStyleName.get(languageCode)
def setFamilyName(self, familyName, languageCode="en"):
self.localisedFamilyName[languageCode] = tostr(familyName)
def getFamilyName(self, languageCode="en"):
return self.localisedFamilyName.get(languageCode)
def setStyleMapStyleName(self, styleMapStyleName, languageCode="en"):
self.localisedStyleMapStyleName[languageCode] = tostr(styleMapStyleName)
def getStyleMapStyleName(self, languageCode="en"):
return self.localisedStyleMapStyleName.get(languageCode)
def setStyleMapFamilyName(self, styleMapFamilyName, languageCode="en"):
self.localisedStyleMapFamilyName[languageCode] = tostr(styleMapFamilyName)
def getStyleMapFamilyName(self, languageCode="en"):
return self.localisedStyleMapFamilyName.get(languageCode)
def clearLocation(self, axisName: Optional[str] = None):
"""Clear all location-related fields. Ensures that
        :attr:`designLocation` and :attr:`userLocation` are dictionaries
(possibly empty if clearing everything).
In order to update the location of this instance wholesale, a user
should first clear all the fields, then change the field(s) for which
they have data.
.. code:: python
instance.clearLocation()
instance.designLocation = {'Weight': (34, 36.5), 'Width': 100}
instance.userLocation = {'Opsz': 16}
In order to update a single axis location, the user should only clear
that axis, then edit the values:
.. code:: python
instance.clearLocation('Weight')
instance.designLocation['Weight'] = (34, 36.5)
Args:
axisName: if provided, only clear the location for that axis.
.. versionadded:: 5.0
"""
self.locationLabel = None
if axisName is None:
self.designLocation = {}
self.userLocation = {}
else:
if self.designLocation is None:
self.designLocation = {}
if axisName in self.designLocation:
del self.designLocation[axisName]
if self.userLocation is None:
self.userLocation = {}
if axisName in self.userLocation:
del self.userLocation[axisName]
def getLocationLabelDescriptor(self, doc: 'DesignSpaceDocument') -> Optional[LocationLabelDescriptor]:
"""Get the :class:`LocationLabelDescriptor` instance that matches
this instances's :attr:`locationLabel`.
Raises if the named label can't be found.
.. versionadded:: 5.0
"""
if self.locationLabel is None:
return None
label = doc.getLocationLabel(self.locationLabel)
if label is None:
raise DesignSpaceDocumentError(
'InstanceDescriptor.getLocationLabelDescriptor(): '
f'unknown location label `{self.locationLabel}` in instance `{self.name}`.'
)
return label
def getFullDesignLocation(self, doc: 'DesignSpaceDocument') -> AnisotropicLocationDict:
"""Get the complete design location of this instance, by combining data
from the various location fields, default axis values and mappings, and
top-level location labels.
The source of truth for this instance's location is determined for each
axis independently by taking the first not-None field in this list:
- ``locationLabel``: the location along this axis is the same as the
matching STAT format 4 label. No anisotropy.
- ``designLocation[axisName]``: the explicit design location along this
axis, possibly anisotropic.
- ``userLocation[axisName]``: the explicit user location along this
axis. No anisotropy.
- ``axis.default``: default axis value. No anisotropy.
.. versionadded:: 5.0
"""
label = self.getLocationLabelDescriptor(doc)
if label is not None:
return doc.map_forward(label.userLocation) # type: ignore
result: AnisotropicLocationDict = {}
for axis in doc.axes:
if axis.name in self.designLocation:
result[axis.name] = self.designLocation[axis.name]
elif axis.name in self.userLocation:
result[axis.name] = axis.map_forward(self.userLocation[axis.name])
else:
result[axis.name] = axis.map_forward(axis.default)
return result
def getFullUserLocation(self, doc: 'DesignSpaceDocument') -> SimpleLocationDict:
"""Get the complete user location for this instance.
.. seealso:: :meth:`getFullDesignLocation`
.. versionadded:: 5.0
"""
return doc.map_backward(self.getFullDesignLocation(doc))
def tagForAxisName(name):
# try to find or make a tag name for this axis name
names = {
'weight': ('wght', dict(en = 'Weight')),
'width': ('wdth', dict(en = 'Width')),
'optical': ('opsz', dict(en = 'Optical Size')),
'slant': ('slnt', dict(en = 'Slant')),
'italic': ('ital', dict(en = 'Italic')),
}
if name.lower() in names:
return names[name.lower()]
if len(name) < 4:
tag = name + "*" * (4 - len(name))
else:
tag = name[:4]
return tag, dict(en=name)
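# Illustrative examples (not part of the library source): registered axis names map
# to their OpenType tags, other names are padded or truncated to four characters.
#
#   tagForAxisName("weight")  # -> ('wght', {'en': 'Weight'})
#   tagForAxisName("grade")   # -> ('grad', {'en': 'grade'})
#   tagForAxisName("x")       # -> ('x***', {'en': 'x'})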
class AbstractAxisDescriptor(SimpleDescriptor):
flavor = "axis"
def __init__(
self,
*,
tag=None,
name=None,
labelNames=None,
hidden=False,
map=None,
axisOrdering=None,
axisLabels=None,
):
# opentype tag for this axis
self.tag = tag
"""string. Four letter tag for this axis. Some might be
registered at the `OpenType
specification <https://www.microsoft.com/typography/otspec/fvar.htm#VAT>`__.
Privately-defined axis tags must begin with an uppercase letter and
use only uppercase letters or digits.
"""
# name of the axis used in locations
self.name = name
"""string. Name of the axis as it is used in the location dicts.
MutatorMath + Varlib.
"""
# names for UI purposes, if this is not a standard axis,
self.labelNames = labelNames or {}
"""dict. When defining a non-registered axis, it will be
necessary to define user-facing readable names for the axis. Keyed by
xml:lang code. Values are required to be ``unicode`` strings, even if
they only contain ASCII characters.
"""
self.hidden = hidden
"""bool. Whether this axis should be hidden in user interfaces.
"""
self.map = map or []
"""list of input / output values that can describe a warp of user space
to design space coordinates. If no map values are present, it is assumed
user space is the same as design space, as in [(minimum, minimum),
(maximum, maximum)].
Varlib.
"""
self.axisOrdering = axisOrdering
"""STAT table field ``axisOrdering``.
See: `OTSpec STAT Axis Record <https://docs.microsoft.com/en-us/typography/opentype/spec/stat#axis-records>`_
.. versionadded:: 5.0
"""
self.axisLabels: List[AxisLabelDescriptor] = axisLabels or []
"""STAT table entries for Axis Value Tables format 1, 2, 3.
See: `OTSpec STAT Axis Value Tables <https://docs.microsoft.com/en-us/typography/opentype/spec/stat#axis-value-tables>`_
.. versionadded:: 5.0
"""
class AxisDescriptor(AbstractAxisDescriptor):
""" Simple container for the axis data.
Add more localisations?
.. code:: python
a1 = AxisDescriptor()
a1.minimum = 1
a1.maximum = 1000
a1.default = 400
a1.name = "weight"
a1.tag = "wght"
a1.labelNames['fa-IR'] = "قطر"
a1.labelNames['en'] = "Wéíght"
a1.map = [(1.0, 10.0), (400.0, 66.0), (1000.0, 990.0)]
a1.axisOrdering = 1
a1.axisLabels = [
AxisLabelDescriptor(name="Regular", userValue=400, elidable=True)
]
doc.addAxis(a1)
"""
_attrs = ['tag', 'name', 'maximum', 'minimum', 'default', 'map', 'axisOrdering', 'axisLabels']
def __init__(
self,
*,
tag=None,
name=None,
labelNames=None,
minimum=None,
default=None,
maximum=None,
hidden=False,
map=None,
axisOrdering=None,
axisLabels=None,
):
super().__init__(
tag=tag,
name=name,
labelNames=labelNames,
hidden=hidden,
map=map,
axisOrdering=axisOrdering,
axisLabels=axisLabels,
)
self.minimum = minimum
"""number. The minimum value for this axis in user space.
MutatorMath + Varlib.
"""
self.maximum = maximum
"""number. The maximum value for this axis in user space.
MutatorMath + Varlib.
"""
self.default = default
"""number. The default value for this axis, i.e. when a new location is
created, this is the value this axis will get in user space.
MutatorMath + Varlib.
"""
def serialize(self):
# output to a dict, used in testing
return dict(
tag=self.tag,
name=self.name,
labelNames=self.labelNames,
maximum=self.maximum,
minimum=self.minimum,
default=self.default,
hidden=self.hidden,
map=self.map,
axisOrdering=self.axisOrdering,
axisLabels=self.axisLabels,
)
def map_forward(self, v):
"""Maps value from axis mapping's input (user) to output (design)."""
from fontTools.varLib.models import piecewiseLinearMap
if not self.map:
return v
return piecewiseLinearMap(v, {k: v for k, v in self.map})
def map_backward(self, v):
"""Maps value from axis mapping's output (design) to input (user)."""
from fontTools.varLib.models import piecewiseLinearMap
if isinstance(v, tuple):
v = v[0]
if not self.map:
return v
return piecewiseLinearMap(v, {v: k for k, v in self.map})
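# Illustrative usage of the user<->design mapping above (values taken from the
# class docstring example; not part of the library source):
#
#   wght = AxisDescriptor(tag="wght", name="weight", minimum=1, default=400, maximum=1000,
#                         map=[(1.0, 10.0), (400.0, 66.0), (1000.0, 990.0)])
#   wght.map_forward(400)    # -> 66.0 (user space to design space)
#   wght.map_backward(66.0)  # -> 400.0 (design space back to user space)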
class DiscreteAxisDescriptor(AbstractAxisDescriptor):
"""Container for discrete axis data.
Use this for axes that do not interpolate. The main difference from a
continuous axis is that a continuous axis has a ``minimum`` and ``maximum``,
while a discrete axis has a list of ``values``.
Example: an Italic axis with 2 stops, Roman and Italic, that are not
compatible. The axis still allows to bind together the full font family,
which is useful for the STAT table, however it can't become a variation
axis in a VF.
.. code:: python
a2 = DiscreteAxisDescriptor()
a2.values = [0, 1]
a2.name = "Italic"
a2.tag = "ITAL"
a2.labelNames['fr'] = "Italique"
a2.map = [(0, 0), (1, -11)]
a2.axisOrdering = 2
a2.axisLabels = [
AxisLabelDescriptor(name="Roman", userValue=0, elidable=True)
]
doc.addAxis(a2)
.. versionadded:: 5.0
"""
flavor = "axis"
_attrs = ('tag', 'name', 'values', 'default', 'map', 'axisOrdering', 'axisLabels')
def __init__(
self,
*,
tag=None,
name=None,
labelNames=None,
values=None,
default=None,
hidden=False,
map=None,
axisOrdering=None,
axisLabels=None,
):
super().__init__(
tag=tag,
name=name,
labelNames=labelNames,
hidden=hidden,
map=map,
axisOrdering=axisOrdering,
axisLabels=axisLabels,
)
self.default: float = default
"""The default value for this axis, i.e. when a new location is
created, this is the value this axis will get in user space.
However, this default value is less important than in continuous axes:
- it doesn't define the "neutral" version of outlines from which
deltas would apply, as this axis does not interpolate.
- it doesn't provide the reference glyph set for the designspace, as
fonts at each value can have different glyph sets.
"""
self.values: List[float] = values or []
"""List of possible values for this axis. Contrary to continuous axes,
only the values in this list can be taken by the axis, nothing in-between.
"""
def map_forward(self, value):
"""Maps value from axis mapping's input to output.
Returns value unchanged if no mapping entry is found.
Note: for discrete axes, each value must have its mapping entry, if
you intend that value to be mapped.
"""
return next((v for k, v in self.map if k == value), value)
def map_backward(self, value):
"""Maps value from axis mapping's output to input.
Returns value unchanged if no mapping entry is found.
Note: for discrete axes, each value must have its mapping entry, if
you intend that value to be mapped.
"""
if isinstance(value, tuple):
value = value[0]
return next((k for k, v in self.map if v == value), value)
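# Illustrative usage (values from the class docstring example; not part of the
# library source): discrete axes use exact lookups instead of piecewise-linear maps.
#
#   ital = DiscreteAxisDescriptor(tag="ITAL", name="Italic", values=[0, 1],
#                                 default=0, map=[(0, 0), (1, -11)])
#   ital.map_forward(1)     # -> -11
#   ital.map_backward(-11)  # -> 1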
class AxisLabelDescriptor(SimpleDescriptor):
"""Container for axis label data.
Analogue of OpenType's STAT data for a single axis (formats 1, 2 and 3).
All values are user values.
See: `OTSpec STAT Axis value table, format 1, 2, 3 <https://docs.microsoft.com/en-us/typography/opentype/spec/stat#axis-value-table-format-1>`_
The STAT format of the Axis value depends on which field are filled-in,
see :meth:`getFormat`
.. versionadded:: 5.0
"""
flavor = "label"
_attrs = ('userMinimum', 'userValue', 'userMaximum', 'name', 'elidable', 'olderSibling', 'linkedUserValue', 'labelNames')
def __init__(
self,
*,
name,
userValue,
userMinimum=None,
userMaximum=None,
elidable=False,
olderSibling=False,
linkedUserValue=None,
labelNames=None,
):
self.userMinimum: Optional[float] = userMinimum
"""STAT field ``rangeMinValue`` (format 2)."""
self.userValue: float = userValue
"""STAT field ``value`` (format 1, 3) or ``nominalValue`` (format 2)."""
self.userMaximum: Optional[float] = userMaximum
"""STAT field ``rangeMaxValue`` (format 2)."""
self.name: str = name
"""Label for this axis location, STAT field ``valueNameID``."""
self.elidable: bool = elidable
"""STAT flag ``ELIDABLE_AXIS_VALUE_NAME``.
See: `OTSpec STAT Flags <https://docs.microsoft.com/en-us/typography/opentype/spec/stat#flags>`_
"""
self.olderSibling: bool = olderSibling
"""STAT flag ``OLDER_SIBLING_FONT_ATTRIBUTE``.
See: `OTSpec STAT Flags <https://docs.microsoft.com/en-us/typography/opentype/spec/stat#flags>`_
"""
self.linkedUserValue: Optional[float] = linkedUserValue
"""STAT field ``linkedValue`` (format 3)."""
self.labelNames: MutableMapping[str, str] = labelNames or {}
"""User-facing translations of this location's label. Keyed by
``xml:lang`` code.
"""
def getFormat(self) -> int:
"""Determine which format of STAT Axis value to use to encode this label.
=========== ========= =========== =========== ===============
STAT Format userValue userMinimum userMaximum linkedUserValue
=========== ========= =========== =========== ===============
1 ✅ ❌ ❌ ❌
2 ✅ ✅ ✅ ❌
3 ✅ ❌ ❌ ✅
=========== ========= =========== =========== ===============
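
        For instance (hypothetical labels):

        .. code:: python

            AxisLabelDescriptor(name="Regular", userValue=400).getFormat()  # 1
            AxisLabelDescriptor(
                name="Regular", userValue=400, linkedUserValue=700
            ).getFormat()  # 3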
"""
if self.linkedUserValue is not None:
return 3
if self.userMinimum is not None or self.userMaximum is not None:
return 2
return 1
@property
def defaultName(self) -> str:
"""Return the English name from :attr:`labelNames` or the :attr:`name`."""
return self.labelNames.get("en") or self.name
class LocationLabelDescriptor(SimpleDescriptor):
"""Container for location label data.
Analogue of OpenType's STAT data for a free-floating location (format 4).
All values are user values.
See: `OTSpec STAT Axis value table, format 4 <https://docs.microsoft.com/en-us/typography/opentype/spec/stat#axis-value-table-format-4>`_
.. versionadded:: 5.0
"""
flavor = "label"
_attrs = ('name', 'elidable', 'olderSibling', 'userLocation', 'labelNames')
def __init__(
self,
*,
name,
userLocation,
elidable=False,
olderSibling=False,
labelNames=None,
):
self.name: str = name
"""Label for this named location, STAT field ``valueNameID``."""
self.userLocation: SimpleLocationDict = userLocation or {}
"""Location in user coordinates along each axis.
If an axis is not mentioned, it is assumed to be at its default location.
.. seealso:: This may be only part of the full location. See:
:meth:`getFullUserLocation`
"""
self.elidable: bool = elidable
"""STAT flag ``ELIDABLE_AXIS_VALUE_NAME``.
See: `OTSpec STAT Flags <https://docs.microsoft.com/en-us/typography/opentype/spec/stat#flags>`_
"""
self.olderSibling: bool = olderSibling
"""STAT flag ``OLDER_SIBLING_FONT_ATTRIBUTE``.
See: `OTSpec STAT Flags <https://docs.microsoft.com/en-us/typography/opentype/spec/stat#flags>`_
"""
self.labelNames: Dict[str, str] = labelNames or {}
"""User-facing translations of this location's label. Keyed by
xml:lang code.
"""
@property
def defaultName(self) -> str:
"""Return the English name from :attr:`labelNames` or the :attr:`name`."""
return self.labelNames.get("en") or self.name
def getFullUserLocation(self, doc: 'DesignSpaceDocument') -> SimpleLocationDict:
"""Get the complete user location of this label, by combining data
from the explicit user location and default axis values.
.. versionadded:: 5.0
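
        A small sketch (the axis and label names are made up):

        .. code:: python

            # assuming ``doc`` has a "Weight" axis with default 400
            # and an "Italic" axis with default 0:
            label = LocationLabelDescriptor(name="Bold", userLocation={"Weight": 700})
            label.getFullUserLocation(doc)  # {"Weight": 700, "Italic": 0}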
"""
return {
axis.name: self.userLocation.get(axis.name, axis.default)
for axis in doc.axes
}
class VariableFontDescriptor(SimpleDescriptor):
"""Container for variable fonts, sub-spaces of the Designspace.
Use-cases:
- From a single DesignSpace with discrete axes, define 1 variable font
per value on the discrete axes. Before version 5, you would have needed
1 DesignSpace per such variable font, and a lot of data duplication.
- From a big variable font with many axes, define subsets of that variable
font that only include some axes and freeze other axes at a given location.
.. versionadded:: 5.0
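
    A minimal sketch of declaring such a sub-space (the names are illustrative):

    .. code:: python

        vf = VariableFontDescriptor(
            name="MyFamily-Italic",
            axisSubsets=[
                RangeAxisSubsetDescriptor(name="Weight"),
                ValueAxisSubsetDescriptor(name="Italic", userValue=1),
            ],
        )
        doc.addVariableFont(vf)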
"""
flavor = "variable-font"
_attrs = ('filename', 'axisSubsets', 'lib')
filename = posixpath_property("_filename")
def __init__(self, *, name, filename=None, axisSubsets=None, lib=None):
self.name: str = name
"""string, required. Name of this variable to identify it during the
build process and from other parts of the document, and also as a
filename in case the filename property is empty.
"""
self.filename: str = filename
"""string, optional. Relative path to the variable font file, **as it is
in the document**. The file may or may not exist.
If not specified, the :attr:`name` will be used as a basename for the file.
"""
self.axisSubsets: List[Union[RangeAxisSubsetDescriptor, ValueAxisSubsetDescriptor]] = axisSubsets or []
"""Axis subsets to include in this variable font.
If an axis is not mentioned, assume that we only want the default
location of that axis (same as a :class:`ValueAxisSubsetDescriptor`).
"""
self.lib: MutableMapping[str, Any] = lib or {}
"""Custom data associated with this variable font."""
class RangeAxisSubsetDescriptor(SimpleDescriptor):
"""Subset of a continuous axis to include in a variable font.
.. versionadded:: 5.0
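
    For example, to keep only part of a hypothetical "Weight" axis:

    .. code:: python

        RangeAxisSubsetDescriptor(
            name="Weight", userMinimum=400, userDefault=400, userMaximum=700
        )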
"""
flavor = "axis-subset"
_attrs = ('name', 'userMinimum', 'userDefault', 'userMaximum')
def __init__(self, *, name, userMinimum=-math.inf, userDefault=None, userMaximum=math.inf):
self.name: str = name
"""Name of the :class:`AxisDescriptor` to subset."""
self.userMinimum: float = userMinimum
"""New minimum value of the axis in the target variable font.
If not specified, assume the same minimum value as the full axis.
(default = ``-math.inf``)
"""
self.userDefault: Optional[float] = userDefault
"""New default value of the axis in the target variable font.
If not specified, assume the same default value as the full axis.
(default = ``None``)
"""
self.userMaximum: float = userMaximum
"""New maximum value of the axis in the target variable font.
If not specified, assume the same maximum value as the full axis.
(default = ``math.inf``)
"""
class ValueAxisSubsetDescriptor(SimpleDescriptor):
"""Single value of a discrete or continuous axis to use in a variable font.
.. versionadded:: 5.0
"""
flavor = "axis-subset"
_attrs = ('name', 'userValue')
def __init__(self, *, name, userValue):
self.name: str = name
"""Name of the :class:`AxisDescriptor` or :class:`DiscreteAxisDescriptor`
to "snapshot" or "freeze".
"""
self.userValue: float = userValue
"""Value in user coordinates at which to freeze the given axis."""
class BaseDocWriter(object):
_whiteSpace = " "
axisDescriptorClass = AxisDescriptor
discreteAxisDescriptorClass = DiscreteAxisDescriptor
axisLabelDescriptorClass = AxisLabelDescriptor
locationLabelDescriptorClass = LocationLabelDescriptor
ruleDescriptorClass = RuleDescriptor
sourceDescriptorClass = SourceDescriptor
variableFontDescriptorClass = VariableFontDescriptor
valueAxisSubsetDescriptorClass = ValueAxisSubsetDescriptor
rangeAxisSubsetDescriptorClass = RangeAxisSubsetDescriptor
instanceDescriptorClass = InstanceDescriptor
@classmethod
def getAxisDecriptor(cls):
return cls.axisDescriptorClass()
@classmethod
def getSourceDescriptor(cls):
return cls.sourceDescriptorClass()
@classmethod
def getInstanceDescriptor(cls):
return cls.instanceDescriptorClass()
@classmethod
def getRuleDescriptor(cls):
return cls.ruleDescriptorClass()
def __init__(self, documentPath, documentObject: DesignSpaceDocument):
self.path = documentPath
self.documentObject = documentObject
self.effectiveFormatTuple = self._getEffectiveFormatTuple()
self.root = ET.Element("designspace")
def write(self, pretty=True, encoding="UTF-8", xml_declaration=True):
self.root.attrib['format'] = ".".join(str(i) for i in self.effectiveFormatTuple)
if self.documentObject.axes or self.documentObject.elidedFallbackName is not None:
axesElement = ET.Element("axes")
if self.documentObject.elidedFallbackName is not None:
axesElement.attrib['elidedfallbackname'] = self.documentObject.elidedFallbackName
self.root.append(axesElement)
for axisObject in self.documentObject.axes:
self._addAxis(axisObject)
if self.documentObject.locationLabels:
labelsElement = ET.Element("labels")
for labelObject in self.documentObject.locationLabels:
self._addLocationLabel(labelsElement, labelObject)
self.root.append(labelsElement)
if self.documentObject.rules:
if getattr(self.documentObject, "rulesProcessingLast", False):
attributes = {"processing": "last"}
else:
attributes = {}
self.root.append(ET.Element("rules", attributes))
for ruleObject in self.documentObject.rules:
self._addRule(ruleObject)
if self.documentObject.sources:
self.root.append(ET.Element("sources"))
for sourceObject in self.documentObject.sources:
self._addSource(sourceObject)
if self.documentObject.variableFonts:
variableFontsElement = ET.Element("variable-fonts")
for variableFont in self.documentObject.variableFonts:
self._addVariableFont(variableFontsElement, variableFont)
self.root.append(variableFontsElement)
if self.documentObject.instances:
self.root.append(ET.Element("instances"))
for instanceObject in self.documentObject.instances:
self._addInstance(instanceObject)
if self.documentObject.lib:
self._addLib(self.root, self.documentObject.lib, 2)
tree = ET.ElementTree(self.root)
tree.write(
self.path,
encoding=encoding,
method='xml',
xml_declaration=xml_declaration,
pretty_print=pretty,
)
def _getEffectiveFormatTuple(self):
"""Try to use the version specified in the document, or a sufficiently
recent version to be able to encode what the document contains.
"""
minVersion = self.documentObject.formatTuple
if (
any(
isinstance(axis, DiscreteAxisDescriptor) or
axis.axisOrdering is not None or
axis.axisLabels
for axis in self.documentObject.axes
) or
self.documentObject.locationLabels or
any(
source.localisedFamilyName
for source in self.documentObject.sources
) or
self.documentObject.variableFonts or
any(
instance.locationLabel or
instance.userLocation
for instance in self.documentObject.instances
)
):
if minVersion < (5, 0):
minVersion = (5, 0)
return minVersion
def _makeLocationElement(self, locationObject, name=None):
""" Convert Location dict to a locationElement."""
locElement = ET.Element("location")
if name is not None:
locElement.attrib['name'] = name
validatedLocation = self.documentObject.newDefaultLocation()
for axisName, axisValue in locationObject.items():
if axisName in validatedLocation:
# only accept values we know
validatedLocation[axisName] = axisValue
for dimensionName, dimensionValue in validatedLocation.items():
dimElement = ET.Element('dimension')
dimElement.attrib['name'] = dimensionName
if type(dimensionValue) == tuple:
dimElement.attrib['xvalue'] = self.intOrFloat(dimensionValue[0])
dimElement.attrib['yvalue'] = self.intOrFloat(dimensionValue[1])
else:
dimElement.attrib['xvalue'] = self.intOrFloat(dimensionValue)
locElement.append(dimElement)
return locElement, validatedLocation
def intOrFloat(self, num):
if int(num) == num:
return "%d" % num
return ("%f" % num).rstrip('0').rstrip('.')
def _addRule(self, ruleObject):
# if none of the conditions have minimum or maximum values, do not add the rule.
ruleElement = ET.Element('rule')
if ruleObject.name is not None:
ruleElement.attrib['name'] = ruleObject.name
for conditions in ruleObject.conditionSets:
conditionsetElement = ET.Element('conditionset')
for cond in conditions:
if cond.get('minimum') is None and cond.get('maximum') is None:
# neither is defined, don't add this condition
continue
conditionElement = ET.Element('condition')
conditionElement.attrib['name'] = cond.get('name')
if cond.get('minimum') is not None:
conditionElement.attrib['minimum'] = self.intOrFloat(cond.get('minimum'))
if cond.get('maximum') is not None:
conditionElement.attrib['maximum'] = self.intOrFloat(cond.get('maximum'))
conditionsetElement.append(conditionElement)
if len(conditionsetElement):
ruleElement.append(conditionsetElement)
for sub in ruleObject.subs:
subElement = ET.Element('sub')
subElement.attrib['name'] = sub[0]
subElement.attrib['with'] = sub[1]
ruleElement.append(subElement)
if len(ruleElement):
self.root.findall('.rules')[0].append(ruleElement)
def _addAxis(self, axisObject):
axisElement = ET.Element('axis')
axisElement.attrib['tag'] = axisObject.tag
axisElement.attrib['name'] = axisObject.name
self._addLabelNames(axisElement, axisObject.labelNames)
if axisObject.map:
for inputValue, outputValue in axisObject.map:
mapElement = ET.Element('map')
mapElement.attrib['input'] = self.intOrFloat(inputValue)
mapElement.attrib['output'] = self.intOrFloat(outputValue)
axisElement.append(mapElement)
if axisObject.axisOrdering or axisObject.axisLabels:
labelsElement = ET.Element('labels')
if axisObject.axisOrdering is not None:
labelsElement.attrib['ordering'] = str(axisObject.axisOrdering)
for label in axisObject.axisLabels:
self._addAxisLabel(labelsElement, label)
axisElement.append(labelsElement)
if isinstance(axisObject, AxisDescriptor):
axisElement.attrib['minimum'] = self.intOrFloat(axisObject.minimum)
axisElement.attrib['maximum'] = self.intOrFloat(axisObject.maximum)
elif isinstance(axisObject, DiscreteAxisDescriptor):
axisElement.attrib['values'] = " ".join(self.intOrFloat(v) for v in axisObject.values)
axisElement.attrib['default'] = self.intOrFloat(axisObject.default)
if axisObject.hidden:
axisElement.attrib['hidden'] = "1"
self.root.findall('.axes')[0].append(axisElement)
def _addAxisLabel(self, axisElement: ET.Element, label: AxisLabelDescriptor) -> None:
labelElement = ET.Element('label')
labelElement.attrib['uservalue'] = self.intOrFloat(label.userValue)
if label.userMinimum is not None:
labelElement.attrib['userminimum'] = self.intOrFloat(label.userMinimum)
if label.userMaximum is not None:
labelElement.attrib['usermaximum'] = self.intOrFloat(label.userMaximum)
labelElement.attrib['name'] = label.name
if label.elidable:
labelElement.attrib['elidable'] = "true"
if label.olderSibling:
labelElement.attrib['oldersibling'] = "true"
if label.linkedUserValue is not None:
labelElement.attrib['linkeduservalue'] = self.intOrFloat(label.linkedUserValue)
self._addLabelNames(labelElement, label.labelNames)
axisElement.append(labelElement)
def _addLabelNames(self, parentElement, labelNames):
for languageCode, labelName in sorted(labelNames.items()):
languageElement = ET.Element('labelname')
languageElement.attrib[XML_LANG] = languageCode
languageElement.text = labelName
parentElement.append(languageElement)
def _addLocationLabel(self, parentElement: ET.Element, label: LocationLabelDescriptor) -> None:
labelElement = ET.Element('label')
labelElement.attrib['name'] = label.name
if label.elidable:
labelElement.attrib['elidable'] = "true"
if label.olderSibling:
labelElement.attrib['oldersibling'] = "true"
self._addLabelNames(labelElement, label.labelNames)
self._addLocationElement(labelElement, userLocation=label.userLocation)
parentElement.append(labelElement)
def _addLocationElement(
self,
parentElement,
*,
designLocation: AnisotropicLocationDict = None,
userLocation: SimpleLocationDict = None
):
locElement = ET.Element("location")
for axis in self.documentObject.axes:
if designLocation is not None and axis.name in designLocation:
dimElement = ET.Element('dimension')
dimElement.attrib['name'] = axis.name
value = designLocation[axis.name]
if isinstance(value, tuple):
dimElement.attrib['xvalue'] = self.intOrFloat(value[0])
dimElement.attrib['yvalue'] = self.intOrFloat(value[1])
else:
dimElement.attrib['xvalue'] = self.intOrFloat(value)
locElement.append(dimElement)
elif userLocation is not None and axis.name in userLocation:
dimElement = ET.Element('dimension')
dimElement.attrib['name'] = axis.name
value = userLocation[axis.name]
dimElement.attrib['uservalue'] = self.intOrFloat(value)
locElement.append(dimElement)
if len(locElement) > 0:
parentElement.append(locElement)
def _addInstance(self, instanceObject):
instanceElement = ET.Element('instance')
if instanceObject.name is not None:
instanceElement.attrib['name'] = instanceObject.name
if instanceObject.locationLabel is not None:
instanceElement.attrib['location'] = instanceObject.locationLabel
if instanceObject.familyName is not None:
instanceElement.attrib['familyname'] = instanceObject.familyName
if instanceObject.styleName is not None:
instanceElement.attrib['stylename'] = instanceObject.styleName
# add localisations
if instanceObject.localisedStyleName:
languageCodes = list(instanceObject.localisedStyleName.keys())
languageCodes.sort()
for code in languageCodes:
if code == "en":
continue # already stored in the element attribute
localisedStyleNameElement = ET.Element('stylename')
localisedStyleNameElement.attrib[XML_LANG] = code
localisedStyleNameElement.text = instanceObject.getStyleName(code)
instanceElement.append(localisedStyleNameElement)
if instanceObject.localisedFamilyName:
languageCodes = list(instanceObject.localisedFamilyName.keys())
languageCodes.sort()
for code in languageCodes:
if code == "en":
continue # already stored in the element attribute
localisedFamilyNameElement = ET.Element('familyname')
localisedFamilyNameElement.attrib[XML_LANG] = code
localisedFamilyNameElement.text = instanceObject.getFamilyName(code)
instanceElement.append(localisedFamilyNameElement)
if instanceObject.localisedStyleMapStyleName:
languageCodes = list(instanceObject.localisedStyleMapStyleName.keys())
languageCodes.sort()
for code in languageCodes:
if code == "en":
continue
localisedStyleMapStyleNameElement = ET.Element('stylemapstylename')
localisedStyleMapStyleNameElement.attrib[XML_LANG] = code
localisedStyleMapStyleNameElement.text = instanceObject.getStyleMapStyleName(code)
instanceElement.append(localisedStyleMapStyleNameElement)
if instanceObject.localisedStyleMapFamilyName:
languageCodes = list(instanceObject.localisedStyleMapFamilyName.keys())
languageCodes.sort()
for code in languageCodes:
if code == "en":
continue
localisedStyleMapFamilyNameElement = ET.Element('stylemapfamilyname')
localisedStyleMapFamilyNameElement.attrib[XML_LANG] = code
localisedStyleMapFamilyNameElement.text = instanceObject.getStyleMapFamilyName(code)
instanceElement.append(localisedStyleMapFamilyNameElement)
if self.effectiveFormatTuple >= (5, 0):
if instanceObject.locationLabel is None:
self._addLocationElement(
instanceElement,
designLocation=instanceObject.designLocation,
userLocation=instanceObject.userLocation
)
else:
# Pre-version 5.0 code was validating and filling in the location
# dict while writing it out, as preserved below.
if instanceObject.location is not None:
locationElement, instanceObject.location = self._makeLocationElement(instanceObject.location)
instanceElement.append(locationElement)
if instanceObject.filename is not None:
instanceElement.attrib['filename'] = instanceObject.filename
if instanceObject.postScriptFontName is not None:
instanceElement.attrib['postscriptfontname'] = instanceObject.postScriptFontName
if instanceObject.styleMapFamilyName is not None:
instanceElement.attrib['stylemapfamilyname'] = instanceObject.styleMapFamilyName
if instanceObject.styleMapStyleName is not None:
instanceElement.attrib['stylemapstylename'] = instanceObject.styleMapStyleName
if self.effectiveFormatTuple < (5, 0):
# Deprecated members as of version 5.0
if instanceObject.glyphs:
if instanceElement.findall('.glyphs') == []:
glyphsElement = ET.Element('glyphs')
instanceElement.append(glyphsElement)
glyphsElement = instanceElement.findall('.glyphs')[0]
for glyphName, data in sorted(instanceObject.glyphs.items()):
glyphElement = self._writeGlyphElement(instanceElement, instanceObject, glyphName, data)
glyphsElement.append(glyphElement)
if instanceObject.kerning:
kerningElement = ET.Element('kerning')
instanceElement.append(kerningElement)
if instanceObject.info:
infoElement = ET.Element('info')
instanceElement.append(infoElement)
self._addLib(instanceElement, instanceObject.lib, 4)
self.root.findall('.instances')[0].append(instanceElement)
def _addSource(self, sourceObject):
sourceElement = ET.Element("source")
if sourceObject.filename is not None:
sourceElement.attrib['filename'] = sourceObject.filename
if sourceObject.name is not None:
if sourceObject.name.find("temp_master") != 0:
# do not save temporary source names
sourceElement.attrib['name'] = sourceObject.name
if sourceObject.familyName is not None:
sourceElement.attrib['familyname'] = sourceObject.familyName
if sourceObject.styleName is not None:
sourceElement.attrib['stylename'] = sourceObject.styleName
if sourceObject.layerName is not None:
sourceElement.attrib['layer'] = sourceObject.layerName
if sourceObject.localisedFamilyName:
languageCodes = list(sourceObject.localisedFamilyName.keys())
languageCodes.sort()
for code in languageCodes:
if code == "en":
continue # already stored in the element attribute
localisedFamilyNameElement = ET.Element('familyname')
localisedFamilyNameElement.attrib[XML_LANG] = code
localisedFamilyNameElement.text = sourceObject.getFamilyName(code)
sourceElement.append(localisedFamilyNameElement)
if sourceObject.copyLib:
libElement = ET.Element('lib')
libElement.attrib['copy'] = "1"
sourceElement.append(libElement)
if sourceObject.copyGroups:
groupsElement = ET.Element('groups')
groupsElement.attrib['copy'] = "1"
sourceElement.append(groupsElement)
if sourceObject.copyFeatures:
featuresElement = ET.Element('features')
featuresElement.attrib['copy'] = "1"
sourceElement.append(featuresElement)
if sourceObject.copyInfo or sourceObject.muteInfo:
infoElement = ET.Element('info')
if sourceObject.copyInfo:
infoElement.attrib['copy'] = "1"
if sourceObject.muteInfo:
infoElement.attrib['mute'] = "1"
sourceElement.append(infoElement)
if sourceObject.muteKerning:
kerningElement = ET.Element("kerning")
kerningElement.attrib["mute"] = '1'
sourceElement.append(kerningElement)
if sourceObject.mutedGlyphNames:
for name in sourceObject.mutedGlyphNames:
glyphElement = ET.Element("glyph")
glyphElement.attrib["name"] = name
glyphElement.attrib["mute"] = '1'
sourceElement.append(glyphElement)
if self.effectiveFormatTuple >= (5, 0):
self._addLocationElement(sourceElement, designLocation=sourceObject.location)
else:
# Pre-version 5.0 code was validating and filling in the location
# dict while writing it out, as preserved below.
locationElement, sourceObject.location = self._makeLocationElement(sourceObject.location)
sourceElement.append(locationElement)
self.root.findall('.sources')[0].append(sourceElement)
def _addVariableFont(self, parentElement: ET.Element, vf: VariableFontDescriptor) -> None:
vfElement = ET.Element('variable-font')
vfElement.attrib['name'] = vf.name
if vf.filename is not None:
vfElement.attrib['filename'] = vf.filename
if vf.axisSubsets:
subsetsElement = ET.Element('axis-subsets')
for subset in vf.axisSubsets:
subsetElement = ET.Element('axis-subset')
subsetElement.attrib['name'] = subset.name
if isinstance(subset, RangeAxisSubsetDescriptor):
if subset.userMinimum != -math.inf:
subsetElement.attrib['userminimum'] = self.intOrFloat(subset.userMinimum)
if subset.userMaximum != math.inf:
subsetElement.attrib['usermaximum'] = self.intOrFloat(subset.userMaximum)
if subset.userDefault is not None:
subsetElement.attrib['userdefault'] = self.intOrFloat(subset.userDefault)
elif isinstance(subset, ValueAxisSubsetDescriptor):
subsetElement.attrib['uservalue'] = self.intOrFloat(subset.userValue)
subsetsElement.append(subsetElement)
vfElement.append(subsetsElement)
self._addLib(vfElement, vf.lib, 4)
parentElement.append(vfElement)
def _addLib(self, parentElement: ET.Element, data: Any, indent_level: int) -> None:
if not data:
return
libElement = ET.Element('lib')
libElement.append(plistlib.totree(data, indent_level=indent_level))
parentElement.append(libElement)
def _writeGlyphElement(self, instanceElement, instanceObject, glyphName, data):
glyphElement = ET.Element('glyph')
if data.get('mute'):
glyphElement.attrib['mute'] = "1"
if data.get('unicodes') is not None:
glyphElement.attrib['unicode'] = " ".join([hex(u) for u in data.get('unicodes')])
if data.get('instanceLocation') is not None:
locationElement, data['instanceLocation'] = self._makeLocationElement(data.get('instanceLocation'))
glyphElement.append(locationElement)
if glyphName is not None:
glyphElement.attrib['name'] = glyphName
if data.get('note') is not None:
noteElement = ET.Element('note')
noteElement.text = data.get('note')
glyphElement.append(noteElement)
if data.get('masters') is not None:
mastersElement = ET.Element("masters")
for m in data.get('masters'):
masterElement = ET.Element("master")
if m.get('glyphName') is not None:
masterElement.attrib['glyphname'] = m.get('glyphName')
if m.get('font') is not None:
masterElement.attrib['source'] = m.get('font')
if m.get('location') is not None:
locationElement, m['location'] = self._makeLocationElement(m.get('location'))
masterElement.append(locationElement)
mastersElement.append(masterElement)
glyphElement.append(mastersElement)
return glyphElement
class BaseDocReader(LogMixin):
axisDescriptorClass = AxisDescriptor
discreteAxisDescriptorClass = DiscreteAxisDescriptor
axisLabelDescriptorClass = AxisLabelDescriptor
locationLabelDescriptorClass = LocationLabelDescriptor
ruleDescriptorClass = RuleDescriptor
sourceDescriptorClass = SourceDescriptor
variableFontsDescriptorClass = VariableFontDescriptor
valueAxisSubsetDescriptorClass = ValueAxisSubsetDescriptor
rangeAxisSubsetDescriptorClass = RangeAxisSubsetDescriptor
instanceDescriptorClass = InstanceDescriptor
def __init__(self, documentPath, documentObject):
self.path = documentPath
self.documentObject = documentObject
tree = ET.parse(self.path)
self.root = tree.getroot()
self.documentObject.formatVersion = self.root.attrib.get("format", "3.0")
self._axes = []
self.rules = []
self.sources = []
self.instances = []
self.axisDefaults = {}
self._strictAxisNames = True
@classmethod
def fromstring(cls, string, documentObject):
f = BytesIO(tobytes(string, encoding="utf-8"))
self = cls(f, documentObject)
self.path = None
return self
def read(self):
self.readAxes()
self.readLabels()
self.readRules()
self.readVariableFonts()
self.readSources()
self.readInstances()
self.readLib()
def readRules(self):
# we also need to read any conditions that are outside of a condition set.
rules = []
rulesElement = self.root.find(".rules")
if rulesElement is not None:
processingValue = rulesElement.attrib.get("processing", "first")
if processingValue not in {"first", "last"}:
raise DesignSpaceDocumentError(
"<rules> processing attribute value is not valid: %r, "
"expected 'first' or 'last'" % processingValue)
self.documentObject.rulesProcessingLast = processingValue == "last"
for ruleElement in self.root.findall(".rules/rule"):
ruleObject = self.ruleDescriptorClass()
ruleName = ruleObject.name = ruleElement.attrib.get("name")
# read any stray conditions outside a condition set
externalConditions = self._readConditionElements(
ruleElement,
ruleName,
)
if externalConditions:
ruleObject.conditionSets.append(externalConditions)
self.log.info(
"Found stray rule conditions outside a conditionset. "
"Wrapped them in a new conditionset."
)
# read the conditionsets
for conditionSetElement in ruleElement.findall('.conditionset'):
conditionSet = self._readConditionElements(
conditionSetElement,
ruleName,
)
if conditionSet is not None:
ruleObject.conditionSets.append(conditionSet)
for subElement in ruleElement.findall('.sub'):
a = subElement.attrib['name']
b = subElement.attrib['with']
ruleObject.subs.append((a, b))
rules.append(ruleObject)
self.documentObject.rules = rules
def _readConditionElements(self, parentElement, ruleName=None):
cds = []
for conditionElement in parentElement.findall('.condition'):
cd = {}
cdMin = conditionElement.attrib.get("minimum")
if cdMin is not None:
cd['minimum'] = float(cdMin)
else:
# will allow these to be None, assume axis.minimum
cd['minimum'] = None
cdMax = conditionElement.attrib.get("maximum")
if cdMax is not None:
cd['maximum'] = float(cdMax)
else:
# will allow these to be None, assume axis.maximum
cd['maximum'] = None
cd['name'] = conditionElement.attrib.get("name")
            # sanity check: a condition needs at least a minimum or a maximum
if cd.get('minimum') is None and cd.get('maximum') is None:
raise DesignSpaceDocumentError(
"condition missing required minimum or maximum in rule" +
(" '%s'" % ruleName if ruleName is not None else ""))
cds.append(cd)
return cds
def readAxes(self):
# read the axes elements, including the warp map.
axesElement = self.root.find(".axes")
if axesElement is not None and 'elidedfallbackname' in axesElement.attrib:
self.documentObject.elidedFallbackName = axesElement.attrib['elidedfallbackname']
axisElements = self.root.findall(".axes/axis")
if not axisElements:
return
for axisElement in axisElements:
if self.documentObject.formatTuple >= (5, 0) and "values" in axisElement.attrib:
axisObject = self.discreteAxisDescriptorClass()
axisObject.values = [float(s) for s in axisElement.attrib["values"].split(" ")]
else:
axisObject = self.axisDescriptorClass()
axisObject.minimum = float(axisElement.attrib.get("minimum"))
axisObject.maximum = float(axisElement.attrib.get("maximum"))
axisObject.default = float(axisElement.attrib.get("default"))
axisObject.name = axisElement.attrib.get("name")
if axisElement.attrib.get('hidden', False):
axisObject.hidden = True
axisObject.tag = axisElement.attrib.get("tag")
for mapElement in axisElement.findall('map'):
a = float(mapElement.attrib['input'])
b = float(mapElement.attrib['output'])
axisObject.map.append((a, b))
for labelNameElement in axisElement.findall('labelname'):
# Note: elementtree reads the "xml:lang" attribute name as
# '{http://www.w3.org/XML/1998/namespace}lang'
for key, lang in labelNameElement.items():
if key == XML_LANG:
axisObject.labelNames[lang] = tostr(labelNameElement.text)
labelElement = axisElement.find(".labels")
if labelElement is not None:
if "ordering" in labelElement.attrib:
axisObject.axisOrdering = int(labelElement.attrib["ordering"])
for label in labelElement.findall(".label"):
axisObject.axisLabels.append(self.readAxisLabel(label))
self.documentObject.axes.append(axisObject)
self.axisDefaults[axisObject.name] = axisObject.default
def readAxisLabel(self, element: ET.Element):
xml_attrs = {'userminimum', 'uservalue', 'usermaximum', 'name', 'elidable', 'oldersibling', 'linkeduservalue'}
unknown_attrs = set(element.attrib) - xml_attrs
if unknown_attrs:
raise DesignSpaceDocumentError(f"label element contains unknown attributes: {', '.join(unknown_attrs)}")
name = element.get("name")
if name is None:
raise DesignSpaceDocumentError("label element must have a name attribute.")
valueStr = element.get("uservalue")
if valueStr is None:
raise DesignSpaceDocumentError("label element must have a uservalue attribute.")
value = float(valueStr)
minimumStr = element.get("userminimum")
minimum = float(minimumStr) if minimumStr is not None else None
maximumStr = element.get("usermaximum")
maximum = float(maximumStr) if maximumStr is not None else None
linkedValueStr = element.get("linkeduservalue")
linkedValue = float(linkedValueStr) if linkedValueStr is not None else None
elidable = True if element.get("elidable") == "true" else False
olderSibling = True if element.get("oldersibling") == "true" else False
labelNames = {
lang: label_name.text or ""
for label_name in element.findall("labelname")
for attr, lang in label_name.items()
if attr == XML_LANG
# Note: elementtree reads the "xml:lang" attribute name as
# '{http://www.w3.org/XML/1998/namespace}lang'
}
return self.axisLabelDescriptorClass(
name=name,
userValue=value,
userMinimum=minimum,
userMaximum=maximum,
elidable=elidable,
olderSibling=olderSibling,
linkedUserValue=linkedValue,
labelNames=labelNames,
)
def readLabels(self):
if self.documentObject.formatTuple < (5, 0):
return
xml_attrs = {'name', 'elidable', 'oldersibling'}
for labelElement in self.root.findall(".labels/label"):
unknown_attrs = set(labelElement.attrib) - xml_attrs
if unknown_attrs:
raise DesignSpaceDocumentError(f"Label element contains unknown attributes: {', '.join(unknown_attrs)}")
name = labelElement.get("name")
if name is None:
raise DesignSpaceDocumentError("label element must have a name attribute.")
designLocation, userLocation = self.locationFromElement(labelElement)
if designLocation:
raise DesignSpaceDocumentError(f'<label> element "{name}" must only have user locations (using uservalue="").')
elidable = True if labelElement.get("elidable") == "true" else False
olderSibling = True if labelElement.get("oldersibling") == "true" else False
labelNames = {
lang: label_name.text or ""
for label_name in labelElement.findall("labelname")
for attr, lang in label_name.items()
if attr == XML_LANG
# Note: elementtree reads the "xml:lang" attribute name as
# '{http://www.w3.org/XML/1998/namespace}lang'
}
locationLabel = self.locationLabelDescriptorClass(
name=name,
userLocation=userLocation,
elidable=elidable,
olderSibling=olderSibling,
labelNames=labelNames,
)
self.documentObject.locationLabels.append(locationLabel)
def readVariableFonts(self):
if self.documentObject.formatTuple < (5, 0):
return
xml_attrs = {'name', 'filename'}
for variableFontElement in self.root.findall(".variable-fonts/variable-font"):
unknown_attrs = set(variableFontElement.attrib) - xml_attrs
if unknown_attrs:
raise DesignSpaceDocumentError(f"variable-font element contains unknown attributes: {', '.join(unknown_attrs)}")
name = variableFontElement.get("name")
if name is None:
raise DesignSpaceDocumentError("variable-font element must have a name attribute.")
filename = variableFontElement.get("filename")
axisSubsetsElement = variableFontElement.find(".axis-subsets")
if axisSubsetsElement is None:
raise DesignSpaceDocumentError("variable-font element must contain an axis-subsets element.")
axisSubsets = []
for axisSubset in axisSubsetsElement.iterfind(".axis-subset"):
axisSubsets.append(self.readAxisSubset(axisSubset))
lib = None
libElement = variableFontElement.find(".lib")
if libElement is not None:
lib = plistlib.fromtree(libElement[0])
variableFont = self.variableFontsDescriptorClass(
name=name,
filename=filename,
axisSubsets=axisSubsets,
lib=lib,
)
self.documentObject.variableFonts.append(variableFont)
def readAxisSubset(self, element: ET.Element):
if "uservalue" in element.attrib:
xml_attrs = {'name', 'uservalue'}
unknown_attrs = set(element.attrib) - xml_attrs
if unknown_attrs:
raise DesignSpaceDocumentError(f"axis-subset element contains unknown attributes: {', '.join(unknown_attrs)}")
name = element.get("name")
if name is None:
raise DesignSpaceDocumentError("axis-subset element must have a name attribute.")
userValueStr = element.get("uservalue")
if userValueStr is None:
raise DesignSpaceDocumentError(
"The axis-subset element for a discrete subset must have a uservalue attribute."
)
userValue = float(userValueStr)
return self.valueAxisSubsetDescriptorClass(name=name, userValue=userValue)
else:
xml_attrs = {'name', 'userminimum', 'userdefault', 'usermaximum'}
unknown_attrs = set(element.attrib) - xml_attrs
if unknown_attrs:
raise DesignSpaceDocumentError(f"axis-subset element contains unknown attributes: {', '.join(unknown_attrs)}")
name = element.get("name")
if name is None:
raise DesignSpaceDocumentError("axis-subset element must have a name attribute.")
userMinimum = element.get("userminimum")
userDefault = element.get("userdefault")
userMaximum = element.get("usermaximum")
if userMinimum is not None and userDefault is not None and userMaximum is not None:
return self.rangeAxisSubsetDescriptorClass(
name=name,
userMinimum=float(userMinimum),
userDefault=float(userDefault),
userMaximum=float(userMaximum),
)
if all(v is None for v in (userMinimum, userDefault, userMaximum)):
return self.rangeAxisSubsetDescriptorClass(name=name)
raise DesignSpaceDocumentError(
"axis-subset element must have min/max/default values or none at all."
)
def readSources(self):
for sourceCount, sourceElement in enumerate(self.root.findall(".sources/source")):
filename = sourceElement.attrib.get('filename')
if filename is not None and self.path is not None:
sourcePath = os.path.abspath(os.path.join(os.path.dirname(self.path), filename))
else:
sourcePath = None
sourceName = sourceElement.attrib.get('name')
if sourceName is None:
# add a temporary source name
sourceName = "temp_master.%d" % (sourceCount)
sourceObject = self.sourceDescriptorClass()
sourceObject.path = sourcePath # absolute path to the ufo source
sourceObject.filename = filename # path as it is stored in the document
sourceObject.name = sourceName
familyName = sourceElement.attrib.get("familyname")
if familyName is not None:
sourceObject.familyName = familyName
styleName = sourceElement.attrib.get("stylename")
if styleName is not None:
sourceObject.styleName = styleName
for familyNameElement in sourceElement.findall('familyname'):
for key, lang in familyNameElement.items():
if key == XML_LANG:
familyName = familyNameElement.text
sourceObject.setFamilyName(familyName, lang)
designLocation, userLocation = self.locationFromElement(sourceElement)
if userLocation:
raise DesignSpaceDocumentError(f'<source> element "{sourceName}" must only have design locations (using xvalue="").')
sourceObject.location = designLocation
layerName = sourceElement.attrib.get('layer')
if layerName is not None:
sourceObject.layerName = layerName
for libElement in sourceElement.findall('.lib'):
if libElement.attrib.get('copy') == '1':
sourceObject.copyLib = True
for groupsElement in sourceElement.findall('.groups'):
if groupsElement.attrib.get('copy') == '1':
sourceObject.copyGroups = True
for infoElement in sourceElement.findall(".info"):
if infoElement.attrib.get('copy') == '1':
sourceObject.copyInfo = True
if infoElement.attrib.get('mute') == '1':
sourceObject.muteInfo = True
for featuresElement in sourceElement.findall(".features"):
if featuresElement.attrib.get('copy') == '1':
sourceObject.copyFeatures = True
for glyphElement in sourceElement.findall(".glyph"):
glyphName = glyphElement.attrib.get('name')
if glyphName is None:
continue
if glyphElement.attrib.get('mute') == '1':
sourceObject.mutedGlyphNames.append(glyphName)
for kerningElement in sourceElement.findall(".kerning"):
if kerningElement.attrib.get('mute') == '1':
sourceObject.muteKerning = True
self.documentObject.sources.append(sourceObject)
def locationFromElement(self, element):
"""Read a nested ``<location>`` element inside the given ``element``.
.. versionchanged:: 5.0
Return a tuple of (designLocation, userLocation)
"""
elementLocation = (None, None)
for locationElement in element.findall('.location'):
elementLocation = self.readLocationElement(locationElement)
break
return elementLocation
def readLocationElement(self, locationElement):
"""Read a ``<location>`` element.
.. versionchanged:: 5.0
Return a tuple of (designLocation, userLocation)
"""
if self._strictAxisNames and not self.documentObject.axes:
raise DesignSpaceDocumentError("No axes defined")
userLoc = {}
designLoc = {}
for dimensionElement in locationElement.findall(".dimension"):
dimName = dimensionElement.attrib.get("name")
if self._strictAxisNames and dimName not in self.axisDefaults:
                # Unknown axis name: warn and skip this dimension (only enforced
                # when strict axis-name checking is on).
self.log.warning("Location with undefined axis: \"%s\".", dimName)
continue
userValue = xValue = yValue = None
try:
userValue = dimensionElement.attrib.get('uservalue')
if userValue is not None:
userValue = float(userValue)
except ValueError:
self.log.warning("ValueError in readLocation userValue %3.3f", userValue)
try:
xValue = dimensionElement.attrib.get('xvalue')
if xValue is not None:
xValue = float(xValue)
except ValueError:
self.log.warning("ValueError in readLocation xValue %3.3f", xValue)
try:
yValue = dimensionElement.attrib.get('yvalue')
if yValue is not None:
yValue = float(yValue)
except ValueError:
self.log.warning("ValueError in readLocation yValue %3.3f", yValue)
            if (userValue is None) == (xValue is None):
raise DesignSpaceDocumentError(f'Exactly one of uservalue="" or xvalue="" must be provided for location dimension "{dimName}"')
if yValue is not None:
if xValue is None:
                    raise DesignSpaceDocumentError(f'Missing xvalue="" for the location dimension "{dimName}" with yvalue="{yValue}"')
designLoc[dimName] = (xValue, yValue)
elif xValue is not None:
designLoc[dimName] = xValue
else:
userLoc[dimName] = userValue
return designLoc, userLoc
def readInstances(self, makeGlyphs=True, makeKerning=True, makeInfo=True):
instanceElements = self.root.findall('.instances/instance')
for instanceElement in instanceElements:
self._readSingleInstanceElement(instanceElement, makeGlyphs=makeGlyphs, makeKerning=makeKerning, makeInfo=makeInfo)
def _readSingleInstanceElement(self, instanceElement, makeGlyphs=True, makeKerning=True, makeInfo=True):
filename = instanceElement.attrib.get('filename')
if filename is not None and self.documentObject.path is not None:
instancePath = os.path.join(os.path.dirname(self.documentObject.path), filename)
else:
instancePath = None
instanceObject = self.instanceDescriptorClass()
instanceObject.path = instancePath # absolute path to the instance
instanceObject.filename = filename # path as it is stored in the document
name = instanceElement.attrib.get("name")
if name is not None:
instanceObject.name = name
familyname = instanceElement.attrib.get('familyname')
if familyname is not None:
instanceObject.familyName = familyname
stylename = instanceElement.attrib.get('stylename')
if stylename is not None:
instanceObject.styleName = stylename
postScriptFontName = instanceElement.attrib.get('postscriptfontname')
if postScriptFontName is not None:
instanceObject.postScriptFontName = postScriptFontName
styleMapFamilyName = instanceElement.attrib.get('stylemapfamilyname')
if styleMapFamilyName is not None:
instanceObject.styleMapFamilyName = styleMapFamilyName
styleMapStyleName = instanceElement.attrib.get('stylemapstylename')
if styleMapStyleName is not None:
instanceObject.styleMapStyleName = styleMapStyleName
# read localised names
for styleNameElement in instanceElement.findall('stylename'):
for key, lang in styleNameElement.items():
if key == XML_LANG:
styleName = styleNameElement.text
instanceObject.setStyleName(styleName, lang)
for familyNameElement in instanceElement.findall('familyname'):
for key, lang in familyNameElement.items():
if key == XML_LANG:
familyName = familyNameElement.text
instanceObject.setFamilyName(familyName, lang)
for styleMapStyleNameElement in instanceElement.findall('stylemapstylename'):
for key, lang in styleMapStyleNameElement.items():
if key == XML_LANG:
styleMapStyleName = styleMapStyleNameElement.text
instanceObject.setStyleMapStyleName(styleMapStyleName, lang)
for styleMapFamilyNameElement in instanceElement.findall('stylemapfamilyname'):
for key, lang in styleMapFamilyNameElement.items():
if key == XML_LANG:
styleMapFamilyName = styleMapFamilyNameElement.text
instanceObject.setStyleMapFamilyName(styleMapFamilyName, lang)
designLocation, userLocation = self.locationFromElement(instanceElement)
locationLabel = instanceElement.attrib.get('location')
if (designLocation or userLocation) and locationLabel is not None:
raise DesignSpaceDocumentError('instance element must have at most one of the location="..." attribute or the nested location element')
instanceObject.locationLabel = locationLabel
instanceObject.userLocation = userLocation or {}
instanceObject.designLocation = designLocation or {}
for glyphElement in instanceElement.findall('.glyphs/glyph'):
self.readGlyphElement(glyphElement, instanceObject)
for infoElement in instanceElement.findall("info"):
self.readInfoElement(infoElement, instanceObject)
for libElement in instanceElement.findall('lib'):
self.readLibElement(libElement, instanceObject)
self.documentObject.instances.append(instanceObject)
def readLibElement(self, libElement, instanceObject):
"""Read the lib element for the given instance."""
instanceObject.lib = plistlib.fromtree(libElement[0])
def readInfoElement(self, infoElement, instanceObject):
""" Read the info element."""
instanceObject.info = True
def readGlyphElement(self, glyphElement, instanceObject):
"""
Read the glyph element, which could look like either one of these:
.. code-block:: xml
<glyph name="b" unicode="0x62"/>
<glyph name="b"/>
<glyph name="b">
<master location="location-token-bbb" source="master-token-aaa2"/>
<master glyphname="b.alt1" location="location-token-ccc" source="master-token-aaa3"/>
<note>
This is an instance from an anisotropic interpolation.
</note>
</glyph>
"""
glyphData = {}
glyphName = glyphElement.attrib.get('name')
if glyphName is None:
raise DesignSpaceDocumentError("Glyph object without name attribute")
mute = glyphElement.attrib.get("mute")
if mute == "1":
glyphData['mute'] = True
# unicode
unicodes = glyphElement.attrib.get('unicode')
if unicodes is not None:
try:
unicodes = [int(u, 16) for u in unicodes.split(" ")]
glyphData['unicodes'] = unicodes
except ValueError:
raise DesignSpaceDocumentError("unicode values %s are not integers" % unicodes)
for noteElement in glyphElement.findall('.note'):
glyphData['note'] = noteElement.text
break
designLocation, userLocation = self.locationFromElement(glyphElement)
if userLocation:
raise DesignSpaceDocumentError(f'<glyph> element "{glyphName}" must only have design locations (using xvalue="").')
if designLocation is not None:
glyphData['instanceLocation'] = designLocation
glyphSources = None
for masterElement in glyphElement.findall('.masters/master'):
fontSourceName = masterElement.attrib.get('source')
designLocation, userLocation = self.locationFromElement(masterElement)
if userLocation:
raise DesignSpaceDocumentError(f'<master> element "{fontSourceName}" must only have design locations (using xvalue="").')
masterGlyphName = masterElement.attrib.get('glyphname')
if masterGlyphName is None:
# if we don't read a glyphname, use the one we have
masterGlyphName = glyphName
d = dict(font=fontSourceName,
location=designLocation,
glyphName=masterGlyphName)
if glyphSources is None:
glyphSources = []
glyphSources.append(d)
if glyphSources is not None:
glyphData['masters'] = glyphSources
instanceObject.glyphs[glyphName] = glyphData
def readLib(self):
"""Read the lib element for the whole document."""
for libElement in self.root.findall(".lib"):
self.documentObject.lib = plistlib.fromtree(libElement[0])
class DesignSpaceDocument(LogMixin, AsDictMixin):
"""The DesignSpaceDocument object can read and write ``.designspace`` data.
It imports the axes, sources, variable fonts and instances to very basic
**descriptor** objects that store the data in attributes. Data is added to
the document by creating such descriptor objects, filling them with data
and then adding them to the document. This makes it easy to integrate this
object in different contexts.
The **DesignSpaceDocument** object can be subclassed to work with
different objects, as long as they have the same attributes. Reader and
Writer objects can be subclassed as well.
    **Note:** Python attribute names are usually camelCased, while the
    corresponding `XML <document-xml-structure>`_ attributes are usually
    all lowercase.
.. code:: python
from fontTools.designspaceLib import DesignSpaceDocument
doc = DesignSpaceDocument.fromfile("some/path/to/my.designspace")
doc.formatVersion
doc.elidedFallbackName
doc.axes
doc.locationLabels
doc.rules
doc.rulesProcessingLast
doc.sources
doc.variableFonts
doc.instances
doc.lib
"""
def __init__(self, readerClass=None, writerClass=None):
self.path = None
"""String, optional. When the document is read from the disk, this is
the full path that was given to :meth:`read` or :meth:`fromfile`.
"""
self.filename = None
"""String, optional. When the document is read from the disk, this is
its original file name, i.e. the last part of its path.
When the document is produced by a Python script and still only exists
in memory, the producing script can write here an indication of a
possible "good" filename, in case one wants to save the file somewhere.
"""
self.formatVersion: Optional[str] = None
"""Format version for this document, as a string. E.g. "4.0" """
self.elidedFallbackName: Optional[str] = None
"""STAT Style Attributes Header field ``elidedFallbackNameID``.
See: `OTSpec STAT Style Attributes Header <https://docs.microsoft.com/en-us/typography/opentype/spec/stat#style-attributes-header>`_
.. versionadded:: 5.0
"""
self.axes: List[Union[AxisDescriptor, DiscreteAxisDescriptor]] = []
"""List of this document's axes."""
self.locationLabels: List[LocationLabelDescriptor] = []
"""List of this document's STAT format 4 labels.
.. versionadded:: 5.0"""
self.rules: List[RuleDescriptor] = []
"""List of this document's rules."""
self.rulesProcessingLast: bool = False
"""This flag indicates whether the substitution rules should be applied
before or after other glyph substitution features.
- False: before
- True: after.
Default is False. For new projects, you probably want True. See
the following issues for more information:
`fontTools#1371 <https://github.com/fonttools/fonttools/issues/1371#issuecomment-590214572>`__
`fontTools#2050 <https://github.com/fonttools/fonttools/issues/2050#issuecomment-678691020>`__
If you want to use a different feature altogether, e.g. ``calt``,
use the lib key ``com.github.fonttools.varLib.featureVarsFeatureTag``
.. code:: xml
<lib>
<dict>
<key>com.github.fonttools.varLib.featureVarsFeatureTag</key>
<string>calt</string>
</dict>
</lib>
"""
self.sources: List[SourceDescriptor] = []
"""List of this document's sources."""
self.variableFonts: List[VariableFontDescriptor] = []
"""List of this document's variable fonts.
.. versionadded:: 5.0"""
self.instances: List[InstanceDescriptor] = []
"""List of this document's instances."""
self.lib: Dict = {}
"""User defined, custom data associated with the whole document.
Use reverse-DNS notation to identify your own data.
Respect the data stored by others.
"""
        self.default: Optional[SourceDescriptor] = None
        """The :class:`SourceDescriptor` at the default location, or ``None``.
        This attribute is updated by :meth:`findDefault`.
        """
if readerClass is not None:
self.readerClass = readerClass
else:
self.readerClass = BaseDocReader
if writerClass is not None:
self.writerClass = writerClass
else:
self.writerClass = BaseDocWriter
@classmethod
def fromfile(cls, path, readerClass=None, writerClass=None):
"""Read a designspace file from ``path`` and return a new instance of
        :class:`DesignSpaceDocument`.
"""
self = cls(readerClass=readerClass, writerClass=writerClass)
self.read(path)
return self
@classmethod
def fromstring(cls, string, readerClass=None, writerClass=None):
self = cls(readerClass=readerClass, writerClass=writerClass)
reader = self.readerClass.fromstring(string, self)
reader.read()
if self.sources:
self.findDefault()
return self
def tostring(self, encoding=None):
"""Returns the designspace as a string. Default encoding ``utf-8``."""
if encoding is str or (
encoding is not None and encoding.lower() == "unicode"
):
f = StringIO()
xml_declaration = False
elif encoding is None or encoding == "utf-8":
f = BytesIO()
encoding = "UTF-8"
xml_declaration = True
else:
raise ValueError("unsupported encoding: '%s'" % encoding)
writer = self.writerClass(f, self)
writer.write(encoding=encoding, xml_declaration=xml_declaration)
return f.getvalue()
def read(self, path):
"""Read a designspace file from ``path`` and populates the fields of
``self`` with the data.
"""
if hasattr(path, "__fspath__"): # support os.PathLike objects
path = path.__fspath__()
self.path = path
self.filename = os.path.basename(path)
reader = self.readerClass(path, self)
reader.read()
if self.sources:
self.findDefault()
def write(self, path):
"""Write this designspace to ``path``."""
if hasattr(path, "__fspath__"): # support os.PathLike objects
path = path.__fspath__()
self.path = path
self.filename = os.path.basename(path)
self.updatePaths()
writer = self.writerClass(path, self)
writer.write()
def _posixRelativePath(self, otherPath):
relative = os.path.relpath(otherPath, os.path.dirname(self.path))
return posix(relative)
def updatePaths(self):
"""
Right before we save we need to identify and respond to the following situations:
In each descriptor, we have to do the right thing for the filename attribute.
::
case 1.
descriptor.filename == None
descriptor.path == None
-- action:
write as is, descriptors will not have a filename attr.
useless, but no reason to interfere.
case 2.
descriptor.filename == "../something"
descriptor.path == None
-- action:
write as is. The filename attr should not be touched.
case 3.
descriptor.filename == None
descriptor.path == "~/absolute/path/there"
-- action:
calculate the relative path for filename.
We're not overwriting some other value for filename, it should be fine
case 4.
descriptor.filename == '../somewhere'
descriptor.path == "~/absolute/path/there"
-- action:
there is a conflict between the given filename, and the path.
So we know where the file is relative to the document.
Can't guess why they're different, we just choose for path to be correct and update filename.
"""
assert self.path is not None
for descriptor in self.sources + self.instances:
if descriptor.path is not None:
# case 3 and 4: filename gets updated and relativized
descriptor.filename = self._posixRelativePath(descriptor.path)
def addSource(self, sourceDescriptor: SourceDescriptor):
"""Add the given ``sourceDescriptor`` to ``doc.sources``."""
self.sources.append(sourceDescriptor)
def addSourceDescriptor(self, **kwargs):
"""Instantiate a new :class:`SourceDescriptor` using the given
``kwargs`` and add it to ``doc.sources``.
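
        For example (the values are illustrative):

        .. code:: python

            doc.addSourceDescriptor(
                name="master.regular",
                familyName="MyFamily",
                styleName="Regular",
                location=dict(weight=400),
            )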
"""
source = self.writerClass.sourceDescriptorClass(**kwargs)
self.addSource(source)
return source
def addInstance(self, instanceDescriptor: InstanceDescriptor):
"""Add the given ``instanceDescriptor`` to :attr:`instances`."""
self.instances.append(instanceDescriptor)
def addInstanceDescriptor(self, **kwargs):
"""Instantiate a new :class:`InstanceDescriptor` using the given
``kwargs`` and add it to :attr:`instances`.
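
        For example (the values are illustrative):

        .. code:: python

            doc.addInstanceDescriptor(
                familyName="MyFamily",
                styleName="Medium",
                location=dict(weight=500),
            )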
"""
instance = self.writerClass.instanceDescriptorClass(**kwargs)
self.addInstance(instance)
return instance
def addAxis(self, axisDescriptor: Union[AxisDescriptor, DiscreteAxisDescriptor]):
"""Add the given ``axisDescriptor`` to :attr:`axes`."""
self.axes.append(axisDescriptor)
def addAxisDescriptor(self, **kwargs):
"""Instantiate a new :class:`AxisDescriptor` using the given
``kwargs`` and add it to :attr:`axes`.
        The axis will be an instance of :class:`DiscreteAxisDescriptor` if
        the ``kwargs`` provide a ``values`` list, or an :class:`AxisDescriptor` otherwise.
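
        For example (tags and ranges are illustrative):

        .. code:: python

            # continuous axis:
            doc.addAxisDescriptor(
                name="weight", tag="wght", minimum=200, default=400, maximum=900
            )
            # discrete axis, selected by passing ``values``:
            doc.addAxisDescriptor(name="italic", tag="ital", values=[0, 1], default=0)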
"""
if "values" in kwargs:
axis = self.writerClass.discreteAxisDescriptorClass(**kwargs)
else:
axis = self.writerClass.axisDescriptorClass(**kwargs)
self.addAxis(axis)
return axis
def addRule(self, ruleDescriptor: RuleDescriptor):
"""Add the given ``ruleDescriptor`` to :attr:`rules`."""
self.rules.append(ruleDescriptor)
def addRuleDescriptor(self, **kwargs):
"""Instantiate a new :class:`RuleDescriptor` using the given
``kwargs`` and add it to :attr:`rules`.
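
        For example (the names and ranges are illustrative):

        .. code:: python

            doc.addRuleDescriptor(
                name="bracket.dollar",
                conditionSets=[[dict(name="weight", minimum=600, maximum=900)]],
                subs=[("dollar", "dollar.alt")],
            )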
"""
rule = self.writerClass.ruleDescriptorClass(**kwargs)
self.addRule(rule)
return rule
def addVariableFont(self, variableFontDescriptor: VariableFontDescriptor):
"""Add the given ``variableFontDescriptor`` to :attr:`variableFonts`.
.. versionadded:: 5.0
"""
self.variableFonts.append(variableFontDescriptor)
def addVariableFontDescriptor(self, **kwargs):
"""Instantiate a new :class:`VariableFontDescriptor` using the given
``kwargs`` and add it to :attr:`variableFonts`.
.. versionadded:: 5.0
"""
variableFont = self.writerClass.variableFontDescriptorClass(**kwargs)
self.addVariableFont(variableFont)
return variableFont
def addLocationLabel(self, locationLabelDescriptor: LocationLabelDescriptor):
"""Add the given ``locationLabelDescriptor`` to :attr:`locationLabels`.
.. versionadded:: 5.0
"""
self.locationLabels.append(locationLabelDescriptor)
def addLocationLabelDescriptor(self, **kwargs):
"""Instantiate a new :class:`LocationLabelDescriptor` using the given
``kwargs`` and add it to :attr:`locationLabels`.
.. versionadded:: 5.0
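
        For example (the label and axis names are illustrative):

        .. code:: python

            doc.addLocationLabelDescriptor(
                name="Some Style", userLocation={"weight": 300, "width": 50}
            )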
"""
locationLabel = self.writerClass.locationLabelDescriptorClass(**kwargs)
self.addLocationLabel(locationLabel)
return locationLabel
def newDefaultLocation(self):
"""Return a dict with the default location in design space coordinates."""
# Without OrderedDict, output XML would be non-deterministic.
# https://github.com/LettError/designSpaceDocument/issues/10
loc = collections.OrderedDict()
for axisDescriptor in self.axes:
loc[axisDescriptor.name] = axisDescriptor.map_forward(
axisDescriptor.default
)
return loc
def labelForUserLocation(self, userLocation: SimpleLocationDict) -> Optional[LocationLabelDescriptor]:
"""Return the :class:`LocationLabel` that matches the given
``userLocation``, or ``None`` if no such label exists.
.. versionadded:: 5.0
"""
return next(
(label for label in self.locationLabels if label.userLocation == userLocation), None
)
def updateFilenameFromPath(self, masters=True, instances=True, force=False):
"""Set a descriptor filename attr from the path and this document path.
If the filename attribute is not None: skip it.
"""
if masters:
for descriptor in self.sources:
if descriptor.filename is not None and not force:
continue
if self.path is not None:
descriptor.filename = self._posixRelativePath(descriptor.path)
if instances:
for descriptor in self.instances:
if descriptor.filename is not None and not force:
continue
if self.path is not None:
descriptor.filename = self._posixRelativePath(descriptor.path)
def newAxisDescriptor(self):
"""Ask the writer class to make us a new axisDescriptor."""
return self.writerClass.getAxisDecriptor()
def newSourceDescriptor(self):
"""Ask the writer class to make us a new sourceDescriptor."""
return self.writerClass.getSourceDescriptor()
def newInstanceDescriptor(self):
"""Ask the writer class to make us a new instanceDescriptor."""
return self.writerClass.getInstanceDescriptor()
def getAxisOrder(self):
"""Return a list of axis names, in the same order as defined in the document."""
names = []
for axisDescriptor in self.axes:
names.append(axisDescriptor.name)
return names
def getAxis(self, name):
"""Return the axis with the given ``name``, or ``None`` if no such axis exists."""
for axisDescriptor in self.axes:
if axisDescriptor.name == name:
return axisDescriptor
return None
def getLocationLabel(self, name: str) -> Optional[LocationLabelDescriptor]:
"""Return the top-level location label with the given ``name``, or
``None`` if no such label exists.
.. versionadded:: 5.0
"""
for label in self.locationLabels:
if label.name == name:
return label
return None
def map_forward(self, userLocation: SimpleLocationDict) -> SimpleLocationDict:
"""Map a user location to a design location.
Assume that missing coordinates are at the default location for that axis.
Note: the output won't be anisotropic, only the xvalue is set.
.. versionadded:: 5.0
"""
return {
axis.name: axis.map_forward(userLocation.get(axis.name, axis.default))
for axis in self.axes
}
def map_backward(self, designLocation: AnisotropicLocationDict) -> SimpleLocationDict:
"""Map a design location to a user location.
Assume that missing coordinates are at the default location for that axis.
When the input has anisotropic locations, only the xvalue is used.
.. versionadded:: 5.0
"""
return {
axis.name: (
axis.map_backward(designLocation[axis.name])
if axis.name in designLocation
else axis.default
)
for axis in self.axes
}
def findDefault(self):
"""Set and return SourceDescriptor at the default location or None.
The default location is the set of all `default` values in user space
of all axes.
This function updates the document's :attr:`default` value.
.. versionchanged:: 5.0
Allow the default source to not specify some of the axis values, and
they are assumed to be the default.
See :meth:`SourceDescriptor.getFullDesignLocation()`
"""
self.default = None
# Convert the default location from user space to design space before comparing
# it against the SourceDescriptor locations (always in design space).
defaultDesignLocation = self.newDefaultLocation()
for sourceDescriptor in self.sources:
if sourceDescriptor.getFullDesignLocation(self) == defaultDesignLocation:
self.default = sourceDescriptor
return sourceDescriptor
return None
def normalizeLocation(self, location):
"""Return a dict with normalized axis values."""
from fontTools.varLib.models import normalizeValue
new = {}
for axis in self.axes:
if axis.name not in location:
# skipping this dimension it seems
continue
value = location[axis.name]
# 'anisotropic' location, take first coord only
if isinstance(value, tuple):
value = value[0]
triple = [
axis.map_forward(v) for v in (axis.minimum, axis.default, axis.maximum)
]
new[axis.name] = normalizeValue(value, triple)
return new
def normalize(self):
"""
Normalise the geometry of this designspace:
        - scale all the locations of all masters and instances to the -1 to 0 to 1 range.
        - we need the axis data to do the scaling, so the axes are normalized last.
"""
# masters
for item in self.sources:
item.location = self.normalizeLocation(item.location)
# instances
for item in self.instances:
# glyph masters for this instance
for _, glyphData in item.glyphs.items():
glyphData['instanceLocation'] = self.normalizeLocation(glyphData['instanceLocation'])
for glyphMaster in glyphData['masters']:
glyphMaster['location'] = self.normalizeLocation(glyphMaster['location'])
item.location = self.normalizeLocation(item.location)
# the axes
for axis in self.axes:
# scale the map first
newMap = []
for inputValue, outputValue in axis.map:
newOutputValue = self.normalizeLocation({axis.name: outputValue}).get(axis.name)
newMap.append((inputValue, newOutputValue))
if newMap:
axis.map = newMap
# finally the axis values
minimum = self.normalizeLocation({axis.name: axis.minimum}).get(axis.name)
maximum = self.normalizeLocation({axis.name: axis.maximum}).get(axis.name)
default = self.normalizeLocation({axis.name: axis.default}).get(axis.name)
# and set them in the axis.minimum
axis.minimum = minimum
axis.maximum = maximum
axis.default = default
# now the rules
for rule in self.rules:
newConditionSets = []
for conditions in rule.conditionSets:
newConditions = []
for cond in conditions:
if cond.get('minimum') is not None:
minimum = self.normalizeLocation({cond['name']: cond['minimum']}).get(cond['name'])
else:
minimum = None
if cond.get('maximum') is not None:
maximum = self.normalizeLocation({cond['name']: cond['maximum']}).get(cond['name'])
else:
maximum = None
newConditions.append(dict(name=cond['name'], minimum=minimum, maximum=maximum))
newConditionSets.append(newConditions)
rule.conditionSets = newConditionSets
def loadSourceFonts(self, opener, **kwargs):
"""Ensure SourceDescriptor.font attributes are loaded, and return list of fonts.
Takes a callable which initializes a new font object (e.g. TTFont, or
defcon.Font, etc.) from the SourceDescriptor.path, and sets the
SourceDescriptor.font attribute.
If the font attribute is already not None, it is not loaded again.
Fonts with the same path are only loaded once and shared among SourceDescriptors.
For example, to load UFO sources using defcon:
designspace = DesignSpaceDocument.fromfile("path/to/my.designspace")
designspace.loadSourceFonts(defcon.Font)
Or to load masters as FontTools binary fonts, including extra options:
designspace.loadSourceFonts(ttLib.TTFont, recalcBBoxes=False)
Args:
opener (Callable): takes one required positional argument, the source.path,
and an optional list of keyword arguments, and returns a new font object
loaded from the path.
**kwargs: extra options passed on to the opener function.
Returns:
List of font objects in the order they appear in the sources list.
"""
# we load fonts with the same source.path only once
loaded = {}
fonts = []
for source in self.sources:
if source.font is not None: # font already loaded
fonts.append(source.font)
continue
if source.path in loaded:
source.font = loaded[source.path]
else:
if source.path is None:
raise DesignSpaceDocumentError(
"Designspace source '%s' has no 'path' attribute"
% (source.name or "<Unknown>")
)
source.font = opener(source.path, **kwargs)
loaded[source.path] = source.font
fonts.append(source.font)
return fonts
@property
def formatTuple(self):
"""Return the formatVersion as a tuple of (major, minor).
.. versionadded:: 5.0
"""
if self.formatVersion is None:
return (5, 0)
numbers = (int(i) for i in self.formatVersion.split("."))
major = next(numbers)
minor = next(numbers, 0)
return (major, minor)
def getVariableFonts(self) -> List[VariableFontDescriptor]:
"""Return all variable fonts defined in this document, or implicit
variable fonts that can be built from the document's continuous axes.
In the case of Designspace documents before version 5, the whole
document was implicitly describing a variable font that covers the
whole space.
In version 5 and above documents, there can be as many variable fonts
as there are locations on discrete axes.
.. seealso:: :func:`splitInterpolable`
.. versionadded:: 5.0
"""
if self.variableFonts:
return self.variableFonts
variableFonts = []
discreteAxes = []
rangeAxisSubsets: List[Union[RangeAxisSubsetDescriptor, ValueAxisSubsetDescriptor]] = []
for axis in self.axes:
if isinstance(axis, DiscreteAxisDescriptor):
discreteAxes.append(axis)
else:
rangeAxisSubsets.append(RangeAxisSubsetDescriptor(name=axis.name))
valueCombinations = itertools.product(*[axis.values for axis in discreteAxes])
for values in valueCombinations:
basename = None
if self.filename is not None:
basename = os.path.splitext(self.filename)[0] + "-VF"
if self.path is not None:
basename = os.path.splitext(os.path.basename(self.path))[0] + "-VF"
if basename is None:
basename = "VF"
axisNames = "".join([f"-{axis.tag}{value}" for axis, value in zip(discreteAxes, values)])
variableFonts.append(VariableFontDescriptor(
name=f"{basename}{axisNames}",
axisSubsets=rangeAxisSubsets + [
ValueAxisSubsetDescriptor(name=axis.name, userValue=value)
for axis, value in zip(discreteAxes, values)
]
))
return variableFonts
def deepcopyExceptFonts(self):
"""Allow deep-copying a DesignSpace document without deep-copying
attached UFO fonts or TTFont objects. The :attr:`font` attribute
is shared by reference between the original and the copy.
.. versionadded:: 5.0
"""
fonts = [source.font for source in self.sources]
try:
for source in self.sources:
source.font = None
res = copy.deepcopy(self)
for source, font in zip(res.sources, fonts):
                source.font = font  # re-attach the shared font object to the copied source
return res
finally:
for source, font in zip(self.sources, fonts):
source.font = font
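# ----------------------------------------------------------------------------
# Editor's note: the block below is a small, hedged usage sketch of the API
# shown above; it is not part of the original module. It assumes this file is
# fontTools' designspaceLib and that ``DesignSpaceDocument`` is defined
# earlier in it; the axis parameters are purely illustrative.
if __name__ == "__main__":
    doc = DesignSpaceDocument()
    # A continuous weight axis with a user-to-design map.
    doc.addAxisDescriptor(
        name="Weight",
        tag="wght",
        minimum=100,
        default=400,
        maximum=900,
        map=[(100, 20), (400, 80), (900, 160)],
    )
    # The default location is reported in design space coordinates (80 here).
    print(doc.newDefaultLocation())
    # map_forward / map_backward convert between user and design coordinates.
    print(doc.map_forward({"Weight": 100}))   # design value 20
    print(doc.map_backward({"Weight": 160}))  # user value 900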
| 2.25 | 2 |
ax/models/torch/posterior_mean.py | dme65/Ax | 1 | 6587 | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, Optional, Tuple
import torch
from botorch.acquisition.acquisition import AcquisitionFunction
from botorch.acquisition.monte_carlo import qSimpleRegret
from botorch.acquisition.objective import ConstrainedMCObjective, GenericMCObjective
from botorch.acquisition.utils import get_infeasible_cost
from botorch.models.model import Model
from botorch.utils import (
get_objective_weights_transform,
get_outcome_constraint_transforms,
)
from botorch.utils.multi_objective.scalarization import get_chebyshev_scalarization
from botorch.utils.transforms import squeeze_last_dim
from torch import Tensor
def get_PosteriorMean(
model: Model,
objective_weights: Tensor,
outcome_constraints: Optional[Tuple[Tensor, Tensor]] = None,
X_observed: Optional[Tensor] = None,
X_pending: Optional[Tensor] = None,
**kwargs: Any,
) -> AcquisitionFunction:
r"""Instantiates a PosteriorMean acquisition function.
Note: If no OutcomeConstraints given, return an analytic acquisition
function. This requires {optimizer_kwargs: {joint_optimization: True}} or an
optimizer that does not assume pending point support.
Args:
objective_weights: The objective is to maximize a weighted sum of
the columns of f(x). These are the weights.
outcome_constraints: A tuple of (A, b). For k outcome constraints
and m outputs at f(x), A is (k x m) and b is (k x 1) such that
A f(x) <= b. (Not used by single task models)
X_observed: A tensor containing points observed for all objective
outcomes and outcomes that appear in the outcome constraints (if
there are any).
X_pending: A tensor containing points whose evaluation is pending (i.e.
that have been submitted for evaluation) present for all objective
outcomes and outcomes that appear in the outcome constraints (if
there are any).
Returns:
PosteriorMean: The instantiated acquisition function.
"""
if X_observed is None:
raise ValueError("There are no feasible observed points.")
# construct Objective module
if kwargs.get("chebyshev_scalarization", False):
obj_tf = get_chebyshev_scalarization(
weights=objective_weights,
Y=squeeze_last_dim(torch.stack(kwargs.get("Ys")).transpose(0, 1)),
)
else:
obj_tf = get_objective_weights_transform(objective_weights)
if outcome_constraints is None:
objective = GenericMCObjective(objective=obj_tf)
else:
con_tfs = get_outcome_constraint_transforms(outcome_constraints)
inf_cost = get_infeasible_cost(X=X_observed, model=model, objective=obj_tf)
objective = ConstrainedMCObjective(
objective=obj_tf, constraints=con_tfs or [], infeasible_cost=inf_cost
)
# Use qSimpleRegret, not analytic posterior, to handle arbitrary objective fns.
acq_func = qSimpleRegret(model, objective=objective)
return acq_func
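# ---------------------------------------------------------------------------
# Editor's note: a minimal, hedged usage sketch, not part of the original Ax
# module. It assumes BoTorch's SingleTaskGP is importable and uses made-up
# training data; the shapes and values below are illustrative only.
if __name__ == "__main__":
    from botorch.models import SingleTaskGP
    train_X = torch.rand(8, 2, dtype=torch.double)
    train_Y = train_X.sum(dim=-1, keepdim=True) + 0.05 * torch.randn(8, 1, dtype=torch.double)
    model = SingleTaskGP(train_X, train_Y)
    acqf = get_PosteriorMean(
        model=model,
        objective_weights=torch.tensor([1.0], dtype=torch.double),
        X_observed=train_X,
    )
    # Evaluate the posterior-mean acquisition value on a batch of q=1 candidates.
    candidates = torch.rand(4, 1, 2, dtype=torch.double)
    print(acqf(candidates))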
| 1.84375 | 2 |
src/drivers/velodyne_nodes/test/velodyne_node.test.py | fanyu2021/fyAutowareAuto | 0 | 6588 | # Copyright 2018 the Autoware Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Co-developed by Tier IV, Inc. and Apex.AI, Inc.
import ament_index_python
import launch
import launch.actions
import launch_ros.actions
import lidar_integration
def generate_test_description(ready_fn):
PORT = lidar_integration.get_open_port()
# The node under test and the checker node that will pass/fail our tests:
    test_topic = "velodyne_cloud_node_test_topic"
velodyne_cloud_node = launch_ros.actions.Node(
package="velodyne_nodes",
node_executable="velodyne_cloud_node_exe",
node_name="vlp16_driver_node",
node_namespace="lidar_front",
parameters=[
"{}/param/vlp16_test.param.yaml".format(
ament_index_python.get_package_share_directory("velodyne_nodes")
),
{
"port": PORT,
"expected_num_subscribers": 1,
}
],
remappings=[("points_raw", test_topic)],
arguments=["--model", "vlp16"]
)
pcl_checker = lidar_integration.make_pcl_checker(
topic=test_topic,
size=55000,
period=100,
period_tolerance=2.2,
size_tolerance=1.4,
)
return lidar_integration.get_lidar_launch_description(
test_nodes=[velodyne_cloud_node],
checkers=[pcl_checker],
other_actions=[
launch.actions.OpaqueFunction(function=lambda context: ready_fn())
],
port=PORT
)
# Test cases are created automatically by the lidar_integration package. We just need to
# instantiate them
active = lidar_integration.make_active_tests()
after_shutdown = lidar_integration.make_post_shutdown_tests()
| 1.914063 | 2 |
example.py | manhcuogntin4/Color-transfer | 0 | 6589 | # USAGE
# python example.py --source images/ocean_sunset.jpg --target images/ocean_day.jpg
# import the necessary packages
from color_transfer import color_transfer
import numpy as np
import argparse
import cv2
def show_image(title, image, width = 300):
# resize the image to have a constant width, just to
# make displaying the images take up less screen real
# estate
r = width / float(image.shape[1])
dim = (width, int(image.shape[0] * r))
resized = cv2.resize(image, dim, interpolation = cv2.INTER_AREA)
# show the resized image
cv2.imshow(title, resized)
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-s", "--source", required = True,
help = "Path to the source image")
ap.add_argument("-t", "--target", required = True,
help = "Path to the target image")
ap.add_argument("-o", "--output", help = "Path to the output image (optional)")
args = vars(ap.parse_args())
# load the images
source = cv2.imread(args["source"])
target = cv2.imread(args["target"])
# transfer the color distribution from the source image
# to the target image
transfer = color_transfer(source, target)
# check to see if the output image should be saved
if args["output"] is not None:
cv2.imwrite(args["output"], transfer)
# show the images and wait for a key press
show_image("Source", source)
show_image("Target", target)
show_image("Transfer", transfer)
cv2.waitKey(0)
| 3.40625 | 3 |
scripts/registration_pipeline.py | heethesh/Argoverse-HDMap-Update | 0 | 6590 | import copy
import numpy as np
import open3d as o3d
from tqdm import tqdm
from scipy import stats
import utils_o3d as utils
def remove_ground_plane(pcd, z_thresh=-2.7):
cropped = copy.deepcopy(pcd)
cropped_points = np.array(cropped.points)
cropped_points = cropped_points[cropped_points[:, -1] > z_thresh]
pcd_final = o3d.geometry.PointCloud()
pcd_final.points = o3d.utility.Vector3dVector(cropped_points)
return pcd_final
def remove_y_plane(pcd, y_thresh=5):
cropped = copy.deepcopy(pcd)
cropped_points = np.array(cropped.points)
cropped_points = cropped_points[cropped_points[:, 0] < y_thresh]
cropped_points[:, -1] = -cropped_points[:, -1]
pcd_final = o3d.geometry.PointCloud()
pcd_final.points = o3d.utility.Vector3dVector(cropped_points)
return pcd_final
def compute_features(pcd, voxel_size, normals_nn=100, features_nn=120, downsample=True):
normals_radius = voxel_size * 2
features_radius = voxel_size * 4
# Downsample the point cloud using Voxel grids
if downsample:
print(':: Input size:', np.array(pcd.points).shape)
pcd_down = utils.downsample_point_cloud(pcd, voxel_size)
print(':: Downsample with a voxel size %.3f' % voxel_size)
print(':: Downsample size', np.array(pcd_down.points).shape)
else: pcd_down = copy.deepcopy(pcd)
# Estimate normals
print(':: Estimate normal with search radius %.3f' % normals_radius)
pcd_down.estimate_normals(
o3d.geometry.KDTreeSearchParamHybrid(radius=normals_radius, max_nn=normals_nn))
# Compute FPFH features
print(':: Compute FPFH feature with search radius %.3f' % features_radius)
features = o3d.registration.compute_fpfh_feature(pcd_down,
o3d.geometry.KDTreeSearchParamHybrid(radius=features_radius, max_nn=features_nn))
return pcd_down, features
def match_features(pcd0, pcd1, feature0, feature1, thresh=None, display=False):
pcd0, pcd1 = copy.deepcopy(pcd0), copy.deepcopy(pcd1)
print(':: Input size 0:', np.array(pcd0.points).shape)
print(':: Input size 1:', np.array(pcd1.points).shape)
print(':: Features size 0:', np.array(feature0.data).shape)
print(':: Features size 1:', np.array(feature1.data).shape)
utils.paint_uniform_color(pcd0, color=[1, 0.706, 0])
utils.paint_uniform_color(pcd1, color=[0, 0.651, 0.929])
scores, indices = [], []
fpfh_tree = o3d.geometry.KDTreeFlann(feature1)
for i in tqdm(range(len(pcd0.points)), desc=':: Feature Matching'):
[_, idx, _] = fpfh_tree.search_knn_vector_xd(feature0.data[:, i], 1)
scores.append(np.linalg.norm(pcd0.points[i] - pcd1.points[idx[0]]))
indices.append([i, idx[0]])
scores, indices = np.array(scores), np.array(indices)
median = np.median(scores)
if thresh is None: thresh = median
inliers_idx = np.where(scores <= thresh)[0]
pcd0_idx = indices[inliers_idx, 0]
pcd1_idx = indices[inliers_idx, 1]
print(':: Score stats: Min=%0.3f, Max=%0.3f, Median=%0.3f, N<Thresh=%d' % (
np.min(scores), np.max(scores), median, len(inliers_idx)))
if display:
for i, j in zip(pcd0_idx, pcd1_idx):
pcd0.colors[i] = [1, 0, 0]
pcd1.colors[j] = [1, 0, 0]
utils.display([pcd0, pcd1])
return pcd0_idx, pcd1_idx
def estimate_scale(pcd0, pcd1, pcd0_idx, pcd1_idx, top_percent=1.0,
ransac_iters=5000, sample_size=50):
points0 = np.asarray(pcd0.points)[pcd0_idx]
points1 = np.asarray(pcd1.points)[pcd1_idx]
mean0 = np.mean(points0, axis=0)
mean1 = np.mean(points1, axis=0)
top_count = int(top_percent * len(pcd0_idx))
assert top_count > sample_size, 'top_count <= sample_size'
scales = []
for i in tqdm(range(ransac_iters), desc=':: Scale Estimation RANSAC'):
args = np.random.choice(top_count, sample_size, replace=False)
points0_r = points0[args]
points1_r = points1[args]
score0 = np.sum((points0_r - mean0) ** 2, axis=1)
score1 = np.sum((points1_r - mean1) ** 2, axis=1)
scale = np.sqrt(np.mean(score1) / np.mean(score0))
scales.append(scale)
best_scale = stats.mode(scales)[0][0]
print(':: Estimated scale:', best_scale)
return best_scale
def global_registration(source_down, target_down, source_fpfh, target_fpfh, voxel_size,
distance_threshold=1.0, num_iters=4000000, num_val_iters=500):
print(':: Distance threshold %.3f' % distance_threshold)
result = o3d.registration.registration_ransac_based_on_feature_matching(
source_down, target_down, source_fpfh, target_fpfh, distance_threshold,
o3d.registration.TransformationEstimationPointToPoint(False), 4, [
o3d.registration.CorrespondenceCheckerBasedOnEdgeLength(0.9),
o3d.registration.CorrespondenceCheckerBasedOnDistance(
distance_threshold)
], o3d.registration.RANSACConvergenceCriteria(num_iters, num_val_iters))
return result
def fast_global_registration(source_down, target_down,
source_fpfh, target_fpfh, voxel_size):
distance_threshold = 1.0
result = o3d.registration.registration_fast_based_on_feature_matching(
source_down, target_down, source_fpfh, target_fpfh,
o3d.registration.FastGlobalRegistrationOption(
maximum_correspondence_distance=distance_threshold))
return result
def refine_registration(source, target, source_fpfh, target_fpfh, initial_result, voxel_size):
distance_threshold = 0.1
print(':: Distance threshold %.3f' % distance_threshold)
result = o3d.registration.registration_icp(
source, target, distance_threshold, initial_result.transformation,
o3d.registration.TransformationEstimationPointToPlane())
return result
def registration(pcd0, pcd1, feature1, feature2, voxel_size, method='global'):
if method == 'global':
print('\nRANSAC global registration on scaled point clouds...')
initial_result = global_registration(pcd0, pcd1, feature1, feature2, voxel_size)
elif method == 'fast_global':
print('\nFast global registration on scaled point clouds...')
initial_result = fast_global_registration(pcd0, pcd1, feature1, feature2, voxel_size)
else:
print(':: Registration method not supported')
return
print(':: Initial registration results:')
print(initial_result)
print('\nDisplaying initial result...')
draw_registration_result(pcd0, pcd1, initial_result.transformation)
print('\nRefine registration...')
result = refine_registration(pcd0, pcd1, feature1, feature2, initial_result, voxel_size)
print(':: Final registration results:')
print(result)
return result
def draw_registration_result(source, target, transformation):
source_temp = copy.deepcopy(source)
target_temp = copy.deepcopy(target)
source_temp.paint_uniform_color([1, 0.706, 0])
target_temp.paint_uniform_color([0, 0.651, 0.929])
source_temp.transform(transformation)
o3d.visualization.draw_geometries([source_temp, target_temp])
def run():
voxel_size = 0.2
dso_scale = 0.03
pcd_lidar = o3d.io.read_point_cloud('../maps/scans/scan_050.pcd')
pcd_lidar = remove_ground_plane(pcd_lidar)
pcd_dso = o3d.io.read_point_cloud('../maps/dso_map_cleaned.pcd')
pcd_dso = remove_ground_plane(pcd_dso, z_thresh=4.5)
pcd_dso = remove_y_plane(pcd_dso, y_thresh=0.2)
# pcd_dso = utils.scale_point_cloud(pcd_dso, dso_scale).rotate([0.5, 0.5, 0.5]).translate([10, 20, 30])
# Ground plane removal results
# utils.display(pcds=[pcd_lidar, pcd_dso], colors=[[1, 0.706, 0], [0, 0.651, 0.929]])
# utils.display(pcds=[pcd_dso], colors=[[0, 0.651, 0.929]])
# return
print('\nComputing FPFH features for lidar point cloud...')
pcd_lidar_down, features_lidar = compute_features(pcd_lidar, voxel_size=voxel_size)
print('\nComputing FPFH features for DSO point cloud...')
pcd_dso_down, features_dso = compute_features(pcd_dso, voxel_size=voxel_size * (dso_scale if dso_scale < 1 else 1))
print('\nMatching FPFH features...')
pcd_lidar_idx, pcd_dso_idx = match_features(pcd_lidar_down, pcd_dso_down,
features_lidar, features_dso, thresh=None)
print('\nEstimating scale using matches...')
scale = estimate_scale(pcd_lidar_down, pcd_dso_down, pcd_lidar_idx, pcd_dso_idx)
scale = 0.06
print('\nCorrecting scale...')
pcd_dso_scaled = utils.scale_point_cloud(pcd_dso, 1.0 / scale)
utils.display(pcds=[pcd_lidar, pcd_dso_scaled], colors=[[1, 0.706, 0], [0, 0.651, 0.929]])
# return
# Registration
pcd_dso_scaled_down, features_dso_scaled = compute_features(
pcd_dso_scaled, voxel_size=voxel_size)
result = registration(pcd_lidar_down, pcd_dso_scaled_down, features_lidar,
features_dso_scaled, voxel_size, method='global')
print('\nDisplaying result...')
draw_registration_result(pcd_lidar, pcd_dso_scaled, result.transformation)
if __name__ == '__main__':
run()
| 2.34375 | 2 |
neo4j/aio/__init__.py | michaelcraige/neo4j-python-driver | 1 | 6591 | <filename>neo4j/aio/__init__.py
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Copyright (c) 2002-2019 "Neo4j,"
# Neo4j Sweden AB [http://neo4j.com]
#
# This file is part of Neo4j.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from asyncio import (
IncompleteReadError,
Lock,
StreamReader,
StreamReaderProtocol,
StreamWriter,
get_event_loop,
wait,
)
from collections import deque
from logging import getLogger
from os import strerror
from random import choice
from ssl import SSLError
from sys import platform, version_info
from time import perf_counter
from neo4j.addressing import Address
from neo4j.aio._collections import WaitingList
from neo4j.aio._mixins import Addressable, Breakable
from neo4j.errors import (
BoltError,
BoltConnectionError,
BoltSecurityError,
BoltConnectionBroken,
BoltHandshakeError,
Neo4jAvailabilityError,
)
from neo4j.api import Version
from neo4j.conf import Config, PoolConfig
from neo4j.meta import version as neo4j_version
from neo4j.routing import RoutingTable
log = getLogger(__name__)
MAGIC = b"\x60\x60\xB0\x17"
class Bolt(Addressable, object):
#: True if this instance uses secure communication, false
#: otherwise.
secure = None
#: As a class attribute, this denotes the version of Bolt handled
#: by that subclass. As an instance attribute, this represents the
#: version of the protocol in use.
protocol_version = ()
# Record of the time at which this connection was opened.
__t_opened = None
# Handle to the StreamReader object.
__reader = None
# Handle to the StreamWriter object, which can be used on close.
__writer = None
# Flag to indicate that the connection is closed
__closed = False
@classmethod
def default_user_agent(cls):
""" Return the default user agent string for a connection.
"""
template = "neo4j-python/{} Python/{}.{}.{}-{}-{} ({})"
fields = (neo4j_version,) + tuple(version_info) + (platform,)
return template.format(*fields)
@classmethod
def protocol_handlers(cls, protocol_version=None):
""" Return a dictionary of available Bolt protocol handlers,
keyed by version tuple. If an explicit protocol version is
        provided, the dictionary will contain either zero or one item,
depending on whether that version is supported. If no protocol
version is provided, all available versions will be returned.
:param protocol_version: tuple identifying a specific protocol
version (e.g. (3, 5)) or None
:return: dictionary of version tuple to handler class for all
relevant and supported protocol versions
:raise TypeError: if protocol version is not passed in a tuple
"""
# Carry out subclass imports locally to avoid circular
# dependency issues.
from neo4j.aio.bolt3 import Bolt3
handlers = {bolt.protocol_version: bolt for bolt in [
# This list can be updated as protocol
# versions are added and removed.
Bolt3,
]}
if protocol_version is None:
return handlers
if not isinstance(protocol_version, tuple):
raise TypeError("Protocol version must be specified as a tuple")
return {version: handler
for version, handler in handlers.items()
if version == protocol_version}
@classmethod
def opener(cls, auth=None, **config):
""" Create and return an opener function for a given set of
configuration parameters. This is useful when multiple servers share
the same configuration details, such as within a connection pool.
"""
async def f(address, *, loop=None):
return await Bolt.open(address, auth=auth, loop=loop, **config)
return f
@classmethod
async def open(cls, address, *, auth=None, loop=None, **config):
""" Open a socket connection and perform protocol version
negotiation, in order to construct and return a Bolt client
instance for a supported Bolt protocol version.
:param address: tuples of host and port, such as
("127.0.0.1", 7687)
:param auth:
:param loop:
:param config:
:return: instance of a Bolt subclass
:raise BoltConnectionError: if a connection could not be
established
:raise BoltConnectionLost: if an I/O error occurs on the
underlying socket connection
:raise BoltHandshakeError: if handshake completes without a
successful negotiation
:raise TypeError: if any of the arguments provided are passed
as incompatible types
:raise ValueError: if any of the arguments provided are passed
with unsupported values
"""
# Args
address = Address(address)
if loop is None:
loop = get_event_loop()
config = PoolConfig.consume(config)
# Connect
reader, writer = await cls._connect(address, loop, config)
try:
# Handshake
subclass = await cls._handshake(reader, writer, config.protocol_version)
# Instantiation
obj = subclass(reader, writer)
obj.secure = bool(config.secure)
assert hasattr(obj, "__ainit__")
await obj.__ainit__(auth)
return obj
except BoltError:
writer.write_eof()
writer.close()
raise
@classmethod
async def _connect(cls, address, loop, config):
""" Attempt to establish a TCP connection to the address
provided.
:param address:
:param loop:
:param config:
:return: a 3-tuple of reader, writer and security settings for
the new connection
:raise BoltConnectionError: if a connection could not be
established
"""
assert isinstance(address, Address)
assert loop is not None
assert isinstance(config, Config)
connection_args = {
"host": address.host,
"port": address.port,
"family": address.family,
# TODO: other args
}
ssl_context = config.get_ssl_context()
if ssl_context:
connection_args["ssl"] = ssl_context
connection_args["server_hostname"] = address.host
log.debug("[#0000] C: <DIAL> %s", address)
try:
reader = BoltStreamReader(loop=loop)
protocol = StreamReaderProtocol(reader, loop=loop)
transport, _ = await loop.create_connection(lambda: protocol, **connection_args)
writer = BoltStreamWriter(transport, protocol, reader, loop)
except SSLError as err:
log.debug("[#%04X] S: <REJECT> %s (%d %s)", 0, address,
err.errno, strerror(err.errno))
raise BoltSecurityError("Failed to establish a secure connection", address) from err
except OSError as err:
log.debug("[#%04X] S: <REJECT> %s (%d %s)", 0, address,
err.errno, strerror(err.errno))
raise BoltConnectionError("Failed to establish a connection", address) from err
else:
local_address = Address(transport.get_extra_info("sockname"))
remote_address = Address(transport.get_extra_info("peername"))
log.debug("[#%04X] S: <ACCEPT> %s -> %s",
local_address.port_number, local_address, remote_address)
return reader, writer
@classmethod
async def _handshake(cls, reader, writer, protocol_version):
""" Carry out a Bolt handshake, optionally requesting a
specific protocol version.
:param reader:
:param writer:
:param protocol_version:
:return:
:raise BoltConnectionLost: if an I/O error occurs on the
underlying socket connection
:raise BoltHandshakeError: if handshake completes without a
successful negotiation
"""
local_address = Address(writer.transport.get_extra_info("sockname"))
remote_address = Address(writer.transport.get_extra_info("peername"))
handlers = cls.protocol_handlers(protocol_version)
if not handlers:
            raise ValueError("No protocol handlers available (requested Bolt %r)" % (protocol_version,))
offered_versions = sorted(handlers.keys(), reverse=True)[:4]
request_data = MAGIC + b"".join(
v.to_bytes() for v in offered_versions).ljust(16, b"\x00")
log.debug("[#%04X] C: <HANDSHAKE> %r", local_address.port_number, request_data)
writer.write(request_data)
await writer.drain()
response_data = await reader.readexactly(4)
log.debug("[#%04X] S: <HANDSHAKE> %r", local_address.port_number, response_data)
try:
agreed_version = Version.from_bytes(response_data)
except ValueError as err:
writer.close()
raise BoltHandshakeError("Unexpected handshake response %r" % response_data,
remote_address, request_data, response_data) from err
try:
subclass = handlers[agreed_version]
except KeyError:
log.debug("Unsupported Bolt protocol version %s", agreed_version)
raise BoltHandshakeError("Unsupported Bolt protocol version",
remote_address, request_data, response_data)
else:
return subclass
def __new__(cls, reader, writer):
obj = super().__new__(cls)
obj.__t_opened = perf_counter()
obj.__reader = reader
obj.__writer = writer
Addressable.set_transport(obj, writer.transport)
return obj
def __repr__(self):
return "<Bolt address=%r protocol_version=%r>" % (self.remote_address,
self.protocol_version)
async def __ainit__(self, auth):
""" Asynchronous initializer for implementation by subclasses.
:param auth:
"""
@property
def age(self):
""" The age of this connection in seconds.
"""
return perf_counter() - self.__t_opened
@property
def broken(self):
""" Flag to indicate whether this connection has been broken
by the network or remote peer.
"""
return self.__reader.broken or self.__writer.broken
@property
def closed(self):
""" Flag to indicate whether this connection has been closed
locally."""
return self.__closed
async def close(self):
""" Close the connection.
"""
if self.closed:
return
if not self.broken:
log.debug("[#%04X] S: <HANGUP>", self.local_address.port_number)
self.__writer.write_eof()
self.__writer.close()
try:
await self.__writer.wait_closed()
except BoltConnectionBroken:
pass
self.__closed = True
async def reset(self, force=False):
""" Reset the connection to a clean state.
By default, a RESET message will only be sent if required, i.e.
if the connection is not already in a clean state. If forced,
this check will be overridden and a RESET will be sent
regardless.
"""
async def run(self, cypher, parameters=None, discard=False, readonly=False,
bookmarks=None, timeout=None, metadata=None):
""" Run an auto-commit transaction.
:param cypher:
:param parameters:
:param discard:
:param readonly:
:param bookmarks:
:param timeout:
:param metadata:
:raise BoltTransactionError: if a transaction cannot be carried
out at this time
"""
async def begin(self, readonly=False, bookmarks=None,
timeout=None, metadata=None):
""" Begin an explicit transaction.
:param readonly:
:param bookmarks:
:param timeout:
:param metadata:
:return:
"""
async def run_tx(self, f, args=None, kwargs=None, readonly=False,
bookmarks=None, timeout=None, metadata=None):
""" Run a transaction function and return the return value from
that function.
"""
async def get_routing_table(self, context=None):
""" Fetch a new routing table.
:param context: the routing context to use for this call
:return: a new RoutingTable instance or None if the given router is
currently unable to provide routing information
:raise ServiceUnavailable: if no writers are available
:raise ProtocolError: if the routing information received is unusable
"""
class BoltStreamReader(Addressable, Breakable, StreamReader):
""" Wrapper for asyncio.streams.StreamReader
"""
def set_transport(self, transport):
Addressable.set_transport(self, transport)
StreamReader.set_transport(self, transport)
async def readuntil(self, separator=b'\n'): # pragma: no cover
assert False # not used by current implementation
async def read(self, n=-1): # pragma: no cover
assert False # not used by current implementation
async def readexactly(self, n):
try:
return await super().readexactly(n)
except IncompleteReadError as err:
message = ("Network read incomplete (received {} of {} "
"bytes)".format(len(err.partial), err.expected))
log.debug("[#%04X] S: <CLOSE>", self.local_address.port_number)
Breakable.set_broken(self)
raise BoltConnectionBroken(message, self.remote_address) from err
except OSError as err:
            log.debug("[#%04X] S: <CLOSE> %d %s", self.local_address.port_number, err.errno, strerror(err.errno))
Breakable.set_broken(self)
raise BoltConnectionBroken("Network read failed", self.remote_address) from err
class BoltStreamWriter(Addressable, Breakable, StreamWriter):
""" Wrapper for asyncio.streams.StreamWriter
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
Addressable.set_transport(self, self.transport)
async def drain(self):
try:
await super().drain()
except OSError as err:
log.debug("[#%04X] S: <CLOSE> (%s)", self.local_address.port_number, err)
Breakable.set_broken(self)
raise BoltConnectionBroken("Network write failed", self.remote_address) from err
async def wait_closed(self):
try:
await super().wait_closed()
except AttributeError: # pragma: no cover
# This is a dirty hack for Python 3.6, which didn't include
# 'wait_closed'. The code polls waiting for the stream
# reader inside the protocol to go away which, by the
# implementation of 3.6, occurs on 'connection_lost'. This
# hack is likely safe unless the implementation of 3.6
# changes in a subsequent patch, and can be removed when
# Python 3.6 support is no longer required.
#
from asyncio import sleep
try:
while self._protocol._stream_reader is not None:
await sleep(0.1)
except AttributeError:
pass
class Pool:
def acquire(self, *, force_reset=False, timeout=None):
raise NotImplementedError
def release(self, *connections, force_reset=False):
raise NotImplementedError
def close(self, *, force=False):
raise NotImplementedError
class BoltPool:
""" A pool of connections to a single address.
:param opener: a function to which an address can be passed that
returns an open and ready Bolt connection
:param address: the remote address for which this pool operates
:param max_size: the maximum permitted number of simultaneous
connections that may be owned by this pool, both in-use and
free
:param max_age: the maximum permitted age, in seconds, for
connections to be retained in this pool
"""
@classmethod
async def open(cls, address, *, auth=None, loop=None, **config):
""" Create a new connection pool, with an option to seed one
or more initial connections.
"""
pool_config = PoolConfig.consume(config)
def opener(addr):
return Bolt.open(addr, auth=auth, loop=loop, **pool_config)
pool = cls(loop, opener, pool_config, address)
seeds = [await pool.acquire() for _ in range(pool_config.init_size)]
for seed in seeds:
await pool.release(seed)
return pool
def __init__(self, loop, opener, config, address):
if loop is None:
self._loop = get_event_loop()
else:
self._loop = loop
self._opener = opener
self._address = Address(address)
self._max_size = config.max_size
self._max_age = config.max_age
self._in_use_list = deque()
self._free_list = deque()
self._waiting_list = WaitingList(loop=self._loop)
def __repr__(self):
return "<{} addr'{}' [{}{}{}]>".format(
self.__class__.__name__,
self.address,
"|" * len(self._in_use_list),
"." * len(self._free_list),
" " * (self.max_size - self.size),
)
def __contains__(self, cx):
return cx in self._in_use_list or cx in self._free_list
def __len__(self):
return self.size
@property
def address(self):
""" The remote address for which this pool operates.
"""
return self._address
@property
def max_size(self):
""" The maximum permitted number of simultaneous connections
that may be owned by this pool, both in-use and free.
"""
return self._max_size
@max_size.setter
def max_size(self, value):
old_value = self._max_size
self._max_size = value
if value > old_value:
# The maximum size has grown, so new slots have become
# available. Notify any waiting acquirers of this extra
# capacity.
self._waiting_list.notify()
@property
def max_age(self):
""" The maximum permitted age, in seconds, for connections to
be retained in this pool.
"""
return self._max_age
@property
def in_use(self):
""" The number of connections in this pool that are currently
in use.
"""
return len(self._in_use_list)
@property
def size(self):
""" The total number of connections (both in-use and free)
currently owned by this connection pool.
"""
return len(self._in_use_list) + len(self._free_list)
async def _sanitize(self, cx, *, force_reset):
""" Attempt to clean up a connection, such that it can be
reused.
If the connection is broken or closed, it can be discarded.
Otherwise, the age of the connection is checked against the
maximum age permitted by this pool, consequently closing it
on expiry.
Should the connection be neither broken, closed nor expired,
it will be reset (optionally forcibly so) and the connection
object will be returned, indicating success.
"""
if cx.broken or cx.closed:
return None
expired = self.max_age is not None and cx.age > self.max_age
if expired:
await cx.close()
return None
await cx.reset(force=force_reset)
return cx
async def acquire(self, *, force_reset=False):
""" Acquire a connection from the pool.
In the simplest case, this will return an existing open
connection, if one is free. If not, and the pool is not full,
a new connection will be created. If the pool is full and no
free connections are available, this will block until a
connection is released, or until the acquire call is cancelled.
:param force_reset: if true, the connection will be forcibly
reset before being returned; if false, this will only occur
if the connection is not already in a clean state
:return: a Bolt connection object
"""
log.debug("Acquiring connection from pool %r", self)
cx = None
while cx is None or cx.broken or cx.closed:
try:
# Plan A: select a free connection from the pool
cx = self._free_list.popleft()
except IndexError:
if self.size < self.max_size:
# Plan B: if the pool isn't full, open
# a new connection
cx = await self._opener(self.address)
else:
# Plan C: wait for more capacity to become
# available, then try again
log.debug("Joining waiting list")
await self._waiting_list.join()
else:
cx = await self._sanitize(cx, force_reset=force_reset)
self._in_use_list.append(cx)
return cx
async def release(self, cx, *, force_reset=False):
""" Release a Bolt connection, putting it back into the pool
if the connection is healthy and the pool is not already at
capacity.
:param cx: the connection to release
:param force_reset: if true, the connection will be forcibly
reset before being released back into the pool; if false,
this will only occur if the connection is not already in a
clean state
:raise ValueError: if the connection is not currently in use,
or if it does not belong to this pool
"""
log.debug("Releasing connection %r", cx)
if cx in self._in_use_list:
self._in_use_list.remove(cx)
if self.size < self.max_size:
# If there is spare capacity in the pool, attempt to
# sanitize the connection and return it to the pool.
cx = await self._sanitize(cx, force_reset=force_reset)
if cx:
# Carry on only if sanitation succeeded.
if self.size < self.max_size:
# Check again if there is still capacity.
self._free_list.append(cx)
self._waiting_list.notify()
else:
# Otherwise, close the connection.
await cx.close()
else:
# If the pool is full, simply close the connection.
await cx.close()
elif cx in self._free_list:
raise ValueError("Connection is not in use")
else:
raise ValueError("Connection does not belong to this pool")
async def prune(self):
""" Close all free connections.
"""
await self.__close(self._free_list)
async def close(self):
""" Close all connections immediately.
This does not permanently disable the connection pool, it
merely shuts down all open connections, including those in
        use. Depending on the application, it may be perfectly
        acceptable to re-acquire connections after pool closure,
        which will have the implicit effect of reopening the pool.
To close gracefully, allowing work in progress to continue
until connections are released, use the following sequence
instead:
pool.max_size = 0
pool.prune()
This will force all future connection acquisitions onto the
waiting list, and released connections will be closed instead
of being returned to the pool.
"""
await self.prune()
await self.__close(self._in_use_list)
async def __close(self, connections):
""" Close all connections in the given list.
"""
closers = deque()
while True:
try:
cx = connections.popleft()
except IndexError:
break
else:
closers.append(cx.close())
if closers:
await wait(closers, loop=self._loop)
class Neo4jPool:
""" Connection pool with routing table.
"""
@classmethod
async def open(cls, *addresses, auth=None, routing_context=None, loop=None, **config):
pool_config = PoolConfig.consume(config)
def opener(addr):
return Bolt.open(addr, auth=auth, **pool_config)
obj = cls(loop, opener, config, addresses, routing_context)
# TODO: get initial routing table and construct
await obj._ensure_routing_table_is_fresh()
return obj
def __init__(self, loop, opener, config, addresses, routing_context):
if loop is None:
self._loop = get_event_loop()
else:
self._loop = loop
self._opener = opener
self._config = config
self._pools = {}
self._missing_writer = False
self._refresh_lock = Lock(loop=self._loop)
self._routing_context = routing_context
self._max_size_per_host = config.max_size
self._initial_routers = addresses
self._routing_table = RoutingTable(addresses)
self._activate_new_pools_in(self._routing_table)
def _activate_new_pools_in(self, routing_table):
""" Add pools for addresses that exist in the given routing
table but which don't already have pools.
"""
for address in routing_table.servers():
if address not in self._pools:
self._pools[address] = BoltPool(self._loop, self._opener, self._config, address)
async def _deactivate_pools_not_in(self, routing_table):
""" Deactivate any pools that aren't represented in the given
routing table.
"""
for address in self._pools:
if address not in routing_table:
await self._deactivate(address)
async def _get_routing_table_from(self, *routers):
""" Try to update routing tables with the given routers.
:return: True if the routing table is successfully updated,
otherwise False
"""
log.debug("Attempting to update routing table from "
"{}".format(", ".join(map(repr, routers))))
for router in routers:
pool = self._pools[router]
cx = await pool.acquire()
try:
new_routing_table = await cx.get_routing_table(self._routing_context)
except BoltError:
await self._deactivate(router)
else:
num_routers = len(new_routing_table.routers)
num_readers = len(new_routing_table.readers)
num_writers = len(new_routing_table.writers)
# No writers are available. This likely indicates a temporary state,
# such as leader switching, so we should not signal an error.
# When no writers available, then we flag we are reading in absence of writer
self._missing_writer = (num_writers == 0)
# No routers
if num_routers == 0:
continue
# No readers
if num_readers == 0:
continue
log.debug("Successfully updated routing table from "
"{!r} ({!r})".format(router, self._routing_table))
return new_routing_table
finally:
await pool.release(cx)
return None
async def _get_routing_table(self):
""" Update the routing table from the first router able to provide
valid routing information.
"""
# copied because it can be modified
existing_routers = list(self._routing_table.routers)
has_tried_initial_routers = False
if self._missing_writer:
has_tried_initial_routers = True
            rt = await self._get_routing_table_from(*self._initial_routers)
if rt:
return rt
rt = await self._get_routing_table_from(*existing_routers)
if rt:
return rt
if not has_tried_initial_routers and self._initial_routers not in existing_routers:
            rt = await self._get_routing_table_from(*self._initial_routers)
if rt:
return rt
# None of the routers have been successful, so just fail
log.error("Unable to retrieve routing information")
raise Neo4jAvailabilityError("Unable to retrieve routing information")
async def _ensure_routing_table_is_fresh(self, readonly=False):
""" Update the routing table if stale.
This method performs two freshness checks, before and after acquiring
the refresh lock. If the routing table is already fresh on entry, the
method exits immediately; otherwise, the refresh lock is acquired and
the second freshness check that follows determines whether an update
is still required.
"""
if self._routing_table.is_fresh(readonly=readonly):
return
async with self._refresh_lock:
if self._routing_table.is_fresh(readonly=readonly):
if readonly:
# if reader is fresh but writers are not, then
# we are reading in absence of writer
self._missing_writer = not self._routing_table.is_fresh(readonly=False)
else:
rt = await self._get_routing_table()
self._activate_new_pools_in(rt)
self._routing_table.update(rt)
await self._deactivate_pools_not_in(rt)
async def _select_pool(self, readonly=False):
""" Selects the pool with the fewest in-use connections.
"""
await self._ensure_routing_table_is_fresh(readonly=readonly)
if readonly:
addresses = self._routing_table.readers
else:
addresses = self._routing_table.writers
pools = [pool for address, pool in self._pools.items() if address in addresses]
pools_by_usage = {}
for pool in pools:
pools_by_usage.setdefault(pool.in_use, []).append(pool)
if not pools_by_usage:
raise Neo4jAvailabilityError("No {} service currently "
"available".format("read" if readonly else "write"))
return choice(pools_by_usage[min(pools_by_usage)])
async def acquire(self, *, readonly=False, force_reset=False):
""" Acquire a connection to a server that can satisfy a set of parameters.
:param readonly: true if a readonly connection is required,
otherwise false
:param force_reset:
"""
while True:
pool = await self._select_pool(readonly=readonly)
try:
cx = await pool.acquire(force_reset=force_reset)
except BoltError:
await self._deactivate(pool.address)
else:
if not readonly:
# If we're not acquiring a connection as
# readonly, then intercept NotALeader and
# ForbiddenOnReadOnlyDatabase errors to
# invalidate the routing table.
from neo4j.errors import (
NotALeader,
ForbiddenOnReadOnlyDatabase,
)
def handler(failure):
""" Invalidate the routing table before raising the failure.
"""
log.debug("[#0000] C: <ROUTING> Invalidating routing table")
self._routing_table.ttl = 0
raise failure
cx.set_failure_handler(NotALeader, handler)
cx.set_failure_handler(ForbiddenOnReadOnlyDatabase, handler)
return cx
async def release(self, connection, *, force_reset=False):
""" Release a connection back into the pool.
This method is thread safe.
"""
for pool in self._pools.values():
try:
await pool.release(connection, force_reset=force_reset)
except ValueError:
pass
else:
# Unhook any custom error handling and exit.
from neo4j.errors import (
NotALeader,
ForbiddenOnReadOnlyDatabase,
)
connection.del_failure_handler(NotALeader)
connection.del_failure_handler(ForbiddenOnReadOnlyDatabase)
break
else:
raise ValueError("Connection does not belong to this pool")
async def _deactivate(self, address):
""" Deactivate an address from the connection pool,
        if present, remove it from the routing table, and close
        all idle connections to that address.
"""
log.debug("[#0000] C: <ROUTING> Deactivating address %r", address)
# We use `discard` instead of `remove` here since the former
# will not fail if the address has already been removed.
self._routing_table.routers.discard(address)
self._routing_table.readers.discard(address)
self._routing_table.writers.discard(address)
log.debug("[#0000] C: <ROUTING> table=%r", self._routing_table)
try:
pool = self._pools.pop(address)
except KeyError:
pass # assume the address has already been removed
else:
pool.max_size = 0
await pool.prune()
async def close(self, force=False):
""" Close all connections and empty the pool. If forced, in-use
connections will be closed immediately; if not, they will
remain open until released.
"""
pools = dict(self._pools)
self._pools.clear()
for address, pool in pools.items():
if force:
await pool.close()
else:
pool.max_size = 0
await pool.prune()
class Neo4j:
# The default router address list to use if no addresses are specified.
default_router_addresses = Address.parse_list(":7687 :17601 :17687")
# TODO
# @classmethod
# async def open(cls, *addresses, auth=None, security=False, protocol_version=None, loop=None):
# opener = Bolt.opener(auth=auth, security=security, protocol_version=protocol_version)
# router_addresses = Address.parse_list(" ".join(addresses), default_port=7687)
# return cls(opener, router_addresses, loop=loop)
#
# def __init__(self, opener, router_addresses, loop=None):
# self._routers = Neo4jPool(opener, router_addresses or self.default_router_addresses)
# self._writers = Neo4jPool(opener)
# self._readers = Neo4jPool(opener)
# self._routing_table = None
#
# @property
# def routing_table(self):
# return self._routing_table
#
# async def update_routing_table(self):
# cx = await self._routers.acquire()
# try:
# result = await cx.run("CALL dbms.cluster.routing.getRoutingTable($context)", {"context": {}})
# record = await result.single()
# self._routing_table = RoutingTable.parse_routing_info([record]) # TODO: handle ValueError?
# return self._routing_table
# finally:
# self._routers.release(cx)
# async def main():
# from neo4j.debug import watch; watch("neo4j")
# neo4j = await Neo4j.open(":17601 :17602 :17603", auth=("neo4j", "password"))
# await neo4j.update_routing_table()
# print(neo4j.routing_table)
#
#
# if __name__ == "__main__":
# run(main())
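# ---------------------------------------------------------------------------
# Editor's note: a hedged usage sketch for BoltPool, added in the same spirit
# as the commented example above; it is not part of the original driver. The
# address and credentials are placeholders and a reachable Bolt server is
# assumed.
#
# async def example_pool_usage():
#     pool = await BoltPool.open(("localhost", 7687), auth=("neo4j", "password"),
#                                max_size=4)
#     cx = await pool.acquire()
#     try:
#         await cx.reset()
#     finally:
#         await pool.release(cx)
#     await pool.close()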
| 1.84375 | 2 |
python/setup.py | bubriks/feature-store-api | 49 | 6592 | import os
import imp
from setuptools import setup, find_packages
__version__ = imp.load_source(
"hsfs.version", os.path.join("hsfs", "version.py")
).__version__
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name="hsfs",
version=__version__,
install_requires=[
"pyhumps==1.6.1",
"requests",
"furl",
"boto3",
"pandas",
"numpy",
"pyjks",
"mock",
"avro==1.10.2",
"sqlalchemy",
"PyMySQL",
],
extras_require={
"dev": [
"pytest",
"flake8",
"black"],
"docs": [
"mkdocs==1.1.2",
"mkdocs-material==6.2.2",
"mike==0.5.5",
"sphinx==3.5.4",
"keras_autodoc @ git+https://[email protected]/moritzmeister/keras-autodoc@split-tags-properties",
"markdown-include"],
"hive": ["pyhopshive[thrift]"]
},
author="Logical Clocks AB",
author_email="<EMAIL>",
description="HSFS: An environment independent client to interact with the Hopsworks Featurestore",
license="Apache License 2.0",
keywords="Hopsworks, Feature Store, Spark, Machine Learning, MLOps, DataOps",
url="https://github.com/logicalclocks/feature-store-api",
download_url="https://github.com/logicalclocks/feature-store-api/releases/tag/"
+ __version__,
packages=find_packages(),
long_description=read("../README.md"),
long_description_content_type="text/markdown",
classifiers=[
"Development Status :: 5 - Production/Stable",
"Topic :: Utilities",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python :: 3",
"Intended Audience :: Developers",
],
)
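# ---------------------------------------------------------------------------
# Editor's note (hedged): given the extras declared above, a development
# install is typically done with `pip install -e ".[dev,docs]"`, and the Hive
# client can be pulled in with `pip install "hsfs[hive]"`. Exact commands
# depend on the environment and are shown here only as illustration.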
| 1.546875 | 2 |
src/server_py3/aps/src/wes/api/v1/users/__init__.py | kfrime/yonder | 0 | 6593 | #!/usr/bin/env python3
from . import signup, signin, signout, update, info, detail
| 1.0625 | 1 |
hubconf.py | jamesmcclain/pytorch-multi-class-focal-loss | 81 | 6594 | <reponame>jamesmcclain/pytorch-multi-class-focal-loss
# Optional list of dependencies required by the package
dependencies = ['torch']
from focal_loss import FocalLoss, focal_loss
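# ---------------------------------------------------------------------------
# Editor's note: a hedged sketch of how these hub entrypoints are typically
# consumed via torch.hub; it is not part of the original hubconf. The repo
# string mirrors this file's repository and the keyword arguments passed to
# the entrypoint are assumptions, not a documented signature.
#
# import torch
# criterion = torch.hub.load(
#     'jamesmcclain/pytorch-multi-class-focal-loss', 'focal_loss',
#     alpha=[0.25, 0.75], gamma=2.0, reduction='mean')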
| 1.539063 | 2 |
autotest/ogr/ogr_gpx.py | HongqiangWei/gdal | 3 | 6595 | <reponame>HongqiangWei/gdal
#!/usr/bin/env python
###############################################################################
# $Id$
#
# Project: GDAL/OGR Test Suite
# Purpose: Test GPX driver functionality.
# Author: <NAME> <even dot rouault at mines dash paris dot org>
#
###############################################################################
# Copyright (c) 2007, <NAME> <even dot rouault at mines dash paris dot org>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###############################################################################
import os
import sys
import string
sys.path.append( '../pymod' )
import gdaltest
import ogrtest
import ogr
import osr
import gdal
def ogr_gpx_init():
gdaltest.gpx_ds = None
try:
gdaltest.gpx_ds = ogr.Open( 'data/test.gpx' )
except:
gdaltest.gpx_ds = None
if gdaltest.gpx_ds is None:
gdaltest.have_gpx = 0
else:
gdaltest.have_gpx = 1
if not gdaltest.have_gpx:
return 'skip'
if gdaltest.gpx_ds.GetLayerCount() != 5:
gdaltest.post_reason( 'wrong number of layers' )
return 'fail'
return 'success'
###############################################################################
# Test waypoints gpx layer.
def ogr_gpx_1():
if not gdaltest.have_gpx:
return 'skip'
if gdaltest.gpx_ds is None:
return 'fail'
lyr = gdaltest.gpx_ds.GetLayerByName( 'waypoints' )
expect = [2, None]
tr = ogrtest.check_features_against_list( lyr, 'ele', expect )
if not tr:
return 'fail'
lyr.ResetReading()
expect = ['waypoint name', None]
tr = ogrtest.check_features_against_list( lyr, 'name', expect )
if not tr:
return 'fail'
lyr.ResetReading()
expect = ['href', None]
tr = ogrtest.check_features_against_list( lyr, 'link1_href', expect )
if not tr:
return 'fail'
lyr.ResetReading()
expect = ['text', None]
tr = ogrtest.check_features_against_list( lyr, 'link1_text', expect )
if not tr:
return 'fail'
lyr.ResetReading()
expect = ['type', None]
tr = ogrtest.check_features_against_list( lyr, 'link1_type', expect )
if not tr:
return 'fail'
lyr.ResetReading()
expect = ['href2', None]
tr = ogrtest.check_features_against_list( lyr, 'link2_href', expect )
if not tr:
return 'fail'
lyr.ResetReading()
expect = ['text2', None]
tr = ogrtest.check_features_against_list( lyr, 'link2_text', expect )
if not tr:
return 'fail'
lyr.ResetReading()
expect = ['type2', None]
tr = ogrtest.check_features_against_list( lyr, 'link2_type', expect )
if not tr:
return 'fail'
lyr.ResetReading()
expect = ['2007/11/25 17:58:00+01', None]
tr = ogrtest.check_features_against_list( lyr, 'time', expect )
if not tr:
return 'fail'
lyr.ResetReading()
feat = lyr.GetNextFeature()
if ogrtest.check_feature_geometry( feat, 'POINT (1 0)',
max_error = 0.0001 ) != 0:
return 'fail'
feat.Destroy()
feat = lyr.GetNextFeature()
if ogrtest.check_feature_geometry( feat, 'POINT (4 3)',
max_error = 0.0001 ) != 0:
return 'fail'
feat.Destroy()
return 'success'
###############################################################################
# Test routes gpx layer.
def ogr_gpx_2():
if not gdaltest.have_gpx:
return 'skip'
if gdaltest.gpx_ds is None:
return 'fail'
lyr = gdaltest.gpx_ds.GetLayerByName( 'routes' )
lyr.ResetReading()
feat = lyr.GetNextFeature()
if ogrtest.check_feature_geometry( feat, 'LINESTRING (6 5,9 8,12 11)', max_error = 0.0001 ) != 0:
return 'fail'
feat.Destroy()
feat = lyr.GetNextFeature()
if ogrtest.check_feature_geometry( feat, 'LINESTRING EMPTY', max_error = 0.0001 ) != 0:
return 'fail'
feat.Destroy()
return 'success'
###############################################################################
# Test route_points gpx layer.
def ogr_gpx_3():
if not gdaltest.have_gpx:
return 'skip'
if gdaltest.gpx_ds is None:
return 'fail'
lyr = gdaltest.gpx_ds.GetLayerByName( 'route_points' )
expect = ['route point name', None, None]
tr = ogrtest.check_features_against_list( lyr, 'name', expect )
lyr.ResetReading()
feat = lyr.GetNextFeature()
if ogrtest.check_feature_geometry( feat, 'POINT (6 5)', max_error = 0.0001 ) != 0:
return 'fail'
feat.Destroy()
return 'success'
###############################################################################
# Test tracks gpx layer.
def ogr_gpx_4():
if not gdaltest.have_gpx:
return 'skip'
if gdaltest.gpx_ds is None:
return 'fail'
lyr = gdaltest.gpx_ds.GetLayerByName( 'tracks' )
lyr.ResetReading()
feat = lyr.GetNextFeature()
if ogrtest.check_feature_geometry( feat, 'MULTILINESTRING ((15 14,18 17),(21 20,24 23))', max_error = 0.0001 ) != 0:
return 'fail'
feat.Destroy()
feat = lyr.GetNextFeature()
if ogrtest.check_feature_geometry( feat, 'MULTILINESTRING EMPTY', max_error = 0.0001 ) != 0:
return 'fail'
feat.Destroy()
feat = lyr.GetNextFeature()
f_geom = feat.GetGeometryRef()
    if f_geom.ExportToWkt() != 'MULTILINESTRING EMPTY':
return 'fail'
feat.Destroy()
return 'success'
###############################################################################
# Test track_points gpx layer.
def ogr_gpx_5():
if not gdaltest.have_gpx:
return 'skip'
if gdaltest.gpx_ds is None:
return 'fail'
lyr = gdaltest.gpx_ds.GetLayerByName( 'track_points' )
expect = ['track point name', None, None, None]
tr = ogrtest.check_features_against_list( lyr, 'name', expect )
lyr.ResetReading()
feat = lyr.GetNextFeature()
if ogrtest.check_feature_geometry( feat, 'POINT (15 14)', max_error = 0.0001 ) != 0:
return 'fail'
feat.Destroy()
return 'success'
###############################################################################
# Copy our small gpx file to a new gpx file.
def ogr_gpx_6():
if not gdaltest.have_gpx:
return 'skip'
if gdaltest.gpx_ds is None:
return 'skip'
try:
gdal.PushErrorHandler( 'CPLQuietErrorHandler' )
ogr.GetDriverByName('CSV').DeleteDataSource( 'tmp/gpx.gpx' )
gdal.PopErrorHandler()
except:
pass
co_opts = [ ]
# Duplicate waypoints
gpx_lyr = gdaltest.gpx_ds.GetLayerByName( 'waypoints' )
gpx2_ds = ogr.GetDriverByName('GPX').CreateDataSource('tmp/gpx.gpx',
options = co_opts )
gpx2_lyr = gpx2_ds.CreateLayer( 'waypoints', geom_type = ogr.wkbPoint )
gpx_lyr.ResetReading()
dst_feat = ogr.Feature( feature_def = gpx2_lyr.GetLayerDefn() )
feat = gpx_lyr.GetNextFeature()
while feat is not None:
dst_feat.SetFrom( feat )
if gpx2_lyr.CreateFeature( dst_feat ) != 0:
gdaltest.post_reason('CreateFeature failed.')
return 'fail'
feat = gpx_lyr.GetNextFeature()
dst_feat.Destroy()
# Duplicate routes
gpx_lyr = gdaltest.gpx_ds.GetLayerByName( 'routes' )
gpx2_lyr = gpx2_ds.CreateLayer( 'routes', geom_type = ogr.wkbLineString )
gpx_lyr.ResetReading()
dst_feat = ogr.Feature( feature_def = gpx2_lyr.GetLayerDefn() )
feat = gpx_lyr.GetNextFeature()
while feat is not None:
dst_feat.SetFrom( feat )
if gpx2_lyr.CreateFeature( dst_feat ) != 0:
gdaltest.post_reason('CreateFeature failed.')
return 'fail'
feat = gpx_lyr.GetNextFeature()
dst_feat.Destroy()
# Duplicate tracks
gpx_lyr = gdaltest.gpx_ds.GetLayerByName( 'tracks' )
gpx2_lyr = gpx2_ds.CreateLayer( 'tracks', geom_type = ogr.wkbMultiLineString )
gpx_lyr.ResetReading()
dst_feat = ogr.Feature( feature_def = gpx2_lyr.GetLayerDefn() )
feat = gpx_lyr.GetNextFeature()
while feat is not None:
dst_feat.SetFrom( feat )
if gpx2_lyr.CreateFeature( dst_feat ) != 0:
gdaltest.post_reason('CreateFeature failed.')
return 'fail'
feat = gpx_lyr.GetNextFeature()
dst_feat.Destroy()
gpx_lyr = None
gpx2_lyr = None
# Explicit destroy is required for old-gen python bindings
gpx2_ds.Destroy()
gdaltest.gpx_ds.Destroy()
gdaltest.gpx_ds = ogr.Open( 'tmp/gpx.gpx' )
return 'success'
###############################################################################
# Output extra fields as <extensions>.
def ogr_gpx_7():
if not gdaltest.have_gpx:
return 'skip'
if gdaltest.gpx_ds is not None:
gdaltest.gpx_ds.Destroy()
gdaltest.gpx_ds = None
bna_ds = ogr.Open( 'data/bna_for_gpx.bna' )
try:
os.remove ('tmp/gpx.gpx')
except:
pass
co_opts = [ 'GPX_USE_EXTENSIONS=yes' ]
# Duplicate waypoints
bna_lyr = bna_ds.GetLayerByName( 'bna_for_gpx_points' )
gdaltest.gpx_ds = ogr.GetDriverByName('GPX').CreateDataSource('tmp/gpx.gpx',
options = co_opts )
gpx_lyr = gdaltest.gpx_ds.CreateLayer( 'waypoints', geom_type = ogr.wkbPoint )
bna_lyr.ResetReading()
for i in range(bna_lyr.GetLayerDefn().GetFieldCount()):
field_defn = bna_lyr.GetLayerDefn().GetFieldDefn(i)
gpx_lyr.CreateField( field_defn )
dst_feat = ogr.Feature( feature_def = gpx_lyr.GetLayerDefn() )
feat = bna_lyr.GetNextFeature()
while feat is not None:
dst_feat.SetFrom( feat )
if gpx_lyr.CreateFeature( dst_feat ) != 0:
gdaltest.post_reason('CreateFeature failed.')
return 'fail'
feat = bna_lyr.GetNextFeature()
dst_feat.Destroy()
bna_ds.Destroy()
gdaltest.gpx_ds.Destroy()
gdaltest.gpx_ds = None
#Now check that the extensions fields have been well written
gdaltest.gpx_ds = ogr.Open('tmp/gpx.gpx')
gpx_lyr = gdaltest.gpx_ds.GetLayerByName( 'waypoints' )
expect = ['PID1', 'PID2']
tr = ogrtest.check_features_against_list( gpx_lyr, 'ogr_Primary_ID', expect )
if not tr:
return 'fail'
gpx_lyr.ResetReading()
expect = ['SID1', 'SID2']
tr = ogrtest.check_features_against_list( gpx_lyr, 'ogr_Secondary_ID', expect )
if not tr:
return 'fail'
gpx_lyr.ResetReading()
expect = ['TID1', None]
tr = ogrtest.check_features_against_list( gpx_lyr, 'ogr_Third_ID', expect )
if not tr:
return 'fail'
return 'success'
###############################################################################
# Write route_points and track_points layers and compare against a reference file.
def ogr_gpx_8():
if not gdaltest.have_gpx:
return 'skip'
if gdaltest.gpx_ds is not None:
gdaltest.gpx_ds.Destroy()
gdaltest.gpx_ds = None
try:
os.remove ('tmp/gpx.gpx')
except:
pass
gdaltest.gpx_ds = ogr.GetDriverByName('GPX').CreateDataSource('tmp/gpx.gpx', options = ['LINEFORMAT=LF'])
lyr = gdaltest.gpx_ds.CreateLayer( 'route_points', geom_type = ogr.wkbPoint )
feat = ogr.Feature(lyr.GetLayerDefn())
geom = ogr.CreateGeometryFromWkt('POINT(2 49)')
feat.SetField('route_name', 'ROUTE_NAME')
feat.SetField('route_fid', 0)
feat.SetGeometry(geom)
lyr.CreateFeature(feat)
feat = ogr.Feature(lyr.GetLayerDefn())
geom = ogr.CreateGeometryFromWkt('POINT(3 50)')
feat.SetField('route_name', '--ignored--')
feat.SetField('route_fid', 0)
feat.SetGeometry(geom)
lyr.CreateFeature(feat)
feat = ogr.Feature(lyr.GetLayerDefn())
geom = ogr.CreateGeometryFromWkt('POINT(3 51)')
feat.SetField('route_name', 'ROUTE_NAME2')
feat.SetField('route_fid', 1)
feat.SetGeometry(geom)
lyr.CreateFeature(feat)
feat = ogr.Feature(lyr.GetLayerDefn())
geom = ogr.CreateGeometryFromWkt('POINT(3 49)')
feat.SetField('route_fid', 1)
feat.SetGeometry(geom)
lyr.CreateFeature(feat)
lyr = gdaltest.gpx_ds.CreateLayer( 'track_points', geom_type = ogr.wkbPoint )
feat = ogr.Feature(lyr.GetLayerDefn())
geom = ogr.CreateGeometryFromWkt('POINT(2 49)')
feat.SetField('track_name', 'TRACK_NAME')
feat.SetField('track_fid', 0)
feat.SetField('track_seg_id', 0)
feat.SetGeometry(geom)
lyr.CreateFeature(feat)
feat = ogr.Feature(lyr.GetLayerDefn())
geom = ogr.CreateGeometryFromWkt('POINT(3 50)')
feat.SetField('track_name', '--ignored--')
feat.SetField('track_fid', 0)
feat.SetField('track_seg_id', 0)
feat.SetGeometry(geom)
lyr.CreateFeature(feat)
feat = ogr.Feature(lyr.GetLayerDefn())
geom = ogr.CreateGeometryFromWkt('POINT(3 51)')
feat.SetField('track_fid', 0)
feat.SetField('track_seg_id', 1)
feat.SetGeometry(geom)
lyr.CreateFeature(feat)
feat = ogr.Feature(lyr.GetLayerDefn())
geom = ogr.CreateGeometryFromWkt('POINT(3 49)')
feat.SetField('track_name', 'TRACK_NAME2')
feat.SetField('track_fid', 1)
feat.SetField('track_seg_id', 0)
feat.SetGeometry(geom)
lyr.CreateFeature(feat)
gdaltest.gpx_ds.Destroy()
gdaltest.gpx_ds = None
f = open('tmp/gpx.gpx','rb')
f_ref = open('data/ogr_gpx_8_ref.txt','rb')
f_content = f.read()
f_ref_content = f_ref.read()
f.close()
f_ref.close()
if f_content.find(f_ref_content) == -1:
gdaltest.post_reason('did not get expected result')
print(f_content)
return 'fail'
return 'success'
###############################################################################
#
def ogr_gpx_cleanup():
if gdaltest.gpx_ds is not None:
gdaltest.gpx_ds.Destroy()
gdaltest.gpx_ds = None
try:
os.remove ('tmp/gpx.gpx')
except:
pass
return 'success'
gdaltest_list = [
ogr_gpx_init,
ogr_gpx_1,
ogr_gpx_2,
ogr_gpx_3,
ogr_gpx_4,
ogr_gpx_5,
ogr_gpx_6,
    # Rerun test 1, 2 and 4 with generated tmp/gpx.gpx
ogr_gpx_1,
ogr_gpx_2,
ogr_gpx_4,
ogr_gpx_7,
ogr_gpx_8,
ogr_gpx_cleanup ]
if __name__ == '__main__':
gdaltest.setup_run( 'ogr_gpx' )
gdaltest.run_tests( gdaltest_list )
gdaltest.summarize()
| 1.515625 | 2 |
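The GPX tests above repeat one read pattern: open the file, fetch one of the five fixed layers (waypoints, routes, route_points, tracks, track_points), then walk the features and check fields and geometry. Below is a condensed sketch of that pattern outside the gdaltest harness, using the same old-generation ogr bindings as the test; the input path is a placeholder.

# Minimal read sketch mirroring the pattern exercised by the tests above.
# 'data/test.gpx' is a placeholder; any GPX file with a waypoints layer works.
import ogr

ds = ogr.Open('data/test.gpx')
if ds is None:
    raise RuntimeError('could not open GPX file (driver missing or bad path)')

lyr = ds.GetLayerByName('waypoints')
lyr.ResetReading()
feat = lyr.GetNextFeature()
while feat is not None:
    geom = feat.GetGeometryRef()
    print(feat.GetField('name'), geom.ExportToWkt() if geom is not None else None)
    feat.Destroy()
    feat = lyr.GetNextFeature()

# Explicit destroy, as in the tests, for old-gen python bindings
ds.Destroy()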
mwp_solver/models/sausolver.py | max-stack/MWP-SS-Metrics | 0 | 6596 | # Code Taken from https://github.com/LYH-YF/MWPToolkit
# -*- encoding: utf-8 -*-
# @Author: <NAME>
# @Time: 2021/08/21 04:59:55
# @File: sausolver.py
import random
import torch
from torch import nn
import copy
from module.Encoder.rnn_encoder import BasicRNNEncoder
from module.Embedder.basic_embedder import BasicEmbedder
from module.Decoder.tree_decoder import SARTreeDecoder
from module.Layer.tree_layers import NodeGenerater, SubTreeMerger, TreeNode, TreeEmbedding
from module.Layer.tree_layers import Prediction, GenerateNode, Merge, SemanticAlignmentModule
from module.Strategy.beam_search import TreeBeam
from loss.masked_cross_entropy_loss import MaskedCrossEntropyLoss, masked_cross_entropy
from loss.mse_loss import MSELoss
from utils.utils import copy_list
from utils.enum_type import NumMask, SpecialTokens
class SAUSolver(nn.Module):
"""
Reference:
Qin et al. "Semantically-Aligned Universal Tree-Structured Solver for Math Word Problems" in EMNLP 2020.
"""
def __init__(self, config, dataset):
super(SAUSolver, self).__init__()
# parameter
self.hidden_size = config["hidden_size"]
self.device = config["device"]
self.USE_CUDA = True if self.device == torch.device('cuda') else False
self.beam_size = config['beam_size']
self.max_out_len = config['max_output_len']
self.embedding_size = config["embedding_size"]
self.dropout_ratio = config["dropout_ratio"]
self.num_layers = config["num_layers"]
self.rnn_cell_type = config["rnn_cell_type"]
self.loss_weight = config['loss_weight']
self.vocab_size = len(dataset.in_idx2word)
self.out_symbol2idx = dataset.out_symbol2idx
self.out_idx2symbol = dataset.out_idx2symbol
generate_list = dataset.generate_list
self.generate_nums = [self.out_symbol2idx[symbol] for symbol in generate_list]
self.mask_list = NumMask.number
self.num_start = dataset.num_start
self.operator_nums = dataset.operator_nums
self.generate_size = len(generate_list)
self.unk_token = self.out_symbol2idx[SpecialTokens.UNK_TOKEN]
try:
self.out_sos_token = self.out_symbol2idx[SpecialTokens.SOS_TOKEN]
except:
self.out_sos_token = None
try:
self.out_eos_token = self.out_symbol2idx[SpecialTokens.EOS_TOKEN]
except:
self.out_eos_token = None
try:
self.out_pad_token = self.out_symbol2idx[SpecialTokens.PAD_TOKEN]
except:
self.out_pad_token = None
# module
self.embedder = BasicEmbedder(self.vocab_size, self.embedding_size, self.dropout_ratio)
# self.t_encoder = BasicRNNEncoder(self.embedding_size, self.hidden_size, self.num_layers, self.rnn_cell_type, self.dropout_ratio)
self.encoder = BasicRNNEncoder(self.embedding_size, self.hidden_size, self.num_layers, self.rnn_cell_type,
self.dropout_ratio, batch_first=False)
#self.decoder = SARTreeDecoder(self.hidden_size, self.operator_nums, self.generate_size, self.dropout_ratio)
self.decoder = Prediction(self.hidden_size,self.operator_nums,self.generate_size,self.dropout_ratio)
self.node_generater = GenerateNode(self.hidden_size, self.operator_nums, self.embedding_size,
self.dropout_ratio)
self.merge = Merge(self.hidden_size, self.embedding_size, self.dropout_ratio)
self.sa = SemanticAlignmentModule(self.hidden_size,self.hidden_size,self.hidden_size)
self.loss1 = MaskedCrossEntropyLoss()
#
def calculate_loss(self, batch_data:dict) -> float:
"""Finish forward-propagating, calculating loss and back-propagation.
:param batch_data: one batch data.
:return: loss value.
batch_data should include keywords 'question', 'ques len', 'equation', 'equ len',
'num stack', 'num size', 'num pos'
"""
seq = torch.tensor(batch_data["question"]).to(self.device)
seq_length = torch.tensor(batch_data["ques len"]).long()
target = torch.tensor(batch_data["equation"]).to(self.device)
target_length = torch.LongTensor(batch_data["equ len"]).to(self.device)
nums_stack = copy.deepcopy(batch_data["num stack"])
num_size = batch_data["num size"]
num_pos = batch_data["num pos"]
generate_nums = self.generate_nums
num_start = self.num_start
# sequence mask for attention
unk = self.unk_token
loss = self.train_tree(seq, seq_length, target, target_length, nums_stack, num_size, generate_nums, num_pos, unk, num_start)
return loss
def model_test(self, batch_data:dict) -> tuple:
"""Model test.
:param batch_data: one batch data.
:return: predicted equation, target equation.
batch_data should include keywords 'question', 'ques len', 'equation',
'num stack', 'num pos', 'num list'
"""
seq = torch.tensor(batch_data["question"]).to(self.device)
seq_length = torch.tensor(batch_data["ques len"]).long()
target = torch.tensor(batch_data["equation"]).to(self.device)
nums_stack = copy.deepcopy(batch_data["num stack"])
num_pos = batch_data["num pos"]
num_list = batch_data['num list']
generate_nums = self.generate_nums
num_start = self.num_start
# sequence mask for attention
all_node_output = self.evaluate_tree(seq, seq_length, generate_nums, num_pos, num_start, self.beam_size,
self.max_out_len)
all_output = self.convert_idx2symbol(all_node_output, num_list[0], copy_list(nums_stack[0]))
targets = self.convert_idx2symbol(target[0], num_list[0], copy_list(nums_stack[0]))
return all_output, targets
def train_tree(self,input_batch, input_length, target_batch, target_length, nums_stack_batch, num_size_batch, generate_nums, num_pos, unk, num_start,
english=False,var_nums=[], batch_first=False):
# sequence mask for attention
seq_mask = []
max_len = max(input_length)
for i in input_length:
seq_mask.append([0 for _ in range(i)] + [1 for _ in range(i, max_len)])
seq_mask = torch.ByteTensor(seq_mask)
num_mask = []
        max_num_size = max(num_size_batch) + len(generate_nums) + len(var_nums)  # max count of number positions + constant numbers + unknown variables
for i in num_size_batch:
d = i + len(generate_nums) + len(var_nums)
num_mask.append([0] * d + [1] * (max_num_size - d))
        num_mask = torch.ByteTensor(num_mask)  # mask irrelevant numbers to avoid generating invalid Nx tokens
#unk = output_lang.word2index["UNK"]
# Turn padded arrays into (batch_size x max_len) tensors, transpose into (max_len x batch_size)
input_var = input_batch.transpose(0, 1)
target = target_batch.transpose(0, 1)
padding_hidden = torch.FloatTensor([0.0 for _ in range(self.decoder.hidden_size)]).unsqueeze(0)
batch_size = len(input_length)
if self.USE_CUDA:
input_var = input_var.cuda()
seq_mask = seq_mask.cuda()
padding_hidden = padding_hidden.cuda()
num_mask = num_mask.cuda()
# Zero gradients of both optimizers
# Run words through encoder
#encoder_outputs, problem_output = self.encoder(input_var, input_length)
seq_emb = self.embedder(input_var)
pade_outputs, _ = self.encoder(seq_emb, input_length)
problem_output = pade_outputs[-1, :, :self.hidden_size] + pade_outputs[0, :, self.hidden_size:]
encoder_outputs = pade_outputs[:, :, :self.hidden_size] + pade_outputs[:, :, self.hidden_size:]
# Prepare input and output variables
node_stacks = [[TreeNode(_)] for _ in problem_output.split(1, dim=0)] # root embedding B x 1
max_target_length = max(target_length)
all_node_outputs = []
all_sa_outputs = []
# all_leafs = []
copy_num_len = [len(_) for _ in num_pos]
num_size = max(copy_num_len)
        # extract the embeddings of the numbers that appear in the problem
all_nums_encoder_outputs = self.get_all_number_encoder_outputs(encoder_outputs, num_pos, batch_size, num_size,
self.encoder.hidden_size)
        embeddings_stacks = [[] for _ in range(batch_size)]  # B x 1 current tree state / subtree embedding / output
left_childs = [None for _ in range(batch_size)] # B x 1
for t in range(max_target_length):
num_score, op, current_embeddings, current_context, current_nums_embeddings = self.decoder(
node_stacks, left_childs, encoder_outputs, all_nums_encoder_outputs, padding_hidden, seq_mask, num_mask)
# all_leafs.append(p_leaf)
outputs = torch.cat((op, num_score), 1)
all_node_outputs.append(outputs)
target_t, generate_input = self.generate_tree_input(target[t].tolist(), outputs, nums_stack_batch, num_start,
unk)
target[t] = target_t
if self.USE_CUDA:
generate_input = generate_input.cuda()
left_child, right_child, node_label = self.node_generater(current_embeddings, generate_input, current_context)
left_childs = []
for idx, l, r, node_stack, i, o in zip(range(batch_size), left_child.split(1), right_child.split(1),
node_stacks, target[t].tolist(), embeddings_stacks):
if len(node_stack) != 0:
node = node_stack.pop()
else:
left_childs.append(None)
continue
                # treat unknowns as numbers and SEP as an operator
                if i < num_start:  # operator (not a number)
node_stack.append(TreeNode(r))
node_stack.append(TreeNode(l, left_flag=True))
o.append(TreeEmbedding(node_label[idx].unsqueeze(0), terminal=False))
# print(o[-1].embedding.size())
# print(encoder_outputs[idx].size())
                else:  # number
current_num = current_nums_embeddings[idx, i - num_start].unsqueeze(0)
while len(o) > 0 and o[-1].terminal:
sub_stree = o.pop()
op = o.pop()
current_num = self.merge(op.embedding, sub_stree.embedding, current_num) # Subtree embedding
if batch_first:
encoder_mapping, decoder_mapping = self.sa(current_num, encoder_outputs[idx])
else:
temp_encoder_outputs = encoder_outputs.transpose(0, 1)
encoder_mapping, decoder_mapping = self.sa(current_num,temp_encoder_outputs[idx])
all_sa_outputs.append((encoder_mapping, decoder_mapping))
o.append(TreeEmbedding(current_num, terminal=True))
if len(o) > 0 and o[-1].terminal:
left_childs.append(o[-1].embedding)
else:
left_childs.append(None)
# all_leafs = torch.stack(all_leafs, dim=1) # B x S x 2
all_node_outputs = torch.stack(all_node_outputs, dim=1) # B x S x N
target = target.transpose(0, 1).contiguous() # B x S
if self.USE_CUDA:
# all_leafs = all_leafs.cuda()
all_node_outputs = all_node_outputs.cuda()
target = target.cuda()
new_all_sa_outputs = []
for sa_pair in all_sa_outputs:
new_all_sa_outputs.append((sa_pair[0].cuda(), sa_pair[1].cuda()))
all_sa_outputs = new_all_sa_outputs
# target_length = torch.LongTensor(target_length).cuda()
else:
pass
# target_length = torch.LongTensor(target_length)
semantic_alignment_loss = nn.MSELoss()
        total_semantic_alignment_loss = 0
        sa_len = len(all_sa_outputs)
        for sa_pair in all_sa_outputs:
            total_semantic_alignment_loss += semantic_alignment_loss(sa_pair[0], sa_pair[1])
            # print(total_semantic_alignment_loss)
        total_semantic_alignment_loss = total_semantic_alignment_loss / sa_len
        # print(total_semantic_alignment_loss)
        # op_target = target < num_start
        # loss_0 = masked_cross_entropy_without_logit(all_leafs, op_target.long(), target_length)
        loss = masked_cross_entropy(all_node_outputs, target, target_length) + 0.01 * total_semantic_alignment_loss
# loss = loss_0 + loss_1
loss.backward()
# clip the grad
# torch.nn.utils.clip_grad_norm_(encoder.parameters(), 5)
# torch.nn.utils.clip_grad_norm_(predict.parameters(), 5)
# torch.nn.utils.clip_grad_norm_(generate.parameters(), 5)
# Update parameters with optimizers
return loss.item() # , loss_0.item(), loss_1.item()
def evaluate_tree(self, input_batch, input_length, generate_nums, num_pos, num_start, beam_size=5, max_length=30):
seq_mask = torch.BoolTensor(1, input_length).fill_(0)
# Turn padded arrays into (batch_size x max_len) tensors, transpose into (max_len x batch_size)
input_var = input_batch.transpose(0, 1)
num_mask = torch.BoolTensor(1, len(num_pos[0]) + len(generate_nums)).fill_(0)
padding_hidden = torch.FloatTensor([0.0 for _ in range(self.hidden_size)]).unsqueeze(0)
batch_size = 1
if self.USE_CUDA:
input_var = input_var.cuda()
seq_mask = seq_mask.cuda()
padding_hidden = padding_hidden.cuda()
num_mask = num_mask.cuda()
# Run words through encoder
seq_emb = self.embedder(input_var)
pade_outputs, _ = self.encoder(seq_emb, input_length)
problem_output = pade_outputs[-1, :, :self.hidden_size] + pade_outputs[0, :, self.hidden_size:]
encoder_outputs = pade_outputs[:, :, :self.hidden_size] + pade_outputs[:, :, self.hidden_size:]
# Prepare input and output variables
node_stacks = [[TreeNode(_)] for _ in problem_output.split(1, dim=0)]
num_size = len(num_pos[0])
all_nums_encoder_outputs = self.get_all_number_encoder_outputs(encoder_outputs, num_pos, batch_size, num_size,
self.hidden_size)
# B x P x N
embeddings_stacks = [[] for _ in range(batch_size)]
left_childs = [None for _ in range(batch_size)]
beams = [TreeBeam(0.0, node_stacks, embeddings_stacks, left_childs, [])]
for t in range(max_length):
current_beams = []
while len(beams) > 0:
b = beams.pop()
if len(b.node_stack[0]) == 0:
current_beams.append(b)
continue
# left_childs = torch.stack(b.left_childs)
left_childs = b.left_childs
num_score, op, current_embeddings, current_context, current_nums_embeddings = self.decoder(b.node_stack,
left_childs,
encoder_outputs,
all_nums_encoder_outputs,
padding_hidden,
seq_mask,
num_mask)
out_score = nn.functional.log_softmax(torch.cat((op, num_score), dim=1), dim=1)
# out_score = p_leaf * out_score
topv, topi = out_score.topk(beam_size)
for tv, ti in zip(topv.split(1, dim=1), topi.split(1, dim=1)):
current_node_stack = copy_list(b.node_stack)
current_left_childs = []
current_embeddings_stacks = copy_list(b.embedding_stack)
current_out = copy.deepcopy(b.out)
out_token = int(ti)
current_out.append(out_token)
node = current_node_stack[0].pop()
if out_token < num_start:
generate_input = torch.LongTensor([out_token])
if self.USE_CUDA:
generate_input = generate_input.cuda()
left_child, right_child, node_label = self.node_generater(current_embeddings, generate_input,
current_context)
current_node_stack[0].append(TreeNode(right_child))
current_node_stack[0].append(TreeNode(left_child, left_flag=True))
current_embeddings_stacks[0].append(TreeEmbedding(node_label[0].unsqueeze(0), False))
else:
current_num = current_nums_embeddings[0, out_token - num_start].unsqueeze(0)
while len(current_embeddings_stacks[0]) > 0 and current_embeddings_stacks[0][-1].terminal:
sub_stree = current_embeddings_stacks[0].pop()
op = current_embeddings_stacks[0].pop()
current_num = self.merge(op.embedding, sub_stree.embedding, current_num)
current_embeddings_stacks[0].append(TreeEmbedding(current_num, True))
if len(current_embeddings_stacks[0]) > 0 and current_embeddings_stacks[0][-1].terminal:
current_left_childs.append(current_embeddings_stacks[0][-1].embedding)
else:
current_left_childs.append(None)
current_beams.append(TreeBeam(b.score + float(tv), current_node_stack, current_embeddings_stacks,
current_left_childs, current_out))
beams = sorted(current_beams, key=lambda x: x.score, reverse=True)
beams = beams[:beam_size]
flag = True
for b in beams:
if len(b.node_stack[0]) != 0:
flag = False
if flag:
break
return beams[0].out
def get_all_number_encoder_outputs(self, encoder_outputs, num_pos, batch_size, num_size, hidden_size):
indices = list()
sen_len = encoder_outputs.size(0)
masked_index = []
temp_1 = [1 for _ in range(hidden_size)]
temp_0 = [0 for _ in range(hidden_size)]
for b in range(batch_size):
for i in num_pos[b]:
indices.append(i + b * sen_len)
masked_index.append(temp_0)
indices += [0 for _ in range(len(num_pos[b]), num_size)]
masked_index += [temp_1 for _ in range(len(num_pos[b]), num_size)]
indices = torch.LongTensor(indices)
masked_index = torch.BoolTensor(masked_index)
masked_index = masked_index.view(batch_size, num_size, hidden_size)
if self.USE_CUDA:
indices = indices.cuda()
masked_index = masked_index.cuda()
all_outputs = encoder_outputs.transpose(0, 1).contiguous()
all_embedding = all_outputs.view(-1, encoder_outputs.size(2)) # S x B x H -> (B x S) x H
all_num = all_embedding.index_select(0, indices)
all_num = all_num.view(batch_size, num_size, hidden_size)
return all_num.masked_fill_(masked_index, 0.0)
def generate_tree_input(self, target, decoder_output, nums_stack_batch, num_start, unk):
# when the decoder input is copied num but the num has two pos, chose the max
target_input = copy.deepcopy(target)
for i in range(len(target)):
if target[i] == unk:
num_stack = nums_stack_batch[i].pop()
max_score = -float("1e12")
for num in num_stack:
if decoder_output[i, num_start + num] > max_score:
target[i] = num + num_start
max_score = decoder_output[i, num_start + num]
if target_input[i] >= num_start:
target_input[i] = 0
return torch.LongTensor(target), torch.LongTensor(target_input)
def mse_loss(self, outputs, targets, mask=None):
# outputs : [batch_size,output_len,hidden_size]
# targets : [batch_size,output_len,hidden_size]
# mask : [batch_size,output_len]
mask = mask.to(self.device)
x = torch.sqrt(torch.sum(torch.square((outputs - targets)), dim=-1)) # [batch_size,output_len]
y = torch.sum(x * mask, dim=-1) / torch.sum(mask, dim=-1) # [batch_size]
return torch.sum(y)
def convert_idx2symbol(self, output, num_list, num_stack):
# batch_size=output.size(0)
'''batch_size=1'''
seq_len = len(output)
num_len = len(num_list)
output_list = []
res = []
for s_i in range(seq_len):
idx = output[s_i]
if idx in [self.out_sos_token, self.out_eos_token, self.out_pad_token]:
break
symbol = self.out_idx2symbol[idx]
if "NUM" in symbol:
num_idx = self.mask_list.index(symbol)
if num_idx >= num_len:
res = []
break
res.append(num_list[num_idx])
elif symbol == SpecialTokens.UNK_TOKEN:
try:
pos_list = num_stack.pop()
c = num_list[pos_list[0]]
res.append(c)
except:
return None
else:
res.append(symbol)
output_list.append(res)
return output_list
| 2.203125 | 2 |
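train_tree above optimizes a masked cross-entropy over the predicted node scores plus a 0.01-weighted semantic-alignment MSE averaged over the collected (encoder_mapping, decoder_mapping) pairs. The standalone sketch below reproduces only that combination on dummy tensors; the plain cross-entropy is a simplified stand-in, not the toolkit's masked_cross_entropy.

# Structure-only illustration of the SAUSolver training loss on dummy data.
# The plain cross_entropy below is a simplified stand-in for masked_cross_entropy.
import torch
from torch import nn

batch_size, max_len, num_symbols, hidden = 2, 4, 10, 8
all_node_outputs = torch.randn(batch_size, max_len, num_symbols, requires_grad=True)
target = torch.randint(0, num_symbols, (batch_size, max_len))

ce = nn.functional.cross_entropy(
    all_node_outputs.reshape(-1, num_symbols), target.reshape(-1))

# (encoder_mapping, decoder_mapping) pairs collected during tree decoding
mse = nn.MSELoss()
all_sa_outputs = [(torch.randn(1, hidden), torch.randn(1, hidden)) for _ in range(3)]
sa_loss = sum(mse(enc, dec) for enc, dec in all_sa_outputs) / len(all_sa_outputs)

loss = ce + 0.01 * sa_loss   # same weighting as in train_tree above
loss.backward()
print(loss.item())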
rosetta/tests/test_parallel.py | rafacarrascosa/rosetta | 1 | 6597 | <filename>rosetta/tests/test_parallel.py
import unittest
from functools import partial
import pandas as pd
from pandas.util.testing import assert_frame_equal, assert_series_equal
import numpy as np
import threading
from StringIO import StringIO
from rosetta.parallel import parallel_easy, pandas_easy
from rosetta.parallel.threading_easy import threading_easy, LockIterateApply
# A couple functions for testing parallel easy
# Must be defined outside of the test class for some reason.
def _abfunc(x, a, b=1):
return x * a * b
abfunc = partial(_abfunc, 2, 3)
def frame_to_series(frame):
x = frame.iloc[0, 0]
return pd.Series([x] * len(frame.columns), index=frame.columns)
def rightmax(mylist):
return [max(mylist[i: i+2]) for i in range(len(mylist))]
def leftmax(mylist):
for i in range(len(mylist)):
if i == 0:
result = [mylist[0]]
else:
result.append(max(mylist[i - 1: i+1]))
return result
class TestBase(unittest.TestCase):
"""
Tests the parallel_easy module.
"""
def setUp(self):
self.numbers = range(5)
self.benchmark = [0, 6, 12, 18, 24]
def test_map_easy_1job(self):
result = parallel_easy.map_easy(abfunc, self.numbers, 1)
self.assertEqual(result, self.benchmark)
def test_map_easy_3job(self):
result = parallel_easy.map_easy(abfunc, self.numbers, 3)
self.assertEqual(result, self.benchmark)
def test_imap_easy_1job(self):
result_iterator = parallel_easy.imap_easy(abfunc, self.numbers, 1, 1)
result = []
for number in result_iterator:
result.append(number)
self.assertEqual(result, self.benchmark)
def test_imap_easy_3job(self):
result_iterator = parallel_easy.imap_easy(abfunc, self.numbers, 3, 1)
result = []
for number in result_iterator:
result.append(number)
self.assertEqual(result, self.benchmark)
def test_n_jobs_wrap_positive(self):
"""
For n_jobs positive, the wrap should return n_jobs.
"""
for n_jobs in range(1, 5):
result = parallel_easy._n_jobs_wrap(n_jobs)
self.assertEqual(result, n_jobs)
def test_n_jobs_wrap_zero(self):
"""
For n_jobs zero, the wrap should raise a ValueError
"""
self.assertRaises(ValueError, parallel_easy._n_jobs_wrap, 0)
class TestMapEasyPaddedBlock(unittest.TestCase):
"""
Tests the parallel_easy.map_easy_padded_blocks function.
"""
def setUp(self):
#self.numbers_1 = [
# 0, 0, 2, -1, 4, 2, 6, 7, 6, 9, 12, 11, 11, 14, 55, 55, 44, 33, 33]
self.numbers_10 = np.random.randint(0, 5, 10)
self.numbers_101 = np.random.randint(0, 5, 101)
        self.numbers_51 = np.random.randint(0, 5, 51)
#self.numbers_1 = [0, 1, 2, 0, 3, 2, 4, 3, 2, 3, 3]
self.n_jobs = 1
def lefttest(self, numbers, buffer_len, blocksize):
result = parallel_easy.map_easy_padded_blocks(
leftmax, numbers, self.n_jobs, buffer_len, blocksize=blocksize)
benchmark = leftmax(numbers)
self.assertEqual(result, benchmark)
def righttest(self, numbers, buffer_len, blocksize):
result = parallel_easy.map_easy_padded_blocks(
rightmax, numbers, self.n_jobs, buffer_len, blocksize=blocksize)
benchmark = rightmax(numbers)
self.assertEqual(result, benchmark)
def test_map_easy_padded_blocks_14(self):
buffer_len = 1
blocksize = 4
self.lefttest(self.numbers_10, buffer_len, blocksize)
self.lefttest(self.numbers_101, buffer_len, blocksize)
self.lefttest(self.numbers_51, buffer_len, blocksize)
self.righttest(self.numbers_10, buffer_len, blocksize)
self.righttest(self.numbers_101, buffer_len, blocksize)
self.righttest(self.numbers_51, buffer_len, blocksize)
def test_map_easy_padded_blocks_24(self):
buffer_len = 2
blocksize = 4
self.lefttest(self.numbers_10, buffer_len, blocksize)
self.lefttest(self.numbers_101, buffer_len, blocksize)
self.lefttest(self.numbers_51, buffer_len, blocksize)
self.righttest(self.numbers_10, buffer_len, blocksize)
self.righttest(self.numbers_101, buffer_len, blocksize)
self.righttest(self.numbers_51, buffer_len, blocksize)
def test_map_easy_padded_blocks_37(self):
buffer_len = 3
blocksize = 7
self.lefttest(self.numbers_101, buffer_len, blocksize)
self.lefttest(self.numbers_51, buffer_len, blocksize)
self.righttest(self.numbers_101, buffer_len, blocksize)
self.righttest(self.numbers_51, buffer_len, blocksize)
def test_map_easy_padded_blocks_17(self):
buffer_len = 1
blocksize = 7
self.lefttest(self.numbers_10, buffer_len, blocksize)
self.lefttest(self.numbers_101, buffer_len, blocksize)
self.lefttest(self.numbers_51, buffer_len, blocksize)
self.righttest(self.numbers_10, buffer_len, blocksize)
self.righttest(self.numbers_101, buffer_len, blocksize)
self.righttest(self.numbers_51, buffer_len, blocksize)
class TestPandasEasy(unittest.TestCase):
"""
Tests the pandas_easy module.
"""
def setUp(self):
pass
def test_groupby_to_scalar_to_series_1(self):
df = pd.DataFrame({'a': [6, 2, 2], 'b': [4, 5, 6]})
benchmark = df.groupby('a').apply(max)
result = pandas_easy.groupby_to_scalar_to_series(df, max, 1, by='a')
assert_series_equal(result, benchmark)
def test_groupby_to_scalar_to_series_2(self):
s = pd.Series([1, 2, 3, 4])
labels = ['a', 'a', 'b', 'b']
benchmark = s.groupby(labels).apply(max)
result = pandas_easy.groupby_to_scalar_to_series(
s, max, 1, by=labels)
assert_series_equal(result, benchmark)
def test_groupby_to_series_to_frame_1(self):
df = pd.DataFrame({'a': [6, 2, 2], 'b': [4, 5, 6]})
labels = ['g1', 'g1', 'g2']
benchmark = df.groupby(labels).mean()
result = pandas_easy.groupby_to_series_to_frame(
df, np.mean, 1, use_apply=True, by=labels)
assert_frame_equal(result, benchmark)
def test_groupby_to_series_to_frame_2(self):
df = pd.DataFrame({'a': [6, 2, 2], 'b': [4, 5, 6]})
labels = ['g1', 'g1', 'g2']
benchmark = df.groupby(labels).apply(frame_to_series)
result = pandas_easy.groupby_to_series_to_frame(
df, frame_to_series, 1, use_apply=False, by=labels)
assert_frame_equal(result, benchmark)
class TestLockIterateApply(unittest.TestCase):
"""
Test the Locked Iterator Class
"""
def setUp(self):
self.data = ['my', 'name', 'is', 'daniel']
self.num_threads = 4
def bytwo(x):
return 2 * x
self.func = bytwo
def it():
for i in self.data:
yield i
self.myiter = it()
def test_locked_iterator(self):
threads = []
lock = threading.Lock()
out = StringIO()
for i in range(self.num_threads):
t = LockIterateApply(self.func, self.myiter, lock, ',', out)
threads.append(t)
for t in threads:
t.start()
for t in threads:
t.join()
benchmark = set(['mymy', 'namename', 'danieldaniel', 'isis', ''])
results = set(out.getvalue().split(','))
self.assertEqual(results, benchmark)
def test_threading_easy(self):
out = StringIO()
threading_easy(self.func, self.myiter, self.num_threads, ',', out)
benchmark = set(['mymy', 'namename', 'danieldaniel', 'isis', ''])
results = set(out.getvalue().split(','))
self.assertEqual(results, benchmark)
def test_threading_easy_single(self):
out = StringIO()
threading_easy(self.func, self.myiter, 1, ',', out)
benchmark = set(['mymy', 'namename', 'danieldaniel', 'isis', ''])
results = set(out.getvalue().split(','))
self.assertEqual(results, benchmark)
| 2.765625 | 3 |
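TestBase above pins down the expected behaviour of parallel_easy.map_easy: a module-level function (wrapped with functools.partial) mapped over an iterable with a given job count. A direct, non-test usage sketch of that same pattern follows; it assumes the rosetta package is importable and keeps the partial trick because multiprocessing needs picklable, module-level callables.

# Direct-usage sketch of the map_easy pattern checked by TestBase above.
# Assumes rosetta is installed; the numbers and job count are arbitrary.
from functools import partial
from rosetta.parallel import parallel_easy

def _scale(x, a, b=1):
    return x * a * b

scale = partial(_scale, a=2, b=3)   # module-level, picklable callable

if __name__ == '__main__':
    results = parallel_easy.map_easy(scale, range(5), 3)
    print(results)  # expected: [0, 6, 12, 18, 24]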
modules/helper/subtitles/subtitles.py | sdelcore/video-event-notifier-old | 0 | 6598 | import time
import srt
import re
import datetime
from mqtthandler import MQTTHandler
INIT_STATUS={
"video": {
"title": None,
"series_title": None,
"season": None,
"episode": None
},
"time": None,
"events": None
}
class SubtitleHandler:
subtitles = []
phrases = []
def __init__(self, broker):
self.mqtt = MQTTHandler(broker)
def parseSRT(self, srt_filename):
f=open(srt_filename, "r")
subtitle_generate = srt.parse(f.read())
f.close()
self.subtitles = list(subtitle_generate)
return self.subtitles
def parsePhrases(self, phrase_filename):
f=open(phrase_filename, "r")
lines = f.readlines()
for line in lines:
phrase = line.rstrip("\n\r").split("/")
self.phrases.append(phrase)
return self.phrases
def isPhraseInLine(self,phrase, sub, content):
        sub_line = re.sub(r'[^A-Za-z0-9\s]+', '', str(content)).lower()
        phrase = re.sub(r'[^A-Za-z0-9\s]+', '', str(phrase)).lower()
count = 0
while bool(re.search(phrase, sub_line)):
count += 1
sub_line = sub_line.replace(phrase, '', 1)
return count
def getEventTime(self,sub):
middle = sub.end - sub.start
between_sec = datetime.timedelta.total_seconds(middle) / 2
sec = between_sec + datetime.timedelta.total_seconds(sub.start)
return int(sec)
def matchEventToMovie(self, movie, subtitles, phrases, time_offset):
global INIT_STATUS
status = INIT_STATUS
status["video"]["title"] = movie
#TODO determine how to set up phrase data
for sub in subtitles:
c = sub.content.replace('\n', ' ')
c = c.split(" ")
firstpart, secondpart = " ".join(c[:len(c)//2]), " ".join(c[len(c)//2:])
mult = 0
for phrase in phrases:
line = phrase[0]
events = phrase[1]
mult += self.isPhraseInLine(line,sub,sub.content)
#f = self.isPhraseInLine(line,sub, firstpart)
#s = self.isPhraseInLine(line,sub, secondpart)
#if f + s == 0:
# mult += self.isPhraseInLine(line,sub,sub.content )
#else:
# mult += f+s
            ## DEAR LESS DRUNK SELF
            # this currently adds the number of events over the entire subtitle
            # what you need to do, if you wish to accept it, is to split each subtitle into two parts
            # the first part will be the half that has the first bit of text, which will have the correct time to event for that half
            # the second half will have the correct time to event for the second half
            # you could have three if statements that check, and each of them reach a send.message()
            if mult > 0:  # won't work properly if events is greater than 1
status["time"] = self.getEventTime(sub) + time_offset
status["events"] = int(events) * mult
self.sendMessage(status)
#print(sub.content)
def sendMessage(self, msg):
self.mqtt.send(msg)
print(msg)
return msg
def isDone(self):
return True | 2.890625 | 3 |
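SubtitleHandler above is driven by an SRT file, a phrase file whose rows look like 'phrase/number_of_events' (see parsePhrases), a movie title, and a time offset, and it publishes matching events over MQTT. A hedged end-to-end driver sketch follows; the module name, broker address, file names and offset are placeholders.

# Hypothetical driver for the SubtitleHandler class above.
# Module name, broker host, file names and the offset are placeholders.
from subtitles import SubtitleHandler

handler = SubtitleHandler('localhost')           # MQTT broker host
subs = handler.parseSRT('movie.srt')             # list of srt.Subtitle objects
phrases = handler.parsePhrases('phrases.txt')    # rows like "cheers/2"
handler.matchEventToMovie('Movie Title', subs, phrases, 10)  # 10 s offset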
thecsvparser.py | rbago/CEBD1160_Class4_hwk | 0 | 6599 | #!/usr/bin/env python
import os
import numpy as np
import pandas as pd
os.getcwd()
# Request for the filename
# Current version of this script works only with TSV type files
mainFilename = input('Input your file name (diabetes.tab.txt or housing.data.txt): ')
print()
# To create proper dataframe, transforming it with numpy
# Then changing it with pandas
filenameData = np.genfromtxt(mainFilename, dtype='str')
filenameData = pd.DataFrame(filenameData)
# Obtains first row to identify header is string or numeric
headers = filenameData.iloc[0]
try:
pd.to_numeric(headers)
except (ValueError, TypeError):
filenameData = pd.DataFrame(filenameData.values[1:], columns=headers)
# Changes strings to numbers (self identifies for float or integer)
filenameData = filenameData.apply(pd.to_numeric)
# Obtains the mean and standard deviation of the columns
listMean = filenameData.mean()
listStd = filenameData.std()
print(filenameData)
# Prints out the results
print('Mean for each column:')
for idx in filenameData.columns:
print(idx,':',listMean[idx])
print()
print('Standard deviation for each column:')
for idx in filenameData.columns:
print(idx,':',listStd[idx])
| 3.640625 | 4 |
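The script above routes the whitespace-separated input through numpy before computing per-column mean and standard deviation with pandas. A hedged alternative sketch that produces the same summary directly with pandas is below; the file name and the assumption that the file carries a header row (as diabetes.tab.txt does) are placeholders.

# Alternative sketch: same per-column mean/std straight from pandas.
# Assumes a whitespace-separated file with a header row, e.g. diabetes.tab.txt.
import pandas as pd

df = pd.read_csv('diabetes.tab.txt', delim_whitespace=True)
print('Mean for each column:')
print(df.mean())
print('Standard deviation for each column:')
print(df.std())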