# AUTHOR: Devendra Patel
# Python3 Concept: Palindrome
# GITHUB: https://github.com/github-dev21
print("Enter the Number ")
num = int(input())
temp = num
reverse = 0
while num > 0:
    dig = num % 10
    reverse = reverse * 10 + dig
    num = num // 10
print(reverse)
if temp == reverse:
    print("Number is a Palindrome")
else:
    print("Number is not a Palindrome")
|
#!/usr/bin/python3
"""
In a given 2D binary array A, there are two islands. (An island is a
4-directionally connected group of 1s not connected to any other 1s.)
Now, we may change 0s to 1s so as to connect the two islands together to form 1
island.
Return the smallest number of 0s that must be flipped. (It is guaranteed that
the answer is at least 1.)
Example 1:
Input: [[0,1],[1,0]]
Output: 1
Example 2:
Input: [[0,1,0],[0,0,0],[0,0,1]]
Output: 2
Example 3:
Input: [[1,1,1,1,1],[1,0,0,0,1],[1,0,1,0,1],[1,0,0,0,1],[1,1,1,1,1]]
Output: 1
Note:
1 <= A.length = A[0].length <= 100
A[i][j] == 0 or A[i][j] == 1
"""
from typing import List

dirs = ((0, -1), (0, 1), (-1, 0), (1, 0))


class Solution:
    def shortestBridge(self, A: List[List[int]]) -> int:
        """
        Mark component 1 and component 2.
        Iterating over every 0 and BFS-ing with min(dist1 + dist2 - 1) would be
        O(N * N) per cell, too high a complexity. Instead, BFS and grow from one component.
        """
        m, n = len(A), len(A[0])
        # coloring
        colors = [[None for _ in range(n)] for _ in range(m)]
        color = 0
        for i in range(m):
            for j in range(n):
                if A[i][j] == 1 and colors[i][j] is None:
                    self.dfs(A, i, j, colors, color)
                    color += 1
        assert color == 2
        # BFS
        step = 0
        q = []
        visited = [[False for _ in range(n)] for _ in range(m)]
        for i in range(m):
            for j in range(n):
                if colors[i][j] == 0:
                    visited[i][j] = True
                    q.append((i, j))
        while q:
            cur_q = []
            for i, j in q:
                for I, J in self.nbr(A, i, j):
                    if not visited[I][J]:
                        if colors[I][J] is None:
                            visited[I][J] = True  # pre-check, dedup
                            cur_q.append((I, J))
                        elif colors[I][J] == 1:
                            return step
            step += 1
            q = cur_q
        raise RuntimeError("the two islands are guaranteed to be connectable")

    def nbr(self, A, i, j):
        m, n = len(A), len(A[0])
        for di, dj in dirs:
            I = i + di
            J = j + dj
            if 0 <= I < m and 0 <= J < n:
                yield I, J

    def dfs(self, A, i, j, colors, color):
        colors[i][j] = color
        for I, J in self.nbr(A, i, j):
            if colors[I][J] is None and A[I][J] == 1:
                self.dfs(A, I, J, colors, color)


if __name__ == "__main__":
    assert Solution().shortestBridge([[1,1,1,1,1],[1,0,0,0,1],[1,0,1,0,1],[1,0,0,0,1],[1,1,1,1,1]]) == 1
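    # Added checks (not in the original file): the other two examples from the docstring above.
    assert Solution().shortestBridge([[0,1],[1,0]]) == 1
    assert Solution().shortestBridge([[0,1,0],[0,0,0],[0,0,1]]) == 2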
|
from random import choice

import matplotlib.pyplot as plt


class Randomwalk:
    """A class to generate random walks."""

    def __init__(self, num_points=5000):
        """Initialize attributes of a walk."""
        self.num_points = num_points
        # All walks start at (0, 0).
        self.x_values = [0]
        self.y_values = [0]

    def fill_walk(self):
        """Calculate all the points in the walk."""
        # Keep taking steps until the walk reaches the desired length.
        while len(self.x_values) < self.num_points:
            # Decide which direction to go and how far to go in that direction.
            x_direction = choice([1, -1])
            x_distance = choice([0, 1, 2, 3, 4])
            x_step = x_direction * x_distance
            y_direction = choice([1, -1])
            y_distance = choice([0, 1, 2, 3, 4])
            y_step = y_direction * y_distance
            # Reject moves that go nowhere.
            if (x_step == 0) and (y_step == 0):
                continue
            # Calculate the new position.
            x = self.x_values[-1] + x_step
            y = self.y_values[-1] + y_step
            self.x_values.append(x)
            self.y_values.append(y)


# Make a random walk.
rw = Randomwalk()
rw.fill_walk()

# Plot the points in the walk.
plt.style.use('classic')
fig, ax = plt.subplots()
ax.scatter(rw.x_values, rw.y_values, s=15)
plt.show()
|
"""
This script starts up our game
"""
from game_window import main
if __name__ == "__main__":
main()
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import random
import math
from .neural_blocks import ( SkipConnMLP, NNEncoder, FourierEncoder )
from .utils import ( autograd, eikonal_loss, dir_to_elev_azim, fat_sigmoid )
from .refl import ( LightAndRefl )
def load(args, shape, light_and_refl: LightAndRefl):
assert(isinstance(light_and_refl, LightAndRefl)), "Need light and reflectance for integrator"
if args.integrator_kind is None: return None
elif args.integrator_kind == "direct": cons = Direct
elif args.integrator_kind == "path": cons = Path
else: raise NotImplementedError(f"load integrator: {args.integrator_kind}")
ls = 0
if hasattr(shape, "latent_size"): ls = shape.latent_size
elif hasattr(shape, "total_latent_size"): ls = shape.total_latent_size()
occ = load_occlusion_kind(args.occ_kind, ls)
integ = cons(shape=shape, refl=light_and_refl.refl, occlusion=occ)
return integ
def load_occlusion_kind(kind=None, latent_size:int=0):
if kind is None: occ = lighting_wo_isect
elif kind == "hard": occ = LightingWIsect()
elif kind == "learned": occ = LearnedLighting(latent_size=latent_size)
elif kind == "all-learned": occ = AllLearnedOcc(latent_size=latent_size)
else: raise NotImplementedError(f"load occlusion: {args.occ_kind}")
return occ
# no shadow
def lighting_wo_isect(pts, lights, isect_fn, latent=None, mask=None):
dir, _, spectrum = lights(pts if mask is None else pts[mask], mask=mask)
return dir, spectrum
# hard shadow lighting
class LightingWIsect(nn.Module):
def __init__(self): super().__init__()
def forward(self, pts, lights, isect_fn, latent=None, mask=None):
pts = pts if mask is None else pts[mask]
dir, dist, spectrum = lights(pts, mask=mask)
far = dist.max().item() if mask.any() else 6
visible = isect_fn(pts, dir, near=0.1, far=far)
spectrum = torch.where(
visible[...,None],
spectrum,
torch.zeros_like(spectrum)
)
return dir, spectrum
class LearnedLighting(nn.Module):
def __init__(
self,
latent_size:int=0,
):
super().__init__()
in_size=6
self.attenuation = SkipConnMLP(
in_size=in_size, out=1, latent_size=latent_size, num_layers=5, hidden_size=128,
enc=FourierEncoder(input_dims=in_size), xavier_init=True,
)
def forward(self, pts, lights, isect_fn, latent=None, mask=None):
pts = pts if mask is None else pts[mask]
dir, dist, spectrum = lights(pts, mask=mask)
far = dist.max().item() if mask.any() else 6
# TODO why doesn't this isect fn seem to work?
visible = isect_fn(r_o=pts, r_d=dir, near=2e-3, far=3, eps=1e-3)
att = self.attenuation(torch.cat([pts, dir], dim=-1), latent).sigmoid()
spectrum = torch.where(visible.reshape_as(att), spectrum, spectrum * att)
return dir, spectrum
class AllLearnedOcc(nn.Module):
def __init__(
self,
latent_size:int=0,
):
super().__init__()
in_size=6
self.attenuation = SkipConnMLP(
in_size=in_size, out=1, latent_size=latent_size,
enc=FourierEncoder(input_dims=in_size),
num_layers=5, hidden_size=128, xavier_init=True,
)
def forward(self, pts, lights, isect_fn, latent=None, mask=None):
pts = pts if mask is None else pts[mask]
dir, _, spectrum = lights(pts, mask=mask)
att = self.attenuation(torch.cat([pts, dir], dim=-1), latent).sigmoid()
return dir, spectrum * att
class Renderer(nn.Module):
def __init__(
self,
shape,
refl,
occlusion,
):
super().__init__()
self.shape = shape
self.refl = refl
self.occ = occlusion
def forward(self, _rays): raise NotImplementedError()
class Direct(Renderer):
def __init__(self, **kwargs):
super().__init__(**kwargs)
@property
def sdf(self): return self.shape
def total_latent_size(self): return self.shape.latent_size
def set_refl(self, refl): self.refl = refl
def forward(self, rays): return direct(self.shape, self.refl, self.occ, rays, self.training)
# Functional version of integration
def direct(shape, refl, occ, rays, training=True):
r_o, r_d = rays.split([3, 3], dim=-1)
pts, hits, tput, n = shape.intersect_w_n(r_o, r_d)
_, latent = shape.from_pts(pts[hits])
out = torch.zeros_like(r_d)
for light in refl.light.iter():
light_dir, light_val = occ(pts, light, shape.intersect_mask, mask=hits, latent=latent)
bsdf_val = refl(x=pts[hits], view=r_d[hits], normal=n[hits], light=light_dir, latent=latent)
out[hits] = out[hits] + bsdf_val * light_val
if training: out = torch.cat([out, tput], dim=-1)
return out
def path(shape, refl, occ, rays, training=True):
r_o, r_d = rays.split([3, 3], dim=-1)
pts, hits, tput, n = shape.intersect_w_n(r_o, r_d)
_, latent = shape.from_pts(pts[hits])
out = torch.zeros_like(r_d)
for light in refl.light.iter():
light_dir, light_val = occ(pts, light, shape.intersect_mask, mask=hits, latent=latent)
bsdf_val = refl(x=pts[hits], view=r_d[hits], normal=n[hits], light=light_dir, latent=latent)
out[hits] = out[hits] + bsdf_val * light_val
# TODO this should just be a random sample of pts in some range?
pts_2nd_ord = pts.reshape(-1, 3)
# torch.randint expects a tuple for size; sample 32 of the flattened points
pts_2nd_ord = pts_2nd_ord[torch.randint(high=pts_2nd_ord.shape[0], size=(32,), device=pts.device), :]
with torch.no_grad():
# compute light to set of points
light_dir, light_val = occ(pts_2nd_ord, shape.intersect_mask, latent=latent)
# compute dir from each of the 2nd order pts to the main points
dirs = pts_2nd_ord - pts
# TODO apply the learned occlusion here
att = occ.attenuation(torch.cat([pts_2nd_ord, dirs], dim=-1), latent=latent)
# TODO this should account for the BSDF when computing the reflected component
out[hits] = out[hits] + att * light_val
if training: out = torch.cat([out, tput], dim=-1)
return out
class Path(Renderer):
def __init__(
self,
bounces:int=1,
**kwargs,
):
super().__init__(**kwargs)
self.bounces = bounces
def forward(self, rays):
raise NotImplementedError()
|
#!/usr/bin/env python3
import os
import bspump
import bspump.ipc
import bspump.common
class EchoSink(bspump.Sink):
def process(self, context, event):
'''
Send the event back to the client socket.
'''
print(event)
sock = context['stream']
sock.send(event.encode('utf-8'))
sock.send(b'\n')
class EchoPipeline(bspump.Pipeline):
'''
To test this pipeline, use:
socat STDIO SSL:127.0.0.1:8082
'''
def __init__(self, app, pipeline_id):
super().__init__(app, pipeline_id)
mydir = os.path.dirname(os.path.realpath(__file__))
self.build(
bspump.ipc.StreamServerSource(app, self, config={
'address': ':: 8082',
'cert': os.path.join(mydir, './ssl/cert.pem'),
'key': os.path.join(mydir, './ssl/key.pem'),
}),
EchoSink(app, self)
)
if __name__ == '__main__':
app = bspump.BSPumpApplication()
svc = app.get_service("bspump.PumpService")
svc.add_pipeline(EchoPipeline(app, "EchoPipeline"))
app.run()
|
import os
import shutil
import tensorflow as tf
import matplotlib.pyplot as plt
import logging
import copy
from collections import OrderedDict
import signal
from skopt import gp_minimize
from skopt import dump as dump_result
from skopt import load as load_result
from skopt.space import Real, Categorical, Integer
from skopt.plots import plot_convergence, plot_objective, plot_evaluations
from pyrocko.guts import Object, Int, Float, List, Tuple, String
from .util import delete_if_exists, ensure_dir
from .data import name_to_class
try:
from skopt.plots import plot_histogram
_plot_histogram_error = False
except ImportError as e:
_plot_histogram_error = e
logging.debug(e)
def to_skopt_real(x, name, prior):
return Real(low=x[0], high=x[1], prior=prior, name=name)
class Param(Object):
name = String.T()
_type = None
target_attribute = String.T(default='model',
help="Which of the model's parameters should be modified (e.g. `config`)")
def make_parameter(self):
return self._type(low=self.low, high=self.high, name=self.name)
class PCategorical(Param):
prior = String.T(optional=True)
categories = List.T(String.T())
default = String.T()
_type = Categorical
def make_parameter(self):
return self._type(name=self.name, prior=self.prior,
categories=self.categories)
class PInteger(Param):
low = Float.T()
high = Float.T()
default = Float.T()
_type = Integer
def make_parameter(self):
return self._type(low=self.low, high=self.high, name=self.name)
class PReal(Param):
low = Float.T()
high = Float.T()
default = Float.T()
prior = String.T(default='uniform')
_type = Real
def make_parameter(self):
return self._type(low=self.low, high=self.high, name=self.name,
prior=self.prior)
class Optimizer(Object):
n_calls = Int.T(default=50, help='number of parameter sets to evaluate (calls made by gp_minimize)')
path_out = String.T(default='optimizer-results',
help='base path where to store results, plots and logs')
params = List.T(Param.T(), default=[PReal(name='learning_rate', low=1e-6,
high=1e-2, default=1e-4)])
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.model = None
self.result = None
self.best_model_dir = self.extend_path('best-model')
self.fn_result = self.extend_path('result.optmz')
self.best_loss = 9e99
self.param_keys = [p.name for p in self.params]
self.params_dict = OrderedDict()
for p in self.params:
self.params_dict[p.name] = p.make_parameter()
self._config_operations = [p.name for p in self.params if
p.target_attribute=='config']
self.optimizer_defaults = [(p.name, p.default) for p in self.params]
self._ncalls = 0
signal.signal(signal.SIGINT, self.plot_results)
def clear(self):
'''delete former runs.'''
shutil.rmtree(self.path_out)
@property
def dimensions(self):
return [v for _, v in self.params_dict.items()]
@property
def optimizer_keys(self):
return [k for (k, default) in self.optimizer_defaults]
@property
def optimizer_values(self):
return [default for (k, default) in self.optimizer_defaults]
@property
def non_categorical_dimensions(self):
'''Returns a list of non-categorical dimension names.'''
return [dim.name for dim in self.dimensions if not \
isinstance(dim, Categorical)]
def announce_test(self, params):
'''Log a parameter test set. '''
logging.info('+' * 20)
logging.info('evaluating next set of parameters:')
base =' {}: {}\n'
for kv in params.items():
logging.info(base.format(*kv))
def update_model(self, model, kwargs):
'''Set config and model attributes by kwargs.
Rather sloppy...
'''
new_config = copy.deepcopy(model.config)
self._ncalls += 1
model.name = 'opt_%s-' % self._ncalls + self.base_name
for key, arg in kwargs.items():
# choose which object to modify (model or model.config)
if 'config' in key:
key = key.split('.')[-1]
want_modify = new_config
else:
want_modify = model
# If name is a ChunkOperation subclass, instantiate an object of
# that class
attribute = name_to_class.get(arg, False)
if attribute:
# chunk operation found
attribute = attribute()
else:
attribute = arg
if not getattr(want_modify, key):
raise Exception('No such parameter: %s' % key)
setattr(want_modify, key, attribute)
model.config = new_config
def save_model(self, model):
'''copy the `model` to the `best_model` directory.'''
shutil.rmtree(self.best_model_dir)
shutil.copytree(model.outdir, self.best_model_dir)
def evaluate(self, args):
''' wrapper to parse gp_minimize args to model.train'''
kwargs = dict(zip(self.optimizer_keys, args))
self.announce_test(kwargs)
self.update_model(self.model, kwargs)
try:
loss = self.model.train_and_evaluate()[0]['loss']
if loss < self.best_loss:
print('found a better loss at %s' % loss)
print('kwargs: ', kwargs)
self.save_model(self.model)
self.best_loss = loss
else:
self.model.clear_model()
return loss
except tf.errors.ResourceExhaustedError as e:
logging.warn(e)
logging.warn('Skipping this test, loss = 9e9')
return 9e9
def set_model(self, model):
logging.info('prefixing model output path to %s' % self.path_out)
model.prefix = self.path_out
self.model = model
self.base_name = self.model.name
def optimize(self, model):
'''Optimize a :py:class:`pinky.model.Model` instance.'''
self.set_model(model)
ensure_dir(self.best_model_dir)
self.result = gp_minimize(
func=self.evaluate,
dimensions=self.dimensions,
acq_func='EI',
n_calls=self.n_calls,
x0=self.optimizer_values)
self.evaluate_result()
self.plot_results()
def ensure_result(self):
''' Load and set minimizer result.'''
if self.result is None:
if self.fn_result is None:
logging.warn(
'Cannot load results from filename: %s' % self.fn_result)
self.result = load_result(self.fn_result)
def extend_path(self, *path):
'''Prepend `self.path_out` to `path`.'''
return os.path.join(self.path_out, *path)
def evaluate_result(self):
self.ensure_result()
# best = self.result.space.point_to_dict(self.result.x)
best = self.result.x
logging.info('Best parameter set:')
logging.info(best)
logging.info('Best parameter loss:')
logging.info(self.result.fun)
def plot_results(self, *args):
'''Produce and save result plots. '''
# self.ensure_result()
ensure_dir(self.extend_path('plots'))
if _plot_histogram_error:
logging.warn(_plot_histogram_error)
else:
for dim_name in self.optimizer_keys:
fig, ax = plot_histogram(result=self.result) #, dimension_name=dim_name)
fig.savefig(self.extend_path('plots/histogram_%s.pdf' % dim_name))
# ax = plot_objective(result=self.result,)
# dimension_names=self.non_categorical_dimensions)
# fig = plt.gcf()
# fig.savefig(self.extend_path('plots/objectives.pdf'))
ax = plot_evaluations(
result=self.result,)
# dimension_names=self.non_categorical_dimensions)
fig = plt.gcf()
fig.savefig(self.extend_path('plots/evaluations.pdf'))
@classmethod
def get_example(cls):
'''Get an example instance of this class.'''
return cls()
if __name__ == '__main__':
print(Optimizer.get_example())
|
import math

# Surface area and volume of a regular dodecahedron with edge length a.
a = int(input())
print(round(3 * a**2 * math.sqrt(5 * (5 + 2 * math.sqrt(5))), 2))  # S = 3 * a^2 * sqrt(25 + 10*sqrt(5))
print(round(a**3 / 4 * (15 + 7 * math.sqrt(5)), 2))  # V = a^3 / 4 * (15 + 7*sqrt(5))
|
"""
Tests for BlockCountsTransformer.
"""
# pylint: disable=protected-access
from openedx.core.djangoapps.content.block_structure.factory import BlockStructureFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import SampleCourseFactory
from ..block_counts import BlockCountsTransformer
class TestBlockCountsTransformer(ModuleStoreTestCase):
"""
Test behavior of BlockCountsTransformer
"""
def setUp(self):
super().setUp()
self.course_key = SampleCourseFactory.create().id
self.course_usage_key = self.store.make_course_usage_key(self.course_key)
self.block_structure = BlockStructureFactory.create_from_modulestore(self.course_usage_key, self.store)
def test_transform(self):
# collect phase
BlockCountsTransformer.collect(self.block_structure)
self.block_structure._collect_requested_xblock_fields()
# transform phase
BlockCountsTransformer(['problem', 'chapter']).transform(usage_info=None, block_structure=self.block_structure)
# block_counts
chapter_x_key = self.course_key.make_usage_key('chapter', 'chapter_x')
block_counts_for_chapter_x = self.block_structure.get_transformer_block_data(
chapter_x_key, BlockCountsTransformer,
)
block_counts_for_course = self.block_structure.get_transformer_block_data(
self.course_usage_key, BlockCountsTransformer,
)
# verify count of chapters
assert block_counts_for_course.chapter == 2
# verify count of problems
assert block_counts_for_course.problem == 6
assert block_counts_for_chapter_x.problem == 3
# verify other block types are not counted
for block_type in ['course', 'html', 'video']:
assert not hasattr(block_counts_for_course, block_type)
assert not hasattr(block_counts_for_chapter_x, block_type)
|
# -*- coding: utf-8 -*-

# Form implementation generated from reading ui file 'loadingDialog.ui'
#
# Created by: PyQt5 UI code generator 5.11.3
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtCore import QSize, Qt
from PyQt5.QtGui import QMovie
from PyQt5.QtWidgets import QDialog, QMessageBox


class LoadingDialog(QDialog):

    def __init__(self, parent=None):
        super(LoadingDialog, self).__init__(parent)
        self.setupUi()

    def setupUi(self):
        self.setObjectName("Dialog")
        self.resize(181, 141)
        self.setWindowFlags(Qt.FramelessWindowHint | Qt.WindowStaysOnTopHint)
        self.setModal(Qt.ApplicationModal)
        self.label = QtWidgets.QLabel(self)
        self.label.setGeometry(QtCore.QRect(0, 0, 181, 141))
        self.gif = QMovie('loading2.gif')
        self.gif.setScaledSize(QSize(self.label.width(), self.label.height()))
        self.label.setMovie(self.gif)
        self.gif.start()
        self.label.setObjectName("label")
        self.retranslateUi(self)
        QtCore.QMetaObject.connectSlotsByName(self)

    def retranslateUi(self, Dialog):
        _translate = QtCore.QCoreApplication.translate
        Dialog.setWindowTitle(_translate("Dialog", "加载中.."))
|
from itertools import islice
from random import random
import time
import csv
import os
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import sys
from matplotlib import interactive
import datetime as dt
import matplotlib.animation as animation
from pylive import live_plotter
interactive(True)
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
import seaborn as sns
import matplotlib
import config
import timeSeries
import DataStreamer as DS # the wrapper for the datastreamer
ds = DS.DataStreamer()
#update the graph
def update_line(hl, new_data):
hl.set_xdata(np.append(hl.get_xdata(), new_data))
hl.set_ydata(np.append(hl.get_ydata(), new_data))
plt.draw()
def saveResults(data, filename=None):
if filename is None:
filename = time.strftime("%Y%m%d-%H%M%S") + '.csv'
data.to_csv(config.resultsFilePath+filename, index=False)
return filename
# A more convenient wrapper around LabelEncoder's fit_transform, adding the features
# I personally expected when I first used it:
# - performs label encoding column by column and returns a dict of the fitted label
#   encoders so the transformation can be reversed at a later date
# - works on a copy, so the original data passed in is not modified
# - object_only=True (the default) encodes only object columns; set it to False to
#   label-encode all columns individually
def fit_transform_cols(data, object_only = True):
if object_only == True:
object_data = data.select_dtypes(include=['object']).copy()
else:
object_data = data.copy()
# print(types.head())
output = pd.DataFrame(data).copy()
le_list = {}
for col in object_data:
#print(np.sort(object_data[col].unique()) )
le = preprocessing.LabelEncoder()
le.fit(object_data[col])
le_list[col] = le
#print("classes of {} are:{}".format(col,np.sort(le.classes_)))
output[col] = le.transform(object_data[col])
return output, le_list
# Reverses fit_transform_cols. Takes the dataset and the dictionary of label encoders;
# the dictionary keys are the column names of the input dataframe whose transformation
# is to be reversed. A single label encoder can also be passed to run on the entire dataframe.
def transform_reverse_cols(data, le_list):
output = data.copy()
# if single label encoder is passed, use that for the entire dataframe.
if isinstance(le_list,preprocessing.LabelEncoder):
for col in output:
output[col]= le_list.inverse_transform(output[col])
#print("classes of {} are:{}".format(key,np.sort(value.classes_)))
else:
output = pd.DataFrame(data).copy()
for key, value in le_list.items():
print("value type:{}".format(type(value)))
print("classes of {} are:{}".format(key,np.sort(value.classes_)))
#print(key, type(value))
output[key]= value.inverse_transform(data[key])
return output
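# Added usage sketch (not part of the original module): the intended round trip through
# fit_transform_cols/transform_reverse_cols on a tiny hypothetical DataFrame. The column
# names below are illustrative only; the function is defined here but never called.
def _demo_label_encoding_roundtrip():
    demo = pd.DataFrame({"proto": ["tcp", "udp", "tcp"], "service": ["http", "dns", "http"]})
    encoded, encoders = fit_transform_cols(demo)           # object columns -> integer codes
    restored = transform_reverse_cols(encoded, encoders)   # integer codes -> original labels
    assert restored["proto"].tolist() == ["tcp", "udp", "tcp"]
    return encoded, restored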
def startDataStream():
count = 0
inputFile =[]
labels =[]
csv_start_time = time.monotonic()
data = pd.read_csv(config.CSVfileName, header = config.HEADER, nrows = config.MAXROWS)
labels = (data.iloc[:,41])
inputFile =data.drop([41], axis=1)
le = preprocessing.LabelEncoder()
le.fit(labels)
print("classes of labels are:{}".format(le.classes_))
labels_encoded = le.transform(labels)
inputFile, le_list = fit_transform_cols(inputFile)
#inputFile = inputFile.apply(preprocessing.LabelEncoder().fit_transform)
#print(inputFile)
inputFile = inputFile.astype(str)
inputFile = inputFile.values.tolist()
labels = labels_encoded.astype(str)
X_train, X_test, y_train, y_test = train_test_split(inputFile, labels, test_size=config.TESTSIZE, random_state=config.RANDOMSTATE)
#print(type(labels))
labels = labels.tolist()
if config.DEBUG:
print("total set size:{}".format(len(inputFile)))
print("train size: {}".format(len(X_train)))
print("test size: {}".format(len(X_test)))
start_time = time.monotonic()
print("csv read in: {} seconds".format(start_time - csv_start_time))
#### defining the variable input rates ####
## pass through sin curve from 0 to 10.
#vRate = np.linspace(0, 10, 1000)
#vRate = np.sin(vRate)
## pass through cos curve from 0 to 10.
#vRate = np.linspace(0, 10, 1000)
#vRate = np.cos(vRate)
## pass through steady rate of 10
#vRate = 10
## create line from 1 - 100 and then back to 1
#vRate=np.linspace(1,100,100)
#vRate=np.append(vRate,np.linspace(100,1,100))
vRate=np.linspace(1,10,100)
vRate = np.append(vRate, np.linspace(10,1,100))
vRate = np.append(vRate,np.linspace(1,10,100))
vRate = np.append(vRate,np.linspace(10,1,100))
vRate = np.append(vRate,np.linspace(1,10,100))
#vRate = np.append(vRate,np.linspace(10,1,100))
if config.DEBUG:
print("vRate = {}".format(vRate))
ds.initialize(vRate)
runStartTime = ds.process(X_train, y_train, X_test)
#print("initReader: {} sent, {} recieved".format(len(inputFile),sent))
#print(cppProcess.checkComplete())
count =0
size = 100
x_vec = np.linspace(0,1,size+1)[0:-1]
#y_vec = np.random.randn(len(x_vec))
#x_vec = np.zeros(shape=(1,1))
y_vec = np.zeros(shape=(100,1))
line1 = []
fig=plt.figure(figsize=(13,6))
while ds.checkComplete() != True:
#print(ds.checkException())
#if config.DEBUG:
# print('currently processed {} lines...\r'.format(ds.getResultsCount()))
#y_vec[-1] = np.random.randn(1)
y_vec[-1] = ds.getResultsCount()
line1 = live_plotter(x_vec,y_vec,line1, figure=fig)
y_vec = np.append(y_vec[1:],0.0)
## display results ##
#results = cppProcess.getResults()
### Results ###
results = ds.getResults()
end_time = time.monotonic()
print("results processed in: {} seconds".format(end_time - start_time))
print(type(results))
df_results = pd.DataFrame(results)
#print(df_results.shape)
#print(y_test.shape)
# if the y_test array isn't full, initialize it to NAN values so it can be added to the output for consistency sake.
if len(y_test) == 0:
y_test = np.full(len(df_results.index), np.nan)
df_results['label'] = y_test
#print(df_results.head())
df_results = df_results.rename(columns={ df_results.columns[0]: "predicted",df_results.columns[1]: "readInTime",df_results.columns[2]: "processTime",df_results.columns[3]: "latency",df_results.columns[4]: "Label" })
#print("return results: {}".format(results))
print("return results: {} rows processed".format(len(df_results)))
print(df_results.head())
print(DS.caclulateErr(df_results))
#print(DS.caclulateLatency(df_results, vRate))
fileName = saveResults(df_results)
newVRate = DS.expandVRate(vRate, df_results)
print(df_results.dtypes)
DS.visualizeResults(newVRate, df_results, fileName)
input()
if __name__ == "__main__":
startDataStream()
print("program end!")
|
#brew install wxpython
#sudo apt install linuxbrew-wrapper
#sudo apt install python-pip
#pip install SpeechRecognition
#sudo apt-get install python-pyaudio python3-pyaudio
#pip install --allow-unverified=pyaudio pyaudio
#sudo apt-get install python-wxtools
#sudo pip install pyttsx
#sudo pip install wikipedia
#IF MEMORYERROR = run as sudo pip --no-cache-dir install SpeechRecognition
'''
Holmes: The Know-It-All Encyclopedia who will assist you
in your search for knowledge.
Copyright (c) 2017 Revekka Kostoeva
'''
#Imports
import wx
import wikipedia
import pyttsx
import pyaudio
import feedparser
import random
#Define engine for speech, set properties (rate, accent)
engine = pyttsx.init()
engine.setProperty('rate', 115)
engine.setProperty('voice', "en-scottish")
#List of RSS feeds for news sources
newsurls = {
'googlenews': 'http://news.google.com/?output=rss',
'cnn': 'http://rss.cnn.com/rss/cnn_topstories.rss',
'wired': 'https://www.wired.com/feed/',
'scientific american': 'http://rss.sciam.com/sciam/biology'
}
#Fetch rss feed and return parsed RSS
def parseRSS(rss_url):
return feedparser.parse(rss_url)
#Fetch RSS feed headlines (titles), return them as String
def getHeadlines(rss_url):
headlines = []
feed = parseRSS(rss_url)
for newsitem in feed['items']:
headlines.append(newsitem['title'])
return headlines
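#Added usage sketch (not in the original script): fetch and print the first few titles
#from one of the feeds configured above. Requires network access, so it is defined here
#but never called.
def demo_print_headlines(feed_key='wired', limit=5):
    for title in getHeadlines(newsurls[feed_key])[:limit]:
        print(title)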
#Create frame (GUI)
class MyFrame(wx.Frame):
def __init__(self):
#GUI Constructor
wx.Frame.__init__(self, None,
pos=wx.DefaultPosition,
size = wx.Size(450, 100),
style = wx.MINIMIZE_BOX | wx.SYSTEM_MENU | wx.CAPTION | wx.CLOSE_BOX
| wx.CLIP_CHILDREN,
title = "Holmes")
panel = wx.Panel(self)
my_sizer = wx.BoxSizer(wx.VERTICAL)
lbl = wx.StaticText(panel, label = "Hello. How may my superior mind assist you today?")
my_sizer.Add(lbl, 0, wx.ALL, 5)
self.txt = wx.TextCtrl(panel, style=wx.TE_PROCESS_ENTER, size=(400,30))
self.txt.SetFocus()
self.txt.Bind(wx.EVT_TEXT_ENTER, self.OnEnter)
my_sizer.Add(self.txt, 0, wx.ALL, 5)
panel.SetSizer(my_sizer)
self.Show()
engine.say("Hello. How may my superior mind assist you today?")
engine.runAndWait()
#Define boolean variable
self.beginning = True
#List to hold all headlines
self.allHeadlines = []
#Define function to invoke when the engine event fires
def fire(word):
print(word)
#Define OnEnter click event
def OnEnter(self, event):
input = self.txt.GetValue()
input = input.lower()
self.txt.Clear()
engine = pyttsx.init()
engine.setProperty('rate', 120)
engine.setProperty('voice', "en-scottish")
if self.beginning == True:
engine.runAndWait()
self.beginning = False
#Define engine for speech, set properties (rate, accent)
if input == '':
phrases = ["You look marvelous today", "Ahh, to be friends with a mind as superior as my own",
"That was a curios incident", "They were the footprints of a gigantic hound!",
"I make a point of never having any prejudices, and of following docilely where fact may lead me",
"It is a capital mistake to theorize in advance of the facts. Insensibly one begins to twist facts to suit theories instead of theories to suit facts",
"It is my business to know what other people do not know",
"Scotland Yard feels lonely without me, and it causes an unhealthy excitement among the criminal classes",
"No man lives or has ever lived who has brought the same amount of study and of natural talent to the detection of crime which I have done",
"Never trust to general impressions, my boy, but concentrate yourself upon details",
"The chief proof of man's real greatness lies in his perception of his own smallness",
"There is nothing more stimulating than a case where everything goes against you",
]
engine.say(random.choice(phrases))
return
if "i am bored" in input or "suprise me" in input:
phrases = ["When you have eliminated the impossible, whatever remains, however improbably, must be the truth",
"There is no one who knows the higher criminal world of London so well as I do",
"Boredom is only a state of mind and it is not permanent",
"The world is full of obvious things which nobody by any chance ever observes",
"I trust that age doth not wither nor custom stale my infinite variety",
"The work is its own reward",
"Mediocrity knows nothing higher than itself; but talent instantly recognizes genius",
"Where there is no imagination there is no horror",
"The game is afoot",
"You say that we go round the sun. If we went round the moon it would not make a pennyworth of difference to me or to my work",
]
engine.say(random.choice(phrases))
return
if "hi" in input or "hello" in input:
engine.say("Hello, dear")
return
elif "news" in input or "headlines" in input or "headline" in input:
for key, url in newsurls.items():
self.allHeadlines.extend(getHeadlines(url))
for headline in self.allHeadlines:
engine.say(headline)
elif ("print" in input) and ("more" in input):
input =input.replace("print", "")
input =input.replace("more", "")
print wikipedia.summary(input)
elif "print" in input:
input = input.replace('print', '')
print wikipedia.summary(input, sentences = 2)
elif "more" in input:
input = input.replace("more", "")
engine.say(wikipedia.summary(input))
elif ("who" in input or "what" in input) and ("more" in input) and ("print" in input):
input = input.replace("print", "")
input = input.replace("more", "")
print(wikipedia.summary(input))
elif ("who" in input or "what" in input) and ("more" in input):
input = input.split(' ')
input = " ".join(input[2:])
engine.say(wikipedia.summary(input))
elif ("who" in input or "what" in input) and ("print" in input):
input = input.replace("print", "")
print(wikipedia.summary(input, sentences=2))
elif "who" in input or "what" in input:
input = input.split(' ')
input = " ".join(input[2:])
engine.say(wikipedia.summary(input, sentences = 2))
else:
engine.say(wikipedia.summary(input, sentences = 2))
#Main
if __name__ == "__main__":
app = wx.App(True)
frame = MyFrame()
app.MainLoop()
|
import hashlib
import json
from six.moves.urllib.request import urlopen # noqa
from six.moves.urllib.parse import urlencode # noqa
class Message:
'''
@init
'''
def __init__(self, message_configs):
self.sign_type = 'normal'
self.message_configs = message_configs
self.signature = ''
'''
@createSignature
'''
def __create_signature(self, request):
if self.sign_type == 'normal':
self.signature = self.message_configs['appkey']
else:
self.__build_signature(request)
'''
@buildSignature
'''
def __build_signature(self, request):
appid = self.message_configs['appid']
appkey = self.message_configs['appkey']
para_keys = sorted(request.keys())
sign_str = ''
for key in para_keys:
sign_str += "%s=%s&"%(key,request[key])
sign_str = appid+appkey+sign_str[:-1]+appid+appkey
if self.sign_type == 'md5':
digest = hashlib.md5()
digest.update(sign_str.encode('utf-8'))
self.signature = digest.hexdigest()
elif self.sign_type == 'sha1':
digest = hashlib.sha1()
digest.update(sign_str.encode('utf-8'))
self.signature = digest.hexdigest()
def __http_get(self, url):
# The API endpoints return JSON; parse it instead of eval()-ing the response body.
return json.loads(urlopen(url=url).read().decode('utf-8'))
def __http_post(self, url, para):
data = urlencode(para)
resp = urlopen(url=url, data=data.encode()).read()
return json.loads(resp.decode('utf-8'))
'''
@getTimestamp
'''
def get_timestamp(self):
api = 'https://api.submail.cn/service/timestamp.json'
resp = self.__http_get(api)
return resp['timestamp']
'''
@Send
'''
def send(self,request):
'''
@setup API httpRequest URI
'''
api = 'https://api.submail.cn/message/send.json'
'''
create final API post query Start
'''
request['appid'] = self.message_configs['appid']
'''
@get timestamp from server
'''
request['timestamp'] = self.get_timestamp()
'''
@setup sign_type
'''
sign_type_state = ['normal','md5','sha1']
if 'sign_type' not in self.message_configs:
self.sign_type = 'normal'
elif self.message_configs['sign_type'] not in sign_type_state:
self.sign_type = 'normal'
else:
self.sign_type = self.message_configs['sign_type']
request['sign_type'] = self.message_configs['sign_type']
'''
@create signature
'''
self.__create_signature(request)
request['signature'] = self.signature
'''
create final API post query End
'''
'''
@send request
'''
return self.__http_post(api, request)
'''
@xsend
'''
def xsend(self, request):
'''
@setup API httpRequest URI
'''
api = 'https://api.submail.cn/message/xsend.json'
'''
create final API post query Start
'''
request['appid'] = self.message_configs['appid']
'''
@get timestamp from server
'''
request['timestamp'] = self.get_timestamp()
'''
@setup sign_type
'''
sign_type_state = ['normal','md5','sha1']
if 'sign_type' not in self.message_configs:
self.sign_type = 'normal'
elif self.message_configs['sign_type'] not in sign_type_state:
self.sign_type = 'normal'
else:
self.sign_type = self.message_configs['sign_type']
request['sign_type'] = self.message_configs['sign_type']
'''
@create signature
'''
self.__create_signature(request)
request['signature'] = self.signature
'''
create final API post query End
'''
'''
@send request
'''
return self.__http_post(api, request)
'''
@addressbook/message/subscribe
'''
def subscribe(self,request):
'''
@setup API httpRequest URI
'''
api='https://api.submail.cn/addressbook/message/subscribe.json'
'''
create final API post query Start
'''
request['appid'] = self.message_configs['appid']
'''
@get timestamp from server
'''
request['timestamp'] = self.get_timestamp()
'''
@setup sign_type
'''
sign_type_state = ['normal','md5','sha1']
if 'sign_type' not in self.message_configs:
self.sign_type = 'normal'
elif self.message_configs['sign_type'] not in sign_type_state:
self.sign_type = 'normal'
else:
self.sign_type = self.message_configs['sign_type']
request['sign_type'] = self.message_configs['sign_type']
'''
@create signature
'''
self.__create_signature(request)
request['signature'] = self.signature
'''
create final API post query End
'''
'''
@subscribe request
'''
return self.__http_post(api, request)
'''
@addressbook/message/unsubscribe
'''
def unsubscribe(self,request):
'''
@setup API httpRequest URI
'''
api='https://api.submail.cn/addressbook/message/unsubscribe.json'
'''
create final API post query Start
'''
request['appid'] = self.message_configs['appid']
'''
@get timestamp from server
'''
request['timestamp'] = self.get_timestamp()
'''
@setup sign_type
'''
sign_type_state = ['normal','md5','sha1']
if 'sign_type' not in self.message_configs:
self.sign_type = 'normal'
elif self.message_configs['sign_type'] not in sign_type_state:
self.sign_type = 'normal'
else:
self.sign_type = self.message_configs['sign_type']
request['sign_type'] = self.message_configs['sign_type']
'''
@create signature
'''
self.__create_signature(request)
request['signature'] = self.signature
'''
create final API post query End
'''
'''
@unsubscribe request
'''
return self.__http_post(api, request)
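# Added usage sketch (not part of the original client): the config keys 'appid',
# 'appkey' and 'sign_type' are the ones read by the class above, while the request
# fields in the commented call are placeholders, not a verified list of API parameters.
def _demo_message_client():
    configs = {'appid': 'your-appid', 'appkey': 'your-appkey', 'sign_type': 'md5'}
    client = Message(configs)
    # return client.xsend({'to': '...', 'project': '...'})  # hypothetical fields
    return client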
|
from __future__ import unicode_literals

from . import ValidationTestCase
from .models import ModelToValidate


class TestModelsWithValidators(ValidationTestCase):
    def test_custom_validator_passes_for_correct_value(self):
        mtv = ModelToValidate(number=10, name='Some Name', f_with_custom_validator=42,
                              f_with_iterable_of_validators=42)
        self.assertIsNone(mtv.full_clean())

    def test_custom_validator_raises_error_for_incorrect_value(self):
        mtv = ModelToValidate(number=10, name='Some Name', f_with_custom_validator=12,
                              f_with_iterable_of_validators=42)
        self.assertFailsValidation(mtv.full_clean, ['f_with_custom_validator'])
        self.assertFieldFailsValidationWithMessage(
            mtv.full_clean,
            'f_with_custom_validator',
            ['This is not the answer to life, universe and everything!']
        )

    def test_field_validators_can_be_any_iterable(self):
        mtv = ModelToValidate(number=10, name='Some Name', f_with_custom_validator=42,
                              f_with_iterable_of_validators=12)
        self.assertFailsValidation(mtv.full_clean, ['f_with_iterable_of_validators'])
        self.assertFieldFailsValidationWithMessage(
            mtv.full_clean,
            'f_with_iterable_of_validators',
            ['This is not the answer to life, universe and everything!']
        )
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from PyQt5.QtCore import Qt
from PyQt5.QtGui import QPen

from src.Figure import Figure
from utils import midpoint


class LineSegment(Figure):
    def __init__(self, start_point=None, end_point=None, border_color=None):
        location = midpoint(start_point, end_point)
        Figure.__init__(self, location, border_color)
        self.start_point = start_point
        self.end_point = end_point

    @staticmethod
    def name():
        return 'Segment'

    def render(self, qp):
        qp.setPen(self.get_pen())
        qp.drawLine(self.get_start_point(), self.get_end_point())

    def get_start_point(self):
        return self.start_point

    def set_start_point(self, value):
        self.start_point = value

    def get_end_point(self):
        return self.end_point

    def set_end_point(self, value):
        self.end_point = value
|
'''
introduction to multi-threading with python
'''
import threading
import time


def take_a_nap():
    time.sleep(5)
    print('wake up!')


print('start of program')
thread_obj = threading.Thread(target=take_a_nap)
thread_obj.start()
print('end of program')
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
from typing import List, Optional
from io import BufferedIOBase
from pathlib import Path
from .exceptions import MissingEnv, CreateDirectoryException
from redis import Redis
from redis.exceptions import ConnectionError
from datetime import datetime, timedelta
import time
from glob import glob
import json
import traceback
from urllib.parse import urlparse
from bs4 import BeautifulSoup # type: ignore
try:
import cloudscraper # type: ignore
HAS_CF = True
except ImportError:
HAS_CF = False
def get_homedir() -> Path:
if not os.environ.get('LOOKYLOO_HOME'):
# Try to open a .env file in the home directory if it exists.
if (Path(__file__).resolve().parent.parent / '.env').exists():
with (Path(__file__).resolve().parent.parent / '.env').open() as f:
for line in f:
key, value = line.strip().split('=', 1)
if value[0] in ['"', "'"]:
value = value[1:-1]
os.environ[key] = value
if not os.environ.get('LOOKYLOO_HOME'):
guessed_home = Path(__file__).resolve().parent.parent
raise MissingEnv(f"LOOKYLOO_HOME is missing. \
Run the following command (assuming you run the code from the clonned repository):\
export LOOKYLOO_HOME='{guessed_home}'")
return Path(os.environ['LOOKYLOO_HOME'])
def safe_create_dir(to_create: Path):
if to_create.exists() and not to_create.is_dir():
raise CreateDirectoryException(f'The path {to_create} already exists and is not a directory')
os.makedirs(to_create, exist_ok=True)
def set_running(name: str) -> None:
r = Redis(unix_socket_path=get_socket_path('cache'), db=1, decode_responses=True)
r.hset('running', name, 1)
def unset_running(name: str) -> None:
r = Redis(unix_socket_path=get_socket_path('cache'), db=1, decode_responses=True)
r.hdel('running', name)
def is_running() -> dict:
r = Redis(unix_socket_path=get_socket_path('cache'), db=1, decode_responses=True)
return r.hgetall('running')
def get_socket_path(name: str) -> str:
mapping = {
'cache': Path('cache', 'cache.sock'),
'storage': Path('storage', 'storage.sock'),
}
return str(get_homedir() / mapping[name])
def check_running(name: str) -> bool:
socket_path = get_socket_path(name)
try:
r = Redis(unix_socket_path=socket_path)
return True if r.ping() else False
except ConnectionError:
return False
def shutdown_requested() -> bool:
try:
r = Redis(unix_socket_path=get_socket_path('cache'), db=1, decode_responses=True)
return True if r.exists('shutdown') else False
except ConnectionRefusedError:
return True
except ConnectionError:
return True
def long_sleep(sleep_in_sec: int, shutdown_check: int=10) -> bool:
if shutdown_check > sleep_in_sec:
shutdown_check = sleep_in_sec
sleep_until = datetime.now() + timedelta(seconds=sleep_in_sec)
while sleep_until > datetime.now():
time.sleep(shutdown_check)
if shutdown_requested():
return False
return True
def update_user_agents():
if not HAS_CF:
# The website with the UAs is behind Cloudflare's anti-bot page, we need cloudscraper
return
today = datetime.now()
ua_path = get_homedir() / 'user_agents' / str(today.year) / f'{today.month:02}'
safe_create_dir(ua_path)
ua_file_name = ua_path / f'{today.date().isoformat()}.json'
if ua_file_name.exists():
# Already have a UA for that day.
return
try:
s = cloudscraper.create_scraper()
r = s.get('https://techblog.willshouse.com/2012/01/03/most-common-user-agents/')
except Exception as e:
traceback.print_exc()
return
soup = BeautifulSoup(r.text, 'html.parser')
uas = soup.find_all('textarea')[1].text
to_store = {'by_frequency': []}
for ua in json.loads(uas):
os_name = ua['system'].split(' ')[-1]  # renamed to avoid shadowing the os module
if os_name not in to_store:
to_store[os_name] = {}
browser = ' '.join(ua['system'].split(' ')[:-1])
if browser not in to_store[os_name]:
to_store[os_name][browser] = []
to_store[os_name][browser].append(ua['useragent'])
to_store['by_frequency'].append({'os': os_name, 'browser': browser, 'useragent': ua['useragent']})
with open(ua_file_name, 'w') as f:
json.dump(to_store, f, indent=2)
def get_user_agents() -> dict:
ua_files_path = str(get_homedir() / 'user_agents' / '*' / '*' / '*.json')
paths = sorted(glob(ua_files_path), reverse=True)
if not paths:
update_user_agents()
paths = sorted(glob(ua_files_path), reverse=True)
with open(paths[0]) as f:
return json.load(f)
def load_cookies(cookie_pseudofile: Optional[BufferedIOBase]=None) -> List[dict]:
if cookie_pseudofile:
cookies = json.load(cookie_pseudofile)
else:
if not (get_homedir() / 'cookies.json').exists():
return []
with (get_homedir() / 'cookies.json').open() as f:
cookies = json.load(f)
to_return = []
try:
for cookie in cookies:
u = urlparse(cookie['Host raw']).netloc.split(':', 1)[0]
to_add = {'path': cookie['Path raw'],
'name': cookie['Name raw'],
'httpOnly': cookie['HTTP only raw'] == 'true',
'secure': cookie['Send for'] == 'Encrypted connections only',
'expires': (datetime.now() + timedelta(days=10)).strftime('%Y-%m-%dT%H:%M:%S') + 'Z',
'domain': u,
'value': cookie['Content raw']
}
to_return.append(to_add)
except Exception as e:
print(f'Unable to load the cookie file: {e}')
return to_return
|
# Copyright 2017 Conchylicultor. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
""" Image preprocessing utilities
"""
import functools

import tensorflow as tf
def center_scale(img):
# Finally, rescale to [-1,1] instead of [0, 1)
with tf.name_scope('center_scale'):
img = img - 0.5
img = img * 2.0
# The centered scaled image is directly compatible with tf.summary
return img
def _img_transformation(self, img):
""" Apply some random transformations to the tensor image.
Use the same transformations that the official inception code
Warning: The input image should range from [0-1]
Return the new image
"""
with tf.name_scope('image_transform'):
# TODO: If modifying the orientation, also need to modify the labels accordingly
#img = tf.image.random_flip_left_right(img)
#img = tf.image.random_flip_up_down(img)
choice = tf.random_uniform(shape=(), minval=0, maxval=2, dtype=tf.int32) # Generate a number inside [0,1]
choice = tf.cast(choice, tf.bool)
brightness = functools.partial(tf.image.random_brightness, max_delta=32/255) # imgnet: 32. / 255. ? Ciffar: 63 TODO: Tune
contrast = functools.partial(tf.image.random_contrast, lower=0.5, upper=1.5) # imgnet: lower=0.5, upper=1.5
#hue = functools.partial(tf.image.random_hue, max_delta=0.2)
saturation = functools.partial(tf.image.random_saturation, lower=0.5, upper=1.5)
choices = [
[brightness, saturation, contrast],
[brightness, contrast, saturation],
]
def transform(input_img, n=None):
for fct in choices[n]:
input_img = fct(input_img)
return input_img
# Randomly apply transform order 1 or 2
transform1 = functools.partial(transform, img, n=0)
transform2 = functools.partial(transform, img, n=1)
img = tf.cond(choice, transform1, transform2)
# The random_* ops do not necessarily clamp.
img = tf.clip_by_value(img, 0.0, 1.0)
return img
def get_t_jpg(filename, preprocessing=None):
"""
Return a float tensor image in range [-1.0 ; 1.0]
preprocessing is a transformation function
Preprocessing is done with float32 images in range [0.0, 1.0]
"""
# TODO: Difference between tf.read_file() and tf.WholeFileReader() ?
# TODO: Add summary ?
t_image = tf.read_file(filename)
t_image = tf.image.decode_jpeg(t_image, channels=3) # [0-255], RGB
t_image = tf.image.convert_image_dtype(t_image, dtype=tf.float32) # Range: [0-1]
# Should normalize images ? How to compute mean-std on the
# training set (randomly sample 10000 images from the training set
# Would this bias the result ? Should we just perform a simple
# linear scaling), use per_image_standardization ?
# tf code for inception only call convert_image_dtype and at the end (
# after image preprocessing, scale to [-1.0;1.0])
if preprocessing is not None:
t_image = preprocessing(t_image)
t_image = center_scale(t_image) # Finally, rescale to [-1,1] instead of [0, 1)
return t_image
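# Added usage sketch (not part of the original module), assuming TF1-style sessions and
# a JPEG file at the placeholder path 'example.jpg'; defined here but never called.
def _demo_load(filename='example.jpg'):
    t_img = get_t_jpg(filename)
    with tf.Session() as sess:
        return sess.run(t_img)  # float32 array in [-1.0, 1.0]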
|
import os
import requests
import unittest
from riptide.engine import loader as riptide_engine_loader
from riptide.tests.integration.project_loader import load
from riptide.tests.integration.testcase_engine import EngineTest
class EngineStartStopTest(EngineTest):
def test_engine_loading(self):
for project_ctx in load(self,
['integration_all.yml', 'integration_some.yml',
'integration_no_command.yml', 'integration_no_service.yml'],
['.']):
with project_ctx as loaded:
loaded_engine = riptide_engine_loader.load_engine(loaded.engine_name)
self.assertIsInstance(loaded_engine, loaded.engine.__class__,
'The engine loader has to return the correct AbstractEngine instance of the engine')
def test_start_stop(self):
pass # XXX: PyCharm has a problem with docstrings in tests with subtests
"""Full start/stop check for all different scenarios"""
for project_ctx in load(self,
['integration_all.yml', 'integration_no_command.yml', 'integration_no_service.yml'],
['.', 'src']):
with project_ctx as loaded:
project = loaded.config["project"]
services = project["app"]["services"].keys() if "services" in project["app"] else []
# Create src folder
os.makedirs(os.path.join(loaded.temp_dir, loaded.src), exist_ok=True)
# START
self.run_start_test(loaded.engine, project, services, loaded.engine_tester)
# STOP
self.run_stop_test(loaded.engine, project, services, loaded.engine_tester)
def test_start_stop_subset(self):
pass # XXX: PyCharm has a problem with docstrings in tests with subtests
"""Start some services, stop some again, assert that the rest is still running and then stop the rest."""
for project_ctx in load(self,
['integration_all.yml'],
['.']):
with project_ctx as loaded:
project = loaded.config["project"]
services_to_start_first = ["simple", "simple_with_src", "custom_command", "configs"]
services_to_stop_first = ["custom_command", "simple_with_src"]
still_running_after_first = ["configs", "simple"]
services_to_start_end = project["app"]["services"].keys() if "services" in project["app"] else []
# Create src folder
os.makedirs(os.path.join(loaded.temp_dir, loaded.src), exist_ok=True)
# START first
self.run_start_test(loaded.engine, project, services_to_start_first, loaded.engine_tester)
# STOP first
self.run_stop_test(loaded.engine, project, services_to_stop_first, loaded.engine_tester)
# Assert the rest is still running
self.assert_running(loaded.engine, project, still_running_after_first, loaded.engine_tester)
# START end
self.run_start_test(loaded.engine, project, services_to_start_end, loaded.engine_tester)
# STOP end
self.run_stop_test(loaded.engine, project, services_to_start_end, loaded.engine_tester)
def test_simple_result(self):
pass # XXX: PyCharm has a problem with docstrings in tests with subtests
"""Starts only the simple test service and checks it's http response"""
for project_ctx in load(self,
['integration_all.yml'],
['.']):
with project_ctx as loaded:
project = loaded.config["project"]
services = ["simple"]
# START
self.run_start_test(loaded.engine, project, services, loaded.engine_tester)
# Check response
self.assert_response(b'hello riptide\n', loaded.engine, project, "simple")
# STOP
self.run_stop_test(loaded.engine, project, services, loaded.engine_tester)
|
#!/usr/bin/python
import random
import sys
import time
def overlap(start1, end1, start2, end2):
# using this routine to check if two lines overlap. Essential if they
# intersect and are on the same row (horizontal) or column (vertical)
# then they overlap
return (end1 >= start2) and (end2 >= start1)
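# Added illustration (not in the original script): two quick checks of overlap().
# Positions 2..5 and 4..9 share cells 4 and 5, while 2..5 and 6..9 do not touch.
assert overlap(2, 5, 4, 9)
assert not overlap(2, 5, 6, 9)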
def coord():
# auto generate random coordinates, length and function
# function means horizontal, vertical or diagonal number search
x = random.randint(0,16) # row
y = random.randint(0,16) # column
f = random.randint(1,3) # function
l = random.randint(5,9) # length
if f == 1: # horizontal mode
if (y+l >= 17):
d = 2
elif (y-l < 0):
d = 1
else:
d = random.randint(1,2)
elif f == 2: # vertical mode
if (x+l >= 17):
d = 2
elif (x-l < 0):
d = 1
else:
d = random.randint(1,2)
elif f == 3: # diagonal mode
if (x+l >= 17) and (y-l < 0): # need to search up-right
d = 1
elif (x+l >= 17) and (y+l >= 17): # need to search up-left
d = 2
elif (x-l < 0) and (y-l < 0): # need to search down-right
d = 3
elif (x-l < 0) and (y+l >= 17): # need to search down-left
d = 4
elif (x+l <= 16 and x-l >=0) and (y-l < 0):
d = random.choice([1,3])
elif (x+l <= 16 and x-l >=0) and (y+l >= 17):
d = random.choice([2,4])
elif (x-l < 0) and (y+l <= 16 and y-l >=0):
d = random.choice([3,4])
elif (x+l >= 17) and (y+l <= 16 and y-l >=0):
d = random.choice([1,2])
else:
d = random.randint(1,4)
else:
pass
return x,y,f,l,d
def find_numb(m,x,y,f,l,d):
# this routine finds the number search in the matrix and returns that number
# based on d we know either horizontal, vertical, or diagonal
# print ("%s %s %s %s %s" % (x,y,f,l,d))
n=[]
for i in range (l):
if f == 1 and d == 1: # horizontal mode
n.append(m[x][y+i]) # right
if f == 1 and d == 2:
n.append(m[x][y-i]) # left
if f == 2 and d == 1: # vertical mode
n.append(m[x+i][y]) # down
if f == 2 and d == 2:
n.append(m[x-i][y]) # up
if f == 3 and d == 1: # diagonal mode
n.append(m[x-i][y+i]) # up-right
elif f == 3 and d == 2:
n.append(m[x-i][y-i]) # up-left
elif f == 3 and d == 3:
n.append(m[x+i][y+i]) # down-right
elif f == 3 and d == 4:
n.append(m[x+i][y-i]) # down-left
else:
pass
return n
# html_file is the output html file
html_file = "number_search_puzzle.html"
matrix, result, dup_list, items, numb =([] for i in range (5))
hd1,hd2,vd1,vd2,dd1,dd2,dd3,dd4=([] for i in range(8))
matrix_puzz=""
numbers_puzz=""
position_from_top = 80 # for display in the html file.
regen_coord = 1
number_of_puzzles = 1
if len(sys.argv) > 1:
number_of_puzzles = int(sys.argv[1])
# the html_content string
html_content = """<HTML>
<!doctype html public "-//w3c//dtd html 3.2//en">
<head>
<style>
.container {
width: 800px;
position: relative;
}
.left-element {
display: inline-block;
position: relative;
outline: #2eb7ed solid medium;
padding: 10px;
font-family: arial;
font-weight: bold;
font-size: 25;
letter-spacing: 4px;
left: 20;
top: 40;
margin-left: 20px;
}
.left_under {
display: inline-block;
position: absolute;
font-family: arial;
font-weight: bold;
padding-top: 570px;
vertical-align: middle;
font-size: 15;
left: 230;
}
.right-element {
display: inline-block;
position: absolute;
font-family: arial;
font-weight: bold;
font-size: 25;
letter-spacing: 4px;
right: 0;
}
.pagebreak { page-break-after: always;
position: relative; }
</style>
</head>
<body bgcolor="ffffff" text="000000" link="0000ff" vlink="800080" alink="ff0000"> """
for repeat in range (int(number_of_puzzles)):
# Need to produce the number matrix. Below implemented as 17x17
for y in range (17):
row=[]
for x in range (17):
row.append(random.randint(0,9))
matrix.append(row)
# The next part takes each line from the matrix, saves it to a variable and then adds it to the
# html file using the variable html_content. This could probably be combined with the loop that
# produced the matrix list, but it is kept separate to make it easier to understand.
html_content += """<div class="container">
<div class="left-element">
"""
for n in range (17):
matrix_puzz = " ".join([str(x) for x in matrix[n]])
#html_content += """<div1>"""
html_content += matrix_puzz
html_content += """<br>"""
html_content += """</div>"""
html_content += """<div class="left_under">"""
html_content += """Puzzle Number: """
html_content += str(repeat+1)
html_content += """</div>"""
#position_from_top += 29
# Next part creates multiple random coordinates to look for the numbers in the puzzle
html_content += """<div class="right-element">
"""
# pos_from_top_numbers = 80 # need to reinit the position from top for the numbers
# displayed on the right side of the puzzle
# The next part is generating 21 numbers. ie numbers to search.
for p in range (21):
row, column, func, length, direction = coord()
while (regen_coord):
# Need to look for overlapping numbers in the puzzle. Lists hd1, hd2,
# vd1, vd2, and dd1-4 act as history buffer to compare against
if (func == 1 and direction == 1): #right
if hd1 and not dup_list:
dup_list = [items for items in hd1 if
((overlap (items[1], (items[1]+items[2]), column,
(column+length)) and (items[0] == row)))]
if hd2 and not dup_list:
dup_list = [items for items in hd2 if
((overlap ((items[1]-items[2]), items[1], column,
(column+length)) and (items[0] == row)))]
elif (func == 1 and direction == 2): # left
if hd2 and not dup_list:
dup_list = [items for items in hd2 if
((overlap ((items[1]-items[2]), items[1], (column-length),
column) and (items[0] == row)))]
if hd1 and not dup_list:
dup_list = [items for items in hd1 if
((overlap (items[1], (items[1]+items[2]), (column-length),
column) and (items[0] == row)))]
elif (func == 2 and direction == 1): # down
if vd1 and not dup_list:
dup_list = [items for items in vd1 if
((overlap (items[0], (items[0]+items[2]), row, (row+length))
and (items[1] == column)))]
if vd2 and not dup_list:
dup_list = [items for items in vd2 if
((overlap ((items[0]-items[2]), items[0], row,
(row+length)) and (items[1] == column)))]
elif (func == 2 and direction == 2): # up
if vd2 and not dup_list:
dup_list = [items for items in vd2 if
((overlap ((items[0]-items[2]), items[0], (row-length), row)
and (items[1] == column)))]
if vd1 and not dup_list:
dup_list = [items for items in vd1 if
((overlap (items[0], (items[0]+items[2]), (row-length),
row) and (items[1] == column)))]
elif (func == 3 and (direction == 1 or direction == 4)): # up-right or
dup_list = [items for items in dd1 if # down-left
(((items[0]+items[1]) == (row+column)) and dd1)]
if not dup_list:
dup_list = [items for items in dd4 if
(((items[0]+items[1]) == (row+column)) and dd4)]
elif (func == 3 and (direction == 2 or direction == 3)): # up-left or
dup_list = [items for items in dd2 if # down-right
(((items[0]-items[1]) == (row-column)) and dd2)]
if not dup_list:
dup_list = [items for items in dd3 if
(((items[0]-items[1]) == (row-column)) and dd3)]
if not dup_list:
result = [row, column, length]
if (func == 1 and direction == 1):
hd1.append(result)
elif (func == 1 and direction == 2):
hd2.append(result)
elif (func == 2 and direction == 1):
vd1.append(result)
elif (func == 2 and direction == 2):
vd2.append(result)
elif (func == 3 and direction == 1):
dd1.append(result)
elif (func == 3 and direction == 2):
dd2.append(result)
elif (func == 3 and direction == 3):
dd3.append(result)
elif (func == 3 and direction == 4):
dd4.append(result)
regen_coord = 0
elif dup_list:
# print dup_list
row, column, func, length, direction = coord()
#print "dedup"
del dup_list[:]
regen_coord = 1
# Now that we have the random coordinates we pass that to the routine to generate
# the actual number
numb=find_numb(matrix,row,column,func,length,direction)
# option to print the coordinates of each number in the matrix...
#print ("row=%s, column=%s, func=%s, length=%s, direction=%s" % (row+1, column+1,
# func, length, direction))
numbers_puzz = "".join([str(x) for x in numb])
# we get the number back from the routine then append to the html file we will write
# at the end
#html_content += """<DIV style="display:inline-block Right; font-family: arial; font-style: italic;
# font-variant:small-caps; font-weight:bold;letter-spacing: 4px; font-size:25;
# width: 250px; left: 650px; height: 25px">"""
html_content += """<span>"""
html_content += numbers_puzz
html_content += """</span>"""
html_content += """<br>"""
html_content += """</div>"""
#position_from_top += 29
    if int(number_of_puzzles) > 1:
html_content += """
<br>
<br>
<br>
<br>
<br>
<br>
<br>
<br>
<br>
<br>
<br>
<br>
<br>
<div class="pagebreak"> </div>
"""
y = 0
matrix = []
#time.sleep(0.5)
hd1,hd2,vd1,vd2,dd1,dd2,dd3,dd4=([] for i in range(8))
# Now complete the trailing part of the html file
html_content += """
</body>
</html>
"""
# write to the html and properly close it
out_file = open(html_file, "w")
out_file.write(html_content)
out_file.close()
|
from bs4 import BeautifulSoup as bs
import pandas as pd
from splinter import Browser
from splinter.exceptions import ElementDoesNotExist
import time
import re
import ast
import matplotlib
matplotlib.use('Agg')  # select a non-interactive backend before pyplot/seaborn are imported
import matplotlib.pyplot as plt
import seaborn as sns
import datetime
def init_browser():
"""start chrome browser
"""
executable_path = {'executable_path': '/usr/local/bin/chromedriver'}
return Browser('chrome', **executable_path, headless=True)
def visit_browser(url:str):
"""use splinter to visit browser
"""
    browser = init_browser()
    soup = None
    try:
        browser.visit(url)
        time.sleep(2)
        html = browser.html
        soup = bs(html, "html5lib")
    except Exception:
        print("Error in visiting the page----")
    return soup
def scrape_cards_info(html_str:str, name:str, html_tag:str, html_class:str, order_num:int):
"""scrape homecard information
"""
if name in ["beds","baths","area"]:
try:
output = html_str.find(html_tag,html_class).find_all("div","stats")[order_num].text
except:
print(f"error in finding {name} variable-------")
output = ""
elif name in ["link"]:
output = html_str.a["href"]
elif name in ["price"]:
try:
output = html_str.find(html_tag, html_class).text
except:
output = ""
print(f"error in finding {name} variable------")
else:
try:
output = html_str.find(html_tag,html_class).span.text
except:
output = ""
print(f"error in finding {name} variable------")
return output
def scraper():
"""scrape redfin hourse information
"""
# define url and visit
main_url = "https://www.redfin.com/county/321/CA/Los-Angeles-County"
soup = visit_browser(main_url)
# find out number of pages
last_page = int(soup.find_all("div","PagingControls")[0].find_all("a")[-1].text)
homecard_list = []
for i in range(last_page):
url = f"https://www.redfin.com/county/321/CA/Los-Angeles-County/Page-{i+1}"
# visit each page
soup = visit_browser(url)
# find out number of cards
homecards = soup.find_all("div","bottomV2")
        # loop through each homecard
for homecard in homecards:
sub_dict= {}
# store values into sub_dict
sub_dict["beds"] = scrape_cards_info(homecard, "beds","div","HomeStatsV2",0)
sub_dict["baths"] = scrape_cards_info(homecard, "baths","div","HomeStatsV2",1)
sub_dict["area"] = scrape_cards_info(homecard, "area","div","HomeStatsV2",2)
sub_dict["price"] = scrape_cards_info(homecard, "price","span","homecardV2Price",1)
sub_dict["address"] = scrape_cards_info(homecard, "address","div","homeAddressV2",1)
sub_dict["link"] = "https://www.redfin.com"+scrape_cards_info(homecard, "link","","",1)
sub_dict["time"] = datetime.datetime.utcnow()
try:
sub_dict["city"] = ast.literal_eval(homecard.find_all("script")[0].text)[0]["address"]["addressLocality"].strip()
except:
sub_dict["city"] = "No Information Available"
# append dict to list
homecard_list.append(sub_dict)
return homecard_list
def data_cleaner(list_of_dict)->pd.core.frame.DataFrame:
"""takes input from scraper function convert list of dict into dataframe and clean up.
"""
dataframe = pd.DataFrame(columns = ["beds","baths","area","price","address", "link","city","time"], data = list_of_dict)
# clean up data
dataframe["beds"] = [re.split("\s", bed)[0] if re.split("\s", bed)[0].isnumeric() else "" for bed in dataframe["beds"]]
dataframe["baths"] = [re.split("\s", bed)[0] if re.split("\s", bed)[0].isnumeric() else "" for bed in dataframe["baths"]]
dataframe["price"] = [int(p.replace("$","").replace(",","")) for p in dataframe["price"]]
# dataframe["area"] = [re.split("\s", a)[0].replace(",","") for a in dataframe["area"]]
dataframe = dataframe.sort_values("city",ascending = False)
return dataframe
def summary(dataframe)->dict:
"""summarize dataframe from scraper function
"""
summary_info = {}
summary_info["avg_price"] = "${:,.0f}".format(round(dataframe["price"].describe()[1],0)) # average price
summary_info["median_price"] = "${:,.0f}".format(round(dataframe["price"].describe()[5],0)) # median price
summary_info["max_price"] = "${:,.0f}".format(round(dataframe["price"].describe()[-1],0)) # max price
summary_info["html_table"] = dataframe.to_html(index=False)
return summary_info
def plot_data(dataframe)->None:
"""export a png plot
"""
pt = dataframe.groupby("city").agg(price = ("price","median")).sort_values("price", ascending = False).reset_index()
plt.figure(figsize= (20,25))
plt.title("Median House Price In LA County Area", fontsize = 30)
plot = sns.barplot(x = "price", y = "city", data = pt)
plot_figure = plot.get_figure()
plot_figure.savefig("static/plot.png")
return None
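# A minimal driver sketch (not part of the original script) chaining the functions above:
# scrape the listing pages, clean the results, print the summary figures, and save the
# plot. Assumes chromedriver at the path used in init_browser() and an existing
# "static/" directory for plot_data() to write into.
if __name__ == "__main__":
    homecards = scraper()
    df = data_cleaner(homecards)
    stats = summary(df)
    print(stats["avg_price"], stats["median_price"], stats["max_price"])
    plot_data(df)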
|
"""
codeeval.py
A utility for setting up codeeval challenges.
Usage: codeeval.py <challenge index>
"""
import sys
import requests
import os
from bs4 import BeautifulSoup
URL = "https://www.codeeval.com/browse/{}/".format
index = sys.argv[1]
FILE_CONTENTS = """\"\"\"
{url}
{title}
Challenge Description:
{challenge}
Input Sample:
{input_desc}
Output Sample:
{output_desc}
\"\"\"
"""
INPUT_CONTENTS = """
###### IO Boilerplate ######
import sys
if len(sys.argv) < 2:
input_file_name = "{}"
else:
input_file_name = sys.argv[1]
with open(input_file_name) as input_file:
input_lines = map(lambda x: x.strip(), filter(lambda x: x != '', input_file.readlines()))
###### /IO Boilerplate ######
"""
DEFAULT_CODE = """
def main():
pass
if __name__ == '__main__':
main()
"""
def format_example(ex):
output = ""
for line in ex.split('\n'):
output += "\n {}".format(line)
output += "\n"
return output
def main():
os.chdir('challenges')
generated_url = URL(index)
request = requests.get(generated_url)
soup = BeautifulSoup(request.text)
content = soup.find(id='requisition')
tags = content.find_all(['p', 'pre'])
def pop_tag(tags):
return tags.pop(0).text
description = pop_tag(tags)
input_desc = pop_tag(tags)
input_ex = ""
if tags[0].name == "pre":
input_ex = pop_tag(tags)
input_desc += format_example(input_ex)
output_desc = pop_tag(tags)
if tags[0].name == "pre":
output_desc += format_example(pop_tag(tags))
title = content.h2.text
stripped_name = title.strip().lower().replace(' ', '')
files = os.listdir('.')
valid_files = []
for fname in files:
try:
fname_prefix = fname[:fname.index('-')]
except ValueError:
continue
if fname_prefix.isdigit():
valid_files.append(int(fname_prefix))
prefix = max(valid_files) + 1
prefix = str(prefix).zfill(3)
file_name = "{}-{}.py".format(prefix, stripped_name)
if input_ex:
input_ex_file_name = "{}-{}-in.txt".format(prefix, stripped_name)
with open(file_name, 'w') as open_file:
open_file.write(FILE_CONTENTS.format(url=generated_url, title=title, challenge=description, input_desc=input_desc, output_desc=output_desc))
if input_ex:
open_file.write(INPUT_CONTENTS.format(input_ex_file_name))
open_file.write(DEFAULT_CODE)
if input_ex:
with open(input_ex_file_name, 'w') as input_file:
input_file.write(input_ex.strip())
if __name__ == '__main__':
main() |
from setuptools import find_packages, setup
with open("./README.md") as readme_file:
readme = readme_file.read()
with open("./requirements.txt") as req_file:
requirements = req_file.read()
setup(
name='qubot',
version='0.0.13',
python_requires=">=3.7",
packages=find_packages(exclude=["tests", "examples", "docs"]),
url='https://github.com/anthonykrivonos/qubot',
include_package_data=True,
license='MIT',
author='anthonykrivonos, kenkenchuen',
author_email='[email protected], [email protected]',
description='Qubot automated testing framework.',
install_requires=requirements,
setup_requires=[],
long_description=readme,
long_description_content_type="text/markdown",
zip_safe=False,
entry_points={
"console_scripts": [
"qubot=qubot.main:main"
]
}
)
|
# coding: utf-8
"""
Wavefront REST API Documentation
<p>The Wavefront REST API enables you to interact with Wavefront servers using standard REST API tools. You can use the REST API to automate commonly executed operations such as automatically tagging sources.</p><p>When you make REST API calls outside the Wavefront REST API documentation you must add the header \"Authorization: Bearer <<API-TOKEN>>\" to your HTTP requests.</p> # noqa: E501
OpenAPI spec version: v2
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from wavefront_api_client.configuration import Configuration
class Module(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'annotations': 'list[Annotation]',
'class_loader': 'ClassLoader',
'declared_annotations': 'list[Annotation]',
'descriptor': 'ModuleDescriptor',
'layer': 'ModuleLayer',
'name': 'str',
'named': 'bool',
'packages': 'list[str]'
}
attribute_map = {
'annotations': 'annotations',
'class_loader': 'classLoader',
'declared_annotations': 'declaredAnnotations',
'descriptor': 'descriptor',
'layer': 'layer',
'name': 'name',
'named': 'named',
'packages': 'packages'
}
def __init__(self, annotations=None, class_loader=None, declared_annotations=None, descriptor=None, layer=None, name=None, named=None, packages=None, _configuration=None): # noqa: E501
"""Module - a model defined in Swagger""" # noqa: E501
if _configuration is None:
_configuration = Configuration()
self._configuration = _configuration
self._annotations = None
self._class_loader = None
self._declared_annotations = None
self._descriptor = None
self._layer = None
self._name = None
self._named = None
self._packages = None
self.discriminator = None
if annotations is not None:
self.annotations = annotations
if class_loader is not None:
self.class_loader = class_loader
if declared_annotations is not None:
self.declared_annotations = declared_annotations
if descriptor is not None:
self.descriptor = descriptor
if layer is not None:
self.layer = layer
if name is not None:
self.name = name
if named is not None:
self.named = named
if packages is not None:
self.packages = packages
@property
def annotations(self):
"""Gets the annotations of this Module. # noqa: E501
:return: The annotations of this Module. # noqa: E501
:rtype: list[Annotation]
"""
return self._annotations
@annotations.setter
def annotations(self, annotations):
"""Sets the annotations of this Module.
:param annotations: The annotations of this Module. # noqa: E501
:type: list[Annotation]
"""
self._annotations = annotations
@property
def class_loader(self):
"""Gets the class_loader of this Module. # noqa: E501
:return: The class_loader of this Module. # noqa: E501
:rtype: ClassLoader
"""
return self._class_loader
@class_loader.setter
def class_loader(self, class_loader):
"""Sets the class_loader of this Module.
:param class_loader: The class_loader of this Module. # noqa: E501
:type: ClassLoader
"""
self._class_loader = class_loader
@property
def declared_annotations(self):
"""Gets the declared_annotations of this Module. # noqa: E501
:return: The declared_annotations of this Module. # noqa: E501
:rtype: list[Annotation]
"""
return self._declared_annotations
@declared_annotations.setter
def declared_annotations(self, declared_annotations):
"""Sets the declared_annotations of this Module.
:param declared_annotations: The declared_annotations of this Module. # noqa: E501
:type: list[Annotation]
"""
self._declared_annotations = declared_annotations
@property
def descriptor(self):
"""Gets the descriptor of this Module. # noqa: E501
:return: The descriptor of this Module. # noqa: E501
:rtype: ModuleDescriptor
"""
return self._descriptor
@descriptor.setter
def descriptor(self, descriptor):
"""Sets the descriptor of this Module.
:param descriptor: The descriptor of this Module. # noqa: E501
:type: ModuleDescriptor
"""
self._descriptor = descriptor
@property
def layer(self):
"""Gets the layer of this Module. # noqa: E501
:return: The layer of this Module. # noqa: E501
:rtype: ModuleLayer
"""
return self._layer
@layer.setter
def layer(self, layer):
"""Sets the layer of this Module.
:param layer: The layer of this Module. # noqa: E501
:type: ModuleLayer
"""
self._layer = layer
@property
def name(self):
"""Gets the name of this Module. # noqa: E501
:return: The name of this Module. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this Module.
:param name: The name of this Module. # noqa: E501
:type: str
"""
self._name = name
@property
def named(self):
"""Gets the named of this Module. # noqa: E501
:return: The named of this Module. # noqa: E501
:rtype: bool
"""
return self._named
@named.setter
def named(self, named):
"""Sets the named of this Module.
:param named: The named of this Module. # noqa: E501
:type: bool
"""
self._named = named
@property
def packages(self):
"""Gets the packages of this Module. # noqa: E501
:return: The packages of this Module. # noqa: E501
:rtype: list[str]
"""
return self._packages
@packages.setter
def packages(self, packages):
"""Sets the packages of this Module.
:param packages: The packages of this Module. # noqa: E501
:type: list[str]
"""
self._packages = packages
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(Module, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, Module):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, Module):
return True
return self.to_dict() != other.to_dict()
|
class BaseRetryStorage(object):
"""Plugable interface for retry queue storage"""
fields = ['operation', 'target_host', 'source_host', 'filename']
def count(self):
"""Returs total retry count"""
raise NotImplementedError
def all(self):
"""Returns all retries in queue"""
raise NotImplementedError
def create(self, **kwargs):
"""Creates new retry object in queue"""
raise NotImplementedError
def delete(self, retry):
"""Deletes given retry object from queue"""
raise NotImplementedError
def filter_by_filename(self, filename):
"""Returns retry objects for given file name"""
raise NotImplementedError |
from __future__ import unicode_literals
# Django
from django.contrib import admin
from django.contrib.auth import get_user_model
from django.contrib.auth.admin import UserAdmin as DjangoUserAdmin
# 3rd Party
from grapevine.admin.base import BaseModelAdmin, SendableInline
from grapevine.emails.admin import EmailableAdminMixin
# Local Apps
from .models import WelcomeEmail
class WelcomeEmailInline(SendableInline):
model = WelcomeEmail
class UserAdmin(DjangoUserAdmin):
inlines = [WelcomeEmailInline]
class WelcomeEmailAdmin(EmailableAdminMixin, BaseModelAdmin):
raw_id_fields = ["user"]
list_display = ["id", "user", "admin_message"]
PREVIEW_HEIGHT = 200
fieldsets = (
('Main', {"fields": ("user",)},),
('Message', {"fields": ("admin_message", "scheduled_send_time",)},),
)
admin.site.unregister(get_user_model())
admin.site.register(get_user_model(), UserAdmin)
admin.site.register(WelcomeEmail, WelcomeEmailAdmin)
|
import discum
bot = discum.Client(token="ur token")
@bot.gateway.command
def example(resp):
if resp.raw[
"t"] == "MESSAGE_CREATE": # if you want to play with the raw response
print("Detected a message")
bot.gateway.removeCommand(
example
) # this works because bot.gateway.command returns the inputted function after adding the function to the command list
bot.gateway.run(auto_reconnect=True)
|
import unittest
import numpy as np
import galsim
from desc.imsim.atmPSF import AtmosphericPSF
class AtmPSF(unittest.TestCase):
def test_r0_500(self):
"""Test that inversion of the Tokovinin fitting formula for r0_500 works."""
np.random.seed(57721)
for _ in range(100):
airmass = np.random.uniform(1.001, 1.5)
rawSeeing = np.random.uniform(0.5, 1.5)
band = 'ugrizy'[np.random.randint(6)]
rng = galsim.BaseDeviate(np.random.randint(2**32))
atmPSF = AtmosphericPSF(airmass, rawSeeing, band, rng, screen_size=6.4)
wlen = dict(u=365.49, g=480.03, r=622.20, i=754.06, z=868.21, y=991.66)[band]
targetFWHM = rawSeeing * airmass**0.6 * (wlen/500)**(-0.3)
r0_500 = atmPSF.atm.r0_500_effective
L0 = atmPSF.atm[0].L0
vkFWHM = AtmosphericPSF._vkSeeing(r0_500, wlen, L0)
np.testing.assert_allclose(targetFWHM, vkFWHM, atol=1e-3, rtol=0)
if __name__ == '__main__':
unittest.main()
|
# CHALLENGE - 005 - PREDECESSOR AND SUCCESSOR:
# Write a program that asks the user for a number and prints that number
# along with its predecessor and its successor:
numero = int(input("Digite um número: "))
antecessor = numero - 1
sucessor = numero + 1
print(f"Você digitou {numero} , seu antecessor é {antecessor} e seu sucessor é {sucessor} ! ")
n = int(input("Digite um número: "))
print(f"Você digitou {n} , seu antecessor é {n - 1} e seu sucessor é {n + 1} ! ")
|
input = """
p(X) | -p(Y) :- a(X,Y).
a(1,2).
a(2,1).
"""
output = """
p(X) | -p(Y) :- a(X,Y).
a(1,2).
a(2,1).
"""
|
# -*- coding: utf-8 -*-
"""IPv6 Destination Options and Hop-by-Hop Options"""
import collections
import csv
import re
from pcapkit.vendor.default import Vendor
__all__ = ['Option']
#: IPv6 option registry.
DATA = {
# [RFC 8200] 0
0x00: ('pad', 'Pad1'),
0x01: ('padn', 'PadN'), # [RFC 8200]
# [RFC 2473] 1
0x04: ('tun', 'Tunnel Encapsulation Limit'),
# [RFC 2711] 2
0x05: ('ra', 'Router Alert'),
0x07: ('calipso', 'Common Architecture Label IPv6 Security Option'),
# [RFC 5570]
0x08: ('smf_dpd', 'Simplified Multicast Forwarding'), # [RFC 6621]
# [RFC 8250] 10
0x0F: ('pdm', 'Performance and Diagnostic Metrics'),
# [RFC 4782][RFC Errata 2034] 6
0x26: ('qs', 'Quick-Start'),
0x63: ('rpl', 'Routing Protocol for Low-Power and Lossy Networks'),
# [RFC 6553]
0x6D: ('mpl', 'Multicast Protocol for Low-Power and Lossy Networks'),
# [RFC 7731]
0x8B: ('ilnp', 'Identifier-Locator Network Protocol Nonce'), # [RFC 6744]
0x8C: ('lio', 'Line-Identification Option'), # [RFC 6788]
0xC2: ('jumbo', 'Jumbo Payload'), # [RFC 2675]
0xC9: ('home', 'Home Address'), # [RFC 6275]
0xEE: ('ip_dff', 'Depth-First Forwarding'), # [RFC 6971]
}
class Option(Vendor):
"""Destination Options and Hop-by-Hop Options"""
#: Value limit checker.
FLAG = 'isinstance(value, int) and 0x00 <= value <= 0xFF'
#: Link to registry.
LINK = 'https://www.iana.org/assignments/ipv6-parameters/ipv6-parameters-2.csv'
def count(self, data):
"""Count field records.
Args:
data (List[str]): CSV data.
Returns:
Counter: Field recordings.
"""
reader = csv.reader(data)
next(reader) # header
return collections.Counter(map(lambda item: self.safe_name(item[4]), reader)) # pylint: disable=map-builtin-not-iterating
def process(self, data):
"""Process CSV data.
Args:
data (List[str]): CSV data.
Returns:
List[str]: Enumeration fields.
List[str]: Missing fields.
"""
reader = csv.reader(data)
next(reader) # header
enum = list()
miss = [
"extend_enum(cls, 'Unassigned_0x%s' % hex(value)[2:].upper().zfill(2), value)",
'return cls(value)'
]
for item in reader:
if not item[0]:
continue
code = item[0]
dscp = item[4]
rfcs = item[5]
temp = list()
for rfc in filter(None, re.split(r'\[|\]', rfcs)):
if re.match(r'\d+', rfc):
continue
if 'RFC' in rfc and re.match(r'\d+', rfc[3:]):
#temp.append(f'[{rfc[:3]} {rfc[3:]}]')
temp.append(f'[:rfc:`{rfc[3:]}`]')
else:
temp.append(f'[{rfc}]'.replace('_', ' '))
tmp1 = f" {''.join(temp)}" if rfcs else ''
splt = re.split(r' \[\d+\]', dscp)[0]
subn = re.sub(r'.* \((.*)\)', r'\1', splt)
name = DATA.get(int(code, base=16), (str(),))[0].upper() or subn
desc = self.wrap_comment(re.sub(r'\r*\n', ' ', f'{name}{tmp1}', re.MULTILINE))
renm = self.rename(name or 'Unassigned', code, original=dscp)
pres = f"{renm} = {code}"
sufs = f'#: {desc}'
#if len(pres) > 74:
# sufs = f"\n{' '*80}{sufs}"
#enum.append(f'{pres.ljust(76)}{sufs}')
enum.append(f'{sufs}\n {pres}')
return enum, miss
if __name__ == "__main__":
Option()
|
import time
from datetime import timedelta
from multiprocessing import Value, Manager
# Each column is COL_WIDTH characters wide, plus the separator
COL_WIDTH = 12
COLUMNS = ["CURRENT", "TOTAL", "PERCENTAGE", "RUNTIME", "RATE", "EXPECTED"]
COL_SEPARATOR = "|"
ROW_SEPARATOR = "-"
TIME_FORMAT = "%H:%M:%S"
class SyncedCrawlingProgress:
def __init__(self, total_count=1000, update_every=100000):
# Variables that need to be synced across Threads
self.count = Value('i', 0)
self.last_time = Value('d', time.time())
self.last_count = Value('i', 0)
self.start_time = time.time()
self.update_every = update_every
self.total_count = total_count
print(self.row_string(COLUMNS))
print(ROW_SEPARATOR * (len(COLUMNS) * COL_WIDTH + len(COLUMNS) - 1))
def row_string(self, values):
string = ""
for value in values[0:-1]:
string += str(value).center(COL_WIDTH) + COL_SEPARATOR
string += str(values[-1]).center(COL_WIDTH)
return string
def inc(self, by=1):
with self.count.get_lock():
self.count.value += by
if self.count.value - self.last_count.value >= self.update_every:
# Print update
self.print_update()
# Then update relevant variables
with self.last_time.get_lock(), self.last_count.get_lock():
self.last_count.value = self.count.value
self.last_time.value = time.time()
def print_update(self):
# Prints current number, total number, percentage, runtime, increase per second, expected remaining runtime
percentage = self.count.value / self.total_count * 100
runtime = time.time() - self.start_time
increases_per_second = (self.count.value - self.last_count.value) / (time.time() - self.last_time.value)
expected_remaining_runtime = (self.total_count - self.count.value) / increases_per_second
print(self.row_string([self.count.value,
self.total_count,
"%02.0d%%" % percentage,
self.time_str(runtime),
"%.02f" % increases_per_second,
self.time_str(expected_remaining_runtime)
]))
def time_str(self, seconds):
return '%02d:%02d:%02d' % (seconds / 3600, seconds / 60 % 60, seconds % 60)
def set_total_count(self, total_count):
self.total_count = total_count
class TablePrinter:
def __init__(self, header=None):
if header is None:
header = ["Col 1", "Col 2", "Col 3"]
print(self.row_string(header))
print(ROW_SEPARATOR * (len(COLUMNS) * COL_WIDTH + len(COLUMNS) - 1))
def print_row(self, row=None):
if row is None:
row = ["El1", "El2", "El3"]
print(self.row_string(row))
def row_string(self, values):
string = ""
for value in values[0:-1]:
string += str(value).center(COL_WIDTH) + COL_SEPARATOR
string += str(values[-1]).center(COL_WIDTH)
return string
class StatusVisualization:
def __init__(self, total_count=1000, update_every=100000):
# Variables that need to be synced across Threads
self.count = 0
self.last_time = time.time()
self.last_count = 0
self.start_time = time.time()
self.update_every = update_every
self.total_count = total_count
print(self.row_string(COLUMNS))
print(ROW_SEPARATOR * (len(COLUMNS) * COL_WIDTH + len(COLUMNS) - 1))
def row_string(self, values):
string = ""
for value in values[0:-1]:
string += str(value).center(COL_WIDTH) + COL_SEPARATOR
string += str(values[-1]).center(COL_WIDTH)
return string
def inc(self, by=1):
self.count += by
if self.count - self.last_count >= self.update_every:
# Print update
self.print_update()
# Then update relevant variables
self.last_count = self.count
self.last_time = time.time()
def print_update(self):
# Prints current number, total number, percentage, runtime, increase per second, expected remaining runtime
percentage = self.count / self.total_count * 100
runtime = time.time() - self.start_time
increases_per_second = (self.count - self.last_count) / (time.time() - self.last_time)
expected_remaining_runtime = (self.total_count - self.count) / increases_per_second
print(self.row_string([self.count,
self.total_count,
"%02.0d%%" % percentage,
self.time_str(runtime),
"%.02f" % increases_per_second,
self.time_str(expected_remaining_runtime)
]))
def time_str(self, seconds):
return '%02d:%02d:%02d' % (seconds / 3600, seconds / 60 % 60, seconds % 60)
def set_total_count(self, total_count):
self.total_count = total_count
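# A small usage sketch (not from the original module): print a table row roughly
# every 1000 processed items out of a total of 10000.
if __name__ == "__main__":
    progress = StatusVisualization(total_count=10000, update_every=1000)
    for _ in range(10000):
        progress.inc()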
|
class StorageException(Exception):
"""Base class for all storage exceptions"""
class PathExistsException(StorageException):
"""The given path already exists"""
class StorageNotSupported(StorageException):
"""Storage type not supported"""
class InvalidStoragePath(StorageException):
"""Invalid storage path given"""
|
# "Lorenz-95" (or 96) model.
#
# A summary for the purpose of DA is provided in
# section 3.5 of thesis found at
# ora.ox.ac.uk/objects/uuid:9f9961f0-6906-4147-a8a9-ca9f2d0e4a12
#
# A more detailed summary is given in Chapter 11 of
# Majda, Harlim: Filtering Complex Turbulent Systems"
#
# Note: implementation is ndim-agnostic.
#
# Note: the model integration is unstable (--> infinity)
# in the presence of large peaks in amplitude,
# Example: x = [0,-30,0,30]; step(x,dt=0.05,recursion=4).
# This may be occasioned by the Kalman analysis update,
# especially if the system is only partially observed.
# Is this effectively a CFL condition? Could be addressed by:
# - post-processing,
# - modifying the step() function, e.g.:
# - crop amplitude
# - or lowering dt
# - using an implicit time stepping scheme instead of rk4
import numpy as np
from scipy.linalg import circulant
try:
from tools.math import rk4, integrate_TLM, is1d
except ImportError:
from DAPPER.tools.math import rk4, integrate_TLM, is1d
Force = 8.0
prevent_blow_up = False
def dxdt(x):
a = x.ndim-1
s = lambda x,n: np.roll(x,-n,axis=a)
return (s(x,1)-s(x,-2))*s(x,-1) - x + Force
def step(x0, t, dt):
#if prevent_blow_up:
#clip = abs(x0)>30
#x0[clip] *= 0.1
return rk4(lambda t,x: dxdt(x), x0, np.nan, dt)
def TLM(x):
"""Tangent linear model"""
assert is1d(x)
m = len(x)
TLM = np.zeros((m,m))
md = lambda i: np.mod(i,m)
for i in range(m):
TLM[i,i] = -1.0
TLM[i, i-2 ] = -x[i-1]
TLM[i,md(i+1)] = +x[i-1]
TLM[i, i-1 ] = x[md(i+1)]-x[i-2]
return TLM
def dfdx(x,t,dt):
"""Integral of TLM. Jacobian of step."""
# method='analytic' is a substantial upgrade for Lor95
return integrate_TLM(TLM(x),dt,method='analytic')
def typical_init_params(m):
"""
Approximate (3 degrees of acf of) climatology.
Obtained for F=8, m=40.
NB: Should not be used for X0 because it's like
starting the filter from a state of divergence,
which might be too challenging to particle filters.
The code has been left here for legacy reasons.
"""
mu0 = 2.34*np.ones(m)
# Auto-cov-function
acf = lambda i: 0.0 + 14*(i==0) + 0.9*(i==1) - 4.7*(i==2) - 1.2*(i==3)
P0 = circulant(acf(periodic_distance_range(m)))
return mu0, P0
def periodic_distance_range(m):
return np.minimum(np.arange(m),np.arange(m,0,-1))
#return np.roll(np.abs(np.arange(m) - m//2), (m+1)//2)
#return np.concatenate((range((m+1)//2), range(m//2,0,-1)))
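# A minimal usage sketch (not part of the original file): integrate an m=40 state
# for a few steps with step() above. step() ignores its time argument, so np.nan
# is passed for t, matching how rk4 is called internally.
if __name__ == '__main__':
    m = 40
    x = Force * np.ones(m)
    x[0] += 0.01                 # small perturbation to excite the chaotic dynamics
    for _ in range(200):         # 200 steps of dt=0.05
        x = step(x, np.nan, 0.05)
    print(x[:5])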
|
from jinja2 import Environment, FileSystemLoader
import json
import pytest
import os
import copy
from em_stitch.montage.montage_solver import (
MontageSolver, get_transform)
from tempfile import TemporaryDirectory
import glob
import shutil
from marshmallow import ValidationError
test_files_dir = os.path.join(os.path.dirname(__file__), 'test_files')
example_env = Environment(loader=FileSystemLoader(test_files_dir))
def json_template(env, template_file, **kwargs):
template = env.get_template(template_file)
d = json.loads(template.render(**kwargs))
return d
@pytest.fixture(scope='module')
def solver_input_args():
data_dir = os.path.join(test_files_dir, "montage_example")
with TemporaryDirectory() as output_dir:
yield json_template(
example_env,
"montage_solver_example.json",
data_dir=data_dir,
output_dir=output_dir,
template_dir=test_files_dir)
def test_read_from(solver_input_args):
meta = glob.glob(os.path.join(
solver_input_args['data_dir'],
'_metadata*.json'))[0]
tf0 = get_transform(meta, '', {}, 'metafile')
with TemporaryDirectory() as output_dir:
tfp = os.path.join(output_dir, 'ref.json')
with open(tfp, 'w') as f:
json.dump(tf0.to_dict(), f)
tf1 = get_transform('', tfp, {}, 'reffile')
tf2 = get_transform('', '', tf0.to_dict(), 'dict')
assert tf0 == tf1 == tf2
def test_solver(solver_input_args):
local_args = copy.deepcopy(solver_input_args)
with TemporaryDirectory() as output_dir:
local_args['output_dir'] = output_dir
ms = MontageSolver(input_data=local_args, args=[])
ms.run()
assert os.path.isfile(ms.args['output_json'])
with open(ms.args['output_json'], 'r') as f:
j = json.load(f)
assert len(j) == 2
for ij in j:
assert os.path.isfile(
os.path.join(
ms.args['output_dir'],
ij['output']))
assert os.path.isfile(
os.path.join(
ms.args['output_dir'],
ij['collection']))
for k in ['x', 'y', 'mag']:
assert ij[k]['mean'] < 2.0
assert ij[k]['stdev'] < 2.0
def test_solver_no_output_dir(solver_input_args):
local_args = copy.deepcopy(solver_input_args)
with TemporaryDirectory() as output_dir:
meta = glob.glob(os.path.join(
local_args['data_dir'],
'_metadata*.json'))[0]
newmeta = os.path.join(
output_dir,
os.path.basename(meta))
shutil.copy(meta, newmeta)
local_args['data_dir'] = output_dir
local_args.pop('output_dir')
ms = MontageSolver(input_data=local_args, args=[])
ms.run()
assert os.path.isfile(ms.args['output_json'])
with open(ms.args['output_json'], 'r') as f:
j = json.load(f)
assert len(j) == 2
for ij in j:
assert os.path.isfile(
os.path.join(
ms.args['output_dir'],
ij['output']))
assert os.path.isfile(
os.path.join(
ms.args['output_dir'],
ij['collection']))
for k in ['x', 'y', 'mag']:
assert ij[k]['mean'] < 2.0
assert ij[k]['stdev'] < 2.0
def test_solver_metafile_specify(solver_input_args):
local_args = copy.deepcopy(solver_input_args)
with TemporaryDirectory() as output_dir:
local_args['output_dir'] = output_dir
local_args['metafile'] = glob.glob(
os.path.join(
local_args['data_dir'],
'_metadata*.json'))[0]
local_args.pop('data_dir')
ms = MontageSolver(input_data=local_args, args=[])
ms.run()
assert os.path.isfile(ms.args['output_json'])
with open(ms.args['output_json'], 'r') as f:
j = json.load(f)
assert len(j) == 2
for ij in j:
assert os.path.isfile(
os.path.join(
ms.args['output_dir'],
ij['output']))
assert os.path.isfile(
os.path.join(
ms.args['output_dir'],
ij['collection']))
for k in ['x', 'y', 'mag']:
assert ij[k]['mean'] < 2.0
assert ij[k]['stdev'] < 2.0
def test_solver_schema_errors(solver_input_args):
local_args = copy.deepcopy(solver_input_args)
with TemporaryDirectory() as output_dir:
local_args['output_dir'] = output_dir
local_args['solver_templates'][0] = os.path.join(
os.path.dirname(local_args['solver_templates'][0]),
'file_does_not_exist.json')
with pytest.raises(ValidationError):
MontageSolver(input_data=local_args, args=[])
local_args = copy.deepcopy(solver_input_args)
local_args['output_dir'] = output_dir
local_args.pop('data_dir')
with pytest.raises(ValidationError):
MontageSolver(input_data=local_args, args=[])
|
"""
Copyright (c) 2014, Austin R. Benson, David F. Gleich,
Purdue University, and Stanford University.
All rights reserved.
This file is part of MRNMF and is under the BSD 2-Clause License,
which can be found at http://opensource.org/licenses/BSD-2-Clause
Copyright (c) 2015, Mariano Tepper,
Duke University.
All rights reserved.
Mariano Tepper made the following changes to this file:
- modified names and line lengths to adhere more closely to PEP8
- changed docstrings
- some numpy operations are more numpy-ish now.
- small edits, refactoring, and cleanups
- removed some code
"""
import numpy as np
from scipy.optimize import nnls
def spa(data, r, colnorms):
"""
Successive projection algorithm (SPA) for NMF. This algorithm
computes the column indices.
:param data: The data matrix.
:type data: numpy.ndarray
:param r: The target separation rank.
:type r: int
:param colnorms: The column L1 norms.
:type colnorms: numpy.ndarray
:return: A list of r columns chosen by SPA.
:rtype: list of int
"""
    x = np.copy(data)
    if colnorms is not None:
        # Only normalize columns with a non-zero norm to avoid division by zero.
        idx = np.nonzero(colnorms)
        x[:, idx] /= colnorms[idx]
cols = []
m, n = x.shape
    for _ in range(r):
#col_norms = np.linalg.norm(x, ord='fro', axis=0)
col_norms = np.linalg.norm(x,axis=0)
#col_norms = np.linalg.norm(x, ord=2, axis=0)
#col_norms[cols] = -1
col_ind = np.argmax(col_norms)
cols.append(col_ind)
col = np.atleast_2d(x[:, col_ind]) # col is a row vector
x = np.dot(np.eye(m) - np.dot(col.T, col) / col_norms[col_ind], x)
return cols
def xray(x, r):
"""
X-ray algorithm for NMF. This algorithm computes the column
indices.
:param x: The data matrix.
:type x: numpy.ndarray
:param r: The target separation rank.
:type r: int
:return: A list of r columns chosen by X-ray.
:rtype: list of int
"""
cols = []
R = np.copy(x)
while len(cols) < r:
# Loop until we choose a column that has not been selected.
while True:
p = np.random.random((1, x.shape[0]))
#scores = np.linalg.norm(np.dot(R.T, x), ord='fro', axis=0)
scores = np.linalg.norm(np.dot(R.T, x), axis=0)
scores /= np.reshape(np.dot(p, x),len(scores))
#scores = np.linalg.norm(np.dot(R.T, x), ord=2, axis=0)
#scores /= np.squeeze(np.dot(p, x))
scores[cols] = -1 # IMPORTANT
best_col = np.argmax(scores)
if best_col in cols:
# Re-try
continue
else:
cols.append(best_col)
H, rel_res = nnls_frob(x, cols)
R = x - np.dot(x[:, cols], H)
break
return cols
def nnls_frob(x, cols):
"""
Compute H, the coefficient matrix, by nonnegative least squares
to minimize the Frobenius norm. Given the data matrix X and the
columns cols, H is
.. math:: \arg\min_{Y \ge 0} \| X - X(:, cols) H \|_F.
:param X: The data matrix.
:type X: numpy.ndarray
:param cols: The column indices.
:type cols: list of int
:return: The matrix H and the relative residual.
"""
ncols = x.shape[1]
x_sel = x[:, cols]
H = np.zeros((len(cols), ncols))
    for i in range(ncols):
sol, res = nnls(x_sel, x[:, i])
H[:, i] = sol
rel_res = np.linalg.norm(x - np.dot(x_sel, H), 'fro')
rel_res /= np.linalg.norm(x, 'fro')
return H, rel_res
def select_columns(data, alg, r, colnorms=None):
""" Compute an approximate separable NMF of the matrix data. By
compute, we mean choose r columns and a best fitting coefficient
matrix H. The r columns are selected by the 'alg' option, which
is one of 'SPA' or 'XRAY'. The coefficient matrix H is the
one that produces the smallest Frobenius norm error.
:param data: The data matrix.
:type data: numpy.ndarray
:param alg: Choice of algorithm for computing the columns. One of
'SPA' or 'XRAY'.
:type alg: string
:param r: The target separation rank.
:type r: int
:param colnorms: The column L1 norms, needed only by SPA.
:type colnorms: numpy.ndarray
    :return: The list of r selected column indices.
"""
if alg == 'XRAY':
cols = xray(data, r)
elif alg == 'SPA':
cols = spa(data, r, colnorms)
else:
raise Exception('Unknown algorithm: {0}'.format(alg))
return cols
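# An illustrative run (not part of the original module, a sketch only): build a small
# nonnegative matrix whose first three columns act as the "pure" columns and pass it
# through select_columns with SPA. With well-separated data SPA should pick those
# three columns; the exact output depends on the random draw.
if __name__ == '__main__':
    np.random.seed(0)
    W = np.random.rand(20, 3)                      # 3 pure columns
    H_mix = np.hstack([np.eye(3), 0.5 * np.random.rand(3, 7)])
    X = np.dot(W, H_mix)                           # separable 20 x 10 data matrix
    colnorms = np.linalg.norm(X, ord=1, axis=0)    # column L1 norms required by spa()
    cols = select_columns(X, 'SPA', 3, colnorms)
    print("selected columns:", cols)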
|
"""An example of sending and receiving cookies."""
import logging
from typing import Dict
from urllib.parse import parse_qsl
from bareasgi import (
Application,
Scope,
Info,
RouteMatches,
Content,
HttpResponse,
text_reader,
text_writer
)
import bareutils.header as header
logging.basicConfig(level=logging.DEBUG)
FORM_HTML = """
<!DOCTYPE html>
<html>
<body>
<h2>HTML Form</h2>
<form action="/post_form" method="post">
First name:<br>
<input type="text" name="first_name" value="{first_name}">
<br>
Last name:<br>
<input type="text" name="last_name" value="{last_name}">
<br><br>
<input type="submit" value="Submit">
</form>
<h2>Cookies</h2>
{cookies}
</body>
</html>
"""
async def index(
_scope: Scope,
_info: Info,
_matches: RouteMatches,
_content: Content
) -> HttpResponse:
"""Redirect to the test page"""
return 303, [(b'Location', b'/get_form')]
async def get_form(
scope: Scope,
_info: Info,
_matches: RouteMatches,
_content: Content
) -> HttpResponse:
"""A response handler which returns a form and sets some cookies"""
cookies = header.cookie(scope['headers'])
first_name = cookies.get(b'first_name', [b'Micky'])[0]
last_name = cookies.get(b'last_name', [b'Mouse'])[0]
html_list = '<dl>'
for name, values in cookies.items():
for value in values:
html_list += f'<dt>{name.decode()}</dt><dd>{value.decode()}</dd>'
html_list += '</dl>'
html = FORM_HTML.format(
first_name=first_name.decode(),
last_name=last_name.decode(),
cookies=html_list
)
headers = [
(b'content-type', b'text/html'),
]
return 200, headers, text_writer(html)
async def post_form(
scope: Scope,
_info: Info,
_matches: RouteMatches,
content: Content
) -> HttpResponse:
"""A response handler that reads the cookies from a posted form."""
content_type = header.find(b'content-type', scope['headers'])
if content_type != b'application/x-www-form-urlencoded':
return 500
variables = await text_reader(content)
values: Dict[str, str] = dict(parse_qsl(variables))
first_name = values['first_name']
last_name = values['last_name']
headers = [
(b'location', b'/get_form'),
(b'set-cookie', f'first_name={first_name}'.encode()),
(b'set-cookie', f'last_name={last_name}'.encode()),
]
return 303, headers
if __name__ == "__main__":
import uvicorn
app = Application()
app.http_router.add({'GET'}, '/', index)
app.http_router.add({'GET'}, '/get_form', get_form)
app.http_router.add({'POST'}, '/post_form', post_form)
uvicorn.run(app, port=9009)
|
from datetime import datetime
import pytest
import pytest_mock
import book2
def test_book_b4_begin_time(mocker):
fake_time = begin_date_time = datetime(2021, 6, 14, 7, 59)
mocker.patch('book2._get_now', return_value=fake_time)
result = book2.book_vaccine()
assert result == "2021-06-14 08:00 才能預約"
def test_book_after_begin_time(mocker):
fake_time = begin_date_time = datetime(2021, 6, 14, 8, 10)
mocker.patch('book2._get_now', return_value=fake_time)
result = book2.book_vaccine()
assert result == "開始預約" |
# The prime factors of 13195 are 5, 7, 13 and 29.
#
# What is the largest prime factor of the number 600851475143 ?
# This is a naive approach that only works for small numbers.
def is_prime(current, primes):
# Search if current value already exists in primes or is a multiple of known primes
for prime in primes:
# If equal to the prime exit
if current == prime:
return False
# If multiple of the prime exit
if current % prime == 0:
return False
# Current is prime
primes.add(current)
return current
def find_primes(num):
primes = set([2]) # Start with 2
current = 2
count = 0
while True:
if is_prime(current, primes):
count += 1
# Exit when enough
if count == num:
return primes
# Try the next number
current += 1
def bin(s):
return str(s) if s <= 1 else bin(s >> 1) + str(s & 1)
def check_key_combo_for_target(keys, mask, target):
total = 1
product = set()
for mask_idx in range(0, len(keys)):
bit_set = (2 ** mask_idx) & mask >= 1
if bit_set:
total *= keys[mask_idx]
product.add(keys[mask_idx])
if total == target:
print("Found target value!")
return product
return False
# This is horribly wasteful because we spend half of each pass checking
# combinations we already tested on earlier passes.
def inc_bit_mask(keys, target):
min_key = 1
max_key = 2 ** len(keys)
for mask in range(min_key, max_key):
product = check_key_combo_for_target(keys, mask, target)
if product:
print("Target is product of primes: ", product)
return True
return False
# This needlessly regenerates the list of primes each pass instead of just
# adding the next largest prime to the set.
def search(target, min_search_range, max_search_range):
for search_range in range(min_search_range, max_search_range):
primes = list(find_primes(search_range))
print("Primes: ", primes)
# Check if possible for target to be product of primes.
total = 1
for prime in primes:
total *= prime
if total < target:
print("Skipping set because product", total, "is too small.")
continue
if inc_bit_mask(primes, target):
return
print("Failed to find product")
search((2 * 11 * 19 * 3), 1, 10) # [11, 19, 2, 3]
search(13195, 1, 20) # [29, 13, 5, 7]
#search(600851475143, 1, 100) # Takes forever :-(
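# A faster alternative sketch (not the naive approach above): repeatedly strip each
# factor by trial division, so only O(sqrt(n)) candidates are examined. This answers
# the 600851475143 case almost instantly.
def largest_prime_factor(n):
    largest = 1
    factor = 2
    while factor * factor <= n:
        if n % factor == 0:
            largest = factor
            while n % factor == 0:
                n //= factor
        factor += 1
    if n > 1:          # whatever remains is itself prime
        largest = n
    return largest

print("Largest prime factor:", largest_prime_factor(600851475143))  # 6857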
|
# -*- coding: utf-8 -*-
# Copyright 2013-2021 CERN
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors:
# - Vincent Garonne <[email protected]>, 2013-2018
# - Martin Barisits <[email protected]>, 2013-2019
# - Cedric Serfon <[email protected]>, 2013-2020
# - Ralph Vigne <[email protected]>, 2013
# - Mario Lassnig <[email protected]>, 2013-2019
# - Yun-Pin Sun <[email protected]>, 2013
# - Thomas Beermann <[email protected]>, 2013-2018
# - Joaquin Bogado <[email protected]>, 2014-2015
# - Wen Guan <[email protected]>, 2015
# - Hannes Hansen <[email protected]>, 2018-2019
# - Tobias Wegner <[email protected]>, 2019
# - Andrew Lister <[email protected]>, 2019
# - Ruturaj Gujar, <[email protected]>, 2019
# - Brandon White, <[email protected]>, 2019
# - Aristeidis Fkiaras <[email protected]>, 2020
# - Benedikt Ziemons <[email protected]>, 2020-2021
# - Rizart Dona <[email protected]>, 2021
import json as json_lib
from six import iteritems
from sqlalchemy import String, cast, type_coerce, JSON
from sqlalchemy.orm.exc import NoResultFound
from sqlalchemy.sql.expression import text
from rucio.common import exception
from rucio.core.did_meta_plugins.did_meta_plugin_interface import DidMetaPlugin
from rucio.db.sqla import models
from rucio.db.sqla.session import read_session, transactional_session, stream_session
from rucio.db.sqla.util import json_implemented
class JSONDidMeta(DidMetaPlugin):
"""
A plugin to store DID metadata on a table on the relational database, using JSON blobs
"""
def __init__(self):
super(JSONDidMeta, self).__init__()
self.plugin_name = "JSON"
@read_session
def get_metadata(self, scope, name, session=None):
"""
Get data identifier metadata (JSON)
:param scope: The scope name.
:param name: The data identifier name.
:param session: The database session in use.
"""
if not json_implemented(session=session):
raise NotImplementedError
try:
row = session.query(models.DidMeta).filter_by(scope=scope, name=name).one()
meta = getattr(row, 'meta')
return json_lib.loads(meta) if session.bind.dialect.name in ['oracle', 'sqlite'] else meta
except NoResultFound:
raise exception.DataIdentifierNotFound("No generic metadata found for '%(scope)s:%(name)s'" % locals())
def set_metadata(self, scope, name, key, value, recursive=False, session=None):
self.set_metadata_bulk(scope=scope, name=name, meta={key: value}, recursive=recursive, session=session)
@transactional_session
def set_metadata_bulk(self, scope, name, meta, recursive=False, session=None):
if not json_implemented(session=session):
raise NotImplementedError
if session.query(models.DataIdentifier).filter_by(scope=scope, name=name).one_or_none() is None:
raise exception.DataIdentifierNotFound("Data identifier '%s:%s' not found" % (scope, name))
row_did_meta = session.query(models.DidMeta).filter_by(scope=scope, name=name).scalar()
if row_did_meta is None:
# Add metadata column to new table (if not already present)
row_did_meta = models.DidMeta(scope=scope, name=name)
row_did_meta.save(session=session, flush=False)
existing_meta = {}
if hasattr(row_did_meta, 'meta'):
if row_did_meta.meta:
existing_meta = row_did_meta.meta
# Oracle returns a string instead of a dict
if session.bind.dialect.name in ['oracle', 'sqlite'] and existing_meta:
existing_meta = json_lib.loads(existing_meta)
for key, value in meta.items():
existing_meta[key] = value
row_did_meta.meta = None
session.flush()
# Oracle insert takes a string as input
if session.bind.dialect.name in ['oracle', 'sqlite']:
existing_meta = json_lib.dumps(existing_meta)
row_did_meta.meta = existing_meta
row_did_meta.save(session=session, flush=True)
@transactional_session
def delete_metadata(self, scope, name, key, session=None):
"""
Delete a key from the metadata column
:param scope: the scope of did
:param name: the name of the did
:param key: the key to be deleted
"""
if not json_implemented(session=session):
raise NotImplementedError
try:
row = session.query(models.DidMeta).filter_by(scope=scope, name=name).one()
existing_meta = getattr(row, 'meta')
# Oracle returns a string instead of a dict
if session.bind.dialect.name in ['oracle', 'sqlite'] and existing_meta is not None:
existing_meta = json_lib.loads(existing_meta)
if key not in existing_meta:
raise exception.KeyNotFound(key)
existing_meta.pop(key, None)
row.meta = None
session.flush()
# Oracle insert takes a string as input
if session.bind.dialect.name in ['oracle', 'sqlite']:
existing_meta = json_lib.dumps(existing_meta)
row.meta = existing_meta
except NoResultFound:
raise exception.DataIdentifierNotFound("Key not found for data identifier '%(scope)s:%(name)s'" % locals())
@stream_session
def list_dids(self, scope, filters, did_type='collection', ignore_case=False, limit=None,
offset=None, long=False, recursive=False, session=None):
# Currently for sqlite only add, get and delete is implemented.
if not json_implemented(session=session):
raise NotImplementedError
query = session.query(models.DidMeta)
if scope is not None:
query = query.filter(models.DidMeta.scope == scope)
filters.pop('name', None)
for k, v in iteritems(filters):
if session.bind.dialect.name == 'oracle':
query = query.filter(text("json_exists(meta,'$?(@.{} == \"{}\")')".format(k, v)))
else:
query = query.filter(cast(models.DidMeta.meta[k], String) == type_coerce(v, JSON))
if long:
for row in query.yield_per(5):
yield {
'scope': row.scope,
'name': row.name,
'did_type': 'Info not available in JSON Plugin',
'bytes': 'Info not available in JSON Plugin',
'length': 'Info not available in JSON Plugin'
}
else:
for row in query.yield_per(5):
yield row.name
@read_session
def manages_key(self, key, session=None):
return json_implemented(session=session)
def get_plugin_name(self):
"""
Returns Plugins Name.
This can then be used when listing the metadata of did to only provide dids from this plugin.
"""
return self.plugin_name
|
from os import environ
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from logger import Logger
class Mailer:
def __init__(self):
self.email = environ.get('email_address')
self.password = environ.get('email_password')
self.server = environ.get('email_server')
self.port = environ.get('email_port')
if not self.email:
raise Exception('Email address missing in .env!')
if not self.password:
raise Exception('Email password missing in .env!')
if not self.server:
raise Exception('Email server missing in .env!')
if not self.port:
raise Exception('Email port missing in .env!')
def send(self, subject, message, email_to):
msg = MIMEMultipart()
msg.set_unixfrom('author')
msg['From'] = self.email
msg['To'] = email_to
msg['Subject'] = subject
        msg.attach(MIMEText(message))
Logger.log('Creating connection')
mailserver = smtplib.SMTP_SSL(self.server, self.port)
Logger.log('Ehlo')
mailserver.ehlo()
Logger.log('Logging in')
mailserver.login(self.email, self.password)
Logger.log('Sending email')
response = mailserver.sendmail(self.email, email_to, msg.as_string())
Logger.log(f'Message sent to {email_to}')
mailserver.quit()
|
#! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! https://waf.io/book/index.html#_obtaining_the_waf_file
from waflib import Utils,Task,Options,Errors
from waflib.TaskGen import before_method,after_method,feature
from waflib.Tools import ccroot
from waflib.Configure import conf
ccroot.USELIB_VARS['cs']=set(['CSFLAGS','ASSEMBLIES','RESOURCES'])
ccroot.lib_patterns['csshlib']=['%s']
@feature('cs')
@before_method('process_source')
def apply_cs(self):
cs_nodes=[]
no_nodes=[]
for x in self.to_nodes(self.source):
if x.name.endswith('.cs'):
cs_nodes.append(x)
else:
no_nodes.append(x)
self.source=no_nodes
bintype=getattr(self,'bintype',self.gen.endswith('.dll')and'library'or'exe')
self.cs_task=tsk=self.create_task('mcs',cs_nodes,self.path.find_or_declare(self.gen))
tsk.env.CSTYPE='/target:%s'%bintype
tsk.env.OUT='/out:%s'%tsk.outputs[0].abspath()
self.env.append_value('CSFLAGS','/platform:%s'%getattr(self,'platform','anycpu'))
inst_to=getattr(self,'install_path',bintype=='exe'and'${BINDIR}'or'${LIBDIR}')
if inst_to:
mod=getattr(self,'chmod',bintype=='exe'and Utils.O755 or Utils.O644)
self.install_task=self.add_install_files(install_to=inst_to,install_from=self.cs_task.outputs[:],chmod=mod)
@feature('cs')
@after_method('apply_cs')
def use_cs(self):
names=self.to_list(getattr(self,'use',[]))
get=self.bld.get_tgen_by_name
for x in names:
try:
y=get(x)
except Errors.WafError:
self.env.append_value('CSFLAGS','/reference:%s'%x)
continue
y.post()
tsk=getattr(y,'cs_task',None)or getattr(y,'link_task',None)
if not tsk:
self.bld.fatal('cs task has no link task for use %r'%self)
self.cs_task.dep_nodes.extend(tsk.outputs)
self.cs_task.set_run_after(tsk)
self.env.append_value('CSFLAGS','/reference:%s'%tsk.outputs[0].abspath())
@feature('cs')
@after_method('apply_cs','use_cs')
def debug_cs(self):
csdebug=getattr(self,'csdebug',self.env.CSDEBUG)
if not csdebug:
return
node=self.cs_task.outputs[0]
if self.env.CS_NAME=='mono':
out=node.parent.find_or_declare(node.name+'.mdb')
else:
out=node.change_ext('.pdb')
self.cs_task.outputs.append(out)
if getattr(self,'install_task',None):
self.pdb_install_task=self.add_install_files(install_to=self.install_task.install_to,install_from=out)
if csdebug=='pdbonly':
val=['/debug+','/debug:pdbonly']
elif csdebug=='full':
val=['/debug+','/debug:full']
else:
val=['/debug-']
self.env.append_value('CSFLAGS',val)
@feature('cs')
@after_method('debug_cs')
def doc_cs(self):
csdoc=getattr(self,'csdoc',self.env.CSDOC)
if not csdoc:
return
node=self.cs_task.outputs[0]
out=node.change_ext('.xml')
self.cs_task.outputs.append(out)
if getattr(self,'install_task',None):
self.doc_install_task=self.add_install_files(install_to=self.install_task.install_to,install_from=out)
self.env.append_value('CSFLAGS','/doc:%s'%out.abspath())
class mcs(Task.Task):
color='YELLOW'
run_str='${MCS} ${CSTYPE} ${CSFLAGS} ${ASS_ST:ASSEMBLIES} ${RES_ST:RESOURCES} ${OUT} ${SRC}'
def split_argfile(self,cmd):
inline=[cmd[0]]
infile=[]
for x in cmd[1:]:
if x.lower()=='/noconfig':
inline.append(x)
else:
infile.append(self.quote_flag(x))
return(inline,infile)
def configure(conf):
csc=getattr(Options.options,'cscbinary',None)
if csc:
conf.env.MCS=csc
conf.find_program(['csc','mcs','gmcs'],var='MCS')
conf.env.ASS_ST='/r:%s'
conf.env.RES_ST='/resource:%s'
conf.env.CS_NAME='csc'
if str(conf.env.MCS).lower().find('mcs')>-1:
conf.env.CS_NAME='mono'
def options(opt):
opt.add_option('--with-csc-binary',type='string',dest='cscbinary')
class fake_csshlib(Task.Task):
color='YELLOW'
inst_to=None
def runnable_status(self):
return Task.SKIP_ME
@conf
def read_csshlib(self,name,paths=[]):
return self(name=name,features='fake_lib',lib_paths=paths,lib_type='csshlib')
|
from autofunc.get_match_factor import match
from autofunc.get_top_results import get_top_results
from autofunc.find_associations import find_associations
from autofunc.get_data import get_data
import os.path
import numpy as np
def test_1():
"""
Tests that the match factor for a known learning set and test case is close to the known value
"""
script_dir = os.path.dirname(__file__)
file1 = os.path.join(script_dir, '../assets/bladeCombined.csv')
store_data, records = get_data(file1)
conf_results, results = find_associations(store_data, records, support=0.0003, confidence=0.01, lift=0.1)
thresh_results = get_top_results(conf_results, 0.7)
test_file = os.path.join(script_dir, '../assets/jigsawQuery.csv')
test_data, test_records = get_data(test_file)
learned_dict, matched, overmatched, unmatched, match_factor = match(thresh_results, test_records)
assert np.allclose(0.82051, match_factor)
|
from factory.declarations import LazyAttribute, Sequence, SubFactory
from factory.django import DjangoModelFactory
from roster.factories import StudentFactory
from exams.models import ExamAttempt, PracticeExam
class ExamFactory(DjangoModelFactory):
class Meta:
model = PracticeExam
family = 'Waltz'
number = Sequence(lambda n: n + 1)
is_test = False
class ExamAttemptFactory(DjangoModelFactory):
class Meta:
model = ExamAttempt
student = SubFactory(StudentFactory)
quiz = SubFactory(ExamFactory)
score = 0
guess1 = LazyAttribute(lambda o: o.quiz.answer1)
guess2 = LazyAttribute(lambda o: o.quiz.answer2)
guess3 = LazyAttribute(lambda o: o.quiz.answer3)
guess4 = LazyAttribute(lambda o: o.quiz.answer4)
guess5 = LazyAttribute(lambda o: o.quiz.answer5)
|
import asyncio
from aiofsk.transport import AFSKTransport
async def text_console(modem):
def _text_console():
while True:
text = input(">> ")
if 'quit' in text:
return
modem.write(text.encode())
try:
return await asyncio.get_event_loop().run_in_executor(None, _text_console)
finally:
modem.stop()
async def main():
# modem = AFSKTransport(baud=300, loopback=False, modulator='nrzi')
modem = AFSKTransport(baud=300, loopback=False, modulator='standard')
terminal_task = asyncio.create_task(text_console(modem))
try:
await modem.connect_and_run_forever()
finally:
if not terminal_task.done():
terminal_task.cancel()
if __name__ == '__main__':
asyncio.run(main())
|
from sklearn.model_selection import train_test_split
from sklearn import datasets
from keras.optimizers import SGD
from keras.utils import np_utils
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import tensorflow as tf
import cv2
import os
from glob import glob
import random
import shutil
class Load_data :
def load_data_files(base_dir):
folder_name = "dataset"
RAW_DATASET = os.path.join(base_dir, folder_name)
abs_dir = os.path.join(os.getcwd(), folder_name)
sub_dir = os.listdir(abs_dir)
data_dic = {}
for class_name in sub_dir:
imgs = glob(os.path.join(RAW_DATASET,class_name,"*.jpg"))
data_dic[class_name] = imgs
print("Class: {}".format(class_name))
print("Number of images: {} \n".format(len(imgs)))
return data_dic, len(imgs)
def copy_files_to_directory(files, directory):
if not os.path.exists(directory):
os.makedirs(directory)
print("Created directory: {}".format(directory))
for f in files:
shutil.copy(f, directory)
print("Copied {} files.\n".format(len(files)))
def train_validation_split(base_dir, data_dic, split_ratio=0.2):
DATASET = os.path.join(base_dir,"split_dataset")
if not os.path.exists(DATASET):
os.makedirs(DATASET)
for class_name, imgs in data_dic.items():
idx_split = int(len(imgs) * split_ratio)
random.shuffle(imgs)
validation = imgs[:idx_split]
train = imgs[idx_split:]
Load_data.copy_files_to_directory(train, os.path.join(DATASET,"train",class_name))
Load_data.copy_files_to_directory(validation, os.path.join(DATASET,"validation",class_name))
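# Minimal usage sketch (an assumption, not part of the original module): it expects a
# "dataset/<class_name>/*.jpg" layout under the current working directory.
if __name__ == "__main__":
    base_dir = os.getcwd()
    data_dic, _ = Load_data.load_data_files(base_dir)
    Load_data.train_validation_split(base_dir, data_dic, split_ratio=0.2)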
|
import numpy as np
import astropy.units as u
__all__ = [
"energy_dispersion",
]
def _normalize_hist(hist):
# (N_E, N_MIGRA, N_FOV)
# (N_E, N_FOV)
norm = hist.sum(axis=1)
h = np.swapaxes(hist, 0, 1)
with np.errstate(invalid="ignore"):
h /= norm
h = np.swapaxes(h, 0, 1)
return np.nan_to_num(h)
def energy_dispersion(
selected_events, true_energy_bins, fov_offset_bins, migration_bins,
):
"""
Calculate energy dispersion for the given DL2 event list.
Energy dispersion is defined as the probability of finding an event
at a given relative deviation ``(reco_energy / true_energy)`` for a given
true energy.
Parameters
----------
selected_events: astropy.table.QTable
Table of the DL2 events.
Required columns: ``reco_energy``, ``true_energy``, ``source_fov_offset``.
true_energy_bins: astropy.units.Quantity[energy]
Bin edges in true energy
    migration_bins: numpy.ndarray
        Bin edges in relative deviation ``reco_energy / true_energy``, recommended range: [0.2, 5]
fov_offset_bins: astropy.units.Quantity[angle]
Bin edges in the field of view offset.
For Point-Like IRFs, only giving a single bin is appropriate.
Returns
-------
energy_dispersion: numpy.ndarray
Energy dispersion matrix
        with shape (n_true_energy_bins, n_migration_bins, n_fov_offset_bins)
"""
mu = (selected_events["reco_energy"] / selected_events["true_energy"]).to_value(
u.one
)
energy_dispersion, _ = np.histogramdd(
np.column_stack(
[
selected_events["true_energy"].to_value(u.TeV),
mu,
selected_events["source_fov_offset"].to_value(u.deg),
]
),
bins=[
true_energy_bins.to_value(u.TeV),
migration_bins,
fov_offset_bins.to_value(u.deg),
],
)
n_events_per_energy = energy_dispersion.sum(axis=1)
assert len(n_events_per_energy) == len(true_energy_bins) - 1
energy_dispersion = _normalize_hist(energy_dispersion)
return energy_dispersion
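# A self-contained usage sketch with a synthetic event list (values are illustrative only):
if __name__ == "__main__":
    from astropy.table import QTable

    rng = np.random.default_rng(0)
    e_true = rng.uniform(0.1, 10, 1000) * u.TeV
    events = QTable({
        "true_energy": e_true,
        "reco_energy": e_true * rng.normal(1.0, 0.1, 1000),
        "source_fov_offset": rng.uniform(0, 0.5, 1000) * u.deg,
    })
    edisp = energy_dispersion(
        events,
        true_energy_bins=[0.1, 1, 10] * u.TeV,
        fov_offset_bins=[0, 1] * u.deg,
        migration_bins=np.linspace(0.2, 5, 30),
    )
    print(edisp.shape)  # (2, 29, 1)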
|
import os
from webdriverwrapper import unittest, Chrome, ChromeOptions
from webdriverwrapper.decorators import *
from webdriverwrapper.exceptions import ErrorMessagesException
class TestCaseTest(unittest.WebdriverTestCase):
instances_of_driver = unittest.ONE_INSTANCE_PER_TESTCASE
def init(self):
self.path = os.path.dirname(os.path.realpath(__file__))
def _get_driver(self):
opt = ChromeOptions()
opt.add_argument('--no-sandbox')
opt.add_argument('--proxy-auto-detect')
return Chrome(options=opt)
    # This test passes: the error page visited in the middle of the test is never checked.
def test_not_check_errors_in_middle_of_test(self):
self.driver.get('file://%s/html/error_messages.html' % self.path)
self.driver.get('file://%s/html/some_page.html' % self.path)
    # check_errors is called explicitly here, therefore the error page is detected and raises.
def test_check_errors_in_middle_of_test(self):
self.driver.get('file://%s/html/error_messages.html' % self.path)
try:
self.check_errors()
except ErrorMessagesException:
pass # ok
except Exception as exc:
self.fail('Wrong exception! %s' % str(exc))
else:
self.fail('Exception not raised!')
self.driver.get('file://%s/html/some_page.html' % self.path)
def test_make_screenshot(self):
self.driver.get('file://%s/html/some_page.html' % self.path)
self.make_screenshot('/tmp/test-screenshot.png')
|
from Jumpscale import j
# as used in gec farms as standard rack
def bom_calc(environment):
from hardware.components.components_hpe import bom_populate
environment.bom = bom_populate(environment.bom)
# # see the bill of material sheet to define the devices
# compute = environment.bom.device_get("hpe_compute_server")
# storage = environment.bom.device_get("hpe_storage_server")
# switch = environment.bom.device_get("switch_48")
#
# # an environment to simulate the overhead per node (eg. 1 switch per node)
# environment.device_node_add("compute", compute, 11)
# environment.device_node_add("storage", storage, 5)
# environment.device_overhead_add("switch", switch, 2)
environment.device_node_add("compute", template="hpe_compute_server", nr=11)
environment.device_node_add("storage", template="hpe_storage_server", nr=5)
environment.device_overhead_add("switch", template="switch_48", nr=2)
|
import re
from . import basiciv
from .helper import check_date
from . import worksiv
class Batch(basiciv.Queue):
def __init__(self, **kwargs):
super().__init__()
self.que_tar = [
{
'illust_attrs': zip_[0],
'rank': zip_[1],
'yes_rank': zip_[2]
} for zip_ in zip(
kwargs['illust_attrs'],
kwargs['rank'],
kwargs['yes_rank']
)
]
class Daily(basiciv.BasicConfig, basiciv.LoadInfo, basiciv.Queue):
name = 'daily'
rank_url = 'https://www.pixiv.net/ranking.php'
rank_total = 0
one_count = 0
current_page = None
current_date = None
def __init__(self, ymd=None, filters='complex', **kwargs):
"""
:param ymd:
Example:
~~~~~~~
20190101
'2019-01-01'
'2019/01/01'
'2019.01.01'
:param filters:
Optional:
~~~~~~~~
- complex
- illust
            - ugoira (note: viewing animated works is not supported)
- manga
default complex
"""
super(Daily, self).__init__(
**kwargs
)
self.params = {
'mode': self.name,
'date': '',
'p': 1,
'format': 'json'
}
if filters != 'complex':
self.params.update({
'content': filters,
})
if ymd:
reg = re.compile(
r'[0-9]{4}[^a-zA-Z0-9]?[0-9]{2}[^a-zA-Z0-9]?[0-9]{2}'
)
s = reg.fullmatch(str(ymd))
date = s.string
date = date.replace('/', '')\
.replace('-', '')\
.replace('.', '')
year = int(date[:4])
            month = int(date[4:6])
            day = int(date[6:])
            check_date(year, month, day)
self.params.update({
'date': date,
})
self.current_date = date
self.__run__(params=self.params)
def __run__(self, params):
print(self.init_run)
r = self.sess.get(
self.rank_url,
params=params,
timeout=self.kvpair['timeout']
)
print(r.text)
self.interface = r.json()
if self.rank_total == 0:
self.rank_total = self.interface['rank_total']
self.current_page = self.interface['page']
self.current_date = self.interface['date']
print(self.init_finished)
self.init_run = 'Current batch_size: {}, rank total: {}\n' \
'Loading date: {}, page: {} ...' \
.format(
len(self.que_tar), self.rank_total,
params['date'], params['p']
)
self.init_finished = 'Load finished!'
def run(self, ymd=None):
if ymd:
self.__init__(ymd=ymd)
return self
def one(self):
if self.one_count < 50:
curr_one = {
'illust_attrs': worksiv.Works(
self.interface['contents'][self.one_count]['illust_id']
),
'rank': self.interface['contents'][self.one_count]['rank'],
'yes_rank': self.interface['contents'][self.one_count]['yes_rank']
}
self.one_count += 1
self.que_tar.append(curr_one)
return self.last()
else:
return self.next_page().one()
def batch(self, nums=-1):
if self.one_count < 50:
list1, list2, list3 = [], [], []
if nums == -1:
while self.one_count < 50:
take = self.interface['contents'][self.one_count]
list1.append(worksiv.Works(take['illust_id']))
list2.append(take['rank'])
list3.append(take['yes_rank'])
self.one_count += 1
else:
for _ in range(nums):
if self.one_count == 50:
break
take = self.interface['contents'][self.one_count]
list1.append(worksiv.Works(take['illust_id']))
list2.append(take['rank'])
list3.append(take['yes_rank'])
self.one_count += 1
curr_batch = Batch(
illust_attrs=list1,
rank=list2,
yes_rank=list3
)
self.que_tar += curr_batch.que_tar
return self.curr()
else:
return self.next_page().batch(nums)
def prev_date(self):
if self.interface['prev_date']:
self.rank_total = 0
self.step_number = 0
self.one_count = 0
self.que_tar.clear()
            self.params.update({
                'date': self.interface['prev_date']
            })
return self.run()
def next_date(self):
if self.interface['next_date']:
self.rank_total = 0
self.step_number = 0
self.one_count = 0
self.que_tar.clear()
self.params.update({
'date': self.interface['next_date']
})
return self.run()
def prev_page(self):
if self.interface['prev']:
self.one_count = 0
self.params.update({
'p': self.interface['prev']
})
return self.run()
def next_page(self):
if self.interface['next']:
self.one_count = 0
self.params.update({
'p': self.interface['next']
})
return self.run()
class Weekly(Daily):
name = 'weekly'
class Monthly(Daily):
name = 'monthly'
class Rookie(Daily):
name = 'rookie'
class Original(Daily):
name = 'original'
class Male(Daily):
name = 'male'
class Female(Daily):
name = 'female'
class DailyR(Daily):
name = 'daily_r18'
def __init__(self, ymd=None, filters='complex', **kwargs):
super(DailyR, self).__init__(
ymd=ymd,
filters=filters,
**kwargs
)
class WeeklyR(DailyR):
name = 'weekly_r18'
class MaleR(DailyR):
name = 'male_r18'
class FemaleR(DailyR):
name = 'female_r18'
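# Usage sketch (illustrative only; it assumes a configured session/credentials from the
# base classes and live network access):
if __name__ == '__main__':
    daily = Daily(ymd='2019-01-01')
    daily.batch(10)  # queue the top ten works of that day
    print(len(daily.que_tar))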
|
from .base import * # noqa: F403
from .base import env
MIDDLEWARE.append("api.middleware.RangesMiddleware") # noqa: F405
DJANGO_DRF_FILEPOND_STORAGES_BACKEND = "storages.backends.s3boto3.S3Boto3Storage"
AWS_ACCESS_KEY_ID = env("AWS_ACCESS_KEY_ID")
AWS_SECRET_ACCESS_KEY = env("AWS_SECRET_ACCESS_KEY")
AWS_S3_REGION_NAME = env("REGION_NAME", "us-west-1")
AWS_STORAGE_BUCKET_NAME = env("BUCKET_NAME", "doccano")
AWS_DEFAULT_ACL = "private"
AWS_BUCKET_ACL = "private"
AWS_AUTO_CREATE_BUCKET = True
|
# Author: Payam Ghassemi, [email protected]
# Sep 8, 2018
# Copyright 2018 Payam Ghassemi
import numpy as np
from matplotlib import pyplot as plt
import scipy.io
import pickle
# Built-in python libraries
import sys
import os
from urllib.request import urlretrieve
# 3rd-party libraries I'll be using
import matplotlib
import pandas as pd
import seaborn as sns
from scipy import stats
#matplotlib.rcParams['pdf.fonttype'] = 42
#matplotlib.rcParams['ps.fonttype'] = 42
matplotlib.rcParams['text.usetex'] = False
matplotlib.rcParams['lines.markeredgewidth'] = 1
def set_style():
plt.style.use(['seaborn-white', 'seaborn-paper'])
font = {'family' : 'serif',
'weight' : 'normal',
'size' : 12}
font = {'family':'Times New Roman',
'weight' : 'normal',
'size' : 12}
matplotlib.rc("font", **font)
def set_size(fig, width=6, height=3):
fig.set_size_inches(width, height)
plt.tight_layout()
def get_colors():
return np.array([
[0.1, 0.1, 0.1], # black
[0.4, 0.4, 0.4], # very dark gray
[0.7, 0.7, 0.7], # dark gray
[0.9, 0.9, 0.9], # light gray
[0.984375, 0.7265625, 0], # dark yellow
[1, 1, 0.9] # light yellow
])
set_style()
flatui = [ "#1C366A", "#106f96", "#1DABE6", "#2ecc71", "#C3CED0", "#E43034", "#3498db", "#e74c3c","#a65d42","#6e5200","#dcc4d2"]
palette = sns.set_palette(flatui) # sns.color_palette("colorblind", 3) #"Set2"
flatui = [ "#2ecc71", "#C3CED0", "#1DABE6", "#1C366A", "#106f96", "#E43034", "#3498db", "#e74c3c","#a65d42","#6e5200","#dcc4d2"]
palette_B = sns.set_palette(flatui) # sns.color_palette("colorblind", 3) #"Set2" |
#! /usr/bin/env python
# -*- coding: UTF-8 -*-
from __future__ import print_function
import argparse
from itertools import cycle
class Calibrator(object):
def __init__(self, start):
self.start = start
self.updates = []
self.current_frequency = start
def load_input_file(self, file_name):
lines = []
with open(file_name, "rb") as f:
lines = f.readlines()
for line in lines:
self.updates.append(int(line))
def run_updates(self):
for update in self.updates:
self.current_frequency = self.current_frequency + update
def reset(self):
self.current_frequency = self.start
self.updates = []
def search_for_twin_frequencies(self):
        seen_frequencies = set()
for update in cycle(self.updates):
self.current_frequency = self.current_frequency + update
if self.current_frequency not in seen_frequencies:
                seen_frequencies.add(self.current_frequency)
else:
break
def main(args):
calibrator = Calibrator(args.start)
calibrator.load_input_file(args.input)
calibrator.run_updates()
print("Frequency after 1 run : {0}".format(calibrator.current_frequency))
calibrator.reset()
calibrator.load_input_file(args.input)
calibrator.search_for_twin_frequencies()
print("First twin frequency : {0}".format(calibrator.current_frequency))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--input", dest="input", required=True)
parser.add_argument("-s", "--start", dest="start", type=int, default=0)
args = parser.parse_args()
main(args)
|
#!/usr/bin/env python3
"""
A tool for sorting text with human-readable byte sizes like "2.5 KiB" or "6TB"
Example uses include sorting the output of "du -h" or "docker image ls".
"""
import argparse
import re
import sys
# byte size multiples are hard. https://en.wikipedia.org/wiki/Kibibyte
# fmt: off
IEC_MULTIPLES = {
# singular
'b': 1,
'B': 1,
# decimal
'k': 1000, # kilobyte
'kB': 1000,
'M': 1000000, # megabyte (1000**2)
'MB': 1000000,
'G': 1000000000, # gigabyte (1000**3)
'GB': 1000000000,
'T': 1000000000000, # terabyte (1000**4)
'TB': 1000000000000,
'P': 1000000000000000, # petabyte (1000**5)
'PB': 1000000000000000,
'E': 1000000000000000000, # exabyte (1000**6)
'EB': 1000000000000000000,
'Z': 1000000000000000000000, # zettabyte (1000**7)
'ZB': 1000000000000000000000,
'Y': 1000000000000000000000000, # yottabyte (1000**8)
'YB': 1000000000000000000000000,
# binary
'Ki': 1024, # kibibyte
'KiB': 1024,
'Mi': 1048576, # mebibyte (1024**2)
'MiB': 1048576,
'Gi': 1073741824, # gibibyte (1024**3)
'GiB': 1073741824,
'Ti': 1099511627776, # tebibyte (1024**4)
'TiB': 1099511627776,
'Pi': 1125899906842624, # pebibyte (1024**5)
'PiB': 1125899906842624,
'Ei': 1152921504606846976, # exbibyte (1024**6)
'EiB': 1152921504606846976,
'Zi': 1180591620717411303424, # zebibyte (1024**7)
'ZiB': 1180591620717411303424,
'Yi': 1208925819614629174706176, # yobibyte (1024**8)
'YiB': 1208925819614629174706176,
}
# without these, the default set doesn't recognize "K" nor "KB"
IEC_KILO_PATCH = {
"K": 1000, # kilobyte (NOT IEC)
"KB": 1000,
"Kb": 1000,
}
# JEDEC / Classic = powers of 1024 with metric labels
CLASSIC_MULTIPLES = {
'B': 1,
'K': 1024, # kilobyte
'KB': 1024,
'M': 1048576, # megabyte (1024^2)
'MB': 1048576,
'G': 1073741824, # gigabyte (1024^3)
'GB': 1073741824,
'T': 1099511627776, # terabyte (1024^4) (not JEDEC)
'TB': 1099511627776,
'P': 1125899906842624, # petabyte (1024^5) (not JEDEC)
'PB': 1125899906842624,
'E': 1152921504606846976, # exabyte (1024^6) (not JEDEC)
'EB': 1152921504606846976,
'Z': 1180591620717411303424, # zettabyte (1024^7) (not JEDEC)
'ZB': 1180591620717411303424,
'Y': 1208925819614629174706176, # yottabyte (1024^8) (not JEDEC)
'YB': 1208925819614629174706176,
}
# fmt: on
def main():
"""command-line execution handler"""
argp = argparse.ArgumentParser(
description="tool for sorting text with human-readable byte sizes like '2.5 KiB' or '6TB'"
)
argp.add_argument(
"infile",
nargs="*",
type=argparse.FileType("r"),
default=sys.stdin,
help="the input file to read, defaults to stdin if this argument is omitted",
)
argp.add_argument(
"-r",
"--reverse",
action="store_true",
help="print the output lines in reverse order",
)
argp.add_argument(
"-c",
"--classic",
action="store_true",
help="override IEC 1000 byte multiples with JEDEC-ish 1024 byte multiples having metric labels",
)
argp.add_argument(
"-C",
"--strict-classic",
action="store_true",
help="like --classic but also remove support for all IEC 1000 byte multiples",
)
argp.add_argument(
"-s",
"--strict",
action="store_true",
        help="do NOT supplement the supported IEC multiples with unofficial 'K' and 'KB' (1000-byte values)",
)
argp.add_argument(
"-m",
"--only-matches",
action="store_true",
help="only print lines which contain a recognized data size expression",
)
argp.add_argument(
"-p",
"--print-sizes",
action="store_true",
help="instead of sorting input lines, just print a report of the size multiples that would be used",
)
args = argp.parse_args()
# figure out what byte multiples to use
    multiples = dict(IEC_MULTIPLES)  # copy, so the module-level table is not mutated
if not args.strict:
multiples.update(IEC_KILO_PATCH)
if args.classic:
multiples.update(CLASSIC_MULTIPLES)
if args.strict_classic:
multiples = CLASSIC_MULTIPLES
sorted_labels = sorted(multiples.keys(), key=multiples.__getitem__)
if args.print_sizes:
sys.stderr.write("LABEL\tBYTES\n")
for label in sorted_labels:
sys.stdout.write("{}\t{}\n".format(label, multiples[label]))
sys.exit(0)
# build the regex
# returned match groups look like this: ('98.6', 'MB') or this ('228', 'K')
# fmt: off
regex = re.compile(
(
r'\b'
r'(\d+(?:\.\d+)?)'
r'\s*'
r'('
)
+
'|'.join(sorted_labels)
+
(
r')'
r'\b'
)
)
# fmt: on
# start processing input lines
parsed = {}
for line in args.infile:
match = regex.search(line)
if not match:
if not args.only_matches:
# this just bunches non-matched lines at the beginning
# I think we can do better, but I don't know how yet
parsed[line] = 0
continue
size_str, multiple_str = match.groups()
parsed[line] = float(size_str) * multiples[multiple_str]
for line in sorted(parsed, key=parsed.__getitem__, reverse=args.reverse):
sys.stdout.write(line)
if __name__ == "__main__":
main()
|
import prefect
from prefect import task, Flow
from prefect.storage import Docker
@task
def hello_task():
logger = prefect.context.get("logger")
logger.info("Hello, Kubernetes!")
with Flow("encoding-task",
storage=Docker(registry_url="prefectdevacr.azurecr.io",
python_dependencies=["vectorhub==1.2.3",
"VecDB==0.5.8"],
image_tag='latest')
) as flow:
hello_task()
if __name__ == '__main__':
# flow.run()
flow.register(project_name="AKS")
|
import filecmp
import os
def dirs_same_enough(dir1,dir2,report=False):
''' use os.walk and filecmp.cmpfiles to
determine if two dirs are 'same enough'.
Args:
dir1, dir2: two directory paths
report: if True, print the filecmp.dircmp(dir1,dir2).report_full_closure()
before returning
Returns:
bool
'''
# os walk: root, list(dirs), list(files)
# those lists won't have consistent ordering,
# os.walk also has no guaranteed ordering, so have to sort.
walk1 = sorted(list(os.walk(dir1)))
walk2 = sorted(list(os.walk(dir2)))
    def report_and_exit(report, bool_):
        if report:
            filecmp.dircmp(dir1, dir2).report_full_closure()
        return bool_
    if len(walk1) != len(walk2):
        return report_and_exit(report, False)
for (p1,d1,fl1),(p2,d2,fl2) in zip(walk1,walk2):
d1,fl1, d2, fl2 = set(d1),set(fl1),set(d2),set(fl2)
if d1 != d2 or fl1 != fl2:
return report_and_exit(report,False)
        same, diff, weird = filecmp.cmpfiles(p1, p2, fl1, shallow=False)
        if diff or weird:
            return report_and_exit(report, False)
return report_and_exit(report,True)
path_git=r'F:\work\svn2git\svn2git\test_data\GIT'
path_svn=r'F:\work\svn2git\svn2git\test_data\SVN'
dirs_same_enough(path_git,path_svn,report=True) |
from operator import itemgetter
from django.http import Http404
from django.shortcuts import redirect, render
from django.urls import reverse_lazy
from django.views.generic import TemplateView
from exporter.applications.forms.countries import (
countries_form,
choose_contract_type_form,
contract_type_per_country_form,
)
from exporter.applications.forms.locations import (
which_location_form,
external_locations_form,
add_external_location,
Locations,
sites_form,
new_external_location_form,
)
from exporter.applications.helpers.check_your_answers import is_application_oiel_of_type
from exporter.applications.helpers.countries import prettify_country_data
from exporter.applications.helpers.validators import (
validate_external_location_choice,
validate_and_update_goods_location_choice,
validate_contract_type_countries_choice,
)
from exporter.applications.services import (
get_application,
get_application_countries,
post_application_countries,
put_contract_type_for_country,
get_application_countries_and_contract_types,
)
from exporter.core.constants import CaseTypes, APPLICANT_EDITING
from exporter.core.services import (
get_sites_on_draft,
post_sites_on_draft,
post_external_locations,
get_external_locations_on_draft,
post_external_locations_on_draft,
delete_external_locations_from_draft,
)
from lite_content.lite_exporter_frontend.applications import ContractTypes
from lite_forms.views import SingleFormView, MultiFormView
from core.auth.views import LoginRequiredMixin
def get_locations_page(request, application_id, **kwargs):
application = get_application(request, application_id)
if not application["goods_locations"]:
return redirect(reverse_lazy("applications:edit_location", kwargs={"pk": application_id}))
context = {
"application": application,
"is_application_draft_or_major_edit": application["status"]["key"] in [APPLICANT_EDITING, "draft"],
}
if kwargs.get("errors"):
context["errors"] = kwargs["errors"]
return render(
request,
"applications/goods-locations/goods-locations.html",
context,
)
class GoodsLocation(LoginRequiredMixin, TemplateView):
def get(self, request, **kwargs):
return get_locations_page(request, application_id=kwargs["pk"])
class EditGoodsLocation(LoginRequiredMixin, SingleFormView):
def init(self, request, **kwargs):
self.object_pk = kwargs["pk"]
application = get_application(request, self.object_pk)
self.form = which_location_form(self.object_pk, application.sub_type)
self.action = validate_and_update_goods_location_choice
self.data = {"choice": Locations.DEPARTED if application.get("have_goods_departed") else ""}
if application.status == "submitted":
if application["goods_locations"]:
return reverse_lazy("applications:location", kwargs={"pk": self.object_pk})
elif application["sites"]:
return reverse_lazy("applications:existing_sites", kwargs={"pk": self.object_pk})
def get_success_url(self):
choice = self.get_validated_data()["choice"]
if choice == Locations.EXTERNAL:
return (
reverse_lazy("applications:select_add_external_location", kwargs={"pk": self.object_pk})
+ "?return_to="
+ self.request.get_full_path()
)
elif choice == Locations.ORGANISATION:
return reverse_lazy("applications:existing_sites", kwargs={"pk": self.object_pk})
elif choice == Locations.DEPARTED:
return reverse_lazy("applications:task_list", kwargs={"pk": self.object_pk})
class SelectAddExternalLocation(LoginRequiredMixin, SingleFormView):
def init(self, request, **kwargs):
self.object_pk = kwargs["pk"]
self.form = add_external_location(request)
self.action = validate_external_location_choice
def get_success_url(self):
choice = self.get_validated_data()["choice"]
if choice == "new":
return (
reverse_lazy("applications:add_external_location", kwargs={"pk": self.object_pk})
+ "?return_to="
+ self.request.get_full_path()
)
else:
return reverse_lazy("applications:add_preexisting_external_location", kwargs={"pk": self.object_pk})
class ExistingSites(LoginRequiredMixin, SingleFormView):
def init(self, request, **kwargs):
self.object_pk = kwargs["pk"]
application = get_application(request, self.object_pk)
if application.status == "submitted" and not application["goods_locations"]["type"] == "sites":
raise Http404
self.data, _ = get_sites_on_draft(request, self.object_pk)
self.form = sites_form(request, application.type_reference)
self.action = post_sites_on_draft
self.success_url = reverse_lazy("applications:location", kwargs={"pk": self.object_pk})
class AddExternalLocation(LoginRequiredMixin, MultiFormView):
def init(self, request, **kwargs):
self.object_pk = kwargs["pk"]
application = get_application(request, self.object_pk)
location_type = request.POST.get("location_type", None)
self.forms = new_external_location_form(request, application.type_reference, location_type)
self.action = post_external_locations
self.success_url = reverse_lazy("applications:location", kwargs={"pk": self.object_pk})
class RemoveExternalLocation(LoginRequiredMixin, TemplateView):
def get(self, request, **kwargs):
draft_id = str(kwargs["pk"])
ext_loc_id = str(kwargs["ext_loc_pk"])
data, _ = delete_external_locations_from_draft(request, draft_id, ext_loc_id)
parameters = {
"request": request,
"application_id": draft_id,
}
if data.get("errors"):
parameters["errors"] = data["errors"]["external_locations"]
return get_locations_page(**parameters)
class AddExistingExternalLocation(LoginRequiredMixin, SingleFormView):
def init(self, request, **kwargs):
self.object_pk = kwargs["pk"]
application = get_application(request, self.object_pk)
self.data, _ = get_external_locations_on_draft(request, self.object_pk)
self.form = external_locations_form(request, application.type_reference)
self.action = post_external_locations_on_draft
self.success_url = reverse_lazy("applications:location", kwargs={"pk": self.object_pk})
class Countries(LoginRequiredMixin, SingleFormView):
def init(self, request, **kwargs):
self.object_pk = kwargs["pk"]
self.data = {
"countries": [
country_entry["country_id"]
for country_entry in get_application_countries_and_contract_types(request, self.object_pk)["countries"]
]
}
self.form = countries_form(request, self.object_pk)
self.action = post_application_countries
def get_success_url(self):
application = get_application(self.request, self.object_pk)
# Only military OIELs and Open Trade Control Licences have contract types per destination
if not (is_application_oiel_of_type("military", application) or application.type_reference == CaseTypes.OICL):
return reverse_lazy("applications:task_list", kwargs={"pk": self.object_pk})
countries_without_contract_type = [
entry["country_id"]
for entry in get_application_countries_and_contract_types(self.request, self.object_pk)["countries"]
if not entry["contract_types"]
]
if not countries_without_contract_type:
return reverse_lazy("applications:countries_summary", kwargs={"pk": self.object_pk})
else:
return reverse_lazy("applications:choose_contract_type", kwargs={"pk": self.object_pk})
class ChooseContractType(LoginRequiredMixin, SingleFormView):
def init(self, request, **kwargs):
self.object_pk = kwargs["pk"]
self.form = choose_contract_type_form()
self.action = validate_contract_type_countries_choice
def get_success_url(self):
choice = self.get_validated_data()["choice"]
countries_without_contract_type = [
entry["country_id"]
for entry in get_application_countries_and_contract_types(self.request, self.object_pk)["countries"]
if not entry["contract_types"]
]
if choice == ContractTypes.Variables.ALL_COUNTRIES_CHOSEN:
return reverse_lazy(
"applications:add_contract_type",
kwargs={"pk": self.object_pk, "country": ContractTypes.Variables.ALL_COUNTRIES_CHOSEN},
)
if countries_without_contract_type:
return reverse_lazy(
"applications:add_contract_type",
kwargs={"pk": self.object_pk, "country": countries_without_contract_type[0]},
)
else:
# Redirect to the summary page if a country has been removed
return reverse_lazy("applications:countries_summary", kwargs={"pk": self.object_pk})
class AddContractTypes(LoginRequiredMixin, SingleFormView):
contract_types_and_countries = None
def init(self, request, **kwargs):
self.object_pk = kwargs["pk"]
self.action = put_contract_type_for_country
self.contract_types_and_countries = get_application_countries_and_contract_types(request, self.object_pk)[
"countries"
]
current_country = self.kwargs["country"]
selected_countries = [country_entry["country_id"] for country_entry in self.contract_types_and_countries]
data_for_current_country = [
country_entry
for country_entry in self.contract_types_and_countries
if country_entry["country_id"] == current_country
]
self.data = (
{
"contract_types": data_for_current_country[0]["contract_types"].split(",")
if data_for_current_country[0]["contract_types"]
else None,
"other_contract_type_text": data_for_current_country[0]["other_contract_type_text"],
}
if data_for_current_country
else {}
)
if current_country != ContractTypes.Variables.ALL_COUNTRIES_CHOSEN:
if current_country == "UKCS":
country_name = "UK Continental Shelf"
else:
country_name = data_for_current_country[0]["country__name"]
if country_name not in str(self.contract_types_and_countries):
return render(request, "404.html")
self.form = contract_type_per_country_form([current_country], country_name)
else:
selected_countries_ids = selected_countries
self.form = contract_type_per_country_form(selected_countries_ids, "all the countries")
def get_success_url(self):
# Go through all countries without contract types and render the form again if needed
next_country = None
if self.kwargs["country"] != ContractTypes.Variables.ALL_COUNTRIES_CHOSEN:
for country_entry in self.contract_types_and_countries:
# If a country has no contract types and it is not the current country (still empty as we do not do an additional call to the api)
if not country_entry["contract_types"] and country_entry["country_id"] != self.kwargs["country"]:
next_country = country_entry["country_id"]
break
if next_country:
return reverse_lazy(
"applications:add_contract_type", kwargs={"pk": self.object_pk, "country": next_country}
)
else:
return reverse_lazy("applications:countries_summary", kwargs={"pk": self.object_pk})
class CountriesAndContractTypesSummary(LoginRequiredMixin, TemplateView):
def get(self, request, **kwargs):
object_pk = kwargs["pk"]
countries_data = get_application_countries_and_contract_types(request, object_pk)
countries = [
{
"country_id": country_entry["country_id"],
"country_name": country_entry["country__name"],
"contract_types": country_entry["contract_types"],
"other_contract_type_text": country_entry["other_contract_type_text"],
}
for country_entry in countries_data["countries"]
]
prettified_countries = prettify_country_data(sorted(countries, key=itemgetter("country_name")))
context = {
"application_id": str(object_pk),
"is_application_oiel_continental_shelf": len(countries) == 1 and countries[0]["country_id"] == "UKCS",
"countries": prettified_countries,
"is_application_draft_or_major_edit": countries_data["status"] in [APPLICANT_EDITING, "draft"],
}
return render(request, "applications/goods-locations/destinations-summary-list.html", context)
class StaticDestinations(LoginRequiredMixin, TemplateView):
# To be used for OIELs where all countries are preselected and non-modifiable by the user
# The UKCS OIEL is a special case - this is the initial page displayed before prompting the user to select contract types
def get(self, request, **kwargs):
application_id = str(kwargs["pk"])
application = get_application(request, application_id)
goodstype_category = None
if application.get("goodstype_category"):
goodstype_category = application.get("goodstype_category").get("key")
goodstype_category_label = application.get("goodstype_category").get("value")
context = {
"application_id": application_id,
"countries": get_application_countries(request, application_id),
"goodstype_category": goodstype_category,
"goodstype_category_label": goodstype_category_label,
}
return render(request, "applications/goods-locations/static-all-destinations.html", context)
|
#!/usr/bin/env python3
from __future__ import unicode_literals, print_function, division
import sys
sys.path.append('..')
from io import open
import os
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import argparse
import json
from model.settings import hparams
import glob
if __name__ == '__main__':
#os.chdir('/' + '/'.join(sys.argv[0].split('/')[:-1]))
parser = argparse.ArgumentParser(description='Plot some NMT values.')
parser.add_argument('--files', help='File glob for plotting. Must be json files!!', nargs='+')
parser.add_argument('--title', help='Graph title.')
parser.add_argument('--label-x', help='X axis label.')
parser.add_argument('--label-y', help='Y axis label.')
args = parser.parse_args()
args = vars(args)
print(args)
do_filelist = False
do_title_graph = False
if args['files'] is not None:
do_filelist = True
if args['title'] is not None:
do_title_graph = True
arg_filename = '/'.join( hparams['save_dir'].split('/')[1:]) + '/' + 'test*.json'
arg_title = 'Loss and Accuracy vs. Steps'
if do_filelist:
arg_filename = str(','.join(args['files']))
if do_title_graph:
arg_title = str(args['title'])
arg_filelist = arg_filename.split(',')
arg_glob_list = []
for i in arg_filelist:
if not i.endswith('.pth') and not i.endswith('.txt'):
print(i,'use for plot')
arg_glob_list.extend(glob.glob(i))
print(arg_glob_list)
arg_list = []
for i in arg_glob_list:
if os.path.isfile(i):
with open(i, 'r') as z:
sublist = []
j = json.loads(z.read())
for k in j:
sublist.append((int(k), float(j[k])))
sublist.sort(key=lambda tuple: tuple[0])
#print(sublist)
arg_list.append(sublist)
#print(arg_list)
arg_plot_color = [ 'r', 'b', 'g', 'y','c','m']
fig, ax = plt.subplots()
plt.ylabel('Accuracy')
if args['label_y'] is not None:
plt.ylabel(args['label_y'])
plt.xlabel('Sentence Pairs')
if args['label_x'] is not None:
plt.xlabel(args['label_x'])
plt.title(arg_title)
handles = []
for i in range(len(arg_list)):
ii = i % len(arg_plot_color)
label_out = arg_glob_list[i].split('/')[-1]
if label_out.endswith('.json'):
label_out = label_out[: - len('.json')]
pass
color_patch = mpatches.Patch(color=arg_plot_color[ii], label=label_out)
handles.append(color_patch)
lst_x = []
lst_y = []
for k in arg_list[i]:
lst_x.append(k[0])
lst_y.append(k[1])
ax.plot(lst_x, lst_y, arg_plot_color[ii] + '-')
ax.legend(handles=handles)
plt.show()
|
#!/usr/bin/python3 -m unittest
#
# basic inspection
#
import unittest
import pymm
import numpy as np
import math
import torch
def colored(r, g, b, text):
return "\033[38;2;{};{};{}m{} \033[38;2;255;255;255m".format(r, g, b, text)
def log(*args):
print(colored(0,255,255,*args))
shelf = pymm.shelf('myTransactionsShelf',pmem_path='/mnt/pmem0')
class TestReveal(unittest.TestCase):
def test_inspect(self):
shelf.inspect(verbose=False)
if __name__ == '__main__':
unittest.main()
|
#!/usr/bin/env python3
# encoding: utf-8
import importlib
import tensorflow as tf
assert tf.__version__[0] == '2'
# algorithms based on TF 2.x
from typing import (Tuple,
Callable,
Dict)
from rls.common.yaml_ops import load_yaml
from rls.utils.display import colorize
from rls.utils.logging_utils import get_logger
logger = get_logger(__name__)
class AlgoRegistry(object):
def __init__(self):
self.algo_specs = {}
def register(self, name, **attrs):
if name in self.algo_specs.keys():
raise Exception(f'Cannot re-register algorithms: {name}')
self.algo_specs[name] = dict(attrs)
def get_model_info(self, name):
if name in self.algo_specs.keys():
return self.algo_specs[name]
raise Exception(f'Cannot find algorithm: {name}')
registry = AlgoRegistry()
def register(name, **attrs):
registry.register(name, **attrs)
def get_model_info(name: str) -> Tuple[Callable, Dict, str, str]:
'''
Args:
name: name of algorithms
Return:
algo_class of the algorithm model named `name`.
        default config of the specified algorithm.
policy_type of policy, `on-policy` or `off-policy`
'''
algo_info = registry.get_model_info(name)
class_name = algo_info['algo_class']
policy_mode = algo_info['policy_mode']
policy_type = algo_info['policy_type']
LOGO = algo_info.get('logo', '')
logger.info(colorize(LOGO, color='green'))
model = getattr(
importlib.import_module(f'rls.algos.{policy_type}.{name}'),
class_name)
algo_config = {}
algo_config.update(
load_yaml(f'rls/algos/config.yaml')['general']
)
algo_config.update(
load_yaml(f'rls/algos/config.yaml')[policy_mode.replace('-', '_')]
)
algo_config.update(
load_yaml(f'rls/algos/config.yaml')[name]
)
return model, algo_config, policy_mode, policy_type
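# Usage sketch: algorithms announce themselves through the module-level register()
# helper; the name and attributes below are illustrative, not taken from the real configs.
if __name__ == '__main__':
    register('demo_algo',
             algo_class='DEMO',
             policy_mode='off-policy',
             policy_type='single')
    print(registry.get_model_info('demo_algo'))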
|
from format.util import *
def visualize(file_before, file_after, file_diffs, file_output_name = None):
"""visualize the jpg diff, open with Tk window or save as file
args:
file_before (str)
file_after (str)
file_diffs (list)
file_output_name (str)
"""
    if file_output_name is None:
visualize_image_as_window([file_before, file_diffs[0], file_diffs[1], file_after])
else:
visualize_image_as_png(file_diffs[0], file_diffs[1], file_output_name)
|
class Electric_calculation:
def __init__(self, datacase, resistance):
try:
self.datacase = datacase
self.resistance = resistance
self.datacaseinit = datacase
except:
            self.datacase = int(input("Enter the number of test cases> "))
            self.resistance = int(input("Enter the resistance value> "))
def intensity(self):
self.amperelist = []
self.r_list = []
for voltage in range(self.datacase):
self.r_list.append(voltage)
ampere = voltage / self.resistance
self.amperelist.append(ampere)
return self.amperelist
def voltage(self):
self.voltagelist = []
for i in range(self.datacase):
voltage = self.amperelist[i] * self.r_list[i]
self.voltagelist.append(voltage)
return self.voltagelist
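# Usage sketch (values are illustrative only):
if __name__ == "__main__":
    calc = Electric_calculation(datacase=5, resistance=10)
    print(calc.intensity())  # currents for the indices 0..4 at 10 ohm
    print(calc.voltage())    # values derived from the stored currents and indices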
|
def condition(row):
'''
Args:
row (int): Individual entry for specified column
Returns:
        val (int): 0 if the entry equals 0, otherwise 1
'''
if row == 0:
val = 0
else:
val = 1
return val
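# Usage sketch with pandas, which these row-wise helpers appear intended for
# (the DataFrame and column names below are illustrative only):
#
#   import pandas as pd
#   df = pd.DataFrame({"count": [0, 3, 0, 7]})
#   df["flag"] = df["count"].apply(condition)  # -> 0, 1, 0, 1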
def roundup(row):
'''
Args:
        row (float): value from the specified column
    Returns:
        row (float): the value increased by .25 when it matches one of the listed cutoffs
'''
    if row in (0.75, 1.25, 1.75, 2.25, 2.75, 3.25):
        row += .25
return row |
import pandas as pd
import os
import argparse
import re
from Bio import SeqIO
pd.set_option("display.max_columns",40)
parser=argparse.ArgumentParser()
parser.add_argument("-f1","--File1")
parser.add_argument("-f2","--File2")
args=parser.parse_args()
f1=args.File1
f2=args.File2
f_1=pd.read_table(f1,header=0)
f_1["coor"]=f_1["RefName_x"]+"_"+f_1["cluster"].apply(str)
print(f_1.shape)
print(f_1.drop_duplicates(["coor"],keep="first").shape)
f_2=pd.read_table(f2,header=0)
f_2["coor"]=f_2["RefName_x"]+"_"+f_2["cluster"].apply(str)
print(f_2.shape)
print(f_2.drop_duplicates(["coor"],keep="first").shape)
f_1=f_1.loc[~f_1["coor"].isin(f_2["coor"])]
print(f_1.shape)
print(f_1.drop_duplicates(["coor"],keep="first").shape)
f_1.to_csv(f1+"_rebg.tsv",index=None,sep="\t")
|
from aiohttp_admin.contrib import models
from aiohttp_admin.backends.sa import PGResource
from .main import schema
from ..db import comment
@schema.register
class Comments(models.ModelAdmin):
fields = ('id', 'post_id', 'created_at', 'body', )
class Meta:
resource_type = PGResource
table = comment
|
"""
Probabilistic multiple cracking model
"""
from typing import List, Any, Union
import bmcs_utils.api as bu
import traits.api as tr
import numpy as np
import warnings
from bmcs_utils.trait_types import Float
from scipy.optimize import newton
warnings.filterwarnings("error", category=RuntimeWarning)
class CrackBridgeModel(bu.Model):
"""
Record of all material parameters of the composite. The model components
(PullOutModel, CrackBridgeRespSurf, PMCM) are all linked to the database record
and access the parameters they require. Some parameters are shared across all
three components (Em, Ef, vf), some are specific to a particular type of the
PulloutModel.
"""
class CBMConstantBond(CrackBridgeModel):
"""
Return the matrix stress profile of a crack bridge for a given control slip
at the loaded end
"""
Em = bu.Float(28000)
Ef = bu.Float(180000)
vf = bu.Float(0.01)
T = bu.Float(8)
ipw_view = bu.View(
bu.Item('Em'),
bu.Item('Ef'),
bu.Item('vf'),
bu.Item('T'),
)
@property
def Ec(self):
return self.Em * (1 - self.vf) + self.Ef * self.vf # [MPa] mixture rule
class CrackBridgeRespSurface(bu.Model):
"""
Crack bridge response surface that returns the values of matrix stress
along ahead of a crack and crack opening for a specified remote stress
and boundary conditions.
"""
cb = bu.Instance(CrackBridgeModel)
    def get_sig_m(self, sig_c, z):
        """Get the profile of matrix stress along the specimen
        :param sig_c: float, remote composite stress
        :param z: np.ndarray, distances to the nearest crack
        """
cb = self.cb
sig_m = np.minimum(z * cb.T * cb.vf / (1 - cb.vf), cb.Em * sig_c /
(cb.vf * cb.Ef + (1 - cb.vf) * cb.Em))
return sig_m
    def get_eps_f(self, sig_c, z):
cb = self.cb
sig_m = self.get_sig_m(sig_c, z )
eps_f = (sig_c - sig_m * (1 - cb.vf)) / cb.vf / cb.Ef
return eps_f
sig_c_slider: float = bu.Float(1.0, BC=True)
ipw_view = bu.View(
bu.Item('sig_c_slider')
)
@staticmethod
def subplots(fig):
ax = fig.subplots(1,1)
ax1 = ax.twinx()
return ax, ax1
def update_plot(self, axes):
ax, ax1 = axes
x_range = np.linspace(-1000,1000,1000)
z_range = np.abs(x_range)
sig_m_range = self.get_sig_m(self.sig_c_slider, z_range)
eps_f_range = self.get_eps_f(self.sig_c_slider, z_range)
sig_max = np.max(sig_m_range)
eps_max = np.max(eps_f_range)
ax.plot(x_range, sig_m_range, color='black')
ax.fill_between(x_range, sig_m_range, color='gray', alpha=0.1)
ax.set_ylim(ymin=-0.03*sig_max)
ax1.plot(x_range, eps_f_range, color='blue')
ax1.fill_between(x_range, eps_f_range, color='blue', alpha=0.1)
ax1.set_ylim(ymin=-0.03*eps_max)
class PMCMHist(bu.Model):
pmcm = tr.WeakRef
K = bu.Int(0)
t = bu.Float(0)
K_max = tr.Property(depends_on='state_changed')
@tr.cached_property
def _get_K_max(self):
sig_c_K, eps_c_K, sig_mu_x, x, CS, sig_m_x_K, sig_m_x_K1 = self.pmcm.cracking_history
K_max = len(sig_c_K)
        # clamp the index of the current crack to the number of computed states
        self.K = np.min([self.K, K_max])
return K_max
ipw_view = bu.View(
bu.Item('K', latex=r'\mathrm{state}', readonly=True),
time_editor=bu.HistoryEditor(
var='t',
var_max='K_max'
)
)
@staticmethod
def subplots(fig):
ax1, ax2 = fig.subplots(1,2)
ax11 = ax1.twinx()
return ax1, ax11, ax2
def update_plot(self, axes):
ax, ax_cs, ax_sig_x = axes
cr = int(self.t * (self.K_max-2))
self.K = cr
self.pmcm.plot(axes)
sig_c_K, eps_c_K, sig_mu_x, x, CS, sig_m_x_K, sig_m_x_K1 = self.pmcm.cracking_history
ax_sig_x.plot(x,sig_m_x_K[cr])
ax_sig_x.plot(x,sig_m_x_K1[cr], linestyle='dashed')
ax.plot(eps_c_K[cr],sig_c_K[cr],color='magenta',marker='o')
class PMCM(bu.Model):
name = "PMCM"
"""
Implement the global crack tracing algorithm based on a crack bridge response surface
"""
history = bu.Instance(PMCMHist)
def _history_default(self):
return PMCMHist(pmcm=self)
cb = bu.Instance(CrackBridgeModel)
def _cb_default(self):
return CBMConstantBond()
cb_rs = tr.Property#(depends_on="state_changed")
@tr.cached_property
def _get_cb_rs(self):
return CrackBridgeRespSurface(cb=self.cb)
tree = ['cb', 'cb_rs', 'history']
n_x = bu.Int(5000, ALG=True)
L_x = bu.Float(500, GEO=True)
sig_cu = bu.Float(20, MAT=True)
sig_mu = bu.Float(10, MAT=True)
m = bu.Float(4, MAT=True)
ipw_view = bu.View(
bu.Item('n_x'),
bu.Item('L_x'),
bu.Item('sig_cu'),
bu.Item('sig_mu'),
bu.Item('m'),
)
def get_z_x(self, x, XK): # distance to the closest crack (*\label{get_z_x}*)
"""Specimen discretization
"""
z_grid = np.abs(x[:, np.newaxis] - np.array(XK)[np.newaxis, :])
return np.amin(z_grid, axis=1)
    def get_sig_c_z(self, sig_mu, z, sig_c_pre):
        """Get the composite stress that initiates a crack at a material element
        :param sig_mu: float, local matrix strength
        :param z: distance to the nearest existing crack
        :param sig_c_pre: float, composite stress of the previous crack state (start value for the root search)
        """
# crack initiating load at a material element
#print('sig_mu', sig_mu)
fun = lambda sig_c: sig_mu - self.cb_rs.get_sig_m(sig_c, z)
try: # search for the local crack load level
sig_c = newton(fun, sig_c_pre)
#print('sig_c', sig_c)
return sig_c
except (RuntimeWarning, RuntimeError):
# solution not found (shielded zone) return the ultimate composite strength
return self.sig_cu
def get_sig_c_K(self, z_x, x, sig_c_pre, sig_mu_x):
# crack initiating loads over the whole specimen
get_sig_c_x = np.vectorize(self.get_sig_c_z)
sig_c_x = get_sig_c_x(sig_mu_x, z_x, sig_c_pre)
#print('sig_c_x', z_x, x, sig_c_pre, sig_mu_x)
#print('sig_c_x', sig_c_x)
y_idx = np.argmin(sig_c_x)
return sig_c_x[y_idx], x[y_idx]
cracking_history = tr.Property(depends_on='state_changed')
@tr.cached_property
def _get_cracking_history(self):
cb = self.cb
L_x, n_x, sig_mu, sig_cu, m = self.L_x, self.n_x, self.sig_mu, self.sig_cu, self.m
x = np.linspace(0, L_x, n_x) # specimen discretization
sig_mu_x: np.ndarray[np.float_] = sig_mu * np.random.weibull(
m, size=n_x) # matrix strength
        XK: List[float] = [] # recording the crack positions
sig_c_K: List[float] = [0.] # recording the crack initiating loads
eps_c_K: List[float] = [0.] # recording the composite strains
CS: List[float] = [L_x, L_x / 2] # initial crack spacing
sig_m_x_K1: List[float] = [np.zeros_like(x)] # stress profiles at crack states
sig_m_x_K: List[float] = [np.zeros_like(x)] # stress profiles after crack states
Ec: float = cb.Ec
Em: float = cb.Em
idx_0 = np.argmin(sig_mu_x)
XK.append(x[idx_0]) # position of the first crack
sig_c_0 = sig_mu_x[idx_0] * Ec / Em
sig_m_x_K1.append(np.ones_like(x)*sig_mu_x[idx_0]) # matrix stress
#print('sig_c_0', sig_c_0)
sig_c_K.append(sig_c_0)
eps_c_K.append(sig_mu_x[idx_0] / Em)
while True:
z_x = self.get_z_x(x, XK) # distances to the nearest crack
sig_m_x_K.append(self.cb_rs.get_sig_m(sig_c_K[-1], z_x)) # matrix stress
sig_c_k, y_i = self.get_sig_c_K(z_x, x, sig_c_K[-1], sig_mu_x) # identify next crack
sig_m_x_K1.append(self.cb_rs.get_sig_m(sig_c_k, z_x)) # matrix stress
if sig_c_k == sig_cu:
break
XK.append(y_i) # record crack position
sig_c_K.append(sig_c_k) # corresponding composite stress
eps_c_K.append( # composite strain - integrate the strain field
np.trapz(self.cb_rs.get_eps_f(sig_c_k, self.get_z_x(x, XK)), x) / np.amax(x))
XK_arr = np.hstack([[0], np.sort(np.array(XK)), [L_x]])
CS.append(np.average(XK_arr[1:] - XK_arr[:-1])) # crack spacing
sig_c_K.append(sig_cu) # the ultimate state
eps_c_K.append(np.trapz(self.cb_rs.get_eps_f(sig_cu, self.get_z_x(x, XK) ), x) / np.amax(x))
CS.append(CS[-1])
return np.array(sig_c_K), np.array(eps_c_K), sig_mu_x, x, np.array(CS), np.array(sig_m_x_K), np.array(sig_m_x_K1)
@staticmethod
def subplots(fig):
ax1, ax2 = fig.subplots(1,2)
ax11 = ax1.twinx()
return ax1, ax11, ax2
def plot(self, axes):
ax, ax_cs, ax_sig_x = axes
sig_c_K, eps_c_K, sig_mu_x, x, CS, sig_m_x_K, sig_m_x_K1 = self.cracking_history
        n_c = len(eps_c_K) - 2 # number of cracks
ax.plot(eps_c_K, sig_c_K, marker='o', label='%d cracks:' % n_c)
ax.set_xlabel(r'$\varepsilon_\mathrm{c}$ [-]');
ax.set_ylabel(r'$\sigma_\mathrm{c}$ [MPa]')
ax_sig_x.plot(x, sig_mu_x, color='orange')
ax_sig_x.fill_between(x, sig_mu_x, 0, color='orange', alpha=0.1)
ax_sig_x.set_xlabel(r'$x$ [mm]');
ax_sig_x.set_ylabel(r'$\sigma$ [MPa]')
ax.legend()
eps_c_KK = np.array([eps_c_K[:-1], eps_c_K[1:]]).T.flatten()
CS_KK = np.array([CS[:-1], CS[:-1]]).T.flatten()
ax_cs.plot(eps_c_KK, CS_KK, color='gray')
ax_cs.fill_between(eps_c_KK, CS_KK, color='gray', alpha=0.2)
ax_cs.set_ylabel(r'$\ell_\mathrm{cs}$ [mm]');
def update_plot(self, axes):
self.plot(axes)
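# Usage sketch: build the model with its defaults and inspect the simulated cracking
# history (the matrix strength field is random, so results vary from run to run):
if __name__ == '__main__':
    pmcm = PMCM()
    sig_c_K, eps_c_K, *_ = pmcm.cracking_history
    print(len(sig_c_K) - 2, 'cracks traced')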
|
"""cv_detect_train.pu
Multiplatform OpenCV (cv2) face detection and capture tool.
The MIT License (MIT)
Copyright (c) 2020 Román Ramírez Giménez (MIT License)
Run this script to detect faces, save images and then be able to train models
for different tools (for example, Magic Mirror 2 facial recognition ones).
Usage:
%s:
-u <user> || --user=<user> (Note this is required for capture)
-d <device_id> || --device_id=<device_id>
(if not in Raspberry PI you must pass a device id as in
the camera device located at /dev/video[id])
-o <output_directory> || --output=<output_directory>
(where images will be stored, the format will be the
directory passed with the username joined, for example
"output/username/". There you will find several images:
- <number>.jpg: the captured image.
- <number>_boxed.jpg: the captured image with a red box
around the detected face/s.
- <number>_gray.jpg: the captured image in gray scale.
Capture, Train and Identify requires -o or will default to
BASE_CAPTURE_DIRECTORY if not passed in an explicit way.
-l <limit> || --limit=<limit>
(a limit for the number of images to save. By default 0
            which means unlimited).
-a <algorithm number> || --algorithm=<algorithm number>
(the algorithm can be 1 to 3, corresponding to LBPH_RECOGNIZER,
FISHER_RECOGNIZER and EIGEN_RECOGNIZER).
-s || --silent
(do not show the positive image on screen)
-c || --capture
(capture images to be trained. Remember to set device id if not in
a Raspberry PI, -d 1 or so)
-t || --train
(train models from images in output_dir/images_dir and generate XML
file with the trained model)
-r || --recognize
(identify people opening the camera -do not forget the device id if
not in a Raspberry PI- and matching against the trained model)
-n || --no-extra-images
(this flag will limit the image generation just to what is useful
to train the model, not creating color, grey, additional images).
-h || --help
"""
import os
import sys
from engine.config import *
from engine.globals import *
from engine.capture import *
from engine.train import *
from engine.recognize import *
if __name__ == "__main__":
IS_RASPBIAN = False
IS_ARM = False
sysname, nodename, release, version, machine = os.uname()
nodename = nodename.lower()
if DEBUG is True:
print('OS: %s' % os.name)
print('Uname: sys[%s] node[%s] rel[%s] ver[%s] machine[%s]' % (sysname,
nodename,
release,
version,
machine))
if nodename == 'raspberrypi' or nodename == 'raspbian':
IS_RASPBIAN = True
if DEBUG is True:
print('==> IS raspbian environment')
else:
if DEBUG is True:
print('==> Not in raspbian environment')
if 'arm' in machine.lower():
IS_ARM = True
if DEBUG is True:
print('==> IS ARM architecture')
else:
if DEBUG is True:
print('==> Not in ARM architecture')
if len(sys.argv) < 2:
print('*: Please, check arguments passed')
show_help(program_name=sys.argv[0])
sys.exit(ERROR_INVALID_ARGUMENT_COUNT)
parsed_dict = parse_arguments(argv=sys.argv[1:],
program_name=sys.argv[0])
images_status = open_images_status_file(base_images_dir=parsed_dict['output_dir'])
if not images_status:
print('CRITICAL: something critical happened reading images status file. EXIT.')
sys.exit(-1)
if parsed_dict['wanna_train'] is True:
do_train(base_images_dir=parsed_dict['output_dir'],
recognizer_algorithm=parsed_dict['algorithm'])
elif parsed_dict['wanna_recognize'] is True:
cv_recognize(images_dir=parsed_dict['output_dir'],
device_id=parsed_dict['did'],
recognizer_algorithm=parsed_dict['algorithm'],
is_raspbian=IS_RASPBIAN, is_arm=IS_ARM)
elif parsed_dict['wanna_capture'] is True:
do_capture(user=parsed_dict['user'],
device_id=parsed_dict['did'],
is_raspbian=IS_RASPBIAN,
is_arm=IS_ARM,
silent_mode=parsed_dict['silent'],
base_images_dir=parsed_dict['output_dir'],
limit=parsed_dict['limit'],
status=images_status,
create_extra_images=parsed_dict['create_extra_images'])
else:
show_help(program_name=sys.argv[0])
sys.exit(ERROR_NO_ACTION_SELECTED)
|
import contextlib
import logging
import signal
import sys
LOGGER = logging.getLogger(__name__)
class ApplicationState(object):
LOAD = 0
MAIN_LOOP = 1
def __init__(self):
self.state = self.LOAD
self.running = True
signal.signal(signal.SIGINT, self.signal_handler)
def set_application_state(self, state):
self.state = state
def stop_main_loop(self):
self.running = False
def signal_handler(self, signal, frame):
if self.state == self.LOAD:
self._handle_signal_in_load_state()
elif self.state == self.MAIN_LOOP:
self._handle_signal_in_main_loop_state()
else:
raise RuntimeError("Invalid application state: {}".format(self.state))
@contextlib.contextmanager
def suspend_signal_handling(self):
current_handler = signal.getsignal(signal.SIGINT)
signal.signal(signal.SIGINT, signal.SIG_IGN)
        try:
            yield
        finally:
            # restore the previous handler even if the wrapped block raises
            signal.signal(signal.SIGINT, current_handler)
def _handle_signal_in_load_state(self):
LOGGER.debug("Handle SIGINT in 'load' state, do exit")
sys.exit(1)
def _handle_signal_in_main_loop_state(self):
LOGGER.debug("Handle SIGINT in 'main loop' state, stop the main loop")
self.stop_main_loop()
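# Usage sketch: a skeletal main loop showing how the two states are intended to be used
# (the per-iteration work is illustrative only):
if __name__ == "__main__":
    import time

    logging.basicConfig(level=logging.DEBUG)
    app_state = ApplicationState()
    # start-up work happens here; Ctrl+C during this phase exits immediately
    app_state.set_application_state(ApplicationState.MAIN_LOOP)
    while app_state.running:
        time.sleep(0.1)  # Ctrl+C now merely asks the loop to stop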
|
# Problem statement: print all possible strings of length k that can be formed from a set of n characters.
# For a length k and a set of size n, n^k strings can be formed.
''' Approach: start with an empty string and add the characters one by one;
for every character added, recursively print all possible strings beginning with the current prefix. '''
def printAllStringsOfLengthk(set , k): # This method is mainly a wrapper over recursive function
length_set = len(set)
    printAllStringsOfLengthkRec(set, "", length_set, k)
''' The main recursive method'''
def printAllStringsOfLengthkRec(set, prefix, length_set, k):
    if k == 0: # base case: when k reaches zero, print the accumulated prefix
print(prefix)
return
for i in range(length_set):
        newPrefix = prefix + set[i] # append the next character of the input set
printAllStringsOfLengthkRec(set , newPrefix ,length_set ,k - 1)
# Driver code
if __name__ == '__main__':
print('First Sample Test') #Sample test 1
set1 = ['a', 'b','c']
k = 2
printAllStringsOfLengthk(set1 , k)
print('\n Second Test') #For sample test 2
set2 = ['m','a','n','v','i']
k = 2
printAllStringsOfLengthk(set2 , k)
# Code ends here
|
"""
Example implementation of a UBXMessage file reader
using the UBXReader iterator functions
Created on 25 Oct 2020
@author: semuadmin
"""
from pyubx2.ubxreader import UBXReader
import pyubx2.exceptions as ube
class UBXStreamer:
"""
UBXStreamer class.
"""
def __init__(self, filename):
"""
Constructor.
"""
self._filename = filename
self._stream = None
self._ubxreader = None
self._connected = False
self._reading = False
def __del__(self):
"""
Destructor.
"""
self.close()
def open(self):
"""
Open file.
"""
self._connected = False
try:
self._stream = open(self._filename, "rb")
self._connected = True
except Exception as err:
print(f"Error opening file {err}")
return self._connected
def close(self):
"""
Close file.
"""
if self._connected and self._stream:
try:
self._stream.close()
except Exception as err:
print(f"Error closing file {err}")
self._connected = False
return self._connected
def reader(self, validate=False, mode=0):
"""
Reads and parses UBX message data from stream
using UBXReader iterator method
"""
i = 0
self._ubxreader = UBXReader(self._stream, validate=validate, msgmode=mode)
for msg in self._ubxreader: # invokes iterator method
try:
(raw_data, parsed_data) = msg
# if raw_data:
# print(raw_data)
if parsed_data:
print(parsed_data)
i += 1
except (ube.UBXMessageError, ube.UBXTypeError, ube.UBXParseError) as err:
print(f"Something went wrong {err}")
continue
print(f"\n\n{i} message{'' if i == 1 else 's'} read from {self._filename}.")
if __name__ == "__main__":
print("Enter fully qualified name of file containing binary UBX data: ", end="")
filefqn = input().strip('"')
print("Do you want to validate the data stream (y/n)? (n) ", end="")
val = input() or "n"
    VALD = val in ("Y", "y", "YES", "yes", "True")
print("Message mode (0=GET (output), 1=SET (input), 2=POLL (poll)? (0) ", end="")
mode = input() or "0"
MODED = int(mode)
print("Instantiating UBXStreamer class...")
ubf = UBXStreamer(filefqn)
print(f"Opening file {filefqn}...")
if ubf.open():
print("Starting file reader")
ubf.reader(VALD, MODED)
print("\n\nClosing file...")
ubf.close()
print("Test Complete")
|
import os
import bpy
from .. import base
from ... import particles_io
def get_cache(socket):
node = socket.node
pos = node.outputs['Position']
vel = node.outputs['Velocity']
col = node.outputs['Hex Color']
mat = node.outputs['Material']
emt = node.outputs['Emitter']
size = node.outputs['Size']
folder = node.inputs['Folder'].get_value()[0]
pos_key = '{0}.{1}'.format(node.name, pos.name)
vel_key = '{0}.{1}'.format(node.name, vel.name)
col_key = '{0}.{1}'.format(node.name, col.name)
mat_key = '{0}.{1}'.format(node.name, mat.name)
emt_key = '{0}.{1}'.format(node.name, emt.name)
size_key = '{0}.{1}'.format(node.name, size.name)
# scene
scn = bpy.context.scene
if not folder:
scn.elements_sockets[pos_key] = ()
scn.elements_sockets[vel_key] = ()
scn.elements_sockets[col_key] = ()
scn.elements_sockets[mat_key] = ()
scn.elements_sockets[emt_key] = ()
scn.elements_sockets[size_key] = (1.0, ) # TODO
return
caches = {}
# particles file name
name = 'particles_{0:0>6}.bin'.format(scn.frame_current)
# absolute particles file path
path = bpy.path.abspath(os.path.join(folder, name))
if os.path.exists(path):
particles_io.read_pars(path, caches, folder, socket.name)
else:
scn.elements_sockets[pos_key] = ()
scn.elements_sockets[vel_key] = ()
scn.elements_sockets[col_key] = ()
scn.elements_sockets[mat_key] = ()
scn.elements_sockets[emt_key] = ()
scn.elements_sockets[size_key] = (1.0, ) # TODO
return
scn.elements_sockets[pos_key] = caches[folder][particles_io.POS]
scn.elements_sockets[vel_key] = caches[folder][particles_io.VEL]
scn.elements_sockets[col_key] = caches[folder][particles_io.COL]
scn.elements_sockets[mat_key] = caches[folder][particles_io.MAT]
scn.elements_sockets[emt_key] = caches[folder][particles_io.EMT]
scn.elements_sockets[size_key] = (1.0, ) # TODO
class ElementsCacheNode(base.BaseNode):
bl_idname = 'elements_cache_node'
bl_label = 'Cache'
required_nodes = {'Particles': ['elements_simulation_node', ], }
get_value = {
'Position': get_cache,
'Velocity': get_cache,
'Hex Color': get_cache,
'Material': get_cache,
'Emitter': get_cache,
'Size': get_cache
}
category = base.COMPONENT
def init(self, context):
self.width = 200.0
pars = self.inputs.new('elements_struct_socket', 'Particles')
pars.text = 'Particles'
folder = self.inputs.new('elements_folder_socket', 'Folder')
folder.text = 'Folder'
# particle position
pos = self.outputs.new('elements_vector_socket', 'Position')
pos.text = 'Position'
pos.hide_value = True
# particle velocity
vel = self.outputs.new('elements_vector_socket', 'Velocity')
vel.text = 'Velocity'
vel.hide_value = True
# particle color
col = self.outputs.new('elements_integer_socket', 'Hex Color')
col.text = 'Hex Color'
col.hide_value = True
# particle material id
mat = self.outputs.new('elements_integer_socket', 'Material')
mat.text = 'Material'
mat.hide_value = True
# particle emitter id
        emt = self.outputs.new('elements_integer_socket', 'Emitter')
        emt.text = 'Emitter'
        emt.hide_value = True
# particle size
size = self.outputs.new('elements_float_socket', 'Size')
size.text = 'Size'
size.hide_value = True
|
import datetime
from direct.directnotify import DirectNotifyGlobal
from toontown.uberdog.ClientServicesManagerUD import executeHttpRequest
from direct.fsm.FSM import FSM
from direct.distributed.PyDatagram import PyDatagram
from direct.distributed.MsgTypes import *
from otp.ai.MagicWordGlobal import *
from direct.showbase.DirectObject import DirectObject
import threading
import httplib
class BanFSM(FSM):
def __init__(self, air, avId):
FSM.__init__(self, 'banFSM-%s' % avId)
self.air = air
self.avId = avId
# Needed variables for the actual banning.
self.DISLid = None
self.accountId = None
self.avName = None
def performBan(self):
# NO ALTIS API
return
httpReq = httplib.HTTPConnection('www.projectaltis.com')
httpReq.request('GET', '/api/ban/441107756FCF9C3715A7E8EA84612924D288659243D5242BFC8C2E26FE2B0428/%s' % self.accountId)
httpReq.getresponse().read()
print(self.accountId)
def ejectPlayer(self):
av = self.air.doId2do.get(self.avId)
if not av:
return
# Send the client a 'CLIENTAGENT_EJECT' with the players name.
datagram = PyDatagram()
datagram.addServerHeader(
av.GetPuppetConnectionChannel(self.avId),
self.air.ourChannel, CLIENTAGENT_EJECT)
datagram.addUint16(152)
datagram.addString(self.avName)
simbase.air.send(datagram)
def dbCallback(self, dclass, fields):
if dclass != simbase.air.dclassesByName['AccountAI']:
return
self.accountId = fields.get('ACCOUNT_ID')
if not self.accountId:
return
self.duration = None
self.performBan()
def getAvatarDetails(self):
av = self.air.doId2do.get(self.avId)
if not av:
return
self.DISLid = av.getDISLid()
self.avName = av.getName()
def log(self):
simbase.air.writeServerEvent('ban', self.accountId)
def cleanup(self):
self.air = None
self.avId = None
self.DISLid = None
self.avName = None
self.accountId = None
self.comment = None
self.duration = None
self = None
def enterStart(self):
self.getAvatarDetails()
self.air.dbInterface.queryObject(self.air.dbId, self.DISLid, self.dbCallback)
def exitStart(self):
self.log()
self.cleanup()
def enterOff(self):
pass
def exitOff(self):
pass
class BanManagerAI(DirectObject):
notify = DirectNotifyGlobal.directNotify.newCategory('BanManagerAI')
def __init__(self, air):
self.air = air
self.banFSMs = {}
def ban(self, avId, comment):
self.banFSMs[avId] = BanFSM(self.air, avId)
self.banFSMs[avId].request('Start')
self.acceptOnce(self.air.getAvatarExitEvent(avId), self.banDone, [avId])
def banDone(self, avId):
self.banFSMs[avId].request('Off')
self.banFSMs[avId] = None
@magicWord(category=CATEGORY_MODERATOR, types=[str])
def kick(reason='No reason specified'):
"""
Kick the target from the game server.
"""
target = spellbook.getTarget()
if target == spellbook.getInvoker():
return "You can't kick yourself!"
datagram = PyDatagram()
datagram.addServerHeader(
target.GetPuppetConnectionChannel(target.doId),
simbase.air.ourChannel, CLIENTAGENT_EJECT)
datagram.addUint16(155)
datagram.addString('You were kicked by a moderator for the following reason: %s' % reason)
simbase.air.send(datagram)
return "Kicked %s from the game server!" % target.getName()
@magicWord(category=CATEGORY_MODERATOR, types=[str])
def ban(reason):
"""
Ban and Kick the target from the game server.
"""
target = spellbook.getTarget()
if target == spellbook.getInvoker():
return "You can't ban yourself!"
simbase.air.banManager.ban(target.doId, reason)
datagram = PyDatagram()
datagram.addServerHeader(
target.GetPuppetConnectionChannel(target.doId),
simbase.air.ourChannel, CLIENTAGENT_EJECT)
datagram.addUint16(155)
datagram.addString('You were banned by a moderator for the following reason: %s' % reason)
simbase.air.send(datagram)
return "Kicked and Banned %s from the game server!" % target.getName()
|
# Generated by Django 3.0.2 on 2020-01-20 14:22
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('babybuddy', '0007_auto_20190607_1422'),
]
operations = [
migrations.AlterField(
model_name='settings',
name='language',
field=models.CharField(choices=[('en', 'English'), ('fr', 'French'), ('de', 'German'), ('es', 'Spanish'), ('sv', 'Swedish'), ('tr', 'Turkish')], default='en', max_length=255, verbose_name='Language'),
),
]
|
"""Responsys REST API Client."""
# used to issue CRUD requests, the meat and 'taters of this thing
import requests
# used with the login with certificate functions
# import base64 as base64
# Interact API returns a lot of json-like text objects
# we use this to bind them to python objects
import json
# used with the login with certificate functions
# from random import choice
# used with the login with certificate functions
# from string import ascii_uppercase
# our own rules for data objects.
from .containers import rules
# Helper functions for use with direct implementations of calls as below
# # Helps with Login with username and certificates
# def generate_client_challenge_value(length=16):
# return base64.b64encode(
# bytes(''.join(choice(ascii_uppercase) for i in range(16)), 'utf-8')
# )
class Client:
"""The main client."""
def __init__(self, config, creds):
"""Initialize."""
self.config = config
self.creds = creds
"""Internal methods."""
def _login(self, user_name, password, url):
"""Login with username and password."""
data = {
"user_name": user_name,
"password": password,
"auth_type": "password"
}
headers = {'content-type': 'application/x-www-form-urlencoded'}
return requests.post(url, data=data, headers=headers)
def _login_with_username_and_certificates(self, url, user_name):
"""Login with username and certificates."""
# # TODO: Implement
# # Step 1 - Authenticate server by sending the following REST request
# data = {
# "user_name" : user_name,
# "auth_type" : "server",
# "client_challenge" : client_challenge_value
# }
# service_url = 'auth/token'
# url = url + service_url
# client_challenge_value = generate_client_challenge_value()
# # Step 2 - Get response from the server and decrypt with RSA and
# # Public Key Certificate (downloaded from Interact interface)
# response = requests.post(url, data=data, headers=headers)
# # TODO: Implement parse response
# # Expect:
# # {
# # "authToken" : "<TEMP_AUTH_TOKEN>",
# # "serverChallenge" : "<BASE_64_ENCODED_SERVER_CHALLENGE>",
# # "clientChallenge" : "<ENCRYPTED_AND_THEN_BASE_64_ENCODED_CLIENT_CHALLENGE>"
# # }
# response = parse_response()
# # TODO: Implement import certificate
# certificate = import_local_public_key_certificate(file)
# # TODO: Implement RSA decryption
# response = decrypt(response)
# # TODO: Implement authorize call
# response = login_with_username_and_certificate_authorization(
# user_name,
# auth_type=client,
# server_challenge=encrypt(response["serverChallenge"])
# )
# return response
raise(NotImplementedError)
def _get_context(self):
"""
Return the login response as context.
Used with each individual call to Responsys API.
"""
context = json.loads(
self._login(
self.creds.user_name,
self.creds.password,
self.config.login_url
).text
)
context['api_url'] = self.config.api_url
return context
def _refresh_token(self, token):
"""Refresh the token. Called when it's expired."""
# # TODO: Implement
# # Refresh token
# def refresh_token(url, old_auth_token):
# service_url = 'auth/token'
# url = url + service_url
# data = {'auth_type' : 'token'}
# headers = {'Authorization' : auth_token}
# response = requests.post(url, data=data, headers=headers)
        #     return response
        raise(NotImplementedError)
"""Internal helper methods."""
def _list_child(self, child, from_type):
if type(child) is from_type:
parent = []
parent.append(child)
return parent
else:
return child
def _nonstr_to_str(self, data):
# Quietly convert bytes to strings... I'm uneasy about this
if type(data) is bytes:
data = data.decode('utf-8')
# Convert other types to strings because Responsys ignores most of them
if type(data) in [int, float, bool, dict, list, set, tuple, type(None)]:
data = str(data)
return data
def _post(self, service_url, data, **kwargs):
context = self._get_context()
data = json.dumps(data)
headers = {
'Authorization': context["authToken"],
'Content-Type': 'application/json'
}
endpoint = '{e}/{a}/{s}'.format(
e=context["endPoint"],
a=context["api_url"],
s=service_url)
response = requests.post(data=data, headers=headers, url=endpoint)
try:
response = json.loads(response.text)
except:
pass
return response
def _delete(self, service_url):
context = self._get_context()
headers = {'Authorization': context["authToken"]}
endpoint = '{e}/{a}/{s}'.format(
e=context["endPoint"],
a=context["api_url"],
s=service_url)
response = requests.delete(url=endpoint, headers=headers)
try:
response = json.loads(response.text)
except:
pass
return response
def _trim_path(self, path):
# chop trailing slash
try:
if path[-1] == '/':
path = path[:-1]
# chop leading slash
if path[0] == '/':
path = path[1:]
except:
pass
return path
def _prep_doc_and_path(
self,
document,
local_path=None,
remote_path=None
):
if local_path is None:
local_path = self.config.local_content_library_folder
local_path = self._trim_path(local_path)
if remote_path is None:
remote_path = self.config.remote_content_library_folder
remote_path = self._trim_path(remote_path)
        with open(f'{local_path}/{document}', 'r') as doc_file:
            document_data = doc_file.read()
# just use the filename, omit the path
document_name = document.split('/')[-1]
if document_name.endswith('.html'):
raise ValueError("""
.html is not allowed in Responsys Interact.
It would silently rename your .html files to .htm on upload.
Instead the Responsys Interact Python wrapper library doesn't allow it.
Rename your .html files to .htm before you upload them.
This will prevent mismatches and chaos.
You will be happy you did.
""")
data = {
'documentPath': '/contentlibrary/{p}/{d}'.format(
p=remote_path, d=document_name),
'content': document_data
}
return {'data': data, 'document_name': document_name, 'remote_path': remote_path}
"""Direct implentations of calls from Responsys Interact REST API documentation
https://docs.oracle.com/cloud/latest/marketingcs_gs/OMCEB/OMCEB.pdf
All comment descriptions are directly from the v1.3 REST API documentation,
except some English-language grammar and syntax inconsistencies are
modified from their documentation and code-comment style to match PEP-8.
"""
"""Main functions."""
def get_profile_lists(self):
"""Retrieving all profile lists for an account."""
return self._get('lists')
def update_profile_list(self,
list_name,
fields,
records,
html_value='H',
optin_value='I',
text_value='T',
insert_on_no_match=True,
insert_on_match='REPLACE_ALL',
match_column_name1='RIID_',
match_column_name2=None,
match_operator='NONE',
opt_out_value='O',
reject_records_if_channel_empty=None,
default_permission_status='OPTIN'):
"""Merge or update members in a profile list table."""
# Fields, records to lists to accept str arg for single record updates
fields = self._list_child(fields, str)
records = self._list_child(records, str)
# Clean non string objects from fields
        if self.config.caste_nonstr_to_str:
try:
fields = [self._nonstr_to_str(f) for f in fields]
except:
pass
# Clean non string from records
try:
records = [self._nonstr_to_str(r) for r in records]
except:
pass
data = {
'recordData': {
'fieldNames': fields,
'records': [
records
],
'mapTemplateName': None
},
'mergeRule': {
'htmlValue': html_value,
'optinValue': optin_value,
'textValue': text_value,
'insertOnNoMatch': insert_on_no_match,
'updateOnMatch': insert_on_match,
'matchColumnName1': match_column_name1,
'matchColumnName2': match_column_name2,
'matchOperator': match_operator,
'optoutValue': opt_out_value,
'rejectRecordIfChannelEmpty': reject_records_if_channel_empty,
'defaultPermissionStatus': default_permission_status
}
}
service_url = 'lists/{list_name}/members'.format(list_name=list_name)
return self._post(service_url, data)
# raise(NotImplementedError)
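    # A hypothetical usage sketch (list and field names below are illustrative only,
    # not taken from any real account):
    #
    #   client = Client(config, creds)
    #   client.update_profile_list(
    #       'CONTACTS_LIST',
    #       fields=['RIID_', 'EMAIL_ADDRESS_'],
    #       records=['12345', 'user@example.com'])
    #
    # A single string is accepted for a one-record update; lists are used as-is.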
def _get(self, service_url, **kwargs):
"""General purpose build for GET requests to Interact API."""
context = self._get_context()
endpoint = '{e}/{a}/{s}'.format(
e=context["endPoint"],
a=context["api_url"],
s=service_url)
headers = kwargs.get('headers', {'Authorization': context['authToken']})
# use parameters if we got them
if "parameters" in kwargs:
parameters = kwargs.get('parameters', None)
endpoint = '{e}?{p}'.format(e=endpoint, p=parameters)
response = requests.get(url=endpoint, headers=headers)
try:
response = json.loads(response.text)
except:
pass
return response
def get_campaigns(self):
"""Get all EMD email campaigns."""
return self._get('campaigns')
def get_push_campaigns(self):
"""Get all Push campaigns."""
return self._get('campaigns?type=push')
def get_member_of_list_by_riid(
self,
list_name,
riid,
fields_to_return=['all']
):
"""Retrieve a member of a profile list using RIID."""
service_url = 'lists/{l}/members/{id}'.format(l=list_name, id=riid)
parameters = 'fs={fs}'.format(fs=",".join(fields_to_return))
return self._get(service_url, parameters=parameters)
def get_member_of_list_by_attribute(
self,
list_name,
record_id,
query_attribute='c',
fields_to_return=['all']
):
"""Retrieve a member of a profile list based on query attribute."""
service_url = 'lists/{l}/members'.format(l=list_name)
parameters = 'fs={fs}&qa={qa}&id={id}'.format(
fs=",".join(fields_to_return),
qa=query_attribute,
id=record_id)
return self._get(service_url, parameters=parameters)
def get_profile_extensions_for_list(self, list_name):
"""Retrieve all profile extensions of a profile list."""
return self._get('lists/{l}/listExtensions'.format(l=list_name))
def get_member_of_profile_extension_by_riid(
self,
list_name,
pet_name,
riid,
fields_to_return=['all']
):
"""Retrieve a member of a profile extension table based on RIID."""
service_url = 'lists/{l}/listExtensions/{p}/members/{id}'.format(
l=list_name,
p=pet_name,
id=riid)
parameters = 'fs={fs}'.format(fs=",".join(fields_to_return))
return self._get(service_url, parameters=parameters)
def get_member_of_profile_extension_by_attribute(
self,
list_name,
pet_name,
record_id,
query_attribute='c',
fields_to_return=['all']
):
"""Retrieve a member of a profile extension table based on a query attribute."""
service_url = 'lists/{l}/listExtensions/{p}/members'.format(
l=list_name,
p=pet_name)
parameters = 'fs={fs}&qa={qa}&id={id}'.format(
fs=",".join(fields_to_return),
qa=query_attribute,
id=record_id)
return self._get(service_url, parameters=parameters)
def get_lists_for_record(self, riid):
"""Find what lists a record is in by RIID."""
all_lists = [list_name["name"] for list_name in self.get_profile_lists()]
# container list
member_of = []
for profile_list in all_lists:
            response = self.get_member_of_list_by_riid(
                profile_list, riid)
# if the member (by riid) is in the profile list
# add it to the list of all profile lists
if "recordData" in response:
member_of.append(profile_list)
return member_of
def send_email_message(
self,
recipients,
folder_name,
campaign_name,
optional_data={}):
"""Trigger email message."""
# Accept a string for one recipient but work with a list either way.
recipients = self._list_child(recipients, str)
if type(recipients) is not list:
raise TypeError(
'Recipients data must be a string of one recipient or a list.')
# Accept a dict for one recipient's optional data
# but work with a list either way.
optional_data = self._list_child(optional_data, dict)
optional_data = [
{
self._nonstr_to_str(k):self._nonstr_to_str(v) for k,v in d.items()
} for d in optional_data
]
# then if there's no optional data extend it out so we can zip it up
if optional_data == [{}] and len(recipients) > 1:
optional_data = optional_data * len(recipients)
if type(optional_data) is not list:
raise TypeError(
'Recipients data must be a dictionary of key/value pairs for\n'+
'one recipient or a list of dictionaries for multiple recipients')
if len(recipients) != len(optional_data):
raise ValueError(
'Recipients list must be same length as optional data list')
zipped = zip(recipients, optional_data)
data = {
"recipientData" : [
{
"recipient" : {
"customerId" : None,
"emailAddress" : recipient[0],
"listName" : {
"folderName" : folder_name,
"objectName" : campaign_name
},
"recipientId" : None,
"mobileNumber" : None,
"emailFormat" : "HTML_FORMAT"
},
"optionalData" : [
                    {} if len(d.items()) == 0 else {
"name": list(d.keys())[0],
"value": list(d.values())[0]
} for d in self._list_child(recipient[1], dict)
]
} for recipient in zipped
]
}
service_url = 'campaigns/{c}/email'.format(c=campaign_name)
return self._post(service_url, data)
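    # A hypothetical usage sketch (folder, campaign and data values are illustrative only):
    #
    #   client.send_email_message(
    #       'user@example.com',
    #       'MyFolder',
    #       'MyCampaign',
    #       optional_data={'FIRST_NAME': 'Ada'})
    #
    # Multiple recipients take a list of addresses plus a list of optional data
    # dictionaries of the same length.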
def delete_from_profile_list(self, list_name, riid):
"""Delete Profile List Recipients based on RIID."""
service_url = 'lists/{l}/members/{id}'.format(l=list_name, id=riid)
return self._delete(service_url)
def delete_member_of_profile_extension_by_riid(
self,
list_name,
pet_name,
riid
):
"""Delete a member of a profile extension table based on RIID."""
service_url = 'lists/{l}/listExtensions/{p}/members/{id}'.format(
l=list_name,
p=pet_name,
id=riid)
return self._delete(service_url)
def create_supplemental_table(
self,
supplemental_table_name,
folder_name='',
fields='',
default_field_type='STR500',
data_extraction_key=None,
primary_key=None
):
"""Create a new supplemental table."""
if type(fields) == str:
raise TypeError('Fields must be a list.')
if folder_name == '':
folder_name = self.config.api_folder
service_url = 'folders/{f}/suppData'.format(f=folder_name)
if primary_key is None:
try:
primary_key = fields[0]
            except IndexError:
raise ValueError(
"""Cannot create supplemental table with no fields.
Primary key field is required.""")
data = {
# TODO: Use field types per field
"table": {"objectName": supplemental_table_name},
"fields": [
{
"fieldName": field,
"fieldType": default_field_type,
"dataExtractionKey": False
} for field in fields
],
"primaryKeys": [primary_key]
}
return self._post(service_url, data)
def list_folder(
self,
remote_path=None,
object_type='all'
):
"""List the contents of a folder."""
valid_types = ['all', 'folders', 'docs', 'items']
if object_type not in valid_types:
raise ValueError(
"""Object type must be one of {v}.""".format(
v=str(valid_types)[1:-1])
)
if remote_path is None:
remote_path = self.config.remote_content_library_folder
remote_path = self._trim_path(remote_path)
service_url = 'clFolders/contentlibrary/{f}?type={o}'.format(
f=remote_path, o=object_type)
return self._get(service_url)
def create_folder(
self,
remote_path=None
):
"""Create a new folder in /contentlibrary/."""
if remote_path is None:
remote_path = self.config.remote_content_library_folder
remote_path = self._trim_path(remote_path)
service_url = 'clFolders'
data = {
"folderPath": '/contentlibrary/{f}'.format(f=remote_path)
}
return self._post(service_url, data)
def create_document(
self,
document,
local_path=None,
remote_path=None
):
"""Create a document in /contentlibrary/."""
        if local_path is None:
            local_path = self.config.local_content_library_folder
local_path = self._trim_path(local_path)
if remote_path is None:
remote_path = self.config.remote_content_library_folder
remote_path = self._trim_path(remote_path)
service_url = 'clDocs'
data = self._prep_doc_and_path(
document, local_path, remote_path)['data']
return self._post(service_url, data)
def get_document(
self,
document,
remote_path=None
):
"""Get a document from /contentlibrary/."""
if remote_path is None:
remote_path = self.config.remote_content_library_folder
remote_path = self._trim_path(remote_path)
service_url = 'clDocs/contentlibrary/{rfp}/{d}'.format(
rfp=remote_path,
d=document)
return self._get(service_url)
def update_document(
self,
document,
local_path=None,
remote_path=None
):
"""Update a document that's already in /contentlibrary/."""
        if local_path is None:
            local_path = self.config.local_content_library_folder
local_path = self._trim_path(local_path)
if remote_path is None:
remote_path = self.config.remote_content_library_folder
remote_path = self._trim_path(remote_path)
service_url = 'clDocs/contentlibrary/{rfp}/{d}'.format(
rfp=remote_path,
d=document)
prepped = self._prep_doc_and_path(
document, local_path, remote_path)
return self._post(service_url, prepped['data'])
def delete_document(self, document, remote_path=None):
"""Delete a document in /contentlibrary/'."""
# # First try to get the document before we delete it!
# self.get_document(document, remote_path)
if remote_path is None:
remote_path = self.config.remote_content_library_folder
remote_path = self._trim_path(remote_path)
service_url = 'clDocs/contentlibrary/{p}/{d}'.format(
p=remote_path, d=document)
return self._delete(service_url)
def delete_folder(self, remote_path=None):
"""Delete a folder in /contentlibrary/."""
if remote_path is None:
remote_path = self.config.remote_content_library_folder
        remote_path = self._trim_path(remote_path)
service_url = 'clFolders/contentlibrary/{f}'.format(f=remote_path)
return self._delete(service_url)
# NOT IMPLEMENTED GROUP
def create_profile_extension(self, profile_extension_name, records):
"""Create a profile extension table."""
def update_profile_extension(self, profile_extension_name, records):
"""Update a profile extension table."""
raise(NotImplementedError)
def update_supplemental_table(self, supplemental_table_name, records):
"""Update a supplemental table."""
raise(NotImplementedError)
def get_record_from_supplemental_table(self, supplemental_table_name, record):
"""Get a record from a supplemental table."""
raise(NotImplementedError)
def delete_record_from_supplemental_table(self, supplemental_table_name, record):
"""Delete a record from a supplemental table."""
raise(NotImplementedError)
def update_list_and_send_email_message(
self, list, recipients, campaign_name
):
"""Update a list and then send an email message."""
raise(NotImplementedError)
def update_list_and_send_email_message_with_attachments(
        self, list, recipients, campaign_name, attachments
):
"""Update a list and send an email message."""
raise(NotImplementedError)
def update_list_and_send_sms(self, list, recipients, campaign_name):
"""Update a list and send an sms."""
raise(NotImplementedError)
def send_push_message(self, campaign_name, recipient_id):
"""Send a push message."""
raise(NotImplementedError)
def trigger_custom_event(self, event_name):
"""Trigger a custom event."""
raise(NotImplementedError)
def schedule_campaign(self, campaign_name, schedule):
"""Schedule a campaign."""
raise(NotImplementedError)
def get_schedules_for_campaign(self, campaign_name):
"""Get the schedule IDs for a campaign."""
raise(NotImplementedError)
def get_campaign_schedule(self, campaign_name):
"""Get the schedule for a campaign."""
raise(NotImplementedError)
def update_campaign_schedule(self, campaign_name):
"""Update a campaign schedule."""
raise(NotImplementedError)
def unschedule_campaign(self, campaign_name):
"""Unschedule a campaign."""
raise(NotImplementedError)
def create_media_file(self, path_to_media_file, media_file):
"""Create a media file."""
raise(NotImplementedError)
def get_media_file(self, path_to_media_file):
"""Get a media file."""
raise(NotImplementedError)
def update_media_file(self, path_to_old_media_file, new_media_file):
"""Update a media file."""
raise(NotImplementedError)
def delete_media_file(self, path_to_media_file):
"""Delete a media file."""
raise(NotImplementedError)
def copy_media_file(self, path_to_media_file, new_name=None):
"""Copy a media file."""
raise(NotImplementedError)
def set_images_in_document(self, path_to_interact_document, images):
"""Set the image data for media that are referenced in a document."""
raise(NotImplementedError)
def get_images_in_document(self, path_to_interact_document):
"""Get the image data for media that are referenced in a document."""
raise(NotImplementedError)
|
""" usuario table
Revision ID: 375f0af244b6
Revises: 2f9a3ba45b00
Create Date: 2018-09-08 03:19:22.000647
"""
# revision identifiers, used by Alembic.
revision = '375f0af244b6'
down_revision = '2f9a3ba45b00'
from alembic import op
import sqlalchemy as sa
def upgrade():
""" Executa as seguintes tarefas no banco de dados """
op.create_table(
'usuarios',
sa.Column('cod_usuario', sa.Integer(), nullable=False),
sa.Column('cod_grupo', sa.Integer(), nullable=True),
sa.Column('usuario', sa.String(length=64), nullable=True),
sa.Column('email', sa.String(length=64), unique=True),
sa.Column('senha_hash', sa.String(length=128), nullable=False),
sa.Column('questoes_acertadas', sa.Integer(), nullable=False),
sa.Column('numero_jogos', sa.Integer(), nullable=False),
sa.Column('membro_desde', sa.DateTime(), nullable=True),
sa.ForeignKeyConstraint(['cod_grupo'], ['grupos.cod_grupo'],),
sa.PrimaryKeyConstraint('cod_usuario')
)
op.create_index('ix_usuarios_usuario', 'usuarios', ['usuario'], unique=True)
op.create_index('ix_usuarios_email', 'usuarios', ['email'], unique=True)
def downgrade():
""" Executa as seguintes tarefas no banco de dados """
op.drop_index('ix_usuarios_usuario', 'usuarios')
op.drop_index('ix_usuarios_email', 'usuarios')
op.drop_table('usuarios')
|
import os
class Config(object):
DEVELOPMENT = True
TEMPLATES_AUTO_RELOAD = True
SECRET_KEY = os.environ.get('SECRET_KEY')
|
def interversion(s):
return " ".join(list(reversed(s.split(" "))))
print(interversion("Marc Barthet"))
|
import copy
import contextlib
import StringIO
import unittest
from view import viewfile
def _make_simple_entry(name, branch='master', revision='HEAD'):
return viewfile.ViewFileEntry(name, 'git://{0}/{0}'.format(name), 'GIT',
branch, revision)
class TestViewFileEntryMethods(unittest.TestCase):
def test_has_revision(self):
unversioned_entry = _make_simple_entry('foo')
versioned_entry = _make_simple_entry(
'foo',
revision='7783ac32d05162f328bba0d64e56b80a9f15bb17')
self.assertFalse(unversioned_entry.has_revision())
self.assertTrue(versioned_entry.has_revision())
def test_eq(self):
self.assertTrue(_make_simple_entry('foo') == _make_simple_entry('foo'))
self.assertFalse(_make_simple_entry('foo') == _make_simple_entry('bar'))
class TestViewFileMethods(unittest.TestCase):
def test_dump(self):
view = viewfile.ViewFile()
view.entries.append(_make_simple_entry('foo'))
with contextlib.closing(StringIO.StringIO()) as f:
view.dump(f)
contents = f.getvalue()
self.assertEqual(contents,
'foo git://foo/foo GIT master HEAD\n')
def test_eq(self):
foo1 = viewfile.ViewFile([_make_simple_entry('foo')])
foo2 = viewfile.ViewFile([_make_simple_entry('foo')])
bar = viewfile.ViewFile([_make_simple_entry('bar')])
self.assertTrue(foo1 == foo2)
self.assertFalse(foo1 == bar)
class TestViewFileParse(unittest.TestCase):
def test_valid(self):
contents = \
'''
# Comments and whitespace only lines should be ignored
foo git://foo/foo GIT master HEAD
'''
with contextlib.closing(StringIO.StringIO(contents)) as f:
view = viewfile.parse(f)
expected = viewfile.ViewFile([_make_simple_entry('foo')])
self.assertEqual(view, expected)
def test_invalid(self):
invalid_views = [
'foo git://foo/foo GIT master',
'foo git://foo/foo GIT master HEAD extra'
]
for s in invalid_views:
with contextlib.closing(StringIO.StringIO(s)) as f:
with self.assertRaises(viewfile.ParseError):
viewfile.parse(f)
class TestViewFileDiff(unittest.TestCase):
def setUp(self):
self.foo_entry = _make_simple_entry('foo')
self.bar_entry = _make_simple_entry('bar')
self.foo_dev_entry = _make_simple_entry('foo', branch='dev')
self.empty_view = viewfile.ViewFile()
self.foo_view = viewfile.ViewFile([copy.copy(self.foo_entry)])
self.bar_view = viewfile.ViewFile([copy.copy(self.bar_entry)])
self.foobar_view = viewfile.ViewFile([copy.copy(self.foo_entry),
copy.copy(self.bar_entry)])
self.foo_dev_view = viewfile.ViewFile([copy.copy(self.foo_dev_entry)])
def test_no_changes(self):
diff = viewfile.diff(self.empty_view, self.empty_view)
self.assertEqual(diff, {})
diff = viewfile.diff(self.foo_view, self.foo_view)
self.assertEqual(diff, {})
def test_added(self):
diff = viewfile.diff(self.empty_view, self.foo_view)
self.assertEqual(diff, {'foo': (None, self.foo_entry)})
diff = viewfile.diff(self.empty_view, self.foobar_view)
self.assertEqual(diff, {'bar': (None, self.bar_entry),
'foo': (None, self.foo_entry)})
diff = viewfile.diff(self.foo_view, self.foobar_view)
self.assertEqual(diff, {'bar': (None, self.bar_entry)})
def test_removed(self):
diff = viewfile.diff(self.foo_view, self.empty_view)
self.assertEqual(diff, {'foo': (self.foo_entry, None)})
diff = viewfile.diff(self.foobar_view, self.empty_view)
self.assertEqual(diff, {'bar': (self.bar_entry, None),
'foo': (self.foo_entry, None)})
diff = viewfile.diff(self.foobar_view, self.foo_view)
self.assertEqual(diff, {'bar': (self.bar_entry, None)})
def test_changed(self):
diff = viewfile.diff(self.foo_view, self.foo_dev_view)
self.assertEqual(diff, {'foo': (self.foo_entry, self.foo_dev_entry)})
def test_complex(self):
diff = viewfile.diff(self.foobar_view, self.foo_dev_view)
self.assertEqual(diff, {'foo': (self.foo_entry, self.foo_dev_entry),
'bar': (self.bar_entry, None)})
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
import sys
import subprocess
import time
"""Takes a textfile with ip-adresses and their frequency as input, performs
whois-request using the Linux-Bash and produces a csv-output showing the
ip-address and the correspondign frequency, the countrycode and the owner
of the ip-address"""
# the file which contains the ip-addresses and their number of occurrences must be
# specified when calling the script. if not, there is a short error
# message and the script terminates
try:
inputfile = sys.argv[1]
except:
print("""
Error! You must provide a file as input like this:
python3 whois-helper.py addresses.txt
The script will be terminated now.
""")
sys.exit()
# create and open a file to write the results in
outputfile = open("results.csv", "w")
# declare needed variables for later
countrycode = None
description = None
# writing a headline in the file for the results
outputfile.write("\"frequency\",\"IP-address\",\"countrycode\",\"description\"\n")
with open(inputfile) as source:
for line in source:
line = line.strip()
cc_tmp_list = []
desc_tmp_list = []
# split the frequency and ip-address
pieces = line.split(' ')
# the command to execute
command = subprocess.Popen(['whois', pieces[1]], stdout = subprocess.PIPE)
# print-statement for the user
print("whois-query for:", pieces[1])
# write the number of occurance and the ip-address in the file for the results
outputfile.write("\"{}\",\"{}\",".format(pieces[0], pieces[1]))
# looping through the result of the whois-query
for line in command.stdout:
line = line.strip().decode('UTF-8')
if line.startswith("country:") or line.startswith("Country:"):
parts = line.strip().split(":")
cc_tmp_list.append(parts[1].strip())
if line.startswith("descr:") or line.startswith("OrgName") or line.startswith("owner:"):
parts = line.strip().split(":")
desc_tmp_list.append(parts[1].strip())
        # usually there is more than one line matching the current
        # pattern, but only the first ones are wanted; guard against
        # queries that returned no matching lines at all
        countrycode = cc_tmp_list[0] if cc_tmp_list else None
        description = desc_tmp_list[0] if desc_tmp_list else None
# write countrycode and description to the file for the results
outputfile.write("\"{}\",\"{}\"\n".format(countrycode, description))
# Setting the variables to "None" again, in case an incorrect ip-address is queried
# otherwise the data of the previous ip-address would be written again
countrycode = None
description = None
# wait for 3 seconds in order not to get blocked - hopefully
time.sleep(3)
# close the file for the results
outputfile.close()
|
# ----------------------------------------------------------------------
# Author: [email protected]
# ----------------------------------------------------------------------
import os, subprocess
aliases_path = os.path.expanduser('~/.bash_aliases')
my_alias = "alias camera='" + os.getcwd() + "/start_camera.sh'\n"
if os.path.exists(aliases_path):
print('bash_aliases existing, modifying')
with open(aliases_path, 'r') as f:
current_file = f.readlines()
need_to_add_alias = True
for ind, line in enumerate(current_file):
if 'camera' in line:
need_to_add_alias = False
current_file[ind] = my_alias
break
if need_to_add_alias:
current_file.append(my_alias)
with open(aliases_path, 'w') as f:
f.writelines(current_file)
print('bash_aliases edited')
|
import pytest
from src.root.search.binary import binary_search
def test_binary():
array = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
target = 6
index = 6
assert binary_search(array, target) == index
def test_binary_not_found():
array = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
target = 100
index = -1
assert binary_search(array, target) == index
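# A minimal reference implementation consistent with these tests; the actual
# src.root.search.binary module may differ:
#
#   def binary_search(array, target):
#       low, high = 0, len(array) - 1
#       while low <= high:
#           mid = (low + high) // 2
#           if array[mid] == target:
#               return mid
#           if array[mid] < target:
#               low = mid + 1
#           else:
#               high = mid - 1
#       return -1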
|
from __future__ import print_function
from tensorflow.python.keras.models import Model, load_model
from tensorflow.python.keras.layers import Input, LSTM, Dense, Embedding
import numpy as np
from src.utils import one_hot_encode, add_n
class Seq2Seq:
def __init__(self, num_encoder_tokens, num_decoder_tokens, start_token, end_token,
latent_dim=256, projection='one_hot', emb_dim=64,
restore_path=None):
self.num_encoder_tokens = num_encoder_tokens
self.num_decoder_tokens = num_decoder_tokens
self.start_token = start_token
self.end_token = end_token
if restore_path:
self.train_model = load_model(restore_path + '/train_model.s2s.h5')
self.encoder_model = load_model(restore_path + '/encoder_model.s2s.h5')
self.decoder_model = load_model(restore_path + '/decoder_model.s2s.h5')
return
# Define an input sequence and process it.
encoder_inputs = Input(shape=(None,), name='encoder_input')
if projection == 'one_hot':
embedding = Embedding(
num_encoder_tokens+1,
num_encoder_tokens,
weights=[np.concatenate((np.zeros(shape=(1, num_encoder_tokens)), np.identity(num_encoder_tokens)))],
trainable=False,
mask_zero=True,
name='encoder_embedding'
)
elif projection == 'word2vec':
embedding = Embedding(
num_encoder_tokens + 1,
emb_dim,
mask_zero=True,
name='encoder_embedding'
)
else:
raise Exception("projection method not recognized")
encoder_emb = embedding(encoder_inputs)
encoder = LSTM(latent_dim, return_state=True, name='encoder_lstm')
encoder_outputs, state_h, state_c = encoder(encoder_emb)
# We discard `encoder_outputs` and only keep the states.
encoder_states = [state_h, state_c]
# Set up the decoder, using `encoder_states` as initial state.
decoder_inputs = Input(shape=(None,), name='decoder_input')
if projection == 'one_hot':
embedding = Embedding(
num_decoder_tokens+1,
num_decoder_tokens,
weights=[np.concatenate((np.zeros(shape=(1, num_decoder_tokens)), np.identity(num_decoder_tokens)))],
trainable=False,
mask_zero=True,
name='decoder_embedding'
)
elif projection == 'word2vec':
embedding = Embedding(
num_decoder_tokens+1,
emb_dim,
mask_zero=True,
name='decoder_embedding'
)
else:
raise Exception("projection method not recognized")
decoder_emb = embedding(decoder_inputs)
# We set up our decoder to return full output sequences,
# and to return internal states as well. We don't use the
# return states in the training model, but we will use them in inference.
decoder_lstm = LSTM(latent_dim, return_sequences=True, return_state=True, name='decoder_lstm')
decoder_outputs, _, _ = decoder_lstm(decoder_emb,
initial_state=encoder_states)
decoder_dense = Dense(self.num_decoder_tokens, activation='softmax', name='decoder_softmax')
decoder_outputs = decoder_dense(decoder_outputs)
# Define the model that will turn
# `encoder_input_data` & `decoder_input_data` into `decoder_target_data`
self.train_model = Model([encoder_inputs, decoder_inputs], decoder_outputs)
# Define sampling models
self.encoder_model = Model(encoder_inputs, encoder_states)
decoder_state_input_h = Input(shape=(latent_dim,), name='decoder_state_input_h')
decoder_state_input_c = Input(shape=(latent_dim,), name='decoder_state_input_c')
decoder_states_inputs = [decoder_state_input_h, decoder_state_input_c]
decoder_outputs, state_h, state_c = decoder_lstm(
decoder_emb, initial_state=decoder_states_inputs)
decoder_states = [state_h, state_c]
decoder_outputs = decoder_dense(decoder_outputs)
self.decoder_model = Model(
[decoder_inputs] + decoder_states_inputs,
[decoder_outputs] + decoder_states)
def pre_process_encoder_input(self, input):
# return one_hot_encode(input, self.max_encoder_seq_length, self.num_encoder_tokens)
return add_n(input, self.max_encoder_seq_length, 1)
def pre_process_decoder_input(self, input):
# return one_hot_encode(input, self.max_decoder_seq_length, self.num_decoder_tokens)
return add_n(input, self.max_decoder_seq_length, 1)
def pre_process_decoder_target(self, input):
# return one_hot_encode(input, self.max_decoder_seq_length, self.num_decoder_tokens)
return add_n(input, self.max_decoder_seq_length, 0)
def train(self, encoder_input_seqs, decoder_input_seqs, decoder_target_seqs, batch_size=64, epochs=100):
# Run training
self.max_encoder_seq_length = max([len(seq) for seq in encoder_input_seqs])
self.max_decoder_seq_length = max([len(seq) for seq in decoder_input_seqs])
encoder_input_data = self.pre_process_encoder_input(encoder_input_seqs)
print('encoder_input_data.shape: ', encoder_input_data.shape)
decoder_input_data = self.pre_process_decoder_input(decoder_input_seqs)
print('decoder_input_data.shape: ', decoder_input_data.shape)
decoder_target_data = self.pre_process_decoder_target(decoder_target_seqs)
print('decoder_target_data.shape: ', decoder_target_data.shape)
self.train_model.compile(optimizer='rmsprop', loss='sparse_categorical_crossentropy')
self.train_model.fit([encoder_input_data, decoder_input_data], decoder_target_data,
batch_size=batch_size,
epochs=epochs,
validation_split=0.2)
# Save model
self.train_model.save('train_model.s2s.h5')
self.encoder_model.save('encoder_model.s2s.h5')
self.decoder_model.save('decoder_model.s2s.h5')
def encode(self, input_seq):
# encoder_input_data = one_hot_encode(input_seq, self.max_encoder_seq_length, self.num_encoder_tokens)
encoder_input_data = self.pre_process_encoder_input(input_seq)
return self.encoder_model.predict(encoder_input_data)
def decode(self, states_value):
# Generate empty target sequence of length 1.
# target_seq = np.zeros((1, 1, self.num_decoder_tokens))
# Populate the first token of target sequence with the start token.
# target_seq[0, 0, self.start_token] = 1.
target_seq = np.zeros((1, 1))
target_seq[0, 0] = self.start_token + 1
# Sampling loop for a batch of sequences
# (to simplify, here we assume a batch of size 1).
stop_condition = False
decoded_seq = []
while not stop_condition:
output_tokens, h, c = self.decoder_model.predict(
[target_seq] + states_value)
# Sample a token
sampled_token_index = np.argmax(output_tokens[0, -1, :])
decoded_seq += [sampled_token_index]
# Exit condition: either hit max length
# or find stop token.
if (sampled_token_index == self.end_token or
len(decoded_seq) > self.max_decoder_seq_length):
stop_condition = True
# Update the target sequence (of length 1).
# target_seq = np.zeros((1, 1, self.num_decoder_tokens))
# target_seq[0, 0, sampled_token_index] = 1.
target_seq = np.zeros((1, 1))
target_seq[0, 0] = sampled_token_index + 1
# Update states
states_value = [h, c]
return decoded_seq
def predict(self, input_seq):
# Encode the input as state vectors.
states_value = self.encode(input_seq)
prediction = self.decode(states_value)
return prediction
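# A hypothetical usage sketch (vocabulary sizes, token ids and sequence data are
# illustrative only):
#
#   model = Seq2Seq(num_encoder_tokens=50, num_decoder_tokens=50,
#                   start_token=1, end_token=2)
#   model.train(encoder_input_seqs, decoder_input_seqs, decoder_target_seqs,
#               batch_size=64, epochs=10)
#   predicted_tokens = model.predict([encoder_input_seqs[0]])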
|
import inspect
from django.contrib.auth.models import User
from rest_framework import permissions
class PermissionFix(object):
"""
This class addresses a shortcoming in permissions.BasePermission.
Use as a mixin, to ensure that a Permission class defaults to
the has_permission method when there is no special object
permission method. Should be the first parent class.
"""
def has_object_permission(self, request, view, obj):
return self.has_permission(request, view)
class IsAdminUser(PermissionFix, permissions.IsAdminUser):
""" Fixed version of IsAdminUser. """
pass
class IsOwner(permissions.IsAuthenticated):
"""
Object-level permission to only allow owners of an object.
Assumes the model instance has a `get_owner` method, or is a User.
Unauthenticated users cannot be owners.
"""
def has_object_permission(self, request, view, obj):
if hasattr(obj, 'get_owner'):
return obj.get_owner() == request.user
elif type(obj) == User:
return obj == request.user
raise TypeError('No way to determine owner for {} model'.format(type(obj).__name__))
class ReadOnly(PermissionFix, permissions.BasePermission):
""" Always allow read operations, never allow modifying operations. """
def has_permission(self, request, view):
return request.method in permissions.SAFE_METHODS
class IsAnonCreate(PermissionFix, permissions.BasePermission):
""" Allow creation for unauthenticated users. """
def has_permission(self, request, view):
if request.user.is_authenticated():
return False
if request.method == "POST":
return True
return False
class IsAuthenticated(PermissionFix, permissions.IsAuthenticated):
""" Fixed version of IsAuthenticated. """
pass
# Logic below was inspired on
# https://github.com/caxap/rest_condition/blob/7d0f251ce24a5869f63317b2945643d03ab0c221/rest_condition/permissions.py
def _is_factory(obj):
return inspect.isclass(obj) or inspect.isfunction(obj)
class Or(permissions.BasePermission):
"""
Provides a simple way to define composite permissions where only
one of the operands needs to accept.
Example of usage:
>>> composed = Or(Perm1, Perm2, Perm3)
"""
def __init__(self, *perms):
self.perms = [perm() if _is_factory(perm) else perm for perm in perms]
def evaluate_permissions(self, method, *args, **kwargs):
return any((getattr(p, method)(*args, **kwargs) for p in self.perms))
def has_object_permission(self, request, view, obj):
return self.evaluate_permissions(
'has_object_permission',
request,
view,
obj,
)
def has_permission(self, request, view):
return self.evaluate_permissions('has_permission', request, view)
def __call__(self):
return self
|
import cocotb
from cocotb.clock import Clock
from cocotb.triggers import RisingEdge, FallingEdge, ClockCycles
from frequency_counter.test.test_seven_segment import read_segments
@cocotb.test()
async def test_wrapper(dut):
clock = Clock(dut.wb_clk_i, 10, units="ns")
cocotb.fork(clock.start())
# reset but project is inactive
dut.wb_rst_i <= 1
await ClockCycles(dut.wb_clk_i, 5)
dut.wb_rst_i <= 0
dut.la_data_in <= 0
# pause
await ClockCycles(dut.wb_clk_i, 100)
# activate project
dut.active <= 1
# reset it
dut.la_data_in <= 1 << 0
await ClockCycles(dut.wb_clk_i, 1)
dut.la_data_in <= 0 << 0
await ClockCycles(dut.wb_clk_i, 1)
await ClockCycles(dut.wb_clk_i, 100)
|
#!/usr/bin/python3
'''
decode a CID (Content IDentifier) as used in IPFS
see https://github.com/multiformats/cid
To decode a CID, follow the following algorithm:
1. * If it's a string (ASCII/UTF-8):
* If it is 46 characters long and starts with Qm..., it's a CIDv0. Decode
it as base58btc and continue to step 2.
* Otherwise, decode it according to the multibase spec and:
* If the first decoded byte is 0x12, return an error. CIDv0 CIDs
may not be multibase encoded and there will be no CIDv18
(0x12 = 18) to prevent ambiguity with decoded CIDv0s.
* Otherwise, you now have a binary CID. Continue to step 2.
2. Given a (binary) CID (cid):
* If it's 34 bytes long with the leading bytes [0x12, 0x20, ...],
it's a CIDv0.
* The CID's multihash is cid.
* The CID's multicodec is DagProtobuf
* The CID's version is 0.
* Otherwise, let N be the first varint in cid. This is the CID's version.
* If N == 0x01 (CIDv1):
* The CID's multicodec is the second varint in cid
* The CID's multihash is the rest of the cid (after the second varint).
* The CID's version is 1.
* If N == 0x02 (CIDv2), or N == 0x03 (CIDv3), the CID version is reserved.
* If N is equal to some other multicodec, the CID is malformed.
for multicodecs, see //github.com/multiformats/multicodec/blob/master/table.csv
some immediately useful ones:
0: "identity", raw binary
0x55 ('U'): "raw", raw binary
0x70 ('p'): "dag-pb" (DagProtobuf), MerkleDAG protobuf, used by CIDv0
the final hash is a representation of the contents of the Merkle DAG of the
data. from looking at several files under ~/.ipfs/blocks, it seems to be of
the form: 0x0a <size of remainder of block (past this size varint)>
0x08 0x02 0x12 <size of data itself> <data itself> 0x18 <size of data itself>
for example,
.ipfs/blocks/OO/CIQBT4N7PS5IZ5IG2ZOUGKFK27IE33WKGJNDW2TY3LSBNQ34R6OVOOQ
starts with (in hex) 0a 92 09 08 02 12 8a 09 <data follows>, which after
decoding the varints is: 10, 1170, 8, 2, 18, 1162. following the data we find:
18 8a 09, which decodes to 24, 1162.
the file itself, with the header and trailer bytes removed, is from
https://github.com/ipfs/go-ipfs/blob/master/assets/init-doc/security-notes,
and has 1162 bytes. The corresponding CID is
QmQ5vhrL7uv6tuoN9KeVBwd4PwfQkXdVVmDLUZuTNxqgvm. It has 1173 bytes total
(the 1170 above plus the initial 3 bytes 0a 92 08).
let's take a much smaller example. the text 'ipfs' has a Merkle DAG
of 0a 0a 08 02 12 04 69 70 66 73 18 04, which decodes to 10, 10, 8, 2, 18,
4, 'ipfs', 24, 4. That's file type 10 (0a), the size (10), the usual bytes
8, 2, and 18, followed by the actual data size (4), the actual data 'ipfs',
followed by the end marker byte 24 and the final size varint, again 4.
'''
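# Worked example (a sketch based on the notes above, not part of the decoder):
# a CIDv0 is the base58btc encoding of the sha256 multihash 0x12 0x20 || sha256(block).
#   from hashlib import sha256
#   import base58
#   block = bytes.fromhex('0a0a08021204697066731804')  # dag-pb block for the text 'ipfs'
#   base58.b58encode(bytes([0x12, 0x20]) + sha256(block).digest())
#   # should reproduce the 'ipfs' CID used in the doctests below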
import sys, logging, json, base64 # pylint: disable=multiple-imports
from hashlib import sha256
from subprocess import check_output
import base58
logging.basicConfig(level=logging.DEBUG if __debug__ else logging.INFO)
def decode_cid(cid):
r'''
decode a CID according to given specification
# ubuntu-20.04.1-desktop-amd64.iso
# see //docs.ipfs.io/concepts/hashing/
>>> decode_cid(b'QmPK1s3pNYLi9ERiq3BDxKa4XosgWwFRQUydHUtz4YgpqB')
'0e7071c59df3b9454d1d18a15270aa36d54f89606a576dc621757afd44ad1d2e'
# empty file
# see https://docs.ipfs.io/reference/cli/ (ipfs add example.jpg)
>>> decode_cid(b'QmbFMke1KXqnYyBBWxB74N4c5SBnJMVAiMNRcGu6x1AwQH')
'bfccda787baba32b59c78450ac3d20b633360b43992c77289f9ed46d843561e6'
# file containing just 'ipfs'
>>> cid = 'QmejvEPop4D7YUadeGqYWmZxHhLc4JBUCzJJHWMzdcMe2y'
>>> decode_cid(cid.encode())
'f3b0e682d79b8b7a2c216d62ace28c5746a548218c77b556ec932f3a64b914b6'
'''
logging.debug('CID: %r', cid)
bcid, encoded, hashed = b'', b'', b''
if len(cid) == 46 and cid.startswith(b'Qm'):
logging.debug('we appear to have a valid CIDv0')
bcid = base58.b58decode(cid)
else:
raise NotImplementedError('Multibase not yet implemented')
logging.debug('binary CID: %r', bcid)
if len(bcid) == 34 and bcid.startswith(b'\x12\x20'):
logging.debug('multicodec: DagProtobuf, multihash: %r', bcid)
encoded = bcid
else:
cid_version, encoded = decode_varint(bcid)
logging.debug('cid_version: %s', cid_version)
if cid_version == 1:
multicodec, encoded = decode_varint(encoded)
logging.debug('multicodec: %s, multihash: %r', multicodec, encoded)
elif cid_version in [2, 3]:
raise NotImplementedError('Reserved version %d' % cid_version)
else:
raise NotImplementedError('Malformed CID')
multihash, multihashed = decode_varint(encoded)
if multihash != 0x12:
raise NotImplementedError('Only sha256 is currently supported')
hashed_size, hashed = decode_varint(multihashed)
if hashed_size != 32: # 0x20 (space)
raise NotImplementedError('Only 32 bytes (sha256) is supported')
return hashed.hex()
def decode_varint(bytestring):
r'''
decode a variable-length unsigned integer
return the integer and the remainder of the bytestring
see https://github.com/multiformats/unsigned-varint
>>> decode_varint(bytes([0b00000001]))
(1, b'')
>>> decode_varint(bytes([0b01111111]))
(127, b'')
>>> decode_varint(bytes([0b10000000, 0b00000001]))
(128, b'')
>>> decode_varint(bytes([0b11111111, 0b00000001]))
(255, b'')
>>> decode_varint(bytes([0b10101100, 0b00000010]))
(300, b'')
>>> decode_varint(bytes([0b10000000, 0b10000000, 0b00000001]))
(16384, b'')
>>> decode_varint(b'\x80\x80\x80\x80\x80\x80\x80\x80\x80\x01')
Traceback (most recent call last):
...
ValueError: Given bytestring exceeds maximum length 9
'''
result = 0
varint = []
index = 0
for index, byte in enumerate(bytestring):
varint.append(byte)
if not byte & 0b10000000:
break
if len(varint) > 9:
raise ValueError('Given bytestring exceeds maximum length 9')
for byte in varint[::-1]:
result <<= 7
result |= byte & 0b01111111
return result, bytestring[index + 1:]
def encode_varint(unsigned_integer):
'''
return varint for given integer
>>> list(map(bin, encode_varint(0)))
['0b0']
>>> list(map(bin, encode_varint(1))) # Kenobi's first name?
['0b1']
>>> list(map(bin, encode_varint(127)))
['0b1111111']
>>> list(map(bin, encode_varint(128)))
['0b10000000', '0b1']
>>> list(map(bin, encode_varint(255)))
['0b11111111', '0b1']
>>> list(map(bin, encode_varint(300)))
['0b10101100', '0b10']
>>> list(map(bin, encode_varint(16384)))
['0b10000000', '0b10000000', '0b1']
>>> encode_varint(-1)
Traceback (most recent call last):
...
ValueError: Only unsigned integers allowed
>>> encode_varint(1 << 64)
Traceback (most recent call last):
...
ValueError: Given integer exceeds maximum of 2^63
'''
if unsigned_integer > 1 << 63:
raise ValueError('Given integer exceeds maximum of 2^63')
elif unsigned_integer < 0:
raise ValueError('Only unsigned integers allowed')
result = []
while unsigned_integer or not result:
result.append((unsigned_integer & 0b01111111) | 0b10000000)
unsigned_integer >>= 7
result[-1] ^= 0b10000000 # final byte lacks a continuation
return result
def verify(cid, command=None):
'''
Check that the hash output of decode_cid() matches Data field of object
This is only likely to work with very small data blocks
# empty file
>>> verify('QmbFMke1KXqnYyBBWxB74N4c5SBnJMVAiMNRcGu6x1AwQH')
True
# the string 'ipfs'
>>> verify('QmejvEPop4D7YUadeGqYWmZxHhLc4JBUCzJJHWMzdcMe2y')
True
# IPFS alpha security notes
>>> verify('QmQ5vhrL7uv6tuoN9KeVBwd4PwfQkXdVVmDLUZuTNxqgvm')
True
'''
hashed = decode_cid(cid.encode())
command = command or ['ipfs', 'object', 'get', cid]
use_base64 = '--data-encoding=base64'
json_obj = check_output(command)
if command[-2] == use_base64:
data = base64.b64decode(json.loads(json_obj)['Data'])
else:
data = json.loads(json_obj)['Data'].encode()
data = bytes([0x0a, *encode_varint(len(data))]) + data
logging.debug('verify data: %r', data)
result = sha256(data).hexdigest() == hashed
# if it failed, we try again using base64
# https://github.com/ipfs/go-ipfs/issues/1582
    if not result and command[-2] != use_base64:
        command.insert(-1, use_base64)
return verify(cid, command=command)
return result
if __name__ == '__main__':
print(decode_cid(*(arg.encode() for arg in sys.argv[1:])))
|
import sys
import cv2
import numpy
# ------------------------------
# stdin = sys.stdin.buffer.read()
# array = numpy.frombuffer(stdin, dtype='uint8')
# img = cv2.imdecode(array, 1)
# cv2.imshow("window", img)
# cv2.waitKey() |
"""empty message
Revision ID: 7349ce2f6db4
Revises:
Create Date: 2020-07-10 18:57:58.357627
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '7349ce2f6db4'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('codenames_players',
sa.Column('id', sa.String(length=64), nullable=False),
sa.Column('sid', sa.String(length=36), nullable=True),
sa.Column('team_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['id'], ['user_data.id'], ),
sa.ForeignKeyConstraint(['team_id'], ['codenames_teams.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table('codenames_teams',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('room_id', sa.String(length=10), nullable=True),
sa.Column('team_name', sa.String(length=10), nullable=True),
sa.Column('score', sa.Integer(), nullable=True),
sa.Column('spymaster', sa.String(length=64), nullable=True),
sa.ForeignKeyConstraint(['room_id'], ['codenames_rooms.id'], ),
sa.ForeignKeyConstraint(['spymaster'], ['codenames_players.id'], ),
sa.PrimaryKeyConstraint('id')
)
with op.batch_alter_table('codenames_teams', schema=None) as batch_op:
batch_op.create_index(batch_op.f('ix_codenames_teams_room_id'), ['room_id'], unique=False)
op.create_table('room_data',
sa.Column('id', sa.String(length=10), nullable=False),
sa.Column('admin_id', sa.String(length=64), nullable=True),
sa.Column('game', sa.String(length=20), nullable=True),
sa.ForeignKeyConstraint(['admin_id'], ['user_data.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table('user_data',
sa.Column('id', sa.String(length=64), nullable=False),
sa.Column('username', sa.String(length=64), nullable=True),
sa.Column('room_id', sa.String(length=10), nullable=True),
sa.ForeignKeyConstraint(['room_id'], ['room_data.id'], ),
sa.PrimaryKeyConstraint('id')
)
with op.batch_alter_table('user_data', schema=None) as batch_op:
batch_op.create_index(batch_op.f('ix_user_data_room_id'), ['room_id'], unique=False)
batch_op.create_index(batch_op.f('ix_user_data_username'), ['username'], unique=False)
op.create_table('codenames_rooms',
sa.Column('id', sa.String(length=10), nullable=False),
sa.Column('state', sa.String(), nullable=True),
sa.Column('grid', sa.String(length=25), nullable=True),
sa.ForeignKeyConstraint(['id'], ['room_data.id'], ),
sa.PrimaryKeyConstraint('id')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('codenames_rooms')
with op.batch_alter_table('user_data', schema=None) as batch_op:
batch_op.drop_index(batch_op.f('ix_user_data_username'))
batch_op.drop_index(batch_op.f('ix_user_data_room_id'))
op.drop_table('user_data')
op.drop_table('room_data')
with op.batch_alter_table('codenames_teams', schema=None) as batch_op:
batch_op.drop_index(batch_op.f('ix_codenames_teams_room_id'))
op.drop_table('codenames_teams')
op.drop_table('codenames_players')
# ### end Alembic commands ###
|
from src.tabular.policies import *
from src.tabular.TD import QLearning |
from cougar.graphs.backbone import vgg
import torch
def test_vgg():
size = 300
input = torch.randn(1, 3, size, size)
model = vgg(size)
output = model(input)
assert len(output) == 6
assert output[0].shape[2] == 38
assert output[5].shape[2] == 1
|
import torch
import argparse
import glob
import logging
import os
import time
from data import load_dataset
from models import StyleTransformer, Discriminator, BartSystem
from train import train
from transformer_base import add_generic_args, generic_train
class Config():
data_path = './data/chatbot/'
log_dir = 'runs/exp'
save_path = './save'
pretrained_embed_path = './embedding/'
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# device = torch.device('cpu')
discriminator_method = 'Multi' # 'Multi' or 'Cond'
load_pretrained_embed = False
min_freq = 3
max_length = 1024 # max_source_length
# embed_size = 256
d_model = 256
h = 4
num_styles = 2
num_classes = num_styles + 1 if discriminator_method == 'Multi' else 2
num_layers = 4
# batch_size = 64
lr_F = 0.0001
lr_D = 0.0001
L2 = 0
iter_D = 10
iter_F = 5
F_pretrain_iter = 1
log_steps = 5
eval_steps = 25
learned_pos_embed = True
dropout = 0
drop_rate_config = [(1, 0)]
temperature_config = [(1, 0)]
slf_factor = 0.25
cyc_factor = 0.5
adv_factor = 1
inp_shuffle_len = 0
inp_unk_drop_fac = 0
inp_rand_drop_fac = 0
inp_drop_prob = 0
### Bart system
output_dir='feedback_sum'
do_predict=True
max_source_length=1024
max_target_length=56
data_dir="feedback"
def main():
config = Config()
parser = argparse.ArgumentParser()
add_generic_args(parser, os.getcwd())
parser = BartSystem.add_model_specific_args(parser, os.getcwd())
args = parser.parse_args()
# if not args.output_dir:
# args.output_dir = os.path.join("./results", f"{args.task}_{args.model_type}_{time.strftime('%Y%m%d_%H%M%S')}",)
# os.makedirs(args.output_dir)
args.do_train = False
model_F = BartSystem(args).to(config.device)
trainer = generic_train(model_F, args)
if args.output_dir:
try:
checkpoints = list(sorted(glob.glob(os.path.join(args.output_dir, "checkpointepoch=*.ckpt"), recursive=True)))
if checkpoints[-1]:
BartSystem.load_from_checkpoint(checkpoints[-1])
except:
print("Failed to load checkpoint!")
# train_iters, dev_iters, test_iters, vocab = load_dataset(config)
train_iters, dev_iters, test_iters = model_F.train_dataloader(), model_F.val_dataloader(), model_F.test_dataloader()
model_D = Discriminator(config, model_F.tokenizer).to(config.device)
print(config.discriminator_method)
train(config, model_F, model_D, train_iters, dev_iters, test_iters)
if __name__ == '__main__':
main()
|
"""
Maps each :mod:`Reading <snsary.models.reading>` to a new one with the name altered as specified. This can be useful to distinguish the same :mod:`Readings <snsary.models.reading>` being aggregated in different ways.
Supported alterations include:
- ``to`` - replaces the name of the reading with the one specified.
- ``append`` - adds to the new or existing name of the reading.
"""
from .function import Function
class Rename(Function):
def __init__(self, append='', to=None):
self.__append = append
self.__to = to
def __call__(self, reading):
new_name = self.__to if self.__to else reading.name
new_name += self.__append
return [reading.dup(name=new_name)]
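# Usage sketch (hypothetical; `reading` is any Reading-like object exposing `name`
# and `dup(name=...)` as used above). Each call returns a one-element list:
#
#   Rename(append='-mean')(reading)   # e.g. 'temperature' -> ['temperature-mean']
#   Rename(to='humidity')(reading)    # always ['humidity'], whatever the old name was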
|
import functools
from flask import session, url_for, redirect
def needs_auth(f):
@functools.wraps(f)
def wrapper(*args, **kwargs):
if "api_key" not in session:
return redirect(url_for("root.root"))
return f(*args, **kwargs)
return wrapper
def allowed_file(filename, extensions):
return "." in filename and filename.rsplit(".", 1)[1].lower() in extensions
|
import SimpleITK as sitk
import matplotlib.pyplot as plt
import numpy as np
import cv2
from scipy.signal import convolve
import skimage
from skimage import util
def random_noise(image,noise_num):
    img_noise = image.copy()  # work on a copy so the caller's array is not modified, matching p_noise/g_noise
rows, cols = img_noise.shape
for i in range(noise_num):
x = np.random.randint(0, rows)
y = np.random.randint(0, cols)
img_noise[x, y] = 255
return img_noise
def get_conv_ker(img, img_Noise):
f_image = np.fft.fftshift(np.fft.fft2(img))
f_Nimage = np.fft.fftshift(np.fft.fft2(img_Noise))
f_conv_ker = f_Nimage/f_image
print(f_conv_ker.shape)
img_ker = np.fft.ifftshift(np.fft.ifft2(np.fft.ifftshift(f_conv_ker)))
return img_ker
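# Note: get_conv_ker estimates the degradation kernel in the frequency domain as
# H = F(noisy) / F(clean) and then brings it back to the spatial domain via an
# inverse FFT.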
def read_img(case_path):
itkimage = sitk.ReadImage(case_path)
OR = itkimage.GetOrigin()
SP = itkimage.GetSpacing()
img_array = sitk.GetArrayFromImage(itkimage)
return img_array, OR, SP
def articraft(img):
x, y = img.shape[0], img.shape[1]
for i in range(x):
if i % 10==0:
for j in range(y):
img[i, j] = -3.02e+03
return img
def p_noise(img):
image = np.copy(img)
    img_noise=util.random_noise(image, mode='poisson', seed=1)
return img_noise
def g_noise(img):
image = np.copy(img)
img_noise=util.random_noise(image, mode='gaussian')
return img_noise
if __name__ == '__main__':
case_path = './1.3.6.1.4.1.14519.5.2.1.6279.6001.109002525524522225658609808059.mhd'
img_array, OR, SP = read_img(case_path)
img = np.copy(img_array[68, :, :])
img2 = np.copy(img_array[68, :, :])
img3 = np.copy(img_array[68, :, :])
img_noise = g_noise(img)
img_p_ker = get_conv_ker(img, p_noise(img3))
img_noise = p_noise(img3)
print(np.std(img_noise, ddof=1))
plt.show()
|
from .gos import *
__all__ = ["World", "Globe", "Agent"]
__title__ = 'globalopensimulator'
__version__ = '0.0.0a'
|
# Generated by Django 2.2.7 on 2021-10-31 14:46
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('ugc', '0004_berries_customers_decors_forms_levels_orders_orderstatuses_topping'),
]
operations = [
migrations.AlterField(
model_name='orders',
name='delivery_date',
field=models.TextField(verbose_name='Дата доставки'),
),
]
|
'''
================================================
## VOICEBOOK REPOSITORY ##
================================================
repository name: voicebook
repository version: 1.0
repository link: https://github.com/jim-schwoebel/voicebook
author: Jim Schwoebel
author contact: [email protected]
description: a book and repo to get you started programming voice applications in Python - 10 chapters and 200+ scripts.
license category: opensource
license: Apache 2.0 license
organization name: NeuroLex Laboratories, Inc.
location: Seattle, WA
website: https://neurolex.ai
release date: 2018-09-28
This code (voicebook) is hereby released under the Apache 2.0 license.
For more information, check out the license terms below.
================================================
## LICENSE TERMS ##
================================================
Copyright 2018 NeuroLex Laboratories, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
================================================
## SERVICE STATEMENT ##
================================================
If you are using the code written for a larger project, we are
happy to consult with you and help you with deployment. Our team
has >10 world experts in Kafka distributed architectures, microservices
built on top of Node.js / Python / Docker, and applying machine learning to
model speech and text data.
We have helped a wide variety of clients - small businesses,
researchers, enterprises, and independent developers.
If you would like to work with us let us know @ [email protected].
================================================
## SOX_FEATURES.PY ##
================================================
Get features using the SoX library.
Workaround: dump the CLI output to a txt file and
use a function to extract the features from it.
'''
import os
import numpy as np
def clean_text(text):
text=text.lower()
chars=['a','b','c','d','e','f','g','h','i','j','k','l','m',
'o','p','q','r','s','t','u','v','w','x','y','z',' ',
':', '(',')','-','=',"'.'"]
for i in range(len(chars)):
text=text.replace(chars[i],'')
text=text.split('\n')
new_text=list()
# now get new text
for i in range(len(text)):
try:
new_text.append(float(text[i].replace('\n','').replace('n','')))
except:
pass
#print(text[i].replace('\n','').replace('n',''))
return new_text
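# clean_text strips letters and punctuation from the soxi / `sox ... stat` reports
# and keeps only the fields that parse as floats, one value per report line.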
def sox_featurize(filename):
# soxi and stats files
soxifile=filename[0:-4]+'_soxi.txt'
statfile=filename[0:-4]+'_stats.txt'
os.system('soxi %s > %s'%(filename, soxifile))
os.system('sox %s -n stat > %s 2>&1'%(filename, statfile))
# get basic info
s1=open(soxifile).read()
s1_labels=['channels','samplerate','precision',
'filesize','bitrate','sample encoding']
s1=clean_text(s1)
s2=open(statfile).read()
s2_labels=['samples read','length','scaled by','maximum amplitude',
'minimum amplitude','midline amplitude','mean norm','mean amplitude',
'rms amplitude','max delta','min delta','mean delta',
'rms delta','rough freq','vol adj']
s2=clean_text(s2)
labels=s1_labels+s2_labels
features=np.array(s1+s2)
return features,labels
# features, labels = sox_featurize('test.wav')
|
import logging
from collections import OrderedDict
from operator import itemgetter
import networkx as nx
import torch
from torch import nn
from torch.utils.data import DataLoader
from src.models.samplers.arch_sampler import ArchSampler
from src.models.samplers.conditional_softmax_sampler import \
CondiSoftmaxArchGenerator
from src.models.samplers.softmax_sampler import SoftmaxArchGenerator
from src.models.samplers.static_sampler import StaticArchGenerator
from src.models.utils import graph_arch_details
from src.train.utils import strat_split_from_y, RoundRobinDataloader
logger = logging.getLogger(__name__)
class SSNWrapper(nn.Module):
_SAMPLERS = dict(static=StaticArchGenerator,
layer_softmax=SoftmaxArchGenerator,
node_softmax=SoftmaxArchGenerator,
conditional_softmax=CondiSoftmaxArchGenerator)
def __init__(self, ssn_model, initial_p, deter_eval, arch_loss_coef,
entropy_coef, split_training, t_id, all_same=False,
arch_sampler=None):
super().__init__()
self.ssn = ssn_model
self.weight_mask = torch.zeros(len(self.ssn.stochastic_node_ids))
for node, pos in self.ssn.stochastic_node_ids.items():
if node[0] == t_id:
# print('OUIOUIOUI ', node)
self.weight_mask[pos] = 1
# else:
# print('NONNONNON', node)
# self.distrib_gen = ConstantDistribGenerator(n_vars, p=0.7)
self.all_same = all_same
self.initial_p = initial_p
self.arch_cost_coef = arch_loss_coef
self.entropy_coef = entropy_coef
self.split_training = split_training
self.cur_split = None
self.frozen_arch = False
# frozen model is a list to prevent it from appearing in the state_dict
self.frozen_model = []
self.t_id = t_id
if isinstance(arch_sampler, str):
self.arch_sampler = self.get_distrib_gen(deter_eval,
arch_sampler)
elif isinstance(arch_sampler, ArchSampler):
self.arch_sampler = arch_sampler
else:
raise ValueError('Unknown arch sampler type: {}'
.format(type(arch_sampler)))
weights = self.arch_sampler().squeeze()
# model.arch_sampler.freeze()
nodes = list(self.ssn.stochastic_node_ids.keys())
assert len(nodes) == weights.size(0) and weights.dim() == 1
def get_distrib_gen(self, deter_eval, arch_sampler):
n_vars = self.ssn.n_sampling_params
if n_vars == 0:
            logger.warning('No stochastic nodes were detected.')
# logger.warning(f'Replacing {arch_sampler} with static sampler')
# arch_sampler = 'static'
var_names = list(self.ssn.stochastic_node_ids.keys())
if arch_sampler == 'layer_softmax':
groups = [name[1] for name in var_names]
elif arch_sampler in ['node_softmax', 'conditional_softmax']:
groups = []
for var in var_names:
preds = list(self.ssn.graph.predecessors(var))
assert len(preds) == 1
groups.append(preds[0])
else:
groups = None
samp_cls = self._SAMPLERS[arch_sampler]
return samp_cls(distrib_dim=n_vars,
initial_p=self.initial_p,
groups=groups,
deter_eval=deter_eval,
all_same=self.all_same,
var_names=var_names,
graph=self.ssn.graph)
def forward(self, inputs, splits=None):
if splits is None:
assert not (self.split_training and self.training) or \
self.arch_sampler.is_deterministic()
self.cur_split = None
elif self.frozen_arch:
self.cur_split = 0
else:
self.cur_split = splits.unique().item()
if self.cur_split == 0:
self.arch_sampler.eval()
elif self.cur_split == 1:
self.ssn.eval()
if self.frozen_arch:
return self.frozen_model[0](inputs)
self.arch_sampler.start_new_sequence()
arch_probas = self.arch_sampler()
# Case of multiple input nodes where input is a list:
b_size = inputs[0].size(0) if isinstance(inputs, list) \
else inputs.size(0)
arch_samplings = self.arch_sampler.sample_archs(b_size, arch_probas)
self.ssn.samplings = arch_samplings
# self.ssn.start_new_sequence()
# self.ssn.set_probas()
# print('Arch_ent: {} ({}), Train={}, split={}'.format(self.arch_sampler.distrib_entropies[0].mean(),
# self.frozen_arch,
# self.training,
# self.cur_split))
# self.check_arch_freezing()
return self.ssn(inputs)
def check_arch_freezing(self, *args, ent=None, epoch=None):
# print('called with epoch={}'.format(epoch))
if ent is None:
ent = self.arch_sampler.distrib_entropies[0].mean()
# print('CALLED WITHOUT ENT')
else:
ent = ent
# print('CALLED WITH ENT')
# print('ENT={}, weights={}'.format(ent, weights))
if ent < 0.001 or epoch > 0.5:
self.freeze_arch()
def get_frozen_model(self):
if not self.frozen_arch:
self.freeze_arch()
return self.frozen_model[0]
def freeze_arch(self):
# print('FREEEEEZE')
if self.frozen_arch:
return
self.frozen_arch = True
weights = self.arch_sampler()
arch_samplings = self.arch_sampler.sample_archs(1, weights).squeeze()
self.frozen_model.append(self.ssn.get_pruned_model(arch_samplings))
def requires_grad(self):
if self.frozen_arch:
req = self.frozen_model[0].requires_grad()
# print('REQ:{}'.format(req))
return req
return True
def loss_wrapper(self, loss_fn):
def loss(*args, **kwargs):
task_loss = loss_fn(*args, **kwargs)
reward = -task_loss.detach()
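            # REINFORCE-style update: the detached negative task loss serves as the reward
            # for the sampled architecture; an architecture-cost penalty and a mean-reward
            # baseline are subtracted below before weighting the sampler's log-probabilities.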
# samp = self.ssn.samplings
# samp_size = samp.size()
# a = self.weight_mask
# print(samp_size)
# print(a)
# if self.frozen_arch:
if self.frozen_arch or len(self.arch_sampler.log_probas) == 0:
arch_loss = torch.zeros(1).to(task_loss.device)
entropy_loss = torch.zeros(1).to(task_loss.device)
else:
arch_costs = self.ssn.samplings * self.weight_mask.to(self.ssn.samplings.device)
arch_costs = arch_costs.sum(-1)
reward -= self.arch_cost_coef * arch_costs
reward -= reward.mean()
log_p = self.arch_sampler.log_probas
assert len(log_p) == 1
log_p = log_p[0]
assert task_loss.dim() == 1
assert task_loss.size(0) == log_p.size(0) or log_p.size(0) == 1
arch_loss = -(reward.unsqueeze(-1) * log_p).mean(1)
assert arch_loss.dim() == 1
entropy_loss = -self.entropy_coef * self.arch_sampler.entropy().mean()
# ent_1 = self.arch_sampler.entropy()
# ent_2 = [e.size() for e in self.arch_sampler.distrib_entropies]
# ent_3 = [e.mean() for e in self.arch_sampler.distrib_entropies]
# print(ent_1)
# print(ent_2)
# print(ent_3)
# print()
# if self.cur_split is None:
losses = {'task all_loss': task_loss,
'arch all_loss': arch_loss,
'entropy all_loss': entropy_loss}
return sum(losses.values()), losses
# elif self.cur_split == 0: # and self.t_id == 0:
# return task_loss
# else:
# return arch_loss + entropy_loss
return loss
@property
def in_node(self):
return self.ssn.in_nodes[0]
@property
def out_node(self):
return self.ssn.out_nodes[0]
def nodes_to_prune(self, *args, **kwargs):
return self.arch_sampler.nodes_to_prune(*args, **kwargs)
def get_weights(self):
return self.arch_sampler().squeeze()
def get_stoch_nodes(self):
return list(self.ssn.stochastic_node_ids.keys())
def get_graph(self):
return self.ssn.graph
def sampled_entropy(self):
if self.frozen_arch or len(self.arch_sampler.distrib_entropies) == 0:
return torch.zeros(1)
else:
assert len(self.arch_sampler.distrib_entropies) == 1
return self.arch_sampler.distrib_entropies[0].mean()
def global_entropy(self):
return self.arch_sampler.entropy().mean()
def train_loader_wrapper(self, train_loader):
if not self.split_training or self.arch_sampler.is_deterministic():
return train_loader
ds = train_loader.dataset
splits = strat_split_from_y(ds)
new_loaders = [DataLoader(split, train_loader.batch_size, shuffle=True,
num_workers=train_loader.num_workers)
for split in splits]
return RoundRobinDataloader(new_loaders)
def param_groups(self, *args, **kwargs):
return [
{'params': self.ssn.parameters(*args, **kwargs)},
{'params': self.arch_sampler.parameters(*args, **kwargs)}
]
def set_h_params(self, arch_loss_coef, entropy_coef, split_training):
self.arch_cost_coef = arch_loss_coef
self.entropy_coef = entropy_coef
self.split_training = split_training
def get_top_archs(self, n=1):
weights = self.get_weights()
res = []
for path in nx.all_simple_paths(self.ssn.graph, self.ssn.in_nodes[0],
self.ssn.out_nodes[0]):
p = 1
for node in path:
if node in self.ssn.stochastic_node_ids:
p *= weights[self.ssn.stochastic_node_ids[node]]
res.append((tuple(path), p.item()))
res = sorted(res, key=itemgetter(1), reverse=True)
return OrderedDict(res[:n])
def arch_repr(self):
return graph_arch_details(self.ssn.graph)
|
import numpy as np
import os
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.cm import get_cmap
import sklearn
from sklearn.datasets import make_blobs
from sklearn.cluster import AgglomerativeClustering
##################################################
### Agglomerative clustering on random dataset ###
##################################################
### Prepare random dataset with blob distribution ##################################################################################################
X, y = make_blobs(n_samples=1000, random_state=1)
### Agglomerative Clustering Preparation ###########################################################################################################
agg = AgglomerativeClustering(n_clusters=3) # Agglomerative clusterer with 3 clusters
cluster = agg.fit_predict(X) # Fit agglomerative clusterer with current random dataset
### Plot clustering results of agglomerative clustering on given random dataset #####################################################################
cmap = get_cmap('Pastel1') # Prepare color map / Each cluster uses a distinctive color
legend = []
for label in range(agg.n_clusters_):
# Plot only the points that correspond to certain cluster label using X[cluster==label]
# Assign the color to the points in the dataset according to their labels
plt.scatter(X[cluster==label][:, 0], X[cluster==label][:, 1], c=cmap.colors[label], label='Cluster '+ str(label))
legend.append('Cluster '+ str(label))
plt.legend(legend, loc='best')
plt.title('Agglomerative Clustering with 3 Clusters')
plt.show() |
#!/usr/bin/env python
import sys
import os
# put this module at the front of the path
sys.path.insert(0, os.path.abspath(os.path.join(os.path.split(__file__)[0], "../../")))
from hcp_prep.interfaces import *
out_file = os.path.join(os.path.split(__file__)[0], "interface_docs.txt")
ints = [HCDcm2nii, DicomInfo, NiiWrangler, HCPCommand, PreFS, FS, PostFS, VolumeProcessing, SurfaceProcessing]
with open(out_file, 'w') as f:
for c in ints:
        print("%s\n%s\n%s" % (c.__name__, "="*len(c.__name__), c.help(returnhelp=True)), file=f) |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy as np
from tvm import topi
import tvm.topi.testing
import tvm
from tvm import te
from tvm import relay
from tvm.relay.testing import check_grad, run_infer_type
from tvm.relay.transform import gradient
import tvm.testing
def verify_max_pool2d_grad(x_shape, pool_size, strides, padding, ceil_mode):
x = relay.var("x", relay.TensorType(x_shape, "float32"))
y = tvm.relay.nn.max_pool2d(
x, pool_size=pool_size, strides=strides, padding=padding, ceil_mode=ceil_mode
)
fwd_func = relay.Function([x], y)
fwd_func = run_infer_type(fwd_func)
bwd_func = run_infer_type(gradient(fwd_func))
data = np.random.rand(*x_shape).astype("float32")
ph, pw = padding
y_shape = topi.utils.get_const_tuple(fwd_func.ret_type.shape)
out_grad = np.ones(shape=y_shape)
ref_grad = tvm.topi.testing.pool_grad_nchw(
data,
out_grad,
pool_size=pool_size,
strides=strides,
padding=[ph, pw, ph, pw],
pool_type="max",
ceil_mode=ceil_mode,
)
for target, ctx in tvm.testing.enabled_targets():
intrp = relay.create_executor(ctx=ctx, target=target)
op_res, (op_grad,) = intrp.evaluate(bwd_func)(data)
np.testing.assert_allclose(op_grad.asnumpy(), ref_grad, rtol=0.01)
@tvm.testing.uses_gpu
def test_max_pool2d_grad():
verify_max_pool2d_grad(
(1, 4, 16, 16), pool_size=(2, 2), strides=(2, 2), padding=(0, 0), ceil_mode=False
)
verify_max_pool2d_grad(
(1, 4, 16, 16), pool_size=(1, 1), strides=(1, 1), padding=(1, 1), ceil_mode=False
)
def verify_avg_pool2d_grad(
x_shape, pool_size, strides, padding, ceil_mode, count_include_pad, dtype="float32"
):
for shape_dtype in ["int32", "int64"]:
x = relay.var("x", shape=[tvm.tir.IntImm(shape_dtype, x) for x in x_shape], dtype=dtype)
y = tvm.relay.nn.avg_pool2d(
x,
pool_size=pool_size,
strides=strides,
padding=padding,
ceil_mode=ceil_mode,
count_include_pad=count_include_pad,
)
fwd_func = relay.Function([x], y)
fwd_func = run_infer_type(fwd_func)
bwd_func = run_infer_type(gradient(fwd_func))
data = np.random.rand(*x_shape).astype(dtype)
ph, pw = padding
y_shape = topi.utils.get_const_tuple(fwd_func.ret_type.shape)
out_grad = np.ones(shape=y_shape)
ref_grad = tvm.topi.testing.pool_grad_nchw(
data,
out_grad,
pool_size=pool_size,
strides=strides,
padding=[ph, pw, ph, pw],
pool_type="avg",
ceil_mode=ceil_mode,
)
for target, ctx in tvm.testing.enabled_targets():
intrp = relay.create_executor(ctx=ctx, target=target)
op_res, (op_grad,) = intrp.evaluate(bwd_func)(data)
np.testing.assert_allclose(op_grad.asnumpy(), ref_grad, rtol=0.01)
@tvm.testing.uses_gpu
def test_avg_pool2d_grad():
verify_avg_pool2d_grad(
(1, 4, 16, 16),
pool_size=(2, 2),
strides=(2, 2),
padding=(0, 0),
ceil_mode=False,
count_include_pad=True,
)
verify_avg_pool2d_grad(
(1, 4, 16, 16),
pool_size=(1, 1),
strides=(1, 1),
padding=(1, 1),
ceil_mode=False,
count_include_pad=False,
)
verify_avg_pool2d_grad(
(1, 4, 16, 16),
pool_size=(1, 1),
strides=(1, 1),
padding=(1, 1),
ceil_mode=False,
count_include_pad=False,
dtype="int32",
)
def verify_global_avg_pool2d_grad(x_shape):
x = relay.var("x", relay.TensorType(x_shape, "float32"))
y = tvm.relay.nn.global_avg_pool2d(x)
fwd_func = relay.Function([x], y)
fwd_func = run_infer_type(fwd_func)
bwd_func = run_infer_type(gradient(fwd_func))
data = np.random.rand(*x_shape).astype("float32")
y_shape = topi.utils.get_const_tuple(fwd_func.ret_type.shape)
out_grad = np.ones(shape=y_shape)
ref_grad = tvm.topi.testing.pool_grad_nchw(
data,
out_grad,
pool_size=(x_shape[2], x_shape[3]),
strides=(1, 1),
padding=[0, 0, 0, 0],
pool_type="avg",
ceil_mode=False,
)
for target, ctx in tvm.testing.enabled_targets():
intrp = relay.create_executor(ctx=ctx, target=target)
op_res, (op_grad,) = intrp.evaluate(bwd_func)(data)
np.testing.assert_allclose(op_grad.asnumpy(), ref_grad, rtol=0.01)
@tvm.testing.uses_gpu
def test_global_avg_pool2d_grad():
verify_global_avg_pool2d_grad((1, 4, 16, 16))
verify_global_avg_pool2d_grad((1, 8, 8, 24))
def verify_conv2d_grad(dshape, wshape, strides, padding, dilation, groups=1, mode="higher_order"):
dtype = "float32"
data = relay.var("data", shape=dshape, dtype=dtype)
weight = relay.var("weight", shape=wshape, dtype=dtype)
conv = relay.nn.conv2d(
data, weight, strides=strides, padding=padding, dilation=dilation, groups=groups
)
fwd_func = relay.Function([data, weight], conv)
check_grad(fwd_func, mode=mode)
@tvm.testing.uses_gpu
def test_conv2d_grad():
verify_conv2d_grad((1, 4, 16, 16), (16, 4, 3, 3), [1, 1], [1, 1], [1, 1])
verify_conv2d_grad((1, 4, 16, 16), (16, 4, 1, 1), [1, 1], [0, 0], [1, 1])
verify_conv2d_grad((1, 4, 16, 16), (16, 4, 1, 1), [2, 2], [0, 0], [1, 1])
verify_conv2d_grad((1, 4, 16, 16), (16, 4, 3, 3), [1, 1], [1, 1], [1, 1], mode="first_order")
def verify_dense_grad(d_shape, w_shape):
data = relay.var("data", relay.TensorType(d_shape, "float32"))
weight = relay.var("weight", relay.TensorType(w_shape, "float32"))
fwd_func = relay.Function([data, weight], relay.nn.dense(data, weight))
check_grad(fwd_func)
def test_dense_grad():
verify_dense_grad((1, 8), (16, 8))
verify_dense_grad((1, 4), (3, 4))
verify_dense_grad((5, 4), (3, 4))
def verify_batch_flatten_grad(d_shape):
data = relay.var("data", relay.TensorType(d_shape, "float32"))
fwd_func = relay.Function([data], relay.nn.batch_flatten(data))
check_grad(fwd_func)
def test_batch_flatten_grad():
verify_batch_flatten_grad((1, 2, 3, 4))
verify_batch_flatten_grad((1, 8))
if __name__ == "__main__":
test_max_pool2d_grad()
test_avg_pool2d_grad()
test_global_avg_pool2d_grad()
test_conv2d_grad()
test_dense_grad()
test_batch_flatten_grad()
|