# -*- coding: utf-8 -*-
"""
Created on Tue Mar 07 12:49:18 2017
@author: smudd
"""
import matplotlib
# Force matplotlib to not use any Xwindows backend.
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib import rcParams
import matplotlib.cm as cm
from LSDMapFigure.PlottingRaster import MapFigure
from LSDMapFigure.PlottingRaster import BaseRaster
from LSDPlottingTools import colours as lsdcolours
from LSDPlottingTools import init_plotting_DV
from LSDPlottingTools import LSDMap_PointTools
import sys
#sys.path.append("PATH/TO/LSDPlottingTools/")
#
#init_plotting_DV()
#label_size = 100
rcParams['font.family'] = 'sans-serif'
rcParams['font.sans-serif'] = ['Liberation Sans']
#rcParams['font.size'] = label_size
#rcParams['lines.linewidth'] = 1.5
DataDirectory = "/home/smudd/LSDTopoData/TanDemX_data/Nepal/"
#DataDirectory = "/home/smudd/SMMDataStore/analysis_for_papers/Meghalaya/chi_analysis/"
#Directory = "C:\\Vagrantboxes\\LSDTopoTools\\Topographic_projects\\Meghalaya\\Divides\\"
#DataDirectory = "T:\\analysis_for_papers\\Meghalaya/chi_analysis\\"
Base_file = "HS"
#Directory = "/home/s1563094/Datastore/DATA/UK/LiDAR_DTM_1m/HIN/"
#Base_file = "HIN_"
#BackgroundRasterName = Base_file+".bil"
BackgroundRasterName = Base_file+".tif"
#DrapeRasterName = Base_file+"_hs.bil"
#ChiRasterName = Base_file+"_chi_coord.bil"
#BR = BaseRaster(BackgroundRasterName, Directory)
#BR.set_raster_type("Terrain")
#print(BR._colourmap)
#BR.show_raster()
#BR.set_colourmap("RdYlGn")
#BR.show_raster()
#PD_file = Base_file+"_chi_coord_basins.csv"
#PointData = LSDMap_PointTools.LSDMap_PointData(Directory+PD_file)
plt.clf()
cbar_loc = "bottom"
MF = MapFigure(BackgroundRasterName, DataDirectory,coord_type="UTM_km",colourbar_location = cbar_loc)
#MF.add_drape_image(DrapeRasterName,Directory,alpha = 0.4)
#MF.add_drape_image(ChiRasterName,Directory,colourmap = "cubehelix",alpha = 0.4)
#MF.add_point_data(PointData)
#MF.show_plot()
ImageName = DataDirectory+"TestNewArtist.png"
fig_size_inches = 12
ax_style = "Normal"
MF.save_fig(fig_width_inches = fig_size_inches, FigFileName = ImageName, axis_style = ax_style, Fig_dpi = 250)
# Customise the DrapePlot
#dp.make_drape_colourbar(cbar_label=colourbar_label)
#dp.set_fig_axis_labels()
#dp.show_plot()
|
'''
Look up packaging instructions from Serious Python to see if anything has changed for Python 3
'''
from setuptools import setup
#this should be pretty detailed; generate from function definitions (i.e. Serious Python)
def readme():
with open('README.md') as f:
return f.read()
setup(name='aspace_tools',
version='0.0.1',
description='Scripts for interacting with the ArchivesSpace database and API',
long_description=readme(),
#url='https://github.com/ucancallmealicia/utilities',
license='MIT',
author='Alicia Detelich',
author_email='[email protected]',
classifiers=[
'Development Status :: 3 - Alpha',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
'Natural Language :: English',
'Operating System :: OS Independent'
],
packages=['aspace_tools'],
install_requires=['requests', 'paramiko', 'pymysql', 'sshtunnel', 'pandas', 'bs4', 'pyyaml', 'responses'],
include_package_data=True,
zip_safe=False)
#NEED TO ADD A REQUIREMENT TO INSTALL UTILITIES
|
import os
import shutil
import random
import numpy as np
from tensorflow import random as tf_random
import yaml
from datetime import datetime
import pytz
from PIL import Image, ImageOps
from pathlib import Path
import git
from models import generate_compiled_segmentation_model
from image_utils import str2bool
from metrics_utils import global_threshold
from local_utils import local_folder_has_files, getSystemInfo, getLibVersions
# infer can be run multiple times (labels, overlay), create new metadata each time
infer_datetime = datetime.now(pytz.UTC).strftime('%Y%m%dT%H%M%SZ')
metadata_file_name = 'metadata_' + infer_datetime + '.yaml'
tmp_directory = Path('./tmp')
# rgb
class_colors = [
[0, 0, 255], # blue
[255, 255, 0], # yellow
[255, 0, 0], # red
[0, 255, 0], # green
[255, 0, 255] # magenta
]
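# stitch_preds_together() reassembles per-tile predictions into one image.
# When pad_output is False, the last row/column of tiles is anchored to the
# bottom/right edge of the original image so the stitched output keeps the
# input dimensions; when pad_output is True, the output spans the full grid of
# target_size_1d tiles. For labels_output, the RGB stack is collapsed to a
# single grey-value channel.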
def stitch_preds_together(tiles, target_size_1d, labels_output, pad_output, image):
n_tile_rows = len(tiles)
n_tile_cols = len(tiles[0])
if not pad_output:
stitched_array = np.zeros((image.size[1], image.size[0], 3))
else:
stitched_array = np.zeros((target_size_1d * n_tile_rows, target_size_1d * n_tile_cols, 3))
for i in range(n_tile_rows):
for j in range(n_tile_cols):
if not pad_output and i == n_tile_rows - 1 and j == n_tile_cols - 1:
stitched_array[image.size[1] - target_size_1d:image.size[1], image.size[0] - target_size_1d:image.size[0], :] = tiles[i][j]
elif not pad_output and i == n_tile_rows - 1:
stitched_array[image.size[1] - target_size_1d:image.size[1], j * target_size_1d:(j + 1) * target_size_1d, :] = tiles[i][j]
elif not pad_output and j == n_tile_cols - 1:
stitched_array[i * target_size_1d:(i + 1) * target_size_1d, image.size[0] - target_size_1d:image.size[0], :] = tiles[i][j]
else:
stitched_array[i * target_size_1d:(i + 1) * target_size_1d, j * target_size_1d:(j + 1) * target_size_1d, :] = tiles[i][j]
if labels_output:
stitched_image = Image.fromarray(np.mean(stitched_array, -1).astype('uint8'))
else:
stitched_image = Image.fromarray(stitched_array.astype('uint8'))
return stitched_image
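# prepare_image() pads the input up to a multiple of target_size_1d (or, when
# pad_output is False, re-uses edge-anchored crops for the last row/column),
# splits it into target_size_1d x target_size_1d tiles, and scales pixel
# values to [0, 1].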
def prepare_image(image, target_size_1d, pad_output):
# make the image dimensions an exact multiple of the tile size (e.g., 512x512)
desired_size = target_size_1d * np.ceil(np.asarray(image.size) / target_size_1d).astype(int)
delta_w = desired_size[0] - image.size[0]
delta_h = desired_size[1] - image.size[1]
if pad_output:
padding = (delta_w // 2, delta_h // 2, delta_w - (delta_w // 2), delta_h - (delta_h // 2))
else:
padding = (0, 0, 0, 0)
padded_image = ImageOps.expand(image, padding, fill=int(np.asarray(image).mean()))
# break into 512x512 tiles
padded_image = np.asarray(padded_image)
tiles = []
for i in range(np.ceil(padded_image.shape[0] / target_size_1d).astype(int)):
tiles.append([])
for j in range(np.ceil(padded_image.shape[1] / target_size_1d).astype(int)):
if (not pad_output and i == np.ceil(padded_image.shape[0] / target_size_1d).astype(int) - 1
and j == np.ceil(padded_image.shape[1] / target_size_1d).astype(int) - 1):
tiles[i].append(padded_image[image.size[1] - target_size_1d:image.size[1],
image.size[0] - target_size_1d:image.size[0]].copy())
elif not pad_output and i == np.ceil(padded_image.shape[0] / target_size_1d).astype(int) - 1:
tiles[i].append(padded_image[image.size[1] - target_size_1d:image.size[1],
j * target_size_1d:(j + 1) * target_size_1d].copy())
elif not pad_output and j == np.ceil(padded_image.shape[1] / target_size_1d).astype(int) - 1:
tiles[i].append(padded_image[i * target_size_1d:(i + 1) * target_size_1d,
image.size[0] - target_size_1d:image.size[0]].copy())
else:
tiles[i].append(padded_image[i * target_size_1d:(i + 1) * target_size_1d,
j * target_size_1d:(j + 1) * target_size_1d].copy())
# scale the tile values to be between 0 and 1 (GV = grey values)
for i in range(len(tiles)):
for j in range(len(tiles[i])):
tiles[i][j] = tiles[i][j] * 1. / 255
return tiles
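# overlay_predictions() converts each grey tile to RGB, then colours every
# pixel whose best class score exceeds its per-class threshold: with
# labels_output it writes an evenly spaced grey value per class, otherwise it
# uses class_colors. The background class, if given, is skipped.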
def overlay_predictions(prepared_tiles, preds, prediction_threshold, background_class_index, labels_output):
prediction_tiles = []
for i in range(len(prepared_tiles)):
prediction_tiles.append([])
for j in range(len(prepared_tiles[i])):
prediction_tiles[i].append(np.dstack((prepared_tiles[i][j], prepared_tiles[i][j], prepared_tiles[i][j])))
prediction_tiles[i][j] = (prediction_tiles[i][j] * 255).astype(int)
relative_above_threshold_mask = np.divide(preds[i][j], np.multiply(np.ones_like(preds[i][j]),
prediction_threshold)).max(axis=-1) > 1
best_class_by_pixel = np.divide(preds[i][j], np.multiply(np.ones_like(preds[i][j]),
prediction_threshold)).argmax(axis=-1)
color_counter = 0
for class_i in range(preds[i][j].shape[-1]):
rel_above_threshold_and_best_class = relative_above_threshold_mask & (best_class_by_pixel == class_i)
if (background_class_index is not None) and (class_i == background_class_index):
continue
if labels_output:
prediction_tiles[i][j][rel_above_threshold_and_best_class] = int((color_counter + 1) *
np.floor(255 / preds[i][j].shape[-1]))
else:
prediction_tiles[i][j][rel_above_threshold_and_best_class] = class_colors[color_counter]
color_counter = (color_counter + 1) % len(class_colors)
return prediction_tiles
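# segment_image() ties the helpers together: tile the image, run the model on
# each tile, optionally blank the tiles so only labels remain, overlay the
# thresholded predictions, and stitch the tiles back into a single image.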
def segment_image(model, image, prediction_threshold, target_size_1d, background_class_index,
labels_output, pad_output):
prepared_tiles = prepare_image(image, target_size_1d, pad_output)
preds = []
for i in range(len(prepared_tiles)):
preds.append([])
for j in range(len(prepared_tiles[i])):
preds[i].append(model.predict(prepared_tiles[i][j].reshape(1, target_size_1d,
target_size_1d, 1))[0, :, :, :])
# make background black if labels only
if labels_output:
for i in range(len(prepared_tiles)):
for j in range(len(prepared_tiles[i])):
prepared_tiles[i][j] = prepared_tiles[i][j] * 0
pred_tiles = overlay_predictions(prepared_tiles, preds, prediction_threshold, background_class_index, labels_output)
stitched_pred = stitch_preds_together(pred_tiles, target_size_1d, labels_output, pad_output, image)
return stitched_pred
def main(gcp_bucket, model_id, background_class_index, stack_id, image_ids, user_specified_prediction_thresholds,
labels_output, pad_output, trained_thresholds_id, random_module_global_seed, numpy_random_global_seed,
tf_random_global_seed, message):
# seed global random generators if specified; global random seeds here must be int or default None (no seed given)
if random_module_global_seed is not None:
random.seed(random_module_global_seed)
if numpy_random_global_seed is not None:
np.random.seed(numpy_random_global_seed)
if tf_random_global_seed is not None:
tf_random.set_seed(tf_random_global_seed)
start_dt = datetime.now()
assert "gs://" in gcp_bucket
if background_class_index is not None:
assert background_class_index >= 0
# clean up the tmp directory
try:
shutil.rmtree(tmp_directory.as_posix())
except FileNotFoundError:
pass
tmp_directory.mkdir()
run_name = '{}_{}'.format(stack_id, model_id)
local_model_dir = Path(tmp_directory, 'models', model_id)
local_model_dir.mkdir(parents=True)
local_processed_data_dir = Path(tmp_directory, 'processed-data', stack_id)
local_processed_data_dir.mkdir(parents=True)
local_inferences_dir = Path(tmp_directory, 'inferences', run_name)
local_inferences_dir.mkdir(parents=True)
output_dir = Path(local_inferences_dir, str('output_' + infer_datetime))
output_dir.mkdir(parents=True)
os.system("gsutil -m cp -r '{}' '{}'".format(os.path.join(gcp_bucket, 'models', model_id),
Path(tmp_directory, 'models').as_posix()))
local_folder_has_files(local_model_dir, model_id)
os.system("gsutil -m cp -r '{}' '{}'".format(os.path.join(gcp_bucket, 'processed-data', stack_id),
Path(tmp_directory, 'processed-data').as_posix()))
local_folder_has_files(local_processed_data_dir, stack_id)
with Path(local_model_dir, 'config.yaml').open('r') as f:
train_config = yaml.safe_load(f)['train_config']
with Path(local_model_dir, 'metadata.yaml').open('r') as f:
model_metadata = yaml.safe_load(f)
if trained_thresholds_id is not None:
with Path(local_model_dir, trained_thresholds_id).open('r') as f:
threshold_output_data = yaml.safe_load(f)
image_folder = Path(local_processed_data_dir, 'images')
assert model_metadata['target_size'][0] == model_metadata['target_size'][1]
target_size_1d = model_metadata['target_size'][0]
num_classes = model_metadata['num_classes']
optimized_class_thresholds = {}
if trained_thresholds_id is not None and 'thresholds_training_output' in threshold_output_data['metadata']:
for i in range(num_classes):
if ('x' in threshold_output_data['metadata']['thresholds_training_output'][str('class' + str(i))] and
threshold_output_data['metadata']['thresholds_training_output'][str('class' + str(i))]['success']):
optimized_class_thresholds.update(
{str('class' + str(i)): threshold_output_data['metadata']['thresholds_training_output'][str('class' + str(i))]['x']}
)
else:
raise AssertionError('Attempted to load an unsuccessfully trained threshold.')
else:
optimized_class_thresholds = None
# set threshold(s) used for inference
if user_specified_prediction_thresholds:
if len(user_specified_prediction_thresholds) == 1:
prediction_threshold = np.ones(num_classes) * user_specified_prediction_thresholds
else:
assert len(user_specified_prediction_thresholds) == num_classes
prediction_threshold = np.asarray(user_specified_prediction_thresholds)
elif trained_thresholds_id is not None and 'thresholds_training_output' in threshold_output_data['metadata']:
prediction_threshold = np.empty(num_classes)
for i in range(num_classes):
if ('x' in threshold_output_data['metadata']['thresholds_training_output'][str('class' + str(i))] and
threshold_output_data['metadata']['thresholds_training_output'][str('class' + str(i))]['success']):
prediction_threshold[i] = threshold_output_data['metadata']['thresholds_training_output'][str('class' + str(i))]['x']
else:
raise AssertionError('Attempted to load an unsuccessfully trained threshold.')
else:
prediction_threshold = np.ones(num_classes) * global_threshold
compiled_model = generate_compiled_segmentation_model(
train_config['segmentation_model']['model_name'],
train_config['segmentation_model']['model_parameters'],
num_classes,
train_config['loss'],
train_config['optimizer'],
Path(local_model_dir, "model.hdf5").as_posix(),
optimized_class_thresholds=optimized_class_thresholds)
if image_ids is None:
images_list = []
for i in Path(image_folder).iterdir():
images_list.append(i.parts[-1])
else:
images_list = image_ids.split(',')
labels_output = str2bool(labels_output)
pad_output = str2bool(pad_output)
n_images = len(list(Path(image_folder).iterdir()))
for i, image_file in enumerate(sorted(Path(image_folder).iterdir())):
if image_file.parts[-1] in images_list:
print('Segmenting image {} --- stack has {} images...'.format(image_file.parts[-1], n_images))
image = Image.open(image_file)
segmented_image = segment_image(compiled_model, image, prediction_threshold,
target_size_1d, background_class_index, labels_output, pad_output)
# enable saving of various versions of same inference
image_file_ext = image_file.parts[-1].split('.')[-1]
if labels_output and pad_output:
segmented_image.save(Path(output_dir, str(
image_file.parts[-1].split('.')[0] + '_pad_labels' + '.'
+ image_file_ext)).as_posix())
elif labels_output:
segmented_image.save(Path(output_dir, str(
image_file.parts[-1].split('.')[0] + '_labels' + '.'
+ image_file_ext)).as_posix())
elif pad_output:
segmented_image.save(Path(output_dir, str(
image_file.parts[-1].split('.')[0] + '_pad' + '.'
+ image_file_ext)).as_posix())
else:
segmented_image.save(Path(output_dir, str(
image_file.parts[-1].split('.')[0] + '.'
+ image_file_ext)).as_posix())
metadata_sys = {
'System_info': getSystemInfo(),
'Lib_versions_info': getLibVersions()
}
metadata = {
'message': message,
'gcp_bucket': gcp_bucket,
'model_id': model_id,
'user_specified_prediction_thresholds': user_specified_prediction_thresholds,
'trained_thresholds_id': trained_thresholds_id,
'trained_class_thresholds_loaded': optimized_class_thresholds,
'default_global_threshold_for_reference': global_threshold,
'prediction_thresholds_used': prediction_threshold.tolist(),
'background_class_index': background_class_index,
'stack_id': stack_id,
'image_ids': image_ids,
'labels_output': labels_output,
'pad_output': pad_output,
'created_datetime': datetime.now(pytz.UTC).strftime('%Y%m%dT%H%M%SZ'),
'git_hash': git.Repo(search_parent_directories=True).head.object.hexsha,
'elapsed_minutes': round((datetime.now() - start_dt).total_seconds() / 60, 1),
'random-module-global-seed': random_module_global_seed,
'numpy_random_global_seed': numpy_random_global_seed,
'tf_random_global_seed': tf_random_global_seed,
'metadata_system': metadata_sys
}
with Path(local_inferences_dir, metadata_file_name).open('w') as f:
yaml.safe_dump(metadata, f)
os.system("gsutil -m cp -n -r '{}' '{}'".format(Path(tmp_directory, 'inferences').as_posix(), gcp_bucket))
print('\n Infer Metadata:')
print(metadata)
print('\n')
shutil.rmtree(tmp_directory.as_posix())
if __name__ == "__main__":
import argparse
import sys
argparser = argparse.ArgumentParser(sys.argv[0])
argparser.add_argument(
'--gcp-bucket',
type=str,
help='The GCP bucket where the raw data is located and to use to store the processed stacks.')
argparser.add_argument(
'--model-id',
type=str,
help='The model ID.')
argparser.add_argument(
'--background-class-index',
type=int,
default=None,
help='Index of the background class used during model training, if any; it is excluded from the prediction overlay.')
argparser.add_argument(
'--stack-id',
type=str,
help='The stack ID (must already be processed).')
argparser.add_argument(
'--image-ids',
type=str,
default=None,
help='Comma-separated list of image IDs to segment; defaults to all images in the stack.')
argparser.add_argument(
'--user-specified-prediction-thresholds',
type=float,
nargs='+',
default=None,
help='Threshold(s) to apply to the prediction to classify a pixel as part of a class. E.g., 0.5 or 0.5 0.3 0.6')
argparser.add_argument(
'--labels-output',
type=str,
default='False',
help='If false, will output overlaid image (RGB); if true, will output labels only image (GV).')
argparser.add_argument(
'--pad-output',
type=str,
default='False',
help='If false, the output matches the input image size; if true, the output is padded to a multiple of the tile size.')
argparser.add_argument(
'--trained-thresholds-id',
type=str,
default=None,
help='The specified trained thresholds file id.')
argparser.add_argument(
'--random-module-global-seed',
type=int,
default=None,
help='The setting of random.seed(global seed), where global seed is int or default None (no seed given).')
argparser.add_argument(
'--numpy-random-global-seed',
type=int,
default=None,
help='The setting of np.random.seed(global seed), where global seed is int or default None (no seed given).')
argparser.add_argument(
'--tf-random-global-seed',
type=int,
default=None,
help='The setting of tf.random.set_seed(global seed), where global seed is int or default None (no seed given).')
argparser.add_argument(
'--message',
type=str,
default=None,
help='A str message the user wants to leave; the default is None.')
main(**argparser.parse_args().__dict__)
|
from cement import Controller, ex
import webbrowser, time, datetime
from cement import shell
from ..constants import DIFFICULTY_DISPLAY, GREEN, RED, WHITE
class Next(Controller):
class Meta:
label = "next"
stacked_type = 'embedded'
stacked_on = 'base'
@ex(
help='Get the next problem from the queue'
)
def next(self):
print()
problem = self.app.problems_queue.next_problem()
if not problem:
self.app.log.warning("Nothing to do!\n")
return
self.app.log.info("Opening %s[%s] %s" % (WHITE, DIFFICULTY_DISPLAY[problem.difficulty], problem.problem_title))
webbrowser.open(problem.url, new = 2)
start = time.time()
p = shell.Prompt("Time started! Press enter once you've solved", default='')
p.prompt() # Prompt and discard the input.
end = time.time()
elapsed = int(end - start)
minutes, seconds = divmod(elapsed, 60)
hours, minutes = divmod(minutes, 60)
self.app.log.info("%s took you %s%s\n" % (WHITE + problem.problem_title + GREEN, RED,
'{:d}:{:02d}:{:02d}s'.format(hours, minutes, seconds)))
|
from __future__ import unicode_literals
from django.apps import AppConfig
class DjangoDodoConfig(AppConfig):
name = 'django_dodo'
|
WIIU = 0
SWITCH = 1
class Settings:
MODE_ECB = 0
MODE_GCM = 1
field_types = {
"pia.version": int,
"pia.system_version": int,
"pia.application_version": int,
"pia.lan_version": int,
"common.pid_size": int
}
def __init__(self, version, app_version=-1, *, platform=SWITCH):
self.settings = {}
version //= 100
self["pia.version"] = version
self["pia.system_version"] = self.system_version(version)
self["pia.application_version"] = app_version
self["pia.lan_version"] = self.lan_version(version)
self["common.pid_size"] = 8 if platform == SWITCH else 4
def __getitem__(self, name): return self.settings[name]
def __setitem__(self, name, value):
if name not in self.field_types:
raise KeyError("Unknown setting: %s" %name)
self.settings[name] = self.field_types[name](value)
def system_version(self, version):
if version <= 503: return 0
if version == 506: return 2
if version == 507: return 3
if version == 508: return 4
if version == 509: return 5
if version == 510: return 6
if 511 <= version <= 518: return 7
if 519 <= version <= 529: return 8
raise ValueError("Unsupported pia version")
def lan_version(self, version):
if version < 509: return 0 # No crypto challenge
if version < 511: return 1 # Crypto challenge
return 2 # Crypto challenge and IPv6 support
def default(version, app_version=-1, *, platform=SWITCH):
return Settings(version, app_version, platform=platform)
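# Minimal usage sketch (hypothetical values; assumes the pia version is passed
# as an integer such as 51800 for version 5.18.0):
#
#   settings = default(51800, app_version=3)
#   settings["pia.system_version"]   # -> 7 for pia 5.11 - 5.18
#   settings["common.pid_size"]      # -> 8 on SWITCH, 4 on WIIU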
|
# $Filename$
# $Authors$
# Last Changed: $Date$ $Committer$ $Revision-Id$
# Copyright (c) 2003-2011, German Aerospace Center (DLR)
# All rights reserved.
#
#
#Redistribution and use in source and binary forms, with or without
#modification, are permitted provided that the following conditions are
#met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the
# distribution.
#
# * Neither the name of the German Aerospace Center nor the names of
# its contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
#LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
#A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
#OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
#SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
#LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
#DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
#THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
#(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
#OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
Main window of DataFinder administration client.
"""
import sys
import os
from qt import QPixmap, QIconSet, QMessageBox, QLabel
from qt import SIGNAL, QApplication
from qt import QGridLayout, PYSIGNAL, QListViewItem, QFileDialog
from qt import QPoint, SLOT
from datafinder.core.repository_manager import RepositoryManager
from datafinder.common.logger import getDefaultLogger
from datafinder.core.configuration.datamodel.constants import ROOT_RELATION_NAME
from datafinder.core.error import ConfigurationError
from datafinder.gui.gen.AdminMainWindowForm import AdminWindow
from datafinder.gui.admin import about_dialog
from datafinder.gui.admin import login_dialog
from datafinder.gui.admin.common import logger_handler
from datafinder.gui.admin.common import utils
from datafinder.gui.admin.datamodel_iconview import canvas_view
from datafinder.gui.admin import script_selection_dialog
from datafinder.gui.admin import icon_selection_dialog
from datafinder.gui.admin.datastore_configuration_wizard.controller import DataStoreConfigurationWizardController
from datafinder.gui.admin import relation_type_dialog
from datafinder.gui.admin import data_type_dialog
from datafinder.gui.admin.create_configuration_dialog.controller import CreateConfigurationController
__version__ = "$Revision-Id:$"
class AdminMain(AdminWindow):
""" Implements the administration client functionality. """
__tabDataTypesTitle = "Data Types"
__tabRelationTypesTitle = "Relation Types"
__tabDataStoresTitle = "Data Stores"
__baseCaption = "DataFinder Administration Client - Server: "
__errorMessageCaption = "DataFinder Administration Client: Error"
__informationMessageCaption = "DataFinder Administration Client: Information"
__logger = getDefaultLogger()
def __init__(self, repositoryManager):
""" Constructor. """
# Init GUI
AdminWindow.__init__(self)
# set icon in window-title:
self.setIcon(QPixmap.fromMimeSource("DF_Logo_24x24.png"))
iconSet = QIconSet(QPixmap.fromMimeSource("dataType16.png"))
self.dataNavigator.setTabIconSet(self.dataTypes, iconSet)
iconSet = QIconSet(QPixmap.fromMimeSource("relationType16.png"))
self.dataNavigator.setTabIconSet(self.relationTypes, iconSet)
iconSet = QIconSet(QPixmap.fromMimeSource("dataStore16.png"))
self.dataNavigator.setTabIconSet(self.dataStores, iconSet)
logger_handler.installGuiLoggingHandler(self.__logger, self.logList)
self.myStatusBar = self.statusBar()
self.statusLabel1 = QLabel("DataFinder", self.myStatusBar)
self.statusLabel2 = QLabel("OK", self.myStatusBar)
self.myStatusBar.addWidget(self.statusLabel1, 80)
self.myStatusBar.addWidget(self.statusLabel2, 20)
self.statusLabel1.show()
self.statusLabel2.show()
self.myStatusBar.show()
# prepare "About"-dialog:
self.dfAboutDialog = about_dialog.AboutDialog()
self.dfAboutDialog.setPixmap(QPixmap.fromMimeSource("about_datafinder_admin.png"))
# Login-dialog:
self.dfLoginDialog = login_dialog.LoginDialog(repositoryManager.preferences, parent=self, showurl=True)
# IconView:
propertyPanelLayout = QGridLayout(self.propertyPanel, 1, 1, 2, 6, "propertyPanelLayout")
self.iconView = canvas_view.CanvasView(self.propertyPanel, self, "iconView", 0)
propertyPanelLayout.addWidget(self.iconView, 0, 0)
self.iconViewCanvas = self.iconView.canvas()
self.connect(self.dfLoginDialog.okPushButton,
PYSIGNAL("updateWebdavServerView"),
self.updateWebdavServerSlot)
self.connect(self.dataTypeBrowser, SIGNAL("doubleClicked(QListViewItem*)"), self.__addDataTypeIconSlot)
self.connect(self.relationTypeBrowser, SIGNAL("doubleClicked(QListViewItem*)"), self.__addRelationTypeIconSlot)
self.connect(self.dataStoreBrowser, SIGNAL("clicked(QListViewItem*)"), self.iconView.updateCanvasView)
self.connect(self.dataStoreBrowser, SIGNAL("doubleClicked(QListViewItem*)"), self.editSlot)
# Init model
self._repositoryManager = repositoryManager
self.repositoryConfiguration = None
self._preferences = self._repositoryManager.preferences
self._lastUploadDirectory = os.path.expanduser("~")
self._dataTypes = list()
self._relationTypes = list()
self._dataStores = list()
self.__setConnectionState(False)
def __setConnectionState(self, isConnected):
"""
Sets the enabled state of the actions in accordance to
successful or unsuccessful connection.
"""
self.iconView.clear()
self.__setActionEnabledState(isConnected)
if isConnected:
self.setCaption(self.__baseCaption + unicode(self.repositoryConfiguration.repositoryConfigurationUri))
iconPath = self.repositoryConfiguration.localIconFilePath
if sys.platform == "win32" and iconPath.startswith("/"):
iconPath = iconPath[1:]
utils.addQtImagePath(iconPath)
else:
self.repositoryConfiguration = None
enableActions = [self.fileConnectAction, self.fileExitAction,
self.fileCreateConfigurationAction, self.editPreferencesAction,
self.helpAboutAction]
for action in enableActions:
action.setEnabled(True)
self.setCaption(self.__baseCaption + "<not connected>")
self.updateDataTypes()
self.updateRelationTypes()
self.updateDataStores()
def __setActionEnabledState(self, isEnabled):
""" Sets the enabled state of all actions. """
self.datamodelExportAction.setEnabled(isEnabled)
self.datamodelImportAction.setEnabled(isEnabled)
self.datamodelNewDataTypeAction.setEnabled(isEnabled)
self.datamodelNewRelationTypeAction.setEnabled(isEnabled)
self.deleteAction.setEnabled(isEnabled)
self.editAction.setEnabled(isEnabled)
self.editPreferencesAction.setEnabled(isEnabled)
self.fileConnectAction.setEnabled(isEnabled)
self.fileCreateConfigurationAction.setEnabled(isEnabled)
self.fileDeleteConfigurationAction.setEnabled(isEnabled)
self.fileExitAction.setEnabled(isEnabled)
self.helpAboutAction.setEnabled(isEnabled)
self.reloadConfigurationAction.setEnabled(isEnabled)
self.storeExportAction.setEnabled(isEnabled)
self.storeImportAction.setEnabled(isEnabled)
self.storeNewAction.setEnabled(isEnabled)
self.utilDeleteIconAction.setEnabled(isEnabled)
self.utilDeleteScriptAction.setEnabled(isEnabled)
self.utilUploadIconAction.setEnabled(isEnabled)
self.utilUploadScriptAction.setEnabled(isEnabled)
def __configureDatastore(self, datastore=None):
""" Shows the Data Store configuration wizard to add a new Data Store. """
DataStoreConfigurationWizardController(self, self.repositoryConfiguration.dataStoreHandler,
self.repositoryConfiguration.iconHandler, datastore)
def changeIconPixmap(self, icon, oldLabel, newLabel):
""" Changes the icon (node) pixmap. """
changedTitle = oldLabel
if not oldLabel == newLabel and not self._getIconFromLabel(newLabel) == None:
result = self.__showQuestion("DataFinder: Confirm Overwrite", "A Node of the same name \"" + unicode(newLabel) + \
"\" already exists.\nBefore renaming, please close that node.")
return changedTitle
if not oldLabel == newLabel and self._isTypeExisting(newLabel):
result = self.__showQuestion("DataFinder: Confirm Overwrite", "Overwrite existing \"" + unicode(newLabel) + "\"?")
if result:
return changedTitle
iconItem = self._getIconFromLabel(oldLabel)
if not iconItem is None:
if icon is None:
iconItem.setIconLabel(newLabel)
else:
iconItem.setIconLabel(newLabel)
iconItem.setIconPixmap(icon.pixmap())
changedTitle = newLabel
self.iconView.updateCanvasView()
def _isNodeExisting(self, title):
"""
Check if the node with title does already exist.
@param title: Title of the node.
@type title: C{string}
"""
returnValue = True
if self._getIconFromLabel(title) is None:
returnValue = False
return returnValue
def _isTypeExisting(self, newName):
""" Checks if the type exists. """
returnValue = False
if not self.repositoryConfiguration.getDataType(newName) is None:
returnValue = True
elif not self.repositoryConfiguration.getRelation(newName) is None:
returnValue = True
return returnValue
def _getIconFromLabel(self, searchLabel):
"""
Returns the icon specified by searchLabel.
@param searchLabel: the label of the icon.
@type searchLabel: C{String}
@rtype: L{PrototypeIcon}
"""
return self.iconView.getIcon(searchLabel)
def updateWebdavServerSlot(self, url, username, password):
""" Reconnect to WebDAV server. """
# update preferences
if self.dfLoginDialog.savePasswordCheckBox.isChecked():
self._preferences.addConnection(url, username, password)
else:
self._preferences.addConnection(url, username, None)
self._preferences.store()
try:
if not self.repositoryConfiguration is None:
self.repositoryConfiguration.release()
repositoryConfiguration = None
repositoryConfiguration = self._repositoryManager.getRepositoryConfiguration(url, username, password)
repositoryConfiguration.load()
except ConfigurationError, error:
if not repositoryConfiguration is None:
repositoryConfiguration.release()
self.__showErrorMessage("Cannot connect to specified server. Reason: '%s'" % error.message \
+ "Please try again or create a new configuration.")
self.__setConnectionState(False)
else:
self.repositoryConfiguration = repositoryConfiguration
self.__setConnectionState(True)
def updateDataTypes(self):
""" Updates list of data types. """
self.dataTypeBrowser.clear()
self._dataTypes = list()
if not self.repositoryConfiguration is None:
self._dataTypes = self.repositoryConfiguration.datatypes
for dataType in self._dataTypes:
item = QListViewItem(self.dataTypeBrowser, None)
if not dataType.name is None:
item.setText(0, unicode(dataType.name))
pixmap = utils.getPixmapForImageName(dataType.iconName)
item.setPixmap(0, pixmap)
def updateRelationTypes(self):
""" Updates the displayed list of relation types. """
self.relationTypeBrowser.clear()
self._relationTypes = list()
if not self.repositoryConfiguration is None:
self._relationTypes = self.repositoryConfiguration.relations
for relationType in self._relationTypes:
item = QListViewItem(self.relationTypeBrowser, None)
if not relationType.name is None:
item.setText(0, relationType.name)
pixmap = utils.getPixmapForImageName(relationType.iconName)
item.setPixmap(0, pixmap)
def updateDataStores(self):
""" Updates the displayed list of Data Stores. """
self.dataStoreBrowser.clear()
self._dataStores = list()
if not self.repositoryConfiguration is None:
self._dataStores = self.repositoryConfiguration.datastores
for dataStore in self._dataStores:
item = QListViewItem(self.dataStoreBrowser, None)
if not dataStore.name is None:
item.setText(0, dataStore.name)
if dataStore.isDefault:
item.setText(1, "Yes")
pixmap = utils.getPixmapForImageName(dataStore.iconName)
item.setPixmap(0, pixmap)
def editSlot(self):
""" Edits the selected item (data type, relation, or data store). """
currentTabLabel = str(self.dataNavigator.tabLabel(self.dataNavigator.currentPage()))
if currentTabLabel == AdminMain.__tabDataTypesTitle:
item = self.dataTypeBrowser.selectedItem()
if not item is None:
title = unicode(item.text(0))
dataTypeController = data_type_dialog.DataTypeController(self, title, self.repositoryConfiguration)
dataTypeController.show()
elif currentTabLabel == AdminMain.__tabRelationTypesTitle:
item = self.relationTypeBrowser.selectedItem()
if not item is None:
title = unicode(item.text(0))
relationTypeController = relation_type_dialog.RelationTypeController(self, title, self.repositoryConfiguration)
relationTypeController.show()
elif currentTabLabel == AdminMain.__tabDataStoresTitle:
item = self.dataStoreBrowser.selectedItem()
if not item is None:
datastore = self.repositoryConfiguration.getDataStore(unicode(item.text(0)))
self.__configureDatastore(datastore)
def __addDataTypeIconSlot(self):
""" Edits the selected data type. """
item = self.dataTypeBrowser.selectedItem()
if not item is None:
dataType = self.repositoryConfiguration.getDataType(unicode(item.text(0)))
title = dataType.name
iconSet = QIconSet(utils.getPixmapForImageName(dataType.iconName))
if not self._isNodeExisting(title):
iconSet = QIconSet(utils.getPixmapForImageName(dataType.iconName))
self.iconView.addDataTypeIcon(title, iconSet)
self.__logger.info("Data Type %s was successfully loaded." % title)
else:
self.iconView.markDataTypeIcon(title)
def __addRelationTypeIconSlot(self):
""" Edits the selected relation type. """
item = self.relationTypeBrowser.selectedItem()
if not item is None:
relationType = self.repositoryConfiguration.getRelation(unicode(item.text(0)))
title = relationType.name
iconSet = QIconSet(utils.getPixmapForImageName(relationType.iconName))
if not self._isNodeExisting(title):
iconSet = QIconSet(utils.getPixmapForImageName(relationType.iconName))
self.iconView.addRelationIcon(relationType, title, iconSet)
self.__logger.info("Relation Type %s was successfully loaded." % title)
else:
self.iconView.markRelationIcon(title)
def deleteSlot(self):
""" Deletes the selected data type. """
currentTabLabel = unicode(self.dataNavigator.tabLabel(self.dataNavigator.currentPage()))
try:
if currentTabLabel == AdminMain.__tabDataTypesTitle:
self.deleteSelectedDataType()
elif currentTabLabel == AdminMain.__tabRelationTypesTitle:
self.deleteSelectedRelationType()
elif currentTabLabel == AdminMain.__tabDataStoresTitle:
self.__deleteSelectedDataStore()
except ConfigurationError, error:
errorMessage = "Cannot delete the specific item. Reason: '%s'" % error.message
self.__showErrorMessage(errorMessage)
self.__logger.error(errorMessage)
def deleteSelectedDataType(self):
""" Deletes the selected data type. """
item = self.dataTypeBrowser.selectedItem()
if not item is None:
title = unicode(item.text(0))
dtype = self.repositoryConfiguration.getDataType(title)
if not dtype is None:
result = self.__showQuestion("DataFinder: Confirm Delete", "Delete Data Type \"" + title + "\"?")
if result:
self.repositoryConfiguration.removeDataType(dtype.name)
self.repositoryConfiguration.dataModelHandler.store()
self.updateDataTypes()
self.iconView.markDataTypeIcon(title)
self.iconView.removeIcon()
else:
self.__logger.warning("Data Type not found in configuration.")
def deleteSelectedRelationType(self):
""" Deletes the selected relation type. """
item = self.relationTypeBrowser.selectedItem()
if not item is None:
title = unicode(item.text(0))
if title == ROOT_RELATION_NAME:
self.__showErrorMessage("The the root relations cannot be removed.")
else:
rtype = self.repositoryConfiguration.getRelation(title)
if not rtype is None:
result = self.__showQuestion("DataFinder: Confirm Delete", "Delete Relation Type \"" + title + "\"?")
if result:
self.repositoryConfiguration.removeRelation(rtype.name)
self.repositoryConfiguration.dataModelHandler.store()
self.updateRelationTypes()
self.iconView.markRelationIcon(title)
self.iconView.removeIcon()
else:
self.__logger.warning("Relation Type not found in configuration.")
def __deleteSelectedDataStore(self):
""" Deletes the selected Data Store. """
item = self.dataStoreBrowser.selectedItem()
if not item is None:
title = unicode(item.text(0))
stype = self.repositoryConfiguration.getDataStore(title)
if not stype is None:
result = self.__showQuestion("DataFinder: Confirm Delete", "Delete Data Store \"" + title + "\"?")
if result:
self.repositoryConfiguration.removeDataStore(stype.name)
self.repositoryConfiguration.dataStoreHandler.store()
self.updateDataStores()
else:
self.__logger.warning("Data Store was not found in configuration.")
def addDataTypeSlot(self):
""" Adds a new data type. """
data_type_dialog.DataTypeController(self, None, self.repositoryConfiguration, False).show()
def addRelationTypeSlot(self):
""" Adds a new relation type. """
relation_type_dialog.RelationTypeController(self, None, self.repositoryConfiguration, False).show()
def addDataStoreSlot(self):
""" Configures a new data store. """
self.__configureDatastore()
def exportDatamodelSlot(self):
""" Exports the current data model to the local file system. """
proposedName = "datamodel.xml"
targetFilePath = unicode(QFileDialog.getSaveFileName(os.path.join(self._lastUploadDirectory, proposedName),
"*.xml", self, "Export data model...", "Choose a file" ))
if len(targetFilePath) > 0:
self._lastUploadDirectory = os.path.dirname(targetFilePath)
try:
self.repositoryConfiguration.exportDatamodel(targetFilePath)
except ConfigurationError, error:
self.__showErrorMessage("Cannot export data model to '%s'. Reason: '%s'" % (targetFilePath, error.message))
else:
self.__logger.info("Successfully exported the current data model to '%s'." % targetFilePath)
def importDatamodelSlot(self):
""" Imports the current data model with the one read from the local file system. """
targetFilePath = unicode(QFileDialog.getOpenFileName(self._lastUploadDirectory, "*.xml",
self, "Import data model...", "Choose a file" ))
if os.path.isfile(targetFilePath):
self._lastUploadDirectory = os.path.dirname(targetFilePath)
try:
self.repositoryConfiguration.importDatamodel(targetFilePath)
self.repositoryConfiguration.dataModelHandler.store()
except ConfigurationError, error:
self.__showErrorMessage("Cannot import data model from '%s'. Reason: '%s'" \
% (targetFilePath, error.message))
else:
self.updateDataTypes()
self.updateRelationTypes()
self.__logger.info("Successfully imported the data model.")
def exportDataStoresSlot(self):
""" Exports the current data model to the local file system. """
proposedName = "datastores.xml"
targetFilePath = unicode(QFileDialog.getSaveFileName(os.path.join(self._lastUploadDirectory, proposedName),
"*.xml", self, "Export data store configuration...", "Choose a file" ))
if len(targetFilePath) > 0:
self._lastUploadDirectory = os.path.dirname(targetFilePath)
try:
self.repositoryConfiguration.exportDataStores(targetFilePath)
except ConfigurationError, error:
self.__showErrorMessage("Cannot export data store configuration to '%s'. \nReason: '%s'" \
% (targetFilePath, error.message))
else:
self.__logger.info("Successfully exported the current data store configurations to '%s'." % targetFilePath)
def importDataStoresSlot(self):
""" Imports the current data model with the one read from the local file system. """
targetFilePath = unicode(QFileDialog.getOpenFileName(self._lastUploadDirectory, "*.xml",
self, "Import data store configuration...", "Choose a file" ))
if len(targetFilePath) > 0:
self._lastUploadDirectory = os.path.dirname(unicode(targetFilePath))
try:
self.repositoryConfiguration.importDataStores(targetFilePath)
self.repositoryConfiguration.dataStoreHandler.store()
except ConfigurationError, error:
self.__showErrorMessage("Cannot import data store configuration from '%s'. Reason: '%s'" \
% (targetFilePath, error.message))
else:
self.updateDataStores()
self.__logger.info("Successfully imported data store configurations.")
def __showErrorMessage(self, errorMessage):
""" Display the given error message. """
QMessageBox.critical(self, self.__errorMessageCaption, errorMessage)
self.__logger.error(errorMessage)
def __showQuestion(self, caption, question):
""" Ask the user a question and returns the result. """
result = False
answer = QMessageBox.warning(self, caption, question, QMessageBox.No, QMessageBox.Yes)
if answer == QMessageBox.Yes:
result = True
return result
def __showInformation(self, information):
""" Shows the given information. """
QMessageBox.information(self, self.__informationMessageCaption, information, QMessageBox.Ok)
def fileConnectSlot(self):
""" Displays the login dialog. """
self.dfLoginDialog.presetUrlList()
self.dfLoginDialog.show()
def createConfigurationSlot(self):
""" Creates a new configuration. """
self.__logger.debug("Display dialog for creation of a new configuration.")
CreateConfigurationController(self._repositoryManager)
def reloadConfigurationSlot(self):
""" Reloads data types, relations and data stores. """
try:
self.repositoryConfiguration.load()
except ConfigurationError, error:
self.__showErrorMessage(error.message)
self.__setConnectionState(False)
else:
self.__setConnectionState(True)
def deleteConfigurationSlot(self):
""" Deletes the current configuration. """
question = "Do you really want to delete this configuration?\n" \
+ "Warning: The complete content of collection '%s' is removed, too." \
% self.repositoryConfiguration.repositoryConfigurationUri
result = self.__showQuestion("Confirm delete", question)
if result:
try:
self.repositoryConfiguration.delete()
except ConfigurationError, error:
errorMessage = "Unable to delete configuration.\n Reason: '%s'" % error.message
self.__showErrorMessage(errorMessage)
else:
self.repositoryConfiguration.release()
self.__setConnectionState(False)
def fileExitSlot(self):
""" Exits the administration client. """
try:
self._repositoryManager.savePreferences()
except ConfigurationError, error:
self.__logger.error(error.message)
QApplication.exit(0)
def showAboutDialogSlot(self):
""" Shows the about dialog. """
self.dfAboutDialog.exec_loop()
def uploadIconSlot(self):
"""
Upload icon(s) to WebDAV-server Images collection.
"""
filePaths = list(QFileDialog.getOpenFileNames("Image Files (*16.png)",
self._lastUploadDirectory,
self,
"open file dialog",
"Choose files" ))
if len(filePaths) > 0:
self._lastUploadDirectory = os.path.dirname(unicode(filePaths[0]))
for filePath in filePaths:
iconName = os.path.basename(unicode(filePath))[0:-6]
iconPath = os.path.dirname(unicode(filePath))
# Check if icon size (named) 24 is available
iconPath24Pixel = os.path.join(iconPath, iconName + "24.png")
if not os.path.exists(iconPath24Pixel):
errorMessage = "Icon '%s24.png' does not exist!" % iconName
self.__logger.error(errorMessage)
else:
performIconImport = True
if self.repositoryConfiguration.hasIcon(iconName):
questionMessage = u"Icon '%s' already exists!\n\n Overwrite?" % iconName
performIconImport = self.__showQuestion("Icon Import", questionMessage)
if performIconImport:
try:
self.repositoryConfiguration.addIcon(iconName, iconPath)
except ConfigurationError, error:
self.__logger.error(error.message)
def deleteIconSlot(self):
""" Delete icon from server. """
icons = self.repositoryConfiguration.icons
if len(icons) == 0:
self.__showInformation("No icons have been found on server.")
else:
selectDialog = icon_selection_dialog.SelectUserIconDialog(multiSelection=True)
iconsToRemove = selectDialog.getIconName(icons)
for icon in iconsToRemove:
self.repositoryConfiguration.removeIcon(self.repositoryConfiguration.getIcon(icon))
def uploadScriptSlot(self):
""" Upload script(s) to WebDAV-server Scripts collection. """
filePaths = list(QFileDialog.getOpenFileNames("DataFinder Script Files (*.py *.tar)",
self._lastUploadDirectory,
self,
"open file dialog",
"Choose files" ))
if len(filePaths) > 0:
self._lastUploadDirectory = os.path.dirname(unicode(filePaths[0]))
for filePath in filePaths:
filePath = unicode(filePath)
performScriptImport = True
if self.repositoryConfiguration.hasScript(os.path.basename(filePath)):
question = "Script '%s' already exists!\n\n Overwrite?" % os.path.basename(filePath)
performScriptImport = self.__showQuestion("Script Upload", question)
if performScriptImport:
try:
self.repositoryConfiguration.addScript("file:///" + filePath)
except ConfigurationError, error:
errorMessage = "Cannot add script.\n Reason: '%s'" % error.message
self.__showErrorMessage(errorMessage)
def deleteScriptSlot(self):
"""
Delete script from server.
"""
scripts = self.repositoryConfiguration.scripts
if len(scripts) == 0:
self.__showInformation("No scripts have been found on server.")
else:
selectDialog = script_selection_dialog.SelectScriptDialog()
selectDialog.setScripts(scripts)
scriptsToRemove = selectDialog.getScriptToRemove()
for script in scriptsToRemove:
try:
self.repositoryConfiguration.removeScript(script)
except ConfigurationError, error:
errorMessage = "Cannot remove script.\n Reason: '%s'" % error.message
self.__showErrorMessage(errorMessage)
def main():
""" Start function. """
application = QApplication(sys.argv)
splashScreen = utils.showSplash("splash_datafinder_admin.png")
splashScreen.show()
repositoryManager = RepositoryManager()
repositoryManager.load()
adminMainWindow = AdminMain(repositoryManager)
application.connect(application, SIGNAL("lastWindowClosed()"), application, SLOT("quit()"))
application.setMainWidget(adminMainWindow)
screen = QApplication.desktop().screenGeometry()
adminMainWindow.move(QPoint(screen.center().x() - adminMainWindow.width() / 2, screen.center().y() - adminMainWindow.height() / 2))
adminMainWindow.show()
splashScreen.close(True)
adminMainWindow.fileConnectSlot()
application.exec_loop()
if __name__ == "__main__":
main()
|
import argparse
import glob
import os
import librosa
import numpy as np
import onnxruntime
import soundfile as sf
from tqdm import tqdm
import audio_utils
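# DECModel wraps an ONNX model that enhances the mic signal using a far-end
# reference: audio is framed with a Hamming window, the mic and far-end
# log-power spectra of each frame are fed to the network together with two
# recurrent hidden states (h01, h02), and the predicted spectral mask is
# applied to the mic spectrum before overlap-add resynthesis.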
class DECModel:
def __init__(self, model_path, window_length, hop_fraction,
dft_size, hidden_size, sampling_rate=16000, spectral_floor=-120.0):
self.hop_fraction = hop_fraction
self.dft_size = dft_size
self.hidden_size = hidden_size
self.sampling_rate = sampling_rate
self.spectral_floor = spectral_floor
self.frame_size = int(window_length * sampling_rate)
self.hop_size = int(window_length * sampling_rate * hop_fraction)
self.window = audio_utils.hamming(self.frame_size, hop=hop_fraction)
self.model = onnxruntime.InferenceSession(model_path)
def calc_features(self, xmag_mic, xmag_far):
feat_mic = audio_utils.logpow(xmag_mic, floor=self.spectral_floor)
feat_far = audio_utils.logpow(xmag_far, floor=self.spectral_floor)
feat = np.concatenate([feat_mic, feat_far])
feat /= 20.
feat = feat[np.newaxis, np.newaxis, :]
feat = feat.astype(np.float32)
return feat
def enhance(self, path_mic, path_far, path_out):
# load inputs
x_mic, _ = librosa.load(path_mic, sr=self.sampling_rate)
x_far, _ = librosa.load(path_far, sr=self.sampling_rate)
# cut to equal length
min_len = min(len(x_mic), len(x_far))
x_mic = x_mic[:min_len]
x_far = x_far[:min_len]
# zero pad from left
pad_left, pad_right = self.hop_size, 0
x_mic = np.pad(x_mic, (pad_left, pad_right))
x_far = np.pad(x_far, (pad_left, pad_right))
# init buffers
num_frames = (len(x_mic) - self.frame_size) // self.hop_size + 1
x_back = np.zeros(self.frame_size + (num_frames - 1) * self.hop_size)
h01 = np.zeros((1, 1, self.hidden_size), dtype=np.float32)
h02 = np.zeros((1, 1, self.hidden_size), dtype=np.float32)
# frame-wise inference
for ix_start in range(0, len(x_mic) - self.frame_size, self.hop_size):
ix_end = ix_start + self.frame_size
cspec_mic = np.fft.rfft(x_mic[ix_start:ix_end] * self.window, self.dft_size)
xmag_mic, xphs_mic = audio_utils.magphasor(cspec_mic)
cspec_far = np.fft.rfft(x_far[ix_start:ix_end] * self.window)
xmag_far = np.abs(cspec_far)
feat = self.calc_features(xmag_mic, xmag_far)
inputs = {
"input": feat,
"h01": h01,
"h02": h02,
}
mask, h01, h02 = self.model.run(None, inputs)
mask = mask[0, 0]
x_enh = np.fft.irfft(mask * xmag_mic * xphs_mic, self.dft_size)
x_back[ix_start:ix_end] += x_enh
return x_back[pad_left:]
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Baseline model inference")
parser.add_argument("--model_path", "-m", help="ONNX model to use for inference.", default="dec-baseline-model-interspeech2021.onnx")
parser.add_argument("--data_dir", "-d", required=True, help="Directory containing mic and farend files.")
parser.add_argument("--output_dir", "-o", required=True, help="Output directory to save enhanced output files.")
args = parser.parse_args()
if not os.path.exists(args.output_dir):
print(f"Creating output directory: {args.output_dir}")
os.makedirs(args.output_dir)
sampling_rate = 16000
model = DECModel(
model_path=args.model_path,
window_length=0.02,
hop_fraction=0.5,
dft_size=320,
hidden_size=322,
sampling_rate=sampling_rate,
spectral_floor=-120.)
mic_paths = glob.glob(os.path.join(args.data_dir, "*_mic.wav"))
for mic_path in tqdm(mic_paths):
basename = os.path.basename(mic_path)
farend_path = mic_path.replace("_mic.wav", "_lpb.wav")
if not os.path.exists(farend_path):
print("Farend file not found, skipping:", farend_path)
continue
out_path = os.path.join(args.output_dir, basename)
if os.path.exists(out_path):
print("Enhanced file exists, overwriting:", out_path)
x_enhanced = model.enhance(mic_path, farend_path, out_path)
sf.write(out_path, x_enhanced, sampling_rate)
|
# -*- coding: utf-8 -*-
import os
from shovel.config import Config
from shovel.pit import Pit
DEFAULT_BUCKET = os.environ.get('SHOVEL_DEFAULT_BUCKET')
DEFAULT_ROOT = os.environ.get('SHOVEL_DEFAULT_ROOT') or 'bottomless-pit'
def bury(project, name, version, local_path, force=False, ignore_exists=False):
"""Upload the contents of the target path to the pit."""
pit = get_default_pit()
pit.bury(project, name, version, local_path, force, ignore_exists)
def dig(project, name, version, local_path):
"""Download the contents of the target dataset from the pit."""
pit = get_default_pit()
pit.dig(project, name, version, local_path)
def peek(project=None, name=None, version=None, *, local_path=None):
pit = get_default_pit()
return pit.peek(project, name, version, local_path=local_path)
def get_default_config():
return Config(bucket=DEFAULT_BUCKET, root=DEFAULT_ROOT)
def get_default_pit():
config = get_default_config()
return Pit(config.bucket, config.root)
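# Minimal usage sketch (hypothetical project/dataset names; assumes
# SHOVEL_DEFAULT_BUCKET is set and these helpers are imported, e.g.
# `from shovel import bury, dig, peek`):
#
#   bury('my-project', 'training-data', 'v1', './data/')    # upload
#   dig('my-project', 'training-data', 'v1', './restore/')  # download
#   peek('my-project')                                       # list datasets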
|
motor2_cp1 = 4
motor2_cp2 = 5
motor2_ep = 10
motor1_cp1 = 2
motor1_cp2 = 3
motor1_ep = 9
motor_speed = 1000
lights_pin = 13
beep_pin = 8
serialport = '/dev/tty.usbmodem1411'
|
import numpy as np
import model
import monte_carlo
class Sequence():
"""
Object that stores a sequence with positions with an update method to update the positions and sequence
through a Markov Chain Monte Carlo Method.
"""
def __init__(self, N=None, selected_iterator=None, p_mutation=None,
sequencepotentials=('rise',),
averagedpotentials=('tilt', 'roll'),
positions_perturbation_function=None,
sequence_perturbation_function=None,
tension_deformation=0,
**kwargs):
self.positions_perturbation_function = positions_perturbation_function
self.sequence_perturbation_function = sequence_perturbation_function
if N is None:
N = model.LENGTH
if selected_iterator is None:
selected_iterator = 'metropolis_sp'
self.selected = selected_iterator
if p_mutation is None:
p_mutation = 0.2
random_sequence = np.random.choice(np.arange(4, dtype=np.uint8), (N))
self.sequence = random_sequence
if tension_deformation is None:
self.positions = np.linspace(0, N - 1, N)
else:
self.positions = np.linspace(0, N - 1, N)
self.rest_positions = self.positions.copy()
self.p_mutation = p_mutation
self.sequencepotentials = sequencepotentials
self.averagedpotentials = averagedpotentials
self.tension_deformation = tension_deformation
self._setup_settr()
self._update_MC_func()
self.calc_energy()
def _setup_settr(self):
def __setattr__(self, key, value):
super().__setattr__(key, value)
self._update_MC_func()
def update(self, steps=100, temp=1):
# print('updating')
if self.selected.endswith('sp'):
dE = self.MC_func(self.positions, self.sequence, steps, temp, self.p_mutation)
else:
dE = self.MC_func(self.positions, self.sequence, steps, temp)
self.energy += dE
@property
def rise_potential(self):
if 'rise' in self.averagedpotentials:
return model.averaged_rise_potential(self.positions)
elif 'rise' in self.sequencepotentials:
return model.rise_potential(self.positions, self.sequence)
else:
return 0
@property
def tilt_potential(self):
if 'tilt' in self.averagedpotentials:
return model.averaged_tilt_potential(self.positions)
elif 'tilt' in self.sequencepotentials:
return model.tilt_potential(self.positions, self.sequence)
else:
return 0
@property
def roll_potential(self):
if 'roll' in self.averagedpotentials:
return model.averaged_roll_potential(self.positions)
elif 'roll' in self.sequencepotentials:
return model.roll_potential(self.positions, self.sequence)
else:
return 0
@property
def tension_energy(self):
if self.tension_deformation is None:
return 0
else:
return model.tension_energy(self.positions, self.tension_deformation)
def calc_energy(self):
self.energy = np.sum(self.rise_potential) + np.sum(
[self.tilt_potential, self.roll_potential]) + self.tension_energy
@property
def MC_func(self):
return self._MC_func
def _update_MC_func(self):
print('creating algorithm')
algorithm, sp = str(self.selected).split('_')
if algorithm == 'metropolis':
self._MC_func = monte_carlo.Metropolis(sequencepotentials=self.sequencepotentials,
averagedpotentials=self.averagedpotentials,
tension_deformation=self.tension_deformation)
if algorithm == 'checkerboard':
self._MC_func = monte_carlo.Checkerboard(sequencepotentials=self.sequencepotentials,
averagedpotentials=self.averagedpotentials,
tension_deformation=self.tension_deformation)
if algorithm == 'old':
self._MC_func = monte_carlo.DoubleEnergyMetropolis()
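# Minimal usage sketch (assumes the accompanying model and monte_carlo modules
# provide LENGTH and the Metropolis/Checkerboard iterators):
#
#   seq = Sequence(N=100, selected_iterator='metropolis_sp')
#   seq.update(steps=10000, temp=1)   # run 10,000 MC steps at unit temperature
#   print(seq.energy)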
if __name__ == '__main__':
s = Sequence()
import model as m
print(s.energy)
print(s.energy)
import monte_carlo as mc
energy_func = mc.create_energy_function(s.sequencepotentials, s.averagedpotentials)
print(energy_func(s.positions, s.sequence))
import matplotlib.pyplot as plt
plt.plot(m.rise_potential(s.positions, s.sequence))
s.update(steps=1e6, temp=1)
plt.plot(m.rise_potential(s.positions, s.sequence))
print('bla')
print(s.energy)
print(energy_func(s.positions, s.sequence))
|
from .statements import Select,Where,GroupBy,OrderBy,Aggregate
class query:
def __init__(self):
self.select = []
self.where = []
self.groupBy = []
self.aggregate = []
self.language = 'en'
self.orderBy = []
def set_select(self,data_table:str,indicators:list):
select_item = Select(data_table, indicators)
self.select.append(select_item)
def get_select(self):
select_statement = []
for s in self.select:
select_statement.append(s.make_dictionary())
print(select_statement)
return select_statement
def set_where(self,reference:str,value:list,date_from=None,date_to=None):
where_item = Where(reference,value,date_from,date_to)
self.where.append(where_item)
def set_group_by(self,reference:str,level=''):
group_by_item = GroupBy(reference, level)
self.groupBy.append(group_by_item)
def set_order_by(self,reference:str,order_type:str):
order_by_item = OrderBy(reference, order_type)
self.orderBy.append(order_by_item)
def set_aggregate(self,indicator:str,operation:list):
aggregate_item = Aggregate(indicator,operation)
self.aggregate.append(aggregate_item)
def set_language(self,language:str):
self.language = language
def show(self):
# In progress
select_statement = []
for s in self.select:
select_statement.append(s.make_dictionary())
where_statement = []
for w in self.where:
where_statement.append(w.make_dictionary())
group_by_statement = []
for gb in self.groupBy:
group_by_statement.append(gb.make_dictionary())
aggregate_statement = []
for a in self.aggregate:
aggregate_statement.append(a.make_dictionary())
order_by_statement = []
for ob in self.orderBy:
order_by_statement.append(ob.make_dictionary())
print({
"select":select_statement,
"where":where_statement,
"iterate":[{
"group_by":group_by_statement,
"aggregate":aggregate_statement
}],
"language":self.language,
"order_by":order_by_statement
})
return {
"select":select_statement,
"where":where_statement,
"iterate":[{
"group_by":group_by_statement,
"aggregate":aggregate_statement
}],
"language":self.language,
"order_by":order_by_statement
}
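

# Example usage sketch: this assumes the Select/Where/GroupBy/OrderBy/Aggregate
# constructors accept the arguments in the same order the setters pass them;
# the table and indicator names below are placeholders.
if __name__ == '__main__':
    q = query()
    q.set_select('population', ['total', 'urban'])  # data table and indicators
    q.set_where('year', [2020])                     # simple filter
    q.set_group_by('region')                        # group results by region
    q.set_order_by('total', 'desc')                 # order by an indicator
    q.set_aggregate('total', ['sum'])               # aggregation operation
    q.show()                                        # prints and returns the payload dictionary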
|
import os
PRINTER_HOST = 'example.com'
PRINTER_USER = 'admin'
PRINTER_PASS = 'admin'
ADMIN_KEY = 'adminkey'
CLIENT_KEY = 'clientkey'
WEB_KEY = 'webkey'
SECRET_KEY='clavemuysecreta'
DEBUG = True
LOG = True |
import json
import os
import re
from abc import ABC
from abc import abstractmethod
from concurrent.futures import ThreadPoolExecutor
from typing import Dict
from typing import IO
from typing import Tuple
from typing import Text
from typing import List
from typing import Any
from typing import Union
from typing import Optional
from urllib.request import Request
from urllib.request import urlopen
from queue import Queue
from graphql import get_introspection_query
from graphql import build_schema
from graphql import build_client_schema
from graphql import GraphQLSchema
from graphql import GraphQLArgument
from graphql import GraphQLField
from graphql import GraphQLInputField
from graphql import GraphQLNamedType
from graphql import GraphQLScalarType
from graphql import GraphQLObjectType
from graphql import GraphQLInputObjectType
from jinja2 import Template
from typer import progressbar
__all__ = ["make_action"]
VarsFieldType = Union[GraphQLField, GraphQLInputField, GraphQLArgument]
DefinitionType = Union[GraphQLInputObjectType, GraphQLScalarType]
url_regx = (
r"http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+"
)
class GraphqlDocsParse(ABC):
scalar_default = {
"Int": 0,
"String": "",
"ID": 0,
"Date": "2022-01-24",
"DateTime": "2022-01-24",
"Float": 0.0,
"Boolean": False,
"JSON": "JSON",
}
    # types replaced under the sqlmap rules
sqlmap_regx = ["String", "Date", "JSON"]
def __init__(self):
self.schemas: Optional[GraphQLSchema] = None
self.json_queue = Queue()
@abstractmethod
def build_graphql_schema(self):
"""构建schema 文档的方法"""
pass
def start(self, depth: int = 1, is_sqlmap: bool = False):
"""对外暴露的解析方法"""
if self.schemas is None:
self.build_graphql_schema()
self.load_query(depth, is_sqlmap=is_sqlmap)
@staticmethod
def query_template(
is_type: str,
operation_name: str,
vars_str: str,
resolve_str: str,
args: List[str],
) -> str:
"""
query语句模板
:param is_type: query 语句类型, mutation , query, subscription
:param operation_name: 接口名称
:param vars_str: 顶层参数定义
:param resolve_str: 底层使用变量参数
:param args: query请求体查询字段列表
:return: 完整的query 语句
"""
gql = Template(
""" {{ type }} {{ operationName }} {% if vars %}{{vars}}{% endif %}{
{{operationName}}{%if res_vars %} {{res_vars}}{%endif%}{% if args%}{
{% for arg in args %} {{arg}}
{% endfor %}}
{%endif%}
}
"""
).render(
{
"type": is_type,
"operationName": operation_name,
"vars": vars_str,
"res_vars": resolve_str,
"args": args,
}
)
return gql
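
    # For reference: with is_type="query", operation_name="user",
    # vars_str="($id: ID!)", resolve_str="(id: $id)" and args=["id", "name"],
    # the template above renders roughly the following document:
    #
    #   query user ($id: ID!){
    #       user (id: $id){
    #        id
    #        name
    #       }
    #   }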
def get_return_obj(self, field_obj: GraphQLField) -> GraphQLNamedType:
"""获取到最下一层的 GraphQLObjectType"""
field_name = str(field_obj.type)
if (start := field_name.find("[")) != -1:
end = field_name.find("]")
field_name = field_name[start + 1: end]
if field_name.endswith("!"):
field_name = field_name[0:-1]
return self.schemas.type_map.get(field_name)
def get_variables(
self,
items: Dict[str, VarsFieldType],
data_map: Optional[Dict[Any, Any]] = None,
is_sqlmap: bool = False,
) -> Dict[str, Any]:
"""
填充数据
:param items: 被迭代对象
:param data_map: 存储数据对象
:param is_sqlmap: 是否使用sqlmap规则
:return: 填充完成的参数字典
"""
if data_map is None:
data_map = {}
for k, v in items.items():
v_type = str(v.type)
            # flag marking whether the type is a list
flag = False
if (start := v_type.find("[")) != -1:
end = v_type.find("]")
v_type = v_type[start + 1: end]
flag = True
if v_type.endswith("!"):
v_type = v_type[0:-1]
type_obj: Optional[DefinitionType] = self.schemas.type_map.get(v_type)
            # input object type
if isinstance(type_obj, GraphQLInputObjectType):
arg_result = self.get_variables(type_obj.fields, {}, is_sqlmap)
            # scalar type
else:
if type_obj.name in GraphqlDocsParse.scalar_default:
type_name = type_obj.name
                # custom scalar type
elif (
element_type := v_type.split("_")[1]
) in GraphqlDocsParse.scalar_default:
type_name = element_type
else:
raise TypeError(f"类型 {v_type} 未设置默认数据哦", type(v.type), v, k)
if is_sqlmap and type_name in GraphqlDocsParse.sqlmap_regx:
arg_result = "*"
else:
arg_result = GraphqlDocsParse.scalar_default[type_name]
data_map.update({k: [arg_result] if flag else arg_result})
return data_map
def find_fields(
self,
field_obj: Union[GraphQLNamedType, GraphQLObjectType],
results: Optional[List[str]] = None,
depth: int = 1,
):
"""
递归找到query语句中可用查询字段列表
:param field_obj: 字段对象
:param results: 结果集列表
:param depth: 递归次数
:return:
"""
if results is None:
results = []
for k, v in field_obj.fields.items():
obj = self.get_return_obj(v)
if isinstance(obj, GraphQLObjectType) and depth != 0:
results.append("%s{" % k)
results.extend(self.find_fields(obj, depth=depth - 1))
results.append("}")
elif isinstance(obj, GraphQLObjectType):
pass
else:
results.append(k)
return results
def get_query_str(
self, is_type: str, query_name: str, field_obj: GraphQLField, depth: int
) -> str:
"""
生成query 语句
:param is_type: 接口类型 mutation , query, subscription
:param query_name: 接口名称
:param field_obj: 字段对象
:param depth: 查找query语句体中的 字段 使用的递归层级
:return gql: 完整的query 语句
"""
# args query 请求体中可用字段列表
args = []
field_objs = self.get_return_obj(field_obj)
if isinstance(field_objs, GraphQLObjectType):
args.extend(self.find_fields(field_objs, depth=depth))
        # build the argument part of the query statement
        vars_str = ""  # first level: variable definitions
        resolve_str = ""  # innermost level: variables passed back in
for k, v in field_obj.args.items():
vars_str += f"${k}: {v.type}, "
resolve_str += f"{k}: ${k}, "
if vars_str != "":
vars_content = f"({vars_str[0:-2]})"
resolve_content = f"({resolve_str[0:-2]})"
else:
vars_content = None
resolve_content = None
gql = GraphqlDocsParse.query_template(
is_type,
operation_name=query_name,
vars_str=vars_content,
resolve_str=resolve_content,
args=args,
)
return gql
def load_query(self, depth: int = 1, is_sqlmap: bool = False):
"""query 查询名称, 返回字段类型名称"""
for types_name in ["query", "mutation", "subscription"]:
gql_type = getattr(self.schemas, f"{types_name}_type")
if not hasattr(gql_type, "fields"):
pass
else:
for query_name, query_return in gql_type.fields.items():
result = {
"query": self.get_query_str(
types_name, query_name, query_return, depth
),
"variables": self.get_variables(
query_return.args, is_sqlmap=is_sqlmap
),
"operationName": query_name,
}
self.json_queue.put(result)
class GraphqlDocsParseUrl(GraphqlDocsParse):
"""Graphql URL 方式解析文档"""
def __init__(self, url: str = None, headers: Optional[Dict[str, str]] = None):
self.url = url
self.headers = headers
super().__init__()
@property
def url(self):
return self._url
@url.setter
def url(self, value: str):
"""验证"""
if not isinstance(value, str):
raise TypeError("url 不符合规则")
if result := re.match(url_regx, value):
path = result.group(0)
if path.split("/")[-1] != "graphql":
if path[-1] == "/":
value += "graphql"
else:
value += "/graphql"
self._url = value
@property
def headers(self):
return self._headers
@headers.setter
def headers(self, value: Dict[str, str]):
self._headers = {} if value is None else value
self._headers.update({"Content-Type": "application/json"})
def build_graphql_schema(self):
"""通过请求url的方式获取到 需要的 GraphQLSchema 对象"""
data = json.dumps({"query": get_introspection_query(descriptions=True)})
resp = Request(url=self.url, headers=self.headers, data=data.encode("utf-8"))
data = json.loads(urlopen(resp).read())["data"]
self.schemas = build_client_schema(data)
def sqlmap_template(self):
"""
        Return the sqlmap template string.
:return:
"""
url_host = self.url[self.url.find("//") + 2:]
host = url_host.split("/")[0]
url = "/".join(url_host.split("/")[1:])
headers = json.dumps(self.headers)[1:-1].replace('"', "").split(",")
return Template(
"POST /{{url}} HTTP/1.1\nHOST: {{host}}\n{%for header in headers%}{{header|trim}}\n{%endfor%}"
).render(url=url, host=host, headers=headers)
class GraphqlDocsParseFile(GraphqlDocsParse):
"""本地文档解析"""
def __init__(self, path: str):
self.path = path
super().__init__()
@abstractmethod
def load_file_content(self) -> Tuple[str, Union[Dict, Text]]:
pass
def build_graphql_schema(self):
type_str, content = self.load_file_content()
if type_str == "json":
self.schemas = build_client_schema(content)
if type_str == "gql":
self.schemas = build_schema(content)
class GraphqlDocsParseJson(GraphqlDocsParseFile):
def load_file_content(self) -> Tuple[str, Union[Dict, Text]]:
"""读取本地json文件"""
with open(self.path, "r", encoding="utf-8") as f:
content = json.load(f)
return "json", content["data"]
class GraphqlDocsParseSchema(GraphqlDocsParseFile):
"""实际测试过程中该方法会丢失部分语句 测试过程 通过 json 文件 和url 解 共解出 310 条,该方法只解出 304条"""
def load_file_content(self) -> Tuple[str, Union[Dict, Text]]:
"""读取本地.graphql文档"""
with open(self.path, "r", encoding="utf-8") as f:
content = f.read()
return "gql", content
class MakeFile:
"""写 json 文件 写gql文件 写txt(sqlmap) 文件"""
def __init__(self, parse_obj: GraphqlDocsParse, path: str):
"""
初始化数据方法
:param parse_obj: GraphqlDocsParse 解析文档对象
:param path: 文件保存目录
"""
self.parse_obj = parse_obj
self.path = path
if not os.path.isdir(path):
os.mkdir(path)
@abstractmethod
def make_file(self, info: Dict[str, Any]):
"""写入文件"""
pass
def async_write(self):
"""线程池写文件"""
total = 0
qsize = self.parse_obj.json_queue.qsize()
with ThreadPoolExecutor() as executor:
            with progressbar(range(qsize), label="Progress") as progress:
for _ in progress:
future = executor.submit(
self.make_file, self.parse_obj.json_queue.get()
)
future.result()
total += 1
return total
class MakeGqlFile(MakeFile):
def make_file(self, info: Dict[str, Any]):
"""写入gql文件"""
file_path = self.path + "/" + info["operationName"] + ".gql"
with open(file_path, "w", encoding="utf-8") as f:
f.write(info["query"])
class MakeJsonFile(MakeFile):
def make_file(self, info: Dict[str, Any]):
file_path = self.path + "/" + info["operationName"] + ".json"
with open(file_path, "w", encoding="utf-8") as f:
json.dump(info, f)
class MakeSqlmapFile(MakeFile):
def __init__(self, parse_obj: GraphqlDocsParse, path: str, template: str):
"""
初始化数据, 待实现
:param parse_obj:
:param path:
:param template: sqlmap扫描文件内容
"""
self.template = template
super().__init__(parse_obj, path)
def make_file(self, info: Dict[str, Any]):
"""
制作sqlmap -r 可用的文件
:param info: 解析而来的json参数
:return:
"""
file_path = self.path + "/" + info["operationName"] + ".txt"
with open(file_path, "w", encoding="utf-8") as f:
f.write(self.template + "\n" + json.dumps(info))
class MakeBurpFile(MakeSqlmapFile):
separate = "=" * 66
def __init__(self, parse_obj: GraphqlDocsParse, path: str, template: str):
self.fp: Optional[IO] = None
super().__init__(parse_obj, path, template)
def __enter__(self):
self.fp = open(self.path + "/burp.txt", "a", encoding="utf-8")
return self
def make_file(self, info: Dict[str, Any]):
context = Template(
"{{separate}}\n\n{{separate}}\n{{template}}\n{{separate}}\n\n\n"
).render(
separate=MakeBurpFile.separate,
template=self.template + "\n" + json.dumps(info),
)
self.fp.write(context)
def __exit__(self, exc_type, exc_val, exc_tb):
self.fp.close()
def make_action(
path: str,
directory: str,
to_type: str,
headers: Optional[Dict[str, str]] = None,
depth: int = 1,
):
"""
对cli程序暴露的制作文件完整方法
:param path: 文件(.json) / (.graphql) / url 路径
:param directory: 保存在该目录文件下,如果不存在则会创建
:param to_type: 可选json, gql, sqlmap, burp
:param headers: 当path 为 url内容时的可选项
:param depth: 生成的query语句中最大递归深度 默认为1
:return:
"""
is_url = re.match(url_regx, path)
suffix = path.split(".")[-1]
if is_url:
parse_obj = GraphqlDocsParseUrl(path, headers)
if to_type == "sqlmap":
template = parse_obj.sqlmap_template()
parse_obj.start(depth, True)
return MakeSqlmapFile(parse_obj, directory, template).async_write()
if to_type == "burp":
template = parse_obj.sqlmap_template()
parse_obj.start(depth, True)
with MakeBurpFile(parse_obj, directory, template) as make:
make.async_write()
return 1
elif suffix == "json":
parse_obj = GraphqlDocsParseJson(path)
elif suffix == "graphql":
parse_obj = GraphqlDocsParseSchema(path)
else:
raise ValueError("参数错误,path 应该是个 url地址 或者 json文件 或者 graphql(SDL)文件")
parse_obj.start(depth)
if to_type == "json":
return MakeJsonFile(parse_obj, directory).async_write()
elif to_type == "gql":
return MakeGqlFile(parse_obj, directory).async_write()
else:
raise TypeError("暂只支持解析成JSON,GQL,sqlmap 文件")
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''subs.py
Usage: subs.py [options]
Options:
-D --debug Debugging output
-q --quiet Quiet output
-h --help Help!
-l --loops=LOOPS Maximum Loops [default: 10]
-s --sleep=SLEEP Sleep wait between loops [default: 30]
-x --xml Generate subscriptions.xml format
-o --output=FILE Output to file
-y --youtube Parse youtube [default: false]
-b --bitchute Parse bitchute [default: false]
'''
import time
import os
import re
from docopt import docopt
conf = docopt(__doc__)
import logging
l_level = logging.INFO
if conf['--debug']:
l_level = logging.DEBUG
elif conf['--quiet']:
l_level = logging.ERROR
logging.basicConfig(
level=l_level,
format='%(asctime)s - %(levelname)s - %(pathname)s - %(funcName)s - %(message)s')
from selenium import webdriver
from selenium.webdriver.common.by import By
url = "https://www.youtube.com/feed/channels"
burl = "https://www.bitchute.com/subscriptions/"
js = '''var scrollInterval = setInterval(function() {
document.documentElement.scrollTop = document.documentElement.scrollHeight;
}, 1000);'''
js2 = '''var channel_links = document.getElementsByClassName("channel-link");
var ret = []; for (var f of channel_links) { ret.push(f.href) }; return ret;'''
bjs2 = '''var channel_links = document.getElementsByClassName("spa");
var ret = []; for (var f of channel_links) { let s = f.getAttribute("rel");
if (s === "author") { ret.push(f.href) } }; return ret; '''
bjs = '''var links = document.getElementsByClassName("spa");
var channel_links = links.getElementsByTagName("a");
var ret = []; for (var f of channel_links) { ret.push(f.href) }; return ret;'''
def youtube():
loop_limit = int(conf['--loops'])
sleep_limit = int(conf['--sleep'])
options = webdriver.ChromeOptions()
options.add_argument("--headless")
options.add_argument("--no-sandbox")
options.add_argument("--disable-dev-shm-usage")
options.add_argument("--user-data-dir=/home/chrome/.config/chromium")
driver = webdriver.Chrome('/usr/bin/chromedriver', chrome_options=options)
driver.get(url)
time.sleep(sleep_limit)
logging.debug("executing scroller js")
driver.execute_script(js)
loop_count = 0
links = []
links_prev = []
while True:
loop_count = loop_count + 1
logging.info(["Loop count", loop_count])
if loop_count > loop_limit:
break
links_prev = links.copy()
links = []
logging.debug(["Sleeping", sleep_limit])
time.sleep(sleep_limit)
# for a in driver.find_elements_by_tag_name('a'):
for href in driver.execute_script(js2):
if href:
if href not in links:
links.append(href)
logging.debug(href)
logging.info(["links length", len(links)])
logging.info(["pinks length", len(links_prev)])
if loop_count > 0 and len(links) == len(links_prev):
logging.debug("No more new links")
break
logging.info(["Channels Found", len(links)])
for l in sorted(links):
ll = re.sub(r'com/(channel|user)/(\S+)$',
r'com/feeds/videos.xml?\1=\2', l)
if re.search(r'videos\.xml', ll):
logging.debug(["Found", l, ll])
print(ll)
driver.quit()
def bitchute():
sleep_limit = int(conf['--sleep'])
options = webdriver.ChromeOptions()
options.add_argument("--headless")
options.add_argument("--no-sandbox")
options.add_argument("--disable-dev-shm-usage")
options.add_argument("--user-data-dir=/home/chrome/.config/chromium")
driver = webdriver.Chrome('/usr/bin/chromedriver', chrome_options=options)
driver.get(burl)
time.sleep(sleep_limit)
links = []
links_prev = []
b = "https://www.bitchute.com/feeds/rss"
# for href in driver.execute_script(bjs):
for a in driver.find_elements_by_tag_name('a'):
logging.debug(["A", a])
try:
href = a.get_attribute("href")
if href:
if href not in links:
logging.debug(["HREF", href])
links.append(href)
except BaseException as x:
logging.debug(x)
for l in sorted(links):
ll = re.sub(r'com/(channel/(\S+))$',
r'com/feeds/rss/\1', l)
if re.search(r'channel', ll):
logging.debug(["Found", l, ll])
print(ll)
driver.quit()
def main():
logging.info(conf)
if conf['--youtube']:
youtube()
if conf['--bitchute']:
bitchute()
if __name__ == "__main__":
main()
|
# import the necessary packages
from skimage import feature
class HOG:
def __init__(
self,
orientations=9,
pixelsPerCell=(8, 8),
cellsPerBlock=(3, 3),
transform=False,
block_norm="L2-Hys",
):
# store the number of orientations, pixels per cell,
# cells per block, and whether or not power law
# compression should be applied
        self.orientations = orientations
self.pixelsPerCell = pixelsPerCell
self.cellsPerBlock = cellsPerBlock
self.transform = transform
self.block_norm = block_norm
def describe(self, image):
# compute HOG for the image
hist = feature.hog(
image,
            orientations=self.orientations,
pixels_per_cell=self.pixelsPerCell,
cells_per_block=self.cellsPerBlock,
transform_sqrt=self.transform,
block_norm=self.block_norm,
)
# return the HOG features
return hist
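

# Example usage sketch: describe() expects a single-channel image, so a random
# grayscale array stands in for a real picture here.
if __name__ == "__main__":
    import numpy as np

    hog = HOG(orientations=9, pixelsPerCell=(8, 8), cellsPerBlock=(3, 3))
    image = np.random.rand(128, 64)  # synthetic grayscale image
    features = hog.describe(image)
    print(features.shape)            # 1-D HOG feature vector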
|
from typing import Iterable, Optional, TypeVar
from stock_indicators._cslib import CsIndicator
from stock_indicators._cstypes import List as CsList
from stock_indicators.indicators.common.helpers import RemoveWarmupMixin
from stock_indicators.indicators.common.results import IndicatorResults, ResultBase
from stock_indicators.indicators.common.quote import Quote
def get_adx(quotes: Iterable[Quote], lookback_periods: int = 14):
"""Get ADX calculated.
Average Directional Movement Index (ADX) is a measure of price directional movement.
It includes upward and downward indicators, and is often used to measure strength of trend.
Parameters:
`quotes` : Iterable[Quote]
Historical price quotes.
    `lookback_periods` : int, defaults to 14
Number of periods in the lookback window.
Returns:
`ADXResults[ADXResult]`
        ADXResults is a list of ADXResult with some useful helper methods.
See more:
- [ADX Reference](https://daveskender.github.io/Stock.Indicators.Python/indicators/Adx/#content)
- [Helper Methods](https://daveskender.github.io/Stock.Indicators.Python/utilities/#content)
"""
adx_results = CsIndicator.GetAdx[Quote](CsList(Quote, quotes), lookback_periods)
return ADXResults(adx_results, ADXResult)
class ADXResult(ResultBase):
"""
A wrapper class for a single unit of ADX results.
"""
@property
def pdi(self) -> Optional[float]:
return self._csdata.Pdi
@pdi.setter
def pdi(self, value):
self._csdata.Pdi = value
@property
def mdi(self) -> Optional[float]:
return self._csdata.Mdi
@mdi.setter
def mdi(self, value):
self._csdata.Mdi = value
@property
def adx(self) -> Optional[float]:
return self._csdata.Adx
@adx.setter
def adx(self, value):
self._csdata.Adx = value
_T = TypeVar("_T", bound=ADXResult)
class ADXResults(RemoveWarmupMixin, IndicatorResults[_T]):
"""
A wrapper class for the list of ADX(Average Directional Movement Index) results.
    It is exactly the same as the built-in `list` except that it provides
    some useful helper methods written in the C# implementation.
"""
|
import pytest
import sys
from vagranttoansible.vagranttoansible import write_ssh_config_to_file, parse_ssh_config, write_ansible_inventory, \
__version__, get_args
FIXTURES_DIR = 'tests/fixtures/'
EMPTY_FILE = FIXTURES_DIR + 'ssh_config_empty_file'
SIMPLE_FILE = FIXTURES_DIR + 'ssh_config_simple'
def _read_file(filename):
"""
Utility function
:param filename:
:return:
"""
with open(filename) as f:
return f.read()
def test_parse_args_version(capfd):
"""
Should print the program name and version
:param capfd:
:return:
"""
with pytest.raises(SystemExit):
get_args(['-V'])
out, err = capfd.readouterr()
# https://bugs.python.org/issue18920
if sys.version_info >= (3, 4):
assert out == "vagranttoansible %s\n" % __version__
assert err == ''
else:
assert err == "vagranttoansible %s\n" % __version__
assert out == ''
def test_parse_args_error(capfd):
"""
Should fail : option provided without a filename
:return:
"""
with pytest.raises(SystemExit):
get_args(['-o'])
out, err = capfd.readouterr()
assert out == ''
def test_write_ssh_config_to_file(tmpdir):
"""
Should write ssh_config to a file
:return:
"""
ssh_config = 'Host machine3\n'
filepath = tmpdir.join('test.test')
write_ssh_config_to_file(ssh_config, filename=filepath.strpath)
assert ssh_config == filepath.read()
def test_parse_ssh_config_empty_file():
"""
    Should properly parse the ssh configuration with no item
:return:
"""
filename = EMPTY_FILE
config_list = parse_ssh_config(filename)
assert len(config_list) == 0
def test_parse_ssh_config_simple():
"""
    Should properly parse the ssh configuration with only one item
:return:
"""
filename = SIMPLE_FILE
config_list = parse_ssh_config(filename)
assert len(config_list) == 1
config = config_list[0]
assert config['host'] == 'machine1'
assert config['options']['hostname'] == '127.0.0.1'
assert config['options']['user'] == 'vagrant'
assert config['options']['port'] == '2222'
assert config['options']['userknownhostsfile'] == '/dev/null'
assert config['options']['stricthostkeychecking'] == 'no'
assert config['options']['passwordauthentication'] == 'no'
assert len(config['options']['identityfile']) == 1
assert config['options']['identityfile'][0] == '/.vagrant/machines/machine1/virtualbox/private_key'
assert config['options']['identitiesonly'] == 'yes'
assert config['options']['loglevel'] == 'FATAL'
def test_write_ansible_inventory_empty_file(capfd, tmpdir):
"""
Should create an empty Ansible inventory file and should not print anything (only \n)
:param capfd:
:return:
"""
config = parse_ssh_config(EMPTY_FILE)
# print to stdout
write_ansible_inventory(config)
out, err = capfd.readouterr()
assert out == '\n'
inventory = tmpdir.join("inventory")
write_ansible_inventory(config, output_file_name=inventory.strpath)
assert inventory.read() == ''
|
from django.apps import AppConfig
from django.db.models import signals
def create_default_store(sender, app_config, verbosity, **kwargs):
# Only create the default sites in databases where Django created the table.
if verbosity >= 2:
print("Creating default Store object")
from .models import Store
Store().save()
class Fileshack(AppConfig):
name = 'fileshack'
verbose_name = 'Fileshack'
def ready(self):
signals.post_migrate.connect(create_default_store, sender=self)
|
import base64
from Crypto.Cipher import DES3
import os
import subprocess
import re
#########################################################
# RPDscan - Remmina Password Decorder/scanner #
# by Freakyclown #
# This tool searches the /home directory for users #
# Remmina preference and config files, then does some #
# magic to decode any saved passwords it finds #
# #
#########################################################
# Changelog
# 0.1 alpha 3/april/2014
#### usage #####################################
# #
# python RPDscan.py #
# #
#################################################
def decodewithscience(private_key_line,saved_key_line):
## grab the password and the secret key and truncate them on the first = ##
private_key = private_key_line.split('=',1)
saved_key = saved_key_line.split('=',1)
## base 64 decode the private key and the saved password key
decoded_private_key = base64.decodestring(str(private_key[1]))
    decoded_saved_key = base64.decodestring(str(saved_key[1]))
## do some magic and decode and print out the decoded password \o/
print "Saved password: "
print DES3.new(decoded_private_key[:24],DES3.MODE_CBC,decoded_private_key[24:]).decrypt(decoded_saved_key)
def muchprefsverywow(pref_key):
## open a process to find all the remmina configs ##
sub_proc_confs = subprocess.Popen("find /home/ -type f -name *.remmina", shell=True, stderr=subprocess.PIPE, stdout=subprocess.PIPE)
## for each line do some magic
for sub_conf_found in sub_proc_confs.stdout:
print "========"
with open(sub_conf_found.strip(), 'r') as found_pref:
print "Found a conf file:", sub_conf_found
for conf_secret in found_pref:
if 'username=' in conf_secret:
print conf_secret
if 'server=' in conf_secret:
print conf_secret
if 'domain=' in conf_secret:
print conf_secret
if 'password' in conf_secret:
if conf_secret == "password=\n":
print "no saved password sorry!"
print ""
else:
decodewithscience(pref_key,conf_secret)
print ""
def findalltheprefs():
## open a new process and do the find ##
sub_proc_pref = subprocess.Popen("find /home/ -name remmina.pref", shell=True, stderr=subprocess.PIPE, stdout=subprocess.PIPE)
print sub_proc_pref
## iterate over all the found pref files ##
for sub_pref_found in sub_proc_pref.stdout:
print "here"
## tell the user we found something ##
print "found this pref file", sub_pref_found
## open the pref file and check/grab the users private key
with open(sub_pref_found.strip(), 'r') as found_pref:
for pref_secret in found_pref:
if 'secret' in pref_secret:
muchprefsverywow(pref_secret)
findalltheprefs()
|
# from yt_downloader.txt_reader import read
from argparse import ArgumentParser
from yt_downloader import yt_downer
import json
# TODO keep original file option
# TODO make new directory for files option
parser = ArgumentParser()
parser.add_argument('-url', help='Youtube URL to download.')
parser.add_argument('-i','--input-file', help="Read URL's from a text file", type=str)
parser.add_argument('-f', '--format', help='Video or audio format to convert like: mp4. Default is mp3', type=str)
parser.add_argument('-dir', '--directory', help='Download directory', type=str)
parser.add_argument('-d', '--fade', help='Fade-in and fade-out in seconds between the tracks.', type=float)
parser.add_argument('-t', '--tracklist-in-description', help='If tracklist is in description', action='store_true')
parser.add_argument('-s', '--stamps', help='Make stamps: -s "name","3:44","3:50", "name2","3:50","3:53", ... Use only at the end.', nargs='*', type=str)
args = parser.parse_args()
# print(args.stamps)
# print(args)
yt_downer.main(args) |
#! /usr/bin/python3
#-*- coding: utf-8 -*-
from __future__ import print_function
import os, sys
import re
import datetime
from pybern.products.errors.errors import FileFormatError
FILE_FORMAT = 'GPSEST .CRD (Bernese v5.2)'
"""
IGS14: coordinate list
--------------------------------------------------------------------------------
LOCAL GEODETIC DATUM: IGS14_0 EPOCH: 2010-01-01 00:00:00
NUM STATION NAME X (M) Y (M) Z (M) FLAG
"""
def parse_bern52_crd(fn):
stadct = {}
with open(fn, 'r') as fin:
title = fin.readline().strip()
stadct['title'] = title
line = fin.readline()
assert(line.startswith('----------------'))
line = fin.readline()
assert(line.startswith('LOCAL GEODETIC DATUM'))
ref_frame = line.split()[3]
stadct['ref_frame'] = ref_frame
assert(line.split()[4] == 'EPOCH:')
date = datetime.datetime.strptime(' '.join(line.split()[5:]), '%Y-%m-%d %H:%M:%S')
stadct['date'] = date
line = fin.readline()
line = fin.readline()
## Note that IGB14 has an extra column named 'SYSTEM'!
if not re.match(r"^NUM\s+STATION NAME\s+X \(M\)\s+Y \(M\)\s+Z \(M\)\s+FLAG(\s+SYSTEM)?$", line.strip()):
errmsg = '[ERROR] Failed parsing Bern-52 CRD file: {:}\n[ERROR] Expected {:}, found [{:}]'.format(fn, 'NUM STATION NAME X (M) Y (M) Z (M) FLAG', line.strip())
raise RuntimeError(errmsg)
line = fin.readline()
while True:
try:
line = fin.readline()
except:
break
if len(line)<=1:
break
ls = line.split()
name = line[5:10].strip()
domes = line[10:20].strip()
x, y, z = [ float(i) for i in line[20:66].split() ]
flag = line[66:].strip() if len(line)>66 else ''
stadct[name] = {'domes': domes, 'x': x, 'y': y, 'z': z, 'flag': flag}
return stadct
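

# Example usage sketch: the file name below is a placeholder for a real
# Bernese v5.2 coordinate file.
if __name__ == '__main__':
    crd = parse_bern52_crd('REG2010.CRD')
    print(crd['ref_frame'], crd['date'])
    for sta, rec in crd.items():
        if sta not in ('title', 'ref_frame', 'date'):
            print(sta, rec['x'], rec['y'], rec['z'], rec['flag'])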
|
__version__ = "5.0.0-beta.13" |
requested_toppings = ['mushrooms', 'extra cheese']
if requested_toppings:
for requested_topping in requested_toppings:
print('Adding ' + requested_topping )
print('\nFinished making your pizza')
else:
print('Are you sure you want a plain pizza?') |
"""
Kernels on molecules:
1) Graph-based kernels: operate on graph representations of molecules.
Can be computed using graphkernels package.
2) String-based kernels: operate on SMILES strings
TBA
Author: [email protected]
TODO:
- Issue: for some reason graphkernels fails when graphs are attributed,
this seems to be a serious drawback (that good are these kernels then?)
- Add other kernels and test performance
- Think of better kernels
NOTES:
- graphkernels reference: https://github.com/BorgwardtLab/GraphKernels
"""
GRAPH_LIB = "igraph" # depending on package for graph kernels
import numpy as np
from rdkit import Chem
from rdkit.Chem import rdmolops
if GRAPH_LIB == "igraph":
import igraph
else:
    import networkx as nx
import graphkernels.kernels as gk
from gp.kernel import Kernel
# Graph-based kernels ---------------------------------------------------------
def mol2graph_igraph(mol):
"""
    Convert molecule to igraph.Graph
Adapted from
https://iwatobipen.wordpress.com/2016/12/30/convert-rdkit-molecule-object-to-igraph-graph-object/
"""
mol = mol.to_rdkit()
admatrix = rdmolops.GetAdjacencyMatrix(mol)
bondidxs = [(b.GetBeginAtomIdx(),b.GetEndAtomIdx() ) for b in mol.GetBonds()]
adlist = np.ndarray.tolist(admatrix)
graph = igraph.Graph()
g = graph.Adjacency(adlist).as_undirected()
## set properties
# for idx in g.vs.indices:
# g.vs[idx][ "AtomicNum" ] = mol.GetAtomWithIdx(idx).GetAtomicNum()
# g.vs[idx][ "AtomicSymbole" ] = mol.GetAtomWithIdx(idx).GetSymbol()
# for bd in bondidxs:
# btype = mol.GetBondBetweenAtoms(bd[0], bd[1]).GetBondTypeAsDouble()
# g.es[g.get_eid(bd[0], bd[1])]["BondType"] = btype
# print( bd, mol.GetBondBetweenAtoms(bd[0], bd[1]).GetBondTypeAsDouble() )
return g
def mol2graph_networkx(mol):
"""
Convert molecule to nx.Graph
Adapted from
https://iwatobipen.wordpress.com/2016/12/30/convert-rdkit-molecule-object-to-igraph-graph-object/
"""
mol = mol.to_rdkit()
admatrix = Chem.rdmolops.GetAdjacencyMatrix(mol)
bondidxs = [(b.GetBeginAtomIdx(),b.GetEndAtomIdx() ) for b in mol.GetBonds()]
graph = nx.Graph(admatrix)
for idx in graph.nodes:
graph.nodes[idx]["AtomicNum"] = mol.GetAtomWithIdx(idx).GetAtomicNum()
graph.nodes[idx]["AtomicSymbol"] = mol.GetAtomWithIdx(idx).GetSymbol()
for bd in bondidxs:
btype = mol.GetBondBetweenAtoms(bd[0], bd[1]).GetBondTypeAsDouble()
graph.edges[bd[0], bd[1]]["BondType"] = str(int(btype))
# print(bd, m1.GetBondBetweenAtoms(bd[0], bd[1]).GetBondTypeAsDouble())
return graph
"""
Kernels available in graphkernels: TODO into functions
K1 = gk.CalculateEdgeHistKernel(graph_list)
K2 = gk.CalculateVertexHistKernel(graph_list)
K3 = gk.CalculateVertexEdgeHistKernel(graph_list)
K4 = gk.CalculateVertexVertexEdgeHistKernel(graph_list)
K5 = gk.CalculateEdgeHistGaussKernel(graph_list)
K6 = gk.CalculateVertexHistGaussKernel(graph_list)
K7 = gk.CalculateVertexEdgeHistGaussKernel(graph_list)
K8 = gk.CalculateGeometricRandomWalkKernel(graph_list)
K9 = gk.CalculateExponentialRandomWalkKernel(graph_list)
K10 = gk.CalculateKStepRandomWalkKernel(graph_list)
K11 = gk.CalculateWLKernel(graph_list)
K12 = gk.CalculateConnectedGraphletKernel(graph_list, 4)
K13 = gk.CalculateGraphletKernel(graph_list, 4)
K14 = gk.CalculateShortestPathKernel(graph_list)
"""
"""
Base class Kernel has a call method
most kernels from graphkernels have only one parameter:
it is either an integer or a continuous quantity
"""
def compute_edgehist_kernel(mols, params):
"""
Compute edge hist kernel
Arguments:
mols {list[Molecule]} -- [description]
"""
par = params["cont_par"]
mol_graphs_list = [mol2graph_igraph(m) for m in mols]
return gk.CalculateEdgeHistKernel(mol_graphs_list,
par=par)
def compute_wl_kernel(mols, params):
"""
    Compute the Weisfeiler-Lehman (WL) kernel
Arguments:
mols {list[Molecule]} -- [description]
"""
par = int(params["int_par"])
mol_graphs_list = [mol2graph_igraph(m) for m in mols]
return gk.CalculateWLKernel(mol_graphs_list,
par=par)
KERNEL_FUNCS = {
"edgehist_kernel": compute_edgehist_kernel,
"wl_kernel": compute_wl_kernel
}
class MolKernel(Kernel):
def __init__(self, kernel_type, kernel_hyperparams):
self.kernel_type = kernel_type
if kernel_type not in KERNEL_FUNCS:
raise ValueError('Unknown kernel_type %s.'%kernel_type)
self.kernel_func = KERNEL_FUNCS[kernel_type]
# for hp_name in kernel_hyperparams:
# setattr(self, hp_name, kernel_hyperparams[hp_name])
self.hyperparams = kernel_hyperparams
def is_guaranteed_psd(self):
return True
def _child_evaluate(self, X1, X2):
return self.compute_dists(X1, X2)
def compute_dists(self, X1, X2):
"""
Given two lists of mols, computes
all pairwise distances between them
(of size n1 x n2)
"""
# print("here are params:", self.params)
bigmat = self.kernel_func(X1 + X2, self.hyperparams)
n1 = len(X1)
return bigmat[:n1, n1:]
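
# Usage sketch: assuming `mols` is a list of Molecule objects exposing
# .to_rdkit(), a Weisfeiler-Lehman kernel matrix between two subsets could be
# computed roughly like this:
#
#   kern = MolKernel("wl_kernel", {"int_par": 3})
#   K = kern.compute_dists(mols[:10], mols[10:20])  # shape (10, 10)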
# String-based kernels ---------------------------------------------------------
|
import time
from datetime import datetime
from pynput.keyboard import Controller, Key
from data import lst
import webbrowser
keyboard = Controller()
isStarted = False
for i in lst:
while True:
if isStarted == False:
if datetime.now().hour == int(i[1].split(':')[0]) and datetime.now().minute == int(i[1].split(':')[1]):
webbrowser.open(i[0])
isStarted = True
elif isStarted == True:
if datetime.now().hour == int(i[2].split(':')[0]) and datetime.now().minute == int(i[2].split(':')[1]):
keyboard.press('w')
time.sleep(1)
keyboard.press(Key.enter)
isStarted = False
break |
import tensorflow as tf
from layers.basics import optimize
class BaseSiameseNet:
def __init__(self, max_sequence_len, vocabulary_size, main_cfg, model_cfg, loss_function):
self.x1 = tf.placeholder(dtype=tf.int32, shape=[None, max_sequence_len])
self.x2 = tf.placeholder(dtype=tf.int32, shape=[None, max_sequence_len])
self.is_training = tf.placeholder(dtype=tf.bool)
self.labels = tf.placeholder(dtype=tf.int32, shape=[None, 1])
self.sentences_lengths = tf.placeholder(dtype=tf.int32, shape=[None])
self.dropout=0.2
self.rnn_cell = tf.contrib.rnn.BasicLSTMCell(num_units=64)
self.debug = None
self.embedding_size = main_cfg['PARAMS'].getint('embedding_size')
self.learning_rate = main_cfg['TRAINING'].getfloat('learning_rate')
with tf.variable_scope('embeddings'):
word_embeddings = tf.get_variable('word_embeddings', [vocabulary_size, self.embedding_size])
self.embedded_x1 = tf.gather(word_embeddings, self.x1)
self.embedded_x2 = tf.gather(word_embeddings, self.x2)
with tf.variable_scope('siamese'):
self.predictions = self.siamese_layer(max_sequence_len, model_cfg)
with tf.variable_scope('loss'):
self.loss = loss_function(self.labels, self.predictions)
self.opt = optimize(self.loss, self.learning_rate)
with tf.variable_scope('metrics'):
self.temp_sim = tf.rint(self.predictions)
self.correct_predictions = tf.equal(self.temp_sim, tf.to_float(self.labels))
self.accuracy = tf.reduce_mean(tf.to_float(self.correct_predictions))
with tf.variable_scope('summary'):
tf.summary.scalar("loss", self.loss)
tf.summary.scalar("accuracy", self.accuracy)
self.summary_op = tf.summary.merge_all()
def siamese_layer(self, sequence_len, model_cfg):
"""Implementation of specific siamese layer"""
raise NotImplementedError()
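

# A minimal sketch of a concrete subclass: it mean-pools the embedded sequences
# and scores similarity with exp(-L1 distance); real subclasses would also use
# model_cfg and the RNN cell defined above.
class MeanPoolSiameseNet(BaseSiameseNet):
    def siamese_layer(self, sequence_len, model_cfg):
        v1 = tf.reduce_mean(self.embedded_x1, axis=1)  # [batch, embedding_size]
        v2 = tf.reduce_mean(self.embedded_x2, axis=1)
        l1_distance = tf.reduce_sum(tf.abs(v1 - v2), axis=1)  # [batch]
        return tf.expand_dims(tf.exp(-l1_distance), -1)       # [batch, 1]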
|
"""
Creates a MobileNetV1 Model as defined in:
Andrew G. Howard Menglong Zhu Bo Chen, et.al. (2017 CVPR).
MobileNets: Efficient Convolutional Neural Networks for Mobile Vision Applications.
Copyright (c) Yang Lu, 2017
"""
import torch.nn as nn
import torch.nn.functional as F
import models.ops as ops
from models.imagenet.utils import make_divisible
from utils.net import make_norm
class BasicBlock(nn.Module):
def __init__(self, inplanes, planes, stride=1, dilation=1, kernel=3, norm='bn', use_se=False, activation=nn.ReLU):
super(BasicBlock, self).__init__()
padding = (dilation * kernel - dilation) // 2
self.inplanes, self.planes = int(inplanes), int(planes)
self.conv1 = nn.Conv2d(inplanes, inplanes, kernel_size=kernel, padding=padding, stride=stride,
dilation=dilation, groups=inplanes, bias=False)
self.bn1 = make_norm(inplanes, norm=norm)
self.conv2 = nn.Conv2d(inplanes, planes, kernel_size=1, stride=1, padding=0, bias=False)
self.bn2 = make_norm(planes, norm=norm)
self.se = ops.Se2d(planes, reduction=4) if use_se else None
try:
self.activation = activation(inplace=True)
except:
self.activation = activation()
def forward(self, x):
out = self.conv1(x)
out = self.bn1(out)
out = self.activation(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.activation(out)
if self.se is not None:
out = self.se(out)
return out
class MobileNetV1(nn.Module):
def __init__(self, use_se=False, widen_factor=1.0, kernel=3, layers=(2, 2, 6, 2), norm='bn',
activation=nn.ReLU, drop_rate=0.0, num_classes=1000):
""" Constructor
Args:
widen_factor: config of widen_factor
num_classes: number of classes
"""
super(MobileNetV1, self).__init__()
block = BasicBlock
self.use_se = use_se
self.norm = norm
self.drop_rate = drop_rate
self.activation_type = activation
try:
self.activation = activation(inplace=True)
except:
self.activation = activation()
num_of_channels = [32, 64, 128, 256, 512, 1024]
channels = [make_divisible(ch * widen_factor, 8) for ch in num_of_channels]
self.channels = channels
self.conv1 = nn.Conv2d(3, channels[0], kernel_size=3, stride=2, padding=1, bias=False)
self.bn1 = make_norm(channels[0], norm=self.norm)
self.conv2 = nn.Conv2d(channels[0], channels[0], kernel_size=kernel, stride=1, padding=kernel // 2,
groups=channels[0], bias=False)
self.bn2 = make_norm(channels[0], norm=self.norm)
self.conv3 = nn.Conv2d(channels[0], channels[1], kernel_size=1, stride=1, padding=0, bias=False)
self.bn3 = make_norm(channels[1], norm=self.norm)
self.inplanes = channels[1]
self.layer1 = self._make_layer(block, channels[2], layers[0], stride=2, dilation=1, kernel=kernel)
self.layer2 = self._make_layer(block, channels[3], layers[1], stride=2, dilation=1, kernel=kernel)
self.layer3 = self._make_layer(block, channels[4], layers[2], stride=2, dilation=1, kernel=kernel)
self.layer4 = self._make_layer(block, channels[5], layers[3], stride=2, dilation=1, kernel=kernel)
self.avgpool = nn.AdaptiveAvgPool2d(1)
self.fc = nn.Linear(channels[5], num_classes)
self._init_weights()
@property
def stage_out_dim(self):
return self.channels[1:]
@property
def stage_out_spatial(self):
return [1 / 2., 1 / 4., 1 / 8., 1 / 16., 1 / 32.]
def _init_weights(self):
# weight initialization
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out')
if m.bias is not None:
nn.init.zeros_(m.bias)
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
nn.init.normal_(m.weight, 0, 0.01)
nn.init.constant_(m.bias, 0)
def _make_layer(self, block, outplanes, blocks, stride=1, dilation=1, kernel=3):
""" Stack n bottleneck modules where n is inferred from the depth of the network.
Args:
block: block type used to construct ResNet
outplanes: number of output channels (need to multiply by block.expansion)
blocks: number of blocks to be built
stride: factor to reduce the spatial dimensionality in the first bottleneck of the block.
Returns: a Module consisting of n sequential bottlenecks.
"""
layers = []
layers.append(block(self.inplanes, outplanes, stride, dilation=dilation, kernel=kernel, norm=self.norm,
use_se=self.use_se, activation=self.activation_type))
self.inplanes = outplanes
for i in range(1, blocks):
layers.append(block(self.inplanes, outplanes, stride=1, dilation=dilation, kernel=kernel, norm=self.norm,
use_se=self.use_se, activation=self.activation_type))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.activation(x)
x = self.conv2(x)
x = self.bn2(x)
x = self.activation(x)
x = self.conv3(x)
x = self.bn3(x)
x = self.activation(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
if self.drop_rate > 0:
x = F.dropout(x, p=self.drop_rate, training=self.training)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
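

# Example usage sketch: builds the default-width model and runs a dummy
# ImageNet-sized batch through it.
if __name__ == '__main__':
    import torch

    model = MobileNetV1(widen_factor=1.0, layers=(2, 2, 6, 2), num_classes=1000)
    dummy = torch.randn(1, 3, 224, 224)
    logits = model(dummy)
    print(logits.shape)  # torch.Size([1, 1000])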
|
from benchmarks.base import BaseRemoteBench
class UpdateImportUrlBench(BaseRemoteBench):
def setup(self, remote):
super().setup(remote)
data_url = self.setup_data("100x1024")
self.dvc("import-url", data_url, "stage")
self.setup_data("200x1024", url=data_url)
def time_import_url_to_remote(self, _):
self.dvc("update", "stage.dvc", proc=True)
class UpdateImportUrlToRemoteBench(BaseRemoteBench):
def setup(self, remote):
super().setup(remote)
data_url = self.setup_data("100x1024")
self.dvc(
"import-url", data_url, "stage", "--to-remote",
)
self.setup_data("200x1024", url=data_url)
def time_import_url_to_remote(self, _):
self.dvc("update", "stage.dvc", "--to-remote", proc=True)
|
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class GeotermsItem(scrapy.Item):
# define the fields for your item here like:
domain = scrapy.Field()
term = scrapy.Field()
term_english = scrapy.Field()
definition = scrapy.Field()
pass
|
###############################################################################
##
## Copyright (C) 2013-2014 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
from __future__ import absolute_import
import six
class ComponentConfig:
"""
WAMP application component configuration. An instance of this class is
provided to the constructor of :class:`autobahn.wamp.protocol.ApplicationSession`.
"""
def __init__(self, realm = None, extra = None):
"""
Ctor.
:param realm: The realm the session should join.
:type realm: str
:param extra: Optional dictionary with extra configuration.
:type extra: dict
"""
if six.PY2 and type(realm) == str:
realm = six.u(realm)
self.realm = realm
self.extra = extra
def __str__(self):
return "ComponentConfig(realm = {}, extra = {})".format(self.realm, self.extra)
class RouterOptions:
"""
Router options for creating routers.
"""
URI_CHECK_LOOSE = "loose"
URI_CHECK_STRICT = "strict"
def __init__(self, uri_check = None):
"""
Ctor.
:param uri_check: Method which should be applied to check WAMP URIs.
:type uri_check: str
"""
self.uri_check = uri_check or RouterOptions.URI_CHECK_STRICT
def __str__(self):
return "RouterOptions(uri_check = {})".format(self.uri_check)
class HelloReturn:
"""
Base class for `HELLO` return information.
"""
class Accept(HelloReturn):
"""
Information to accept a `HELLO`.
"""
def __init__(self, authid = None, authrole = None, authmethod = None, authprovider = None):
"""
Ctor.
:param authid: The authentication ID the client is assigned, e.g. `"joe"` or `"[email protected]"`.
:type authid: str
:param authrole: The authentication role the client is assigned, e.g. `"anonymous"`, `"user"` or `"com.myapp.user"`.
:type authrole: str
:param authmethod: The authentication method that was used to authenticate the client, e.g. `"cookie"` or `"wampcra"`.
:type authmethod: str
:param authprovider: The authentication provider that was used to authenticate the client, e.g. `"mozilla-persona"`.
:type authprovider: str
"""
if six.PY2:
if type(authid) == str:
authid = six.u(authid)
if type(authrole) == str:
authrole = six.u(authrole)
if type(authmethod) == str:
authmethod = six.u(authmethod)
if type(authprovider) == str:
authprovider = six.u(authprovider)
assert(authid is None or type(authid) == six.text_type)
assert(authrole is None or type(authrole) == six.text_type)
assert(authmethod is None or type(authmethod) == six.text_type)
assert(authprovider is None or type(authprovider) == six.text_type)
self.authid = authid
self.authrole = authrole
self.authmethod = authmethod
self.authprovider = authprovider
def __str__(self):
return "Accept(authid = {}, authrole = {}, authmethod = {}, authprovider = {})".format(self.authid, self.authrole, self.authmethod, self.authprovider)
class Deny(HelloReturn):
"""
Information to deny a `HELLO`.
"""
def __init__(self, reason = u"wamp.error.not_authorized", message = None):
"""
Ctor.
        :param reason: The reason for denying the authentication (a URI, e.g. `wamp.error.not_authorized`)
:type reason: str
:param message: A human readable message (for logging purposes).
:type message: str
"""
if six.PY2:
if type(reason) == str:
reason = six.u(reason)
if type(message) == str:
message = six.u(message)
assert(type(reason) == six.text_type)
assert(message is None or type(message) == six.text_type)
self.reason = reason
self.message = message
def __str__(self):
return "Deny(reason = {}, message = '{}'')".format(self.reason, self.message)
class Challenge(HelloReturn):
"""
Information to challenge the client upon `HELLO`.
"""
def __init__(self, method, extra = None):
"""
Ctor.
:param method: The authentication method for the challenge (e.g. `"wampcra"`).
:type method: str
:param extra: Any extra information for the authentication challenge. This is
specific to the authentication method.
:type extra: dict
"""
if six.PY2:
if type(method) == str:
method = six.u(method)
self.method = method
self.extra = extra or {}
def __str__(self):
return "Challenge(method = {}, extra = {})".format(self.method, self.extra)
class HelloDetails:
"""
Provides details of a WAMP session while still attaching.
"""
def __init__(self, roles = None, authmethods = None, authid = None, pending_session = None):
"""
Ctor.
:param roles: The WAMP roles and features supported by the attaching client.
:type roles: dict
:param authmethods: The authentication methods the client is willing to perform.
:type authmethods: list
:param authid: The authentication ID the client wants to authenticate as. Required for WAMP-CRA.
:type authid: str
:param pending_session: The session ID the session will get once successfully attached.
:type pending_session: int
"""
self.roles = roles
self.authmethods = authmethods
self.authid = authid
self.pending_session = pending_session
def __str__(self):
return "HelloDetails(roles = {}, authmethods = {}, authid = {}, pending_session = {})".format(self.roles, self.authmethods, self.authid, self.pending_session)
class SessionDetails:
"""
Provides details for a WAMP session upon open.
@see: :func:`autobahn.wamp.interfaces.ISession.onJoin`
"""
def __init__(self, realm, session, authid = None, authrole = None, authmethod = None, authprovider = None):
"""
Ctor.
:param realm: The realm this WAMP session is attached to.
:type realm: str
:param session: WAMP session ID of this session.
:type session: int
"""
self.realm = realm
self.session = session
self.authid = authid
self.authrole = authrole
self.authmethod = authmethod
self.authprovider = authprovider
def __str__(self):
return "SessionDetails(realm = {}, session = {}, authid = {}, authrole = {}, authmethod = {})".format(self.realm, self.session, self.authid, self.authrole, self.authmethod)
class CloseDetails:
"""
Provides details for a WAMP session upon open.
@see: :func:`autobahn.wamp.interfaces.ISession.onLeave`
"""
def __init__(self, reason = None, message = None):
"""
Ctor.
        :param reason: The close reason (a URI, e.g. `wamp.close.normal`)
:type reason: str
:param message: Closing log message.
:type message: str
"""
self.reason = reason
self.message = message
def __str__(self):
return "CloseDetails(reason = {}, message = '{}'')".format(self.reason, self.message)
class SubscribeOptions:
"""
Used to provide options for subscribing in
:func:`autobahn.wamp.interfaces.ISubscriber.subscribe`.
"""
def __init__(self, match = None, details_arg = None):
"""
:param match: The topic matching method to be used for the subscription.
:type match: str
:param details_arg: When invoking the handler, provide event details
in this keyword argument to the callable.
:type details_arg: str
"""
assert(match is None or (type(match) == str and match in ['exact', 'prefix', 'wildcard']))
assert(details_arg is None or type(details_arg) == str)
if match and six.PY2 and type(match) == str:
match = six.u(match)
self.match = match
self.details_arg = details_arg
## options dict as sent within WAMP message
self.options = {'match': match}
def __str__(self):
return "SubscribeOptions(match = {}, details_arg = {})".format(self.match, self.details_arg)
class EventDetails:
"""
Provides details on an event when calling an event handler
previously registered.
"""
def __init__(self, publication, publisher = None):
"""
Ctor.
:param publication: The publication ID of the event (always present).
:type publication: int
:param publisher: The WAMP session ID of the original publisher of this event.
:type publisher: int
"""
self.publication = publication
self.publisher = publisher
def __str__(self):
return "EventDetails(publication = {}, publisher = {})".format(self.publication, self.publisher)
class PublishOptions:
"""
Used to provide options for subscribing in
:func:`autobahn.wamp.interfaces.IPublisher.publish`.
"""
def __init__(self,
acknowledge = None,
excludeMe = None,
exclude = None,
eligible = None,
discloseMe = None):
"""
Constructor.
:param acknowledge: If True, acknowledge the publication with a success or
error response.
:type acknowledge: bool
:param excludeMe: If True, exclude the publisher from receiving the event, even
if he is subscribed (and eligible).
:type excludeMe: bool
:param exclude: List of WAMP session IDs to exclude from receiving this event.
:type exclude: list
:param eligible: List of WAMP session IDs eligible to receive this event.
:type eligible: list
:param discloseMe: If True, request to disclose the publisher of this event
to subscribers.
:type discloseMe: bool
"""
assert(acknowledge is None or type(acknowledge) == bool)
assert(excludeMe is None or type(excludeMe) == bool)
assert(exclude is None or (type(exclude) == list and all(type(x) in six.integer_types for x in exclude)))
assert(eligible is None or (type(eligible) == list and all(type(x) in six.integer_types for x in eligible)))
assert(discloseMe is None or type(discloseMe) == bool)
self.acknowledge = acknowledge
self.excludeMe = excludeMe
self.exclude = exclude
self.eligible = eligible
self.discloseMe = discloseMe
## options dict as sent within WAMP message
self.options = {
'acknowledge': acknowledge,
'excludeMe': excludeMe,
'exclude': exclude,
'eligible': eligible,
'discloseMe': discloseMe
}
def __str__(self):
return "PublishOptions(acknowledge = {}, excludeMe = {}, exclude = {}, eligible = {}, discloseMe = {})".format(self.acknowledge, self.excludeMe, self.exclude, self.eligible, self.discloseMe)
class RegisterOptions:
"""
Used to provide options for registering in
:func:`autobahn.wamp.interfaces.ICallee.register`.
"""
def __init__(self, details_arg = None, pkeys = None, discloseCaller = None):
"""
Ctor.
:param details_arg: When invoking the endpoint, provide call details
in this keyword argument to the callable.
:type details_arg: str
"""
self.details_arg = details_arg
self.pkeys = pkeys
self.discloseCaller = discloseCaller
## options dict as sent within WAMP message
self.options = {
'pkeys': pkeys,
'discloseCaller': discloseCaller
}
def __str__(self):
return "RegisterOptions(details_arg = {}, pkeys = {}, discloseCaller = {})".format(self.details_arg, self.pkeys, self.discloseCaller)
class CallDetails:
"""
Provides details on a call when an endpoint previously
registered is being called and opted to receive call details.
"""
def __init__(self, progress = None, caller = None, authid = None, authrole = None, authmethod = None):
"""
Ctor.
:param progress: A callable that will receive progressive call results.
:type progress: callable
:param caller: The WAMP session ID of the caller, if the latter is disclosed.
:type caller: int
:param authid: The authentication ID of the caller.
:type authid: str
:param authrole: The authentication role of the caller.
:type authrole: str
"""
self.progress = progress
self.caller = caller
self.authid = authid
self.authrole = authrole
self.authmethod = authmethod
def __str__(self):
return "CallDetails(progress = {}, caller = {}, authid = {}, authrole = {}, authmethod = {})".format(self.progress, self.caller, self.authid, self.authrole, self.authmethod)
class CallOptions:
"""
Used to provide options for calling with :func:`autobahn.wamp.interfaces.ICaller.call`.
"""
def __init__(self,
onProgress = None,
timeout = None,
discloseMe = None,
runOn = None):
"""
Constructor.
:param onProgress: A callback that will be called when the remote endpoint
called yields interim call progress results.
:type onProgress: a callable
:param timeout: Time in seconds after which the call should be automatically canceled.
:type timeout: float
:param discloseMe: Request to disclose the identity of the caller (it's WAMP session ID)
to Callees. Note that a Dealer, depending on Dealer configuration, might
reject the request, or might disclose the Callee's identity without
a request to do so.
:type discloseMe: bool
        :param runOn: If present (non-None), indicates a distributed call. Distributed calls allow
           running a call issued by a Caller on one or more endpoints implementing the
           called procedure. Permissible values are: "all", "any" and "partition".
           If `runOn == "partition"`, then `runPartitions` MUST be present.
:type runOn: str
"""
assert(onProgress is None or callable(onProgress))
assert(timeout is None or (type(timeout) in list(six.integer_types) + [float] and timeout > 0))
assert(discloseMe is None or type(discloseMe) == bool)
assert(runOn is None or (type(runOn) == six.text_type and runOn in [u"all", u"any", u"partition"]))
self.onProgress = onProgress
self.timeout = timeout
self.discloseMe = discloseMe
self.runOn = runOn
## options dict as sent within WAMP message
self.options = {
'timeout': timeout,
'discloseMe': discloseMe
}
if onProgress:
self.options['receive_progress'] = True
def __str__(self):
return "CallOptions(onProgress = {}, timeout = {}, discloseMe = {}, runOn = {})".format(self.onProgress, self.timeout, self.discloseMe, self.runOn)
class CallResult:
"""
Wrapper for remote procedure call results that contain multiple positional
return values or keyword return values.
"""
def __init__(self, *results, **kwresults):
"""
Constructor.
:param results: The positional result values.
:type results: list
:param kwresults: The keyword result values.
:type kwresults: dict
"""
self.results = results
self.kwresults = kwresults
def __str__(self):
return "CallResult(results = {}, kwresults = {})".format(self.results, self.kwresults)
|
# -*- coding: utf-8 -*-
"""
[ Obtained from https://gist.github.com/chengdi123000/42ec8ed2cbef09ee050766c2f25498cb ]
Created on Wed Feb 14 16:17:38 2018
This handler is used to deal with logging with mpi4py in Python3.
@author: cheng
@reference:
https://cvw.cac.cornell.edu/python/logging
https://groups.google.com/forum/#!topic/mpi4py/SaNzc8bdj6U
https://gist.github.com/JohnCEarls/8172807
"""
#%% mpi4py logging handler
from mpi4py import MPI
import logging
from os.path import abspath
class MPIFileHandler(logging.FileHandler):
def __init__(
self,
filename,
mode=MPI.MODE_WRONLY | MPI.MODE_CREATE | MPI.MODE_APPEND,
encoding="utf-8",
delay=False,
comm=MPI.COMM_WORLD,
):
self.baseFilename = abspath(filename)
self.mode = mode
self.encoding = encoding
self.comm = comm
if delay:
# We don't open the stream, but we still need to call the
# Handler constructor to set level, formatter, lock etc.
logging.Handler.__init__(self)
self.stream = None
else:
logging.StreamHandler.__init__(self, self._open())
def _open(self):
stream = MPI.File.Open(self.comm, self.baseFilename, self.mode)
stream.Set_atomicity(True)
return stream
def emit(self, record):
"""
Emit a record.
If a formatter is specified, it is used to format the record.
The record is then written to the stream with a trailing newline. If
exception information is present, it is formatted using
traceback.print_exception and appended to the stream. If the stream
has an 'encoding' attribute, it is used to determine how to do the
output to the stream.
Modification:
stream is MPI.File, so it must use `Write_shared` method rather
than `write` method. And `Write_shared` method only accept
bytestring, so `encode` is used. `Write_shared` should be invoked
only once in each all of this emit function to keep atomicity.
"""
try:
msg = self.format(record)
stream = self.stream
stream.Write_shared((msg + self.terminator).encode(self.encoding))
# self.flush()
except Exception:
self.handleError(record)
def close(self):
if self.stream:
self.stream.Sync()
self.stream.Close()
self.stream = None
#%% example code
if __name__ == "__main__":
comm = MPI.COMM_WORLD
logger = logging.getLogger("rank[%i]" % comm.rank)
logger.setLevel(logging.DEBUG)
mh = MPIFileHandler("logfile.log")
formatter = logging.Formatter("%(asctime)s:%(name)s:%(levelname)s:%(message)s")
mh.setFormatter(formatter)
logger.addHandler(mh)
# 'application' code
logger.debug("debug message")
logger.info("info message")
logger.warning("warn message")
logger.error("error message")
logger.critical("critical message")
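    # To see the shared log file written from several ranks, run this module
    # under MPI, e.g. (exact launcher name depends on your MPI installation):
    #   mpiexec -n 4 python <this_file>.py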
|
from typing import List
class Solution:
def mostVisited(self, n: int, rounds: List[int]) -> List[int]:
"""Array.
Running time: O(n * m) where m is the length of rounds.
"""
f = [0] * n
for i in range(len(rounds)-1):
s, e = rounds[i], rounds[i+1]
if e < s:
e += n
while s < e:
f[s%n-1] += 1
s += 1
f[rounds[-1]-1] += 1
m = max(f)
res = []
for i in range(n):
if f[i] == m:
res.append(i + 1)
return res
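# Quick sanity check added for illustration (LeetCode 1560, example 1):
# 4 sectors, rounds start at 1, go to 3, wrap back to 1 and end at 2.
if __name__ == "__main__":
    print(Solution().mostVisited(4, [1, 3, 1, 2]))  # expected: [1, 2]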
|
"""
Generator object for standard 96-well plate.
"""
class WellIdGenerator(object):
"""Generates 96-plate well ids from A1 ... A12, ..., H1 ... H12
Also returns plate number if requested.
"""
LETTER_TRANSITION_TABLE = {
'A': 'B',
'B': 'C',
'C': 'D',
'D': 'E',
'E': 'F',
'F': 'G',
'G': 'H',
'H': 'A',
}
def __init__(self, include_plate=False):
self.letter = 'A'
self.number = 1
self.include_plate = include_plate
if include_plate:
self.plate = 1
def __iter__(self):
return self
def next(self):
# Create the current return value.
current_id = self.letter + "%02d" % (self.number,)
# Bump the state.
if self.number == 12:
self.letter = self.LETTER_TRANSITION_TABLE[self.letter]
# If we are back to A, bump the plate number.
if self.include_plate and self.letter == 'A':
self.plate += 1
if self.number == 12:
self.number = 1
else:
self.number += 1
# Return current.
if self.include_plate:
return (self.plate, current_id)
        return current_id
    # Alias so the generator also works as a Python 3 iterator.
    __next__ = next
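# Small usage sketch added for illustration: take the first 14 well ids.
if __name__ == "__main__":
    from itertools import islice
    print(list(islice(WellIdGenerator(), 14)))  # A01 ... A12, B01, B02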
|
from unittest import TestCase
from xml_collation.TextGraph import convert_superwitness_to_textgraph
from xml_collation.collate_xml_hierarchy import convert_xml_string_into_tokens, align_tokens_and_return_superwitness
from xml_collation.text_graph_exporter import export_as_dot
class DotTest(TestCase):
def test_dot1_textnodes_only(self):
witness_a = "<tei><s>x y z</s></tei>"
witness_b = "<tei><s>x</s>y<s>z</s></tei>"
tokens_a = convert_xml_string_into_tokens(witness_a)
tokens_b = convert_xml_string_into_tokens(witness_b)
superwitness = align_tokens_and_return_superwitness(tokens_a, tokens_b)
textgraph = convert_superwitness_to_textgraph(superwitness)
dot_export = export_as_dot(textgraph)
expected_out = """strict digraph TextGraph {
1 [label="x"]
2 [label="y"]
3 [label="z"]
1 -> 2
2 -> 3
{ rank=same; 1; 2; 3 }
}"""
self.assertEqual(expected_out, dot_export)
    # For the next test there are again two concerns:
    # vertices for the markup, and
    # how we make sure that we don't influence the previous test too much.
    # Note that annotation vertices need to have unique ids
    # (distinct from the text vertices).
    # NOTE: No variation in the text nodes
def test_dot_export_including_annotation(self):
witness_a = "<tei><s1>x y z</s1></tei>"
witness_b = "<tei><s2>x</s2>y<s3>z</s3></tei>"
tokens_a = convert_xml_string_into_tokens(witness_a)
tokens_b = convert_xml_string_into_tokens(witness_b)
superwitness = align_tokens_and_return_superwitness(tokens_a, tokens_b)
textgraph = convert_superwitness_to_textgraph(superwitness)
dot_export = export_as_dot(textgraph, annotations=True)
expected_out = """strict digraph TextGraph {
1 [label="x"]
2 [label="y"]
3 [label="z"]
1 -> 2
2 -> 3
{ rank=same; 1; 2; 3 }
a1 [label="tei"]
a2 [label="s1"]
a3 [label="s2"]
a4 [label="s3"]
a2 -> a1
a3 -> a1
1 -> a2
1 -> a3
a2 -> a1
2 -> a2
a2 -> a1
a4 -> a1
3 -> a2
3 -> a4
}"""
# TODO: There are some duplication annotation edges here that should be removed! (a2 - a1)
# NOTE: For now we work around the problem by adding the "strict" keyword to the DOT export.
self.assertEqual(expected_out, dot_export)
# NOTE: In this test not every token is in both witnesses
def test_dot_export_including_textual_variation(self):
witness_a = "<tei><s1><add>a</add>x y z</s1></tei>"
witness_b = "<tei><s2>x</s2>y<s3>z</s3></tei>"
tokens_a = convert_xml_string_into_tokens(witness_a)
tokens_b = convert_xml_string_into_tokens(witness_b)
superwitness = align_tokens_and_return_superwitness(tokens_a, tokens_b)
textgraph = convert_superwitness_to_textgraph(superwitness)
dot_export = export_as_dot(textgraph, annotations=True)
expected_out = """strict digraph TextGraph {
1 [label="a"]
2 [label="x"]
3 [label="y"]
4 [label="z"]
1 -> 2
2 -> 3
3 -> 4
{ rank=same; 1; 2; 3; 4 }
a1 [label="tei"]
a2 [label="s1"]
a3 [label="add"]
a4 [label="s2"]
a5 [label="s3"]
a2 -> a1
a3 -> a2
1 -> a3
a2 -> a1
a4 -> a1
2 -> a2
2 -> a4
a2 -> a1
3 -> a2
a2 -> a1
a5 -> a1
4 -> a2
4 -> a5
}"""
# TODO: There are some duplication annotation edges here that should be removed! (a2 - a1)
# NOTE: For now we work around the problem by adding the "strict" keyword to the DOT export.
self.assertEqual(expected_out, dot_export)
|
import copy
import re
import numpy as np
import torch
import ops.norm as norm
import ops.tests as tests
def normalize_filter(bs, ws):
bs = {k: v.float() for k, v in bs.items()}
ws = {k: v.float() for k, v in ws.items()}
norm_bs = {}
for k in bs:
ws_norm = torch.norm(ws[k], dim=0, keepdim=True)
bs_norm = torch.norm(bs[k], dim=0, keepdim=True)
norm_bs[k] = ws_norm / (bs_norm + 1e-7) * bs[k]
return norm_bs
def ignore_bn(ws):
ignored_ws = {}
for k in ws:
if len(ws[k].size()) < 2:
ignored_ws[k] = torch.zeros(size=ws[k].size(), device=ws[k].device)
else:
ignored_ws[k] = ws[k]
return ignored_ws
def ignore_running_stats(ws):
return ignore_kw(ws, ["num_batches_tracked"])
def ignore_kw(ws, kws=None):
kws = [] if kws is None else kws
ignored_ws = {}
for k in ws:
if any([re.search(kw, k) for kw in kws]):
ignored_ws[k] = torch.zeros(size=ws[k].size(), device=ws[k].device)
else:
ignored_ws[k] = ws[k]
return ignored_ws
def rand_basis(ws, gpu=True):
return {k: torch.randn(size=v.shape, device="cuda" if gpu else None) for k, v in ws.items()}
def create_bases(model, kws=None, gpu=True):
kws = [] if kws is None else kws
ws0 = copy.deepcopy(model.state_dict())
bases = [rand_basis(ws0, gpu) for _ in range(2)] # Use two bases
bases = [normalize_filter(bs, ws0) for bs in bases]
bases = [ignore_bn(bs) for bs in bases]
bases = [ignore_kw(bs, kws) for bs in bases]
return bases
def get_loss_landscape(model, n_ff, dataset, transform=None,
bases=None, kws=None,
cutoffs=(0.0, 0.9), bins=np.linspace(0.0, 1.0, 11), verbose=False, period=10, gpu=True,
x_min=-1.0, x_max=1.0, n_x=11, y_min=-1.0, y_max=1.0, n_y=11):
model = model.cuda() if gpu else model.cpu()
model = copy.deepcopy(model)
ws0 = copy.deepcopy(model.state_dict())
kws = [] if kws is None else kws
bases = create_bases(model, kws, gpu) if bases is None else bases
xs = np.linspace(x_min, x_max, n_x)
ys = np.linspace(y_min, y_max, n_y)
ratio_grid = np.stack(np.meshgrid(xs, ys), axis=0).transpose((1, 2, 0))
metrics_grid = {}
for ratio in ratio_grid.reshape([-1, 2]):
ws = copy.deepcopy(ws0)
gs = [{k: r * bs[k] for k in bs} for r, bs in zip(ratio, bases)]
gs = {k: torch.sum(torch.stack([g[k] for g in gs]), dim=0) + ws[k] for k in gs[0]}
model.load_state_dict(gs)
print("Grid: ", ratio, end=", ")
*metrics, cal_diag = tests.test(model, n_ff, dataset, transform=transform,
cutoffs=cutoffs, bins=bins, verbose=verbose, period=period, gpu=gpu)
l1, l2 = norm.l1(model, gpu).item(), norm.l2(model, gpu).item()
metrics_grid[tuple(ratio)] = (l1, l2, *metrics)
return metrics_grid
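# Illustrative, fully-commented sketch (added; not part of the original module)
# of how the returned grid might be consumed for plotting. `model`, `val_dataset`
# and `n_ff` are placeholders for whatever the surrounding project provides.
#
#   metrics_grid = get_loss_landscape(model, n_ff=1, dataset=val_dataset)
#   xs = sorted({k[0] for k in metrics_grid})
#   ys = sorted({k[1] for k in metrics_grid})
#   # each value is (l1, l2, *metrics); index 2 is the first metric after the norms
#   landscape = np.array([[metrics_grid[(x, y)][2] for x in xs] for y in ys])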
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Project : tql-Python.
# @File : pseudo
# @Time : 2020/9/6 1:56 PM
# @Author : yuanjie
# @Email : [email protected]
# @Software : PyCharm
# @Description :
import numpy as np
import pandas as pd
from lightgbm import LGBMClassifier
from sklearn.metrics import roc_auc_score
# Currently only binary classification is supported. TODO: regression
class Pseudo(object):
def __init__(self, X, y, X_test, confidences=(0.99, 0.01), **kwargs):
self.X = X
self.y = y
self.X_test = X_test
        self.confidences = confidences  # order: (positive, negative); proportion of samples added to training for each class  # TODO: percentiles or absolute counts, split by positive/negative
def run(self, n_iter=1):
X, y = self.X.copy(), self.y.copy()
stat = []
for i in range(0, n_iter + 1):
clf, score, self.test_preds = self.fit_predict(X, y, self.X_test)
if i == 0:
print(f"Init Score: {score}")
else:
print(f"PseudoLabeling{i} Score: {score}")
X, y = self.pseudo_labeling(self.test_preds)
stat.append((i, clf, score))
self.stat_info = pd.DataFrame(stat, columns=['n_iter', 'model', 'score'])
print(self.stat_info)
def fit_predict(self, X, y, X_test, **kwargs):
"""重写继承即可
:return: clf, score, test_preds
"""
clf = LGBMClassifier()
clf.fit(X, y)
        score = roc_auc_score(y, clf.predict_proba(X)[:, 1])  # validation strategy??? offline CV on the training set
test_preds = clf.predict_proba(X_test)
return clf, score, test_preds
    def pseudo_labeling(self, preds):
        if preds.ndim == 2:  # predict_proba output: keep only the positive-class probability
            preds = preds[:, 1]
        thresholds = np.quantile(preds, self.confidences)  # compute the upper and lower quantile thresholds
        print(f"Thresholds: {thresholds}")
        pos_index = np.where(preds > thresholds[0])[0]
        neg_index = np.where(preds < thresholds[1])[0]
self.X_pseudo = self.X_test[np.r_[pos_index, neg_index], :]
self.y_pseudo = np.r_[np.ones(len(pos_index)), np.zeros(len(neg_index))]
        # merge the pseudo-labeled samples into the original training data
print(f"Add: PositiveSamples={len(pos_index)} NegativeSamples={len(neg_index)}\n")
X = np.r_[self.X, self.X_pseudo]
y = np.r_[self.y, self.y_pseudo]
return X, y
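# Self-contained usage sketch added for illustration: synthetic binary
# classification data stands in for a real train/test split.
if __name__ == "__main__":
    from sklearn.datasets import make_classification
    X_all, y_all = make_classification(n_samples=2000, n_features=20, random_state=42)
    X_train, y_train, X_test = X_all[:1500], y_all[:1500], X_all[1500:]
    Pseudo(X_train, y_train, X_test, confidences=(0.99, 0.01)).run(n_iter=2)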
|
import argbind
@argbind.bind(without_prefix=True, positional=True)
def main(
arg1 : int,
arg2 : str = 'arg2',
arg3 : float = 1.0
):
"""Same script, ArgBind style.
Parameters
----------
arg1 : int
The first argument (positional).
arg2 : str, optional
The second argument (keyword), by default 'arg2'.
arg3 : float, optional
The third argument (keyword), by default 1.0
"""
print(arg1, arg2, arg3)
if __name__ == "__main__":
args = argbind.parse_args()
with argbind.scope(args):
main()
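# Example invocations, added for illustration (assuming this file is saved as
# example.py; the exact flags are generated by ArgBind from the signature and
# docstring above):
#   python example.py 1
#   python example.py 1 --arg2 hello --arg3 2.5
#   python example.py -h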
|
'''
Copyright (c) 2021. IIP Lab, Wuhan University
'''
import os
import argparse
import numpy as np
import pandas as pd
from scipy.stats import spearmanr
import tensorflow as tf
from tensorflow.keras import layers, models
from tensorflow.keras import backend as K
from data import *
from train import *
from layers import ProductOfExpertGaussian as POE
### Modality to their short name
mod_rep_dict = {
"resnet50" : "V",
"audiovgg" : "A",
"fudannlp" : "T",
}
### Short name to the modalities
rep_mod_dict = \
{value: key for key, value in mod_rep_dict.items()}
### Modality to their shape
mod_shape_dict = {
"resnet50" : 128,
"audiovgg" : 128,
"fudannlp" : 20,
}
def ord_rep(rep_str):
ord_rep = ""
for i, letter in enumerate(["V", "A", "T"]):
if letter in rep_str:
ord_rep += letter
return ord_rep
def rep2mods(rep_str):
test_mods = []
for i, letter in enumerate(["V", "A", "T"]):
if letter in rep_str:
test_mods.append(rep_mod_dict[letter])
return test_mods
def mods2index(mods_list, mod_pos_dict):
idx_list = [mod_pos_dict[mod] for mod in mods_list]
return sorted(idx_list)
def get_model_info(model_path):
info_dict = {}
path_list = model_path.split(os.path.sep)
info_dict["encodertype"] = path_list[-6]
info_dict["length"] = int(path_list[-5].split("_")[-1])
info_dict["split"] = int(path_list[-4])
info_dict["lambda"] = float(path_list[-3])
return info_dict
def get_testgen(feature_root, target_root, split_root, test_mods, phase):
'''
Get data generator for test
'''
test_gen = VariationalEncoderDecoderGen(
phase = phase,
feature_root = feature_root,
target_root = target_root,
split_root = split_root,
modalities = test_mods,
batch_size = 128,
shuffle = False, ### You cannot shuffle data in test phase
concat = False,
)
return test_gen
def build_test_model(model_path,
train_shapes,
test_mods,
rnn_type,
mod_pos_dict,
modalities,
summary=False):
model = get_model(train_shapes, rnn_type, modalities, summary=False)
model.load_weights(model_path)
if modalities == ["user"]:
### Get the input tensor
abst_in = model.inputs[-1]
uid_in = model.inputs[0]
mods_in = model.inputs[1]
uid_emb = model.get_layer("uid_emb")(uid_in)
uid_emb = model.get_layer("uid_emb_reshape")(uid_emb)
concat = layers.Concatenate(axis=-1)([uid_emb, mods_in])
mean_stds = model.encoders[0](concat)
mean = mean_stds[0]
input_space = [uid_in] + [mods_in] + [abst_in]
preds_seq = model.decoder([mean, abst_in])
### Get learnt user embeddings
test_model = [models.Model(inputs=input_space, outputs=mean_stds)]
### Evaluation
test_model.append(models.Model(inputs=input_space, outputs=preds_seq))
if summary:
[test_model[i].summary() for i in range(len(test_model))]
else:
### Get index for each modality
mod_idxes = mods2index(test_mods, mod_pos_dict)
### Get the input tensor indicated by mod_idxes
uemb_in = model.inputs[0]
mods_in = [model.inputs[1:-1][i] for i in mod_idxes]
abst_in = model.inputs[-1]
### Build the model for prediction
encoders = [model.encoders[i] for i in mod_idxes]
mean_stds = [encoder(mod_in) for encoder, mod_in in zip(encoders, mods_in)]
mean, _ = POE()(mean_stds)
preds_seq = model.decoder([mean, abst_in])
test_model = models.Model(inputs=[uemb_in]+mods_in+[abst_in], outputs=preds_seq)
if summary:
test_model.summary()
return test_model
def user_predict(model, test_gen, pred_path):
num_videos = test_gen.num_videos
batch_size = test_gen.batch_size
timesteps = test_gen.timesteps
# emb_dim = model[0].output_shape[0][-1]
### for user-encoder evaluation
preds = np.empty((num_videos, timesteps), dtype=np.float32)
truth = np.empty((num_videos, timesteps), dtype=np.float32)
for i, [features, target] in enumerate(test_gen):
preds_batch = np.squeeze(model[1].predict(features))
preds[i * batch_size:(i + 1) * batch_size] = preds_batch.squeeze()
truth[i * batch_size:(i + 1) * batch_size] = target.squeeze()
if pred_path is not None:
print("Prediction has been saved to {}".format(pred_path))
np.save(pred_path, preds)
return preds, truth
def uemb_output(model, test_gen, emb_path):
num_videos = test_gen.num_videos
batch_size = test_gen.batch_size
# timesteps = test_gen.timesteps
emb_dim = model[0].output_shape[0][-1]
### for user embeddings
uemb_mean = np.empty((num_videos, emb_dim), dtype=np.float32)
uemb_std = np.empty((num_videos, emb_dim), dtype=np.float32)
for i, [features, target] in enumerate(test_gen):
uemb_mean[i * batch_size:(i + 1) * batch_size] = model[0].predict(features)[0].squeeze()
uemb_std[i * batch_size:(i + 1) * batch_size] = model[0].predict(features)[1].squeeze()
uemb = np.concatenate((uemb_mean[:, None, :], uemb_std[:, None, :]), axis=1)
if emb_path is not None:
print("User embeddings have been saved to {}".format(emb_path))
np.save(emb_path, uemb)
def predict(test_model, test_gen, save_path):
num_videos = test_gen.num_videos
batch_size = test_gen.batch_size
timesteps = test_gen.timesteps
preds = np.empty([num_videos, timesteps], dtype=np.float32)
truth = np.empty([num_videos, timesteps], dtype=np.float32)
for i, [features, targets] in enumerate(test_gen):
preds[i*batch_size:(i+1)*batch_size] = test_model.predict(features).squeeze()
truth[i*batch_size:(i+1)*batch_size] = targets.squeeze()
if save_path is not None:
print("Prediction saved to {}".format(save_path))
np.save(save_path, preds)
return preds, truth
def evaluate(preds, truth, save_path):
def pearson_corr(preds, truth):
corr = 0
num_samples = len(preds)
cnt_samples = num_samples
for i in range(num_samples):
corr_this = pd.Series(preds[i]).corr(pd.Series(truth[i]))
if np.isnan(corr_this):
cnt_samples = cnt_samples-1
continue
corr += corr_this
return corr / cnt_samples
def spearman_corr(preds, truth):
corr = 0
p_val = 0
num_samples = len(preds)
cnt_samples = num_samples
for i in range(num_samples):
corr_this, p_value_this = spearmanr(pd.Series(preds[i]), pd.Series(truth[i]))
if np.isnan(corr_this):
cnt_samples = cnt_samples-1
continue
corr += corr_this
return corr / cnt_samples
def nmse(preds, truth):
return np.mean(np.square(preds - truth)) / (truth.std()**2)
nmse = nmse(preds, truth)
corr = pearson_corr(preds, truth)
srcc = spearman_corr(preds, truth)
table = pd.DataFrame({
"nmse" : [nmse],
"corr" : [corr],
"srcc" : [srcc]})
print("test nmse: {:.4f}".format(nmse))
print("test corr: {:.4f}".format(corr))
print("test srcc: {:.4f}".format(srcc))
table.to_csv(save_path, mode='a', index=False, sep="\t")
return nmse, corr, srcc
def test_run(model_path, rnn_type="simple", abbr_test_mods="U", device="0"):
### Set tensorflow session
tf.reset_default_graph()
os.environ["CUDA_VISIBLE_DEVICES"] = device
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
K.set_session(sess)
### Save path to the prediction result
model_info = get_model_info(model_path)
model_root = os.path.split(model_path)[0]
test_root = os.path.join(model_root, "test", std_mods(abbr_test_mods))
if not os.path.exists(test_root):
os.makedirs(test_root)
pred_path = os.path.join(test_root, "predict.npy")
### Get the test data generator
feature_root = os.path.join("data")
split_root = os.path.join(feature_root, "split", str(model_info["split"]))
target_root = os.path.join(feature_root, "len_{}".format(model_info["length"]))
### Get the model for prediction
if model_info["encodertype"] == "user":
train_mods = ["user"]
mod_pos_dict = {"user": 0}
uemb_path = os.path.join(feature_root, "user_emb.npy")
test_mods = train_mods
train_shapes = [[1], [3]] + [[model_info["length"], 1]]
test_model = build_test_model(model_path, train_shapes, test_mods, rnn_type, mod_pos_dict, train_mods)
test_gen = get_testgen(feature_root, target_root, split_root, test_mods, phase="test")
### Evaluation
preds, truth = user_predict(test_model, test_gen, pred_path)
### User embeddings output
uemb_gen = get_testgen(feature_root, target_root, split_root, test_mods, phase="all")
uemb_output(test_model, uemb_gen, uemb_path)
else:
train_mods = ["resnet50", "audiovgg", "fudannlp"]
mod_pos_dict = {mod: train_mods.index(mod) for mod in mod_rep_dict.keys()}
test_mods = rep2mods(ord_rep(abbr_test_mods))
train_shapes = [[2, 8]] + [[mod_shape_dict[mod]] for mod in train_mods] + [[model_info["length"], 1]]
test_model = build_test_model(model_path, train_shapes, test_mods, rnn_type, mod_pos_dict, train_mods)
test_gen = get_testgen(feature_root, target_root, split_root, test_mods, phase="test")
preds, truth = predict(test_model, test_gen, pred_path)
### Evaluate model with numerous indexes
eval_path = os.path.join(test_root, "eval.txt")
nmse, corr, srcc = evaluate(preds, truth, eval_path)
K.clear_session()
return nmse, corr, srcc
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--model_path", type=str,
help="path where the pre-trained model is stored.")
parser.add_argument("--rnn_type", type=str, default="simple",
help="type of decoder")
parser.add_argument("--test_mods", type=str, default="U",
help="modalities available in the test phase")
parser.add_argument("--device", type=str, default="0",
help="specify the GPU device")
args = parser.parse_args()
    test_run(model_path=args.model_path, rnn_type=args.rnn_type, abbr_test_mods=args.test_mods, device=args.device)
|
import math
import os
import pickle
import struct
import numpy as np
import quaternion
import numba as nb
import moderngl
from visnav.missions.bennu import BennuSystemModel
try:
from moderngl.ext.obj import Obj
except:
from ModernGL.ext.obj import Obj
from visnav.algo import tools
from visnav.algo.image import ImageProc
from visnav.iotools.objloader import ShapeModel
from visnav.missions.didymos import DidymosSystemModel, DidymosPrimary
from visnav.missions.rosetta import RosettaSystemModel, ChuryumovGerasimenko
#from memory_profiler import profile
class RenderEngine:
_ctx = None
(
_LOC_TEXTURE,
_LOC_SHADOW_MAP,
_LOC_HAPKE_K,
) = range(3)
(
REFLMOD_LAMBERT,
REFLMOD_LUNAR_LAMBERT,
REFLMOD_HAPKE,
) = range(3)
REFLMOD_PARAMS = {
REFLMOD_LAMBERT: [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
REFLMOD_LUNAR_LAMBERT: ChuryumovGerasimenko.LUNAR_LAMBERT_PARAMS,
REFLMOD_HAPKE: ChuryumovGerasimenko.HAPKE_PARAMS,
}
# phase angle (g) range: np.linspace(0, 180, 19)
# roughness angle range (th_p) range: np.linspace(0, 60, 7)
HAPKE_K = np.array([
[1.00, 1.00, 1.00, 1.00, 1.00, 1.00, 1.00],
# [1.00, 0.997, 0.991, 0.984, 0.974, 0.961, 0.943], # g=2deg
# [1.00, 0.994, 0.981, 0.965, 0.944, 0.918, 0.881], # g=5deg
[1.00, 0.991, 0.970, 0.943, 0.909, 0.866, 0.809],
[1.00, 0.988, 0.957, 0.914, 0.861, 0.797, 0.715],
[1.00, 0.986, 0.947, 0.892, 0.825, 0.744, 0.644],
[1.00, 0.984, 0.938, 0.871, 0.789, 0.692, 0.577],
[1.00, 0.982, 0.926, 0.846, 0.748, 0.635, 0.509],
[1.00, 0.979, 0.911, 0.814, 0.698, 0.570, 0.438],
[1.00, 0.974, 0.891, 0.772, 0.637, 0.499, 0.366],
[1.00, 0.968, 0.864, 0.719, 0.566, 0.423, 0.296],
[1.00, 0.959, 0.827, 0.654, 0.487, 0.346, 0.231],
[1.00, 0.946, 0.777, 0.575, 0.403, 0.273, 0.175],
[1.00, 0.926, 0.708, 0.484, 0.320, 0.208, 0.130],
[1.00, 0.894, 0.617, 0.386, 0.243, 0.153, 0.094],
[1.00, 0.840, 0.503, 0.290, 0.175, 0.107, 0.064],
[1.00, 0.747, 0.374, 0.201, 0.117, 0.070, 0.041],
[1.00, 0.590, 0.244, 0.123, 0.069, 0.040, 0.023],
[1.00, 0.366, 0.127, 0.060, 0.032, 0.018, 0.010],
[1.00, 0.128, 0.037, 0.016, 0.0085, 0.0047, 0.0026],
[1.00, 0, 0, 0, 0, 0, 0],
]).T
def __init__(self, view_width, view_height, antialias_samples=0, enable_extra_data=False):
if RenderEngine._ctx is None:
RenderEngine._ctx = moderngl.create_standalone_context()
self._ctx = RenderEngine._ctx
self._width = view_width
self._height = view_height
self._samples = antialias_samples
self._enable_extra_data = enable_extra_data
self._wireframe_prog = self._load_prog('wireframe.vert', 'wireframe.frag', 'wireframe2.geom')
self._shadow_prog = self._load_prog('shadow.vert', 'shadow.frag')
if self._enable_extra_data:
self._extra_data_prog = self._load_prog('extra_data.vert', 'extra_data.frag')
self._prog = self._load_prog('shader_v400.vert', 'shader_v400.frag')
self._cbo = self._ctx.renderbuffer((view_width, view_height), samples=antialias_samples, dtype='f4')
self._dbo = self._ctx.depth_texture((view_width, view_height), samples=antialias_samples, alignment=1)
self._fbo = self._ctx.framebuffer([self._cbo], self._dbo)
if self._samples > 0:
self._cbo2 = self._ctx.renderbuffer((view_width, view_height), dtype='f4')
self._dbo2 = self._ctx.depth_texture((view_width, view_height), alignment=1)
self._fbo2 = self._ctx.framebuffer([self._cbo2], self._dbo2)
# for shadows
n = int(math.sqrt(self._samples or 1))
self._scbo = self._ctx.renderbuffer((view_width*n, view_height*n))
self._sdbo = self._ctx.depth_texture((view_width*n, view_height*n), alignment=1)
self._sfbo = self._ctx.framebuffer([self._scbo], self._sdbo)
self._vbos = []
self._objs = []
self._s_vbos = []
self._s_objs = []
self._e_vbos = []
self._e_objs = []
self._w_vbos = []
self._w_objs = []
self._raw_objs = []
self._textures = []
self._proj_mx = None
self._view_mx = np.identity(4)
self._model_mx = None
self._frustum_near = None
self._frustum_far = None
self._persp_proj = False
def __del__(self):
self._wireframe_prog.release()
self._shadow_prog.release()
if self._enable_extra_data:
self._extra_data_prog.release()
self._prog.release()
self._cbo.release()
self._dbo.release()
self._fbo.release()
self._scbo.release()
self._sdbo.release()
self._sfbo.release()
if self._samples > 0:
self._cbo2.release()
self._dbo2.release()
self._fbo2.release()
for o in self._objs:
o.release()
for o in self._vbos:
o.release()
for o in self._s_objs:
o.release()
for o in self._s_vbos:
o.release()
for o in self._e_objs:
o.release()
for o in self._e_vbos:
o.release()
for o in self._w_objs:
o.release()
for o in self._w_vbos:
o.release()
for t in self._textures:
if t is not None:
t.release()
def _load_prog(self, vert, frag, geom=None):
vertex_shader_source = open(os.path.join(os.path.dirname(__file__), vert)).read()
fragment_shader_source = open(os.path.join(os.path.dirname(__file__), frag)).read()
geom_shader_source = None if geom is None else open(os.path.join(os.path.dirname(__file__), geom)).read()
return self._ctx.program(vertex_shader=vertex_shader_source,
fragment_shader=fragment_shader_source,
geometry_shader=geom_shader_source)
@property
def ctx(self):
return self._ctx
@property
def width(self):
return self._width
@property
def height(self):
return self._height
def set_frustum(self, x_fov, y_fov, frustum_near, frustum_far):
self._frustum_near = frustum_near
self._frustum_far = frustum_far
self._persp_proj = True
# calculate projection matrix based on frustum
n = frustum_near
f = frustum_far
r = n * math.tan(math.radians(x_fov/2))
t = n * math.tan(math.radians(y_fov/2))
self._proj_mx = np.zeros((4, 4))
self._proj_mx[0, 0] = n/r
self._proj_mx[1, 1] = n/t
self._proj_mx[2, 2] = -(f+n)/(f-n)
self._proj_mx[3, 2] = -1
self._proj_mx[2, 3] = -2*f*n/(f-n)
@property
def frustum_near(self):
return self._frustum_near
@property
def frustum_far(self):
return self._frustum_far
def set_orth_frustum(self, width, height, frustum_near, frustum_far):
self._frustum_near = n = frustum_near
self._frustum_far = f = frustum_far
self._persp_proj = False
l = -width/2
r = width/2
b = -height/2
t = height/2
self._proj_mx = self._ortho_mx_size(l, r, b, t, n, f)
def load_object(self, object, obj_idx=None, smooth=False, wireframe=False, cache_file=None):
if cache_file is None or not os.path.isfile(cache_file):
if isinstance(object, str):
object = ShapeModel(fname=object)
elif isinstance(object, Obj):
object = ShapeModel(data={
'faces': np.array(object.face, dtype=np.uint32)[:, ],
'vertices': np.array(object.vert, dtype=np.float32),
'normals': np.array(object.norm, dtype=np.float32),
'texcoords': np.array(object.text, dtype=np.float32)[:, :2],
})
object.load_texture()
obj_bytes = None if wireframe else object.pack_all()
s_obj_bytes = object.pack_simple()
object = None if wireframe else object
if cache_file is not None:
with open(cache_file, 'wb') as fh:
pickle.dump((object, obj_bytes, s_obj_bytes), fh)
else:
with open(cache_file, 'rb') as fh:
object, obj_bytes, s_obj_bytes = pickle.load(fh)
if wireframe:
return self.load_cached_wf_object(s_obj_bytes, obj_idx=obj_idx)
else:
return self.load_cached_object(object, obj_bytes, s_obj_bytes, obj_idx=obj_idx)
def load_cached_object(self, object, obj_bytes, s_obj_bytes, obj_idx=None):
if obj_idx is not None:
self._objs[obj_idx].release()
self._vbos[obj_idx].release()
self._s_objs[obj_idx].release()
self._s_vbos[obj_idx].release()
if self._enable_extra_data:
self._e_objs[obj_idx].release()
self._e_vbos[obj_idx].release()
self._textures[obj_idx].release()
texture = None
if object.tex is not None:
texture = self._ctx.texture(object.tex.T.shape, 1, np.flipud(object.tex).tobytes(), dtype='f4')
texture.build_mipmaps()
# TODO: check if this can be done and if it's faster:
# if obj_idx is not None:
# self._vbos[obj_idx].orphan(len(obj_bytes))
# self._vbos[obj_idx].write(obj_bytes)
# # no changes to obj
# else:
# # continue as before
vbo = self._ctx.buffer(obj_bytes)
obj = self._ctx.simple_vertex_array(self._prog, vbo, 'vertexPosition_modelFrame', 'vertexNormal_modelFrame', 'aTexCoords')
s_vbo = self._ctx.buffer(s_obj_bytes)
s_obj = self._ctx.simple_vertex_array(self._shadow_prog, s_vbo, 'vertexPosition_modelFrame')
if self._enable_extra_data:
e_vbo = self._ctx.buffer(obj_bytes)
e_obj = self._ctx.simple_vertex_array(self._extra_data_prog, e_vbo, 'vertexPosition_modelFrame',
'vertexNormal_modelFrame', 'aTexCoords')
if obj_idx is None:
self._vbos.append(vbo)
self._objs.append(obj)
self._s_vbos.append(s_vbo)
self._s_objs.append(s_obj)
if self._enable_extra_data:
self._e_vbos.append(e_vbo)
self._e_objs.append(e_obj)
self._textures.append(texture)
self._raw_objs.append(object)
else:
self._vbos[obj_idx] = vbo
self._objs[obj_idx] = obj
self._s_vbos[obj_idx] = s_vbo
self._s_objs[obj_idx] = s_obj
if self._enable_extra_data:
self._e_vbos[obj_idx] = e_vbo
self._e_objs[obj_idx] = e_obj
self._raw_objs[obj_idx] = object
self._textures[obj_idx] = texture
return len(self._objs) - 1
def load_cached_wf_object(self, w_obj_bytes, obj_idx=None):
if obj_idx is not None:
self._w_objs[obj_idx].release()
self._w_vbos[obj_idx].release()
w_vbo = self._ctx.buffer(w_obj_bytes)
w_obj = self._ctx.simple_vertex_array(self._wireframe_prog, w_vbo, 'vertexPosition_modelFrame')
if obj_idx is None:
self._w_vbos.append(w_vbo)
self._w_objs.append(w_obj)
else:
self._w_vbos[obj_idx] = w_vbo
self._w_objs[obj_idx] = w_obj
return len(self._w_objs) - 1
def ray_intersect_dist(self, obj_idxs, rel_pos_v, rel_rot_q):
# return distance to objects along -z-axis, supports laser algorithm, put here because efficient
if False:
            # Should find the nearest intersection with object faces on the camera axis.
            # However, tools.intersections returns some error code; it seems difficult to debug.
candidates = []
ray = np.array([0, 0, -1.0]).reshape((3, 1))
for i, obj_idx in enumerate(obj_idxs):
verts = tools.q_times_mx(rel_rot_q[i], self._raw_objs[obj_idx].vertices) + rel_pos_v[i]
x = tools.intersections(self._raw_objs[obj_idx].faces, verts, ray)
candidates.extend(np.abs(x))
dist = np.min(candidates) if len(candidates)>0 else None
else:
# alternative method: just render and pick center pixel
_, depth = self.render(obj_idxs, rel_pos_v, rel_rot_q, [1, 0, 0],
get_depth=True, shadows=False, textures=False)
dist = depth[depth.shape[0]//2, depth.shape[1]//2]
if dist >= self._frustum_far * 0.99:
dist = None
return dist
def render_wireframe(self, obj_idxs, rel_pos_v, rel_rot_q, color):
obj_idxs = [obj_idxs] if isinstance(obj_idxs, int) else obj_idxs
rel_pos_v = np.array(rel_pos_v).reshape((-1, 3))
rel_rot_q = np.array(rel_rot_q).reshape((-1,))
color = np.array(color).reshape((-1, 3))
        assert len(obj_idxs) == rel_pos_v.shape[0] == rel_rot_q.shape[0], 'obj_idxs, rel_pos_v and rel_rot_q dimensions do not match'
self._fbo.use()
self._ctx.disable(moderngl.DEPTH_TEST)
self._ctx.disable(moderngl.CULL_FACE)
#self._ctx.front_face = 'ccw' # cull back faces (front faces suggested but that had glitches)
self._ctx.clear(0, 0, 0, float('inf'))
# self._ctx.clear(depth=float('inf'))
for i, obj_idx in enumerate(obj_idxs):
self._set_params(obj_idx, rel_pos_v[i], rel_rot_q[i], self._wireframe_prog)
self._wireframe_prog['color'].value = tuple(color[i])
self._w_objs[obj_idx].render()
if self._samples > 0:
self._ctx.copy_framebuffer(self._fbo2, self._fbo)
fbo = self._fbo2
else:
fbo = self._fbo
data = np.frombuffer(fbo.read(components=3, alignment=1), dtype='u1').reshape((self._height, self._width, 3))
data = np.flipud(data)
return data
def render_extra_data(self, obj_idxs, rel_pos_v, rel_rot_q, light_v):
assert self._enable_extra_data, 'RenderEngine constructed with enable_extra_data=False'
obj_idxs = [obj_idxs] if isinstance(obj_idxs, int) else obj_idxs
rel_pos_v = np.array(rel_pos_v).reshape((-1, 3))
rel_rot_q = np.array(rel_rot_q).reshape((-1,))
light_v = np.array(light_v)
        assert len(obj_idxs) == rel_pos_v.shape[0] == rel_rot_q.shape[0], 'obj_idxs, rel_pos_v and rel_rot_q dimensions do not match'
self._fbo.use()
self._ctx.enable(moderngl.DEPTH_TEST)
self._ctx.enable(moderngl.CULL_FACE)
self._ctx.front_face = 'ccw' # cull back faces
self._ctx.clear(np.nan, np.nan, np.nan, np.nan, float('inf'))
for i, obj_idx in enumerate(obj_idxs):
self._set_params(obj_idx, rel_pos_v[i], rel_rot_q[i], light_v, prog=self._extra_data_prog)
self._extra_data_prog['select'].value = 0 # 0: px model coords, 1: light incidence, emission, phase angle (cos)
self._e_objs[obj_idx].render()
if self._samples > 0:
self._ctx.copy_framebuffer(self._fbo2, self._fbo)
fbo = self._fbo2
else:
fbo = self._fbo
data = np.frombuffer(fbo.read(components=3, alignment=1, dtype='f4'), dtype='f4')\
.reshape((self._height, self._width, 3))
data = np.flipud(data)
return data
# @profile(stream=open('memory_profiler.log', 'w+'))
def render(self, obj_idxs, rel_pos_v, rel_rot_q, light_v, get_depth=False, shadows=True, textures=True,
gamma=1.0, reflection=REFLMOD_LUNAR_LAMBERT, flux_density=False):
obj_idxs = [obj_idxs] if isinstance(obj_idxs, int) else obj_idxs
rel_pos_v = np.array(rel_pos_v).reshape((-1, 3))
rel_rot_q = np.array(rel_rot_q).reshape((-1,))
light_v = np.array(light_v)
        assert len(obj_idxs) == rel_pos_v.shape[0] == rel_rot_q.shape[0], 'obj_idxs, rel_pos_v and rel_rot_q dimensions do not match'
shadow_mvps = None
if shadows:
shadow_mvps = self._render_shadowmap(obj_idxs, rel_pos_v, rel_rot_q, light_v)
self._fbo.use()
self._ctx.enable(moderngl.DEPTH_TEST)
self._ctx.enable(moderngl.CULL_FACE)
self._ctx.front_face = 'ccw' # cull back faces
self._ctx.clear(0, 0, 0, 0, float('inf'))
if shadows:
self._shadow_map.use(RenderEngine._LOC_SHADOW_MAP)
self._prog['shadow_map'].value = RenderEngine._LOC_SHADOW_MAP
for i, obj_idx in enumerate(obj_idxs):
self._set_params(obj_idx, rel_pos_v[i], rel_rot_q[i], light_v, shadow_mvps,
textures, reflection, prog=self._prog, flux_density=flux_density)
self._objs[obj_idx].render()
if self._samples > 0:
self._ctx.copy_framebuffer(self._fbo2, self._fbo)
fbo = self._fbo2
dbo = self._dbo2
else:
fbo = self._fbo
dbo = self._dbo
data = np.frombuffer(fbo.read(components=1, alignment=1, dtype='f4'), dtype='f4').reshape((self._height, self._width))
data = np.flipud(data)
if get_depth:
depth = np.frombuffer(dbo.read(alignment=1), dtype='f4').reshape((self._height, self._width))
depth = np.flipud(depth)
# normalize depth
if self._persp_proj:
# for perspective projection
a = -(self._frustum_far - self._frustum_near) / (2.0 * self._frustum_far * self._frustum_near)
b = (self._frustum_far + self._frustum_near) / (2.0 * self._frustum_far * self._frustum_near)
if self._frustum_far/self._frustum_near < 1e7:
depth = np.divide(1.0, (2.0 * a) * depth - (a - b)) # 1/((2*X-1)*a+b)
else:
# up to difference of 1e14
depth = np.divide(1.0, (2.0 * a) * depth.astype(np.float64) - (a - b)).astype(np.float32)
else:
# for orthographic projection
# - depth is between 0 and 1
depth = depth * (self._frustum_far - self._frustum_near) + self._frustum_near
# free memory to avoid memory leaks
if shadows:
self._shadow_map.release()
if flux_density:
data = data * flux_density
else:
data = np.clip(data*255, 0, 255).astype('uint8')
if gamma != 1.0:
data = ImageProc.adjust_gamma(data, gamma)
return (data, depth) if get_depth else data
def _set_params(self, obj_idx, rel_pos_v, rel_rot_q, light_v=None, shadow_mvps=None, use_textures=True,
reflection=REFLMOD_LUNAR_LAMBERT, prog=None, flux_density=False):
self._model_mx = np.identity(4)
self._model_mx[:3, :3] = quaternion.as_rotation_matrix(rel_rot_q)
self._model_mx[:3, 3] = rel_pos_v
prog = prog or self._prog
mv = self._view_mx.dot(self._model_mx)
mvp = self._proj_mx.dot(mv)
prog['mvp'].write((mvp.T).astype('float32').tobytes())
if 'mv' in prog:
prog['mv'].write((mv.T).astype('float32').tobytes())
if 'use_texture' in prog:
use_texture = use_textures and self._textures[obj_idx] is not None
prog['use_texture'].value = use_texture
if use_texture:
self._textures[obj_idx].use(RenderEngine._LOC_TEXTURE)
prog['texture_map'].value = RenderEngine._LOC_TEXTURE
if 'reflection_model' in prog:
prog['use_flux_density'].value = flux_density is not False
prog['brightness_coef'].value = 1 if flux_density else 0.65
prog['reflection_model'].value = reflection
prog['model_coefs'].value = RenderEngine.REFLMOD_PARAMS[reflection]
if 'lightDirection_viewFrame' in prog:
prog['lightDirection_viewFrame'].value = tuple(-light_v) # already in view frame
if 'use_shadows' in prog:
use_shadows = shadow_mvps is not None
prog['use_shadows'].value = use_shadows
if use_shadows:
self._prog['shadow_mvp'].write(shadow_mvps[obj_idx].T.astype('float32').tobytes())
if 'reflection_model' in prog:
if reflection == RenderEngine.REFLMOD_HAPKE and RenderEngine.REFLMOD_PARAMS[reflection][9] % 2 > 0:
hapke_K = self._ctx.texture((7, 19), 1, data=RenderEngine.HAPKE_K.T.astype('float32').tobytes(), alignment=1, dtype='f4')
hapke_K.use(RenderEngine._LOC_HAPKE_K)
prog['hapke_K'].value = RenderEngine._LOC_HAPKE_K
def _render_shadowmap(self, obj_idxs, rel_pos_v, rel_rot_q, light_v):
# shadows following http://www.opengl-tutorial.org/intermediate-tutorials/tutorial-16-shadow-mapping/
v = np.identity(4)
angle = math.acos(np.clip(np.array([0, 0, -1]).dot(light_v), -1, 1))
axis = np.cross(np.array([0, 0, -1]), light_v)
q_cam2light = tools.angleaxis_to_q((angle, *axis))
v[:3, :3] = quaternion.as_rotation_matrix(q_cam2light.conj())
mvs = {}
for i, obj_idx in enumerate(obj_idxs):
m = np.identity(4)
            m[:3, :3] = quaternion.as_rotation_matrix(rel_rot_q[i])  # index by loop position, matching rel_pos_v[i] below
m[:3, 3] = rel_pos_v[i]
mv = v.dot(m)
mvs[obj_idx] = mv
proj = self._ortho_mx(obj_idxs, mvs)
bias = self._bias_mx() # map from [-1,1] x [-1,1] to [0,1]x[0,1] so that can use with "texture" command
self._sfbo.use()
self._ctx.enable(moderngl.DEPTH_TEST)
self._ctx.enable(moderngl.CULL_FACE)
self._ctx.front_face = 'ccw' # cull back faces (front faces suggested but that had glitches)
self._ctx.clear(depth=float('inf'))
shadow_mvps = {}
for i in obj_idxs:
mvp = proj.dot(mvs[i])
shadow_mvps[i] = bias.dot(mvp) # used later to project model vertices to same 2d shadow frame
self._shadow_prog['mvp'].write(mvp.T.astype('float32').tobytes())
self._s_objs[i].render()
data = self._sdbo.read(alignment=1)
n = int(math.sqrt(self._samples or 1))
self._shadow_map = self._ctx.texture((self._width*n, self._height*n), 1, data=data, alignment=1, dtype='f4')
if False:
import cv2
d = np.frombuffer(data, dtype='f4').reshape((self._width, self._height))
a = np.max(d.flatten())
b = np.min(d.flatten())
print('%s,%s' % (a, b))
cv2.imshow('distance from sun', d)
#cv2.waitKey()
#quit()
return shadow_mvps
def _ortho_mx_size(self, l, r, b, t, n, f):
P = np.identity(4)
P[0, 0] = 2 / (r - l)
P[1, 1] = 2 / (t - b)
P[2, 2] = -2 / (f - n)
P[0, 3] = -(r + l) / (r - l)
P[1, 3] = -(t + b) / (t - b)
P[2, 3] = -(f + n) / (f - n)
if False:
# transform should result that all are in range [-1,1]
tr = P.dot(np.array([
[l, b, n, 1],
[r, t, f, 1],
]).T)
print('%s'%tr)
return P
def _ortho_mx(self, obj_idxs, mvs):
l = float('inf') # min x
r = -float('inf') # max x
b = float('inf') # min y
t = -float('inf') # max y
n = float('inf') # min z
f = -float('inf') # max z
for i, obj_idx in enumerate(obj_idxs):
v3d = self._raw_objs[obj_idx].vertices
vert = mvs[obj_idx].dot(np.concatenate((v3d, np.ones((len(v3d),1))), axis=1).T).T
vert = vert[:, :3] / vert[:, 3:]
x0, y0, z0 = np.min(vert, axis=0)
x1, y1, z1 = np.max(vert, axis=0)
l = min(l, x0)
r = max(r, x1)
b = min(b, y0)
t = max(t, y1)
n = min(n, -z1) # negative z-axis in front of camera, however, near and far values typically positive
f = max(f, -z0)
P = self._ortho_mx_size(l, r, b, t, n, f)
return P
def _bias_mx(self):
return np.array([
[0.5, 0.0, 0.0, 0.5],
[0.0, 0.5, 0.0, 0.5],
[0.0, 0.0, 0.5, 0.5],
[0.0, 0.0, 0.0, 1.0],
])
if __name__ == '__main__':
from visnav.settings import *
import cv2
sm = BennuSystemModel(hi_res_shape_model=True)
# sm = DidymosSystemModel(use_narrow_cam=False, target_primary=False, hi_res_shape_model=False)
# sm = RosettaSystemModel()
re = RenderEngine(sm.cam.width, sm.cam.height, antialias_samples=16)
re.set_frustum(sm.cam.x_fov, sm.cam.y_fov, sm.min_distance*0.2, sm.max_distance*1.1)
pos = [0, 0, -sm.min_med_distance * 1]
q = tools.angleaxis_to_q((math.radians(20), 0, 1, 0))
#obj_idx = re.load_object('../data/67p-17k.obj')
#obj_idx = re.load_object('../data/67p-4k.obj')
#obj_idx = re.load_object('../data/ryugu+tex-d1-4k.obj')
if False:
# test result grid
obj_idx = re.load_object(os.path.join(DATA_DIR, 'ryugu+tex-d1-100.obj'), wireframe=True)
q = tools.angleaxis_to_q((math.radians(3), 0, 1, 0))
pos = [0, 0, -7]
for i in range(60):
image = re.render_wireframe(obj_idx, pos, q ** i, (0, 1, 0))
cv2.imshow('fs', image)
cv2.waitKey()
quit()
else:
obj_idx = re.load_object(sm.asteroid.real_shape_model)
#obj_idx = re.load_object(sm.asteroid.target_model_file)
#obj_idx = re.load_object(sm.asteroid.hires_target_model_file)
#obj_idx = re.load_object(sm.asteroid.target_model_file)
#obj_idx = re.load_object(os.path.join(DATA_DIR, 'test-ball.obj'))
if False:
# test depth rendering for laser algo
re.set_orth_frustum(sm.asteroid.max_radius * 0.002, sm.asteroid.max_radius * 0.002, 0, sm.max_distance)
img, depth = re.render(obj_idx, [0, 0, -0.22], q, [1, 0, 0], get_depth=True, shadows=False, textures=False)
print('center depth: %s' % (depth[depth.shape[0]//2, depth.shape[1]//2]))
a, b = np.min(depth), np.max(depth)
dd = (((depth-a)/(b-a))**(1/8.0) * 255).astype('uint8')
cv2.imshow('depth', dd)
cv2.imshow('img', img)
cv2.waitKey()
quit()
if False:
# test multi-object shadow rendering
#RenderEngine.REFLMOD_PARAMS[RenderEngine.REFLMOD_LUNAR_LAMBERT][6] = 2
obj_idx_d1 = re.load_object(DidymosPrimary(hi_res_shape_model=False).target_model_file)
obj_idx_sc = re.load_object(sm.sc_model_file)
p = [obj_idx_d1, obj_idx, obj_idx_sc], [[0, 1.0, 1.0], [0, 0, -0.22], [0, 0, 0]], [q, q, np.quaternion(1,0,1,0).normalized()]
# p = obj_idx, [0, 0, -0.22], q
img, depth = re.render(*p, [0, 0, -1], get_depth=True, shadows=True, textures=True,
reflection=RenderEngine.REFLMOD_HAPKE)
cv2.imshow('img', img)
cv2.waitKey()
quit()
if False:
for i in range(36):
image = re.render(obj_idx, [0, 0, -sm.min_med_distance*3], q**i, np.array([1, 0, 0])/math.sqrt(1), get_depth=False)
cv2.imshow('image', image)
cv2.waitKey()
elif True:
RenderEngine.REFLMOD_PARAMS = sm.asteroid.reflmod_params
imgs = ()
i = 1
th = math.radians(100)
#for i in range(4, 7):
for th in np.linspace(math.radians(90), 0, 4):
imgs_j = ()
for j, hapke in enumerate((True, False)):
model = RenderEngine.REFLMOD_HAPKE if hapke else RenderEngine.REFLMOD_LUNAR_LAMBERT
if hapke and j == 0:
RenderEngine.REFLMOD_PARAMS[model][9] = 0
if hapke and j == 1:
RenderEngine.REFLMOD_PARAMS[model][9] = 1
light = tools.q_times_v(tools.ypr_to_q(th, 0, 0), np.array([0, 0, -1]))
image = re.render(obj_idx, pos, q**i, tools.normalize_v(light), get_depth=False, reflection=model)
image = ImageProc.adjust_gamma(image, 1.8)
imgs_j += (image,)
imgs += (np.vstack(imgs_j),)
#cv2.imshow('depth', np.clip((sm.min_med_distance+sm.asteroid.mean_radius - depth)/5, 0, 1))
img = np.hstack(imgs)
sc = 1536 / img.shape[1]
cv2.imshow('images', cv2.resize(img, None, fx=sc, fy=sc))
cv2.waitKey()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2017, Data61
# Commonwealth Scientific and Industrial Research Organisation (CSIRO)
# ABN 41 687 119 230.
#
# This software may be distributed and modified according to the terms of
# the BSD 2-Clause license. Note that NO WARRANTY is provided.
# See "LICENSE_BSD2.txt" for details.
#
# @TAG(DATA61_BSD)
#
'''
Apply various static analysis tools to the accelerator.
'''
from __future__ import absolute_import, division, print_function, \
unicode_literals
import os, re, subprocess, sys, unittest
ME = os.path.abspath(__file__)
MY_DIR = os.path.dirname(ME)
# Make CAmkES importable
sys.path.append(os.path.join(MY_DIR, '../..'))
from camkes.internal.tests.utils import CAmkESTest, which
# License server for the Goanna static analyser. You need a valid license to
# run Goanna, which we have internally at NICTA, but you'll need to be able to
# reach this server.
GOANNA_LICENSE_SERVER = 'goanna.research.nicta.com.au'
def goanna_license_server_reachable():
try:
with open(os.devnull, 'w') as f:
subprocess.check_call(['ping', '-c', '1', '-w', '5',
GOANNA_LICENSE_SERVER], stdout=f, stderr=f)
return True
except subprocess.CalledProcessError:
return False
GOANNA_WRAPPER = os.path.join(MY_DIR, '../goanna_wrapper.py')
class TestStaticAnalysis(CAmkESTest):
@unittest.skipIf(which('cmake') is None or which('goannacc') is None or
not goanna_license_server_reachable(), 'CMake or Goanna not found or '
'Goanna license server unavailable')
def test_goanna_compilation(self):
'''
Test whether the Goanna static analyser can find any problems with the
accelerator.
'''
# Build the accelerator with Goanna. We use GNU Make here to avoid the
# output of Ninja (which it appears there is no way to hide) getting in
# the way.
tmp = self.mkdtemp()
ret, stdout, stderr = self.execute(['cmake', '-G', 'Unix Makefiles',
            MY_DIR], cwd=tmp, env=dict(list(os.environ.items()) + [('CC',
GOANNA_WRAPPER), ('CFLAGS', '--license-server=%s' %
GOANNA_LICENSE_SERVER)]))
if ret != 0:
self.fail('cmake failed:\n%s\n%s' % (stdout, stderr))
ret, stdout, stderr = self.execute(['make', 'camkes-accelerator'],
cwd=tmp)
if ret != 0:
self.fail('goannacc failed to compile the accelerator:\n%s' %
stderr)
# Check if we saw any warnings.
warning_line = re.compile(r'(?P<relfile>[^\s]+):(?P<lineno>\d+):'
r'\s*warning:\s*Goanna\[(?P<checkname>[^\]]+)\]\s*Severity-'
r'(?P<severity>\w+),\s*(?P<message>[^\.]*)\.\s*(?P<rules>.*)$')
for line in [x.strip() for x in stderr.split('\n') if x.strip() != '']:
if warning_line.match(line):
self.fail('Goanna found new issues with the accelerator '
'source:\n%s' % stderr)
@unittest.skipIf(which('cmake') is None or which('ninja') is None or which('scan-build') is None, 'CMake or Ninja or scan-build not found')
def test_clang_static_analyser(self):
'''
Run the Clang static analyser on the accelerator.
'''
tmp = self.mkdtemp()
ret, stdout, stderr = self.execute(['scan-build', '--status-bugs',
'cmake', '-G', 'Ninja', MY_DIR], cwd=tmp)
if ret != 0:
self.fail('cmake failed:\n%s\n%s' % (stdout, stderr))
ret, stdout, stderr = self.execute(['ninja', 'camkes-accelerator'],
cwd=tmp)
if ret != 0:
self.fail('scan-build failed:\n%s\n%s' % (stdout, stderr))
@unittest.skipIf(which('cppcheck') is None, 'cppcheck not found')
def test_cppcheck(self):
'''
Run the Cppcheck static analyser on the accelerator.
'''
accelerator = os.path.join(MY_DIR, 'accelerator.c')
ret, stdout, stderr = self.execute(['cppcheck', '--error-exitcode=-1',
'--library=gnu.cfg', accelerator])
if ret != 0:
self.fail('%s\n%s' % (stdout, stderr))
if __name__ == '__main__':
unittest.main()
|
from django import forms
from django.contrib.auth.models import User
class UserForm(forms.ModelForm):
    name = forms.CharField(max_length=254, help_text='Enter first name...')
    surname = forms.CharField(max_length=254, help_text='Enter last name...')
    new_login = forms.CharField(max_length=254, help_text='Enter login...')
    password = forms.CharField(widget=forms.PasswordInput())
    email = forms.EmailField(max_length=254, help_text='Enter email...')
class Meta:
model = User
fields = ('name', 'surname', 'new_login', 'email', 'password')
|
#! /usr/bin/env python
from __future__ import print_function
import unittest
import time
import nose
import sys
import os
from getpass import getuser
from firecloud import fccore
from firecloud import api as fapi
class TestFISSLowLevel(unittest.TestCase):
"""Unit test the low-level interface of FireCloud-enabled FISS. There should
be at least one test per low-level call, with composite tests as feasible.
"""
@classmethod
def setUpClass(cls, msg=""):
'''Set up FireCloud etc to run tests'''
print("\nStarting low-level api tests ...\n", file=sys.stderr)
fiss_verbosity = os.environ.get("FISS_TEST_VERBOSITY", None)
        if fiss_verbosity is None:
fiss_verbosity = 0
fcconfig = fccore.config_parse()
cls.project = fcconfig.project
if not cls.project:
raise ValueError("Your configuration must define a FireCloud project")
fcconfig.set_verbosity(fiss_verbosity)
        # Set up a temp workspace for the duration of the tests; in case a previous
        # test failed, attempt to unlock & delete it before creating anew. Note
        # that because we exercise workspace create/delete here, their tests are NO-OPs
cls.workspace = getuser() + '_FISS_TEST'
r = fapi.unlock_workspace(cls.project, cls.workspace)
r = fapi.delete_workspace(cls.project, cls.workspace)
r = fapi.create_workspace(cls.project, cls.workspace)
fapi._check_response_code(r, 201)
@classmethod
def tearDownClass(cls):
print("\nFinishing low-level CLI tests ...\n", file=sys.stderr)
r = fapi.delete_workspace(cls.project, cls.workspace)
# Test individual api calls, 1 test per api call,
# listed in alphabetical order for convenience
@unittest.skip("Not Implemented")
def test_abort_submission(self):
"""Test abort_submission()."""
pass
def test_clone_workspace(self):
"""Test clone_workspace()."""
temp_space = getuser() + '_FISS_TEST_CLONE'
r = fapi.unlock_workspace(self.project, temp_space)
r = fapi.delete_workspace(self.project, temp_space)
r = fapi.clone_workspace(self.project, self.workspace,
self.project, temp_space)
print(r.status_code, r.content)
self.assertEqual(r.status_code, 201)
# Compare new workspace and old workspace
# Cleanup, Delete workspace
r = fapi.delete_workspace(self.project, temp_space)
print(r.status_code, r.content)
self.assertIn(r.status_code, [200, 202])
@unittest.skip("Not Implemented")
def test_copy_config_from_repo(self):
"""Test copy_config_from_repo()."""
pass
@unittest.skip("Not Implemented")
def test_copy_config_to_repo(self):
"""Test copy_config_to_repo()."""
pass
@unittest.skip("Not Implemented")
def test_copy_entities(self):
"""Test copy_entities()."""
pass
@unittest.skip("Not Implemented")
def test_create_submission(self):
"""Test create_submission()."""
pass
@unittest.skip("Not Implemented")
def test_create_workspace_config(self):
"""Test create_config()."""
pass
@unittest.skip("Not Implemented")
def test_delete_entity(self):
"""Test delete_entity()."""
pass
@unittest.skip("Not Implemented")
def test_delete_pair(self):
"""Test delete_pair()."""
pass
@unittest.skip("Not Implemented")
def test_delete_pair_set(self):
"""Test delete_pair_set()."""
pass
@unittest.skip("Not Implemented")
def test_delete_participant(self):
"""Test delete_participant()."""
pass
@unittest.skip("Not Implemented")
def test_delete_participant_set(self):
"""Test delete_participant_set()."""
pass
@unittest.skip("Not Implemented")
def test_delete_repository_method(self):
"""Test delete_repository_method()."""
pass
@unittest.skip("Not Implemented")
def test_delete_sample(self):
"""Test delete_sample()."""
pass
@unittest.skip("Not Implemented")
def test_delete_sample_set(self):
"""Test delete_sample_set()."""
pass
def test_create_workspace(self):
# NO-OP, because this feature is tested in setUpClass & elsewhere
pass
def test_delete_workspace(self):
# NO-OP, because this feature is tested in setUpClass & elsewhere
pass
@unittest.skip("Not Implemented")
def test_delete_workspace_config(self):
"""Test delete_config()."""
pass
@unittest.skip("Not Implemented")
def test_get_config_template(self):
"""Test get_config_template()."""
pass
def test_get_entities(self):
"""Test get_entities()."""
r = fapi.get_entities(self.project,
self.workspace,
"participant")
print(r.status_code, r.content)
self.assertEqual(r.status_code, 200)
@unittest.skip("Not Implemented")
def test_get_entities_tsv(self):
"""Test get_entities_tsv()."""
r = fapi.get_entities_tsv(self.project,
self.workspace,
"participant")
print(r.status_code, r.content)
self.assertEqual(r.status_code, 200)
def test_get_entities_with_type(self):
"""Test get_entities_with_type()."""
r = fapi.get_entities_with_type(self.project,
self.workspace)
print(r.status_code, r.content)
self.assertEqual(r.status_code, 200)
@unittest.skip("Not Implemented")
def test_get_entity(self):
"""Test get_entity()."""
pass
@unittest.skip("Not Implemented")
def test_get_inputs_outputs(self):
"""Test get_inputs_outputs()."""
pass
@unittest.skip("Not Implemented")
def test_get_repository_config(self):
"""Test get_repository_config()."""
pass
@unittest.skip("Not Implemented")
def test_get_repository_config_acl(self):
"""Test get_repository_config_acl()."""
pass
@unittest.skip("Not Implemented")
def test_get_repository_method(self):
"""Test get_repository_method()."""
pass
@unittest.skip("Not Implemented")
def test_get_repository_method_acl(self):
"""Test get_repository_method_acl()."""
pass
def test_get_api_methods_definitions(self):
"""Test get_api_methods_definitions()."""
r = fapi.get_api_methods_definitions()
print(r.status_code, r.content)
self.assertEqual(r.status_code, 200)
def test_get_status(self):
"""Test get_status()."""
r = fapi.get_status()
print(r.status_code, r.content)
self.assertEqual(r.status_code, 200)
@unittest.skip("Not Implemented")
def test_get_submission(self):
"""Test get_submission()."""
pass
@unittest.skip("Not Implemented")
def test_get_submission_queue(self):
"""Test get_submission_queue()."""
pass
@unittest.skip("Not Implemented")
def test_get_workflow_outputs(self):
"""Test get_workflow_outputs()."""
pass
def test_get_workspace(self):
"""Test get_workspace()."""
r = fapi.get_workspace(self.project, self.workspace)
print(r.status_code, r.content)
self.assertEqual(r.status_code, 200)
space_dict = r.json()['workspace']
self.assertEqual(space_dict['name'], self.workspace)
self.assertEqual(space_dict['namespace'], self.project)
def test_get_workspace_acl(self):
"""Test get_workspace_acl()."""
r = fapi.get_workspace_acl(self.project, self.workspace)
print(r.status_code, r.content)
self.assertEqual(r.status_code, 200)
@unittest.skip("Not Implemented")
def test_get_workspace_config(self):
"""Test get_workspace_config()."""
pass
def test_list_billing_projects(self):
"""Test list_billing_projects()."""
r = fapi.list_billing_projects()
print(r.status_code, r.content)
self.assertEqual(r.status_code, 200)
def test_list_entity_types(self):
"""Test list_entity_types()."""
r = fapi.list_entity_types(self.project,
self.workspace)
print(r.status_code, r.content)
self.assertEqual(r.status_code, 200)
def test_list_repository_configs(self):
"""Test list_repository_configs()."""
r = fapi.list_repository_configs()
print(r.status_code, r.content)
self.assertEqual(r.status_code, 200)
def test_list_repository_methods(self):
"""Test list_repository_methods()."""
r = fapi.list_repository_methods()
print(r.status_code, r.content)
self.assertEqual(r.status_code, 200)
def test_list_submissions(self):
"""Test list_submissions()."""
r = fapi.list_submissions(self.project,
self.workspace)
print(r.status_code, r.content)
self.assertEqual(r.status_code, 200)
@unittest.skip("Not Implemented")
def test_list_workspace_configs(self):
"""Test get_configs()."""
r = fapi.get_configs(self.test_namespace,
self.workspace)
print(r.status_code, r.content)
self.assertEqual(r.status_code, 200)
def test_list_workspaces(self):
"""Test list_workspaces()."""
r = fapi.list_workspaces()
print(r.status_code, r.content)
self.assertEqual(r.status_code, 200)
workspace_names = [w['workspace']['name'] for w in r.json()]
self.assertIn(self.workspace, workspace_names)
def test_lock_workspace(self):
"""Test lock_workspace()"""
r = fapi.lock_workspace(self.project, self.workspace)
print(r.status_code, r.content)
self.assertEqual(r.status_code, 204)
# Unlock, for other tests
fapi.unlock_workspace(self.project, self.workspace)
def test_health(self):
"""Test health()."""
r = fapi.health()
print(r.status_code, r.content)
self.assertEqual(r.status_code, 200)
@unittest.skip("Not Implemented")
def test_overwrite_workspace_config(self):
"""Test overwrite_workspace_config()."""
pass
@unittest.skip("Not Implemented")
def test_rename_workspace_config(self):
"""Test rename_workspace_config()."""
pass
def test_unlock_workspace(self):
"""Test unlock_workspace()."""
r = fapi.unlock_workspace(self.project, self.workspace)
print(r.status_code, r.content)
self.assertEqual(r.status_code, 204)
@unittest.skip("Not Implemented")
def test_update_repository_config_acl(self):
"""Test update_repository_config_acl()."""
pass
@unittest.skip("Not Implemented")
def test_update_repository_method(self):
"""Test update_repository_method()."""
pass
@unittest.skip("Not Implemented")
def test_update_repository_method_acl(self):
"""Test update_repository_method_acl()."""
pass
def test_update_workspace_acl(self):
"""Test update_workspace_acl()."""
updates = [{ "email": "[email protected]",
"accessLevel": "READER"}]
r = fapi.update_workspace_acl(self.project, self.workspace,
updates)
print(r.status_code, r.content)
self.assertEqual(r.status_code, 200)
def test_update_workspace_attributes(self):
"""Test update_workspace_attributes()."""
updates = [fapi._attr_set("key1", "value1")]
r = fapi.update_workspace_attributes(self.project,
self.workspace, updates)
print(r.status_code, r.content)
self.assertEqual(r.status_code, 200)
@unittest.skip("Not Implemented")
def test_update_workspace_config(self):
"""Test update_workspace_config()."""
pass
@unittest.skip("Not Implemented")
def test_upload_entities(self):
"""Test upload_entities()."""
pass
@unittest.skip("Not Implemented")
def test_upload_entities_tsv(self):
"""Test upload_entities_tsv()."""
pass
@unittest.skip("Not Implemented")
def test_validate_config(self):
"""Test validate_config()."""
pass
def main():
nose.main()
if __name__ == '__main__':
main()
|
import tensorflow as tf
from pgn.layers import Encoder, Decoder, Pointer, BahdanauAttention
from utils.saveLoader import load_embedding_matrix
class PGN(tf.keras.Model):
def __init__(self, params):
super(PGN, self).__init__()
self.embedding_matrix = load_embedding_matrix()
self.params = params
self.encoder = Encoder(params["vocab_size"],
params["embed_size"],
self.embedding_matrix,
params["enc_units"],
params["batch_size"])
self.attention = BahdanauAttention(units=params["attn_units"])
self.decoder = Decoder(params["vocab_size"],
params["embed_size"],
self.embedding_matrix,
params["dec_units"],
params["batch_size"],
self.attention)
self.pointer = Pointer()
# def call_decoder_one_step(self, dec_input, dec_hidden,
# enc_output, enc_extended_inp,
# batch_oov_len, enc_pad_mask,
# use_coverage, prev_coverage):
# context_vector, attentions, coverage_ret = self.attention(dec_hidden,
# enc_output,
# enc_pad_mask,
# use_coverage,
# prev_coverage)
# dec_x, pred, dec_hidden = self.decoder(dec_input,
# dec_hidden,
# enc_output,
# context_vector)
# if self.params["pointer_gen"]:
# p_gen = self.pointer(context_vector, dec_hidden, tf.squeeze(dec_x, axis=1))
# final_dists = _calc_final_dist(enc_extended_inp,
# [pred],
# [attentions],
# [p_gen],
# batch_oov_len,
# self.params["vocab_size"],
# self.params["batch_size"])
# return tf.stack(final_dists, 1), dec_hidden, context_vector, attentions, p_gen, coverage_ret
# else:
# return pred, dec_hidden, context_vector, attentions, None, coverage_ret
# # return pred, dec_hidden, context_vector, attention_weights
def call(self, enc_inp, dec_inp,
enc_extended_inp, batch_oov_len,
enc_pad_mask, use_coverage=True):
'''
:param enc_inp:
:param dec_inp: tf.expand_dims(dec_inp[:, t], 1)
:param enc_extended_inp:
:param batch_oov_len:
'''
        # # Use tf.TensorArray instead of a Python list
# predictions = tf.TensorArray(tf.float32, size=dec_inp.shape[1])
# attentions = tf.TensorArray(tf.float32, size=dec_inp.shape[1])
# p_gens = tf.TensorArray(tf.float32, size=dec_inp.shape[1])
# coverages = tf.TensorArray(tf.float32, size=dec_inp.shape[1])
predictions = []
attentions = []
p_gens = []
coverages = []
        # Compute the encoder output
enc_output, enc_hidden = self.encoder(enc_inp)
dec_hidden = enc_hidden
# (batch_size, enc_len, 1)
        prev_coverage = tf.zeros((enc_output.shape[0], enc_output.shape[1], 1))
for t in tf.range(dec_inp.shape[1]):
context_vector, dec_hidden, \
dec_x, pred, attn, prev_coverage = self.decoder(dec_inp[:, t], # (batch_size, )
dec_hidden, # (batch_size, dec_units)
enc_output, # (batch_size, enc_len, enc_units)
enc_pad_mask, # (batch_size, enc_len)
prev_coverage,
use_coverage
)
p_gen = self.pointer(context_vector, dec_hidden, dec_x)
            # Store the outputs of this decoding step
predictions.append(pred)
attentions.append(attn)
p_gens.append(p_gen)
coverages.append(prev_coverage)
# predictions.write(t, pred)
# attentions.write(t, attn)
# p_gens.write(t, p_gen)
# coverages.write(t, prev_coverage)
#
# predictions = tf.transpose(predictions.stack(), perm=[1, 0, 2])
# attentions = tf.transpose(attentions.stack(), perm=[1, 0, 2])
# p_gens = tf.transpose(p_gens.stack(), perm=[1, 0, 2])
# coverages = tf.transpose(coverages.stack(), perm=[1, 0, 2, 3])
#
predictions = tf.stack(predictions, axis=1)
attentions = tf.stack(attentions, axis=1)
p_gens = tf.stack(p_gens, axis=1)
coverages = tf.stack(coverages, axis=1)
coverages = tf.squeeze(coverages, -1)
        # Compute final_dist
        # Note: tf.transpose() reorders the tensor axes
        # predictions.stack() has shape == (dec_len, batch_size, vocab_size)
        # after tf.transpose the shape == (batch_size, dec_len, vocab_size)
final_dist = _calc_final_dist(enc_extended_inp,
predictions,
attentions,
p_gens,
batch_oov_len,
self.params["vocab_size"],
self.params["batch_size"])
# final_dist (batch_size, dec_len, vocab_size+batch_oov_len)
# (batch_size, dec_len, enc_len)
return final_dist, attentions, coverages
# return final_dist, dec_hidden, context_vector, attentions, p_gen, prev_coverage
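# --- Illustrative sketch (added; not part of the original model) ---
# A tiny NumPy walk-through of the pointer-generator formula that
# `_calc_final_dist` below implements: p_gen * vocab_dist, plus
# (1 - p_gen) * attention scattered onto the extended vocabulary.
# All shapes and values are made up purely for clarity.
def _final_dist_toy_example():
    import numpy as np
    vocab_dist = np.array([0.1, 0.2, 0.3, 0.2, 0.2])  # distribution over a vocabulary of size 5
    attn = np.array([0.5, 0.3, 0.2])                   # attention over 3 source positions
    src_ids = np.array([2, 5, 6])                      # extended ids; 5 and 6 are OOV slots
    p_gen = 0.8
    batch_oov_len = 2
    final = np.concatenate([vocab_dist * p_gen, np.zeros(batch_oov_len)])
    np.add.at(final, src_ids, attn * (1 - p_gen))      # scatter-add the copy probabilities
    assert np.isclose(final.sum(), 1.0)                # still a valid probability distribution
    return final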
def _calc_final_dist(_enc_batch_extend_vocab, vocab_dists, attn_dists, p_gens, batch_oov_len, vocab_size,
batch_size):
    # Finalized version of the code
    # First compute the left-hand part of the formula
# _vocab_dists_pgn (batch_size, dec_len, vocab_size)
_vocab_dists_pgn = vocab_dists * p_gens
    # Pad the original vocabulary distribution according to the OOV list length
    # _extra_zeros (batch_size, dec_len, batch_oov_len)
    if batch_oov_len != 0:
_extra_zeros = tf.zeros((batch_size, p_gens.shape[1], batch_oov_len))
        # After concatenation, the left-hand part of the formula is complete
# _vocab_dists_extended (batch_size, dec_len, vocab_size+batch_oov_len)
_vocab_dists_extended = tf.concat([_vocab_dists_pgn, _extra_zeros], axis=-1)
    # Right-hand part of the formula
    # Attention weighted by (1 - p_gen)
# _attn_dists_pgn (batch_size, dec_len, enc_len)
_attn_dists_pgn = attn_dists * (1 - p_gens)
    # Size of the extended vocabulary
_extended_vocab_size = vocab_size + batch_oov_len
    # The array to scatter is _attn_dists_pgn
    # After scattering, its shape matches the left-hand part of the formula
# shape=[batch_size, dec_len, vocab_size+batch_oov_len]
shape = _vocab_dists_extended.shape
enc_len = tf.shape(_enc_batch_extend_vocab)[1]
dec_len = tf.shape(_vocab_dists_extended)[1]
# batch_nums (batch_size, )
batch_nums = tf.range(0, limit=batch_size)
# batch_nums (batch_size, 1)
batch_nums = tf.expand_dims(batch_nums, 1)
# batch_nums (batch_size, 1, 1)
batch_nums = tf.expand_dims(batch_nums, 2)
    # tile replicates batch_nums dec_len and enc_len times along axes 1 and 2, respectively
# batch_nums (batch_size, dec_len, enc_len)
batch_nums = tf.tile(batch_nums, [1, dec_len, enc_len])
# (dec_len, )
dec_len_nums = tf.range(0, limit=dec_len)
# (1, dec_len)
dec_len_nums = tf.expand_dims(dec_len_nums, 0)
# (1, dec_len, 1)
dec_len_nums = tf.expand_dims(dec_len_nums, 2)
    # tile replicates a tensor along the given axes
# dec_len_nums (batch_size, dec_len, enc_len)
dec_len_nums = tf.tile(dec_len_nums, [batch_size, 1, enc_len])
# _enc_batch_extend_vocab_expand (batch_size, 1, enc_len)
_enc_batch_extend_vocab_expand = tf.expand_dims(_enc_batch_extend_vocab, 1)
# _enc_batch_extend_vocab_expand (batch_size, dec_len, enc_len)
_enc_batch_extend_vocab_expand = tf.tile(_enc_batch_extend_vocab_expand, [1, dec_len, 1])
    # Since we scatter onto a 3-D tensor, the last dimension of indices is 3
# indices (batch_size, dec_len, enc_len, 3)
indices = tf.stack((batch_nums,
dec_len_nums,
_enc_batch_extend_vocab_expand),
axis=3)
    # Perform the scatter update
attn_dists_projected = tf.scatter_nd(indices, _attn_dists_pgn, shape)
    # This completes the right-hand part of the formula
    # Compute the final distribution
final_dists = _vocab_dists_extended + attn_dists_projected
return final_dists |
import math
from typing import Tuple
import numpy as np
import scipy.stats as scs
from tabulate import tabulate
class DistrManager:
def __init__(self, alpha: float = 0.05) -> None:
self._p = 1 - alpha
def get_probability(
self, distr: np.ndarray, limits: np.ndarray
) -> Tuple[np.ndarray, np.ndarray]:
p_array = np.array([])
n_array = np.array([])
for idx in range(-1, len(limits)):
previous_cdf = 0 if idx == -1 else scs.norm.cdf(limits[idx])
current_cdf = 1 if idx == len(limits) - 1 else scs.norm.cdf(limits[idx + 1])
p_array = np.append(p_array, current_cdf - previous_cdf)
if idx == -1:
n_array = np.append(n_array, len(distr[distr <= limits[0]]))
elif idx == len(limits) - 1:
n_array = np.append(n_array, len(distr[distr >= limits[-1]]))
else:
n_array = np.append(
n_array,
len(distr[(distr <= limits[idx + 1]) & (distr >= limits[idx])]),
)
return n_array, p_array
def print_table(
self,
n_array: np.ndarray,
p_array: np.ndarray,
limits: np.ndarray,
size: int,
format: str = "latex",
) -> None:
result = np.divide(
np.multiply((n_array - size * p_array), (n_array - size * p_array)),
p_array * size,
)
rows = list()
for idx in range(len(n_array)):
            if idx == 0:
                borders = [-np.inf, np.around(limits[0], decimals=8)]
            elif idx == len(n_array) - 1:
                borders = [np.around(limits[-1], decimals=8), np.inf]
            else:
                borders = [
                    np.around(limits[idx - 1], decimals=8),
                    np.around(limits[idx], decimals=8),
                ]
            rows.append(
                [
                    idx + 1,
                    borders,
n_array[idx],
np.around(p_array[idx], decimals=8),
np.around(p_array[idx] * size, decimals=8),
np.around(n_array[idx] - size * p_array[idx], decimals=8),
np.around(result[idx], decimals=8),
]
)
rows.append(
[
len(n_array) + 1,
"-",
np.sum(n_array),
np.around(np.sum(p_array), decimals=8),
np.around(np.sum(p_array * size), decimals=8),
np.around(np.sum(n_array - size * p_array), decimals=8),
np.around(np.sum(result), decimals=8),
]
)
print("\n", tabulate(rows, tablefmt=format), "\n")
def generate(self, distr_name: str, size: int) -> None:
if distr_name == "Normal":
distr = np.random.normal(0, 1, size=100)
elif distr_name == "Laplace":
distr = scs.laplace.rvs(size=20, scale=1 / math.sqrt(2), loc=0)
elif distr_name == "Uniform":
distr = scs.uniform.rvs(size=20, loc=-math.sqrt(3), scale=2 * math.sqrt(3))
else:
raise ValueError(f"Unexpected distribution: {distr_name}")
k = math.ceil(1.72 * size ** (1 / 3))
print(f"\n{distr_name} distribution:")
print(f"mu = {np.around(np.mean(distr), decimals=8)}")
print(f"sigma = {np.around(np.std(distr), decimals=8)}")
print(f"k = {k}")
print(f"chi_2 = {scs.chi2.ppf(self._p, k - 1)}")
limits = np.linspace(-1.1, 1.1, num=k - 1)
n_array, p_array = self.get_probability(distr, limits)
self.print_table(n_array, p_array, limits, size)
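# Hedged usage sketch (added; not part of the original module): build the
# chi-squared goodness-of-fit table for a sample of 100 normal values.
if __name__ == "__main__":
    manager = DistrManager(alpha=0.05)
    manager.generate("Normal", size=100)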
|
import socket
import threading
host = '127.0.0.1'
port = 9999
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind((host, port))
server.listen(5)
print '[*] Listening on %s:%d' % (host, port)
def handle_client(client_socket):
request = client_socket.recv(1024)
    print '[*] Received %s' % request
client_socket.send('ACK')
client_socket.close()
while True:
client, addr = server.accept()
print '[*] Accepted connection from %s:%d' % (addr[0], addr[1])
client_handler = threading.Thread(target=handle_client, args=(client,))
client_handler.start()
|
# Copyright 2013 Viewfinder Inc. All Rights Reserved.
"""Viewfinder LinkIdentityOperation.
This operation links a previously unlinked identity to a target user account.
"""
__authors__ = ['[email protected] (Andy Kimball)']
import logging
from tornado import gen
from viewfinder.backend.base.exceptions import PermissionError
from viewfinder.backend.db.identity import Identity
from viewfinder.backend.db.operation import Operation
from viewfinder.backend.op.notification_manager import NotificationManager
from viewfinder.backend.resources.message.error_messages import ALREADY_LINKED
from viewfinder.backend.op.viewfinder_op import ViewfinderOperation
class LinkIdentityOperation(ViewfinderOperation):
"""The LinkIdentity operation follows the four phase pattern described in the header of
operation_map.py.
"""
def __init__(self, client, target_user_id, source_identity_key):
super(LinkIdentityOperation, self).__init__(client)
self._target_user_id = target_user_id
self._source_identity_key = source_identity_key
@classmethod
@gen.coroutine
def Execute(cls, client, target_user_id, source_identity_key):
"""Entry point called by the operation framework."""
yield LinkIdentityOperation(client, target_user_id, source_identity_key)._Link()
@gen.coroutine
def _Link(self):
"""Orchestrates the link identity operation by executing each of the phases in turn."""
yield self._Check()
self._client.CheckDBNotModified()
yield self._Update()
# No accounting for this operation.
yield Operation.TriggerFailpoint(self._client)
yield self._Notify()
@gen.coroutine
def _Check(self):
"""Gathers pre-mutation information:
1. Queries for the identity.
Validates the following:
1. Identity cannot be already linked to a different user.
"""
self._identity = yield gen.Task(Identity.Query, self._client, self._source_identity_key, None, must_exist=False)
if self._identity is None:
self._identity = Identity.CreateFromKeywords(key=self._source_identity_key, authority='Viewfinder')
if self._identity.user_id is not None and self._identity.user_id != self._target_user_id:
raise PermissionError(ALREADY_LINKED, account=Identity.GetDescription(self._source_identity_key))
@gen.coroutine
def _Update(self):
"""Updates the database:
1. Binds the identity to the target user.
"""
self._identity.expires = 0
self._identity.user_id = self._target_user_id
yield gen.Task(self._identity.Update, self._client)
@gen.coroutine
def _Notify(self):
"""Creates notifications:
1. Notifies other users with contacts that are bound to the identity.
2. Notifies target user that identities have changed.
"""
# Send notifications for all identities that were re-bound.
yield NotificationManager.NotifyLinkIdentity(self._client,
self._target_user_id,
self._source_identity_key,
self._op.timestamp)
|
""" Register modules with `ptflops` """
from .closure import Add, Multiply # noqa: F401
from .container import ( # noqa: F401
Broadcast,
BroadcastReduce,
Conditional,
Parallel,
Reduce,
Residual,
Sequential,
)
from .conv import Conv1d, Conv2d, Conv3d # noqa: F401
from .linear import Linear # noqa: F401
from .logging import getLogger
from .pooling import ( # noqa: F401
AdaptiveAvgPool2d,
AdaptiveAvgPool3d,
AdaptiveMaxPool2d,
AdaptiveMaxPool3d,
AvgPool1d,
AvgPool2d,
AvgPool3d,
MaxPool1d,
MaxPool2d,
MaxPool3d,
)
logger = getLogger(__name__)
# Register modules in `ptflops`
def _register_ptflops():
try:
from ptflops import flops_counter as fc
# Conv
fc.MODULES_MAPPING[Conv1d] = fc.conv_flops_counter_hook
fc.MODULES_MAPPING[Conv2d] = fc.conv_flops_counter_hook
fc.MODULES_MAPPING[Conv3d] = fc.conv_flops_counter_hook
# Pooling
fc.MODULES_MAPPING[AvgPool1d] = fc.pool_flops_counter_hook
fc.MODULES_MAPPING[MaxPool1d] = fc.pool_flops_counter_hook
fc.MODULES_MAPPING[AvgPool2d] = fc.pool_flops_counter_hook
fc.MODULES_MAPPING[MaxPool2d] = fc.pool_flops_counter_hook
fc.MODULES_MAPPING[AdaptiveAvgPool2d] = fc.pool_flops_counter_hook
fc.MODULES_MAPPING[AdaptiveMaxPool2d] = fc.pool_flops_counter_hook
fc.MODULES_MAPPING[AvgPool3d] = fc.pool_flops_counter_hook
fc.MODULES_MAPPING[MaxPool3d] = fc.pool_flops_counter_hook
fc.MODULES_MAPPING[AdaptiveAvgPool3d] = fc.pool_flops_counter_hook
fc.MODULES_MAPPING[AdaptiveMaxPool3d] = fc.pool_flops_counter_hook
# Linear
fc.MODULES_MAPPING[Linear] = fc.linear_flops_counter_hook
except ModuleNotFoundError: # pragma: no cover
pass
except Exception as e: # pragma: no cover
logger.warning(f"Failed to add flops_counter_hook: {e}")
_register_ptflops()
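# Hedged illustration (added; not part of the original module): a small helper
# checking that the registration above took effect when `ptflops` is installed.
# It is only a sketch and returns 0 when `ptflops` is unavailable.
def _registered_module_count():
    try:
        from ptflops import flops_counter as fc
    except ModuleNotFoundError:  # pragma: no cover
        return 0
    ours = (Conv1d, Conv2d, Conv3d, Linear)
    return sum(1 for module in ours if module in fc.MODULES_MAPPING)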
|
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/95_cli.ipynb (unless otherwise specified).
__all__ = ['logger', 'get_parser', 'main']
# Cell
import argparse
import logging
from pathlib import Path
import random
from .utils import log_elapsed_time
from .coco import merge_datasets, cut_annotations_per_category, remove_invalid_elements
from .json_file import *
from .json_tree import *
from .crop_tree import *
# Cell
logging.basicConfig(level=logging.INFO, format='%(levelname)s: %(message)s')
logger = logging.getLogger()
# Cell
def get_parser():
parser = argparse.ArgumentParser(
description="Tool for converting datasets in COCO format between different representations"
)
parser.add_argument("--in_json_file", type=Path, nargs="*", default=[],
help=(
"Path to one or multiple json files storing COCO dataset "
"in `json_file` representation (all json-based datasets will be merged)."
))
parser.add_argument("--in_json_tree", type=Path, nargs="*", default=[],
help=(
"Path to one or multiple directories storing COCO dataset "
"in `json_tree` representation (all json-based datasets will be merged)."
))
parser.add_argument("--in_crop_tree", type=Path, nargs="*", default=[],
help=(
"Path to one or multiple directories storing COCO dataset "
"in `crop_tree` representation (all crop-based datasets will be merged and will "
"overwrite the json-based datasets)."
))
parser.add_argument("--out_path", type=Path,
help="Path to the output dataset (file or directory: depends on `--out_format`)")
parser.add_argument("--out_format", choices=['json_file', 'json_tree', 'crop_tree'])
parser.add_argument("--seed", type=int, default=42, help="Random seed.")
parser.add_argument("--max_crops_per_class", type=int, default=None,
help=(
"If set, the tool will randomly select up to this number of "
"crops (annotations) per each class (category) and drop the others."),
)
parser.add_argument("--drop_invalid_elements", action='store_true',
help="If set, drops broken elements (for example, negative IDs or broken bboxes).")
parser.add_argument("--dump_crop_tree_num_processes", type=int, default=1)
parser.add_argument("--overwrite", action='store_true',
help="If set, will delete the output file/directory before dumping the result dataset.")
parser.add_argument("--indent", default=4,
type=lambda x: int(x) if str(x).lower() not in ('none', 'null', '~') else None,
help="Indentation in the output json files.")
parser.add_argument("--update", action='store_true',
help="Whether to update objects with the same ID, but different content during the dataset merge. "
"If not used and such objects are found - exception will be thrown. "
"The update strategy: [in_json_tree, in_json_file, in_crop_tree], from left to right within each group, top-right one wins. "
"Beware, crop_tree datasets are owerwritting and removing data from other datasets: "
"consider first merging crop_tree with it's json_tree/file into json_tree/file and merge the resulting dataset with others."
)
parser.add_argument("--debug", action='store_true')
return parser
# Cell
@log_elapsed_time(lambda t: logger.info(f'Total elapsed: {t.elapsed}'))
def main(args=None):
args = args or get_parser().parse_args()
if args.debug:
logging.getLogger().setLevel(logging.DEBUG)
logger.info(f'Arguments: {args}')
in_json_tree_list = args.in_json_tree
in_json_file_list = args.in_json_file
in_crop_tree_list = args.in_crop_tree
seed = args.seed
max_crops_per_class = args.max_crops_per_class
drop_invalid_elements = args.drop_invalid_elements
out_path = args.out_path
out_format = args.out_format
dump_crop_tree_num_processes = args.dump_crop_tree_num_processes
overwrite = args.overwrite
indent = args.indent
update: bool = args.update
if out_path and not out_format or not out_path and out_format:
raise ValueError(f'Option --out_format requires --out_path and vice versa')
random.seed(args.seed)
coco = None
coco_count = 0
for in_json_tree in in_json_tree_list:
coco = merge_datasets(coco, load_json_tree(in_json_tree), update)
coco_count += 1
for in_json_file in in_json_file_list:
coco = merge_datasets(coco, load_json_file(in_json_file), update)
coco_count += 1
if coco is None:
        raise ValueError('No base dataset found, please specify at least one of: '
'--in_json_tree / --in_json_file (multiple arguments allowed)')
if coco_count > 1:
logger.info(f'Total loaded json dataset: {coco.to_full_str()}')
coco_crop = None
coco_crop_count = 0
for in_crop_tree in in_crop_tree_list:
coco_crop = merge_datasets(coco_crop, load_crop_tree(in_crop_tree, coco), update)
coco_crop_count += 1
if coco_crop is not None:
if coco_crop_count > 1:
logger.info(f'Total loaded crop-tree dataset: {coco_crop.to_full_str()}')
logger.info('Using coco_crop dataset only.')
coco = coco_crop
if drop_invalid_elements:
coco = remove_invalid_elements(coco)
logger.info(f'After removing invalid elements: {coco.to_full_str()}')
if max_crops_per_class:
logger.info(f'Cutting off crops up to {max_crops_per_class} per class, random seed={seed}')
coco = cut_annotations_per_category(coco, max_crops_per_class)
logger.info(f'After cutting off: {coco.to_full_str()}')
logger.info(f'[.] Result dataset: {coco.to_full_str()}')
details = ''
if out_format is not None:
assert out_path
dump_kwargs = dict(skip_nulls=True, overwrite=overwrite, indent=indent)
if out_format == 'json_file':
dump_fun = dump_json_file
elif out_format == 'json_tree':
dump_fun = dump_json_tree
elif out_format == 'crop_tree':
dump_fun = dump_crop_tree
dump_kwargs['num_processes'] = dump_crop_tree_num_processes
else:
raise ValueError(out_format)
dump_fun(coco, out_path, **dump_kwargs)
if out_path.is_dir():
details = f': {[p.name for p in out_path.iterdir()]}'
logger.info(f'[+] Success: {out_format} dumped to {out_path}' + details) |
### reporting
from time import time, strftime
from locust import events
from logging import getLogger
log = getLogger(__name__)
log_file_name = f'test_details_{strftime("%Y_%m_%d_%H_%M_%S")}.csv'
log_file = open(log_file_name, 'wt')
log_file.write('timestamp\tresult\tresponse_time\trequest_type\tname\tresponse_length\n')
def save_success(request_type, name, response_time, response_length, **kw):
log_file.write(f'{int(time() * 1000)}\tSUCCESS\t{int(response_time)}\t{request_type}\t{name}\t{response_length}\n')
log_file.flush()
def save_failure(request_type, name, response_time, response_length, **kw):
log_file.write(f'{int(time() * 1000)}\tFAILURE\t{int(response_time)}\t{request_type}\t{name}\t{response_length}\n')
log_file.flush()
def close_log_file(**kw):
log_file.close()
events.request_success += save_success
events.request_failure += save_failure
events.quitting += close_log_file
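# Hedged companion sketch (added; not part of the original file): read the
# generated TSV log back and count failures. Only the standard library is
# used; the column names match the header written above.
def summarize_log(path=log_file_name):
    import csv
    with open(path) as fh:
        rows = list(csv.DictReader(fh, delimiter='\t'))
    failures = sum(1 for row in rows if row['result'] == 'FAILURE')
    return len(rows), failures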
|
# by:koala @mixiologist
# Lord Userbot
from userbot import DEVS, WHITELIST, blacklistman
from userbot.events import register
from userbot.utils import chataction, get_user_from_event, man_cmd
# Ported For Lord-Userbot by liualvinas/Alvin
@chataction()
async def handler(tele):
if not tele.user_joined and not tele.user_added:
return
try:
from userbot.modules.sql_helper.gmute_sql import is_gmuted
guser = await tele.get_user()
gmuted = is_gmuted(guser.id)
except BaseException:
return
if gmuted:
for i in gmuted:
if i.sender == str(guser.id):
chat = await tele.get_chat()
admin = chat.admin_rights
creator = chat.creator
if admin or creator:
try:
                        await tele.client.edit_permissions(
                            tele.chat_id, guser.id, view_messages=False
                        )
await tele.reply(
f"**Gbanned Spoted** \n"
f"**First Name :** [{guser.id}](tg://user?id={guser.id})\n"
f"**Action :** `Banned`"
)
except BaseException:
return
@man_cmd(pattern="gband(?: |$)(.*)")
@register(pattern=r"^\.cgband(?: |$)(.*)", sudo=True)
async def gben(userbot):
dc = userbot
sender = await dc.get_sender()
me = await dc.client.get_me()
if sender.id != me.id:
dark = await dc.reply("`Gbanning...`")
else:
dark = await dc.edit("`Memproses Global Banned Jamet..`")
await dark.edit("`Global Banned Akan Segera Aktif..`")
a = b = 0
if userbot.is_private:
user = userbot.chat
reason = userbot.pattern_match.group(1)
try:
user, reason = await get_user_from_event(userbot)
except BaseException:
pass
try:
if not reason:
reason = "Private"
except BaseException:
return await dark.edit("**Gagal Global Banned :(**")
if user:
if user.id in DEVS:
return await dark.edit("**Gagal Global Banned, dia adalah Pembuat Saya 🤪**")
if user.id in WHITELIST:
return await dark.edit(
"**Gagal Global Banned, dia adalah admin @MikooUserbot 🤪**"
)
try:
from userbot.modules.sql_helper.gmute_sql import gmute
except BaseException:
pass
testuserbot = [
d.entity.id
for d in await userbot.client.get_dialogs()
if (d.is_group or d.is_channel)
]
for i in testuserbot:
try:
await userbot.client.edit_permissions(i, user, view_messages=False)
a += 1
await dark.edit(
r"\\**#GBanned_User**//"
f"\n\n**First Name:** [{user.first_name}](tg://user?id={user.id})\n"
f"**User ID:** `{user.id}`\n"
f"**Action:** `Global Banned`"
)
except BaseException:
b += 1
else:
await dark.edit("**Balas Ke Pesan Penggunanya Goblok**")
try:
if gmute(user.id) is False:
return await dark.edit(
"**#Already_GBanned**\n\nUser Already Exists in My Gban List.**"
)
except BaseException:
pass
return await dark.edit(
r"\\**#GBanned_User**//"
f"\n\n**First Name:** [{user.first_name}](tg://user?id={user.id})\n"
f"**User ID:** `{user.id}`\n"
f"**Action:** `Global Banned by {me.first_name}`"
)
@man_cmd(pattern=r"ungband(?: |$)(.*)")
@register(pattern=r"^\.cungband(?: |$)(.*)", sudo=True)
async def gunben(userbot):
dc = userbot
sender = await dc.get_sender()
me = await dc.client.get_me()
if sender.id != me.id:
dark = await dc.reply("`Ungbanning...`")
else:
dark = await dc.edit("`Ungbanning....`")
await dark.edit("`Membatalkan Perintah Global Banned`")
a = b = 0
if userbot.is_private:
user = userbot.chat
reason = userbot.pattern_match.group(1)
try:
user, reason = await get_user_from_event(userbot)
except BaseException:
pass
try:
if not reason:
reason = "Private"
except BaseException:
return await dark.edit("**Gagal Ungbanned :(**")
if user:
if user.id in blacklistman:
return await dark.edit(
"**Gagal ungbanned, Karna dia ada di Blacklist Man**"
)
try:
from userbot.modules.sql_helper.gmute_sql import ungmute
except BaseException:
pass
testuserbot = [
d.entity.id
for d in await userbot.client.get_dialogs()
if (d.is_group or d.is_channel)
]
for i in testuserbot:
try:
await userbot.client.edit_permissions(i, user, send_messages=True)
a += 1
await dark.edit("`Membatalkan Global Banned...`")
except BaseException:
b += 1
else:
await dark.edit("`Balas Ke Pesan Penggunanya Goblok`")
try:
if ungmute(user.id) is False:
return await dark.edit("**Error! Pengguna Sedang Tidak Di Global Banned.**")
except BaseException:
pass
return await dark.edit(
r"\\**#UnGbanned_User**//"
f"\n\n**First Name:** [{user.first_name}](tg://user?id={user.id})\n"
f"**User ID:** `{user.id}`\n"
f"**Action:** `UnGBanned by {me.first_name}`"
)
|
class WisePreInputData:
NONE = "None"
def __init__(self,
figure_error_file=NONE,
figure_error_step=0.0,
figure_error_amplitude_scaling=1.0,
figure_user_units_to_m=1.0,
roughness_file=NONE,
roughness_x_scaling=1.0,
roughness_y_scaling=1.0
):
super().__init__()
self.figure_error_file = figure_error_file
self.figure_error_step = figure_error_step
self.figure_error_amplitude_scaling = figure_error_amplitude_scaling
self.figure_user_units_to_m = figure_user_units_to_m
self.roughness_file = roughness_file
        self.roughness_x_scaling = roughness_x_scaling
self.roughness_y_scaling = roughness_y_scaling
from wofrywise2.propagator.propagator1D.wise_propagator import WisePropagationElements
from wofrywise2.propagator.wavefront1D.wise_wavefront import WiseWavefront
from wofrywise2.beamline.wise_beamline_element import WiseBeamlineElement, WiseOpticalElement
import copy
class WiseData(object):
def __init__(self, wise_beamline=WisePropagationElements(), wise_wavefront=WiseWavefront()):
super().__init__()
self.wise_beamline = wise_beamline
self.wise_wavefront = wise_wavefront
def duplicate(self):
duplicated_wise_beamline = None
if not self.wise_beamline is None:
duplicated_wise_beamline = WisePropagationElements()
for beamline_element in self.wise_beamline.get_propagation_elements():
duplicated_wise_optical_element = copy.deepcopy(beamline_element.get_optical_element().wise_optical_element)
duplicated_wise_beamline.add_beamline_element(WiseBeamlineElement(optical_element=WiseOpticalElement(wise_optical_element=duplicated_wise_optical_element)))
duplicated_wise_wavefront = None
if not self.wise_wavefront is None:
duplicated_wise_wavefront = WiseWavefront(wise_computation_results=copy.deepcopy(self.wise_wavefront.wise_computation_result))
return WiseData(wise_beamline=duplicated_wise_beamline,
wise_wavefront=duplicated_wise_wavefront)
|
# -*- coding: utf-8 -*-
import logging
from hashcode19.helpers import Input, Output, PictureType, Slide, score_transition
logger = logging.getLogger(__name__)
def main(inp: Input) -> Output:
"""Sort by number of tags, then trivially pack vertical pictures.
Choose the best next slide in 1000 other slides."""
slideshow = []
slides = [Slide([inp.id_to_pic[h]]) for h in inp.type_to_pics[PictureType.H]]
vp = inp.type_to_pics[PictureType.V]
slides = slides + [Slide([inp.id_to_pic[vp[i]], inp.id_to_pic[vp[i+1]]]) for i in range(0, len(vp)//2, 2)]
slides = sorted(slides, key=lambda x: len(x.tags), reverse=False)
cur_slide, slides = slides[0], slides[1:]
best_slide = None
slideshow.append(cur_slide)
while len(slides) > 0:
if len(slides) % 1000 == 0:
logger.debug("Slides remaining: {}".format(len(slides)))
best_max = -1
best_idx = -1
for i in range(min(1000, len(slides))):
s = slides[i]
cur_score = score_transition(cur_slide, s)
if cur_score > best_max:
best_max = cur_score
best_slide = s
best_idx = i
slideshow.append(best_slide)
slides.pop(best_idx)
cur_slide = best_slide
return Output(slideshow)
|
# -*- coding: utf-8 -*-
# Copyright 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from mock import Mock
from twisted.internet import defer
import tests.unittest
import tests.utils
class ClientIpStoreTestCase(tests.unittest.TestCase):
def __init__(self, *args, **kwargs):
super(ClientIpStoreTestCase, self).__init__(*args, **kwargs)
self.store = None # type: synapse.storage.DataStore
self.clock = None # type: tests.utils.MockClock
@defer.inlineCallbacks
def setUp(self):
self.hs = yield tests.utils.setup_test_homeserver(self.addCleanup)
self.store = self.hs.get_datastore()
self.clock = self.hs.get_clock()
@defer.inlineCallbacks
def test_insert_new_client_ip(self):
self.clock.now = 12345678
user_id = "@user:id"
yield self.store.insert_client_ip(
user_id, "access_token", "ip", "user_agent", "device_id"
)
result = yield self.store.get_last_client_ip_by_device(user_id, "device_id")
r = result[(user_id, "device_id")]
self.assertDictContainsSubset(
{
"user_id": user_id,
"device_id": "device_id",
"access_token": "access_token",
"ip": "ip",
"user_agent": "user_agent",
"last_seen": 12345678000,
},
r,
)
@defer.inlineCallbacks
def test_disabled_monthly_active_user(self):
self.hs.config.limit_usage_by_mau = False
self.hs.config.max_mau_value = 50
user_id = "@user:server"
yield self.store.insert_client_ip(
user_id, "access_token", "ip", "user_agent", "device_id"
)
active = yield self.store.user_last_seen_monthly_active(user_id)
self.assertFalse(active)
@defer.inlineCallbacks
def test_adding_monthly_active_user_when_full(self):
self.hs.config.limit_usage_by_mau = True
self.hs.config.max_mau_value = 50
lots_of_users = 100
user_id = "@user:server"
self.store.get_monthly_active_count = Mock(
return_value=defer.succeed(lots_of_users)
)
yield self.store.insert_client_ip(
user_id, "access_token", "ip", "user_agent", "device_id"
)
active = yield self.store.user_last_seen_monthly_active(user_id)
self.assertFalse(active)
@defer.inlineCallbacks
def test_adding_monthly_active_user_when_space(self):
self.hs.config.limit_usage_by_mau = True
self.hs.config.max_mau_value = 50
user_id = "@user:server"
active = yield self.store.user_last_seen_monthly_active(user_id)
self.assertFalse(active)
yield self.store.insert_client_ip(
user_id, "access_token", "ip", "user_agent", "device_id"
)
active = yield self.store.user_last_seen_monthly_active(user_id)
self.assertTrue(active)
@defer.inlineCallbacks
def test_updating_monthly_active_user_when_space(self):
self.hs.config.limit_usage_by_mau = True
self.hs.config.max_mau_value = 50
user_id = "@user:server"
yield self.store.register(user_id=user_id, token="123", password_hash=None)
active = yield self.store.user_last_seen_monthly_active(user_id)
self.assertFalse(active)
yield self.store.insert_client_ip(
user_id, "access_token", "ip", "user_agent", "device_id"
)
active = yield self.store.user_last_seen_monthly_active(user_id)
self.assertTrue(active)
|
import attr
class MyMixin(object):
def __init__(self, *a, param=True, **kw):
print(f"MyMixin.__init__ of {type(self)})")
self.param = param
super().__init__(*a, **kw)
@attr.s
class A:
value: int = attr.ib(default=3)
class B(A, MyMixin):
pass
class C(MyMixin, A):
pass
@attr.s
class Aprime:
value: int = attr.ib(default=3)
Aprime = type("Aprime", (MyMixin, Aprime), {})
@attr.s
class Adouble(MyMixin):
value: int = attr.ib(default=3)
def __attrs_post_init__(self):
super().__init__() # hum...
a = A()
b = B()
c = C()
ap = Aprime()
ad = Adouble()
assert a.value == 3
assert b.value == 3
assert c.value == 3
assert ap.value == 3
assert ad.value == 3
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
__author__ = ['Tomas Mendez Echenagucia <[email protected]>']
__copyright__ = 'Copyright 2020, Design Machine Group - University of Washington'
__license__ = 'MIT License'
__email__ = '[email protected]'
from compas_vibro.viewers import PlotlyViewer
class ModalViewer(PlotlyViewer):
"""Plotly based viewer for modal analysis.
"""
def __init__(self, structure):
super().__init__(structure)
self.scale = 20
def show(self):
self.make_layout('modal')
self.plot_shape('modal')
self._show('modal')
if __name__ == "__main__":
import os
import compas_vibro
from compas_vibro.structure import Structure
for i in range(60): print()
filepath = os.path.join(compas_vibro.DATA, 'ansys_mesh_flat_20x20_modal.obj')
s = Structure.from_obj(filepath)
v = ModalViewer(s)
v.show()
|
"""
Created on Oct 13, 2021
@author: Richard Christie
"""
from opencmiss.utils.zinc.general import ChangeManager
from opencmiss.zinc.node import Nodeset
from opencmiss.zinc.field import Field, FieldGroup
from opencmiss.zinc.scene import Scene
def get_scene_selection_group(scene: Scene, subelement_handling_mode=FieldGroup.SUBELEMENT_HANDLING_MODE_FULL):
"""
Get existing scene selection group of standard name.
:param scene: Zinc Scene to get selection group for.
:param subelement_handling_mode: Mode controlling how faces, lines and nodes are
automatically added or removed with higher dimensional elements.
:return: Existing selection group, or None.
"""
selection_group = scene.getSelectionField().castGroup()
if selection_group.isValid():
selection_group.setSubelementHandlingMode(subelement_handling_mode)
return selection_group
return None
selection_group_name = 'cmiss_selection'
def create_scene_selection_group(scene: Scene, subelement_handling_mode=FieldGroup.SUBELEMENT_HANDLING_MODE_FULL):
"""
Create empty, unmanaged scene selection group of standard name.
Should have already called get_selection_group with None returned.
Can discover orphaned group of that name.
:param scene: Zinc Scene to create selection for.
:param subelement_handling_mode: Mode controlling how faces, lines and nodes are
automatically added or removed with higher dimensional elements. Defaults to on/full.
:return: Selection group for scene.
"""
region = scene.getRegion()
fieldmodule = region.getFieldmodule()
with ChangeManager(fieldmodule):
selection_group = fieldmodule.findFieldByName(selection_group_name)
if selection_group.isValid():
selection_group = selection_group.castGroup()
if selection_group.isValid():
selection_group.clear()
selection_group.setManaged(False)
if not selection_group.isValid():
selection_group = fieldmodule.createFieldGroup()
selection_group.setName(selection_group_name)
selection_group.setSubelementHandlingMode(subelement_handling_mode)
scene.setSelectionField(selection_group)
return selection_group
def group_add_group_elements(group: FieldGroup, other_group: FieldGroup, highest_dimension_only=True):
"""
Add to group elements from other_group.
:param group: Zinc FieldGroup to modify.
:param other_group: Zinc FieldGroup to add elements from.
:param highest_dimension_only: If set (default), only add elements of
highest dimension present in other_group, otherwise add all dimensions.
"""
fieldmodule = group.getFieldmodule()
with ChangeManager(fieldmodule):
for dimension in range(3, 0, -1):
mesh = fieldmodule.findMeshByDimension(dimension)
other_element_group = other_group.getFieldElementGroup(mesh)
if other_element_group.isValid() and (other_element_group.getMeshGroup().getSize() > 0):
element_group = group.getFieldElementGroup(mesh)
if not element_group.isValid():
element_group = group.createFieldElementGroup(mesh)
mesh_group = element_group.getMeshGroup()
mesh_group.addElementsConditional(other_element_group)
if highest_dimension_only:
break
def group_add_group_nodes(group: FieldGroup, other_group: FieldGroup, nodeset: Nodeset):
"""
    Add to group nodes from other_group.
:param group: Zinc FieldGroup to modify.
:param other_group: Zinc FieldGroup to add nodes from.
:param nodeset: Nodeset to add nodes from.
"""
other_node_group = other_group.getFieldNodeGroup(nodeset)
if other_node_group.isValid() and (other_node_group.getNodesetGroup().getSize() > 0):
node_group = group.getFieldNodeGroup(nodeset)
if not node_group.isValid():
node_group = group.createFieldNodeGroup(nodeset)
nodeset_group = node_group.getNodesetGroup()
nodeset_group.addNodesConditional(other_group.getFieldNodeGroup(nodeset))
def field_is_managed_real_1_to_3_components(field_in: Field):
"""
Conditional function returning True if the field is real-valued
with up to 3 components, and is managed.
"""
return (field_in.getValueType() == Field.VALUE_TYPE_REAL) and \
(field_in.getNumberOfComponents() <= 3) and field_in.isManaged()
|
#!/usr/bin/env python3
from greshunkel import Template, Context, GshklFilterFunc
# Taken from the greshunkel_test.c file
test_template =\
"""
<html>
<body>
xXx SCREAM _include.html xXx
xXx LOOP i LOOP_TEST xXx
<li>xXx @TEST xXx xXx @i xXx</li>
xXx BBL xXx
<span>This is the real xXx @TRICKY xXx xXx @ONE xXx</span>
<p>This is a regular string: xXx @TEST xXx</p>
<p>This is an integer: xXx @FAKEINT xXx</p>
<ul>
xXx LOOP i LOOP_TEST xXx
<li>XxX return_z xXx @i xXx XxX</li>
xXx BBL xXx
</ul>
<p>Context Interpolation:</p>
<p>xXx @sub.name xXx - xXx @sub.other xXx</p>
<p>XxX return_hello doesnt_matter_at_all XxX</p>
xXx LOOP subs SUB_LOOP_TEST xXx
<p>FILTERS IN FILTERS IN LOOPS: XxX return_z F XxX</p>
<p>XxX return_hello f XxX</p>
<p>xXx @subs.name xXx - xXx @subs.other xXx</p>
xXx BBL xXx
</body>
</html>
"""
def return_hello(arg):
return b"test"
def return_z(arg):
return b"z"
def main():
return_helloc = GshklFilterFunc(return_hello)
return_zc = GshklFilterFunc(return_z)
context = Context({
"TEST": "This is a test.",
"FAKEINT": 666,
"TRICKY": "TrIcKy",
"ONE": 1,
"LOOP_TEST": ["a", "b", "c", 1, 2, 3],
"SUB_LOOP_TEST": [
{"name": "One", "other": 1 },
{"name": "Two", "other": 2 },
{"name": "Three", "other": 3 },
],
"return_hello": return_helloc,
"return_z": return_zc,
"sub": { "name": "test", "other": 777 },
})
template = Template(test_template)
print(template.render(context))
if __name__ == '__main__':
main()
|
from ..remote import RemoteModel
from infoblox_netmri.utils.utils import check_api_availability
class TimeWindowRemote(RemoteModel):
"""
This table list out the entries of Time Window System.
| ``id:`` The internal NetMRI identifier of a time window.
| ``attribute type:`` number
| ``schedule_name:`` The schedule name of a time window.
| ``attribute type:`` string
| ``time_zone:`` The time zone of a time window.
| ``attribute type:`` string
| ``system_window_ind:`` A flag indicates whether a time zone is a system time window or not.
| ``attribute type:`` bool
| ``recur_type:`` The recurrence type of a time window.
| ``attribute type:`` string
| ``start_date:`` The starting effective date of this record.
| ``attribute type:`` datetime
| ``end_date:`` The ending effective date of this record, or empty if still in effect.
| ``attribute type:`` datetime
| ``interval:`` The time interval(minutes) of a time window.
| ``attribute type:`` number
| ``ordinal:`` The ordinal number of a time window.
| ``attribute type:`` number
| ``period_mins:`` The duration (specified in minutes) of a time window system.
| ``attribute type:`` number
| ``start_min:`` The starting time of a time window.
| ``attribute type:`` number
| ``end_min:`` The ending time of a time window.
| ``attribute type:`` number
| ``sun_start:`` The start value of a Sunday in time window.
| ``attribute type:`` number
| ``sun_end:`` The end value of a Sunday in time window.
| ``attribute type:`` number
| ``mon_start:`` The start value of a Monday in time window.
| ``attribute type:`` number
| ``mon_end:`` The end value of a Monday in time window.
| ``attribute type:`` number
| ``tue_start:`` The start value of a Tuesday in time window.
| ``attribute type:`` number
| ``tue_end:`` The end value of a Tuesday in a time window.
| ``attribute type:`` number
| ``wed_start:`` The start value of a Wednesday in time window.
| ``attribute type:`` number
| ``wed_end:`` The end value of a Wednesday in a time window.
| ``attribute type:`` number
| ``thu_start:`` The start value of a Thursday in time window.
| ``attribute type:`` number
| ``thu_end:`` The end value of a Thursday in time window.
| ``attribute type:`` number
| ``fri_start:`` The start value of a Friday in time window.
| ``attribute type:`` number
| ``fri_end:`` The end value of a Friday in time window.
| ``attribute type:`` number
| ``sat_start:`` The start value of a Saturday in time window.
| ``attribute type:`` number
| ``sat_end:`` The end value of a Saturday in time window.
| ``attribute type:`` number
| ``created_at:`` The date and time the record was initially created in NetMRI.
| ``attribute type:`` datetime
| ``updated_at:`` The date and time the record was last modified in NetMRI.
| ``attribute type:`` datetime
"""
properties = ("id",
"schedule_name",
"time_zone",
"system_window_ind",
"recur_type",
"start_date",
"end_date",
"interval",
"ordinal",
"period_mins",
"start_min",
"end_min",
"sun_start",
"sun_end",
"mon_start",
"mon_end",
"tue_start",
"tue_end",
"wed_start",
"wed_end",
"thu_start",
"thu_end",
"fri_start",
"fri_end",
"sat_start",
"sat_end",
"created_at",
"updated_at",
)
|
from typing import List
from django import forms
from django.conf import settings
from .value import LocalizedValue
from .widgets import LocalizedFieldWidget
class LocalizedFieldForm(forms.MultiValueField):
"""Form for a localized field, allows editing
the field in multiple languages."""
widget = LocalizedFieldWidget
value_class = LocalizedValue
def __init__(self, *args, **kwargs):
"""Initializes a new instance of :see:LocalizedFieldForm."""
fields = []
for lang_code, _ in settings.LANGUAGES:
field_options = {'required': False}
if lang_code == settings.LANGUAGE_CODE:
field_options['required'] = kwargs.get('required', True)
field_options['label'] = lang_code
fields.append(forms.fields.CharField(**field_options))
super(LocalizedFieldForm, self).__init__(
fields,
require_all_fields=False,
*args, **kwargs
)
# set 'required' attribute for each widget separately
for f, w in zip(self.fields, self.widget.widgets):
w.is_required = f.required
def compress(self, value: List[str]) -> LocalizedValue:
"""Compresses the values from individual fields
into a single :see:LocalizedValue instance.
Arguments:
value:
The values from all the widgets.
Returns:
A :see:LocalizedValue containing all
the value in several languages.
"""
localized_value = self.value_class()
for (lang_code, _), value in zip(settings.LANGUAGES, value):
localized_value.set(lang_code, value)
return localized_value
|
"""
Author: Jason Eisele
Date: October 1, 2020
Email: [email protected]
Scope: App for Tensorflow Doggo classifier
"""
from fastapi import APIRouter
from app.api.routes import heartbeat, prediction
api_router = APIRouter()
api_router.include_router(heartbeat.router, tags=["health"], prefix="/health")
api_router.include_router(prediction.router, tags=["prediction"], prefix="/predict") |
#!/home/mario/anaconda3/envs/project2_venv/bin/python
"""
DESCRIPTION:
This script tries to gather all the steps needed to
perform once the basecalls have been obtained.
"""
# Libraries
import os
import sys
from tombo import tombo_helper, tombo_stats, resquiggle
import h5py
import mappy
from tqdm import tqdm
import multiprocessing as mp
from multiprocessing import Manager
if __name__ == "__main__":
workdir = sys.argv[1]
n_processes = int(sys.argv[2])
flowcell = sys.argv[3]
    selected_folder = sys.argv[4]
# Read files
reads_folder = workdir + '/' + 'reads' + '/' + flowcell
basecalls_folder = workdir + '/' + 'basecalls' + '/' + flowcell
fastq_file = basecalls_folder + '/' + 'multi.fastq'
single_reads_folder = reads_folder + '/' + 'single' + '/' + selected_folder
# Annotate the reads with the basecalls
print('***************************************************************************************')
print('Annotate the reads')
print('***************************************************************************************')
# Read all the possible fastqs
command = f'tombo preprocess annotate_raw_with_fastqs --fast5-basedir {single_reads_folder} --fastq-filenames {fastq_file} --overwrite'
code = os.system(command)
print('Annotation completed')
# Resquiggle
print('***************************************************************************************')
print('Resquiggle the reads...')
print('***************************************************************************************')
reference_file = workdir + '/' + 'reference.fasta'
command = f'tombo resquiggle {single_reads_folder} {reference_file} --processes {n_processes} --num-most-common-errors 5 --overwrite'
os.system(command)
print('Resquiggling completed')
|
# Copyright (c) 2006-2009 The Trustees of Indiana University.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# - Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# - Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# - Neither the Indiana University nor the names of its contributors may be used
# to endorse or promote products derived from this software without specific
# prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# VMX Machine Instructions
from corepy.spre.spe import MachineInstruction
from vmx_fields import *
class OPCD_vD_vA_vB_vC_XO(MachineInstruction):
signature = (vD, vA, vB, vC)
def _render(params, operands):
return OPCD.render(params['OPCD']) | vD.render(operands['vD']) | vA.render(operands['vA']) | vB.render(operands['vB']) | vC.render(operands['vC']) | VA_XO.render(params['XO'])
render = staticmethod(_render)
class OPCD_vD_vA_vB_SH_XO(MachineInstruction):
signature = (vD, vA, vB, SH)
def _render(params, operands):
return OPCD.render(params['OPCD']) | vD.render(operands['vD']) | vA.render(operands['vA']) | vB.render(operands['vB']) | SH.render(operands['SH']) | VA_XO.render(params['XO'])
render = staticmethod(_render)
class OPCD_vD_vA_vB_XO(MachineInstruction):
signature = (vD, vA, vB)
def _render(params, operands):
return OPCD.render(params['OPCD']) | vD.render(operands['vD']) | vA.render(operands['vA']) | vB.render(operands['vB']) | VX_XO.render(params['XO'])
render = staticmethod(_render)
class OPCD_vD_XO(MachineInstruction):
    signature = (vD,)
    def _render(params, operands):
        return OPCD.render(params['OPCD']) | vD.render(operands['vD']) | VX_XO.render(params['XO'])
    render = staticmethod(_render)
class OPCD_vB_XO(MachineInstruction):
    signature = (vB,)
    def _render(params, operands):
        return OPCD.render(params['OPCD']) | vB.render(operands['vB']) | VX_XO.render(params['XO'])
    render = staticmethod(_render)
class OPCD_vD_vB_XO(MachineInstruction):
signature = (vD, vB)
def _render(params, operands):
return OPCD.render(params['OPCD']) | vD.render(operands['vD']) | vB.render(operands['vB']) | VX_XO.render(params['XO'])
render = staticmethod(_render)
class OPCD_vD_UIMM_vB_XO(MachineInstruction):
signature = (vD, UIMM, vB)
def _render(params, operands):
return OPCD.render(params['OPCD']) | vD.render(operands['vD']) | UIMM.render(operands['UIMM']) | vB.render(operands['vB']) | VX_XO.render(params['XO'])
render = staticmethod(_render)
class OPCD_vD_SIMM_XO(MachineInstruction):
signature = (vD, SIMM)
def _render(params, operands):
return OPCD.render(params['OPCD']) | vD.render(operands['vD']) | SIMM.render(operands['SIMM']) | VX_XO.render(params['XO'])
render = staticmethod(_render)
class OPCD_T_STRM_A_B_XO(MachineInstruction):
    signature = (A, B, STRM)
    def _render(params, operands):
        return OPCD.render(params['OPCD']) | T.render(params['T']) | STRM.render(operands['STRM']) | A.render(operands['A']) | B.render(operands['B']) | X_XO.render(params['XO'])
    render = staticmethod(_render)
class OPCD_T_STRM_XO(MachineInstruction):
    signature = (STRM,)
    def _render(params, operands):
        return OPCD.render(params['OPCD']) | T.render(0) | STRM.render(operands['STRM']) | A.render(0) | B.render(0) | X_XO.render(params['XO'])
    render = staticmethod(_render)
class OPCD_T_XO(MachineInstruction):
signature = ()
def _render(params, operands):
return OPCD.render(params['OPCD']) | T.render(1) | X_XO.render(params['XO'])
render = staticmethod(_render)
class OPCD_vD_A_B_XO(MachineInstruction):
signature = (vD, A, B)
def _render(params, operands):
return OPCD.render(params['OPCD']) | vD.render(operands['vD']) | A.render(operands['A']) | B.render(operands['B']) | X_XO.render(params['XO'])
render = staticmethod(_render)
class OPCD_vD_vA_vB_RC_XO(MachineInstruction):
signature = (vD, vA, vB)
opt_kw = (Rc,)
def _render(params, operands):
return OPCD.render(params['OPCD']) | vD.render(operands['vD']) | vA.render(operands['vA']) | vB.render(operands['vB']) | Rc.render(operands['Rc']) | VXR_XO.render(params['XO'])
render = staticmethod(_render)
|
# iot-sec Device class
from mongoengine import *
import datetime
class Device(Document):
id = StringField(required=True, primary_key=True, unique=True)
manuf = StringField()
model = StringField()
pubkey = BinaryField()
lastSeen = DateTimeField(default=datetime.datetime.now) |
#!/usr/bin/env python
"""
An interface for creating custom waves on the PiGlow.
"""
import math
from itertools import tee
from time import sleep
from sys import exit
try:
import numpy
except ImportError:
exit("This library requires the numpy module\nInstall with: sudo pip install numpy")
import piglow
# organised by rings (inner ring first)
LEDS = [
[6, 12, 18],
[5, 11, 17],
[4, 10, 16],
[3, 9, 15],
[2, 8, 14],
[1, 7, 13],
]
def wave(led_max=150, frame_delay=0.02, frame_count=None, initial_brightness=None, direction=None):
"""
Creates a wave effect through the PiGlow board.
Args (all are optional):
led_max (int): the LED brightness at the peak of the wave.
frame_delay (float): the time between each transition.
frame_count (int): the number of transitions in a single wave.
initial_brightness (int): the current brightness of the LEDs.
direction (string): either 'inward' or 'outward'.
"""
if initial_brightness is None:
initial_brightness = min(piglow.get())
if direction is None:
direction = 'outward'
if frame_count is None:
frame_count = len(LEDS)
if direction == 'outward':
LEDS.reverse()
led_set_count = len(LEDS)
# initialise all of the LEDs
piglow.all(initial_brightness)
piglow.show()
wave = _create_led_sine_wave(led_max, frame_count, initial_brightness, led_set_count)
for wave_point in _window(wave, led_set_count):
for i, led_set in enumerate(LEDS):
for led in led_set:
piglow.led(led, int(wave_point[i]))
piglow.show()
sleep(frame_delay)
def _create_led_sine_wave(led_max, frame_count, initial_brightness, led_set_count):
"""
Creates a custom sine wave for a set of LEDs.
Args:
led_max (int): the brightest an LED can go.
frame_count (int): the wave length.
initial_brightness (int): the current brightness of the LEDs.
led_set_count (int): the number of sets the wave passes through.
Returns a numpy `array` containing the entire sine wave.
"""
# create a generic sine wave that starts at -1
peak = numpy.linspace(-math.radians(90), math.radians(270), frame_count)
    # adjust the amplitude to suit the range of our LED brightness
peak = numpy.sin(peak) * led_max / 2 + led_max / 2
# finally, we calibrate the wave troughs
peak[peak < initial_brightness] = initial_brightness
# create a beginning and end for the wave to occupy
buffer = numpy.linspace(initial_brightness, initial_brightness, led_set_count)
# Boom, we have our wave!
return numpy.concatenate([buffer, peak, buffer])
def _window(iterable, size=3):
"""
Generate a sequence of `sliding windows` over an iterable.
>>> list(_window(range(5)))
[(0, 1, 2), (1, 2, 3), (2, 3, 4)]
"""
iters = tee(iterable, size)
for i in range(1, size):
for each in iters[i:]:
next(each, None)
return zip(*iters)
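# Hedged illustration (added; not part of the original module): preview the
# generated wave frames without any PiGlow hardware attached. The argument
# values below are arbitrary and only show the expected shapes.
def _preview_wave(led_max=150, frame_count=6, initial_brightness=0, led_set_count=6):
    wave_values = _create_led_sine_wave(led_max, frame_count, initial_brightness, led_set_count)
    return [tuple(int(v) for v in frame) for frame in _window(wave_values, led_set_count)]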
if __name__ == '__main__':
wave()
|
from panstamps.__version__ import __version__
from panstamps import utKit
from panstamps import cl_utils
from panstamps.downloader import downloader
from panstamps.image import image
|
# A function that prints the amounts of cheese and crackers passed to it
def cheese_and_crackers(cheese_count, boxes_of_crackers):
print "You have %d cheeses!" %cheese_count
print "You have %d boxes of crackers!" %boxes_of_crackers
print "Man that's enough for a party"
print "Get a blanket.\n"
# First try: give the function numbers directly
print "We can just give the function numbers directly:"
cheese_and_crackers(20, 30)  # arguments passed directly
# Then pass variables defined in the script
print "OR, we can use variables from our script:"
amount_of_cheese = 10
amount_of_crackers = 50
# and then feeding them to the function
cheese_and_crackers(amount_of_cheese, amount_of_crackers)
# Doing math inside the function call
print "We can even do math inside too:"
cheese_and_crackers(10 + 20, 5 + 6)
# Why not both
print "And we can combine the two, variables and math:"
cheese_and_crackers(amount_of_cheese + 100, amount_of_crackers + 1000)
|
class Indexer:
def __init__(self,r,i):
self.real=r
self.imag=i
def __iadd__(self,c):
return Indexer(self.real+c.real,self.imag+c.imag)
def __str__(self): #repr or str
return '{} i{}'.format(self.real,self.imag)
c1=Indexer(10,20)
c2=Indexer(40,80)
print('c1 : ',c1)
print('c2 : ',c2)
c1+=c2
print('c3 : ',c1)
'''
output:
c1 : 10 i20
c2 : 40 i80
c3 : 50 i100
'''
|
#!/usr/bin/python
#
# Copyright 2019 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
from __future__ import absolute_import, division, print_function
import sys
import click
from polyaxon_sdk.rest import ApiException
from urllib3.exceptions import HTTPError
from polyaxon.cli.errors import handle_cli_error
from polyaxon.cli.getters.user import get_username_or_local
from polyaxon.client import PolyaxonClient
from polyaxon.logger import clean_outputs
from polyaxon.utils.formatting import (
Printer,
dict_tabulate,
get_meta_response,
list_dicts_to_tabulate,
)
from polyaxon.utils.query_params import get_query_params
@click.group()
@click.option("--username", "-u", type=str)
@click.pass_context
@clean_outputs
def bookmark(ctx, username): # pylint:disable=redefined-outer-name
"""Commands for bookmarks."""
ctx.obj = ctx.obj or {}
ctx.obj["username"] = username
@bookmark.command()
@click.option("--limit", type=int, help="To limit the list of projects.")
@click.option("--offset", type=int, help="To offset the list of projects.")
@click.pass_context
@clean_outputs
def projects(ctx, limit, offset):
"""List bookmarked projects for user.
Uses [Caching](/references/polyaxon-cli/#caching)
Examples:
\b
```bash
$ polyaxon bookmark projects
```
\b
```bash
$ polyaxon bookmark -u adam projects
```
"""
user = get_username_or_local(ctx.obj.get("username"))
try:
params = get_query_params(limit=limit, offset=offset)
polyaxon_client = PolyaxonClient()
response = polyaxon_client.projects_v1.list_bookmarked_projects(user, **params)
except (ApiException, HTTPError) as e:
handle_cli_error(
e, message="Could not get bookmarked projects for user `{}`.".format(user)
)
sys.exit(1)
meta = get_meta_response(response)
if meta:
Printer.print_header("Bookmarked projects for user `{}`.".format(user))
Printer.print_header("Navigation:")
dict_tabulate(meta)
else:
Printer.print_header("No bookmarked projects found for user `{}`.".format(user))
objects = [
Printer.add_status_color(o.to_light_dict(humanize_values=True))
for o in response.results
]
objects = list_dicts_to_tabulate(objects)
if objects:
Printer.print_header("Projects:")
dict_tabulate(objects, is_list_dict=True)
@bookmark.command()
@click.option("--limit", type=int, help="To limit the list of runs.")
@click.option("--offset", type=int, help="To offset the list of runs.")
@click.pass_context
@clean_outputs
def runs(ctx, limit, offset):
"""List bookmarked runs for user.
Uses [Caching](/references/polyaxon-cli/#caching)
Examples:
\b
```bash
$ polyaxon bookmark experiments
```
\b
```bash
$ polyaxon bookmark -u adam experiments
```
"""
user = get_username_or_local(ctx.obj.get("username"))
try:
params = get_query_params(limit=limit, offset=offset)
polyaxon_client = PolyaxonClient()
response = polyaxon_client.runs_v1.list_bookmarked_runs(user, **params)
except (ApiException, HTTPError) as e:
handle_cli_error(
e,
message="Could not get bookmarked experiments for user `{}`.".format(user),
)
sys.exit(1)
meta = get_meta_response(response)
if meta:
Printer.print_header("Bookmarked experiments for user `{}`.".format(user))
Printer.print_header("Navigation:")
dict_tabulate(meta)
else:
Printer.print_header(
"No bookmarked experiments found for user `{}`.".format(user)
)
objects = [
Printer.add_status_color(o.to_light_dict(humanize_values=True))
for o in response.results
]
objects = list_dicts_to_tabulate(objects)
if objects:
Printer.print_header("Experiments:")
dict_tabulate(objects, is_list_dict=True)
|
# Copyright Istio Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http:#www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines constant paths for static and generated YAML."""
import os
_RESOURCES_DIR = os.path.realpath(
os.path.join(os.getcwd(), os.path.dirname(__file__)))
HELM_SERVICE_ACCOUNT_YAML_PATH = os.path.join(_RESOURCES_DIR,
'helm-service-account.yaml')
PERSISTENT_VOLUME_YAML_PATH = os.path.join(_RESOURCES_DIR,
'persistent-volume.yaml')
PROMETHEUS_VALUES_GEN_YAML_PATH = os.path.join(_RESOURCES_DIR,
'values-prometheus.gen.yaml')
SERVICE_GRAPH_GEN_YAML_PATH = os.path.join(_RESOURCES_DIR,
'service-graph.gen.yaml')
ISTIO_GEN_YAML_PATH = os.path.join(_RESOURCES_DIR, 'istio.gen.yaml')
ISTIO_INGRESS_YAML_PATH = os.path.join(_RESOURCES_DIR,
'istio-ingress.gen.yaml')
|
from django.contrib import admin
from .models import Contacts
# Register your models here.
admin.site.register(Contacts) |
#Program to generate simple g-code per /u/dubc4 request in the /r/3dprinting subreddit
import math
class StirGCodeGenerator:
def __init__(self, printerDims, zFinal, stirDiameter, stirSpeed, stirTime, stirHeight,
travelSpeed=2400, compatibility=False):
""" Creates a GCode generator for stirring a set amount of time.
'printerDims' is a tuple of printer dimensions (length (x), width (y), height (z)) [mm]
'zFinal' stirrer height at the end of stirring [mm], must be less than printer height
'stirDiameter' diameter of the circle to trace while stirring [mm],
Must be less than printer length and width.
'stirSpeed' speed at which to stir [mm/sec]
'stirTime' duration of stirring [mins]
'travelSpeed' speed of travel moves [mm/sec]
'compatibility' boolean for whether to support old firmwares (if True disallows M808 repeat)
Defaults to False (M808 allowed).
"""
xMax, yMax, zMax = printerDims
self.center = [round(float(xMax) / 2, 2), round(float(yMax) / 2, 2), round(float(zMax) / 2, 2)]
self.zFinal = round(float(zFinal))
self.stirRadius = round(float(stirDiameter) / 2, 2)
self.stirTime = float(stirTime)
self.loops = round(self.stirTime * 60 / (math.pi * float(stirDiameter) / float(stirSpeed)))
self.stirSpeed = round(float(stirSpeed) * 60, 2)
self.stirHeight = round(float(stirHeight), 2)
self.travelSpeed = round(float(travelSpeed))
self.compatibility = compatibility
def generate(self, filename, endCode=None):
""" Generates gcode and writes to 'filename'.
Existing files will be overwritten.
'endCode' is a gcode file that gets appended to the end of the generated one.
"""
xOffset = self.center[0] - self.stirRadius
yOffset = self.center[1]
gcode = (
*self.generate_setup(xOffset, yOffset),
*self.generate_stirring(xOffset, yOffset),
*self.generate_cleanup()
)
#file writing
with open(filename, "w") as output:
for section in gcode:
output.write('\n'.join(section))
output.write('\n'*2) # delimit sections with a blank line in between
if not endCode:
return # finish now if no endcode to add
with open(endCode) as addendum:
for line in addendum:
output.write(line)
def generate_setup(self, xOffset, yOffset):
return (
("; *** G-code Prefix ***",
"; Set unit system ([mm] mode)",
"G21"),
(";Align coordinates to stirrer",
"G28 ; Home Position",
"G90 ; Absolute Positioning"),
(";Position stirrer",
f"G0 X{xOffset} Y{yOffset} F{self.travelSpeed}",
f"G0 Z{self.stirHeight} F{self.travelSpeed}")
)
def generate_stirring(self, xOffset, yOffset):
heading = f";Stirring {self.loops} times (~{self.stirTime} mins)"
if self.compatibility:
return (
(heading,
*(f"G2 X{xOffset} Y{yOffset} I{self.stirRadius} J0 F{self.stirSpeed}"
for _ in range(self.loops))),
)
return (
(";Start Loop",
f"M808 L{self.loops}"),
(heading,
f"G2 X{xOffset} Y{yOffset} I{self.stirRadius} J0 F{self.stirSpeed}"),
(";End Loop",
"M808")
)
def generate_cleanup(self):
return (
(";Raise stirrer",
f"G0 Z{self.zFinal} F{self.travelSpeed}"),
)
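# Worked example of the loop count computed in __init__ (illustrative values only):
# one circle is pi * stirDiameter mm long, so with stirDiameter=30 and stirSpeed=10 mm/s a
# loop takes pi*30/10 ≈ 9.42 s, and stirTime=5 min gives round(300 / 9.42) = 32 loops.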
#Example run:
"""
xMax = 200
yMax = 200
zMax = 200
zFinal = 50
stirHeight = 20
stirDiameter = 30
stirSpeed = 10
stirTime = 5
fileName = "test.gcode"
"""
print("Please enter the following inputs.")
xMax = input("Printer x length (mm): ")
yMax = input("Printer y length (mm): ")
zMax = input("Printer z length (mm): ")
stirDiameter = input("Stirring diameter (mm): ")
stirSpeed = input("Stirring speed (mm/sec): ")
stirTime = input("Stirring duration (min): ")
stirHeight = input("Stirrer height (mm): ")
zFinal = input("Final stirrer height (mm): ")
travelSpeed = input("Travel speed (mm/sec - default 2400): ") or 2400
disable_M808 = input("Compatibility mode (disable M808) [y/N]?: ").lower() == 'y'
fileName = input("Enter filename: ")
endCode = input("Enter end code filename (leave blank to skip): ").strip()
g = StirGCodeGenerator((xMax, yMax, zMax), zFinal, stirDiameter, stirSpeed, stirTime, stirHeight,
travelSpeed, disable_M808)
g.generate(fileName, endCode)
|
import asyncio
import discord
from discord.ext import commands
from discord.utils import get
def is_authority():
"""Checks that a member is a mod"""
async def predicate(ctx):
return "Authority Ping" in [r.name for r in ctx.author.roles]
return commands.check(predicate)
def can_banish():
"""Checks that a member is a part of the shadowy organization that runs the world"""
async def predicate(ctx):
return ctx.author.id in [380550351423537153, 363690578950488074]
return commands.check(predicate)
# Change this (maybe)
def jail_roles(member: discord.Member):
"""Returns all jail roles that a member has"""
j_roles = [get(member.guild.roles, name="Horny inmate"), get(member.guild.roles, name="Horny Inmate 0001"),
get(member.guild.roles, name="Horny Inmate 0002"),
get(member.guild.roles, name="Horny Inmate 0003"),
get(member.guild.roles, name="MAXIMUM SECURITY HORNY MF")]
member_roles = []
for role in j_roles:
if role in member.roles:
member_roles.append(role)
return member_roles
class Moderation(commands.Cog):
"""BONK! These commands can be used by users with the role Authority Ping to jail and release other users."""
def __init__(self, bot):
self.bot = bot
        self.jailed = set()  # Keep track of who's currently in jail for timer release purposes
self.jail_dict = {"max": "MAXIMUM SECURITY HORNY MF",
"1": "Horny Inmate 0001",
"2": "Horny Inmate 0002",
"3": "Horny Inmate 0003"}
self.timers = {}
async def timer(self, ctx, duration, member):
"""Makes an auto-release timer"""
await asyncio.sleep(duration)
if member in self.jailed:
member = await member.guild.fetch_member(member.id)
await ctx.invoke(self.bot.get_command("release"), member=member)
self.jailed.discard(member)
@commands.command(
brief="Sends a member to horny jail",
usage="owo bonk <user> [cell] [time]",
help="This command can only be used by people with the role Authority Ping. "
"It gives a member the Horny inmate role, along with a role for the cell that they're assigned to, "
"defaulting to sending users to cell 1 for 30 minutes. Specify cell using '1', '2', '3', or 'max', "
"and time using a number in minutes.\n"
"\n"
"[Examples]\n"
"[1] owo bonk floop#6996 max 10\n"
"[2] owo bonk Weebaoo 2\n"
"[3] owo bonk 409822116955947018")
@is_authority()
async def bonk(self, ctx, member: discord.Member, cell="1", sentence_time: int = 30):
if get(member.guild.roles, name="Server Booster") in member.roles or \
get(member.guild.roles, name="Authority Ping") in member.roles:
await ctx.channel.send("This member cannot be bonked")
return
if sentence_time <= 0:
await ctx.channel.send("?????????????\nPlease jail people for 1 or more minutes")
return
# Remove all previous jail roles
if j_roles := jail_roles(member):
await member.remove_roles(*j_roles)
await ctx.channel.send("Freed prisoner... ready for transport")
# Gets jail roles
horny_role = get(member.guild.roles, name="Horny inmate")
cell_role = get(member.guild.roles, name=self.jail_dict[cell.lower()])
if cell.lower() == "max":
cell = "maximum security"
sentence_time = 60
else:
cell = "cell " + cell
if cell_role is None:
await ctx.channel.send("That is not a valid cell")
return
await ctx.channel.send(f"Sent {member} to {cell} in horny jail for {sentence_time} minutes")
await member.add_roles(horny_role, cell_role) # Bonk
self.jailed.add(member)
self.timers[member] = asyncio.create_task(self.timer(ctx, sentence_time * 60, member))
@commands.command(
brief="Banishes a member to THE SHADOW REALM™️",
usage="owo banish <user>",
help="This mysterious command can only be used by Alain.\n"
"Basically soft-bans people\n")
@can_banish()
async def banish(self, ctx, member: discord.Member):
role = get(member.guild.roles, name="Banished")
if role in member.roles:
await ctx.channel.send(f"{member} is already in THE SHADOW REALM™️")
return
if j_roles := jail_roles(member):
await member.remove_roles(*j_roles)
await ctx.channel.send(f"Freed prisoner... ready for transport")
await member.add_roles(role)
self.jailed.add(member)
await ctx.channel.send(f"Banished {member} to THE SHADOW REALM™️")
@commands.command(
brief="Releases someone from the depths",
usage="owo release <user>",
help="This command can only be used by people with the role Authority Ping.\n"
"It removes all horny jail roles from a member, and also removes the banished role if the person using the"
" command is Alain.")
@is_authority()
async def release(self, ctx, member: discord.Member):
banish_role = get(member.guild.roles, name="Banished") # Shadow realm role
released = False
if banish_role in member.roles: # Unbanish if applicable
            if ctx.author.id in [380550351423537153, 363690578950488074]:  # same check as can_banish(); calling can_banish() here would always be truthy
await member.remove_roles(banish_role)
await ctx.channel.send(f"Released {member} from THE SHADOW REALM™️")
self.jailed.discard(member)
released = True
else:
await ctx.channel.send("You don't have permission to release from THE SHADOW REALM™️")
if j_roles := jail_roles(member): # Unbonk
await ctx.channel.send(f"Released {member} from horny jail")
await member.remove_roles(*j_roles)
try:
self.timers[member].cancel()
except KeyError:
pass
self.jailed.discard(member)
released = True
if not released:
await ctx.channel.send(f"There is nothing to release {member} from!")
return
def setup(bot):
bot.add_cog(Moderation(bot))
|
# Generated by Django 3.2 on 2021-11-27 04:48
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='District',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=15, unique=True)),
('description', models.CharField(blank=True, max_length=100)),
],
),
migrations.CreateModel(
name='Provience',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=15, unique=True)),
('description', models.CharField(blank=True, max_length=100)),
],
),
migrations.CreateModel(
name='SubDistrict',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=15, unique=True)),
('type', models.CharField(choices=[('Metro Cities', 'Metro Cities'), ('Sub-Metro Cities', 'Sub-Metro Cities'), ('Municipalities', 'Municipalities'), ('Rural Municipalities', 'Rural Municipalities')], max_length=25)),
('description', models.CharField(blank=True, max_length=100)),
('district', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='location.district')),
],
),
migrations.CreateModel(
name='Ward',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=20, unique=True)),
('description', models.CharField(blank=True, max_length=100)),
('subdistrict', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='location.subdistrict')),
],
),
migrations.AddField(
model_name='district',
name='provience',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='location.provience'),
),
]
|
from hindinballs.util import Tree
root = Tree("root")
allNodes = {"root": root}
set2Word = {}
word2Set = {}
with open("data/set2WordV.txt","r") as s2w:
cont = s2w.read()
sets = cont.split("$")
    for entry in sets:
        pairs = entry.split(":")
        num = pairs[0]
        word = pairs[1]
        set2Word[num] = word
        word2Set[word] = num
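# Expected layout of set2WordV.txt (illustrative values): "101:dog$102:cat$..." so that
# set2Word["101"] == "dog" and word2Set["dog"] == "101".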
# final cleansing of the paths
paths = []
clean_paths = []
with open("data/tree_struct.txt", 'r') as tree_struct:
struct_cont = tree_struct.read()
paths = struct_cont.split("$")
# count = 0
for path in paths:
tokens = path.split("<-")
num_of_tokens = len(tokens)
new_path = tokens[0]
for i in range(1, num_of_tokens):
if tokens[i] in set2Word.keys():
new_path += "<-" + tokens[i]
clean_paths.append(new_path)
for path in clean_paths:
tokens = path.split("<-")
# print(tokens)
num_tokens = len(tokens)
    # saving prev_token to save its children if they don't exist already
prev_token = tokens[num_tokens - 1]
# remember to leave last set, need to replace it with actual word
for i in range(0, num_tokens - 2):
name = str(tokens[num_tokens - 1 - i])
# to avoid index out of bound error
if i > 0:
prev_token = tokens[num_tokens - i]
if name in allNodes.keys():
continue
else:
            # create new node and save its mapping in the allNodes dict
temp = Tree(name)
allNodes[name] = temp
if i == 0:
# if it's the node that goes below root
allNodes["root"].add_child(temp)
else:
# if it is some other node's child, other than root(prev_token's)
allNodes[prev_token].add_child(temp)
# now for the last node, as word
name = str(tokens[0])
# if the word is not directly attached to the root
if num_tokens >= 3:
if name in word2Set.keys():
name = word2Set[name]
if name not in allNodes.keys():
temp = Tree(name)
allNodes[name] = temp
allNodes[tokens[2]].add_child(temp)
else:
if name in word2Set.keys():
name = word2Set[name]
if name not in allNodes.keys():
temp = Tree(name)
allNodes[name] = temp
allNodes["root"].add_child(temp)
# levelDict = root.printLevelOrder()
# print(levelDict)
# print(wordAndOrderDict)
# Now replace set in the levelDict with word and sort them, and replace the -1 in the wordAndOrderDict
# with correct place
setOrderNum = {'root': 1 }
with open("data/sameLevelWords.txt", "w") as slw:
stck =[root]
while(True):
if len(stck) != 0:
temp = stck.pop(0)
if len(temp.children) > 0:
o_list = temp.children
c_list = []
for child in o_list:
word_name = child.name
if child.name in set2Word.keys():
word_name = set2Word[child.name]
c_list.append(word_name)
stck.append(child)
c_list.sort()
k=0
for i in c_list:
k=k+1
if i in word2Set.keys():
setOrderNum[word2Set[i]] = k
else:
setOrderNum[i] = k
else:
break
slw.write("\n\n")
# printing cat codes of all the words in a file
def print_catcodes():
with open("data/catCodes.txt", "w") as ctcd:
# count = 0
# A dictionary to hold all the words whose cat_codes are already generated
cat_printed = {}
for path in clean_paths:
tokens = path.split("<-")
num_of_tokens = len(tokens)
for j in range(0, num_of_tokens):
leaf = tokens[j]
if leaf in set2Word.keys():
leaf = set2Word[leaf]
if leaf not in cat_printed:
cat_printed[leaf]=True
sen = ""
slen = 0
for i in range(j, num_of_tokens):
if j==0 and i==1:
slen = 1
continue
word = tokens[i]
if word in set2Word.keys():
word = set2Word[word]
if word in word2Set.keys():
sen = str(setOrderNum[word2Set[word]])+" "+sen
# discussion need to be done here
elif word in setOrderNum.keys():
# count += 1
sen = str(setOrderNum[word])+" "+sen
else:
print("Evil case")
continue
# root order for every word
# need to add special case for root*
sen = "1 "+sen.strip()
for k in range(0, 13-(num_of_tokens-j-slen)):
sen += " 0"
ctcd.write(leaf+" "+sen+"\n")
# print(count)
# root.printTree()
# printing word sense children file as English
def print_word_senses():
with open("data/wordSenseChildren.txt","w") as wsc:
stck = [root]
while (True):
if len(stck) != 0:
temp = stck.pop(0)
if temp.name in set2Word.keys():
wsc.write(set2Word[temp.name])
else:
wsc.write(temp.name)
if len(temp.children) > 0:
for child in temp.children:
stck.insert(0, child)
if child.name in set2Word.keys():
wsc.write(" "+set2Word[child.name])
else:
wsc.write(" "+child.name)
wsc.write("\n")
else:
break
print_catcodes()
print_word_senses() |
"""
Tests for the unwrapped ufuncs.
This is a WIP; it doesn't work yet for all cases, and might not be a good
approach anyway. For now, test_check_functions is adequate, handling the
wrapped ufuncs via check_functions "eval" and "exec" machinery.
"""
import pytest
import numpy as np
from numpy.testing import assert_allclose
import gsw
from gsw._utilities import Bunch
from gsw.tests.check_functions import parse_check_functions
cv = Bunch(np.load('gsw_cv_v3_0.npz'))
cf = Bunch()
d = dir(gsw._gsw_ufuncs)
funcnames = [name for name in d if '__' not in name]
mfuncs = parse_check_functions('gsw_check_functions_save.m')
mfuncs = [mf for mf in mfuncs if mf.name in d]
mfuncnames = [mf.name for mf in mfuncs]
@pytest.fixture(scope='session', params=mfuncs)
def cfcf(request):
return cv, cf, request.param
def test_mechanism(cfcf):
cv, cf, mfunc = cfcf
print("<%s>" % mfunc.name)
def value_from_name(vname):
b, name = vname.split('.')
if b == 'cf':
return cf[name]
elif b == 'cv':
return cv[name]
else:
raise ValueError("Can't find cf. or cv. in %s" % vname)
def set_from_name(vname, value):
b, name = vname.split('.')
if b == 'cf':
cf[name] = value
else:
raise ValueError("attempting to set value in %s" % (b,))
func = getattr(gsw._gsw_ufuncs, mfunc.name)
args = [eval(a) for a in mfunc.argstrings]
#print("<<%s>>" % (args,))
out = func(*args)
#print("<<<%s>>>" % (out,))
if isinstance(out, tuple):
nout = len(out)
else:
nout = 1
out = (out,)
n = min(nout, len(mfunc.outstrings))
for i, s in enumerate(mfunc.outstrings[:n]):
set_from_name(s, out[i])
if mfunc.test_varstrings is not None:
ntests = (len(mfunc.test_varstrings) - 1) // 3
for i in range(ntests):
expected = value_from_name(mfunc.test_varstrings[3*i+1])
found = value_from_name(mfunc.test_varstrings[3*i+2])
tolerance = value_from_name(mfunc.test_varstrings[3*i+3])
#print(expected)
#print(found)
print(tolerance)
try:
assert_allclose(expected, found, atol=tolerance)
except TypeError:
print(mfunc.test_varstrings[3*i+3], tolerance.shape)
print(mfunc.test_varstrings)
# The following is not right, but this step is unimportant.
#set_from_name(mfunc.test_varstrings[0], expected - found)
else:
print(">>%s<<" % mfunc.testline)
print("missing mfunc.test_varstrings")
mfunc.run()
if hasattr(mfunc, 'exception'):
print(">>>%s<<<", mfunc.exception)
else:
assert mfunc.passed
|
# -*- coding: utf-8 -*-
#
# This file is part of EUDAT B2Share.
# Copyright (C) 2016 CERN.
#
# B2Share is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# B2Share is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with B2Share; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Record modification prior to indexing."""
import pytz
from invenio_search import current_search_client
from invenio_records_files.models import RecordsBuckets
from invenio_pidrelations.contrib.versioning import PIDNodeVersioning
from .utils import is_deposit, is_publication
from .providers import RecordUUIDProvider
def record_to_index(record):
"""Route the given record to the right index and document type."""
def doc_type(alias):
try:
return list(current_search_client.indices.get_alias(index=alias, ignore=[404]).keys())[0]
except:
return alias
if is_deposit(record.model):
return 'record', doc_type('records')
elif is_publication(record.model):
return 'deposit', doc_type('deposits')
else:
raise ValueError('Invalid record. It is neither a deposit'
' nor a publication')
def indexer_receiver(sender, json=None, record=None, index=None,
**dummy_kwargs):
"""Connect to before_record_index signal to transform record for ES."""
from b2share.modules.access.policies import allow_public_file_metadata
from b2share.modules.records.fetchers import b2share_parent_pid_fetcher, b2share_record_uuid_fetcher
if 'external_pids' in json['_deposit']:
# Keep the 'external_pids' if the record is a draft (deposit) or
# if the files are public.
if (not is_deposit(record.model) and allow_public_file_metadata(json)):
json['external_pids'] = json['_deposit']['external_pids']
del json['_deposit']['external_pids']
if not index.startswith('records'):
return
try:
if '_files' in json:
if not allow_public_file_metadata(json):
for f in json['_files']:
del f['key']
del json['_deposit']
json['_created'] = pytz.utc.localize(record.created).isoformat()
json['_updated'] = pytz.utc.localize(record.updated).isoformat()
json['owners'] = record['_deposit']['owners']
json['_internal'] = dict()
# add the 'is_last_version' flag
parent_pid = b2share_parent_pid_fetcher(None, record).pid_value
pid = b2share_record_uuid_fetcher(None, record).pid_value
last_version_pid = PIDNodeVersioning(
pid=RecordUUIDProvider.get(parent_pid).pid
).last_child
json['_internal']['is_last_version'] = \
(last_version_pid.pid_value == pid)
# insert the bucket id for link generation in search results
record_buckets = RecordsBuckets.query.filter(
RecordsBuckets.record_id == record.id).all()
if record_buckets:
json['_internal']['files_bucket_id'] = \
str(record_buckets[0].bucket_id)
except Exception:
raise
|
import datetime
import os
import time
from Main import MachineSpecificSettings, Hyperparameters
from Main.AlphaZero.DistributedSelfPlay import Connection, FitModel
from Main.AlphaZero import Utils
from Main.Training.Connect4 import MemoryBuffers
STATUS_TRAIN_DATA = "trainData"
STATUS_INIT_MODEL = "initModel"
def _getModelPath():
return os.path.abspath("TrainerModel")
def _writeModelToDiskAsBytes(modelAsBytes):
tempFilePath = _getModelPath()
f = open(tempFilePath, 'wb')
f.write(modelAsBytes)
f.close()
return tempFilePath
def _readModelFromDisk():
f = open(_getModelPath(), 'rb')
temp = f.read()
f.close()
return temp
def _getLearningRate(generation):
for a in Hyperparameters.LEARNING_RATE_SCHEDULE:
cycleNumber, lr = a
if (generation < cycleNumber):
return lr
_, finalLr = Hyperparameters.LEARNING_RATE_SCHEDULE[-1]
return finalLr
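# Illustrative schedule shape: LEARNING_RATE_SCHEDULE = [(10, 1e-2), (20, 1e-3), (30, 1e-4)]
# would give generations 0-9 a rate of 1e-2, 10-19 1e-3, 20-29 1e-4, and anything later stays at 1e-4.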
def _init(port):
connection = Connection.Connection(ip='localhost', port=port, server=False)
status, data = connection.readMessage()
assert status == STATUS_INIT_MODEL
modelAsBytes, trainerSettings = data
modelAbsPath = _writeModelToDiskAsBytes(modelAsBytes)
Hyperparameters.REPLAY_BUFFER_LENGTH = trainerSettings[0]
Hyperparameters.SLIDING_WINDOW_TURNS_TO_FULL = trainerSettings[1]
    # Used for naming the runtime analysis log
if ("Y" in input("Use old training data (Y/N):").upper()):
MemoryBuffers.loadOldTrainingDataFromDisk()
return connection, modelAbsPath
'''
The looping trainer is passed self-play data from the Overlord.
This data is appended to the replay buffer, and all data contained in the buffer is used for supervised learning.
When training finishes, the updated network is sent back to the Overlord.
'''
def loopingTrainer(port, gpuSettings):
connection, modelAbsPath = _init(port)
import os, StartInit
StartInit.init()
print("Starting Trainer GPU-Settings: {}".format(gpuSettings))
os.environ['CUDA_VISIBLE_DEVICES'] = gpuSettings
from Main.AlphaZero import NeuralNetworks
import numpy as np
import keras
MachineSpecificSettings.setupHyperparameters()
singleModel = keras.models.load_model(modelAbsPath)
    # In our experiments we ended up using only a single GPU for training, since too big a batch size gave weird results.
if (MachineSpecificSettings.AMOUNT_OF_GPUS > 1):
trainingModel = NeuralNetworks.createMultipleGPUModel(singleModel)
else:
trainingModel = singleModel
# Training Loop
while (True):
status, data = connection.readMessage()
print("Got msg:", status)
if (status == STATUS_TRAIN_DATA): # TODO: Create an informative else statement
t1 = time.time() # Only used for displaying elapsed time to the user
modelVersion, states, values, policies, weights = data
# Setup settings for this training turn
keras.backend.set_value(trainingModel.optimizer.lr, _getLearningRate(modelVersion))
MemoryBuffers.CURRENT_MODEL_VERSION = modelVersion
MemoryBuffers.addLabelsToReplayBuffer(states, values, policies)
            # Get all the data contained in the replay buffers, with pre-calculated averages of similar states
inStates, valueLabels, policyLabels = MemoryBuffers.getDistinctTrainingData()
s = np.array(inStates)
v = np.array(valueLabels)
p = np.array(policyLabels)
# Run the supervised-learning
dataProcessingTime = time.time() - t1
print("Data preprocessing finished: {}".format(dataProcessingTime))
print("Using LR:", keras.backend.get_value(trainingModel.optimizer.lr))
trainingModel.fit(np.array(s), [np.array(v), np.array(p)],
epochs=Hyperparameters.EPOCHS_PER_TRAINING, batch_size=Hyperparameters.MINI_BATCH_SIZE,
verbose=2,
shuffle=True)
singleModel.save(modelAbsPath, overwrite=True)
singleModel.save(Hyperparameters.MODELS_SAVE_PATH + str(modelVersion + 1))
trainedModelAsBytes = _readModelFromDisk()
print("Training finished:", time.time() - t1)
connection.sendMessage("Finished", (trainedModelAsBytes,))
MemoryBuffers.storeTrainingDataToDisk()
|
import os
import re
from math import floor
from PIL import Image
def default_save_path(path, size):
savepath, ext = os.path.splitext(path)
savepath = '%s__w%dh%d%s' % (savepath, *size, ext)
return savepath
def assure_path_exists(path):
if not os.path.exists(path):
os.makedirs(path)
def normilize_size(size, img_size):
if None not in size:
return size
W, H = img_size
w, h = size
if w is None:
return h * W / H, h
return w, w * H / W
def get_cover_size(from_size, to_size):
p = max([ts / fs for ts, fs in zip(to_size, from_size)])
return tuple(floor(p * fs) for fs in from_size)
def get_contain_size(from_size, to_size):
p = min([ts / fs for ts, fs in zip(to_size, from_size)])
return tuple(floor(p * fs) for fs in from_size)
def get_coords_from_center(from_size, to_size):
return (
floor((from_size[0] - to_size[0]) / 2),
floor((from_size[1] - to_size[1]) / 2),
floor((from_size[0] + to_size[0]) / 2),
floor((from_size[1] + to_size[1]) / 2)
)
def adjust_coords(coords, size, point):
vec = [
size[0] * (point[0] - 50) / 100,
size[1] * (point[1] - 50) / 100
]
if coords[0] + vec[0] < 0:
vec[0] = - coords[0]
if coords[1] + vec[1] < 0:
vec[1] = - coords[1]
if coords[3] + vec[1] > size[1]:
vec[1] = size[1] - coords[3]
if coords[2] + vec[0] > size[0]:
vec[0] = size[0] - coords[2]
return tuple(floor(sum(coord)) for coord in zip(coords, 2 * vec))
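# Note on adjust_coords: `point` is a focus point given in percent of the resized image,
# where (50, 50) keeps the crop centred and values toward 0 or 100 shift the crop box
# toward that edge, with the clamping above keeping it inside the image bounds.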
def cover(path, size, point, savepath=None, quality=90):
with Image.open(path) as img:
size = normilize_size(size, img.size)
if savepath is None:
savepath = default_save_path(path, size)
assure_path_exists(os.path.dirname(savepath))
cover_size = get_cover_size(img.size, size)
coords = get_coords_from_center(cover_size, size)
coords = adjust_coords(coords, cover_size, point)
img = img.resize(cover_size, Image.ANTIALIAS)
img = img.crop(coords)
img.save(savepath, subsampling=0,
quality=quality, optimize=True)
return (True, savepath)
return (False, '')
def contain(path, size, savepath=None, quality=90):
with Image.open(path) as img:
size = normilize_size(size, img.size)
if savepath is None:
savepath = default_save_path(path, size)
assure_path_exists(os.path.dirname(savepath))
contain_size = get_contain_size(img.size, size)
img = img.resize(contain_size, Image.ANTIALIAS)
img.save(savepath, subsampling=0,
quality=quality, optimize=True)
return (True, savepath)
return (False, '')
if __name__ == "__main__":
img_path = os.path.abspath('img.jpg')
cover(img_path, (500, None), (50, 50))
|
"""Tests for format_tutorials.py. The `test_expected_output()` function loads a
table of strings and ensures that each leads to the expected output when run
through `process_content()`"""
from format_tutorials import *
import csv
import os
def test_expected_output():
input_strings: list = None
csv_file_path: str = os.path.join(os.path.dirname(__file__), "tests/format_tutorial_test_strings.csv")
with open(csv_file_path, "r") as csv_file:
reader = csv.DictReader(csv_file)
input_strings = [row for row in reader]
for row in input_strings:
input_line: str = row["input"]
expected_output: str = row["expected_output"]
error_message: str = row["error_message"]
assert process_content(input_line) == expected_output, error_message
if __name__ == '__main__':
test_expected_output()
|
# Made by Emperorc
import sys
from com.l2jserver.gameserver.model.quest import State
from com.l2jserver.gameserver.model.quest import QuestState
from com.l2jserver.gameserver.model.quest.jython import QuestJython as JQuest
qn = "66_CertifiedArbalester"
#NPCs
Rindy = 32201
Clayton = 30464
Poitan = 30458
Holvas = 30058
Meldina = 32214
Selsia = 32220
Gaius = 30171
Gauen = 30717
Kaiena = 30720
#Mobs
Floran = range(21102,21108) + [20781]
EG = range (20199,20203) + [20083,20144]
Grandis = 20554
Gargoyle = 20563
Timaks = [20584,20585]
Lady = 27336
#Items
Diamond = 7562
En_Crys,En_Crys_Core,Page,Page_Comp,Mark_Train,Order_Frag,Order_Comp,Talisman,Research,Mark = range(9773,9783)
class Quest (JQuest) :
def __init__(self,id,name,descr):
JQuest.__init__(self,id,name,descr)
self.questItemIds = range(9773,9782)
def onEvent (self,event,st) :
htmltext = event
player = st.getPlayer()
if event == "32201-02.htm" :
st.set("cond","1")
st.setState(State.STARTED)
#st.giveItems(Diamond,64)
st.playSound("ItemSound.quest_accept")
elif event == "32201-03.htm" :
st.set("cond","2")
st.playSound("ItemSound.quest_middle")
elif event == "30464-05.htm" :
st.set("cond","3")
st.playSound("ItemSound.quest_middle")
elif event == "30464-08.htm" :
st.takeItems(En_Crys,-1)
elif event == "30464-09.htm" :
st.giveItems(En_Crys_Core,1)
st.set("cond","5")
st.playSound("ItemSound.quest_middle")
elif event == "30458-03.htm" :
st.takeItems(En_Crys_Core,-1)
elif event == "30458-07.htm" :
st.set("cond","6")
st.playSound("ItemSound.quest_middle")
elif event == "30058-04.htm" :
st.set("cond","7")
st.playSound("ItemSound.quest_middle")
elif event == "30058-07.htm" :
st.set("cond","9")
st.playSound("ItemSound.quest_middle")
st.giveItems(Page_Comp,1)
elif event == "32214-03.htm" :
st.set("cond","10")
st.playSound("ItemSound.quest_middle")
st.takeItems(Page_Comp,-1)
st.giveItems(Mark_Train,1)
elif event == "32220-11.htm" :
st.set("cond","11")
st.playSound("ItemSound.quest_middle")
elif event == "30171-02.htm" :
st.takeItems(Order_Comp,-1)
elif event == "30171-05.htm" :
st.set("cond","14")
st.playSound("ItemSound.quest_middle")
elif event == "30717-02.htm" :
st.takeItems(Talisman,-1)
elif event == "30717-07.htm" :
st.set("cond","17")
st.playSound("ItemSound.quest_middle")
elif event == "30720-03.htm" :
st.set("cond","18")
st.playSound("ItemSound.quest_middle")
elif event == "32220-19.htm" :
st.set("cond","19")
st.playSound("ItemSound.quest_middle")
elif event == "Despawn Crimson Lady" :
st.set("spawned","0")
return htmltext
def onTalk (self,npc,player):
htmltext = "<html><body>You are either not on a quest that involves this NPC, or you don't meet this NPC's minimum quest requirements.</body></html>"
st = player.getQuestState(qn)
if not st : return htmltext
npcId = npc.getNpcId()
id = st.getState()
cond = st.getInt("cond")
if id == State.COMPLETED :
htmltext = "<html><body>This quest has already been completed.</body></html>"
elif npcId == Rindy :
if player.getClassId().getId() != 126 or player.getLevel() < 39:
htmltext = "<html><body>Only Warders of level 39 and above are allowed to take this quest! Go away before I get angry!</body></html>"
st.exitQuest(1)
elif id == State.CREATED :
htmltext = "32201-01.htm"
elif cond == 1 :
htmltext = "32201-03.htm"
elif cond == 2 :
htmltext = "32201-04.htm"
elif npcId == Clayton :
if cond == 2 :
htmltext = "30464-01.htm"
elif cond == 3 :
htmltext = "30464-06.htm"
elif cond == 4 :
htmltext = "30464-07.htm"
elif cond == 5 :
                htmltext = "30464-09.htm"
elif npcId == Poitan :
if cond == 5 :
htmltext = "30458-01.htm"
elif cond == 6 :
htmltext = "30458-08.htm"
elif npcId == Holvas :
if cond == 6 :
htmltext = "30058-01.htm"
elif cond == 7 :
htmltext = "30058-05.htm"
elif cond == 8 :
htmltext = "30058-06.htm"
st.takeItems(Page,-1)
elif cond == 9 :
htmltext = "30058-08.htm"
elif npcId == Meldina :
if cond == 9 :
htmltext = "32214-01.htm"
elif cond == 10 :
htmltext = "32214-04.htm"
elif npcId == Selsia :
if cond == 10 :
htmltext = "32220-01.htm" #3220-07.htm,3220-08.htm,3220-10.htm are completely custom. Need to find
#out what she actually says and if it is the same result as 3220-09.htm.
elif cond == 11 :
htmltext = "32220-11.htm"
elif cond == 18 :
htmltext = "32220-12.htm"
elif cond == 19 :
htmltext = "32220-19.htm"
elif cond == 20 :
htmltext = "32220-20.htm"
st.takeItems(Research,-1)
st.giveItems(Mark,1)
st.exitQuest(False)
st.playSound("ItemSound.quest_finish")
st.addExpAndSp(108974,12357)
st.unset("cond")
elif npcId == Gaius :
if cond == 13 :
htmltext = "30171-01.htm"
elif cond == 14 :
htmltext = "30171-06.htm"
elif cond == 16 :
htmltext = "30171-07.htm"
elif npcId == Gauen :
if cond == 16 :
htmltext = "30717-01.htm"
elif cond == 17 :
htmltext = "30717-08.htm"
elif npcId == Kaiena :
if cond == 17 :
htmltext = "30720-01.htm"
elif cond == 18 :
htmltext = "30720-04.htm"
return htmltext
def onKill(self,npc,player,isPet):
st = player.getQuestState(qn)
if not st : return
if st.getState() != State.STARTED : return
npcId = npc.getNpcId()
cond = st.getInt("cond")
if npcId in Floran :
if st.getQuestItemsCount(En_Crys) < 30 and cond == 3 :
st.giveItems(En_Crys,1)
if st.getQuestItemsCount(En_Crys) == 30 :
st.playSound("ItemSound.quest_middle")
st.set("cond","4")
else:
st.playSound("ItemSound.quest_itemget")
elif npcId in EG :
if st.getQuestItemsCount(Page) < 30 and cond == 7 :
st.giveItems(Page,1)
if st.getQuestItemsCount(Page) == 30 :
st.playSound("ItemSound.quest_middle")
st.set("cond","8")
else:
st.playSound("ItemSound.quest_itemget")
elif npcId == Grandis :
count = st.getQuestItemsCount(Order_Frag)
if count < 10 and (cond == 11 or cond == 12):
if count == 9 :
st.playSound("ItemSound.quest_middle")
st.takeItems(Order_Frag,-1)
st.giveItems(Order_Comp,1)
st.set("cond","13")
else :
st.giveItems(Order_Frag,1)
st.playSound("ItemSound.quest_itemget")
if count == 0 :
st.set("cond","12")
elif npcId == Gargoyle :
count = st.getQuestItemsCount(Talisman)
if count < 10 and (cond == 14 or cond == 15):
st.giveItems(Talisman,1)
if count == 9 :
st.playSound("ItemSound.quest_middle")
st.set("cond","16")
else :
st.playSound("ItemSound.quest_itemget")
if count == 0 :
st.set("cond","15")
elif npcId in Timaks :
if st.getRandom(40) < 1 and cond == 19 and not st.getInt("spawned") :
st.addSpawn(Lady,180000)
st.set("spawned","1")
st.startQuestTimer("Despawn Crimson Lady",180000)
elif npcId == Lady :
if cond == 19 and not st.getQuestItemsCount(Research) :
st.giveItems(Research,1)
st.set("cond","20")
st.unset("spawned")
return
QUEST = Quest(66,qn,"Certified Arbalester")
QUEST.addStartNpc(Rindy)
QUEST.addTalkId(Rindy)
QUEST.addTalkId(Clayton)
QUEST.addTalkId(Poitan)
QUEST.addTalkId(Holvas)
QUEST.addTalkId(Meldina)
QUEST.addTalkId(Selsia)
QUEST.addTalkId(Gaius)
QUEST.addTalkId(Gauen)
QUEST.addTalkId(Kaiena)
for mob in Floran + EG + Timaks :
QUEST.addKillId(mob)
QUEST.addKillId(Grandis)
QUEST.addKillId(Gargoyle)
QUEST.addKillId(Lady) |
import collections
from functools import wraps
from inspect import signature
def string_to_list(s, sep=";"):
if s is None:
return s
if not isinstance(s, str) and isinstance(s, collections.abc.Iterable):
return s
return s.rstrip(sep).split(sep) if s else []
def list_to_string(l, sep=";"):
if l is None:
return l
if isinstance(l, str):
return l
if not isinstance(l, collections.abc.Iterable):
return l
return sep.join(l) if l else ""
def convert_list_arguments(*indices):
def decorator(f):
sig = signature(f)
names = tuple(sig.parameters.keys())
@wraps(f)
def wrapper(*args, **kwargs):
ba = sig.bind(*args, **kwargs)
ba.apply_defaults()
for i in indices:
if isinstance(i, int):
i = names[i]
if isinstance(i, str):
ba.arguments[i] = list_to_string(ba.arguments[i])
args = ba.args
kwargs = ba.kwargs
return f(*args, **kwargs)
return wrapper
return decorator
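# Minimal usage sketch (the decorated function below is hypothetical, not part of this module):
# the decorator turns the selected list arguments into ';'-separated strings before the call.
if __name__ == "__main__":
    @convert_list_arguments("tags")
    def save(name, tags=None):
        return name, tags
    print(save("report", tags=["a", "b", "c"]))  # -> ('report', 'a;b;c')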
|
"""
extended learning rate scheduler,
which adaptive changes the learning rate based on the progress
"""
import logging
import mxnet as mx
class FixedScheduler(mx.lr_scheduler.LRScheduler):
def __call__(self, num_update):
return self.base_lr
class LinearScheduler(mx.lr_scheduler.LRScheduler):
"""Reduce learning rate linearly
Assume the weight has been updated by n times, then the learning rate will
be
base_lr * (1 - n/iters)
    Parameters
    ----------
    updates: int
        maximum number of updates over which the learning rate decays linearly
    frequency: int
        log the current learning rate every `frequency` updates (0 disables logging)
    stop_lr: float
        lower bound on the learning rate; ignored if not positive
    offset: int
        number of updates assumed to have already been applied before scheduling starts
    """
def __init__(self, updates, frequency=0, stop_lr=-1., offset=0):
super(LinearScheduler, self).__init__()
if updates < 1:
            raise ValueError('Scheduler requires the max number of updates to be greater than 1')
self._updates = updates
self._frequency = frequency
self._stop_lr = stop_lr
self._offset = offset
self._pre_updates = -1
def __call__(self, num_update):
"""
Call to schedule current learning rate
Parameters
----------
num_update: int
the maximal number of updates applied to a weight.
"""
now_update = self._offset + num_update
if now_update > self._updates:
if self._pre_updates != num_update:
                print('Exceeds the number of updates, {} > {}'.format(now_update, self._updates))
self._pre_updates = num_update
now_update = self._updates
lr = self.base_lr * (1 - float(now_update) / self._updates)
if self._stop_lr > 0. and lr < self._stop_lr:
lr = self._stop_lr
if self._frequency > 0 and num_update % self._frequency == 0 and self._pre_updates != num_update:
logging.info('Update[%d]: Current learning rate is %0.5e',
num_update, lr)
self._pre_updates = num_update
return lr
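# Minimal usage sketch (illustrative values, shown as comments since this module only defines schedulers):
# sched = LinearScheduler(updates=1000, frequency=100, stop_lr=1e-4)
# sched.base_lr = 0.1   # base_lr is inherited from mx.lr_scheduler.LRScheduler
# sched(500)            # -> 0.1 * (1 - 500/1000) = 0.05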
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: tensorflow/core/framework/tensor_shape.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='tensorflow/core/framework/tensor_shape.proto',
package='tensorflow',
syntax='proto3',
serialized_pb=_b('\n,tensorflow/core/framework/tensor_shape.proto\x12\ntensorflow\"z\n\x10TensorShapeProto\x12-\n\x03\x64im\x18\x02 \x03(\x0b\x32 .tensorflow.TensorShapeProto.Dim\x12\x14\n\x0cunknown_rank\x18\x03 \x01(\x08\x1a!\n\x03\x44im\x12\x0c\n\x04size\x18\x01 \x01(\x03\x12\x0c\n\x04name\x18\x02 \x01(\tB/\n\x18org.tensorflow.frameworkB\x11TensorShapeProtosP\x01\x62\x06proto3')
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_TENSORSHAPEPROTO_DIM = _descriptor.Descriptor(
name='Dim',
full_name='tensorflow.TensorShapeProto.Dim',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='size', full_name='tensorflow.TensorShapeProto.Dim.size', index=0,
number=1, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='name', full_name='tensorflow.TensorShapeProto.Dim.name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=149,
serialized_end=182,
)
_TENSORSHAPEPROTO = _descriptor.Descriptor(
name='TensorShapeProto',
full_name='tensorflow.TensorShapeProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='dim', full_name='tensorflow.TensorShapeProto.dim', index=0,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='unknown_rank', full_name='tensorflow.TensorShapeProto.unknown_rank', index=1,
number=3, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[_TENSORSHAPEPROTO_DIM, ],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=60,
serialized_end=182,
)
_TENSORSHAPEPROTO_DIM.containing_type = _TENSORSHAPEPROTO
_TENSORSHAPEPROTO.fields_by_name['dim'].message_type = _TENSORSHAPEPROTO_DIM
DESCRIPTOR.message_types_by_name['TensorShapeProto'] = _TENSORSHAPEPROTO
TensorShapeProto = _reflection.GeneratedProtocolMessageType('TensorShapeProto', (_message.Message,), dict(
Dim = _reflection.GeneratedProtocolMessageType('Dim', (_message.Message,), dict(
DESCRIPTOR = _TENSORSHAPEPROTO_DIM,
__module__ = 'tensorflow.core.framework.tensor_shape_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.TensorShapeProto.Dim)
))
,
DESCRIPTOR = _TENSORSHAPEPROTO,
__module__ = 'tensorflow.core.framework.tensor_shape_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.TensorShapeProto)
))
_sym_db.RegisterMessage(TensorShapeProto)
_sym_db.RegisterMessage(TensorShapeProto.Dim)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\030org.tensorflow.frameworkB\021TensorShapeProtosP\001'))
# @@protoc_insertion_point(module_scope)
|
#!/usr/bin/env python
"""
dnolivieri: 23 dec 2015
Bootstrap workflow for the MR ensemble RF code.
grep \> all_R.fasta | awk -F"|" '{print $1; split($1,a,"-"); locus[a[3]]+=1}END{for (i in locus){print i, locus[i]}}'
david@treg:/media/disk2TB/BioInf/VsRF/analysis/trees$ cat $(ls *_R.fasta) > all_R.fasta
david@treg:/media/disk2TB/BioInf/VsRF/analysis/trees$ cat $(ls *_S.fasta) > all_S.fasta
grep \> all_R.fasta | awk -F"|" '{split($1,a,"-"); s=sprintf("%s-%s", substr(a[2],1,6),a[3]); print s}'
david@treg:/media/disk2TB/BioInf/VsRF/analysis/trees$ grep \> all_R.fasta | awk -F"|" '{split($1,a,"-"); s=sprintf("%s-%s", substr(a[2],1,6),a[3]); locus[s]+=1}END{for (i in locus){split(i,b,"-"); print b[2
cat $(ls -1 *_R.fasta) > all_R.fasta; cat $(ls -1 *_S.fasta) > all_S.fasta
### This is to get info:
grep \> all_R.fasta | awk -F"|" '{split($1,a,"-"); s=sprintf("%s-%s", substr(a[2],1,6),a[3]); locus[s]+=1}END{for (i in locus){split(i,b,"-"); print b[2],i, locus[i]}}' | sort
"""
import dendropy
from dendropy.calculate import treemeasure
from dendropy import treecalc
from Bio import SeqIO
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from Bio import SeqFeature
import KdeDist01 as KDE
class compareTrees:
def __init__(self, loci_classes):
self.loci_classes=loci_classes
def write_sequences(self, infile, M, extension):
outFile = infile.replace(".fasta",extension+".fasta")
ofile = open(outFile, "w")
for record in SeqIO.parse(infile, "fasta"):
if record.id.split("[")[0] in M:
SeqIO.write(record, ofile, "fasta")
ofile.close()
def get_distance(self, infile):
"""
tree = dendropy.Tree.get(
path="pythonidae.mle.nex",
schema="nexus")
"""
tree = dendropy.Tree.get(
path=infile,
schema="newick")
pdm = treemeasure.PatristicDistanceMatrix(tree)
A = [ t.label for t in tree.taxon_namespace ]
#print A
Linitial = len(A)
#pdm = treecalc.PatristicDistanceMatrix(tree)
cnt=0
for i, t1 in enumerate(tree.taxon_namespace):
for t2 in tree.taxon_namespace[i+1:]:
if pdm(t1, t2) < 0.002:
cnt+=1
#print("Distance between '%s' and '%s': %s" % (t1.label, t2.label, pdm(t1, t2)))
print t1.label, t2.label
if t1.label in A:
A.remove(t1.label)
if t2.label in A:
A.remove(t2.label)
R = [i for i in A if 'RF' in i]
S = [i for i in A if 'Vs' in i]
print "Tot initial=", Linitial
print "Total=", cnt
return R, S
def get_dist_data(self, infile, series):
D = {}
for l in self.loci_classes:
D.update({l:[]})
for record in SeqIO.parse(infile, "fasta"):
rec_name=str(record.id)
rec_desc=str(record.description)
mr_prob=0.0
if series=='R':
mr_prob = float(rec_desc.split("|")[2])
locus=rec_name.split("-")[2]
else:
mr_prob = float(rec_name.split("-")[3].split("[")[0])
locus=rec_name.split("-")[2]
if locus in self.loci_classes:
D[locus].append( (mr_prob, rec_name) )
if mr_prob>2.6:
print rec_name
return D
def getKdedistributions(self, infile, series):
D = self.get_dist_data(infile, series)
make_plots=True
Kd = KDE.KDEProbDistribution(D, self.loci_classes)
if make_plots:
X_Means = Kd.get_kde_struct(show_plot=True)
print "X_Means=",X_Means
def dist_pairs(self, t1x, t2x):
tree = dendropy.Tree.get(
path=infile,
schema="newick")
pdm = treemeasure.PatristicDistanceMatrix(tree)
A = [ t.label for t in tree.taxon_namespace ]
for i, t1 in enumerate(tree.taxon_namespace):
for t2 in tree.taxon_namespace[i+1:]:
if ((t1.label.split("-")[0] == t1x) and (t2.label.split("-")[0] == t2x)) or ((t1.label.split("-")[0] == t2x) and (t2.label.split("-")[0] == t1x)):
print t1.label, t2.label, pdm(t1, t2)
# -----------------------------------------------
if __name__ == '__main__':
infile="Macaca_fascicularis_AQIA01.nwk"
mlist=["Chlorocebus_AQIB01.nwk","Gorilla_gorilla_CABD02.nwk","Macaca_fascicularis_AQIA01.nwk","Mandrillus_leucophaeus_JYKQ01.nwk","Microcebus_murinus_ABDC01.nwk","Nomascus_leucogenys_ADFV01.nwk","Pan_paniscus_AJFE01.nwk","Papio_anubis_AHZZ01.nwk","Pongo_abelii_ABGA01.nwk","Propithecus_coquereli_JZKE01.nwk","Rhinopithecus_roxellana_JABR01.nwk","Saimiri_AGCE01.nwk", "Tarsius_syrichta_ABRT01.nwk"]
#mlist=["Microcebus_murinus_ABDC01.nwk"]
#mlist=["Macaca_mulatta_MMUL01.nwk"]
loci_classes=[ 'ighv', 'iglv', 'igkv', 'trav','trbv','trgv', 'trdv']
T=compareTrees(loci_classes)
for i in mlist:
infile = "./trees/"+i
print infile
R,S = T.get_distance(infile)
print "len(R)=", len(R), " len(S)=", len(S)
print "S=", S
fstfile = infile.replace(".nwk", ".fasta")
T.write_sequences( fstfile, R, "_R")
T.write_sequences( fstfile, S, "_S")
"""
#infile="./trees/all_R.fasta"
#T.getKdedistributions(infile, 'R')
infile="./trees/all_S.fasta"
T.getKdedistributions(infile, 'S')
"""
"""
infile="./trees/Macaca_fascicularis_AQIA01.nwk"
t1 = 'Vs395'
t2= 'V93RF'
T.dist_pairs(t1, t2)
"""
|
# coding:utf-8
import numpy as np
import pandas as pd
from sklearn import svm
import seaborn as sns
import scipy.io as sio
import matplotlib.pyplot as plt
from sklearn import metrics
# from sklearn.grid_search import GridSearchCV
from sklearn.model_selection import GridSearchCV
if __name__ == '__main__':
mat = sio.loadmat('data/ex6data3.mat')
print(mat.keys())
training = pd.DataFrame(mat.get('X'), columns=['X1', 'X2'])
training['y'] = mat.get('y')
cv = pd.DataFrame(mat.get('Xval'), columns=['X1', 'X2'])
cv['y'] = mat.get('yval')
candidate = [0.01, 0.03, 0.1, 0.3, 1, 3, 10, 30, 100]
parameters = {'C': candidate, 'gamma': candidate}
svc = svm.SVC()
clf = GridSearchCV(svc, parameters, n_jobs=4)
clf.fit(training[['X1', 'X2']], training['y'])
print(clf.best_params_)
print(clf.best_score_)
ypred = clf.predict(cv[['X1', 'X2']])
print(metrics.classification_report(cv['y'], ypred))
|
import shutil
import subprocess
import sys
import unittest
from mock_tests import test_sqlparsing
import os
from pymongo import MongoClient
client = MongoClient()
TEST_DIR = os.path.dirname(os.path.abspath(__file__))
if __name__ == '__main__':
result = unittest.TextTestRunner(verbosity=2, failfast=True).run(
unittest.TestLoader().loadTestsFromModule(test_sqlparsing)
)
if not result.wasSuccessful():
sys.exit(1)
app_root = os.path.join(TEST_DIR, 'djongo_tests', 'project')
dummy_app = os.path.join(app_root, 'dummy')
if 'migrations' in os.listdir(dummy_app):
shutil.rmtree(os.path.join(dummy_app, 'migrations'))
client.drop_database('djongo-test')
manage_py = os.path.join(app_root, "manage.py")
cmds = [
'makemigrations dummy',
'migrate',
# 'inspectdb',
# 'test'
]
for cmd in cmds:
subprocess.run(f'python {manage_py} {cmd}', check=True) |
import copy
from hypothesis import given
from tests.utils import (BoundPortedLeavesPair,
are_bound_ported_leaves_equal)
from . import strategies
@given(strategies.leaves_pairs)
def test_shallow(leaves_pair: BoundPortedLeavesPair) -> None:
bound, ported = leaves_pair
assert are_bound_ported_leaves_equal(copy.copy(bound), copy.copy(ported))
@given(strategies.leaves_pairs)
def test_deep(leaves_pair: BoundPortedLeavesPair) -> None:
bound, ported = leaves_pair
assert are_bound_ported_leaves_equal(copy.deepcopy(bound),
copy.deepcopy(ported))
|
import sys
from subprocess import call
from .command import BaseCommand
class Command(BaseCommand):
help = 'Run background task worker'
def add_arguments(self, parser):
        parser.add_argument('-b', '--beats', action='store_true', help='Run beat scheduler with worker')
def execute(self, args):
print(args)
arguments = ''
if args.beats: arguments = '-B'
try:
sys.exit(call(f'celery -A rested.worker worker {arguments} --loglevel=info', shell=True))
except KeyboardInterrupt:
            pass # should pass these interrupts to the subprocess; right now the parent closes before the subprocess
|
from sandglass.time.api import API
from sandglass.time.api import ApiDescribeResource
class ApiV1DescribeResource(ApiDescribeResource):
"""
Resource to describe API version 1.
"""
version = "v1"
def describe(self):
resource_info_list = []
for resource in self.resources:
path = resource.get_collection_path()
resource_info = {
'name': resource.name,
'path': path,
'describe': "{}@describe".format(path),
'doc': (resource.__doc__.strip() if resource.__doc__ else ''),
}
resource_info_list.append(resource_info)
data = {
'version': self.version,
'resources': resource_info_list,
}
return data
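# Illustrative shape of the payload returned by describe() (resource names and paths are made up):
# {"version": "v1",
#  "resources": [{"name": "tasks", "path": "/tasks",
#                 "describe": "/tasks@describe", "doc": "Task resource."}]}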
def includeme(config):
"""
Load API version 1 resources.
"""
# API version must be the last item in route_prefix
version = config.route_prefix.split('/')[-1]
# Add support for describing resources in current API
config.add_resource_describe(version, ApiV1DescribeResource)
# Load API REST routes for current config path
config.add_api_rest_routes()
# Attach resources to API REST routes
for resource in API.get_resources(version):
config.add_rest_resource(resource)
|
# Must be run from parent directory: poetic pumas
import os
import sys
import time
from random import choice
from string import ascii_lowercase as KEYS
from typing import Dict
from blessed import Terminal
from pygame import mixer
from soundboard import Soundboard
def draw(x: int, y: int, char: str) -> None:
"""Test"""
print(term.move_xy(x, y) + term.bold_on_red + char, end="", flush=True)
def load_sounds() -> Dict[str, Dict[str, mixer.Sound]]:
"""Loads all sounds in the current directory.
Returns a (random) keymap to play the sounds.
:return: a dictionary with each key pointing to a tuple with the name of
the file and the corresponding mixer.Sound object
"""
sounds = [
{"name": x[:-4], "sound": mixer.Sound(os.path.join(Soundboard.SFX_DIR, x))}
for x in sorted(os.listdir(Soundboard.SFX_DIR))
if x.endswith(".wav")
]
keys = []
while len(keys) < len(sounds):
k = choice(KEYS)
if k not in keys:
keys.append(k)
keymap = {k: v for k, v in zip(sorted(keys), sounds)}
mixer.set_num_channels(len(sounds))
return keymap
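# Illustrative keymap shape (file names made up):
# {"a": {"name": "boing", "sound": <mixer.Sound>}, "c": {"name": "tada", "sound": <mixer.Sound>}}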
if __name__ == "__main__":
term = Terminal()
s = Soundboard()
keymap = load_sounds()
with term.cbreak(), term.hidden_cursor():
s.play_music("blockdude")
# clear the screen
print(term.home + term.clear)
x, y = 0, 0
sep = term.width // 6
# Draw TUI
for k in sorted(keymap.keys()):
draw(x, y, f"{k}:{keymap[k]['name']}")
x += sep
if x > term.width - sep:
x = 0
y += term.height // 4
wait_time = 0.2
last_inp = None
last_inp_time = 0
inp = ""
while True:
try:
inp = repr(term.inkey()).strip("'")
except KeyboardInterrupt:
sys.exit(0)
if inp in keymap.keys() and (
inp != last_inp or (time.time() - last_inp_time > wait_time)
):
s.play_sfx(keymap[inp]['name'])
last_inp = inp
last_inp_time = time.time()
|
import os
import math
import pathlib
import sys
import numpy as np
import pandas as pd
from datetime import datetime
from openpyxl import Workbook, load_workbook
import parser_april20_backward
import parser_may20_forward
def read_data(path):
try:
data = pd.read_html(path, decimal=',', thousands='.')
return data[0]
except Exception as excep:
        sys.stderr.write("Não foi possível ler o arquivo: " +
                         path + '. O seguinte erro foi gerado: ' + str(excep))
os._exit(1)
# Strange way to check nan. Only I managed to make work
# Source: https://stackoverflow.com/a/944712/5822594
def isNaN(string):
return string != string
def get_begin_row(rows, begin_string):
begin_row = 0
if(type(rows[0][1]) == str):
return begin_row
else:
for row in rows:
if row[1] == begin_string:
begin_row += 1
break
begin_row += 1
        # We need to keep iterating until we see a value that is not
        # whitespace. That happens due to the spreadsheet formatting.
while isNaN(rows[begin_row][1]):
begin_row += 1
return begin_row
def get_end_row(rows, begin_row, end_string):
end_row = 0
if("MPMG" not in rows[0]):
for row in rows:
# First goes to begin_row.
if end_row < begin_row:
end_row += 1
continue
            # Then keep moving until we find the TOTAL row.
if row[0] == end_string:
end_row -= 1
break
end_row += 1
return end_row
def parse(month, year, file_names):
employees = {}
for fn in file_names:
if "Verbas Indenizatorias" not in fn:
# Puts all parsed employees in the big map
if (int(year) < 2020) or (int(month) <= 4 and year == "2020"):
employees.update(parser_april20_backward.parse_employees(fn))
else:
employees.update(parser_may20_forward.parse_employees(fn))
try:
for fn in file_names:
if "Verbas Indenizatorias" in fn:
if (int(year) < 2020) or (int(month) <= 4 and year == "2020"):
parser_april20_backward.update_employee_indemnity(fn, employees)
else:
parser_may20_forward.update_employee_indemnity(fn, employees)
except KeyError as e:
sys.stderr.write(
"Registro inválido ao processar verbas indenizatórias: {}".format(e)
)
os._exit(1)
return list(employees.values())
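# Minimal usage sketch (file names are hypothetical): sheets up to April 2020 go through
# parser_april20_backward, later ones through parser_may20_forward, and any
# "Verbas Indenizatorias" sheet only updates the already-parsed employees.
# employees = parse("05", "2020", ["remuneracao.html", "Verbas Indenizatorias.html"])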
|
# -*- coding: utf-8 -*-
import os, sys, commands, csv, datetime
import pygame
SHOULD_RUN_FULLSCREEN = os.getenv('PHOTOMATON_SHOULD_RUN_FULLSCREEN') == '1'
PATH_TO_LAST_CAPTURED_PHOTO = os.getenv('PHOTOMATON_LAST_CAPTURED_TEXT_PATH')
PATH_TO_PHOTOSETS_CSV = os.getenv('PHOTOMATON_PHOTOSETS_CSV_PATH')
COMMAND_FOR_TAKING_A_PHOTO = os.getenv('PHOTOMATON_COMMAND_FOR_TAKING_A_PHOTO')
COMMAND_FOR_PLAYING_SUCCESS_SFX = os.getenv('PHOTOMATON_COMMAND_FOR_PLAYING_SUCCESS_SFX')
PATH_TO_BACKGROUND_IMAGE = "%s/background.png" % os.getenv('PHOTOMATON_RESOURCES_DIR')
PATH_TO_DEFAULT_QUADRANT_IMAGE = "%s/default.png" % os.getenv('PHOTOMATON_RESOURCES_DIR')
WINDOW_CAPTION = os.getenv('PHOTOMATON_WINDOW_CAPTION')
RENDERER_WIDTH, RENDERER_HEIGHT = [int(os.getenv(e)) for e in ['PHOTOMATON_UI_SCREEN_WIDTH', 'PHOTOMATON_UI_SCREEN_HEIGHT']]
# -----------------------------------------
QUADRANT_SIZE = QUADRANT_WIDTH, QUADRANT_HEIGHT = RENDERER_WIDTH/2, RENDERER_HEIGHT/2
QUADRANT_POSITIONS = [
(0, 0),
(QUADRANT_WIDTH, 0),
(0, QUADRANT_HEIGHT),
(QUADRANT_WIDTH, QUADRANT_HEIGHT),
]
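# Quadrant layout implied by QUADRANT_POSITIONS: index 0 = top-left, 1 = top-right,
# 2 = bottom-left, 3 = bottom-right; each quadrant is QUADRANT_WIDTH x QUADRANT_HEIGHT pixels.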
def ptm_load_image_and_scale_into_quadrant(path):
surface = pygame.image.load(path)
scaled_surface = pygame.transform.scale(surface, QUADRANT_SIZE)
return (path, scaled_surface)
SURFACE_FOR_DEFAULT_QUADRANT = ptm_load_image_and_scale_into_quadrant(PATH_TO_DEFAULT_QUADRANT_IMAGE)
SURFACE_FOR_BACKGROUND = pygame.transform.scale(pygame.image.load(PATH_TO_BACKGROUND_IMAGE), (RENDERER_WIDTH, RENDERER_HEIGHT))
# ----------------------------------------- state
# what to display on the next frame
# - 'idle' displays a background image
# - 'show_quadrants' displays the last 4 captured images
ptm_current_mode = 'idle'
# holds the 4 latest captured image as (path, pygame surface) tuples
ptm_quadrants = [SURFACE_FOR_DEFAULT_QUADRANT]*4
# -----------------------------------------
def ptm_last_captured_image_path():
f = open(PATH_TO_LAST_CAPTURED_PHOTO, 'r')
last_image_path = f.readline().strip()
f.close()
return last_image_path
def ptm_take_photo_then_display_into_quadrant(index_into_ptm_quadrants):
    output = subprocess.getstatusoutput(COMMAND_FOR_TAKING_A_PHOTO)
if output[0] != 0:
print("Error: failed to take a photo because this command failed : '%s'" % COMMAND_FOR_TAKING_A_PHOTO)
print("stderr for that command:\n%s" % output[1])
sys.exit(1)
path = ptm_last_captured_image_path()
ptm_quadrants[index_into_ptm_quadrants] = ptm_load_image_and_scale_into_quadrant(path)
def ptm_reset_quadrant(index):
ptm_quadrants[index] = SURFACE_FOR_DEFAULT_QUADRANT
def ptm_write_photoset_filenames_into_csv():
f = open(PATH_TO_PHOTOSETS_CSV, 'a+')
writer = csv.writer(f, delimiter=';', quotechar='"', quoting=csv.QUOTE_MINIMAL)
writer.writerow([os.path.basename(e[0]) for e in ptm_quadrants])
f.close()
print("[%s] Photoset successfully saved" % datetime.datetime.now().strftime('%H:%M:%S'))
    output = subprocess.getstatusoutput(COMMAND_FOR_PLAYING_SUCCESS_SFX)
def ptm_run():
pygame.init()
screen = pygame.display.set_mode((RENDERER_WIDTH, RENDERER_HEIGHT), pygame.FULLSCREEN if SHOULD_RUN_FULLSCREEN else 0, 32)
pygame.display.set_caption(WINDOW_CAPTION)
clock = pygame.time.Clock()
def render():
if ptm_current_mode == 'show_quadrants':
for ptm_image, pos in zip(ptm_quadrants, QUADRANT_POSITIONS):
path, image = ptm_image
screen.blit(image, pos)
elif ptm_current_mode == 'idle':
screen.blit(SURFACE_FOR_BACKGROUND, (0,0))
pygame.display.flip()
def capture_one_photoset_then_revert_to_default_state():
global ptm_current_mode
ptm_current_mode = 'show_quadrants'
render()
for i in [0,1,2,3]:
ptm_take_photo_then_display_into_quadrant(i)
render()
ptm_write_photoset_filenames_into_csv()
for i in [0,1,2,3]:
ptm_reset_quadrant(i)
ptm_current_mode = 'idle'
while True:
clock.tick(40) # fps
for event in pygame.event.get():
if event.type == pygame.MOUSEBUTTONUP:
capture_one_photoset_then_revert_to_default_state()
elif event.type == pygame.QUIT or (event.type == pygame.KEYUP and event.key in [pygame.K_q, pygame.K_ESCAPE]):
sys.exit(0)
pygame.event.clear()
render()
if __name__ == '__main__':
ptm_run()
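# Hypothetical launch sketch (values and the script name are assumptions; every
# setting is read from environment variables at the top of this file, and the
# remaining PHOTOMATON_* variables must be provided the same way):
#   PHOTOMATON_UI_SCREEN_WIDTH=1280 PHOTOMATON_UI_SCREEN_HEIGHT=800 \
#   PHOTOMATON_SHOULD_RUN_FULLSCREEN=0 \
#   PHOTOMATON_RESOURCES_DIR=./resources \
#   PHOTOMATON_WINDOW_CAPTION=Photomaton \
#   python photomaton.py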
|
def convert(string: str) -> int:
result = 0
bool_negative = False
if string.startswith("-"):
bool_negative = True
string = string[1:]
length = len(string)
for c in string:
if ord(c) >= ord("9") or ord(c) <= ord("0"):
return 0
result += int(c) * (10 ** (length - 1))
length -= 1
if (not bool_negative and result > 2 ** 31 - 1) or (bool_negative and result > 2 ** 31):
return 0
else:
return -result if bool_negative else result
if __name__ == '__main__':
s = str(2 ** 31 - 1)
print(convert(s))
s = "12345676543212345"
print(convert(s))
s = "-" + str(2 ** 31)
print(convert(s))
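    # Extra boundary checks (a small sketch of the overflow rule above): values
    # just outside the signed 32-bit range should clamp to 0.
    print(convert(str(2 ** 31)))             # 2147483648 -> 0
    print(convert("-" + str(2 ** 31 + 1)))   # -2147483649 -> 0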
|
# Modifications © 2019 Hashmap, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pandas as pd
from prefect import task
from sklearn.feature_selection import SelectFromModel
from sklearn.linear_model import Lasso
@task
def train(**kwargs):
# set the data paths
local_path = os.path.dirname(os.path.abspath(__file__))
data_path = os.path.realpath(os.path.join(local_path, '../data/engineered/superconduct/interactions'))
# Load the raw data
data_engineered = pd.read_parquet(data_path)
target = data_engineered.loc[:, ['critical_temp']]
data_engineered = data_engineered.loc[:, data_engineered.columns[:-1]]
    # Retain the column names; they need to be added back after the model has
    # been trained and the features have been selected
features = data_engineered.columns
# Train the model selector
clf = Lasso(alpha=.35, precompute=True, max_iter=50)
model = SelectFromModel(clf, threshold=0.25)
model.fit(data_engineered, target['critical_temp'].values)
# Apply the model selector
data_engineered = pd.DataFrame(
model.transform(data_engineered)
)
data_engineered.columns = [feature for support, feature in zip(model.get_support(), features) if support]
# Append target column back onto dataset
    data_engineered['critical_temp'] = target['critical_temp'].values
# Write the results to parquet
output_path = os.path.realpath(os.path.join(local_path, '../data/selected/superconduct'))
if not os.path.exists(output_path):
os.makedirs(output_path, exist_ok=True)
output_path = os.path.realpath(os.path.join(local_path, '../data/selected/superconduct/lasso'))
data_engineered.to_parquet(output_path)
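# Hypothetical flow wiring (a sketch; the flow name is an assumption and this
# assumes the Prefect 1.x API that the @task decorator above belongs to):
# from prefect import Flow
# with Flow('superconduct-lasso-feature-selection') as flow:
#     train()
# flow.run()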
|
import logging
from malduck.extractor import Extractor
from malduck.pe import MemoryPEData
log = logging.getLogger(__name__)
__author__ = "c3rb3ru5"
__version__ = "1.0.0"
class Azorult(Extractor):
"""
Azorult C2 Domain Configuration Extractor
"""
family = 'azorult'
yara_rules = 'azorult',
@Extractor.extractor('ref_c2')
def ref_c2(self, p, addr):
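        # Read the dword 21 bytes past the matched reference; it is treated as
        # the VA of the ASCIIZ C2 string embedded in the sample.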
c2_list_va = p.uint32v(addr + 21)
c2 = p.asciiz(c2_list_va).decode('utf-8')
if len(c2) <= 0:
return None
return {'family': 'azorult', 'urls': [c2]}
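# Hypothetical invocation sketch (paths are assumptions): malduck normally
# drives extractors like this one through its ExtractManager, e.g.
# from malduck.extractor import ExtractManager, ExtractorModules
# modules = ExtractorModules('/path/to/extractor/modules')
# manager = ExtractManager(modules)
# manager.push_file('/path/to/azorult_memory_dump.bin')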
|
from notebooks.profiles._base_profile import BaseProfile # noqa: F401
from notebooks.profiles._euclidean_profile import EuclideanProfile # noqa: F401
from notebooks.profiles._naive_bayes_profile import NaiveBayesProfile # noqa: F401
from notebooks.profiles._voting_profile import VotingProfile # noqa: F401
|
from django.http import HttpResponse
from django.shortcuts import render, redirect
from django.contrib.auth import authenticate, login, get_user_model
from django.contrib.auth.models import User
from django.utils.http import is_safe_url
from django.conf.urls.static import static
from .forms import ContactForm, LoginForm, RegisterForm, GuestForm
from .models import GuestEmail
# Create your views here.
def login_page(request):
form = LoginForm(request.POST or None)
context = {
'title': 'Login Page',
'form': form
}
next_ = request.GET.get('next')
next_post = request.POST.get('next')
redirect_path = next_ or next_post or None
if form.is_valid():
username = form.cleaned_data.get('username')
password = form.cleaned_data.get('password')
user = authenticate(request, username=username, password=password)
        if user is not None:
            login(request, user)
            print('User logged in')
try:
del request.session['guest_email_id']
            except KeyError:
pass
if is_safe_url(redirect_path, request.get_host()):
return redirect(redirect_path)
else:
return redirect('/')
else:
print('Error')
return render(request, 'accounts/login.html', context)
User = get_user_model()
def register_page(request):
form = RegisterForm(request.POST or None)
context = {
'title': 'Register Page',
'form': form
}
if form.is_valid():
print(form.cleaned_data)
username = form.cleaned_data.get('username')
email = form.cleaned_data.get('email')
password = form.cleaned_data.get('password')
new_user = User.objects.create_user(username, email, password)
print(new_user)
return render(request, 'accounts/register.html', context)
def guest_register_view(request):
form = GuestForm(request.POST or None)
context = {'form': form}
next_ = request.GET.get('next')
next_post = request.POST.get('next')
redirect_path = next_ or next_post or None
if form.is_valid():
email = form.cleaned_data.get('email')
new_guest_email = GuestEmail.objects.create(email=email)
request.session['guest_email_id'] = new_guest_email.id
if is_safe_url(redirect_path, request.get_host()):
return redirect(redirect_path)
else:
return redirect('register')
return redirect('/register')
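# Hypothetical URL wiring sketch (module layout and route names are
# assumptions, not part of this app):
# from django.urls import path
# from .views import login_page, register_page, guest_register_view
# urlpatterns = [
#     path('login/', login_page, name='login'),
#     path('register/', register_page, name='register'),
#     path('register/guest/', guest_register_view, name='guest_register'),
# ]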
|
import plotly.express as px
from mobspy import *
"""
Here we have a NOR_GATE
There are two possible repressors for the Promoter A and B
If any of them bind to the Promoter the protein can no longer be expressed
"""
def NOR_GATE(A_conc, B_conc):
    # Here we define the Protein to be produced and the Promoter that will act as the gate.
    # A and B are the inputs; either of them can inactivate the Promoter, so they inherit from Repressor.
    Repressor, Promoter, Protein = BaseSpecies(3)
Repressor + Promoter.active >> Promoter.inactive [0.5]
A, B = New(Repressor)
Promoter >> Promoter + Protein [lambda promoter: 1 if promoter.active else 0]
Protein >> Zero [2]
Promoter(100)
A(A_conc), B(B_conc)
MySim = Simulation(A | B | Promoter | Protein)
MySim.duration = 10
MySim.save_data = False
MySim.plot_data = False
MySim.run()
return MySim.results['data']['Protein']['runs'][0][-1]
heatmap = []
for a in [0, 25, 50, 75, 100]:
heatmap_line = []
for b in [0, 25, 50, 75, 100]:
output = NOR_GATE(a, b)
heatmap_line.append(output)
heatmap.append(heatmap_line)
for line in heatmap:
print(line)
fig = px.imshow(heatmap, x=[0, 25, 50, 75, 100], y=[0, 25, 50, 75, 100], labels=dict(x='b', y='a'))
fig.show() |
info = {
"name": "br",
"date_order": "YMD",
"january": [
"gen",
"genver"
],
"february": [
"c'hwe",
"c'hwevrer"
],
"march": [
"meur",
"meurzh"
],
"april": [
"ebr",
"ebrel"
],
"may": [
"mae"
],
"june": [
"mezh",
"mezheven"
],
"july": [
"goue",
"gouere"
],
"august": [
"eost"
],
"september": [
"gwen",
"gwengolo"
],
"october": [
"here"
],
"november": [
"du"
],
"december": [
"ker",
"kerzu",
"kzu"
],
"monday": [
"lun"
],
"tuesday": [
"meu",
"meurzh"
],
"wednesday": [
"mer",
"merc'her"
],
"thursday": [
"yaou"
],
"friday": [
"gwe",
"gwener"
],
"saturday": [
"sad",
"sadorn"
],
"sunday": [
"sul"
],
"am": [
"am"
],
"pm": [
"gm"
],
"year": [
"bl",
"bloaz"
],
"month": [
"miz"
],
"week": [
"sizhun"
],
"day": [
"d",
"deiz"
],
"hour": [
"e",
"eur"
],
"minute": [
"min",
"munut"
],
"second": [
"eilenn",
"s"
],
"relative-type": {
"0 day ago": [
"hiziv"
],
"0 hour ago": [
"this hour"
],
"0 minute ago": [
"this minute"
],
"0 month ago": [
"ar miz-mañ"
],
"0 second ago": [
"brem",
"bremañ"
],
"0 week ago": [
"ar sizhun-mañ"
],
"0 year ago": [
"hevlene"
],
"1 day ago": [
"dec'h"
],
"1 month ago": [
"ar miz diaraok"
],
"1 week ago": [
"ar sizhun diaraok"
],
"1 year ago": [
"warlene"
],
"in 1 day": [
"warc'hoazh"
],
"in 1 month": [
"ar miz a zeu"
],
"in 1 week": [
"ar sizhun a zeu"
],
"in 1 year": [
"ar bl a zeu",
"ar bloaz a zeu"
]
},
"relative-type-regex": {
"\\1 day ago": [
"(\\d+) d zo",
"(\\d+) deiz zo"
],
"\\1 hour ago": [
"(\\d+) e zo",
"(\\d+) eur zo"
],
"\\1 minute ago": [
"(\\d+) min zo",
"(\\d+) munut zo"
],
"\\1 month ago": [
"(\\d+) miz zo"
],
"\\1 second ago": [
"(\\d+) eilenn zo",
"(\\d+) s zo"
],
"\\1 week ago": [
"(\\d+) sizhun zo"
],
"\\1 year ago": [
"(\\d+) bl zo",
"(\\d+) bloaz zo",
"(\\d+) vloaz zo"
],
"in \\1 day": [
"a-benn (\\d+) d",
"a-benn (\\d+) deiz"
],
"in \\1 hour": [
"a-benn (\\d+) e",
"a-benn (\\d+) eur"
],
"in \\1 minute": [
"a-benn (\\d+) min",
"a-benn (\\d+) munut"
],
"in \\1 month": [
"a-benn (\\d+) miz"
],
"in \\1 second": [
"a-benn (\\d+) eilenn",
"a-benn (\\d+) s"
],
"in \\1 week": [
"a-benn (\\d+) sizhun"
],
"in \\1 year": [
"a-benn (\\d+) bl",
"a-benn (\\d+) bloaz",
"a-benn (\\d+) vloaz"
]
},
"locale_specific": {},
"skip": [
" ",
"'",
",",
"-",
".",
"/",
";",
"@",
"[",
"]",
"|",
","
]
}
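# Hypothetical usage sketch (assuming this table feeds dateparser's locale
# machinery, which its structure suggests):
# import dateparser
# dateparser.parse("dec'h", languages=['br'])            # -> yesterday
# dateparser.parse("a-benn 2 sizhun", languages=['br'])  # -> two weeks from now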
|